
Commit 8b0030d

KVM: arm64: Convert to the gfn-based MMU notifier callbacks
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 1b7ce8e commit 8b0030d

2 files changed: 32 additions, 82 deletions
arch/arm64/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
@@ -582,6 +582,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 			      struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
+#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
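Defining KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS opts arm64 into the gfn-based callbacks implemented in mmu.c below: generic KVM now performs the hva-to-gfn memslot walk and hands each architecture a pre-translated gfn range. As a rough sketch only, the payload consumed by the new callbacks would look something like the following, inferred purely from the fields this patch dereferences (info->start, info->end, info->pte, info->may_block); the authoritative definition lives in the generic KVM headers and may contain additional fields:

/*
 * Sketch of the struct passed to the gfn-based callbacks, inferred
 * from this patch's usage alone. Not the authoritative definition.
 */
struct kvm_gfn_info {
	gfn_t start;		/* first gfn of the affected range */
	gfn_t end;		/* exclusive end gfn of the range */
	pte_t pte;		/* new PTE, consumed by the change_pte path */
	bool may_block;		/* whether the callback is allowed to sleep */
};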

arch/arm64/kvm/mmu.c

Lines changed: 31 additions & 82 deletions
@@ -839,7 +839,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
 	 * the page we just got a reference to gets unmapped before we have a
 	 * chance to grab the mmu_lock, which ensure that if the page gets
-	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
+	 * unmapped afterwards, the call to kvm_unmap_gfn will take it away
 	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
 	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
 	 */
@@ -1064,123 +1064,72 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static int handle_hva_to_gpa(struct kvm *kvm,
-			     unsigned long start,
-			     unsigned long end,
-			     int (*handler)(struct kvm *kvm,
-					    gpa_t gpa, u64 size,
-					    void *data),
-			     void *data)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
-	int ret = 0;
-
-	slots = kvm_memslots(kvm);
-
-	/* we only care about the pages that the guest sees */
-	kvm_for_each_memslot(memslot, slots) {
-		unsigned long hva_start, hva_end;
-		gfn_t gpa;
-
-		hva_start = max(start, memslot->userspace_addr);
-		hva_end = min(end, memslot->userspace_addr +
-					(memslot->npages << PAGE_SHIFT));
-		if (hva_start >= hva_end)
-			continue;
-
-		gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
-		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
-	}
-
-	return ret;
-}
+	if (!kvm->arch.mmu.pgt)
+		return 0;
 
-static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
-	unsigned flags = *(unsigned *)data;
-	bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
+	__unmap_stage2_range(&kvm->arch.mmu, info->start << PAGE_SHIFT,
+			     (info->end - info->start) << PAGE_SHIFT,
+			     info->may_block);
 
-	__unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
 	return 0;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end, unsigned flags)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
+	kvm_pfn_t pfn = pte_pfn(info->pte);
+
 	if (!kvm->arch.mmu.pgt)
 		return 0;
 
-	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
-	return 0;
-}
-
-static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
-	kvm_pfn_t *pfn = (kvm_pfn_t *)data;
+	WARN_ON(info->end - info->start != 1);
 
-	WARN_ON(size != PAGE_SIZE);
+	/*
+	 * We've moved a page around, probably through CoW, so let's treat it
+	 * just like a translation fault and clean the cache to the PoC.
+	 */
+	clean_dcache_guest_page(pfn, PAGE_SIZE);
 
 	/*
 	 * The MMU notifiers will have unmapped a huge PMD before calling
-	 * ->change_pte() (which in turn calls kvm_set_spte_hva()) and
+	 * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
 	 * therefore we never need to clear out a huge PMD through this
 	 * calling path and a memcache is not required.
 	 */
-	kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, gpa, PAGE_SIZE,
-			       __pfn_to_phys(*pfn), KVM_PGTABLE_PROT_R, NULL);
+	kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, info->start << PAGE_SHIFT,
+			       PAGE_SIZE, __pfn_to_phys(pfn),
+			       KVM_PGTABLE_PROT_R, NULL);
+
 	return 0;
 }
 
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	unsigned long end = hva + PAGE_SIZE;
-	kvm_pfn_t pfn = pte_pfn(pte);
+	u64 size = (info->end - info->start) << PAGE_SHIFT;
+	kvm_pte_t kpte;
+	pte_t pte;
 
 	if (!kvm->arch.mmu.pgt)
 		return 0;
 
-	/*
-	 * We've moved a page around, probably through CoW, so let's treat it
-	 * just like a translation fault and clean the cache to the PoC.
-	 */
-	clean_dcache_guest_page(pfn, PAGE_SIZE);
-	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn);
-	return 0;
-}
 
-static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
-	pte_t pte;
-	kvm_pte_t kpte;
 
 	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
-	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, gpa);
+
+	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
+					info->start << PAGE_SHIFT);
 	pte = __pte(kpte);
 	return pte_valid(pte) && pte_young(pte);
 }
 
-static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
-	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
-	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, gpa);
-}
-
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
-{
-	if (!kvm->arch.mmu.pgt)
-		return 0;
-
-	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
 	if (!kvm->arch.mmu.pgt)
 		return 0;
 
-	return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
-				 kvm_test_age_hva_handler, NULL);
+	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
+					   info->start << PAGE_SHIFT);
 }
 
 phys_addr_t kvm_mmu_get_httbr(void)
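All of the converted callbacks share the same address arithmetic: a gfn becomes a stage-2 IPA by shifting left by PAGE_SHIFT, and a (start, end) gfn pair becomes a byte length the same way. A minimal, self-contained illustration of that arithmetic (hypothetical values, standalone C rather than kernel code):

/* Standalone illustration of the gfn -> IPA arithmetic used above. */
#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4KiB pages, as on a typical arm64 config */

int main(void)
{
	uint64_t start = 0x80000;	/* hypothetical first gfn */
	uint64_t end   = 0x80200;	/* hypothetical exclusive end gfn */

	/* Mirrors kvm_unmap_gfn_range(): base IPA and byte length. */
	uint64_t ipa  = start << PAGE_SHIFT;
	uint64_t size = (end - start) << PAGE_SHIFT;

	printf("IPA base 0x%" PRIx64 ", size 0x%" PRIx64 "\n", ipa, size);
	return 0;
}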
