
Commit e912bea
Parent: 8b0030d

KVM: MIPS/MMU: Convert to the gfn-based MMU notifier callbacks

Signed-off-by: Sean Christopherson <seanjc@google.com>

2 files changed, 17 insertions(+), 81 deletions(-)
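
With the gfn-based MMU notifier callbacks, common KVM code resolves the notifier's hva range into per-memslot gfn ranges and hands the arch code a ready-made descriptor, which is why handle_hva_to_gpa() and its per-event handler wrappers can be deleted from arch/mips/kvm/mmu.c below. The descriptor type is defined in common code and is not part of this diff; the minimal sketch below is inferred solely from how the new MIPS handlers use it (info->slot, info->start, info->end, info->pte) and may not match the real definition:

/*
 * Hedged sketch only, not part of this commit. Field list inferred from
 * the usage in the new MIPS callbacks; the actual common-code definition
 * may differ (extra fields, a different name, etc.).
 */
struct kvm_gfn_info {
	struct kvm_memory_slot *slot;	/* memslot covering the range */
	gfn_t start;			/* first gfn affected */
	gfn_t end;			/* exclusive end gfn */
	pte_t pte;			/* new host PTE, used by kvm_set_spte_gfn() */
};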

arch/mips/include/asm/kvm_host.h (1 addition, 0 deletions)

@@ -967,6 +967,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
 					      bool write);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
+#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 
 /* Emulation */
 int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

arch/mips/kvm/mmu.c (16 additions, 81 deletions)

@@ -439,85 +439,36 @@ static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
 			      end_gfn << PAGE_SHIFT);
 }
 
-static int handle_hva_to_gpa(struct kvm *kvm,
-			     unsigned long start,
-			     unsigned long end,
-			     int (*handler)(struct kvm *kvm, gfn_t gfn,
-					    gpa_t gfn_end,
-					    struct kvm_memory_slot *memslot,
-					    void *data),
-			     void *data)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
-	int ret = 0;
-
-	slots = kvm_memslots(kvm);
-
-	/* we only care about the pages that the guest sees */
-	kvm_for_each_memslot(memslot, slots) {
-		unsigned long hva_start, hva_end;
-		gfn_t gfn, gfn_end;
-
-		hva_start = max(start, memslot->userspace_addr);
-		hva_end = min(end, memslot->userspace_addr +
-					(memslot->npages << PAGE_SHIFT));
-		if (hva_start >= hva_end)
-			continue;
-
-		/*
-		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
-		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-		 */
-		gfn = hva_to_gfn_memslot(hva_start, memslot);
-		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
-		ret |= handler(kvm, gfn, gfn_end, memslot, data);
-	}
-
-	return ret;
-}
-
-
-static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-				 struct kvm_memory_slot *memslot, void *data)
-{
-	kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
-	return 1;
-}
-
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
-			unsigned flags)
-{
-	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+	kvm_mips_flush_gpa_pt(kvm, info->start, info->end);
 
 	kvm_mips_callbacks->flush_shadow_all(kvm);
 	return 0;
 }
 
-static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-				struct kvm_memory_slot *memslot, void *data)
+static bool __kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	gpa_t gpa = gfn << PAGE_SHIFT;
-	pte_t hva_pte = *(pte_t *)data;
+	gpa_t gpa = info->start << PAGE_SHIFT;
+	pte_t hva_pte = info->pte;
 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
 	pte_t old_pte;
 
 	if (!gpa_pte)
-		return 0;
+		return false;
 
 	/* Mapping may need adjusting depending on memslot flags */
 	old_pte = *gpa_pte;
-	if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
+	if (info->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
 		hva_pte = pte_mkclean(hva_pte);
-	else if (memslot->flags & KVM_MEM_READONLY)
+	else if (info->slot->flags & KVM_MEM_READONLY)
 		hva_pte = pte_wrprotect(hva_pte);
 
 	set_pte(gpa_pte, hva_pte);
 
 	/* Replacing an absent or old page doesn't need flushes */
 	if (!pte_present(old_pte) || !pte_young(old_pte))
-		return 0;
+		return false;
 
 	/* Pages swapped, aged, moved, or cleaned require flushes */
 	return !pte_present(hva_pte) ||
@@ -526,44 +477,28 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
 	       (pte_dirty(old_pte) && !pte_dirty(hva_pte));
 }
 
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	unsigned long end = hva + PAGE_SIZE;
-	int ret;
-
-	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
-	if (ret)
+	if (__kvm_set_spte_gfn(kvm, info))
 		kvm_mips_callbacks->flush_shadow_all(kvm);
-	return 0;
+	return false;
 }
 
-static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-			       struct kvm_memory_slot *memslot, void *data)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
+	return kvm_mips_mkold_gpa_pt(kvm, info->start, info->end);
 }
 
-static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-				    struct kvm_memory_slot *memslot, void *data)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	gpa_t gpa = gfn << PAGE_SHIFT;
+	gpa_t gpa = info->start << PAGE_SHIFT;
 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
 
 	if (!gpa_pte)
 		return 0;
 	return pte_young(*gpa_pte);
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
-{
-	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
-	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
-}
-
 /**
  * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
  * @vcpu:	VCPU pointer.

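For context, here is a hedged sketch of what the common MMU-notifier side is now expected to do on the arch's behalf, mirroring the handle_hva_to_gpa() loop removed above. example_unmap_hva_range() is an illustrative name only; the real common-code helper, its locking, and its exact iteration API are not shown in this commit:

/*
 * Illustration only, not part of this commit: walk every memslot that
 * intersects [start, end), convert the hva bounds to gfns, and invoke the
 * gfn-based arch hook, much as the removed handle_hva_to_gpa() used to.
 */
static bool example_unmap_hva_range(struct kvm *kvm, unsigned long start,
				    unsigned long end)
{
	struct kvm_memory_slot *memslot;
	struct kvm_gfn_info info;
	bool flush = false;

	kvm_for_each_memslot(memslot, kvm_memslots(kvm)) {
		unsigned long hva_start = max(start, memslot->userspace_addr);
		unsigned long hva_end = min(end, memslot->userspace_addr +
					    (memslot->npages << PAGE_SHIFT));

		if (hva_start >= hva_end)
			continue;

		info.slot = memslot;
		info.start = hva_to_gfn_memslot(hva_start, memslot);
		info.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		/* The MIPS kvm_unmap_gfn_range() above flushes the GPA page
		 * tables and the shadow state itself. */
		flush |= kvm_unmap_gfn_range(kvm, &info);
	}

	return flush;
}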