@@ -439,85 +439,36 @@ static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
 				  end_gfn << PAGE_SHIFT);
 }
 
-static int handle_hva_to_gpa(struct kvm *kvm,
-			     unsigned long start,
-			     unsigned long end,
-			     int (*handler)(struct kvm *kvm, gfn_t gfn,
-					    gpa_t gfn_end,
-					    struct kvm_memory_slot *memslot,
-					    void *data),
-			     void *data)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
-	int ret = 0;
-
-	slots = kvm_memslots(kvm);
-
-	/* we only care about the pages that the guest sees */
-	kvm_for_each_memslot(memslot, slots) {
-		unsigned long hva_start, hva_end;
-		gfn_t gfn, gfn_end;
-
-		hva_start = max(start, memslot->userspace_addr);
-		hva_end = min(end, memslot->userspace_addr +
-					(memslot->npages << PAGE_SHIFT));
-		if (hva_start >= hva_end)
-			continue;
-
-		/*
-		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
-		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-		 */
-		gfn = hva_to_gfn_memslot(hva_start, memslot);
-		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
-		ret |= handler(kvm, gfn, gfn_end, memslot, data);
-	}
-
-	return ret;
-}
-
-
-static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-				 struct kvm_memory_slot *memslot, void *data)
-{
-	kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
-	return 1;
-}
-
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
-			unsigned flags)
-{
-	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+	kvm_mips_flush_gpa_pt(kvm, info->start, info->end);
 
 	kvm_mips_callbacks->flush_shadow_all(kvm);
 	return 0;
 }
 
-static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-				struct kvm_memory_slot *memslot, void *data)
+static bool __kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	gpa_t gpa = gfn << PAGE_SHIFT;
-	pte_t hva_pte = *(pte_t *)data;
+	gpa_t gpa = info->start << PAGE_SHIFT;
+	pte_t hva_pte = info->pte;
 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
 	pte_t old_pte;
 
 	if (!gpa_pte)
-		return 0;
+		return false;
 
 	/* Mapping may need adjusting depending on memslot flags */
 	old_pte = *gpa_pte;
-	if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
+	if (info->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
 		hva_pte = pte_mkclean(hva_pte);
-	else if (memslot->flags & KVM_MEM_READONLY)
+	else if (info->slot->flags & KVM_MEM_READONLY)
 		hva_pte = pte_wrprotect(hva_pte);
 
 	set_pte(gpa_pte, hva_pte);
 
 	/* Replacing an absent or old page doesn't need flushes */
 	if (!pte_present(old_pte) || !pte_young(old_pte))
-		return 0;
+		return false;
 
 	/* Pages swapped, aged, moved, or cleaned require flushes */
 	return !pte_present(hva_pte) ||
@@ -526,44 +477,28 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
 	       (pte_dirty(old_pte) && !pte_dirty(hva_pte));
 }
 
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	unsigned long end = hva + PAGE_SIZE;
-	int ret;
-
-	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
-	if (ret)
+	if (__kvm_set_spte_gfn(kvm, info))
 		kvm_mips_callbacks->flush_shadow_all(kvm);
-	return 0;
+	return false;
 }
 
-static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-			       struct kvm_memory_slot *memslot, void *data)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
+	return kvm_mips_mkold_gpa_pt(kvm, info->start, info->end);
 }
 
-static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-				    struct kvm_memory_slot *memslot, void *data)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	gpa_t gpa = gfn << PAGE_SHIFT;
+	gpa_t gpa = info->start << PAGE_SHIFT;
 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
 
 	if (!gpa_pte)
 		return 0;
 	return pte_young(*gpa_pte);
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
-{
-	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
-	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
-}
-
 /**
  * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
  * @vcpu:		VCPU pointer.
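
For reference, the struct kvm_gfn_info argument taken by the new callbacks is not defined in this hunk. Based on how the handlers above dereference it (info->start, info->end, info->pte, info->slot), it presumably carries roughly the fields sketched below; this is an inference, not the definition from the patch. In mainline KVM the corresponding argument type for these gfn-based MMU notifier callbacks is struct kvm_gfn_range, declared in include/linux/kvm_host.h.

	/*
	 * Sketch only: field names and layout inferred from the accesses in
	 * the new callbacks above, not taken from this diff.
	 */
	struct kvm_gfn_info {
		struct kvm_memory_slot *slot;	/* memslot containing the range */
		gfn_t start;			/* first gfn of the affected range */
		gfn_t end;			/* end of the range (apparently exclusive) */
		pte_t pte;			/* new host PTE, used by kvm_set_spte_gfn() */
	};

With this shape, the arch code no longer walks memslots itself: the generic MMU notifier side resolves the HVA range to per-memslot gfn ranges and invokes kvm_unmap_gfn_range(), kvm_set_spte_gfn(), kvm_age_gfn(), and kvm_test_age_gfn() once per affected slot, which is why handle_hva_to_gpa() and the per-callback handler wrappers are deleted here.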