@@ -1298,26 +1298,25 @@ static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 	return flush;
 }
 
-static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
-			   unsigned long data)
+static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+			    struct kvm_memory_slot *slot, gfn_t gfn, int level,
+			    pte_t unused)
 {
 	return kvm_zap_rmapp(kvm, rmap_head, slot);
 }
 
-static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
-			     unsigned long data)
+static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+			      struct kvm_memory_slot *slot, gfn_t gfn, int level,
+			      pte_t pte)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
 	int need_flush = 0;
 	u64 new_spte;
-	pte_t *ptep = (pte_t *)data;
 	kvm_pfn_t new_pfn;
 
-	WARN_ON(pte_huge(*ptep));
-	new_pfn = pte_pfn(*ptep);
+	WARN_ON(pte_huge(pte));
+	new_pfn = pte_pfn(pte);
 
 restart:
 	for_each_rmap_spte(rmap_head, &iter, sptep) {
@@ -1326,7 +1325,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 
 		need_flush = 1;
 
-		if (pte_write(*ptep)) {
+		if (pte_write(pte)) {
 			pte_list_remove(rmap_head, sptep);
 			goto restart;
 		} else {
@@ -1414,86 +1413,52 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
 	     slot_rmap_walk_okay(_iter_);				\
 	     slot_rmap_walk_next(_iter_))
 
-typedef int (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			      struct kvm_memory_slot *slot, gfn_t gfn,
-			      int level, unsigned long data);
+typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+			       struct kvm_memory_slot *slot, gfn_t gfn,
+			       int level, pte_t pte);
 
-static __always_inline int kvm_handle_hva_range(struct kvm *kvm,
-						unsigned long start,
-						unsigned long end,
-						unsigned long data,
-						rmap_handler_t handler)
+static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
+						 struct kvm_gfn_info *info,
+						 rmap_handler_t handler)
 {
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
 	struct slot_rmap_walk_iterator iterator;
-	int ret = 0;
-	int i;
-
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		slots = __kvm_memslots(kvm, i);
-		kvm_for_each_memslot(memslot, slots) {
-			unsigned long hva_start, hva_end;
-			gfn_t gfn_start, gfn_end;
+	bool ret = false;
 
-			hva_start = max(start, memslot->userspace_addr);
-			hva_end = min(end, memslot->userspace_addr +
-				      (memslot->npages << PAGE_SHIFT));
-			if (hva_start >= hva_end)
-				continue;
-			/*
-			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
-			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-			 */
-			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
-			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
-			for_each_slot_rmap_range(memslot, PG_LEVEL_4K,
-						 KVM_MAX_HUGEPAGE_LEVEL,
-						 gfn_start, gfn_end - 1,
-						 &iterator)
-				ret |= handler(kvm, iterator.rmap, memslot,
-					       iterator.gfn, iterator.level, data);
-		}
-	}
+	for_each_slot_rmap_range(info->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
+				 info->start, info->end - 1, &iterator)
+		ret |= handler(kvm, iterator.rmap, info->slot, iterator.gfn,
+			       iterator.level, info->pte);
 
 	return ret;
 }
 
-static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
-			  unsigned long data, rmap_handler_t handler)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
-}
-
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
-			unsigned flags)
-{
-	int r;
+	bool flush;
 
-	r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
+	flush = kvm_handle_gfn_range(kvm, info, kvm_unmap_rmapp);
 
 	if (is_tdp_mmu_enabled(kvm))
-		r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end);
+		flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, info, flush);
 
-	return r;
+	return flush;
 }
 
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	int r;
+	bool flush;
 
-	r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
+	flush = kvm_handle_gfn_range(kvm, info, kvm_set_pte_rmapp);
 
 	if (is_tdp_mmu_enabled(kvm))
-		r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);
+		flush |= kvm_tdp_mmu_set_spte_gfn(kvm, info);
 
-	return r;
+	return flush;
 }
 
-static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
-			 unsigned long data)
+static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+			  struct kvm_memory_slot *slot, gfn_t gfn, int level,
+			  pte_t unused)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
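
Note: kvm_handle_gfn_range() now walks a single memslot-bounded gfn range handed to it by its caller; the memslot and address-space iteration deleted above is expected to move into generic KVM code. The struct below is a hypothetical sketch of the kvm_gfn_info descriptor, with fields inferred purely from the dereferences in this hunk (info->slot, info->start, info->end, info->pte); the real definition may differ.

/*
 * Hypothetical sketch only: field names and types are inferred from the
 * usages in kvm_handle_gfn_range() above, not copied from a header.
 */
struct kvm_gfn_info {
	struct kvm_memory_slot *slot;	/* memslot containing the range */
	gfn_t start;			/* first gfn of the range (inclusive) */
	gfn_t end;			/* one past the last gfn (exclusive) */
	pte_t pte;			/* new host PTE for the set_spte path */
};
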
@@ -1506,9 +1471,9 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 	return young;
 }
 
-static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			      struct kvm_memory_slot *slot, gfn_t gfn,
-			      int level, unsigned long data)
+static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+			       struct kvm_memory_slot *slot, gfn_t gfn,
+			       int level, pte_t unused)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1530,29 +1495,31 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
 
-	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
+	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
 	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
 			KVM_PAGES_PER_HPAGE(sp->role.level));
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	int young = false;
+	bool young;
+
+	young = kvm_handle_gfn_range(kvm, info, kvm_age_rmapp);
 
-	young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
 	if (is_tdp_mmu_enabled(kvm))
-		young |= kvm_tdp_mmu_age_hva_range(kvm, start, end);
+		young |= kvm_tdp_mmu_age_gfn_range(kvm, info);
 
 	return young;
 }
 
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	int young = false;
+	bool young;
+
+	young = kvm_handle_gfn_range(kvm, info, kvm_test_age_rmapp);
 
-	young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
 	if (is_tdp_mmu_enabled(kvm))
-		young |= kvm_tdp_mmu_test_age_hva(kvm, hva);
+		young |= kvm_tdp_mmu_test_age_gfn(kvm, info);
 
 	return young;
 }
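
For context, here is a minimal sketch of how a generic caller could translate an MMU-notifier hva range into per-memslot gfn ranges and invoke the new arch hooks, reusing the iteration logic removed from kvm_handle_hva_range() above. This is an assumption about where that logic lands, not the patch's actual common code; the helper name handle_hva_range() is invented for illustration, and only helpers that appear in the deleted x86 code are used.

/*
 * Hypothetical common-code dispatcher (assumed, not taken from the patch):
 * it mirrors the memslot/address-space walk and hva->gfn conversion that the
 * deleted kvm_handle_hva_range() used to do in arch code.
 */
static bool handle_hva_range(struct kvm *kvm, unsigned long start,
			     unsigned long end, pte_t pte,
			     bool (*handler)(struct kvm *kvm,
					     struct kvm_gfn_info *info))
{
	struct kvm_memory_slot *memslot;
	struct kvm_memslots *slots;
	struct kvm_gfn_info info;
	bool ret = false;
	int i;

	info.pte = pte;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			unsigned long hva_start, hva_end;

			hva_start = max(start, memslot->userspace_addr);
			hva_end = min(end, memslot->userspace_addr +
				      (memslot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;

			/* Same inclusive start / exclusive end as the old walker. */
			info.slot = memslot;
			info.start = hva_to_gfn_memslot(hva_start, memslot);
			info.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1,
						      memslot);

			/* e.g. handler == kvm_unmap_gfn_range */
			ret |= handler(kvm, &info);
		}
	}

	return ret;
}

With the walk hoisted like this, kvm_unmap_gfn_range(), kvm_set_spte_gfn(), kvm_age_gfn() and kvm_test_age_gfn() each receive exactly one memslot-bounded range per call, which is why they reduce to a legacy-MMU handler call plus an optional TDP-MMU call.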