@@ -36,6 +36,8 @@
 
 #include "trace.h"
 
+#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
+
 static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
 {
 	return atomic64_read(&synic->sint[sint]);
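KVM_HV_MAX_SPARSE_VCPU_SET_BITS sizes the flat VP bitmap in 64-bit banks, matching the Hyper-V sparse VP set format in which each bank covers 64 VP indices; for example, a KVM_MAX_VCPUS of 288 would give DIV_ROUND_UP(288, 64) = 5 banks.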
@@ -1277,37 +1279,47 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 		return kvm_hv_get_msr(vcpu, msr, pdata, host);
 }
 
-static __always_inline bool hv_vcpu_in_sparse_set(struct kvm_vcpu_hv *hv_vcpu,
-						  u64 sparse_banks[],
-						  u64 valid_bank_mask)
+static __always_inline unsigned long *sparse_set_to_vcpu_mask(
+	struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
+	u64 *vp_bitmap, unsigned long *vcpu_bitmap)
 {
-	int bank = hv_vcpu->vp_index / 64, sbank;
-
-	if (bank >= 64)
-		return false;
+	struct kvm_hv *hv = &kvm->arch.hyperv;
+	struct kvm_vcpu *vcpu;
+	int i, bank, sbank = 0;
 
-	if (!(valid_bank_mask & BIT_ULL(bank)))
-		return false;
+	memset(vp_bitmap, 0,
+	       KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
+	for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
+			 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
+		vp_bitmap[bank] = sparse_banks[sbank++];
 
-	/* Sparse bank number equals to the number of set bits before it */
-	sbank = bitmap_weight((unsigned long *)&valid_bank_mask, bank);
+	if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
+		/* for all vcpus vp_index == vcpu_idx */
+		return (unsigned long *)vp_bitmap;
+	}
 
-	return !!(sparse_banks[sbank] & BIT_ULL(hv_vcpu->vp_index % 64));
+	bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index,
+			     (unsigned long *)vp_bitmap))
+			__set_bit(i, vcpu_bitmap);
+	}
+	return vcpu_bitmap;
 }
 
 static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
 			    u16 rep_cnt, bool ex)
 {
 	struct kvm *kvm = current_vcpu->kvm;
-	struct kvm_hv *hv = &kvm->arch.hyperv;
 	struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv;
 	struct hv_tlb_flush_ex flush_ex;
 	struct hv_tlb_flush flush;
-	struct kvm_vcpu *vcpu;
-	unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0};
+	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
+	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
+	unsigned long *vcpu_mask;
 	u64 valid_bank_mask;
 	u64 sparse_banks[64];
-	int sparse_banks_len, i, bank, sbank;
+	int sparse_banks_len;
 	bool all_cpus;
 
 	if (!ex) {
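For reference, the dense-to-flat expansion at the top of sparse_set_to_vcpu_mask() can be modeled in isolation. The sketch below is a minimal userspace rendering, not kernel code: MAX_VCPUS, the helper name, and the plain bit loops (standing in for the kernel's for_each_set_bit()) are all illustrative.

#include <stdint.h>
#include <stdio.h>

#define MAX_VCPUS 128
#define MAX_BANKS ((MAX_VCPUS + 63) / 64)	/* DIV_ROUND_UP(MAX_VCPUS, 64) */

/*
 * Hyper-V sparse VP sets store only the banks whose bit is set in
 * valid_bank_mask, in ascending bank order; expansion scatters them
 * into a flat bitmap indexed directly by VP number.
 */
static void expand_sparse_set(uint64_t valid_bank_mask,
			      const uint64_t *sparse_banks,
			      uint64_t *vp_bitmap)
{
	int bank, sbank = 0;

	for (bank = 0; bank < MAX_BANKS; bank++)
		vp_bitmap[bank] = (valid_bank_mask & (1ULL << bank)) ?
				  sparse_banks[sbank++] : 0;
}

int main(void)
{
	/* Banks 0 and 1 present: targets VP 1, VP 3 and VP 64 + 5 = 69. */
	uint64_t sparse_banks[] = { 0x000000000000000AULL,
				    0x0000000000000020ULL };
	uint64_t vp_bitmap[MAX_BANKS];
	int vp;

	expand_sparse_set(0x3, sparse_banks, vp_bitmap);

	for (vp = 0; vp < MAX_VCPUS; vp++)
		if (vp_bitmap[vp / 64] & (1ULL << (vp % 64)))
			printf("VP %d targeted\n", vp);
	return 0;
}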
@@ -1350,73 +1362,58 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
 			return HV_STATUS_INVALID_HYPERCALL_INPUT;
 	}
 
-	/*
-	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
-	 * analyze it here, flush TLB regardless of the specified address space.
-	 */
 	cpumask_clear(&hv_vcpu->tlb_flush);
 
-	if (all_cpus) {
-		kvm_make_vcpus_request_mask(kvm,
-				    KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
-				    NULL, &hv_vcpu->tlb_flush);
-		goto ret_success;
-	}
-
-	if (atomic_read(&hv->num_mismatched_vp_indexes)) {
-		kvm_for_each_vcpu(i, vcpu, kvm) {
-			if (hv_vcpu_in_sparse_set(&vcpu->arch.hyperv,
-						  sparse_banks,
-						  valid_bank_mask))
-				__set_bit(i, vcpu_bitmap);
-		}
-		goto flush_request;
-	}
+	vcpu_mask = all_cpus ? NULL :
+		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
+					vp_bitmap, vcpu_bitmap);
 
 	/*
-	 * num_mismatched_vp_indexes is zero so every vcpu has
-	 * vp_index == vcpu_idx.
+	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
+	 * analyze it here, flush TLB regardless of the specified address space.
 	 */
-	sbank = 0;
-	for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
-			 BITS_PER_LONG) {
-		for_each_set_bit(i,
-				 (unsigned long *)&sparse_banks[sbank],
-				 BITS_PER_LONG) {
-			u32 vp_index = bank * 64 + i;
-
-			/* A non-existent vCPU was specified */
-			if (vp_index >= KVM_MAX_VCPUS)
-				return HV_STATUS_INVALID_HYPERCALL_INPUT;
-
-			__set_bit(vp_index, vcpu_bitmap);
-		}
-		sbank++;
-	}
-
-flush_request:
 	kvm_make_vcpus_request_mask(kvm,
 				    KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
-				    vcpu_bitmap, &hv_vcpu->tlb_flush);
+				    vcpu_mask, &hv_vcpu->tlb_flush);
 
 ret_success:
 	/* We always do full TLB flush, set rep_done = rep_cnt. */
 	return (u64)HV_STATUS_SUCCESS |
 		((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
 }
 
+static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
+				 unsigned long *vcpu_bitmap)
+{
+	struct kvm_lapic_irq irq = {
+		.delivery_mode = APIC_DM_FIXED,
+		.vector = vector
+	};
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
+			continue;
+
+		/* We fail only when APIC is disabled */
+		kvm_apic_set_irq(vcpu, &irq, NULL);
+	}
+}
+
 static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
 			   bool ex, bool fast)
 {
 	struct kvm *kvm = current_vcpu->kvm;
-	struct kvm_hv *hv = &kvm->arch.hyperv;
 	struct hv_send_ipi_ex send_ipi_ex;
 	struct hv_send_ipi send_ipi;
-	struct kvm_vcpu *vcpu;
+	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
+	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
+	unsigned long *vcpu_mask;
 	unsigned long valid_bank_mask;
 	u64 sparse_banks[64];
-	int sparse_banks_len, bank, i, sbank;
-	struct kvm_lapic_irq irq = {.delivery_mode = APIC_DM_FIXED};
+	int sparse_banks_len;
+	u32 vector;
 	bool all_cpus;
 
 	if (!ex) {
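Note the convention the refactoring relies on: a NULL mask means "every vCPU". kvm_make_vcpus_request_mask() already treats a NULL bitmap that way, and the new kvm_send_ipi_to_many() mirrors it with its vcpu_bitmap && !test_bit(...) filter, which is what lets the old if (all_cpus) special case collapse into the all_cpus ? NULL : ... ternary. A one-function sketch of the assumed semantics (the helper name is hypothetical):

#include <stdbool.h>

/* Hypothetical model: a NULL mask targets every vCPU, else test the bit. */
static bool vcpu_is_targeted(const unsigned long *vcpu_mask, unsigned int idx)
{
	const unsigned int bits = 8 * sizeof(unsigned long);

	if (!vcpu_mask)
		return true;	/* NULL acts as a wildcard */
	return (vcpu_mask[idx / bits] >> (idx % bits)) & 1UL;
}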
@@ -1425,18 +1422,18 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
 						    sizeof(send_ipi))))
 				return HV_STATUS_INVALID_HYPERCALL_INPUT;
 			sparse_banks[0] = send_ipi.cpu_mask;
-			irq.vector = send_ipi.vector;
+			vector = send_ipi.vector;
 		} else {
 			/* 'reserved' part of hv_send_ipi should be 0 */
 			if (unlikely(ingpa >> 32 != 0))
 				return HV_STATUS_INVALID_HYPERCALL_INPUT;
 			sparse_banks[0] = outgpa;
-			irq.vector = (u32)ingpa;
+			vector = (u32)ingpa;
 		}
 		all_cpus = false;
 		valid_bank_mask = BIT_ULL(0);
 
-		trace_kvm_hv_send_ipi(irq.vector, sparse_banks[0]);
+		trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
 	} else {
 		if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
 					    sizeof(send_ipi_ex))))
@@ -1446,7 +1443,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
 					 send_ipi_ex.vp_set.format,
 					 send_ipi_ex.vp_set.valid_bank_mask);
 
-		irq.vector = send_ipi_ex.vector;
+		vector = send_ipi_ex.vector;
 		valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
 		sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
 			sizeof(sparse_banks[0]);
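Since only the banks present in valid_bank_mask are stored, sparse_banks_len scales with bitmap_weight() rather than with the full 64-bank range: for instance, valid_bank_mask = 0x9 (banks 0 and 3) gives 2 * sizeof(u64) = 16 bytes to read from guest memory.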
@@ -1465,42 +1462,14 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
 			return HV_STATUS_INVALID_HYPERCALL_INPUT;
 	}
 
-	if ((irq.vector < HV_IPI_LOW_VECTOR) ||
-	    (irq.vector > HV_IPI_HIGH_VECTOR))
+	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
 		return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
-	if (all_cpus || atomic_read(&hv->num_mismatched_vp_indexes)) {
-		kvm_for_each_vcpu(i, vcpu, kvm) {
-			if (all_cpus || hv_vcpu_in_sparse_set(
-				    &vcpu->arch.hyperv, sparse_banks,
-				    valid_bank_mask)) {
-				/* We fail only when APIC is disabled */
-				kvm_apic_set_irq(vcpu, &irq, NULL);
-			}
-		}
-		goto ret_success;
-	}
+	vcpu_mask = all_cpus ? NULL :
+		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
+					vp_bitmap, vcpu_bitmap);
 
-	/*
-	 * num_mismatched_vp_indexes is zero so every vcpu has
-	 * vp_index == vcpu_idx.
-	 */
-	sbank = 0;
-	for_each_set_bit(bank, (unsigned long *)&valid_bank_mask, 64) {
-		for_each_set_bit(i, (unsigned long *)&sparse_banks[sbank], 64) {
-			u32 vp_index = bank * 64 + i;
-			struct kvm_vcpu *vcpu =
-				get_vcpu_by_vpidx(kvm, vp_index);
-
-			/* Unknown vCPU specified */
-			if (!vcpu)
-				continue;
-
-			/* We fail only when APIC is disabled */
-			kvm_apic_set_irq(vcpu, &irq, NULL);
-		}
-		sbank++;
-	}
+	kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
 
 ret_success:
 	return HV_STATUS_SUCCESS;
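The payoff of the rewrite is visible in the removed hunk: the old slow path called get_vcpu_by_vpidx() for every VP index in every present bank, and kvm_hv_flush_tlb() carried a parallel copy of the same double loop. Both hypercalls now expand the sparse set once in sparse_set_to_vcpu_mask(), walk the vCPU list only when some vp_index differs from its vcpu index, and hand a single mask to a shared sender.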