@@ -405,38 +405,16 @@ static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
-static int intel_iommu_ecs = 1;
-static int intel_iommu_pasid28;
+static int intel_iommu_sm = 1;
 static int iommu_identity_mapping;
 
 #define IDENTMAP_ALL		1
 #define IDENTMAP_GFX		2
 #define IDENTMAP_AZALIA		4
 
-/* Broadwell and Skylake have broken ECS support — normal so-called "second
- * level" translation of DMA requests-without-PASID doesn't actually happen
- * unless you also set the NESTE bit in an extended context-entry. Which of
- * course means that SVM doesn't work because it's trying to do nested
- * translation of the physical addresses it finds in the process page tables,
- * through the IOVA->phys mapping found in the "second level" page tables.
- *
- * The VT-d specification was retroactively changed to change the definition
- * of the capability bits and pretend that Broadwell/Skylake never happened...
- * but unfortunately the wrong bit was changed. It's ECS which is broken, but
- * for some reason it was the PASID capability bit which was redefined (from
- * bit 28 on BDW/SKL to bit 40 in future).
- *
- * So our test for ECS needs to eschew those implementations which set the old
- * PASID capability bit 28, since those are the ones on which ECS is broken.
- * Unless we are working around the 'pasid28' limitations, that is, by putting
- * the device into passthrough mode for normal DMA and thus masking the bug.
- */
-#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
-				 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
-/* PASID support is thus enabled if ECS is enabled and *either* of the old
- * or new capability bits are set. */
-#define pasid_enabled(iommu)	(ecs_enabled(iommu) && \
-				 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
+#define sm_supported(iommu)	(intel_iommu_sm && ecap_smts((iommu)->ecap))
+#define pasid_supported(iommu)	(sm_supported(iommu) && \
+				 ecap_pasid((iommu)->ecap))
 
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
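
For readers new to these helpers: sm_supported() gates on a single bit of the DMAR extended capability register. Below is a minimal standalone sketch (not part of this commit) of how ecap_smts() and ecap_pasid() decode ECAP; the bit positions follow the VT-d 3.0 spec, and the kernel's real macros live in include/linux/intel-iommu.h:

	#include <stdint.h>
	#include <stdio.h>

	#define ecap_smts(e)	(((e) >> 43) & 0x1)	/* Scalable Mode Translation Support */
	#define ecap_pasid(e)	(((e) >> 40) & 0x1)	/* PASID support */

	int main(void)
	{
		uint64_t ecap = (1ULL << 43) | (1ULL << 40);	/* hypothetical ECAP value */

		printf("smts=%d pasid=%d\n", (int)ecap_smts(ecap), (int)ecap_pasid(ecap));
		return 0;
	}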
@@ -516,15 +494,9 @@ static int __init intel_iommu_setup(char *str)
 	} else if (!strncmp(str, "sp_off", 6)) {
 		pr_info("Disable supported super page\n");
 		intel_iommu_superpage = 0;
-	} else if (!strncmp(str, "ecs_off", 7)) {
-		printk(KERN_INFO
-			"Intel-IOMMU: disable extended context table support\n");
-		intel_iommu_ecs = 0;
-	} else if (!strncmp(str, "pasid28", 7)) {
-		printk(KERN_INFO
-			"Intel-IOMMU: enable pre-production PASID support\n");
-		intel_iommu_pasid28 = 1;
-		iommu_identity_mapping |= IDENTMAP_GFX;
+	} else if (!strncmp(str, "sm_off", 6)) {
+		pr_info("Intel-IOMMU: disable scalable mode support\n");
+		intel_iommu_sm = 0;
 	} else if (!strncmp(str, "tboot_noforce", 13)) {
 		printk(KERN_INFO
 			"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -771,7 +743,7 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
 	u64 *entry;
 
 	entry = &root->lo;
-	if (ecs_enabled(iommu)) {
+	if (sm_supported(iommu)) {
 		if (devfn >= 0x80) {
 			devfn -= 0x80;
 			entry = &root->hi;
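
The devfn adjustment exists because, in scalable mode (as with the old extended context tables), each 128-bit root entry covers one bus but its low and high 64-bit halves point at separate context tables for devfn 0x00-0x7f and 0x80-0xff. A hypothetical helper (illustration only, not kernel code) mirroring the selection logic:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static void sm_context_slot(uint8_t devfn, bool *use_hi, uint8_t *index)
	{
		*use_hi = devfn >= 0x80;	/* high half covers devfn 0x80..0xff */
		*index = *use_hi ? devfn - 0x80 : devfn;
	}

	int main(void)
	{
		bool hi;
		uint8_t idx;

		sm_context_slot(0x9b, &hi, &idx);
		printf("hi=%d idx=0x%x\n", hi, idx);	/* hi=1 idx=0x1b */
		return 0;
	}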
@@ -913,7 +885,7 @@ static void free_context_table(struct intel_iommu *iommu)
 		if (context)
 			free_pgtable_page(context);
 
-		if (!ecs_enabled(iommu))
+		if (!sm_supported(iommu))
 			continue;
 
 		context = iommu_context_addr(iommu, i, 0x80, 0);
@@ -1265,8 +1237,6 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
 	unsigned long flag;
 
 	addr = virt_to_phys(iommu->root_entry);
-	if (ecs_enabled(iommu))
-		addr |= DMA_RTADDR_RTT;
 
 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
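
The dropped RTT flag told hardware to treat the root table as the (now removed) extended-context format, so this hunk intentionally leaves the address unflagged; scalable mode is advertised to hardware separately (later kernels set a distinct DMA_RTADDR_SMT flag here when sm_supported() holds).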
@@ -1755,7 +1725,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 	free_context_table(iommu);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
-	if (pasid_enabled(iommu)) {
+	if (pasid_supported(iommu)) {
 		if (ecap_prs(iommu->ecap))
 			intel_svm_finish_prq(iommu);
 		intel_svm_exit(iommu);
@@ -2464,8 +2434,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 		    dmar_find_matched_atsr_unit(pdev))
 			info->ats_supported = 1;
 
-		if (ecs_enabled(iommu)) {
-			if (pasid_enabled(iommu)) {
+		if (sm_supported(iommu)) {
+			if (pasid_supported(iommu)) {
 				int features = pci_pasid_features(pdev);
 				if (features >= 0)
 					info->pasid_supported = features | 1;
@@ -3277,7 +3247,7 @@ static int __init init_dmars(void)
 		 * We need to ensure the system pasid table is no bigger
 		 * than the smallest supported.
 		 */
-		if (pasid_enabled(iommu)) {
+		if (pasid_supported(iommu)) {
 			u32 temp = 2 << ecap_pss(iommu->ecap);
 
 			intel_pasid_max_id = min_t(u32, temp,
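
As a worked example of the sizing above: PSS is a 5-bit field (ECAP bits 39:35 per the VT-d spec) encoding the supported PASID width minus one, so 2 << pss yields the PASID count. A small sketch (assumed bit layout, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define ecap_pss(e)	(((e) >> 35) & 0x1f)	/* PASID Size Supported */

	int main(void)
	{
		uint64_t ecap = 19ULL << 35;	/* hypothetical: PSS = 19 -> 20-bit PASIDs */

		printf("max pasids: %u\n", 2U << ecap_pss(ecap));	/* prints 1048576 */
		return 0;
	}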
@@ -3338,7 +3308,7 @@ static int __init init_dmars(void)
 		if (!ecap_pass_through(iommu->ecap))
 			hw_pass_through = 0;
 #ifdef CONFIG_INTEL_IOMMU_SVM
-		if (pasid_enabled(iommu))
+		if (pasid_supported(iommu))
 			intel_svm_init(iommu);
 #endif
 	}
@@ -3442,7 +3412,7 @@ static int __init init_dmars(void)
 		iommu_flush_write_buffer(iommu);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
-		if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
+		if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
 			ret = intel_svm_enable_prq(iommu);
 			if (ret)
 				goto free_iommu;
@@ -4331,7 +4301,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
 		goto out;
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
-	if (pasid_enabled(iommu))
+	if (pasid_supported(iommu))
 		intel_svm_init(iommu);
 #endif
 
@@ -4348,7 +4318,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
 	iommu_flush_write_buffer(iommu);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
-	if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
+	if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
 		ret = intel_svm_enable_prq(iommu);
 		if (ret)
 			goto disable_iommu;