
Commit b16d0cb

David Woodhouse authored and committed
iommu/vt-d: Always enable PASID/PRI PCI capabilities before ATS
The behaviour if you enable PASID support after ATS is undefined. So we have to enable it first, even if we don't know whether we'll need it.

This is safe enough; unless we set up a context that permits it, the device can't actually *do* anything with it.

Also shift the feature detection to dmar_insert_one_dev_info() as it only needs to happen once.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
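For context, a minimal sketch of the ordering rule this commit enforces, using the kernel's generic PCI helpers that the patch itself calls (pci_enable_pasid(), pci_reset_pri(), pci_enable_pri(), pci_enable_ats()). The wrapper function and its parameters are illustrative only; locking, capability detection and error propagation from the real driver are omitted.

/* Illustrative only -- not the driver's actual flow. Assumes a PCI device
 * whose PASID capability mask was already read with pci_pasid_features(). */
#include <linux/pci.h>
#include <linux/pci-ats.h>

static void example_enable_caps_in_order(struct pci_dev *pdev, int pasid_features)
{
	/* 1. PASID first: enabling it after ATS is undefined per the PCIe spec. */
	if (pasid_features >= 0)
		pci_enable_pasid(pdev, pasid_features);

	/* 2. Then PRI, after resetting it so the device starts with no
	 *    outstanding page requests; 32 is the queue depth the patch uses. */
	if (!pci_reset_pri(pdev))
		pci_enable_pri(pdev, 32);

	/* 3. ATS last. The driver passes VTD_PAGE_SHIFT (12); PAGE_SHIFT is
	 *    used here only to keep the sketch self-contained. */
	pci_enable_ats(pdev, PAGE_SHIFT);
}

The disable path in the patch simply reverses this order: ATS is torn down first, then PRI, then PASID.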
1 parent 8a94ade · commit b16d0cb

File tree

2 files changed (+76, -35 lines)


drivers/iommu/Kconfig
Lines changed: 1 addition & 0 deletions

@@ -138,6 +138,7 @@ config INTEL_IOMMU
 config INTEL_IOMMU_SVM
 	bool "Support for Shared Virtual Memory with Intel IOMMU"
 	depends on INTEL_IOMMU && X86
+	select PCI_PASID
 	help
 	  Shared Virtual Memory (SVM) provides a facility for devices
 	  to access DMA resources through process address space by

drivers/iommu/intel-iommu.c
Lines changed: 75 additions & 35 deletions

@@ -418,10 +418,13 @@ struct device_domain_info {
 	struct list_head global; /* link to global list */
 	u8 bus;			/* PCI bus number */
 	u8 devfn;		/* PCI devfn number */
-	struct {
-		u8 enabled:1;
-		u8 qdep;
-	} ats;			/* ATS state */
+	u8 pasid_supported:3;
+	u8 pasid_enabled:1;
+	u8 pri_supported:1;
+	u8 pri_enabled:1;
+	u8 ats_supported:1;
+	u8 ats_enabled:1;
+	u8 ats_qdep;
 	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
 	struct intel_iommu *iommu; /* IOMMU used by this device */
 	struct dmar_domain *domain; /* pointer to domain */
@@ -1420,37 +1423,22 @@ static struct device_domain_info *
 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
 			 u8 bus, u8 devfn)
 {
-	bool found = false;
 	struct device_domain_info *info;
-	struct pci_dev *pdev;
 
 	assert_spin_locked(&device_domain_lock);
 
-	if (!ecap_dev_iotlb_support(iommu->ecap))
-		return NULL;
-
 	if (!iommu->qi)
 		return NULL;
 
 	list_for_each_entry(info, &domain->devices, link)
 		if (info->iommu == iommu && info->bus == bus &&
 		    info->devfn == devfn) {
-			found = true;
+			if (info->ats_supported && info->dev)
+				return info;
 			break;
 		}
 
-	if (!found || !info->dev || !dev_is_pci(info->dev))
-		return NULL;
-
-	pdev = to_pci_dev(info->dev);
-
-	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
-		return NULL;
-
-	if (!dmar_find_matched_atsr_unit(pdev))
-		return NULL;
-
-	return info;
+	return NULL;
 }
 
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
@@ -1461,20 +1449,48 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 		return;
 
 	pdev = to_pci_dev(info->dev);
-	if (pci_enable_ats(pdev, VTD_PAGE_SHIFT))
-		return;
 
-	info->ats.enabled = 1;
-	info->ats.qdep = pci_ats_queue_depth(pdev);
+#ifdef CONFIG_INTEL_IOMMU_SVM
+	/* The PCIe spec, in its wisdom, declares that the behaviour of
+	   the device if you enable PASID support after ATS support is
+	   undefined. So always enable PASID support on devices which
+	   have it, even if we can't yet know if we're ever going to
+	   use it. */
+	if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
+		info->pasid_enabled = 1;
+
+	if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
+		info->pri_enabled = 1;
+#endif
+	if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
+		info->ats_enabled = 1;
+		info->ats_qdep = pci_ats_queue_depth(pdev);
+	}
 }
 
 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 {
-	if (!info->ats.enabled)
+	struct pci_dev *pdev;
+
+	if (!dev_is_pci(info->dev))
 		return;
 
-	pci_disable_ats(to_pci_dev(info->dev));
-	info->ats.enabled = 0;
+	pdev = to_pci_dev(info->dev);
+
+	if (info->ats_enabled) {
+		pci_disable_ats(pdev);
+		info->ats_enabled = 0;
+	}
+#ifdef CONFIG_INTEL_IOMMU_SVM
+	if (info->pri_enabled) {
+		pci_disable_pri(pdev);
+		info->pri_enabled = 0;
+	}
+	if (info->pasid_enabled) {
+		pci_disable_pasid(pdev);
+		info->pasid_enabled = 0;
+	}
+#endif
 }
 
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -1486,11 +1502,11 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 
 	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_entry(info, &domain->devices, link) {
-		if (!info->ats.enabled)
+		if (!info->ats_enabled)
 			continue;
 
 		sid = info->bus << 8 | info->devfn;
-		qdep = info->ats.qdep;
+		qdep = info->ats_qdep;
 		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -1952,8 +1968,10 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	}
 
 	info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
-	translation = info ? CONTEXT_TT_DEV_IOTLB :
-			     CONTEXT_TT_MULTI_LEVEL;
+	if (info && info->ats_supported)
+		translation = CONTEXT_TT_DEV_IOTLB;
+	else
+		translation = CONTEXT_TT_MULTI_LEVEL;
 
 	context_set_address_root(context, virt_to_phys(pgd));
 	context_set_address_width(context, iommu->agaw);
@@ -2291,12 +2309,34 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 
 	info->bus = bus;
 	info->devfn = devfn;
-	info->ats.enabled = 0;
-	info->ats.qdep = 0;
+	info->ats_supported = info->pasid_supported = info->pri_supported = 0;
+	info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
+	info->ats_qdep = 0;
 	info->dev = dev;
 	info->domain = domain;
 	info->iommu = iommu;
 
+	if (dev && dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(info->dev);
+
+		if (ecap_dev_iotlb_support(iommu->ecap) &&
+		    pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
+		    dmar_find_matched_atsr_unit(pdev))
+			info->ats_supported = 1;
+
+		if (ecs_enabled(iommu)) {
+			if (pasid_enabled(iommu)) {
+				int features = pci_pasid_features(pdev);
+				if (features >= 0)
+					info->pasid_supported = features | 1;
+			}
+
+			if (info->ats_supported && ecap_prs(iommu->ecap) &&
+			    pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
+				info->pri_supported = 1;
+		}
+	}
+
 	spin_lock_irqsave(&device_domain_lock, flags);
 	if (dev)
 		found = find_domain(dev);
