Skip to content

Commit 256e462

Browse files
committed
iommu/amd: Make use of the generic IOVA allocator
Remove the old address allocation code and make use of the generic IOVA allocator that is also used by other dma-ops implementations. Signed-off-by: Joerg Roedel <jroedel@suse.de>
1 parent 518d9b4 commit 256e462

File tree

1 file changed

+26
-157
lines changed

1 file changed

+26
-157
lines changed

drivers/iommu/amd_iommu.c

Lines changed: 26 additions & 157 deletions
Original file line numberDiff line numberDiff line change
@@ -1649,167 +1649,32 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 		return -ENOMEM;
 }
 
-static dma_addr_t dma_ops_aperture_alloc(struct dma_ops_domain *dom,
-					 struct aperture_range *range,
-					 unsigned long pages,
-					 unsigned long dma_mask,
-					 unsigned long boundary_size,
-					 unsigned long align_mask,
-					 bool trylock)
-{
-	unsigned long offset, limit, flags;
-	dma_addr_t address;
-	bool flush = false;
-
-	offset = range->offset >> PAGE_SHIFT;
-	limit  = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
-					dma_mask >> PAGE_SHIFT);
-
-	if (trylock) {
-		if (!spin_trylock_irqsave(&range->bitmap_lock, flags))
-			return -1;
-	} else {
-		spin_lock_irqsave(&range->bitmap_lock, flags);
-	}
-
-	address = iommu_area_alloc(range->bitmap, limit, range->next_bit,
-				   pages, offset, boundary_size, align_mask);
-	if (address == -1) {
-		/* Nothing found, retry one time */
-		address = iommu_area_alloc(range->bitmap, limit,
-					   0, pages, offset, boundary_size,
-					   align_mask);
-		flush = true;
-	}
-
-	if (address != -1)
-		range->next_bit = address + pages;
-
-	spin_unlock_irqrestore(&range->bitmap_lock, flags);
-
-	if (flush) {
-		domain_flush_tlb(&dom->domain);
-		domain_flush_complete(&dom->domain);
-	}
-
-	return address;
-}
-
-static unsigned long dma_ops_area_alloc(struct device *dev,
-					struct dma_ops_domain *dom,
-					unsigned int pages,
-					unsigned long align_mask,
-					u64 dma_mask)
+static unsigned long dma_ops_alloc_iova(struct device *dev,
+					struct dma_ops_domain *dma_dom,
+					unsigned int pages, u64 dma_mask)
 {
-	unsigned long boundary_size, mask;
-	unsigned long address = -1;
-	bool first = true;
-	u32 start, i;
-
-	preempt_disable();
-
-	mask = dma_get_seg_boundary(dev);
-
-again:
-	start = this_cpu_read(*dom->next_index);
-
-	/* Sanity check - is it really necessary? */
-	if (unlikely(start > APERTURE_MAX_RANGES)) {
-		start = 0;
-		this_cpu_write(*dom->next_index, 0);
-	}
-
-	boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
-				   1UL << (BITS_PER_LONG - PAGE_SHIFT);
+	unsigned long pfn = 0;
 
-	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
-		struct aperture_range *range;
-		int index;
-
-		index = (start + i) % APERTURE_MAX_RANGES;
+	pages = __roundup_pow_of_two(pages);
 
-		range = dom->aperture[index];
-
-		if (!range || range->offset >= dma_mask)
-			continue;
-
-		address = dma_ops_aperture_alloc(dom, range, pages,
-						 dma_mask, boundary_size,
-						 align_mask, first);
-		if (address != -1) {
-			address = range->offset + (address << PAGE_SHIFT);
-			this_cpu_write(*dom->next_index, index);
-			break;
-		}
-	}
-
-	if (address == -1 && first) {
-		first = false;
-		goto again;
-	}
+	if (dma_mask > DMA_BIT_MASK(32))
+		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+				      IOVA_PFN(DMA_BIT_MASK(32)));
 
-	preempt_enable();
+	if (!pfn)
+		pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
 
-	return address;
+	return (pfn << PAGE_SHIFT);
 }
 
-static unsigned long dma_ops_alloc_addresses(struct device *dev,
-					     struct dma_ops_domain *dom,
-					     unsigned int pages,
-					     unsigned long align_mask,
-					     u64 dma_mask)
+static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
+			      unsigned long address,
+			      unsigned int pages)
 {
-	unsigned long address = -1;
-
-	while (address == -1) {
-		address = dma_ops_area_alloc(dev, dom, pages,
-					     align_mask, dma_mask);
-
-		if (address == -1 && alloc_new_range(dom, false, GFP_ATOMIC))
-			break;
-	}
-
-	if (unlikely(address == -1))
-		address = DMA_ERROR_CODE;
-
-	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
-
-	return address;
-}
-
-/*
- * The address free function.
- *
- * called with domain->lock held
- */
-static void dma_ops_free_addresses(struct dma_ops_domain *dom,
-				   unsigned long address,
-				   unsigned int pages)
-{
-	unsigned i = address >> APERTURE_RANGE_SHIFT;
-	struct aperture_range *range = dom->aperture[i];
-	unsigned long flags;
-
-	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
-
-#ifdef CONFIG_IOMMU_STRESS
-	if (i < 4)
-		return;
-#endif
-
-	if (amd_iommu_unmap_flush) {
-		domain_flush_tlb(&dom->domain);
-		domain_flush_complete(&dom->domain);
-	}
-
-	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
-
-	spin_lock_irqsave(&range->bitmap_lock, flags);
-	if (address + pages > range->next_bit)
-		range->next_bit = address + pages;
-	bitmap_clear(range->bitmap, address, pages);
-	spin_unlock_irqrestore(&range->bitmap_lock, flags);
+	pages = __roundup_pow_of_two(pages);
+	address >>= PAGE_SHIFT;
 
+	free_iova_fast(&dma_dom->iovad, address, pages);
 }
 
 /****************************************************************************
@@ -2586,9 +2451,7 @@ static dma_addr_t __map_single(struct device *dev,
 	if (align)
 		align_mask = (1UL << get_order(size)) - 1;
 
-	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
-					  dma_mask);
-
+	address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
 	if (address == DMA_ERROR_CODE)
 		goto out;
 
@@ -2626,7 +2489,10 @@ static dma_addr_t __map_single(struct device *dev,
 		iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
 	}
 
-	dma_ops_free_addresses(dma_dom, address, pages);
+	domain_flush_tlb(&dma_dom->domain);
+	domain_flush_complete(&dma_dom->domain);
+
+	dma_ops_free_iova(dma_dom, address, pages);
 
 	return DMA_ERROR_CODE;
 }
@@ -2658,7 +2524,10 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 		start += PAGE_SIZE;
 	}
 
-	dma_ops_free_addresses(dma_dom, dma_addr, pages);
+	domain_flush_tlb(&dma_dom->domain);
+	domain_flush_complete(&dma_dom->domain);
+
+	dma_ops_free_iova(dma_dom, dma_addr, pages);
 }
 
 /*

0 commit comments

Comments
 (0)