
Commit bee60e9

Ganapatrao Kulkarni authored and Joerg Roedel committed
iommu/iova: Optimise attempts to allocate iova from 32bit address range
As an optimisation for PCI devices, an attempt is always first made to allocate the IOVA from the SAC (32-bit) address range. When that range has no suitable free space left, this leads to unnecessary failed attempts on every allocation. Fix this by tracking the size of the most recently failed 32-bit allocation and retrying that range only when the requested size is smaller than the failed size. The tracked size is reset whenever the range is replenished by a free.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Ganapatrao Kulkarni <ganapatrao.kulkarni@cavium.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
1 parent 6bf4ca7 commit bee60e9
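
To make the bookkeeping concrete, here is a minimal user-space sketch of the pattern. Everything in it is a hypothetical stand-in, not the kernel API: struct alloc32_state, attempt_rbtree_walk() and the function names are invented for illustration; the real logic lives in __alloc_and_insert_iova_range() and __cached_rbnode_delete_update() in the diff below.

#include <stdbool.h>

struct alloc32_state {
	unsigned long max32_alloc_size;	/* size of last failed 32-bit alloc */
	unsigned long dma_32bit_pfn;	/* top of the 32-bit pfn range */
};

/* Hypothetical stand-in for the real rbtree walk; always fails here. */
static bool attempt_rbtree_walk(unsigned long size)
{
	(void)size;
	return false;
}

static bool try_alloc_32bit(struct alloc32_state *st, unsigned long size)
{
	/*
	 * Early bail-out: a request at least as large as the last
	 * failed one cannot succeed either, so skip the walk entirely.
	 */
	if (size >= st->max32_alloc_size)
		return false;

	if (attempt_rbtree_walk(size))
		return true;

	/* Remember the failed size so later requests bail out early. */
	st->max32_alloc_size = size;
	return false;
}

static void on_free_below_32bit(struct alloc32_state *st)
{
	/*
	 * A free in the 32-bit range may open up space again, so reset
	 * the tracked size to the whole range, as init_iova_domain() does.
	 */
	st->max32_alloc_size = st->dma_32bit_pfn;
}

Initialising max32_alloc_size to dma_32bit_pfn effectively means "no failure recorded yet": any request smaller than the whole range still attempts the walk.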

File tree

2 files changed: 16 additions & 7 deletions


drivers/iommu/iova.c

Lines changed: 15 additions & 7 deletions
@@ -56,6 +56,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->granule = granule;
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
+	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
 	iovad->flush_cb = NULL;
 	iovad->fq = NULL;
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
@@ -139,8 +140,10 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 
 	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
 	if (free->pfn_hi < iovad->dma_32bit_pfn &&
-	    free->pfn_lo >= cached_iova->pfn_lo)
+	    free->pfn_lo >= cached_iova->pfn_lo) {
 		iovad->cached32_node = rb_next(&free->node);
+		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
+	}
 
 	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
 	if (free->pfn_lo >= cached_iova->pfn_lo)
@@ -190,6 +193,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	if (limit_pfn <= iovad->dma_32bit_pfn &&
+	    size >= iovad->max32_alloc_size)
+		goto iova32_full;
+
 	curr = __get_cached_rbnode(iovad, limit_pfn);
 	curr_iova = rb_entry(curr, struct iova, node);
 	do {
@@ -200,10 +207,8 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		curr_iova = rb_entry(curr, struct iova, node);
 	} while (curr && new_pfn <= curr_iova->pfn_hi);
 
-	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
-		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-		return -ENOMEM;
-	}
+	if (limit_pfn < size || new_pfn < iovad->start_pfn)
+		goto iova32_full;
 
 	/* pfn_lo will point to size aligned address if size_aligned is set */
 	new->pfn_lo = new_pfn;
@@ -214,9 +219,12 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	__cached_rbnode_insert_update(iovad, new);
 
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-
 	return 0;
+
+iova32_full:
+	iovad->max32_alloc_size = size;
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	return -ENOMEM;
 }
 
 static struct kmem_cache *iova_cache;
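
Note the design choice above: rather than tracking exactly how much space each free returns, __cached_rbnode_delete_update() resets max32_alloc_size all the way back to dma_32bit_pfn (its initial value from init_iova_domain()) whenever a free below the 32-bit boundary moves the cached node. The next 32-bit allocation then pays for one full tree walk and re-records a tighter bound if it fails again.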

include/linux/iova.h

Lines changed: 1 addition & 0 deletions
@@ -75,6 +75,7 @@ struct iova_domain {
 	unsigned long	granule;	/* pfn granularity for this domain */
 	unsigned long	start_pfn;	/* Lower limit for this domain */
 	unsigned long	dma_32bit_pfn;
+	unsigned long	max32_alloc_size; /* Size of last failed allocation */
 	struct iova	anchor;	/* rbtree lookup anchor */
 	struct iova_rcache	rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */
