Skip to content

Commit 58d6ea3

Browse files
author
Matthew Wilcox
committed
xarray: Add XArray unconditional store operations
xa_store() differs from radix_tree_insert() in that it will overwrite an existing element in the array rather than returning an error. This is the behaviour which most users want, and those that want more complex behaviour generally want to use the xas family of routines anyway. For memory allocation, xa_store() will first attempt to request memory from the slab allocator; if memory is not immediately available, it will drop the xa_lock and allocate memory, keeping a pointer in the xa_state. It does not use the per-CPU cache, although those will continue to exist until all radix tree users are converted to the xarray. This patch also includes xa_erase() and __xa_erase() for a streamlined way to store NULL. Since there is no need to allocate memory in order to store a NULL in the XArray, we do not need to trouble the user with deciding what memory allocation flags to use. Signed-off-by: Matthew Wilcox <willy@infradead.org>
1 parent 9b89a03 commit 58d6ea3

File tree

10 files changed

+1055
-9
lines changed

10 files changed

+1055
-9
lines changed

include/linux/xarray.h

Lines changed: 145 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -205,10 +205,17 @@ typedef unsigned __bitwise xa_mark_t;
205205
#define XA_PRESENT ((__force xa_mark_t)8U)
206206
#define XA_MARK_MAX XA_MARK_2
207207

208+
/*
 * Which spinlock flavour serialises access to this XArray.  The value is
 * stored in the low bits of xa_flags — see XA_FLAGS_LOCK_IRQ and
 * XA_FLAGS_LOCK_BH, which are defined directly from these constants.
 */
enum xa_lock_type {
	XA_LOCK_IRQ = 1,
	XA_LOCK_BH = 2,
};
212+
208213
/*
209214
* Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
210215
* and we remain compatible with that.
211216
*/
217+
#define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ)
218+
#define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH)
212219
#define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
213220
(__force unsigned)(mark)))
214221

@@ -267,6 +274,7 @@ struct xarray {
267274

268275
void xa_init_flags(struct xarray *, gfp_t flags);
269276
void *xa_load(struct xarray *, unsigned long index);
277+
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
270278
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
271279
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
272280
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -309,6 +317,23 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
309317
return xa->xa_flags & XA_FLAGS_MARK(mark);
310318
}
311319

320+
/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * This function is the equivalent of calling xa_store() with %NULL as
 * the third argument.  The XArray does not need to allocate memory to
 * store a NULL entry, so the user does not need to provide GFP flags
 * (0 is passed below, meaning no allocation may be attempted).
 *
 * Context: Process context.  Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase(struct xarray *xa, unsigned long index)
{
	return xa_store(xa, index, NULL, 0);
}
336+
312337
#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
313338
#define xa_lock(xa) spin_lock(&(xa)->xa_lock)
314339
#define xa_unlock(xa) spin_unlock(&(xa)->xa_lock)
@@ -322,11 +347,65 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
322347
spin_unlock_irqrestore(&(xa)->xa_lock, flags)
323348

324349
/*
325-
* Versions of the normal API which require the caller to hold the xa_lock.
326-
*/
350+
* Versions of the normal API which require the caller to hold the
351+
* xa_lock. If the GFP flags allow it, they will drop the lock to
352+
* allocate memory, then reacquire it afterwards. These functions
353+
* may also re-enable interrupts if the XArray flags indicate the
354+
* locking should be interrupt safe.
355+
*/
356+
void *__xa_erase(struct xarray *, unsigned long index);
357+
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
327358
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
328359
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
329360

361+
/**
 * xa_erase_bh() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * Equivalent to calling xa_store() with %NULL as the new entry.  Storing
 * NULL never requires memory allocation, so the caller does not have to
 * supply GFP flags.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
	void *old;

	xa_lock_bh(xa);
	old = __xa_erase(xa, index);
	xa_unlock_bh(xa);

	return old;
}
384+
385+
/**
 * xa_erase_irq() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * Equivalent to calling xa_store() with %NULL as the new entry.  Storing
 * NULL never requires memory allocation, so the caller does not have to
 * supply GFP flags.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
	void *old;

	xa_lock_irq(xa);
	old = __xa_erase(xa, index);
	xa_unlock_irq(xa);

	return old;
}
408+
330409
/* Everything below here is the Advanced API. Proceed with caution. */
331410

332411
/*
@@ -441,6 +520,12 @@ static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
441520
lockdep_is_held(&xa->xa_lock));
442521
}
443522

523+
/* Private */
/*
 * Tag a node pointer for storage in an XArray slot.  Setting bit 1
 * distinguishes it from a user pointer (cf. xas_not_node(), which tests
 * the low two bits).  NOTE(review): xa_to_node() presumably strips this
 * tag again — confirm in the full header.
 */
static inline void *xa_mk_node(const struct xa_node *node)
{
	return (void *)((unsigned long)node | 2);
}
528+
444529
/* Private */
445530
static inline struct xa_node *xa_to_node(const void *entry)
446531
{
@@ -647,6 +732,12 @@ static inline bool xas_not_node(struct xa_node *node)
647732
return ((unsigned long)node & 3) || !node;
648733
}
649734

735+
/* True if the node represents head-of-tree, RESTART or BOUNDS */
static inline bool xas_top(struct xa_node *node)
{
	/*
	 * Relies on the sentinel values (NULL, XAS_RESTART and,
	 * per the comment above, BOUNDS) comparing numerically lower
	 * than any real node pointer.
	 */
	return node <= XAS_RESTART;
}
740+
650741
/**
651742
* xas_reset() - Reset an XArray operation state.
652743
* @xas: XArray operation state.
@@ -683,10 +774,14 @@ static inline bool xas_retry(struct xa_state *xas, const void *entry)
683774
}
684775

685776
void *xas_load(struct xa_state *);
777+
void *xas_store(struct xa_state *, void *entry);
686778

687779
bool xas_get_mark(const struct xa_state *, xa_mark_t);
688780
void xas_set_mark(const struct xa_state *, xa_mark_t);
689781
void xas_clear_mark(const struct xa_state *, xa_mark_t);
782+
void xas_init_marks(const struct xa_state *);
783+
784+
bool xas_nomem(struct xa_state *, gfp_t);
690785

691786
/**
692787
* xas_reload() - Refetch an entry from the xarray.
@@ -711,4 +806,52 @@ static inline void *xas_reload(struct xa_state *xas)
711806
return xa_head(xas->xa);
712807
}
713808

809+
/**
810+
* xas_set() - Set up XArray operation state for a different index.
811+
* @xas: XArray operation state.
812+
* @index: New index into the XArray.
813+
*
814+
* Move the operation state to refer to a different index. This will
815+
* have the effect of starting a walk from the top; see xas_next()
816+
* to move to an adjacent index.
817+
*/
818+
static inline void xas_set(struct xa_state *xas, unsigned long index)
819+
{
820+
xas->xa_index = index;
821+
xas->xa_node = XAS_RESTART;
822+
}
823+
824+
/**
 * xas_set_order() - Set up XArray operation state for a multislot entry.
 * @xas: XArray operation state.
 * @index: Target of the operation.
 * @order: Entry occupies 2^@order indices.
 */
static inline void xas_set_order(struct xa_state *xas, unsigned long index,
					unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
	/*
	 * Round @index down to the first index covered by the entry.
	 * Shifting by >= BITS_PER_LONG is undefined behaviour in C,
	 * hence the explicit guard for the full-width case.
	 */
	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
	/* Split @order into whole tree levels ... */
	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
	/* ... plus the number of extra sibling slots within one node. */
	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	xas->xa_node = XAS_RESTART;
#else
	/* Multi-index entries not compiled in: only order 0 is valid. */
	BUG_ON(order > 0);
	xas_set(xas, index);
#endif
}
843+
844+
/**
845+
* xas_set_update() - Set up XArray operation state for a callback.
846+
* @xas: XArray operation state.
847+
* @update: Function to call when updating a node.
848+
*
849+
* The XArray can notify a caller after it has updated an xa_node.
850+
* This is advanced functionality and is only needed by the page cache.
851+
*/
852+
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
853+
{
854+
xas->xa_update = update;
855+
}
856+
714857
#endif /* _LINUX_XARRAY_H */

lib/radix-tree.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
4747
/*
4848
* Radix tree node cache.
4949
*/
50-
static struct kmem_cache *radix_tree_node_cachep;
50+
struct kmem_cache *radix_tree_node_cachep;
5151

5252
/*
5353
* The radix tree is variable-height, so an insert operation not only has
@@ -365,7 +365,7 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
365365
return ret;
366366
}
367367

368-
static void radix_tree_node_rcu_free(struct rcu_head *head)
368+
void radix_tree_node_rcu_free(struct rcu_head *head)
369369
{
370370
struct radix_tree_node *node =
371371
container_of(head, struct radix_tree_node, rcu_head);

0 commit comments

Comments
 (0)