@@ -205,10 +205,17 @@ typedef unsigned __bitwise xa_mark_t;
 #define XA_PRESENT	((__force xa_mark_t)8U)
 #define XA_MARK_MAX	XA_MARK_2
 
+enum xa_lock_type {
+	XA_LOCK_IRQ = 1,
+	XA_LOCK_BH = 2,
+};
+
 /*
  * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
  * and we remain compatible with that.
  */
+#define XA_FLAGS_LOCK_IRQ	((__force gfp_t)XA_LOCK_IRQ)
+#define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
 #define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
 						(__force unsigned)(mark)))
 
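
The two new XA_FLAGS_LOCK_* flags let an XArray record how its internal spinlock must be taken. Below is a minimal sketch of opting in at initialisation time, using xa_init_flags() from later in this header; the array and function names are illustrative, not part of this patch.

/* An XArray whose entries are also touched from interrupt context. */
static struct xarray irq_array;

static int example_setup(void)
{
	/* Record that xa_lock must be taken with interrupts disabled. */
	xa_init_flags(&irq_array, XA_FLAGS_LOCK_IRQ);
	return 0;
}
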
@@ -267,6 +274,7 @@ struct xarray {
 
 void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
+void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
 void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
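
A sketch of the basic store/load cycle the new xa_store() declaration enables. The names are illustrative and error handling is omitted; on success xa_store() returns whatever entry was previously at that index.

static struct xarray example_array;	/* assume xa_init_flags() has run */

/* Returns the entry that @object replaced, if any. */
static void *example_insert(unsigned long index, void *object)
{
	return xa_store(&example_array, index, object, GFP_KERNEL);
}

/* Later, xa_load() finds whatever example_insert() put there. */
static void *example_lookup(unsigned long index)
{
	return xa_load(&example_array, index);
}
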
@@ -309,6 +317,23 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
 	return xa->xa_flags & XA_FLAGS_MARK(mark);
 }
 
+/**
+ * xa_erase() - Erase this entry from the XArray.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * This function is the equivalent of calling xa_store() with %NULL as
+ * the third argument. The XArray does not need to allocate memory, so
+ * the user does not need to provide GFP flags.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_erase(struct xarray *xa, unsigned long index)
+{
+	return xa_store(xa, index, NULL, 0);
+}
+
 #define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
 #define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
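
Since xa_erase() is documented above as xa_store() with a %NULL entry, the two calls in this illustrative helper are interchangeable; no GFP flags are needed because erasing never allocates.

static void *example_remove(struct xarray *xa, unsigned long index)
{
	/* Equivalent to: return xa_store(xa, index, NULL, 0); */
	return xa_erase(xa, index);
}
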
@@ -322,11 +347,65 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
 					spin_unlock_irqrestore(&(xa)->xa_lock, flags)
 
 /*
- * Versions of the normal API which require the caller to hold the xa_lock.
- */
+ * Versions of the normal API which require the caller to hold the
+ * xa_lock. If the GFP flags allow it, they will drop the lock to
+ * allocate memory, then reacquire it afterwards. These functions
+ * may also re-enable interrupts if the XArray flags indicate the
+ * locking should be interrupt safe.
+ */
+void *__xa_erase(struct xarray *, unsigned long index);
+void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
 
+/**
+ * xa_erase_bh() - Erase this entry from the XArray.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * This function is the equivalent of calling xa_store() with %NULL as
+ * the third argument. The XArray does not need to allocate memory, so
+ * the user does not need to provide GFP flags.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
+{
+	void *entry;
+
+	xa_lock_bh(xa);
+	entry = __xa_erase(xa, index);
+	xa_unlock_bh(xa);
+
+	return entry;
+}
+
+/**
+ * xa_erase_irq() - Erase this entry from the XArray.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * This function is the equivalent of calling xa_store() with %NULL as
+ * the third argument. The XArray does not need to allocate memory, so
+ * the user does not need to provide GFP flags.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
+{
+	void *entry;
+
+	xa_lock_irq(xa);
+	entry = __xa_erase(xa, index);
+	xa_unlock_irq(xa);
+
+	return entry;
+}
+
 /* Everything below here is the Advanced API. Proceed with caution. */
 
 /*
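
The xa_erase_bh()/xa_erase_irq() wrappers above show the intended way to compose the locked __xa_* API. Here is a hedged sketch of a caller-built variant that replaces an entry under an interrupt-safe lock; replace_entry_irq() is illustrative and not part of this patch, and GFP_ATOMIC is chosen so __xa_store() should not need to drop the lock to allocate.

static void *replace_entry_irq(struct xarray *xa, unsigned long index,
			       void *entry)
{
	void *old;

	xa_lock_irq(xa);
	/* GFP_ATOMIC keeps __xa_store() from dropping the lock to sleep. */
	old = __xa_store(xa, index, entry, GFP_ATOMIC);
	xa_unlock_irq(xa);

	return old;
}
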
@@ -441,6 +520,12 @@ static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
 						lockdep_is_held(&xa->xa_lock));
 }
 
+/* Private */
+static inline void *xa_mk_node(const struct xa_node *node)
+{
+	return (void *)((unsigned long)node | 2);
+}
+
 /* Private */
 static inline struct xa_node *xa_to_node(const void *entry)
 {
@@ -647,6 +732,12 @@ static inline bool xas_not_node(struct xa_node *node)
 	return ((unsigned long)node & 3) || !node;
 }
 
+/* True if the node represents head-of-tree, RESTART or BOUNDS */
+static inline bool xas_top(struct xa_node *node)
+{
+	return node <= XAS_RESTART;
+}
+
 /**
  * xas_reset() - Reset an XArray operation state.
  * @xas: XArray operation state.
@@ -683,10 +774,14 @@ static inline bool xas_retry(struct xa_state *xas, const void *entry)
 }
 
 void *xas_load(struct xa_state *);
+void *xas_store(struct xa_state *, void *entry);
 
 bool xas_get_mark(const struct xa_state *, xa_mark_t);
 void xas_set_mark(const struct xa_state *, xa_mark_t);
 void xas_clear_mark(const struct xa_state *, xa_mark_t);
+void xas_init_marks(const struct xa_state *);
+
+bool xas_nomem(struct xa_state *, gfp_t);
 
 /**
  * xas_reload() - Refetch an entry from the xarray.
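
xas_nomem() is what lets advanced-API callers retry after an allocation failure. A sketch of the usual loop follows, assuming the XA_STATE() initialiser from earlier in this header; locking uses the xa_lock()/xa_unlock() macros above, and error checking on the xa_state is omitted.

static void example_xas_store(struct xarray *xa, unsigned long index,
			      void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xa_lock(xa);
		xas_store(&xas, entry);
		xa_unlock(xa);
		/* Allocates outside the lock and asks us to retry if needed. */
	} while (xas_nomem(&xas, GFP_KERNEL));
}
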
@@ -711,4 +806,52 @@ static inline void *xas_reload(struct xa_state *xas)
 	return xa_head(xas->xa);
 }
 
+/**
+ * xas_set() - Set up XArray operation state for a different index.
+ * @xas: XArray operation state.
+ * @index: New index into the XArray.
+ *
+ * Move the operation state to refer to a different index. This will
+ * have the effect of starting a walk from the top; see xas_next()
+ * to move to an adjacent index.
+ */
+static inline void xas_set(struct xa_state *xas, unsigned long index)
+{
+	xas->xa_index = index;
+	xas->xa_node = XAS_RESTART;
+}
+
+/**
+ * xas_set_order() - Set up XArray operation state for a multislot entry.
+ * @xas: XArray operation state.
+ * @index: Target of the operation.
+ * @order: Entry occupies 2^@order indices.
+ */
+static inline void xas_set_order(struct xa_state *xas, unsigned long index,
+					unsigned int order)
+{
+#ifdef CONFIG_XARRAY_MULTI
+	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
+	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
+	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+	xas->xa_node = XAS_RESTART;
+#else
+	BUG_ON(order > 0);
+	xas_set(xas, index);
+#endif
+}
+
+/**
+ * xas_set_update() - Set up XArray operation state for a callback.
+ * @xas: XArray operation state.
+ * @update: Function to call when updating a node.
+ *
+ * The XArray can notify a caller after it has updated an xa_node.
+ * This is advanced functionality and is only needed by the page cache.
+ */
+static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
+{
+	xas->xa_update = update;
+}
+
 #endif /* _LINUX_XARRAY_H */
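
Finally, a hedged sketch of how xas_set_order() might be used to cover a power-of-two range with a single entry when CONFIG_XARRAY_MULTI is enabled. The function is illustrative, XA_STATE() comes from earlier in this header, and the allocation retry loop shown above is omitted for brevity.

static void example_store_order2(struct xarray *xa, void *entry)
{
	XA_STATE(xas, xa, 0);

	/* One stored entry will be returned for indices 64 through 67. */
	xas_set_order(&xas, 64, 2);
	xa_lock(xa);
	xas_store(&xas, entry);
	xa_unlock(xa);
}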