Skip to content

Commit 4e35e60

Browse files
author
Nick Piggin
committed
kernel: add bl_list
Introduce a type of hlist that can support the use of the lowest bit in the
hlist_head. This will be subsequently used to implement per-bucket bit
spinlock for inode and dentry hashes, and may be useful in other cases such
as network hashes.

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Nick Piggin <npiggin@kernel.dk>
1 parent 880566e commit 4e35e60

File tree

2 files changed

+271
-0
lines changed

2 files changed

+271
-0
lines changed

include/linux/list_bl.h

Lines changed: 144 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
1+
#ifndef _LINUX_LIST_BL_H
#define _LINUX_LIST_BL_H

#include <linux/list.h>

/*
 * Special version of lists, where head of the list has a lock in the lowest
 * bit. This is useful for scalable hash tables without increasing memory
 * footprint overhead.
 *
 * For modification operations, the 0 bit of hlist_bl_head->first
 * pointer must be set.
 *
 * With some small modifications, this can easily be adapted to store several
 * arbitrary bits (not just a single lock bit), if the need arises to store
 * some fast and compact auxiliary data.
 */

/*
 * On UP kernels without spinlock debugging there is no lock word at all,
 * so the lock bit collapses to 0 and all the masking below becomes a no-op.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define LIST_BL_LOCKMASK	1UL
#else
#define LIST_BL_LOCKMASK	0UL
#endif

/* Sanity checks compile away entirely unless list debugging is enabled. */
#ifdef CONFIG_DEBUG_LIST
#define LIST_BL_BUG_ON(x) BUG_ON(x)
#else
#define LIST_BL_BUG_ON(x)
#endif


/*
 * List head: ->first holds the first-node pointer with the lock bit
 * OR-ed into bit 0; always strip LIST_BL_LOCKMASK before dereferencing.
 */
struct hlist_bl_head {
	struct hlist_bl_node *first;
};

/* Node: same shape as hlist_node; pprev points at the previous ->next slot
 * (or at head->first, which may carry the lock bit). */
struct hlist_bl_node {
	struct hlist_bl_node *next, **pprev;
};
39+
/* Initialise a bit-locked list head to empty (NULL, lock bit clear). */
#define INIT_HLIST_BL_HEAD(ptr) \
	((ptr)->first = NULL)
41+
42+
static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
43+
{
44+
h->next = NULL;
45+
h->pprev = NULL;
46+
}
47+
48+
/* Obtain the enclosing object of type @type from its @member node (container_of wrapper). */
#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member)
49+
50+
static inline int hlist_bl_unhashed(const struct hlist_bl_node *h)
51+
{
52+
return !h->pprev;
53+
}
54+
55+
static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
56+
{
57+
return (struct hlist_bl_node *)
58+
((unsigned long)h->first & ~LIST_BL_LOCKMASK);
59+
}
60+
61+
static inline void hlist_bl_set_first(struct hlist_bl_head *h,
62+
struct hlist_bl_node *n)
63+
{
64+
LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
65+
LIST_BL_BUG_ON(!((unsigned long)h->first & LIST_BL_LOCKMASK));
66+
h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
67+
}
68+
69+
static inline int hlist_bl_empty(const struct hlist_bl_head *h)
70+
{
71+
return !((unsigned long)h->first & ~LIST_BL_LOCKMASK);
72+
}
73+
74+
/*
 * Insert @n at the front of @h.
 *
 * The head's lock bit must already be set by the caller (the file-level
 * rule: modification requires bit 0 of ->first set); hlist_bl_set_first()
 * asserts this and re-ORs the bit in, so the head never transiently loses
 * its lock while ->first is swung to the new node.
 */
static inline void hlist_bl_add_head(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	/* hlist_bl_first() strips the lock bit from the old first node */
	struct hlist_bl_node *first = hlist_bl_first(h);

	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;
	hlist_bl_set_first(h, n);
}
85+
86+
/*
 * Unlink @n from its list without touching @n's own pointers.
 *
 * Internal helper: the node is left with stale next/pprev; callers poison
 * (hlist_bl_del) or reinitialise (hlist_bl_del_init) as appropriate.
 */
static inline void __hlist_bl_del(struct hlist_bl_node *n)
{
	struct hlist_bl_node *next = n->next;
	struct hlist_bl_node **pprev = n->pprev;

	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);

	/* pprev may be `first`, so be careful not to lose the lock bit */
	*pprev = (struct hlist_bl_node *)
			((unsigned long)next |
			 ((unsigned long)*pprev & LIST_BL_LOCKMASK));
	if (next)
		next->pprev = pprev;
}
100+
101+
/*
 * Remove @n from its list and poison its pointers so any later use of
 * the stale node faults loudly. The node must be reinitialised before
 * it can be inserted again.
 */
static inline void hlist_bl_del(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}
107+
108+
/*
 * Remove @n from its list (if it is on one) and return it to the
 * unhashed state, ready for re-insertion. Safe to call on a node that
 * is already unhashed.
 */
static inline void hlist_bl_del_init(struct hlist_bl_node *n)
{
	if (hlist_bl_unhashed(n))
		return;

	__hlist_bl_del(n);
	INIT_HLIST_BL_NODE(n);
}
115+
116+
/**
 * hlist_bl_for_each_entry	- iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 *
 * Not removal-safe; use hlist_bl_for_each_entry_safe() if entries may be
 * deleted during the walk.
 */
#define hlist_bl_for_each_entry(tpos, pos, head, member)		\
	for (pos = hlist_bl_first(head);				\
	     pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
129+
130+
/**
 * hlist_bl_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @n:		another &struct hlist_bl_node to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 *
 * @n caches pos->next before the body runs, so the current entry may be
 * freed/deleted inside the loop.
 */
#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member)	 \
	for (pos = hlist_bl_first(head);				 \
	     pos && ({ n = pos->next; 1; }) && 				 \
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)
143+
144+
#endif /* _LINUX_LIST_BL_H */

include/linux/rculist_bl.h

Lines changed: 127 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,127 @@
1+
#ifndef _LINUX_RCULIST_BL_H
#define _LINUX_RCULIST_BL_H

/*
 * RCU-protected bl list version. See include/linux/list_bl.h.
 */
#include <linux/list_bl.h>
#include <linux/rcupdate.h>
9+
10+
static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
11+
struct hlist_bl_node *n)
12+
{
13+
LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
14+
LIST_BL_BUG_ON(!((unsigned long)h->first & LIST_BL_LOCKMASK));
15+
rcu_assign_pointer(h->first,
16+
(struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
17+
}
18+
19+
/*
 * RCU read-side fetch of the first node: rcu_dereference() pairs with the
 * rcu_assign_pointer() in hlist_bl_set_first_rcu(); the lock bit is masked
 * off before returning. Must be called within an RCU read-side section.
 */
static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
{
	return (struct hlist_bl_node *)
		((unsigned long)rcu_dereference(h->first) & ~LIST_BL_LOCKMASK);
}
24+
25+
/**
 * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_bl_unhashed() on the node returns true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_bl_add_head_rcu() or
 * hlist_bl_del_rcu(), running on this same list. However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_bl_for_each_entry_rcu().
 */
static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
{
	if (!hlist_bl_unhashed(n)) {
		__hlist_bl_del(n);
		/* only pprev is cleared: ->next stays valid for in-flight readers */
		n->pprev = NULL;
	}
}
52+
53+
/**
 * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_bl_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry().
 */
static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	/* poison only pprev; ->next must remain valid for concurrent readers */
	n->pprev = LIST_POISON2;
}
77+
78+
/**
 * hlist_bl_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_bl,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	struct hlist_bl_node *first;

	/* don't need hlist_bl_first_rcu because we're under lock */
	first = hlist_bl_first(h);

	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;

	/* need _rcu because we can have concurrent lock free readers */
	hlist_bl_set_first_rcu(h, n);
}
113+
/**
 * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 *
 * Must be called inside an RCU read-side critical section; each ->next
 * hop goes through rcu_dereference_raw().
 */
#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member)		\
	for (pos = hlist_bl_first_rcu(head);				\
		pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
		pos = rcu_dereference_raw(pos->next))
126+
127+
#endif /* _LINUX_RCULIST_BL_H */

0 commit comments

Comments
 (0)