33
33
#include <linux/bootmem.h>
34
34
#include <linux/fs_struct.h>
35
35
#include <linux/hardirq.h>
36
+ #include <linux/bit_spinlock.h>
37
+ #include <linux/rculist_bl.h>
36
38
#include "internal.h"
37
39
38
40
/*
 * Usage:
 * dcache_inode_lock protects:
 *   - i_dentry, d_alias, d_inode
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dcache_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 * [intermediate lines not shown in this diff hunk]
 * dcache_inode_lock
 *   dentry->d_lock
 *     dcache_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
@@ -74,7 +79,6 @@ int sysctl_vfs_cache_pressure __read_mostly = 100;
74
79
EXPORT_SYMBOL_GPL (sysctl_vfs_cache_pressure );
75
80
76
81
__cacheline_aligned_in_smp DEFINE_SPINLOCK (dcache_inode_lock );
77
- static __cacheline_aligned_in_smp DEFINE_SPINLOCK (dcache_hash_lock );
78
82
static __cacheline_aligned_in_smp DEFINE_SPINLOCK (dcache_lru_lock );
79
83
__cacheline_aligned_in_smp DEFINE_SEQLOCK (rename_lock );
80
84
@@ -96,7 +100,29 @@ static struct kmem_cache *dentry_cache __read_mostly;
96
100
97
101
static unsigned int d_hash_mask __read_mostly ;
98
102
static unsigned int d_hash_shift __read_mostly ;
99
- static struct hlist_head * dentry_hashtable __read_mostly ;
103
+
104
/*
 * One head per hash chain. The chains use hlist_bl, whose low pointer bit
 * is free to serve as a per-bucket lock: bit_spin_lock on bit 0 of
 * head.first locks exactly one bucket, so hash operations on different
 * buckets proceed in parallel with no extra lock storage.
 */
struct dcache_hash_bucket {
	struct hlist_bl_head head;
};
static struct dcache_hash_bucket *dentry_hashtable __read_mostly;

/*
 * Map a (parent dentry, name hash) pair to its hash bucket. The parent
 * pointer is mixed into the hash so identically-named entries under
 * different directories spread across buckets.
 */
static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
						unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}

/* Take the per-bucket lock (bit 0 of the bl-list head pointer). */
static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
{
	bit_spin_lock(0, (unsigned long *)&b->head.first);
}

/*
 * Release the per-bucket lock. NOTE(review): this uses __bit_spin_unlock,
 * the non-atomic unlock variant — presumably safe because only the lock
 * holder modifies the head word; confirm against bit_spinlock.h semantics.
 */
static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
{
	__bit_spin_unlock(0, (unsigned long *)&b->head.first);
}
100
126
101
127
/* Statistics gathering. */
102
128
struct dentry_stat_t dentry_stat = {
@@ -144,7 +170,7 @@ static void d_free(struct dentry *dentry)
144
170
dentry -> d_op -> d_release (dentry );
145
171
146
172
/* if dentry was never inserted into hash, immediate free is OK */
147
- if (hlist_unhashed (& dentry -> d_hash ))
173
+ if (hlist_bl_unhashed (& dentry -> d_hash ))
148
174
__d_free (& dentry -> d_u .d_rcu );
149
175
else
150
176
call_rcu (& dentry -> d_u .d_rcu , __d_free );
/*
 * __d_drop - unhash a dentry without taking d_lock or issuing RCU callbacks.
 *
 * Disconnected (anonymous, NFS-export) dentries live on the superblock's
 * s_anon bl list rather than in the dentry hash table, so they are removed
 * under the s_anon list's bit spinlock. Regular dentries are removed from
 * their hash bucket under the per-bucket lock, followed by a
 * dentry_rcuwalk_barrier so concurrent RCU-walk lookups notice the change.
 * NOTE(review): the s_anon branch issues no rcuwalk barrier — presumably
 * because anonymous dentries are not reachable via RCU-walk; confirm.
 */
void __d_drop(struct dentry *dentry)
{
	if (!(dentry->d_flags & DCACHE_UNHASHED)) {
		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
			/* s_anon is a bl list too: lock lives in bit 0 of
			 * its head pointer, same scheme as the hash buckets. */
			bit_spin_lock(0,
				(unsigned long *)&dentry->d_sb->s_anon.first);
			dentry->d_flags |= DCACHE_UNHASHED;
			hlist_bl_del_init(&dentry->d_hash);
			__bit_spin_unlock(0,
				(unsigned long *)&dentry->d_sb->s_anon.first);
		} else {
			struct dcache_hash_bucket *b;
			b = d_hash(dentry->d_parent, dentry->d_name.hash);
			spin_lock_bucket(b);
			/*
			 * We may not actually need to put DCACHE_UNHASHED
			 * manipulations under the hash lock, but follow
			 * the principle of least surprise.
			 */
			dentry->d_flags |= DCACHE_UNHASHED;
			hlist_bl_del_rcu(&dentry->d_hash);
			spin_unlock_bucket(b);
			dentry_rcuwalk_barrier(dentry);
		}
	}
}
EXPORT_SYMBOL(__d_drop);
@@ -961,8 +1003,8 @@ void shrink_dcache_for_umount(struct super_block *sb)
961
1003
spin_unlock (& dentry -> d_lock );
962
1004
shrink_dcache_for_umount_subtree (dentry );
963
1005
964
- while (!hlist_empty (& sb -> s_anon )) {
965
- dentry = hlist_entry ( sb -> s_anon . first , struct dentry , d_hash );
1006
+ while (!hlist_bl_empty (& sb -> s_anon )) {
1007
+ dentry = hlist_bl_entry ( hlist_bl_first ( & sb -> s_anon ) , struct dentry , d_hash );
966
1008
shrink_dcache_for_umount_subtree (dentry );
967
1009
}
968
1010
}
@@ -1263,7 +1305,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1263
1305
dentry -> d_sb = NULL ;
1264
1306
dentry -> d_op = NULL ;
1265
1307
dentry -> d_fsdata = NULL ;
1266
- INIT_HLIST_NODE (& dentry -> d_hash );
1308
+ INIT_HLIST_BL_NODE (& dentry -> d_hash );
1267
1309
INIT_LIST_HEAD (& dentry -> d_lru );
1268
1310
INIT_LIST_HEAD (& dentry -> d_subdirs );
1269
1311
INIT_LIST_HEAD (& dentry -> d_alias );
@@ -1459,14 +1501,6 @@ struct dentry * d_alloc_root(struct inode * root_inode)
1459
1501
}
1460
1502
EXPORT_SYMBOL (d_alloc_root );
1461
1503
1462
- static inline struct hlist_head * d_hash (struct dentry * parent ,
1463
- unsigned long hash )
1464
- {
1465
- hash += ((unsigned long ) parent ^ GOLDEN_RATIO_PRIME ) / L1_CACHE_BYTES ;
1466
- hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME ) >> D_HASHBITS );
1467
- return dentry_hashtable + (hash & D_HASHMASK );
1468
- }
1469
-
1470
1504
/**
1471
1505
* d_obtain_alias - find or allocate a dentry for a given inode
1472
1506
* @inode: inode to allocate the dentry for
@@ -1521,11 +1555,11 @@ struct dentry *d_obtain_alias(struct inode *inode)
1521
1555
tmp -> d_sb = inode -> i_sb ;
1522
1556
tmp -> d_inode = inode ;
1523
1557
tmp -> d_flags |= DCACHE_DISCONNECTED ;
1524
- tmp -> d_flags &= ~DCACHE_UNHASHED ;
1525
1558
list_add (& tmp -> d_alias , & inode -> i_dentry );
1526
- spin_lock (& dcache_hash_lock );
1527
- hlist_add_head (& tmp -> d_hash , & inode -> i_sb -> s_anon );
1528
- spin_unlock (& dcache_hash_lock );
1559
+ bit_spin_lock (0 , (unsigned long * )& tmp -> d_sb -> s_anon .first );
1560
+ tmp -> d_flags &= ~DCACHE_UNHASHED ;
1561
+ hlist_bl_add_head (& tmp -> d_hash , & tmp -> d_sb -> s_anon );
1562
+ __bit_spin_unlock (0 , (unsigned long * )& tmp -> d_sb -> s_anon .first );
1529
1563
spin_unlock (& tmp -> d_lock );
1530
1564
spin_unlock (& dcache_inode_lock );
1531
1565
@@ -1567,7 +1601,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1567
1601
d_move (new , dentry );
1568
1602
iput (inode );
1569
1603
} else {
1570
- /* already taking dcache_inode_lock, so d_add() by hand */
1604
+ /* already got dcache_inode_lock, so d_add() by hand */
1571
1605
__d_instantiate (dentry , inode );
1572
1606
spin_unlock (& dcache_inode_lock );
1573
1607
security_d_instantiate (dentry , inode );
@@ -1702,8 +1736,8 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
1702
1736
unsigned int len = name -> len ;
1703
1737
unsigned int hash = name -> hash ;
1704
1738
const unsigned char * str = name -> name ;
1705
- struct hlist_head * head = d_hash (parent , hash );
1706
- struct hlist_node * node ;
1739
+ struct dcache_hash_bucket * b = d_hash (parent , hash );
1740
+ struct hlist_bl_node * node ;
1707
1741
struct dentry * dentry ;
1708
1742
1709
1743
/*
@@ -1726,7 +1760,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
1726
1760
*
1727
1761
* See Documentation/vfs/dcache-locking.txt for more details.
1728
1762
*/
1729
- hlist_for_each_entry_rcu (dentry , node , head , d_hash ) {
1763
+ hlist_bl_for_each_entry_rcu (dentry , node , & b -> head , d_hash ) {
1730
1764
struct inode * i ;
1731
1765
const char * tname ;
1732
1766
int tlen ;
@@ -1820,8 +1854,8 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
1820
1854
unsigned int len = name -> len ;
1821
1855
unsigned int hash = name -> hash ;
1822
1856
const unsigned char * str = name -> name ;
1823
- struct hlist_head * head = d_hash (parent ,hash );
1824
- struct hlist_node * node ;
1857
+ struct dcache_hash_bucket * b = d_hash (parent , hash );
1858
+ struct hlist_bl_node * node ;
1825
1859
struct dentry * found = NULL ;
1826
1860
struct dentry * dentry ;
1827
1861
@@ -1847,7 +1881,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
1847
1881
*/
1848
1882
rcu_read_lock ();
1849
1883
1850
- hlist_for_each_entry_rcu (dentry , node , head , d_hash ) {
1884
+ hlist_bl_for_each_entry_rcu (dentry , node , & b -> head , d_hash ) {
1851
1885
const char * tname ;
1852
1886
int tlen ;
1853
1887
@@ -1998,11 +2032,13 @@ void d_delete(struct dentry * dentry)
1998
2032
}
1999
2033
EXPORT_SYMBOL (d_delete );
/*
 * Insert @entry at the head of hash bucket @b, under the per-bucket lock.
 * The entry must currently be unhashed (BUG_ON enforces this) and the
 * caller is expected to hold entry->d_lock. The _rcu add variant lets
 * concurrent RCU-walk lookups traverse the chain safely.
 */
static void __d_rehash(struct dentry *entry, struct dcache_hash_bucket *b)
{
	BUG_ON(!d_unhashed(entry));
	spin_lock_bucket(b);
	entry->d_flags &= ~DCACHE_UNHASHED;
	hlist_bl_add_head_rcu(&entry->d_hash, &b->head);
	spin_unlock_bucket(b);
}
2008
2044
static void _d_rehash (struct dentry * entry )
/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Takes only the dentry's d_lock; the hash-bucket locking is handled
 * inside _d_rehash/__d_rehash, so no global hash lock is needed here.
 */
void d_rehash(struct dentry *entry)
{
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
@@ -2165,15 +2199,16 @@ void d_move(struct dentry * dentry, struct dentry * target)
2165
2199
write_seqcount_begin (& dentry -> d_seq );
2166
2200
write_seqcount_begin (& target -> d_seq );
2167
2201
2168
- /* Move the dentry to the target hash queue, if on different bucket */
2169
- spin_lock (& dcache_hash_lock );
2170
- if (!d_unhashed (dentry ))
2171
- hlist_del_rcu (& dentry -> d_hash );
2202
+ /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2203
+
2204
+ /*
2205
+ * Move the dentry to the target hash queue. Don't bother checking
2206
+ * for the same hash queue because of how unlikely it is.
2207
+ */
2208
+ __d_drop (dentry );
2172
2209
__d_rehash (dentry , d_hash (target -> d_parent , target -> d_name .hash ));
2173
- spin_unlock (& dcache_hash_lock );
2174
2210
2175
2211
/* Unhash the target: dput() will then get rid of it */
2176
- /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2177
2212
__d_drop (target );
2178
2213
2179
2214
list_del (& dentry -> d_u .d_child );
@@ -2369,9 +2404,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2369
2404
2370
2405
spin_lock (& actual -> d_lock );
2371
2406
found :
2372
- spin_lock (& dcache_hash_lock );
2373
2407
_d_rehash (actual );
2374
- spin_unlock (& dcache_hash_lock );
2375
2408
spin_unlock (& actual -> d_lock );
2376
2409
spin_unlock (& dcache_inode_lock );
2377
2410
out_nolock :
@@ -2953,7 +2986,7 @@ static void __init dcache_init_early(void)
2953
2986
2954
2987
dentry_hashtable =
2955
2988
alloc_large_system_hash ("Dentry cache" ,
2956
- sizeof (struct hlist_head ),
2989
+ sizeof (struct dcache_hash_bucket ),
2957
2990
dhash_entries ,
2958
2991
13 ,
2959
2992
HASH_EARLY ,
@@ -2962,7 +2995,7 @@ static void __init dcache_init_early(void)
2962
2995
0 );
2963
2996
2964
2997
for (loop = 0 ; loop < (1 << d_hash_shift ); loop ++ )
2965
- INIT_HLIST_HEAD (& dentry_hashtable [loop ]);
2998
+ INIT_HLIST_BL_HEAD (& dentry_hashtable [loop ]. head );
2966
2999
}
2967
3000
2968
3001
static void __init dcache_init (void )
@@ -2985,7 +3018,7 @@ static void __init dcache_init(void)
2985
3018
2986
3019
dentry_hashtable =
2987
3020
alloc_large_system_hash ("Dentry cache" ,
2988
- sizeof (struct hlist_head ),
3021
+ sizeof (struct dcache_hash_bucket ),
2989
3022
dhash_entries ,
2990
3023
13 ,
2991
3024
0 ,
@@ -2994,7 +3027,7 @@ static void __init dcache_init(void)
2994
3027
0 );
2995
3028
2996
3029
for (loop = 0 ; loop < (1 << d_hash_shift ); loop ++ )
2997
- INIT_HLIST_HEAD (& dentry_hashtable [loop ]);
3030
+ INIT_HLIST_BL_HEAD (& dentry_hashtable [loop ]. head );
2998
3031
}
2999
3032
3000
3033
/* SLAB cache for __getname() consumers */
0 commit comments