 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <linux/init.h>
+#include <linux/list_bl.h>
 #include <linux/mbcache.h>
-
+#include <linux/init.h>
 
 #ifdef MB_CACHE_DEBUG
 # define mb_debug(f...) do { \
@@ -87,21 +87,38 @@ static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
 
 static inline int
-__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
+__mb_cache_entry_is_block_hashed(struct mb_cache_entry *ce)
 {
-	return !list_empty(&ce->e_block_list);
+	return !hlist_bl_unhashed(&ce->e_block_list);
 }
 
 
-static void
-__mb_cache_entry_unhash(struct mb_cache_entry *ce)
+static inline void
+__mb_cache_entry_unhash_block(struct mb_cache_entry *ce)
 {
-	if (__mb_cache_entry_is_hashed(ce)) {
-		list_del_init(&ce->e_block_list);
-		list_del(&ce->e_index.o_list);
-	}
+	if (__mb_cache_entry_is_block_hashed(ce))
+		hlist_bl_del_init(&ce->e_block_list);
+}
+
+static inline int
+__mb_cache_entry_is_index_hashed(struct mb_cache_entry *ce)
+{
+	return !hlist_bl_unhashed(&ce->e_index.o_list);
 }
 
+static inline void
+__mb_cache_entry_unhash_index(struct mb_cache_entry *ce)
+{
+	if (__mb_cache_entry_is_index_hashed(ce))
+		hlist_bl_del_init(&ce->e_index.o_list);
+}
+
+static inline void
+__mb_cache_entry_unhash(struct mb_cache_entry *ce)
+{
+	__mb_cache_entry_unhash_index(ce);
+	__mb_cache_entry_unhash_block(ce);
+}
 
 static void
 __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
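
For readers who have not used <linux/list_bl.h> before, here is a minimal sketch (not part of the patch; struct item, its fields, and demo() are invented for illustration) of the init/hash/unhash idiom the new __mb_cache_entry_* helpers build on: a node initialised with INIT_HLIST_BL_NODE() reports hlist_bl_unhashed() until it is added to a bucket, and hlist_bl_del_init() returns it to that state.

/* Illustrative sketch only -- not part of the patch.  The type and
 * function names are invented; only the hlist_bl calls are real
 * <linux/list_bl.h> API. */
#include <linux/list_bl.h>

struct item {
	int			key;
	struct hlist_bl_node	link;
};

static struct hlist_bl_head demo_bucket;

static void demo(struct item *it)
{
	INIT_HLIST_BL_HEAD(&demo_bucket);
	INIT_HLIST_BL_NODE(&it->link);		/* starts out unhashed */

	hlist_bl_add_head(&it->link, &demo_bucket);
	if (!hlist_bl_unhashed(&it->link))	/* hashed now */
		hlist_bl_del_init(&it->link);	/* back to the unhashed state */
}
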
@@ -125,7 +142,7 @@ __mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
 		ce->e_used -= MB_CACHE_WRITER;
 	ce->e_used--;
 	if (!(ce->e_used || ce->e_queued)) {
-		if (!__mb_cache_entry_is_hashed(ce))
+		if (!__mb_cache_entry_is_block_hashed(ce))
 			goto forget;
 		mb_assert(list_empty(&ce->e_lru_list));
 		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
@@ -221,18 +238,18 @@ mb_cache_create(const char *name, int bucket_bits)
 	cache->c_name = name;
 	atomic_set(&cache->c_entry_count, 0);
 	cache->c_bucket_bits = bucket_bits;
-	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
-				      GFP_KERNEL);
+	cache->c_block_hash = kmalloc(bucket_count *
+		sizeof(struct hlist_bl_head), GFP_KERNEL);
 	if (!cache->c_block_hash)
 		goto fail;
 	for (n = 0; n < bucket_count; n++)
-		INIT_LIST_HEAD(&cache->c_block_hash[n]);
-	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
-				      GFP_KERNEL);
+		INIT_HLIST_BL_HEAD(&cache->c_block_hash[n]);
+	cache->c_index_hash = kmalloc(bucket_count *
+		sizeof(struct hlist_bl_head), GFP_KERNEL);
 	if (!cache->c_index_hash)
 		goto fail;
 	for (n = 0; n < bucket_count; n++)
-		INIT_LIST_HEAD(&cache->c_index_hash[n]);
+		INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]);
 	cache->c_entry_cache = kmem_cache_create(name,
 		sizeof(struct mb_cache_entry), 0,
 		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
@@ -364,10 +381,13 @@ mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 			return NULL;
 		atomic_inc(&cache->c_entry_count);
 		INIT_LIST_HEAD(&ce->e_lru_list);
-		INIT_LIST_HEAD(&ce->e_block_list);
+		INIT_HLIST_BL_NODE(&ce->e_block_list);
+		INIT_HLIST_BL_NODE(&ce->e_index.o_list);
 		ce->e_cache = cache;
 		ce->e_queued = 0;
 	}
+	ce->e_block_hash_p = &cache->c_block_hash[0];
+	ce->e_index_hash_p = &cache->c_index_hash[0];
 	ce->e_used = 1 + MB_CACHE_WRITER;
 	return ce;
 }
@@ -393,25 +413,32 @@ mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
 {
 	struct mb_cache *cache = ce->e_cache;
 	unsigned int bucket;
-	struct list_head *l;
+	struct hlist_bl_node *l;
 	int error = -EBUSY;
+	struct hlist_bl_head *block_hash_p;
+	struct hlist_bl_head *index_hash_p;
+	struct mb_cache_entry *lce;
 
+	mb_assert(ce);
 	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
 			   cache->c_bucket_bits);
+	block_hash_p = &cache->c_block_hash[bucket];
 	spin_lock(&mb_cache_spinlock);
-	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
-		struct mb_cache_entry *ce =
-			list_entry(l, struct mb_cache_entry, e_block_list);
-		if (ce->e_bdev == bdev && ce->e_block == block)
+	hlist_bl_for_each_entry(lce, l, block_hash_p, e_block_list) {
+		if (lce->e_bdev == bdev && lce->e_block == block)
 			goto out;
 	}
+	mb_assert(!__mb_cache_entry_is_block_hashed(ce));
 	__mb_cache_entry_unhash(ce);
 	ce->e_bdev = bdev;
 	ce->e_block = block;
-	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
+	ce->e_block_hash_p = block_hash_p;
 	ce->e_index.o_key = key;
 	bucket = hash_long(key, cache->c_bucket_bits);
-	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
+	index_hash_p = &cache->c_index_hash[bucket];
+	ce->e_index_hash_p = index_hash_p;
+	hlist_bl_add_head(&ce->e_index.o_list, index_hash_p);
+	hlist_bl_add_head(&ce->e_block_list, block_hash_p);
 	error = 0;
 out:
 	spin_unlock(&mb_cache_spinlock);
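
The loop conversion above swaps the open-coded list_for_each_prev()/list_entry() pair for hlist_bl_for_each_entry(), which walks a bucket and hands back the containing structure directly. A hedged sketch of that traversal pattern follows, with invented names (struct item, demo_lookup) and the same assumption the code above makes, namely that the caller already holds a lock covering the bucket, as mb_cache_entry_insert() holds mb_cache_spinlock:

/* Illustrative sketch only -- not from the patch.  struct item and
 * demo_lookup() are invented; hlist_bl_for_each_entry() is the real
 * <linux/list_bl.h> iterator used by the new code above. */
#include <linux/list_bl.h>

struct item {
	int			key;
	struct hlist_bl_node	link;
};

/* Caller is assumed to hold whatever lock covers this bucket. */
static struct item *demo_lookup(struct hlist_bl_head *bucket, int key)
{
	struct hlist_bl_node *pos;
	struct item *it;

	hlist_bl_for_each_entry(it, pos, bucket, link)
		if (it->key == key)
			return it;
	return NULL;
}
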
@@ -463,14 +490,16 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
 		   sector_t block)
 {
 	unsigned int bucket;
-	struct list_head *l;
+	struct hlist_bl_node *l;
 	struct mb_cache_entry *ce;
+	struct hlist_bl_head *block_hash_p;
 
 	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
 			   cache->c_bucket_bits);
+	block_hash_p = &cache->c_block_hash[bucket];
 	spin_lock(&mb_cache_spinlock);
-	list_for_each(l, &cache->c_block_hash[bucket]) {
-		ce = list_entry(l, struct mb_cache_entry, e_block_list);
+	hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) {
+		mb_assert(ce->e_block_hash_p == block_hash_p);
 		if (ce->e_bdev == bdev && ce->e_block == block) {
 			DEFINE_WAIT(wait);
 
@@ -489,7 +518,7 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
 			finish_wait(&mb_cache_queue, &wait);
 			ce->e_used += 1 + MB_CACHE_WRITER;
 
-			if (!__mb_cache_entry_is_hashed(ce)) {
+			if (!__mb_cache_entry_is_block_hashed(ce)) {
 				__mb_cache_entry_release_unlock(ce);
 				return NULL;
 			}
@@ -506,12 +535,14 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
 #if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
 
 static struct mb_cache_entry *
-__mb_cache_entry_find(struct list_head *l, struct list_head *head,
+__mb_cache_entry_find(struct hlist_bl_node *l, struct hlist_bl_head *head,
 		      struct block_device *bdev, unsigned int key)
 {
-	while (l != head) {
+	while (l != NULL) {
 		struct mb_cache_entry *ce =
-			list_entry(l, struct mb_cache_entry, e_index.o_list);
+			hlist_bl_entry(l, struct mb_cache_entry,
+				e_index.o_list);
+		mb_assert(ce->e_index_hash_p == head);
 		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
 			DEFINE_WAIT(wait);
 
@@ -532,7 +563,7 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
 			}
 			finish_wait(&mb_cache_queue, &wait);
 
-			if (!__mb_cache_entry_is_hashed(ce)) {
+			if (!__mb_cache_entry_is_block_hashed(ce)) {
 				__mb_cache_entry_release_unlock(ce);
 				spin_lock(&mb_cache_spinlock);
 				return ERR_PTR(-EAGAIN);
@@ -562,12 +593,16 @@ mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
 			  unsigned int key)
 {
 	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
-	struct list_head *l;
-	struct mb_cache_entry *ce;
+	struct hlist_bl_node *l;
+	struct mb_cache_entry *ce = NULL;
+	struct hlist_bl_head *index_hash_p;
 
+	index_hash_p = &cache->c_index_hash[bucket];
 	spin_lock(&mb_cache_spinlock);
-	l = cache->c_index_hash[bucket].next;
-	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
+	if (!hlist_bl_empty(index_hash_p)) {
+		l = hlist_bl_first(index_hash_p);
+		ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
+	}
 	spin_unlock(&mb_cache_spinlock);
 	return ce;
 }
@@ -597,12 +632,16 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev,
 {
 	struct mb_cache *cache = prev->e_cache;
 	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
-	struct list_head *l;
+	struct hlist_bl_node *l;
 	struct mb_cache_entry *ce;
+	struct hlist_bl_head *index_hash_p;
 
+	index_hash_p = &cache->c_index_hash[bucket];
+	mb_assert(prev->e_index_hash_p == index_hash_p);
 	spin_lock(&mb_cache_spinlock);
+	mb_assert(!hlist_bl_empty(index_hash_p));
 	l = prev->e_index.o_list.next;
-	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
+	ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
 	__mb_cache_entry_release_unlock(prev);
 	return ce;
 }
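
One behavioural detail worth calling out: a list_head chain is circular, so the old __mb_cache_entry_find() stopped when the cursor came back around to the head (while (l != head)), whereas an hlist_bl chain simply ends in NULL. That is why the loop condition becomes while (l != NULL) and why mb_cache_entry_find_first() now checks hlist_bl_empty() before taking hlist_bl_first(). Below is a rough sketch of that first/next scan, again with invented names rather than the patch's own:

/* Illustrative sketch only -- invented names, real <linux/list_bl.h> calls. */
#include <linux/list_bl.h>

struct item {
	int			key;
	struct hlist_bl_node	link;
};

static struct item *demo_find_from(struct hlist_bl_node *pos, int key)
{
	while (pos != NULL) {	/* hlist_bl chains end in NULL, not the head */
		struct item *it = hlist_bl_entry(pos, struct item, link);

		if (it->key == key)
			return it;
		pos = pos->next;
	}
	return NULL;
}

static struct item *demo_find_first(struct hlist_bl_head *bucket, int key)
{
	if (hlist_bl_empty(bucket))
		return NULL;
	return demo_find_from(hlist_bl_first(bucket), key);
}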