@@ -30,6 +30,7 @@
 #define TARGET_BUCKET_SIZE 64

 struct nfsd_drc_bucket {
+	struct rb_root rb_head;
 	struct list_head lru_head;
 	spinlock_t cache_lock;
 };
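(The matching struct rb_node c_node field added to struct svc_cacherep is in a hunk not shown in this excerpt; it is what RB_CLEAR_NODE() and rb_erase() below operate on.)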
@@ -129,6 +130,7 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum)
 	if (rp) {
 		rp->c_state = RC_UNUSED;
 		rp->c_type = RC_NOCACHE;
+		RB_CLEAR_NODE(&rp->c_node);
 		INIT_LIST_HEAD(&rp->c_lru);

 		memset(&rp->c_key, 0, sizeof(rp->c_key));
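RB_CLEAR_NODE() initializes c_node so that the node reads as not being in any tree (RB_EMPTY_NODE() is true) until nfsd_cache_insert() links it in.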
@@ -145,13 +147,14 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum)
 }

 static void
-nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
+nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
 {
 	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
 		drc_mem_usage -= rp->c_replvec.iov_len;
 		kfree(rp->c_replvec.iov_base);
 	}
 	if (rp->c_state != RC_UNUSED) {
+		rb_erase(&rp->c_node, &b->rb_head);
 		list_del(&rp->c_lru);
 		atomic_dec(&num_drc_entries);
 		drc_mem_usage -= sizeof(*rp);
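Note that the new bucket argument is only dereferenced inside the c_state != RC_UNUSED branch: an entry that was never linked into a bucket is freed without touching b.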
@@ -163,7 +166,7 @@ static void
 nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
 {
 	spin_lock(&b->cache_lock);
-	nfsd_reply_cache_free_locked(rp);
+	nfsd_reply_cache_free_locked(b, rp);
 	spin_unlock(&b->cache_lock);
 }

@@ -219,7 +222,7 @@ void nfsd_reply_cache_shutdown(void)
 		struct list_head *head = &drc_hashtbl[i].lru_head;
 		while (!list_empty(head)) {
 			rp = list_first_entry(head, struct svc_cacherep, c_lru);
-			nfsd_reply_cache_free_locked(rp);
+			nfsd_reply_cache_free_locked(&drc_hashtbl[i], rp);
 		}
 	}

@@ -258,7 +261,7 @@ prune_bucket(struct nfsd_drc_bucket *b)
 		if (atomic_read(&num_drc_entries) <= max_drc_entries &&
 		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
 			break;
-		nfsd_reply_cache_free_locked(rp);
+		nfsd_reply_cache_free_locked(b, rp);
 		freed++;
 	}
 	return freed;
@@ -349,17 +352,29 @@ static struct svc_cacherep *
 nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key)
 {
 	struct svc_cacherep	*rp, *ret = key;
-	struct list_head	*rh = &b->lru_head;
+	struct rb_node		**p = &b->rb_head.rb_node,
+				*parent = NULL;
 	unsigned int		entries = 0;
+	int cmp;

-	list_for_each_entry(rp, rh, c_lru) {
+	while (*p != NULL) {
 		++entries;
-		if (nfsd_cache_key_cmp(key, rp) == 0) {
+		parent = *p;
+		rp = rb_entry(parent, struct svc_cacherep, c_node);
+
+		cmp = nfsd_cache_key_cmp(key, rp);
+		if (cmp < 0)
+			p = &parent->rb_left;
+		else if (cmp > 0)
+			p = &parent->rb_right;
+		else {
 			ret = rp;
-			break;
+			goto out;
 		}
 	}
-
+	rb_link_node(&key->c_node, parent, p);
+	rb_insert_color(&key->c_node, &b->rb_head);
+out:
 	/* tally hash chain length stats */
 	if (entries > longest_chain) {
 		longest_chain = entries;
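This hunk swaps the O(n) walk of the bucket's list for the standard <linux/rbtree.h> descend-and-insert idiom. Below is a minimal sketch of that idiom for reference; it is not part of the patch, and mytype, key, and my_insert are illustrative names:

#include <linux/rbtree.h>

struct mytype {
	struct rb_node node;
	int key;
};

/* Descend from the root comparing keys; on a miss, link the new
 * node at the leaf position found and rebalance the tree. */
static struct mytype *my_insert(struct rb_root *root, struct mytype *data)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct mytype *cur;

		parent = *p;
		cur = rb_entry(parent, struct mytype, node);

		if (data->key < cur->key)
			p = &parent->rb_left;
		else if (data->key > cur->key)
			p = &parent->rb_right;
		else
			return cur;	/* already present: hand back the match */
	}

	rb_link_node(&data->node, parent, p);
	rb_insert_color(&data->node, root);
	return data;
}

As in nfsd_cache_insert(), a hit returns the existing node (the caller then discards its preallocated entry), while a miss links the new node and recolors with rb_insert_color(). Note that entries now counts descent steps, so the longest_chain statistic above effectively reports search depth rather than hash chain length.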
@@ -414,7 +429,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	spin_lock(&b->cache_lock);
 	found = nfsd_cache_insert(b, rp);
 	if (found != rp) {
-		nfsd_reply_cache_free_locked(rp);
+		nfsd_reply_cache_free_locked(NULL, rp);
 		rp = found;
 		goto found_entry;
 	}
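Passing NULL for the bucket is deliberate here: a matching entry was already cached, so the preallocated rp was never linked into the rbtree and its c_state is still RC_UNUSED, which means nfsd_reply_cache_free_locked() skips the rb_erase()/list_del() branch and never dereferences the bucket pointer.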
@@ -462,7 +477,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 		break;
 	default:
 		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
-		nfsd_reply_cache_free_locked(rp);
+		nfsd_reply_cache_free_locked(b, rp);
 	}

 	goto out;