
Commit 76ecec2

trondmy authored and J. Bruce Fields committed
knfsd: Simplify NFS duplicate replay cache
Simplify the duplicate replay cache by initialising the preallocated cache entry, so that we can use it as a key for the cache lookup.

Note that the 99.999% case we want to optimise for is still the one where the lookup fails, and we have to add this entry to the cache, so preinitialising should not cause a performance penalty.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
1 parent 3e87da5 commit 76ecec2
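
The change below replaces the old two-step pattern, where a blank entry was allocated and its fields were filled in only after the cache search, with a single candidate entry that is fully initialised at allocation time and then used directly as the lookup key. As a rough illustration of that pattern only, here is a minimal, self-contained userspace sketch: the types and helpers in it (cache_entry, entry_alloc, cache_insert) are hypothetical stand-ins, not the kernel's svc_cacherep API, and locking, LRU maintenance and request checksumming are omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for svc_cacherep: every field below is a key field. */
struct cache_entry {
	uint32_t xid;              /* transaction id: primary discriminator */
	uint32_t csum;             /* checksum of the request payload */
	uint32_t proc, vers, len;  /* remaining discriminators */
	struct cache_entry *next;  /* simple singly linked bucket chain */
};

struct bucket {
	struct cache_entry *head;
};

/* Preallocate a candidate entry with every key field already initialised. */
static struct cache_entry *entry_alloc(uint32_t xid, uint32_t csum,
				       uint32_t proc, uint32_t vers, uint32_t len)
{
	struct cache_entry *e = calloc(1, sizeof(*e));

	if (e) {
		e->xid = xid;
		e->csum = csum;
		e->proc = proc;
		e->vers = vers;
		e->len = len;
	}
	return e;
}

/* Two entries match when all discriminators are equal. */
static bool entry_match(const struct cache_entry *key, const struct cache_entry *e)
{
	return key->xid == e->xid && key->csum == e->csum &&
	       key->proc == e->proc && key->vers == e->vers &&
	       key->len == e->len;
}

/*
 * Lookup-or-insert: walk the bucket using @key itself as the search key.
 * Return an existing match, or link @key into the bucket and return it,
 * mirroring how the new nfsd_cache_insert() hands back the key on a miss.
 */
static struct cache_entry *cache_insert(struct bucket *b, struct cache_entry *key)
{
	struct cache_entry *e;

	for (e = b->head; e; e = e->next)
		if (entry_match(key, e))
			return e;          /* hit: caller discards the candidate */

	key->next = b->head;               /* miss: the key becomes the entry */
	b->head = key;
	return key;
}

int main(void)
{
	struct bucket b = { .head = NULL };
	struct cache_entry *rp, *found;

	/* First request: a miss, so the preinitialised candidate is inserted. */
	rp = entry_alloc(42, 0xabcd, 1, 3, 128);
	if (!rp)
		return 1;
	found = cache_insert(&b, rp);
	printf("first lookup:  %s\n", found == rp ? "miss, inserted" : "hit");

	/* Retransmission of the same request: a hit, the candidate is freed. */
	rp = entry_alloc(42, 0xabcd, 1, 3, 128);
	if (!rp)
		return 1;
	found = cache_insert(&b, rp);
	if (found != rp) {
		free(rp);
		printf("second lookup: hit, cached reply would be replayed\n");
	}

	/* Tear down the bucket. */
	while (b.head) {
		struct cache_entry *next = b.head->next;
		free(b.head);
		b.head = next;
	}
	return 0;
}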

File tree

1 file changed: +44 −50 lines

fs/nfsd/nfscache.c

Lines changed: 44 additions & 50 deletions
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -121,7 +121,7 @@ nfsd_cache_hash(__be32 xid)
 }
 
 static struct svc_cacherep *
-nfsd_reply_cache_alloc(void)
+nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum)
 {
 	struct svc_cacherep *rp;
 
@@ -130,6 +130,16 @@ nfsd_reply_cache_alloc(void)
 		rp->c_state = RC_UNUSED;
 		rp->c_type = RC_NOCACHE;
 		INIT_LIST_HEAD(&rp->c_lru);
+
+		rp->c_xid = rqstp->rq_xid;
+		rp->c_proc = rqstp->rq_proc;
+		memset(&rp->c_addr, 0, sizeof(rp->c_addr));
+		rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
+		rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
+		rp->c_prot = rqstp->rq_prot;
+		rp->c_vers = rqstp->rq_vers;
+		rp->c_len = rqstp->rq_arg.len;
+		rp->c_csum = csum;
 	}
 	return rp;
 }
@@ -141,9 +151,11 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
 		drc_mem_usage -= rp->c_replvec.iov_len;
 		kfree(rp->c_replvec.iov_base);
 	}
-	list_del(&rp->c_lru);
-	atomic_dec(&num_drc_entries);
-	drc_mem_usage -= sizeof(*rp);
+	if (rp->c_state != RC_UNUSED) {
+		list_del(&rp->c_lru);
+		atomic_dec(&num_drc_entries);
+		drc_mem_usage -= sizeof(*rp);
+	}
 	kmem_cache_free(drc_slab, rp);
 }
 
@@ -319,24 +331,23 @@ nfsd_cache_csum(struct svc_rqst *rqstp)
 }
 
 static bool
-nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
+nfsd_cache_match(const struct svc_cacherep *key, const struct svc_cacherep *rp)
 {
 	/* Check RPC XID first */
-	if (rqstp->rq_xid != rp->c_xid)
+	if (key->c_xid != rp->c_xid)
 		return false;
 	/* compare checksum of NFS data */
-	if (csum != rp->c_csum) {
+	if (key->c_csum != rp->c_csum) {
 		++payload_misses;
 		return false;
 	}
 
 	/* Other discriminators */
-	if (rqstp->rq_proc != rp->c_proc ||
-	    rqstp->rq_prot != rp->c_prot ||
-	    rqstp->rq_vers != rp->c_vers ||
-	    rqstp->rq_arg.len != rp->c_len ||
-	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
-	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
+	if (key->c_proc != rp->c_proc ||
+	    key->c_prot != rp->c_prot ||
+	    key->c_vers != rp->c_vers ||
+	    key->c_len != rp->c_len ||
+	    memcmp(&key->c_addr, &rp->c_addr, sizeof(key->c_addr)) != 0)
 		return false;
 
 	return true;
@@ -345,19 +356,18 @@ nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
 /*
  * Search the request hash for an entry that matches the given rqstp.
  * Must be called with cache_lock held. Returns the found entry or
- * NULL on failure.
+ * inserts an empty key on failure.
  */
 static struct svc_cacherep *
-nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
-		__wsum csum)
+nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key)
 {
-	struct svc_cacherep *rp, *ret = NULL;
+	struct svc_cacherep *rp, *ret = key;
 	struct list_head *rh = &b->lru_head;
 	unsigned int entries = 0;
 
 	list_for_each_entry(rp, rh, c_lru) {
 		++entries;
-		if (nfsd_cache_match(rqstp, csum, rp)) {
+		if (nfsd_cache_match(key, rp)) {
 			ret = rp;
 			break;
 		}
@@ -374,6 +384,7 @@ nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
 				atomic_read(&num_drc_entries));
 	}
 
+	lru_put_end(b, ret);
 	return ret;
 }
 
@@ -389,9 +400,6 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 {
 	struct svc_cacherep *rp, *found;
 	__be32 xid = rqstp->rq_xid;
-	u32 proto = rqstp->rq_prot,
-		vers = rqstp->rq_vers,
-		proc = rqstp->rq_proc;
 	__wsum csum;
 	u32 hash = nfsd_cache_hash(xid);
 	struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
@@ -410,52 +418,38 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	 * Since the common case is a cache miss followed by an insert,
 	 * preallocate an entry.
 	 */
-	rp = nfsd_reply_cache_alloc();
-	spin_lock(&b->cache_lock);
-	if (likely(rp)) {
-		atomic_inc(&num_drc_entries);
-		drc_mem_usage += sizeof(*rp);
+	rp = nfsd_reply_cache_alloc(rqstp, csum);
+	if (!rp) {
+		dprintk("nfsd: unable to allocate DRC entry!\n");
+		return rtn;
 	}
 
-	/* go ahead and prune the cache */
-	prune_bucket(b);
-
-	found = nfsd_cache_search(b, rqstp, csum);
-	if (found) {
-		if (likely(rp))
-			nfsd_reply_cache_free_locked(rp);
+	spin_lock(&b->cache_lock);
+	found = nfsd_cache_insert(b, rp);
+	if (found != rp) {
+		nfsd_reply_cache_free_locked(rp);
 		rp = found;
 		goto found_entry;
 	}
 
-	if (!rp) {
-		dprintk("nfsd: unable to allocate DRC entry!\n");
-		goto out;
-	}
-
 	nfsdstats.rcmisses++;
 	rqstp->rq_cacherep = rp;
 	rp->c_state = RC_INPROG;
-	rp->c_xid = xid;
-	rp->c_proc = proc;
-	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
-	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
-	rp->c_prot = proto;
-	rp->c_vers = vers;
-	rp->c_len = rqstp->rq_arg.len;
-	rp->c_csum = csum;
 
-	lru_put_end(b, rp);
+	atomic_inc(&num_drc_entries);
+	drc_mem_usage += sizeof(*rp);
+
+	/* go ahead and prune the cache */
+	prune_bucket(b);
 out:
 	spin_unlock(&b->cache_lock);
	return rtn;
 
 found_entry:
-	nfsdstats.rchits++;
 	/* We found a matching entry which is either in progress or done. */
-	lru_put_end(b, rp);
-
+	nfsdstats.rchits++;
 	rtn = RC_DROPIT;
+
 	/* Request being processed */
 	if (rp->c_state == RC_INPROG)
 		goto out;