
Commit ce7c252

SUNRPC: Add a separate spinlock to protect the RPC request receive list
This further reduces contention with the transport_lock, and allows us to convert to using a non-bh-safe spinlock, since the list is now never accessed from a bh context.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
1 parent 040249d commit ce7c252
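
For orientation, the receive-side pattern that the hunks below converge on is sketched here. This is an illustrative composite modelled on the xs_udp_data_read_skb() changes in this diff, not verbatim kernel code; the function name and the elided copy step are placeholders. The reply handler takes the new recv_lock only long enough to look up and pin the request, drops it while the payload is copied, then retakes it to complete and unpin the request.

/* Illustrative sketch only, modelled on the xprtsock.c hunks below;
 * the function name and the elided copy step are placeholders.
 */
static void example_read_reply(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req;
	int copied = 0;

	spin_lock(&xprt->recv_lock);	/* non-bh: the recv list is never touched from bh context */
	req = xprt_lookup_rqst(xprt, xid);
	if (!req) {
		spin_unlock(&xprt->recv_lock);
		return;
	}
	xprt_pin_rqst(req);		/* keep req alive while the lock is dropped */
	spin_unlock(&xprt->recv_lock);

	/* ... copy the reply into req->rq_private_buf and set "copied" ... */

	spin_lock(&xprt->recv_lock);
	xprt_complete_rqst(req->rq_task, copied);
	xprt_unpin_rqst(req);
	spin_unlock(&xprt->recv_lock);
}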


6 files changed: 41 additions & 31 deletions


include/linux/sunrpc/xprt.h

Lines changed: 1 addition & 0 deletions
@@ -232,6 +232,7 @@ struct rpc_xprt {
 	 */
 	spinlock_t		transport_lock;	/* lock transport info */
 	spinlock_t		reserve_lock;	/* lock slot table */
+	spinlock_t		recv_lock;	/* lock receive list */
 	u32			xid;		/* Next XID value to use */
 	struct rpc_task *	snd_task;	/* Task blocked in send */
 	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */

net/sunrpc/svcsock.c

Lines changed: 3 additions & 3 deletions
@@ -1001,7 +1001,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
 
 	if (!bc_xprt)
 		return -EAGAIN;
-	spin_lock_bh(&bc_xprt->transport_lock);
+	spin_lock(&bc_xprt->recv_lock);
 	req = xprt_lookup_rqst(bc_xprt, xid);
 	if (!req)
 		goto unlock_notfound;
@@ -1019,7 +1019,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
 	memcpy(dst->iov_base, src->iov_base, src->iov_len);
 	xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
 	rqstp->rq_arg.len = 0;
-	spin_unlock_bh(&bc_xprt->transport_lock);
+	spin_unlock(&bc_xprt->recv_lock);
 	return 0;
 unlock_notfound:
 	printk(KERN_NOTICE
@@ -1028,7 +1028,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
 		__func__, ntohl(calldir),
 		bc_xprt, ntohl(xid));
 unlock_eagain:
-	spin_unlock_bh(&bc_xprt->transport_lock);
+	spin_unlock(&bc_xprt->recv_lock);
 	return -EAGAIN;
 }

net/sunrpc/xprt.c

Lines changed: 12 additions & 8 deletions
@@ -872,17 +872,17 @@ void xprt_unpin_rqst(struct rpc_rqst *req)
 }
 
 static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
-__must_hold(&req->rq_xprt->transport_lock)
+__must_hold(&req->rq_xprt->recv_lock)
 {
 	struct rpc_task *task = req->rq_task;
 
 	if (task && test_bit(RPC_TASK_MSG_RECV, &task->tk_runstate)) {
-		spin_unlock_bh(&req->rq_xprt->transport_lock);
+		spin_unlock(&req->rq_xprt->recv_lock);
 		set_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
 		wait_on_bit(&task->tk_runstate, RPC_TASK_MSG_RECV,
 				TASK_UNINTERRUPTIBLE);
 		clear_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
-		spin_lock_bh(&req->rq_xprt->transport_lock);
+		spin_lock(&req->rq_xprt->recv_lock);
 	}
 }
 
@@ -1008,13 +1008,13 @@ void xprt_transmit(struct rpc_task *task)
 			/*
 			 * Add to the list only if we're expecting a reply
 			 */
-			spin_lock_bh(&xprt->transport_lock);
 			/* Update the softirq receive buffer */
 			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
 					sizeof(req->rq_private_buf));
 			/* Add request to the receive list */
+			spin_lock(&xprt->recv_lock);
 			list_add_tail(&req->rq_list, &xprt->recv);
-			spin_unlock_bh(&xprt->transport_lock);
+			spin_unlock(&xprt->recv_lock);
 			xprt_reset_majortimeo(req);
 			/* Turn off autodisconnect */
 			del_singleshot_timer_sync(&xprt->timer);
@@ -1329,15 +1329,18 @@ void xprt_release(struct rpc_task *task)
 		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
 	else if (task->tk_client)
 		rpc_count_iostats(task, task->tk_client->cl_metrics);
+	spin_lock(&xprt->recv_lock);
+	if (!list_empty(&req->rq_list)) {
+		list_del(&req->rq_list);
+		xprt_wait_on_pinned_rqst(req);
+	}
+	spin_unlock(&xprt->recv_lock);
 	spin_lock_bh(&xprt->transport_lock);
 	xprt->ops->release_xprt(xprt, task);
 	if (xprt->ops->release_request)
 		xprt->ops->release_request(task);
-	if (!list_empty(&req->rq_list))
-		list_del(&req->rq_list);
 	xprt->last_used = jiffies;
 	xprt_schedule_autodisconnect(xprt);
-	xprt_wait_on_pinned_rqst(req);
 	spin_unlock_bh(&xprt->transport_lock);
 	if (req->rq_buffer)
 		xprt->ops->buf_free(task);
@@ -1361,6 +1364,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
 
 	spin_lock_init(&xprt->transport_lock);
 	spin_lock_init(&xprt->reserve_lock);
+	spin_lock_init(&xprt->recv_lock);
 
 	INIT_LIST_HEAD(&xprt->free);
 	INIT_LIST_HEAD(&xprt->recv);
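
A condensed view of the resulting teardown ordering in xprt_release(), distilled from the hunk above (unrelated teardown steps are omitted): the request is unlinked from the receive list, and any reply handler that still has it pinned is waited for, entirely under recv_lock, before the bh-safe transport_lock is taken for the transport-level release.

	/* Condensed from the xprt_release() hunk above; not the complete function. */
	spin_lock(&xprt->recv_lock);
	if (!list_empty(&req->rq_list)) {
		list_del(&req->rq_list);
		xprt_wait_on_pinned_rqst(req);	/* may drop and retake recv_lock while sleeping */
	}
	spin_unlock(&xprt->recv_lock);

	spin_lock_bh(&xprt->transport_lock);	/* transport state is still guarded by the bh-safe lock */
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);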

net/sunrpc/xprtrdma/rpc_rdma.c

Lines changed: 4 additions & 4 deletions
@@ -1051,7 +1051,7 @@ rpcrdma_reply_handler(struct work_struct *work)
 	 * RPC completion while holding the transport lock to ensure
 	 * the rep, rqst, and rq_task pointers remain stable.
 	 */
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->recv_lock);
 	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
 	if (!rqst)
 		goto out_norqst;
@@ -1136,7 +1136,7 @@ rpcrdma_reply_handler(struct work_struct *work)
 		xprt_release_rqst_cong(rqst->rq_task);
 
 	xprt_complete_rqst(rqst->rq_task, status);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->recv_lock);
 	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
 		__func__, xprt, rqst, status);
 	return;
@@ -1187,12 +1187,12 @@ rpcrdma_reply_handler(struct work_struct *work)
 	r_xprt->rx_stats.bad_reply_count++;
 	goto out;
 
-/* The req was still available, but by the time the transport_lock
+/* The req was still available, but by the time the recv_lock
  * was acquired, the rqst and task had been released. Thus the RPC
  * has already been terminated.
  */
 out_norqst:
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->recv_lock);
 	rpcrdma_buffer_put(req);
 	dprintk("RPC: %s: race, no rqst left for req %p\n",
 		__func__, req);

net/sunrpc/xprtrdma/svc_rdma_backchannel.c

Lines changed: 5 additions & 2 deletions
@@ -52,7 +52,7 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
 	if (src->iov_len < 24)
 		goto out_shortreply;
 
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->recv_lock);
 	req = xprt_lookup_rqst(xprt, xid);
 	if (!req)
 		goto out_notfound;
@@ -69,17 +69,20 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
 	else if (credits > r_xprt->rx_buf.rb_bc_max_requests)
 		credits = r_xprt->rx_buf.rb_bc_max_requests;
 
+	spin_lock_bh(&xprt->transport_lock);
 	cwnd = xprt->cwnd;
 	xprt->cwnd = credits << RPC_CWNDSHIFT;
 	if (xprt->cwnd > cwnd)
 		xprt_release_rqst_cong(req->rq_task);
+	spin_unlock_bh(&xprt->transport_lock);
+
 
 	ret = 0;
 	xprt_complete_rqst(req->rq_task, rcvbuf->len);
 	rcvbuf->len = 0;
 
 out_unlock:
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->recv_lock);
 out:
 	return ret;
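
The backchannel hunk above also shows the lock nesting this split establishes: recv_lock is the outer lock held across the XID lookup and completion, while the bh-safe transport_lock is only taken briefly inside it for the congestion-window update. A condensed sketch (error paths and buffer handling omitted; not the complete svc_rdma_handle_bc_reply()):

	spin_lock(&xprt->recv_lock);			/* outer: request lookup and completion */
	req = xprt_lookup_rqst(xprt, xid);

	spin_lock_bh(&xprt->transport_lock);		/* inner: congestion window only */
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	spin_unlock_bh(&xprt->transport_lock);

	xprt_complete_rqst(req->rq_task, rcvbuf->len);
	spin_unlock(&xprt->recv_lock);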

net/sunrpc/xprtsock.c

Lines changed: 16 additions & 14 deletions
@@ -969,12 +969,12 @@ static void xs_local_data_read_skb(struct rpc_xprt *xprt,
 		return;
 
 	/* Look up and lock the request corresponding to the given XID */
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->recv_lock);
 	rovr = xprt_lookup_rqst(xprt, *xp);
 	if (!rovr)
 		goto out_unlock;
 	xprt_pin_rqst(rovr);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->recv_lock);
 	task = rovr->rq_task;
 
 	copied = rovr->rq_private_buf.buflen;
@@ -983,16 +983,16 @@ static void xs_local_data_read_skb(struct rpc_xprt *xprt,
 
 	if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
 		dprintk("RPC: sk_buff copy failed\n");
-		spin_lock_bh(&xprt->transport_lock);
+		spin_lock(&xprt->recv_lock);
 		goto out_unpin;
 	}
 
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->recv_lock);
 	xprt_complete_rqst(task, copied);
 out_unpin:
 	xprt_unpin_rqst(rovr);
 out_unlock:
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->recv_lock);
 }
 
 static void xs_local_data_receive(struct sock_xprt *transport)
@@ -1055,12 +1055,12 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
 		return;
 
 	/* Look up and lock the request corresponding to the given XID */
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->recv_lock);
 	rovr = xprt_lookup_rqst(xprt, *xp);
 	if (!rovr)
 		goto out_unlock;
 	xprt_pin_rqst(rovr);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->recv_lock);
 	task = rovr->rq_task;
 
 	if ((copied = rovr->rq_private_buf.buflen) > repsize)
@@ -1069,19 +1069,21 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
 	/* Suck it into the iovec, verify checksum if not done by hw. */
 	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
 		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
-		spin_lock_bh(&xprt->transport_lock);
+		spin_lock(&xprt->recv_lock);
 		goto out_unpin;
 	}
 
 	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
 
 	spin_lock_bh(&xprt->transport_lock);
 	xprt_adjust_cwnd(xprt, task, copied);
+	spin_unlock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->recv_lock);
 	xprt_complete_rqst(task, copied);
 out_unpin:
 	xprt_unpin_rqst(rovr);
 out_unlock:
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->recv_lock);
 }
 
 static void xs_udp_data_receive(struct sock_xprt *transport)
@@ -1344,24 +1346,24 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
 	dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
 
 	/* Find and lock the request corresponding to this xid */
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->recv_lock);
 	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
 	if (!req) {
 		dprintk("RPC: XID %08x request not found!\n",
 				ntohl(transport->tcp_xid));
-		spin_unlock_bh(&xprt->transport_lock);
+		spin_unlock(&xprt->recv_lock);
 		return -1;
 	}
 	xprt_pin_rqst(req);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->recv_lock);
 
 	xs_tcp_read_common(xprt, desc, req);
 
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->recv_lock);
 	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
 		xprt_complete_rqst(req->rq_task, transport->tcp_copied);
 	xprt_unpin_rqst(req);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->recv_lock);
 	return 0;
 }
