
Commit 81fa327

Chuck Lever (chucklever) authored and J. Bruce Fields committed
svcrdma: Poll CQs in "workqueue" mode
svcrdma calls svc_xprt_put() in its completion handlers, which currently run in IRQ context. However, svc_xprt_put() is meant to be invoked in process context, not in IRQ context: after the last transport reference is gone, it directly calls a transport release function that expects to run in process context.

Change the CQ polling modes to IB_POLL_WORKQUEUE so that svcrdma invokes svc_xprt_put() only in process context. As an added benefit, bottom-half-disabled spin locking can be eliminated from the I/O paths.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
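[Editor's note] For context, a minimal sketch of the pattern this commit adopts. This is an illustration, not code from the patch: struct example_rdma_xprt and example_alloc_sq_cq() are hypothetical names, though ib_alloc_cq() and IB_POLL_WORKQUEUE are the real verbs-layer API used in the diff below. A CQ allocated in workqueue polling mode has its completion handlers invoked from a kernel workqueue, i.e. in process context, which is what makes it safe for those handlers to call svc_xprt_put().

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical container mirroring the fields this patch touches. */
struct example_rdma_xprt {
	struct ib_cq	*sc_sq_cq;
	unsigned int	 sc_sq_depth;
};

/* Allocate a send CQ polled from a workqueue. Completion handlers
 * for work posted to this CQ then run in process context, so they
 * may call process-context-only functions such as svc_xprt_put()
 * and take plain spin_lock().
 */
static int example_alloc_sq_cq(struct example_rdma_xprt *xprt,
			       struct ib_device *dev)
{
	xprt->sc_sq_cq = ib_alloc_cq(dev, xprt, xprt->sc_sq_depth,
				     0 /* comp_vector */,
				     IB_POLL_WORKQUEUE);
	if (IS_ERR(xprt->sc_sq_cq))
		return PTR_ERR(xprt->sc_sq_cq);
	return 0;
}

Because completions no longer arrive in bottom-half context, locks shared with completion handlers can drop their _bh variants, which is exactly what the rest of the diff does.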
1 parent a3ab867 commit 81fa327

2 files changed, 16 insertions(+), 16 deletions(-)

net/sunrpc/xprtrdma/svc_rdma_recvfrom.c

Lines changed: 3 additions & 3 deletions

@@ -606,12 +606,12 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 
 	dprintk("svcrdma: rqstp=%p\n", rqstp);
 
-	spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
+	spin_lock(&rdma_xprt->sc_rq_dto_lock);
 	if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
 		ctxt = list_first_entry(&rdma_xprt->sc_read_complete_q,
 					struct svc_rdma_op_ctxt, list);
 		list_del(&ctxt->list);
-		spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
+		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
 		rdma_read_complete(rqstp, ctxt);
 		goto complete;
 	} else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
@@ -623,7 +623,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 		clear_bit(XPT_DATA, &xprt->xpt_flags);
 		ctxt = NULL;
 	}
-	spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
+	spin_unlock(&rdma_xprt->sc_rq_dto_lock);
 	if (!ctxt) {
 		/* This is the EAGAIN path. The svc_recv routine will
 		 * return -EAGAIN, the nfsd thread will go to call into
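[Editor's note] To make the locking change above concrete, here is a hypothetical sketch; none of these names appear in the patch. Under IB_POLL_SOFTIRQ, a completion handler could interrupt a thread on the same CPU while that thread held the shared queue lock, so the thread side had to disable bottom halves with spin_lock_bh(). Under IB_POLL_WORKQUEUE both sides run in process context, and plain spin_lock() is sufficient.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>

/* Hypothetical types for illustration; not part of the patch. */
struct example_xprt {
	spinlock_t	 dto_lock;
	struct list_head dto_q;
};

struct example_ctxt {
	struct list_head list;
	struct ib_cqe	 cqe;	/* cqe.done = example_wc_handler before posting */
};

/* Completion side: with IB_POLL_WORKQUEUE this handler runs in
 * process context (a workqueue), so a plain spin_lock() suffices.
 */
static void example_wc_handler(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_xprt *xprt = cq->cq_context;
	struct example_ctxt *ctxt =
		container_of(wc->wr_cqe, struct example_ctxt, cqe);

	spin_lock(&xprt->dto_lock);
	list_add_tail(&ctxt->list, &xprt->dto_q);
	spin_unlock(&xprt->dto_lock);
}

/* Consumer side: under IB_POLL_SOFTIRQ this had to be spin_lock_bh()
 * so the softirq completion handler could not deadlock against us on
 * the same CPU; in workqueue mode spin_lock() is enough.
 */
static struct example_ctxt *example_dequeue(struct example_xprt *xprt)
{
	struct example_ctxt *ctxt = NULL;

	spin_lock(&xprt->dto_lock);
	if (!list_empty(&xprt->dto_q)) {
		ctxt = list_first_entry(&xprt->dto_q,
					struct example_ctxt, list);
		list_del(&ctxt->list);
	}
	spin_unlock(&xprt->dto_lock);
	return ctxt;
}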

net/sunrpc/xprtrdma/svc_rdma_transport.c

Lines changed: 13 additions & 13 deletions

@@ -188,15 +188,15 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 {
 	struct svc_rdma_op_ctxt *ctxt = NULL;
 
-	spin_lock_bh(&xprt->sc_ctxt_lock);
+	spin_lock(&xprt->sc_ctxt_lock);
 	xprt->sc_ctxt_used++;
 	if (list_empty(&xprt->sc_ctxts))
 		goto out_empty;
 
 	ctxt = list_first_entry(&xprt->sc_ctxts,
 				struct svc_rdma_op_ctxt, list);
 	list_del(&ctxt->list);
-	spin_unlock_bh(&xprt->sc_ctxt_lock);
+	spin_unlock(&xprt->sc_ctxt_lock);
 
 out:
 	ctxt->count = 0;
@@ -208,15 +208,15 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 	/* Either pre-allocation missed the mark, or send
 	 * queue accounting is broken.
 	 */
-	spin_unlock_bh(&xprt->sc_ctxt_lock);
+	spin_unlock(&xprt->sc_ctxt_lock);
 
 	ctxt = alloc_ctxt(xprt, GFP_NOIO);
 	if (ctxt)
 		goto out;
 
-	spin_lock_bh(&xprt->sc_ctxt_lock);
+	spin_lock(&xprt->sc_ctxt_lock);
 	xprt->sc_ctxt_used--;
-	spin_unlock_bh(&xprt->sc_ctxt_lock);
+	spin_unlock(&xprt->sc_ctxt_lock);
 	WARN_ONCE(1, "svcrdma: empty RDMA ctxt list?\n");
 	return NULL;
 }
@@ -253,10 +253,10 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 	for (i = 0; i < ctxt->count; i++)
 		put_page(ctxt->pages[i]);
 
-	spin_lock_bh(&xprt->sc_ctxt_lock);
+	spin_lock(&xprt->sc_ctxt_lock);
 	xprt->sc_ctxt_used--;
 	list_add(&ctxt->list, &xprt->sc_ctxts);
-	spin_unlock_bh(&xprt->sc_ctxt_lock);
+	spin_unlock(&xprt->sc_ctxt_lock);
 }
 
 static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt)
@@ -921,14 +921,14 @@ struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
 {
 	struct svc_rdma_fastreg_mr *frmr = NULL;
 
-	spin_lock_bh(&rdma->sc_frmr_q_lock);
+	spin_lock(&rdma->sc_frmr_q_lock);
 	if (!list_empty(&rdma->sc_frmr_q)) {
 		frmr = list_entry(rdma->sc_frmr_q.next,
 				  struct svc_rdma_fastreg_mr, frmr_list);
 		list_del_init(&frmr->frmr_list);
 		frmr->sg_nents = 0;
 	}
-	spin_unlock_bh(&rdma->sc_frmr_q_lock);
+	spin_unlock(&rdma->sc_frmr_q_lock);
 	if (frmr)
 		return frmr;
 
@@ -941,10 +941,10 @@ void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
 	if (frmr) {
 		ib_dma_unmap_sg(rdma->sc_cm_id->device,
 				frmr->sg, frmr->sg_nents, frmr->direction);
-		spin_lock_bh(&rdma->sc_frmr_q_lock);
+		spin_lock(&rdma->sc_frmr_q_lock);
 		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
 		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
-		spin_unlock_bh(&rdma->sc_frmr_q_lock);
+		spin_unlock(&rdma->sc_frmr_q_lock);
 	}
 }
 
@@ -1026,13 +1026,13 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		goto errout;
 	}
 	newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
-					0, IB_POLL_SOFTIRQ);
+					0, IB_POLL_WORKQUEUE);
 	if (IS_ERR(newxprt->sc_sq_cq)) {
 		dprintk("svcrdma: error creating SQ CQ for connect request\n");
 		goto errout;
 	}
 	newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_rq_depth,
-					0, IB_POLL_SOFTIRQ);
+					0, IB_POLL_WORKQUEUE);
 	if (IS_ERR(newxprt->sc_rq_cq)) {
 		dprintk("svcrdma: error creating RQ CQ for connect request\n");
 		goto errout;
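[Editor's note] One more pattern worth calling out from the svc_rdma_get_context() hunks above: the context pool drops its spinlock before falling back to a sleeping allocation (a spinlock must not be held across a call that may sleep), then retakes it only to undo the usage count if the allocation fails. A generic sketch with hypothetical names (example_pool, example_item, example_get); GFP_NOIO matches the flag used in the patch.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical pool illustrating the lock-drop-allocate pattern. */
struct example_item {
	struct list_head list;
};

struct example_pool {
	spinlock_t	 lock;
	struct list_head free;
	unsigned int	 used;
};

static struct example_item *example_get(struct example_pool *pool)
{
	struct example_item *item;

	spin_lock(&pool->lock);
	pool->used++;
	if (!list_empty(&pool->free)) {
		item = list_first_entry(&pool->free,
					struct example_item, list);
		list_del(&item->list);
		spin_unlock(&pool->lock);
		return item;
	}
	spin_unlock(&pool->lock);

	/* Pool empty: fall back to a sleeping allocation. GFP_NOIO
	 * avoids recursing into I/O while servicing an I/O path.
	 */
	item = kzalloc(sizeof(*item), GFP_NOIO);
	if (item)
		return item;

	spin_lock(&pool->lock);
	pool->used--;		/* undo the optimistic accounting */
	spin_unlock(&pool->lock);
	return NULL;
}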
