Skip to content

Commit 4201c74

Browse files
Chuck Lever (chucklever) authored
J. Bruce Fields committed
svcrdma: Introduce svc_rdma_send_ctxt
svc_rdma_op_ctxt's are pre-allocated and maintained on a per-xprt free list. This eliminates the overhead of calling kmalloc / kfree, both of which grab a globally shared lock that disables interrupts. Introduce a replacement to svc_rdma_op_ctxt's that is built especially for the svcrdma Send path. Subsequent patches will take advantage of this new structure by allocating real resources which are then cached in these objects. The allocations are freed when the transport is torn down. I've renamed the structure so that static type checking can be used to ensure that uses of op_ctxt and send_ctxt are not confused. As an additional clean up, structure fields are renamed to conform with kernel coding conventions. Additional clean ups: - Handle svc_rdma_send_ctxt_get allocation failure at each call site, rather than pre-allocating and hoping we guessed correctly - All send_ctxt_put call-sites request page freeing, so remove the @free_pages argument - All send_ctxt_put call-sites unmap SGEs, so fold that into svc_rdma_send_ctxt_put Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
1 parent 2326279 commit 4201c74

File tree

5 files changed

+254
-266
lines changed

5 files changed

+254
-266
lines changed

include/linux/sunrpc/svc_rdma.h

Lines changed: 23 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -109,8 +109,8 @@ struct svcxprt_rdma {
109109

110110
struct ib_pd *sc_pd;
111111

112-
spinlock_t sc_ctxt_lock;
113-
struct list_head sc_ctxts;
112+
spinlock_t sc_send_lock;
113+
struct list_head sc_send_ctxts;
114114
int sc_ctxt_used;
115115
spinlock_t sc_rw_ctxt_lock;
116116
struct list_head sc_rw_ctxts;
@@ -158,6 +158,19 @@ struct svc_rdma_recv_ctxt {
158158
struct page *rc_pages[RPCSVC_MAXPAGES];
159159
};
160160

161+
enum {
162+
RPCRDMA_MAX_SGES = 1 + (RPCRDMA_MAX_INLINE_THRESH / PAGE_SIZE),
163+
};
164+
165+
struct svc_rdma_send_ctxt {
166+
struct list_head sc_list;
167+
struct ib_send_wr sc_send_wr;
168+
struct ib_cqe sc_cqe;
169+
int sc_page_count;
170+
struct page *sc_pages[RPCSVC_MAXPAGES];
171+
struct ib_sge sc_sges[RPCRDMA_MAX_SGES];
172+
};
173+
161174
/* svc_rdma_backchannel.c */
162175
extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
163176
__be32 *rdma_resp,
@@ -183,24 +196,22 @@ extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
183196
struct xdr_buf *xdr);
184197

185198
/* svc_rdma_sendto.c */
199+
extern void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma);
200+
extern struct svc_rdma_send_ctxt *
201+
svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma);
202+
extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
203+
struct svc_rdma_send_ctxt *ctxt);
204+
extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr);
186205
extern int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma,
187-
struct svc_rdma_op_ctxt *ctxt,
206+
struct svc_rdma_send_ctxt *ctxt,
188207
__be32 *rdma_resp, unsigned int len);
189208
extern int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma,
190-
struct svc_rdma_op_ctxt *ctxt,
209+
struct svc_rdma_send_ctxt *ctxt,
191210
u32 inv_rkey);
192211
extern int svc_rdma_sendto(struct svc_rqst *);
193212

194213
/* svc_rdma_transport.c */
195-
extern void svc_rdma_wc_send(struct ib_cq *, struct ib_wc *);
196-
extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
197-
extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
198-
extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
199-
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
200214
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
201-
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
202-
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
203-
extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
204215
extern void svc_sq_reap(struct svcxprt_rdma *);
205216
extern void svc_rq_reap(struct svcxprt_rdma *);
206217
extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);

net/sunrpc/xprtrdma/svc_rdma_backchannel.c

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
// SPDX-License-Identifier: GPL-2.0
22
/*
3-
* Copyright (c) 2015 Oracle. All rights reserved.
3+
* Copyright (c) 2015-2018 Oracle. All rights reserved.
44
*
55
* Support for backward direction RPCs on RPC/RDMA (server-side).
66
*/
@@ -117,10 +117,14 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
117117
static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
118118
struct rpc_rqst *rqst)
119119
{
120-
struct svc_rdma_op_ctxt *ctxt;
120+
struct svc_rdma_send_ctxt *ctxt;
121121
int ret;
122122

123-
ctxt = svc_rdma_get_context(rdma);
123+
ctxt = svc_rdma_send_ctxt_get(rdma);
124+
if (!ctxt) {
125+
ret = -ENOMEM;
126+
goto out_err;
127+
}
124128

125129
/* rpcrdma_bc_send_request builds the transport header and
126130
* the backchannel RPC message in the same buffer. Thus only
@@ -144,8 +148,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
144148
return ret;
145149

146150
out_unmap:
147-
svc_rdma_unmap_dma(ctxt);
148-
svc_rdma_put_context(ctxt, 1);
151+
svc_rdma_send_ctxt_put(rdma, ctxt);
149152
ret = -EIO;
150153
goto out_err;
151154
}

net/sunrpc/xprtrdma/svc_rdma_recvfrom.c

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -601,7 +601,7 @@ static void rdma_read_complete(struct svc_rqst *rqstp,
601601
static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
602602
__be32 *rdma_argp, int status)
603603
{
604-
struct svc_rdma_op_ctxt *ctxt;
604+
struct svc_rdma_send_ctxt *ctxt;
605605
__be32 *p, *err_msgp;
606606
unsigned int length;
607607
struct page *page;
@@ -631,7 +631,10 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
631631
length = (unsigned long)p - (unsigned long)err_msgp;
632632

633633
/* Map transport header; no RPC message payload */
634-
ctxt = svc_rdma_get_context(xprt);
634+
ctxt = svc_rdma_send_ctxt_get(xprt);
635+
if (!ctxt)
636+
return;
637+
635638
ret = svc_rdma_map_reply_hdr(xprt, ctxt, err_msgp, length);
636639
if (ret) {
637640
dprintk("svcrdma: Error %d mapping send for protocol error\n",
@@ -640,10 +643,8 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
640643
}
641644

642645
ret = svc_rdma_post_send_wr(xprt, ctxt, 0);
643-
if (ret) {
644-
svc_rdma_unmap_dma(ctxt);
645-
svc_rdma_put_context(ctxt, 1);
646-
}
646+
if (ret)
647+
svc_rdma_send_ctxt_put(xprt, ctxt);
647648
}
648649

649650
/* By convention, backchannel calls arrive via rdma_msg type

0 commit comments

Comments (0)