
Commit 6f0afc2

chucklever authored and amschuma-ntap committed
xprtrdma: Remove atomic send completion counting
The sendctx circular queue now guarantees that xprtrdma cannot overflow the Send Queue, so remove the remaining bits of the original Send WQE counting mechanism.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
1 parent 01bb35c commit 6f0afc2
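For context, the replacement mechanism the commit message refers to: Send Queue accounting now rests on the sendctx circular queue, and completion signaling is batch-based, using the rep_send_count and rep_send_batch fields that survive in the diffs below. A minimal sketch of batch signaling, assuming a hypothetical helper name and a simplified call site rather than the kernel's literal code:

	/* Sketch only: request one signaled Send completion per batch.
	 * That completion retires the unsignaled Send WQEs posted before
	 * it, so Send Queue consumption stays bounded.
	 */
	static void sketch_send_signaling(struct rpcrdma_ep *ep,
					  struct ib_send_wr *send_wr)
	{
		send_wr->send_flags = 0;
		if (--ep->rep_send_count == 0) {
			send_wr->send_flags = IB_SEND_SIGNALED;
			ep->rep_send_count = ep->rep_send_batch; /* assumed reset */
		}
	}

Unlike the rpcrdma_set_signaled() helper deleted below, this countdown needs no atomic, presumably because only one thread posts Sends on a given transport at a time; that per-post atomic is the "atomic send completion counting" the title removes.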

File tree: 3 files changed, +0 −33 lines changed

net/sunrpc/xprtrdma/frwr_ops.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h

net/sunrpc/xprtrdma/frwr_ops.c

Lines changed: 0 additions & 8 deletions
@@ -419,7 +419,6 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 			IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
 			IB_ACCESS_REMOTE_READ;
 
-	rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
 	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
 	if (rc)
 		goto out_senderr;
@@ -507,12 +506,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 	f->fr_cqe.done = frwr_wc_localinv_wake;
 	reinit_completion(&f->fr_linv_done);
 
-	/* Initialize CQ count, since there is always a signaled
-	 * WR being posted here. The new cqcount depends on how
-	 * many SQEs are about to be consumed.
-	 */
-	rpcrdma_init_cqcount(&r_xprt->rx_ep, count);
-
 	/* Transport disconnect drains the receive CQ before it
 	 * replaces the QP. The RPC reply handler won't call us
 	 * unless ri_id->qp is a valid pointer.
@@ -545,7 +538,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 	/* Find and reset the MRs in the LOCAL_INV WRs that did not
 	 * get posted.
 	 */
-	rpcrdma_init_cqcount(&r_xprt->rx_ep, -count);
 	while (bad_wr) {
 		f = container_of(bad_wr, struct rpcrdma_frmr,
 				 fr_invwr);
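Note the pairing deleted from frwr_op_unmap_sync(): per the deleted comment, rpcrdma_init_cqcount(&r_xprt->rx_ep, count) budgeted the countdown for the SQEs the LOCAL_INV chain was about to consume, and the -count call on the error path returned the budget for WRs that were never posted.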

net/sunrpc/xprtrdma/verbs.c

Lines changed: 0 additions & 4 deletions
@@ -553,10 +553,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
 				   cdata->max_requests >> 2);
 	ep->rep_send_count = ep->rep_send_batch;
-	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
-	if (ep->rep_cqinit <= 2)
-		ep->rep_cqinit = 0;	/* always signal? */
-	rpcrdma_init_cqcount(ep, 0);
 	init_waitqueue_head(&ep->rep_connect_wait);
 	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
 
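A worked example of the deleted initialization: with rep_attr.cap.max_send_wr = 200, rep_cqinit = 200/2 - 1 = 99, so the old scheme forced a signaled Send completion roughly once every 100 posts. For very small queues (max_send_wr <= 7, where the integer arithmetic yields rep_cqinit <= 2), rep_cqinit was clamped to 0 and every Send was signaled, as the deleted "/* always signal? */" comment suggests.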

net/sunrpc/xprtrdma/xprt_rdma.h

Lines changed: 0 additions & 21 deletions
@@ -95,8 +95,6 @@ enum {
 struct rpcrdma_ep {
 	unsigned int		rep_send_count;
 	unsigned int		rep_send_batch;
-	atomic_t		rep_cqcount;
-	int			rep_cqinit;
 	int			rep_connected;
 	struct ib_qp_init_attr	rep_attr;
 	wait_queue_head_t 	rep_connect_wait;
@@ -106,25 +104,6 @@ struct rpcrdma_ep {
 	struct delayed_work	rep_connect_worker;
 };
 
-static inline void
-rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
-{
-	atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
-}
-
-/* To update send queue accounting, provider must take a
- * send completion every now and then.
- */
-static inline void
-rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
-{
-	send_wr->send_flags = 0;
-	if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
-		rpcrdma_init_cqcount(ep, 0);
-		send_wr->send_flags = IB_SEND_SIGNALED;
-	}
-}
-
 /* Pre-allocate extra Work Requests for handling backward receives
  * and sends. This is a fixed value because the Work Queues are
  * allocated when the forward channel is set up.
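Background on the deleted helpers: an unsignaled Send WR still occupies a Send Queue entry, which is reclaimed only when a later signaled WR completes; that is why the deleted comment says the provider "must take a send completion every now and then". rpcrdma_set_signaled() enforced this by atomically decrementing rep_cqcount on every post and setting IB_SEND_SIGNALED when the countdown expired. Now that the sendctx circular queue guarantees the Send Queue cannot overflow, the per-post atomic_sub_return() is pure overhead, hence its removal.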
