
Commit 2fad659

chucklever authored and amschuma-ntap committed
xprtrdma: Wait on empty sendctx queue
Currently, when the sendctx queue is exhausted during marshaling, the RPC/RDMA transport places the RPC task on the delayq, which forces a wait for HZ >> 2 before the marshal and send is retried.

With this change, the transport now places such an RPC task on the pending queue, and wakes it just as soon as more sendctxs become available. This typically takes less than a millisecond, and the write_space waking mechanism is less deadlock-prone.

Moreover, the waiting RPC task is holding the transport's write lock, which blocks the transport from sending RPCs. Therefore faster recovery from sendctx queue exhaustion is desirable.

Cf. commit 5804891455d5 ("xprtrdma: ->send_request returns -EAGAIN when there are no free MRs").

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
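For illustration only: the sketch below mimics, in plain user-space C with pthreads, the wait/wake pattern the patch adopts. Nothing in it is kernel code — struct ctx_queue, ctx_get(), and ctx_put() are made-up stand-ins: the boolean flag plays the role of RPCRDMA_BUF_F_EMPTY_SCQ, and the condition-variable broadcast stands in for xprt_write_space().

/* Illustrative sketch only -- not the kernel implementation. A getter that
 * finds the queue empty marks it (like set_bit(RPCRDMA_BUF_F_EMPTY_SCQ)),
 * and the next put clears the mark and wakes waiters (like
 * test_and_clear_bit() followed by xprt_write_space()).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx_queue {
	pthread_mutex_t	lock;
	pthread_cond_t	more;		/* stands in for xprt_write_space() */
	unsigned int	free_ctxs;	/* stands in for the sendctx ring */
	bool		empty_scq;	/* stands in for RPCRDMA_BUF_F_EMPTY_SCQ */
};

/* Marshal path: take a context, or note the exhaustion and fail fast. */
static bool ctx_get(struct ctx_queue *q)
{
	bool ok = false;

	pthread_mutex_lock(&q->lock);
	if (q->free_ctxs) {
		q->free_ctxs--;
		ok = true;
	} else {
		q->empty_scq = true;	/* caller sees the -EAGAIN analogue */
	}
	pthread_mutex_unlock(&q->lock);
	return ok;
}

/* Completion path: return a context; wake waiters only if one went hungry. */
static void ctx_put(struct ctx_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->free_ctxs++;
	if (q->empty_scq) {
		q->empty_scq = false;
		/* No waiter thread in this demo; this is where the wakeup goes. */
		pthread_cond_broadcast(&q->more);
	}
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	static struct ctx_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.more = PTHREAD_COND_INITIALIZER,
		.free_ctxs = 1,
	};

	ctx_get(&q);				/* drains the queue */
	printf("second get: %s\n",
	       ctx_get(&q) ? "ok" : "would return -EAGAIN");
	ctx_put(&q);				/* clears the flag, signals waiters */
	printf("after put:  %s\n",
	       ctx_get(&q) ? "ok" : "would return -EAGAIN");
	return 0;
}

The point of the pattern, as in the patch, is that the wakeup happens only when a getter actually went away empty-handed, so the common put path stays cheap.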
1 parent ed3aa74

3 files changed: 14 additions & 2 deletions


net/sunrpc/xprtrdma/rpc_rdma.c

Lines changed: 1 addition & 1 deletion
@@ -695,7 +695,7 @@ rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
 {
 	req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
 	if (!req->rl_sendctx)
-		return -ENOBUFS;
+		return -EAGAIN;
 	req->rl_sendctx->sc_wr.num_sge = 0;
 	req->rl_sendctx->sc_unmap_count = 0;
 	req->rl_sendctx->sc_req = req;

net/sunrpc/xprtrdma/verbs.c

Lines changed: 7 additions & 1 deletion
@@ -878,6 +878,7 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
 		sc->sc_xprt = r_xprt;
 		buf->rb_sc_ctxs[i] = sc;
 	}
+	buf->rb_flags = 0;
 
 	return 0;
 
@@ -935,7 +936,7 @@ struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
 	 * completions recently. This is a sign the Send Queue is
 	 * backing up. Cause the caller to pause and try again.
 	 */
-	dprintk("RPC: %s: empty sendctx queue\n", __func__);
+	set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags);
 	r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
 	r_xprt->rx_stats.empty_sendctx_q++;
 	return NULL;
@@ -970,6 +971,11 @@ rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
 
 	/* Paired with READ_ONCE */
 	smp_store_release(&buf->rb_sc_tail, next_tail);
+
+	if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) {
+		smp_mb__after_atomic();
+		xprt_write_space(&sc->sc_xprt->rx_xprt);
+	}
 }
 
 static void

net/sunrpc/xprtrdma/xprt_rdma.h

Lines changed: 6 additions & 0 deletions
@@ -400,6 +400,7 @@ struct rpcrdma_buffer {
 	spinlock_t		rb_lock;	/* protect buf lists */
 	struct list_head	rb_send_bufs;
 	struct list_head	rb_recv_bufs;
+	unsigned long		rb_flags;
 	u32			rb_max_requests;
 	u32			rb_credits;	/* most recent credit grant */
 	int			rb_posted_receives;
@@ -417,6 +418,11 @@ struct rpcrdma_buffer {
 };
 #define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
 
+/* rb_flags */
+enum {
+	RPCRDMA_BUF_F_EMPTY_SCQ	= 0,
+};
+
 /*
  * Internal structure for transport instance creation. This
  * exists primarily for modularity.
