Commit ee6c073

Merge tag 'nfsd-5.0-1' of git://linux-nfs.org/~bfields/linux
Pull nfsd fixes from Bruce Fields:

 "Two small nfsd bugfixes for 5.0, for an RDMA bug and a file clone bug"

* tag 'nfsd-5.0-1' of git://linux-nfs.org/~bfields/linux:
  svcrdma: Remove max_sge check at connect time
  nfsd: Fix error return values for nfsd4_clone_file_range()
2 parents 8b5cdbe + e248aa7

3 files changed: +106, -14 lines

fs/nfsd/vfs.c

Lines changed: 4 additions & 2 deletions
@@ -557,9 +557,11 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
 	loff_t cloned;
 
 	cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
+	if (cloned < 0)
+		return nfserrno(cloned);
 	if (count && cloned != count)
-		cloned = -EINVAL;
-	return nfserrno(cloned < 0 ? cloned : 0);
+		return nfserrno(-EINVAL);
+	return 0;
 }
 
 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
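
The point of the reordering is visible in the control flow: in the old code a negative return from vfs_clone_file_range() also satisfied "count && cloned != count", so the real errno was clobbered with -EINVAL before nfserrno() ever saw it. The fixed version propagates the errno first, reports a short clone as EINVAL, and returns 0 on success. A minimal userspace sketch of that pattern, with clone_status() as a hypothetical stand-in for the nfsd code, not a kernel API:

	#include <assert.h>
	#include <errno.h>

	typedef long long loff_t;	/* userspace stand-in for the kernel type */

	/* Hypothetical helper mirroring the fixed pattern: "done" is either
	 * a byte count or a negative errno, never both at once.
	 */
	static int clone_status(loff_t done, loff_t want)
	{
		if (done < 0)
			return (int)done;	/* propagate the real errno first */
		if (want && done != want)
			return -EINVAL;		/* a short clone is a definite error */
		return 0;			/* success carries no byte count */
	}

	int main(void)
	{
		assert(clone_status(4096, 4096) == 0);		/* exact clone */
		assert(clone_status(512, 4096) == -EINVAL);	/* short clone */
		assert(clone_status(-EXDEV, 4096) == -EXDEV);	/* errno survives */
		return 0;
	}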

net/sunrpc/xprtrdma/svc_rdma_sendto.c

Lines changed: 99 additions & 6 deletions
@@ -537,6 +537,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
 				      DMA_TO_DEVICE);
 }
 
+/* If the xdr_buf has more elements than the device can
+ * transmit in a single RDMA Send, then the reply will
+ * have to be copied into a bounce buffer.
+ */
+static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
+				    struct xdr_buf *xdr,
+				    __be32 *wr_lst)
+{
+	int elements;
+
+	/* xdr->head */
+	elements = 1;
+
+	/* xdr->pages */
+	if (!wr_lst) {
+		unsigned int remaining;
+		unsigned long pageoff;
+
+		pageoff = xdr->page_base & ~PAGE_MASK;
+		remaining = xdr->page_len;
+		while (remaining) {
+			++elements;
+			remaining -= min_t(u32, PAGE_SIZE - pageoff,
+					   remaining);
+			pageoff = 0;
+		}
+	}
+
+	/* xdr->tail */
+	if (xdr->tail[0].iov_len)
+		++elements;
+
+	/* assume 1 SGE is needed for the transport header */
+	return elements >= rdma->sc_max_send_sges;
+}
+
+/* The device is not capable of sending the reply directly.
+ * Assemble the elements of @xdr into the transport header
+ * buffer.
+ */
+static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
+				      struct svc_rdma_send_ctxt *ctxt,
+				      struct xdr_buf *xdr, __be32 *wr_lst)
+{
+	unsigned char *dst, *tailbase;
+	unsigned int taillen;
+
+	dst = ctxt->sc_xprt_buf;
+	dst += ctxt->sc_sges[0].length;
+
+	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
+	dst += xdr->head[0].iov_len;
+
+	tailbase = xdr->tail[0].iov_base;
+	taillen = xdr->tail[0].iov_len;
+	if (wr_lst) {
+		u32 xdrpad;
+
+		xdrpad = xdr_padsize(xdr->page_len);
+		if (taillen && xdrpad) {
+			tailbase += xdrpad;
+			taillen -= xdrpad;
+		}
+	} else {
+		unsigned int len, remaining;
+		unsigned long pageoff;
+		struct page **ppages;
+
+		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
+		pageoff = xdr->page_base & ~PAGE_MASK;
+		remaining = xdr->page_len;
+		while (remaining) {
+			len = min_t(u32, PAGE_SIZE - pageoff, remaining);
+
+			memcpy(dst, page_address(*ppages), len);
+			remaining -= len;
+			dst += len;
+			pageoff = 0;
+		}
+	}
+
+	if (taillen)
+		memcpy(dst, tailbase, taillen);
+
+	ctxt->sc_sges[0].length += xdr->len;
+	ib_dma_sync_single_for_device(rdma->sc_pd->device,
+				      ctxt->sc_sges[0].addr,
+				      ctxt->sc_sges[0].length,
+				      DMA_TO_DEVICE);
+
+	return 0;
+}
+
 /* svc_rdma_map_reply_msg - Map the buffer holding RPC message
  * @rdma: controlling transport
  * @ctxt: send_ctxt for the Send WR
@@ -559,8 +652,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
 	u32 xdr_pad;
 	int ret;
 
-	if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-		return -EIO;
+	if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
+		return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
+
+	++ctxt->sc_cur_sge_no;
 	ret = svc_rdma_dma_map_buf(rdma, ctxt,
 				   xdr->head[0].iov_base,
 				   xdr->head[0].iov_len);
@@ -591,8 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
 	while (remaining) {
 		len = min_t(u32, PAGE_SIZE - page_off, remaining);
 
-		if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-			return -EIO;
+		++ctxt->sc_cur_sge_no;
 		ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
 					    page_off, len);
 		if (ret < 0)
@@ -606,8 +700,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
 	len = xdr->tail[0].iov_len;
 tail:
 	if (len) {
-		if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-			return -EIO;
+		++ctxt->sc_cur_sge_no;
 		ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
 		if (ret < 0)
 			return ret;
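
Both new functions walk the xdr_buf the same way: one element for the head iovec, one per page touched by the page list (the first page may start at a nonzero offset, later pages start at zero), and one for a non-empty tail. The following standalone sketch reproduces the counting walk, assuming 4 KiB pages; pull_up_needed() here is a userspace stand-in for svc_rdma_pull_up_needed() with example parameters, not the kernel function itself:

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

	/* Count the Send SGEs a reply would need and compare against the
	 * connection's budget, mirroring the kernel's walk.
	 */
	static bool pull_up_needed(unsigned long page_base, unsigned int page_len,
				   unsigned int tail_len, int max_send_sges)
	{
		unsigned long pageoff = page_base & (PAGE_SIZE - 1);
		unsigned int remaining = page_len;
		int elements = 1;		/* xdr->head */

		while (remaining) {		/* one element per page touched */
			unsigned long len = PAGE_SIZE - pageoff;

			if (len > remaining)
				len = remaining;
			remaining -= len;
			pageoff = 0;		/* later pages start at offset 0 */
			++elements;
		}
		if (tail_len)
			++elements;		/* xdr->tail */

		/* ">=" keeps one SGE in reserve for the transport header */
		return elements >= max_send_sges;
	}

	int main(void)
	{
		/* A payload starting 100 bytes into its first page and running
		 * 8200 bytes touches three pages: 1 head + 3 pages + 1 tail = 5.
		 */
		printf("pull up? %d\n", pull_up_needed(100, 8200, 4, 5));	/* 1 */
		printf("pull up? %d\n", pull_up_needed(100, 8200, 4, 6));	/* 0 */
		return 0;
	}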

net/sunrpc/xprtrdma/svc_rdma_transport.c

Lines changed: 3 additions & 6 deletions
@@ -419,12 +419,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	/* Transport header, head iovec, tail iovec */
 	newxprt->sc_max_send_sges = 3;
 	/* Add one SGE per page list entry */
-	newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
-	if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
-		pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
-		       newxprt->sc_max_send_sges);
-		goto errout;
-	}
+	newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
+	if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
+		newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
 	newxprt->sc_max_req_size = svcrdma_max_req_size;
 	newxprt->sc_max_requests = svcrdma_max_requests;
 	newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
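
On the transport side, the connect-time policy changes from "refuse the connection if the device cannot cover the worst case" to "clamp the budget to what the device offers and let the pull-up path above absorb oversized replies". The worst case is three fixed SGEs (transport header, head iovec, tail iovec) plus one per page of the largest request, plus one more, presumably for a page-unaligned start. A sketch of the arithmetic with illustrative values (neither the 1 MiB request size nor the device cap of 4 is a module default):

	#include <stdio.h>

	#define PAGE_SIZE 4096U		/* assumption: 4 KiB pages */

	int main(void)
	{
		unsigned int svcrdma_max_req_size = 1048576;	/* example: 1 MiB */
		int dev_max_send_sge = 4;	/* hypothetical SGE-poor device */
		int sc_max_send_sges;

		/* Transport header, head iovec, tail iovec */
		sc_max_send_sges = 3;
		/* One SGE per page list entry, plus one for misalignment */
		sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
		/* Clamp to the device instead of failing the connection;
		 * replies needing more elements get pulled up instead.
		 */
		if (sc_max_send_sges > dev_max_send_sge)
			sc_max_send_sges = dev_max_send_sge;

		printf("sc_max_send_sges = %d\n", sc_max_send_sges);	/* 4 */
		return 0;
	}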
