@@ -537,6 +537,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
 				      DMA_TO_DEVICE);
 }
 
+/* If the xdr_buf has more elements than the device can
+ * transmit in a single RDMA Send, then the reply will
+ * have to be copied into a bounce buffer.
+ */
+static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
+				    struct xdr_buf *xdr,
+				    __be32 *wr_lst)
+{
+	int elements;
+
+	/* xdr->head */
+	elements = 1;
+
+	/* xdr->pages */
+	if (!wr_lst) {
+		unsigned int remaining;
+		unsigned long pageoff;
+
+		pageoff = xdr->page_base & ~PAGE_MASK;
+		remaining = xdr->page_len;
+		while (remaining) {
+			++elements;
+			remaining -= min_t(u32, PAGE_SIZE - pageoff,
+					   remaining);
+			pageoff = 0;
+		}
+	}
+
+	/* xdr->tail */
+	if (xdr->tail[0].iov_len)
+		++elements;
+
+	/* assume 1 SGE is needed for the transport header */
+	return elements >= rdma->sc_max_send_sges;
+}
+
+/* The device is not capable of sending the reply directly.
+ * Assemble the elements of @xdr into the transport header
+ * buffer.
+ */
+static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
+				      struct svc_rdma_send_ctxt *ctxt,
+				      struct xdr_buf *xdr, __be32 *wr_lst)
+{
+	unsigned char *dst, *tailbase;
+	unsigned int taillen;
+
+	dst = ctxt->sc_xprt_buf;
+	dst += ctxt->sc_sges[0].length;
+
+	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
+	dst += xdr->head[0].iov_len;
+
+	tailbase = xdr->tail[0].iov_base;
+	taillen = xdr->tail[0].iov_len;
+	if (wr_lst) {
+		u32 xdrpad;
+
+		xdrpad = xdr_padsize(xdr->page_len);
+		if (taillen && xdrpad) {
+			tailbase += xdrpad;
+			taillen -= xdrpad;
+		}
+	} else {
+		unsigned int len, remaining;
+		unsigned long pageoff;
+		struct page **ppages;
+
+		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
+		pageoff = xdr->page_base & ~PAGE_MASK;
+		remaining = xdr->page_len;
+		while (remaining) {
+			len = min_t(u32, PAGE_SIZE - pageoff, remaining);
+
+			memcpy(dst, page_address(*ppages), len);
+			remaining -= len;
+			dst += len;
+			pageoff = 0;
+		}
+	}
+
+	if (taillen)
+		memcpy(dst, tailbase, taillen);
+
+	ctxt->sc_sges[0].length += xdr->len;
+	ib_dma_sync_single_for_device(rdma->sc_pd->device,
+				      ctxt->sc_sges[0].addr,
+				      ctxt->sc_sges[0].length,
+				      DMA_TO_DEVICE);
+
+	return 0;
+}
+
 /* svc_rdma_map_reply_msg - Map the buffer holding RPC message
  * @rdma: controlling transport
  * @ctxt: send_ctxt for the Send WR
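Taken together, the two helpers added above implement a bounce-buffer fallback: count the scatter-gather elements the reply's xdr_buf would occupy, and if that count reaches the device's Send SGE limit, copy the pieces into the already-mapped transport header buffer so the whole reply can be posted with a single SGE. The following sketch is a minimal, self-contained model of that idea, not the kernel code itself; the struct seg type, the MAX_SGES value, and the buffer sizes are assumptions made purely for illustration.

/*
 * Illustrative sketch only: a simplified model of the pull-up path.
 * The types and limits here are assumptions, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_SGES 4	/* assumed per-device Send SGE limit */

struct seg {
	const char *base;	/* start of one discontiguous piece */
	size_t len;		/* its length in bytes */
};

/* Mirror of the decision in svc_rdma_pull_up_needed(): ">=" leaves
 * one SGE free for the transport header. */
static bool pull_up_needed(int nsegs)
{
	return nsegs >= MAX_SGES;
}

/* Mirror of svc_rdma_pull_up_reply_msg()'s core idea: flatten every
 * piece into one contiguous bounce buffer so a single SGE suffices. */
static size_t pull_up(const struct seg *segs, int nsegs,
		      char *bounce, size_t avail)
{
	size_t off = 0;

	for (int i = 0; i < nsegs; i++) {
		if (off + segs[i].len > avail)
			return 0;	/* would overflow the bounce buffer */
		memcpy(bounce + off, segs[i].base, segs[i].len);
		off += segs[i].len;
	}
	return off;
}

int main(void)
{
	/* A reply split across a head, several pages, and a tail. */
	struct seg reply[] = {
		{ "head", 4 }, { "pg0-", 4 }, { "pg1-", 4 },
		{ "pg2-", 4 }, { "tail", 4 },
	};
	int nsegs = sizeof(reply) / sizeof(reply[0]);
	char bounce[64];

	if (pull_up_needed(nsegs)) {
		size_t len = pull_up(reply, nsegs, bounce, sizeof(bounce));
		printf("pulled up %zu bytes into a single SGE\n", len);
	} else {
		printf("%d SGEs fit the device limit; send directly\n", nsegs);
	}
	return 0;
}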
@@ -559,8 +652,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
 	u32 xdr_pad;
 	int ret;
 
-	if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-		return -EIO;
+	if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
+		return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
+
+	++ctxt->sc_cur_sge_no;
 	ret = svc_rdma_dma_map_buf(rdma, ctxt,
 				   xdr->head[0].iov_base,
 				   xdr->head[0].iov_len);
@@ -591,8 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
 	while (remaining) {
 		len = min_t(u32, PAGE_SIZE - page_off, remaining);
 
-		if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-			return -EIO;
+		++ctxt->sc_cur_sge_no;
 		ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
 					    page_off, len);
 		if (ret < 0)
@@ -606,8 +700,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
 	len = xdr->tail[0].iov_len;
 tail:
 	if (len) {
-		if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-			return -EIO;
+		++ctxt->sc_cur_sge_no;
 		ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
 		if (ret < 0)
 			return ret;
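The remaining hunks show the payoff in svc_rdma_map_reply_msg(): the pull-up decision is now made once, up front, so the per-element ++ctxt->sc_cur_sge_no increments can no longer run past sc_max_send_sges and the three -EIO bail-outs are dropped. Continuing the earlier sketch (and reusing its hypothetical pull_up_needed() and pull_up() helpers), the reshaped control flow looks roughly like this:

/* Sketch of the reshaped mapping flow, reusing the hypothetical
 * helpers from the previous example. With the decision made up
 * front, the per-segment counter needs no bounds check. */
static int map_reply(const struct seg *segs, int nsegs,
		     char *bounce, size_t avail)
{
	int sge_no = 0;		/* SGE 0 carries the transport header */

	if (pull_up_needed(nsegs))
		return pull_up(segs, nsegs, bounce, avail) ? 0 : -1;

	for (int i = 0; i < nsegs; i++) {
		++sge_no;	/* cannot overrun: capacity was checked above */
		/* the real code DMA-maps segs[i] into SGE sge_no here */
	}
	return sge_no == nsegs ? 0 : -1;
}

One benefit of checking capacity before any mapping begins is that the send path no longer starts DMA-mapping elements only to fail partway through with -EIO.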