@@ -194,6 +194,8 @@ struct ntb_transport_mw {
 	void __iomem *vbase;
 	size_t xlat_size;
 	size_t buff_size;
+	size_t alloc_size;
+	void *alloc_addr;
 	void *virt_addr;
 	dma_addr_t dma_addr;
 };
@@ -672,13 +674,59 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
 		return;
 
 	ntb_mw_clear_trans(nt->ndev, PIDX, num_mw);
-	dma_free_coherent(&pdev->dev, mw->buff_size,
-			  mw->virt_addr, mw->dma_addr);
+	dma_free_coherent(&pdev->dev, mw->alloc_size,
+			  mw->alloc_addr, mw->dma_addr);
 	mw->xlat_size = 0;
 	mw->buff_size = 0;
+	mw->alloc_size = 0;
+	mw->alloc_addr = NULL;
 	mw->virt_addr = NULL;
 }
 
+static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
+			       struct device *dma_dev, size_t align)
+{
+	dma_addr_t dma_addr;
+	void *alloc_addr, *virt_addr;
+	int rc;
+
+	alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size,
+					&dma_addr, GFP_KERNEL);
+	if (!alloc_addr) {
+		dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n",
+			mw->alloc_size);
+		return -ENOMEM;
+	}
+	virt_addr = alloc_addr;
+
+	/*
+	 * we must ensure that the memory address allocated is BAR size
+	 * aligned in order for the XLAT register to take the value. This
+	 * is a requirement of the hardware. It is recommended to setup CMA
+	 * for BAR sizes equal or greater than 4MB.
+	 */
+	if (!IS_ALIGNED(dma_addr, align)) {
+		if (mw->alloc_size > mw->buff_size) {
+			virt_addr = PTR_ALIGN(alloc_addr, align);
+			dma_addr = ALIGN(dma_addr, align);
+		} else {
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+
+	mw->alloc_addr = alloc_addr;
+	mw->virt_addr = virt_addr;
+	mw->dma_addr = dma_addr;
+
+	return 0;
+
+err:
+	dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr);
+
+	return rc;
+}
+
 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
 		      resource_size_t size)
 {
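Note (not part of the patch): ntb_alloc_mw_buffer() works because any buffer of at least buff_size + align - 1 bytes must contain an align-aligned window of buff_size bytes. A minimal userspace sketch of that arithmetic follows; it assumes align is a power of two (BAR sizes are) and substitutes plain malloc() for dma_alloc_coherent(), writing out the rounding that the kernel's PTR_ALIGN()/ALIGN() helpers encapsulate.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t buff_size = 4096;	/* window the hardware will use */
	size_t align = 4096;		/* BAR-size alignment, power of two */
	size_t alloc_size = 2 * buff_size;	/* padded, as in the retry path */

	/* Raw allocation; its start may not be 'align'-aligned. */
	void *alloc_addr = malloc(alloc_size);
	if (!alloc_addr)
		return 1;

	/* Round up to the next 'align' boundary, like PTR_ALIGN(). */
	uintptr_t raw = (uintptr_t)alloc_addr;
	void *virt_addr = (void *)((raw + align - 1) & ~(uintptr_t)(align - 1));

	/*
	 * Since alloc_size >= buff_size + align - 1, the aligned window
	 * [virt_addr, virt_addr + buff_size) still fits inside the raw
	 * allocation, so the padding is never handed to the hardware.
	 */
	printf("raw %p -> aligned %p\n", alloc_addr, virt_addr);

	/* Free with the original pointer, exactly as ntb_free_mw() does. */
	free(alloc_addr);
	return 0;
}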
@@ -710,28 +758,20 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
 	/* Alloc memory for receiving data. Must be aligned */
 	mw->xlat_size = xlat_size;
 	mw->buff_size = buff_size;
+	mw->alloc_size = buff_size;
 
-	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
-					   &mw->dma_addr, GFP_KERNEL);
-	if (!mw->virt_addr) {
-		mw->xlat_size = 0;
-		mw->buff_size = 0;
-		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
-			buff_size);
-		return -ENOMEM;
-	}
-
-	/*
-	 * we must ensure that the memory address allocated is BAR size
-	 * aligned in order for the XLAT register to take the value. This
-	 * is a requirement of the hardware. It is recommended to setup CMA
-	 * for BAR sizes equal or greater than 4MB.
-	 */
-	if (!IS_ALIGNED(mw->dma_addr, xlat_align)) {
-		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
-			&mw->dma_addr);
-		ntb_free_mw(nt, num_mw);
-		return -ENOMEM;
+	rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align);
+	if (rc) {
+		mw->alloc_size *= 2;
+		rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"Unable to alloc aligned MW buff\n");
+			mw->xlat_size = 0;
+			mw->buff_size = 0;
+			mw->alloc_size = 0;
+			return rc;
+		}
 	}
 
 	/* Notify HW the memory location of the receive buffer */
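Note (not part of the patch): the caller-side retry follows the same arithmetic. The first, exact-size attempt succeeds only if dma_alloc_coherent() happens to return an aligned address; doubling alloc_size on the second attempt leaves enough slack to realign whenever align <= buff_size, which the driver arranges by rounding buff_size up to the hardware alignment earlier in ntb_set_mw(). A standalone simulation of that control flow, where try_alloc() is a hypothetical stand-in for ntb_alloc_mw_buffer() fed a deliberately misaligned address:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simulated allocator: pretend dma_alloc_coherent() returned 'raw'.
 * Mirrors ntb_alloc_mw_buffer(): fail if the address is unaligned and
 * there is no padding to realign within.
 */
static bool try_alloc(uintptr_t raw, size_t buff_size, size_t alloc_size,
		      size_t align, uintptr_t *aligned)
{
	if (raw & (align - 1)) {
		if (alloc_size <= buff_size)
			return false;	/* exact-size attempt: give up */
		raw = (raw + align - 1) & ~(uintptr_t)(align - 1);
	}
	*aligned = raw;
	return true;
}

int main(void)
{
	size_t buff_size = 4096, align = 4096;
	size_t alloc_size = buff_size;
	uintptr_t raw = 0x100200;	/* deliberately not 4 KiB aligned */
	uintptr_t aligned;

	/* First attempt at the exact size fails on the unaligned address. */
	if (!try_alloc(raw, buff_size, alloc_size, align, &aligned)) {
		alloc_size *= 2;	/* padded retry, as ntb_set_mw() does */
		if (!try_alloc(raw, buff_size, alloc_size, align, &aligned))
			return 1;
	}
	printf("usable window starts at %#lx\n", (unsigned long)aligned);
	return 0;
}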