@@ -543,26 +543,6 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	return tlb_addr;
 }
 
-/*
- * Allocates bounce buffer and returns its physical address.
- */
-static phys_addr_t
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-	   enum dma_data_direction dir, unsigned long attrs)
-{
-	dma_addr_t start_dma_addr;
-
-	if (swiotlb_force == SWIOTLB_NO_FORCE) {
-		dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
-				     &phys);
-		return SWIOTLB_MAP_ERROR;
-	}
-
-	start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
-	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
-				      dir, attrs);
-}
-
 /*
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
@@ -714,6 +694,34 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
 	return true;
 }
 
+static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	dma_addr_t dma_addr;
+
+	if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
+		dev_warn_ratelimited(dev,
+			"Cannot do DMA to address %pa\n", phys);
+		return DIRECT_MAPPING_ERROR;
+	}
+
+	/* Oh well, have to allocate and map a bounce buffer. */
+	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
+			*phys, size, dir, attrs);
+	if (*phys == SWIOTLB_MAP_ERROR)
+		return DIRECT_MAPPING_ERROR;
+
+	/* Ensure that the address returned is DMA'ble */
+	dma_addr = __phys_to_dma(dev, *phys);
+	if (unlikely(!dma_capable(dev, dma_addr, size))) {
+		swiotlb_tbl_unmap_single(dev, *phys, size, dir,
+			attrs | DMA_ATTR_SKIP_CPU_SYNC);
+		return DIRECT_MAPPING_ERROR;
+	}
+
+	return dma_addr;
+}
+
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode. The
  * physical address to use is returned.
@@ -726,7 +734,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    enum dma_data_direction dir,
 			    unsigned long attrs)
 {
-	phys_addr_t map, phys = page_to_phys(page) + offset;
+	phys_addr_t phys = page_to_phys(page) + offset;
 	dma_addr_t dev_addr = phys_to_dma(dev, phys);
 
 	BUG_ON(dir == DMA_NONE);
@@ -739,22 +747,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 		return dev_addr;
 
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-
-	/* Oh well, have to allocate and map a bounce buffer. */
-	map = map_single(dev, phys, size, dir, attrs);
-	if (map == SWIOTLB_MAP_ERROR)
-		return DIRECT_MAPPING_ERROR;
-
-	dev_addr = __phys_to_dma(dev, map);
-
-	/* Ensure that the address returned is DMA'ble */
-	if (dma_capable(dev, dev_addr, size))
-		return dev_addr;
-
-	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-
-	return DIRECT_MAPPING_ERROR;
+	return swiotlb_bounce_page(dev, &phys, size, dir, attrs);
 }
 
 /*
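
For context, a minimal caller-side sketch (mydrv_map_one, buf_page and the error handling below are hypothetical and not part of this patch): drivers never call swiotlb_bounce_page() directly. They map through the generic DMA API, swiotlb_map_page() falls back to the bounce path only when the page is not addressable by the device, and a bounce failure then surfaces as a mapping error that dma_mapping_error() detects.

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: map one page for a device read. */
static int mydrv_map_one(struct device *dev, struct page *buf_page,
			 unsigned long offset, size_t len, dma_addr_t *out)
{
	dma_addr_t dma;

	/* May bounce via swiotlb_map_page() -> swiotlb_bounce_page(). */
	dma = dma_map_page(dev, buf_page, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;	/* bounce buffer exhausted or address not DMA'ble */

	*out = dma;		/* program this bus address into the device */
	return 0;
}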