@@ -622,78 +622,6 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 	}
 }
 
-static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
-		size_t size)
-{
-	u64 mask = DMA_BIT_MASK(32);
-
-	if (dev && dev->coherent_dma_mask)
-		mask = dev->coherent_dma_mask;
-	return addr + size - 1 <= mask;
-}
-
-static void *
-swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		unsigned long attrs)
-{
-	phys_addr_t phys_addr;
-
-	if (swiotlb_force == SWIOTLB_NO_FORCE)
-		goto out_warn;
-
-	phys_addr = swiotlb_tbl_map_single(dev,
-			__phys_to_dma(dev, io_tlb_start),
-			0, size, DMA_FROM_DEVICE, attrs);
-	if (phys_addr == SWIOTLB_MAP_ERROR)
-		goto out_warn;
-
-	*dma_handle = __phys_to_dma(dev, phys_addr);
-	if (!dma_coherent_ok(dev, *dma_handle, size))
-		goto out_unmap;
-
-	memset(phys_to_virt(phys_addr), 0, size);
-	return phys_to_virt(phys_addr);
-
-out_unmap:
-	dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		(unsigned long long)dev->coherent_dma_mask,
-		(unsigned long long)*dma_handle);
-
-	/*
-	 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
-	 * DMA_ATTR_SKIP_CPU_SYNC is optional.
-	 */
-	swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
-			DMA_ATTR_SKIP_CPU_SYNC);
-out_warn:
-	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
-		dev_warn(dev,
-			"swiotlb: coherent allocation failed, size=%zu\n",
-			size);
-		dump_stack();
-	}
-	return NULL;
-}
-
-static bool swiotlb_free_buffer(struct device *dev, size_t size,
-		dma_addr_t dma_addr)
-{
-	phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
-
-	WARN_ON_ONCE(irqs_disabled());
-
-	if (!is_swiotlb_buffer(phys_addr))
-		return false;
-
-	/*
-	 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
-	 * DMA_ATTR_SKIP_CPU_SYNC is optional.
-	 */
-	swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
-			DMA_ATTR_SKIP_CPU_SYNC);
-	return true;
-}
-
 static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
@@ -926,39 +854,10 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
 	return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 
-void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
-{
-	void *vaddr;
-
-	/* temporary workaround: */
-	if (gfp & __GFP_NOWARN)
-		attrs |= DMA_ATTR_NO_WARN;
-
-	/*
-	 * Don't print a warning when the first allocation attempt fails.
-	 * swiotlb_alloc_coherent() will print a warning when the DMA memory
-	 * allocation ultimately failed.
-	 */
-	gfp |= __GFP_NOWARN;
-
-	vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-	if (!vaddr)
-		vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
-	return vaddr;
-}
-
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_addr, unsigned long attrs)
-{
-	if (!swiotlb_free_buffer(dev, size, dma_addr))
-		dma_direct_free(dev, size, vaddr, dma_addr, attrs);
-}
-
 const struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error		= dma_direct_mapping_error,
-	.alloc			= swiotlb_alloc,
-	.free			= swiotlb_free,
+	.alloc			= dma_direct_alloc,
+	.free			= dma_direct_free,
 	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,
 	.sync_single_for_device	= swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu	= swiotlb_sync_sg_for_cpu,
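
Not part of the patch: a minimal caller-side sketch of what the second hunk means in practice. After this change, a coherent allocation on a device bound to swiotlb_dma_ops dispatches straight to dma_direct_alloc()/dma_direct_free() via the standard dma_alloc_coherent()/dma_free_coherent() wrappers; the function name example_coherent_alloc and the PAGE_SIZE buffer below are illustrative assumptions only.

/*
 * Illustrative sketch, not from the patch: shows the path a coherent
 * allocation takes once swiotlb_dma_ops.alloc/.free point at
 * dma_direct_alloc()/dma_direct_free().
 */
#include <linux/dma-mapping.h>

static int example_coherent_alloc(struct device *dev)
{
	dma_addr_t dma_handle;
	void *vaddr;

	/* Dispatches through the device's dma_map_ops .alloc hook,
	 * i.e. dma_direct_alloc() on a swiotlb_dma_ops device. */
	vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* ... device uses the buffer via dma_handle ... */

	/* Dispatches through the .free hook, i.e. dma_direct_free(). */
	dma_free_coherent(dev, PAGE_SIZE, vaddr, dma_handle);
	return 0;
}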