 #include "dma.h"
 #include "mm.h"
 
+struct arm_dma_alloc_args {
+	struct device *dev;
+	size_t size;
+	gfp_t gfp;
+	pgprot_t prot;
+	const void *caller;
+	bool want_vaddr;
+};
+
+struct arm_dma_free_args {
+	struct device *dev;
+	size_t size;
+	void *cpu_addr;
+	struct page *page;
+	bool want_vaddr;
+};
+
+struct arm_dma_allocator {
+	void *(*alloc)(struct arm_dma_alloc_args *args,
+		       struct page **ret_page);
+	void (*free)(struct arm_dma_free_args *args);
+};
+
 struct arm_dma_buffer {
 	struct list_head list;
 	void *virt;
+	struct arm_dma_allocator *allocator;
 };
 
 static LIST_HEAD(arm_dma_bufs);
@@ -617,7 +641,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
 #define __alloc_from_pool(size, ret_page)			NULL
 #define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
-#define __free_from_pool(cpu_addr, size)			0
+#define __free_from_pool(cpu_addr, size)			do { } while (0)
 #define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
 #define __dma_free_remap(cpu_addr, size)			do { } while (0)
 
@@ -635,7 +659,78 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 	return page_address(page);
 }
 
+static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
+				    struct page **ret_page)
+{
+	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
+				     ret_page);
+}
+
+static void simple_allocator_free(struct arm_dma_free_args *args)
+{
+	__dma_free_buffer(args->page, args->size);
+}
+
+static struct arm_dma_allocator simple_allocator = {
+	.alloc = simple_allocator_alloc,
+	.free = simple_allocator_free,
+};
+
+static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
+				 struct page **ret_page)
+{
+	return __alloc_from_contiguous(args->dev, args->size, args->prot,
+				       ret_page, args->caller,
+				       args->want_vaddr);
+}
+
+static void cma_allocator_free(struct arm_dma_free_args *args)
+{
+	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
+			       args->size, args->want_vaddr);
+}
+
+static struct arm_dma_allocator cma_allocator = {
+	.alloc = cma_allocator_alloc,
+	.free = cma_allocator_free,
+};
 
+static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
+				  struct page **ret_page)
+{
+	return __alloc_from_pool(args->size, ret_page);
+}
+
+static void pool_allocator_free(struct arm_dma_free_args *args)
+{
+	__free_from_pool(args->cpu_addr, args->size);
+}
+
+static struct arm_dma_allocator pool_allocator = {
+	.alloc = pool_allocator_alloc,
+	.free = pool_allocator_free,
+};
+
+static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
+				   struct page **ret_page)
+{
+	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
+				    args->prot, ret_page, args->caller,
+				    args->want_vaddr);
+}
+
+static void remap_allocator_free(struct arm_dma_free_args *args)
+{
+	if (args->want_vaddr)
+		__dma_free_remap(args->cpu_addr, args->size);
+
+	__dma_free_buffer(args->page, args->size);
+}
+
+static struct arm_dma_allocator remap_allocator = {
+	.alloc = remap_allocator_alloc,
+	.free = remap_allocator_free,
+};
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 			 gfp_t gfp, pgprot_t prot, bool is_coherent,
@@ -644,8 +739,16 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	u64 mask = get_coherent_dma_mask(dev);
 	struct page *page = NULL;
 	void *addr;
-	bool want_vaddr;
+	bool allowblock, cma;
 	struct arm_dma_buffer *buf;
+	struct arm_dma_alloc_args args = {
+		.dev = dev,
+		.size = PAGE_ALIGN(size),
+		.gfp = gfp,
+		.prot = prot,
+		.caller = caller,
+		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+	};
 
 #ifdef CONFIG_DMA_API_DEBUG
 	u64 limit = (mask + 1) & ~mask;
@@ -674,29 +777,28 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	 * platform; see CONFIG_HUGETLBFS.
 	 */
 	gfp &= ~(__GFP_COMP);
+	args.gfp = gfp;
 
 	*handle = DMA_ERROR_CODE;
-	size = PAGE_ALIGN(size);
-	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
-
-	if (nommu())
-		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (dev_get_cma_area(dev) && (gfp & __GFP_DIRECT_RECLAIM))
-		addr = __alloc_from_contiguous(dev, size, prot, &page,
-					       caller, want_vaddr);
-	else if (is_coherent)
-		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (!gfpflags_allow_blocking(gfp))
-		addr = __alloc_from_pool(size, &page);
+	allowblock = gfpflags_allow_blocking(gfp);
+	cma = allowblock ? dev_get_cma_area(dev) : false;
+
+	if (cma)
+		buf->allocator = &cma_allocator;
+	else if (nommu() || is_coherent)
+		buf->allocator = &simple_allocator;
+	else if (allowblock)
+		buf->allocator = &remap_allocator;
 	else
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
-					    caller, want_vaddr);
+		buf->allocator = &pool_allocator;
+
+	addr = buf->allocator->alloc(&args, &page);
 
 	if (page) {
 		unsigned long flags;
 
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
-		buf->virt = want_vaddr ? addr : page;
+		buf->virt = args.want_vaddr ? addr : page;
 
 		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
 		list_add(&buf->list, &arm_dma_bufs);
@@ -705,7 +807,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		kfree(buf);
 	}
 
-	return want_vaddr ? addr : page;
+	return args.want_vaddr ? addr : page;
 }
 
 /*
@@ -781,31 +883,20 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 			  bool is_coherent)
 {
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
-	bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 	struct arm_dma_buffer *buf;
+	struct arm_dma_free_args args = {
+		.dev = dev,
+		.size = PAGE_ALIGN(size),
+		.cpu_addr = cpu_addr,
+		.page = page,
+		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+	};
 
 	buf = arm_dma_buffer_find(cpu_addr);
 	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
 		return;
 
-	size = PAGE_ALIGN(size);
-
-	if (nommu()) {
-		__dma_free_buffer(page, size);
-	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
-		return;
-	} else if (!dev_get_cma_area(dev)) {
-		if (want_vaddr && !is_coherent)
-			__dma_free_remap(cpu_addr, size);
-		__dma_free_buffer(page, size);
-	} else {
-		/*
-		 * Non-atomic allocations cannot be freed with IRQs disabled
-		 */
-		WARN_ON(irqs_disabled());
-		__free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
-	}
-
+	buf->allocator->free(&args);
 	kfree(buf);
 }
 
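The patch above replaces the if/else ladders in __dma_alloc() and __arm_dma_free() with per-buffer alloc/free callbacks: the allocation strategy is chosen once, recorded in struct arm_dma_buffer, and the matching free callback is reused at free time instead of being re-derived. Below is a minimal, self-contained userspace sketch of that dispatch pattern; the names and the plain malloc/free backing are hypothetical illustration only, not kernel code.

/* Sketch (assumed names, userspace) of the allocator-callback dispatch
 * used by the patch: pick an allocator once, remember it in the buffer,
 * and call its paired free callback later.
 */
#include <stdio.h>
#include <stdlib.h>

struct alloc_args { size_t size; };
struct free_args  { void *cpu_addr; size_t size; };

struct allocator {
	void *(*alloc)(struct alloc_args *args);
	void (*free)(struct free_args *args);
};

/* one possible backend; the kernel patch has simple/cma/pool/remap */
static void *heap_alloc(struct alloc_args *args) { return malloc(args->size); }
static void heap_free(struct free_args *args)    { free(args->cpu_addr); }

static struct allocator heap_allocator = {
	.alloc = heap_alloc,
	.free  = heap_free,
};

struct buffer {
	void *virt;
	size_t size;
	struct allocator *allocator;	/* remembered for the free path */
};

int main(void)
{
	struct buffer buf = { .size = 64, .allocator = &heap_allocator };
	struct alloc_args aa = { .size = buf.size };

	buf.virt = buf.allocator->alloc(&aa);
	printf("allocated %zu bytes at %p\n", buf.size, buf.virt);

	struct free_args fa = { .cpu_addr = buf.virt, .size = buf.size };
	buf.allocator->free(&fa);	/* free path never re-derives the backend */
	return 0;
}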