@@ -42,6 +42,31 @@
 #include "dma.h"
 #include "mm.h"
 
+struct arm_dma_buffer {
+	struct list_head list;
+	void *virt;
+};
+
+static LIST_HEAD(arm_dma_bufs);
+static DEFINE_SPINLOCK(arm_dma_bufs_lock);
+
+static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
+{
+	struct arm_dma_buffer *buf, *found = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
+	list_for_each_entry(buf, &arm_dma_bufs, list) {
+		if (buf->virt == virt) {
+			list_del(&buf->list);
+			found = buf;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
+	return found;
+}
+
 /*
  * The DMA API is built upon the notion of "buffer ownership". A buffer
  * is either exclusively owned by the CPU (and therefore may be accessed
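The helper added above is the kernel's stock pattern for a spinlock-protected tracking list: take the lock with interrupts saved so callers in any context are safe, walk the list, unlink the matching entry, and hand it back so the caller can kfree() it outside the lock. A minimal standalone sketch of the same pattern — the names tracked, tracked_list and tracked_lock are illustrative, not part of this patch:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct tracked {
		struct list_head list;
		void *key;
	};

	static LIST_HEAD(tracked_list);
	static DEFINE_SPINLOCK(tracked_lock);

	/* Register a key; uses the caller's gfp so atomic callers stay safe. */
	static int tracked_add(void *key, gfp_t gfp)
	{
		struct tracked *t = kzalloc(sizeof(*t), gfp);
		unsigned long flags;

		if (!t)
			return -ENOMEM;
		t->key = key;
		spin_lock_irqsave(&tracked_lock, flags);
		list_add(&t->list, &tracked_list);
		spin_unlock_irqrestore(&tracked_lock, flags);
		return 0;
	}

Holding the lock only around the list manipulation keeps the critical section short; the allocation and the eventual kfree() both happen outside it, exactly as the patch does.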
@@ -620,6 +645,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	struct page *page = NULL;
 	void *addr;
 	bool want_vaddr;
+	struct arm_dma_buffer *buf;
 
 #ifdef CONFIG_DMA_API_DEBUG
 	u64 limit = (mask + 1) & ~mask;
@@ -633,6 +659,10 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (!mask)
 		return NULL;
 
+	buf = kzalloc(sizeof(*buf), gfp);
+	if (!buf)
+		return NULL;
+
 	if (mask < 0xffffffffULL)
 		gfp |= GFP_DMA;
 
@@ -662,8 +692,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
 					caller, want_vaddr);
 
-	if (page)
+	if (page) {
+		unsigned long flags;
+
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
+		buf->virt = want_vaddr ? addr : page;
+
+		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
+		list_add(&buf->list, &arm_dma_bufs);
+		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
+	} else {
+		kfree(buf);
+	}
 
 	return want_vaddr ? addr : page;
 }
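With this hunk, every successful allocation is registered before __dma_alloc() returns: buf->virt records the kernel virtual address when a mapping was created, or the struct page pointer when DMA_ATTR_NO_KERNEL_MAPPING was requested — in either case exactly the cookie the caller gets back, so the free path can look the buffer up by the same value. From a driver's point of view the bookkeeping is invisible; a hedged round-trip sketch, assuming dev comes from the driver's probe path:

	#include <linux/dma-mapping.h>

	/* Illustrative only: one coherent allocate/use/free cycle. */
	static int example_roundtrip(struct device *dev)
	{
		dma_addr_t handle;
		void *cpu;

		cpu = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;
		/* ... program the device with handle, touch the data via cpu ... */
		dma_free_coherent(dev, PAGE_SIZE, cpu, handle); /* free path finds cpu in the list */
		return 0;
	}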
@@ -742,6 +782,11 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 {
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 	bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
+	struct arm_dma_buffer *buf;
+
+	buf = arm_dma_buffer_find(cpu_addr);
+	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
+		return;
 
 	size = PAGE_ALIGN(size);
 
@@ -760,6 +805,8 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		WARN_ON(irqs_disabled());
 		__free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
 	}
+
+	kfree(buf);
 }
 
 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
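Because arm_dma_buffer_find() unlinks the entry it returns, the WARN() in the free path catches not only addresses that were never handed out by __dma_alloc() but also double frees: a second free of the same cpu_addr no longer finds an entry and returns early instead of releasing the pages twice. An illustrative misuse, assuming the valid dev, cpu and handle from the sketch above:

	dma_free_coherent(dev, PAGE_SIZE, cpu, handle); /* ok: entry found and freed */
	dma_free_coherent(dev, PAGE_SIZE, cpu, handle); /* WARNs "Freeing invalid buffer", bails out */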