@@ -289,11 +289,11 @@ static void __dma_free_buffer(struct page *page, size_t size)
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
-				     const void *caller);
+				     const void *caller, bool want_vaddr);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 				  pgprot_t prot, struct page **ret_page,
-				  const void *caller);
+				  const void *caller, bool want_vaddr);
 
 static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
@@ -357,10 +357,10 @@ static int __init atomic_pool_init(void)
 
 	if (dev_get_cma_area(NULL))
 		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
-					      &page, atomic_pool_init);
+					      &page, atomic_pool_init, true);
 	else
 		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
-					   &page, atomic_pool_init);
+					   &page, atomic_pool_init, true);
 	if (ptr) {
 		int ret;
 
@@ -467,20 +467,23 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 				  pgprot_t prot, struct page **ret_page,
-				  const void *caller)
+				  const void *caller, bool want_vaddr)
 {
 	struct page *page;
-	void *ptr;
+	void *ptr = NULL;
 	page = __dma_alloc_buffer(dev, size, gfp);
 	if (!page)
 		return NULL;
+	if (!want_vaddr)
+		goto out;
 
 	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
 	if (!ptr) {
 		__dma_free_buffer(page, size);
 		return NULL;
 	}
 
+ out:
 	*ret_page = page;
 	return ptr;
 }
@@ -523,19 +526,22 @@ static int __free_from_pool(void *start, size_t size)
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
-				     const void *caller)
+				     const void *caller, bool want_vaddr)
 {
 	unsigned long order = get_order(size);
 	size_t count = size >> PAGE_SHIFT;
 	struct page *page;
-	void *ptr;
+	void *ptr = NULL;
 
 	page = dma_alloc_from_contiguous(dev, count, order);
 	if (!page)
 		return NULL;
 
 	__dma_clear_buffer(page, size);
 
+	if (!want_vaddr)
+		goto out;
+
 	if (PageHighMem(page)) {
 		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
 		if (!ptr) {
@@ -546,17 +552,21 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 		__dma_remap(page, size, prot);
 		ptr = page_address(page);
 	}
+
+ out:
 	*ret_page = page;
 	return ptr;
 }
 
 static void __free_from_contiguous(struct device *dev, struct page *page,
-				   void *cpu_addr, size_t size)
+				   void *cpu_addr, size_t size, bool want_vaddr)
 {
-	if (PageHighMem(page))
-		__dma_free_remap(cpu_addr, size);
-	else
-		__dma_remap(page, size, PAGE_KERNEL);
+	if (want_vaddr) {
+		if (PageHighMem(page))
+			__dma_free_remap(cpu_addr, size);
+		else
+			__dma_remap(page, size, PAGE_KERNEL);
+	}
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
@@ -574,12 +584,12 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 
 #define nommu()				1
 
-#define __get_dma_pgprot(attrs, prot)	__pgprot(0)
-#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
+#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
 #define __alloc_from_pool(size, ret_page)			NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c)	NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
 #define __free_from_pool(cpu_addr, size)			0
-#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
 #define __dma_free_remap(cpu_addr, size)			do { } while (0)
 
 #endif	/* CONFIG_MMU */
@@ -599,11 +609,13 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
+			 gfp_t gfp, pgprot_t prot, bool is_coherent,
+			 struct dma_attrs *attrs, const void *caller)
 {
 	u64 mask = get_coherent_dma_mask(dev);
 	struct page *page = NULL;
 	void *addr;
+	bool want_vaddr;
 
 #ifdef CONFIG_DMA_API_DEBUG
 	u64 limit = (mask + 1) & ~mask;
@@ -631,20 +643,21 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
+	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
 	if (is_coherent || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
 	else if (!dev_get_cma_area(dev))
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
+		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
 
-	if (addr)
+	if (page)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
 
-	return addr;
+	return want_vaddr ? addr : page;
 }
 
 /*
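The attrs argument threaded into __dma_alloc() above carries DMA_ATTR_NO_KERNEL_MAPPING from the caller. A minimal, hypothetical caller-side sketch of how a driver would reach the new want_vaddr = false path, assuming the struct dma_attrs helpers (DEFINE_DMA_ATTRS, dma_set_attr, dma_alloc_attrs) of this kernel generation; the helper name and its use of GFP_KERNEL are placeholders, not part of the patch:

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

/* Hypothetical helper: allocate a DMA buffer the CPU never touches, so the
 * core can skip creating a kernel virtual mapping for it. */
static void *alloc_device_only_buffer(struct device *dev, size_t size,
				      dma_addr_t *dma)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	/* With want_vaddr false, __dma_alloc() hands back the struct page
	 * pointer as an opaque cookie; it must not be dereferenced. */
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, &attrs);
}
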
@@ -661,7 +674,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		return memory;
 
 	return __dma_alloc(dev, size, handle, gfp, prot, false,
-			   __builtin_return_address(0));
+			   attrs, __builtin_return_address(0));
 }
 
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
@@ -674,7 +687,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 		return memory;
 
 	return __dma_alloc(dev, size, handle, gfp, prot, true,
-			   __builtin_return_address(0));
+			   attrs, __builtin_return_address(0));
 }
 
 /*
@@ -715,6 +728,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 			   bool is_coherent)
 {
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
 		return;
@@ -726,14 +740,15 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	} else if (__free_from_pool(cpu_addr, size)) {
 		return;
 	} else if (!dev_get_cma_area(dev)) {
-		__dma_free_remap(cpu_addr, size);
+		if (want_vaddr)
+			__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
 		/*
 		 * Non-atomic allocations cannot be freed with IRQs disabled
 		 */
 		WARN_ON(irqs_disabled());
-		__free_from_contiguous(dev, page, cpu_addr, size);
+		__free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
 	}
 }
 
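On the free side, the same attribute has to be passed back so that __arm_dma_free() above skips the remap teardown it never performed. A hypothetical counterpart to the allocation sketch, again assuming the struct dma_attrs API of this kernel generation:

/* Hypothetical counterpart to the allocation sketch above. The same attrs
 * must be supplied on free, otherwise the core would try to tear down a
 * kernel mapping that was never created. */
static void free_device_only_buffer(struct device *dev, size_t size,
				    void *cookie, dma_addr_t dma)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	dma_free_attrs(dev, size, cookie, dma, &attrs);
}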