Skip to content

Commit 6e8266e

Browse files
carlocaione authored and Russell King committed
ARM: 8304/1: Respect NO_KERNEL_MAPPING when we don't have an IOMMU
Even without an iommu, NO_KERNEL_MAPPING is still convenient to save on kernel address space in places where we don't need a kernel mapping. Implement support for it in the two places where we're creating an expensive mapping. __alloc_from_pool uses an internal pool from which we already have virtual addresses, so it's not relevant, and __alloc_simple_buffer uses alloc_pages, which will always return a lowmem page, which is already mapped into kernel space, so we can't prevent a mapping for it in that case. Signed-off-by: Jasper St. Pierre <jstpierre@mecheye.net> Signed-off-by: Carlo Caione <carlo@caione.org> Reviewed-by: Rob Clark <robdclark@gmail.com> Reviewed-by: Daniel Drake <dsd@endlessm.com> Acked-by: Marek Szyprowski <m.szyprowski@samsung.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
1 parent 415ae10 commit 6e8266e

File tree

1 file changed

+41
-26
lines changed

1 file changed

+41
-26
lines changed

arch/arm/mm/dma-mapping.c

Lines changed: 41 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -289,11 +289,11 @@ static void __dma_free_buffer(struct page *page, size_t size)
289289

290290
static void *__alloc_from_contiguous(struct device *dev, size_t size,
291291
pgprot_t prot, struct page **ret_page,
292-
const void *caller);
292+
const void *caller, bool want_vaddr);
293293

294294
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
295295
pgprot_t prot, struct page **ret_page,
296-
const void *caller);
296+
const void *caller, bool want_vaddr);
297297

298298
static void *
299299
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
@@ -357,10 +357,10 @@ static int __init atomic_pool_init(void)
357357

358358
if (dev_get_cma_area(NULL))
359359
ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
360-
&page, atomic_pool_init);
360+
&page, atomic_pool_init, true);
361361
else
362362
ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
363-
&page, atomic_pool_init);
363+
&page, atomic_pool_init, true);
364364
if (ptr) {
365365
int ret;
366366

@@ -467,20 +467,23 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
467467

468468
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
469469
pgprot_t prot, struct page **ret_page,
470-
const void *caller)
470+
const void *caller, bool want_vaddr)
471471
{
472472
struct page *page;
473-
void *ptr;
473+
void *ptr = NULL;
474474
page = __dma_alloc_buffer(dev, size, gfp);
475475
if (!page)
476476
return NULL;
477+
if (!want_vaddr)
478+
goto out;
477479

478480
ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
479481
if (!ptr) {
480482
__dma_free_buffer(page, size);
481483
return NULL;
482484
}
483485

486+
out:
484487
*ret_page = page;
485488
return ptr;
486489
}
@@ -523,19 +526,22 @@ static int __free_from_pool(void *start, size_t size)
523526

524527
static void *__alloc_from_contiguous(struct device *dev, size_t size,
525528
pgprot_t prot, struct page **ret_page,
526-
const void *caller)
529+
const void *caller, bool want_vaddr)
527530
{
528531
unsigned long order = get_order(size);
529532
size_t count = size >> PAGE_SHIFT;
530533
struct page *page;
531-
void *ptr;
534+
void *ptr = NULL;
532535

533536
page = dma_alloc_from_contiguous(dev, count, order);
534537
if (!page)
535538
return NULL;
536539

537540
__dma_clear_buffer(page, size);
538541

542+
if (!want_vaddr)
543+
goto out;
544+
539545
if (PageHighMem(page)) {
540546
ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
541547
if (!ptr) {
@@ -546,17 +552,21 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
546552
__dma_remap(page, size, prot);
547553
ptr = page_address(page);
548554
}
555+
556+
out:
549557
*ret_page = page;
550558
return ptr;
551559
}
552560

553561
static void __free_from_contiguous(struct device *dev, struct page *page,
554-
void *cpu_addr, size_t size)
562+
void *cpu_addr, size_t size, bool want_vaddr)
555563
{
556-
if (PageHighMem(page))
557-
__dma_free_remap(cpu_addr, size);
558-
else
559-
__dma_remap(page, size, PAGE_KERNEL);
564+
if (want_vaddr) {
565+
if (PageHighMem(page))
566+
__dma_free_remap(cpu_addr, size);
567+
else
568+
__dma_remap(page, size, PAGE_KERNEL);
569+
}
560570
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
561571
}
562572

@@ -574,12 +584,12 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
574584

575585
#define nommu() 1
576586

577-
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
578-
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
587+
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
588+
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
579589
#define __alloc_from_pool(size, ret_page) NULL
580-
#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
590+
#define __alloc_from_contiguous(dev, size, prot, ret, c, wv) NULL
581591
#define __free_from_pool(cpu_addr, size) 0
582-
#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
592+
#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
583593
#define __dma_free_remap(cpu_addr, size) do { } while (0)
584594

585595
#endif /* CONFIG_MMU */
@@ -599,11 +609,13 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
599609

600610

601611
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
602-
gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
612+
gfp_t gfp, pgprot_t prot, bool is_coherent,
613+
struct dma_attrs *attrs, const void *caller)
603614
{
604615
u64 mask = get_coherent_dma_mask(dev);
605616
struct page *page = NULL;
606617
void *addr;
618+
bool want_vaddr;
607619

608620
#ifdef CONFIG_DMA_API_DEBUG
609621
u64 limit = (mask + 1) & ~mask;
@@ -631,20 +643,21 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
631643

632644
*handle = DMA_ERROR_CODE;
633645
size = PAGE_ALIGN(size);
646+
want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
634647

635648
if (is_coherent || nommu())
636649
addr = __alloc_simple_buffer(dev, size, gfp, &page);
637650
else if (!(gfp & __GFP_WAIT))
638651
addr = __alloc_from_pool(size, &page);
639652
else if (!dev_get_cma_area(dev))
640-
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
653+
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
641654
else
642-
addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
655+
addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
643656

644-
if (addr)
657+
if (page)
645658
*handle = pfn_to_dma(dev, page_to_pfn(page));
646659

647-
return addr;
660+
return want_vaddr ? addr : page;
648661
}
649662

650663
/*
@@ -661,7 +674,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
661674
return memory;
662675

663676
return __dma_alloc(dev, size, handle, gfp, prot, false,
664-
__builtin_return_address(0));
677+
attrs, __builtin_return_address(0));
665678
}
666679

667680
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
@@ -674,7 +687,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
674687
return memory;
675688

676689
return __dma_alloc(dev, size, handle, gfp, prot, true,
677-
__builtin_return_address(0));
690+
attrs, __builtin_return_address(0));
678691
}
679692

680693
/*
@@ -715,6 +728,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
715728
bool is_coherent)
716729
{
717730
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
731+
bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
718732

719733
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
720734
return;
@@ -726,14 +740,15 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
726740
} else if (__free_from_pool(cpu_addr, size)) {
727741
return;
728742
} else if (!dev_get_cma_area(dev)) {
729-
__dma_free_remap(cpu_addr, size);
743+
if (want_vaddr)
744+
__dma_free_remap(cpu_addr, size);
730745
__dma_free_buffer(page, size);
731746
} else {
732747
/*
733748
* Non-atomic allocations cannot be freed with IRQs disabled
734749
*/
735750
WARN_ON(irqs_disabled());
736-
__free_from_contiguous(dev, page, cpu_addr, size);
751+
__free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
737752
}
738753
}
739754

0 commit comments

Comments (0)