
Commit 44176bb

geertu authored and ctmarinas committed
arm64: Add support for DMA_ATTR_FORCE_CONTIGUOUS to IOMMU
Add support for allocating physically contiguous DMA buffers on arm64
systems with an IOMMU. This can be useful when two or more devices with
different memory requirements are involved in buffer sharing.

Note that as this uses the CMA allocator, setting the
DMA_ATTR_FORCE_CONTIGUOUS attribute has a runtime-dependency on
CONFIG_DMA_CMA, just like on arm32.

For arm64 systems using swiotlb, no changes are needed to support the
allocation of physically contiguous DMA buffers:
  - swiotlb always uses physically contiguous buffers (up to
    IO_TLB_SEGSIZE = 128 pages),
  - arm64's __dma_alloc_coherent() already calls
    dma_alloc_from_contiguous() when CMA is available.

Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
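For reference, the new path is reached through the regular DMA API by
passing the attribute at allocation time. The following is a minimal,
hypothetical driver-side sketch (the helper names are illustrative and
not part of this commit); note that the same attrs value must be passed
on free so that the matching branch in __iommu_free_attrs() is taken,
and that the allocation can only succeed when CONFIG_DMA_CMA is enabled:

#include <linux/dma-mapping.h>

/*
 * Illustrative only: allocate a physically contiguous buffer that is
 * also mapped through the IOMMU for "dev". Assumes "dev" uses the
 * arm64 iommu_dma_ops and that a CMA area is available.
 */
static void *example_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle)
{
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
			       DMA_ATTR_FORCE_CONTIGUOUS);
}

static void example_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t dma_handle)
{
	/* Same attrs as the allocation, so the contiguous branch frees it. */
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_FORCE_CONTIGUOUS);
}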
1 parent: f13d52c


arch/arm64/mm/dma-mapping.c

Lines changed: 48 additions & 15 deletions
@@ -584,20 +584,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 	 */
 	gfp |= __GFP_ZERO;
 
-	if (gfpflags_allow_blocking(gfp)) {
-		struct page **pages;
-		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
-
-		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
-					handle, flush_page);
-		if (!pages)
-			return NULL;
-
-		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
-					      __builtin_return_address(0));
-		if (!addr)
-			iommu_dma_free(dev, pages, iosize, handle);
-	} else {
+	if (!gfpflags_allow_blocking(gfp)) {
 		struct page *page;
 		/*
 		 * In atomic context we can't remap anything, so we'll only
@@ -621,6 +608,45 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 			__free_from_pool(addr, size);
 			addr = NULL;
 		}
+	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+		struct page *page;
+
+		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+						 get_order(size), gfp);
+		if (!page)
+			return NULL;
+
+		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+		if (iommu_dma_mapping_error(dev, *handle)) {
+			dma_release_from_contiguous(dev, page,
+						    size >> PAGE_SHIFT);
+			return NULL;
+		}
+		if (!coherent)
+			__dma_flush_area(page_to_virt(page), iosize);
+
+		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+						   prot,
+						   __builtin_return_address(0));
+		if (!addr) {
+			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+			dma_release_from_contiguous(dev, page,
+						    size >> PAGE_SHIFT);
+		}
+	} else {
+		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+		struct page **pages;
+
+		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
+					handle, flush_page);
+		if (!pages)
+			return NULL;
+
+		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+					      __builtin_return_address(0));
+		if (!addr)
+			iommu_dma_free(dev, pages, iosize, handle);
 	}
 	return addr;
 }
@@ -632,7 +658,8 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 
 	size = PAGE_ALIGN(size);
 	/*
-	 * @cpu_addr will be one of 3 things depending on how it was allocated:
+	 * @cpu_addr will be one of 4 things depending on how it was allocated:
+	 * - A remapped array of pages for contiguous allocations.
 	 * - A remapped array of pages from iommu_dma_alloc(), for all
 	 *   non-atomic allocations.
 	 * - A non-cacheable alias from the atomic pool, for atomic
@@ -644,6 +671,12 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	if (__in_atomic_pool(cpu_addr, size)) {
 		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
 		__free_from_pool(cpu_addr, size);
+	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		struct page *page = vmalloc_to_page(cpu_addr);
+
+		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
+		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else if (is_vmalloc_addr(cpu_addr)){
 		struct vm_struct *area = find_vm_area(cpu_addr);
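A detail worth noting in the free path above: dma_common_contiguous_remap()
places the kernel mapping in vmalloc space, so vmalloc_to_page() on the CPU
address recovers the head struct page of the CMA block, which
dma_release_from_contiguous() needs along with the page count. The sketch
below illustrates the buffer-sharing scenario from the commit message in
hypothetical driver code (dev_iommu, dev_plain, and example_share are
assumptions, not part of this commit): because the backing pages are
physically contiguous, a second device without an IOMMU can DMA to the same
buffer.

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical sharing sketch: dev_iommu sits behind the IOMMU,
 * dev_plain does not and requires physically contiguous memory.
 */
static int example_share(struct device *dev_iommu, struct device *dev_plain,
			 size_t size)
{
	dma_addr_t iova, bus_addr;
	void *cpu_addr;

	cpu_addr = dma_alloc_attrs(dev_iommu, size, &iova, GFP_KERNEL,
				   DMA_ATTR_FORCE_CONTIGUOUS);
	if (!cpu_addr)
		return -ENOMEM;

	/*
	 * The backing pages are contiguous, so a single map_page call
	 * covers the whole buffer for the second device.
	 */
	bus_addr = dma_map_page(dev_plain, vmalloc_to_page(cpu_addr), 0,
				size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev_plain, bus_addr)) {
		dma_free_attrs(dev_iommu, size, cpu_addr, iova,
			       DMA_ATTR_FORCE_CONTIGUOUS);
		return -ENOMEM;
	}

	/* ... use iova on dev_iommu and bus_addr on dev_plain ... */
	return 0;
}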
