Skip to content

Commit a4a4330

Browse files
author
Christoph Hellwig
committed
swiotlb: add support for non-coherent DMA
Handle architectures that are not cache coherent directly in the main swiotlb code by calling arch_sync_dma_for_{device,cpu} in all the right places from the various dma_map/unmap/sync methods when the device is non-coherent.

Because swiotlb now uses dma_direct_alloc for the coherent allocation, that side is already taken care of by the dma-direct code calling into arch_dma_{alloc,free} for devices that are non-coherent.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
1 parent fafadcd commit a4a4330

File tree

1 file changed

+23
-10
lines changed

1 file changed

+23
-10
lines changed

kernel/dma/swiotlb.c

Lines changed: 23 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -21,6 +21,7 @@
2121

2222
#include <linux/cache.h>
2323
#include <linux/dma-direct.h>
24+
#include <linux/dma-noncoherent.h>
2425
#include <linux/mm.h>
2526
#include <linux/export.h>
2627
#include <linux/spinlock.h>
@@ -671,11 +672,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
671672
* we can safely return the device addr and not worry about bounce
672673
* buffering it.
673674
*/
674-
if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
675-
return dev_addr;
675+
if (!dma_capable(dev, dev_addr, size) ||
676+
swiotlb_force == SWIOTLB_FORCE) {
677+
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
678+
dev_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs);
679+
}
680+
681+
if (!dev_is_dma_coherent(dev) &&
682+
(attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
683+
arch_sync_dma_for_device(dev, phys, size, dir);
676684

677-
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
678-
return swiotlb_bounce_page(dev, &phys, size, dir, attrs);
685+
return dev_addr;
679686
}
680687

681688
/*
@@ -694,6 +701,10 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
694701

695702
BUG_ON(dir == DMA_NONE);
696703

704+
if (!dev_is_dma_coherent(hwdev) &&
705+
(attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
706+
arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
707+
697708
if (is_swiotlb_buffer(paddr)) {
698709
swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
699710
return;
@@ -730,15 +741,17 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
730741

731742
BUG_ON(dir == DMA_NONE);
732743

733-
if (is_swiotlb_buffer(paddr)) {
744+
if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_CPU)
745+
arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
746+
747+
if (is_swiotlb_buffer(paddr))
734748
swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
735-
return;
736-
}
737749

738-
if (dir != DMA_FROM_DEVICE)
739-
return;
750+
if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_DEVICE)
751+
arch_sync_dma_for_device(hwdev, paddr, size, dir);
740752

741-
dma_mark_clean(phys_to_virt(paddr), size);
753+
if (!is_swiotlb_buffer(paddr) && dir == DMA_FROM_DEVICE)
754+
dma_mark_clean(phys_to_virt(paddr), size);
742755
}
743756

744757
void

0 commit comments

Comments
 (0)