Commit fafadcd

Authored and committed by Christoph Hellwig
swiotlb: don't dip into swiotlb pool for coherent allocations
All architectures that support swiotlb also have a zone that backs up these
less than full addressing allocations (usually ZONE_DMA32).

Because of that it is rather pointless to fall back to the global swiotlb
buffer if the normal dma direct allocation failed - the only thing this will
do is to eat up bounce buffers that would be more useful to serve streaming
mappings.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
1 parent c4dae36 commit fafadcd
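
For context, the fallback that this commit removes worked roughly as sketched below: the coherent .alloc hook first tried the direct allocator and, only on failure, carved an allocation out of the shared swiotlb bounce pool. The sketch is a condensed paraphrase of the swiotlb_alloc()/swiotlb_alloc_buffer() code deleted from kernel/dma/swiotlb.c further down; the function name is illustrative and the __GFP_NOWARN handling is omitted, so it is not a drop-in replacement.

        /* Condensed paraphrase of the removed fallback path; the name
         * old_swiotlb_alloc is illustrative only. */
        static void *old_swiotlb_alloc(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t gfp,
                                       unsigned long attrs)
        {
                /* First choice: the regular direct allocation. */
                void *vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);

                /* Fallback removed by this commit: it consumed bounce-buffer
                 * slots that streaming mappings need. */
                if (!vaddr)
                        vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
                return vaddr;
        }

After this commit the swiotlb_dma_ops table points .alloc and .free straight at dma_direct_alloc() and dma_direct_free(), so coherent allocations never touch the bounce pool.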

File tree

3 files changed: +5, -111 lines


arch/arm64/mm/dma-mapping.c

Lines changed: 3 additions & 3 deletions

@@ -112,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
                 return addr;
         }
 
-        ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
+        ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
         if (!ptr)
                 goto no_mem;
 
@@ -133,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
         return coherent_ptr;
 
 no_map:
-        swiotlb_free(dev, size, ptr, *dma_handle, attrs);
+        dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
 no_mem:
         return NULL;
 }
@@ -151,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size,
                         return;
                 vunmap(vaddr);
         }
-        swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
+        dma_direct_free_pages(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,

include/linux/swiotlb.h

Lines changed: 0 additions & 5 deletions

@@ -67,11 +67,6 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
 
 /* Accessory functions. */
 
-void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
-                gfp_t flags, unsigned long attrs);
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
-                dma_addr_t dma_addr, unsigned long attrs);
-
 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                                    unsigned long offset, size_t size,
                                    enum dma_data_direction dir,

kernel/dma/swiotlb.c

Lines changed: 2 additions & 103 deletions

@@ -622,78 +622,6 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
         }
 }
 
-static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
-                size_t size)
-{
-        u64 mask = DMA_BIT_MASK(32);
-
-        if (dev && dev->coherent_dma_mask)
-                mask = dev->coherent_dma_mask;
-        return addr + size - 1 <= mask;
-}
-
-static void *
-swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                unsigned long attrs)
-{
-        phys_addr_t phys_addr;
-
-        if (swiotlb_force == SWIOTLB_NO_FORCE)
-                goto out_warn;
-
-        phys_addr = swiotlb_tbl_map_single(dev,
-                        __phys_to_dma(dev, io_tlb_start),
-                        0, size, DMA_FROM_DEVICE, attrs);
-        if (phys_addr == SWIOTLB_MAP_ERROR)
-                goto out_warn;
-
-        *dma_handle = __phys_to_dma(dev, phys_addr);
-        if (!dma_coherent_ok(dev, *dma_handle, size))
-                goto out_unmap;
-
-        memset(phys_to_virt(phys_addr), 0, size);
-        return phys_to_virt(phys_addr);
-
-out_unmap:
-        dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-                (unsigned long long)dev->coherent_dma_mask,
-                (unsigned long long)*dma_handle);
-
-        /*
-         * DMA_TO_DEVICE to avoid memcpy in unmap_single.
-         * DMA_ATTR_SKIP_CPU_SYNC is optional.
-         */
-        swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
-                        DMA_ATTR_SKIP_CPU_SYNC);
-out_warn:
-        if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
-                dev_warn(dev,
-                        "swiotlb: coherent allocation failed, size=%zu\n",
-                        size);
-                dump_stack();
-        }
-        return NULL;
-}
-
-static bool swiotlb_free_buffer(struct device *dev, size_t size,
-                dma_addr_t dma_addr)
-{
-        phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
-
-        WARN_ON_ONCE(irqs_disabled());
-
-        if (!is_swiotlb_buffer(phys_addr))
-                return false;
-
-        /*
-         * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
-         * DMA_ATTR_SKIP_CPU_SYNC is optional.
-         */
-        swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
-                        DMA_ATTR_SKIP_CPU_SYNC);
-        return true;
-}
-
 static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
                 size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
@@ -926,39 +854,10 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
         return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 
-void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                gfp_t gfp, unsigned long attrs)
-{
-        void *vaddr;
-
-        /* temporary workaround: */
-        if (gfp & __GFP_NOWARN)
-                attrs |= DMA_ATTR_NO_WARN;
-
-        /*
-         * Don't print a warning when the first allocation attempt fails.
-         * swiotlb_alloc_coherent() will print a warning when the DMA memory
-         * allocation ultimately failed.
-         */
-        gfp |= __GFP_NOWARN;
-
-        vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-        if (!vaddr)
-                vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
-        return vaddr;
-}
-
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
-                dma_addr_t dma_addr, unsigned long attrs)
-{
-        if (!swiotlb_free_buffer(dev, size, dma_addr))
-                dma_direct_free(dev, size, vaddr, dma_addr, attrs);
-}
-
 const struct dma_map_ops swiotlb_dma_ops = {
         .mapping_error = dma_direct_mapping_error,
-        .alloc = swiotlb_alloc,
-        .free = swiotlb_free,
+        .alloc = dma_direct_alloc,
+        .free = dma_direct_free,
         .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
         .sync_single_for_device = swiotlb_sync_single_for_device,
         .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
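
The change relies on the direct allocator already steering coherent allocations into a zone the device can address. The sketch below is a hypothetical, heavily simplified illustration of that zone-selection idea, not the actual dma_direct_alloc() code; the helper name and the 24-bit ZONE_DMA threshold are assumptions made for the example.

        /*
         * Hypothetical sketch: pick a GFP zone modifier that satisfies the
         * device's coherent DMA mask, so no bounce-pool fallback is needed.
         */
        static gfp_t coherent_gfp_for_mask(struct device *dev, gfp_t gfp)
        {
                u64 mask = dev->coherent_dma_mask;

                if (mask <= DMA_BIT_MASK(24))           /* assumed ZONE_DMA limit */
                        return gfp | GFP_DMA;
                if (mask <= DMA_BIT_MASK(32))           /* backed by ZONE_DMA32 */
                        return gfp | GFP_DMA32;
                return gfp;                             /* wide mask: any zone is fine */
        }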
