Commit dff8d6c

Authored and committed by Christoph Hellwig
swiotlb: remove the overflow buffer
Like all other dma mapping drivers just return an error code instead of
an actual memory buffer.  The reason for the overflow buffer was that at
the time swiotlb was invented there was no way to check for dma mapping
errors, but this has long been fixed.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
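For context, the driver-side check that makes the overflow buffer unnecessary is the dma_mapping_error() call that mapping users are already expected to make. A minimal sketch of that calling pattern (illustrative names, not code from this commit):

#include <linux/dma-mapping.h>

/* Sketch, not commit code: callers must test the returned handle with
 * dma_mapping_error(), which dispatches to the ops->mapping_error hook
 * (dma_direct_mapping_error() after this change). */
static int example_map_buffer(struct device *dev, void *buf, size_t size,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;	/* propagate an error code, not a fake buffer */
	return 0;
}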
1 parent 8088546 commit dff8d6c

6 files changed: +8, -64 lines

arch/arm64/mm/dma-mapping.c

Lines changed: 1 addition & 1 deletion
@@ -324,7 +324,7 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
 static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
 {
 	if (swiotlb)
-		return swiotlb_dma_mapping_error(hwdev, addr);
+		return dma_direct_mapping_error(hwdev, addr);
 	return 0;
 }

arch/powerpc/kernel/dma-swiotlb.c

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@
  *
  */
 
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/memblock.h>
 #include <linux/pfn.h>
 #include <linux/of_platform.h>
@@ -59,7 +59,7 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = {
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.mapping_error = swiotlb_dma_mapping_error,
+	.mapping_error = dma_direct_mapping_error,
 	.get_required_mask = swiotlb_powerpc_get_required,
 };

include/linux/dma-direct.h

Lines changed: 2 additions & 0 deletions
@@ -5,6 +5,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/mem_encrypt.h>
 
+#define DIRECT_MAPPING_ERROR		0
+
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
 #else
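Note: moving DIRECT_MAPPING_ERROR into this shared header is what lets swiotlb return it. The matching helper is not shown in this diff, but given the sentinel above it is presumably just a comparison, along these lines:

/* Sketch of the shared helper the ops tables in this commit point at;
 * not part of this diff. */
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}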

include/linux/swiotlb.h

Lines changed: 0 additions & 3 deletions
@@ -106,9 +106,6 @@ extern void
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 			   int nelems, enum dma_data_direction dir);
 
-extern int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-
 extern int
 swiotlb_dma_supported(struct device *hwdev, u64 mask);

kernel/dma/direct.c

Lines changed: 0 additions & 2 deletions
@@ -14,8 +14,6 @@
 #include <linux/pfn.h>
 #include <linux/set_memory.h>
 
-#define DIRECT_MAPPING_ERROR		0
-
 /*
  * Most architectures use ZONE_DMA for the first 16 Megabytes, but
  * some use it for entirely different regions:

kernel/dma/swiotlb.c

Lines changed: 3 additions & 56 deletions
@@ -72,13 +72,6 @@ static phys_addr_t io_tlb_start, io_tlb_end;
  */
 static unsigned long io_tlb_nslabs;
 
-/*
- * When the IOMMU overflows we return a fallback buffer. This sets the size.
- */
-static unsigned long io_tlb_overflow = 32*1024;
-
-static phys_addr_t io_tlb_overflow_buffer;
-
 /*
  * This is a free list describing the number of free entries available from
  * each index
@@ -126,7 +119,6 @@ setup_io_tlb_npages(char *str)
 	return 0;
 }
 early_param("swiotlb", setup_io_tlb_npages);
-/* make io_tlb_overflow tunable too? */
 
 unsigned long swiotlb_nr_tbl(void)
 {
@@ -194,16 +186,10 @@ void __init swiotlb_update_mem_attributes(void)
 	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
 	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
 	memset(vaddr, 0, bytes);
-
-	vaddr = phys_to_virt(io_tlb_overflow_buffer);
-	bytes = PAGE_ALIGN(io_tlb_overflow);
-	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
-	memset(vaddr, 0, bytes);
 }
 
 int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
-	void *v_overflow_buffer;
 	unsigned long i, bytes;
 
 	bytes = nslabs << IO_TLB_SHIFT;
@@ -212,17 +198,6 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	io_tlb_start = __pa(tlb);
 	io_tlb_end = io_tlb_start + bytes;
 
-	/*
-	 * Get the overflow emergency buffer
-	 */
-	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
-						PAGE_ALIGN(io_tlb_overflow),
-						PAGE_SIZE);
-	if (!v_overflow_buffer)
-		return -ENOMEM;
-
-	io_tlb_overflow_buffer = __pa(v_overflow_buffer);
-
 	/*
 	 * Allocate and initialize the free list array.  This array is used
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
@@ -330,7 +305,6 @@ int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
 	unsigned long i, bytes;
-	unsigned char *v_overflow_buffer;
 
 	bytes = nslabs << IO_TLB_SHIFT;
 
@@ -341,19 +315,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
 	memset(tlb, 0, bytes);
 
-	/*
-	 * Get the overflow emergency buffer
-	 */
-	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
-						     get_order(io_tlb_overflow));
-	if (!v_overflow_buffer)
-		goto cleanup2;
-
-	set_memory_decrypted((unsigned long)v_overflow_buffer,
-			io_tlb_overflow >> PAGE_SHIFT);
-	memset(v_overflow_buffer, 0, io_tlb_overflow);
-	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
-
 	/*
 	 * Allocate and initialize the free list array.  This array is used
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
@@ -390,10 +351,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	                  sizeof(int)));
 	io_tlb_list = NULL;
 cleanup3:
-	free_pages((unsigned long)v_overflow_buffer,
-		   get_order(io_tlb_overflow));
-	io_tlb_overflow_buffer = 0;
-cleanup2:
 	io_tlb_end = 0;
 	io_tlb_start = 0;
 	io_tlb_nslabs = 0;
@@ -407,17 +364,13 @@ void __init swiotlb_exit(void)
 		return;
 
 	if (late_alloc) {
-		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
-			   get_order(io_tlb_overflow));
 		free_pages((unsigned long)io_tlb_orig_addr,
 			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
 			   sizeof(int)));
 		free_pages((unsigned long)phys_to_virt(io_tlb_start),
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
-		memblock_free_late(io_tlb_overflow_buffer,
-				   PAGE_ALIGN(io_tlb_overflow));
 		memblock_free_late(__pa(io_tlb_orig_addr),
 				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 		memblock_free_late(__pa(io_tlb_list),
@@ -790,7 +743,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	/* Oh well, have to allocate and map a bounce buffer. */
 	map = map_single(dev, phys, size, dir, attrs);
 	if (map == SWIOTLB_MAP_ERROR)
-		return __phys_to_dma(dev, io_tlb_overflow_buffer);
+		return DIRECT_MAPPING_ERROR;
 
 	dev_addr = __phys_to_dma(dev, map);
 
@@ -801,7 +754,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
 
-	return __phys_to_dma(dev, io_tlb_overflow_buffer);
+	return DIRECT_MAPPING_ERROR;
 }
 
 /*
@@ -985,12 +938,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
 
-int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-	return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
-}
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly.  For example, if your device can only drive the low 24-bits
@@ -1033,7 +980,7 @@ void swiotlb_free(struct device *dev, size_t size, void *vaddr,
 }
 
 const struct dma_map_ops swiotlb_dma_ops = {
-	.mapping_error = swiotlb_dma_mapping_error,
+	.mapping_error = dma_direct_mapping_error,
 	.alloc = swiotlb_alloc,
 	.free = swiotlb_free,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
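Taken together: swiotlb_map_page() now reports failure by returning DIRECT_MAPPING_ERROR, and the generic dma_mapping_error() wrapper hands the returned address to the .mapping_error hook installed above. Roughly (a sketch of the dispatch path, not part of this diff):

/* Sketch: how a driver's dma_mapping_error() call reaches
 * dma_direct_mapping_error() through the dma_map_ops table. */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}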
