@@ -72,13 +72,6 @@ static phys_addr_t io_tlb_start, io_tlb_end;
  */
 static unsigned long io_tlb_nslabs;
 
-/*
- * When the IOMMU overflows we return a fallback buffer. This sets the size.
- */
-static unsigned long io_tlb_overflow = 32 * 1024;
-
-static phys_addr_t io_tlb_overflow_buffer;
-
 /*
  * This is a free list describing the number of free entries available from
  * each index
@@ -126,7 +119,6 @@ setup_io_tlb_npages(char *str)
 	return 0;
 }
 early_param("swiotlb", setup_io_tlb_npages);
-/* make io_tlb_overflow tunable too? */
 
 unsigned long swiotlb_nr_tbl(void)
 {
@@ -194,16 +186,10 @@ void __init swiotlb_update_mem_attributes(void)
 	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
 	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
 	memset(vaddr, 0, bytes);
-
-	vaddr = phys_to_virt(io_tlb_overflow_buffer);
-	bytes = PAGE_ALIGN(io_tlb_overflow);
-	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
-	memset(vaddr, 0, bytes);
 }
 
 int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
-	void *v_overflow_buffer;
 	unsigned long i, bytes;
 
 	bytes = nslabs << IO_TLB_SHIFT;
@@ -212,17 +198,6 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	io_tlb_start = __pa(tlb);
 	io_tlb_end = io_tlb_start + bytes;
 
-	/*
-	 * Get the overflow emergency buffer
-	 */
-	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
-						PAGE_ALIGN(io_tlb_overflow),
-						PAGE_SIZE);
-	if (!v_overflow_buffer)
-		return -ENOMEM;
-
-	io_tlb_overflow_buffer = __pa(v_overflow_buffer);
-
 	/*
 	 * Allocate and initialize the free list array. This array is used
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
@@ -330,7 +305,6 @@ int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
 	unsigned long i, bytes;
-	unsigned char *v_overflow_buffer;
 
 	bytes = nslabs << IO_TLB_SHIFT;
 
@@ -341,19 +315,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
 	memset(tlb, 0, bytes);
 
-	/*
-	 * Get the overflow emergency buffer
-	 */
-	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
-						     get_order(io_tlb_overflow));
-	if (!v_overflow_buffer)
-		goto cleanup2;
-
-	set_memory_decrypted((unsigned long)v_overflow_buffer,
-			io_tlb_overflow >> PAGE_SHIFT);
-	memset(v_overflow_buffer, 0, io_tlb_overflow);
-	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
-
 	/*
 	 * Allocate and initialize the free list array. This array is used
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
@@ -390,10 +351,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 							 sizeof(int)));
 	io_tlb_list = NULL;
 cleanup3:
-	free_pages((unsigned long)v_overflow_buffer,
-		   get_order(io_tlb_overflow));
-	io_tlb_overflow_buffer = 0;
-cleanup2:
 	io_tlb_end = 0;
 	io_tlb_start = 0;
 	io_tlb_nslabs = 0;
@@ -407,17 +364,13 @@ void __init swiotlb_exit(void)
 		return;
 
 	if (late_alloc) {
-		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
-			   get_order(io_tlb_overflow));
 		free_pages((unsigned long)io_tlb_orig_addr,
 			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
 								 sizeof(int)));
 		free_pages((unsigned long)phys_to_virt(io_tlb_start),
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
-		memblock_free_late(io_tlb_overflow_buffer,
-				   PAGE_ALIGN(io_tlb_overflow));
 		memblock_free_late(__pa(io_tlb_orig_addr),
 				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 		memblock_free_late(__pa(io_tlb_list),
@@ -790,7 +743,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	/* Oh well, have to allocate and map a bounce buffer. */
 	map = map_single(dev, phys, size, dir, attrs);
 	if (map == SWIOTLB_MAP_ERROR)
-		return __phys_to_dma(dev, io_tlb_overflow_buffer);
+		return DIRECT_MAPPING_ERROR;
 
 	dev_addr = __phys_to_dma(dev, map);
 
@@ -801,7 +754,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
 
-	return __phys_to_dma(dev, io_tlb_overflow_buffer);
+	return DIRECT_MAPPING_ERROR;
 }
 
 /*
@@ -985,12 +938,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
 
-int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-	return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
-}
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly. For example, if your device can only drive the low 24-bits
@@ -1033,7 +980,7 @@ void swiotlb_free(struct device *dev, size_t size, void *vaddr,
 }
 
 const struct dma_map_ops swiotlb_dma_ops = {
-	.mapping_error		= swiotlb_dma_mapping_error,
+	.mapping_error		= dma_direct_mapping_error,
 	.alloc			= swiotlb_alloc,
 	.free			= swiotlb_free,
 	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,