 #include <asm/x86_init.h>
 #include <asm/iommu_table.h>
 
+#define CALGARY_MAPPING_ERROR	0
+
 #ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
 int use_calgary __read_mostly = 1;
 #else
@@ -252,7 +254,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
 			if (panic_on_overflow)
 				panic("Calgary: fix the allocator.\n");
 			else
-				return DMA_ERROR_CODE;
+				return CALGARY_MAPPING_ERROR;
 		}
 	}
 
@@ -272,10 +274,10 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 
 	entry = iommu_range_alloc(dev, tbl, npages);
 
-	if (unlikely(entry == DMA_ERROR_CODE)) {
+	if (unlikely(entry == CALGARY_MAPPING_ERROR)) {
 		pr_warn("failed to allocate %u pages in iommu %p\n",
 			npages, tbl);
-		return DMA_ERROR_CODE;
+		return CALGARY_MAPPING_ERROR;
 	}
 
 	/* set the return dma address */
@@ -295,7 +297,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned long flags;
 
 	/* were we called with bad_dma_address? */
-	badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
+	badend = CALGARY_MAPPING_ERROR + (EMERGENCY_PAGES * PAGE_SIZE);
 	if (unlikely(dma_addr < badend)) {
 		WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
 		       "address 0x%Lx\n", dma_addr);
@@ -380,7 +382,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 		npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
 
 		entry = iommu_range_alloc(dev, tbl, npages);
-		if (entry == DMA_ERROR_CODE) {
+		if (entry == CALGARY_MAPPING_ERROR) {
 			/* makes sure unmap knows to stop */
 			s->dma_length = 0;
 			goto error;
@@ -398,7 +400,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 error:
 	calgary_unmap_sg(dev, sg, nelems, dir, 0);
 	for_each_sg(sg, s, nelems, i) {
-		sg->dma_address = DMA_ERROR_CODE;
+		sg->dma_address = CALGARY_MAPPING_ERROR;
 		sg->dma_length = 0;
 	}
 	return 0;
@@ -453,7 +455,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
 
 	/* set up tces to cover the allocated range */
 	mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
-	if (mapping == DMA_ERROR_CODE)
+	if (mapping == CALGARY_MAPPING_ERROR)
 		goto free;
 	*dma_handle = mapping;
 	return ret;
@@ -478,13 +480,19 @@ static void calgary_free_coherent(struct device *dev, size_t size,
 	free_pages((unsigned long)vaddr, get_order(size));
 }
 
+static int calgary_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_addr == CALGARY_MAPPING_ERROR;
+}
+
 static const struct dma_map_ops calgary_dma_ops = {
 	.alloc = calgary_alloc_coherent,
 	.free = calgary_free_coherent,
 	.map_sg = calgary_map_sg,
 	.unmap_sg = calgary_unmap_sg,
 	.map_page = calgary_map_page,
 	.unmap_page = calgary_unmap_page,
+	.mapping_error = calgary_mapping_error,
 };
 
 static inline void __iomem * busno_to_bbar(unsigned char num)
@@ -732,7 +740,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
 	struct iommu_table *tbl = pci_iommu(dev->bus);
 
 	/* reserve EMERGENCY_PAGES from bad_dma_address and up */
-	iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);
+	iommu_range_reserve(tbl, CALGARY_MAPPING_ERROR, EMERGENCY_PAGES);
 
 	/* avoid the BIOS/VGA first 640KB-1MB region */
 	/* for CalIOC2 - avoid the entire first MB */
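
Note (not part of the diff): drivers never call the new calgary_mapping_error() callback directly. The generic DMA API dispatches to a bus's ->mapping_error hook through dma_mapping_error(), so the CALGARY_MAPPING_ERROR sentinel stays private to this file. A minimal sketch of that dispatch, assuming the dma-mapping API of this kernel generation (the real body lives in include/linux/dma-mapping.h and may differ in detail):

/*
 * Sketch of the generic-side dispatch, not copied verbatim from the tree:
 * if the bus ops provide ->mapping_error, ask them; otherwise assume the
 * returned handle is valid.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}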
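From a device driver's point of view the check is unchanged; a hypothetical call site (names illustrative, not from this patch):

	/* map one page for device reads and bail out if the IOMMU is full */
	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

With Calgary behind dev, dma_mapping_error() ends up in calgary_mapping_error() and reports failure exactly when the mapping paths above returned CALGARY_MAPPING_ERROR.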