@@ -198,11 +198,11 @@ static unsigned long iommu_range_alloc(struct device *dev,
 	if (unlikely(npages == 0)) {
 		if (printk_ratelimit())
 			WARN_ON(1);
-		return DMA_ERROR_CODE;
+		return IOMMU_MAPPING_ERROR;
 	}
 
 	if (should_fail_iommu(dev))
-		return DMA_ERROR_CODE;
+		return IOMMU_MAPPING_ERROR;
 
 	/*
 	 * We don't need to disable preemption here because any CPU can
@@ -278,7 +278,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
 		} else {
 			/* Give up */
 			spin_unlock_irqrestore(&(pool->lock), flags);
-			return DMA_ERROR_CODE;
+			return IOMMU_MAPPING_ERROR;
 		}
 	}
 
@@ -310,13 +310,13 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 			      unsigned long attrs)
 {
 	unsigned long entry;
-	dma_addr_t ret = DMA_ERROR_CODE;
+	dma_addr_t ret = IOMMU_MAPPING_ERROR;
 	int build_fail;
 
 	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 
-	if (unlikely(entry == DMA_ERROR_CODE))
-		return DMA_ERROR_CODE;
+	if (unlikely(entry == IOMMU_MAPPING_ERROR))
+		return IOMMU_MAPPING_ERROR;
 
 	entry += tbl->it_offset;	/* Offset into real TCE table */
 	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
@@ -328,12 +328,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 
 	/* tbl->it_ops->set() only returns non-zero for transient errors.
 	 * Clean up the table bitmap in this case and return
-	 * DMA_ERROR_CODE. For all other errors the functionality is
+	 * IOMMU_MAPPING_ERROR. For all other errors the functionality is
 	 * not altered.
 	 */
 	if (unlikely(build_fail)) {
 		__iommu_free(tbl, ret, npages);
-		return DMA_ERROR_CODE;
+		return IOMMU_MAPPING_ERROR;
 	}
 
 	/* Flush/invalidate TLB caches if necessary */
@@ -478,7 +478,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
 
 		/* Handle failure */
-		if (unlikely(entry == DMA_ERROR_CODE)) {
+		if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
 			if (!(attrs & DMA_ATTR_NO_WARN) &&
 			    printk_ratelimit())
 				dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -545,7 +545,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	 */
 	if (outcount < incount) {
 		outs = sg_next(outs);
-		outs->dma_address = DMA_ERROR_CODE;
+		outs->dma_address = IOMMU_MAPPING_ERROR;
 		outs->dma_length = 0;
 	}
 
@@ -563,7 +563,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IOMMU_PAGE_SIZE(tbl));
 			__iommu_free(tbl, vaddr, npages);
-			s->dma_address = DMA_ERROR_CODE;
+			s->dma_address = IOMMU_MAPPING_ERROR;
 			s->dma_length = 0;
 		}
 		if (s == outs)
@@ -777,7 +777,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 			  unsigned long mask, enum dma_data_direction direction,
 			  unsigned long attrs)
 {
-	dma_addr_t dma_handle = DMA_ERROR_CODE;
+	dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
 	void *vaddr;
 	unsigned long uaddr;
 	unsigned int npages, align;
@@ -797,7 +797,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
 					 mask >> tbl->it_page_shift, align,
 					 attrs);
-		if (dma_handle == DMA_ERROR_CODE) {
+		if (dma_handle == IOMMU_MAPPING_ERROR) {
 			if (!(attrs & DMA_ATTR_NO_WARN) &&
 			    printk_ratelimit()) {
 				dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -869,7 +869,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	io_order = get_iommu_order(size, tbl);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 			      mask >> tbl->it_page_shift, io_order, 0);
-	if (mapping == DMA_ERROR_CODE) {
+	if (mapping == IOMMU_MAPPING_ERROR) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
 	}
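
Note (not part of the commit): every hunk above swaps the global DMA_ERROR_CODE sentinel for a per-IOMMU IOMMU_MAPPING_ERROR value that allocation paths return and callers compare against. Below is a minimal, hedged C sketch of that sentinel pattern in userspace; the sentinel value (~(dma_addr_t)0x0) and the helper names (fake_iommu_map_page, dma_mapping_failed) are illustrative assumptions, not taken from the patch itself.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* Assumed for illustration: an all-ones bus address marks a failed mapping. */
#define IOMMU_MAPPING_ERROR	(~(dma_addr_t)0x0)

/* Stand-in for an allocator like iommu_alloc(): returns the sentinel on failure. */
static dma_addr_t fake_iommu_map_page(int should_fail)
{
	return should_fail ? IOMMU_MAPPING_ERROR : (dma_addr_t)0x1000;
}

/* Callers test the returned handle against the sentinel instead of a
 * globally defined DMA_ERROR_CODE. */
static int dma_mapping_failed(dma_addr_t handle)
{
	return handle == IOMMU_MAPPING_ERROR;
}

int main(void)
{
	dma_addr_t handle = fake_iommu_map_page(1);

	if (dma_mapping_failed(handle))
		printf("mapping failed, nothing to unmap\n");
	return 0;
}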