@@ -180,6 +180,11 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
+static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_addr == ARM_MAPPING_ERROR;
+}
+
 const struct dma_map_ops arm_dma_ops = {
 	.alloc			= arm_dma_alloc,
 	.free			= arm_dma_free,
@@ -193,6 +198,7 @@ const struct dma_map_ops arm_dma_ops = {
 	.sync_single_for_device	= arm_dma_sync_single_for_device,
 	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.mapping_error		= arm_dma_mapping_error,
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
@@ -211,6 +217,7 @@ const struct dma_map_ops arm_coherent_dma_ops = {
 	.get_sgtable		= arm_dma_get_sgtable,
 	.map_page		= arm_coherent_dma_map_page,
 	.map_sg			= arm_dma_map_sg,
+	.mapping_error		= arm_dma_mapping_error,
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
@@ -799,7 +806,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	gfp &= ~(__GFP_COMP);
 	args.gfp = gfp;
 
-	*handle = DMA_ERROR_CODE;
+	*handle = ARM_MAPPING_ERROR;
 	allowblock = gfpflags_allow_blocking(gfp);
 	cma = allowblock ? dev_get_cma_area(dev) : false;
 
@@ -1254,15 +1261,15 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 	if (i == mapping->nr_bitmaps) {
 		if (extend_iommu_mapping(mapping)) {
 			spin_unlock_irqrestore(&mapping->lock, flags);
-			return DMA_ERROR_CODE;
+			return ARM_MAPPING_ERROR;
 		}
 
 		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
 				mapping->bits, 0, count, align);
 
 		if (start > mapping->bits) {
 			spin_unlock_irqrestore(&mapping->lock, flags);
-			return DMA_ERROR_CODE;
+			return ARM_MAPPING_ERROR;
 		}
 
 		bitmap_set(mapping->bitmaps[i], start, count);
@@ -1445,7 +1452,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
 	int i;
 
 	dma_addr = __alloc_iova(mapping, size);
-	if (dma_addr == DMA_ERROR_CODE)
+	if (dma_addr == ARM_MAPPING_ERROR)
 		return dma_addr;
 
 	iova = dma_addr;
@@ -1472,7 +1479,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
 fail:
 	iommu_unmap(mapping->domain, dma_addr, iova - dma_addr);
 	__free_iova(mapping, dma_addr, size);
-	return DMA_ERROR_CODE;
+	return ARM_MAPPING_ERROR;
 }
 
 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
@@ -1533,7 +1540,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
 		return NULL;
 
 	*handle = __iommu_create_mapping(dev, &page, size, attrs);
-	if (*handle == DMA_ERROR_CODE)
+	if (*handle == ARM_MAPPING_ERROR)
 		goto err_mapping;
 
 	return addr;
@@ -1561,7 +1568,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	struct page **pages;
 	void *addr = NULL;
 
-	*handle = DMA_ERROR_CODE;
+	*handle = ARM_MAPPING_ERROR;
 	size = PAGE_ALIGN(size);
 
 	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
@@ -1582,7 +1589,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 		return NULL;
 
 	*handle = __iommu_create_mapping(dev, pages, size, attrs);
-	if (*handle == DMA_ERROR_CODE)
+	if (*handle == ARM_MAPPING_ERROR)
 		goto err_buffer;
 
 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
@@ -1732,10 +1739,10 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	int prot;
 
 	size = PAGE_ALIGN(size);
-	*handle = DMA_ERROR_CODE;
+	*handle = ARM_MAPPING_ERROR;
 
 	iova_base = iova = __alloc_iova(mapping, size);
-	if (iova == DMA_ERROR_CODE)
+	if (iova == ARM_MAPPING_ERROR)
 		return -ENOMEM;
 
 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
@@ -1775,7 +1782,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 1; i < nents; i++) {
 		s = sg_next(s);
 
-		s->dma_address = DMA_ERROR_CODE;
+		s->dma_address = ARM_MAPPING_ERROR;
 		s->dma_length = 0;
 
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
@@ -1950,7 +1957,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	int ret, prot, len = PAGE_ALIGN(size + offset);
 
 	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_ERROR_CODE)
+	if (dma_addr == ARM_MAPPING_ERROR)
 		return dma_addr;
 
 	prot = __dma_info_to_prot(dir, attrs);
@@ -1962,7 +1969,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	return dma_addr + offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
-	return DMA_ERROR_CODE;
+	return ARM_MAPPING_ERROR;
}
 
 /**
@@ -2056,7 +2063,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
 	size_t len = PAGE_ALIGN(size + offset);
 
 	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_ERROR_CODE)
+	if (dma_addr == ARM_MAPPING_ERROR)
 		return dma_addr;
 
 	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
@@ -2068,7 +2075,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
 	return dma_addr + offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
-	return DMA_ERROR_CODE;
+	return ARM_MAPPING_ERROR;
 }
 
 /**
@@ -2140,6 +2147,8 @@ const struct dma_map_ops iommu_ops = {
 
 	.map_resource		= arm_iommu_map_resource,
 	.unmap_resource		= arm_iommu_unmap_resource,
+
+	.mapping_error		= arm_dma_mapping_error,
 };
 
 const struct dma_map_ops iommu_coherent_ops = {
@@ -2156,6 +2165,8 @@ const struct dma_map_ops iommu_coherent_ops = {
 
 	.map_resource		= arm_iommu_map_resource,
 	.unmap_resource		= arm_iommu_unmap_resource,
+
+	.mapping_error		= arm_dma_mapping_error,
 };
 
 /**
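Reviewer note (not part of the commit): drivers never call the new ->mapping_error hook directly; they call dma_mapping_error(), which dispatches through the device's dma_map_ops table to the hook installed above. A minimal sketch of the caller-side pattern this serves, where example_map_buf and its arguments are hypothetical names:

	#include <linux/dma-mapping.h>

	/* Hypothetical driver helper: maps a streaming buffer and checks
	 * the returned handle. On ARM, dma_mapping_error() ends up in
	 * arm_dma_mapping_error(), i.e. it compares the handle against
	 * ARM_MAPPING_ERROR rather than the old global DMA_ERROR_CODE. */
	static int example_map_buf(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... program the device with "handle" and run the transfer ... */

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}

Because callers only ever test the handle through dma_mapping_error(), the error sentinel becomes a per-architecture detail, which is what lets this patch rename it without touching any drivers.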