 #include <linux/scatterlist.h>
 #include <linux/string.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>

 #include <asm/cache.h>
 #include <asm/io.h>

 #include <dma-coherence.h>

-static inline unsigned long dma_addr_to_virt(struct device *dev,
+static inline struct page *dma_addr_to_page(struct device *dev,
 	dma_addr_t dma_addr)
 {
-	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);
-
-	return (unsigned long)phys_to_virt(addr);
+	return pfn_to_page(
+		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
 }

 /*
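The new helper returns a struct page instead of a kernel virtual address: the old dma_addr_to_virt() went through phys_to_virt(), which is only meaningful for lowmem. Everything below builds on splitting a bus address into a (page, offset) pair, roughly like this (a sketch, assuming the platform's bus-to-physical translation is page-aligned so the low bits carry over):

	/* page backing the DMA address; valid for highmem pages too */
	struct page *page = dma_addr_to_page(dev, dma_addr);
	/* intra-page offset; survives because only whole pfns are translated */
	unsigned long offset = dma_addr & ~PAGE_MASK;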
@@ -148,33 +148,70 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	free_pages(addr, get_order(size));
 }

-static inline void __dma_sync(unsigned long addr, size_t size,
+static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
 {
 	switch (direction) {
 	case DMA_TO_DEVICE:
-		dma_cache_wback(addr, size);
+		dma_cache_wback((unsigned long)addr, size);
 		break;

 	case DMA_FROM_DEVICE:
-		dma_cache_inv(addr, size);
+		dma_cache_inv((unsigned long)addr, size);
 		break;

 	case DMA_BIDIRECTIONAL:
-		dma_cache_wback_inv(addr, size);
+		dma_cache_wback_inv((unsigned long)addr, size);
 		break;

 	default:
 		BUG();
 	}
 }

+/*
+ * A single sg entry may refer to multiple physically contiguous
+ * pages. But we still need to process highmem pages individually.
+ * If highmem is not configured then the bulk of this loop gets
+ * optimized out.
+ */
+static inline void __dma_sync(struct page *page,
+	unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+	size_t left = size;
+
+	do {
+		size_t len = left;
+
+		if (PageHighMem(page)) {
+			void *addr;
+
+			if (offset + len > PAGE_SIZE) {
+				if (offset >= PAGE_SIZE) {
+					page += offset >> PAGE_SHIFT;
+					offset &= ~PAGE_MASK;
+				}
+				len = PAGE_SIZE - offset;
+			}
+
+			addr = kmap_atomic(page);
+			__dma_sync_virtual(addr + offset, len, direction);
+			kunmap_atomic(addr);
+		} else
+			__dma_sync_virtual(page_address(page) + offset,
+				len, direction);
+		offset = 0;
+		page++;
+		left -= len;
+	} while (left);
+}
+
 static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	if (cpu_is_noncoherent_r10000(dev))
-		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
-			direction);
+		__dma_sync(dma_addr_to_page(dev, dma_addr),
+			dma_addr & ~PAGE_MASK, size, direction);

 	plat_unmap_dma_mem(dev, dma_addr, size, direction);
 }
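__dma_sync() touches lowmem in a single pass, since the linear map keeps physically contiguous lowmem pages virtually contiguous, but flushes each highmem page through its own transient kmap_atomic() window. A worked example with hypothetical numbers (4 KiB pages, a 6 KiB buffer starting 2 KiB into a highmem page):

	/*
	 * Pass 1: len is clamped to PAGE_SIZE - 2048 = 2048 bytes, which are
	 *         invalidated through a kmap_atomic() mapping of the page.
	 * Pass 2: offset has been reset to 0; the remaining 4096 bytes come
	 *         from the next page, via another kmap window if that page
	 *         is highmem, or straight through page_address() if not.
	 */
	__dma_sync(page, 2048, 6144, DMA_FROM_DEVICE);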
@@ -185,13 +222,11 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	int i;

 	for (i = 0; i < nents; i++, sg++) {
-		unsigned long addr;
-
-		addr = (unsigned long) sg_virt(sg);
-		if (!plat_device_is_coherent(dev) && addr)
-			__dma_sync(addr, sg->length, direction);
-		sg->dma_address = plat_map_dma_mem(dev,
-			(void *)addr, sg->length);
+		if (!plat_device_is_coherent(dev))
+			__dma_sync(sg_page(sg), sg->offset, sg->length,
+				direction);
+		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
+			sg->offset;
 	}

 	return nents;
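mips_dma_map_sg() now pulls the page and offset straight out of each scatterlist entry, so entries built on highmem pages map correctly; the old code went through sg_virt(), which has no meaningful value for an unmapped highmem page. A hedged caller-side sketch using the generic scatterlist API (my_page, my_len and my_off are placeholders, not names from this patch):

	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, my_page, my_len, my_off);	/* a highmem page is fine now */
	if (dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE) == 0)
		return -ENOMEM;		/* mapping failed */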
@@ -201,55 +236,42 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
 	unsigned long offset, size_t size, enum dma_data_direction direction,
 	struct dma_attrs *attrs)
 {
-	unsigned long addr;
-
-	addr = (unsigned long) page_address(page) + offset;
-
 	if (!plat_device_is_coherent(dev))
-		__dma_sync(addr, size, direction);
+		__dma_sync(page, offset, size, direction);

-	return plat_map_dma_mem(dev, (void *)addr, size);
+	return plat_map_dma_mem_page(dev, page) + offset;
 }

 static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	int nhwentries, enum dma_data_direction direction,
 	struct dma_attrs *attrs)
 {
-	unsigned long addr;
 	int i;

 	for (i = 0; i < nhwentries; i++, sg++) {
 		if (!plat_device_is_coherent(dev) &&
-		    direction != DMA_TO_DEVICE) {
-			addr = (unsigned long) sg_virt(sg);
-			if (addr)
-				__dma_sync(addr, sg->length, direction);
-		}
+		    direction != DMA_TO_DEVICE)
+			__dma_sync(sg_page(sg), sg->offset, sg->length,
+				direction);
 		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
 	}
 }

 static void mips_dma_sync_single_for_cpu(struct device *dev,
 	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-	if (cpu_is_noncoherent_r10000(dev)) {
-		unsigned long addr;
-
-		addr = dma_addr_to_virt(dev, dma_handle);
-		__dma_sync(addr, size, direction);
-	}
+	if (cpu_is_noncoherent_r10000(dev))
+		__dma_sync(dma_addr_to_page(dev, dma_handle),
+			dma_handle & ~PAGE_MASK, size, direction);
 }

 static void mips_dma_sync_single_for_device(struct device *dev,
 	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
 	plat_extra_sync_for_device(dev);
-	if (!plat_device_is_coherent(dev)) {
-		unsigned long addr;
-
-		addr = dma_addr_to_virt(dev, dma_handle);
-		__dma_sync(addr, size, direction);
-	}
+	if (!plat_device_is_coherent(dev))
+		__dma_sync(dma_addr_to_page(dev, dma_handle),
+			dma_handle & ~PAGE_MASK, size, direction);
 }

 static void mips_dma_sync_sg_for_cpu(struct device *dev,
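The two sync_single routines above rebuild the (page, offset) pair from the DMA handle alone. That only works if plat_dma_addr_to_phys() never changes the low PAGE_SHIFT bits, i.e. the bus-to-physical translation is page-aligned; stated as a check (an assumed invariant, not code present in this file):

	/* assumption behind 'dma_handle & ~PAGE_MASK' above */
	BUG_ON((plat_dma_addr_to_phys(dev, dma_handle) & ~PAGE_MASK) !=
	       (dma_handle & ~PAGE_MASK));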
@@ -260,8 +282,8 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
 	/* Make sure that gcc doesn't leave the empty loop body.  */
 	for (i = 0; i < nelems; i++, sg++) {
 		if (cpu_is_noncoherent_r10000(dev))
-			__dma_sync((unsigned long)page_address(sg_page(sg)),
-				sg->length, direction);
+			__dma_sync(sg_page(sg), sg->offset, sg->length,
+				direction);
 	}
 }

@@ -273,8 +295,8 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
 	/* Make sure that gcc doesn't leave the empty loop body.  */
 	for (i = 0; i < nelems; i++, sg++) {
 		if (!plat_device_is_coherent(dev))
-			__dma_sync((unsigned long)page_address(sg_page(sg)),
-				sg->length, direction);
+			__dma_sync(sg_page(sg), sg->offset, sg->length,
+				direction);
 	}
 }

@@ -295,7 +317,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,

 	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev))
-		__dma_sync((unsigned long)vaddr, size, direction);
+		__dma_sync_virtual(vaddr, size, direction);
 }

 EXPORT_SYMBOL(dma_cache_sync);
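dma_cache_sync() keeps the virtual-address path on purpose: its buffers come from dma_alloc_noncoherent(), which returns a lowmem kernel mapping, so there is never a highmem page to look up. Hypothetical usage for context:

	dma_addr_t handle;
	void *buf = dma_alloc_noncoherent(dev, size, &handle, GFP_KERNEL);

	if (buf) {
		/* ... CPU fills buf ... */
		dma_cache_sync(dev, buf, size, DMA_TO_DEVICE);
	}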