33
33
#ifndef _SMC91X_H_
34
34
#define _SMC91X_H_
35
35
36
+ #include <linux/dmaengine.h>
36
37
#include <linux/smc91x.h>
37
38
38
39
/*
@@ -244,6 +245,7 @@ struct smc_local {
244
245
u_long physaddr ;
245
246
struct device * device ;
246
247
#endif
248
+ struct dma_chan * dma_chan ;
247
249
void __iomem * base ;
248
250
void __iomem * datacs ;
249
251
@@ -265,21 +267,47 @@ struct smc_local {
265
267
* as RX which can overrun memory and lose packets.
266
268
*/
267
269
#include <linux/dma-mapping.h>
268
- #include <mach/ dma.h>
270
+ #include <linux/dma/pxa- dma.h>
269
271
270
272
#ifdef SMC_insl
271
273
#undef SMC_insl
272
274
#define SMC_insl (a , r , p , l ) \
273
275
smc_pxa_dma_insl(a, lp, r, dev->dma, p, l)
276
+ static inline void
277
+ smc_pxa_dma_inpump (struct smc_local * lp , u_char * buf , int len )
278
+ {
279
+ dma_addr_t dmabuf ;
280
+ struct dma_async_tx_descriptor * tx ;
281
+ dma_cookie_t cookie ;
282
+ enum dma_status status ;
283
+ struct dma_tx_state state ;
284
+
285
+ dmabuf = dma_map_single (lp -> device , buf , len , DMA_FROM_DEVICE );
286
+ tx = dmaengine_prep_slave_single (lp -> dma_chan , dmabuf , len ,
287
+ DMA_DEV_TO_MEM , 0 );
288
+ if (tx ) {
289
+ cookie = dmaengine_submit (tx );
290
+ dma_async_issue_pending (lp -> dma_chan );
291
+ do {
292
+ status = dmaengine_tx_status (lp -> dma_chan , cookie ,
293
+ & state );
294
+ cpu_relax ();
295
+ } while (status != DMA_COMPLETE && status != DMA_ERROR &&
296
+ state .residue );
297
+ dmaengine_terminate_all (lp -> dma_chan );
298
+ }
299
+ dma_unmap_single (lp -> device , dmabuf , len , DMA_FROM_DEVICE );
300
+ }
301
+
274
302
static inline void
275
303
smc_pxa_dma_insl (void __iomem * ioaddr , struct smc_local * lp , int reg , int dma ,
276
304
u_char * buf , int len )
277
305
{
278
- u_long physaddr = lp -> physaddr ;
279
- dma_addr_t dmabuf ;
306
+ struct dma_slave_config config ;
307
+ int ret ;
280
308
281
309
/* fallback if no DMA available */
282
- if (dma == ( unsigned char ) -1 ) {
310
+ if (! lp -> dma_chan ) {
283
311
readsl (ioaddr + reg , buf , len );
284
312
return ;
285
313
}
@@ -291,18 +319,22 @@ smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
291
319
len -- ;
292
320
}
293
321
322
+ memset (& config , 0 , sizeof (config ));
323
+ config .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES ;
324
+ config .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES ;
325
+ config .src_addr = lp -> physaddr + reg ;
326
+ config .dst_addr = lp -> physaddr + reg ;
327
+ config .src_maxburst = 32 ;
328
+ config .dst_maxburst = 32 ;
329
+ ret = dmaengine_slave_config (lp -> dma_chan , & config );
330
+ if (ret ) {
331
+ dev_err (lp -> device , "dma channel configuration failed: %d\n" ,
332
+ ret );
333
+ return ;
334
+ }
335
+
294
336
len *= 4 ;
295
- dmabuf = dma_map_single (lp -> device , buf , len , DMA_FROM_DEVICE );
296
- DCSR (dma ) = DCSR_NODESC ;
297
- DTADR (dma ) = dmabuf ;
298
- DSADR (dma ) = physaddr + reg ;
299
- DCMD (dma ) = (DCMD_INCTRGADDR | DCMD_BURST32 |
300
- DCMD_WIDTH4 | (DCMD_LENGTH & len ));
301
- DCSR (dma ) = DCSR_NODESC | DCSR_RUN ;
302
- while (!(DCSR (dma ) & DCSR_STOPSTATE ))
303
- cpu_relax ();
304
- DCSR (dma ) = 0 ;
305
- dma_unmap_single (lp -> device , dmabuf , len , DMA_FROM_DEVICE );
337
+ smc_pxa_dma_inpump (lp , buf , len );
306
338
}
307
339
#endif
308
340
@@ -314,11 +346,11 @@ static inline void
314
346
/*
 * Read @len 16-bit words from the data register at @reg into @buf via the
 * dmaengine slave channel, falling back to PIO (readsw) when no channel
 * was acquired.  @dma is unused; it is kept only so the SMC_insw() macro
 * signature stays unchanged.
 */
static inline void
smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
		 u_char *buf, int len)
{
	struct dma_slave_config cfg = {
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.src_addr	= lp->physaddr + reg,
		.dst_addr	= lp->physaddr + reg,
		.src_maxburst	= 32,
		.dst_maxburst	= 32,
	};
	int err;

	/* fallback if no DMA available */
	if (!lp->dma_chan) {
		readsw(ioaddr + reg, buf, len);
		return;
	}

	/* 64 bit alignment is required for memory to memory DMA */
	while ((long)buf & 6) {
		*((u16 *)buf) = SMC_inw(ioaddr, reg);
		buf += 2;
		len--;
	}

	err = dmaengine_slave_config(lp->dma_chan, &cfg);
	if (err) {
		dev_err(lp->device, "dma channel configuration failed: %d\n",
			err);
		return;
	}

	smc_pxa_dma_inpump(lp, buf, len * 2);
}
346
382
#endif
347
383
348
- static void
349
- smc_pxa_dma_irq (int dma , void * dummy )
350
- {
351
- DCSR (dma ) = 0 ;
352
- }
353
384
#endif /* CONFIG_ARCH_PXA */
354
385
355
386
0 commit comments