@@ -22,7 +22,9 @@
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/mmc/host.h>
@@ -37,7 +39,6 @@
 #include <asm/sizes.h>
 
 #include <mach/hardware.h>
-#include <mach/dma.h>
 #include <linux/platform_data/mmc-pxamci.h>
 
 #include "pxamci.h"
@@ -58,7 +59,6 @@ struct pxamci_host {
 	struct clk		*clk;
 	unsigned long		clkrate;
 	int			irq;
-	int			dma;
 	unsigned int		clkrt;
 	unsigned int		cmdat;
 	unsigned int		imask;
@@ -69,8 +69,10 @@ struct pxamci_host {
 	struct mmc_command	*cmd;
 	struct mmc_data		*data;
 
+	struct dma_chan		*dma_chan_rx;
+	struct dma_chan		*dma_chan_tx;
+	dma_cookie_t		dma_cookie;
 	dma_addr_t		sg_dma;
-	struct pxa_dma_desc	*sg_cpu;
 	unsigned int		dma_len;
 
 	unsigned int		dma_dir;
@@ -173,14 +175,18 @@ static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+static void pxamci_dma_irq(void *param);
+
 static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
 {
+	struct dma_async_tx_descriptor *tx;
+	enum dma_transfer_direction direction;
+	struct dma_slave_config config;
+	struct dma_chan *chan;
 	unsigned int nob = data->blocks;
 	unsigned long long clks;
 	unsigned int timeout;
-	bool dalgn = 0;
-	u32 dcmd;
-	int i;
+	int ret;
 
 	host->data = data;
 
195
201
timeout = (unsigned int )clks + (data -> timeout_clks << host -> clkrt );
196
202
writel ((timeout + 255 ) / 256 , host -> base + MMC_RDTO );
197
203
204
+ memset (& config , 0 , sizeof (config ));
205
+ config .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE ;
206
+ config .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE ;
207
+ config .src_addr = host -> res -> start + MMC_RXFIFO ;
208
+ config .dst_addr = host -> res -> start + MMC_TXFIFO ;
209
+ config .src_maxburst = 32 ;
210
+ config .dst_maxburst = 32 ;
211
+
198
212
if (data -> flags & MMC_DATA_READ ) {
199
213
host -> dma_dir = DMA_FROM_DEVICE ;
200
- dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC ;
201
- DRCMR (host -> dma_drcmrtx ) = 0 ;
202
- DRCMR (host -> dma_drcmrrx ) = host -> dma | DRCMR_MAPVLD ;
214
+ direction = DMA_DEV_TO_MEM ;
215
+ chan = host -> dma_chan_rx ;
203
216
} else {
204
217
host -> dma_dir = DMA_TO_DEVICE ;
205
- dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG ;
206
- DRCMR (host -> dma_drcmrrx ) = 0 ;
207
- DRCMR (host -> dma_drcmrtx ) = host -> dma | DRCMR_MAPVLD ;
218
+ direction = DMA_MEM_TO_DEV ;
219
+ chan = host -> dma_chan_tx ;
208
220
}
209
221
210
- dcmd |= DCMD_BURST32 | DCMD_WIDTH1 ;
222
+ config .direction = direction ;
223
+
224
+ ret = dmaengine_slave_config (chan , & config );
225
+ if (ret < 0 ) {
226
+ dev_err (mmc_dev (host -> mmc ), "dma slave config failed\n" );
227
+ return ;
228
+ }
211
229
212
- host -> dma_len = dma_map_sg (mmc_dev ( host -> mmc ) , data -> sg , data -> sg_len ,
230
+ host -> dma_len = dma_map_sg (chan -> device -> dev , data -> sg , data -> sg_len ,
213
231
host -> dma_dir );
214
232
215
- for (i = 0 ; i < host -> dma_len ; i ++ ) {
216
- unsigned int length = sg_dma_len (& data -> sg [i ]);
217
- host -> sg_cpu [i ].dcmd = dcmd | length ;
218
- if (length & 31 && !(data -> flags & MMC_DATA_READ ))
219
- host -> sg_cpu [i ].dcmd |= DCMD_ENDIRQEN ;
220
- /* Not aligned to 8-byte boundary? */
221
- if (sg_dma_address (& data -> sg [i ]) & 0x7 )
222
- dalgn = 1 ;
223
- if (data -> flags & MMC_DATA_READ ) {
224
- host -> sg_cpu [i ].dsadr = host -> res -> start + MMC_RXFIFO ;
225
- host -> sg_cpu [i ].dtadr = sg_dma_address (& data -> sg [i ]);
226
- } else {
227
- host -> sg_cpu [i ].dsadr = sg_dma_address (& data -> sg [i ]);
228
- host -> sg_cpu [i ].dtadr = host -> res -> start + MMC_TXFIFO ;
229
- }
230
- host -> sg_cpu [i ].ddadr = host -> sg_dma + (i + 1 ) *
231
- sizeof (struct pxa_dma_desc );
233
+ tx = dmaengine_prep_slave_sg (chan , data -> sg , host -> dma_len , direction ,
234
+ DMA_PREP_INTERRUPT );
235
+ if (!tx ) {
236
+ dev_err (mmc_dev (host -> mmc ), "prep_slave_sg() failed\n" );
237
+ return ;
232
238
}
233
- host -> sg_cpu [host -> dma_len - 1 ].ddadr = DDADR_STOP ;
234
- wmb ();
235
239
236
- /*
237
- * The PXA27x DMA controller encounters overhead when working with
238
- * unaligned (to 8-byte boundaries) data, so switch on byte alignment
239
- * mode only if we have unaligned data.
240
- */
241
- if (dalgn )
242
- DALGN |= (1 << host -> dma );
243
- else
244
- DALGN &= ~(1 << host -> dma );
245
- DDADR (host -> dma ) = host -> sg_dma ;
240
+ if (!(data -> flags & MMC_DATA_READ )) {
241
+ tx -> callback = pxamci_dma_irq ;
242
+ tx -> callback_param = host ;
243
+ }
244
+
245
+ host -> dma_cookie = dmaengine_submit (tx );
246
246
247
247
/*
248
248
* workaround for erratum #91:
@@ -251,7 +251,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
 	 * before starting DMA.
 	 */
 	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
-		DCSR(host->dma) = DCSR_RUN;
+		dma_async_issue_pending(chan);
 }
 
 static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
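
Note: the pxamci_setup_data() rework above is a textbook instance of the four-step dmaengine slave sequence — configure the channel, prep a scatter-gather descriptor, submit it, then kick the engine. A minimal sketch of that sequence for a device-to-memory transfer follows; the function name, FIFO parameter, and error values are illustrative, not part of this driver.

	/*
	 * Minimal dmaengine slave-transfer sketch (illustrative names only).
	 * Requires <linux/dmaengine.h>.
	 */
	static int example_rx_xfer(struct dma_chan *chan, struct scatterlist *sgl,
				   unsigned int nents, dma_addr_t fifo_addr,
				   dma_async_tx_callback done, void *arg)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= fifo_addr,	/* device FIFO to drain */
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
			.src_maxburst	= 32,
		};
		struct dma_async_tx_descriptor *tx;

		if (dmaengine_slave_config(chan, &cfg) < 0)
			return -EINVAL;

		tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
					     DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		tx->callback = done;		/* runs when the transfer ends */
		tx->callback_param = arg;
		dmaengine_submit(tx);		/* queue it; returns a cookie */
		dma_async_issue_pending(chan);	/* nothing starts until this */
		return 0;
	}

In the driver itself the final dma_async_issue_pending() is deliberately deferred for writes, per the erratum #91 workaround kept in the hunk above: reads issue immediately, writes are kicked later from pxamci_cmd_done().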
@@ -343,7 +343,7 @@ static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
 		 * enable DMA late
 		 */
 		if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
-			DCSR(host->dma) = DCSR_RUN;
+			dma_async_issue_pending(host->dma_chan_tx);
 	} else {
 		pxamci_finish_request(host, host->mrq);
 	}
@@ -354,13 +354,17 @@ static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
 static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
 {
 	struct mmc_data *data = host->data;
+	struct dma_chan *chan;
 
 	if (!data)
 		return 0;
 
-	DCSR(host->dma) = 0;
-	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-		     host->dma_dir);
+	if (data->flags & MMC_DATA_READ)
+		chan = host->dma_chan_rx;
+	else
+		chan = host->dma_chan_tx;
+	dma_unmap_sg(chan->device->dev,
+		     data->sg, data->sg_len, host->dma_dir);
 
 	if (stat & STAT_READ_TIME_OUT)
 		data->error = -ETIMEDOUT;
@@ -552,20 +556,37 @@ static const struct mmc_host_ops pxamci_ops = {
 	.enable_sdio_irq	= pxamci_enable_sdio_irq,
 };
 
-static void pxamci_dma_irq(int dma, void *devid)
+static void pxamci_dma_irq(void *param)
 {
-	struct pxamci_host *host = devid;
-	int dcsr = DCSR(dma);
-	DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;
+	struct pxamci_host *host = param;
+	struct dma_tx_state state;
+	enum dma_status status;
+	struct dma_chan *chan;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (!host->data)
+		goto out_unlock;
 
-	if (dcsr & DCSR_ENDINTR) {
+	if (host->data->flags & MMC_DATA_READ)
+		chan = host->dma_chan_rx;
+	else
+		chan = host->dma_chan_tx;
+
+	status = dmaengine_tx_status(chan, host->dma_cookie, &state);
+
+	if (likely(status == DMA_COMPLETE)) {
 		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
 	} else {
-		pr_err("%s: DMA error on channel %d (DCSR=%#x)\n",
-		       mmc_hostname(host->mmc), dma, dcsr);
+		pr_err("%s: DMA error on %s channel\n", mmc_hostname(host->mmc),
+		       host->data->flags & MMC_DATA_READ ? "rx" : "tx");
 		host->data->error = -EIO;
 		pxamci_data_done(host, 0);
 	}
+
+out_unlock:
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
 static irqreturn_t pxamci_detect_irq(int irq, void *devid)
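
Note: unlike the old PXA DMA handler, a dmaengine completion callback receives no status word, so the rewritten handler re-queries the engine using the cookie saved at dmaengine_submit() time. Reduced to its core, the pattern looks like the sketch below; the example_host layout and the finish/fail helpers are hypothetical stand-ins for driver state.

	/* Completion-callback pattern: ask the engine how the transfer ended. */
	struct example_host {			/* hypothetical driver state */
		struct dma_chan *chan;
		dma_cookie_t	cookie;
	};

	static void example_finish(struct example_host *h);	/* success path */
	static void example_fail(struct example_host *h, u32 residue);

	static void example_dma_done(void *param)
	{
		struct example_host *h = param;
		struct dma_tx_state state;

		/* The cookie from dmaengine_submit() identifies this transfer. */
		if (dmaengine_tx_status(h->chan, h->cookie, &state) == DMA_COMPLETE)
			example_finish(h);
		else
			example_fail(h, state.residue);	/* bytes left undone */
	}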
@@ -625,7 +646,9 @@ static int pxamci_probe(struct platform_device *pdev)
 	struct mmc_host *mmc;
 	struct pxamci_host *host = NULL;
 	struct resource *r, *dmarx, *dmatx;
+	struct pxad_param param_rx, param_tx;
 	int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
+	dma_cap_mask_t mask;
 
 	ret = pxamci_of_init(pdev);
 	if (ret)
@@ -671,7 +694,6 @@ static int pxamci_probe(struct platform_device *pdev)
 
 	host = mmc_priv(mmc);
 	host->mmc = mmc;
-	host->dma = -1;
 	host->pdata = pdev->dev.platform_data;
 	host->clkrt = CLKRT_OFF;
 
@@ -702,12 +724,6 @@ static int pxamci_probe(struct platform_device *pdev)
 			MMC_CAP_SD_HIGHSPEED;
 	}
 
-	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
-	if (!host->sg_cpu) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	spin_lock_init(&host->lock);
 	host->res = r;
 	host->irq = irq;
@@ -728,32 +744,45 @@ static int pxamci_probe(struct platform_device *pdev)
 	writel(64, host->base + MMC_RESTO);
 	writel(host->imask, host->base + MMC_I_MASK);
 
-	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
-				    pxamci_dma_irq, host);
-	if (host->dma < 0) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
 	if (ret)
 		goto out;
 
 	platform_set_drvdata(pdev, mmc);
 
-	dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!dmarx) {
-		ret = -ENXIO;
+	if (!pdev->dev.of_node) {
+		dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+		dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+		if (!dmarx || !dmatx) {
+			ret = -ENXIO;
+			goto out;
+		}
+		param_rx.prio = PXAD_PRIO_LOWEST;
+		param_rx.drcmr = dmarx->start;
+		param_tx.prio = PXAD_PRIO_LOWEST;
+		param_tx.drcmr = dmatx->start;
+	}
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	host->dma_chan_rx =
+		dma_request_slave_channel_compat(mask, pxad_filter_fn,
+						 &param_rx, &pdev->dev, "rx");
+	if (host->dma_chan_rx == NULL) {
+		dev_err(&pdev->dev, "unable to request rx dma channel\n");
+		ret = -ENODEV;
 		goto out;
 	}
-	host->dma_drcmrrx = dmarx->start;
 
-	dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!dmatx) {
-		ret = -ENXIO;
+	host->dma_chan_tx =
+		dma_request_slave_channel_compat(mask, pxad_filter_fn,
+						 &param_tx, &pdev->dev, "tx");
+	if (host->dma_chan_tx == NULL) {
+		dev_err(&pdev->dev, "unable to request tx dma channel\n");
+		ret = -ENODEV;
 		goto out;
 	}
-	host->dma_drcmrtx = dmatx->start;
 
 	if (host->pdata) {
 		gpio_cd = host->pdata->gpio_card_detect;
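
Note: dma_request_slave_channel_compat() is what lets one probe path serve both worlds. On DT platforms it resolves the channel by name ("rx"/"tx") from the device node and the filter arguments are never consulted; on legacy board-file platforms it falls back to a filtered channel request, which is why param_rx/param_tx are only filled in under !pdev->dev.of_node. Roughly, the helper behaves like the sketch below (a simplified restatement of the static inline in <linux/dmaengine.h>, with an illustrative name):

	/* Rough equivalent of dma_request_slave_channel_compat() (simplified). */
	static struct dma_chan *request_chan_compat(dma_cap_mask_t mask,
						    dma_filter_fn fn, void *fn_param,
						    struct device *dev,
						    const char *name)
	{
		struct dma_chan *chan;

		chan = dma_request_slave_channel(dev, name);	/* DT lookup first */
		if (chan)
			return chan;
		if (!fn || !fn_param)
			return NULL;
		return dma_request_channel(mask, fn, fn_param);	/* legacy filter */
	}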
@@ -814,12 +843,12 @@ static int pxamci_probe(struct platform_device *pdev)
 		gpio_free(gpio_power);
 out:
 	if (host) {
-		if (host->dma >= 0)
-			pxa_free_dma(host->dma);
+		if (host->dma_chan_rx)
+			dma_release_channel(host->dma_chan_rx);
+		if (host->dma_chan_tx)
+			dma_release_channel(host->dma_chan_tx);
 		if (host->base)
 			iounmap(host->base);
-		if (host->sg_cpu)
-			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
 		if (host->clk)
 			clk_put(host->clk);
 	}
@@ -863,13 +892,12 @@ static int pxamci_remove(struct platform_device *pdev)
 	       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
 	       host->base + MMC_I_MASK);
 
-	DRCMR(host->dma_drcmrrx) = 0;
-	DRCMR(host->dma_drcmrtx) = 0;
-
 	free_irq(host->irq, host);
-	pxa_free_dma(host->dma);
+	dmaengine_terminate_all(host->dma_chan_rx);
+	dmaengine_terminate_all(host->dma_chan_tx);
+	dma_release_channel(host->dma_chan_rx);
+	dma_release_channel(host->dma_chan_tx);
 	iounmap(host->base);
-	dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
 
 	clk_put(host->clk);
 
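
Note: on the remove path the order matters — a channel must be quiesced before it is handed back to the dmaengine core. The minimal teardown pairing the hunk above applies to each channel, shown as a small illustrative helper:

	/* Quiesce, then release: stop transfers before freeing the channel. */
	static void example_dma_teardown(struct dma_chan *chan)
	{
		dmaengine_terminate_all(chan);	/* abort in-flight descriptors */
		dma_release_channel(chan);	/* return the channel to the core */
	}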