@@ -38,6 +38,9 @@
 
 #include "dmaengine.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/tegra_apb_dma.h>
+
 #define TEGRA_APBDMA_GENERAL			0x0
 #define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)
 
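Note: CREATE_TRACE_POINTS must be defined in exactly one compilation unit before the trace header is included; that is what emits the tracepoint bodies (every other includer gets only the declarations). As a sketch of what one of the three events used later in this patch might look like, assuming the usual TRACE_EVENT() layout; the actual definitions in include/trace/events/tegra_apb_dma.h may differ in fields and format:

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM tegra_apb_dma

    #if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_TEGRA_APB_DMA_H

    #include <linux/tracepoint.h>
    #include <linux/dmaengine.h>

    /* sketch: logs the channel name and the IRQ being serviced */
    TRACE_EVENT(tegra_dma_isr,
            TP_PROTO(struct dma_chan *dc, int irq),
            TP_ARGS(dc, irq),
            TP_STRUCT__entry(
                    __string(chan, dev_name(&dc->dev->device))
                    __field(int, irq)
            ),
            TP_fast_assign(
                    __assign_str(chan, dev_name(&dc->dev->device));
                    __entry->irq = irq;
            ),
            TP_printk("%s: irq %d", __get_str(chan), __entry->irq)
    );

    #endif /* _TRACE_TEGRA_APB_DMA_H */

    /* must be outside the include guard */
    #include <trace/define_trace.h>

Once compiled in, the events can be enabled at runtime through tracefs, e.g. under events/tegra_apb_dma/ in the tracing directory.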
@@ -146,7 +149,7 @@ struct tegra_dma_channel_regs {
 };
 
 /*
- * tegra_dma_sg_req: Dma request details to configure hardware. This
+ * tegra_dma_sg_req: DMA request details to configure hardware. This
  * contains the details for one transfer to configure DMA hw.
  * The client's request for data transfer can be broken into multiple
  * sub-transfer as per requester details and hw support.
@@ -155,7 +158,7 @@ struct tegra_dma_channel_regs {
  */
 struct tegra_dma_sg_req {
 	struct tegra_dma_channel_regs	ch_regs;
-	int				req_len;
+	unsigned int			req_len;
 	bool				configured;
 	bool				last_sg;
 	struct list_head		node;
@@ -169,8 +172,8 @@ struct tegra_dma_sg_req {
  */
 struct tegra_dma_desc {
 	struct dma_async_tx_descriptor	txd;
-	int				bytes_requested;
-	int				bytes_transferred;
+	unsigned int			bytes_requested;
+	unsigned int			bytes_transferred;
 	enum dma_status			dma_status;
 	struct list_head		node;
 	struct list_head		tx_list;
@@ -186,7 +189,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
 /* tegra_dma_channel: Channel specific information */
 struct tegra_dma_channel {
 	struct dma_chan		dma_chan;
-	char			name[30];
+	char			name[12];
 	bool			config_init;
 	int			id;
 	int			irq;
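Note: shrinking name[] from 30 to 12 bytes works because the driver's channel names are short, of the form "apbdma.<id>". A sketch of the kind of name generation this assumes (the exact snprintf() lives in the probe path and is not part of this diff):

    /* "apbdma." (7 chars) + at most 2 digits + '\0' = 10 bytes <= 12 */
    snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);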
@@ -574,7 +577,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
 	struct tegra_dma_sg_req *hsgreq = NULL;
 
 	if (list_empty(&tdc->pending_sg_req)) {
-		dev_err(tdc2dev(tdc), "Dma is running without req\n");
+		dev_err(tdc2dev(tdc), "DMA is running without req\n");
 		tegra_dma_stop(tdc);
 		return false;
 	}
@@ -587,7 +590,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
 	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
 	if (!hsgreq->configured) {
 		tegra_dma_stop(tdc);
-		dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
+		dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
 		tegra_dma_abort_all(tdc);
 		return false;
 	}
@@ -636,7 +639,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
 
 	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
 	dma_desc = sgreq->dma_desc;
-	dma_desc->bytes_transferred += sgreq->req_len;
+	/* if we dma for long enough the transfer count will wrap */
+	dma_desc->bytes_transferred =
+		(dma_desc->bytes_transferred + sgreq->req_len) %
+		dma_desc->bytes_requested;
 
 	/* Callback need to be call */
 	if (!dma_desc->cb_count)
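Note: in cyclic mode the same descriptor completes over and over, so the old unbounded addition eventually wraps and bytes_transferred stops corresponding to a position in the buffer. Accumulating modulo bytes_requested keeps it a valid in-buffer offset. A standalone sketch of the rule (helper name is illustrative, not driver API):

    /* keep the running count in [0, requested) for cyclic transfers */
    static unsigned int advance_count(unsigned int transferred,
                                      unsigned int req_len,
                                      unsigned int requested)
    {
            return (transferred + req_len) % requested;
    }

    /* e.g. requested = 4096, req_len = 1024:
     * 0 -> 1024 -> 2048 -> 3072 -> 0 -> 1024 -> ... never exceeds 4095 */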
@@ -669,6 +675,8 @@ static void tegra_dma_tasklet(unsigned long data)
 		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
 		cb_count = dma_desc->cb_count;
 		dma_desc->cb_count = 0;
+		trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
+					    cb.callback);
 		spin_unlock_irqrestore(&tdc->lock, flags);
 		while (cb_count--)
 			dmaengine_desc_callback_invoke(&cb, NULL);
@@ -685,6 +693,7 @@ static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
 
 	spin_lock_irqsave(&tdc->lock, flags);
 
+	trace_tegra_dma_isr(&tdc->dma_chan, irq);
 	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
 	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
 		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
@@ -843,6 +852,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
 		dma_set_residue(txstate, residual);
 	}
 
+	trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
 	spin_unlock_irqrestore(&tdc->lock, flags);
 	return ret;
 }
@@ -919,7 +929,7 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
 		return 0;
 
 	default:
-		dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
+		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
 		return -EINVAL;
 	}
 	return -EINVAL;
@@ -952,7 +962,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 	enum dma_slave_buswidth slave_bw;
 
 	if (!tdc->config_init) {
-		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
+		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
 		return NULL;
 	}
 	if (sg_len < 1) {
@@ -985,7 +995,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 
 	dma_desc = tegra_dma_desc_get(tdc);
 	if (!dma_desc) {
-		dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
+		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
 		return NULL;
 	}
 	INIT_LIST_HEAD(&dma_desc->tx_list);
@@ -1005,14 +1015,14 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 		if ((len & 3) || (mem & 3) ||
 		    (len > tdc->tdma->chip_data->max_dma_count)) {
 			dev_err(tdc2dev(tdc),
-				"Dma length/memory address is not supported\n");
+				"DMA length/memory address is not supported\n");
 			tegra_dma_desc_put(tdc, dma_desc);
 			return NULL;
 		}
 
 		sg_req = tegra_dma_sg_req_get(tdc);
 		if (!sg_req) {
-			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
+			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
 			tegra_dma_desc_put(tdc, dma_desc);
 			return NULL;
 		}
@@ -1087,7 +1097,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
 	 * terminating the DMA.
 	 */
 	if (tdc->busy) {
-		dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
+		dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
 		return NULL;
 	}
 
@@ -1144,7 +1154,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
 	while (remain_len) {
 		sg_req = tegra_dma_sg_req_get(tdc);
 		if (!sg_req) {
-			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
+			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
 			tegra_dma_desc_put(tdc, dma_desc);
 			return NULL;
 		}
@@ -1319,8 +1329,9 @@ static int tegra_dma_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
-			    sizeof(struct tegra_dma_channel), GFP_KERNEL);
+	tdma = devm_kzalloc(&pdev->dev,
+			    struct_size(tdma, channels, cdata->nr_channels),
+			    GFP_KERNEL);
 	if (!tdma)
 		return -ENOMEM;
 
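Note: struct_size() from <linux/overflow.h> computes the size of a struct with a trailing flexible array of channels[] members, but saturates to SIZE_MAX if the arithmetic overflows, so devm_kzalloc() fails cleanly instead of returning an undersized buffer. Roughly (a sketch, not the exact macro expansion):

    /* struct_size(tdma, channels, n) is approximately: */
    size_t bytes = sizeof(*tdma) + n * sizeof(tdma->channels[0]);
    /* ...except that the multiply and add saturate to SIZE_MAX on
     * overflow, making the subsequent allocation fail rather than
     * silently undersize. */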