 #define XILINX_DMA_DMASR_DMA_DEC_ERR	BIT(6)
 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR	BIT(5)
 #define XILINX_DMA_DMASR_DMA_INT_ERR	BIT(4)
+#define XILINX_DMA_DMASR_SG_MASK	BIT(3)
 #define XILINX_DMA_DMASR_IDLE		BIT(1)
 #define XILINX_DMA_DMASR_HALTED		BIT(0)
 #define XILINX_DMA_DMASR_DELAY_MASK	GENMASK(31, 24)
 #define XILINX_DMA_REG_BTT		0x28
 
 /* AXI DMA Specific Masks/Bit fields */
-#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
+#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
+#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
+#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
 #define XILINX_DMA_CR_COALESCE_SHIFT	16
@@ -412,7 +415,6 @@ struct xilinx_dma_config {
  * @dev: Device Structure
  * @common: DMA device structure
  * @chan: Driver specific DMA channel
- * @has_sg: Specifies whether Scatter-Gather is present or not
  * @mcdma: Specifies whether Multi-Channel is present or not
  * @flush_on_fsync: Flush on frame sync
  * @ext_addr: Indicates 64 bit addressing is supported by dma device
@@ -425,13 +427,13 @@ struct xilinx_dma_config {
  * @rxs_clk: DMA s2mm stream clock
  * @nr_channels: Number of channels DMA device supports
  * @chan_id: DMA channel identifier
+ * @max_buffer_len: Max buffer length
  */
 struct xilinx_dma_device {
 	void __iomem *regs;
 	struct device *dev;
 	struct dma_device common;
 	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
-	bool has_sg;
 	bool mcdma;
 	u32 flush_on_fsync;
 	bool ext_addr;
@@ -444,6 +446,7 @@ struct xilinx_dma_device {
 	struct clk *rxs_clk;
 	u32 nr_channels;
 	u32 chan_id;
+	u32 max_buffer_len;
 };
 
 /* Macros */
@@ -959,6 +962,34 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 	return 0;
 }
 
+/**
+ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
+ * @chan: Driver specific DMA channel
+ * @size: Total data that needs to be copied
+ * @done: Amount of data that has been already copied
+ *
+ * Return: Amount of data that has to be copied
+ */
+static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
+				    int size, int done)
+{
+	size_t copy;
+
+	copy = min_t(size_t, size - done,
+		     chan->xdev->max_buffer_len);
+
+	if ((copy + done < size) &&
+	    chan->xdev->common.copy_align) {
+		/*
+		 * If this is not the last descriptor, make sure
+		 * the next one will be properly aligned
+		 */
+		copy = rounddown(copy,
+				 (1 << chan->xdev->common.copy_align));
+	}
+	return copy;
+}
+
 /**
  * xilinx_dma_tx_status - Get DMA transaction status
  * @dchan: DMA channel
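
The helper above caps each descriptor at the device's programmed maximum transfer length and, when more data still remains, rounds the chunk down to the copy-alignment boundary so the next segment starts aligned. A minimal, self-contained sketch of the same arithmetic follows; it is illustrative only (the parameter names max_buffer_len and copy_align mirror the driver fields, the numbers in the closing example are made up):

    #include <stddef.h>

    /* Illustrative stand-in for xilinx_dma_calc_copysize(). */
    static size_t calc_copysize(size_t size, size_t done,
                                size_t max_buffer_len, unsigned int copy_align)
    {
            size_t copy = size - done;

            if (copy > max_buffer_len)
                    copy = max_buffer_len;  /* respect the hw length limit */

            /* Not the last chunk: keep the next one aligned. */
            if (copy + done < size && copy_align)
                    copy -= copy % ((size_t)1 << copy_align);

            return copy;
    }

For example, with size = 70000, done = 0, max_buffer_len = 0xFFFF and copy_align = 6, the first chunk is 65472 bytes (65535 rounded down to a 64-byte boundary) and the remainder is carried into the next descriptor.
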
@@ -992,7 +1023,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 		list_for_each_entry(segment, &desc->segments, node) {
 			hw = &segment->hw;
 			residue += (hw->control - hw->status) &
-				   XILINX_DMA_MAX_TRANS_LEN;
+				   chan->xdev->max_buffer_len;
 		}
 	}
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -1070,7 +1101,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	struct xilinx_vdma_config *config = &chan->config;
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
 	u32 reg, j;
-	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_segment *segment, *last = NULL;
+	int i = 0;
 
 	/* This function was invoked with lock held */
 	if (chan->err)
@@ -1087,17 +1119,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_desc = list_last_entry(&chan->pending_list,
 				    struct xilinx_dma_tx_descriptor, node);
 
-	tail_segment = list_last_entry(&tail_desc->segments,
-				       struct xilinx_vdma_tx_segment, node);
-
-	/*
-	 * If hardware is idle, then all descriptors on the running lists are
-	 * done, start new transfers
-	 */
-	if (chan->has_sg)
-		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-			       desc->async_tx.phys);
-
 	/* Configure the hardware using info in the config structure */
 	if (chan->has_vflip) {
 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
@@ -1114,15 +1135,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	else
 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
 
-	/*
-	 * With SG, start with circular mode, so that BDs can be fetched.
-	 * In direct register mode, if not parking, enable circular mode
-	 */
-	if (chan->has_sg || !config->park)
-		reg |= XILINX_DMA_DMACR_CIRC_EN;
-
+	/* If not parking, enable circular mode */
 	if (config->park)
 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+	else
+		reg |= XILINX_DMA_DMACR_CIRC_EN;
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
@@ -1144,48 +1161,38 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		return;
 
 	/* Start the transfer */
-	if (chan->has_sg) {
-		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-			       tail_segment->phys);
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
-	} else {
-		struct xilinx_vdma_tx_segment *segment, *last = NULL;
-		int i = 0;
-
-		if (chan->desc_submitcount < chan->num_frms)
-			i = chan->desc_submitcount;
-
-		list_for_each_entry(segment, &desc->segments, node) {
-			if (chan->ext_addr)
-				vdma_desc_write_64(chan,
-					XILINX_VDMA_REG_START_ADDRESS_64(i++),
-					segment->hw.buf_addr,
-					segment->hw.buf_addr_msb);
-			else
-				vdma_desc_write(chan,
+	if (chan->desc_submitcount < chan->num_frms)
+		i = chan->desc_submitcount;
+
+	list_for_each_entry(segment, &desc->segments, node) {
+		if (chan->ext_addr)
+			vdma_desc_write_64(chan,
+				XILINX_VDMA_REG_START_ADDRESS_64(i++),
+				segment->hw.buf_addr,
+				segment->hw.buf_addr_msb);
+		else
+			vdma_desc_write(chan,
 					XILINX_VDMA_REG_START_ADDRESS(i++),
 					segment->hw.buf_addr);
 
-			last = segment;
-		}
+		last = segment;
+	}
 
-		if (!last)
-			return;
+	if (!last)
+		return;
 
-		/* HW expects these parameters to be same for one transaction */
-		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
-		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
-				last->hw.stride);
-		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+	/* HW expects these parameters to be same for one transaction */
+	vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+	vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
+			last->hw.stride);
+	vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
 
-		chan->desc_submitcount++;
-		chan->desc_pendingcount--;
-		list_del(&desc->node);
-		list_add_tail(&desc->node, &chan->active_list);
-		if (chan->desc_submitcount == chan->num_frms)
-			chan->desc_submitcount = 0;
-	}
+	chan->desc_submitcount++;
+	chan->desc_pendingcount--;
+	list_del(&desc->node);
+	list_add_tail(&desc->node, &chan->active_list);
+	if (chan->desc_submitcount == chan->num_frms)
+		chan->desc_submitcount = 0;
 
 	chan->idle = false;
 }
@@ -1254,7 +1261,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-				hw->control & XILINX_DMA_MAX_TRANS_LEN);
+				hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1357,7 +1364,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
+			       hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1718,7 +1725,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 	struct xilinx_cdma_tx_segment *segment;
 	struct xilinx_cdma_desc_hw *hw;
 
-	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+	if (!len || len > chan->xdev->max_buffer_len)
 		return NULL;
 
 	desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1808,8 +1815,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
+							sg_used);
 			hw = &segment->hw;
 
 			/* Fill in the descriptor */
@@ -1913,8 +1920,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, period_len - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, period_len,
+							sg_used);
 			hw = &segment->hw;
 			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
 					  period_len * i);
@@ -2389,7 +2396,6 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 
 	chan->dev = xdev->dev;
 	chan->xdev = xdev;
-	chan->has_sg = xdev->has_sg;
 	chan->desc_pendingcount = 0x0;
 	chan->ext_addr = xdev->ext_addr;
 	/* This variable ensures that descriptors are not
@@ -2489,6 +2495,15 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->stop_transfer = xilinx_dma_stop_transfer;
 	}
 
+	/* check if SG is enabled (only for AXIDMA and CDMA) */
+	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
+		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+		    XILINX_DMA_DMASR_SG_MASK)
+			chan->has_sg = true;
+		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
+			chan->has_sg ? "enabled" : "disabled");
+	}
+
 	/* Initialize the tasklet */
 	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
 		     (unsigned long)chan);
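
With the device-tree flag gone, Scatter-Gather capability is read back from the hardware itself: bit 3 of the DMA status register is set when the IP was synthesized with the SG engine. A small self-contained sketch of the bit test (the register values in the trailing comment are made-up examples):

    #include <stdbool.h>
    #include <stdint.h>

    #define XILINX_DMA_DMASR_SG_MASK (1u << 3)  /* same bit as BIT(3) above */

    /* Returns true when a DMASR snapshot reports SG hardware present. */
    static bool sg_included(uint32_t dmasr)
    {
            return (dmasr & XILINX_DMA_DMASR_SG_MASK) != 0;
    }

    /* e.g. sg_included(0x00010009) -> true, sg_included(0x00010001) -> false */
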
@@ -2596,7 +2611,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	struct xilinx_dma_device *xdev;
 	struct device_node *child, *np = pdev->dev.of_node;
 	struct resource *io;
-	u32 num_frames, addr_width;
+	u32 num_frames, addr_width, len_width;
 	int i, err;
 
 	/* Allocate and initialize the DMA engine structure */
@@ -2627,9 +2642,24 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		return PTR_ERR(xdev->regs);
 
 	/* Retrieve the DMA engine properties from the device tree */
-	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
-	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+		if (!of_property_read_u32(node, "xlnx,sg-length-width",
+					  &len_width)) {
+			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
+			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
+				dev_warn(xdev->dev,
+					 "invalid xlnx,sg-length-width property value. Using default width\n");
+			} else {
+				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
+					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
+				xdev->max_buffer_len =
+					GENMASK(len_width - 1, 0);
+			}
+		}
+	}
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		err = of_property_read_u32(node, "xlnx,num-fstores",
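
The default mask keeps the long-standing 23-bit transfer length, while an optional xlnx,sg-length-width property between 8 and 26 bits overrides it. A plain-C illustration of the resulting masks (a user-space stand-in for the kernel's GENMASK(width - 1, 0), shown only to make the numbers concrete):

    #include <stdio.h>

    /* Stand-in for GENMASK(width - 1, 0), valid for width < 32. */
    static unsigned int len_mask(unsigned int width)
    {
            return (1u << width) - 1;
    }

    int main(void)
    {
            printf("default (23 bits): 0x%x\n", len_mask(23));  /* 0x7fffff  */
            printf("maximum (26 bits): 0x%x\n", len_mask(26));  /* 0x3ffffff */
            return 0;
    }

So a 26-bit length field allows segments of up to 64 MiB minus one byte, compared with 8 MiB minus one byte at the default width.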