Skip to content

Commit feb59d7

Browse files
committed
Merge branch 'topic/xilinx' into for-linus
2 parents 42cb6e0 + c2be36a commit feb59d7

File tree

2 files changed

+104
-73
lines changed

2 files changed

+104
-73
lines changed

Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -37,10 +37,11 @@ Required properties:
3737
Required properties for VDMA:
3838
- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
3939

40-
Optional properties:
41-
- xlnx,include-sg: Tells configured for Scatter-mode in
42-
the hardware.
4340
Optional properties for AXI DMA:
41+
- xlnx,sg-length-width: Should be set to the width in bits of the length
42+
register as configured in h/w. Takes values {8...26}. If the property
43+
is missing or invalid, then the default value 23 is used. This is the
44+
maximum value that is supported by all IP versions.
4445
- xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware.
4546
Optional properties for VDMA:
4647
- xlnx,flush-fsync: Tells which channel to Flush on Frame sync.

drivers/dma/xilinx/xilinx_dma.c

Lines changed: 100 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,7 @@
8686
#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
8787
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
8888
#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
89+
#define XILINX_DMA_DMASR_SG_MASK BIT(3)
8990
#define XILINX_DMA_DMASR_IDLE BIT(1)
9091
#define XILINX_DMA_DMASR_HALTED BIT(0)
9192
#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
@@ -161,7 +162,9 @@
161162
#define XILINX_DMA_REG_BTT 0x28
162163

163164
/* AXI DMA Specific Masks/Bit fields */
164-
#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
165+
#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
166+
#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
167+
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
165168
#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
166169
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
167170
#define XILINX_DMA_CR_COALESCE_SHIFT 16
@@ -412,7 +415,6 @@ struct xilinx_dma_config {
412415
* @dev: Device Structure
413416
* @common: DMA device structure
414417
* @chan: Driver specific DMA channel
415-
* @has_sg: Specifies whether Scatter-Gather is present or not
416418
* @mcdma: Specifies whether Multi-Channel is present or not
417419
* @flush_on_fsync: Flush on frame sync
418420
* @ext_addr: Indicates 64 bit addressing is supported by dma device
@@ -425,13 +427,13 @@ struct xilinx_dma_config {
425427
* @rxs_clk: DMA s2mm stream clock
426428
* @nr_channels: Number of channels DMA device supports
427429
* @chan_id: DMA channel identifier
430+
* @max_buffer_len: Max buffer length
428431
*/
429432
struct xilinx_dma_device {
430433
void __iomem *regs;
431434
struct device *dev;
432435
struct dma_device common;
433436
struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
434-
bool has_sg;
435437
bool mcdma;
436438
u32 flush_on_fsync;
437439
bool ext_addr;
@@ -444,6 +446,7 @@ struct xilinx_dma_device {
444446
struct clk *rxs_clk;
445447
u32 nr_channels;
446448
u32 chan_id;
449+
u32 max_buffer_len;
447450
};
448451

449452
/* Macros */
@@ -959,6 +962,34 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
959962
return 0;
960963
}
961964

965+
/**
966+
* xilinx_dma_calc_copysize - Calculate the amount of data to copy
967+
* @chan: Driver specific DMA channel
968+
* @size: Total data that needs to be copied
969+
* @done: Amount of data that has been already copied
970+
*
971+
* Return: Amount of data that has to be copied
972+
*/
973+
static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
974+
int size, int done)
975+
{
976+
size_t copy;
977+
978+
copy = min_t(size_t, size - done,
979+
chan->xdev->max_buffer_len);
980+
981+
if ((copy + done < size) &&
982+
chan->xdev->common.copy_align) {
983+
/*
984+
* If this is not the last descriptor, make sure
985+
* the next one will be properly aligned
986+
*/
987+
copy = rounddown(copy,
988+
(1 << chan->xdev->common.copy_align));
989+
}
990+
return copy;
991+
}
992+
962993
/**
963994
* xilinx_dma_tx_status - Get DMA transaction status
964995
* @dchan: DMA channel
@@ -992,7 +1023,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
9921023
list_for_each_entry(segment, &desc->segments, node) {
9931024
hw = &segment->hw;
9941025
residue += (hw->control - hw->status) &
995-
XILINX_DMA_MAX_TRANS_LEN;
1026+
chan->xdev->max_buffer_len;
9961027
}
9971028
}
9981029
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1070,7 +1101,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
10701101
struct xilinx_vdma_config *config = &chan->config;
10711102
struct xilinx_dma_tx_descriptor *desc, *tail_desc;
10721103
u32 reg, j;
1073-
struct xilinx_vdma_tx_segment *tail_segment;
1104+
struct xilinx_vdma_tx_segment *segment, *last = NULL;
1105+
int i = 0;
10741106

10751107
/* This function was invoked with lock held */
10761108
if (chan->err)
@@ -1087,17 +1119,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
10871119
tail_desc = list_last_entry(&chan->pending_list,
10881120
struct xilinx_dma_tx_descriptor, node);
10891121

1090-
tail_segment = list_last_entry(&tail_desc->segments,
1091-
struct xilinx_vdma_tx_segment, node);
1092-
1093-
/*
1094-
* If hardware is idle, then all descriptors on the running lists are
1095-
* done, start new transfers
1096-
*/
1097-
if (chan->has_sg)
1098-
dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1099-
desc->async_tx.phys);
1100-
11011122
/* Configure the hardware using info in the config structure */
11021123
if (chan->has_vflip) {
11031124
reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
@@ -1114,15 +1135,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
11141135
else
11151136
reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
11161137

1117-
/*
1118-
* With SG, start with circular mode, so that BDs can be fetched.
1119-
* In direct register mode, if not parking, enable circular mode
1120-
*/
1121-
if (chan->has_sg || !config->park)
1122-
reg |= XILINX_DMA_DMACR_CIRC_EN;
1123-
1138+
/* If not parking, enable circular mode */
11241139
if (config->park)
11251140
reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1141+
else
1142+
reg |= XILINX_DMA_DMACR_CIRC_EN;
11261143

11271144
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
11281145

@@ -1144,48 +1161,38 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
11441161
return;
11451162

11461163
/* Start the transfer */
1147-
if (chan->has_sg) {
1148-
dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1149-
tail_segment->phys);
1150-
list_splice_tail_init(&chan->pending_list, &chan->active_list);
1151-
chan->desc_pendingcount = 0;
1152-
} else {
1153-
struct xilinx_vdma_tx_segment *segment, *last = NULL;
1154-
int i = 0;
1155-
1156-
if (chan->desc_submitcount < chan->num_frms)
1157-
i = chan->desc_submitcount;
1158-
1159-
list_for_each_entry(segment, &desc->segments, node) {
1160-
if (chan->ext_addr)
1161-
vdma_desc_write_64(chan,
1162-
XILINX_VDMA_REG_START_ADDRESS_64(i++),
1163-
segment->hw.buf_addr,
1164-
segment->hw.buf_addr_msb);
1165-
else
1166-
vdma_desc_write(chan,
1164+
if (chan->desc_submitcount < chan->num_frms)
1165+
i = chan->desc_submitcount;
1166+
1167+
list_for_each_entry(segment, &desc->segments, node) {
1168+
if (chan->ext_addr)
1169+
vdma_desc_write_64(chan,
1170+
XILINX_VDMA_REG_START_ADDRESS_64(i++),
1171+
segment->hw.buf_addr,
1172+
segment->hw.buf_addr_msb);
1173+
else
1174+
vdma_desc_write(chan,
11671175
XILINX_VDMA_REG_START_ADDRESS(i++),
11681176
segment->hw.buf_addr);
11691177

1170-
last = segment;
1171-
}
1178+
last = segment;
1179+
}
11721180

1173-
if (!last)
1174-
return;
1181+
if (!last)
1182+
return;
11751183

1176-
/* HW expects these parameters to be same for one transaction */
1177-
vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1178-
vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1179-
last->hw.stride);
1180-
vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1184+
/* HW expects these parameters to be same for one transaction */
1185+
vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1186+
vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1187+
last->hw.stride);
1188+
vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
11811189

1182-
chan->desc_submitcount++;
1183-
chan->desc_pendingcount--;
1184-
list_del(&desc->node);
1185-
list_add_tail(&desc->node, &chan->active_list);
1186-
if (chan->desc_submitcount == chan->num_frms)
1187-
chan->desc_submitcount = 0;
1188-
}
1190+
chan->desc_submitcount++;
1191+
chan->desc_pendingcount--;
1192+
list_del(&desc->node);
1193+
list_add_tail(&desc->node, &chan->active_list);
1194+
if (chan->desc_submitcount == chan->num_frms)
1195+
chan->desc_submitcount = 0;
11891196

11901197
chan->idle = false;
11911198
}
@@ -1254,7 +1261,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
12541261

12551262
/* Start the transfer */
12561263
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1257-
hw->control & XILINX_DMA_MAX_TRANS_LEN);
1264+
hw->control & chan->xdev->max_buffer_len);
12581265
}
12591266

12601267
list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1357,7 +1364,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
13571364

13581365
/* Start the transfer */
13591366
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1360-
hw->control & XILINX_DMA_MAX_TRANS_LEN);
1367+
hw->control & chan->xdev->max_buffer_len);
13611368
}
13621369

13631370
list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1718,7 +1725,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
17181725
struct xilinx_cdma_tx_segment *segment;
17191726
struct xilinx_cdma_desc_hw *hw;
17201727

1721-
if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
1728+
if (!len || len > chan->xdev->max_buffer_len)
17221729
return NULL;
17231730

17241731
desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1808,8 +1815,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
18081815
* Calculate the maximum number of bytes to transfer,
18091816
* making sure it is less than the hw limit
18101817
*/
1811-
copy = min_t(size_t, sg_dma_len(sg) - sg_used,
1812-
XILINX_DMA_MAX_TRANS_LEN);
1818+
copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
1819+
sg_used);
18131820
hw = &segment->hw;
18141821

18151822
/* Fill in the descriptor */
@@ -1913,8 +1920,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
19131920
* Calculate the maximum number of bytes to transfer,
19141921
* making sure it is less than the hw limit
19151922
*/
1916-
copy = min_t(size_t, period_len - sg_used,
1917-
XILINX_DMA_MAX_TRANS_LEN);
1923+
copy = xilinx_dma_calc_copysize(chan, period_len,
1924+
sg_used);
19181925
hw = &segment->hw;
19191926
xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
19201927
period_len * i);
@@ -2389,7 +2396,6 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
23892396

23902397
chan->dev = xdev->dev;
23912398
chan->xdev = xdev;
2392-
chan->has_sg = xdev->has_sg;
23932399
chan->desc_pendingcount = 0x0;
23942400
chan->ext_addr = xdev->ext_addr;
23952401
/* This variable ensures that descriptors are not
@@ -2489,6 +2495,15 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
24892495
chan->stop_transfer = xilinx_dma_stop_transfer;
24902496
}
24912497

2498+
/* check if SG is enabled (only for AXIDMA and CDMA) */
2499+
if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2500+
if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2501+
XILINX_DMA_DMASR_SG_MASK)
2502+
chan->has_sg = true;
2503+
dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2504+
chan->has_sg ? "enabled" : "disabled");
2505+
}
2506+
24922507
/* Initialize the tasklet */
24932508
tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
24942509
(unsigned long)chan);
@@ -2596,7 +2611,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
25962611
struct xilinx_dma_device *xdev;
25972612
struct device_node *child, *np = pdev->dev.of_node;
25982613
struct resource *io;
2599-
u32 num_frames, addr_width;
2614+
u32 num_frames, addr_width, len_width;
26002615
int i, err;
26012616

26022617
/* Allocate and initialize the DMA engine structure */
@@ -2627,9 +2642,24 @@ static int xilinx_dma_probe(struct platform_device *pdev)
26272642
return PTR_ERR(xdev->regs);
26282643

26292644
/* Retrieve the DMA engine properties from the device tree */
2630-
xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
2631-
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2645+
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
2646+
2647+
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
26322648
xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
2649+
if (!of_property_read_u32(node, "xlnx,sg-length-width",
2650+
&len_width)) {
2651+
if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
2652+
len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
2653+
dev_warn(xdev->dev,
2654+
"invalid xlnx,sg-length-width property value. Using default width\n");
2655+
} else {
2656+
if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
2657+
dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
2658+
xdev->max_buffer_len =
2659+
GENMASK(len_width - 1, 0);
2660+
}
2661+
}
2662+
}
26332663

26342664
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
26352665
err = of_property_read_u32(node, "xlnx,num-fstores",

0 commit comments

Comments
 (0)