
Commit eaadcfe

Merge tag 'dmaengine-fixes-3.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine fixes from Dan Williams:

 - deprecation of net_dma to be removed in 3.14
 - crash regression fix in pl330 from the dmaengine_unmap rework
 - crash regression fix for any channel running raid ops without
   CONFIG_ASYNC_TX_DMA from dmaengine_unmap
 - memory leak regression in mv_xor from dmaengine_unmap
 - build warning regressions in mv_xor, fsldma, ppc4xx, txx9, and
   at_hdmac from dmaengine_unmap
 - sleep in atomic regression in dma_async_memcpy_pg_to_pg
 - new fix in mv_xor for handling channel initialization failures

* tag 'dmaengine-fixes-3.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  net_dma: mark broken
  dma: pl330: ensure DMA descriptors are zero-initialised
  dmaengine: fix sleep in atomic
  dmaengine: mv_xor: fix oops when channels fail to initialise
  dma: mv_xor: Use dmaengine_unmap_data for the self-tests
  dmaengine: fix enable for high order unmap pools
  dma: fix build warnings in txx9
  dmatest: fix build warning on mips
  dma: fix fsldma build warnings
  dma: fix build warnings in ppc4xx
  dmaengine: at_hdmac: remove unused function
  dma: mv_xor: remove mv_desc_get_dest_addr()
2 parents 46dd083 + 7787380 commit eaadcfe
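Several of the fixes below converge on the new dmaengine_unmap_data API. As orientation, a minimal sketch of the client-side pattern the self-tests adopt, assuming the 3.13-era interface (dmaengine_get_unmap_data(), the to_cnt/from_cnt/len fields, dmaengine_unmap_put()); this is illustrative, not code from the merge:

/* Hedged sketch of the unmap-data lifecycle; function is hypothetical. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int sketch_copy_one_page(struct dma_chan *chan,
				struct page *src, struct page *dst)
{
	struct device *dev = chan->device->dev;
	struct dmaengine_unmap_data *unmap;

	/* Two entries: one source, one destination. GFP_NOWAIT keeps
	 * this safe in atomic context (see the dmaengine.c fix below). */
	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->addr[0] = dma_map_page(dev, src, 0, PAGE_SIZE, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev, dst, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	/* ... attach unmap to a descriptor and submit it here ... */

	dmaengine_unmap_put(unmap);	/* unmaps both pages once the
					 * refcount drops to zero */
	return 0;
}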

File tree: 9 files changed, +79 -109 lines changed


drivers/dma/Kconfig

Lines changed: 7 additions & 0 deletions
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select DCA
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
 	tristate "AMCC PPC440SPe ADMA support"
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
@@ -352,6 +355,7 @@ config NET_DMA
 	bool "Network: TCP receive copy offload"
 	depends on DMA_ENGINE && NET
 	default (INTEL_IOATDMA || FSL_DMA)
+	depends on BROKEN
 	help
 	  This enables the use of DMA engines in the network stack to
 	  offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
 	  Simple DMA test client. Say N unless you're debugging a
 	  DMA Device driver.

+config DMA_ENGINE_RAID
+	bool
+
 endif
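DMA_ENGINE_RAID is a hidden bool with no prompt: drivers whose channels can run raid/xor ops select it, and dmaengine.c keys the large unmap pools off it at compile time (next diff). A one-line sketch of the compile-time test, for reference; not part of this commit:

#include <linux/kconfig.h>

/* IS_ENABLED(CONFIG_FOO) evaluates to 1 when FOO=y or FOO=m, so the
 * 16/128/256-entry pools compile away when nothing selects
 * DMA_ENGINE_RAID. */
static inline bool want_raid_unmap_pools(void)
{
	return IS_ENABLED(CONFIG_DMA_ENGINE_RAID);
}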

drivers/dma/at_hdmac_regs.h

Lines changed: 0 additions & 4 deletions
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
 {
 	return &chan->dev->device;
 }
-static struct device *chan2parent(struct dma_chan *chan)
-{
-	return chan->dev->device.parent;
-}

 #if defined(VERBOSE_DEBUG)
 static void vdbg_dump_regs(struct at_dma_chan *atchan)

drivers/dma/dmaengine.c

Lines changed: 2 additions & 2 deletions
@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
 static struct dmaengine_unmap_pool unmap_pool[] = {
 	__UNMAP_POOL(2),
-#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
 	__UNMAP_POOL(16),
 	__UNMAP_POOL(128),
 	__UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_cookie_t cookie;
 	unsigned long flags;

-	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
 	if (!unmap)
 		return -ENOMEM;
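The GFP_NOIO to GFP_NOWAIT change is the sleep-in-atomic fix: GFP_NOIO still permits the allocator to block (it only forbids I/O during reclaim), while GFP_NOWAIT fails immediately instead of sleeping. A sketch of the hazard, with hypothetical names:

#include <linux/slab.h>
#include <linux/spinlock.h>

static void *alloc_under_lock(spinlock_t *lock, size_t n)
{
	void *p;

	spin_lock(lock);		/* atomic context from here on */
	p = kmalloc(n, GFP_NOWAIT);	/* GFP_NOIO here could sleep ->
					 * "sleeping function called from
					 * invalid context" */
	spin_unlock(lock);
	return p;
}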

drivers/dma/dmatest.c

Lines changed: 4 additions & 4 deletions
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)

 	um->len = params->buf_size;
 	for (i = 0; i < src_cnt; i++) {
-		unsigned long buf = (unsigned long) thread->srcs[i];
+		void *buf = thread->srcs[i];
 		struct page *pg = virt_to_page(buf);
-		unsigned pg_off = buf & ~PAGE_MASK;
+		unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;

 		um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
 					   um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
 	/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
 	dsts = &um->addr[src_cnt];
 	for (i = 0; i < dst_cnt; i++) {
-		unsigned long buf = (unsigned long) thread->dsts[i];
+		void *buf = thread->dsts[i];
 		struct page *pg = virt_to_page(buf);
-		unsigned pg_off = buf & ~PAGE_MASK;
+		unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;

 		dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
 				       DMA_BIDIRECTIONAL);
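This is the MIPS build-warning fix: virt_to_page() wants a pointer argument there, so the buffer stays a void * and is only cast to unsigned long for the offset arithmetic. The idiom in isolation (a sketch, not the driver code):

#include <linux/mm.h>

static struct page *vaddr_to_page_and_off(void *buf, unsigned int *off)
{
	*off = (unsigned long)buf & ~PAGE_MASK;	/* byte offset in page */
	return virt_to_page(buf);		/* page backing the vaddr */
}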

drivers/dma/fsldma.c

Lines changed: 1 addition & 30 deletions
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
 	hw->count = CPU_TO_DMA(chan, count, 32);
 }

-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
-	return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
 static void set_desc_src(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }

-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_dst(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }

-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_next(struct fsldma_chan *chan,
 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_desc_sw *child;
 	unsigned long flags;
-	dma_cookie_t cookie;
+	dma_cookie_t cookie = -EINVAL;

 	spin_lock_irqsave(&chan->desc_lock, flags);

@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 				      struct fsl_desc_sw *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->async_tx;
-	struct device *dev = chan->common.device->dev;
-	dma_addr_t src = get_desc_src(chan, desc);
-	dma_addr_t dst = get_desc_dst(chan, desc);
-	u32 len = get_desc_cnt(chan, desc);

 	/* Run the link descriptor callback function */
 	if (txd->callback) {
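Initialising cookie to -EINVAL plausibly addresses a may-be-used-uninitialized warning: the only assignment sits inside a list iteration the compiler cannot prove runs. The shape of the problem, reduced to a hedged sketch:

#include <linux/errno.h>

static int sketch_last_cookie(const int *ids, int n)
{
	int cookie = -EINVAL;	/* defined fallback if the loop is empty */
	int i;

	for (i = 0; i < n; i++)
		cookie = ids[i];	/* compiler can't prove n > 0 */

	return cookie;
}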

drivers/dma/mv_xor.c

Lines changed: 63 additions & 38 deletions
@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
 	hw_desc->desc_command = (1 << 31);
 }

-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	return hw_desc->phy_dest_addr;
-}
-
 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
 				   u32 byte_count)
 {
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 /*
  * Perform a transaction to verify the HW works.
  */
-#define MV_XOR_TEST_SIZE 2000

 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	int err = 0;

-	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!src)
 		return -ENOMEM;

-	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!dest) {
 		kfree(src);
 		return -ENOMEM;
 	}

 	/* Fill in src buffer */
-	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+	for (i = 0; i < PAGE_SIZE; i++)
 		((u8 *) src)[i] = (u8)i;

 	dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}

-	dest_dma = dma_map_single(dma_chan->device->dev, dest,
-				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+
+	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+			       PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->to_cnt = 1;
+	unmap->addr[0] = src_dma;

-	src_dma = dma_map_single(dma_chan->device->dev, src,
-				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	unmap->from_cnt = 1;
+	unmap->addr[1] = dest_dma;
+
+	unmap->len = PAGE_SIZE;

 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-				    MV_XOR_TEST_SIZE, 0);
+				    PAGE_SIZE, 0);
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
@@ -841,15 +847,16 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}

 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
-				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
-	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	if (memcmp(src, dest, PAGE_SIZE)) {
 		dev_err(dma_chan->device->dev,
 			"Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}

 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
 	kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
 	dma_addr_t dest_dma;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	u8 cmp_byte = 0;
 	u32 cmp_word;
 	int err = 0;
+	int src_count = MV_XOR_NUM_SRC_TEST;

-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 		if (!xor_srcs[src_idx]) {
 			while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}

 	/* Fill in src buffers */
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		u8 *ptr = page_address(xor_srcs[src_idx]);
 		for (i = 0; i < PAGE_SIZE; i++)
 			ptr[i] = (1 << src_idx);
 	}

-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+	for (src_idx = 0; src_idx < src_count; src_idx++)
 		cmp_byte ^= (u8) (1 << src_idx);

 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}

+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+					 GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+
 	/* test xor */
-	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
-				DMA_FROM_DEVICE);
+	for (i = 0; i < src_count; i++) {
+		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+					      0, PAGE_SIZE, DMA_TO_DEVICE);
+		dma_srcs[i] = unmap->addr[i];
+		unmap->to_cnt++;
+	}

-	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
-		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
-					   0, PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+					      DMA_FROM_DEVICE);
+	dest_dma = unmap->addr[src_count];
+	unmap->from_cnt = 1;
+	unmap->len = PAGE_SIZE;

 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+				 src_count, PAGE_SIZE, 0);

 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}

 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
-	src_idx = MV_XOR_NUM_SRC_TEST;
+	src_idx = src_count;
 	while (src_idx--)
 		__free_page(xor_srcs[src_idx]);
 	__free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 		int i = 0;

 		for_each_child_of_node(pdev->dev.of_node, np) {
+			struct mv_xor_chan *chan;
 			dma_cap_mask_t cap_mask;
 			int irq;

@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}

-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
-				xordev->channels[i] = NULL;
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				irq_dispose_mapping(irq);
 				goto err_channel_add;
 			}

+			xordev->channels[i] = chan;
 			i++;
 		}
 	} else if (pdata && pdata->channels) {
 		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
 			struct mv_xor_channel_data *cd;
+			struct mv_xor_chan *chan;
 			int irq;

 			cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}

-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cd->cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cd->cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				goto err_channel_add;
 			}
+
+			xordev->channels[i] = chan;
 		}
 	}

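The probe changes fix the oops on channel-init failure: the old code stored the ERR_PTR() result straight into xordev->channels[i], and the unwind path then treated it as a live channel. Keeping the result in a local and publishing it only on success avoids that. A sketch with hypothetical foo_* names:

#include <linux/err.h>

struct foo_chan { int id; };
struct foo_dev { struct foo_chan *channels[8]; };

struct foo_chan *foo_chan_add(struct foo_dev *dev, int i); /* may return ERR_PTR() */

static int foo_register_chan(struct foo_dev *dev, int i)
{
	struct foo_chan *chan = foo_chan_add(dev, i);

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* slot stays NULL, safe to unwind */

	dev->channels[i] = chan;	/* publish only a valid pointer */
	return 0;
}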
