Merge tag 'dmaengine-fixes-3.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine

Pull dmaengine fixes from Dan Williams:

 - deprecation of net_dma to be removed in 3.14

 - crash regression fix in pl330 from the dmaengine_unmap rework

 - crash regression fix for any channel running raid ops without
   CONFIG_ASYNC_TX_DMA from dmaengine_unmap

 - memory leak regression in mv_xor from dmaengine_unmap

 - build warning regressions in mv_xor, fsldma, ppc4xx, txx9, and
   at_hdmac from dmaengine_unmap

 - sleep in atomic regression in dma_async_memcpy_pg_to_pg

 - new fix in mv_xor for handling channel initialization failures

* tag 'dmaengine-fixes-3.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  net_dma: mark broken
  dma: pl330: ensure DMA descriptors are zero-initialised
  dmaengine: fix sleep in atomic
  dmaengine: mv_xor: fix oops when channels fail to initialise
  dma: mv_xor: Use dmaengine_unmap_data for the self-tests
  dmaengine: fix enable for high order unmap pools
  dma: fix build warnings in txx9
  dmatest: fix build warning on mips
  dma: fix fsldma build warnings
  dma: fix build warnings in ppc4xx
  dmaengine: at_hdmac: remove unused function
  dma: mv_xor: remove mv_desc_get_dest_addr()
torvalds committed Dec 20, 2013
2 parents 46dd083 + 7787380 commit eaadcfe
Showing 9 changed files with 79 additions and 109 deletions.
7 changes: 7 additions & 0 deletions drivers/dma/Kconfig
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select DCA
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
 	tristate "AMCC PPC440SPe ADMA support"
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
@@ -352,6 +355,7 @@ config NET_DMA
 	bool "Network: TCP receive copy offload"
 	depends on DMA_ENGINE && NET
 	default (INTEL_IOATDMA || FSL_DMA)
+	depends on BROKEN
 	help
 	  This enables the use of DMA engines in the network stack to
 	  offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
 	  Simple DMA test client. Say N unless you're debugging a
 	  DMA Device driver.

+config DMA_ENGINE_RAID
+	bool
+
 endif
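
For context on the Kconfig side: DMA_ENGINE_RAID is a hidden (promptless) bool, so users never see it; drivers whose channels can run raid ops select it, and dmaengine.c keys its large unmap pools off it (see the dmaengine.c hunk below). The old gate was CONFIG_ASYNC_TX_DMA, so a raid-capable channel built without that option reached for an unmap pool that had been compiled out, the crash called out in the pull message. A sketch of the consumer-side effect, with assumed variable names:

	/* Illustrative sketch, not kernel source: dmaengine_get_unmap_data()
	 * picks a pool large enough for nr entries. If only the 2-entry pool
	 * is built in, a request for 16 must fail cleanly rather than reach
	 * past the end of unmap_pool[], which is what the old gating allowed. */
	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 16, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;	/* raid op backs off instead of oopsing */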
4 changes: 0 additions & 4 deletions drivers/dma/at_hdmac_regs.h
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
 {
 	return &chan->dev->device;
 }
-static struct device *chan2parent(struct dma_chan *chan)
-{
-	return chan->dev->device.parent;
-}

 #if defined(VERBOSE_DEBUG)
 static void vdbg_dump_regs(struct at_dma_chan *atchan)
4 changes: 2 additions & 2 deletions drivers/dma/dmaengine.c
@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
 static struct dmaengine_unmap_pool unmap_pool[] = {
 	__UNMAP_POOL(2),
-#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
 	__UNMAP_POOL(16),
 	__UNMAP_POOL(128),
 	__UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_cookie_t cookie;
 	unsigned long flags;

-	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
 	if (!unmap)
 		return -ENOMEM;

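
The second hunk is the "sleep in atomic" fix from the pull message. GFP_NOIO only avoids recursing into the I/O path; it may still block in reclaim, and dma_async_memcpy_pg_to_pg() can be reached from atomic context. GFP_NOWAIT never sleeps. A minimal sketch of the constraint, with a hypothetical lock standing in for whatever atomic context the caller holds:

	spin_lock_irqsave(&lock, flags);	/* caller may be atomic, like this */
	/* GFP_NOIO here could schedule while atomic ("BUG: sleeping function
	 * called from invalid context"); GFP_NOWAIT fails fast instead. */
	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
	if (!unmap)
		ret = -ENOMEM;	/* bounded failure, no sleep */
	spin_unlock_irqrestore(&lock, flags);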
8 changes: 4 additions & 4 deletions drivers/dma/dmatest.c
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)

 	um->len = params->buf_size;
 	for (i = 0; i < src_cnt; i++) {
-		unsigned long buf = (unsigned long) thread->srcs[i];
+		void *buf = thread->srcs[i];
 		struct page *pg = virt_to_page(buf);
-		unsigned pg_off = buf & ~PAGE_MASK;
+		unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;

 		um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
 					   um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
 	/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
 	dsts = &um->addr[src_cnt];
 	for (i = 0; i < dst_cnt; i++) {
-		unsigned long buf = (unsigned long) thread->dsts[i];
+		void *buf = thread->dsts[i];
 		struct page *pg = virt_to_page(buf);
-		unsigned pg_off = buf & ~PAGE_MASK;
+		unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;

 		dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
 				       DMA_BIDIRECTIONAL);
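
Both hunks cure the same warning (the "dmatest: fix build warning on mips" commit in the shortlog): on mips, virt_to_page() wants a real pointer, and feeding it an unsigned long produced a pointer/integer mismatch. The idiom after the fix, restated as a standalone sketch (the surrounding names come from the diff above):

	void *buf = thread->srcs[i];			/* keep the pointer typed */
	struct page *pg = virt_to_page(buf);		/* pointer in, no cast warning */
	unsigned pg_off = (unsigned long)buf & ~PAGE_MASK;	/* cast only for the mask */

	um->addr[i] = dma_map_page(dev->dev, pg, pg_off, um->len, DMA_TO_DEVICE);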
31 changes: 1 addition & 30 deletions drivers/dma/fsldma.c
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
 	hw->count = CPU_TO_DMA(chan, count, 32);
 }

-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
-	return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
 static void set_desc_src(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }

-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_dst(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }

-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_next(struct fsldma_chan *chan,
 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_desc_sw *child;
 	unsigned long flags;
-	dma_cookie_t cookie;
+	dma_cookie_t cookie = -EINVAL;

 	spin_lock_irqsave(&chan->desc_lock, flags);

@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 			      struct fsl_desc_sw *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->async_tx;
-	struct device *dev = chan->common.device->dev;
-	dma_addr_t src = get_desc_src(chan, desc);
-	dma_addr_t dst = get_desc_dst(chan, desc);
-	u32 len = get_desc_cnt(chan, desc);

 	/* Run the link descriptor callback function */
 	if (txd->callback) {
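
The cookie change in fsl_dma_tx_submit() is defensive: the cookie is only assigned inside the loop over the descriptor chain, so an empty chain would have returned an uninitialized value, which is what gcc's -Wmaybe-uninitialized flagged (the "fix fsldma build warnings" commit). Pre-seeding an error code is the usual pattern; a sketch loosely modeled on the function (the list-walking details here are assumptions, not a verbatim excerpt):

	dma_cookie_t cookie = -EINVAL;	/* fail-safe default; also quiets the warning */

	spin_lock_irqsave(&chan->desc_lock, flags);
	list_for_each_entry(child, &desc->tx_list, node)
		cookie = dma_cookie_assign(&child->async_tx);	/* overwritten per node */
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;	/* still -EINVAL if the chain was empty */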
101 changes: 63 additions & 38 deletions drivers/dma/mv_xor.c
@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
 	hw_desc->desc_command = (1 << 31);
 }

-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	return hw_desc->phy_dest_addr;
-}
-
 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
 				   u32 byte_count)
 {
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 /*
  * Perform a transaction to verify the HW works.
  */
-#define MV_XOR_TEST_SIZE 2000

 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	int err = 0;

-	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!src)
 		return -ENOMEM;

-	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!dest) {
 		kfree(src);
 		return -ENOMEM;
 	}

 	/* Fill in src buffer */
-	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+	for (i = 0; i < PAGE_SIZE; i++)
 		((u8 *) src)[i] = (u8)i;

 	dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}

-	dest_dma = dma_map_single(dma_chan->device->dev, dest,
-				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}

-	src_dma = dma_map_single(dma_chan->device->dev, src,
-				  MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+			       PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->to_cnt = 1;
+	unmap->addr[0] = src_dma;
+
+	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	unmap->from_cnt = 1;
+	unmap->addr[1] = dest_dma;
+
+	unmap->len = PAGE_SIZE;

 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-				    MV_XOR_TEST_SIZE, 0);
+				    PAGE_SIZE, 0);
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
@@ -841,15 +847,16 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}

 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
-				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
-	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	if (memcmp(src, dest, PAGE_SIZE)) {
 		dev_err(dma_chan->device->dev,
 			"Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}

 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
 	kfree(src);
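
This rewrite is the mv_xor memory-leak fix from the pull message: the self-test now records every mapping in a dmaengine_unmap_data and releases them through a single reference drop, instead of open-coded dma_map_single() calls whose unmaps could be skipped on an error path. The lifecycle, reduced to a hedged sketch (dev, src_page and dst_page are placeholders, not names from the driver):

	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_KERNEL);	/* room for 2 mappings */
	if (!unmap)
		return -ENOMEM;

	unmap->addr[0] = dma_map_page(dev, src_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	unmap->to_cnt = 1;			/* mappings toward the device */
	unmap->addr[1] = dma_map_page(dev, dst_page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;			/* mappings back from the device */
	unmap->len = PAGE_SIZE;

	/* ... issue the transfer and wait for it to complete ... */

	dmaengine_unmap_put(unmap);	/* unmaps addr[0..1] once the refcount drops */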
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
 	dma_addr_t dest_dma;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	u8 cmp_byte = 0;
 	u32 cmp_word;
 	int err = 0;
+	int src_count = MV_XOR_NUM_SRC_TEST;

-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 		if (!xor_srcs[src_idx]) {
 			while (src_idx--)
@@ -890,13 +899,13 @@
 	}

 	/* Fill in src buffers */
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		u8 *ptr = page_address(xor_srcs[src_idx]);
 		for (i = 0; i < PAGE_SIZE; i++)
 			ptr[i] = (1 << src_idx);
 	}

-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+	for (src_idx = 0; src_idx < src_count; src_idx++)
 		cmp_byte ^= (u8) (1 << src_idx);

 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@
 		goto out;
 	}

+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+					 GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+
 	/* test xor */
-	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
-				DMA_FROM_DEVICE);
+	for (i = 0; i < src_count; i++) {
+		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+					      0, PAGE_SIZE, DMA_TO_DEVICE);
+		dma_srcs[i] = unmap->addr[i];
+		unmap->to_cnt++;
+	}

-	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
-		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
-					   0, PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+					      DMA_FROM_DEVICE);
+	dest_dma = unmap->addr[src_count];
+	unmap->from_cnt = 1;
+	unmap->len = PAGE_SIZE;

 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+				 src_count, PAGE_SIZE, 0);

 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@
 	}

 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
-	src_idx = MV_XOR_NUM_SRC_TEST;
+	src_idx = src_count;
 	while (src_idx--)
 		__free_page(xor_srcs[src_idx]);
 	__free_page(dest);
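
The xor self-test follows the same pattern, with two details worth noting. The unmap descriptor must be sized for every mapping it will track, src_count sources plus one destination, hence the src_count + 1 above; and dmaengine_unmap_put() sits under the free_resources label, so the mappings are released on success and on every failure path alike. The sizing convention in isolation (an illustrative fragment):

	/* sources occupy addr[0..src_count-1], the destination addr[src_count] */
	unmap = dmaengine_get_unmap_data(dev, src_count + 1, GFP_KERNEL);
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
		unmap->to_cnt++;		/* one slot per source */
	}
	unmap->addr[src_count] = dma_map_page(dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	unmap->from_cnt = 1;			/* the destination is the "+ 1" */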
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 		int i = 0;

 		for_each_child_of_node(pdev->dev.of_node, np) {
+			struct mv_xor_chan *chan;
 			dma_cap_mask_t cap_mask;
 			int irq;

@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}

-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
-				xordev->channels[i] = NULL;
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				irq_dispose_mapping(irq);
 				goto err_channel_add;
 			}

+			xordev->channels[i] = chan;
 			i++;
 		}
 	} else if (pdata && pdata->channels) {
 		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
 			struct mv_xor_channel_data *cd;
+			struct mv_xor_chan *chan;
 			int irq;

 			cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}

-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cd->cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cd->cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				goto err_channel_add;
 			}

+			xordev->channels[i] = chan;
 		}
 	}

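
These last two hunks are the "oops when channels fail to initialise" fix from the shortlog: mv_xor_channel_add() returns an ERR_PTR on failure, and storing that straight into xordev->channels[i] left a poisoned entry for the err_channel_add cleanup loop to dereference. Routing the result through a local and publishing it only on success keeps the array holding valid pointers or NULL. The pattern in isolation (a sketch, not the full probe function):

	struct mv_xor_chan *chan;

	chan = mv_xor_channel_add(xordev, pdev, i, cap_mask, irq);
	if (IS_ERR(chan)) {			/* the error pointer never escapes */
		ret = PTR_ERR(chan);
		irq_dispose_mapping(irq);
		goto err_channel_add;		/* cleanup sees only valid or NULL slots */
	}
	xordev->channels[i] = chan;		/* publish only after success */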
[Diffs for the remaining three changed files (the pl330, ppc4xx and txx9 fixes from the shortlog) did not load in this capture.]