Skip to content

Commit

Permalink
dmaengine: ti: edma: Support for interleaved mem to mem transfer
Browse files Browse the repository at this point in the history
Add basic interleaved support via EDMA.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
  • Loading branch information
Peter Ujfalusi committed Jul 3, 2020
1 parent db21507 commit cc41d90
Showing 1 changed file with 79 additions and 0 deletions.
79 changes: 79 additions & 0 deletions drivers/dma/ti/edma.c
Original file line number Diff line number Diff line change
Expand Up @@ -1275,6 +1275,81 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

/*
 * edma_prep_dma_interleaved - prepare an interleaved (2D) MEM_TO_MEM transfer
 * @chan:	DMA channel
 * @xt:		interleaved transfer template; only frame_size == 1 with a
 *		non-zero frame count is supported
 * @tx_flags:	dmaengine flags; DMA_PREP_INTERRUPT selects interrupt
 *		completion, otherwise the descriptor is completed by polling
 *
 * Maps the template onto a single PaRAM set: ACNT is the chunk size, BCNT
 * the number of frames (numf), CCNT is 1, and the B-index registers encode
 * chunk size plus inter-chunk gap. Constant (non-incrementing) addressing
 * is not supported by EDMA, nor is slave (device) direction.
 *
 * Returns the prepared descriptor, or NULL on unsupported/invalid
 * parameters or allocation failure.
 */
static struct dma_async_tx_descriptor *
edma_prep_dma_interleaved(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long tx_flags)
{
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);
	struct edmacc_param *param;
	struct edma_desc *edesc;
	size_t src_icg, dst_icg;
	int src_bidx, dst_bidx;

	/* Slave mode is not supported */
	if (is_slave_direction(xt->dir))
		return NULL;

	if (xt->frame_size != 1 || xt->numf == 0)
		return NULL;

	/*
	 * ACNT and BCNT are 16 bit wide, so SZ_64K - 1 is the largest
	 * encodable value (the memcpy path in this driver uses the same
	 * SZ_64K - 1 limit when splitting transfers). SZ_64K itself would
	 * truncate to 0 in the a_b_cnt packing below.
	 */
	if (xt->sgl[0].size >= SZ_64K || xt->numf >= SZ_64K)
		return NULL;

	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
	if (src_icg) {
		src_bidx = src_icg + xt->sgl[0].size;
	} else if (xt->src_inc) {
		src_bidx = xt->sgl[0].size;
	} else {
		dev_err(dev, "%s: SRC constant addressing is not supported\n",
			__func__);
		return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	if (dst_icg) {
		dst_bidx = dst_icg + xt->sgl[0].size;
	} else if (xt->dst_inc) {
		dst_bidx = xt->sgl[0].size;
	} else {
		dev_err(dev, "%s: DST constant addressing is not supported\n",
			__func__);
		return NULL;
	}

	/*
	 * The BIDX fields are also 16 bit wide; SZ_64K would truncate to 0
	 * when packed into SRC_DST_BIDX below.
	 * NOTE(review): the EDMA3 TRM documents BIDX as a signed 16 bit
	 * value, so indexes above SZ_32K - 1 may need rejecting as well -
	 * confirm against the hardware documentation.
	 */
	if (src_bidx >= SZ_64K || dst_bidx >= SZ_64K)
		return NULL;

	edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->direction = DMA_MEM_TO_MEM;
	edesc->echan = echan;
	edesc->pset_nr = 1;

	param = &edesc->pset[0].param;

	param->src = xt->src_start;
	param->dst = xt->dst_start;
	/* One A-sync array per frame: ACNT = chunk size, BCNT = numf */
	param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
	param->ccnt = 1;
	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = 0;

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	param->opt |= ITCCHEN;
	/* Enable transfer complete interrupt if requested */
	if (tx_flags & DMA_PREP_INTERRUPT)
		param->opt |= TCINTEN;
	else
		edesc->polled = true;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
Expand Down Expand Up @@ -1917,7 +1992,9 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
"Legacy memcpy is enabled, things might not work\n");

dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
s_ddev->directions = BIT(DMA_MEM_TO_MEM);
}

Expand Down Expand Up @@ -1953,8 +2030,10 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)

dma_cap_zero(m_ddev->cap_mask);
dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);

m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
m_ddev->device_free_chan_resources = edma_free_chan_resources;
m_ddev->device_issue_pending = edma_issue_pending;
Expand Down

0 comments on commit cc41d90

Please sign in to comment.