Skip to content

Commit 56ea27f

Browse files
committed
dmaengine: consolidate memcpy apis
Copying from page to page (dma_async_memcpy_pg_to_pg) is the superset; make the other two APIs use that one in preparation for providing a common DMA unmap implementation. The common implementation just wants to assume all buffers are mapped with dma_map_page(). Cc: Vinod Koul <vinod.koul@intel.com> Cc: Tomasz Figa <t.figa@samsung.com> Cc: Dave Jiang <dave.jiang@intel.com> Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
1 parent d1cab34 commit 56ea27f

File tree

1 file changed

+45
-92
lines changed

1 file changed

+45
-92
lines changed

drivers/dma/dmaengine.c

Lines changed: 45 additions & 92 deletions
Original file line number | Diff line number | Diff line change
@@ -902,37 +902,39 @@ void dma_async_device_unregister(struct dma_device *device)
902902
EXPORT_SYMBOL(dma_async_device_unregister);
903903

904904
/**
905-
* dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
905+
* dma_async_memcpy_pg_to_pg - offloaded copy from page to page
906906
* @chan: DMA channel to offload copy to
907-
* @dest: destination address (virtual)
908-
* @src: source address (virtual)
907+
* @dest_pg: destination page
908+
* @dest_off: offset in page to copy to
909+
* @src_pg: source page
910+
* @src_off: offset in page to copy from
909911
* @len: length
910912
*
911-
* Both @dest and @src must be mappable to a bus address according to the
912-
* DMA mapping API rules for streaming mappings.
913-
* Both @dest and @src must stay memory resident (kernel memory or locked
914-
* user space pages).
913+
* Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
914+
* address according to the DMA mapping API rules for streaming mappings.
915+
* Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
916+
* (kernel memory or locked user space pages).
915917
*/
916918
dma_cookie_t
917-
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
918-
void *src, size_t len)
919+
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
920+
unsigned int dest_off, struct page *src_pg, unsigned int src_off,
921+
size_t len)
919922
{
920923
struct dma_device *dev = chan->device;
921924
struct dma_async_tx_descriptor *tx;
922925
dma_addr_t dma_dest, dma_src;
923926
dma_cookie_t cookie;
924927
unsigned long flags;
925928

926-
dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
927-
dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
928-
flags = DMA_CTRL_ACK |
929-
DMA_COMPL_SRC_UNMAP_SINGLE |
930-
DMA_COMPL_DEST_UNMAP_SINGLE;
929+
dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
930+
dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
931+
DMA_FROM_DEVICE);
932+
flags = DMA_CTRL_ACK;
931933
tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
932934

933935
if (!tx) {
934-
dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
935-
dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
936+
dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
937+
dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
936938
return -ENOMEM;
937939
}
938940

@@ -946,6 +948,29 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
946948

947949
return cookie;
948950
}
951+
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
952+
953+
/**
954+
* dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
955+
* @chan: DMA channel to offload copy to
956+
* @dest: destination address (virtual)
957+
* @src: source address (virtual)
958+
* @len: length
959+
*
960+
* Both @dest and @src must be mappable to a bus address according to the
961+
* DMA mapping API rules for streaming mappings.
962+
* Both @dest and @src must stay memory resident (kernel memory or locked
963+
* user space pages).
964+
*/
965+
dma_cookie_t
966+
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
967+
void *src, size_t len)
968+
{
969+
return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
970+
(unsigned long) dest & ~PAGE_MASK,
971+
virt_to_page(src),
972+
(unsigned long) src & ~PAGE_MASK, len);
973+
}
949974
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
950975

951976
/**
@@ -963,86 +988,14 @@ EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
963988
*/
964989
dma_cookie_t
965990
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
966-
unsigned int offset, void *kdata, size_t len)
991+
unsigned int offset, void *kdata, size_t len)
967992
{
968-
struct dma_device *dev = chan->device;
969-
struct dma_async_tx_descriptor *tx;
970-
dma_addr_t dma_dest, dma_src;
971-
dma_cookie_t cookie;
972-
unsigned long flags;
973-
974-
dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
975-
dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
976-
flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
977-
tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
978-
979-
if (!tx) {
980-
dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
981-
dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
982-
return -ENOMEM;
983-
}
984-
985-
tx->callback = NULL;
986-
cookie = tx->tx_submit(tx);
987-
988-
preempt_disable();
989-
__this_cpu_add(chan->local->bytes_transferred, len);
990-
__this_cpu_inc(chan->local->memcpy_count);
991-
preempt_enable();
992-
993-
return cookie;
993+
return dma_async_memcpy_pg_to_pg(chan, page, offset,
994+
virt_to_page(kdata),
995+
(unsigned long) kdata & ~PAGE_MASK, len);
994996
}
995997
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
996998

997-
/**
998-
* dma_async_memcpy_pg_to_pg - offloaded copy from page to page
999-
* @chan: DMA channel to offload copy to
1000-
* @dest_pg: destination page
1001-
* @dest_off: offset in page to copy to
1002-
* @src_pg: source page
1003-
* @src_off: offset in page to copy from
1004-
* @len: length
1005-
*
1006-
* Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
1007-
* address according to the DMA mapping API rules for streaming mappings.
1008-
* Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
1009-
* (kernel memory or locked user space pages).
1010-
*/
1011-
dma_cookie_t
1012-
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
1013-
unsigned int dest_off, struct page *src_pg, unsigned int src_off,
1014-
size_t len)
1015-
{
1016-
struct dma_device *dev = chan->device;
1017-
struct dma_async_tx_descriptor *tx;
1018-
dma_addr_t dma_dest, dma_src;
1019-
dma_cookie_t cookie;
1020-
unsigned long flags;
1021-
1022-
dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
1023-
dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
1024-
DMA_FROM_DEVICE);
1025-
flags = DMA_CTRL_ACK;
1026-
tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
1027-
1028-
if (!tx) {
1029-
dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
1030-
dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
1031-
return -ENOMEM;
1032-
}
1033-
1034-
tx->callback = NULL;
1035-
cookie = tx->tx_submit(tx);
1036-
1037-
preempt_disable();
1038-
__this_cpu_add(chan->local->bytes_transferred, len);
1039-
__this_cpu_inc(chan->local->memcpy_count);
1040-
preempt_enable();
1041-
1042-
return cookie;
1043-
}
1044-
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
1045-
1046999
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
10471000
struct dma_chan *chan)
10481001
{

0 commit comments

Comments
 (0)