Skip to content

Commit

Permalink
xsk: support virtio DMA map
Browse files Browse the repository at this point in the history
When the device is a virtio device, use virtio's DMA interface instead of the generic DMA API.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
  • Loading branch information
fengidri authored and intel-lab-lkp committed Feb 2, 2023
1 parent e75c780 commit 370aefe
Showing 1 changed file with 45 additions and 14 deletions.
59 changes: 45 additions & 14 deletions net/xdp/xsk_buff_pool.c
Expand Up @@ -3,6 +3,7 @@
#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>
#include <linux/virtio.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
Expand Down Expand Up @@ -334,8 +335,12 @@ static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
dma = &dma_map->dma_pages[i];
if (*dma) {
*dma &= ~XSK_NEXT_PG_CONTIG_MASK;
dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
DMA_BIDIRECTIONAL, attrs);
if (is_virtio_device(dma_map->dev))
virtio_dma_unmap(dma_map->dev, *dma, PAGE_SIZE,
DMA_BIDIRECTIONAL);
else
dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
DMA_BIDIRECTIONAL, attrs);
*dma = 0;
}
}
Expand Down Expand Up @@ -435,22 +440,40 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
return 0;
}

pool->dma_sync_for_cpu = dma_sync_for_cpu;
pool->dma_sync_for_device = dma_sync_for_device;
if (is_virtio_device(dev)) {
pool->dma_sync_for_cpu = virtio_dma_sync_signle_range_for_cpu;
pool->dma_sync_for_device = virtio_dma_sync_signle_range_for_device;

} else {
pool->dma_sync_for_cpu = dma_sync_for_cpu;
pool->dma_sync_for_device = dma_sync_for_device;
}

dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
if (!dma_map)
return -ENOMEM;

for (i = 0; i < dma_map->dma_pages_cnt; i++) {
dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
DMA_BIDIRECTIONAL, attrs);
if (dma_mapping_error(dev, dma)) {
__xp_dma_unmap(dma_map, attrs);
return -ENOMEM;
if (is_virtio_device(dev)) {
dma = virtio_dma_map_page(dev, pages[i], 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);

if (virtio_dma_mapping_error(dev, dma))
goto err;

if (virtio_dma_need_sync(dev, dma))
dma_map->dma_need_sync = true;

} else {
dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
DMA_BIDIRECTIONAL, attrs);

if (dma_mapping_error(dev, dma))
goto err;

if (dma_need_sync(dev, dma))
dma_map->dma_need_sync = true;
}
if (dma_need_sync(dev, dma))
dma_map->dma_need_sync = true;
dma_map->dma_pages[i] = dma;
}

Expand All @@ -464,6 +487,9 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
}

return 0;
err:
__xp_dma_unmap(dma_map, attrs);
return -ENOMEM;
}
EXPORT_SYMBOL(xp_dma_map);

Expand Down Expand Up @@ -546,9 +572,14 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
xskb->xdp.data_meta = xskb->xdp.data;

if (pool->dma_need_sync) {
dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
pool->frame_len,
DMA_BIDIRECTIONAL);
if (is_virtio_device(pool->dev))
virtio_dma_sync_signle_range_for_device(pool->dev, xskb->dma, 0,
pool->frame_len,
DMA_BIDIRECTIONAL);
else
dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
pool->frame_len,
DMA_BIDIRECTIONAL);
}
return &xskb->xdp;
}
Expand Down

0 comments on commit 370aefe

Please sign in to comment.