Merge branch 'virtio_net-rx-enable-premapped-mode-by-default'
Xuan Zhuo says:

====================
virtio_net: rx enable premapped mode by default

Actually, for the virtio drivers, we can enable premapped mode regardless
of the value of use_dma_api, because we provide the virtio DMA APIs.
So the driver can enable premapped mode unconditionally.

This patch set makes the big mode of virtio-net support premapped mode,
and enables premapped mode for rx by default.
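
For context, a minimal sketch of what "premapped" means for rx (not part
of this patch; example_add_premapped_rx() is a hypothetical helper shown
only for illustration): the driver maps the buffer itself through the
virtqueue DMA wrappers and hands the core an sg that already carries the
dma address, so the core performs no map/unmap of its own.

	#include <linux/virtio.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int example_add_premapped_rx(struct virtqueue *vq, void *buf,
					    u32 len)
	{
		struct scatterlist sg;
		dma_addr_t addr;

		/* Map via the wrapper so it works whether or not the
		 * vring itself uses the DMA API.
		 */
		addr = virtqueue_dma_map_single_attrs(vq, buf, len,
						      DMA_FROM_DEVICE, 0);
		if (virtqueue_dma_mapping_error(vq, addr))
			return -ENOMEM;

		/* Pass the pre-mapped address; the core will not remap it. */
		sg_init_table(&sg, 1);
		sg.dma_address = addr;
		sg.length = len;

		return virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
	}

On the completion path the driver is likewise responsible for the unmap,
e.g. via virtqueue_dma_unmap_single_attrs(), which is what
virtnet_rq_unmap() does.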

Based on the following points, we do not use the page pool to manage
    these pages:

    1. virtio-net uses the DMA APIs wrapped by the virtio core. Therefore,
       we can only prevent the page pool from performing DMA operations, and
       let the driver perform DMA operations on the allocated pages.
    2. But when the page pool releases the page, we have no chance to
       execute the dma unmap.
    3. A solution to #2 is to execute the dma unmap every time before putting
       the page back to the page pool. (This is actually wasteful; we do not
       need to unmap that frequently.)
    4. But there is another problem: we still need to use page.dma_addr to
       save the dma address, and using page.dma_addr while using the page
       pool is unsafe behavior (the driver's own per-page bookkeeping is
       sketched after this list).
    5. And we need space to chain the pages submitted at once to virtio
       core.

    More:
        https://lore.kernel.org/all/CACGkMEu=Aok9z2imB_c5qVuujSh=vjj1kx12fy9N7hqyi+M5Ow@mail.gmail.com/
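
    Instead, virtio-net keeps its own DMA bookkeeping at the head of each
    allocated page frag. This is the struct as it already exists in
    drivers/net/virtio_net.c (abridged here for context, not part of this
    diff):

	/* Stored at the start of the alloc_frag page. Every buffer carved
	 * out of the page takes a reference; the page is unmapped only
	 * once the last reference is dropped.
	 */
	struct virtnet_rq_dma {
		dma_addr_t addr;
		u32 ref;
		u16 len;
		u16 need_sync;
	};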

Why do we not use the page space to store the dma address?

    http://lore.kernel.org/all/CACGkMEuyeJ9mMgYnnB42=hw6umNuo=agn7VBqBqYPd7GN=+39Q@mail.gmail.com
====================

Link: https://lore.kernel.org/r/20240511031404.30903-1-xuanzhuo@linux.alibaba.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
kuba-moo committed May 14, 2024
2 parents 6e62702 + 9719f03 commit f4edb4d
Showing 2 changed files with 38 additions and 59 deletions.
90 changes: 37 additions & 53 deletions drivers/net/virtio_net.c
@@ -348,9 +348,6 @@ struct receive_queue {
 
 	/* Record the last dma info to free after new pages is allocated. */
 	struct virtnet_rq_dma *last_dma;
-
-	/* Do dma by self */
-	bool do_dma;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -746,7 +743,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 
 	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-	/* copy small packet so we can reuse these pages */
 	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
 		skb = virtnet_build_skb(buf, truesize, p - buf, len);
 		if (unlikely(!skb))
@@ -850,7 +846,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
 	void *buf;
 
 	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
-	if (buf && rq->do_dma)
+	if (buf)
 		virtnet_rq_unmap(rq, buf, *len);
 
 	return buf;
@@ -863,11 +859,6 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
 	u32 offset;
 	void *head;
 
-	if (!rq->do_dma) {
-		sg_init_one(rq->sg, buf, len);
-		return;
-	}
-
 	head = page_address(rq->alloc_frag.page);
 
 	offset = buf - head;
@@ -893,44 +884,42 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
 
 	head = page_address(alloc_frag->page);
 
-	if (rq->do_dma) {
-		dma = head;
-
-		/* new pages */
-		if (!alloc_frag->offset) {
-			if (rq->last_dma) {
-				/* Now, the new page is allocated, the last dma
-				 * will not be used. So the dma can be unmapped
-				 * if the ref is 0.
-				 */
-				virtnet_rq_unmap(rq, rq->last_dma, 0);
-				rq->last_dma = NULL;
-			}
+	dma = head;
 
-			dma->len = alloc_frag->size - sizeof(*dma);
+	/* new pages */
+	if (!alloc_frag->offset) {
+		if (rq->last_dma) {
+			/* Now, the new page is allocated, the last dma
+			 * will not be used. So the dma can be unmapped
+			 * if the ref is 0.
+			 */
+			virtnet_rq_unmap(rq, rq->last_dma, 0);
+			rq->last_dma = NULL;
+		}
 
-			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
-							      dma->len, DMA_FROM_DEVICE, 0);
-			if (virtqueue_dma_mapping_error(rq->vq, addr))
-				return NULL;
+		dma->len = alloc_frag->size - sizeof(*dma);
 
-			dma->addr = addr;
-			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+		addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
						      dma->len, DMA_FROM_DEVICE, 0);
+		if (virtqueue_dma_mapping_error(rq->vq, addr))
+			return NULL;
 
-			/* Add a reference to dma to prevent the entire dma from
-			 * being released during error handling. This reference
-			 * will be freed after the pages are no longer used.
-			 */
-			get_page(alloc_frag->page);
-			dma->ref = 1;
-			alloc_frag->offset = sizeof(*dma);
+		dma->addr = addr;
+		dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
 
-			rq->last_dma = dma;
-		}
+		/* Add a reference to dma to prevent the entire dma from
+		 * being released during error handling. This reference
+		 * will be freed after the pages are no longer used.
+		 */
+		get_page(alloc_frag->page);
+		dma->ref = 1;
+		alloc_frag->offset = sizeof(*dma);
 
-		++dma->ref;
+		rq->last_dma = dma;
 	}
 
+	++dma->ref;
+
 	buf = head + alloc_frag->offset;
 
 	get_page(alloc_frag->page);
@@ -947,12 +936,9 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
 	if (!vi->mergeable_rx_bufs && vi->big_packets)
 		return;
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
-		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
-			continue;
-
-		vi->rq[i].do_dma = true;
-	}
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		/* error should never happen */
+		BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
 }

static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
Expand All @@ -963,7 +949,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)

rq = &vi->rq[i];

if (rq->do_dma)
if (!vi->big_packets || vi->mergeable_rx_bufs)
virtnet_rq_unmap(rq, buf, 0);

virtnet_rq_free_buf(vi, rq, buf);
@@ -2030,8 +2016,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0) {
-		if (rq->do_dma)
-			virtnet_rq_unmap(rq, buf, 0);
+		virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
 	}
 
@@ -2145,8 +2130,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 	ctx = mergeable_len_to_ctx(len + room, headroom);
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0) {
-		if (rq->do_dma)
-			virtnet_rq_unmap(rq, buf, 0);
+		virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
 	}
 
@@ -2277,7 +2261,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 		}
 	} else {
 		while (packets < budget &&
-		       (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
+		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
 			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
 			packets++;
 		}
@@ -5229,7 +5213,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
 	int i;
 	for (i = 0; i < vi->max_queue_pairs; i++)
 		if (vi->rq[i].alloc_frag.page) {
-			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+			if (vi->rq[i].last_dma)
 				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
 			put_page(vi->rq[i].alloc_frag.page);
 		}
7 changes: 1 addition & 6 deletions drivers/virtio/virtio_ring.c
@@ -2782,7 +2782,7 @@ EXPORT_SYMBOL_GPL(virtqueue_resize);
  *
  * Returns zero or a negative error.
  * 0: success.
- * -EINVAL: vring does not use the dma api, so we can not enable premapped mode.
+ * -EINVAL: too late to enable premapped mode, the vq already contains buffers.
  */
 int virtqueue_set_dma_premapped(struct virtqueue *_vq)
 {
@@ -2798,11 +2798,6 @@ int virtqueue_set_dma_premapped(struct virtqueue *_vq)
 		return -EINVAL;
 	}
 
-	if (!vq->use_dma_api) {
-		END_USE(vq);
-		return -EINVAL;
-	}
-
 	vq->premapped = true;
 	vq->do_unmap = false;
 
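With the use_dma_api check gone, the only remaining failure mode is
calling too late, so a driver can assume success on a freshly created
vq. A sketch of the resulting caller pattern (mirroring the
virtnet_rq_set_premapped() hunk above):

	/* Must run before any buffer is added to the vq; after this
	 * change the call can only fail if the vq already holds buffers.
	 */
	BUG_ON(virtqueue_set_dma_premapped(vq));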
