Skip to content

Commit

Permalink
virtio_net: Fix error unwinding of XDP initialization
Browse files Browse the repository at this point in the history
[ Upstream commit 5306623 ]

When initializing XDP in virtnet_open(), an rq's XDP initialization
may hit an error, causing the net device open to fail. However,
previous rqs have already initialized XDP and enabled NAPI, which is
not the expected behavior. The previously initialized rqs need to be
rolled back to avoid leaks in the error unwinding of the init code.

Also extract helper functions of disable and enable queue pairs.
Use newly introduced disable helper function in error unwinding and
virtnet_close. Use enable helper function in virtnet_open.

Fixes: 754b8a2 ("virtio_net: setup xdp_rxq_info")
Signed-off-by: Feng Liu <feliu@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: William Tu <witu@nvidia.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
  • Loading branch information
Feng Liu authored and gregkh committed May 24, 2023
1 parent ac7106a commit 037768b
Showing 1 changed file with 44 additions and 17 deletions.
61 changes: 44 additions & 17 deletions drivers/net/virtio_net.c
Expand Up @@ -1867,6 +1867,38 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
return received;
}

/* Quiesce one rx/tx queue pair: stop both NAPI contexts, then drop the
 * rq's XDP rxq registration. Exact inverse of virtnet_enable_queue_pair(),
 * so it can serve both virtnet_close() and virtnet_open()'s error unwind.
 */
static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
	virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
	napi_disable(&vi->rq[qp_index].napi);
	/* Safe only after rx NAPI is disabled; unregisters the xdp_rxq
	 * that virtnet_enable_queue_pair() registered.
	 */
	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}

/* Bring one rx/tx queue pair online: register the rq's XDP rxq info and
 * its memory model, then enable the rx and tx NAPI contexts.
 *
 * Returns 0 on success or a negative errno from xdp_rxq_info
 * registration; on failure nothing is left registered, so the caller
 * only needs to unwind previously enabled queue pairs
 * (via virtnet_disable_queue_pair()).
 */
static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
{
	struct net_device *dev = vi->dev;
	int err;

	err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
			       vi->rq[qp_index].napi.napi_id);
	if (err < 0)
		return err;

	err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
					 MEM_TYPE_PAGE_SHARED, NULL);
	if (err < 0)
		goto err_xdp_reg_mem_model;

	/* NAPI is enabled last, only once XDP setup cannot fail anymore. */
	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);

	return 0;

err_xdp_reg_mem_model:
	/* Undo the xdp_rxq_info_reg() above before reporting failure. */
	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
	return err;
}

static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
Expand All @@ -1880,22 +1912,20 @@ static int virtnet_open(struct net_device *dev)
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);

err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
err = virtnet_enable_queue_pair(vi, i);
if (err < 0)
return err;

err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL);
if (err < 0) {
xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
return err;
}

virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
goto err_enable_qp;
}

return 0;

err_enable_qp:
disable_delayed_refill(vi);
cancel_delayed_work_sync(&vi->refill);

for (i--; i >= 0; i--)
virtnet_disable_queue_pair(vi, i);
return err;
}

static int virtnet_poll_tx(struct napi_struct *napi, int budget)
Expand Down Expand Up @@ -2304,11 +2334,8 @@ static int virtnet_close(struct net_device *dev)
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);

for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_napi_tx_disable(&vi->sq[i].napi);
napi_disable(&vi->rq[i].napi);
xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
}
for (i = 0; i < vi->max_queue_pairs; i++)
virtnet_disable_queue_pair(vi, i);

return 0;
}
Expand Down

0 comments on commit 037768b

Please sign in to comment.