Skip to content

Commit ec94670

Browse files
zeffron authored and Alexei Starovoitov committed
bpf: Support specifying ingress via xdp_md context in BPF_PROG_TEST_RUN
Support specifying the ingress_ifindex and rx_queue_index of xdp_md contexts for BPF_PROG_TEST_RUN. The intended use case is to allow testing XDP programs that make decisions based on the ingress interface or RX queue. If ingress_ifindex is specified, look up the device by the provided index in the current namespace and use its xdp_rxq for the xdp_buff. If the rx_queue_index is out of range, or is non-zero when the ingress_ifindex is 0, return -EINVAL. Co-developed-by: Cody Haas <chaas@riotgames.com> Co-developed-by: Lisa Watanabe <lwatanabe@riotgames.com> Signed-off-by: Cody Haas <chaas@riotgames.com> Signed-off-by: Lisa Watanabe <lwatanabe@riotgames.com> Signed-off-by: Zvi Effron <zeffron@riotgames.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Yonghong Song <yhs@fb.com> Link: https://lore.kernel.org/bpf/20210707221657.3985075-4-zeffron@riotgames.com
1 parent 47316f4 commit ec94670

File tree

1 file changed

+49
-7
lines changed

1 file changed

+49
-7
lines changed

net/bpf/test_run.c

Lines changed: 49 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -690,18 +690,60 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
690690

691691
static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
692692
{
693+
unsigned int ingress_ifindex, rx_queue_index;
694+
struct netdev_rx_queue *rxqueue;
695+
struct net_device *device;
696+
693697
if (!xdp_md)
694698
return 0;
695699

696700
if (xdp_md->egress_ifindex != 0)
697701
return -EINVAL;
698702

699-
if (xdp_md->ingress_ifindex != 0 || xdp_md->rx_queue_index != 0)
703+
ingress_ifindex = xdp_md->ingress_ifindex;
704+
rx_queue_index = xdp_md->rx_queue_index;
705+
706+
if (!ingress_ifindex && rx_queue_index)
700707
return -EINVAL;
701708

702-
xdp->data = xdp->data_meta + xdp_md->data;
709+
if (ingress_ifindex) {
710+
device = dev_get_by_index(current->nsproxy->net_ns,
711+
ingress_ifindex);
712+
if (!device)
713+
return -ENODEV;
714+
715+
if (rx_queue_index >= device->real_num_rx_queues)
716+
goto free_dev;
717+
718+
rxqueue = __netif_get_rx_queue(device, rx_queue_index);
703719

720+
if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
721+
goto free_dev;
722+
723+
xdp->rxq = &rxqueue->xdp_rxq;
724+
/* The device is now tracked in the xdp->rxq for later
725+
* dev_put()
726+
*/
727+
}
728+
729+
xdp->data = xdp->data_meta + xdp_md->data;
704730
return 0;
731+
732+
free_dev:
733+
dev_put(device);
734+
return -EINVAL;
735+
}
736+
737+
static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
738+
{
739+
if (!xdp_md)
740+
return;
741+
742+
xdp_md->data = xdp->data - xdp->data_meta;
743+
xdp_md->data_end = xdp->data_end - xdp->data_meta;
744+
745+
if (xdp_md->ingress_ifindex)
746+
dev_put(xdp->rxq->dev);
705747
}
706748

707749
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
@@ -753,18 +795,18 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
753795

754796
bpf_prog_change_xdp(NULL, prog);
755797
ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
798+
/* We convert the xdp_buff back to an xdp_md before checking the return
799+
* code so the reference count of any held netdevice will be decremented
800+
* even if the test run failed.
801+
*/
802+
xdp_convert_buff_to_md(&xdp, ctx);
756803
if (ret)
757804
goto out;
758805

759806
if (xdp.data_meta != data + headroom ||
760807
xdp.data_end != xdp.data_meta + size)
761808
size = xdp.data_end - xdp.data_meta;
762809

763-
if (ctx) {
764-
ctx->data = xdp.data - xdp.data_meta;
765-
ctx->data_end = xdp.data_end - xdp.data_meta;
766-
}
767-
768810
ret = bpf_test_finish(kattr, uattr, xdp.data_meta, size, retval,
769811
duration);
770812
if (!ret)

0 commit comments

Comments
 (0)