Merge branch 'xdp-redirect'
John Fastabend says:

====================
Implement XDP bpf_redirect

This series adds two new XDP helper routines, bpf_redirect() and
bpf_redirect_map(). The first variant, bpf_redirect(), is meant
to be used the same way it is currently used by the cls_bpf
classifier: an XDP packet is redirected immediately when the
helper is called.

The other variant, bpf_redirect_map(map, key, flags), uses a new
map type called devmap. A devmap uses integers as keys and
net_devices as values. The user provides key/ifindex pairs to
populate the map with net_devices. This gives two benefits over
the plain bpf_redirect() variant. First, the datapath BPF program
is abstracted away from hard-coded ifindex values, allowing a
single BPF program to run in many different environments. Second,
and perhaps more important, the map enables batching packet
transmits. The map, plus small driver changes, allows all send
requests across a NAPI poll loop to be batched, so driver writers
can optimize the driver xmit path and call expensive operations
only once for a batch of xdp_buffs.
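
For reference, here is a minimal user-space sketch of the map setup described
above. It is an illustration only, not code from this series: it uses the raw
bpf(2) syscall instead of the samples' helper wrappers, and the map size and
the egress ifindex value 4 are arbitrary placeholders.

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Create a devmap and point key 0 at an egress netdevice. */
int main(void)
{
	union bpf_attr attr;
	__u32 key = 0, ifindex = 4;	/* placeholder egress device */
	int map_fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_DEVMAP;
	attr.key_size    = sizeof(__u32);
	attr.value_size  = sizeof(__u32);	/* devmap values are ifindexes */
	attr.max_entries = 64;
	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (map_fd < 0)
		return 1;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (__u64)(unsigned long)&key;
	attr.value  = (__u64)(unsigned long)&ifindex;
	attr.flags  = BPF_ANY;
	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)) ? 1 : 0;
}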

The devmap was designed to support possible future work for
multicast and broadcast as follow-up patches.

To see in more detail how to leverage the new helpers and
map from the userspace side, please review these two patches
(a minimal sketch of the BPF program side follows the list):

  xdp: sample program for new bpf_redirect helper
  xdp: bpf redirect with map sample program
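
As a sketch of what the program side looks like, roughly in the style of
those samples (the map name tx_port, the section names, and the fixed key 0
are illustrative rather than taken verbatim from the series; it assumes
samples/bpf's bpf_helpers.h, which the sample patches extend with a
bpf_redirect_map() wrapper):

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") tx_port = {
	.type		= BPF_MAP_TYPE_DEVMAP,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),
	.max_entries	= 64,
};

SEC("xdp_redirect_map")
int xdp_redirect_map_prog(struct xdp_md *ctx)
{
	/* No hard-coded ifindex here: the devmap entry at key 0, filled in
	 * by user space, decides which device the packet is sent out on.
	 */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";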

Performance numbers provided by Jesper are the following, tested
using the ixgbe driver with CPU E5-1650 v4 @ 3.60GHz:

13,939,674 pkt/s = XDP_DROP without touching memory
14,290,650 pkt/s = xdp1: XDP_DROP with reading packet data
13,221,812 pkt/s = xdp2: XDP_TX   with swap mac (writes into pkt)
 7,596,576 pkt/s = xdp_redirect:    XDP_REDIRECT with swap mac (like XDP_TX)
13,058,435 pkt/s = xdp_redirect_map:XDP_REDIRECT with swap mac + devmap

A big thanks to everyone who helped with this series. Jesper
provided fixes, debugging, code review, and performance benchmarks!
Daniel provided lots of useful feedback and code review. And last
but not least, Andy provided useful feedback related to supporting
additional drivers, the generic XDP implementation, testing, etc.
Any other feedback is welcome, but I believe at this point these
are ready to be merged!

What's left... get the rest of the driver developers to implement
this in all the drivers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
davem330 committed Jul 17, 2017
2 parents ff65fa6 + 9d6e005 commit 6093ec2
Showing 20 changed files with 1,282 additions and 101 deletions.
8 changes: 6 additions & 2 deletions drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1018,8 +1018,12 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
struct ixgbe_ring *ring;

-ixgbe_for_each_ring(ring, q_vector->tx)
-	adapter->tx_ring[ring->queue_index] = NULL;
+ixgbe_for_each_ring(ring, q_vector->tx) {
+	if (ring_is_xdp(ring))
+		adapter->xdp_ring[ring->queue_index] = NULL;
+	else
+		adapter->tx_ring[ring->queue_index] = NULL;
+}

ixgbe_for_each_ring(ring, q_vector->rx)
adapter->rx_ring[ring->queue_index] = NULL;
63 changes: 62 additions & 1 deletion drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2214,7 +2214,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
struct xdp_buff *xdp)
{
-int result = IXGBE_XDP_PASS;
+int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
u32 act;

@@ -2231,6 +2231,13 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
case XDP_TX:
result = ixgbe_xmit_xdp_ring(adapter, xdp);
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
if (!err)
result = IXGBE_XDP_TX;
else
result = IXGBE_XDP_CONSUMED;
break;
default:
bpf_warn_invalid_xdp_action(act);
/* fallthrough */
@@ -2408,6 +2415,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
wmb();
writel(ring->next_to_use, ring->tail);

xdp_do_flush_map();
}

u64_stats_update_begin(&rx_ring->syncp);
@@ -5810,6 +5819,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)

usleep_range(10000, 20000);

/* synchronize_sched() needed for pending XDP buffers to drain */
if (adapter->xdp_ring[0])
synchronize_sched();
netif_tx_stop_all_queues(netdev);

/* call carrier off first to avoid false dev_watchdog timeouts */
@@ -9823,6 +9835,53 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
}
}

static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_ring *ring;
int err;

if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
return -EINVAL;

/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
if (unlikely(!ring))
return -EINVAL;

err = ixgbe_xmit_xdp_ring(adapter, xdp);
if (err != IXGBE_XDP_TX)
return -ENOMEM;

return 0;
}

static void ixgbe_xdp_flush(struct net_device *dev)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_ring *ring;

/* It's possible the device went down between xdp xmit and flush so
* we need to ensure device is still up.
*/
if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
return;

ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
if (unlikely(!ring))
return;

/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*/
wmb();
writel(ring->next_to_use, ring->tail);

return;
}

static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
@@ -9869,6 +9928,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
.ndo_features_check = ixgbe_features_check,
.ndo_xdp = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
.ndo_xdp_flush = ixgbe_xdp_flush,
};

/**
5 changes: 5 additions & 0 deletions include/linux/bpf.h
@@ -379,4 +379,9 @@ extern const struct bpf_func_proto bpf_get_stackid_proto;
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/* Map specifics */
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);

#endif /* _LINUX_BPF_H */
3 changes: 3 additions & 0 deletions include/linux/bpf_types.h
@@ -35,3 +35,6 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
#ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
#endif
14 changes: 14 additions & 0 deletions include/linux/filter.h
@@ -711,7 +711,21 @@ bool bpf_helper_changes_pkt_data(void *func);

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);

/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
* same cpu context. Further, for best results, no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
* because we only track one map and force a flush when the map changes.
* This does not appear to be a real limitation for existing software.
*/
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb);
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *prog);
void xdp_do_flush_map(void);

void bpf_warn_invalid_xdp_action(u32 act);
void bpf_warn_invalid_xdp_redirect(u32 ifindex);

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
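
The ixgbe changes above are a real implementation of this contract. Distilled
into a generic sketch (the mydrv_* helpers are hypothetical, standing in for a
driver's own receive path), the pairing inside a NAPI poll routine looks
roughly like this:

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct bpf_prog *prog = mydrv_xdp_prog(napi);	/* hypothetical accessor */
	int done = 0;

	while (done < budget && mydrv_rx_pending(napi)) {	/* hypothetical */
		struct xdp_buff xdp;

		mydrv_fill_xdp_buff(napi, &xdp);		/* hypothetical */
		switch (bpf_prog_run_xdp(prog, &xdp)) {
		case XDP_REDIRECT:
			/* per packet, on this CPU: enqueue to the devmap target */
			xdp_do_redirect(napi->dev, &xdp, prog);
			break;
		default:
			/* XDP_PASS/XDP_TX/XDP_DROP handling elided */
			break;
		}
		done++;
	}

	/* once per poll, on the same CPU: triggers ndo_xdp_flush() on every
	 * device the map enqueued packets to during this loop
	 */
	xdp_do_flush_map();
	return done;
}
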
11 changes: 10 additions & 1 deletion include/linux/netdevice.h
@@ -66,6 +66,7 @@ struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct bpf_prog;
struct xdp_buff;

void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -1138,7 +1139,12 @@ struct xfrmdev_ops {
* int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
* This function is used to set or query state related to XDP on the
* netdevice. See definition of enum xdp_netdev_command for details.
*
* int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp);
* This function is used to submit an XDP packet for transmit on a
* netdevice.
* void (*ndo_xdp_flush)(struct net_device *dev);
* This function is used to inform the driver to flush a particular
* xdp tx queue. Must be called on the same CPU as xdp_xmit.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1323,6 +1329,9 @@
int needed_headroom);
int (*ndo_xdp)(struct net_device *dev,
struct netdev_xdp *xdp);
int (*ndo_xdp_xmit)(struct net_device *dev,
struct xdp_buff *xdp);
void (*ndo_xdp_flush)(struct net_device *dev);
};

/**
31 changes: 30 additions & 1 deletion include/trace/events/xdp.h
@@ -12,7 +12,8 @@
FN(ABORTED) \
FN(DROP) \
FN(PASS) \
-FN(TX)
+FN(TX) \
+FN(REDIRECT)

#define __XDP_ACT_TP_FN(x) \
TRACE_DEFINE_ENUM(XDP_##x);
@@ -48,6 +49,34 @@ TRACE_EVENT(xdp_exception,
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB))
);

TRACE_EVENT(xdp_redirect,

TP_PROTO(const struct net_device *from,
const struct net_device *to,
const struct bpf_prog *xdp, u32 act),

TP_ARGS(from, to, xdp, act),

TP_STRUCT__entry(
__string(name_from, from->name)
__string(name_to, to->name)
__array(u8, prog_tag, 8)
__field(u32, act)
),

TP_fast_assign(
BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag));
memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag));
__assign_str(name_from, from->name);
__assign_str(name_to, to->name);
__entry->act = act;
),

TP_printk("prog=%s from=%s to=%s action=%s",
__print_hex_str(__entry->prog_tag, 8),
__get_str(name_from), __get_str(name_to),
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB))
);
#endif /* _TRACE_XDP_H */

#include <trace/define_trace.h>
10 changes: 9 additions & 1 deletion include/uapi/linux/bpf.h
@@ -104,6 +104,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_LPM_TRIE,
BPF_MAP_TYPE_ARRAY_OF_MAPS,
BPF_MAP_TYPE_HASH_OF_MAPS,
BPF_MAP_TYPE_DEVMAP,
};

enum bpf_prog_type {
@@ -347,6 +348,11 @@ union bpf_attr {
* @flags: bit 0 - if set, redirect to ingress instead of egress
* other bits - reserved
* Return: TC_ACT_REDIRECT
* int bpf_redirect_map(map, key, flags)
* redirect to endpoint in map
* @map: fd of map to do lookup in
* @key: index in map to lookup
* @flags: --
*
* u32 bpf_get_route_realm(skb)
* retrieve a dst's tclassid
@@ -591,7 +597,8 @@ union bpf_attr {
FN(get_socket_uid), \
FN(set_hash), \
FN(setsockopt), \
-FN(skb_adjust_room),
+FN(skb_adjust_room), \
+FN(redirect_map),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -717,6 +724,7 @@ enum xdp_action {
XDP_DROP,
XDP_PASS,
XDP_TX,
XDP_REDIRECT,
};

/* user accessible metadata for XDP packet hook
3 changes: 3 additions & 0 deletions kernel/bpf/Makefile
@@ -2,6 +2,9 @@ obj-y := core.o

obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_BPF_SYSCALL) += devmap.o
endif
ifeq ($(CONFIG_PERF_EVENTS),y)
obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
endif