ice: Add support for XDP
Add support for XDP by implementing the ndo_bpf and ndo_xdp_xmit
callbacks. Upon load of an XDP program, allocate additional Tx rings
dedicated to XDP use. The following XDP actions are supported: XDP_TX,
XDP_DROP, XDP_REDIRECT, XDP_PASS, and XDP_ABORTED.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
mfijalko authored and Jeff Kirsher committed Nov 4, 2019
1 parent e75d1b2 commit efc2214
Showing 8 changed files with 825 additions and 57 deletions.
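
For orientation, the ndo_bpf callback referred to in the commit message is
essentially a small dispatch on the requested command: XDP_SETUP_PROG attaches
or detaches the program (allocating or freeing the dedicated XDP Tx rings),
and XDP_QUERY_PROG reports the ID of the currently attached program. A minimal
sketch in the spirit of this patch follows; helper names such as
ice_xdp_setup_prog are assumptions for illustration, not necessarily the exact
driver code.

static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		/* Attach or detach; on attach this is where the extra Tx
		 * rings would be set up via ice_prepare_xdp_rings().
		 */
		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}
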
24 changes: 22 additions & 2 deletions drivers/net/ethernet/intel/ice/ice.h
@@ -29,8 +29,10 @@
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/bpf.h>
#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
#include "ice_devids.h"
@@ -78,8 +80,7 @@ extern const char ice_drv_ver[];

#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \
(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)))
#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)

#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
@@ -282,6 +283,10 @@ struct ice_vsi {
u16 num_rx_desc;
u16 num_tx_desc;
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_ring **xdp_rings; /* XDP ring array */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
} ____cacheline_internodealigned_in_smp;

/* struct that defines an interrupt vector */
@@ -425,6 +430,16 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
return np->vsi->back;
}

static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{
return !!vsi->xdp_prog;
}

static inline void ice_set_ring_xdp(struct ice_ring *ring)
{
ring->flags |= ICE_TX_FLAGS_RING_XDP;
}

/**
* ice_get_main_vsi - Get the PF VSI
* @pf: PF instance
@@ -451,6 +466,11 @@ int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
int ice_destroy_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
28 changes: 27 additions & 1 deletion drivers/net/ethernet/intel/ice/ice_base.c
@@ -198,6 +198,9 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
*/
static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc,
"XDP ring can't belong to TC other than 0");

/* Idea here for calculation is that we subtract the number of queue
* count from TC that ring belongs to from its absolute queue index
* and as a result we get the queue's index within TC.
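
In other words, the queue handle is the ring's absolute queue index minus the
index of the first queue owned by its TC. Assuming the per-TC offset lives in
vsi->tc_cfg.tc_info[tc].qoffset (as elsewhere in the driver), the body
presumably reduces to something like:

	/* Illustrative numbers: TC 1 owning absolute queues 8..15 has an
	 * offset of 8, so a ring with q_index 10 gets handle 10 - 8 = 2,
	 * i.e. the third queue within TC 1.
	 */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
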
@@ -287,14 +290,30 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* clear the context structure first */
memset(&rlan_ctx, 0, sizeof(rlan_ctx));

ring->rx_buf_len = vsi->rx_buf_len;

if (ring->vsi->type == ICE_VSI_PF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index);

err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL);
if (err)
return err;
}
/* Receive Queue Base Address.
* Indicates the starting address of the descriptor queue defined in
* 128 Byte units.
*/
rlan_ctx.base = ring->dma >> 7;

rlan_ctx.qlen = ring->count;

/* Receive Packet Data Buffer Size.
* The Packet Data Buffer Size is defined in 128 byte units.
*/
rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

/* use 32 byte descriptors */
rlan_ctx.dsize = 1;
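
Both the base address and the buffer size above are programmed in 128-byte
units, which is what the shifts accomplish (ICE_RLAN_CTX_DBUF_S is assumed to
be 7 here, since 2^7 = 128). An illustrative helper, not driver code:

/* Convert a byte count into the 128-byte units used by the Rx queue
 * context: a 2048-byte buffer becomes 2048 >> 7 = 16 units, and the same
 * shift turns ring->dma into the 128-byte-granular base field.
 */
static inline u32 ice_bytes_to_rx_ctx_units(u32 bytes)
{
	return bytes >> 7;
}
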
@@ -657,6 +676,13 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
if (ice_is_xdp_ena_vsi(vsi)) {
u32 xdp_txq = txq + vsi->num_xdp_txq;

wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
val);
}
ice_flush(hw);
}

/**
50 changes: 47 additions & 3 deletions drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -2577,6 +2577,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_ring *xdp_rings = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
@@ -2624,6 +2625,11 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
vsi->tx_rings[i]->count = new_tx_cnt;
for (i = 0; i < vsi->alloc_rxq; i++)
vsi->rx_rings[i]->count = new_rx_cnt;
if (ice_is_xdp_ena_vsi(vsi))
for (i = 0; i < vsi->num_xdp_txq; i++)
vsi->xdp_rings[i]->count = new_tx_cnt;
vsi->num_tx_desc = new_tx_cnt;
vsi->num_rx_desc = new_rx_cnt;
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
@@ -2650,15 +2656,43 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
tx_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
while (i) {
i--;
while (i--)
ice_clean_tx_ring(&tx_rings[i]);
}
devm_kfree(&pf->pdev->dev, tx_rings);
goto done;
}
}

if (!ice_is_xdp_ena_vsi(vsi))
goto process_rx;

/* alloc updated XDP resources */
netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n",
vsi->xdp_rings[0]->count, new_tx_cnt);

xdp_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_xdp_txq,
sizeof(*xdp_rings), GFP_KERNEL);
if (!xdp_rings) {
err = -ENOMEM;
goto free_tx;
}

for (i = 0; i < vsi->num_xdp_txq; i++) {
/* clone ring and setup updated count */
xdp_rings[i] = *vsi->xdp_rings[i];
xdp_rings[i].count = new_tx_cnt;
xdp_rings[i].desc = NULL;
xdp_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&xdp_rings[i]);
if (err) {
while (i--)
ice_clean_tx_ring(&xdp_rings[i]);
devm_kfree(&pf->pdev->dev, xdp_rings);
goto free_tx;
}
ice_set_ring_xdp(&xdp_rings[i]);
}

process_rx:
if (new_rx_cnt == vsi->rx_rings[0]->count)
goto process_link;
@@ -2737,6 +2771,16 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
devm_kfree(&pf->pdev->dev, rx_rings);
}

if (xdp_rings) {
for (i = 0; i < vsi->num_xdp_txq; i++) {
ice_free_tx_ring(vsi->xdp_rings[i]);
*vsi->xdp_rings[i] = xdp_rings[i];
}
devm_kfree(&pf->pdev->dev, xdp_rings);
}

vsi->num_tx_desc = new_tx_cnt;
vsi->num_rx_desc = new_rx_cnt;
ice_up(vsi);
}
goto done;
68 changes: 59 additions & 9 deletions drivers/net/ethernet/intel/ice/ice_lib.c
@@ -46,7 +46,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->rx_rings)
goto err_rings;

vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
/* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
vsi->txq_map = devm_kcalloc(&pf->pdev->dev, (2 * vsi->alloc_txq),
sizeof(*vsi->txq_map), GFP_KERNEL);

if (!vsi->txq_map)
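
Doubling txq_map reserves one extra slot per LAN Tx queue, so the XDP Tx queue
paired with LAN queue i can be found at a fixed offset; this matches the
txq + vsi->num_xdp_txq indexing used when programming QINT_TQCTL elsewhere in
this patch (num_xdp_txq is set to alloc_txq when the XDP rings are prepared).
A hypothetical helper, not part of the patch, makes the layout explicit:

/* Illustrative only: LAN Tx queue i occupies txq_map[i]; its paired XDP
 * Tx queue occupies txq_map[i + vsi->num_xdp_txq] in the doubled map.
 */
static inline u16 ice_xdp_txq_from_lan_txq(struct ice_vsi *vsi, u16 lan_txq)
{
	return vsi->txq_map[lan_txq + vsi->num_xdp_txq];
}
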
@@ -1183,6 +1184,20 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
return err;
}

/**
* ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
* @vsi: VSI
*/
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
vsi->max_frame = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
else
vsi->max_frame = ICE_RXBUF_2048;

vsi->rx_buf_len = ICE_RXBUF_2048;
}

/**
* ice_vsi_cfg_rxqs - Configure the VSI for Rx
* @vsi: the VSI being configured
@@ -1197,13 +1212,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_VF)
goto setup_rings;

if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
vsi->max_frame = vsi->netdev->mtu +
ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
else
vsi->max_frame = ICE_RXBUF_2048;

vsi->rx_buf_len = ICE_RXBUF_2048;
ice_vsi_cfg_frame_size(vsi);
setup_rings:
/* set up individual rings */
for (i = 0; i < vsi->num_rxq; i++) {
@@ -1265,6 +1274,18 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
}

/**
* ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
* @vsi: the VSI being configured
*
* Return 0 on success and a negative value on error
* Configure the Tx queues dedicated for XDP in given VSI for operation.
*/
int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
{
return ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
}

/**
* ice_intrl_usec_to_reg - convert interrupt rate limit to register value
* @intrl: interrupt rate limit in usecs
@@ -1488,6 +1509,15 @@
return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
}

/**
* ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
* @vsi: the VSI being configured
*/
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
{
return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
}

/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
@@ -1885,6 +1915,11 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
for (q = 0; q < q_vector->num_ring_tx; q++) {
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
if (ice_is_xdp_ena_vsi(vsi)) {
u32 xdp_txq = txq + vsi->num_xdp_txq;

wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
}
txq++;
}

@@ -2259,6 +2294,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
vsi->base_vector = 0;
}

if (ice_is_xdp_ena_vsi(vsi))
/* return value check can be skipped here, it always returns
* 0 if reset is in progress
*/
ice_destroy_xdp_rings(vsi);
ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
@@ -2299,6 +2339,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
goto err_vectors;

ice_vsi_map_rings_to_vectors(vsi);
if (ice_is_xdp_ena_vsi(vsi)) {
vsi->num_xdp_txq = vsi->alloc_txq;
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
if (ret)
goto err_vectors;
}
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value
@@ -2325,9 +2371,13 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
}

/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
for (i = 0; i < vsi->tc_cfg.numtc; i++) {
max_txqs[i] = vsi->alloc_txq;

if (ice_is_xdp_ena_vsi(vsi))
max_txqs[i] += vsi->num_xdp_txq;
}

status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
6 changes: 6 additions & 0 deletions drivers/net/ethernet/intel/ice/ice_lib.h
@@ -36,6 +36,10 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);

int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);

int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);

int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);

void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -79,6 +83,8 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);

int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);

void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);

u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);

char *ice_nvm_version_str(struct ice_hw *hw);