ice: split ice_ring into separate Tx/Rx structs
While it was convenient to have a generic ring structure that served
both Tx and Rx sides, upcoming commits are going to introduce several
Tx-specific fields, so to avoid bloating the Rx side, split the
generic ring into new ice_tx_ring and ice_rx_ring structs.
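
An abbreviated sketch of the resulting split (illustrative only; the
real structs in this patch carry many more fields):

    /* illustrative subset of the fields, not the full layout */
    struct ice_tx_ring {
            struct ice_tx_ring *next;       /* next ring in q_vector */
            void *desc;                     /* descriptor ring memory */
            struct ice_vsi *vsi;            /* backreference to VSI */
            struct ice_q_vector *q_vector;  /* backreference to q_vector */
            u16 q_index;                    /* queue index */
            u16 count;                      /* number of descriptors */
            u8 dcb_tc;                      /* traffic class of ring */
    };

    struct ice_rx_ring {
            struct ice_rx_ring *next;
            void *desc;
            struct ice_vsi *vsi;
            struct ice_q_vector *q_vector;
            struct xdp_rxq_info xdp_rxq;    /* ends up on its own cacheline */
            u16 q_index;
            u16 count;
    };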

The Rx ring could have kept the old ice_ring name, which would reduce
the code churn within this patch, but that would make things
asymmetric.

Make a union out of the ring pointer within the ring container in
ice_q_vector so that it is possible to iterate over the newly
introduced ice_tx_ring.
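
A minimal sketch of that container change (abbreviated; the real
struct also carries ITR state), together with the kind of typed
iteration it enables:

    struct ice_ring_container {
            /* union keeps the layout while giving iterating code a
             * properly typed ring pointer
             */
            union {
                    struct ice_rx_ring *rx_ring;
                    struct ice_tx_ring *tx_ring;
            };
            /* ... ITR bookkeeping ... */
    };

    #define ice_for_each_tx_ring(pos, head) \
            for (pos = (head).tx_ring; pos; pos = pos->next)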

Remove the @size field as it is only accessed from the control path
and can be calculated easily.
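
For reference, a sketch of how the removed @size can be recomputed on
demand (the ALIGN rounding and the descriptor type name here are
assumptions, not taken from this patch):

    /* sketch: ring size in bytes, derived from the descriptor count */
    size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), PAGE_SIZE);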

Change the definitions of ice_update_ring_stats and
ice_fetch_u64_stats_per_ring so that they are ring-agnostic and can
be used for both Rx and Tx rings.
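
One plausible ring-agnostic shape for the update helper, assuming
both ring structs embed the same stats and u64_stats_sync members (a
sketch, not necessarily the exact signature used here):

    static void
    ice_update_ring_stats(struct ice_q_stats *stats,
                          struct u64_stats_sync *syncp,
                          u64 pkts, u64 bytes)
    {
            u64_stats_update_begin(syncp);
            stats->pkts += pkts;
            stats->bytes += bytes;
            u64_stats_update_end(syncp);
    }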

The resulting Rx and Tx ring structs are 256 and 192 bytes,
respectively. Within the Rx ring, xdp_rxq_info occupies its own
cacheline, so that is now the major difference.
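
Sizes and cacheline placement like the above can be verified with
pahole against the built object, e.g.:

    pahole -C ice_rx_ring drivers/net/ethernet/intel/ice/ice.o
    pahole -C ice_tx_ring drivers/net/ethernet/intel/ice/ice.o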

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
mfijalko authored and intel-lab-lkp committed Aug 14, 2021
1 parent 8811b5b commit 945abe6
Showing 19 changed files with 375 additions and 293 deletions.
33 changes: 25 additions & 8 deletions drivers/net/ethernet/intel/ice/ice.h
@@ -265,8 +265,8 @@ struct ice_vsi {
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
struct ice_port_info *port_info; /* back pointer to port_info */
-struct ice_ring **rx_rings; /* Rx ring array */
-struct ice_ring **tx_rings; /* Tx ring array */
+struct ice_rx_ring **rx_rings; /* Rx ring array */
+struct ice_tx_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */

irqreturn_t (*irq_handler)(int irq, void *data);
@@ -343,7 +343,7 @@ struct ice_vsi {
u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
-struct ice_ring **xdp_rings; /* XDP ring array */
+struct ice_tx_ring **xdp_rings; /* XDP ring array */
unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -555,25 +555,42 @@ static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
return !!vsi->xdp_prog;
}

-static inline void ice_set_ring_xdp(struct ice_ring *ring)
+static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
ring->flags |= ICE_TX_FLAGS_RING_XDP;
}

/**
* ice_xsk_pool - get XSK buffer pool bound to a ring
-* @ring: ring to use
+* @ring: Rx ring to use
*
* Returns a pointer to xdp_umem structure if there is a buffer pool present,
* NULL otherwise.
*/
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;

-if (ice_ring_is_xdp(ring))
-qid -= vsi->num_xdp_txq;
if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
return NULL;

return xsk_get_pool_from_qid(vsi->netdev, qid);
}

+/**
+ * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: Tx ring to use
+ *
+ * Returns a pointer to xdp_umem structure if there is a buffer pool present,
+ * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ */
+static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+{
+struct ice_vsi *vsi = ring->vsi;
+u16 qid;
+
+qid = ring->q_index - vsi->num_xdp_txq;
+
+if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+return NULL;
49 changes: 25 additions & 24 deletions drivers/net/ethernet/intel/ice/ice_base.c
@@ -148,7 +148,8 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
struct ice_q_vector *q_vector;
struct ice_pf *pf = vsi->back;
-struct ice_ring *ring;
+struct ice_tx_ring *tx_ring;
+struct ice_rx_ring *rx_ring;
struct device *dev;

dev = ice_pf_to_dev(pf);
@@ -158,10 +159,10 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
}
q_vector = vsi->q_vectors[v_idx];

-ice_for_each_ring(ring, q_vector->tx)
-ring->q_vector = NULL;
-ice_for_each_ring(ring, q_vector->rx)
-ring->q_vector = NULL;
+ice_for_each_tx_ring(tx_ring, q_vector->tx)
+tx_ring->q_vector = NULL;
+ice_for_each_rx_ring(rx_ring, q_vector->rx)
+rx_ring->q_vector = NULL;

/* only VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
@@ -208,7 +209,7 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
* @ring: ring to get the absolute queue index
* @tc: traffic class number
*/
-static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
+static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");

@@ -226,7 +227,7 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
* This enables/disables XPS for a given Tx descriptor ring
* based on the TCs enabled for the VSI that ring belongs to.
*/
-static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
+static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
{
if (!ring->q_vector || !ring->netdev)
return;
@@ -248,7 +249,7 @@ static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
* Configure the Tx descriptor ring in TLAN context.
*/
static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
struct ice_vsi *vsi = ring->vsi;
struct ice_hw *hw = &vsi->back->hw;
@@ -260,7 +261,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
/* Transmit Queue Length */
tlan_ctx->qlen = ring->count;

-ice_set_cgd_num(tlan_ctx, ring);
+ice_set_cgd_num(tlan_ctx, ring->dcb_tc);

/* PF number */
tlan_ctx->pf_num = hw->pf_id;
@@ -314,7 +315,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*
* Returns the offset value for ring into the data buffer.
*/
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
if (ice_ring_uses_build_skb(rx_ring))
return ICE_SKB_PAD;
@@ -330,7 +331,7 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
*
* Configure the Rx descriptor ring in RLAN context.
*/
-static int ice_setup_rx_ctx(struct ice_ring *ring)
+static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
int chain_len = ICE_MAX_CHAINED_RX_BUFS;
struct ice_vsi *vsi = ring->vsi;
@@ -441,7 +442,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
*
* Return 0 on success and a negative value on error.
*/
-int ice_vsi_cfg_rxq(struct ice_ring *ring)
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
u16 num_bufs = ICE_DESC_UNUSED(ring);
@@ -662,33 +663,33 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v;
-q_vector->tx.ring = NULL;
+q_vector->tx.tx_ring = NULL;
q_vector->tx.itr_idx = ICE_TX_ITR;
q_base = vsi->num_txq - tx_rings_rem;

for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
-struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];

tx_ring->q_vector = q_vector;
-tx_ring->next = q_vector->tx.ring;
-q_vector->tx.ring = tx_ring;
+tx_ring->next = q_vector->tx.tx_ring;
+q_vector->tx.tx_ring = tx_ring;
}
tx_rings_rem -= tx_rings_per_v;

/* Rx rings mapping to vector */
rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v;
-q_vector->rx.ring = NULL;
+q_vector->rx.rx_ring = NULL;
q_vector->rx.itr_idx = ICE_RX_ITR;
q_base = vsi->num_rxq - rx_rings_rem;

for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
-struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

rx_ring->q_vector = q_vector;
-rx_ring->next = q_vector->rx.ring;
-q_vector->rx.ring = rx_ring;
+rx_ring->next = q_vector->rx.rx_ring;
+q_vector->rx.rx_ring = rx_ring;
}
rx_rings_rem -= rx_rings_per_v;
}
@@ -713,7 +714,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @qg_buf: queue group buffer
*/
int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
u8 buf_len = struct_size(qg_buf, txqs, 1);
@@ -748,7 +749,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
/* Add unique software queue handle of the Tx queue per
* TC into the VSI Tx ring
*/
-ring->q_handle = ice_calc_q_handle(vsi, ring, tc);
+ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);

status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
1, qg_buf, buf_len, NULL);
@@ -872,7 +873,7 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
*/
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
-u16 rel_vmvf_num, struct ice_ring *ring,
+u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
struct ice_pf *pf = vsi->back;
@@ -929,7 +930,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* are needed for stopping Tx queue
*/
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
u8 tc;
8 changes: 4 additions & 4 deletions drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,7 @@

#include "ice.h"

-int ice_vsi_cfg_rxq(struct ice_ring *ring);
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -15,7 +15,7 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf);
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
void
@@ -25,9 +25,9 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
-u16 rel_vmvf_num, struct ice_ring *ring,
+u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
#endif /* _ICE_BASE_H_ */
5 changes: 3 additions & 2 deletions drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -194,7 +194,8 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
*/
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
{
-struct ice_ring *tx_ring, *rx_ring;
+struct ice_tx_ring *tx_ring;
+struct ice_rx_ring *rx_ring;
u16 qoffset, qcount;
int i, n;

@@ -814,7 +815,7 @@ void ice_update_dcb_stats(struct ice_pf *pf)
* tag will already be configured with the correct ID and priority bits
*/
void
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
10 changes: 5 additions & 5 deletions drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -28,7 +28,7 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
void
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first);
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
@@ -49,9 +49,9 @@ static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q)
}

static inline void
-ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
+ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc)
{
-tlan_ctx->cgd_num = ring->dcb_tc;
+tlan_ctx->cgd_num = dcb_tc;
}

static inline bool ice_is_dcb_active(struct ice_pf *pf)
@@ -95,7 +95,7 @@ ice_pf_dcb_cfg(struct ice_pf __always_unused *pf,
}

static inline int
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring __always_unused *tx_ring,
struct ice_tx_buf __always_unused *first)
{
return 0;
@@ -119,6 +119,6 @@ static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) { }
-static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { }
+static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
