Skip to content

Commit

Permalink
net/bnxt: fix missing barriers in completion handling
Browse files Browse the repository at this point in the history
[ upstream commit 5ed30db ]

Ensure that Rx/Tx/Async completion entry fields are accessed
only after the completion's valid flag has been loaded and
verified, by inserting an acquire fence after the valid check.
This is needed for correct operation on systems that use
relaxed memory consistency models.

Fixes: 2eb53b1 ("net/bnxt: add initial Rx code")
Fixes: 6eb3cc2 ("net/bnxt: add initial Tx code")

Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
  • Loading branch information
Lance Richardson authored and bluca committed Jul 26, 2021
1 parent f58d25a commit 5584a03
Show file tree
Hide file tree
Showing 7 changed files with 53 additions and 21 deletions.
36 changes: 32 additions & 4 deletions drivers/net/bnxt/bnxt_cpr.h
Expand Up @@ -8,13 +8,10 @@
#include <stdbool.h>

#include <rte_io.h>
#include "hsi_struct_def_dpdk.h"

struct bnxt_db_info;

#define CMP_VALID(cmp, raw_cons, ring) \
(!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) & \
CMPL_BASE_V) == !((raw_cons) & ((ring)->ring_size)))

#define CMPL_VALID(cmp, v) \
(!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) & \
CMPL_BASE_V) == !(v))
Expand Down Expand Up @@ -131,4 +128,35 @@ bool bnxt_is_recovery_enabled(struct bnxt *bp);
bool bnxt_is_master_func(struct bnxt *bp);

void bnxt_stop_rxtx(struct bnxt *bp);

/**
 * Test whether a completion ring entry is valid (i.e. owned by software).
 *
 * When the entry is valid, an __ATOMIC_ACQUIRE thread fence is issued so
 * that neither the compiler nor the CPU can reorder subsequent loads of
 * the completion's fields ahead of the load of the "valid" bit. Required
 * for correctness on weakly-ordered architectures.
 *
 * Note: callers must not read any other field of the completion entry
 * before this function has returned true.
 *
 * @param cmpl
 *   Pointer to an entry in the completion ring.
 * @param raw_cons
 *   Raw consumer index of entry in completion ring.
 * @param ring_size
 *   Size of completion ring.
 */
static __rte_always_inline bool
bnxt_cpr_cmp_valid(const void *cmpl, uint32_t raw_cons, uint32_t ring_size)
{
	const struct cmpl_base *entry = cmpl;
	bool hw_valid_bit, sw_expected_bit;

	/* Expected phase flips each time the raw consumer wraps the ring. */
	sw_expected_bit = !(raw_cons & ring_size);
	hw_valid_bit = !!(rte_le_to_cpu_32(entry->info3_v) & CMPL_BASE_V);

	if (hw_valid_bit != sw_expected_bit)
		return false;

	/* Entry is valid: fence before any further loads from it. */
	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
	return true;
}
#endif
16 changes: 8 additions & 8 deletions drivers/net/bnxt/bnxt_ethdev.c
Expand Up @@ -3041,7 +3041,7 @@ bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt_cp_ring_info *cpr;
uint32_t desc = 0, raw_cons;
uint32_t desc = 0, raw_cons, cp_ring_size;
struct bnxt_rx_queue *rxq;
struct rx_pkt_cmpl *rxcmp;
int rc;
Expand All @@ -3053,14 +3053,15 @@ bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxq = dev->data->rx_queues[rx_queue_id];
cpr = rxq->cp_ring;
raw_cons = cpr->cp_raw_cons;
cp_ring_size = cpr->cp_ring_struct->ring_size;

while (1) {
uint32_t agg_cnt, cons, cmpl_type;

cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
break;

cmpl_type = CMP_TYPE(rxcmp);
Expand Down Expand Up @@ -3104,7 +3105,7 @@ bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
struct bnxt_rx_queue *rxq = rx_queue;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
uint32_t desc, raw_cons;
uint32_t desc, raw_cons, cp_ring_size;
struct bnxt *bp = rxq->bp;
struct rx_pkt_cmpl *rxcmp;
int rc;
Expand All @@ -3118,6 +3119,7 @@ bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)

rxr = rxq->rx_ring;
cpr = rxq->cp_ring;
cp_ring_size = cpr->cp_ring_struct->ring_size;

/*
* For the vector receive case, the completion at the requested
Expand All @@ -3134,7 +3136,7 @@ bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

if (CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
return RTE_ETH_RX_DESC_DONE;

/* Check whether rx desc has an mbuf attached. */
Expand All @@ -3160,7 +3162,7 @@ bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
break;

cmpl_type = CMP_TYPE(rxcmp);
Expand Down Expand Up @@ -3214,7 +3216,6 @@ bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
uint32_t ring_mask, raw_cons, nb_tx_pkts = 0;
struct bnxt_ring *cp_ring_struct;
struct cmpl_base *cp_desc_ring;
int rc;

Expand All @@ -3231,7 +3232,6 @@ bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)

raw_cons = cpr->cp_raw_cons;
cp_desc_ring = cpr->cp_desc_ring;
cp_ring_struct = cpr->cp_ring_struct;
ring_mask = cpr->cp_ring_struct->ring_mask;

/* Check to see if hw has posted a completion for the descriptor. */
Expand All @@ -3242,7 +3242,7 @@ bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;

if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
Expand Down
7 changes: 4 additions & 3 deletions drivers/net/bnxt/bnxt_irq.c
Expand Up @@ -21,10 +21,10 @@ void bnxt_int_handler(void *param)
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
struct bnxt *bp = eth_dev->data->dev_private;
uint32_t cons, raw_cons, cp_ring_size;
struct bnxt_cp_ring_info *cpr;
struct cmpl_base *cmp;
uint32_t raw_cons;
uint32_t cons;


if (bp == NULL)
return;
Expand All @@ -33,6 +33,7 @@ void bnxt_int_handler(void *param)
return;

raw_cons = cpr->cp_raw_cons;
cp_ring_size = cpr->cp_ring_struct->ring_size;
pthread_mutex_lock(&bp->def_cp_lock);
while (1) {
if (!cpr || !cpr->cp_ring_struct || !cpr->cp_db.doorbell) {
Expand All @@ -48,7 +49,7 @@ void bnxt_int_handler(void *param)
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
cmp = &cpr->cp_desc_ring[cons];

if (!CMP_VALID(cmp, raw_cons, cpr->cp_ring_struct))
if (!bnxt_cpr_cmp_valid(cmp, raw_cons, cp_ring_size))
break;

bnxt_event_hwrm_resp_handler(bp, cmp);
Expand Down
9 changes: 6 additions & 3 deletions drivers/net/bnxt/bnxt_rxr.c
Expand Up @@ -242,7 +242,8 @@ static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
cpr->valid = FLIP_VALID(raw_cp_cons,
cpr->cp_ring_struct->ring_mask,
cpr->valid);
return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
return bnxt_cpr_cmp_valid(agg_cmpl, raw_cp_cons,
cpr->cp_ring_struct->ring_size);
}

/* TPA consume agg buffer out of order, allocate connected data only */
Expand Down Expand Up @@ -827,7 +828,8 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];

if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
if (!bnxt_cpr_cmp_valid(rxcmp1, tmp_raw_cons,
cpr->cp_ring_struct->ring_size))
return -EBUSY;

cpr->valid = FLIP_VALID(cp_cons,
Expand Down Expand Up @@ -1006,7 +1008,8 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons,
cpr->cp_ring_struct->ring_size))
break;
cpr->valid = FLIP_VALID(cons,
cpr->cp_ring_struct->ring_mask,
Expand Down
2 changes: 1 addition & 1 deletion drivers/net/bnxt/bnxt_rxtx_vec_neon.c
Expand Up @@ -347,7 +347,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;

if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
Expand Down
2 changes: 1 addition & 1 deletion drivers/net/bnxt/bnxt_rxtx_vec_sse.c
Expand Up @@ -329,7 +329,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;

if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
Expand Down
2 changes: 1 addition & 1 deletion drivers/net/bnxt/bnxt_txr.c
Expand Up @@ -443,7 +443,7 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];

if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;

opaque = rte_le_to_cpu_32(txcmp->opaque);
Expand Down

0 comments on commit 5584a03

Please sign in to comment.