Skip to content

Commit

Permalink
net/mlx5: add reference counter on DPDK Tx queues
Browse files Browse the repository at this point in the history
Use the same design for the DPDK queue as for the Verbs queue, for
symmetry; this also helps in fixing some issues, such as the DPDK
queue-release API, which is not expected to fail. With such a design, the
queue is released when the reference counter reaches 0.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
  • Loading branch information
Nélio Laranjeiro authored and Ferruh Yigit committed Oct 12, 2017
1 parent faf2667 commit 6e78005
Show file tree
Hide file tree
Showing 6 changed files with 383 additions and 241 deletions.
16 changes: 5 additions & 11 deletions drivers/net/mlx5/mlx5.c
Original file line number Diff line number Diff line change
Expand Up @@ -225,17 +225,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (priv->txqs != NULL) {
/* XXX race condition if mlx5_tx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->txqs_n); ++i) {
struct mlx5_txq_data *txq = (*priv->txqs)[i];
struct mlx5_txq_ctrl *txq_ctrl;

if (txq == NULL)
continue;
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
(*priv->txqs)[i] = NULL;
mlx5_txq_cleanup(txq_ctrl);
rte_free(txq_ctrl);
}
for (i = 0; (i != priv->txqs_n); ++i)
mlx5_priv_txq_release(priv, i);
priv->txqs_n = 0;
priv->txqs = NULL;
}
Expand All @@ -259,6 +250,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
ret = mlx5_priv_txq_ibv_verify(priv);
if (ret)
WARN("%p: some Verbs Tx queue still remain", (void *)priv);
ret = mlx5_priv_txq_verify(priv);
if (ret)
WARN("%p: some Tx Queues still remain", (void *)priv);
ret = priv_flow_verify(priv);
if (ret)
WARN("%p: some flows still remain", (void *)priv);
Expand Down
1 change: 1 addition & 0 deletions drivers/net/mlx5/mlx5.h
Original file line number Diff line number Diff line change
Expand Up @@ -148,6 +148,7 @@ struct priv {
TAILQ_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
uint32_t link_speed_capa; /* Link speed capabilities. */
struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
Expand Down
73 changes: 47 additions & 26 deletions drivers/net/mlx5/mlx5_mr.c
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,8 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
*
* This function should only be called by txq_mp2mr().
*
* @param priv
* Pointer to private structure.
* @param txq
* Pointer to TX queue structure.
* @param[in] mp
Expand All @@ -128,8 +130,8 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
* mr on success, NULL on failure.
*/
struct mlx5_mr*
mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
unsigned int idx)
priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
struct rte_mempool *mp, unsigned int idx)
{
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
Expand All @@ -138,9 +140,9 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
/* Add a new entry, register MR first. */
DEBUG("%p: discovered new memory pool \"%s\" (%p)",
(void *)txq_ctrl, mp->name, (void *)mp);
mr = priv_mr_get(txq_ctrl->priv, mp);
mr = priv_mr_get(priv, mp);
if (mr == NULL)
mr = priv_mr_new(txq_ctrl->priv, mp);
mr = priv_mr_new(priv, mp);
if (unlikely(mr == NULL)) {
DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
(void *)txq_ctrl);
Expand All @@ -151,7 +153,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
(void *)txq_ctrl);
--idx;
priv_mr_release(txq_ctrl->priv, txq->mp2mr[0]);
priv_mr_release(priv, txq->mp2mr[0]);
memmove(&txq->mp2mr[0], &txq->mp2mr[1],
(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
}
Expand All @@ -163,7 +165,37 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
return mr;
}

struct txq_mp2mr_mbuf_check_data {
/**
 * Register a Memory Region (MR) <-> Memory Pool (MP) association in
 * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
 *
 * Thread-safe wrapper around priv_txq_mp2mr_reg(): takes the private
 * structure lock for the duration of the registration.
 *
 * This function should only be called by txq_mp2mr().
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 * @param idx
 *   Index of the next available entry.
 *
 * @return
 *   mr on success, NULL on failure.
 */
struct mlx5_mr*
mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
		   unsigned int idx)
{
	struct mlx5_txq_ctrl *ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct priv *priv = ctrl->priv;
	struct mlx5_mr *ret;

	priv_lock(priv);
	ret = priv_txq_mp2mr_reg(priv, txq, mp, idx);
	priv_unlock(priv);
	return ret;
}

/* Result holder passed to txq_mp2mr_mbuf_check() via rte_mempool_obj_iter(). */
struct mlx5_mp2mr_mbuf_check_data {
	int ret; /* -1 presumably flags a non-mbuf pool object (callers test ret == -1) — confirm in txq_mp2mr_mbuf_check(). */
};

Expand All @@ -185,7 +217,7 @@ static void
txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
uint32_t index __rte_unused)
{
struct txq_mp2mr_mbuf_check_data *data = arg;
struct mlx5_mp2mr_mbuf_check_data *data = arg;
struct rte_mbuf *buf = obj;

/*
Expand All @@ -206,35 +238,24 @@ txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
* Pointer to TX queue structure.
*/
void
mlx5_txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
{
struct mlx5_txq_ctrl *txq_ctrl = arg;
struct txq_mp2mr_mbuf_check_data data = {
struct priv *priv = (struct priv *)arg;
struct mlx5_mp2mr_mbuf_check_data data = {
.ret = 0,
};
uintptr_t start;
uintptr_t end;
unsigned int i;
struct mlx5_mr *mr;

/* Register mempool only if the first element looks like a mbuf. */
if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
data.ret == -1)
return;
if (mlx5_check_mempool(mp, &start, &end) != 0) {
ERROR("mempool %p: not virtually contiguous",
(void *)mp);
mr = priv_mr_get(priv, mp);
if (mr) {
priv_mr_release(priv, mr);
return;
}
for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
if (unlikely(txq_ctrl->txq.mp2mr[i] == NULL)) {
/* Unknown MP, add a new MR for it. */
break;
}
if (start >= (uintptr_t)txq_ctrl->txq.mp2mr[i]->start &&
end <= (uintptr_t)txq_ctrl->txq.mp2mr[i]->end)
return;
}
mlx5_txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
priv_mr_new(priv, mp);
}

/**
Expand Down
17 changes: 13 additions & 4 deletions drivers/net/mlx5/mlx5_rxtx.h
Original file line number Diff line number Diff line change
Expand Up @@ -297,6 +297,8 @@ struct mlx5_txq_ibv {

/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
struct priv *priv; /* Back pointer to private data. */
unsigned int socket; /* CPU socket ID for allocations. */
unsigned int max_inline_data; /* Max inline data. */
Expand Down Expand Up @@ -336,9 +338,6 @@ int mlx5_priv_rxq_ibv_verify(struct priv *);

/* mlx5_txq.c */

void mlx5_txq_cleanup(struct mlx5_txq_ctrl *);
int mlx5_txq_ctrl_setup(struct rte_eth_dev *, struct mlx5_txq_ctrl *, uint16_t,
unsigned int, const struct rte_eth_txconf *);
int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
const struct rte_eth_txconf *);
void mlx5_tx_queue_release(void *);
Expand All @@ -348,6 +347,14 @@ struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *, uint16_t);
int mlx5_priv_txq_ibv_release(struct priv *, struct mlx5_txq_ibv *);
int mlx5_priv_txq_ibv_releasable(struct priv *, struct mlx5_txq_ibv *);
int mlx5_priv_txq_ibv_verify(struct priv *);
struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *, uint16_t,
uint16_t, unsigned int,
const struct rte_eth_txconf *);
struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *, uint16_t);
int mlx5_priv_txq_release(struct priv *, uint16_t);
int mlx5_priv_txq_releasable(struct priv *, uint16_t);
int mlx5_priv_txq_verify(struct priv *);
void txq_alloc_elts(struct mlx5_txq_ctrl *);

/* mlx5_rxtx.c */

Expand Down Expand Up @@ -375,7 +382,9 @@ uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);

/* mlx5_mr.c */

void mlx5_txq_mp2mr_iter(struct rte_mempool *, void *);
void mlx5_mp2mr_iter(struct rte_mempool *, void *);
struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *,
struct rte_mempool *, unsigned int);
struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *, struct rte_mempool *,
unsigned int);

Expand Down
57 changes: 56 additions & 1 deletion drivers/net/mlx5/mlx5_trigger.c
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,44 @@
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Drop one reference on every DPDK Tx queue of the device.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
priv_txq_stop(struct priv *priv)
{
	unsigned int idx;

	for (idx = 0; idx != priv->txqs_n; ++idx)
		mlx5_priv_txq_release(priv, idx);
}

static int
priv_txq_start(struct priv *priv)
{
unsigned int i;
int ret = 0;

/* Add memory regions to Tx queues. */
for (i = 0; i != priv->txqs_n; ++i) {
unsigned int idx = 0;
struct mlx5_mr *mr;
struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);

if (!txq_ctrl)
continue;
LIST_FOREACH(mr, &priv->mr, next)
priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
txq_alloc_elts(txq_ctrl);
txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
if (!txq_ctrl->ibv) {
ret = ENOMEM;
goto error;
}
}
return -ret;
error:
priv_txq_stop(priv);
return -ret;
}

/**
* DPDK callback to start the device.
*
Expand All @@ -56,16 +94,25 @@ int
mlx5_dev_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr = NULL;
int err;

if (mlx5_is_secondary())
return -E_RTE_SECONDARY;

priv_lock(priv);
/* Update Rx/Tx callback. */
priv_dev_select_tx_function(priv, dev);
priv_dev_select_rx_function(priv, dev);
DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
rte_mempool_walk(mlx5_mp2mr_iter, priv);
err = priv_txq_start(priv);
if (err) {
ERROR("%p: TXQ allocation failed: %s",
(void *)dev, strerror(err));
goto error;
}
/* Update send callback. */
priv_dev_select_tx_function(priv, dev);
err = priv_create_hash_rxqs(priv);
if (!err)
err = priv_rehash_flows(priv);
Expand Down Expand Up @@ -94,10 +141,13 @@ mlx5_dev_start(struct rte_eth_dev *dev)
return 0;
error:
/* Rollback. */
LIST_FOREACH(mr, &priv->mr, next)
priv_mr_release(priv, mr);
priv_special_flow_disable_all(priv);
priv_mac_addrs_disable(priv);
priv_destroy_hash_rxqs(priv);
priv_flow_stop(priv);
priv_txq_stop(priv);
priv_unlock(priv);
return -err;
}
Expand All @@ -114,6 +164,7 @@ void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;

if (mlx5_is_secondary())
return;
Expand All @@ -131,6 +182,10 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
priv_destroy_hash_rxqs(priv);
priv_flow_stop(priv);
priv_rx_intr_vec_disable(priv);
priv_txq_stop(priv);
LIST_FOREACH(mr, &priv->mr, next) {
priv_mr_release(priv, mr);
}
priv_dev_interrupt_handler_uninstall(priv, dev);
priv_unlock(priv);
}
Loading

0 comments on commit 6e78005

Please sign in to comment.