Skip to content

Commit

Permalink
net/mlx5: fix shared RSS action release
Browse files Browse the repository at this point in the history
As a shared RSS action will be shared by multiple flows, the action
is created as a global standalone action and managed only by the
relevant shared-action management functions.

Currently, hrxqs can be created either by a shared RSS action or by a
general queue action. Hrxqs created by a shared RSS action must also
be released only through the shared RSS action. It is not correct to
release shared RSS action hrxqs the same way general queue actions
are released in flow destroy.

This commit adds a new fate action type for shared RSS action to
handle the shared RSS action hrxq release correctly.

Fixes: e1592b6 ("net/mlx5: make Rx queue thread safe")

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
  • Loading branch information
smou-mlnx authored and Ferruh Yigit committed Nov 13, 2020
1 parent c3ba8ec commit fabf8a3
Show file tree
Hide file tree
Showing 6 changed files with 86 additions and 54 deletions.
3 changes: 1 addition & 2 deletions drivers/net/mlx5/mlx5.h
Original file line number Diff line number Diff line change
Expand Up @@ -789,11 +789,11 @@ struct mlx5_flow_rss_desc {
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
uint32_t key_len; /**< RSS hash key len. */
uint32_t tunnel; /**< Queue in tunnel. */
uint32_t shared_rss; /**< Shared RSS index. */
union {
uint16_t *queue; /**< Destination queues. */
const uint16_t *const_q; /**< Const pointer convert. */
};
bool standalone; /**< Queue is standalone or not. */
};

#define MLX5_PROC_PRIV(port_id) \
Expand Down Expand Up @@ -836,7 +836,6 @@ struct mlx5_ind_table_obj {
__extension__
struct mlx5_hrxq {
struct mlx5_cache_entry entry; /* Cache entry. */
uint32_t refcnt; /* Reference counter. */
uint32_t standalone:1; /* This object used in shared action. */
struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
RTE_STD_C11
Expand Down
7 changes: 6 additions & 1 deletion drivers/net/mlx5/mlx5_flow.c
Original file line number Diff line number Diff line change
Expand Up @@ -5597,7 +5597,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
buf->entries = 1;
buf->entry[0].pattern = (void *)(uintptr_t)items;
}
flow->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
rss_desc->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
shared_actions_n);
/*
* Record the start index when there is a nested call. All sub-flows
Expand Down Expand Up @@ -5708,6 +5708,11 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
ret = rte_errno; /* Save rte_errno before cleanup. */
flow_mreg_del_copy_action(dev, flow);
flow_drv_destroy(dev, flow);
if (rss_desc->shared_rss)
__atomic_sub_fetch(&((struct mlx5_shared_action_rss *)
mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
rte_errno = ret; /* Restore rte_errno. */
ret = rte_errno;
Expand Down
4 changes: 3 additions & 1 deletion drivers/net/mlx5/mlx5_flow.h
Original file line number Diff line number Diff line change
Expand Up @@ -388,6 +388,7 @@ enum mlx5_flow_fate_type {
MLX5_FLOW_FATE_PORT_ID,
MLX5_FLOW_FATE_DROP,
MLX5_FLOW_FATE_DEFAULT_MISS,
MLX5_FLOW_FATE_SHARED_RSS,
MLX5_FLOW_FATE_MAX,
};

Expand Down Expand Up @@ -651,6 +652,8 @@ struct mlx5_flow_handle {
/**< Generic value indicates the fate action. */
uint32_t rix_default_fate;
/**< Indicates default miss fate action. */
uint32_t rix_srss;
/**< Indicates shared RSS fate action. */
};
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_handle_dv dvh;
Expand Down Expand Up @@ -1027,7 +1030,6 @@ flow_items_to_tunnel(const struct rte_flow_item items[])
/* Flow structure. */
struct rte_flow {
ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
uint32_t shared_rss; /** < Shared RSS action ID. */
uint32_t dev_handles;
/**< Device flow handles that are part of the flow. */
uint32_t drv_type:2; /**< Driver type. */
Expand Down
88 changes: 58 additions & 30 deletions drivers/net/mlx5/mlx5_flow_dv.c
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,8 @@ flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

/**
* Initialize flow attributes structure according to flow items' types.
Expand Down Expand Up @@ -8558,7 +8560,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
rss_desc->hash_fields = dev_flow->hash_fields;
rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
rss_desc->standalone = false;
rss_desc->shared_rss = 0;
*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
if (!*hrxq_idx)
return NULL;
Expand Down Expand Up @@ -9780,7 +9782,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
* when expanding items for RSS.
*/
action_flags |= MLX5_FLOW_ACTION_RSS;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
dev_flow->handle->fate_action = rss_desc->shared_rss ?
MLX5_FLOW_FATE_SHARED_RSS :
MLX5_FLOW_FATE_QUEUE;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
flow->age = (uint32_t)(uintptr_t)(action->conf);
Expand Down Expand Up @@ -10574,40 +10578,34 @@ __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
*
* @param[in] dev
* Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Shared RSS action holding hash RX queue objects.
* @param[in] dev_flow
* Pointer to the sub flow.
* @param[in] rss_desc
* Pointer to the RSS descriptor.
* @param[out] hrxq
* Pointer to retrieved hash RX queue object.
*
* @return
* Valid hash RX queue index, otherwise 0 and rte_errno is set.
*/
static uint32_t
__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
struct mlx5_flow *dev_flow,
struct mlx5_hrxq **hrxq)
__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
struct mlx5_flow_rss_desc *rss_desc,
struct mlx5_hrxq **hrxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
uint32_t hrxq_idx;

if (flow->shared_rss) {
if (rss_desc->shared_rss) {
hrxq_idx = __flow_dv_action_rss_hrxq_lookup
(dev, flow->shared_rss, dev_flow->hash_fields,
(dev, rss_desc->shared_rss,
dev_flow->hash_fields,
!!(dev_flow->handle->layers &
MLX5_FLOW_LAYER_TUNNEL));
if (hrxq_idx) {
if (hrxq_idx)
*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
hrxq_idx);
__atomic_fetch_add(&(*hrxq)->refcnt, 1,
__ATOMIC_RELAXED);
}
} else {
struct mlx5_flow_rss_desc *rss_desc =
&wks->rss_desc[!!wks->flow_nested_idx];

*hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
&hrxq_idx);
}
Expand Down Expand Up @@ -10642,8 +10640,15 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
int err;
int idx;
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
struct mlx5_flow_rss_desc *rss_desc =
&wks->rss_desc[!!wks->flow_nested_idx];

MLX5_ASSERT(wks);
if (rss_desc->shared_rss) {
dh = wks->flows[wks->flow_idx - 1].handle;
MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
dh->rix_srss = rss_desc->shared_rss;
}
for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
dev_flow = &wks->flows[idx];
dv = &dev_flow->dv;
Expand All @@ -10658,19 +10663,21 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
dv->actions[n++] =
priv->drop_queue.hrxq->action;
}
} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
!dv_h->rix_sample && !dv_h->rix_dest_array) {
} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
!dv_h->rix_sample && !dv_h->rix_dest_array) ||
(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
struct mlx5_hrxq *hrxq = NULL;
uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
(dev, flow, dev_flow, &hrxq);
(dev, dev_flow, rss_desc, &hrxq);
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot get hash queue");
goto error;
}
dh->rix_hrxq = hrxq_idx;
if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
dh->rix_hrxq = hrxq_idx;
dv->actions[n++] = hrxq->action;
} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
if (!priv->sh->default_miss_action) {
Expand Down Expand Up @@ -10716,6 +10723,8 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
if (dh->vf_vlan.tag && dh->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
if (rss_desc->shared_rss)
wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
}
Expand Down Expand Up @@ -10900,6 +10909,25 @@ flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
&cache->entry);
}

/**
 * Release shared RSS action resource.
 *
 * Drops one reference on the shared RSS action identified by the given
 * ipool index; the action object itself stays owned by the shared-action
 * management code.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param srss
 *   Shared RSS action index.
 */
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Resolve the action object from its ipool index. */
	struct mlx5_shared_action_rss *action =
		mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			       srss);

	__atomic_sub_fetch(&action->refcnt, 1, __ATOMIC_RELAXED);
}

void
flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
struct mlx5_cache_entry *entry)
Expand Down Expand Up @@ -10964,6 +10992,9 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
flow_dv_port_id_action_resource_release(dev,
handle->rix_port_id_action);
break;
case MLX5_FLOW_FATE_SHARED_RSS:
flow_dv_shared_rss_action_release(dev, handle->rix_srss);
break;
default:
DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
break;
Expand Down Expand Up @@ -11130,13 +11161,6 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
if (!flow)
return;
flow_dv_remove(dev, flow);
if (flow->shared_rss) {
struct mlx5_shared_action_rss *shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
flow->shared_rss);

__atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
}
if (flow->counter) {
flow_dv_counter_free(dev, flow->counter);
flow->counter = 0;
Expand Down Expand Up @@ -11241,6 +11265,8 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] action_idx
* Shared RSS action ipool index.
* @param[in, out] action
* Partially initialized shared RSS action.
* @param[out] error
Expand All @@ -11252,6 +11278,7 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
*/
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
uint32_t action_idx,
struct mlx5_shared_action_rss *action,
struct rte_flow_error *error)
{
Expand All @@ -11263,7 +11290,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
rss_desc.const_q = action->origin.queue;
rss_desc.queue_num = action->origin.queue_num;
rss_desc.standalone = true;
/* Set non-zero value to indicate a shared RSS. */
rss_desc.shared_rss = action_idx;
for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
uint32_t hrxq_idx;
uint64_t hash_fields = mlx5_rss_hash_fields[i];
Expand Down Expand Up @@ -11355,7 +11383,7 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
memcpy(shared_action->queue, rss->queue, queue_size);
origin->queue = shared_action->queue;
origin->queue_num = rss->queue_num;
if (__flow_dv_action_rss_setup(dev, shared_action, error))
if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
goto error_rss_init;
__atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
rte_spinlock_lock(&priv->shared_act_sl);
Expand Down
2 changes: 1 addition & 1 deletion drivers/net/mlx5/mlx5_flow_verbs.c
Original file line number Diff line number Diff line change
Expand Up @@ -1972,7 +1972,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
rss_desc->hash_fields = dev_flow->hash_fields;
rss_desc->tunnel = !!(handle->layers &
MLX5_FLOW_LAYER_TUNNEL);
rss_desc->standalone = false;
rss_desc->shared_rss = 0;
hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
hrxq_idx);
Expand Down
36 changes: 17 additions & 19 deletions drivers/net/mlx5/mlx5_rxq.c
Original file line number Diff line number Diff line change
Expand Up @@ -2211,25 +2211,27 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
struct mlx5_priv *priv = dev->data->dev_private;
const uint8_t *rss_key = rss_desc->key;
uint32_t rss_key_len = rss_desc->key_len;
bool standalone = !!rss_desc->shared_rss;
const uint16_t *queues =
rss_desc->standalone ? rss_desc->const_q : rss_desc->queue;
standalone ? rss_desc->const_q : rss_desc->queue;
uint32_t queues_n = rss_desc->queue_num;
struct mlx5_hrxq *hrxq = NULL;
uint32_t hrxq_idx = 0;
struct mlx5_ind_table_obj *ind_tbl;
int ret;

queues_n = rss_desc->hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
ind_tbl = standalone ? NULL :
mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
rss_desc->standalone);
standalone);
if (!ind_tbl)
return NULL;
hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
if (!hrxq)
goto error;
hrxq->standalone = rss_desc->standalone;
hrxq->standalone = standalone;
hrxq->idx = hrxq_idx;
hrxq->ind_table = ind_tbl;
hrxq->rss_key_len = rss_key_len;
Expand All @@ -2240,7 +2242,7 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
goto error;
return hrxq;
error:
mlx5_ind_table_obj_release(dev, ind_tbl, rss_desc->standalone);
mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
return NULL;
Expand Down Expand Up @@ -2294,7 +2296,7 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
.data = rss_desc,
};

if (rss_desc->standalone) {
if (rss_desc->shared_rss) {
hrxq = __mlx5_hrxq_create(dev, rss_desc);
} else {
entry = mlx5_cache_register(&priv->hrxqs, &ctx);
Expand Down Expand Up @@ -2346,11 +2348,8 @@ mlx5_drop_action_create(struct rte_eth_dev *dev)
struct mlx5_hrxq *hrxq = NULL;
int ret;

if (priv->drop_queue.hrxq) {
__atomic_fetch_add(&priv->drop_queue.hrxq->refcnt, 1,
__ATOMIC_RELAXED);
if (priv->drop_queue.hrxq)
return priv->drop_queue.hrxq;
}
hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
if (!hrxq) {
DRV_LOG(WARNING,
Expand All @@ -2369,7 +2368,6 @@ mlx5_drop_action_create(struct rte_eth_dev *dev)
ret = priv->obj_ops.drop_action_create(dev);
if (ret < 0)
goto error;
__atomic_store_n(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
return hrxq;
error:
if (hrxq) {
Expand All @@ -2393,14 +2391,14 @@ mlx5_drop_action_destroy(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;

if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) {
priv->obj_ops.drop_action_destroy(dev);
mlx5_free(priv->drop_queue.rxq);
mlx5_free(hrxq->ind_table);
mlx5_free(hrxq);
priv->drop_queue.rxq = NULL;
priv->drop_queue.hrxq = NULL;
}
if (!priv->drop_queue.hrxq)
return;
priv->obj_ops.drop_action_destroy(dev);
mlx5_free(priv->drop_queue.rxq);
mlx5_free(hrxq->ind_table);
mlx5_free(hrxq);
priv->drop_queue.rxq = NULL;
priv->drop_queue.hrxq = NULL;
}

/**
Expand Down

0 comments on commit fabf8a3

Please sign in to comment.