net/mlx5: fix shared RSS and mark actions combination
[ upstream commit 8e61555 ]

In order to allow mbuf mark ID updates in the Rx data-path, the PMD has a
mechanism that enables mark reporting according to the rte_flow rules.
When a flow with a mark ID and an RSS/QUEUE action exists, all the relevant
Rx queues are enabled to report the mark ID.

When a shared RSS action is combined with a mark action, the PMD mechanism
misses the Rx queue updates.

This commit handles the shared RSS case in the mechanism too.

Fixes: e1592b6 ("net/mlx5: make Rx queue thread safe")

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
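
For context, the combination addressed here can be built with the rte_flow shared-action API that shipped in DPDK 20.11 (later replaced by the indirect action API). The sketch below is illustrative only and not part of the patch: the port ID, queue list and mark ID are hypothetical, the shared-action API was still experimental at the time, and error handling is trimmed.

	/*
	 * Illustrative sketch only -- not part of the patch. It reproduces the
	 * combination the commit message describes: one shared RSS action plus
	 * a MARK action in a single rule, against the DPDK 20.11 API.
	 */
	#include <rte_common.h>
	#include <rte_ethdev.h>
	#include <rte_flow.h>
	#include <rte_mbuf.h>

	#define EXAMPLE_MARK_ID 0x1234	/* hypothetical mark value */

	static struct rte_flow *
	create_mark_shared_rss_flow(uint16_t port_id)
	{
		static const uint16_t queues[] = { 0, 1, 2, 3 };
		const struct rte_flow_action_rss rss_conf = {
			.types = ETH_RSS_IP,
			.queue_num = RTE_DIM(queues),
			.queue = queues,
		};
		const struct rte_flow_action rss_action = {
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &rss_conf,
		};
		const struct rte_flow_shared_action_conf shared_conf = {
			.ingress = 1,
		};
		struct rte_flow_error err;
		struct rte_flow_shared_action *shared_rss;

		/* One shared RSS action that any number of rules may reference. */
		shared_rss = rte_flow_shared_action_create(port_id, &shared_conf,
							   &rss_action, &err);
		if (shared_rss == NULL)
			return NULL;
		/*
		 * MARK combined with the shared RSS action: before this fix the
		 * mlx5 PMD did not enable mark reporting on the queues behind
		 * the shared action, so the mark never reached the mbufs.
		 */
		const struct rte_flow_attr attr = { .ingress = 1 };
		const struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		const struct rte_flow_action_mark mark = { .id = EXAMPLE_MARK_ID };
		const struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
			{ .type = RTE_FLOW_ACTION_TYPE_SHARED, .conf = shared_rss },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};
		return rte_flow_create(port_id, &attr, pattern, actions, &err);
	}

	/*
	 * Rx-side check (illustrative): with the fix, mbufs delivered through
	 * the shared RSS action carry the mark just like mbufs from a plain
	 * RSS/QUEUE fate.
	 */
	static int
	mbuf_has_mark(const struct rte_mbuf *m)
	{
		return (m->ol_flags & PKT_RX_FDIR_ID) &&
		       m->hash.fdir.hi == EXAMPLE_MARK_ID;
	}

With the fix applied, flow_drv_rxq_flags_set()/flow_drv_rxq_flags_trim() walk the indirection table of the shared RSS action as well, so the queues it spreads to are flagged for mark delivery exactly like queues behind a private RSS or QUEUE fate.
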
smou-mlnx authored and bluca committed Feb 2, 2021
1 parent 8e9f688 commit 1102e4b
Showing 2 changed files with 75 additions and 72 deletions.
drivers/net/mlx5/mlx5_flow.c: 52 changes (38 additions, 14 deletions)
@@ -1002,17 +1002,29 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const int mark = dev_handle->mark;
 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
-	struct mlx5_hrxq *hrxq;
+	struct mlx5_ind_table_obj *ind_tbl = NULL;
 	unsigned int i;
 
-	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
-		return;
-	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+		struct mlx5_hrxq *hrxq;
+
+		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      dev_handle->rix_hrxq);
-	if (!hrxq)
+		if (hrxq)
+			ind_tbl = hrxq->ind_table;
+	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+		struct mlx5_shared_action_rss *shared_rss;
+
+		shared_rss = mlx5_ipool_get
+			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+			 dev_handle->rix_srss);
+		if (shared_rss)
+			ind_tbl = shared_rss->ind_tbl;
+	}
+	if (!ind_tbl)
 		return;
-	for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
-		int idx = hrxq->ind_table->queues[i];
+	for (i = 0; i != ind_tbl->queues_n; ++i) {
+		int idx = ind_tbl->queues[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of((*priv->rxqs)[idx],
 				     struct mlx5_rxq_ctrl, rxq);
@@ -1084,18 +1096,30 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const int mark = dev_handle->mark;
 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
-	struct mlx5_hrxq *hrxq;
+	struct mlx5_ind_table_obj *ind_tbl = NULL;
 	unsigned int i;
 
-	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
-		return;
-	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+		struct mlx5_hrxq *hrxq;
+
+		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      dev_handle->rix_hrxq);
-	if (!hrxq)
+		if (hrxq)
+			ind_tbl = hrxq->ind_table;
+	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+		struct mlx5_shared_action_rss *shared_rss;
+
+		shared_rss = mlx5_ipool_get
+			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+			 dev_handle->rix_srss);
+		if (shared_rss)
+			ind_tbl = shared_rss->ind_tbl;
+	}
+	if (!ind_tbl)
 		return;
 	MLX5_ASSERT(dev->data->dev_started);
-	for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
-		int idx = hrxq->ind_table->queues[i];
+	for (i = 0; i != ind_tbl->queues_n; ++i) {
+		int idx = ind_tbl->queues[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of((*priv->rxqs)[idx],
 				     struct mlx5_rxq_ctrl, rxq);
drivers/net/mlx5/mlx5_flow_dv.c: 95 changes (37 additions, 58 deletions)
@@ -10652,47 +10652,6 @@ __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
 	}
 }
 
-/**
- * Retrieves hash RX queue suitable for the *flow*.
- * If shared action configured for *flow* suitable hash RX queue will be
- * retrieved from attached shared action.
- *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to the sub flow.
- * @param[in] rss_desc
- *   Pointer to the RSS descriptor.
- * @param[out] hrxq
- *   Pointer to retrieved hash RX queue object.
- *
- * @return
- *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
- */
-static uint32_t
-__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
-		       struct mlx5_flow_rss_desc *rss_desc,
-		       struct mlx5_hrxq **hrxq)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	uint32_t hrxq_idx;
-
-	if (rss_desc->shared_rss) {
-		hrxq_idx = __flow_dv_action_rss_hrxq_lookup
-				(dev, rss_desc->shared_rss,
-				 dev_flow->hash_fields,
-				 !!(dev_flow->handle->layers &
-				    MLX5_FLOW_LAYER_TUNNEL));
-		if (hrxq_idx)
-			*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
-					       hrxq_idx);
-	} else {
-		*hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
-					     &hrxq_idx);
-	}
-	return hrxq_idx;
-}
-
 /**
  * Apply the flow to the NIC, lock free,
  * (mutex should be acquired by caller).
@@ -10724,11 +10683,6 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
 
 	MLX5_ASSERT(wks);
-	if (rss_desc->shared_rss) {
-		dh = wks->flows[wks->flow_idx - 1].handle;
-		MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
-		dh->rix_srss = rss_desc->shared_rss;
-	}
 	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
 		dev_flow = &wks->flows[idx];
 		dv = &dev_flow->dv;
@@ -10744,20 +10698,42 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 				priv->drop_queue.hrxq->action;
 			}
 		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
-			    !dv_h->rix_sample && !dv_h->rix_dest_array) ||
-			   (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
+			    !dv_h->rix_sample && !dv_h->rix_dest_array)) {
+			struct mlx5_hrxq *hrxq;
+			uint32_t hrxq_idx;
+
+			hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
+						    &hrxq_idx);
+			if (!hrxq) {
+				rte_flow_error_set
+					(error, rte_errno,
+					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					 "cannot get hash queue");
+				goto error;
+			}
+			dh->rix_hrxq = hrxq_idx;
+			dv->actions[n++] = hrxq->action;
+		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
 			struct mlx5_hrxq *hrxq = NULL;
-			uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
-					(dev, dev_flow, rss_desc, &hrxq);
+			uint32_t hrxq_idx;
+
+			hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
+						rss_desc->shared_rss,
+						dev_flow->hash_fields,
+						!!(dh->layers &
+						MLX5_FLOW_LAYER_TUNNEL));
+			if (hrxq_idx)
+				hrxq = mlx5_ipool_get
+					(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+					 hrxq_idx);
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					 "cannot get hash queue");
 				goto error;
 			}
-			if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
-				dh->rix_hrxq = hrxq_idx;
+			dh->rix_srss = rss_desc->shared_rss;
 			dv->actions[n++] = hrxq->action;
 		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
 			if (!priv->sh->default_miss_action) {
@@ -10799,12 +10775,12 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
 			mlx5_hrxq_release(dev, dh->rix_hrxq);
 			dh->rix_hrxq = 0;
-		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
-			dh->rix_srss = 0;
 		}
 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
 			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
+	if (rss_desc->shared_rss)
+		wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -11072,9 +11048,6 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
 		flow_dv_port_id_action_resource_release(dev,
 				handle->rix_port_id_action);
 		break;
-	case MLX5_FLOW_FATE_SHARED_RSS:
-		flow_dv_shared_rss_action_release(dev, handle->rix_srss);
-		break;
 	default:
 		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
 		break;
@@ -11237,6 +11210,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow_handle *dev_handle;
 	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t srss = 0;
 
 	if (!flow)
 		return;
@@ -11281,10 +11255,15 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 		if (dev_handle->dvh.rix_tag)
 			flow_dv_tag_release(dev,
 					    dev_handle->dvh.rix_tag);
-		flow_dv_fate_resource_release(dev, dev_handle);
+		if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
+			flow_dv_fate_resource_release(dev, dev_handle);
+		else if (!srss)
+			srss = dev_handle->rix_srss;
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
 			   tmp_idx);
 	}
+	if (srss)
+		flow_dv_shared_rss_action_release(dev, srss);
 }
 
 /**