Skip to content

Commit

Permalink
net/mlx5: fix mark enabling for Rx
Browse files Browse the repository at this point in the history
[ upstream commit 082becb ]

To optimize datapath, the mlx5 pmd checked for mark action on flow
creation, and flagged possible destination rxqs (through queue/RSS
actions), then it enabled the mark action logic only for flagged rxqs.

The mark action did not work if no queue/RSS action was in the same flow,
even when the user used multi-group logic to manage the flows.
So, if a mark action was performed in group X and the packet was moved to
group Y > X before being forwarded to the Rx queues, SW did not deliver
the mark ID to the mbuf.

Flag Rx datapath to report mark action for any queue when the driver
detects the first mark action after dev_start operation.

Fixes: 8e61555 ("net/mlx5: fix shared RSS and mark actions combination")

Signed-off-by: Raja Zidane <rzidane@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
  • Loading branch information
RajaZid20 authored and kevintraynor committed Feb 21, 2022
1 parent d157628 commit 99f5cd0
Show file tree
Hide file tree
Showing 6 changed files with 41 additions and 34 deletions.
1 change: 1 addition & 0 deletions drivers/net/mlx5/mlx5.h
Expand Up @@ -1413,6 +1413,7 @@ struct mlx5_priv {
unsigned int mtr_en:1; /* Whether support meter. */
unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
unsigned int lb_used:1; /* Loopback queue is referred to. */
uint32_t mark_enabled:1; /* If mark action is enabled on rxqs. */
uint16_t domain_id; /* Switch domain identifier. */
uint16_t vport_id; /* Associated VF vport index (if any). */
uint32_t vport_meta_tag; /* Used for vport index match over VF LAG. */
Expand Down
53 changes: 28 additions & 25 deletions drivers/net/mlx5/mlx5_flow.c
Expand Up @@ -1234,7 +1234,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
Expand Down Expand Up @@ -1269,15 +1268,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
* this must always be enabled (metadata may arrive
* from other ports - not from local flows only).
*/
if (priv->config.dv_flow_en &&
priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
mlx5_flow_ext_mreg_supported(dev)) {
rxq_ctrl->rxq.mark = 1;
rxq_ctrl->flow_mark_n = 1;
} else if (mark) {
rxq_ctrl->rxq.mark = 1;
rxq_ctrl->flow_mark_n++;
}
if (tunnel) {
unsigned int j;

Expand All @@ -1295,6 +1285,20 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
}
}

static void
flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;

if (priv->mark_enabled)
return;
LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
rxq_ctrl->rxq.mark = 1;
}
priv->mark_enabled = 1;
}

/**
* Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
*
Expand All @@ -1309,7 +1313,11 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t handle_idx;
struct mlx5_flow_handle *dev_handle;
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

MLX5_ASSERT(wks);
if (wks->mark)
flow_rxq_mark_flag_set(dev);
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dev_handle, next)
flow_drv_rxq_flags_set(dev, dev_handle);
Expand All @@ -1329,7 +1337,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
Expand Down Expand Up @@ -1360,15 +1367,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
MLX5_ASSERT(rxq_ctrl != NULL);
if (rxq_ctrl == NULL)
continue;
if (priv->config.dv_flow_en &&
priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
mlx5_flow_ext_mreg_supported(dev)) {
rxq_ctrl->rxq.mark = 1;
rxq_ctrl->flow_mark_n = 1;
} else if (mark) {
rxq_ctrl->flow_mark_n--;
rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
}
if (tunnel) {
unsigned int j;

Expand Down Expand Up @@ -1425,12 +1423,12 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev)

if (rxq == NULL || rxq->ctrl == NULL)
continue;
rxq->ctrl->flow_mark_n = 0;
rxq->ctrl->rxq.mark = 0;
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
rxq->ctrl->flow_tunnels_n[j] = 0;
rxq->ctrl->rxq.tunnel = 0;
}
priv->mark_enabled = 0;
}

/**
Expand Down Expand Up @@ -4811,6 +4809,7 @@ flow_create_split_inner(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct mlx5_flow *dev_flow;
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
flow_split_info->flow_idx, error);
Expand All @@ -4829,8 +4828,10 @@ flow_create_split_inner(struct rte_eth_dev *dev,
*/
if (flow_split_info->prefix_layers)
dev_flow->handle->layers = flow_split_info->prefix_layers;
if (flow_split_info->prefix_mark)
dev_flow->handle->mark = 1;
if (flow_split_info->prefix_mark) {
MLX5_ASSERT(wks);
wks->mark = 1;
}
if (sub_flow)
*sub_flow = dev_flow;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
Expand Down Expand Up @@ -6143,7 +6144,7 @@ flow_create_split_meter(struct rte_eth_dev *dev,
MLX5_FLOW_TABLE_LEVEL_METER;
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
flow_split_info->prefix_mark |= dev_flow->handle->mark;
flow_split_info->prefix_mark |= wks->mark;
flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
}
/* Add the prefix subflow. */
Expand Down Expand Up @@ -6209,6 +6210,7 @@ flow_create_split_sample(struct rte_eth_dev *dev,
struct mlx5_flow_dv_sample_resource *sample_res;
struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
struct mlx5_flow_tbl_resource *sfx_tbl;
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
#endif
size_t act_size;
size_t item_size;
Expand Down Expand Up @@ -6295,7 +6297,8 @@ flow_create_split_sample(struct rte_eth_dev *dev,
}
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
flow_split_info->prefix_mark |= dev_flow->handle->mark;
MLX5_ASSERT(wks);
flow_split_info->prefix_mark |= wks->mark;
/* Suffix group level already be scaled with factor, set
* MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale
* again in translation.
Expand Down
2 changes: 1 addition & 1 deletion drivers/net/mlx5/mlx5_flow.h
Expand Up @@ -697,7 +697,6 @@ struct mlx5_flow_handle {
void *drv_flow; /**< pointer to driver flow object. */
uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
uint32_t mark:1; /**< Metadata rxq mark flag. */
uint32_t fate_action:3; /**< Fate action type. */
uint32_t flex_item; /**< referenced Flex Item bitmask. */
union {
Expand Down Expand Up @@ -1108,6 +1107,7 @@ struct mlx5_flow_workspace {
/* The final policy when meter policy is hierarchy. */
uint32_t skip_matcher_reg:1;
/* Indicates if need to skip matcher register in translate. */
uint32_t mark:1; /* Indicates if flow contains mark action. */
};

struct mlx5_flow_split_info {
Expand Down
14 changes: 9 additions & 5 deletions drivers/net/mlx5/mlx5_flow_dv.c
Expand Up @@ -11646,7 +11646,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
(((const struct rte_flow_action_mark *)
(sub_actions->conf))->id);

dev_flow->handle->mark = 1;
wks->mark = 1;
pre_rix = dev_flow->handle->dvh.rix_tag;
/* Save the mark resource before sample */
pre_r = dev_flow->dv.tag_resource;
Expand Down Expand Up @@ -12806,7 +12806,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
action_flags |= MLX5_FLOW_ACTION_FLAG;
dev_flow->handle->mark = 1;
wks->mark = 1;
if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
struct rte_flow_action_mark mark = {
.id = MLX5_FLOW_MARK_DEFAULT,
Expand Down Expand Up @@ -12835,7 +12835,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action_flags |= MLX5_FLOW_ACTION_MARK;
dev_flow->handle->mark = 1;
wks->mark = 1;
if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
Expand Down Expand Up @@ -15403,7 +15403,9 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
(MLX5_MAX_MODIFY_NUM + 1)];
} mhdr_dummy;
struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

MLX5_ASSERT(wks);
egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
memset(&dh, 0, sizeof(struct mlx5_flow_handle));
Expand Down Expand Up @@ -15441,7 +15443,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
NULL,
"cannot create policy "
"mark action for this color");
dev_flow.handle->mark = 1;
wks->mark = 1;
if (flow_dv_tag_resource_register(dev, tag_be,
&dev_flow, &flow_err))
return -rte_mtr_error_set(error,
Expand Down Expand Up @@ -16866,7 +16868,9 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
struct mlx5_meter_policy_action_container *act_cnt;
uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
uint16_t sub_policy_num;
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

MLX5_ASSERT(wks);
rte_spinlock_lock(&mtr_policy->sl);
for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
if (!rss_desc[i])
Expand Down Expand Up @@ -16940,7 +16944,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
if (act_cnt->rix_mark || act_cnt->modify_hdr) {
memset(&dh, 0, sizeof(struct mlx5_flow_handle));
if (act_cnt->rix_mark)
dh.mark = 1;
wks->mark = 1;
dh.fate_action = MLX5_FLOW_FATE_QUEUE;
dh.rix_hrxq = hrxq_idx[i];
flow_drv_rxq_flags_set(dev, &dh);
Expand Down
4 changes: 2 additions & 2 deletions drivers/net/mlx5/mlx5_flow_verbs.c
Expand Up @@ -1693,12 +1693,12 @@ flow_verbs_translate(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_FLAG:
flow_verbs_translate_action_flag(dev_flow, actions);
action_flags |= MLX5_FLOW_ACTION_FLAG;
dev_flow->handle->mark = 1;
wks->mark = 1;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
flow_verbs_translate_action_mark(dev_flow, actions);
action_flags |= MLX5_FLOW_ACTION_MARK;
dev_flow->handle->mark = 1;
wks->mark = 1;
break;
case RTE_FLOW_ACTION_TYPE_DROP:
flow_verbs_translate_action_drop(dev_flow, actions);
Expand Down
1 change: 0 additions & 1 deletion drivers/net/mlx5/mlx5_rx.h
Expand Up @@ -161,7 +161,6 @@ struct mlx5_rxq_ctrl {
uint16_t share_qid; /* Shared RxQ ID in group. */
unsigned int started:1; /* Whether (shared) RXQ has been started. */
unsigned int irq:1; /* Whether IRQ is enabled. */
uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
uint32_t wqn; /* WQ number. */
uint32_t rxseg_n; /* Number of split segment descriptions. */
Expand Down

0 comments on commit 99f5cd0

Please sign in to comment.