net/mlx5: fix tunnel offload private items location
[ upstream commit 8c5a231 ]

The tunnel offload API requires the application to query the PMD for
PMD-specific flow items and actions. The application uses these
private elements to build flow rules according to the tunnel offload
model.
The model does not restrict where the private elements may be placed
in a flow rule, but the current MLX5 PMD implementation expects a
tunnel offload rule to begin with the PMD-specific elements.
This patch removes that placement limitation.

Fixes: 4ec6360 ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
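
For illustration, a minimal application-side sketch of the tunnel offload
model described above, using the public rte_flow tunnel API
(rte_flow_tunnel_decap_set() and rte_flow_create()). It is not part of
this commit; the port id, VXLAN parameters, jump group and helper name
are placeholders. With this fix, the PMD-private actions may sit anywhere
in the action list instead of only at its head.

#include <string.h>
#include <rte_flow.h>

/* Build a tunnel-set (steer) rule with the application's JUMP action
 * placed before the PMD-private actions.
 */
static struct rte_flow *
create_tunnel_set_rule(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_tunnel tunnel = {
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.tun_id = 42,
	};
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_jump jump_conf = { .group = 1 };
	struct rte_flow_action actions[8] = {
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump_conf },
	};
	struct rte_flow_action *pmd_actions;
	uint32_t num_pmd_actions;

	/* Query the PMD for its private tunnel-set actions. */
	if (rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
				      &num_pmd_actions, error))
		return NULL;
	if (num_pmd_actions > 6)
		return NULL;
	/* Place the PMD elements after the application action. */
	memcpy(&actions[1], pmd_actions,
	       num_pmd_actions * sizeof(pmd_actions[0]));
	actions[1 + num_pmd_actions].type = RTE_FLOW_ACTION_TYPE_END;
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

In the full model the application would also build the match rule from
items returned by rte_flow_tunnel_match() and eventually release the PMD
elements with rte_flow_tunnel_action_decap_release().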
getelson-at-mellanox authored and steevenlee committed Jun 8, 2021
1 parent 6bdf384 commit 96883ce
Showing 3 changed files with 100 additions and 78 deletions.
48 changes: 32 additions & 16 deletions drivers/net/mlx5/mlx5_flow.c
@@ -50,6 +50,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_action *app_actions,
uint32_t flow_idx,
const struct mlx5_flow_tunnel *tunnel,
struct tunnel_default_miss_ctx *ctx,
struct rte_flow_error *error);
static struct mlx5_flow_tunnel *
@@ -5183,22 +5184,14 @@ flow_create_split_outer(struct rte_eth_dev *dev,
return ret;
}

static struct mlx5_flow_tunnel *
flow_tunnel_from_rule(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[])
static inline struct mlx5_flow_tunnel *
flow_tunnel_from_rule(const struct mlx5_flow *flow)
{
struct mlx5_flow_tunnel *tunnel;

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
if (is_flow_tunnel_match_rule(dev, attr, items, actions))
tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
else
tunnel = NULL;
tunnel = (typeof(tunnel))flow->tunnel;
#pragma GCC diagnostic pop

return tunnel;
@@ -5392,12 +5385,11 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
error);
if (ret < 0)
goto error;
if (is_flow_tunnel_steer_rule(dev, attr,
buf->entry[i].pattern,
p_actions_rx)) {
if (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) {
ret = flow_tunnel_add_default_miss(dev, flow, attr,
p_actions_rx,
idx,
wks->flows[0].tunnel,
&default_miss_ctx,
error);
if (ret < 0) {
@@ -5461,7 +5453,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
}
flow_rxq_flags_set(dev, flow);
rte_free(translated_actions);
tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
tunnel = flow_tunnel_from_rule(wks->flows);
if (tunnel) {
flow->tunnel = 1;
flow->tunnel_id = tunnel->tunnel_id;
@@ -7210,6 +7202,28 @@ int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
return ret;
}

const struct mlx5_flow_tunnel *
mlx5_get_tof(const struct rte_flow_item *item,
const struct rte_flow_action *action,
enum mlx5_tof_rule_type *rule_type)
{
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->type == (typeof(item->type))
MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {
*rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
return flow_items_to_tunnel(item);
}
}
for (; action->conf != RTE_FLOW_ACTION_TYPE_END; action++) {
if (action->type == (typeof(action->type))
MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
*rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;
return flow_actions_to_tunnel(action);
}
}
return NULL;
}
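
For context, a sketch of how the new helper is meant to be called
(mirroring its use in flow_dv_validate() and flow_dv_translate() below);
the wrapper function itself is hypothetical:

/* Illustration only: classify a rule by scanning its whole item and
 * action lists, instead of inspecting element 0 as the old code did.
 */
static bool
rule_is_tunnel_steer(const struct rte_flow_item items[],
		     const struct rte_flow_action actions[])
{
	enum mlx5_tof_rule_type tof_rule_type = MLX5_TUNNEL_OFFLOAD_NONE;

	return mlx5_get_tof(items, actions, &tof_rule_type) != NULL &&
	       tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
}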

/**
* tunnel offload functionality is defined for the DV environment only
*/
@@ -7240,13 +7254,13 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_action *app_actions,
uint32_t flow_idx,
const struct mlx5_flow_tunnel *tunnel,
struct tunnel_default_miss_ctx *ctx,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow *dev_flow;
struct rte_flow_attr miss_attr = *attr;
const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
const struct rte_flow_item miss_items[2] = {
{
.type = RTE_FLOW_ITEM_TYPE_ETH,
@@ -7332,6 +7346,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
dev_flow->flow = flow;
dev_flow->external = true;
dev_flow->tunnel = tunnel;
dev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;
/* Subflow object was created; we must include it in the list. */
SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
dev_flow->handle, next);
@@ -7926,6 +7941,7 @@ flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
__rte_unused const struct rte_flow_attr *attr,
__rte_unused const struct rte_flow_action *actions,
__rte_unused uint32_t flow_idx,
__rte_unused const struct mlx5_flow_tunnel *tunnel,
__rte_unused struct tunnel_default_miss_ctx *ctx,
__rte_unused struct rte_flow_error *error)
{
46 changes: 27 additions & 19 deletions drivers/net/mlx5/mlx5_flow.h
@@ -753,6 +753,16 @@ struct mlx5_flow_verbs_workspace {
/** Maximal number of device sub-flows supported. */
#define MLX5_NUM_MAX_DEV_FLOWS 32

/**
* tunnel offload rule types
*/
enum mlx5_tof_rule_type {
MLX5_TUNNEL_OFFLOAD_NONE = 0,
MLX5_TUNNEL_OFFLOAD_SET_RULE,
MLX5_TUNNEL_OFFLOAD_MATCH_RULE,
MLX5_TUNNEL_OFFLOAD_MISS_RULE,
};

/** Device flow structure. */
__extension__
struct mlx5_flow {
@@ -774,6 +784,7 @@ struct mlx5_flow {
struct mlx5_flow_handle *handle;
uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
const struct mlx5_flow_tunnel *tunnel;
enum mlx5_tof_rule_type tof_type;
};

/* Flow meter state. */
@@ -983,10 +994,10 @@ mlx5_tunnel_hub(struct rte_eth_dev *dev)
}

static inline bool
is_tunnel_offload_active(struct rte_eth_dev *dev)
is_tunnel_offload_active(const struct rte_eth_dev *dev)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_priv *priv = dev->data->dev_private;
const struct mlx5_priv *priv = dev->data->dev_private;
return !!priv->config.dv_miss_info;
#else
RTE_SET_USED(dev);
@@ -995,23 +1006,15 @@ is_tunnel_offload_active(struct rte_eth_dev *dev)
}

static inline bool
is_flow_tunnel_match_rule(__rte_unused struct rte_eth_dev *dev,
__rte_unused const struct rte_flow_attr *attr,
__rte_unused const struct rte_flow_item items[],
__rte_unused const struct rte_flow_action actions[])
is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)
{
return (items[0].type == (typeof(items[0].type))
MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL);
return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
}

static inline bool
is_flow_tunnel_steer_rule(__rte_unused struct rte_eth_dev *dev,
__rte_unused const struct rte_flow_attr *attr,
__rte_unused const struct rte_flow_item items[],
__rte_unused const struct rte_flow_action actions[])
is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)
{
return (actions[0].type == (typeof(actions[0].type))
MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET);
return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
}

static inline const struct mlx5_flow_tunnel *
@@ -1252,11 +1255,10 @@ struct flow_grp_info {

static inline bool
tunnel_use_standard_attr_group_translate
(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
(const struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[])
const struct mlx5_flow_tunnel *tunnel,
enum mlx5_tof_rule_type tof_rule_type)
{
bool verdict;

@@ -1272,7 +1274,7 @@ tunnel_use_standard_attr_group_translate
* method
*/
verdict = !attr->group &&
is_flow_tunnel_steer_rule(dev, attr, items, actions);
is_flow_tunnel_steer_rule(tof_rule_type);
} else {
/*
* non-tunnel group translation uses standard method for
@@ -1505,4 +1507,10 @@ void flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
struct mlx5_cache_entry *entry);
struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
uint32_t age_idx);
const struct mlx5_flow_tunnel *
mlx5_get_tof(const struct rte_flow_item *items,
const struct rte_flow_action *actions,
enum mlx5_tof_rule_type *rule_type);


#endif /* RTE_PMD_MLX5_FLOW_H_ */
84 changes: 41 additions & 43 deletions drivers/net/mlx5/mlx5_flow_dv.c
@@ -5323,32 +5323,33 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
int16_t rw_act_num = 0;
uint64_t is_root;
const struct mlx5_flow_tunnel *tunnel;
enum mlx5_tof_rule_type tof_rule_type;
struct flow_grp_info grp_info = {
.external = !!external,
.transfer = !!attr->transfer,
.fdb_def_rule = !!priv->fdb_def_rule,
.std_tbl_fix = true,
};
const struct rte_eth_hairpin_conf *conf;

if (items == NULL)
return -1;
if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
tunnel = flow_items_to_tunnel(items);
action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
MLX5_FLOW_ACTION_DECAP;
} else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
tunnel = flow_actions_to_tunnel(actions);
action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
} else {
tunnel = NULL;
tunnel = is_tunnel_offload_active(dev) ?
mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
if (tunnel) {
if (priv->representor)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "decap not supported for VF representor");
if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
MLX5_FLOW_ACTION_DECAP;
grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
(dev, attr, tunnel, tof_rule_type);
}
if (tunnel && priv->representor)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"decap not supported "
"for VF representor");
grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
(dev, tunnel, attr, items, actions);
ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
if (ret < 0)
return ret;
@@ -5362,15 +5363,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
switch (type) {
case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
if (items[0].type != (typeof(items[0].type))
MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "MLX5 private items "
"must be the first");
break;
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
@@ -5631,6 +5623,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = MLX5_FLOW_LAYER_ECPRI;
break;
case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
/* tunnel offload item was processed before;
* list it here as a supported type
*/
break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -6099,15 +6096,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
++actions_n;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
if (actions[0].type != (typeof(actions[0].type))
MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "MLX5 private action "
"must be the first");

action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
/* tunnel offload action was processed before;
* list it here as a supported type
*/
break;
default:
return rte_flow_error_set(error, ENOTSUP,
@@ -9730,12 +9721,13 @@ flow_dv_translate(struct rte_eth_dev *dev,
int tmp_actions_n = 0;
uint32_t table;
int ret = 0;
const struct mlx5_flow_tunnel *tunnel;
const struct mlx5_flow_tunnel *tunnel = NULL;
struct flow_grp_info grp_info = {
.external = !!dev_flow->external,
.transfer = !!attr->transfer,
.fdb_def_rule = !!priv->fdb_def_rule,
.skip_scale = !!dev_flow->skip_scale,
.std_tbl_fix = true,
};

if (!wks)
@@ -9750,15 +9742,21 @@ flow_dv_translate(struct rte_eth_dev *dev,
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
/* update normal path action resource into last index of array */
sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
flow_items_to_tunnel(items) :
is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
flow_actions_to_tunnel(actions) :
dev_flow->tunnel ? dev_flow->tunnel : NULL;
if (is_tunnel_offload_active(dev)) {
if (dev_flow->tunnel) {
RTE_VERIFY(dev_flow->tof_type ==
MLX5_TUNNEL_OFFLOAD_MISS_RULE);
tunnel = dev_flow->tunnel;
} else {
tunnel = mlx5_get_tof(items, actions,
&dev_flow->tof_type);
dev_flow->tunnel = tunnel;
}
grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
(dev, attr, tunnel, dev_flow->tof_type);
}
mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
(dev, tunnel, attr, items, actions);
ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
&grp_info, error);
if (ret)
Expand All @@ -9770,7 +9768,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
priority = dev_conf->flow_prio - 1;
/* number of actions must be set to 0 in case of dirty stack. */
mhdr_res->actions_num = 0;
if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
/*
* do not add decap action if match rule drops packet
* HW rejects rules with decap & drop
