net/mlx5: fix GRE protocol type translation for Verbs
[ upstream commit 985b479 ]

When an application creates several flows to match on a GRE tunnel
without explicitly specifying the GRE protocol type value in the flow
rules, the PMD translates that to a zero mask.
RDMA-CORE cannot distinguish between the different inner flow types and
produces identical matchers for each zero mask.

The patch extracts the inner header type from the flow rule and forces
it into the GRE protocol type if the application did not specify one.

Fixes: 84c406e ("net/mlx5: add flow translate function")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
getelson-at-mellanox authored and kevintraynor committed Feb 21, 2022
1 parent d8d5417 commit 379079d
Showing 3 changed files with 39 additions and 26 deletions.
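For context, here is a minimal sketch (not part of the commit) of the kind of rules affected: two patterns that differ only in the inner header, neither of which gives an explicit GRE protocol value. Before this fix, both translated to identical Verbs GRE matchers. Specs for the non-GRE items and all attributes/actions are elided.

	#include <rte_flow.h>

	/* GRE tunnel, inner IPv4; the GRE item has no spec/mask, so the
	 * protocol type used to translate to a zero mask. */
	static const struct rte_flow_item pattern_gre_inner_ipv4[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_GRE },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* inner IPv4 */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	/* Same outer layers, inner IPv6. */
	static const struct rte_flow_item pattern_gre_inner_ipv6[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_GRE },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },	/* inner IPv6 */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};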
14 changes: 14 additions & 0 deletions drivers/net/mlx5/mlx5_flow.h

@@ -1450,6 +1450,20 @@ flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
 	return ct;
 }
 
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+		return RTE_ETHER_TYPE_TEB;
+	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+		return RTE_ETHER_TYPE_IPV4;
+	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+		return RTE_ETHER_TYPE_IPV6;
+	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+		return RTE_ETHER_TYPE_MPLS;
+	return 0;
+}
+
 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
			     const struct mlx5_flow_tunnel *tunnel,
			     uint32_t group, uint32_t *table,
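The helper's precedence (inner L2 before inner L3 before MPLS) matters when several inner-layer flags are set. A standalone, illustrative check of that mapping follows; the MLX5_FLOW_LAYER_* bit values below are stand-ins (the real bits are defined elsewhere in mlx5_flow.h), while the EtherType constants match rte_ether.h.

	#include <assert.h>
	#include <stdint.h>

	#define MLX5_FLOW_LAYER_INNER_L2	(UINT64_C(1) << 0)	/* stand-in */
	#define MLX5_FLOW_LAYER_INNER_L3_IPV4	(UINT64_C(1) << 1)	/* stand-in */
	#define MLX5_FLOW_LAYER_INNER_L3_IPV6	(UINT64_C(1) << 2)	/* stand-in */
	#define MLX5_FLOW_LAYER_MPLS		(UINT64_C(1) << 3)	/* stand-in */

	#define RTE_ETHER_TYPE_TEB	0x6558	/* values as in rte_ether.h */
	#define RTE_ETHER_TYPE_IPV4	0x0800
	#define RTE_ETHER_TYPE_IPV6	0x86DD
	#define RTE_ETHER_TYPE_MPLS	0x8847

	static inline uint16_t
	mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
	{
		if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
			return RTE_ETHER_TYPE_TEB;
		else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
			return RTE_ETHER_TYPE_IPV4;
		else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
			return RTE_ETHER_TYPE_IPV6;
		else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
			return RTE_ETHER_TYPE_MPLS;
		return 0;
	}

	int main(void)
	{
		/* An inner L2 header wins over inner L3: the payload is
		 * transparent Ethernet bridging (TEB). */
		assert(mlx5_translate_tunnel_etypes(MLX5_FLOW_LAYER_INNER_L2 |
						    MLX5_FLOW_LAYER_INNER_L3_IPV4) ==
		       RTE_ETHER_TYPE_TEB);
		assert(mlx5_translate_tunnel_etypes(MLX5_FLOW_LAYER_INNER_L3_IPV6) ==
		       RTE_ETHER_TYPE_IPV6);
		/* No recognized inner layer: 0 tells the caller there is
		 * nothing to force. */
		assert(mlx5_translate_tunnel_etypes(0) == 0);
		return 0;
	}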
14 changes: 0 additions & 14 deletions drivers/net/mlx5/mlx5_flow_dv.c

@@ -93,20 +93,6 @@ static int
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
				  uint32_t rix_jump);
 
-static inline uint16_t
-mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
-{
-	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
-		return RTE_ETHER_TYPE_TEB;
-	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
-		return RTE_ETHER_TYPE_IPV4;
-	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
-		return RTE_ETHER_TYPE_IPV6;
-	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
-		return RTE_ETHER_TYPE_MPLS;
-	return 0;
-}
-
 static int16_t
 flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
 {
37 changes: 25 additions & 12 deletions drivers/net/mlx5/mlx5_flow_verbs.c

@@ -907,6 +907,7 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
		.size = size,
	};
 #else
+	static const struct rte_flow_item_gre empty_gre = {0,};
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
@@ -915,17 +916,29 @@
		.size = size,
	};
 
-	if (!mask)
-		mask = &rte_flow_item_gre_mask;
-	if (spec) {
-		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
-		tunnel.val.protocol = spec->protocol;
-		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
-		tunnel.mask.protocol = mask->protocol;
-		/* Remove unwanted bits from values. */
-		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+	if (!spec) {
+		spec = &empty_gre;
+		mask = &empty_gre;
+	} else {
+		if (!mask)
+			mask = &rte_flow_item_gre_mask;
+	}
+	tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+	tunnel.val.protocol = spec->protocol;
+	tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+	tunnel.mask.protocol = mask->protocol;
+	/* Remove unwanted bits from values. */
+	tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+	tunnel.val.key &= tunnel.mask.key;
+	if (tunnel.mask.protocol) {
		tunnel.val.protocol &= tunnel.mask.protocol;
-		tunnel.val.key &= tunnel.mask.key;
+	} else {
+		tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
+		if (tunnel.val.protocol) {
+			tunnel.mask.protocol = 0xFFFF;
+			tunnel.val.protocol =
+				rte_cpu_to_be_16(tunnel.val.protocol);
+		}
	}
 #endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
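The behavioral change in the hunk above, reduced to a standalone sketch: when the application supplies no protocol mask, the value derived from the inner layer is forced with a full mask, in network byte order. The struct and helper below are illustrative stand-ins rather than driver types, and htons() stands in for rte_cpu_to_be_16().

	#include <assert.h>
	#include <stdint.h>
	#include <arpa/inet.h>	/* htons() stands in for rte_cpu_to_be_16() */

	struct gre_proto { uint16_t val; uint16_t mask; };

	static void
	force_gre_protocol(struct gre_proto *p, uint16_t inner_etype)
	{
		if (p->mask) {
			p->val &= p->mask;	/* application chose the value */
		} else if (inner_etype) {
			p->mask = 0xFFFF;	/* force an exact match */
			p->val = htons(inner_etype);
		}
	}

	int main(void)
	{
		/* GRE item with no protocol given, inner IPv4 detected. */
		struct gre_proto p = { .val = 0, .mask = 0 };
		force_gre_protocol(&p, 0x0800 /* RTE_ETHER_TYPE_IPV4 */);
		assert(p.mask == 0xFFFF && p.val == htons(0x0800));
		return 0;
	}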
@@ -1803,8 +1816,6 @@ flow_verbs_translate(struct rte_eth_dev *dev,
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
-			flow_verbs_translate_item_gre(dev_flow, items,
-						      item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_GRE;
			break;
@@ -1820,6 +1831,8 @@ flow_verbs_translate(struct rte_eth_dev *dev,
						  NULL, "item not supported");
		}
	}
+	if (item_flags & MLX5_FLOW_LAYER_GRE)
+		flow_verbs_translate_item_gre(dev_flow, items, item_flags);
	dev_flow->handle->layers = item_flags;
	/* Other members of attr will be ignored. */
	dev_flow->verbs.attr.priority =
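The last two hunks defer GRE translation until after the item loop, so that mlx5_translate_tunnel_etypes() sees the complete item_flags: the MLX5_FLOW_LAYER_INNER_* bits are only accumulated after the GRE item itself has been visited. A toy sketch of that ordering, with stand-in flag values:

	#include <inttypes.h>
	#include <stdio.h>

	enum {			/* stand-in pattern-item flags */
		FLAG_GRE	= 1 << 0,
		FLAG_INNER_IPV4	= 1 << 1,
	};

	int main(void)
	{
		const uint64_t pattern[] = { FLAG_GRE, FLAG_INNER_IPV4 };
		uint64_t item_flags = 0;

		for (size_t i = 0; i < 2; i++) {
			if (pattern[i] == FLAG_GRE)
				/* An in-loop translation would run here and
				 * see flags == 0x0: no inner layer yet. */
				printf("at GRE item: flags=0x%" PRIx64 "\n",
				       item_flags);
			item_flags |= pattern[i];
		}
		/* After the loop the inner-layer bit is present: flags=0x3. */
		printf("after loop:  flags=0x%" PRIx64 "\n", item_flags);
		return 0;
	}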
