Skip to content

Commit

Permalink
net/mlx5: fix GRE protocol type translation for Verbs
Browse files Browse the repository at this point in the history
[ upstream commit 985b479 ]

When an application creates several flows to match on a GRE tunnel without
explicitly specifying the GRE protocol type value in the flow rules, the
PMD translates that to a zero mask.
RDMA-CORE cannot distinguish between different inner flow types and
produces identical matchers for each zero mask.

The patch extracts the inner header type from the flow rule and forces it
into the GRE protocol type if the application did not specify one.

Fixes: 84c406e ("net/mlx5: add flow translate function")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
  • Loading branch information
getelson-at-mellanox authored and bluca committed Feb 14, 2022
1 parent a5edf85 commit 0374774
Show file tree
Hide file tree
Showing 3 changed files with 39 additions and 26 deletions.
14 changes: 14 additions & 0 deletions drivers/net/mlx5/mlx5_flow.h
Original file line number Diff line number Diff line change
Expand Up @@ -1287,6 +1287,20 @@ tunnel_use_standard_attr_group_translate
return verdict;
}

/*
 * Derive the Ethernet type to be carried in the tunnel protocol field
 * from the inner-layer bits of @pattern_flags.
 *
 * The mapping is priority ordered: an inner L2 header (TEB) wins over
 * inner L3 (IPv4, then IPv6), which wins over MPLS.  A return value of
 * 0 means no inner layer flag was present in @pattern_flags.
 */
static inline uint16_t
mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
{
	static const struct {
		uint64_t layer;  /* inner-layer pattern flag */
		uint16_t etype;  /* matching Ethernet type */
	} etype_map[] = {
		{ MLX5_FLOW_LAYER_INNER_L2, RTE_ETHER_TYPE_TEB },
		{ MLX5_FLOW_LAYER_INNER_L3_IPV4, RTE_ETHER_TYPE_IPV4 },
		{ MLX5_FLOW_LAYER_INNER_L3_IPV6, RTE_ETHER_TYPE_IPV6 },
		{ MLX5_FLOW_LAYER_MPLS, RTE_ETHER_TYPE_MPLS },
	};
	unsigned int i;

	for (i = 0; i < sizeof(etype_map) / sizeof(etype_map[0]); i++)
		if (pattern_flags & etype_map[i].layer)
			return etype_map[i].etype;
	return 0;
}

int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
Expand Down
14 changes: 0 additions & 14 deletions drivers/net/mlx5/mlx5_flow_dv.c
Original file line number Diff line number Diff line change
Expand Up @@ -84,20 +84,6 @@ flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

/*
 * Map the inner-layer bits of @pattern_flags to the Ethernet type that
 * the tunnel protocol field must carry.  Checks are priority ordered:
 * inner L2 (TEB) over inner IPv4 over inner IPv6 over MPLS; returns 0
 * when no inner layer flag is set.
 * NOTE(review): these are the lines this commit removes from
 * mlx5_flow_dv.c — the definition moves verbatim to mlx5_flow.h so the
 * Verbs flow engine can share it.
 */
static inline uint16_t
mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
{
if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
return RTE_ETHER_TYPE_TEB;
else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
return RTE_ETHER_TYPE_IPV4;
else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
return RTE_ETHER_TYPE_IPV6;
else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
return RTE_ETHER_TYPE_MPLS;
return 0;
}

/**
* Initialize flow attributes structure according to flow items' types.
*
Expand Down
37 changes: 25 additions & 12 deletions drivers/net/mlx5/mlx5_flow_verbs.c
Original file line number Diff line number Diff line change
Expand Up @@ -960,6 +960,7 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
.size = size,
};
#else
static const struct rte_flow_item_gre empty_gre = {0,};
const struct rte_flow_item_gre *spec = item->spec;
const struct rte_flow_item_gre *mask = item->mask;
unsigned int size = sizeof(struct ibv_flow_spec_gre);
Expand All @@ -968,17 +969,29 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
.size = size,
};

if (!mask)
mask = &rte_flow_item_gre_mask;
if (spec) {
tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
tunnel.val.protocol = spec->protocol;
tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
tunnel.mask.protocol = mask->protocol;
/* Remove unwanted bits from values. */
tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
if (!spec) {
spec = &empty_gre;
mask = &empty_gre;
} else {
if (!mask)
mask = &rte_flow_item_gre_mask;
}
tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
tunnel.val.protocol = spec->protocol;
tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
tunnel.mask.protocol = mask->protocol;
/* Remove unwanted bits from values. */
tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
tunnel.val.key &= tunnel.mask.key;
if (tunnel.mask.protocol) {
tunnel.val.protocol &= tunnel.mask.protocol;
tunnel.val.key &= tunnel.mask.key;
} else {
tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
if (tunnel.val.protocol) {
tunnel.mask.protocol = 0xFFFF;
tunnel.val.protocol =
rte_cpu_to_be_16(tunnel.val.protocol);
}
}
#endif
if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
Expand Down Expand Up @@ -1846,8 +1859,6 @@ flow_verbs_translate(struct rte_eth_dev *dev,
item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
flow_verbs_translate_item_gre(dev_flow, items,
item_flags);
subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
item_flags |= MLX5_FLOW_LAYER_GRE;
break;
Expand All @@ -1863,6 +1874,8 @@ flow_verbs_translate(struct rte_eth_dev *dev,
NULL, "item not supported");
}
}
if (item_flags & MLX5_FLOW_LAYER_GRE)
flow_verbs_translate_item_gre(dev_flow, items, item_flags);
dev_flow->handle->layers = item_flags;
/* Other members of attr will be ignored. */
dev_flow->verbs.attr.priority =
Expand Down

0 comments on commit 0374774

Please sign in to comment.