Commit 58a86c4

Merge branch 'mlx5-fixes'
Saeed Mahameed says:

====================
Mellanox 100G mlx5 fixes 2016-10-25

This series contains some bug fixes for the mlx5 core and mlx5e driver.

From Daniel:
- Cache line size determination at runtime: instead of the hard-coded
  L1_CACHE_BYTES value, use cache_line_size()
- Always query HCA caps after setting them, even on the reset flow

From Mohamad:
- Reorder netdev cleanup to unregister the netdev before detaching it,
  so the kernel does not complain about open resources such as vlans
- Change the acl enable prototype to return status, for better error
  resiliency
- Clear the health sick bit when starting the health poll after a reset flow
- Fix a race between the PCI error handlers and the health work
- PCI error recovery health care simulation, for the case where the kernel
  PCI error handlers are not triggered for some internal firmware errors

From Noa:
- Avoid passing dma address 0 to firmware when mapping system pages
  to the firmware

From Paul: some straightforward flow steering fixes
- Keep the autogroups list ordered
- Fix autogroups groups num not decreasing
- Correctly initialize last use of flow counters

From Saeed:
- Choose the nearest LRO timeout to the wanted one instead of blindly
  choosing "dev_cap.lro_timeout[2]"

This series has no conflict with the for-next pull request posted
earlier today ("Mellanox mlx5 core driver updates 2016-10-25").
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents (bf911e9, 6b27619), commit 58a86c4

14 files changed, +210 −69 lines

drivers/infiniband/hw/mlx5/main.c

Lines changed: 1 addition & 1 deletion
@@ -1019,7 +1019,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
 	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
 		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
-	resp.cache_line_size = L1_CACHE_BYTES;
+	resp.cache_line_size = cache_line_size();
 	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
 	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
 	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
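
A note on the change above: L1_CACHE_BYTES is fixed when the kernel is built and may not match the line size of the CPU the kernel actually runs on (some platforms use 128-byte lines), so the value reported to user space through resp.cache_line_size could be wrong; cache_line_size() returns the size detected at runtime. As a rough userspace analogue only, assuming glibc (the driver itself uses the kernel's cache_line_size() helper, not this), the runtime line size can be queried with sysconf():

/* Userspace sketch, not driver code: the cache line size is a runtime
 * property of the CPU, queried here with sysconf().
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long line = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);

	if (line <= 0)		/* sysconf may return 0 or -1 if unknown */
		line = 64;	/* fall back to a common default */

	printf("runtime L1 dcache line size: %ld bytes\n", line);
	return 0;
}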

drivers/infiniband/hw/mlx5/qp.c

Lines changed: 0 additions & 1 deletion
@@ -52,7 +52,6 @@ enum {
 
 enum {
 	MLX5_IB_SQ_STRIDE	= 6,
-	MLX5_IB_CACHE_LINE_SIZE	= 64,
 };
 
 static const u32 mlx5_ib_opcode[] = {

drivers/net/ethernet/mellanox/mlx5/core/alloc.c

Lines changed: 26 additions & 5 deletions
@@ -41,6 +41,13 @@
 
 #include "mlx5_core.h"
 
+struct mlx5_db_pgdir {
+	struct list_head list;
+	unsigned long *bitmap;
+	__be32 *db_page;
+	dma_addr_t db_dma;
+};
+
 /* Handling for queue buffers -- we allocate a bunch of memory and
  * register it in a memory region at HCA virtual address 0.
  */
@@ -102,17 +109,28 @@ EXPORT_SYMBOL_GPL(mlx5_buf_free);
 static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
 						 int node)
 {
+	u32 db_per_page = PAGE_SIZE / cache_line_size();
 	struct mlx5_db_pgdir *pgdir;
 
 	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
 	if (!pgdir)
 		return NULL;
 
-	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
+	pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
+				sizeof(unsigned long),
+				GFP_KERNEL);
+
+	if (!pgdir->bitmap) {
+		kfree(pgdir);
+		return NULL;
+	}
+
+	bitmap_fill(pgdir->bitmap, db_per_page);
 
 	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
 						       &pgdir->db_dma, node);
 	if (!pgdir->db_page) {
+		kfree(pgdir->bitmap);
 		kfree(pgdir);
 		return NULL;
 	}
@@ -123,18 +141,19 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
 static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
 				    struct mlx5_db *db)
 {
+	u32 db_per_page = PAGE_SIZE / cache_line_size();
 	int offset;
 	int i;
 
-	i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
-	if (i >= MLX5_DB_PER_PAGE)
+	i = find_first_bit(pgdir->bitmap, db_per_page);
+	if (i >= db_per_page)
 		return -ENOMEM;
 
 	__clear_bit(i, pgdir->bitmap);
 
 	db->u.pgdir = pgdir;
 	db->index = i;
-	offset = db->index * L1_CACHE_BYTES;
+	offset = db->index * cache_line_size();
 	db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
 	db->dma = pgdir->db_dma + offset;
 
@@ -181,14 +200,16 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc);
 
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
 {
+	u32 db_per_page = PAGE_SIZE / cache_line_size();
 	mutex_lock(&dev->priv.pgdir_mutex);
 
 	__set_bit(db->index, db->u.pgdir->bitmap);
 
-	if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
+	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
 		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
 				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
 		list_del(&db->u.pgdir->list);
+		kfree(db->u.pgdir->bitmap);
 		kfree(db->u.pgdir);
 	}
 
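The pgdir bitmap above is now sized from the runtime cache line size instead of the old MLX5_DB_PER_PAGE constant. A minimal standalone sketch of the arithmetic, assuming a 4 KB page (the driver takes these values from PAGE_SIZE and cache_line_size()):

/* Sketch of the doorbell-per-page arithmetic: with a 4 KB page,
 * 64-byte cache lines give 64 doorbell slots per page and 128-byte
 * lines give 32; BITS_TO_LONGS() rounds up to whole longs for the
 * tracking bitmap allocation.
 */
#include <stdio.h>

#define BITS_PER_LONG	 (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long line_sizes[] = { 64, 128 };

	for (int i = 0; i < 2; i++) {
		unsigned long db_per_page = page_size / line_sizes[i];

		printf("line %lu B: %lu doorbells/page, bitmap of %lu long(s)\n",
		       line_sizes[i], db_per_page,
		       (unsigned long)BITS_TO_LONGS(db_per_page));
	}
	return 0;
}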

drivers/net/ethernet/mellanox/mlx5/core/en.h

Lines changed: 5 additions & 0 deletions
@@ -85,6 +85,9 @@
 #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
 
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
+#define MLX5E_DEFAULT_LRO_TIMEOUT 32
+#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
@@ -221,6 +224,7 @@ struct mlx5e_params {
 	struct ieee_ets ets;
 #endif
 	bool rx_am_enabled;
+	u32 lro_timeout;
 };
 
 struct mlx5e_tstamp {
@@ -888,5 +892,6 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 struct rtnl_link_stats64 *
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
 
 #endif /* __MLX5_EN_H__ */

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

Lines changed: 17 additions & 4 deletions
@@ -1971,9 +1971,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
 	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
 		 (priv->params.lro_wqe_sz -
 		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
-	MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
-		 MLX5_CAP_ETH(priv->mdev,
-			      lro_timer_supported_periods[2]));
+	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }
 
 void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
@@ -3401,6 +3399,18 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
 	}
 }
 
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+{
+	int i;
+
+	/* The supported periods are organized in ascending order */
+	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
+		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
+			break;
+
+	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
+}
+
 static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
 					struct net_device *netdev,
 					const struct mlx5e_profile *profile,
@@ -3419,6 +3429,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
 	priv->profile = profile;
 	priv->ppriv = ppriv;
 
+	priv->params.lro_timeout =
+		mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+
 	priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
 
 	/* set CQE compression */
@@ -4035,7 +4048,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
 	const struct mlx5e_profile *profile = priv->profile;
 	struct net_device *netdev = priv->netdev;
 
-	unregister_netdev(netdev);
 	destroy_workqueue(priv->wq);
 	if (profile->cleanup)
 		profile->cleanup(priv);
@@ -4052,6 +4064,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
 	for (vport = 1; vport < total_vfs; vport++)
 		mlx5_eswitch_unregister_vport_rep(esw, vport);
 
+	unregister_netdev(priv->netdev);
 	mlx5e_detach(mdev, vpriv);
 	mlx5e_destroy_netdev(mdev, priv);
 }
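
The selection logic added in mlx5e_choose_lro_timeout() can be exercised in isolation: walk the ascending list of supported periods and stop at the first one that is at least the wanted value; if none qualifies, the largest entry is used. A minimal userspace sketch with a made-up set of supported periods (the real values come from the device capabilities via MLX5_CAP_ETH):

/* Standalone sketch of the nearest-LRO-timeout selection; the period
 * values below are invented for illustration.
 */
#include <stdio.h>

#define LRO_TIMEOUT_ARR_SIZE 4

static unsigned int choose_lro_timeout(const unsigned int *supported,
				       unsigned int wanted)
{
	int i;

	/* supported[] is assumed to be in ascending order */
	for (i = 0; i < LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (supported[i] >= wanted)
			break;

	return supported[i];
}

int main(void)
{
	const unsigned int supported[LRO_TIMEOUT_ARR_SIZE] = { 8, 16, 32, 1024 };

	printf("%u\n", choose_lro_timeout(supported, 32));   /* -> 32 */
	printf("%u\n", choose_lro_timeout(supported, 20));   /* -> 32 */
	printf("%u\n", choose_lro_timeout(supported, 4096)); /* -> 1024 */
	return 0;
}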

drivers/net/ethernet/mellanox/mlx5/core/en_rep.c

Lines changed: 1 addition & 0 deletions
@@ -457,6 +457,7 @@ void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
 	struct mlx5e_priv *priv = rep->priv_data;
 	struct net_device *netdev = priv->netdev;
 
+	unregister_netdev(netdev);
 	mlx5e_detach_netdev(esw->dev, netdev);
 	mlx5e_destroy_netdev(esw->dev, priv);
 }

drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

Lines changed: 34 additions & 16 deletions
@@ -931,8 +931,8 @@ static void esw_vport_change_handler(struct work_struct *work)
 	mutex_unlock(&esw->state_lock);
 }
 
-static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
-					struct mlx5_vport *vport)
+static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+				       struct mlx5_vport *vport)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	struct mlx5_flow_group *vlan_grp = NULL;
@@ -949,22 +949,24 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	int table_size = 2;
 	int err = 0;
 
-	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
-	    !IS_ERR_OR_NULL(vport->egress.acl))
-		return;
+	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+		return -EOPNOTSUPP;
+
+	if (!IS_ERR_OR_NULL(vport->egress.acl))
+		return 0;
 
 	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
 		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
 
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-		return;
+		return -EIO;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
 	if (!flow_group_in)
-		return;
+		return -ENOMEM;
 
 	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
 	if (IS_ERR(acl)) {
@@ -1009,6 +1011,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 		mlx5_destroy_flow_group(vlan_grp);
 	if (err && !IS_ERR_OR_NULL(acl))
 		mlx5_destroy_flow_table(acl);
+	return err;
 }
 
 static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
@@ -1041,8 +1044,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
 	vport->egress.acl = NULL;
 }
 
-static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
-					 struct mlx5_vport *vport)
+static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+					struct mlx5_vport *vport)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	struct mlx5_core_dev *dev = esw->dev;
@@ -1063,22 +1066,24 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	int table_size = 4;
 	int err = 0;
 
-	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
-	    !IS_ERR_OR_NULL(vport->ingress.acl))
-		return;
+	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+		return -EOPNOTSUPP;
+
+	if (!IS_ERR_OR_NULL(vport->ingress.acl))
+		return 0;
 
 	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
 		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
 
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-		return;
+		return -EIO;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
 	if (!flow_group_in)
-		return;
+		return -ENOMEM;
 
 	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
 	if (IS_ERR(acl)) {
@@ -1167,6 +1172,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	}
 
 	kvfree(flow_group_in);
+	return err;
 }
 
 static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1225,7 +1231,13 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 		return 0;
 	}
 
-	esw_vport_enable_ingress_acl(esw, vport);
+	err = esw_vport_enable_ingress_acl(esw, vport);
+	if (err) {
+		mlx5_core_warn(esw->dev,
+			       "failed to enable ingress acl (%d) on vport[%d]\n",
+			       err, vport->vport);
+		return err;
+	}
 
 	esw_debug(esw->dev,
 		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
@@ -1299,7 +1311,13 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 		return 0;
 	}
 
-	esw_vport_enable_egress_acl(esw, vport);
+	err = esw_vport_enable_egress_acl(esw, vport);
+	if (err) {
+		mlx5_core_warn(esw->dev,
+			       "failed to enable egress acl (%d) on vport[%d]\n",
+			       err, vport->vport);
+		return err;
+	}
 
 	esw_debug(esw->dev,
 		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",

drivers/net/ethernet/mellanox/mlx5/core/fs_core.c

Lines changed: 6 additions & 3 deletions
@@ -436,6 +436,9 @@ static void del_flow_group(struct fs_node *node)
 	fs_get_obj(ft, fg->node.parent);
 	dev = get_dev(&ft->node);
 
+	if (ft->autogroup.active)
+		ft->autogroup.num_groups--;
+
 	if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
 		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
 			       fg->id, ft->id);
@@ -879,7 +882,7 @@ static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *
 	tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
 	tree_add_node(&fg->node, &ft->node);
 	/* Add node to group list */
-	list_add(&fg->node.list, ft->node.children.prev);
+	list_add(&fg->node.list, prev_fg);
 
 	return fg;
 }
@@ -893,7 +896,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
 		return ERR_PTR(-EPERM);
 
 	lock_ref_node(&ft->node);
-	fg = create_flow_group_common(ft, fg_in, &ft->node.children, false);
+	fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
 	unlock_ref_node(&ft->node);
 
 	return fg;
@@ -1012,7 +1015,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
 						u32 *match_criteria)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct list_head *prev = &ft->node.children;
+	struct list_head *prev = ft->node.children.prev;
 	unsigned int candidate_index = 0;
 	struct mlx5_flow_group *fg;
 	void *match_criteria_addr;
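
The ordering fixes above rely on list_add() semantics: list_add(new, head) links new right after head, so passing the list head prepends while passing head->prev (the current tail) appends, which keeps autogroups in creation order. A simplified sketch of that distinction with a local insert-after helper (not <linux/list.h> itself):

/* Minimal circular doubly linked list with an insert-after primitive
 * mirroring list_add(new, head): inserting after head->prev (the tail)
 * appends, preserving creation order.
 */
#include <stdio.h>

struct node { int id; struct node *prev, *next; };

static void list_add(struct node *new, struct node *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

int main(void)
{
	struct node head = { 0, &head, &head };
	struct node a = { 1 }, b = { 2 }, c = { 3 };

	list_add(&a, head.prev);	/* append: 1 */
	list_add(&b, head.prev);	/* append: 1 2 */
	list_add(&c, head.prev);	/* append: 1 2 3, creation order kept */

	for (struct node *n = head.next; n != &head; n = n->next)
		printf("%d ", n->id);
	printf("\n");			/* prints: 1 2 3 */
	return 0;
}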

drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c

Lines changed: 1 addition & 0 deletions
@@ -218,6 +218,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
 		goto err_out;
 
 	if (aging) {
+		counter->cache.lastuse = jiffies;
 		counter->aging = true;
 
 		spin_lock(&fc_stats->addlist_lock);
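
A note on the one-liner above: cache.lastuse feeds the flow-aging logic, and without this initialization a freshly created counter keeps lastuse == 0, so an idle-time check based on jiffies would see it as idle since boot. An illustrative sketch of that effect (the check below is a simplification, not the driver's actual aging code):

/* Illustrative only: with lastuse left at 0 the computed idle time is
 * the whole elapsed jiffies count, while lastuse = jiffies makes a new
 * counter look recently used.
 */
#include <stdio.h>

static unsigned long idle_ticks(unsigned long now, unsigned long lastuse)
{
	return now - lastuse;	/* unsigned arithmetic tolerates wraparound */
}

int main(void)
{
	unsigned long jiffies = 1000000;	/* pretend current time */

	printf("lastuse = 0:       idle for %lu ticks\n",
	       idle_ticks(jiffies, 0));
	printf("lastuse = jiffies: idle for %lu ticks\n",
	       idle_ticks(jiffies, jiffies));
	return 0;
}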
