net/mlx5e: Fix interoperability between XSK and ICOSQ recovery flow
[ Upstream commit 17958d7 ]

Both regular RQ and XSKRQ use the same ICOSQ for UMRs. When doing
recovery for the ICOSQ, don't forget to deactivate XSKRQ.

XSK can be opened and closed while channels are active, so a new mutex
prevents the ICOSQ recovery from running at the same time. The ICOSQ
recovery deactivates and reactivates XSKRQ, so any parallel change in
XSK state would break consistency. As the regular RQ is running, it's
not enough to just flush the recovery work, because it can be
rescheduled.

Fixes: be5323c ("net/mlx5e: Report and recover from CQE error on ICOSQ")
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
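
For illustration only (not part of the patch): a minimal standalone C sketch of the locking pattern this commit introduces. The names below (struct channel, icosq_recover, set_xsk_enabled) are simplified stand-ins rather than mlx5e code, and a pthread mutex stands in for the kernel mutex. The point is that XSK enable/disable and ICOSQ recovery take the same lock, so recovery samples a stable XSKRQ state and cannot be interleaved with an XSK state change.

/* Standalone sketch of the icosq_recovery_lock pattern (illustrative only).
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct channel {
	pthread_mutex_t icosq_recovery_lock;	/* stands in for c->icosq_recovery_lock */
	bool xskrq_enabled;			/* stands in for MLX5E_RQ_STATE_ENABLED on xskrq */
};

/* Stands in for the ICOSQ recovery work: decide once, under the lock,
 * whether the XSKRQ also has to be deactivated and reactivated.
 */
static void icosq_recover(struct channel *c)
{
	bool xsk;

	pthread_mutex_lock(&c->icosq_recovery_lock);
	xsk = c->xskrq_enabled;
	/* ...deactivate RQ (and XSKRQ if xsk), reset the ICOSQ, reactivate... */
	printf("recovery restarted RQ%s\n", xsk ? " and XSKRQ" : "");
	pthread_mutex_unlock(&c->icosq_recovery_lock);
}

/* Stands in for mlx5e_activate_xsk()/mlx5e_deactivate_xsk(): the XSKRQ
 * state flip is wrapped in suspend/resume, i.e. the same mutex.
 */
static void set_xsk_enabled(struct channel *c, bool on)
{
	pthread_mutex_lock(&c->icosq_recovery_lock);	/* suspend recovery */
	c->xskrq_enabled = on;
	pthread_mutex_unlock(&c->icosq_recovery_lock);	/* resume recovery */
}

int main(void)
{
	struct channel c = { PTHREAD_MUTEX_INITIALIZER, false };

	set_xsk_enabled(&c, true);
	icosq_recover(&c);	/* sees XSK enabled, restarts both RQs */
	set_xsk_enabled(&c, false);
	icosq_recover(&c);	/* sees XSK disabled, leaves XSKRQ alone */
	return 0;
}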
nvmmax authored and gregkh committed Jan 5, 2022
1 parent 07f13d5 commit 144dbcc
Showing 5 changed files with 58 additions and 4 deletions.
2 changes: 2 additions & 0 deletions drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -727,6 +727,8 @@ struct mlx5e_channel {
 	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
 	int ix;
 	int cpu;
+	/* Sync between icosq recovery and XSK enable/disable. */
+	struct mutex icosq_recovery_lock;
 };
 
 struct mlx5e_ptp;
2 changes: 2 additions & 0 deletions drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -30,6 +30,8 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
 void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq);
 void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
 void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c);
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c);
 
 #define MLX5E_REPORTER_PER_Q_MAX_LEN 256
 #define MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC 2000
35 changes: 34 additions & 1 deletion drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -59,6 +59,7 @@ static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
 
 static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 {
+	struct mlx5e_rq *xskrq = NULL;
 	struct mlx5_core_dev *mdev;
 	struct mlx5e_icosq *icosq;
 	struct net_device *dev;
@@ -67,7 +68,13 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 	int err;
 
 	icosq = ctx;
+
+	mutex_lock(&icosq->channel->icosq_recovery_lock);
+
+	/* mlx5e_close_rq cancels this work before RQ and ICOSQ are killed. */
 	rq = &icosq->channel->rq;
+	if (test_bit(MLX5E_RQ_STATE_ENABLED, &icosq->channel->xskrq.state))
+		xskrq = &icosq->channel->xskrq;
 	mdev = icosq->channel->mdev;
 	dev = icosq->channel->netdev;
 	err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
@@ -81,6 +88,9 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 		goto out;
 
 	mlx5e_deactivate_rq(rq);
+	if (xskrq)
+		mlx5e_deactivate_rq(xskrq);
+
 	err = mlx5e_wait_for_icosq_flush(icosq);
 	if (err)
 		goto out;
@@ -94,15 +104,28 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 		goto out;
 
 	mlx5e_reset_icosq_cc_pc(icosq);
+
 	mlx5e_free_rx_in_progress_descs(rq);
+	if (xskrq)
+		mlx5e_free_rx_in_progress_descs(xskrq);
+
 	clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
 	mlx5e_activate_icosq(icosq);
-	mlx5e_activate_rq(rq);
 
+	mlx5e_activate_rq(rq);
 	rq->stats->recover++;
+
+	if (xskrq) {
+		mlx5e_activate_rq(xskrq);
+		xskrq->stats->recover++;
+	}
+
+	mutex_unlock(&icosq->channel->icosq_recovery_lock);
+
 	return 0;
 out:
 	clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+	mutex_unlock(&icosq->channel->icosq_recovery_lock);
 	return err;
 }
 
@@ -703,6 +726,16 @@ void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
 	mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
 }
 
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c)
+{
+	mutex_lock(&c->icosq_recovery_lock);
+}
+
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
+{
+	mutex_unlock(&c->icosq_recovery_lock);
+}
+
 static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
 	.name = "rx",
 	.recover = mlx5e_rx_reporter_recover,
16 changes: 15 additions & 1 deletion drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -4,6 +4,7 @@
 #include "setup.h"
 #include "en/params.h"
 #include "en/txrx.h"
+#include "en/health.h"
 
 /* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
  * change unexpectedly, and mlx5e has a minimum valid stride size for striding
@@ -170,7 +171,13 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
 
 void mlx5e_activate_xsk(struct mlx5e_channel *c)
 {
+	/* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid
+	 * activating XSKRQ in the middle of recovery.
+	 */
+	mlx5e_reporter_icosq_suspend_recovery(c);
 	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+	mlx5e_reporter_icosq_resume_recovery(c);
+
 	/* TX queue is created active. */
 
 	spin_lock_bh(&c->async_icosq_lock);
@@ -180,6 +187,13 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
 
 void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
 {
-	mlx5e_deactivate_rq(&c->xskrq);
+	/* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the
+	 * middle of recovery. Suspend the recovery to avoid it.
+	 */
+	mlx5e_reporter_icosq_suspend_recovery(c);
+	clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+	mlx5e_reporter_icosq_resume_recovery(c);
+	synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
+
 	/* TX queue is disabled on close. */
 }
7 changes: 5 additions & 2 deletions drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -911,8 +911,6 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
 	cancel_work_sync(&rq->dim.work);
-	if (rq->icosq)
-		cancel_work_sync(&rq->icosq->recover_work);
 	cancel_work_sync(&rq->recover_work);
 	mlx5e_destroy_rq(rq);
 	mlx5e_free_rx_descs(rq);
@@ -1875,6 +1873,8 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 	if (err)
 		goto err_close_xdpsq_cq;
 
+	mutex_init(&c->icosq_recovery_lock);
+
 	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
 	if (err)
 		goto err_close_async_icosq;
@@ -1943,9 +1943,12 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
 	mlx5e_close_xdpsq(&c->xdpsq);
 	if (c->xdp)
 		mlx5e_close_xdpsq(&c->rq_xdpsq);
+	/* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
+	cancel_work_sync(&c->icosq.recover_work);
 	mlx5e_close_rq(&c->rq);
 	mlx5e_close_sqs(c);
 	mlx5e_close_icosq(&c->icosq);
+	mutex_destroy(&c->icosq_recovery_lock);
 	mlx5e_close_icosq(&c->async_icosq);
 	if (c->xdp)
 		mlx5e_close_cq(&c->rq_xdpsq.cq);
