Skip to content

Commit 58db728

Browse files
shayshyi authored and Saeed Mahameed committed
net/mlx5: Re-organize mlx5_cmd struct
Downstream patch will split mlx5_cmd_init() into probe and reload routines. As a preparation, organize the mlx5_cmd struct so that any field that will be used in the reload routine is grouped in a new nested struct. Signed-off-by: Shay Drory <shayd@nvidia.com> Reviewed-by: Moshe Shemesh <moshe@nvidia.com> Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
1 parent e2bb798 commit 58db728

File tree

3 files changed

+60
-59
lines changed

3 files changed

+60
-59
lines changed

drivers/net/ethernet/mellanox/mlx5/core/cmd.c

Lines changed: 47 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -162,18 +162,18 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd)
162162
int ret;
163163

164164
spin_lock_irqsave(&cmd->alloc_lock, flags);
165-
ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
166-
if (ret < cmd->max_reg_cmds)
167-
clear_bit(ret, &cmd->bitmask);
165+
ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
166+
if (ret < cmd->vars.max_reg_cmds)
167+
clear_bit(ret, &cmd->vars.bitmask);
168168
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
169169

170-
return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
170+
return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
171171
}
172172

173173
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
174174
{
175175
lockdep_assert_held(&cmd->alloc_lock);
176-
set_bit(idx, &cmd->bitmask);
176+
set_bit(idx, &cmd->vars.bitmask);
177177
}
178178

179179
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
@@ -192,7 +192,7 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
192192

193193
if (ent->idx >= 0) {
194194
cmd_free_index(cmd, ent->idx);
195-
up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
195+
up(ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem);
196196
}
197197

198198
cmd_free_ent(ent);
@@ -202,7 +202,7 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
202202

203203
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
204204
{
205-
return cmd->cmd_buf + (idx << cmd->log_stride);
205+
return cmd->cmd_buf + (idx << cmd->vars.log_stride);
206206
}
207207

208208
static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
@@ -974,7 +974,7 @@ static void cmd_work_handler(struct work_struct *work)
974974
cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
975975

976976
complete(&ent->handling);
977-
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
977+
sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
978978
down(sem);
979979
if (!ent->page_queue) {
980980
alloc_ret = cmd_alloc_index(cmd);
@@ -994,9 +994,9 @@ static void cmd_work_handler(struct work_struct *work)
994994
}
995995
ent->idx = alloc_ret;
996996
} else {
997-
ent->idx = cmd->max_reg_cmds;
997+
ent->idx = cmd->vars.max_reg_cmds;
998998
spin_lock_irqsave(&cmd->alloc_lock, flags);
999-
clear_bit(ent->idx, &cmd->bitmask);
999+
clear_bit(ent->idx, &cmd->vars.bitmask);
10001000
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
10011001
}
10021002

@@ -1572,31 +1572,31 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
15721572
struct mlx5_cmd *cmd = &dev->cmd;
15731573
int i;
15741574

1575-
for (i = 0; i < cmd->max_reg_cmds; i++)
1576-
down(&cmd->sem);
1577-
down(&cmd->pages_sem);
1575+
for (i = 0; i < cmd->vars.max_reg_cmds; i++)
1576+
down(&cmd->vars.sem);
1577+
down(&cmd->vars.pages_sem);
15781578

15791579
cmd->allowed_opcode = opcode;
15801580

1581-
up(&cmd->pages_sem);
1582-
for (i = 0; i < cmd->max_reg_cmds; i++)
1583-
up(&cmd->sem);
1581+
up(&cmd->vars.pages_sem);
1582+
for (i = 0; i < cmd->vars.max_reg_cmds; i++)
1583+
up(&cmd->vars.sem);
15841584
}
15851585

15861586
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
15871587
{
15881588
struct mlx5_cmd *cmd = &dev->cmd;
15891589
int i;
15901590

1591-
for (i = 0; i < cmd->max_reg_cmds; i++)
1592-
down(&cmd->sem);
1593-
down(&cmd->pages_sem);
1591+
for (i = 0; i < cmd->vars.max_reg_cmds; i++)
1592+
down(&cmd->vars.sem);
1593+
down(&cmd->vars.pages_sem);
15941594

15951595
cmd->mode = mode;
15961596

1597-
up(&cmd->pages_sem);
1598-
for (i = 0; i < cmd->max_reg_cmds; i++)
1599-
up(&cmd->sem);
1597+
up(&cmd->vars.pages_sem);
1598+
for (i = 0; i < cmd->vars.max_reg_cmds; i++)
1599+
up(&cmd->vars.sem);
16001600
}
16011601

16021602
static int cmd_comp_notifier(struct notifier_block *nb,
@@ -1655,7 +1655,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
16551655

16561656
/* there can be at most 32 command queues */
16571657
vector = vec & 0xffffffff;
1658-
for (i = 0; i < (1 << cmd->log_sz); i++) {
1658+
for (i = 0; i < (1 << cmd->vars.log_sz); i++) {
16591659
if (test_bit(i, &vector)) {
16601660
ent = cmd->ent_arr[i];
16611661

@@ -1744,7 +1744,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
17441744
/* wait for pending handlers to complete */
17451745
mlx5_eq_synchronize_cmd_irq(dev);
17461746
spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
1747-
vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
1747+
vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
17481748
if (!vector)
17491749
goto no_trig;
17501750

@@ -1753,14 +1753,14 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
17531753
* to guarantee pending commands will not get freed in the meanwhile.
17541754
* For that reason, it also has to be done inside the alloc_lock.
17551755
*/
1756-
for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
1756+
for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
17571757
cmd_ent_get(cmd->ent_arr[i]);
17581758
vector |= MLX5_TRIGGERED_CMD_COMP;
17591759
spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
17601760

17611761
mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
17621762
mlx5_cmd_comp_handler(dev, vector, true);
1763-
for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
1763+
for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
17641764
cmd_ent_put(cmd->ent_arr[i]);
17651765
return;
17661766

@@ -1773,22 +1773,22 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
17731773
struct mlx5_cmd *cmd = &dev->cmd;
17741774
int i;
17751775

1776-
for (i = 0; i < cmd->max_reg_cmds; i++) {
1777-
while (down_trylock(&cmd->sem)) {
1776+
for (i = 0; i < cmd->vars.max_reg_cmds; i++) {
1777+
while (down_trylock(&cmd->vars.sem)) {
17781778
mlx5_cmd_trigger_completions(dev);
17791779
cond_resched();
17801780
}
17811781
}
17821782

1783-
while (down_trylock(&cmd->pages_sem)) {
1783+
while (down_trylock(&cmd->vars.pages_sem)) {
17841784
mlx5_cmd_trigger_completions(dev);
17851785
cond_resched();
17861786
}
17871787

17881788
/* Unlock cmdif */
1789-
up(&cmd->pages_sem);
1790-
for (i = 0; i < cmd->max_reg_cmds; i++)
1791-
up(&cmd->sem);
1789+
up(&cmd->vars.pages_sem);
1790+
for (i = 0; i < cmd->vars.max_reg_cmds; i++)
1791+
up(&cmd->vars.sem);
17921792
}
17931793

17941794
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
@@ -1858,7 +1858,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
18581858
/* atomic context may not sleep */
18591859
if (callback)
18601860
return -EINVAL;
1861-
down(&dev->cmd.throttle_sem);
1861+
down(&dev->cmd.vars.throttle_sem);
18621862
}
18631863

18641864
pages_queue = is_manage_pages(in);
@@ -1903,7 +1903,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
19031903
free_msg(dev, inb);
19041904
out_up:
19051905
if (throttle_op)
1906-
up(&dev->cmd.throttle_sem);
1906+
up(&dev->cmd.vars.throttle_sem);
19071907
return err;
19081908
}
19091909

@@ -2213,30 +2213,30 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
22132213
goto err_free_pool;
22142214

22152215
cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
2216-
cmd->log_sz = cmd_l >> 4 & 0xf;
2217-
cmd->log_stride = cmd_l & 0xf;
2218-
if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
2216+
cmd->vars.log_sz = cmd_l >> 4 & 0xf;
2217+
cmd->vars.log_stride = cmd_l & 0xf;
2218+
if (1 << cmd->vars.log_sz > MLX5_MAX_COMMANDS) {
22192219
mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
2220-
1 << cmd->log_sz);
2220+
1 << cmd->vars.log_sz);
22212221
err = -EINVAL;
22222222
goto err_free_page;
22232223
}
22242224

2225-
if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
2225+
if (cmd->vars.log_sz + cmd->vars.log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
22262226
mlx5_core_err(dev, "command queue size overflow\n");
22272227
err = -EINVAL;
22282228
goto err_free_page;
22292229
}
22302230

22312231
cmd->state = MLX5_CMDIF_STATE_DOWN;
22322232
cmd->checksum_disabled = 1;
2233-
cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
2234-
cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
2233+
cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
2234+
cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
22352235

2236-
cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
2237-
if (cmd->cmdif_rev > CMD_IF_REV) {
2236+
cmd->vars.cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
2237+
if (cmd->vars.cmdif_rev > CMD_IF_REV) {
22382238
mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
2239-
CMD_IF_REV, cmd->cmdif_rev);
2239+
CMD_IF_REV, cmd->vars.cmdif_rev);
22402240
err = -EOPNOTSUPP;
22412241
goto err_free_page;
22422242
}
@@ -2246,9 +2246,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
22462246
for (i = 0; i < MLX5_CMD_OP_MAX; i++)
22472247
spin_lock_init(&cmd->stats[i].lock);
22482248

2249-
sema_init(&cmd->sem, cmd->max_reg_cmds);
2250-
sema_init(&cmd->pages_sem, 1);
2251-
sema_init(&cmd->throttle_sem, DIV_ROUND_UP(cmd->max_reg_cmds, 2));
2249+
sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
2250+
sema_init(&cmd->vars.pages_sem, 1);
2251+
sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
22522252

22532253
cmd_h = (u32)((u64)(cmd->dma) >> 32);
22542254
cmd_l = (u32)(cmd->dma);

drivers/net/ethernet/mellanox/mlx5/core/debugfs.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -176,8 +176,8 @@ static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
176176
int ret;
177177

178178
cmd = filp->private_data;
179-
weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
180-
field = cmd->max_reg_cmds - weight;
179+
weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
180+
field = cmd->vars.max_reg_cmds - weight;
181181
ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
182182
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
183183
}

include/linux/mlx5/driver.h

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -287,18 +287,23 @@ struct mlx5_cmd_stats {
287287
struct mlx5_cmd {
288288
struct mlx5_nb nb;
289289

290+
/* members which needs to be queried or reinitialized each reload */
291+
struct {
292+
u16 cmdif_rev;
293+
u8 log_sz;
294+
u8 log_stride;
295+
int max_reg_cmds;
296+
unsigned long bitmask;
297+
struct semaphore sem;
298+
struct semaphore pages_sem;
299+
struct semaphore throttle_sem;
300+
} vars;
290301
enum mlx5_cmdif_state state;
291302
void *cmd_alloc_buf;
292303
dma_addr_t alloc_dma;
293304
int alloc_size;
294305
void *cmd_buf;
295306
dma_addr_t dma;
296-
u16 cmdif_rev;
297-
u8 log_sz;
298-
u8 log_stride;
299-
int max_reg_cmds;
300-
int events;
301-
u32 __iomem *vector;
302307

303308
/* protect command queue allocations
304309
*/
@@ -308,12 +313,8 @@ struct mlx5_cmd {
308313
*/
309314
spinlock_t token_lock;
310315
u8 token;
311-
unsigned long bitmask;
312316
char wq_name[MLX5_CMD_WQ_MAX_NAME];
313317
struct workqueue_struct *wq;
314-
struct semaphore sem;
315-
struct semaphore pages_sem;
316-
struct semaphore throttle_sem;
317318
int mode;
318319
u16 allowed_opcode;
319320
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];

0 commit comments

Comments (0)