
Commit 4335012

zhuyj authored and kuba-moo committed
net/mlx5: Fix build -Wframe-larger-than warnings
When building, the following warnings appear:

"
pci_irq.c: In function ‘mlx5_ctrl_irq_request’:
pci_irq.c:494:1: warning: the frame size of 1040 bytes is larger than 1024 bytes [-Wframe-larger-than=]
pci_irq.c: In function ‘mlx5_irq_request_vector’:
pci_irq.c:561:1: warning: the frame size of 1040 bytes is larger than 1024 bytes [-Wframe-larger-than=]
eq.c: In function ‘comp_irq_request_sf’:
eq.c:897:1: warning: the frame size of 1080 bytes is larger than 1024 bytes [-Wframe-larger-than=]
irq_affinity.c: In function ‘irq_pool_request_irq’:
irq_affinity.c:74:1: warning: the frame size of 1048 bytes is larger than 1024 bytes [-Wframe-larger-than=]
"

These warnings indicate that the stack frame size exceeds 1024 bytes in these functions. To resolve this, instead of placing the large buffers on the stack, allocate them dynamically on the heap with kvzalloc(). This reduces stack usage and eliminates the frame size warnings.

Acked-by: Junxian Huang <huangjunxian6@hisilicon.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20250722212023.244296-1-yanjun.zhu@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent 89628a0 commit 4335012
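
The conversion is the same in all four affected functions: a struct irq_affinity_desc, which embeds a full cpumask and therefore grows with NR_CPUS, moves from the stack to a kvzalloc() allocation that must be kvfree()d on every exit path. A minimal sketch of that pattern follows; the helper name demo_alloc_af_desc is hypothetical and only illustrates the idiom, while the diffs below show the actual mlx5 changes.

/* Sketch only: allocate the large descriptor on the heap instead of the
 * stack; the caller owns it and must kvfree() it on every return path.
 */
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

static struct irq_affinity_desc *demo_alloc_af_desc(void)
{
	struct irq_affinity_desc *af_desc;

	/* replaces the old on-stack "struct irq_affinity_desc af_desc = {};" */
	af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
	if (!af_desc)
		return NULL;

	af_desc->is_managed = false;
	cpumask_copy(&af_desc->mask, cpu_online_mask);

	return af_desc;		/* caller: kvfree(af_desc) when done */
}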

3 files changed: +58 −23 lines changed


drivers/net/ethernet/mellanox/mlx5/core/eq.c

Lines changed: 15 additions & 7 deletions
@@ -876,26 +876,34 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
 {
 	struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
 	struct mlx5_eq_table *table = dev->priv.eq_table;
-	struct irq_affinity_desc af_desc = {};
+	struct irq_affinity_desc *af_desc;
 	struct mlx5_irq *irq;
 
-	/* In case SF irq pool does not exist, fallback to the PF irqs*/
+	/* In case SF irq pool does not exist, fallback to the PF irqs */
 	if (!mlx5_irq_pool_is_sf_pool(pool))
 		return comp_irq_request_pci(dev, vecidx);
 
-	af_desc.is_managed = false;
-	cpumask_copy(&af_desc.mask, cpu_online_mask);
-	cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
-	irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
-	if (IS_ERR(irq))
+	af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+	if (!af_desc)
+		return -ENOMEM;
+
+	af_desc->is_managed = false;
+	cpumask_copy(&af_desc->mask, cpu_online_mask);
+	cpumask_andnot(&af_desc->mask, &af_desc->mask, &table->used_cpus);
+	irq = mlx5_irq_affinity_request(dev, pool, af_desc);
+	if (IS_ERR(irq)) {
+		kvfree(af_desc);
 		return PTR_ERR(irq);
+	}
 
 	cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
 	mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
 		      pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
 		      cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
 		      mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
 
+	kvfree(af_desc);
+
 	return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
 }

drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c

Lines changed: 15 additions & 4 deletions
@@ -47,29 +47,40 @@ static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
 static struct mlx5_irq *
 irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
 {
-	struct irq_affinity_desc auto_desc = {};
+	struct irq_affinity_desc *auto_desc;
 	struct mlx5_irq *irq;
 	u32 irq_index;
 	int err;
 
+	auto_desc = kvzalloc(sizeof(*auto_desc), GFP_KERNEL);
+	if (!auto_desc)
+		return ERR_PTR(-ENOMEM);
+
 	err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
-	if (err)
+	if (err) {
+		kvfree(auto_desc);
 		return ERR_PTR(err);
+	}
+
 	if (pool->irqs_per_cpu) {
 		if (cpumask_weight(&af_desc->mask) > 1)
 			/* if req_mask contain more then one CPU, set the least loadad CPU
 			 * of req_mask
 			 */
 			cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
-					&auto_desc.mask);
+					&auto_desc->mask);
 		else
 			cpu_get(pool, cpumask_first(&af_desc->mask));
 	}
+
 	irq = mlx5_irq_alloc(pool, irq_index,
-			     cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
+			     cpumask_empty(&auto_desc->mask) ? af_desc : auto_desc,
 			     NULL);
 	if (IS_ERR(irq))
 		xa_erase(&pool->irqs, irq_index);
+
+	kvfree(auto_desc);
+
 	return irq;
 }

drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c

Lines changed: 28 additions & 12 deletions
@@ -470,26 +470,32 @@ void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
 struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
 {
 	struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
-	struct irq_affinity_desc af_desc;
+	struct irq_affinity_desc *af_desc;
 	struct mlx5_irq *irq;
 
-	cpumask_copy(&af_desc.mask, cpu_online_mask);
-	af_desc.is_managed = false;
+	af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+	if (!af_desc)
+		return ERR_PTR(-ENOMEM);
+
+	cpumask_copy(&af_desc->mask, cpu_online_mask);
+	af_desc->is_managed = false;
 	if (!mlx5_irq_pool_is_sf_pool(pool)) {
 		/* In case we are allocating a control IRQ from a pci device's pool.
 		 * This can happen also for a SF if the SFs pool is empty.
 		 */
 		if (!pool->xa_num_irqs.max) {
-			cpumask_clear(&af_desc.mask);
+			cpumask_clear(&af_desc->mask);
 			/* In case we only have a single IRQ for PF/VF */
-			cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc.mask);
+			cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc->mask);
 		}
 		/* Allocate the IRQ in index 0. The vector was already allocated */
-		irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
+		irq = irq_pool_request_vector(pool, 0, af_desc, NULL);
 	} else {
-		irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
+		irq = mlx5_irq_affinity_request(dev, pool, af_desc);
 	}
 
+	kvfree(af_desc);
+
 	return irq;
 }
 
@@ -548,16 +554,26 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
 {
 	struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
 	struct mlx5_irq_pool *pool = table->pcif_pool;
-	struct irq_affinity_desc af_desc;
 	int offset = MLX5_IRQ_VEC_COMP_BASE;
+	struct irq_affinity_desc *af_desc;
+	struct mlx5_irq *irq;
+
+	af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+	if (!af_desc)
+		return ERR_PTR(-ENOMEM);
 
 	if (!pool->xa_num_irqs.max)
 		offset = 0;
 
-	af_desc.is_managed = false;
-	cpumask_clear(&af_desc.mask);
-	cpumask_set_cpu(cpu, &af_desc.mask);
-	return mlx5_irq_request(dev, vecidx + offset, &af_desc, rmap);
+	af_desc->is_managed = false;
+	cpumask_clear(&af_desc->mask);
+	cpumask_set_cpu(cpu, &af_desc->mask);
+
+	irq = mlx5_irq_request(dev, vecidx + offset, af_desc, rmap);
+
+	kvfree(af_desc);
+
+	return irq;
 }
 
 static struct mlx5_irq_pool *
