Skip to content

Commit ab4219d

Browse files
dtatulea authored and kuba-moo committed
net/mlx5e: SHAMPO, Rework header allocation loop
The current loop code was based on the assumption that there can be page leftovers from previous function calls. This patch changes the allocation loop to make it clearer how pages get allocated every MLX5E_SHAMPO_WQ_HEADER_PER_PAGE headers. This change has no functional implications. Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com> Signed-off-by: Tariq Toukan <tariqt@nvidia.com> Link: https://patch.msgid.link/20241107194357.683732-13-tariqt@nvidia.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent 945ca43 commit ab4219d

File tree

1 file changed

+17
-15
lines changed
  • drivers/net/ethernet/mellanox/mlx5/core

1 file changed

+17
-15
lines changed

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

Lines changed: 17 additions & 15 deletions
Original file line number · Diff line number · Diff line change
@@ -666,8 +666,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
666666
u16 pi, header_offset, err, wqe_bbs;
667667
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
668668
struct mlx5e_umr_wqe *umr_wqe;
669-
int headroom, i;
670-
u64 addr = 0;
669+
int headroom, i = 0;
671670

672671
headroom = rq->buff.headroom;
673672
wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
@@ -676,22 +675,25 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
676675
build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries);
677676

678677
WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1));
679-
for (i = 0; i < ksm_entries; i++, index++) {
680-
header_offset = mlx5e_shampo_hd_offset(index);
681-
if (!header_offset) {
682-
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
678+
while (i < ksm_entries) {
679+
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
680+
u64 addr;
681+
682+
err = mlx5e_page_alloc_fragmented(rq, frag_page);
683+
if (unlikely(err))
684+
goto err_unmap;
683685

684-
err = mlx5e_page_alloc_fragmented(rq, frag_page);
685-
if (unlikely(err))
686-
goto err_unmap;
687686

688-
addr = page_pool_get_dma_addr(frag_page->page);
689-
}
687+
addr = page_pool_get_dma_addr(frag_page->page);
690688

691-
umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
692-
.key = cpu_to_be32(lkey),
693-
.va = cpu_to_be64(addr + header_offset + headroom),
694-
};
689+
for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) {
690+
header_offset = mlx5e_shampo_hd_offset(index++);
691+
692+
umr_wqe->inline_ksms[i++] = (struct mlx5_ksm) {
693+
.key = cpu_to_be32(lkey),
694+
.va = cpu_to_be64(addr + header_offset + headroom),
695+
};
696+
}
695697
}
696698

697699
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {

0 commit comments

Comments
 (0)