Skip to content

Commit a440030

Browse files
msanalla (Maher Sanalla) authored and Saeed Mahameed committed
net/mlx5e: Update shared buffer along with device buffer changes
Currently, the user can modify the device's receive buffer size, modify the mapping between QoS priority groups and buffers, and change a buffer's state to become lossy/lossless via the pfc command. However, the resulting shared receive buffer pool alignment is performed only when the shared buffer is in FW ownership. When a user changes the mapping of priority groups or the buffer size, the shared buffer is moved to SW ownership. Therefore, for devices that support a shared buffer, handle the shared buffer alignment in accordance with the user's desired configuration. Meaning, the following will be performed: 1. For every change of a buffer's headroom, recalculate the size of the shared buffer to be equal to "total_buffer_size" - "new_headroom_size". The new shared buffer size will be split in a ratio of 3:1 between the lossy and lossless pools, respectively. 2. For each port buffer change, count the number of lossless buffers. If there is only one lossless buffer, then set its lossless pool usage threshold to be infinite. Otherwise, if there is more than one lossless buffer, set a usage threshold for each lossless buffer. While at it, add more verbosity to debug prints when handling user commands, to assist in future debugging. Signed-off-by: Maher Sanalla <msanalla@nvidia.com> Reviewed-by: Moshe Shemesh <moshe@nvidia.com> Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
1 parent 11f0996 commit a440030

File tree

2 files changed

+219
-4
lines changed

2 files changed

+219
-4
lines changed

drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c

Lines changed: 218 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
7373
port_buffer->buffer[i].lossy);
7474
}
7575

76+
port_buffer->headroom_size = total_used;
7677
port_buffer->port_buffer_size =
7778
MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
7879
port_buffer->spare_buffer_size =
@@ -86,16 +87,204 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
8687
return err;
8788
}
8889

90+
struct mlx5e_buffer_pool {
91+
u32 infi_size;
92+
u32 size;
93+
u32 buff_occupancy;
94+
};
95+
96+
static int mlx5e_port_query_pool(struct mlx5_core_dev *mdev,
97+
struct mlx5e_buffer_pool *buffer_pool,
98+
u32 desc, u8 dir, u8 pool_idx)
99+
{
100+
u32 out[MLX5_ST_SZ_DW(sbpr_reg)] = {};
101+
int err;
102+
103+
err = mlx5e_port_query_sbpr(mdev, desc, dir, pool_idx, out,
104+
sizeof(out));
105+
if (err)
106+
return err;
107+
108+
buffer_pool->size = MLX5_GET(sbpr_reg, out, size);
109+
buffer_pool->infi_size = MLX5_GET(sbpr_reg, out, infi_size);
110+
buffer_pool->buff_occupancy = MLX5_GET(sbpr_reg, out, buff_occupancy);
111+
112+
return err;
113+
}
114+
115+
enum {
116+
MLX5_INGRESS_DIR = 0,
117+
MLX5_EGRESS_DIR = 1,
118+
};
119+
120+
enum {
121+
MLX5_LOSSY_POOL = 0,
122+
MLX5_LOSSLESS_POOL = 1,
123+
};
124+
125+
/* No limit on usage of shared buffer pool (max_buff=0) */
126+
#define MLX5_SB_POOL_NO_THRESHOLD 0
127+
/* Shared buffer pool usage threshold when calculated
128+
* dynamically in alpha units. alpha=13 is equivalent to
129+
* HW_alpha of [(1/128) * 2 ^ (alpha-1)] = 32, where HW_alpha
130+
* equates to the following portion of the shared buffer pool:
131+
* [32 / (1 + n * 32)] While *n* is the number of buffers
132+
* that are using the shared buffer pool.
133+
*/
134+
#define MLX5_SB_POOL_THRESHOLD 13
135+
136+
/* Shared buffer class management parameters */
137+
struct mlx5_sbcm_params {
138+
u8 pool_idx;
139+
u8 max_buff;
140+
u8 infi_size;
141+
};
142+
143+
static const struct mlx5_sbcm_params sbcm_default = {
144+
.pool_idx = MLX5_LOSSY_POOL,
145+
.max_buff = MLX5_SB_POOL_NO_THRESHOLD,
146+
.infi_size = 0,
147+
};
148+
149+
static const struct mlx5_sbcm_params sbcm_lossy = {
150+
.pool_idx = MLX5_LOSSY_POOL,
151+
.max_buff = MLX5_SB_POOL_NO_THRESHOLD,
152+
.infi_size = 1,
153+
};
154+
155+
static const struct mlx5_sbcm_params sbcm_lossless = {
156+
.pool_idx = MLX5_LOSSLESS_POOL,
157+
.max_buff = MLX5_SB_POOL_THRESHOLD,
158+
.infi_size = 0,
159+
};
160+
161+
static const struct mlx5_sbcm_params sbcm_lossless_no_threshold = {
162+
.pool_idx = MLX5_LOSSLESS_POOL,
163+
.max_buff = MLX5_SB_POOL_NO_THRESHOLD,
164+
.infi_size = 1,
165+
};
166+
167+
/**
168+
* select_sbcm_params() - selects the shared buffer pool configuration
169+
*
170+
* @buffer: <input> port buffer to retrieve params of
171+
* @lossless_buff_count: <input> number of lossless buffers in total
172+
*
173+
* The selection is based on the following rules:
174+
* 1. If buffer size is 0, no shared buffer pool is used.
175+
* 2. If buffer is lossy, use lossy shared buffer pool.
176+
* 3. If there are more than 1 lossless buffers, use lossless shared buffer pool
177+
* with threshold.
178+
* 4. If there is only 1 lossless buffer, use lossless shared buffer pool
179+
* without threshold.
180+
*
181+
* @return const struct mlx5_sbcm_params* selected values
182+
*/
183+
static const struct mlx5_sbcm_params *
184+
select_sbcm_params(struct mlx5e_bufferx_reg *buffer, u8 lossless_buff_count)
185+
{
186+
if (buffer->size == 0)
187+
return &sbcm_default;
188+
189+
if (buffer->lossy)
190+
return &sbcm_lossy;
191+
192+
if (lossless_buff_count > 1)
193+
return &sbcm_lossless;
194+
195+
return &sbcm_lossless_no_threshold;
196+
}
197+
198+
static int port_update_pool_cfg(struct mlx5_core_dev *mdev,
199+
struct mlx5e_port_buffer *port_buffer)
200+
{
201+
const struct mlx5_sbcm_params *p;
202+
u8 lossless_buff_count = 0;
203+
int err;
204+
int i;
205+
206+
if (!MLX5_CAP_GEN(mdev, sbcam_reg))
207+
return 0;
208+
209+
for (i = 0; i < MLX5E_MAX_BUFFER; i++)
210+
lossless_buff_count += ((port_buffer->buffer[i].size) &&
211+
(!(port_buffer->buffer[i].lossy)));
212+
213+
for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
214+
p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count);
215+
err = mlx5e_port_set_sbcm(mdev, 0, i,
216+
MLX5_INGRESS_DIR,
217+
p->infi_size,
218+
p->max_buff,
219+
p->pool_idx);
220+
if (err)
221+
return err;
222+
}
223+
224+
return 0;
225+
}
226+
227+
static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
228+
u32 current_headroom_size,
229+
u32 new_headroom_size)
230+
{
231+
struct mlx5e_buffer_pool lossless_ipool;
232+
struct mlx5e_buffer_pool lossy_epool;
233+
u32 lossless_ipool_size;
234+
u32 shared_buffer_size;
235+
u32 total_buffer_size;
236+
u32 lossy_epool_size;
237+
int err;
238+
239+
if (!MLX5_CAP_GEN(mdev, sbcam_reg))
240+
return 0;
241+
242+
err = mlx5e_port_query_pool(mdev, &lossy_epool, 0, MLX5_EGRESS_DIR,
243+
MLX5_LOSSY_POOL);
244+
if (err)
245+
return err;
246+
247+
err = mlx5e_port_query_pool(mdev, &lossless_ipool, 0, MLX5_INGRESS_DIR,
248+
MLX5_LOSSLESS_POOL);
249+
if (err)
250+
return err;
251+
252+
total_buffer_size = current_headroom_size + lossy_epool.size +
253+
lossless_ipool.size;
254+
shared_buffer_size = total_buffer_size - new_headroom_size;
255+
256+
if (shared_buffer_size < 4) {
257+
pr_err("Requested port buffer is too large, not enough space left for shared buffer\n");
258+
return -EINVAL;
259+
}
260+
261+
/* Total shared buffer size is split in a ratio of 3:1 between
262+
* lossy and lossless pools respectively.
263+
*/
264+
lossy_epool_size = (shared_buffer_size / 4) * 3;
265+
lossless_ipool_size = shared_buffer_size / 4;
266+
267+
mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
268+
lossy_epool_size);
269+
mlx5e_port_set_sbpr(mdev, 0, MLX5_INGRESS_DIR, MLX5_LOSSLESS_POOL, 0,
270+
lossless_ipool_size);
271+
return 0;
272+
}
273+
89274
static int port_set_buffer(struct mlx5e_priv *priv,
90275
struct mlx5e_port_buffer *port_buffer)
91276
{
92277
u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
93278
struct mlx5_core_dev *mdev = priv->mdev;
94279
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
280+
u32 new_headroom_size = 0;
281+
u32 current_headroom_size;
95282
void *in;
96283
int err;
97284
int i;
98285

286+
current_headroom_size = port_buffer->headroom_size;
287+
99288
in = kzalloc(sz, GFP_KERNEL);
100289
if (!in)
101290
return -ENOMEM;
@@ -110,6 +299,7 @@ static int port_set_buffer(struct mlx5e_priv *priv,
110299
u64 xoff = port_buffer->buffer[i].xoff;
111300
u64 xon = port_buffer->buffer[i].xon;
112301

302+
new_headroom_size += size;
113303
do_div(size, port_buff_cell_sz);
114304
do_div(xoff, port_buff_cell_sz);
115305
do_div(xon, port_buff_cell_sz);
@@ -119,6 +309,17 @@ static int port_set_buffer(struct mlx5e_priv *priv,
119309
MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
120310
}
121311

312+
new_headroom_size /= port_buff_cell_sz;
313+
current_headroom_size /= port_buff_cell_sz;
314+
err = port_update_shared_buffer(priv->mdev, current_headroom_size,
315+
new_headroom_size);
316+
if (err)
317+
return err;
318+
319+
err = port_update_pool_cfg(priv->mdev, port_buffer);
320+
if (err)
321+
return err;
322+
122323
err = mlx5e_port_set_pbmc(mdev, in);
123324
out:
124325
kfree(in);
@@ -174,6 +375,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
174375

175376
/**
176377
* update_buffer_lossy - Update buffer configuration based on pfc
378+
* @mdev: port function core device
177379
* @max_mtu: netdev's max_mtu
178380
* @pfc_en: <input> current pfc configuration
179381
* @buffer: <input> current prio to buffer mapping
@@ -192,7 +394,8 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
192394
* @return: 0 if no error,
193395
* sets change to true if buffer configuration was modified.
194396
*/
195-
static int update_buffer_lossy(unsigned int max_mtu,
397+
static int update_buffer_lossy(struct mlx5_core_dev *mdev,
398+
unsigned int max_mtu,
196399
u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
197400
struct mlx5e_port_buffer *port_buffer,
198401
bool *change)
@@ -229,6 +432,10 @@ static int update_buffer_lossy(unsigned int max_mtu,
229432
}
230433

231434
if (changed) {
435+
err = port_update_pool_cfg(mdev, port_buffer);
436+
if (err)
437+
return err;
438+
232439
err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
233440
if (err)
234441
return err;
@@ -293,23 +500,30 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
293500
}
294501

295502
if (change & MLX5E_PORT_BUFFER_PFC) {
503+
mlx5e_dbg(HW, priv, "%s: requested PFC per priority bitmask: 0x%x\n",
504+
__func__, pfc->pfc_en);
296505
err = mlx5e_port_query_priority2buffer(priv->mdev, buffer);
297506
if (err)
298507
return err;
299508

300-
err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz,
301-
&port_buffer, &update_buffer);
509+
err = update_buffer_lossy(priv->mdev, max_mtu, pfc->pfc_en, buffer, xoff,
510+
port_buff_cell_sz, &port_buffer,
511+
&update_buffer);
302512
if (err)
303513
return err;
304514
}
305515

306516
if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
307517
update_prio2buffer = true;
518+
for (i = 0; i < MLX5E_MAX_BUFFER; i++)
519+
mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n",
520+
__func__, i, prio2buffer[i]);
521+
308522
err = fill_pfc_en(priv->mdev, &curr_pfc_en);
309523
if (err)
310524
return err;
311525

312-
err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff,
526+
err = update_buffer_lossy(priv->mdev, max_mtu, curr_pfc_en, prio2buffer, xoff,
313527
port_buff_cell_sz, &port_buffer, &update_buffer);
314528
if (err)
315529
return err;

drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@ struct mlx5e_bufferx_reg {
6060
struct mlx5e_port_buffer {
6161
u32 port_buffer_size;
6262
u32 spare_buffer_size;
63+
u32 headroom_size;
6364
struct mlx5e_bufferx_reg buffer[MLX5E_MAX_BUFFER];
6465
};
6566

0 commit comments

Comments
 (0)