Skip to content

Commit 7237f5b

Browse files
mfijalko authored and Jeff Kirsher committed
ice: introduce legacy Rx flag
Add an ethtool "legacy-rx" priv flag for toggling the Rx path. This
control knob will be mainly used for build_skb usage as well as buffer
size/MTU manipulation.

In preparation for adding build_skb support in a way that it takes care
of how we set the values of max_frame and rx_buf_len fields of struct
ice_vsi. Specifically, in this patch mentioned fields are set to values
that will allow us to provide headroom and tailroom in-place. This can
be mostly broken down onto following:

- for legacy-rx "on" ethtool control knob, old behaviour is kept;
- for standard 1500 MTU size configure the buffer of size 1536, as
  network stack is expecting the NET_SKB_PAD to be provided and
  NET_IP_ALIGN can have a non-zero value (these can be typically equal
  to 32 and 2, respectively);
- for larger MTUs go with max_frame set to 9k and configure the 3k
  buffer in case when PAGE_SIZE of underlying arch is less than 8k; 3k
  buffer is implying the need for order 1 page, so that our page
  recycling scheme can still be applied;

With that said, substitute the hardcoded ICE_RXBUF_2048 and PAGE_SIZE
values in DMA API that we're making use of with rx_ring->rx_buf_len and
ice_rx_pg_size(rx_ring). The latter is an introduced helper for
determining the page size based on its order (which was figured out via
ice_rx_pg_order).

Last but not least, take care of truesize calculation.

In the followup patch the headroom/tailroom computation logic will be
introduced.

This change aligns the buffer and frame configuration with other Intel
drivers, most importantly with iavf.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
1 parent 2d4238f commit 7237f5b

File tree

5 files changed

+63
-25
lines changed

5 files changed

+63
-25
lines changed

drivers/net/ethernet/intel/ice/ice.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -331,6 +331,7 @@ enum ice_pf_flags {
331331
ICE_FLAG_NO_MEDIA,
332332
ICE_FLAG_FW_LLDP_AGENT,
333333
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
334+
ICE_FLAG_LEGACY_RX,
334335
ICE_PF_FLAGS_NBITS /* must be last */
335336
};
336337

drivers/net/ethernet/intel/ice/ice_ethtool.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -156,6 +156,7 @@ struct ice_priv_flag {
156156
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
157157
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
158158
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
159+
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
159160
};
160161

161162
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -1256,6 +1257,11 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
12561257
"Fail to enable MIB change events\n");
12571258
}
12581259
}
1260+
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
1261+
/* down and up VSI so that changes of Rx cfg are reflected. */
1262+
ice_down(vsi);
1263+
ice_up(vsi);
1264+
}
12591265
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
12601266
return ret;
12611267
}

drivers/net/ethernet/intel/ice/ice_lib.c

Lines changed: 16 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1190,12 +1190,22 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
11901190
*/
11911191
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
11921192
{
1193-
if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
1194-
vsi->max_frame = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
1195-
else
1196-
vsi->max_frame = ICE_RXBUF_2048;
1197-
1198-
vsi->rx_buf_len = ICE_RXBUF_2048;
1193+
if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
1194+
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
1195+
vsi->rx_buf_len = ICE_RXBUF_2048;
1196+
#if (PAGE_SIZE < 8192)
1197+
} else if (vsi->netdev->mtu <= ETH_DATA_LEN) {
1198+
vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
1199+
vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
1200+
#endif
1201+
} else {
1202+
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
1203+
#if (PAGE_SIZE < 8192)
1204+
vsi->rx_buf_len = ICE_RXBUF_3072;
1205+
#else
1206+
vsi->rx_buf_len = ICE_RXBUF_2048;
1207+
#endif
1208+
}
11991209
}
12001210

12011211
/**

drivers/net/ethernet/intel/ice/ice_txrx.c

Lines changed: 27 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -310,10 +310,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
310310
*/
311311
dma_sync_single_range_for_cpu(dev, rx_buf->dma,
312312
rx_buf->page_offset,
313-
ICE_RXBUF_2048, DMA_FROM_DEVICE);
313+
rx_ring->rx_buf_len,
314+
DMA_FROM_DEVICE);
314315

315316
/* free resources associated with mapping */
316-
dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
317+
dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
317318
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
318319
__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
319320

@@ -529,21 +530,21 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
529530
}
530531

531532
/* alloc new page for storage */
532-
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
533+
page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
533534
if (unlikely(!page)) {
534535
rx_ring->rx_stats.alloc_page_failed++;
535536
return false;
536537
}
537538

538539
/* map page for use */
539-
dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
540+
dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
540541
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
541542

542543
/* if mapping failed free memory back to system since
543544
* there isn't much point in holding memory we can't use
544545
*/
545546
if (dma_mapping_error(rx_ring->dev, dma)) {
546-
__free_pages(page, 0);
547+
__free_pages(page, ice_rx_pg_order(rx_ring));
547548
rx_ring->rx_stats.alloc_page_failed++;
548549
return false;
549550
}
@@ -592,7 +593,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
592593
/* sync the buffer for use by the device */
593594
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
594595
bi->page_offset,
595-
ICE_RXBUF_2048,
596+
rx_ring->rx_buf_len,
596597
DMA_FROM_DEVICE);
597598

598599
/* Refresh the desc even if buffer_addrs didn't change
@@ -663,9 +664,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
663664
*/
664665
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
665666
{
666-
#if (PAGE_SIZE >= 8192)
667-
unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
668-
#endif
669667
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
670668
struct page *page = rx_buf->page;
671669

@@ -678,7 +676,9 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
678676
if (unlikely((page_count(page) - pagecnt_bias) > 1))
679677
return false;
680678
#else
681-
if (rx_buf->page_offset > last_offset)
679+
#define ICE_LAST_OFFSET \
680+
(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
681+
if (rx_buf->page_offset > ICE_LAST_OFFSET)
682682
return false;
683683
#endif /* PAGE_SIZE < 8192) */
684684

@@ -696,6 +696,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
696696

697697
/**
698698
* ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
699+
* @rx_ring: Rx descriptor ring to transact packets on
699700
* @rx_buf: buffer containing page to add
700701
* @skb: sk_buff to place the data into
701702
* @size: packet length from rx_desc
@@ -705,13 +706,13 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
705706
* The function will then update the page offset.
706707
*/
707708
static void
708-
ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
709-
unsigned int size)
709+
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
710+
struct sk_buff *skb, unsigned int size)
710711
{
711712
#if (PAGE_SIZE >= 8192)
712713
unsigned int truesize = SKB_DATA_ALIGN(size);
713714
#else
714-
unsigned int truesize = ICE_RXBUF_2048;
715+
unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
715716
#endif
716717

717718
if (!size)
@@ -830,7 +831,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
830831
#if (PAGE_SIZE >= 8192)
831832
unsigned int truesize = SKB_DATA_ALIGN(size);
832833
#else
833-
unsigned int truesize = ICE_RXBUF_2048;
834+
unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
834835
#endif
835836
skb_add_rx_frag(skb, 0, rx_buf->page,
836837
rx_buf->page_offset + headlen, size, truesize);
@@ -873,8 +874,9 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
873874
rx_ring->rx_stats.page_reuse_count++;
874875
} else {
875876
/* we are not reusing the buffer so unmap it */
876-
dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
877-
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
877+
dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
878+
ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
879+
ICE_RX_DMA_ATTR);
878880
__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
879881
}
880882

@@ -1008,9 +1010,15 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
10081010
rcu_read_unlock();
10091011
if (xdp_res) {
10101012
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1013+
unsigned int truesize;
1014+
1015+
#if (PAGE_SIZE < 8192)
1016+
truesize = ice_rx_pg_size(rx_ring) / 2;
1017+
#else
1018+
truesize = SKB_DATA_ALIGN(size);
1019+
#endif
10111020
xdp_xmit |= xdp_res;
1012-
ice_rx_buf_adjust_pg_offset(rx_buf,
1013-
ICE_RXBUF_2048);
1021+
ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
10141022
} else {
10151023
rx_buf->pagecnt_bias++;
10161024
}
@@ -1023,7 +1031,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
10231031
}
10241032
construct_skb:
10251033
if (skb)
1026-
ice_add_rx_frag(rx_buf, skb, size);
1034+
ice_add_rx_frag(rx_ring, rx_buf, skb, size);
10271035
else
10281036
skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
10291037

drivers/net/ethernet/intel/ice/ice_txrx.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,9 @@
77
#include "ice_type.h"
88

99
#define ICE_DFLT_IRQ_WORK 256
10+
#define ICE_RXBUF_3072 3072
1011
#define ICE_RXBUF_2048 2048
12+
#define ICE_RXBUF_1536 1536
1113
#define ICE_MAX_CHAINED_RX_BUFS 5
1214
#define ICE_MAX_BUF_TXD 8
1315
#define ICE_MIN_TX_LEN 17
@@ -262,6 +264,17 @@ struct ice_ring_container {
262264
#define ice_for_each_ring(pos, head) \
263265
for (pos = (head).ring; pos; pos = pos->next)
264266

267+
static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
268+
{
269+
#if (PAGE_SIZE < 8192)
270+
if (ring->rx_buf_len > (PAGE_SIZE / 2))
271+
return 1;
272+
#endif
273+
return 0;
274+
}
275+
276+
#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
277+
265278
union ice_32b_rx_flex_desc;
266279

267280
bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);

0 commit comments

Comments (0)