Commit fc72039

Merge branch 'bnxt_en-update-for-net-next'
Michael Chan says:

====================
bnxt_en: Update for net-next

This patchset contains 2 features:

- The page pool implementation for the normal RX path (non-XDP) for paged
  buffers in the aggregation ring.
- Saving of the ring error counters across reset.
====================

Link: https://lore.kernel.org/r/20230817231911.165035-1-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2 parents c6cfc6c + 8becd19 · commit fc72039
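
For context on the first feature, here is a minimal sketch of a page pool that owns DMA mapping and device-side syncing, which is what the bnxt.c hunks below switch to. It assumes RX-only traffic; my_create_rx_pool() and its parameters are illustrative, not the driver's code.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <net/page_pool.h>

/* Illustrative only: create a pool that DMA-maps pages once when they enter
 * the pool (PP_FLAG_DMA_MAP) and syncs them for the device when they are
 * recycled (PP_FLAG_DMA_SYNC_DEV), so the RX fast path no longer has to map
 * and unmap every page.
 */
static struct page_pool *my_create_rx_pool(struct device *dev,
                                           struct napi_struct *napi,
                                           unsigned int pool_size)
{
        struct page_pool_params pp = { 0 };

        pp.pool_size = pool_size;
        pp.nid = dev_to_node(dev);
        pp.napi = napi;
        pp.dev = dev;
        pp.dma_dir = DMA_FROM_DEVICE;   /* RX-only traffic in this sketch */
        pp.max_len = PAGE_SIZE;         /* sync the whole page for the device */
        pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;

        return page_pool_create(&pp);   /* returns ERR_PTR() on failure */
}

With PP_FLAG_DMA_MAP set, the pool stores the bus address of each page, which is why the driver hunks below can replace explicit dma_map_page_attrs()/dma_unmap_page_attrs() calls with page_pool_get_dma_addr() and CPU-side syncs.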

3 files changed: +107 -110 lines changed

drivers/net/ethernet/broadcom/bnxt/bnxt.c

Lines changed: 62 additions & 82 deletions
@@ -761,7 +761,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                          unsigned int *offset,
                                          gfp_t gfp)
 {
-        struct device *dev = &bp->pdev->dev;
         struct page *page;
 
         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
@@ -774,12 +773,7 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
         if (!page)
                 return NULL;
 
-        *mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
-                                      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
-        if (dma_mapping_error(dev, *mapping)) {
-                page_pool_recycle_direct(rxr->page_pool, page);
-                return NULL;
-        }
+        *mapping = page_pool_get_dma_addr(page) + *offset;
         return page;
 }
 
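
The two hunks above can drop the driver's own dma_map_page_attrs() call because a pool created with PP_FLAG_DMA_MAP already holds a mapping for every page it hands out. A hedged sketch of that pattern follows; example_alloc_rx_frag() and its simplified failure value are illustrative, not driver code.

/* Illustrative: allocate a fragment from a DMA-mapping page pool and derive
 * the bus address from the mapping the pool stored, instead of mapping here.
 */
static dma_addr_t example_alloc_rx_frag(struct page_pool *pool,
                                        unsigned int *offset,
                                        unsigned int frag_size)
{
        struct page *page;

        page = page_pool_dev_alloc_frag(pool, offset, frag_size);
        if (!page)
                return DMA_MAPPING_ERROR;       /* simplified failure signalling */

        return page_pool_get_dma_addr(page) + *offset;
}
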
@@ -877,48 +871,15 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
         struct rx_bd *rxbd =
                 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
         struct bnxt_sw_rx_agg_bd *rx_agg_buf;
-        struct pci_dev *pdev = bp->pdev;
         struct page *page;
         dma_addr_t mapping;
         u16 sw_prod = rxr->rx_sw_agg_prod;
         unsigned int offset = 0;
 
-        if (BNXT_RX_PAGE_MODE(bp)) {
-                page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
-
-                if (!page)
-                        return -ENOMEM;
-
-        } else {
-                if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
-                        page = rxr->rx_page;
-                        if (!page) {
-                                page = alloc_page(gfp);
-                                if (!page)
-                                        return -ENOMEM;
-                                rxr->rx_page = page;
-                                rxr->rx_page_offset = 0;
-                        }
-                        offset = rxr->rx_page_offset;
-                        rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
-                        if (rxr->rx_page_offset == PAGE_SIZE)
-                                rxr->rx_page = NULL;
-                        else
-                                get_page(page);
-                } else {
-                        page = alloc_page(gfp);
-                        if (!page)
-                                return -ENOMEM;
-                }
+        page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
-                mapping = dma_map_page_attrs(&pdev->dev, page, offset,
-                                             BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-                                             DMA_ATTR_WEAK_ORDERING);
-                if (dma_mapping_error(&pdev->dev, mapping)) {
-                        __free_page(page);
-                        return -EIO;
-                }
-        }
+        if (!page)
+                return -ENOMEM;
 
         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
@@ -1031,8 +992,8 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
                 return NULL;
         }
         dma_addr -= bp->rx_dma_offset;
-        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
-                             bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+        dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+                                bp->rx_dir);
         skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
         if (!skb) {
                 page_pool_recycle_direct(rxr->page_pool, page);
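
Because pool pages stay mapped for the pool's lifetime, the receive path only needs a CPU-side sync before reading the buffer, and skb_mark_for_recycle() returns the pages to the pool when the skb is freed. A condensed sketch of that flow, mirroring the hunks above and the skb_mark_for_recycle() additions below; example_build_rx_skb() is a hypothetical helper, not the driver's function.

/* Illustrative consolidation of the RX pattern: sync for the CPU, build the
 * skb, recycle the page on failure, and mark the skb so its pages go back
 * to the pool when it is freed.
 */
static struct sk_buff *example_build_rx_skb(struct bnxt *bp,
                                            struct bnxt_rx_ring_info *rxr,
                                            struct page *page, u8 *data_ptr,
                                            dma_addr_t dma_addr)
{
        struct sk_buff *skb;

        dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
                                bp->rx_dir);
        skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
        if (!skb) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        skb_mark_for_recycle(skb);
        return skb;
}
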
@@ -1065,8 +1026,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                 return NULL;
         }
         dma_addr -= bp->rx_dma_offset;
-        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
-                             bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+        dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+                                bp->rx_dir);
 
         if (unlikely(!payload))
                 payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1182,9 +1143,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
                         return 0;
                 }
 
-                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
-                                     bp->rx_dir,
-                                     DMA_ATTR_WEAK_ORDERING);
+                dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
+                                        bp->rx_dir);
 
                 total_frag_len += frag_len;
                 prod = NEXT_RX_AGG(prod);
@@ -1204,6 +1164,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
         total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
                                              agg_bufs, tpa, NULL);
         if (!total_frag_len) {
+                skb_mark_for_recycle(skb);
                 dev_kfree_skb(skb);
                 return NULL;
         }
@@ -1794,6 +1755,7 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
                 return;
         }
         skb_record_rx_queue(skb, bnapi->index);
+        skb_mark_for_recycle(skb);
         napi_gro_receive(&bnapi->napi, skb);
 }
 
@@ -2978,10 +2940,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 
                 rx_buf->data = NULL;
                 if (BNXT_RX_PAGE_MODE(bp)) {
-                        mapping -= bp->rx_dma_offset;
-                        dma_unmap_page_attrs(&pdev->dev, mapping,
-                                             BNXT_RX_PAGE_SIZE, bp->rx_dir,
-                                             DMA_ATTR_WEAK_ORDERING);
                         page_pool_recycle_direct(rxr->page_pool, data);
                 } else {
                         dma_unmap_single_attrs(&pdev->dev, mapping,
@@ -3002,30 +2960,13 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
                 if (!page)
                         continue;
 
-                if (BNXT_RX_PAGE_MODE(bp)) {
-                        dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-                                             BNXT_RX_PAGE_SIZE, bp->rx_dir,
-                                             DMA_ATTR_WEAK_ORDERING);
-                        rx_agg_buf->page = NULL;
-                        __clear_bit(i, rxr->rx_agg_bmap);
+                rx_agg_buf->page = NULL;
+                __clear_bit(i, rxr->rx_agg_bmap);
 
-                        page_pool_recycle_direct(rxr->page_pool, page);
-                } else {
-                        dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-                                             BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-                                             DMA_ATTR_WEAK_ORDERING);
-                        rx_agg_buf->page = NULL;
-                        __clear_bit(i, rxr->rx_agg_bmap);
-
-                        __free_page(page);
-                }
+                page_pool_recycle_direct(rxr->page_pool, page);
         }
 
 skip_rx_agg_free:
-        if (rxr->rx_page) {
-                __free_page(rxr->rx_page);
-                rxr->rx_page = NULL;
-        }
         map = rxr->rx_tpa_idx_map;
         if (map)
                 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
@@ -3244,11 +3185,15 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 {
         struct page_pool_params pp = { 0 };
 
-        pp.pool_size = bp->rx_ring_size;
+        pp.pool_size = bp->rx_agg_ring_size;
+        if (BNXT_RX_PAGE_MODE(bp))
+                pp.pool_size += bp->rx_ring_size;
         pp.nid = dev_to_node(&bp->pdev->dev);
         pp.napi = &rxr->bnapi->napi;
         pp.dev = &bp->pdev->dev;
-        pp.dma_dir = DMA_BIDIRECTIONAL;
+        pp.dma_dir = bp->rx_dir;
+        pp.max_len = PAGE_SIZE;
+        pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
                 pp.flags |= PP_FLAG_PAGE_FRAG;
 
@@ -9448,10 +9393,16 @@ static void bnxt_disable_napi(struct bnxt *bp)
                 return;
 
         for (i = 0; i < bp->cp_nr_rings; i++) {
-                struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+                struct bnxt_napi *bnapi = bp->bnapi[i];
+                struct bnxt_cp_ring_info *cpr;
 
-                napi_disable(&bp->bnapi[i]->napi);
-                if (bp->bnapi[i]->rx_ring)
+                cpr = &bnapi->cp_ring;
+                if (bnapi->tx_fault)
+                        cpr->sw_stats.tx.tx_resets++;
+                if (bnapi->in_reset)
+                        cpr->sw_stats.rx.rx_resets++;
+                napi_disable(&bnapi->napi);
+                if (bnapi->rx_ring)
                         cancel_work_sync(&cpr->dim.work);
         }
 }
@@ -9468,8 +9419,6 @@ static void bnxt_enable_napi(struct bnxt *bp)
                 bnapi->tx_fault = 0;
 
                 cpr = &bnapi->cp_ring;
-                if (bnapi->in_reset)
-                        cpr->sw_stats.rx.rx_resets++;
                 bnapi->in_reset = false;
 
                 bnapi->tx_pkts = 0;
@@ -10738,8 +10687,10 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
         bnxt_free_skbs(bp);
 
         /* Save ring stats before shutdown */
-        if (bp->bnapi && irq_re_init)
+        if (bp->bnapi && irq_re_init) {
                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
+                bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
+        }
         if (irq_re_init) {
                 bnxt_free_irq(bp);
                 bnxt_del_napi(bp);
@@ -10988,6 +10939,35 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
 }
 
+static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
+                                        struct bnxt_total_ring_err_stats *stats,
+                                        struct bnxt_cp_ring_info *cpr)
+{
+        struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
+        u64 *hw_stats = cpr->stats.sw_stats;
+
+        stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
+        stats->rx_total_resets += sw_stats->rx.rx_resets;
+        stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
+        stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
+        stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
+        stats->rx_total_ring_discards +=
+                BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
+        stats->tx_total_resets += sw_stats->tx.tx_resets;
+        stats->tx_total_ring_discards +=
+                BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
+        stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
+}
+
+void bnxt_get_ring_err_stats(struct bnxt *bp,
+                             struct bnxt_total_ring_err_stats *stats)
+{
+        int i;
+
+        for (i = 0; i < bp->cp_nr_rings; i++)
+                bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
+}
+
 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
 {
         struct net_device *dev = bp->dev;
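
Note that bnxt_get_ring_err_stats() only adds the live per-ring counters into the caller's structure, and __bnxt_close_nic() above snapshots the totals into bp->ring_err_stats_prev before the rings are torn down. A likely consumer (the actual reporting code is in the third changed file, not shown in this view; example_get_total_ring_err_stats() is hypothetical) would seed the result with the saved snapshot and add the live counters on top:

/* Hypothetical helper: report cumulative error counters that survive a
 * reset by starting from the snapshot taken in __bnxt_close_nic() and
 * adding the counters of the currently active rings.
 */
static void example_get_total_ring_err_stats(struct bnxt *bp,
                                             struct bnxt_total_ring_err_stats *stats)
{
        *stats = bp->ring_err_stats_prev;       /* totals saved across the last reset */
        bnxt_get_ring_err_stats(bp, stats);     /* += counters of the current rings */
}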

drivers/net/ethernet/broadcom/bnxt/bnxt.h

Lines changed: 21 additions & 3 deletions
@@ -919,9 +919,6 @@ struct bnxt_rx_ring_info {
         unsigned long           *rx_agg_bmap;
         u16                     rx_agg_bmap_size;
 
-        struct page             *rx_page;
-        unsigned int            rx_page_offset;
-
         dma_addr_t              rx_desc_mapping[MAX_RX_PAGES];
         dma_addr_t              rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
 
@@ -942,15 +939,32 @@ struct bnxt_rx_sw_stats {
         u64                     rx_netpoll_discards;
 };
 
+struct bnxt_tx_sw_stats {
+        u64                     tx_resets;
+};
+
 struct bnxt_cmn_sw_stats {
         u64                     missed_irqs;
 };
 
 struct bnxt_sw_stats {
         struct bnxt_rx_sw_stats rx;
+        struct bnxt_tx_sw_stats tx;
         struct bnxt_cmn_sw_stats cmn;
 };
 
+struct bnxt_total_ring_err_stats {
+        u64                     rx_total_l4_csum_errors;
+        u64                     rx_total_resets;
+        u64                     rx_total_buf_errors;
+        u64                     rx_total_oom_discards;
+        u64                     rx_total_netpoll_discards;
+        u64                     rx_total_ring_discards;
+        u64                     tx_total_resets;
+        u64                     tx_total_ring_discards;
+        u64                     total_missed_irqs;
+};
+
 struct bnxt_stats_mem {
         u64                     *sw_stats;
         u64                     *hw_masks;
@@ -2021,6 +2035,8 @@ struct bnxt {
         u8                      pri2cos_idx[8];
         u8                      pri2cos_valid;
 
+        struct bnxt_total_ring_err_stats ring_err_stats_prev;
+
         u16                     hwrm_max_req_len;
         u16                     hwrm_max_ext_req_len;
         unsigned int            hwrm_cmd_timeout;
@@ -2347,6 +2363,8 @@ int bnxt_half_open_nic(struct bnxt *bp);
 void bnxt_half_close_nic(struct bnxt *bp);
 void bnxt_reenable_sriov(struct bnxt *bp);
 int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_get_ring_err_stats(struct bnxt *bp,
+                             struct bnxt_total_ring_err_stats *stats);
 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
                          u32 *reg_buf);
 void bnxt_fw_exception(struct bnxt *bp);
