net: hns3: extract macro to simplify ring stats update code
[ Upstream commit e6d72f6 ]

As the code that updates ring stats is nearly identical for the
different ring stats types, this patch extracts a macro to simplify
the ring stats update code.

Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: 7d89b53 ("net: hns3: fix miss L3E checking for rx packet")
Signed-off-by: Sasha Levin <sashal@kernel.org>
321lipeng authored and gregkh committed Jan 12, 2023
1 parent 7457c5a commit 47868cb
Showing 2 changed files with 38 additions and 92 deletions.
123 changes: 31 additions & 92 deletions drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1005,9 +1005,7 @@ static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
return false;

if (ALIGN(len, dma_get_cache_alignment()) > space) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.tx_spare_full++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, tx_spare_full);
return false;
}

@@ -1024,9 +1022,7 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
return false;

if (space < HNS3_MAX_SGL_SIZE) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.tx_spare_full++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, tx_spare_full);
return false;
}

@@ -1554,9 +1550,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,

ret = hns3_handle_vtags(ring, skb);
if (unlikely(ret < 0)) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.tx_vlan_err++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, tx_vlan_err);
return ret;
} else if (ret == HNS3_INNER_VLAN_TAG) {
inner_vtag = skb_vlan_tag_get(skb);
@@ -1591,28 +1585,22 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,

ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (unlikely(ret < 0)) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.tx_l4_proto_err++;
-			u64_stats_update_end(&ring->syncp);
+			hns3_ring_stats_update(ring, tx_l4_proto_err);
return ret;
}

ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
&type_cs_vlan_tso,
&ol_type_vlan_len_msec);
if (unlikely(ret < 0)) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.tx_l2l3l4_err++;
-			u64_stats_update_end(&ring->syncp);
+			hns3_ring_stats_update(ring, tx_l2l3l4_err);
return ret;
}

ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
&type_cs_vlan_tso, &desc_cb->send_bytes);
if (unlikely(ret < 0)) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.tx_tso_err++;
-			u64_stats_update_end(&ring->syncp);
+			hns3_ring_stats_update(ring, tx_tso_err);
return ret;
}
}
@@ -1705,9 +1693,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
}

if (unlikely(dma_mapping_error(dev, dma))) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.sw_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}

@@ -1853,9 +1839,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
* recursion level of over HNS3_MAX_RECURSION_LEVEL.
*/
if (bd_num == UINT_MAX) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.over_max_recursion++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, over_max_recursion);
return -ENOMEM;
}

@@ -1864,16 +1848,12 @@
*/
if (skb->len > HNS3_MAX_TSO_SIZE ||
(!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.hw_limitation++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, hw_limitation);
return -ENOMEM;
}

if (__skb_linearize(skb)) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.sw_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}

@@ -1903,9 +1883,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,

bd_num = hns3_tx_bd_count(skb->len);

-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.tx_copy++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, tx_copy);
}

out:
@@ -1925,9 +1903,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
return bd_num;
}

-	u64_stats_update_begin(&ring->syncp);
-	ring->stats.tx_busy++;
-	u64_stats_update_end(&ring->syncp);
+	hns3_ring_stats_update(ring, tx_busy);

return -EBUSY;
}
@@ -2012,9 +1988,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
ring->pending_buf += num;

if (!doorbell) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.tx_more++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, tx_more);
return;
}

@@ -2064,9 +2038,7 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
ret = skb_copy_bits(skb, 0, buf, size);
if (unlikely(ret < 0)) {
hns3_tx_spare_rollback(ring, cb_len);
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.copy_bits_err++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, copy_bits_err);
return ret;
}

@@ -2089,9 +2061,8 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
dma_sync_single_for_device(ring_to_dev(ring), dma, size,
DMA_TO_DEVICE);

-	u64_stats_update_begin(&ring->syncp);
-	ring->stats.tx_bounce++;
-	u64_stats_update_end(&ring->syncp);
+	hns3_ring_stats_update(ring, tx_bounce);

return bd_num;
}

@@ -2121,9 +2092,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
if (unlikely(nents < 0)) {
hns3_tx_spare_rollback(ring, cb_len);
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.skb2sgl_err++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, skb2sgl_err);
return -ENOMEM;
}

@@ -2132,9 +2101,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
DMA_TO_DEVICE);
if (unlikely(!sgt->nents)) {
hns3_tx_spare_rollback(ring, cb_len);
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.map_sg_err++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, map_sg_err);
return -ENOMEM;
}

@@ -2146,10 +2113,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
for (i = 0; i < sgt->nents; i++)
bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
sg_dma_len(sgt->sgl + i));
-
-	u64_stats_update_begin(&ring->syncp);
-	ring->stats.tx_sgl++;
-	u64_stats_update_end(&ring->syncp);
+	hns3_ring_stats_update(ring, tx_sgl);

return bd_num;
}
@@ -2188,9 +2152,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
hns3_tx_doorbell(ring, 0, !netdev_xmit_more());

-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.sw_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, sw_err_cnt);

return NETDEV_TX_OK;
}
@@ -3522,17 +3484,13 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
for (i = 0; i < cleand_count; i++) {
desc_cb = &ring->desc_cb[ring->next_to_use];
if (desc_cb->reuse_flag) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.reuse_pg_cnt++;
-			u64_stats_update_end(&ring->syncp);
+			hns3_ring_stats_update(ring, reuse_pg_cnt);

hns3_reuse_buffer(ring, ring->next_to_use);
} else {
ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
if (ret) {
-				u64_stats_update_begin(&ring->syncp);
-				ring->stats.sw_err_cnt++;
-				u64_stats_update_end(&ring->syncp);
+				hns3_ring_stats_update(ring, sw_err_cnt);

hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
@@ -3544,9 +3502,7 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);

-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.non_reuse_pg++;
-			u64_stats_update_end(&ring->syncp);
+			hns3_ring_stats_update(ring, non_reuse_pg);
}

ring_ptr_move_fw(ring, next_to_use);
@@ -3573,9 +3529,7 @@ static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
void *frag = napi_alloc_frag(frag_size);

if (unlikely(!frag)) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.frag_alloc_err++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, frag_alloc_err);

hns3_rl_err(ring_to_netdev(ring),
"failed to allocate rx frag\n");
Expand All @@ -3587,9 +3541,7 @@ static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
skb_add_rx_frag(skb, i, virt_to_page(frag),
offset_in_page(frag), frag_size, frag_size);

-	u64_stats_update_begin(&ring->syncp);
-	ring->stats.frag_alloc++;
-	u64_stats_update_end(&ring->syncp);
+	hns3_ring_stats_update(ring, frag_alloc);
return 0;
}

@@ -3722,9 +3674,7 @@ static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
return false;

-	u64_stats_update_begin(&ring->syncp);
-	ring->stats.csum_complete++;
-	u64_stats_update_end(&ring->syncp);
+	hns3_ring_stats_update(ring, csum_complete);
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)csum);

@@ -3798,9 +3748,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
BIT(HNS3_RXD_OL3E_B) |
BIT(HNS3_RXD_OL4E_B)))) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.l3l4_csum_err++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, l3l4_csum_err);

return;
}
@@ -3891,10 +3839,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
skb = ring->skb;
if (unlikely(!skb)) {
hns3_rl_err(netdev, "alloc rx skb fail\n");
-
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.sw_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, sw_err_cnt);

return -ENOMEM;
}
@@ -3925,9 +3870,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
if (ring->page_pool)
skb_mark_for_recycle(skb);

-	u64_stats_update_begin(&ring->syncp);
-	ring->stats.seg_pkt_cnt++;
-	u64_stats_update_end(&ring->syncp);
+	hns3_ring_stats_update(ring, seg_pkt_cnt);

ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
__skb_put(skb, ring->pull_len);
@@ -4119,9 +4062,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ret = hns3_set_gro_and_checksum(ring, skb, l234info,
bd_base_info, ol_info, csum);
if (unlikely(ret)) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.rx_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, rx_err_cnt);
return ret;
}

@@ -5333,9 +5274,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
if (ret) {
-				u64_stats_update_begin(&ring->syncp);
-				ring->stats.sw_err_cnt++;
-				u64_stats_update_end(&ring->syncp);
+				hns3_ring_stats_update(ring, sw_err_cnt);
/* if alloc new buffer fail, exit directly
* and reclear in up flow.
*/
7 changes: 7 additions & 0 deletions drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -654,6 +654,13 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)

#define hns3_buf_size(_ring) ((_ring)->buf_size)

+#define hns3_ring_stats_update(ring, cnt) do { \
+	typeof(ring) (tmp) = (ring); \
+	u64_stats_update_begin(&(tmp)->syncp); \
+	((tmp)->stats.cnt)++; \
+	u64_stats_update_end(&(tmp)->syncp); \
+} while (0)

static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
{
#if (PAGE_SIZE < 8192)
