@@ -113,6 +113,11 @@ static int fbnic_maybe_stop_tx(const struct net_device *dev,
113113
114114 res = netif_txq_maybe_stop (txq , fbnic_desc_unused (ring ), size ,
115115 FBNIC_TX_DESC_WAKEUP );
116+ if (!res ) {
117+ u64_stats_update_begin (& ring -> stats .syncp );
118+ ring -> stats .twq .stop ++ ;
119+ u64_stats_update_end (& ring -> stats .syncp );
120+ }
116121
117122 return !res ;
118123}
@@ -191,19 +196,25 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
191196 skb -> csum_offset / 2 ));
192197
193198 * meta |= cpu_to_le64 (FBNIC_TWD_FLAG_REQ_CSO );
199+ u64_stats_update_begin (& ring -> stats .syncp );
200+ ring -> stats .twq .csum_partial ++ ;
201+ u64_stats_update_end (& ring -> stats .syncp );
194202
195203 * meta |= cpu_to_le64 (FIELD_PREP (FBNIC_TWD_L2_HLEN_MASK , l2len / 2 ) |
196204 FIELD_PREP (FBNIC_TWD_L3_IHLEN_MASK , i3len / 2 ));
197205 return false;
198206}
199207
200208static void
201- fbnic_rx_csum (u64 rcd , struct sk_buff * skb , struct fbnic_ring * rcq )
209+ fbnic_rx_csum (u64 rcd , struct sk_buff * skb , struct fbnic_ring * rcq ,
210+ u64 * csum_cmpl , u64 * csum_none )
202211{
203212 skb_checksum_none_assert (skb );
204213
205- if (unlikely (!(skb -> dev -> features & NETIF_F_RXCSUM )))
214+ if (unlikely (!(skb -> dev -> features & NETIF_F_RXCSUM ))) {
215+ (* csum_none )++ ;
206216 return ;
217+ }
207218
208219 if (FIELD_GET (FBNIC_RCD_META_L4_CSUM_UNNECESSARY , rcd )) {
209220 skb -> ip_summed = CHECKSUM_UNNECESSARY ;
@@ -212,6 +223,7 @@ fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
212223
213224 skb -> ip_summed = CHECKSUM_COMPLETE ;
214225 skb -> csum = (__force __wsum )csum ;
226+ (* csum_cmpl )++ ;
215227 }
216228}
217229
@@ -444,7 +456,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
444456 if (unlikely (discard )) {
445457 u64_stats_update_begin (& ring -> stats .syncp );
446458 ring -> stats .dropped += total_packets ;
447- ring -> stats .ts_lost += ts_lost ;
459+ ring -> stats .twq . ts_lost += ts_lost ;
448460 u64_stats_update_end (& ring -> stats .syncp );
449461
450462 netdev_tx_completed_queue (txq , total_packets , total_bytes );
@@ -456,9 +468,13 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
456468 ring -> stats .packets += total_packets ;
457469 u64_stats_update_end (& ring -> stats .syncp );
458470
459- netif_txq_completed_wake (txq , total_packets , total_bytes ,
460- fbnic_desc_unused (ring ),
461- FBNIC_TX_DESC_WAKEUP );
471+ if (!netif_txq_completed_wake (txq , total_packets , total_bytes ,
472+ fbnic_desc_unused (ring ),
473+ FBNIC_TX_DESC_WAKEUP )) {
474+ u64_stats_update_begin (& ring -> stats .syncp );
475+ ring -> stats .twq .wake ++ ;
476+ u64_stats_update_end (& ring -> stats .syncp );
477+ }
462478}
463479
464480static void fbnic_clean_tsq (struct fbnic_napi_vector * nv ,
@@ -507,7 +523,7 @@ static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
507523
508524 skb_tstamp_tx (skb , & hwtstamp );
509525 u64_stats_update_begin (& ring -> stats .syncp );
510- ring -> stats .ts_packets ++ ;
526+ ring -> stats .twq . ts_packets ++ ;
511527 u64_stats_update_end (& ring -> stats .syncp );
512528}
513529
@@ -661,8 +677,13 @@ static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
661677 struct page * page ;
662678
663679 page = page_pool_dev_alloc_pages (nv -> page_pool );
664- if (!page )
680+ if (!page ) {
681+ u64_stats_update_begin (& bdq -> stats .syncp );
682+ bdq -> stats .rx .alloc_failed ++ ;
683+ u64_stats_update_end (& bdq -> stats .syncp );
684+
665685 break ;
686+ }
666687
667688 fbnic_page_pool_init (bdq , i , page );
668689 fbnic_bd_prep (bdq , i , page );
@@ -875,12 +896,13 @@ static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd,
875896
876897static void fbnic_populate_skb_fields (struct fbnic_napi_vector * nv ,
877898 u64 rcd , struct sk_buff * skb ,
878- struct fbnic_q_triad * qt )
899+ struct fbnic_q_triad * qt ,
900+ u64 * csum_cmpl , u64 * csum_none )
879901{
880902 struct net_device * netdev = nv -> napi .dev ;
881903 struct fbnic_ring * rcq = & qt -> cmpl ;
882904
883- fbnic_rx_csum (rcd , skb , rcq );
905+ fbnic_rx_csum (rcd , skb , rcq , csum_cmpl , csum_none );
884906
885907 if (netdev -> features & NETIF_F_RXHASH )
886908 skb_set_hash (skb ,
@@ -898,7 +920,8 @@ static bool fbnic_rcd_metadata_err(u64 rcd)
898920static int fbnic_clean_rcq (struct fbnic_napi_vector * nv ,
899921 struct fbnic_q_triad * qt , int budget )
900922{
901- unsigned int packets = 0 , bytes = 0 , dropped = 0 ;
923+ unsigned int packets = 0 , bytes = 0 , dropped = 0 , alloc_failed = 0 ;
924+ u64 csum_complete = 0 , csum_none = 0 ;
902925 struct fbnic_ring * rcq = & qt -> cmpl ;
903926 struct fbnic_pkt_buff * pkt ;
904927 s32 head0 = -1 , head1 = -1 ;
@@ -947,14 +970,22 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
947970
948971 /* Populate skb and invalidate XDP */
949972 if (!IS_ERR_OR_NULL (skb )) {
950- fbnic_populate_skb_fields (nv , rcd , skb , qt );
973+ fbnic_populate_skb_fields (nv , rcd , skb , qt ,
974+ & csum_complete ,
975+ & csum_none );
951976
952977 packets ++ ;
953978 bytes += skb -> len ;
954979
955980 napi_gro_receive (& nv -> napi , skb );
956981 } else {
957- dropped ++ ;
982+ if (!skb ) {
983+ alloc_failed ++ ;
984+ dropped ++ ;
985+ } else {
986+ dropped ++ ;
987+ }
988+
958989 fbnic_put_pkt_buff (nv , pkt , 1 );
959990 }
960991
@@ -977,6 +1008,9 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
9771008 /* Re-add ethernet header length (removed in fbnic_build_skb) */
9781009 rcq -> stats .bytes += ETH_HLEN * packets ;
9791010 rcq -> stats .dropped += dropped ;
1011+ rcq -> stats .rx .alloc_failed += alloc_failed ;
1012+ rcq -> stats .rx .csum_complete += csum_complete ;
1013+ rcq -> stats .rx .csum_none += csum_none ;
9801014 u64_stats_update_end (& rcq -> stats .syncp );
9811015
9821016 /* Unmap and free processed buffers */
@@ -1054,6 +1088,11 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
10541088 fbn -> rx_stats .bytes += stats -> bytes ;
10551089 fbn -> rx_stats .packets += stats -> packets ;
10561090 fbn -> rx_stats .dropped += stats -> dropped ;
1091+ fbn -> rx_stats .rx .alloc_failed += stats -> rx .alloc_failed ;
1092+ fbn -> rx_stats .rx .csum_complete += stats -> rx .csum_complete ;
1093+ fbn -> rx_stats .rx .csum_none += stats -> rx .csum_none ;
1094+ /* Remember to add new stats here */
1095+ BUILD_BUG_ON (sizeof (fbn -> rx_stats .rx ) / 8 != 3 );
10571096}
10581097
10591098void fbnic_aggregate_ring_tx_counters (struct fbnic_net * fbn ,
@@ -1065,8 +1104,13 @@ void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
10651104 fbn -> tx_stats .bytes += stats -> bytes ;
10661105 fbn -> tx_stats .packets += stats -> packets ;
10671106 fbn -> tx_stats .dropped += stats -> dropped ;
1068- fbn -> tx_stats .ts_lost += stats -> ts_lost ;
1069- fbn -> tx_stats .ts_packets += stats -> ts_packets ;
1107+ fbn -> tx_stats .twq .csum_partial += stats -> twq .csum_partial ;
1108+ fbn -> tx_stats .twq .ts_lost += stats -> twq .ts_lost ;
1109+ fbn -> tx_stats .twq .ts_packets += stats -> twq .ts_packets ;
1110+ fbn -> tx_stats .twq .stop += stats -> twq .stop ;
1111+ fbn -> tx_stats .twq .wake += stats -> twq .wake ;
1112+ /* Remember to add new stats here */
1113+ BUILD_BUG_ON (sizeof (fbn -> tx_stats .twq ) / 8 != 5 );
10701114}
10711115
10721116static void fbnic_remove_tx_ring (struct fbnic_net * fbn ,
0 commit comments