@@ -3314,74 +3314,81 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-static void bnxt_free_tx_skbs(struct bnxt *bp)
+static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
+				       struct bnxt_tx_ring_info *txr, int idx)
 {
 	int i, max_idx;
 	struct pci_dev *pdev = bp->pdev;
 
-	if (!bp->tx_ring)
-		return;
-
 	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
-	for (i = 0; i < bp->tx_nr_rings; i++) {
-		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
-		int j;
 
-		if (!txr->tx_buf_ring)
+	for (i = 0; i < max_idx;) {
+		struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
+		struct sk_buff *skb;
+		int j, last;
+
+		if (idx < bp->tx_nr_rings_xdp &&
+		    tx_buf->action == XDP_REDIRECT) {
+			dma_unmap_single(&pdev->dev,
+					 dma_unmap_addr(tx_buf, mapping),
+					 dma_unmap_len(tx_buf, len),
+					 DMA_TO_DEVICE);
+			xdp_return_frame(tx_buf->xdpf);
+			tx_buf->action = 0;
+			tx_buf->xdpf = NULL;
+			i++;
 			continue;
+		}
 
-		for (j = 0; j < max_idx;) {
-			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
-			struct sk_buff *skb;
-			int k, last;
-
-			if (i < bp->tx_nr_rings_xdp &&
-			    tx_buf->action == XDP_REDIRECT) {
-				dma_unmap_single(&pdev->dev,
-						 dma_unmap_addr(tx_buf, mapping),
-						 dma_unmap_len(tx_buf, len),
-						 DMA_TO_DEVICE);
-				xdp_return_frame(tx_buf->xdpf);
-				tx_buf->action = 0;
-				tx_buf->xdpf = NULL;
-				j++;
-				continue;
-			}
+		skb = tx_buf->skb;
+		if (!skb) {
+			i++;
+			continue;
+		}
 
-			skb = tx_buf->skb;
-			if (!skb) {
-				j++;
-				continue;
-			}
+		tx_buf->skb = NULL;
 
-			tx_buf->skb = NULL;
+		if (tx_buf->is_push) {
+			dev_kfree_skb(skb);
+			i += 2;
+			continue;
+		}
 
-			if (tx_buf->is_push) {
-				dev_kfree_skb(skb);
-				j += 2;
-				continue;
-			}
+		dma_unmap_single(&pdev->dev,
+				 dma_unmap_addr(tx_buf, mapping),
+				 skb_headlen(skb),
+				 DMA_TO_DEVICE);
 
-			dma_unmap_single(&pdev->dev,
-					 dma_unmap_addr(tx_buf, mapping),
-					 skb_headlen(skb),
-					 DMA_TO_DEVICE);
+		last = tx_buf->nr_frags;
+		i += 2;
+		for (j = 0; j < last; j++, i++) {
+			int ring_idx = i & bp->tx_ring_mask;
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
 
-			last = tx_buf->nr_frags;
-			j += 2;
-			for (k = 0; k < last; k++, j++) {
-				int ring_idx = j & bp->tx_ring_mask;
-				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
-
-				tx_buf = &txr->tx_buf_ring[ring_idx];
-				dma_unmap_page(&pdev->dev,
-					       dma_unmap_addr(tx_buf, mapping),
-					       skb_frag_size(frag), DMA_TO_DEVICE);
-			}
-			dev_kfree_skb(skb);
+			tx_buf = &txr->tx_buf_ring[ring_idx];
+			dma_unmap_page(&pdev->dev,
+				       dma_unmap_addr(tx_buf, mapping),
+				       skb_frag_size(frag), DMA_TO_DEVICE);
 		}
-		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+		dev_kfree_skb(skb);
+	}
+	netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
+}
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->tx_ring)
+		return;
+
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+
+		if (!txr->tx_buf_ring)
+			continue;
+
+		bnxt_free_one_tx_ring_skbs(bp, txr, i);
 	}
 }
 
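A note on the index arithmetic in the cleanup loop above: after unmapping a head buffer the index advances by two slots (i += 2), then by one slot per page fragment, and each fragment lookup wraps the running index with bp->tx_ring_mask. A minimal standalone C sketch of that accounting (illustration only, not part of the commit; the ring size, starting slot, and fragment count are made-up values):

#include <stdio.h>

int main(void)
{
	unsigned int tx_ring_mask = 255;	/* hypothetical 256-entry ring */
	unsigned int nr_frags = 3;		/* hypothetical skb with 3 frags */
	unsigned int i = 254;			/* head slot chosen near the wrap */
	unsigned int j;

	/* The head buffer consumes two slots, mirroring "i += 2" above. */
	i += 2;

	/* Each fragment consumes one more slot; the lookup index wraps via
	 * the mask, mirroring "ring_idx = i & bp->tx_ring_mask" above.
	 */
	for (j = 0; j < nr_frags; j++, i++)
		printf("frag %u lives at ring index %u\n", j, i & tx_ring_mask);

	return 0;
}

Running it prints the three fragments at ring indices 0, 1, and 2, showing how the unbounded running index wraps back to the start of the ring.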