@@ -643,16 +643,28 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
643643 umr_wqe -> uctrl .mkey_mask = cpu_to_be64 (MLX5_MKEY_MASK_FREE );
644644}
645645
/* Map a SHAMPO header index to the frag_page backing it. Header entries are
 * packed MLX5E_SHAMPO_WQ_HEADER_PER_PAGE to a page, so the page slot is the
 * header index shifted down by the per-page log.
 */
static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index)
{
	/* A single header entry must never be larger than a page, otherwise an
	 * entry could straddle two pages and this mapping would be wrong.
	 */
	BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);

	return &rq->mpwqe.shampo->pages[header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE];
}
652+
653+ static u64 mlx5e_shampo_hd_offset (int header_index )
654+ {
655+ return (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1 )) <<
656+ MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE ;
657+ }
658+
659+ static void mlx5e_free_rx_shampo_hd_entry (struct mlx5e_rq * rq , u16 header_index );
660+
646661static int mlx5e_build_shampo_hd_umr (struct mlx5e_rq * rq ,
647662 struct mlx5e_icosq * sq ,
648663 u16 ksm_entries , u16 index )
649664{
650665 struct mlx5e_shampo_hd * shampo = rq -> mpwqe .shampo ;
651666 u16 pi , header_offset , err , wqe_bbs ;
652667 u32 lkey = rq -> mdev -> mlx5e_res .hw_objs .mkey ;
653- u16 page_index = shampo -> curr_page_index ;
654- struct mlx5e_frag_page * frag_page = NULL ;
655- struct mlx5e_dma_info * dma_info ;
656668 struct mlx5e_umr_wqe * umr_wqe ;
657669 int headroom , i ;
658670 u64 addr = 0 ;
@@ -665,29 +677,20 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
665677
666678 WARN_ON_ONCE (ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1 ));
667679 for (i = 0 ; i < ksm_entries ; i ++ , index ++ ) {
668- dma_info = & shampo -> info [index ];
669- header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1 )) <<
670- MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE ;
671- if (!(header_offset & (PAGE_SIZE - 1 ))) {
672- frag_page = & shampo -> pages [page_index ];
673- page_index = (page_index + 1 ) & (shampo -> pages_per_wq - 1 );
680+ header_offset = mlx5e_shampo_hd_offset (index );
681+ if (!header_offset ) {
682+ struct mlx5e_frag_page * frag_page = mlx5e_shampo_hd_to_frag_page (rq , index );
674683
675684 err = mlx5e_page_alloc_fragmented (rq , frag_page );
676685 if (unlikely (err ))
677686 goto err_unmap ;
678687
679688 addr = page_pool_get_dma_addr (frag_page -> page );
680-
681- dma_info -> addr = addr ;
682- dma_info -> frag_page = frag_page ;
683- } else {
684- dma_info -> addr = addr + header_offset ;
685- dma_info -> frag_page = frag_page ;
686689 }
687690
688691 umr_wqe -> inline_ksms [i ] = (struct mlx5_ksm ) {
689692 .key = cpu_to_be32 (lkey ),
690- .va = cpu_to_be64 (dma_info -> addr + headroom ),
693+ .va = cpu_to_be64 (addr + header_offset + headroom ),
691694 };
692695 }
693696
@@ -698,20 +701,22 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
698701 };
699702
700703 shampo -> pi = (shampo -> pi + ksm_entries ) & (shampo -> hd_per_wq - 1 );
701- shampo -> curr_page_index = page_index ;
702704 sq -> pc += wqe_bbs ;
703705 sq -> doorbell_cseg = & umr_wqe -> ctrl ;
704706
705707 return 0 ;
706708
707709err_unmap :
708- while (-- i >= 0 ) {
709- dma_info = & shampo -> info [-- index ];
710- if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1 ))) {
711- dma_info -> addr = ALIGN_DOWN (dma_info -> addr , PAGE_SIZE );
712- mlx5e_page_release_fragmented (rq , dma_info -> frag_page );
710+ while (-- i ) {
711+ -- index ;
712+ header_offset = mlx5e_shampo_hd_offset (index );
713+ if (!header_offset ) {
714+ struct mlx5e_frag_page * frag_page = mlx5e_shampo_hd_to_frag_page (rq , index );
715+
716+ mlx5e_page_release_fragmented (rq , frag_page );
713717 }
714718 }
719+
715720 rq -> stats -> buff_alloc_err ++ ;
716721 return err ;
717722}
/* Release the SHAMPO header entry @header_index back to the RQ. */
static void
mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;

	/* Header entries share pages, so the backing page is released only
	 * when the last entry of its page is freed.
	 */
	if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
		struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);

		mlx5e_page_release_fragmented(rq, frag_page);
	}
	/* Mark the entry as no longer in flight. */
	clear_bit(header_index, shampo->bitmap);
}
@@ -1204,10 +1207,10 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
12041207
/* Return a CPU pointer to the packet header staged at @header_index:
 * the entry's offset inside its backing page, past the RQ headroom.
 */
static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
{
	struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
	u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom;

	return page_address(frag_page->page) + head_offset;
}
12121215
12131216static void mlx5e_shampo_update_ipv4_udp_hdr (struct mlx5e_rq * rq , struct iphdr * ipv4 )
@@ -2178,29 +2181,30 @@ static struct sk_buff *
21782181mlx5e_skb_from_cqe_shampo (struct mlx5e_rq * rq , struct mlx5e_mpw_info * wi ,
21792182 struct mlx5_cqe64 * cqe , u16 header_index )
21802183{
2181- struct mlx5e_dma_info * head = & rq -> mpwqe .shampo -> info [header_index ];
2182- u16 head_offset = head -> addr & (PAGE_SIZE - 1 );
2184+ struct mlx5e_frag_page * frag_page = mlx5e_shampo_hd_to_frag_page (rq , header_index );
2185+ dma_addr_t page_dma_addr = page_pool_get_dma_addr (frag_page -> page );
2186+ u16 head_offset = mlx5e_shampo_hd_offset (header_index );
2187+ dma_addr_t dma_addr = page_dma_addr + head_offset ;
21832188 u16 head_size = cqe -> shampo .header_size ;
21842189 u16 rx_headroom = rq -> buff .headroom ;
21852190 struct sk_buff * skb = NULL ;
21862191 void * hdr , * data ;
21872192 u32 frag_size ;
21882193
2189- hdr = page_address (head -> frag_page -> page ) + head_offset ;
2194+ hdr = page_address (frag_page -> page ) + head_offset ;
21902195 data = hdr + rx_headroom ;
21912196 frag_size = MLX5_SKB_FRAG_SZ (rx_headroom + head_size );
21922197
21932198 if (likely (frag_size <= BIT (MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE ))) {
21942199 /* build SKB around header */
2195- dma_sync_single_range_for_cpu (rq -> pdev , head -> addr , 0 , frag_size , rq -> buff .map_dir );
2200+ dma_sync_single_range_for_cpu (rq -> pdev , dma_addr , 0 , frag_size , rq -> buff .map_dir );
21962201 net_prefetchw (hdr );
21972202 net_prefetch (data );
21982203 skb = mlx5e_build_linear_skb (rq , hdr , frag_size , rx_headroom , head_size , 0 );
2199-
22002204 if (unlikely (!skb ))
22012205 return NULL ;
22022206
2203- head -> frag_page -> frags ++ ;
2207+ frag_page -> frags ++ ;
22042208 } else {
22052209 /* allocate SKB and copy header for large header */
22062210 rq -> stats -> gro_large_hds ++ ;
@@ -2212,7 +2216,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
22122216 }
22132217
22142218 net_prefetchw (skb -> data );
2215- mlx5e_copy_skb_header (rq , skb , head -> frag_page -> page , head -> addr ,
2219+ mlx5e_copy_skb_header (rq , skb , frag_page -> page , dma_addr ,
22162220 head_offset + rx_headroom ,
22172221 rx_headroom , head_size );
22182222 /* skb linear part was allocated with headlen and aligned to long */
0 commit comments