@@ -4744,45 +4744,18 @@ static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
 	return rxqueue;
 }
 
-static u32 netif_receive_generic_xdp(struct sk_buff *skb,
-				     struct xdp_buff *xdp,
-				     struct bpf_prog *xdp_prog)
+u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
+			     struct bpf_prog *xdp_prog)
 {
 	void *orig_data, *orig_data_end, *hard_start;
 	struct netdev_rx_queue *rxqueue;
-	u32 metalen, act = XDP_DROP;
 	bool orig_bcast, orig_host;
 	u32 mac_len, frame_sz;
 	__be16 orig_eth_type;
 	struct ethhdr *eth;
+	u32 metalen, act;
 	int off;
 
-	/* Reinjected packets coming from act_mirred or similar should
-	 * not get XDP generic processing.
-	 */
-	if (skb_is_redirected(skb))
-		return XDP_PASS;
-
-	/* XDP packets must be linear and must have sufficient headroom
-	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
-	 * native XDP provides, thus we need to do it here as well.
-	 */
-	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
-	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
-		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
-		int troom = skb->tail + skb->data_len - skb->end;
-
-		/* In case we have to go down the path and also linearize,
-		 * then lets do the pskb_expand_head() work just once here.
-		 */
-		if (pskb_expand_head(skb,
-				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
-				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
-			goto do_drop;
-		if (skb_linearize(skb))
-			goto do_drop;
-	}
-
 	/* The XDP program wants to see the packet starting at the MAC
 	 * header.
 	 */
@@ -4837,6 +4810,13 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 		skb->protocol = eth_type_trans(skb, skb->dev);
 	}
 
+	/* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
+	 * before calling us again on redirect path. We do not call do_redirect
+	 * as we leave that up to the caller.
+	 *
+	 * Caller is responsible for managing lifetime of skb (i.e. calling
+	 * kfree_skb in response to actions it cannot handle/XDP_DROP).
+	 */
 	switch (act) {
 	case XDP_REDIRECT:
 	case XDP_TX:
@@ -4847,6 +4827,49 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 		if (metalen)
 			skb_metadata_set(skb, metalen);
 		break;
+	}
+
+	return act;
+}
+
+static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+				     struct xdp_buff *xdp,
+				     struct bpf_prog *xdp_prog)
+{
+	u32 act = XDP_DROP;
+
+	/* Reinjected packets coming from act_mirred or similar should
+	 * not get XDP generic processing.
+	 */
+	if (skb_is_redirected(skb))
+		return XDP_PASS;
+
+	/* XDP packets must be linear and must have sufficient headroom
+	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
+	 * native XDP provides, thus we need to do it here as well.
+	 */
+	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
+	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
+		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
+		int troom = skb->tail + skb->data_len - skb->end;
+
+		/* In case we have to go down the path and also linearize,
+		 * then lets do the pskb_expand_head() work just once here.
+		 */
+		if (pskb_expand_head(skb,
+				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
+				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
+			goto do_drop;
+		if (skb_linearize(skb))
+			goto do_drop;
+	}
+
+	act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
+	switch (act) {
+	case XDP_REDIRECT:
+	case XDP_TX:
+	case XDP_PASS:
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		fallthrough;
@@ -5312,7 +5335,6 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
 			ret = NET_RX_DROP;
 			goto out;
 		}
-		skb_reset_mac_len(skb);
 	}
 
 	if (eth_type_vlan(skb->protocol)) {
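
Reviewer note on the new helper's contract (commentary, not part of the patch): with this split, any additional caller of bpf_prog_run_generic_xdp() inherits the two obligations that used to be internal to netif_receive_generic_xdp(): preparing the skb before the run (linear, uncloned, XDP_PACKET_HEADROOM of headroom) and managing the skb's lifetime afterwards. A minimal sketch of such a caller follows, modelled on what do_xdp_generic() in this same file does around the wrapper. The function name example_xdp_rx() is hypothetical, and note that generic_xdp_tx() is static to net/core/dev.c, so a caller outside this file would need its own transmit path.

/* Illustrative caller of the split-out helper (hypothetical, modelled on
 * do_xdp_generic()).  Assumes the skb fixups performed by
 * netif_receive_generic_xdp() above have already been applied.
 */
static u32 example_xdp_rx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{
	struct xdp_buff xdp;	/* filled in from the skb by the helper */
	u32 act;

	act = bpf_prog_run_generic_xdp(skb, &xdp, xdp_prog);
	switch (act) {
	case XDP_PASS:
		break;		/* skb continues into the regular stack */
	case XDP_REDIRECT:
		/* skb->data sits at the MAC header here; per the new
		 * comment, the helper leaves the redirect to the caller.
		 */
		if (xdp_do_generic_redirect(skb->dev, skb, &xdp, xdp_prog))
			kfree_skb(skb);	/* redirect failed */
		break;
	case XDP_TX:
		generic_xdp_tx(skb, xdp_prog);
		break;
	default:
		/* XDP_DROP and unknown verdicts: the helper never frees
		 * the skb, so its lifetime is on us.
		 */
		kfree_skb(skb);
		break;
	}
	return act;
}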
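Design note: the split keeps policy in the wrapper and mechanism in the helper. netif_receive_generic_xdp() retains the rx-only concerns (skipping packets reinjected by act_mirred, enforcing linearity and headroom, defaulting unknown verdicts to XDP_DROP), while bpf_prog_run_generic_xdp() only runs the program and fixes up the skb, which is what makes it reusable outside the core rx path. The final hunk drops skb_reset_mac_len() from __netif_receive_skb_core(); presumably that fixup now happens alongside the other skb adjustments inside the shared helper so that every caller gets it, though the relevant lines fall in the elided middle of the function and do not show in these hunks.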