Skip to content
Permalink
Browse files
fix
Signed-off-by: Cong Wang <cong.wang@bytedance.com>
  • Loading branch information
Cong Wang committed Feb 16, 2021
1 parent e4b7c10 commit a2a4da967b9c6fb5571d8cc68866812503806f85
Show file tree
Hide file tree
Showing 5 changed files with 53 additions and 35 deletions.
@@ -755,6 +755,9 @@ struct sk_buff {
void (*destructor)(struct sk_buff *skb);
};
struct list_head tcp_tsorted_anchor;
#ifdef CONFIG_NET_SOCK_MSG
unsigned long sk_redir;
#endif
};

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -459,4 +459,43 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
return false;
return !!psock->saved_data_ready;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
/* True if the BPF verdict marked this skb for ingress delivery:
 * bit 0 of skb->sk_redir carries BPF_F_INGRESS.
 */
static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	return skb->sk_redir & BPF_F_INGRESS;
}

static inline
void skb_bpf_set_ingress(const struct sk_buff *skb)
{
skb->sk_redir |= BPF_F_INGRESS;
}

/* Record the redirect target socket (and optional ingress flag) in the
 * skb itself.  The sock pointer and the flag share one word: bit 0 is
 * BPF_F_INGRESS, which assumes sock pointers are at least 2-byte
 * aligned (true for kmalloc'd objects; see skb_bpf_redirect_fetch()).
 *
 * Mutates skb->sk_redir, so the skb parameter must not be
 * const-qualified — the previous const declaration did not compile.
 */
static inline
void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir, bool ingress)
{
	skb->sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->sk_redir |= BPF_F_INGRESS;
}

static inline
struct sock *skb_bpf_redirect_fetch(struct sk_buff *skb)
{
unsigned long sk_redir = skb->sk_redir;

sk_redir &= ~0x1UL;
return (struct sock *) sk_redir;
}

/* Drop any BPF redirect state (target sock + ingress flag) from the skb. */
static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */
@@ -882,30 +882,11 @@ struct tcp_skb_cb {
struct inet6_skb_parm h6;
#endif
} header; /* For incoming skbs */
struct {
__u32 flags;
struct sock *sk_redir;
} bpf;
};
};

#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* Legacy accessor (removed by this commit): read the BPF ingress flag
 * from tcp_skb_cb's bpf area; superseded by skb_bpf_ingress().
 */
static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
{
return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
}

/* Legacy accessor (removed by this commit): fetch the redirect target
 * sock from tcp_skb_cb; superseded by skb_bpf_redirect_fetch().
 */
static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
{
return TCP_SKB_CB(skb)->bpf.sk_redir;
}

/* Legacy accessor (removed by this commit): clear the redirect target
 * in tcp_skb_cb; superseded by skb_bpf_redirect_clear().
 */
static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
{
TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
}

extern const struct inet_connection_sock_af_ops ipv4_specific;

#if IS_ENABLED(CONFIG_IPV6)
@@ -525,7 +525,7 @@ static void sk_psock_backlog(struct work_struct *work)
len = skb->len;
off = 0;
start:
ingress = tcp_skb_bpf_ingress(skb);
ingress = skb_bpf_ingress(skb);
do {
ret = -EIO;
if (likely(psock->sk->sk_socket))
@@ -752,7 +752,7 @@ static void sk_psock_skb_redirect(struct sk_buff *skb)
struct sk_psock *psock_other;
struct sock *sk_other;

sk_other = tcp_skb_bpf_redirect_fetch(skb);
sk_other = skb_bpf_redirect_fetch(skb);
/* This error is a buggy BPF program, it returned a redirect
* return code, but then didn't set a redirect interface.
*/
@@ -802,9 +802,9 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
* TLS context.
*/
skb->sk = psock->sk;
tcp_skb_bpf_redirect_clear(skb);
skb_bpf_redirect_clear(skb);
ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
skb->sk = NULL;
}
sk_psock_tls_verdict_apply(skb, psock->sk, ret);
@@ -828,8 +828,7 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
goto out_free;
}

tcp = TCP_SKB_CB(skb);
tcp->bpf.flags |= BPF_F_INGRESS;
skb_bpf_set_ingress(skb);

/* If the queue is empty then we can submit directly
* into the msg queue. If its not empty we have to
@@ -890,9 +889,9 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
skb_set_owner_r(skb, sk);
prog = READ_ONCE(psock->progs.skb_verdict);
if (likely(prog)) {
tcp_skb_bpf_redirect_clear(skb);
skb_bpf_redirect_clear(skb);
ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
}
sk_psock_verdict_apply(psock, skb, ret);
out:
@@ -1005,9 +1004,9 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
skb_set_owner_r(skb, sk);
prog = READ_ONCE(psock->progs.skb_verdict);
if (likely(prog)) {
tcp_skb_bpf_redirect_clear(skb);
skb_bpf_redirect_clear(skb);
ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
}
sk_psock_verdict_apply(psock, skb, ret);
out:
@@ -657,7 +657,6 @@ const struct bpf_func_proto bpf_sock_map_update_proto = {
BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
struct bpf_map *, map, u32, key, u64, flags)
{
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
struct sock *sk;

if (unlikely(flags & ~(BPF_F_INGRESS)))
@@ -667,8 +666,7 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP;

tcb->bpf.flags = flags;
tcb->bpf.sk_redir = sk;
skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
return SK_PASS;
}

@@ -1250,7 +1248,6 @@ const struct bpf_func_proto bpf_sock_hash_update_proto = {
BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
struct bpf_map *, map, void *, key, u64, flags)
{
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
struct sock *sk;

if (unlikely(flags & ~(BPF_F_INGRESS)))
@@ -1260,8 +1257,7 @@ BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP;

tcb->bpf.flags = flags;
tcb->bpf.sk_redir = sk;
skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
return SK_PASS;
}

0 comments on commit a2a4da9

Please sign in to comment.