bpf: Fix stale map removal in agent logs #17973

Merged · 2 commits · Nov 23, 2021
1 change: 1 addition & 0 deletions bpf/bpf_alignchecker.c
@@ -7,6 +7,7 @@
#define DROP_NOTIFY
#define POLICY_VERDICT_NOTIFY
#define ENABLE_EGRESS_GATEWAY
+#define ENABLE_CAPTURE
#undef ENABLE_ARP_RESPONDER

#include <bpf/ctx/unspec.h>
35 changes: 23 additions & 12 deletions bpf/lib/pcap.h
@@ -7,7 +7,9 @@
#include <bpf/ctx/ctx.h>
#include <bpf/api.h>

+#ifdef ENABLE_CAPTURE
#include "common.h"
#include "time_cache.h"
#include "lb.h"

struct pcap_timeval {
@@ -143,7 +145,7 @@ struct capture6_wcard {
__u8 flags; /* reserved: 0 */
};

-#if defined(ENABLE_IPV4) && defined(ENABLE_CAPTURE)
+#ifdef ENABLE_IPV4
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, struct capture4_wcard);
@@ -253,9 +255,9 @@ _Pragma("unroll")

return NULL;
}
-#endif /* ENABLE_IPV4 && ENABLE_CAPTURE */
+#endif /* ENABLE_IPV4 */

-#if defined(ENABLE_IPV6) && defined(ENABLE_CAPTURE)
+#ifdef ENABLE_IPV6
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, struct capture6_wcard);
@@ -378,7 +380,7 @@ _Pragma("unroll")

return NULL;
}
-#endif /* ENABLE_IPV6 && ENABLE_CAPTURE */
+#endif /* ENABLE_IPV6 */

static __always_inline struct capture_rule *
cilium_capture_classify_wcard(struct __ctx_buff *ctx)
@@ -389,16 +391,16 @@ cilium_capture_classify_wcard(struct __ctx_buff *ctx)
if (!validate_ethertype(ctx, &proto))
return ret;
switch (proto) {
-#if defined(ENABLE_IPV4) && defined(ENABLE_CAPTURE)
+#ifdef ENABLE_IPV4
case bpf_htons(ETH_P_IP):
ret = cilium_capture4_classify_wcard(ctx);
break;
-#endif /* ENABLE_IPV4 && ENABLE_CAPTURE */
-#if defined(ENABLE_IPV6) && defined(ENABLE_CAPTURE)
+#endif
+#ifdef ENABLE_IPV6
case bpf_htons(ETH_P_IPV6):
ret = cilium_capture6_classify_wcard(ctx);
break;
-#endif /* ENABLE_IPV6 && ENABLE_CAPTURE */
+#endif
default:
break;
}
@@ -455,19 +457,16 @@ cilium_capture_cached(struct __ctx_buff *ctx __maybe_unused,
static __always_inline void
cilium_capture_in(struct __ctx_buff *ctx __maybe_unused)
{
-#ifdef ENABLE_CAPTURE
__u32 cap_len;
__u16 rule_id;

if (cilium_capture_candidate(ctx, &rule_id, &cap_len))
__cilium_capture_in(ctx, rule_id, cap_len);
-#endif /* ENABLE_CAPTURE */
}

static __always_inline void
cilium_capture_out(struct __ctx_buff *ctx __maybe_unused)
{
-#ifdef ENABLE_CAPTURE
__u32 cap_len;
__u16 rule_id;

@@ -477,7 +476,19 @@ cilium_capture_out(struct __ctx_buff *ctx __maybe_unused)
*/
if (cilium_capture_cached(ctx, &rule_id, &cap_len))
__cilium_capture_out(ctx, rule_id, cap_len);
-#endif /* ENABLE_CAPTURE */
}

+#else /* ENABLE_CAPTURE */
+
+static __always_inline void
+cilium_capture_in(struct __ctx_buff *ctx __maybe_unused)
+{
+}
+
+static __always_inline void
+cilium_capture_out(struct __ctx_buff *ctx __maybe_unused)
+{
+}
+
+#endif /* ENABLE_CAPTURE */
#endif /* __LIB_PCAP_H_ */
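
The net effect of the pcap.h change is that the ENABLE_CAPTURE guard now lives inside the header, with empty stubs compiled when the feature is off, so datapath callers no longer need their own #ifdef around the capture hooks. A minimal sketch of such a caller follows; the program name and section are illustrative and not taken from this PR:

/* Illustrative sketch, not part of this diff: the capture hooks can be
 * called unconditionally; with ENABLE_CAPTURE undefined they resolve to
 * the empty stubs above and compile away.
 */
#include "lib/pcap.h"

__section("from-container")
int example_handle_packet(struct __ctx_buff *ctx)
{
	cilium_capture_in(ctx);		/* no #ifdef ENABLE_CAPTURE at the call site */

	/* ... regular policy / forwarding logic ... */

	cilium_capture_out(ctx);
	return CTX_ACT_OK;
}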
34 changes: 0 additions & 34 deletions bpf/lib/time.h
@@ -27,38 +27,4 @@
#define bpf_sec_to_jiffies(s) \
({ __u64 __x = (s) * KERNEL_HZ; __x; })

/* Per-CPU ktime cache for faster clock access. */
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__type(key, __u32);
__type(value, __u64);
__uint(pinning, LIBBPF_PIN_BY_NAME);
__uint(max_entries, 1);
} cilium_ktime_cache __section_maps_btf;

/* Currently supported clock types:
*
* - bpf_ktime_cache_set(ns) -> CLOCK_MONOTONIC
* - bpf_ktime_cache_set(boot_ns) -> CLOCK_BOOTTIME
*/
#define bpf_ktime_cache_set(clock) \
({ \
__u32 __z = 0; \
__u64 *__cache = map_lookup_elem(&cilium_ktime_cache, &__z); \
__u64 __ktime = ktime_get_##clock(); \
if (always_succeeds(__cache)) \
*__cache = __ktime; \
__ktime; \
})

#define bpf_ktime_cache_get() \
({ \
__u32 __z = 0; \
__u64 *__cache = map_lookup_elem(&cilium_ktime_cache, &__z); \
__u64 __ktime = 0; \
if (always_succeeds(__cache)) \
__ktime = *__cache; \
__ktime; \
})

#endif /* __LIB_TIME_H_ */
46 changes: 46 additions & 0 deletions bpf/lib/time_cache.h
@@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2016-2021 Authors of Cilium */

#ifndef __LIB_TIME_CACHE_H_
#define __LIB_TIME_CACHE_H_

#include <bpf/ctx/ctx.h>
#include <bpf/api.h>

#include "time.h"

/* Per-CPU ktime cache for faster clock access. */
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__type(key, __u32);
__type(value, __u64);
__uint(pinning, LIBBPF_PIN_BY_NAME);
__uint(max_entries, 1);
} cilium_ktime_cache __section_maps_btf;

/* Currently supported clock types:
*
* - bpf_ktime_cache_set(ns) -> CLOCK_MONOTONIC
* - bpf_ktime_cache_set(boot_ns) -> CLOCK_BOOTTIME
*/
#define bpf_ktime_cache_set(clock) \
({ \
__u32 __z = 0; \
__u64 *__cache = map_lookup_elem(&cilium_ktime_cache, &__z); \
__u64 __ktime = ktime_get_##clock(); \
if (always_succeeds(__cache)) \
*__cache = __ktime; \
__ktime; \
})

#define bpf_ktime_cache_get() \
({ \
__u32 __z = 0; \
__u64 *__cache = map_lookup_elem(&cilium_ktime_cache, &__z); \
__u64 __ktime = 0; \
if (always_succeeds(__cache)) \
__ktime = *__cache; \
__ktime; \
})

#endif /* __LIB_TIME_CACHE_H_ */
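
For context only (not part of this diff): the cache is meant to be refreshed once with a real clock read and then consumed cheaply afterwards, per the clock-type comment above. A small illustrative sketch with a hypothetical helper name:

/* Illustrative sketch: one real clock read populates the per-CPU cache
 * (bpf_ktime_cache_set(boot_ns) -> CLOCK_BOOTTIME per the comment above),
 * and later consumers read the cached value instead of calling the ktime
 * helper again.
 */
#include "lib/time_cache.h"

static __always_inline __u64 example_stamp_packet(void)
{
	__u64 now = bpf_ktime_cache_set(boot_ns);	/* real clock read, cached per CPU */
	__u64 cached = bpf_ktime_cache_get();		/* cheap per-CPU array lookup */

	return cached ? cached : now;			/* fall back if the lookup failed */
}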