diff --git a/Makefile b/Makefile
index 048cce0b7c..bdb97e37ab 100644
--- a/Makefile
+++ b/Makefile
@@ -229,6 +229,10 @@ README.md: $(CMD_EMBEDMD) $(OUT_DIR)/help.txt deploy/manifests
 .PHONY: format
 format: go-fmt check-license
 
+.PHONY: c-fmt
+c-fmt:
+	clang-format -i --style=GNU $(BPF_SRC)
+
 .PHONY: go-fmt
 go-fmt:
 	go fmt $(shell go list ./... | grep -E -v "pkg/internal/pprof|pkg/internal/go")
diff --git a/parca-agent.bpf.c b/parca-agent.bpf.c
index 6ab11fbcf4..59ac876367 100644
--- a/parca-agent.bpf.c
+++ b/parca-agent.bpf.c
@@ -1,9 +1,12 @@
 // +build ignore
-// ^^ this is a golang build tag meant to exclude this C file from compilation by the CGO compiler
+// ^^ this is a golang build tag meant to exclude this C file from compilation
+// by the CGO compiler
 
 /* In Linux 5.4 asm_inline was introduced, but it's not supported by clang.
  * Redefine it to just asm to enable successful compilation.
- * see https://github.com/iovisor/bcc/commit/2d1497cde1cc9835f759a707b42dea83bee378b8 for more details
+ * see
+ * https://github.com/iovisor/bcc/commit/2d1497cde1cc9835f759a707b42dea83bee378b8
+ * for more details
  */
 #include "vmlinux.h"
 #ifdef asm_inline
@@ -15,101 +18,106 @@
 
 #undef container_of
 //#include "bpf_core_read.h"
-#include 
-#include 
 #include 
 #include 
+#include 
+#include 
 
 #if defined(bpf_target_x86)
-#define PT_REGS_PARM6(ctx) ((ctx)->r9)
+#define PT_REGS_PARM6(ctx) ((ctx)->r9)
 #elif defined(bpf_target_arm64)
 #define PT_REGS_PARM6(x) (((PT_REGS_ARM64 *)(x))->regs[5])
 #endif
 
-#define MAX_STACK_ADDRESSES 1024 // Max amount of different stack trace addresses to buffer in the Map
-#define MAX_STACK_DEPTH 127 // Max depth of each stack trace to track
+// Max amount of different stack trace addresses to buffer in the Map
+#define MAX_STACK_ADDRESSES 1024
+// Max depth of each stack trace to track
+#define MAX_STACK_DEPTH 127
 
-#define BPF_MAP(_name, _type, _key_type, _value_type, _max_entries) \
-struct bpf_map_def SEC("maps") _name = { \
-  .type = _type, \
-  .key_size = sizeof(_key_type), \
-  .value_size = sizeof(_value_type), \
-  .max_entries = _max_entries, \
-};
+#define BPF_MAP(_name, _type, _key_type, _value_type, _max_entries) \
+  struct bpf_map_def SEC ("maps") _name = { \
+    .type = _type, \
+    .key_size = sizeof (_key_type), \
+    .value_size = sizeof (_value_type), \
+    .max_entries = _max_entries, \
+  };
 
 // Stack Traces are slightly different
 // in that the value is 1 big byte array
 // of the stack addresses
-#define BPF_STACK_TRACE(_name, _max_entries) \
-struct bpf_map_def SEC("maps") _name = { \
-  .type = BPF_MAP_TYPE_STACK_TRACE, \
-  .key_size = sizeof(u32), \
-  .value_size = sizeof(size_t) * MAX_STACK_DEPTH, \
-  .max_entries = _max_entries, \
-};
-
-#define BPF_HASH(_name, _key_type, _value_type) \
-BPF_MAP(_name, BPF_MAP_TYPE_HASH, _key_type, _value_type, 10240);
-
-/*=============================== INTERNAL STRUCTS ===========================*/
-
-typedef struct stack_count_key {
-  u32 pid;
-  int user_stack_id;
-  int kernel_stack_id;
+#define BPF_STACK_TRACE(_name, _max_entries) \
+  struct bpf_map_def SEC ("maps") _name = { \
+    .type = BPF_MAP_TYPE_STACK_TRACE, \
+    .key_size = sizeof (u32), \
+    .value_size = sizeof (size_t) * MAX_STACK_DEPTH, \
+    .max_entries = _max_entries, \
+  };
+
+#define BPF_HASH(_name, _key_type, _value_type) \
+  BPF_MAP (_name, BPF_MAP_TYPE_HASH, _key_type, _value_type, 10240);
+
+/*============================= INTERNAL STRUCTS ============================*/
+
+typedef struct stack_count_key
+{
+  u32 pid;
+  int user_stack_id;
+  int kernel_stack_id;
 } stack_count_key_t;
 
-/*=================================== MAPS =====================================*/
+/*================================ MAPS =====================================*/
 
-BPF_HASH(counts, stack_count_key_t, u64);
-BPF_STACK_TRACE(stack_traces, MAX_STACK_ADDRESSES);
+BPF_HASH (counts, stack_count_key_t, u64);
+BPF_STACK_TRACE (stack_traces, MAX_STACK_ADDRESSES);
 
-/*============================== HELPER FUNCTIONS ==============================*/
+/*=========================== HELPER FUNCTIONS ==============================*/
 
 static __always_inline void *
-bpf_map_lookup_or_try_init(void *map, const void *key, const void *init)
+bpf_map_lookup_or_try_init (void *map, const void *key, const void *init)
 {
-    void *val;
-    long err;
+  void *val;
+  long err;
 
-    val = bpf_map_lookup_elem(map, key);
-    if (val)
-        return val;
+  val = bpf_map_lookup_elem (map, key);
+  if (val)
+    return val;
 
-    err = bpf_map_update_elem(map, key, init, BPF_NOEXIST);
-    // 17 == EEXIST
-    if (err && err != -17)
-        return 0;
+  err = bpf_map_update_elem (map, key, init, BPF_NOEXIST);
+  // 17 == EEXIST
+  if (err && err != -17)
+    return 0;
 
-    return bpf_map_lookup_elem(map, key);
+  return bpf_map_lookup_elem (map, key);
 }
 
 // This code gets a bit complex. Probably not suitable for casual hacking.
-SEC("perf_event")
-int do_sample(struct bpf_perf_event_data *ctx) {
-    u64 id = bpf_get_current_pid_tgid();
-    u32 tgid = id >> 32;
-    u32 pid = id;
+SEC ("perf_event")
+int
+do_sample (struct bpf_perf_event_data *ctx)
+{
+  u64 id = bpf_get_current_pid_tgid ();
+  u32 tgid = id >> 32;
+  u32 pid = id;
 
-    if (pid == 0)
-        return 0;
+  if (pid == 0)
+    return 0;
 
-    // create map key
-    stack_count_key_t key = {.pid = tgid};
+  // create map key
+  stack_count_key_t key = { .pid = tgid };
 
-    // get stacks
-    key.user_stack_id = bpf_get_stackid(ctx, &stack_traces, BPF_F_USER_STACK);
-    key.kernel_stack_id = bpf_get_stackid(ctx, &stack_traces, 0);
+  // get stacks
+  key.user_stack_id = bpf_get_stackid (ctx, &stack_traces, BPF_F_USER_STACK);
+  key.kernel_stack_id = bpf_get_stackid (ctx, &stack_traces, 0);
 
-    u64 zero = 0;
-    u64 *count;
-    count = bpf_map_lookup_or_try_init(&counts, &key, &zero);
-    if (!count)
-        return 0;
+  u64 zero = 0;
+  u64 *count;
+  count = bpf_map_lookup_or_try_init (&counts, &key, &zero);
+  if (!count)
+    return 0;
 
-    __sync_fetch_and_add(count, 1);
+  __sync_fetch_and_add (count, 1);
 
-    return 0;
+  return 0;
 }
 
-char LICENSE[] SEC("license") = "GPL";
+char LICENSE[] SEC ("license") = "GPL";
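
Not part of the diff above: for orientation, a minimal user-space sketch of how the counts and stack_traces maps defined in parca-agent.bpf.c could be drained with libbpf. The object path parca-agent.bpf.o, the mirrored key struct, and the assumption that the perf_event program has already been attached elsewhere are illustrative only; parca-agent itself consumes these maps from its Go agent, not from C.

// Hypothetical reader for the maps declared in parca-agent.bpf.c (not from this PR).
// Assumes the BPF object was compiled to parca-agent.bpf.o and that something
// else attaches the perf_event program so `counts` is actually being filled.
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_STACK_DEPTH 127 // must match the BPF side

// User-space mirror of stack_count_key_t from parca-agent.bpf.c.
struct stack_count_key {
  uint32_t pid;
  int user_stack_id;
  int kernel_stack_id;
};

int main(void) {
  struct bpf_object *obj = bpf_object__open_file("parca-agent.bpf.o", NULL);
  if (libbpf_get_error(obj))
    return 1;
  if (bpf_object__load(obj))
    return 1;

  int counts_fd = bpf_object__find_map_fd_by_name(obj, "counts");
  int traces_fd = bpf_object__find_map_fd_by_name(obj, "stack_traces");
  if (counts_fd < 0 || traces_fd < 0)
    return 1;

  struct stack_count_key key, next;
  uint64_t count;
  uint64_t addrs[MAX_STACK_DEPTH];
  void *prev = NULL;

  // Walk every (pid, user_stack_id, kernel_stack_id) -> sample count entry.
  while (bpf_map_get_next_key(counts_fd, prev, &next) == 0) {
    if (bpf_map_lookup_elem(counts_fd, &next, &count) == 0) {
      printf("pid=%u samples=%llu\n", next.pid, (unsigned long long)count);
      // Resolve the user-space stack id back to raw instruction addresses.
      if (next.user_stack_id >= 0 &&
          bpf_map_lookup_elem(traces_fd, &next.user_stack_id, addrs) == 0)
        printf("  top frame: 0x%llx\n", (unsigned long long)addrs[0]);
    }
    key = next;
    prev = &key;
  }

  bpf_object__close(obj);
  return 0;
}

Running this requires CAP_BPF/CAP_PERFMON (or root); without the perf_event program attached, the loop simply finds an empty map.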