bpf: introduce helper bpf_get_branch_snapshot
Introduce bpf_get_branch_snapshot(), which allows a tracing program to get
the branch trace from hardware (e.g. Intel LBR). To use the feature, the
user needs to create a perf_event with proper branch_record filtering
on each cpu, and then call bpf_get_branch_snapshot in the bpf program.
On Intel CPUs, the VLBR event (raw event 0x1b00) can be used for this.

Signed-off-by: Song Liu <songliubraving@fb.com>
liu-song-6 authored and intel-lab-lkp committed Aug 30, 2021
1 parent 25bdf9e commit 09548da
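
Before the diff, here is a minimal userspace sketch of the setup the commit
message describes: opening a raw VLBR event (0x1b00 on Intel) with
branch-stack sampling on each CPU, so the LBR hardware is recording when the
BPF program later calls the helper. This is illustrative only and not part of
this commit; the exact branch_sample_type filter depends on the use case.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative only: arm the LBR on one CPU with a VLBR event so that
 * bpf_get_branch_snapshot() has data to read. Call once per online CPU.
 */
static int open_vlbr_event(int cpu)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_RAW,
		.size = sizeof(attr),
		.config = 0x1b00, /* VLBR raw event on Intel CPUs */
		.sample_type = PERF_SAMPLE_BRANCH_STACK,
		.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL |
				      PERF_SAMPLE_BRANCH_USER |
				      PERF_SAMPLE_BRANCH_ANY,
	};

	/* pid == -1, cpu >= 0: count on this CPU for any task */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1,
		       PERF_FLAG_FD_CLOEXEC);
}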
Showing 7 changed files with 104 additions and 1 deletion.
2 changes: 2 additions & 0 deletions include/linux/bpf.h
@@ -2220,4 +2220,6 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);

DECLARE_PER_CPU(struct perf_branch_snapshot, bpf_perf_branch_snapshot);

#endif /* _LINUX_BPF_H */
3 changes: 2 additions & 1 deletion include/linux/filter.h
@@ -575,7 +575,8 @@ struct bpf_prog {
has_callchain_buf:1, /* callchain buffer allocated? */
enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
call_get_func_ip:1; /* Do we call get_func_ip() */
call_get_func_ip:1, /* Do we call get_func_ip() */
call_get_branch:1; /* Do we call get_branch_snapshot() */
enum bpf_prog_type type; /* Type of BPF program */
enum bpf_attach_type expected_attach_type; /* For some prog types */
u32 len; /* Number of filter blocks */
16 changes: 16 additions & 0 deletions include/uapi/linux/bpf.h
@@ -4877,6 +4877,21 @@ union bpf_attr {
* Get the struct pt_regs associated with **task**.
* Return
* A pointer to struct pt_regs.
*
* long bpf_get_branch_snapshot(void *entries, u32 size)
* Description
* Get branch trace from hardware engines like Intel LBR. The
* branch trace is taken soon after the trigger point of the
* BPF program, so it may contain some entries after the
* trigger point. The user needs to filter these entries
* accordingly.
*
* The data is written as a sequence of struct perf_branch_entry
* records into the output buffer *entries*. *size* is the size
* of *entries* in bytes.
*
* Return
* > 0, number of valid output entries.
* **-EOPNOTSUPP**, the hardware/kernel does not support this function.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5055,6 +5070,7 @@ union bpf_attr {
FN(get_func_ip), \
FN(get_attach_cookie), \
FN(task_pt_regs), \
FN(get_branch_snapshot), \
/* */

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
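
To illustrate the helper documentation above, here is a hedged BPF-side
sketch. It is not part of this commit: the attach point, buffer size, and the
assumption that bpf_helpers.h has been regenerated from this patch's uapi
header are all choices made for the example. Note the two-argument signature
in this version of the patch and the entry-count return value; since the
helper is gpl_only, the program must carry a GPL-compatible license.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical consumer of the new helper; assumes bpf_helpers.h was
 * regenerated from this patch's uapi header so the declaration exists. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define ENTRY_CNT 32

struct perf_branch_entry entries[ENTRY_CNT] = {};
long total_entries = 0;

SEC("fexit/bpf_fentry_test1") /* example attach point */
int BPF_PROG(get_branches)
{
	/* Returns the number of valid entries, or -EOPNOTSUPP when no
	 * branch snapshot is available on this CPU. Entries recorded
	 * after the trigger point must be filtered by the user. */
	total_entries = bpf_get_branch_snapshot(entries, sizeof(entries));
	return 0;
}

char _license[] SEC("license") = "GPL"; /* helper is gpl_only */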
13 changes: 13 additions & 0 deletions kernel/bpf/trampoline.c
@@ -10,6 +10,7 @@
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
#include <linux/static_call.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
@@ -564,6 +565,18 @@ static void notrace inc_misses_counter(struct bpf_prog *prog)
u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
__acquires(RCU)
{
#ifdef CONFIG_PERF_EVENTS
/* Calling migrate_disable costs two entries in the LBR. To save
* some entries, we call perf_snapshot_branch_stack before
* migrate_disable. This is OK because we only care about the
* branch trace before entering the BPF program. If migration
* happens exactly here, there isn't much we can do to preserve
* the data.
*/
if (prog->call_get_branch)
static_call(perf_snapshot_branch_stack)(
this_cpu_ptr(&bpf_perf_branch_snapshot));
#endif
rcu_read_lock();
migrate_disable();
if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
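
Note: perf_snapshot_branch_stack is a static call provided by a companion
perf patch that is not part of this commit. Inferring from the call site
above, its declaration is presumably along these lines (hypothetical sketch;
the typedef name and return type are inferred, not taken from the series):

#include <linux/static_call.h>

/* Hypothetical: a hook, patched in by the perf core, that copies the
 * current LBR contents into the supplied per-CPU snapshot. */
typedef void (perf_snapshot_branch_stack_t)(struct perf_branch_snapshot *snapshot);
DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);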
12 changes: 12 additions & 0 deletions kernel/bpf/verifier.c
@@ -6446,6 +6446,18 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
env->prog->call_get_func_ip = true;
}

if (func_id == BPF_FUNC_get_branch_snapshot) {
if (env->prog->aux->sleepable) {
verbose(env, "sleepable progs cannot call get_branch_snapshot\n");
return -ENOTSUPP;
}
if (!IS_ENABLED(CONFIG_PERF_EVENTS)) {
verbose(env, "func %s#%d not supported without CONFIG_PERF_EVENTS\n",
func_id_name(func_id), func_id);
return -ENOTSUPP;
}
env->prog->call_get_branch = true;
}
if (changes_data)
clear_all_pkt_pointers(env);
return 0;
43 changes: 43 additions & 0 deletions kernel/trace/bpf_trace.c
@@ -1017,6 +1017,33 @@ static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_2(bpf_get_branch_snapshot, void *, buf, u32, size)
{
#ifdef CONFIG_PERF_EVENTS
u32 max_size;

if (this_cpu_ptr(&bpf_perf_branch_snapshot)->nr == 0)
return -EOPNOTSUPP;

max_size = this_cpu_ptr(&bpf_perf_branch_snapshot)->nr *
sizeof(struct perf_branch_entry);
memcpy(buf, this_cpu_ptr(&bpf_perf_branch_snapshot)->entries,
min_t(u32, size, max_size));

return this_cpu_ptr(&bpf_perf_branch_snapshot)->nr;
#else
return -EOPNOTSUPP;
#endif
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
.func = bpf_get_branch_snapshot,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_UNINIT_MEM,
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
};

static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -1132,6 +1159,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_snprintf_proto;
case BPF_FUNC_get_func_ip:
return &bpf_get_func_ip_proto_tracing;
case BPF_FUNC_get_branch_snapshot:
return &bpf_get_branch_snapshot_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -1863,9 +1892,23 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
preempt_enable();
}

DEFINE_PER_CPU(struct perf_branch_snapshot, bpf_perf_branch_snapshot);

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
#ifdef CONFIG_PERF_EVENTS
/* Calling migrate_disable costs two entries in the LBR. To save
* some entries, we call perf_snapshot_branch_stack before
* migrate_disable. This is OK because we only care about the
* branch trace before entering the BPF program. If migration
* happens exactly here, there isn't much we can do to preserve
* the data.
*/
if (prog->call_get_branch)
static_call(perf_snapshot_branch_stack)(
this_cpu_ptr(&bpf_perf_branch_snapshot));
#endif
cant_sleep();
rcu_read_lock();
(void) bpf_prog_run(prog, args);
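
Closing the loop on the BPF-side sketch shown earlier: once the program has
run, userspace can read entries[] and total_entries back (e.g. via a libbpf
skeleton's bss section) and filter out branches recorded after the trigger
point. A hedged sketch, reusing the names from that example:

#include <stdio.h>
#include <linux/perf_event.h>

/* Hypothetical consumer for the earlier example: nr is the helper's return
 * value; from/to are the source and destination address of each branch. */
static void dump_branches(const struct perf_branch_entry *entries, long nr)
{
	for (long i = 0; i < nr; i++)
		printf("branch %ld: 0x%llx -> 0x%llx\n", i,
		       (unsigned long long)entries[i].from,
		       (unsigned long long)entries[i].to);
}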
16 changes: 16 additions & 0 deletions tools/include/uapi/linux/bpf.h
@@ -4877,6 +4877,21 @@ union bpf_attr {
* Get the struct pt_regs associated with **task**.
* Return
* A pointer to struct pt_regs.
*
* long bpf_get_branch_snapshot(void *entries, u32 size)
* Description
* Get branch trace from hardware engines like Intel LBR. The
* branch trace is taken soon after the trigger point of the
* BPF program, so it may contain some entries after the
* trigger point. The user needs to filter these entries
* accordingly.
*
* The data is written as a sequence of struct perf_branch_entry
* records into the output buffer *entries*. *size* is the size
* of *entries* in bytes.
*
* Return
* > 0, number of valid output entries.
* **-EOPNOTSUPP**, the hardware/kernel does not support this function.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5055,6 +5070,7 @@ union bpf_attr {
FN(get_func_ip), \
FN(get_attach_cookie), \
FN(task_pt_regs), \
FN(get_branch_snapshot), \
/* */

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
