 48 |  48 | countdown = int(args.count)
 49 |  49 | debug = 0
 50 |  50 |
 51 |     | -tp = Tracepoint.enable_tracepoint("sched", "sched_switch")
 52 |     | -bpf_text = "#include <uapi/linux/ptrace.h>\n"
 53 |     | -bpf_text += "#include <linux/sched.h>\n"
 54 |     | -bpf_text += tp.generate_decl()
 55 |     | -bpf_text += tp.generate_entry_probe()
 56 |     | -bpf_text += tp.generate_struct()
    |  51 | +bpf_text = """#include <uapi/linux/ptrace.h>
    |  52 | +#include <linux/sched.h>
    |  53 | +"""
 57 |  54 |
 58 |  55 | if not args.offcpu:
 59 |  56 |     bpf_text += "#define ONCPU\n"

 66 |  63 |
 67 |  64 |
 68 |  65 | BPF_HASH(start, u32, u64);
 69 |     | -BPF_HASH(tgid_for_pid, u32, u32);
 70 |  66 | STORAGE
 71 |  67 |
 72 |     | -static inline u32 get_tgid(u32 pid)
 73 |     | -{
 74 |     | -    u32 *stored_tgid = tgid_for_pid.lookup(&pid);
 75 |     | -    if (stored_tgid != 0)
 76 |     | -        return *stored_tgid;
 77 |     | -    return 0xffffffff;
 78 |     | -}
 79 |     | -
 80 |  68 | static inline void store_start(u32 tgid, u32 pid, u64 ts)
 81 |  69 | {
 82 |  70 |     if (FILTER)

 99 |  87 |     STORE
100 |  88 | }
101 |  89 |
102 |     | -int sched_switch(struct pt_regs *ctx)
    |  90 | +int sched_switch(struct pt_regs *ctx, struct task_struct *prev)
103 |  91 | {
104 |  92 |     u64 ts = bpf_ktime_get_ns();
105 |  93 |     u64 pid_tgid = bpf_get_current_pid_tgid();
106 |  94 |     u32 tgid = pid_tgid >> 32, pid = pid_tgid;
107 |     | -    // Keep a mapping of tgid for pid because when sched_switch hits,
108 |     | -    // we only have the tgid information for the *current* pid, but not
109 |     | -    // for the previous one.
110 |     | -    tgid_for_pid.update(&pid, &tgid);
111 |     | -
112 |     | -    u64 *di = __trace_di.lookup(&pid_tgid);
113 |     | -    if (di == 0)
114 |     | -        return 0;
115 |     | -
116 |     | -    struct sched_switch_trace_entry args = {};
117 |     | -    bpf_probe_read(&args, sizeof(args), (void *)*di);
118 |  95 |
119 |  96 | #ifdef ONCPU
120 |     | -    if (args.prev_state == TASK_RUNNING) {
    |  97 | +    if (prev->state == TASK_RUNNING) {
121 |  98 | #else
122 |  99 |     if (1) {
123 | 100 | #endif
124 |     | -        u32 prev_pid = args.prev_pid;
125 |     | -        u32 prev_tgid = get_tgid(prev_pid);
126 |     | -        if (prev_tgid == 0xffffffff)
127 |     | -            goto BAIL;
    | 101 | +        u32 prev_pid = prev->pid;
    | 102 | +        u32 prev_tgid = prev->tgid;
128 | 103 | #ifdef ONCPU
129 | 104 |         update_hist(prev_tgid, prev_pid, ts);
130 | 105 | #else
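
Note on the new probe signature: BCC treats any parameter declared after struct pt_regs *ctx in a kprobe handler as an argument of the probed kernel function, and finish_task_switch() receives the task being switched out as its first argument, so prev->state, prev->pid and prev->tgid can be read directly. A minimal standalone sketch of the same pattern, assuming a working BCC install (the function name below is illustrative, not taken from this tool):

#!/usr/bin/env python
# Minimal sketch: attach a kprobe to finish_task_switch and read fields
# of the previous task straight from its task_struct argument.
from bcc import BPF

prog = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>

// Parameters after ctx are filled in by BCC with the probed
// function's arguments, so prev is finish_task_switch()'s prev.
int on_switch(struct pt_regs *ctx, struct task_struct *prev)
{
    bpf_trace_printk("switched out pid=%d tgid=%d\\n", prev->pid, prev->tgid);
    return 0;
}
"""

b = BPF(text=prog)
b.attach_kprobe(event="finish_task_switch", fn_name="on_switch")
b.trace_print()

This is also why the tgid_for_pid map and the get_tgid() helper could be dropped: the previous task's tgid now comes straight from its task_struct.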

173 | 148 |     print(bpf_text)
174 | 149 |
175 | 150 | b = BPF(text=bpf_text)
176 |     | -Tracepoint.attach(b)
177 |     | -b.attach_kprobe(event="perf_trace_sched_switch", fn_name="sched_switch")
    | 151 | +b.attach_kprobe(event="finish_task_switch", fn_name="sched_switch")
178 | 152 |
179 | 153 | print("Tracing %s-CPU time... Hit Ctrl-C to end." %
180 | 154 |     ("off" if args.offcpu else "on"))
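The probe now attaches to the finish_task_switch() kernel function rather than the perf_trace_sched_switch handler that the removed Tracepoint helper targeted. Kprobe targets are kernel internals that vary between builds, so a quick illustrative check (not part of the tool) that the symbol is visible before attaching:

#!/usr/bin/env python
# Illustrative check: confirm finish_task_switch appears in the kernel
# symbol table (some builds only expose a suffixed clone such as
# finish_task_switch.isra.0).
with open("/proc/kallsyms") as f:
    syms = [line.split()[2] for line in f]

if not any(s == "finish_task_switch" or s.startswith("finish_task_switch.")
           for s in syms):
    raise SystemExit("finish_task_switch not found; attach_kprobe would fail")

If only a suffixed variant is present, the plain name passed to attach_kprobe would not match it and the attach would fail.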