retstack: Split extra return stack from function graph tracer
mhiramat committed Oct 26, 2017
1 parent eb9b5a7 commit 8804f76
Showing 10 changed files with 416 additions and 249 deletions.
7 changes: 7 additions & 0 deletions arch/Kconfig
@@ -169,6 +169,13 @@ config ARCH_USE_BUILTIN_BSWAP
	  instructions should set this. And it shouldn't hurt to set it
	  on architectures that don't have such instructions.

+
+config TASK_RETURN_STACK
+	bool
+	help
+	  Provide a per-thread in-kernel extra return stack for function
+	  return hooks (e.g. the function graph tracer and kretprobes).
+
config KRETPROBES
	def_bool y
	depends on KPROBES && HAVE_KRETPROBES
2 changes: 1 addition & 1 deletion arch/x86/kernel/ftrace.c
@@ -1046,7 +1046,7 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
	}

	trace.func = self_addr;
-	trace.depth = current->curr_ret_stack + 1;
+	trace.depth = task_ftrace_graph_depth(current) + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
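
The only functional change in this hunk is that the call depth handed to the graph-entry callback is now read through the new accessor instead of poking current->curr_ret_stack directly: the raw index belongs to the generic return stack, while the tracer keeps its own counter. A rough sketch of how that counter might be kept in step with the hook; purely illustrative, and the two helper names are invented, not part of this commit:

#include <linux/sched.h>

/* Illustrative only: the graph tracer's private depth field is bumped when
 * a function entry is hooked and dropped when the hooked return fires, so
 * task_ftrace_graph_depth() mirrors the current nesting level. */
static void graph_entry_hooked(void)
{
	current->ftrace_graph_depth++;
}

static void graph_return_fired(void)
{
	if (current->ftrace_graph_depth > 0)
		current->ftrace_graph_depth--;
}
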
6 changes: 3 additions & 3 deletions include/linux/ftrace.h
@@ -853,9 +853,9 @@ extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

-static inline int task_curr_ret_stack(struct task_struct *t)
+static inline int task_ftrace_graph_depth(struct task_struct *t)
{
-	return t->curr_ret_stack;
+	return t->ftrace_graph_depth;
}

static inline void pause_graph_tracing(void)
@@ -883,7 +883,7 @@ static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
}
static inline void unregister_ftrace_graph(void) { }

-static inline int task_curr_ret_stack(struct task_struct *tsk)
+static inline int task_ftrace_graph_depth(struct task_struct *tsk)
{
	return -1;
}
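
The -1 stub in the !CONFIG_FUNCTION_GRAPH_TRACER branch keeps callers of the accessor building when the graph tracer is compiled out. A small illustrative check; the caller name is invented, and treating a negative depth as "no graph-trace state recorded for this task" is an assumption based on the -1 stub and the index-style semantics:

#include <linux/ftrace.h>
#include <linux/sched.h>

/* Illustrative only: a negative depth means the graph tracer is compiled
 * out or has recorded nothing for this task. */
static bool task_has_graph_state(struct task_struct *t)
{
	return task_ftrace_graph_depth(t) >= 0;
}
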
27 changes: 27 additions & 0 deletions include/linux/retstack.h
@@ -0,0 +1,27 @@
+#ifndef _LINUX_RETSTACK_H
+#define _LINUX_RETSTACK_H
+
+/*
+ * Extra return stack for function return hook
+ */
+
+#define RETSTACK_ALLOC_SIZE	32
+#define RETSTACK_MAX_DEPTH	50
+
+void retstack_abort(void);
+bool retstack_is_dead(void);
+
+int retstack_init(void);
+int retstack_exit(void);
+void retstack_init_idle(struct task_struct *t, int cpu);
+void retstack_init_task(struct task_struct *t);
+void retstack_exit_task(struct task_struct *t);
+
+int retstack_push(unsigned long ret, unsigned long func, unsigned long *retp,
+		  struct retstack **entry);
+int retstack_peek(struct retstack **entry);
+int retstack_pop(unsigned long *ret, unsigned long *func);
+unsigned long retstack_ret_addr(struct task_struct *task, int *idx,
+				unsigned long ret, unsigned long *retp);
+
+#endif /* _LINUX_RETSTACK_H */
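
The header only declares the API; its implementation (retstack.o, added to kernel/Makefile below) is not shown in this excerpt. As a rough usage sketch, assuming the push/pop pair saves and restores the real return address for a return hook; the hook function names and error handling are hypothetical, and <linux/sched.h> is included for struct task_struct/current:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/retstack.h>

/* Hypothetical entry side of a return hook: remember where the hooked
 * function really returns to; the caller would then redirect *retp to a
 * trampoline. */
static int example_hook_entry(unsigned long ret, unsigned long func,
			      unsigned long *retp)
{
	struct retstack *entry;

	if (retstack_is_dead())
		return -EBUSY;

	/* Fails when the per-task stack is missing or already full. */
	if (retstack_push(ret, func, retp, &entry) < 0)
		return -EBUSY;

	return 0;
}

/* Hypothetical exit side: recover the saved return address so the
 * trampoline can jump back to it. */
static unsigned long example_hook_exit(void)
{
	unsigned long ret, func;

	if (retstack_pop(&ret, &func) < 0) {
		/* Unbalanced push/pop; give up on further hooking. */
		retstack_abort();
		return 0;
	}

	return ret;
}
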
12 changes: 8 additions & 4 deletions include/linux/sched.h
@@ -1020,21 +1020,25 @@ struct task_struct {
	unsigned int			kasan_depth;
#endif

-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_TASK_RETURN_STACK
	/* Index of current stored address in ret_stack: */
	int				curr_ret_stack;

	/* Stack of return addresses for return function tracing: */
	struct ftrace_ret_stack		*ret_stack;

-	/* Timestamp for last schedule: */
-	unsigned long long		ftrace_timestamp;
-
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun:
	 */
	atomic_t			trace_overrun;
+#endif
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* Index of current ftrace graph tracing ret_stack: */
+	int				ftrace_graph_depth;
+
+	/* Timestamp for last schedule: */
+	unsigned long long		ftrace_timestamp;

	/* Pause tracing: */
	atomic_t			tracing_graph_pause;
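
With the fields split this way, the task lifecycle helpers declared in <linux/retstack.h> presumably manage only the CONFIG_TASK_RETURN_STACK block. A minimal sketch of what retstack_init_task()/retstack_exit_task() could look like, assuming the backing array is allocated lazily and an index of -1 means "empty"; the real code lives in the retstack implementation file and is not part of this excerpt:

#include <linux/sched.h>
#include <linux/slab.h>

/* Sketch only: start every new task with no stored return addresses. */
void retstack_init_task(struct task_struct *t)
{
	t->curr_ret_stack = -1;
	t->ret_stack = NULL;
	atomic_set(&t->trace_overrun, 0);
}

/* Sketch only: detach the array before freeing it so a concurrent hook
 * cannot observe a half-freed stack. */
void retstack_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *stack = t->ret_stack;

	t->ret_stack = NULL;
	barrier();
	kfree(stack);
}
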
1 change: 1 addition & 0 deletions kernel/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_TRACEPOINTS) += trace/
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-$(CONFIG_CPU_PM) += cpu_pm.o
obj-$(CONFIG_BPF) += bpf/
+obj-$(CONFIG_TASK_RETURN_STACK) += retstack.o

obj-$(CONFIG_PERF_EVENTS) += events/

