diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a69f363b61bf73..0d83564d27dd74 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -114,8 +114,15 @@ static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs
 	return arch_ftrace_get_regs(fregs);
 }
 
+#if ARCH_SUPPORTS_FTRACE_OPS
 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
 			      struct ftrace_ops *op, struct ftrace_regs *fregs);
+#else
+typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+#endif
+
+typedef void (*ftrace_func_base_t)(void);
+#define CAST_FTRACE_FUNC(f)	((ftrace_func_t)((ftrace_func_base_t)(f)))
 
 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
 
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index b8a0d1d564fb62..874eff384ca8bc 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -333,7 +333,7 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 #endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
 
 static struct ftrace_ops graph_ops = {
-	.func			= ftrace_stub,
+	.func			= CAST_FTRACE_FUNC(ftrace_stub),
 	.flags			= FTRACE_OPS_FL_INITIALIZED |
 				  FTRACE_OPS_FL_PID |
 				  FTRACE_OPS_FL_STUB,
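Note on the cast idiom above: with the new #else branch, ftrace_func_t has two different prototypes depending on ARCH_SUPPORTS_FTRACE_OPS, while handlers such as ftrace_stub keep the four-argument form, so direct assignment no longer matches everywhere. CAST_FTRACE_FUNC() converts through the bare ftrace_func_base_t first because GCC's -Wcast-function-type accepts casts routed through void (*)(void). Below is a minimal user-space sketch of the idiom, assuming nothing beyond standard C; the names two_arg_fn, base_fn, CAST_FN and handler are illustrative, not part of the patch:

/* cast_sketch.c: the CAST_FTRACE_FUNC() idiom in isolation. */
#include <stdio.h>

/* Mirrors the two-argument (!ARCH_SUPPORTS_FTRACE_OPS) ftrace_func_t. */
typedef void (*two_arg_fn)(unsigned long ip, unsigned long parent_ip);

/* Mirrors ftrace_func_base_t: GCC documents void (*)(void) as matching
 * any function type for the purposes of -Wcast-function-type. */
typedef void (*base_fn)(void);

#define CAST_FN(f) ((two_arg_fn)((base_fn)(f)))

/* A handler that keeps the full four-argument prototype. */
static void handler(unsigned long ip, unsigned long parent_ip,
		    void *op, void *fregs)
{
	(void)op;
	(void)fregs;
	printf("ip=%#lx parent_ip=%#lx\n", ip, parent_ip);
}

int main(void)
{
	/* A direct (two_arg_fn)handler cast would trip
	 * -Wcast-function-type; the double cast does not. */
	two_arg_fn fn = CAST_FN(handler);

	/* Calling through the narrower type is undefined behaviour in
	 * ISO C; the kernel relies on the arch calling convention and
	 * compiles the bodies of such handlers out, as the ftrace.c
	 * hunks below show. */
	fn(0x1000, 0x2000);
	return 0;
}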
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7b180f61e6d3cc..a3f6733db007a6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -79,7 +79,7 @@ enum {
 };
 
 struct ftrace_ops ftrace_list_end __read_mostly = {
-	.func		= ftrace_stub,
+	.func		= CAST_FTRACE_FUNC(ftrace_stub),
	.flags		= FTRACE_OPS_FL_STUB,
 	INIT_OPS_HASH(ftrace_list_end)
 };
@@ -116,7 +116,7 @@ static int ftrace_disabled __read_mostly;
 DEFINE_MUTEX(ftrace_lock);
 
 struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
-ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t ftrace_trace_function __read_mostly = CAST_FTRACE_FUNC(ftrace_stub);
 struct ftrace_ops global_ops;
 
 #if ARCH_SUPPORTS_FTRACE_OPS
@@ -142,6 +142,7 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 			    struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
+#if ARCH_SUPPORTS_FTRACE_OPS
 	struct trace_array *tr = op->private;
 	int pid;
 
@@ -155,6 +156,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 	}
 
 	op->saved_func(ip, parent_ip, op, fregs);
+#endif
 }
 
 static void ftrace_sync_ipi(void *data)
@@ -190,7 +192,7 @@ static void update_ftrace_function(void)
 
 	/* If there's no ftrace_ops registered, just call the stub function */
 	if (set_function_trace_op == &ftrace_list_end) {
-		func = ftrace_stub;
+		func = CAST_FTRACE_FUNC(ftrace_stub);
 
 		/*
 		 * If we are at the end of the list and this ops is
@@ -332,7 +334,7 @@ int __register_ftrace_function(struct ftrace_ops *ops)
 	ops->saved_func = ops->func;
 
 	if (ftrace_pids_enabled(ops))
-		ops->func = ftrace_pid_func;
+		ops->func = CAST_FTRACE_FUNC(ftrace_pid_func);
 
 	ftrace_update_trampoline(ops);
 
@@ -367,13 +369,13 @@ static void ftrace_update_pid_func(void)
 	struct ftrace_ops *op;
 
 	/* Only do something if we are tracing something */
-	if (ftrace_trace_function == ftrace_stub)
+	if (ftrace_trace_function == CAST_FTRACE_FUNC(ftrace_stub))
 		return;
 
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
 		if (op->flags & FTRACE_OPS_FL_PID) {
 			op->func = ftrace_pids_enabled(op) ?
-				ftrace_pid_func : op->saved_func;
+				CAST_FTRACE_FUNC(ftrace_pid_func) : op->saved_func;
 			ftrace_update_trampoline(op);
 		}
 	} while_for_each_ftrace_op(op);
@@ -1036,7 +1038,7 @@ static const struct ftrace_hash empty_hash = {
 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
 
 struct ftrace_ops global_ops = {
-	.func				= ftrace_stub,
+	.func				= CAST_FTRACE_FUNC(ftrace_stub),
 	.local_hash.notrace_hash	= EMPTY_HASH,
 	.local_hash.filter_hash		= EMPTY_HASH,
 	INIT_OPS_HASH(global_ops)
@@ -4545,7 +4547,7 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
 			return -ENOMEM;
 		}
 		probe->probe_ops = probe_ops;
-		probe->ops.func = function_trace_probe_call;
+		probe->ops.func = CAST_FTRACE_FUNC(function_trace_probe_call);
 		probe->tr = tr;
 		ftrace_ops_init(&probe->ops);
 		list_add(&probe->list, &tr->func_probes);
@@ -6956,7 +6958,7 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
 {
 	/* If we filter on pids, update to use the pid function */
 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
-		if (WARN_ON(tr->ops->func != ftrace_stub))
+		if (WARN_ON(tr->ops->func != CAST_FTRACE_FUNC(ftrace_stub)))
 			printk("ftrace ops had %pS for function\n",
 			       tr->ops->func);
 	}
@@ -6966,7 +6968,7 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
 
 void ftrace_reset_array_ops(struct trace_array *tr)
 {
-	tr->ops->func = ftrace_stub;
+	tr->ops->func = CAST_FTRACE_FUNC(ftrace_stub);
 }
 
 static nokprobe_inline void
@@ -7006,7 +7008,11 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 				pr_warn("op=%p %pS\n", op, op);
 				goto out;
 			}
+#if ARCH_SUPPORTS_FTRACE_OPS
 			op->func(ip, parent_ip, op, fregs);
+#else
+			op->func(ip, parent_ip);
+#endif
 		}
 	} while_for_each_ftrace_op(op);
 out:
@@ -7050,6 +7056,7 @@ NOKPROBE_SYMBOL(ftrace_ops_no_ops);
 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
 				   struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
+#if ARCH_SUPPORTS_FTRACE_OPS
 	int bit;
 
 	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
@@ -7063,6 +7070,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
 
 	preempt_enable_notrace();
 	trace_clear_recursion(bit);
+#endif
 }
 NOKPROBE_SYMBOL(ftrace_ops_assist_func);
 
@@ -7085,7 +7093,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
 	 */
 	if (ops->flags & (FTRACE_OPS_FL_RECURSION |
 			  FTRACE_OPS_FL_RCU))
-		return ftrace_ops_assist_func;
+		return CAST_FTRACE_FUNC(ftrace_ops_assist_func);
 
 	return ops->func;
 }
@@ -7521,7 +7529,7 @@ void ftrace_kill(void)
 {
 	ftrace_disabled = 1;
 	ftrace_enabled = 0;
-	ftrace_trace_function = ftrace_stub;
+	ftrace_trace_function = CAST_FTRACE_FUNC(ftrace_stub);
 }
 
 /**
@@ -7622,7 +7630,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 		}
 
 		/* stopping ftrace calls (just send to ftrace_stub) */
-		ftrace_trace_function = ftrace_stub;
+		ftrace_trace_function = CAST_FTRACE_FUNC(ftrace_stub);
 
 		ftrace_shutdown_sysctl();
 	}
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 03be4435d103fa..9fa46f7fcfc9dc 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -484,7 +484,7 @@ static int perf_ftrace_function_register(struct perf_event *event)
 {
 	struct ftrace_ops *ops = &event->ftrace_ops;
 
-	ops->func    = perf_ftrace_function_call;
+	ops->func    = CAST_FTRACE_FUNC(perf_ftrace_function_call);
 	ops->private = (void *)(unsigned long)nr_cpu_ids;
 
 	return register_ftrace_function(ops);
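The call-site half of the scheme is the #if added to __ftrace_ops_list_func() above: on architectures that cannot pass ops and regs, op->func is invoked with two arguments, and the handlers that actually need the extra state (ftrace_pid_func, ftrace_ops_assist_func) have their bodies compiled away. Below is a self-contained model of that dispatch, under the assumption that HAVE_OPS stands in for ARCH_SUPPORTS_FTRACE_OPS; struct ops, func_t and the function names are illustrative only:

/* dispatch_sketch.c: the two-arity op->func dispatch, modelled. */
#include <stdio.h>

#define HAVE_OPS 1	/* set to 0 to model a minimal-mcount arch */

struct ops;

#if HAVE_OPS
typedef void (*func_t)(unsigned long ip, unsigned long parent_ip,
		       struct ops *op, void *fregs);
#else
typedef void (*func_t)(unsigned long ip, unsigned long parent_ip);
#endif

typedef void (*func_base_t)(void);
#define CAST_FUNC(f) ((func_t)((func_base_t)(f)))

struct ops {
	func_t	func;
	int	private_data;
};

/* Keeps the full prototype either way, like ftrace_pid_func(); the
 * body only exists when the call site really supplies op and fregs. */
static void pid_func(unsigned long ip, unsigned long parent_ip,
		     struct ops *op, void *fregs)
{
#if HAVE_OPS
	printf("ip=%#lx private_data=%d\n", ip, op->private_data);
	(void)parent_ip;
	(void)fregs;
#endif
}

/* Mirrors the #if added to __ftrace_ops_list_func(). */
static void list_func(unsigned long ip, unsigned long parent_ip,
		      struct ops *op)
{
#if HAVE_OPS
	op->func(ip, parent_ip, op, NULL);
#else
	op->func(ip, parent_ip);
#endif
}

int main(void)
{
	struct ops my_ops = {
		.func		= CAST_FUNC(pid_func),
		.private_data	= 42,
	};

	list_func(0x1000, 0x2000, &my_ops);
	return 0;
}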
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 1f0e63f5d1f983..9ef630f0d4d950 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -62,7 +62,7 @@ int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 		return -ENOMEM;
 
 	/* Currently only the non stack version is supported */
-	ops->func = function_trace_call;
+	ops->func = CAST_FTRACE_FUNC(function_trace_call);
 	ops->flags = FTRACE_OPS_FL_PID;
 
 	tr->ops = ops;
@@ -105,13 +105,13 @@ static ftrace_func_t select_trace_function(u32 flags_val)
 {
 	switch (flags_val & TRACE_FUNC_OPT_MASK) {
 	case TRACE_FUNC_NO_OPTS:
-		return function_trace_call;
+		return CAST_FTRACE_FUNC(function_trace_call);
 	case TRACE_FUNC_OPT_STACK:
-		return function_stack_trace_call;
+		return CAST_FTRACE_FUNC(function_stack_trace_call);
 	case TRACE_FUNC_OPT_NO_REPEATS:
-		return function_no_repeats_trace_call;
+		return CAST_FTRACE_FUNC(function_no_repeats_trace_call);
 	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
-		return function_stack_no_repeats_trace_call;
+		return CAST_FTRACE_FUNC(function_stack_no_repeats_trace_call);
 	default:
 		return NULL;
 	}
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 2402de520eca71..abcdf48888f7ed 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -673,7 +673,7 @@ static int __wakeup_tracer_init(struct trace_array *tr)
 
 	tr->max_latency = 0;
 	wakeup_trace = tr;
-	ftrace_init_array_ops(tr, wakeup_tracer_call);
+	ftrace_init_array_ops(tr, CAST_FTRACE_FUNC(wakeup_tracer_call));
 	start_wakeup_tracer(tr);
 
 	wakeup_busy = true;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 63c28504205162..546f769d7de218 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -317,7 +317,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 
 static struct ftrace_ops trace_ops __read_mostly =
 {
-	.func = stack_trace_call,
+	.func = CAST_FTRACE_FUNC(stack_trace_call),
 };
 
 static ssize_t
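End-to-end, the practical effect is that every handler stored in an ops->func slot now goes through CAST_FTRACE_FUNC(), and handler bodies that dereference op or fregs are guarded by ARCH_SUPPORTS_FTRACE_OPS. The following is a hedged sketch of what a caller might look like with this patch applied; my_ops, my_trace_handler and the module boilerplate are hypothetical, not taken from the patch:

/* Hypothetical module fragment; assumes a tree with this patch. */
#include <linux/ftrace.h>
#include <linux/module.h>

static void my_trace_handler(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct ftrace_regs *fregs)
{
#if ARCH_SUPPORTS_FTRACE_OPS
	/* op and fregs are valid only when the arch passes them. */
#endif
}

static struct ftrace_ops my_ops = {
	/* Assigning my_trace_handler directly would warn about
	 * incompatible pointer types on !ARCH_SUPPORTS_FTRACE_OPS
	 * builds; the cast is now required. */
	.func	= CAST_FTRACE_FUNC(my_trace_handler),
	.flags	= FTRACE_OPS_FL_RECURSION,	/* let ftrace guard recursion */
};

static int __init my_trace_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_trace_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_trace_init);
module_exit(my_trace_exit);
MODULE_LICENSE("GPL");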