Commit 897f68a

Steven Rostedt (rostedt) authored and committed
ftrace: Use only the preempt version of function tracing
The function tracer had two different versions of function tracing: the irq disabling version and the preempt disabling version.

As function tracing is very intrusive and can cause nasty recursion issues, it has its own recursion protection. But the old method to do this was a flat layer. If it detected that a recursion was happening then it would just return without recording.

This made the preempt version (much faster than the irq disabling one) not very useful, because if an interrupt were to occur after the recursion flag was set, the interrupt would not be traced at all: every function it traced would think it had recursed on itself (due to the context it preempted having set the recursion flag).

Now that we have a recursion flag for every context level, we no longer need to worry about that. We can disable preemption, set the current context's recursion check bit, and go on. If an interrupt were to come along, it would check its own context bit and happily continue to trace.

As the preempt version is faster than the irq disable version, there is no more reason to keep the irq disable version around. The irq disable version also still missed out on tracing NMI code.

Remove the irq disable function tracer version and have the preempt disable version be the default (and only) version.

Before this patch:

# echo function > /debug/tracing/current_tracer
# for i in `seq 10`; do ./hackbench 50; done
Time: 12.028
Time: 11.945
Time: 11.925
Time: 11.964
Time: 12.002
Time: 11.910
Time: 11.944
Time: 11.929
Time: 11.941
Time: 11.924
(average: 11.9512)

Now we have:

# echo function > /debug/tracing/current_tracer
# for i in `seq 10`; do ./hackbench 50; done
Time: 10.285
Time: 10.407
Time: 10.243
Time: 10.372
Time: 10.380
Time: 10.198
Time: 10.272
Time: 10.354
Time: 10.248
Time: 10.253
(average: 10.3012)

A 13.8% savings!

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
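The "recursion flag for every context level" comes from the parent commit (edc15ca). As a minimal, self-contained sketch of the idea, assuming one bit per context level in a single global word (the kernel keeps these bits per task, and its trace_test_and_set_recursion() takes a start/max bit range; the names below are illustrative, not the kernel's code):

#include <stdio.h>

/* Illustrative context levels: one recursion bit each. */
enum trace_context { CTX_NORMAL, CTX_SOFTIRQ, CTX_IRQ, CTX_NMI };

/* Per-task in the real kernel; a single global word in this sketch. */
static unsigned long trace_recursion;

/* Claim this context's bit; return it, or -1 if already tracing here. */
static int test_and_set_recursion(enum trace_context ctx)
{
	if (trace_recursion & (1UL << ctx))
		return -1;	/* recursion within the same context */
	trace_recursion |= 1UL << ctx;
	return ctx;
}

static void clear_recursion(int bit)
{
	trace_recursion &= ~(1UL << bit);
}

int main(void)
{
	int bit = test_and_set_recursion(CTX_NORMAL);	/* task starts tracing */

	/* A traced function called from the tracer itself is rejected... */
	printf("nested:    %d\n", test_and_set_recursion(CTX_NORMAL));	/* -1 */
	/* ...but an interrupt arriving mid-trace claims its own bit. */
	printf("interrupt: %d\n", test_and_set_recursion(CTX_IRQ));	/* 2 */

	clear_recursion(bit);
	return 0;
}

Under the old flat scheme the interrupt case would also have returned -1; with per-context bits, only genuine same-context recursion is dropped, which is what makes the preempt-only version safe.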
1 parent edc15ca · commit 897f68a


kernel/trace/trace_functions.c

Lines changed: 14 additions & 47 deletions
@@ -47,34 +47,6 @@ static void function_trace_start(struct trace_array *tr)
 	tracing_reset_online_cpus(tr);
 }
 
-static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
-				 struct ftrace_ops *op, struct pt_regs *pt_regs)
-{
-	struct trace_array *tr = func_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-	int pc;
-
-	if (unlikely(!ftrace_function_enabled))
-		return;
-
-	pc = preempt_count();
-	preempt_disable_notrace();
-	local_save_flags(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-
-	if (likely(disabled == 1))
-		trace_function(tr, ip, parent_ip, flags, pc);
-
-	atomic_dec(&data->disabled);
-	preempt_enable_notrace();
-}
-
 /* Our option */
 enum {
 	TRACE_FUNC_OPT_STACK = 0x1,
@@ -85,34 +57,34 @@ static struct tracer_flags func_flags;
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
 		    struct ftrace_ops *op, struct pt_regs *pt_regs)
-
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
+	int bit;
 	int cpu;
 	int pc;
 
 	if (unlikely(!ftrace_function_enabled))
 		return;
 
-	/*
-	 * Need to use raw, since this must be called before the
-	 * recursive protection is performed.
-	 */
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
+	pc = preempt_count();
+	preempt_disable_notrace();
 
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
+	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
+	if (bit < 0)
+		goto out;
+
+	cpu = smp_processor_id();
+	data = tr->data[cpu];
+	if (!atomic_read(&data->disabled)) {
+		local_save_flags(flags);
 		trace_function(tr, ip, parent_ip, flags, pc);
 	}
+	trace_clear_recursion(bit);
 
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+ out:
+	preempt_enable_notrace();
 }
 
 static void
@@ -185,11 +157,6 @@ static void tracing_start_function_trace(void)
 {
 	ftrace_function_enabled = 0;
 
-	if (trace_flags & TRACE_ITER_PREEMPTONLY)
-		trace_ops.func = function_trace_call_preempt_only;
-	else
-		trace_ops.func = function_trace_call;
-
 	if (func_flags.val & TRACE_FUNC_OPT_STACK)
 		register_ftrace_function(&trace_stack_ops);
 	else
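The recursion bit claimed in the new function_trace_call() depends on the current execution context. As a self-contained userspace sketch of how a context level can be derived from a preempt_count-style word (the mask values and helper name are assumptions modeled loosely on the kernel's preempt-count layout, not code from this commit):

#include <stdio.h>

/* Assumed bit layout of a preempt_count-style word (illustrative). */
#define SOFTIRQ_MASK	0x0000ff00u
#define HARDIRQ_MASK	0x000f0000u
#define NMI_MASK	0x00100000u

/* Pick one recursion bit per context: task < softirq < irq < NMI. */
static int trace_get_context_bit(unsigned int pc)
{
	if (pc & NMI_MASK)
		return 3;	/* NMI context */
	if (pc & HARDIRQ_MASK)
		return 2;	/* hard interrupt */
	if (pc & SOFTIRQ_MASK)
		return 1;	/* soft interrupt */
	return 0;		/* normal (task) context */
}

int main(void)
{
	printf("task:    bit %d\n", trace_get_context_bit(0));
	printf("softirq: bit %d\n", trace_get_context_bit(0x0100));
	printf("hardirq: bit %d\n", trace_get_context_bit(0x010000));
	printf("nmi:     bit %d\n", trace_get_context_bit(0x100000));
	return 0;
}

Higher-priority contexts win: an NMI arriving while a hard irq is being traced gets its own bit, so it is never mistaken for recursion of the irq trace.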
