Commit edc15ca

Steven Rostedt (rostedt) authored and committed
tracing: Avoid unnecessary multiple recursion checks
When function tracing occurs, the following steps are made:

  If arch does not support an ftrace feature:
    call internal function (uses INTERNAL bits) which calls...
  If callback is registered to the "global" list, the list
    function is called and recursion checks the GLOBAL bits.
    Then this function calls...
  The function callback, which can use the FTRACE bits to
    check for recursion.

Now if the arch does not support a feature, and it calls the global list function, which calls the ftrace callback, all three of these steps will do a recursion protection. There's no reason to do one if the previous caller already did. The recursion that we are protecting against will go through the same steps again.

To prevent the multiple recursion checks: if a recursion bit is set that is higher than the MAX bit of the current check, then we know that the check was made by the previous caller, and we can skip the current check.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
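To make the "higher than the MAX bit" rule concrete, here is a minimal userspace sketch. It is not part of the patch (the main() harness and assert are illustrative); it only borrows the bit positions the patch assigns in kernel/trace/trace.h:

#include <assert.h>

/* Bit positions from the patch: FTRACE = 11..14, GLOBAL = 15..18, INTERNAL = 19..22 */
#define TRACE_FTRACE_BIT    11
#define TRACE_GLOBAL_BIT    15
#define TRACE_INTERNAL_BIT  19
#define TRACE_CONTEXT_BITS  4

#define TRACE_GLOBAL_MAX    ((1 << (TRACE_GLOBAL_BIT + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_LIST_MAX      ((1 << (TRACE_INTERNAL_BIT + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_CONTEXT_MASK  TRACE_LIST_MAX

int main(void)
{
        /* An outer caller (the internal list function) set bit 19. */
        unsigned int val = 1 << TRACE_INTERNAL_BIT;     /* 0x80000 */

        /*
         * The global list check sees a set bit above its own MAX
         * (TRACE_GLOBAL_MAX = 0x7ffff), so it knows an outer caller
         * already did recursion protection and skips its own check.
         */
        assert((val & TRACE_CONTEXT_MASK) > TRACE_GLOBAL_MAX);
        return 0;
}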
1 parent e46cbf7 commit edc15ca

2 files changed: +110 -36 lines

  kernel/trace/ftrace.c
  kernel/trace/trace.h

kernel/trace/ftrace.c

Lines changed: 9 additions & 31 deletions
@@ -158,25 +158,15 @@ ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
 {
         int bit;
 
-        if (in_interrupt()) {
-                if (in_nmi())
-                        bit = TRACE_GLOBAL_NMI_BIT;
-
-                else if (in_irq())
-                        bit = TRACE_GLOBAL_IRQ_BIT;
-                else
-                        bit = TRACE_GLOBAL_SIRQ_BIT;
-        } else
-                bit = TRACE_GLOBAL_BIT;
-
-        if (unlikely(trace_recursion_test(bit)))
+        bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
+        if (bit < 0)
                 return;
 
-        trace_recursion_set(bit);
         do_for_each_ftrace_op(op, ftrace_global_list) {
                 op->func(ip, parent_ip, op, regs);
         } while_for_each_ftrace_op(op);
-        trace_recursion_clear(bit);
+
+        trace_clear_recursion(bit);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
@@ -4145,26 +4135,14 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *ignored, struct pt_regs *regs)
 {
         struct ftrace_ops *op;
-        unsigned int bit;
+        int bit;
 
         if (function_trace_stop)
                 return;
 
-        if (in_interrupt()) {
-                if (in_nmi())
-                        bit = TRACE_INTERNAL_NMI_BIT;
-
-                else if (in_irq())
-                        bit = TRACE_INTERNAL_IRQ_BIT;
-                else
-                        bit = TRACE_INTERNAL_SIRQ_BIT;
-        } else
-                bit = TRACE_INTERNAL_BIT;
-
-        if (unlikely(trace_recursion_test(bit)))
-                return;
-
-        trace_recursion_set(bit);
+        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+        if (bit < 0)
+                return;
 
         /*
          * Some of the ops may be dynamically allocated,
@@ -4176,7 +4154,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                 op->func(ip, parent_ip, op, regs);
         } while_for_each_ftrace_op(op);
         preempt_enable_notrace();
-        trace_recursion_clear(bit);
+        trace_clear_recursion(bit);
 }
 
 /*
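After this change both converted functions share one pattern: test-and-set on entry, bail out on a negative return, clear on exit. Below is a hedged sketch of that pattern (my_list_func and its comments are illustrative, not from the patch), assuming the helpers added to kernel/trace/trace.h in the next file:

static void my_list_func(unsigned long ip, unsigned long parent_ip)
{
        int bit;

        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
        if (bit < 0)
                return;         /* genuine recursion in this context: bail out */

        /* ... walk the ops list and invoke the callbacks ... */

        /*
         * bit == 0 means an outer caller's check already covers us;
         * trace_clear_recursion(0) is a no-op, leaving the outer
         * caller's bit set until that caller clears it itself.
         */
        trace_clear_recursion(bit);
}

Returning an int rather than a bool lets one call site distinguish the skip case (0), the recursion case (-1), and the normal case (the bit that was set).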

kernel/trace/trace.h

Lines changed: 101 additions & 5 deletions
@@ -297,18 +297,49 @@ struct tracer {
 /* Ring buffer has the 10 LSB bits to count */
 #define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
 
-/* for function tracing recursion */
+/*
+ * For function tracing recursion:
+ *  The order of these bits is important.
+ *
+ *  When function tracing occurs, the following steps are made:
+ *   If arch does not support an ftrace feature:
+ *    call internal function (uses INTERNAL bits) which calls...
+ *   If callback is registered to the "global" list, the list
+ *    function is called and recursion checks the GLOBAL bits.
+ *    then this function calls...
+ *   The function callback, which can use the FTRACE bits to
+ *    check for recursion.
+ *
+ * Now if the arch does not support a feature, and it calls
+ * the global list function which calls the ftrace callback,
+ * all three of these steps will do a recursion protection.
+ * There's no reason to do one if the previous caller already
+ * did. The recursion that we are protecting against will
+ * go through the same steps again.
+ *
+ * To prevent the multiple recursion checks, if a recursion
+ * bit is set that is higher than the MAX bit of the current
+ * check, then we know that the check was made by the previous
+ * caller, and we can skip the current check.
+ */
 enum {
-        TRACE_INTERNAL_BIT = 11,
-        TRACE_INTERNAL_NMI_BIT,
-        TRACE_INTERNAL_IRQ_BIT,
-        TRACE_INTERNAL_SIRQ_BIT,
+        TRACE_FTRACE_BIT = 11,
+        TRACE_FTRACE_NMI_BIT,
+        TRACE_FTRACE_IRQ_BIT,
+        TRACE_FTRACE_SIRQ_BIT,
 
+        /* GLOBAL_BITs must be greater than FTRACE_BITs */
         TRACE_GLOBAL_BIT,
         TRACE_GLOBAL_NMI_BIT,
         TRACE_GLOBAL_IRQ_BIT,
         TRACE_GLOBAL_SIRQ_BIT,
 
+        /* INTERNAL_BITs must be greater than GLOBAL_BITs */
+        TRACE_INTERNAL_BIT,
+        TRACE_INTERNAL_NMI_BIT,
+        TRACE_INTERNAL_IRQ_BIT,
+        TRACE_INTERNAL_SIRQ_BIT,
+
         TRACE_CONTROL_BIT,
 
 /*
@@ -325,6 +356,71 @@ enum {
 #define trace_recursion_clear(bit)  do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
 #define trace_recursion_test(bit)   ((current)->trace_recursion & (1<<(bit)))
 
+#define TRACE_CONTEXT_BITS  4
+
+#define TRACE_FTRACE_START  TRACE_FTRACE_BIT
+#define TRACE_FTRACE_MAX    ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
+
+#define TRACE_GLOBAL_START  TRACE_GLOBAL_BIT
+#define TRACE_GLOBAL_MAX    ((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)
+
+#define TRACE_LIST_START    TRACE_INTERNAL_BIT
+#define TRACE_LIST_MAX      ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
+
+#define TRACE_CONTEXT_MASK  TRACE_LIST_MAX
+
+static __always_inline int trace_get_context_bit(void)
+{
+        int bit;
+
+        if (in_interrupt()) {
+                if (in_nmi())
+                        bit = 0;
+
+                else if (in_irq())
+                        bit = 1;
+                else
+                        bit = 2;
+        } else
+                bit = 3;
+
+        return bit;
+}
+
+static __always_inline int trace_test_and_set_recursion(int start, int max)
+{
+        unsigned int val = current->trace_recursion;
+        int bit;
+
+        /* A previous recursion check was made */
+        if ((val & TRACE_CONTEXT_MASK) > max)
+                return 0;
+
+        bit = trace_get_context_bit() + start;
+        if (unlikely(val & (1 << bit)))
+                return -1;
+
+        val |= 1 << bit;
+        current->trace_recursion = val;
+        barrier();
+
+        return bit;
+}
+
+static __always_inline void trace_clear_recursion(int bit)
+{
+        unsigned int val = current->trace_recursion;
+
+        if (!bit)
+                return;
+
+        bit = 1 << bit;
+        val &= ~bit;
+
+        barrier();
+        current->trace_recursion = val;
+}
+
 #define TRACE_PIPE_ALL_CPU  -1
 
 static inline struct ring_buffer_iter *
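For reference, the context offset from trace_get_context_bit() (NMI = 0, IRQ = 1, softirq = 2, normal = 3) is added to a range's START bit, so each range owns four per-context recursion bits. The following summary is derived from the hunk above, not text from the patch:

  context   offset   FTRACE bits (11..14)   GLOBAL bits (15..18)   LIST bits (19..22)
  NMI       0        11                     15                     19
  IRQ       1        12                     16                     20
  softirq   2        13                     17                     21
  normal    3        14                     18                     22

Because every GLOBAL bit is numbered above every FTRACE bit, and every INTERNAL bit above every GLOBAL bit, a bit set by an outer caller makes (val & TRACE_CONTEXT_MASK) > max true for every inner range, which is exactly the condition trace_test_and_set_recursion() uses to return 0 and skip the redundant check.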
