
Commit 0a01640

Steven Rostedt authored and committed
ftrace: Optimize the function tracer list loop
There are lots of places that perform:

	op = rcu_dereference_raw(ftrace_control_list);
	while (op != &ftrace_list_end) {

Add a helper macro to do this, and also optimize for a single entity.
That is, gcc will optimize a loop for either no iterations or more than
one iteration. But usually only a single callback is registered to the
function tracer, thus the optimized case should be a single pass. To do
this we now do:

	op = rcu_dereference_raw(list);
	do {
		[...]
	} while (likely(op = rcu_dereference_raw((op)->next)) &&
		 unlikely((op) != &ftrace_list_end));

An op is always registered (ftrace_list_end when no callbacks are
registered), thus when a single callback is registered, the linked list
looks like:

	top => callback => ftrace_list_end => NULL

The likely(op = op->next) still must be performed due to the race of
removing the callback, where the first op assignment could equal
ftrace_list_end. In that case, op->next would be NULL. But this is
unlikely (it only happens in a race condition when removing the
callback).

It is very likely, however, that the next op is ftrace_list_end, unless
more than one callback has been registered. This tells gcc what the
most common case is, and makes the fast path have the fewest branches.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
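As a rough standalone sketch of the same pattern (not kernel code: it
assumes gcc/clang for __builtin_expect, reduces rcu_dereference_raw()
to a plain pointer read since the sketch has no concurrent updaters,
and the names top, callback, list_end, and stub are hypothetical
stand-ins for ftrace_ops_list, a registered ftrace_ops,
ftrace_list_end, and ftrace_stub), the traversal can be modeled in
user space like this:

	#include <stdio.h>

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	struct op {
		void (*func)(void);
		struct op *next;
	};

	static void stub(void) { }	/* plays the role of ftrace_stub */
	static void my_callback(void) { puts("callback"); }

	/* Sentinel terminating the list; its next pointer is NULL. */
	static struct op list_end = { .func = stub, .next = NULL };
	/* One registered callback: top => callback => list_end => NULL */
	static struct op callback = { .func = my_callback, .next = &list_end };
	static struct op *top = &callback;

	int main(void)
	{
		struct op *op = top;

		/*
		 * The body always runs at least once. With a single
		 * callback, op->next is list_end, so the unlikely()
		 * test fails on the first pass and the loop exits
		 * after one iteration. If op had started out as
		 * list_end (the removal race), op->next would be
		 * NULL and the likely() test would end the loop.
		 */
		do {
			op->func();
		} while (likely(op = op->next) &&
			 unlikely(op != &list_end));
		return 0;
	}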
1 parent 9640388 commit 0a01640

File tree

1 file changed: +26 −22 lines


kernel/trace/ftrace.c

Lines changed: 26 additions & 22 deletions
@@ -111,6 +111,26 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 #endif
 
+/*
+ * Traverse the ftrace_global_list, invoking all entries. The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_global_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
+#define do_for_each_ftrace_op(op, list)			\
+	op = rcu_dereference_raw(list);			\
+	do
+
+/*
+ * Optimized for just a single item in the list (as that is the normal case).
+ */
+#define while_for_each_ftrace_op(op)				\
+	while (likely(op = rcu_dereference_raw((op)->next)) &&	\
+	       unlikely((op) != &ftrace_list_end))
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
@@ -132,15 +152,6 @@ int ftrace_nr_registered_ops(void)
 	return cnt;
 }
 
-/*
- * Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
 static void
 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
 			struct ftrace_ops *op, struct pt_regs *regs)
@@ -149,11 +160,9 @@ ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
 		return;
 
 	trace_recursion_set(TRACE_GLOBAL_BIT);
-	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
-	while (op != &ftrace_list_end) {
+	do_for_each_ftrace_op(op, ftrace_global_list) {
 		op->func(ip, parent_ip, op, regs);
-		op = rcu_dereference_raw(op->next); /*see above*/
-	};
+	} while_for_each_ftrace_op(op);
 	trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
@@ -4104,14 +4113,11 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 	 */
 	preempt_disable_notrace();
 	trace_recursion_set(TRACE_CONTROL_BIT);
-	op = rcu_dereference_raw(ftrace_control_list);
-	while (op != &ftrace_list_end) {
+	do_for_each_ftrace_op(op, ftrace_control_list) {
 		if (!ftrace_function_local_disabled(op) &&
 		    ftrace_ops_test(op, ip))
 			op->func(ip, parent_ip, op, regs);
-
-		op = rcu_dereference_raw(op->next);
-	};
+	} while_for_each_ftrace_op(op);
 	trace_recursion_clear(TRACE_CONTROL_BIT);
 	preempt_enable_notrace();
 }
@@ -4139,12 +4145,10 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	 * they must be freed after a synchronize_sched().
 	 */
 	preempt_disable_notrace();
-	op = rcu_dereference_raw(ftrace_ops_list);
-	while (op != &ftrace_list_end) {
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
 		if (ftrace_ops_test(op, ip))
 			op->func(ip, parent_ip, op, regs);
-	op = rcu_dereference_raw(op->next);
-	};
+	} while_for_each_ftrace_op(op);
 	preempt_enable_notrace();
 	trace_recursion_clear(TRACE_INTERNAL_BIT);
 }
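For reference, a call site like the one in ftrace_global_list_func()
above expands, after preprocessing, into essentially the open-coded
loop the patch removes (modulo the /*see above*/ comments):

	op = rcu_dereference_raw(ftrace_global_list);
	do {
		op->func(ip, parent_ip, op, regs);
	} while (likely(op = rcu_dereference_raw((op)->next)) &&
		 unlikely((op) != &ftrace_list_end));

The macro pair is split on purpose: do_for_each_ftrace_op() ends in a
bare do so the caller supplies the braced loop body, and
while_for_each_ftrace_op() supplies the matching while condition,
letting each call site read like an ordinary do/while loop.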
