
Commit b30f0e3

fweisbec authored and Ingo Molnar committed
sched/preempt: Optimize preemption operations on __schedule() callers
__schedule() disables preemption and some of its callers (the
preempt_schedule*() family) also set PREEMPT_ACTIVE. So we have two
preempt_count() modifications that could be performed at once.

Let's remove the preemption disablement from __schedule() and pull this
responsibility up to its callers, in order to optimize preempt_count()
operations in a single place.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1431441711-29753-5-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 90b62b5 commit b30f0e3
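
To make the saving concrete, here is a sketch (comments only, not part of the
patch) of the preempt_count() traffic around one __schedule() invocation from
a preempt_schedule*() caller, before and after this change:

	/* Before: two read-modify-writes on entry, two on exit. */
	__preempt_count_add(PREEMPT_ACTIVE);	/* in the caller */
	/* ... __schedule() itself then did preempt_disable() ...        */
	/* ... and sched_preempt_enable_no_resched() on the way out ...  */
	__preempt_count_sub(PREEMPT_ACTIVE);	/* in the caller */

	/* After: one combined update on each side. */
	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);	/* preempt_active_enter() */
	__schedule();	/* no longer touches preempt_count() */
	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);	/* preempt_active_exit() */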

File tree

2 files changed: +21 −20 lines changed

include/linux/preempt.h

Lines changed: 12 additions & 0 deletions

@@ -137,6 +137,18 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
+#define preempt_active_enter() \
+do { \
+	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+	barrier(); \
+} while (0)
+
+#define preempt_active_exit() \
+do { \
+	barrier(); \
+	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+} while (0)
+
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
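
Note the asymmetric barrier() placement: on entry the compiler barrier follows
the preempt_count_add(), on exit it precedes the preempt_count_sub(), so the
compiler cannot float scheduling code outside the region where PREEMPT_ACTIVE
and the disable offset are accounted. A minimal sketch of the intended pairing
(mirroring the kernel/sched/core.c callers below):

	preempt_active_enter();	/* count += PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET, then barrier() */
	__schedule();		/* runs with preemption disabled and PREEMPT_ACTIVE set */
	preempt_active_exit();	/* barrier(), then count -= PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET */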

kernel/sched/core.c

Lines changed: 9 additions & 20 deletions

@@ -2773,9 +2773,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
  * - return from syscall or exception to user-space
  * - return from interrupt-handler to user-space
  *
- * WARNING: all callers must re-check need_resched() afterward and reschedule
- * accordingly in case an event triggered the need for rescheduling (such as
- * an interrupt waking up a task) while preemption was disabled in __schedule().
+ * WARNING: must be called with preemption disabled!
  */
 static void __sched __schedule(void)
 {
@@ -2784,7 +2782,6 @@ static void __sched __schedule(void)
 	struct rq *rq;
 	int cpu;
 
-	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch();
@@ -2848,8 +2845,6 @@ static void __sched __schedule(void)
 	raw_spin_unlock_irq(&rq->lock);
 
 	post_schedule(rq);
-
-	sched_preempt_enable_no_resched();
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2870,7 +2865,9 @@ asmlinkage __visible void __sched schedule(void)
 
 	sched_submit_work(tsk);
 	do {
+		preempt_disable();
 		__schedule();
+		sched_preempt_enable_no_resched();
 	} while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
@@ -2909,15 +2906,14 @@ void __sched schedule_preempt_disabled(void)
 static void __sched notrace preempt_schedule_common(void)
 {
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		__schedule();
-		__preempt_count_sub(PREEMPT_ACTIVE);
+		preempt_active_exit();
 
 		/*
 		 * Check again in case we missed a preemption opportunity
 		 * between schedule and now.
 		 */
-		barrier();
 	} while (need_resched());
 }
 
@@ -2964,7 +2960,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 		return;
 
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		/*
 		 * Needs preempt disabled in case user_exit() is traced
 		 * and the tracer calls preempt_enable_notrace() causing
@@ -2974,8 +2970,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 		__schedule();
 		exception_exit(prev_ctx);
 
-		__preempt_count_sub(PREEMPT_ACTIVE);
-		barrier();
+		preempt_active_exit();
 	} while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_context);
@@ -2999,17 +2994,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 	prev_state = exception_enter();
 
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		local_irq_enable();
 		__schedule();
 		local_irq_disable();
-		__preempt_count_sub(PREEMPT_ACTIVE);
-
-		/*
-		 * Check again in case we missed a preemption opportunity
-		 * between schedule and now.
-		 */
-		barrier();
+		preempt_active_exit();
 	} while (need_resched());
 
 	exception_exit(prev_state);
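
With the helpers in place, all three preempt_schedule*() paths collapse to the
same loop shape; the explicit barrier() that used to guard the need_resched()
recheck is now supplied by preempt_active_exit(). Roughly (a sketch, with the
path-specific steps elided):

	do {
		preempt_active_enter();
		/* path-specific work: IRQ enable/disable, context tracking, ... */
		__schedule();
		preempt_active_exit();
	} while (need_resched());	/* recheck: an event may have set the flag meanwhile */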
