
Commit 826bfeb

Peter Zijlstra (Intel) authored and Ingo Molnar committed
preempt/dynamic: Support dynamic preempt with preempt= boot option
Support the preempt= boot option and patch the static call sites accordingly.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20210118141223.123667-9-frederic@kernel.org
1 parent 40607ee commit 826bfeb
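
For readers new to the mechanism: the patch selects, at boot, which function each preemption primitive actually calls. A minimal userspace sketch of that idea follows, using a plain function pointer in place of the kernel's static_call machinery (which instead patches the direct call instruction in the text, avoiding any indirect branch). All names in the sketch are illustrative, not the kernel's:

    /* Hypothetical userspace analogue of setup_preempt_mode(). */
    #include <stdio.h>
    #include <string.h>

    static int do_cond_resched(void)
    {
            puts("cond_resched: check need_resched and maybe yield");
            return 1;
    }

    /* Stands in for __static_call_return0: a no-op target returning 0. */
    static int return0(void)
    {
            return 0;
    }

    /* The "patchable" call site; the kernel rewrites a direct call instead
     * of dereferencing a pointer like this. */
    static int (*cond_resched_call)(void) = return0;

    /* Pick targets once, at "boot", from the mode string. */
    static int setup_preempt_mode(const char *str)
    {
            if (!strcmp(str, "none") || !strcmp(str, "voluntary")) {
                    cond_resched_call = do_cond_resched;
            } else if (!strcmp(str, "full")) {
                    cond_resched_call = return0;  /* cond_resched becomes a NOP */
            } else {
                    fprintf(stderr, "Unsupported preempt mode %s\n", str);
                    return 1;                     /* keep the default */
            }
            printf("Dynamic Preempt: %s\n", str);
            return 0;
    }

    int main(void)
    {
            setup_preempt_mode("voluntary");
            return cond_resched_call() ? 0 : 1;
    }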

File tree

1 file changed: +67 −1 lines


kernel/sched/core.c

Lines changed: 67 additions & 1 deletion
@@ -5328,9 +5328,75 @@ DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
 EXPORT_STATIC_CALL(preempt_schedule_notrace);
 #endif
 
-
 #endif /* CONFIG_PREEMPTION */
 
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+#include <linux/entry-common.h>
+
+/*
+ * SC:cond_resched
+ * SC:might_resched
+ * SC:preempt_schedule
+ * SC:preempt_schedule_notrace
+ * SC:irqentry_exit_cond_resched
+ *
+ *
+ * NONE:
+ *   cond_resched               <- __cond_resched
+ *   might_resched              <- RET0
+ *   preempt_schedule           <- NOP
+ *   preempt_schedule_notrace   <- NOP
+ *   irqentry_exit_cond_resched <- NOP
+ *
+ * VOLUNTARY:
+ *   cond_resched               <- __cond_resched
+ *   might_resched              <- __cond_resched
+ *   preempt_schedule           <- NOP
+ *   preempt_schedule_notrace   <- NOP
+ *   irqentry_exit_cond_resched <- NOP
+ *
+ * FULL:
+ *   cond_resched               <- RET0
+ *   might_resched              <- RET0
+ *   preempt_schedule           <- preempt_schedule
+ *   preempt_schedule_notrace   <- preempt_schedule_notrace
+ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
+ */
+static int __init setup_preempt_mode(char *str)
+{
+	if (!strcmp(str, "none")) {
+		static_call_update(cond_resched, __cond_resched);
+		static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
+		static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
+		static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
+		static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
+		pr_info("Dynamic Preempt: %s\n", str);
+	} else if (!strcmp(str, "voluntary")) {
+		static_call_update(cond_resched, __cond_resched);
+		static_call_update(might_resched, __cond_resched);
+		static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
+		static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
+		static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
+		pr_info("Dynamic Preempt: %s\n", str);
+	} else if (!strcmp(str, "full")) {
+		static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0);
+		static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
+		static_call_update(preempt_schedule, __preempt_schedule_func);
+		static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
+		static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+		pr_info("Dynamic Preempt: %s\n", str);
+	} else {
+		pr_warn("Dynamic Preempt: Unsupported preempt mode %s, default to full\n", str);
+		return 1;
+	}
+	return 0;
+}
+__setup("preempt=", setup_preempt_mode);
+
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
 /*
  * This is the entry point to schedule() from kernel preemption
  * off of irq context.
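
As the strcmp() checks show, the accepted modes are none, voluntary and full; any other value takes the pr_warn() branch and the default (full, per the warning text) is kept. Note the (typeof(...)) NULL updates: pointing a static call at NULL patches the call site into a NOP, which is how preempt_schedule and friends are disabled in the none/voluntary modes. The option itself is consumed from the kernel command line via __setup(), e.g. (illustrative boot entry, hypothetical paths):

    linux /boot/vmlinuz root=/dev/sda1 ro quiet preempt=voluntary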
