Skip to content

Commit

Permalink
sched: Add support for lazy preemption
Browse files Browse the repository at this point in the history
It has become an obsession to mitigate the determinism vs. throughput
loss of RT. Looking at the mainline semantics of preemption points
gives a hint why RT sucks throughput wise for ordinary SCHED_OTHER
tasks. One major issue is the wakeup of tasks which right away
preempt the waking task while the waking task holds a lock on which
the woken task will block right after having preempted the waker. In
mainline this is prevented due to the implicit preemption disable of
spin/rw_lock held regions. On RT this is not possible due to the fully
preemptible nature of sleeping spinlocks.

Though for a SCHED_OTHER task preempting another SCHED_OTHER task this
is really not a correctness issue. RT folks are concerned about
SCHED_FIFO/RR tasks preemption and not about the purely fairness
driven SCHED_OTHER preemption latencies.

So I introduced a lazy preemption mechanism which only applies to
SCHED_OTHER tasks preempting another SCHED_OTHER task. Aside of the
existing preempt_count each tasks sports now a preempt_lazy_count
which is manipulated on lock acquisition and release. This is slightly
incorrect, as for laziness reasons I coupled this to
migrate_disable/enable so some other mechanisms get the same treatment
(e.g. get_cpu_light).

Now on the scheduler side, instead of setting NEED_RESCHED this sets
NEED_RESCHED_LAZY in case of a SCHED_OTHER/SCHED_OTHER preemption and
therefore allows the waking task to exit the lock-held region before
the woken task preempts it. That also works better for cross-CPU wakeups,
as the other side can stay in the adaptive spinning loop.

For RT class preemption there is no change. This simply sets
NEED_RESCHED and forgoes the lazy preemption counter.

Initial tests do not expose any observable latency increase, but
history shows that I've been proven wrong before :)

The lazy preemption mode is per default on, but with
CONFIG_SCHED_DEBUG enabled it can be disabled via:

 # echo NO_PREEMPT_LAZY >/sys/kernel/debug/sched_features

and reenabled via

 # echo PREEMPT_LAZY >/sys/kernel/debug/sched_features

The test results so far are very machine and workload dependent, but
there is a clear trend that it enhances the non RT workload
performance.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  • Loading branch information
Thomas Gleixner authored and Sebastian Andrzej Siewior committed Sep 13, 2021
1 parent 06dcd8d commit 240c44c
Show file tree
Hide file tree
Showing 12 changed files with 248 additions and 35 deletions.
54 changes: 51 additions & 3 deletions include/linux/preempt.h
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,20 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_LAZY
/*
 * preempt_lazy_count is a per-task counter (living in thread_info, see
 * preempt_lazy_count() below) that marks regions in which lazy preemption
 * of a SCHED_OTHER task must be deferred. The accessors mirror the
 * regular preempt_count helpers; unlike preempt_enable(), dropping the
 * count here does not itself check for a pending reschedule — that is
 * done by preempt_lazy_enable().
 */
#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
#else
/* !CONFIG_PREEMPT_LAZY: all manipulations compile away, the count reads as 0. */
#define add_preempt_lazy_count(val) do { } while (0)
#define sub_preempt_lazy_count(val) do { } while (0)
#define inc_preempt_lazy_count() do { } while (0)
#define dec_preempt_lazy_count() do { } while (0)
#define preempt_lazy_count() (0)
#endif

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
Expand All @@ -182,6 +196,12 @@ do { \
barrier(); \
} while (0)

/*
 * Enter a lazy-preemption-deferred section: bump the per-task lazy count.
 * The barrier() keeps the compiler from hoisting code out of the
 * protected region, matching preempt_disable().
 */
#define preempt_lazy_disable() \
do { \
inc_preempt_lazy_count(); \
barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
Expand Down Expand Up @@ -219,13 +239,31 @@ do { \
__preempt_schedule(); \
} while (0)

/*
 * Leave a lazy-preemption-deferred section and, like preempt_enable(),
 * check whether a reschedule is now due. The check open codes
 * preempt_check_resched() because that helper is not exported to modules,
 * while this macro is used from module-reachable code such as
 * local_unlock() and bpf_enable_instrumentation().
 */
#define preempt_lazy_enable() \
do { \
dec_preempt_lazy_count(); \
barrier(); \
if (should_resched(0)) \
__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
barrier(); \
preempt_count_dec(); \
} while (0)

/*
 * !CONFIG_PREEMPTION variant: dropping the lazy count cannot trigger an
 * immediate reschedule, so no should_resched() check is performed.
 */
#define preempt_lazy_enable() \
do { \
dec_preempt_lazy_count(); \
barrier(); \
} while (0)

#define preempt_enable_notrace() \
do { \
barrier(); \
Expand Down Expand Up @@ -267,6 +305,9 @@ do { \
#define preempt_check_resched_rt() barrier()
#define preemptible() 0

/* !CONFIG_PREEMPT_COUNT: lazy sections degrade to plain compiler barriers. */
#define preempt_lazy_disable() barrier()
#define preempt_lazy_enable() barrier()

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
Expand All @@ -285,7 +326,7 @@ do { \
} while (0)
#define preempt_fold_need_resched() \
do { \
if (tif_need_resched()) \
if (tif_need_resched_now()) \
set_preempt_need_resched(); \
} while (0)

Expand Down Expand Up @@ -413,8 +454,15 @@ extern void migrate_enable(void);

#else

static inline void migrate_disable(void) { }
static inline void migrate_enable(void) { }
/*
 * !CONFIG_SMP stubs: migration is moot on UP, but the lazy preempt count
 * must still be maintained so that lazy-preemption deferral covers
 * migrate-disabled regions, matching the SMP implementation.
 */
static inline void migrate_disable(void)
{
preempt_lazy_disable();
}

static inline void migrate_enable(void)
{
preempt_lazy_enable();
}

#endif /* CONFIG_SMP */

Expand Down
37 changes: 37 additions & 0 deletions include/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -2014,6 +2014,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

#ifdef CONFIG_PREEMPT_LAZY
/* Request a deferred (lazy) reschedule of @tsk: sets the flag only, no IPI. */
static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
{
set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
}

/* Clear a pending lazy reschedule request on @tsk. */
static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
{
clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
}

/* Does @tsk have a lazy reschedule pending? */
static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
{
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
}

/* Lazy reschedule pending for the current task? */
static inline int need_resched_lazy(void)
{
return test_thread_flag(TIF_NEED_RESCHED_LAZY);
}

/* Immediate (non-lazy) reschedule pending for the current task? */
static inline int need_resched_now(void)
{
return test_thread_flag(TIF_NEED_RESCHED);
}

#else
/*
 * !CONFIG_PREEMPT_LAZY: only the stubs common code needs are provided;
 * need_resched_now() degenerates to the plain TIF_NEED_RESCHED test.
 */
static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
static inline int need_resched_lazy(void) { return 0; }

static inline int need_resched_now(void)
{
return test_thread_flag(TIF_NEED_RESCHED);
}

#endif

#ifdef CONFIG_PREEMPT_RT
static inline bool task_match_saved_state(struct task_struct *p, long match_state)
{
Expand Down
12 changes: 11 additions & 1 deletion include/linux/thread_info.h
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
#endif /* !CONFIG_GENERIC_ENTRY */

#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
#ifdef CONFIG_PREEMPT_LAZY
#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \
test_thread_flag(TIF_NEED_RESCHED_LAZY))
#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED))
#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)

#else
#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED)
#define tif_need_resched_lazy() 0
#endif

#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
Expand Down
5 changes: 4 additions & 1 deletion include/linux/trace_events.h
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,7 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
unsigned char preempt_lazy_count;
};

#define TRACE_EVENT_TYPE_MAX \
Expand Down Expand Up @@ -157,9 +158,10 @@ static inline void tracing_generic_entry_update(struct trace_entry *entry,
unsigned int trace_ctx)
{
entry->preempt_count = trace_ctx & 0xff;
entry->preempt_lazy_count = (trace_ctx >> 16) & 0xff;
entry->pid = current->pid;
entry->type = type;
entry->flags = trace_ctx >> 16;
entry->flags = trace_ctx >> 24;
}

unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
Expand All @@ -172,6 +174,7 @@ enum trace_flag_type {
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
TRACE_FLAG_NMI = 0x40,
TRACE_FLAG_NEED_RESCHED_LAZY = 0x80,
};

#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Expand Down
6 changes: 6 additions & 0 deletions kernel/Kconfig.preempt
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only

# Selected by architectures that provide the TIF_NEED_RESCHED_LAZY
# machinery required for lazy preemption.
config HAVE_PREEMPT_LAZY
bool

# Lazy preemption: defer SCHED_OTHER->SCHED_OTHER preemption; enabled by
# default on PREEMPT_RT when the architecture supports it.
config PREEMPT_LAZY
def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT

choice
prompt "Preemption Model"
default PREEMPT_NONE
Expand Down
80 changes: 78 additions & 2 deletions kernel/sched/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -986,6 +986,46 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}

#ifdef CONFIG_PREEMPT_LAZY

/*
 * Whether @p is polling its TIF flags (TIF_POLLING_NRFLAG set): such a
 * task notices a newly set resched flag without an IPI — see the caller,
 * resched_curr_lazy(). Architectures without TIF_POLLING_NRFLAG never poll.
 */
static int tsk_is_polling(struct task_struct *p)
{
#ifdef TIF_POLLING_NRFLAG
return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
#else
return 0;
#endif
}

/*
 * resched_curr_lazy - request a lazy (deferred) reschedule of @rq's current task
 *
 * Used for SCHED_OTHER preempting SCHED_OTHER: instead of forcing an
 * immediate preemption via TIF_NEED_RESCHED, set TIF_NEED_RESCHED_LAZY so
 * the running task can first leave its lazy-preempt-disabled region.
 * Falls back to resched_curr() when the PREEMPT_LAZY sched feature is off.
 *
 * NOTE(review): presumably called with rq->lock held, like resched_curr()
 * — confirm against callers.
 */
void resched_curr_lazy(struct rq *rq)
{
struct task_struct *curr = rq->curr;
int cpu;

/* Lazy mode disabled via sched_features: behave exactly like resched_curr(). */
if (!sched_feat(PREEMPT_LAZY)) {
resched_curr(rq);
return;
}

/* An immediate reschedule is already pending; it supersedes a lazy one. */
if (test_tsk_need_resched(curr))
return;

/* A lazy reschedule has already been requested; nothing more to do. */
if (test_tsk_need_resched_lazy(curr))
return;

set_tsk_need_resched_lazy(curr);

cpu = cpu_of(rq);
if (cpu == smp_processor_id())
return;

/* NEED_RESCHED_LAZY must be visible before we test polling */
smp_mb();
if (!tsk_is_polling(curr))
smp_send_reschedule(cpu);
}
#endif

void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
Expand Down Expand Up @@ -2141,6 +2181,7 @@ void migrate_disable(void)
preempt_disable();
this_rq()->nr_pinned++;
p->migration_disabled = 1;
preempt_lazy_disable();
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);
Expand Down Expand Up @@ -2171,6 +2212,7 @@ void migrate_enable(void)
barrier();
p->migration_disabled = 0;
this_rq()->nr_pinned--;
preempt_lazy_enable();
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);
Expand Down Expand Up @@ -4406,6 +4448,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
#ifdef CONFIG_HAVE_PREEMPT_LAZY
task_thread_info(p)->preempt_lazy_count = 0;
#endif
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
Expand Down Expand Up @@ -6250,6 +6295,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)

next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
clear_tsk_need_resched_lazy(prev);
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
rq->last_seen_need_resched_ns = 0;
Expand Down Expand Up @@ -6467,6 +6513,30 @@ static void __sched notrace preempt_schedule_common(void)
} while (need_resched());
}

#ifdef CONFIG_PREEMPT_LAZY
/*
 * May this preemption proceed right now? A pending TIF_NEED_RESCHED
 * always wins, since it is set on behalf of an RT class task. Otherwise
 * preemption is held off for as long as the current task's
 * preempt_lazy_count is non-zero.
 */
static __always_inline int preemptible_lazy(void)
{
	return test_thread_flag(TIF_NEED_RESCHED) ||
	       !current_thread_info()->preempt_lazy_count;
}

#else

/* Without CONFIG_PREEMPT_LAZY there is nothing to defer. */
static inline int preemptible_lazy(void)
{
	return 1;
}

#endif

#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
Expand All @@ -6480,7 +6550,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
*/
if (likely(!preemptible()))
return;

if (!preemptible_lazy())
return;
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
Expand Down Expand Up @@ -6513,6 +6584,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
if (likely(!preemptible()))
return;

if (!preemptible_lazy())
return;

do {
/*
* Because the function tracer can trace preempt_count_sub()
Expand Down Expand Up @@ -8674,7 +8748,9 @@ void __init init_idle(struct task_struct *idle, int cpu)

/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);

#ifdef CONFIG_HAVE_PREEMPT_LAZY
task_thread_info(idle)->preempt_lazy_count = 0;
#endif
/*
* The idle tasks have their own, simple scheduling class:
*/
Expand Down

0 comments on commit 240c44c

Please sign in to comment.