Skip to content

Commit d95f412

Browse files
Mike Galbraith authored and Ingo Molnar committed
sched: Add yield_to(task, preempt) functionality
Currently only implemented for fair class tasks. Add a yield_to_task method() to the fair scheduling class. allowing the caller of yield_to() to accelerate another thread in it's thread group, task group. Implemented via a scheduler hint, using cfs_rq->next to encourage the target being selected. We can rely on pick_next_entity to keep things fair, so noone can accelerate a thread that has already used its fair share of CPU time. This also means callers should only call yield_to when they really mean it. Calling it too often can result in the scheduler just ignoring the hint. Signed-off-by: Rik van Riel <riel@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Mike Galbraith <efault@gmx.de> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <20110201095051.4ddb7738@annuminas.surriel.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
1 parent ac53db5 commit d95f412

File tree

3 files changed

+107
-0
lines changed

3 files changed

+107
-0
lines changed

include/linux/sched.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1058,6 +1058,7 @@ struct sched_class {
10581058
void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
10591059
void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
10601060
void (*yield_task) (struct rq *rq);
1061+
bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
10611062

10621063
void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
10631064

@@ -1972,6 +1973,7 @@ static inline int rt_mutex_getprio(struct task_struct *p)
19721973
# define rt_mutex_adjust_pi(p) do { } while (0)
19731974
#endif
19741975

1976+
extern bool yield_to(struct task_struct *p, bool preempt);
19751977
extern void set_user_nice(struct task_struct *p, long nice);
19761978
extern int task_prio(const struct task_struct *p);
19771979
extern int task_nice(const struct task_struct *p);

kernel/sched.c

Lines changed: 85 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1686,6 +1686,39 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
16861686
__release(rq2->lock);
16871687
}
16881688

1689+
#else /* CONFIG_SMP */
1690+
1691+
/*
1692+
* double_rq_lock - safely lock two runqueues
1693+
*
1694+
* Note this does not disable interrupts like task_rq_lock,
1695+
* you need to do so manually before calling.
1696+
*/
1697+
static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1698+
__acquires(rq1->lock)
1699+
__acquires(rq2->lock)
1700+
{
1701+
BUG_ON(!irqs_disabled());
1702+
BUG_ON(rq1 != rq2);
1703+
raw_spin_lock(&rq1->lock);
1704+
__acquire(rq2->lock); /* Fake it out ;) */
1705+
}
1706+
1707+
/*
1708+
* double_rq_unlock - safely unlock two runqueues
1709+
*
1710+
* Note this does not restore interrupts like task_rq_unlock,
1711+
* you need to do so manually after calling.
1712+
*/
1713+
static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1714+
__releases(rq1->lock)
1715+
__releases(rq2->lock)
1716+
{
1717+
BUG_ON(rq1 != rq2);
1718+
raw_spin_unlock(&rq1->lock);
1719+
__release(rq2->lock);
1720+
}
1721+
16891722
#endif
16901723

16911724
static void calc_load_account_idle(struct rq *this_rq);
@@ -5448,6 +5481,58 @@ void __sched yield(void)
54485481
}
54495482
EXPORT_SYMBOL(yield);
54505483

5484+
/**
5485+
* yield_to - yield the current processor to another thread in
5486+
* your thread group, or accelerate that thread toward the
5487+
* processor it's on.
5488+
*
5489+
* It's the caller's job to ensure that the target task struct
5490+
* can't go away on us before we can do any checks.
5491+
*
5492+
* Returns true if we indeed boosted the target task.
5493+
*/
5494+
bool __sched yield_to(struct task_struct *p, bool preempt)
5495+
{
5496+
struct task_struct *curr = current;
5497+
struct rq *rq, *p_rq;
5498+
unsigned long flags;
5499+
bool yielded = 0;
5500+
5501+
local_irq_save(flags);
5502+
rq = this_rq();
5503+
5504+
again:
5505+
p_rq = task_rq(p);
5506+
double_rq_lock(rq, p_rq);
5507+
while (task_rq(p) != p_rq) {
5508+
double_rq_unlock(rq, p_rq);
5509+
goto again;
5510+
}
5511+
5512+
if (!curr->sched_class->yield_to_task)
5513+
goto out;
5514+
5515+
if (curr->sched_class != p->sched_class)
5516+
goto out;
5517+
5518+
if (task_running(p_rq, p) || p->state)
5519+
goto out;
5520+
5521+
yielded = curr->sched_class->yield_to_task(rq, p, preempt);
5522+
if (yielded)
5523+
schedstat_inc(rq, yld_count);
5524+
5525+
out:
5526+
double_rq_unlock(rq, p_rq);
5527+
local_irq_restore(flags);
5528+
5529+
if (yielded)
5530+
schedule();
5531+
5532+
return yielded;
5533+
}
5534+
EXPORT_SYMBOL_GPL(yield_to);
5535+
54515536
/*
54525537
* This task is about to go to sleep on IO. Increment rq->nr_iowait so
54535538
* that process accounting knows that this is a task in IO wait state.

kernel/sched_fair.c

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1975,6 +1975,25 @@ static void yield_task_fair(struct rq *rq)
19751975
set_skip_buddy(se);
19761976
}
19771977

1978+
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
1979+
{
1980+
struct sched_entity *se = &p->se;
1981+
1982+
if (!se->on_rq)
1983+
return false;
1984+
1985+
/* Tell the scheduler that we'd really like pse to run next. */
1986+
set_next_buddy(se);
1987+
1988+
/* Make p's CPU reschedule; pick_next_entity takes care of fairness. */
1989+
if (preempt)
1990+
resched_task(rq->curr);
1991+
1992+
yield_task_fair(rq);
1993+
1994+
return true;
1995+
}
1996+
19781997
#ifdef CONFIG_SMP
19791998
/**************************************************
19801999
* Fair scheduling class load-balancing methods:
@@ -4243,6 +4262,7 @@ static const struct sched_class fair_sched_class = {
42434262
.enqueue_task = enqueue_task_fair,
42444263
.dequeue_task = dequeue_task_fair,
42454264
.yield_task = yield_task_fair,
4265+
.yield_to_task = yield_to_task_fair,
42464266

42474267
.check_preempt_curr = check_preempt_wakeup,
42484268

0 commit comments

Comments
 (0)