Skip to content

Commit f213a6c

Browse files
committed
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar: "The main changes in this cycle were: - fix affine wakeups (Peter Zijlstra) - improve CPU onlining (and general bootup) scalability on systems with ridiculous number (thousands) of CPUs (Peter Zijlstra) - sched/numa updates (Rik van Riel) - sched/deadline updates (Byungchul Park) - sched/cpufreq enhancements and related cleanups (Viresh Kumar) - sched/debug enhancements (Xie XiuQi) - various fixes" * 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits) sched/debug: Optimize sched_domain sysctl generation sched/topology: Avoid pointless rebuild sched/topology, cpuset: Avoid spurious/wrong domain rebuilds sched/topology: Improve comments sched/topology: Fix memory leak in __sdt_alloc() sched/completion: Document that reinit_completion() must be called after complete_all() sched/autogroup: Fix error reporting printk text in autogroup_create() sched/fair: Fix wake_affine() for !NUMA_BALANCING sched/debug: Introduce task_state_to_char() helper function sched/debug: Show task state in /proc/sched_debug sched/debug: Use task_pid_nr_ns in /proc/$pid/sched sched/core: Remove unnecessary initialization init_idle_bootup_task() sched/deadline: Change return value of cpudl_find() sched/deadline: Make find_later_rq() choose a closer CPU in topology sched/numa: Scale scan period with tasks in group and shared/private sched/numa: Slow down scan rate if shared faults dominate sched/pelt: Fix false running accounting sched: Mark pick_next_task_dl() and build_sched_domain() as static sched/cpupri: Don't re-initialize 'struct cpupri' sched/deadline: Don't re-initialize 'struct cpudl' ...
2 parents 621bee3 + bbdacdf commit f213a6c

File tree

18 files changed

+459
-263
lines changed

18 files changed

+459
-263
lines changed

arch/x86/include/asm/topology.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -75,12 +75,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
7575

7676
extern void setup_node_to_cpumask_map(void);
7777

78-
/*
79-
* Returns the number of the node containing Node 'node'. This
80-
* architecture is flat, so it is a pretty simple function!
81-
*/
82-
#define parent_node(node) (node)
83-
8478
#define pcibus_to_node(bus) __pcibus_to_node(bus)
8579

8680
extern int __node_distance(int, int);

fs/proc/base.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1408,12 +1408,13 @@ static const struct file_operations proc_fail_nth_operations = {
14081408
static int sched_show(struct seq_file *m, void *v)
14091409
{
14101410
struct inode *inode = m->private;
1411+
struct pid_namespace *ns = inode->i_sb->s_fs_info;
14111412
struct task_struct *p;
14121413

14131414
p = get_proc_task(inode);
14141415
if (!p)
14151416
return -ESRCH;
1416-
proc_sched_show_task(p, m);
1417+
proc_sched_show_task(p, ns, m);
14171418

14181419
put_task_struct(p);
14191420

include/linux/sched.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1233,6 +1233,19 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
12331233
return task_pgrp_nr_ns(tsk, &init_pid_ns);
12341234
}
12351235

1236+
static inline char task_state_to_char(struct task_struct *task)
1237+
{
1238+
const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
1239+
unsigned long state = task->state;
1240+
1241+
state = state ? __ffs(state) + 1 : 0;
1242+
1243+
/* Make sure the string lines up properly with the number of task states: */
1244+
BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1);
1245+
1246+
return state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?';
1247+
}
1248+
12361249
/**
12371250
* is_global_init - check if a task structure is init. Since init
12381251
* is free to have sub-threads we need to check tgid.

include/linux/sched/debug.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
*/
77

88
struct task_struct;
9+
struct pid_namespace;
910

1011
extern void dump_cpu_task(int cpu);
1112

@@ -34,7 +35,8 @@ extern void sched_show_task(struct task_struct *p);
3435

3536
#ifdef CONFIG_SCHED_DEBUG
3637
struct seq_file;
37-
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
38+
extern void proc_sched_show_task(struct task_struct *p,
39+
struct pid_namespace *ns, struct seq_file *m);
3840
extern void proc_sched_set_task(struct task_struct *p);
3941
#endif
4042

include/linux/sched/task.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,6 @@ extern int lockdep_tasklist_lock_is_held(void);
3030

3131
extern asmlinkage void schedule_tail(struct task_struct *prev);
3232
extern void init_idle(struct task_struct *idle, int cpu);
33-
extern void init_idle_bootup_task(struct task_struct *idle);
3433

3534
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
3635
extern void sched_dead(struct task_struct *p);

include/linux/sched/topology.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,14 @@ struct sched_domain_shared {
7171
atomic_t ref;
7272
atomic_t nr_busy_cpus;
7373
int has_idle_cores;
74+
75+
/*
76+
* Some variables from the most recent sd_lb_stats for this domain,
77+
* used by wake_affine().
78+
*/
79+
unsigned long nr_running;
80+
unsigned long load;
81+
unsigned long capacity;
7482
};
7583

7684
struct sched_domain {

init/main.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -430,7 +430,6 @@ static noinline void __ref rest_init(void)
430430
* The boot idle thread must execute schedule()
431431
* at least once to get things moving:
432432
*/
433-
init_idle_bootup_task(current);
434433
schedule_preempt_disabled();
435434
/* Call into cpu_idle with preempt disabled */
436435
cpu_startup_entry(CPUHP_ONLINE);

kernel/cgroup/cpuset.c

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2344,13 +2344,7 @@ void cpuset_update_active_cpus(void)
23442344
* We're inside cpu hotplug critical region which usually nests
23452345
* inside cgroup synchronization. Bounce actual hotplug processing
23462346
* to a work item to avoid reverse locking order.
2347-
*
2348-
* We still need to do partition_sched_domains() synchronously;
2349-
* otherwise, the scheduler will get confused and put tasks to the
2350-
* dead CPU. Fall back to the default single domain.
2351-
* cpuset_hotplug_workfn() will rebuild it as necessary.
23522347
*/
2353-
partition_sched_domains(1, NULL, NULL);
23542348
schedule_work(&cpuset_hotplug_work);
23552349
}
23562350

kernel/sched/autogroup.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,6 @@ static inline struct autogroup *autogroup_create(void)
7171
goto out_fail;
7272

7373
tg = sched_create_group(&root_task_group);
74-
7574
if (IS_ERR(tg))
7675
goto out_free;
7776

@@ -101,7 +100,7 @@ static inline struct autogroup *autogroup_create(void)
101100
out_fail:
102101
if (printk_ratelimit()) {
103102
printk(KERN_WARNING "autogroup_create: %s failure.\n",
104-
ag ? "sched_create_group()" : "kmalloc()");
103+
ag ? "sched_create_group()" : "kzalloc()");
105104
}
106105

107106
return autogroup_kref_get(&autogroup_default);

kernel/sched/completion.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,13 @@ EXPORT_SYMBOL(complete);
4747
*
4848
* It may be assumed that this function implies a write memory barrier before
4949
* changing the task state if and only if any tasks are woken up.
50+
*
51+
* Since complete_all() sets the completion of @x permanently to done
52+
* to allow multiple waiters to finish, a call to reinit_completion()
53+
* must be used on @x if @x is to be used again. The code must make
54+
* sure that all waiters have woken and finished before reinitializing
55+
* @x. Also note that the function completion_done() can not be used
56+
* to know if there are still waiters after complete_all() has been called.
5057
*/
5158
void complete_all(struct completion *x)
5259
{
@@ -297,6 +304,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
297304
* Return: 0 if there are waiters (wait_for_completion() in progress)
298305
* 1 if there are no waiters.
299306
*
307+
* Note, this will always return true if complete_all() was called on @X.
300308
*/
301309
bool completion_done(struct completion *x)
302310
{

0 commit comments

Comments
 (0)