Skip to content

Commit 5ab551d

Browse files
committed
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes: group scheduling corner case fix, two deadline scheduler
  fixes, effective_load() overflow fix, nested sleep fix, 6144 CPUs
  system fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix RCU stall upon -ENOMEM in sched_create_group()
  sched/deadline: Avoid double-accounting in case of missed deadlines
  sched/deadline: Fix migration of SCHED_DEADLINE tasks
  sched: Fix odd values in effective_load() calculations
  sched, fanotify: Deal with nested sleeps
  sched: Fix KMALLOC_MAX_SIZE overflow during cpumask allocation
2 parents ddb321a + 7f1a169 commit 5ab551d

File tree

4 files changed

+19
-35
lines changed

4 files changed

+19
-35
lines changed

fs/notify/fanotify/fanotify_user.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
259259
struct fsnotify_event *kevent;
260260
char __user *start;
261261
int ret;
262-
DEFINE_WAIT(wait);
262+
DEFINE_WAIT_FUNC(wait, woken_wake_function);
263263

264264
start = buf;
265265
group = file->private_data;
266266

267267
pr_debug("%s: group=%p\n", __func__, group);
268268

269+
add_wait_queue(&group->notification_waitq, &wait);
269270
while (1) {
270-
prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
271-
272271
mutex_lock(&group->notification_mutex);
273272
kevent = get_one_event(group, count);
274273
mutex_unlock(&group->notification_mutex);
@@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
289288

290289
if (start != buf)
291290
break;
292-
schedule();
291+
292+
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
293293
continue;
294294
}
295295

@@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
318318
buf += ret;
319319
count -= ret;
320320
}
321+
remove_wait_queue(&group->notification_waitq, &wait);
321322

322-
finish_wait(&group->notification_waitq, &wait);
323323
if (start != buf && ret != -EFAULT)
324324
ret = buf - start;
325325
return ret;

kernel/sched/core.c

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -7112,9 +7112,6 @@ void __init sched_init(void)
71127112
#endif
71137113
#ifdef CONFIG_RT_GROUP_SCHED
71147114
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7115-
#endif
7116-
#ifdef CONFIG_CPUMASK_OFFSTACK
7117-
alloc_size += num_possible_cpus() * cpumask_size();
71187115
#endif
71197116
if (alloc_size) {
71207117
ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
71357132
ptr += nr_cpu_ids * sizeof(void **);
71367133

71377134
#endif /* CONFIG_RT_GROUP_SCHED */
7135+
}
71387136
#ifdef CONFIG_CPUMASK_OFFSTACK
7139-
for_each_possible_cpu(i) {
7140-
per_cpu(load_balance_mask, i) = (void *)ptr;
7141-
ptr += cpumask_size();
7142-
}
7143-
#endif /* CONFIG_CPUMASK_OFFSTACK */
7137+
for_each_possible_cpu(i) {
7138+
per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
7139+
cpumask_size(), GFP_KERNEL, cpu_to_node(i));
71447140
}
7141+
#endif /* CONFIG_CPUMASK_OFFSTACK */
71457142

71467143
init_rt_bandwidth(&def_rt_bandwidth,
71477144
global_rt_period(), global_rt_runtime());

kernel/sched/deadline.c

Lines changed: 4 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
570570
static
571571
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
572572
{
573-
int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
574-
int rorun = dl_se->runtime <= 0;
575-
576-
if (!rorun && !dmiss)
577-
return 0;
578-
579-
/*
580-
* If we are beyond our current deadline and we are still
581-
* executing, then we have already used some of the runtime of
582-
* the next instance. Thus, if we do not account that, we are
583-
* stealing bandwidth from the system at each deadline miss!
584-
*/
585-
if (dmiss) {
586-
dl_se->runtime = rorun ? dl_se->runtime : 0;
587-
dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
588-
}
589-
590-
return 1;
573+
return (dl_se->runtime <= 0);
591574
}
592575

593576
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
826809
* parameters of the task might need updating. Otherwise,
827810
* we want a replenishment of its runtime.
828811
*/
829-
if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
830-
replenish_dl_entity(dl_se, pi_se);
831-
else
812+
if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
832813
update_dl_entity(dl_se, pi_se);
814+
else if (flags & ENQUEUE_REPLENISH)
815+
replenish_dl_entity(dl_se, pi_se);
833816

834817
__enqueue_dl_entity(dl_se);
835818
}

kernel/sched/fair.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
40054005

40064006
static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
40074007
{
4008+
/* init_cfs_bandwidth() was not called */
4009+
if (!cfs_b->throttled_cfs_rq.next)
4010+
return;
4011+
40084012
hrtimer_cancel(&cfs_b->period_timer);
40094013
hrtimer_cancel(&cfs_b->slack_timer);
40104014
}
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
44244428
* wl = S * s'_i; see (2)
44254429
*/
44264430
if (W > 0 && w < W)
4427-
wl = (w * tg->shares) / W;
4431+
wl = (w * (long)tg->shares) / W;
44284432
else
44294433
wl = tg->shares;
44304434

0 commit comments

Comments (0)