Commit 3b03706

Author: Ingo Molnar <mingo@kernel.org>
sched: Fix various typos
Fix ~42 single-word typos in scheduler code comments. We have accumulated
a few fun ones over the years. :-)

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: linux-kernel@vger.kernel.org
Parent: 90f093f

20 files changed, 42 insertions(+), 42 deletions(-)

include/linux/sched.h

Lines changed: 1 addition & 1 deletion
@@ -1097,7 +1097,7 @@ struct task_struct {
 #ifdef CONFIG_CPUSETS
 /* Protected by ->alloc_lock: */
 nodemask_t mems_allowed;
-/* Seqence number to catch updates: */
+/* Sequence number to catch updates: */
 seqcount_spinlock_t mems_allowed_seq;
 int cpuset_mem_spread_rotor;
 int cpuset_slab_spread_rotor;
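Note: the seqcount_spinlock_t shown in the context is what the fixed comment refers to: readers sample the sequence number, read mems_allowed, and retry if the number changed in between. A toy userspace sketch of that read/retry pattern (illustrative only; the names and the simplified memory ordering are not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

/*
 * Toy sequence counter in userspace C11, mimicking the "sequence number to
 * catch updates" idea. Deliberately simplified: the data read races in ways
 * the real kernel seqcount machinery handles properly.
 */
static atomic_uint seq;		/* even = stable, odd = update in progress */
static int mems_snapshot;	/* stands in for mems_allowed */

static void writer_update(int new_value)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel); /* -> odd */
	mems_snapshot = new_value;
	atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel); /* -> even */
}

static int reader_read(void)
{
	unsigned int s;
	int val;

	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
		val = mems_snapshot;
	} while ((s & 1) ||
		 atomic_load_explicit(&seq, memory_order_acquire) != s);

	return val;
}

int main(void)
{
	writer_update(42);
	printf("read %d\n", reader_read());
	return 0;
}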

kernel/sched/clock.c

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@
  * Otherwise it tries to create a semi stable clock from a mixture of other
  * clocks, including:
  *
- * - GTOD (clock monotomic)
+ * - GTOD (clock monotonic)
  * - sched_clock()
  * - explicit idle events
  *

kernel/sched/core.c

Lines changed: 1 addition & 1 deletion
@@ -8975,7 +8975,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 return -EINVAL;
 
 /*
- * Likewise, bound things on the otherside by preventing insane quota
+ * Likewise, bound things on the other side by preventing insane quota
  * periods. This also allows us to normalize in computing quota
  * feasibility.
  */
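Note: the comment being fixed belongs to a two-sided sanity check on CFS bandwidth parameters: very short periods are rejected as too fine-grained, and very long ones as insane, which also keeps the quota-feasibility math well behaved. A rough standalone sketch of that kind of validation (the constants and the function name are illustrative, not the kernel's actual limits):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bounds only -- not the kernel's real limits. */
#define MIN_PERIOD_NS  1000000ULL	/* 1 ms */
#define MAX_PERIOD_NS  1000000000ULL	/* 1 s  */

/*
 * Reject quota/period pairs outside a sane range, in the spirit of
 * tg_set_cfs_bandwidth(): bound the period from below *and* from above.
 */
static int check_bandwidth(uint64_t period_ns, uint64_t quota_ns)
{
	if (period_ns < MIN_PERIOD_NS || quota_ns < MIN_PERIOD_NS)
		return -EINVAL;		/* too fine-grained */
	if (period_ns > MAX_PERIOD_NS)
		return -EINVAL;		/* insane quota period */
	return 0;
}

int main(void)
{
	printf("%d\n", check_bandwidth(100000000ULL, 50000000ULL));  /* 0 */
	printf("%d\n", check_bandwidth(5000000000ULL, 50000000ULL)); /* -EINVAL */
	return 0;
}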

kernel/sched/cpuacct.c

Lines changed: 1 addition & 1 deletion
@@ -104,7 +104,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
 
 /*
  * We allow index == CPUACCT_STAT_NSTATS here to read
- * the sum of suages.
+ * the sum of usages.
  */
 BUG_ON(index > CPUACCT_STAT_NSTATS);
 
kernel/sched/cpufreq_schedutil.c

Lines changed: 1 addition & 1 deletion
@@ -471,7 +471,7 @@ static void sugov_work(struct kthread_work *work)
 
 /*
  * Hold sg_policy->update_lock shortly to handle the case where:
- * incase sg_policy->next_freq is read here, and then updated by
+ * in case sg_policy->next_freq is read here, and then updated by
  * sugov_deferred_update() just before work_in_progress is set to false
  * here, we may miss queueing the new update.
  *
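Note: the race the fixed comment describes is why next_freq is sampled and work_in_progress cleared under the same lock; done separately, an update landing in between could be lost. A simplified userspace sketch of that pattern, using a pthread mutex in place of the in-kernel lock (names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int next_freq;
static bool work_in_progress;

/* Deferred-update side: record a new request while work is pending. */
static void deferred_update(unsigned int freq)
{
	pthread_mutex_lock(&update_lock);
	next_freq = freq;
	work_in_progress = true;
	pthread_mutex_unlock(&update_lock);
}

/*
 * Worker side: snapshot the frequency and clear the flag atomically with
 * respect to deferred_update(), so no request is silently dropped.
 */
static unsigned int work(void)
{
	unsigned int freq;

	pthread_mutex_lock(&update_lock);
	freq = next_freq;
	work_in_progress = false;
	pthread_mutex_unlock(&update_lock);

	return freq;	/* apply the frequency outside the lock */
}

int main(void)
{
	deferred_update(1800000);
	printf("applying %u kHz\n", work());
	return 0;
}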

kernel/sched/cpupri.c

Lines changed: 2 additions & 2 deletions
@@ -77,7 +77,7 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
  * When looking at the vector, we need to read the counter,
  * do a memory barrier, then read the mask.
  *
- * Note: This is still all racey, but we can deal with it.
+ * Note: This is still all racy, but we can deal with it.
  * Ideally, we only want to look at masks that are set.
  *
  * If a mask is not set, then the only thing wrong is that we
@@ -186,7 +186,7 @@ int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
  * The cost of this trade-off is not entirely clear and will probably
  * be good for some workloads and bad for others.
  *
- * The main idea here is that if some CPUs were overcommitted, we try
+ * The main idea here is that if some CPUs were over-committed, we try
  * to spread which is what the scheduler traditionally did. Sys admins
  * must do proper RT planning to avoid overloading the system if they
  * really care.
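Note: the "read the counter, do a memory barrier, then read the mask" ordering in the first hunk is what makes the lockless lookup tolerable: a stale but ordered view only causes a harmless miss or an extra check. A toy C11 sketch of that read ordering (illustrative; not the cpupri data structures):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy per-priority vector: a count of CPUs at this priority plus a mask. */
struct prio_vec {
	atomic_int count;
	atomic_uint mask;	/* bit n set => CPU n runs at this priority */
};

/*
 * Read the counter first, then (with acquire ordering standing in for the
 * memory barrier) read the mask. The result can still be racy/stale, which
 * the caller is expected to tolerate.
 */
static bool vec_has_cpu(struct prio_vec *vec, unsigned int cpu)
{
	if (!atomic_load_explicit(&vec->count, memory_order_acquire))
		return false;	/* nothing at this priority, skip the mask */

	return atomic_load_explicit(&vec->mask, memory_order_relaxed) &
	       (1u << cpu);
}

int main(void)
{
	struct prio_vec v;

	atomic_init(&v.count, 1);
	atomic_init(&v.mask, 1u << 3);	/* CPU 3 */
	printf("cpu3: %d, cpu5: %d\n", vec_has_cpu(&v, 3), vec_has_cpu(&v, 5));
	return 0;
}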

kernel/sched/cputime.c

Lines changed: 1 addition & 1 deletion
@@ -563,7 +563,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
 
 /*
  * If either stime or utime are 0, assume all runtime is userspace.
- * Once a task gets some ticks, the monotonicy code at 'update:'
+ * Once a task gets some ticks, the monotonicity code at 'update:'
  * will ensure things converge to the observed ratio.
  */
 if (stime == 0) {
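Note: around the fixed comment, cputime_adjust() splits the precise rtime between user and system time in proportion to the sampled tick counts; when one of them is still zero, all of rtime goes to the other side. A small worked sketch of that proportional split (illustrative, not the kernel's exact helper):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative proportional split of rtime into stime/utime:
 * if one component is 0, the other gets everything.
 */
static void split_rtime(uint64_t rtime, uint64_t stime_ticks,
			uint64_t utime_ticks, uint64_t *stime, uint64_t *utime)
{
	if (stime_ticks == 0) {		/* assume all runtime is userspace */
		*stime = 0;
		*utime = rtime;
		return;
	}
	if (utime_ticks == 0) {		/* all runtime is system time */
		*stime = rtime;
		*utime = 0;
		return;
	}
	*stime = rtime * stime_ticks / (stime_ticks + utime_ticks);
	*utime = rtime - *stime;
}

int main(void)
{
	uint64_t s, u;

	split_rtime(1000000, 1, 3, &s, &u);	/* 25% system, 75% user */
	printf("stime=%llu utime=%llu\n",
	       (unsigned long long)s, (unsigned long long)u);
	return 0;
}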

kernel/sched/deadline.c

Lines changed: 6 additions & 6 deletions
@@ -245,7 +245,7 @@ static void dl_change_utilization(struct task_struct *p, u64 new_bw)
 p->dl.dl_non_contending = 0;
 /*
  * If the timer handler is currently running and the
- * timer cannot be cancelled, inactive_task_timer()
+ * timer cannot be canceled, inactive_task_timer()
  * will see that dl_not_contending is not set, and
  * will not touch the rq's active utilization,
  * so we are still safe.
@@ -267,7 +267,7 @@ static void dl_change_utilization(struct task_struct *p, u64 new_bw)
  * fires.
  *
  * If the task wakes up again before the inactive timer fires,
- * the timer is cancelled, whereas if the task wakes up after the
+ * the timer is canceled, whereas if the task wakes up after the
  * inactive timer fired (and running_bw has been decreased) the
  * task's utilization has to be added to running_bw again.
  * A flag in the deadline scheduling entity (dl_non_contending)
@@ -385,7 +385,7 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags)
 dl_se->dl_non_contending = 0;
 /*
  * If the timer handler is currently running and the
- * timer cannot be cancelled, inactive_task_timer()
+ * timer cannot be canceled, inactive_task_timer()
  * will see that dl_not_contending is not set, and
  * will not touch the rq's active utilization,
  * so we are still safe.
@@ -1206,7 +1206,7 @@ extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
  * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
  * multiplied by 2^BW_SHIFT, the result has to be shifted right by
  * BW_SHIFT.
- * Since rq->dl.bw_ratio contains 1 / Umax multipled by 2^RATIO_SHIFT,
+ * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
  * dl_bw is multiped by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
  * Since delta is a 64 bit variable, to have an overflow its value
  * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
@@ -1737,7 +1737,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
 p->dl.dl_non_contending = 0;
 /*
  * If the timer handler is currently running and the
- * timer cannot be cancelled, inactive_task_timer()
+ * timer cannot be canceled, inactive_task_timer()
  * will see that dl_not_contending is not set, and
  * will not touch the rq's active utilization,
  * so we are still safe.
@@ -2745,7 +2745,7 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
 
 /*
  * Default limits for DL period; on the top end we guard against small util
- * tasks still getting rediculous long effective runtimes, on the bottom end we
+ * tasks still getting ridiculously long effective runtimes, on the bottom end we
  * guard against timer DoS.
  */
 unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
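Note: the fixed-point arithmetic described in the @@ -1206 hunk can be made concrete with a worked example: a utilization of 0.5 is stored as 0.5 * 2^BW_SHIFT, so multiplying a runtime delta by it and shifting right by BW_SHIFT halves the delta. A sketch of the arithmetic only, using the shift widths 20 and 8 that the comment itself quotes (not the kernel's actual function):

#include <stdint.h>
#include <stdio.h>

/* Shift widths as quoted in the comment: 2^(64 - 20 - 8). */
#define BW_SHIFT	20
#define RATIO_SHIFT	8

int main(void)
{
	uint64_t delta = 3000000;			/* 3 ms in ns */
	uint64_t util  = (1ULL << BW_SHIFT) / 2;	/* 0.5 in fixed point */
	uint64_t ratio = 1ULL << RATIO_SHIFT;		/* 1/Umax == 1.0 */

	/* Scale delta by utilization, then by 1/Umax, undoing both shifts. */
	uint64_t scaled = (delta * util) >> BW_SHIFT;

	scaled = (scaled * ratio) >> RATIO_SHIFT;

	printf("delta=%llu scaled=%llu\n",
	       (unsigned long long)delta, (unsigned long long)scaled);
	/* prints: delta=3000000 scaled=1500000 */
	return 0;
}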

kernel/sched/debug.c

Lines changed: 1 addition & 1 deletion
@@ -815,7 +815,7 @@ void sysrq_sched_debug_show(void)
 }
 
 /*
- * This itererator needs some explanation.
+ * This iterator needs some explanation.
  * It returns 1 for the header position.
  * This means 2 is CPU 0.
  * In a hotplugged system some CPUs, including CPU 0, may be missing so we have

kernel/sched/fair.c

Lines changed: 9 additions & 9 deletions
@@ -1125,7 +1125,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 return rss / nr_scan_pages;
 }
 
-/* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
+/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
 #define MAX_SCAN_WINDOW 2560
 
 static unsigned int task_scan_min(struct task_struct *p)
@@ -2577,7 +2577,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 }
 
 /*
- * Get rid of NUMA staticstics associated with a task (either current or dead).
+ * Get rid of NUMA statistics associated with a task (either current or dead).
  * If @final is set, the task is dead and has reached refcount zero, so we can
  * safely free all relevant data structures. Otherwise, there might be
  * concurrent reads from places like load balancing and procfs, and we should
@@ -3952,7 +3952,7 @@ static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
  *
  * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
  *
- * NOTE: this only works when value + maring < INT_MAX.
+ * NOTE: this only works when value + margin < INT_MAX.
  */
 static inline bool within_margin(int value, int margin)
 {
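Note: the identity in the hunk above can be checked directly: for positive margin, abs(value) < margin exactly when (unsigned)(value + margin - 1) < 2 * margin - 1, because negative values wrap to huge unsigned numbers while in-range values land in [0, 2*margin - 2]. A small self-check of the trick (valid as long as value + margin stays below INT_MAX, per the NOTE):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* The branchless form from the comment above. */
static bool within_margin(int value, int margin)
{
	return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
}

int main(void)
{
	int margin = 100;

	/* Verify against the obvious abs() formulation on a range of values. */
	for (int value = -1000; value <= 1000; value++)
		assert(within_margin(value, margin) == (abs(value) < margin));

	printf("within_margin matches abs(x) < y for margin=%d\n", margin);
	return 0;
}
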
@@ -4256,7 +4256,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 /*
  * When bandwidth control is enabled, cfs might have been removed
  * because of a parent been throttled but cfs->nr_running > 1. Try to
- * add it unconditionnally.
+ * add it unconditionally.
  */
 if (cfs_rq->nr_running == 1 || cfs_bandwidth_used())
 list_add_leaf_cfs_rq(cfs_rq);
@@ -5311,7 +5311,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
  * bits doesn't do much.
  */
 
-/* cpu online calback */
+/* cpu online callback */
 static void __maybe_unused update_runtime_enabled(struct rq *rq)
 {
 struct task_group *tg;
@@ -6963,7 +6963,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
 /*
  * This is possible from callers such as attach_tasks(), in which we
- * unconditionally check_prempt_curr() after an enqueue (which may have
+ * unconditionally check_preempt_curr() after an enqueue (which may have
  * lead to a throttle). This both saves work and prevents false
  * next-buddy nomination below.
  */
@@ -7595,7 +7595,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 return 0;
 }
 
-/* Record that we found atleast one task that could run on dst_cpu */
+/* Record that we found at least one task that could run on dst_cpu */
 env->flags &= ~LBF_ALL_PINNED;
 
 if (task_running(env->src_rq, p)) {
@@ -9690,7 +9690,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
  * load to given_cpu. In rare situations, this may cause
  * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
  * _independently_ and at _same_ time to move some load to
- * given_cpu) causing exceess load to be moved to given_cpu.
+ * given_cpu) causing excess load to be moved to given_cpu.
  * This however should not happen so much in practice and
  * moreover subsequent load balance cycles should correct the
  * excess load moved.
@@ -9834,7 +9834,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 /*
  * newidle_balance() disregards balance intervals, so we could
  * repeatedly reach this code, which would lead to balance_interval
- * skyrocketting in a short amount of time. Skip the balance_interval
+ * skyrocketing in a short amount of time. Skip the balance_interval
  * increase logic to avoid that.
  */
 if (env.idle == CPU_NEWLY_IDLE)
