@@ -1125,7 +1125,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 	return rss / nr_scan_pages;
 }
 
-/* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
+/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
 #define MAX_SCAN_WINDOW 2560
 
 static unsigned int task_scan_min(struct task_struct *p)
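For a rough feel of what the 2560 MB/sec cap above means in PTE terms, here is a back-of-the-envelope check; the 4 KiB page size is an assumption for illustration, not something stated in this hunk.

/* Illustrative arithmetic only, not kernel code; assumes 4 KiB pages. */
#include <stdio.h>

int main(void)
{
	unsigned long max_scan_window_mb = 2560;	/* mirrors MAX_SCAN_WINDOW */
	unsigned long page_size = 4096;			/* assumed base page size */
	unsigned long ptes_per_sec = max_scan_window_mb * (1UL << 20) / page_size;

	printf("scan cap ~= %lu PTEs/sec\n", ptes_per_sec);	/* prints 655360 */
	return 0;
}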
@@ -2577,7 +2577,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 }
 
 /*
- * Get rid of NUMA staticstics associated with a task (either current or dead).
+ * Get rid of NUMA statistics associated with a task (either current or dead).
  * If @final is set, the task is dead and has reached refcount zero, so we can
  * safely free all relevant data structures. Otherwise, there might be
  * concurrent reads from places like load balancing and procfs, and we should
@@ -3952,7 +3952,7 @@ static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
  *
  * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
  *
- * NOTE: this only works when value + maring < INT_MAX.
+ * NOTE: this only works when value + margin < INT_MAX.
  */
 static inline bool within_margin(int value, int margin)
 {
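The identity documented above can be checked in isolation. A minimal user-space sketch follows; the helper name is reused purely for illustration and is a standalone reimplementation, not the kernel's build.

/* Standalone check of: abs(x) < y  <=>  (unsigned)(x + y - 1) < (2 * y - 1),
 * valid while value + margin stays below INT_MAX.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

static bool within_margin_demo(int value, int margin)
{
	/* one unsigned comparison instead of abs() plus two compares */
	return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
}

int main(void)
{
	int margin = 1024;

	for (int value = -4096; value <= 4096; value++)
		assert(within_margin_demo(value, margin) == (abs(value) < margin));

	return 0;
}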
@@ -4256,7 +4256,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	/*
 	 * When bandwidth control is enabled, cfs might have been removed
 	 * because of a parent been throttled but cfs->nr_running > 1. Try to
-	 * add it unconditionnally.
+	 * add it unconditionally.
 	 */
 	if (cfs_rq->nr_running == 1 || cfs_bandwidth_used())
 		list_add_leaf_cfs_rq(cfs_rq);
@@ -5311,7 +5311,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
  * bits doesn't do much.
  */
 
-/* cpu online calback */
+/* cpu online callback */
 static void __maybe_unused update_runtime_enabled(struct rq *rq)
 {
 	struct task_group *tg;
@@ -6963,7 +6963,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
 	/*
 	 * This is possible from callers such as attach_tasks(), in which we
-	 * unconditionally check_prempt_curr() after an enqueue (which may have
+	 * unconditionally check_preempt_curr() after an enqueue (which may have
 	 * lead to a throttle). This both saves work and prevents false
 	 * next-buddy nomination below.
 	 */
@@ -7595,7 +7595,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 		return 0;
 	}
 
-	/* Record that we found atleast one task that could run on dst_cpu */
+	/* Record that we found at least one task that could run on dst_cpu */
 	env->flags &= ~LBF_ALL_PINNED;
 
 	if (task_running(env->src_rq, p)) {
@@ -9690,7 +9690,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		 * load to given_cpu. In rare situations, this may cause
 		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
 		 * _independently_ and at _same_ time to move some load to
-		 * given_cpu) causing exceess load to be moved to given_cpu.
+		 * given_cpu) causing excess load to be moved to given_cpu.
 		 * This however should not happen so much in practice and
 		 * moreover subsequent load balance cycles should correct the
 		 * excess load moved.
@@ -9834,7 +9834,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	/*
 	 * newidle_balance() disregards balance intervals, so we could
 	 * repeatedly reach this code, which would lead to balance_interval
-	 * skyrocketting in a short amount of time. Skip the balance_interval
+	 * skyrocketing in a short amount of time. Skip the balance_interval
 	 * increase logic to avoid that.
 	 */
 	if (env.idle == CPU_NEWLY_IDLE)
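To see why the comment above guards newly-idle passes, here is a toy model of the interval back-off; the doubling step and the cap are illustrative assumptions about the "increase logic" the comment refers to, not code taken from this file.

/* Toy model only: if every newly-idle pass were allowed to run the
 * back-off, a doubling interval would balloon within a few passes.
 * The doubling and the cap below are assumptions for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long interval = 8;		/* ms, illustrative start */
	unsigned long max_interval = 4096;	/* ms, illustrative cap */

	for (int pass = 1; pass <= 10; pass++) {
		if (interval * 2 <= max_interval)
			interval *= 2;
		printf("pass %2d: balance_interval = %lu ms\n", pass, interval);
	}
	return 0;
}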