
Commit 402de7f

sched: Fix spelling in comments

Author: Ingo Molnar

Do a spell-checking pass.

Signed-off-by: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>

1 parent 04746ed · commit 402de7f

16 files changed (+92, -92 lines)


kernel/sched/clock.c

Lines changed: 2 additions & 2 deletions

@@ -340,7 +340,7 @@ static notrace u64 sched_clock_remote(struct sched_clock_data *scd)
 this_clock = sched_clock_local(my_scd);
 /*
 * We must enforce atomic readout on 32-bit, otherwise the
- * update on the remote CPU can hit inbetween the readout of
+ * update on the remote CPU can hit in between the readout of
 * the low 32-bit and the high 32-bit portion.
 */
 remote_clock = cmpxchg64(&scd->clock, 0, 0);
@@ -444,7 +444,7 @@ notrace void sched_clock_tick_stable(void)
 }

 /*
- * We are going deep-idle (irqs are disabled):
+ * We are going deep-idle (IRQs are disabled):
 */
 notrace void sched_clock_idle_sleep_event(void)
 {
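The comment fixed in the first hunk documents the trick used by the context line remote_clock = cmpxchg64(&scd->clock, 0, 0): a compare-and-exchange with old == new == 0 never modifies the clock, but it still returns the current 64-bit contents in a single atomic operation, so the readout cannot tear between the low and high halves on 32-bit. A minimal sketch of the idea (the helper name read_u64_atomic() is invented here for illustration, it is not part of the patch):

#include <linux/atomic.h>
#include <linux/types.h>

/*
 * Sketch only: read a 64-bit value atomically even on 32-bit, where a
 * plain load would be two 32-bit loads.  cmpxchg64(ptr, 0, 0) writes 0
 * only if the value already is 0 (a no-op), and it always returns the
 * full 64-bit contents atomically.
 */
static inline u64 read_u64_atomic(u64 *ptr)
{
	return cmpxchg64(ptr, 0, 0);
}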

kernel/sched/core.c

Lines changed: 31 additions & 31 deletions

@@ -707,14 +707,14 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 /*
 * Since irq_time is only updated on {soft,}irq_exit, we might run into
 * this case when a previous update_rq_clock() happened inside a
- * {soft,}irq region.
+ * {soft,}IRQ region.
 *
 * When this happens, we stop ->clock_task and only update the
 * prev_irq_time stamp to account for the part that fit, so that a next
 * update will consume the rest. This ensures ->clock_task is
 * monotonic.
 *
- * It does however cause some slight miss-attribution of {soft,}irq
+ * It does however cause some slight miss-attribution of {soft,}IRQ
 * time, a more accurate solution would be to update the irq_time using
 * the current rq->clock timestamp, except that would require using
 * atomic ops.
@@ -827,7 +827,7 @@ static void __hrtick_start(void *arg)
 /*
 * Called to set the hrtick timer state.
 *
- * called with rq->lock held and irqs disabled
+ * called with rq->lock held and IRQs disabled
 */
 void hrtick_start(struct rq *rq, u64 delay)
 {
@@ -851,7 +851,7 @@ void hrtick_start(struct rq *rq, u64 delay)
 /*
 * Called to set the hrtick timer state.
 *
- * called with rq->lock held and irqs disabled
+ * called with rq->lock held and IRQs disabled
 */
 void hrtick_start(struct rq *rq, u64 delay)
 {
@@ -885,7 +885,7 @@ static inline void hrtick_rq_init(struct rq *rq)
 #endif /* CONFIG_SCHED_HRTICK */

 /*
- * cmpxchg based fetch_or, macro so it works for different integer types
+ * try_cmpxchg based fetch_or() macro so it works for different integer types:
 */
 #define fetch_or(ptr, mask) \
 ({ \
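The hunk above only touches the comment for fetch_or(), but the pattern it names is worth spelling out: a try_cmpxchg()-based fetch-or atomically ORs a mask into a word and returns the previous value, retrying until the compare-and-exchange succeeds. A sketch of the pattern under a hypothetical name, not necessarily the exact macro body in core.c:

#include <linux/atomic.h>

/*
 * Sketch of a try_cmpxchg()-based fetch_or(): atomically set the bits
 * in @mask and hand back the value that was there before.  On failure
 * try_cmpxchg() refreshes '_old' with the current value, so the loop
 * simply retries with the new snapshot.
 */
#define fetch_or_sketch(ptr, mask) \
({ \
	typeof(ptr) _ptr = (ptr); \
	typeof(mask) _mask = (mask); \
	typeof(*_ptr) _old = *_ptr; \
 \
	while (!try_cmpxchg(_ptr, &_old, _old | _mask)) \
		; \
	_old; \
})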
@@ -1082,7 +1082,7 @@ void resched_cpu(int cpu)
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
- * (as that CPU's timer base may not be uptodate wrt jiffies etc).
+ * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
 int get_nohz_timer_target(void)
 {
@@ -1142,7 +1142,7 @@ static void wake_up_idle_cpu(int cpu)
 * nohz functions that would need to follow TIF_NR_POLLING
 * clearing:
 *
- * - On most archs, a simple fetch_or on ti::flags with a
+ * - On most architectures, a simple fetch_or on ti::flags with a
 * "0" value would be enough to know if an IPI needs to be sent.
 *
 * - x86 needs to perform a last need_resched() check between
@@ -1651,7 +1651,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
 rq_clamp = uclamp_rq_get(rq, clamp_id);
 /*
 * Defensive programming: this should never happen. If it happens,
- * e.g. due to future modification, warn and fixup the expected value.
+ * e.g. due to future modification, warn and fix up the expected value.
 */
 SCHED_WARN_ON(bucket->value > rq_clamp);
 if (bucket->value >= rq_clamp) {
@@ -2227,7 +2227,7 @@ static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
 return;

 /*
- * Violates locking rules! see comment in __do_set_cpus_allowed().
+ * Violates locking rules! See comment in __do_set_cpus_allowed().
 */
 __do_set_cpus_allowed(p, &ac);
 }
@@ -2394,7 +2394,7 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
 }

 /*
- * migration_cpu_stop - this will be executed by a highprio stopper thread
+ * migration_cpu_stop - this will be executed by a high-prio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
@@ -3694,8 +3694,8 @@ void sched_ttwu_pending(void *arg)
 * it is possible for select_idle_siblings() to stack a number
 * of tasks on this CPU during that window.
 *
- * It is ok to clear ttwu_pending when another task pending.
- * We will receive IPI after local irq enabled and then enqueue it.
+ * It is OK to clear ttwu_pending when another task pending.
+ * We will receive IPI after local IRQ enabled and then enqueue it.
 * Since now nr_running > 0, idle_cpu() will always get correct result.
 */
 WRITE_ONCE(rq->ttwu_pending, 0);
@@ -5017,7 +5017,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 *
 * The context switch have flipped the stack from under us and restored the
 * local variables which were saved when this task called schedule() in the
- * past. prev == current is still correct but we need to recalculate this_rq
+ * past. 'prev == current' is still correct but we need to recalculate this_rq
 * because prev may have moved to another CPU.
 */
 static struct rq *finish_task_switch(struct task_struct *prev)
@@ -5363,7 +5363,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 /*
 * 64-bit doesn't need locks to atomically read a 64-bit value.
 * So we have a optimization chance when the task's delta_exec is 0.
- * Reading ->on_cpu is racy, but this is ok.
+ * Reading ->on_cpu is racy, but this is OK.
 *
 * If we race with it leaving CPU, we'll take a lock. So we're correct.
 * If we race with it entering CPU, unaccounted time is 0. This is
@@ -6637,7 +6637,7 @@ void __sched schedule_idle(void)
 {
 /*
 * As this skips calling sched_submit_work(), which the idle task does
- * regardless because that function is a nop when the task is in a
+ * regardless because that function is a NOP when the task is in a
 * TASK_RUNNING state, make sure this isn't used someplace that the
 * current task can be in any other state. Note, idle is always in the
 * TASK_RUNNING state.
@@ -6832,9 +6832,9 @@ EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);

 /*
 * This is the entry point to schedule() from kernel preemption
- * off of irq context.
- * Note, that this is called and return with irqs disabled. This will
- * protect us against recursive calling from irq.
+ * off of IRQ context.
+ * Note, that this is called and return with IRQs disabled. This will
+ * protect us against recursive calling from IRQ contexts.
 */
 asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
@@ -6953,7 +6953,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
 goto out_unlock;

 /*
- * Idle task boosting is a nono in general. There is one
+ * Idle task boosting is a no-no in general. There is one
 * exception, when PREEMPT_RT and NOHZ is active:
 *
 * The idle task calls get_next_timer_interrupt() and holds
@@ -7356,11 +7356,11 @@ PREEMPT_MODEL_ACCESSOR(none);
 PREEMPT_MODEL_ACCESSOR(voluntary);
 PREEMPT_MODEL_ACCESSOR(full);

-#else /* !CONFIG_PREEMPT_DYNAMIC */
+#else /* !CONFIG_PREEMPT_DYNAMIC: */

 static inline void preempt_dynamic_init(void) { }

-#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
+#endif /* CONFIG_PREEMPT_DYNAMIC */

 int io_schedule_prepare(void)
 {
@@ -7970,7 +7970,7 @@ int sched_cpu_deactivate(unsigned int cpu)
 * Specifically, we rely on ttwu to no longer target this CPU, see
 * ttwu_queue_cond() and is_cpu_allowed().
 *
- * Do sync before park smpboot threads to take care the rcu boost case.
+ * Do sync before park smpboot threads to take care the RCU boost case.
 */
 synchronize_rcu();

@@ -8045,7 +8045,7 @@ int sched_cpu_wait_empty(unsigned int cpu)
 * Since this CPU is going 'away' for a while, fold any nr_active delta we
 * might have. Called from the CPU stopper task after ensuring that the
 * stopper is the last running task on the CPU, so nr_active count is
- * stable. We need to take the teardown thread which is calling this into
+ * stable. We need to take the tear-down thread which is calling this into
 * account, so we hand in adjust = 1 to the load calculation.
 *
 * Also see the comment "Global load-average calculations".
@@ -8239,7 +8239,7 @@ void __init sched_init(void)
 /*
 * How much CPU bandwidth does root_task_group get?
 *
- * In case of task-groups formed thr' the cgroup filesystem, it
+ * In case of task-groups formed through the cgroup filesystem, it
 * gets 100% of the CPU resources in the system. This overall
 * system CPU resource is divided among the tasks of
 * root_task_group and its child task-groups in a fair manner,
@@ -8541,7 +8541,7 @@ void normalize_rt_tasks(void)

 #if defined(CONFIG_KGDB_KDB)
 /*
- * These functions are only useful for kdb.
+ * These functions are only useful for KDB.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
@@ -8649,7 +8649,7 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
 online_fair_sched_group(tg);
 }

-/* rcu callback to free various structures associated with a task group */
+/* RCU callback to free various structures associated with a task group */
 static void sched_unregister_group_rcu(struct rcu_head *rhp)
 {
 /* Now it should be safe to free those cfs_rqs: */
@@ -9767,10 +9767,10 @@ const int sched_prio_to_weight[40] = {
 };

 /*
- * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
+ * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
 *
 * In cases where the weight does not change often, we can use the
- * precalculated inverse to speed up arithmetics by turning divisions
+ * pre-calculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
 const u32 sched_prio_to_wmult[40] = {
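The comment touched in the hunk above describes a standard reciprocal-multiply trick: instead of dividing by a task's weight, multiply by a pre-computed 2^32/weight and shift right by 32. A hedged, self-contained illustration of the arithmetic (plain C, not the scheduler's actual calc_delta path; 1024 is the nice-0 weight and 4194304 its stored inverse):

#include <stdint.h>
#include <stdio.h>

/*
 * Dividing by a weight w is replaced by multiplying by inv_w = 2^32 / w
 * and shifting right by 32.  Both computations below yield 2929.
 */
int main(void)
{
	uint64_t delta_exec = 3000000;		/* example runtime in ns */
	uint32_t weight = 1024;			/* nice-0 weight */
	uint32_t inv_weight = 4194304;		/* 2^32 / 1024, pre-calculated */

	uint64_t by_division = delta_exec / weight;
	uint64_t by_multiply = (delta_exec * (uint64_t)inv_weight) >> 32;

	printf("%llu %llu\n",
	       (unsigned long long)by_division,
	       (unsigned long long)by_multiply);
	return 0;
}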
@@ -10026,16 +10026,16 @@ void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
 /*
 * Move the src cid if the dst cid is unset. This keeps id
 * allocation closest to 0 in cases where few threads migrate around
- * many cpus.
+ * many CPUs.
 *
 * If destination cid is already set, we may have to just clear
 * the src cid to ensure compactness in frequent migrations
 * scenarios.
 *
 * It is not useful to clear the src cid when the number of threads is
- * greater or equal to the number of allowed cpus, because user-space
+ * greater or equal to the number of allowed CPUs, because user-space
 * can expect that the number of allowed cids can reach the number of
- * allowed cpus.
+ * allowed CPUs.
 */
 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
 dst_cid = READ_ONCE(dst_pcpu_cid->cid);

kernel/sched/core_sched.c

Lines changed: 1 addition & 1 deletion

@@ -279,7 +279,7 @@ void __sched_core_account_forceidle(struct rq *rq)
 continue;

 /*
- * Note: this will account forceidle to the current cpu, even
+ * Note: this will account forceidle to the current CPU, even
 * if it comes from our SMT sibling.
 */
 __account_forceidle_time(p, delta);

kernel/sched/cputime.c

Lines changed: 7 additions & 7 deletions

@@ -14,11 +14,11 @@
 * They are only modified in vtime_account, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
- * This may result in other CPU reading this CPU's irq time and can
+ * This may result in other CPU reading this CPU's IRQ time and can
 * race with irq/vtime_account on this CPU. We would either get old
- * or new value with a side effect of accounting a slice of irq time to wrong
- * task when irq is in progress while we read rq->clock. That is a worthy
- * compromise in place of having locks on each irq in account_system_time.
+ * or new value with a side effect of accounting a slice of IRQ time to wrong
+ * task when IRQ is in progress while we read rq->clock. That is a worthy
+ * compromise in place of having locks on each IRQ in account_system_time.
 */
 DEFINE_PER_CPU(struct irqtime, cpu_irqtime);

@@ -269,7 +269,7 @@ static __always_inline u64 steal_account_process_time(u64 maxtime)
 }

 /*
- * Account how much elapsed time was spent in steal, irq, or softirq time.
+ * Account how much elapsed time was spent in steal, IRQ, or softirq time.
 */
 static inline u64 account_other_time(u64 max)
 {
@@ -370,7 +370,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 * Check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
- * p->stime and friends are only updated on system time and not on irq
+ * p->stime and friends are only updated on system time and not on IRQ
 * softirq as those do not count in task exec_runtime any more.
 */
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
@@ -380,7 +380,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,

 /*
 * When returning from idle, many ticks can get accounted at
- * once, including some ticks of steal, irq, and softirq time.
+ * once, including some ticks of steal, IRQ, and softirq time.
 * Subtract those ticks from the amount of time accounted to
 * idle, or potentially user or system time. Due to rounding,
 * other time can exceed ticks occasionally.

kernel/sched/deadline.c

Lines changed: 4 additions & 4 deletions

@@ -708,7 +708,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 }

 /*
- * And we finally need to fixup root_domain(s) bandwidth accounting,
+ * And we finally need to fix up root_domain(s) bandwidth accounting,
 * since p is still hanging out in the old (now moved to default) root
 * domain.
 */
@@ -992,7 +992,7 @@ static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
- * CBS is applied. the runtime is replenished and a new absolute deadline is
+ * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
@@ -1294,7 +1294,7 @@ int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
 * by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
- * is multiped by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
+ * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value should be
 * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
 * not an issue here.
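The comment corrected in the hunk above packs two fixed-point conventions into one sentence: utilizations are stored scaled by 2^BW_SHIFT and the 1/Umax ratio by 2^RATIO_SHIFT (the 2^(64 - 20 - 8) overflow bound in the same comment implies shifts of 20 and 8). A hedged, standalone illustration of the arithmetic, not the kernel's actual bandwidth-reclaiming code:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20	/* utilizations stored as u << 20 */
#define RATIO_SHIFT	8	/* 1/Umax stored as ratio << 8 */

/*
 * Sketch: scale a deadline bandwidth (utilization * 2^BW_SHIFT) by a
 * 1/Umax ratio (stored * 2^RATIO_SHIFT).  Multiplying and then shifting
 * right by RATIO_SHIFT keeps the result in BW_SHIFT fixed point, as the
 * comment describes.
 */
int main(void)
{
	/* 40% utilization in BW_SHIFT fixed point */
	uint64_t dl_bw = (uint64_t)(0.4 * (1 << BW_SHIFT));

	/* Umax = 0.95, so 1/Umax in RATIO_SHIFT fixed point */
	uint64_t bw_ratio = (uint64_t)((1.0 / 0.95) * (1 << RATIO_SHIFT));

	uint64_t scaled = (dl_bw * bw_ratio) >> RATIO_SHIFT;

	/* Prints roughly 0.42: the 40% bandwidth charged against Umax. */
	printf("%f\n", (double)scaled / (1 << BW_SHIFT));
	return 0;
}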
@@ -2488,7 +2488,7 @@ static void pull_dl_task(struct rq *this_rq)
 src_rq = cpu_rq(cpu);

 /*
- * It looks racy, abd it is! However, as in sched_rt.c,
+ * It looks racy, and it is! However, as in sched_rt.c,
 * we are fine with this.
 */
 if (this_rq->dl.dl_nr_running &&

kernel/sched/fair.c

Lines changed: 2 additions & 2 deletions

@@ -61,7 +61,7 @@
 * Options are:
 *
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
- * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
+ * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
 *
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
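The scaling factors named in this hunk are easy to mis-read, so here is a small sketch of the three modes, assuming ilog() means the integer base-2 logarithm; the helper below is an illustration, not the fair.c implementation:

#include <stdio.h>

/* Integer base-2 logarithm, i.e. floor(log2(n)) for n >= 1. */
static unsigned int ilog2_u(unsigned int n)
{
	unsigned int l = 0;

	while (n >>= 1)
		l++;
	return l;
}

/*
 * The three tunable-scaling modes from the comment:
 *   NONE   -> factor = 1
 *   LOG    -> factor = 1 + ilog(ncpus)   (the default)
 *   LINEAR -> factor = ncpus
 */
int main(void)
{
	unsigned int ncpus = 16;	/* example machine size */

	/* Prints: NONE: 1, LOG: 5, LINEAR: 16 */
	printf("NONE: %u, LOG: %u, LINEAR: %u\n",
	       1U, 1U + ilog2_u(ncpus), ncpus);
	return 0;
}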
@@ -8719,7 +8719,7 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
 * topology where each level pairs two lower groups (or better). This results
 * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
 * tree to only the first of the previous level and we decrease the frequency
- * of load-balance at each level inv. proportional to the number of CPUs in
+ * of load-balance at each level inversely proportional to the number of CPUs in
 * the groups.
 *
 * This yields:
