@@ -707,14 +707,14 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
/*
 * Since irq_time is only updated on {soft,}irq_exit, we might run into
 * this case when a previous update_rq_clock() happened inside a
- * {soft,}irq region.
+ * {soft,}IRQ region.
 *
 * When this happens, we stop ->clock_task and only update the
 * prev_irq_time stamp to account for the part that fit, so that a next
 * update will consume the rest. This ensures ->clock_task is
 * monotonic.
 *
- * It does however cause some slight miss-attribution of {soft,}irq
+ * It does however cause some slight miss-attribution of {soft,}IRQ
 * time, a more accurate solution would be to update the irq_time using
 * the current rq->clock timestamp, except that would require using
 * atomic ops.
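
The comment above describes clamping pending IRQ time to the available window so that ->clock_task never goes backwards. Below is a minimal userspace sketch of that clamping idea, not the kernel's update_rq_clock_task() itself; the struct and field names are illustrative stand-ins for rq->clock_task and rq->prev_irq_time.

#include <stdio.h>
#include <stdint.h>

/*
 * Hedged sketch of the clamping described in the comment: only the part
 * of the pending IRQ time that fits inside 'delta' is consumed now; the
 * rest stays behind in prev_irq_time and is accounted by a later update,
 * so the task clock never goes backwards.
 */
struct rq_sketch {
	int64_t clock_task;    /* task clock, must stay monotonic */
	int64_t prev_irq_time; /* IRQ time already accounted */
};

static void update_clock_task_sketch(struct rq_sketch *rq,
				     int64_t total_irq_time, int64_t delta)
{
	int64_t irq_delta = total_irq_time - rq->prev_irq_time;

	if (irq_delta > delta)          /* more IRQ time than the window: */
		irq_delta = delta;      /* consume only the part that fits */

	rq->prev_irq_time += irq_delta; /* remember what was accounted */
	rq->clock_task += delta - irq_delta;
}

int main(void)
{
	struct rq_sketch rq = { .clock_task = 0, .prev_irq_time = 0 };

	/* 80 units of pending IRQ time, but only a 50 unit window: clock_task stalls. */
	update_clock_task_sketch(&rq, 80, 50);
	printf("clock_task=%lld prev_irq_time=%lld\n",
	       (long long)rq.clock_task, (long long)rq.prev_irq_time);

	/* The next update consumes the remaining 30 units of IRQ time. */
	update_clock_task_sketch(&rq, 80, 60);
	printf("clock_task=%lld prev_irq_time=%lld\n",
	       (long long)rq.clock_task, (long long)rq.prev_irq_time);
	return 0;
}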
@@ -827,7 +827,7 @@ static void __hrtick_start(void *arg)
/*
 * Called to set the hrtick timer state.
 *
- * called with rq->lock held and irqs disabled
+ * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
@@ -851,7 +851,7 @@ void hrtick_start(struct rq *rq, u64 delay)
/*
 * Called to set the hrtick timer state.
 *
- * called with rq->lock held and irqs disabled
+ * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
@@ -885,7 +885,7 @@ static inline void hrtick_rq_init(struct rq *rq)
#endif /* CONFIG_SCHED_HRTICK */

/*
- * cmpxchg based fetch_or, macro so it works for different integer types
+ * try_cmpxchg based fetch_or() macro so it works for different integer types:
 */
#define fetch_or(ptr, mask) \
({ \
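
The macro itself is only shown truncated here. As a rough illustration of the idea named in the comment, here is a hedged userspace sketch of a compare-and-swap based fetch_or, using C11 atomics rather than the kernel's try_cmpxchg(), and fixed to one integer type instead of the macro's type-generic form.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of a CAS-based fetch_or: repeatedly try to install (old | mask)
 * and return the value observed before the OR took effect.
 * compare_exchange_weak updates 'old' on failure, much like try_cmpxchg().
 */
static uint32_t sketch_fetch_or(_Atomic uint32_t *ptr, uint32_t mask)
{
	uint32_t old = atomic_load(ptr);

	while (!atomic_compare_exchange_weak(ptr, &old, old | mask))
		;	/* spurious or contended failure: retry with refreshed 'old' */
	return old;
}

int main(void)
{
	_Atomic uint32_t flags = 0x1;
	uint32_t prev = sketch_fetch_or(&flags, 0x4);

	/* prev is the pre-OR value (0x1); flags now holds 0x5. */
	printf("prev=%#x now=%#x\n", prev, (unsigned int)atomic_load(&flags));
	return 0;
}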
@@ -1082,7 +1082,7 @@ void resched_cpu(int cpu)
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
- * (as that CPU's timer base may not be uptodate wrt jiffies etc).
+ * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
@@ -1142,7 +1142,7 @@ static void wake_up_idle_cpu(int cpu)
 * nohz functions that would need to follow TIF_NR_POLLING
 * clearing:
 *
- * - On most archs, a simple fetch_or on ti::flags with a
+ * - On most architectures, a simple fetch_or on ti::flags with a
 *   "0" value would be enough to know if an IPI needs to be sent.
 *
 * - x86 needs to perform a last need_resched() check between
@@ -1651,7 +1651,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
rq_clamp = uclamp_rq_get(rq, clamp_id);
/*
 * Defensive programming: this should never happen. If it happens,
- * e.g. due to future modification, warn and fixup the expected value.
+ * e.g. due to future modification, warn and fix up the expected value.
 */
SCHED_WARN_ON(bucket->value > rq_clamp);
if (bucket->value >= rq_clamp) {
@@ -2227,7 +2227,7 @@ static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
return;

/*
- * Violates locking rules! see comment in __do_set_cpus_allowed().
+ * Violates locking rules! See comment in __do_set_cpus_allowed().
 */
__do_set_cpus_allowed(p, &ac);
}
@@ -2394,7 +2394,7 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
}

/*
- * migration_cpu_stop - this will be executed by a highprio stopper thread
+ * migration_cpu_stop - this will be executed by a high-prio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
@@ -3694,8 +3694,8 @@ void sched_ttwu_pending(void *arg)
 * it is possible for select_idle_siblings() to stack a number
 * of tasks on this CPU during that window.
 *
- * It is ok to clear ttwu_pending when another task pending.
- * We will receive IPI after local irq enabled and then enqueue it.
+ * It is OK to clear ttwu_pending when another task pending.
+ * We will receive IPI after local IRQ enabled and then enqueue it.
 * Since now nr_running > 0, idle_cpu() will always get correct result.
 */
WRITE_ONCE(rq->ttwu_pending, 0);
@@ -5017,7 +5017,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 *
 * The context switch have flipped the stack from under us and restored the
 * local variables which were saved when this task called schedule() in the
- * past. prev == current is still correct but we need to recalculate this_rq
+ * past. 'prev == current' is still correct but we need to recalculate this_rq
 * because prev may have moved to another CPU.
 */
static struct rq *finish_task_switch(struct task_struct *prev)
@@ -5363,7 +5363,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
/*
 * 64-bit doesn't need locks to atomically read a 64-bit value.
 * So we have a optimization chance when the task's delta_exec is 0.
- * Reading ->on_cpu is racy, but this is ok.
+ * Reading ->on_cpu is racy, but this is OK.
 *
 * If we race with it leaving CPU, we'll take a lock. So we're correct.
 * If we race with it entering CPU, unaccounted time is 0. This is
@@ -6637,7 +6637,7 @@ void __sched schedule_idle(void)
{
/*
 * As this skips calling sched_submit_work(), which the idle task does
- * regardless because that function is a nop when the task is in a
+ * regardless because that function is a NOP when the task is in a
 * TASK_RUNNING state, make sure this isn't used someplace that the
 * current task can be in any other state. Note, idle is always in the
 * TASK_RUNNING state.
@@ -6832,9 +6832,9 @@ EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);

/*
 * This is the entry point to schedule() from kernel preemption
- * off of irq context.
- * Note, that this is called and return with irqs disabled. This will
- * protect us against recursive calling from irq.
+ * off of IRQ context.
+ * Note, that this is called and return with IRQs disabled. This will
+ * protect us against recursive calling from IRQ contexts.
 */
asmlinkage __visible void __sched preempt_schedule_irq(void)
{
@@ -6953,7 +6953,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
goto out_unlock;

/*
- * Idle task boosting is a nono in general. There is one
+ * Idle task boosting is a no-no in general. There is one
 * exception, when PREEMPT_RT and NOHZ is active:
 *
 * The idle task calls get_next_timer_interrupt() and holds
@@ -7356,11 +7356,11 @@ PREEMPT_MODEL_ACCESSOR(none);
PREEMPT_MODEL_ACCESSOR(voluntary);
PREEMPT_MODEL_ACCESSOR(full);

-#else /* !CONFIG_PREEMPT_DYNAMIC */
+#else /* !CONFIG_PREEMPT_DYNAMIC: */

static inline void preempt_dynamic_init(void) { }

-#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
+#endif /* CONFIG_PREEMPT_DYNAMIC */

int io_schedule_prepare(void)
{
@@ -7970,7 +7970,7 @@ int sched_cpu_deactivate(unsigned int cpu)
 * Specifically, we rely on ttwu to no longer target this CPU, see
 * ttwu_queue_cond() and is_cpu_allowed().
 *
- * Do sync before park smpboot threads to take care the rcu boost case.
+ * Do sync before park smpboot threads to take care the RCU boost case.
 */
synchronize_rcu();

@@ -8045,7 +8045,7 @@ int sched_cpu_wait_empty(unsigned int cpu)
 * Since this CPU is going 'away' for a while, fold any nr_active delta we
 * might have. Called from the CPU stopper task after ensuring that the
 * stopper is the last running task on the CPU, so nr_active count is
- * stable. We need to take the teardown thread which is calling this into
+ * stable. We need to take the tear-down thread which is calling this into
 * account, so we hand in adjust = 1 to the load calculation.
 *
 * Also see the comment "Global load-average calculations".
@@ -8239,7 +8239,7 @@ void __init sched_init(void)
/*
 * How much CPU bandwidth does root_task_group get?
 *
- * In case of task-groups formed thr' the cgroup filesystem, it
+ * In case of task-groups formed through the cgroup filesystem, it
 * gets 100% of the CPU resources in the system. This overall
 * system CPU resource is divided among the tasks of
 * root_task_group and its child task-groups in a fair manner,
@@ -8541,7 +8541,7 @@ void normalize_rt_tasks(void)

#if defined(CONFIG_KGDB_KDB)
/*
- * These functions are only useful for kdb.
+ * These functions are only useful for KDB.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
@@ -8649,7 +8649,7 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
online_fair_sched_group(tg);
}

-/* rcu callback to free various structures associated with a task group */
+/* RCU callback to free various structures associated with a task group */
static void sched_unregister_group_rcu(struct rcu_head *rhp)
{
/* Now it should be safe to free those cfs_rqs: */
@@ -9767,10 +9767,10 @@ const int sched_prio_to_weight[40] = {
};

/*
- * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
+ * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
 *
 * In cases where the weight does not change often, we can use the
- * precalculated inverse to speed up arithmetics by turning divisions
+ * pre-calculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
const u32 sched_prio_to_wmult[40] = {
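
To make the "divisions into multiplications" remark concrete, here is a small worked sketch: dividing by a weight w is approximated by multiplying by its pre-calculated inverse 2^32/w and shifting right by 32. The helper below is a simplified stand-in, not fair.c's __calc_delta(); the weight 1024 and inverse 4194304 correspond to the nice-0 entries of these tables, and the overflow handling the kernel performs for large values is omitted.

#include <stdint.h>
#include <stdio.h>

static uint64_t div_by_weight(uint64_t x, uint32_t weight)
{
	return x / weight;              /* the slow way: an integer division */
}

static uint64_t mul_by_inverse(uint64_t x, uint32_t inv_weight)
{
	return (x * inv_weight) >> 32;  /* the fast way: multiply + shift */
}

int main(void)
{
	uint64_t delta = 6000000;       /* e.g. 6 ms expressed in ns */
	uint32_t weight = 1024;         /* weight of a nice-0 task */
	uint32_t inv_weight = 4194304;  /* 2^32 / 1024 */

	/* Both print 5859: the multiply-by-inverse matches the division. */
	printf("div: %llu  mul+shift: %llu\n",
	       (unsigned long long)div_by_weight(delta, weight),
	       (unsigned long long)mul_by_inverse(delta, inv_weight));
	return 0;
}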
@@ -10026,16 +10026,16 @@ void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
/*
 * Move the src cid if the dst cid is unset. This keeps id
 * allocation closest to 0 in cases where few threads migrate around
- * many cpus.
+ * many CPUs.
 *
 * If destination cid is already set, we may have to just clear
 * the src cid to ensure compactness in frequent migrations
 * scenarios.
 *
 * It is not useful to clear the src cid when the number of threads is
- * greater or equal to the number of allowed cpus, because user-space
+ * greater or equal to the number of allowed CPUs, because user-space
 * can expect that the number of allowed cids can reach the number of
- * allowed cpus.
+ * allowed CPUs.
 */
dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
dst_cid = READ_ONCE(dst_pcpu_cid->cid);
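
A hedged sketch of the decision the comment describes, reduced to a tiny policy function. This is not the kernel's sched_mm_cid_migrate_to() logic, only an illustration of the three outcomes the comment reasons about (move the source CID, keep it, or clear it); the names and the exact conditions are simplified assumptions.

#include <stdbool.h>
#include <stdio.h>

enum cid_action { CID_MOVE_TO_DST, CID_KEEP_SRC, CID_CLEAR_SRC };

/*
 * Simplified policy: prefer moving the source CID when the destination
 * has none (keeps IDs packed close to 0); when the destination already
 * has a CID, clearing the source only helps compactness if the process
 * has fewer threads than allowed CPUs, since user space may legitimately
 * see up to nr_allowed_cpus distinct CIDs otherwise.
 */
static enum cid_action migrate_cid_policy(bool dst_has_cid,
					  int nr_threads, int nr_allowed_cpus)
{
	if (!dst_has_cid)
		return CID_MOVE_TO_DST;
	if (nr_threads >= nr_allowed_cpus)
		return CID_KEEP_SRC;
	return CID_CLEAR_SRC;
}

int main(void)
{
	/* Prints: 0 (move), 1 (keep), 2 (clear). */
	printf("%d %d %d\n",
	       migrate_cid_policy(false, 4, 8),
	       migrate_cid_policy(true, 16, 8),
	       migrate_cid_policy(true, 4, 8));
	return 0;
}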