@@ -1214,9 +1214,8 @@ int tg_nop(struct task_group *tg, void *data)
 }
 #endif
 
-static void set_load_weight(struct task_struct *p)
+static void set_load_weight(struct task_struct *p, bool update_load)
 {
-	bool update_load = !(READ_ONCE(p->__state) & TASK_NEW);
 	int prio = p->static_prio - MAX_RT_PRIO;
 	struct load_weight *load = &p->se.load;
 
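Why the new parameter: the removed line derived update_load from READ_ONCE(p->__state) & TASK_NEW inside set_load_weight() itself, whereas after this change each caller states explicitly whether the weight of an already-queued task must be re-propagated (false on the fork and sched_init() paths below, true in set_user_nice() and __setscheduler_params()). The standalone toy sketch below only illustrates that caller contract; every identifier in it is invented and it is not kernel code.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a task whose scheduling weight can change. */
struct toy_task {
	int static_prio;
	int weight;	/* plays the role of p->se.load.weight */
	bool queued;	/* plays the role of "already enqueued" */
};

/*
 * Toy analogue of set_load_weight(p, update_load): the caller, not this
 * function, decides whether the new weight must be propagated to an
 * already-queued task, because the task's own state may not be
 * trustworthy yet (as during fork).
 */
static void toy_set_load_weight(struct toy_task *p, bool update_load)
{
	int w = 1024 / (p->static_prio + 1);	/* made-up weight formula */

	if (update_load && p->queued)
		printf("reweighting queued task to %d\n", w);
	p->weight = w;
}

int main(void)
{
	struct toy_task child = { .static_prio = 20, .queued = false };

	toy_set_load_weight(&child, false);	/* fork path: never queued yet */
	child.queued = true;
	child.static_prio = 10;
	toy_set_load_weight(&child, true);	/* nice/setscheduler path */
	printf("final weight %d\n", child.weight);
	return 0;
}
```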
@@ -4407,7 +4406,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 			p->static_prio = NICE_TO_PRIO(0);
 
 		p->prio = p->normal_prio = p->static_prio;
-		set_load_weight(p);
+		set_load_weight(p, false);
 
 		/*
 		 * We don't need the reset flag anymore after the fork. It has
@@ -4425,6 +4424,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 
 	init_entity_runnable_average(&p->se);
 
+
 #ifdef CONFIG_SCHED_INFO
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -4440,18 +4440,23 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	return 0;
 }
 
-void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 {
 	unsigned long flags;
-#ifdef CONFIG_CGROUP_SCHED
-	struct task_group *tg;
-#endif
 
+	/*
+	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
+	 * required yet, but lockdep gets upset if rules are violated.
+	 */
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 #ifdef CONFIG_CGROUP_SCHED
-	tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
-			  struct task_group, css);
-	p->sched_task_group = autogroup_task_group(p, tg);
+	if (1) {
+		struct task_group *tg;
+		tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
+				  struct task_group, css);
+		tg = autogroup_task_group(p, tg);
+		p->sched_task_group = tg;
+	}
 #endif
 	rseq_migrate(p);
 	/*
@@ -4462,7 +4467,10 @@ void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
 
+void sched_post_fork(struct task_struct *p)
+{
 	uclamp_post_fork(p);
 }
 
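The two hunks above split the old sched_post_fork() in two: sched_cgroup_fork() keeps the pi_lock-protected task_group attachment, rseq_migrate() and the task_fork() class callback, while the remaining sched_post_fork() only runs uclamp_post_fork(). The matching kernel/fork.c call sites are not part of this section; the standalone toy below merely models the intended two-phase ordering (attach while the child is still invisible and its cgroup pinned, finish up after it has been published), and every identifier in it is made up.

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model of a half-constructed child task (not kernel code). */
struct toy_task {
	const char *group;	/* stands in for p->sched_task_group */
	bool visible;		/* stands in for being on the pid-hash */
	bool clamps_applied;	/* stands in for uclamp_post_fork() */
};

/* Analogue of sched_cgroup_fork(): runs before the child is visible. */
static void toy_cgroup_fork(struct toy_task *p, const char *pinned_group)
{
	/* nothing can race with an invisible child, so just attach it */
	if (!p->visible)
		p->group = pinned_group;
}

/* Analogue of sched_post_fork(): runs once the child is fully set up. */
static void toy_post_fork(struct toy_task *p)
{
	p->clamps_applied = true;
}

int main(void)
{
	struct toy_task child = { 0 };

	toy_cgroup_fork(&child, "cpu:/foo");	/* early: group still pinned */
	child.visible = true;			/* child published */
	toy_post_fork(&child);			/* late: remaining fixups */

	printf("group=%s clamps=%d\n", child.group, child.clamps_applied);
	return 0;
}
```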
@@ -6922,7 +6930,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		put_prev_task(rq, p);
 
 	p->static_prio = NICE_TO_PRIO(nice);
-	set_load_weight(p);
+	set_load_weight(p, true);
 	old_prio = p->prio;
 	p->prio = effective_prio(p);
 
@@ -7213,7 +7221,7 @@ static void __setscheduler_params(struct task_struct *p,
 	 */
 	p->rt_priority = attr->sched_priority;
 	p->normal_prio = normal_prio(p);
-	set_load_weight(p);
+	set_load_weight(p, true);
 }
 
 /*
@@ -9446,7 +9454,7 @@ void __init sched_init(void)
 #endif
 	}
 
-	set_load_weight(&init_task);
+	set_load_weight(&init_task, false);
 
 	/*
 	 * The boot idle thread does lazy MMU switching as well: