@@ -9309,47 +9309,23 @@ static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
 #ifdef CONFIG_CFS_BANDWIDTH
 static DEFINE_MUTEX(cfs_constraints_mutex);
 
-const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
-static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
-/* More than 203 days if BW_SHIFT equals 20. */
-static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
-
 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
 
-static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
-				u64 burst)
+static int tg_set_cfs_bandwidth(struct task_group *tg,
+				u64 period_us, u64 quota_us, u64 burst_us)
 {
 	int i, ret = 0, runtime_enabled, runtime_was_enabled;
 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
+	u64 period, quota, burst;
 
-	if (tg == &root_task_group)
-		return -EINVAL;
-
-	/*
-	 * Ensure we have at some amount of bandwidth every period. This is
-	 * to prevent reaching a state of large arrears when throttled via
-	 * entity_tick() resulting in prolonged exit starvation.
-	 */
-	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
-		return -EINVAL;
+	period = (u64)period_us * NSEC_PER_USEC;
 
-	/*
-	 * Likewise, bound things on the other side by preventing insane quota
-	 * periods.  This also allows us to normalize in computing quota
-	 * feasibility.
-	 */
-	if (period > max_cfs_quota_period)
-		return -EINVAL;
-
-	/*
-	 * Bound quota to defend quota against overflow during bandwidth shift.
-	 */
-	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
-		return -EINVAL;
+	if (quota_us == RUNTIME_INF)
+		quota = RUNTIME_INF;
+	else
+		quota = (u64)quota_us * NSEC_PER_USEC;
 
-	if (quota != RUNTIME_INF && (burst > quota ||
-				     burst + quota > max_cfs_runtime))
-		return -EINVAL;
+	burst = (u64)burst_us * NSEC_PER_USEC;
 
 	/*
 	 * Prevent race between setting of cfs_rq->runtime_enabled and
@@ -9437,50 +9413,6 @@ static u64 tg_get_cfs_burst(struct task_group *tg)
 	return burst_us;
 }
 
-static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
-{
-	u64 quota, period, burst;
-
-	if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
-		return -EINVAL;
-
-	period = (u64)cfs_period_us * NSEC_PER_USEC;
-	quota = tg->cfs_bandwidth.quota;
-	burst = tg->cfs_bandwidth.burst;
-
-	return tg_set_cfs_bandwidth(tg, period, quota, burst);
-}
-
-static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
-{
-	u64 quota, period, burst;
-
-	period = ktime_to_ns(tg->cfs_bandwidth.period);
-	burst = tg->cfs_bandwidth.burst;
-	if (cfs_quota_us < 0)
-		quota = RUNTIME_INF;
-	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
-		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
-	else
-		return -EINVAL;
-
-	return tg_set_cfs_bandwidth(tg, period, quota, burst);
-}
-
-static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
-{
-	u64 quota, period, burst;
-
-	if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
-		return -EINVAL;
-
-	burst = (u64)cfs_burst_us * NSEC_PER_USEC;
-	period = ktime_to_ns(tg->cfs_bandwidth.period);
-	quota = tg->cfs_bandwidth.quota;
-
-	return tg_set_cfs_bandwidth(tg, period, quota, burst);
-}
-
 struct cfs_schedulable_data {
 	struct task_group *tg;
 	u64 period, quota;
@@ -9614,6 +9546,11 @@ static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
 	return 0;
 }
 
+const u64 max_bw_quota_period_us = 1 * USEC_PER_SEC; /* 1s */
+static const u64 min_bw_quota_period_us = 1 * USEC_PER_MSEC; /* 1ms */
+/* More than 203 days if BW_SHIFT equals 20. */
+static const u64 max_bw_runtime_us = MAX_BW;
+
 static void tg_bandwidth(struct task_group *tg,
 			 u64 *period_us_p, u64 *quota_us_p, u64 *burst_us_p)
 {
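
The new bounds are expressed directly in microseconds, with max_bw_runtime_us now simply MAX_BW. The "more than 203 days" figure in the comment can be sanity-checked with a standalone calculation, assuming the usual kernel/sched/sched.h definitions (BW_SHIFT == 20 and MAX_BW == (1ULL << (64 - BW_SHIFT)) - 1), which the patch itself does not restate:

/* Standalone check (not part of the patch) of the "203 days" comment. */
#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT	20
#define MAX_BW		((1ULL << (64 - BW_SHIFT)) - 1)

int main(void)
{
	uint64_t max_runtime_us = MAX_BW;		/* bound is in microseconds */
	uint64_t secs = max_runtime_us / 1000000ULL;
	uint64_t days = secs / 86400ULL;

	/* Prints roughly 203 days, matching the comment above. */
	printf("max runtime: %llu us = %llu s = ~%llu days\n",
	       (unsigned long long)max_runtime_us,
	       (unsigned long long)secs,
	       (unsigned long long)days);
	return 0;
}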
@@ -9634,6 +9571,50 @@ static u64 cpu_period_read_u64(struct cgroup_subsys_state *css,
 	return period_us;
 }
 
+static int tg_set_bandwidth(struct task_group *tg,
+			    u64 period_us, u64 quota_us, u64 burst_us)
+{
+	const u64 max_usec = U64_MAX / NSEC_PER_USEC;
+
+	if (tg == &root_task_group)
+		return -EINVAL;
+
+	/* Values should survive translation to nsec */
+	if (period_us > max_usec ||
+	    (quota_us != RUNTIME_INF && quota_us > max_usec) ||
+	    burst_us > max_usec)
+		return -EINVAL;
+
+	/*
+	 * Ensure we have some amount of bandwidth every period. This is to
+	 * prevent reaching a state of large arrears when throttled via
+	 * entity_tick() resulting in prolonged exit starvation.
+	 */
+	if (quota_us < min_bw_quota_period_us ||
+	    period_us < min_bw_quota_period_us)
+		return -EINVAL;
+
+	/*
+	 * Likewise, bound things on the other side by preventing insane quota
+	 * periods.  This also allows us to normalize in computing quota
+	 * feasibility.
+	 */
+	if (period_us > max_bw_quota_period_us)
+		return -EINVAL;
+
+	/*
+	 * Bound quota to defend quota against overflow during bandwidth shift.
+	 */
+	if (quota_us != RUNTIME_INF && quota_us > max_bw_runtime_us)
+		return -EINVAL;
+
+	if (quota_us != RUNTIME_INF && (burst_us > quota_us ||
+					burst_us + quota_us > max_bw_runtime_us))
+		return -EINVAL;
+
+	return tg_set_cfs_bandwidth(tg, period_us, quota_us, burst_us);
+}
+
 static s64 cpu_quota_read_s64(struct cgroup_subsys_state *css,
 			      struct cftype *cft)
 {
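
The new tg_set_bandwidth() keeps all range checks in microseconds and only then hands the triplet to tg_set_cfs_bandwidth() for conversion. A userspace sketch (an assumption for illustration, not kernel code) that mirrors the same check order shows which writes get rejected and why:

/* Hypothetical userspace mirror of the tg_set_bandwidth() checks above. */
#include <stdio.h>
#include <stdint.h>

#define RUNTIME_INF		(~0ULL)
#define NSEC_PER_USEC		1000ULL
#define MIN_PERIOD_US		1000ULL			/* 1ms */
#define MAX_PERIOD_US		1000000ULL		/* 1s */
#define MAX_RUNTIME_US		((1ULL << 44) - 1)	/* MAX_BW, assuming BW_SHIFT == 20 */

static int check_bandwidth(uint64_t period_us, uint64_t quota_us, uint64_t burst_us)
{
	const uint64_t max_usec = UINT64_MAX / NSEC_PER_USEC;

	/* Values must survive the later translation to nanoseconds. */
	if (period_us > max_usec ||
	    (quota_us != RUNTIME_INF && quota_us > max_usec) ||
	    burst_us > max_usec)
		return -1;
	/* At least 1ms of quota and a period of at least 1ms. */
	if (quota_us < MIN_PERIOD_US || period_us < MIN_PERIOD_US)
		return -1;
	/* Period capped at 1s. */
	if (period_us > MAX_PERIOD_US)
		return -1;
	/* Quota, and quota + burst, bounded against bandwidth-shift overflow. */
	if (quota_us != RUNTIME_INF && quota_us > MAX_RUNTIME_US)
		return -1;
	if (quota_us != RUNTIME_INF &&
	    (burst_us > quota_us || burst_us + quota_us > MAX_RUNTIME_US))
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_bandwidth(100000, 50000, 0));	/* 50ms/100ms: accepted (0) */
	printf("%d\n", check_bandwidth(100000, 50000, 60000));	/* burst > quota: rejected (-1) */
	printf("%d\n", check_bandwidth(2000000, 50000, 0));	/* 2s period: rejected (-1) */
	return 0;
}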
@@ -9652,22 +9633,37 @@ static u64 cpu_burst_read_u64(struct cgroup_subsys_state *css,
 	return burst_us;
 }
 
-static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
-				    struct cftype *cftype, u64 cfs_period_us)
+static int cpu_period_write_u64(struct cgroup_subsys_state *css,
+				struct cftype *cftype, u64 period_us)
 {
-	return tg_set_cfs_period(css_tg(css), cfs_period_us);
+	struct task_group *tg = css_tg(css);
+	u64 quota_us, burst_us;
+
+	tg_bandwidth(tg, NULL, &quota_us, &burst_us);
+	return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
 }
 
-static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
-				   struct cftype *cftype, s64 cfs_quota_us)
+static int cpu_quota_write_s64(struct cgroup_subsys_state *css,
+			       struct cftype *cftype, s64 quota_us)
 {
-	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
+	struct task_group *tg = css_tg(css);
+	u64 period_us, burst_us;
+
+	if (quota_us < 0)
+		quota_us = RUNTIME_INF;
+
+	tg_bandwidth(tg, &period_us, NULL, &burst_us);
+	return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
 }
 
-static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
-				   struct cftype *cftype, u64 cfs_burst_us)
+static int cpu_burst_write_u64(struct cgroup_subsys_state *css,
+			       struct cftype *cftype, u64 burst_us)
 {
-	return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
+	struct task_group *tg = css_tg(css);
+	u64 period_us, quota_us;
+
+	tg_bandwidth(tg, &period_us, &quota_us, NULL);
+	return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
@@ -9733,17 +9729,17 @@ static struct cftype cpu_legacy_files[] = {
 	{
 		.name = "cfs_period_us",
 		.read_u64 = cpu_period_read_u64,
-		.write_u64 = cpu_cfs_period_write_u64,
+		.write_u64 = cpu_period_write_u64,
 	},
 	{
 		.name = "cfs_quota_us",
 		.read_s64 = cpu_quota_read_s64,
-		.write_s64 = cpu_cfs_quota_write_s64,
+		.write_s64 = cpu_quota_write_s64,
 	},
 	{
 		.name = "cfs_burst_us",
 		.read_u64 = cpu_burst_read_u64,
-		.write_u64 = cpu_cfs_burst_write_u64,
+		.write_u64 = cpu_burst_write_u64,
 	},
 	{
 		.name = "stat",
@@ -9940,22 +9936,20 @@ static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
 }
 
 /* caller should put the current value in *@periodp before calling */
-static int __maybe_unused cpu_period_quota_parse(char *buf,
-						 u64 *periodp, u64 *quotap)
+static int __maybe_unused cpu_period_quota_parse(char *buf, u64 *period_us_p,
+						 u64 *quota_us_p)
 {
 	char tok[21];	/* U64_MAX */
 
-	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
+	if (sscanf(buf, "%20s %llu", tok, period_us_p) < 1)
 		return -EINVAL;
 
-	*periodp *= NSEC_PER_USEC;
-
-	if (sscanf(tok, "%llu", quotap))
-		*quotap *= NSEC_PER_USEC;
-	else if (!strcmp(tok, "max"))
-		*quotap = RUNTIME_INF;
-	else
-		return -EINVAL;
+	if (sscanf(tok, "%llu", quota_us_p) < 1) {
+		if (!strcmp(tok, "max"))
+			*quota_us_p = RUNTIME_INF;
+		else
+			return -EINVAL;
+	}
 
 	return 0;
 }
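
With this change the cpu.max parser no longer converts to nanoseconds; both values stay in microseconds, and the period keeps its caller-supplied current value when only the quota token is written. A small userspace sketch (an assumption for illustration, not kernel code) of the same sscanf-based parse:

/* Hypothetical userspace mirror of the reworked cpu.max parse. */
#include <stdio.h>
#include <string.h>

#define RUNTIME_INF (~0ULL)

static int parse_cpu_max(const char *buf, unsigned long long *period_us,
			 unsigned long long *quota_us)
{
	char tok[21];	/* enough for U64_MAX */

	if (sscanf(buf, "%20s %llu", tok, period_us) < 1)
		return -1;

	if (sscanf(tok, "%llu", quota_us) < 1) {
		if (!strcmp(tok, "max"))
			*quota_us = RUNTIME_INF;
		else
			return -1;
	}
	return 0;
}

int main(void)
{
	unsigned long long period_us = 100000, quota_us;	/* caller pre-loads the current period */

	parse_cpu_max("50000 100000", &period_us, &quota_us);	/* quota 50ms, period 100ms */
	printf("quota=%llu period=%llu\n", quota_us, period_us);

	parse_cpu_max("max", &period_us, &quota_us);		/* unlimited quota, period unchanged */
	printf("quota=%llu period=%llu\n", quota_us, period_us);
	return 0;
}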
@@ -9975,14 +9969,13 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
 				char *buf, size_t nbytes, loff_t off)
 {
 	struct task_group *tg = css_tg(of_css(of));
-	u64 period = tg_get_cfs_period(tg);
-	u64 burst = tg->cfs_bandwidth.burst;
-	u64 quota;
+	u64 period_us, quota_us, burst_us;
 	int ret;
 
-	ret = cpu_period_quota_parse(buf, &period, &quota);
+	tg_bandwidth(tg, &period_us, NULL, &burst_us);
+	ret = cpu_period_quota_parse(buf, &period_us, &quota_us);
 	if (!ret)
-		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
+		ret = tg_set_bandwidth(tg, period_us, quota_us, burst_us);
 	return ret ?: nbytes;
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
@@ -10019,7 +10012,7 @@ static struct cftype cpu_files[] = {
 		.name = "max.burst",
 		.flags = CFTYPE_NOT_ON_ROOT,
 		.read_u64 = cpu_burst_read_u64,
-		.write_u64 = cpu_cfs_burst_write_u64,
+		.write_u64 = cpu_burst_write_u64,
 	},
 #endif /* CONFIG_CFS_BANDWIDTH */
 #ifdef CONFIG_UCLAMP_TASK_GROUP