@@ -3394,6 +3394,19 @@ static inline bool update_other_load_avgs(struct rq *rq) { return false; }
 
 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
 
+/*
+ * When uclamp is compiled in, the aggregation at rq level is 'turned off'
+ * by default in the fast path and only gets turned on once userspace performs
+ * an operation that requires it.
+ *
+ * Returns true if userspace opted-in to use uclamp and aggregation at rq level
+ * hence is active.
+ */
+static inline bool uclamp_is_used(void)
+{
+	return static_branch_likely(&sched_uclamp_used);
+}
+
 static inline unsigned long uclamp_rq_get(struct rq *rq,
					   enum uclamp_id clamp_id)
 {
@@ -3417,7 +3430,7 @@ static inline bool uclamp_rq_is_capped(struct rq *rq)
 	unsigned long rq_util;
 	unsigned long max_util;
 
-	if (!static_branch_likely(&sched_uclamp_used))
+	if (!uclamp_is_used())
 		return false;
 
 	rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);
@@ -3426,19 +3439,6 @@ static inline bool uclamp_rq_is_capped(struct rq *rq)
 	return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util;
 }
 
-/*
- * When uclamp is compiled in, the aggregation at rq level is 'turned off'
- * by default in the fast path and only gets turned on once userspace performs
- * an operation that requires it.
- *
- * Returns true if userspace opted-in to use uclamp and aggregation at rq level
- * hence is active.
- */
-static inline bool uclamp_is_used(void)
-{
-	return static_branch_likely(&sched_uclamp_used);
-}
-
 #define for_each_clamp_id(clamp_id) \
 	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
 
0 commit comments