@@ -1306,10 +1306,10 @@ static void set_load_weight(struct task_struct *p, bool update_load)
 static DEFINE_MUTEX(uclamp_mutex);
 
 /* Max allowed minimum utilization */
-static unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
+static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
 
 /* Max allowed maximum utilization */
-static unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
+static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
 
 /*
  * By default RT tasks run at the maximum performance point/capacity of the
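Note on the __maybe_unused additions: once the sysctl handler further down moves behind CONFIG_SYSCTL, a CONFIG_SYSCTL=n build can leave these two file-scope statics without any reader, and -Wall would then warn that they are defined but not used. A minimal standalone sketch of that effect follows; it is not taken from the patch, and the macro is only spelled out so the snippet compiles outside the kernel tree (in-tree it comes from <linux/compiler_attributes.h>):

/* Sketch: __maybe_unused expands to the compiler's "unused" attribute. */
#define __maybe_unused __attribute__((__unused__))

/* Without the attribute, -Wall reports "defined but not used" whenever the
 * only reference is compiled out (here: the CONFIG_SYSCTL block).
 */
static unsigned int __maybe_unused example_util_min = 1024;

#ifdef CONFIG_SYSCTL
unsigned int example_read_util_min(void)
{
	return example_util_min;	/* sole reader lives behind CONFIG_SYSCTL */
}
#endif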
@@ -1456,33 +1456,6 @@ static void uclamp_update_util_min_rt_default(struct task_struct *p)
 	task_rq_unlock(rq, p, &rf);
 }
 
-static void uclamp_sync_util_min_rt_default(void)
-{
-	struct task_struct *g, *p;
-
-	/*
-	 * copy_process()                         sysctl_uclamp
-	 *                                          uclamp_min_rt = X;
-	 *   write_lock(&tasklist_lock)             read_lock(&tasklist_lock)
-	 *   // link thread                         smp_mb__after_spinlock()
-	 *   write_unlock(&tasklist_lock)           read_unlock(&tasklist_lock);
-	 *   sched_post_fork()                      for_each_process_thread()
-	 *     __uclamp_sync_rt()                     __uclamp_sync_rt()
-	 *
-	 * Ensures that either sched_post_fork() will observe the new
-	 * uclamp_min_rt or for_each_process_thread() will observe the new
-	 * task.
-	 */
-	read_lock(&tasklist_lock);
-	smp_mb__after_spinlock();
-	read_unlock(&tasklist_lock);
-
-	rcu_read_lock();
-	for_each_process_thread(g, p)
-		uclamp_update_util_min_rt_default(p);
-	rcu_read_unlock();
-}
-
 static inline struct uclamp_se
 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
 {
@@ -1762,6 +1735,11 @@ uclamp_update_active_tasks(struct cgroup_subsys_state *css)
 }
 
 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
+#endif
+
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_UCLAMP_TASK
+#ifdef CONFIG_UCLAMP_TASK_GROUP
 static void uclamp_update_root_tg(void)
 {
 	struct task_group *tg = &root_task_group;
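Reading aid for the new preprocessor guards: together with the two #endif lines added after sysctl_sched_uclamp_handler() in the last hunk below, the sysctl-only pieces end up bracketed roughly as sketched here. This is reconstructed from the hunks in this diff alone, not copied from the resulting file, so treat the labels as assumptions:

/*
 *   static void cpu_util_update_eff(struct cgroup_subsys_state *css);
 *   #endif                              // closes the guard covering the block above
 *
 *   #ifdef CONFIG_SYSCTL
 *   #ifdef CONFIG_UCLAMP_TASK
 *   #ifdef CONFIG_UCLAMP_TASK_GROUP
 *           uclamp_update_root_tg()              // real implementation
 *   #else
 *           uclamp_update_root_tg()              // empty stub (pre-existing #else/#endif)
 *   #endif
 *           uclamp_sync_util_min_rt_default()    // moved here, unchanged, from ~line 1459
 *           sysctl_sched_uclamp_handler()
 *   #endif  // CONFIG_UCLAMP_TASK
 *   #endif  // CONFIG_SYSCTL
 */

The uclamp_sync_util_min_rt_default() removal in the earlier hunk is therefore a move, not a deletion: the same body reappears verbatim inside this CONFIG_SYSCTL region in the next hunk.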
@@ -1779,6 +1757,33 @@ static void uclamp_update_root_tg(void)
 static void uclamp_update_root_tg(void) { }
 #endif
 
+static void uclamp_sync_util_min_rt_default(void)
+{
+	struct task_struct *g, *p;
+
+	/*
+	 * copy_process()                         sysctl_uclamp
+	 *                                          uclamp_min_rt = X;
+	 *   write_lock(&tasklist_lock)             read_lock(&tasklist_lock)
+	 *   // link thread                         smp_mb__after_spinlock()
+	 *   write_unlock(&tasklist_lock)           read_unlock(&tasklist_lock);
+	 *   sched_post_fork()                      for_each_process_thread()
+	 *     __uclamp_sync_rt()                     __uclamp_sync_rt()
+	 *
+	 * Ensures that either sched_post_fork() will observe the new
+	 * uclamp_min_rt or for_each_process_thread() will observe the new
+	 * task.
+	 */
+	read_lock(&tasklist_lock);
+	smp_mb__after_spinlock();
+	read_unlock(&tasklist_lock);
+
+	rcu_read_lock();
+	for_each_process_thread(g, p)
+		uclamp_update_util_min_rt_default(p);
+	rcu_read_unlock();
+}
+
 static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -1843,6 +1848,8 @@ static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
 
 	return result;
 }
+#endif
+#endif
 
 static int uclamp_validate(struct task_struct *p,
 			   const struct sched_attr *attr)
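These hunks do not show how sysctl_sched_uclamp_handler() stays reachable once it sits behind CONFIG_SYSCTL; in this kind of sysctl consolidation the knobs are typically wired up through a ctl_table registered from an __init function in the same file. The sketch below is purely illustrative and not part of this diff: the table and init-function names, the initcall level, and the placement are assumptions, while the procnames, modes, and register_sysctl_init() call follow the kernel's existing /proc/sys/kernel/sched_util_clamp_{min,max} interface:

#ifdef CONFIG_SYSCTL
/* Hypothetical wiring of the existing knobs to the handler above. */
static struct ctl_table uclamp_sysctl_table[] = {
	{
		.procname	= "sched_util_clamp_min",
		.data		= &sysctl_sched_uclamp_util_min,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sysctl_sched_uclamp_handler,
	},
	{
		.procname	= "sched_util_clamp_max",
		.data		= &sysctl_sched_uclamp_util_max,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sysctl_sched_uclamp_handler,
	},
	{ }	/* sentinel */
};

static int __init uclamp_sysctl_init(void)
{
	register_sysctl_init("kernel", uclamp_sysctl_table);
	return 0;
}
late_initcall(uclamp_sysctl_init);
#endif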