Skip to content

Commit 5842738

Browse files
Lin YikaiKernel Patches Daemon
authored and committed
selftests/bpf: Add selftests for cpuidle_gov_ext
Add test to verify cpuidle governor ext's load, attach, and kfuncs. This patch also provides a simple demonstration of `cpuidle_gov_ext_ops` usage: - In `ops.init()`, we set the "rating" value to 60 - significantly exceeding other governors' ratings - to activate `cpuidle_gov_ext`. - For specific scenarios (e.g., screen-off music playback on mobile devices), we can enable "expect_deeper" to transition to deeper idle states. This implementation serves as a foundation, not a final solution. We can further explore cpuidle strategies optimized for various usage scenarios. Test Results ----------- :~/workplace/bpf/x86/submit/bpf_next/tools/testing/selftests/bpf$ make -j4 :$ sudo ./test_progs -t test_cpuidle_gov_ext #449 test_cpuidle_gov_ext: OK Summary: 1/0 PASSED, 0 SKIPPED, 0 FAILED Additionally, the kernel log shows: $sudo cat /dev/kmsg 6,911,10997439785,-; cpuidle: using governor ext 6,913,11010384887,-; cpuidle: using governor menu After `cpuidle_gov_ext` exits, the system will restore the previous governor. Signed-off-by: Lin Yikai <[email protected]>
1 parent 3c961b0 commit 5842738

File tree

3 files changed

+241
-0
lines changed

3 files changed

+241
-0
lines changed
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/*
3+
* test_cpuidle_gov_ext.c - test cpuidle governor ext's load, attach and kfuncs
4+
*
5+
* Copyright (C) Yikai Lin <[email protected]>
6+
*/
7+
8+
#include <test_progs.h>
9+
#include "cpuidle_gov_ext.skel.h"
10+
11+
void test_test_cpuidle_gov_ext(void)
12+
{
13+
struct cpuidle_gov_ext *skel;
14+
int err;
15+
16+
skel = cpuidle_gov_ext__open_and_load();
17+
if (!ASSERT_OK_PTR(skel, "cpuidle_gov_ext__open_and_load"))
18+
return;
19+
20+
skel->bss->expect_deeper = 1;
21+
err = cpuidle_gov_ext__attach(skel);
22+
if (!ASSERT_OK(err, "cpuidle_gov_ext__attach"))
23+
goto cleanup;
24+
25+
cleanup:
26+
cpuidle_gov_ext__destroy(skel);
27+
}
28+
Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) Yikai Lin <[email protected]>
 */

#ifndef _CPUIDLE_COMMON_H
#define _CPUIDLE_COMMON_H

/*
 * kfuncs exported by the kernel's cpuidle ext governor.  Declared __weak so
 * the program still loads on kernels that do not provide them.
 */

/* Set the ext governor's rating; a rating above all other governors' activates it. */
int bpf_cpuidle_ext_gov_update_rating(unsigned int rating) __ksym __weak;
/* Latency requirement (ns) currently imposed on @cpu (PM QoS). */
s64 bpf_cpuidle_ext_gov_latency_req(unsigned int cpu) __ksym __weak;
/* Expected sleep length (ns) until the next timer event. */
s64 bpf_tick_nohz_get_sleep_length(void) __ksym __weak;

#endif /* _CPUIDLE_COMMON_H */
Lines changed: 200 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,200 @@
1+
// SPDX-License-Identifier: GPL-2.0
/*
 * cpuidle_gov_ext.c - test to use cpuidle governor ext by bpf
 *
 * Copyright (C) Yikai Lin <[email protected]>
 */

#include "vmlinux.h"

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

#include "bpf_misc.h"
#include "cpuidle_common.h"

char LICENSE[] SEC("license") = "GPL";

/* EWMA weight of a new sample: ALPHA / ALPHA_SCALE = 10% new, 90% history. */
#define ALPHA 10
#define ALPHA_SCALE 100
/* Safety margin applied to the prediction when comparing against residency. */
#define FIT_FACTOR 90

#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
#endif
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
29+
30+
/*
 * For some low-power scenarios, such as the screen-off scenario of mobile
 * devices (to be determined by a user-space BPF program), we aim to choose
 * a deeper idle state, somewhat disregarding the impact on CPU performance.
 * Set from user space via the skeleton's .bss before attach.
 */
int expect_deeper = 0;
38+
39+
/* Per-CPU governor bookkeeping used to predict the next idle duration. */
struct cpuidle_gov_data {
	int cpu;		/* CPU this entry belongs to */
	int last_idx;		/* idle-state index entered last time (from reflect) */
	u64 last_pred;		/* previous value of next_pred */
	u64 last_duration;	/* measured duration (ns) of the last idle period */
	u64 next_pred;		/* EWMA prediction (ns) of the next idle duration */
};

/* One cpuidle_gov_data per CPU, keyed by the constant index 0. */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, struct cpuidle_gov_data);
} cpuidle_gov_data_map SEC(".maps");
53+
54+
/*
 * Exponentially weighted moving average:
 *   (alpha * new + (alpha_scale - alpha) * last) / alpha_scale
 * i.e. the new sample contributes alpha/alpha_scale of the result.
 */
static u64 calculate_ewma(u64 last, u64 new, u32 alpha, u32 alpha_scale)
{
	u64 weighted_new = (u64)alpha * new;
	u64 weighted_old = (u64)(alpha_scale - alpha) * last;

	return (weighted_new + weighted_old) / alpha_scale;
}
58+
59+
static void update_predict_duration(struct cpuidle_gov_data *data,
60+
struct cpuidle_driver *drv, struct cpuidle_device *dev)
61+
{
62+
int idx;
63+
struct cpuidle_state target;
64+
65+
if (!data || !drv || !dev)
66+
return;
67+
idx = data->last_idx;
68+
data->last_duration = dev->last_residency_ns;
69+
if (idx > 0) {
70+
bpf_core_read(&target, sizeof(target), &drv->states[idx]);
71+
if (data->last_duration > target.exit_latency)
72+
data->last_duration -= target.exit_latency;
73+
}
74+
data->last_pred = data->next_pred;
75+
data->next_pred = calculate_ewma(data->next_pred,
76+
data->last_duration, ALPHA, ALPHA_SCALE);
77+
}
78+
79+
/* Enable the cpuidle governor */
80+
SEC("struct_ops.s/enable")
81+
int BPF_PROG(bpf_cpuidle_enable, struct cpuidle_driver *drv, struct cpuidle_device *dev)
82+
{
83+
u32 key = 0;
84+
struct cpuidle_gov_data *data;
85+
86+
bpf_printk("cpuidle_gov_ext: enabled");
87+
data = bpf_map_lookup_percpu_elem(&cpuidle_gov_data_map, &key, dev->cpu);
88+
if (!data)
89+
return 0;
90+
91+
__builtin_memset(data, 0, sizeof(struct cpuidle_gov_data));
92+
data->cpu = dev->cpu;
93+
return 0;
94+
}
95+
96+
/* Disable the cpuidle governor */
97+
SEC("struct_ops.s/disable")
98+
void BPF_PROG(bpf_cpuidle_disable, struct cpuidle_driver *drv, struct cpuidle_device *dev)
99+
{
100+
bpf_printk("cpuidle_gov_ext: disabled");
101+
}
102+
103+
/*
 * Select the next idle state for @dev.
 *
 * Scans states from deepest to shallowest and picks the first one that is
 * enabled, within the CPU's latency requirement, and whose target residency
 * fits both the expected sleep length and the scaled EWMA prediction.
 * Falls back to state 0 if nothing deeper qualifies.  Returns the chosen
 * state index.
 */
SEC("struct_ops.s/select")
int BPF_PROG(bpf_cpuidle_select, struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	u32 key = 0;
	s64 delta, latency_req, residency_ns;
	int i;
	unsigned long long disable;
	struct cpuidle_gov_data *data;
	struct cpuidle_state *cs;

	data = bpf_map_lookup_percpu_elem(&cpuidle_gov_data_map, &key, dev->cpu);
	if (!data) {
		bpf_printk("cpuidle_gov_ext: [%s] cpuidle_gov_data_map is NULL\n", __func__);
		return 0;
	}
	/* PM QoS latency bound (ns) and expected sleep length (ns) for this CPU. */
	latency_req = bpf_cpuidle_ext_gov_latency_req(dev->cpu);
	delta = bpf_tick_nohz_get_sleep_length();

	/* Fold the previous idle period into the prediction before selecting. */
	update_predict_duration(data, drv, dev);
	/* Iterate over the fixed-size array; skip slots beyond state_count. */
	for (i = ARRAY_SIZE(drv->states)-1; i > 0; i--) {
		if (i >= drv->state_count)
			continue;
		cs = &drv->states[i];
		disable = dev->states_usage[i].disable;
		if (disable)
			continue;
		/* State too expensive to exit for the current latency bound. */
		if (latency_req < cs->exit_latency_ns)
			continue;

		/* Timer fires before this state would pay off. */
		if (delta < cs->target_residency_ns)
			continue;

		/*
		 * Scaled-up prediction (pred / 90 * 100 ~= pred * 1.11) must
		 * still cover the residency.  NOTE(review): dividing before
		 * multiplying loses up to FIT_FACTOR-1 ns of precision;
		 * presumably negligible at ns scale — confirm intent.
		 */
		if (data->next_pred / FIT_FACTOR * ALPHA_SCALE < cs->target_residency_ns)
			continue;

		break;
	}
	residency_ns = drv->states[i].target_residency_ns;
	/*
	 * Optional low-power bias: if the prediction just dipped below this
	 * state's residency but is within the FIT_FACTOR margin, and a deeper
	 * enabled state exists that the sleep length still covers, go one
	 * state deeper.
	 */
	if (expect_deeper &&
		i <= drv->state_count-2 &&
		!dev->states_usage[i+1].disable &&
		data->last_pred >= residency_ns &&
		data->next_pred < residency_ns &&
		data->next_pred / FIT_FACTOR * ALPHA_SCALE >= residency_ns &&
		data->next_pred / FIT_FACTOR * ALPHA_SCALE >= data->last_duration &&
		delta > residency_ns) {
		i++;
	}

	return i;
}
155+
156+
/*
 * Decide whether to stop the scheduling tick after selecting an idle state.
 * Always keep the tick running (return false).
 */
SEC("struct_ops.s/set_stop_tick")
bool BPF_PROG(bpf_cpuidle_set_stop_tick)
{
	return false;
}
162+
163+
/* Reflect function called after entering an idle state */
164+
SEC("struct_ops.s/reflect")
165+
void BPF_PROG(bpf_cpuidle_reflect, struct cpuidle_device *dev, int index)
166+
{
167+
u32 key = 0;
168+
struct cpuidle_gov_data *data;
169+
170+
data = bpf_map_lookup_percpu_elem(&cpuidle_gov_data_map, &key, dev->cpu);
171+
if (!data) {
172+
bpf_printk("cpuidle_gov_ext: [%s] cpuidle_gov_data_map is NULL\n", __func__);
173+
return;
174+
}
175+
data->last_idx = index;
176+
}
177+
178+
/*
 * Initialize the BPF cpuidle governor.
 * Rating 60 exceeds the other governors' ratings, which activates
 * cpuidle_gov_ext (per the commit description).
 */
SEC("struct_ops.s/init")
int BPF_PROG(bpf_cpuidle_init)
{
	return bpf_cpuidle_ext_gov_update_rating(60);
}
184+
185+
/* Cleanup after the BPF cpuidle governor; nothing to release. */
SEC("struct_ops.s/exit")
void BPF_PROG(bpf_cpuidle_exit) { }
188+
189+
/*
 * Struct_ops linkage for the cpuidle governor: wires the BPF programs above
 * into the kernel's cpuidle_gov_ext_ops callback table.
 */
SEC(".struct_ops.link")
struct cpuidle_gov_ext_ops ops = {
	.enable = (void *)bpf_cpuidle_enable,
	.disable = (void *)bpf_cpuidle_disable,
	.select = (void *)bpf_cpuidle_select,
	.set_stop_tick = (void *)bpf_cpuidle_set_stop_tick,
	.reflect = (void *)bpf_cpuidle_reflect,
	.init = (void *)bpf_cpuidle_init,
	.exit = (void *)bpf_cpuidle_exit,
	.name = "BPF_cpuidle_gov"
};

0 commit comments

Comments
 (0)