
Commit 18bc72a

Lin Yikai authored and Kernel Patches Daemon committed
selftests/bpf: Add selftests
Add a test to verify the cpuidle governor ext's load, attach, and kfuncs. This patch also provides a simple demonstration of `cpuidle_gov_ext_ops` usage:

- In `ops.init()`, we set the "rating" value to 60, significantly exceeding other governors' ratings, to activate `cpuidle_gov_ext`.
- For specific scenarios (e.g., screen-off music playback on mobile devices), we can enable "expect_deeper" to transition to deeper idle states.

This implementation serves as a foundation, not a final solution. It leaves room for further exploration of cpuidle strategies optimized for various usage scenarios.

Signed-off-by: Lin Yikai <[email protected]>
1 parent 4d3b29a commit 18bc72a

File tree: 2 files changed, +236 −0 lines changed

test_cpuidle_gov_ext.c: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * test_cpuidle_gov_ext.c - test cpuidle governor ext's load, attach and kfuncs
 *
 * Copyright (C) Yikai Lin <[email protected]>
 */

#include <test_progs.h>
#include "cpuidle_gov_ext.skel.h"

void test_test_cpuidle_gov_ext(void)
{
	struct cpuidle_gov_ext *skel;
	int err;

	skel = cpuidle_gov_ext__open_and_load();
	if (!ASSERT_OK_PTR(skel, "cpuidle_gov_ext__open_and_load"))
		return;

	skel->bss->expect_deeper = 1;
	err = cpuidle_gov_ext__attach(skel);
	if (!ASSERT_OK(err, "cpuidle_gov_ext__attach"))
		goto cleanup;

cleanup:
	cpuidle_gov_ext__destroy(skel);
}
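Once the selftests are built, this test would typically be exercised through the BPF test_progs runner; the exact invocation below is an assumption based on the standard selftests layout (test_progs selects tests by substring match):

cd tools/testing/selftests/bpf
make
./test_progs -t cpuidle_gov_ext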
cpuidle_gov_ext.c: 208 additions & 0 deletions
@@ -0,0 +1,208 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * cpuidle_gov_ext.c - test to use cpuidle governor ext by bpf
 *
 * Copyright (C) Yikai Lin <[email protected]>
 */

#include "vmlinux.h"

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
#endif
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

#define ALPHA 10
#define ALPHA_SCALE 100
#define FIT_FACTOR 90

/*
 * For some low-power scenarios,
 * such as the screen-off scenario of mobile devices
 * (which will be determined by the user-space BPF program),
 * we aim to choose a deeper state.
 * At this point, we will somewhat disregard the impact on CPU performance.
 */
int expect_deeper = 0;

int bpf_cpuidle_ext_gov_update_rating(unsigned int rating) __ksym __weak;
s64 bpf_cpuidle_ext_gov_latency_req(unsigned int cpu) __ksym __weak;
s64 bpf_tick_nohz_get_sleep_length(void) __ksym __weak;

struct cpuidle_gov_data {
	int cpu;
	int last_idx;
	u64 last_pred;
	u64 last_duration;
	u64 next_pred;
};

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, struct cpuidle_gov_data);
} cpuidle_gov_data_map SEC(".maps");

static u64 calculate_ewma(u64 last, u64 new, u32 alpha, u32 alpha_scale)
{
	return (alpha * new + (alpha_scale - alpha) * last) / alpha_scale;
}

static void update_predict_duration(struct cpuidle_gov_data *data,
		struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int idx;
	struct cpuidle_state target;

	if (!data || !drv || !dev)
		return;
	idx = data->last_idx;
	data->last_duration = dev->last_residency_ns;
	if (idx > 0) {
		bpf_core_read(&target, sizeof(target), &drv->states[idx]);
		/* Subtract the exit latency (in ns) from the measured residency. */
		if (data->last_duration > target.exit_latency_ns)
			data->last_duration -= target.exit_latency_ns;
	}
	data->last_pred = data->next_pred;
	data->next_pred = calculate_ewma(data->next_pred,
			data->last_duration, ALPHA, ALPHA_SCALE);
}

/* Enable the cpuidle governor */
SEC("struct_ops.s/enable")
int BPF_PROG(bpf_cpuidle_enable, struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	u32 key = 0;
	struct cpuidle_gov_data *data;

	bpf_printk("cpuidle_gov_ext: enabled");
	data = bpf_map_lookup_percpu_elem(&cpuidle_gov_data_map, &key, dev->cpu);
	if (!data)
		return 0;

	__builtin_memset(data, 0, sizeof(struct cpuidle_gov_data));
	data->cpu = dev->cpu;
	return 0;
}

/* Disable the cpuidle governor */
SEC("struct_ops.s/disable")
void BPF_PROG(bpf_cpuidle_disable, struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	bpf_printk("cpuidle_gov_ext: disabled");
}

/* Select the next idle state */
SEC("struct_ops.s/select")
int BPF_PROG(bpf_cpuidle_select, struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	u32 key = 0;
	s64 delta, latency_req, residency_ns;
	int i, selected;
	unsigned long long disable = 0;
	struct cpuidle_gov_data *data;
	struct cpuidle_state cs;

	data = bpf_map_lookup_percpu_elem(&cpuidle_gov_data_map, &key, dev->cpu);
	if (!data) {
		bpf_printk("cpuidle_gov_ext: [%s] cpuidle_gov_data_map is NULL\n", __func__);
		return 0;
	}
	latency_req = bpf_cpuidle_ext_gov_latency_req(dev->cpu);
	delta = bpf_tick_nohz_get_sleep_length();

	update_predict_duration(data, drv, dev);

	/* Walk from the deepest state down to the shallowest acceptable one. */
	for (i = ARRAY_SIZE(drv->states) - 1; i > 0; i--) {
		if (i > drv->state_count - 1)
			continue;
		bpf_core_read(&cs, sizeof(cs), &drv->states[i]);
		bpf_core_read(&disable, sizeof(disable), &dev->states_usage[i]);

		if (disable)
			continue;

		if (latency_req < cs.exit_latency_ns)
			continue;

		if (delta < cs.target_residency_ns)
			continue;

		if (data->next_pred / FIT_FACTOR * ALPHA_SCALE < cs.target_residency_ns)
			continue;

		break;
	}
	residency_ns = drv->states[i].target_residency_ns;
	if (expect_deeper &&
	    i < drv->state_count - 1 &&
	    data->last_pred >= residency_ns &&
	    data->next_pred < residency_ns &&
	    data->next_pred / FIT_FACTOR * ALPHA_SCALE >= residency_ns &&
	    data->next_pred / FIT_FACTOR * ALPHA_SCALE >= data->last_duration &&
	    delta > residency_ns) {
		i++;
	}

	selected = i;
	return selected;
}

/* Enable or disable the scheduling tick after selecting a cpuidle state */
SEC("struct_ops.s/set_stop_tick")
bool BPF_PROG(bpf_cpuidle_set_stop_tick)
{
	return false;
}

/* Reflect function called after entering an idle state */
SEC("struct_ops.s/reflect")
void BPF_PROG(bpf_cpuidle_reflect, struct cpuidle_device *dev, int index)
{
	u32 key = 0;
	struct cpuidle_gov_data *data;

	data = bpf_map_lookup_percpu_elem(&cpuidle_gov_data_map, &key, dev->cpu);
	if (!data) {
		bpf_printk("cpuidle_gov_ext: [%s] cpuidle_gov_data_map is NULL\n", __func__);
		return;
	}
	data->last_idx = index;
}

/* Initialize the BPF cpuidle governor */
SEC("struct_ops.s/init")
int BPF_PROG(bpf_cpuidle_init)
{
	int ret = bpf_cpuidle_ext_gov_update_rating(60);
	return ret;
}

/* Cleanup after the BPF cpuidle governor */
SEC("struct_ops.s/exit")
void BPF_PROG(bpf_cpuidle_exit) { }

/* Struct_ops linkage for cpuidle governor */
SEC(".struct_ops.link")
struct cpuidle_gov_ext_ops ops = {
	.enable = (void *)bpf_cpuidle_enable,
	.disable = (void *)bpf_cpuidle_disable,
	.select = (void *)bpf_cpuidle_select,
	.set_stop_tick = (void *)bpf_cpuidle_set_stop_tick,
	.reflect = (void *)bpf_cpuidle_reflect,
	.init = (void *)bpf_cpuidle_init,
	.exit = (void *)bpf_cpuidle_exit,
	.name = "BPF_cpuidle_gov"
};
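
As a quick illustration of the prediction math above (not part of the patch): with ALPHA = 10 and ALPHA_SCALE = 100, calculate_ewma() weights history at 90% and the newest observed sleep duration at 10%, and select() only admits a state whose target_residency_ns fits within next_pred scaled up by ALPHA_SCALE / FIT_FACTOR (roughly 11% headroom). A minimal user-space sketch of that arithmetic, using hypothetical input values:

#include <stdio.h>

/* Mirrors calculate_ewma() from the BPF program: 10% new sample, 90% history. */
static unsigned long long ewma(unsigned long long last, unsigned long long new_ns)
{
	return (10ULL * new_ns + 90ULL * last) / 100ULL;
}

int main(void)
{
	/* Hypothetical inputs: previous prediction 1,000,000 ns, latest sleep 2,000,000 ns. */
	unsigned long long pred = ewma(1000000ULL, 2000000ULL); /* = 1,100,000 ns */

	/* select() admits a state when pred / FIT_FACTOR * ALPHA_SCALE >= target_residency_ns. */
	printf("next_pred = %llu ns, fit threshold = %llu ns\n",
	       pred, pred / 90ULL * 100ULL);
	return 0;
}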
