 /* From include/linux/filter.h */
 #define MAX_BPF_STACK 512
 
-#if defined(__TARGET_ARCH_x86)
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
 
 struct elem {
 	struct bpf_timer t;
@@ -30,6 +30,18 @@ __jited(" movabsq $0x{{.*}}, %r9")
 __jited(" addq %gs:{{.*}}, %r9")
 __jited(" movl $0x2a, %edi")
 __jited(" movq %rdi, -0x100(%r9)")
+__arch_arm64
+__jited(" stp x25, x27, [sp, {{.*}}]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited("...")
+__jited(" ldp x25, x27, [sp], {{.*}}")
 __naked void private_stack_single_prog(void)
 {
 	asm volatile ("			\
@@ -45,6 +57,9 @@ __description("No private stack")
 __success
 __arch_x86_64
 __jited(" subq $0x8, %rsp")
+__arch_arm64
+__jited(" mov x25, sp")
+__jited(" sub sp, sp, #0x10")
 __naked void no_private_stack_nested(void)
 {
 	asm volatile ("			\
@@ -81,6 +96,19 @@ __jited(" pushq %r9")
 __jited(" callq 0x{{.*}}")
 __jited(" popq %r9")
 __jited(" xorl %eax, %eax")
+__arch_arm64
+__jited(" stp x25, x27, [sp, {{.*}}]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" bl {{.*}}")
+__jited("...")
+__jited(" ldp x25, x27, [sp], {{.*}}")
 __naked void private_stack_nested_1(void)
 {
 	asm volatile ("			\
@@ -131,6 +159,24 @@ __jited(" movq %rdi, -0x200(%r9)")
 __jited(" pushq %r9")
 __jited(" callq")
 __jited(" popq %r9")
+__arch_arm64
+__jited("func #1")
+__jited("...")
+__jited(" stp x25, x27, [sp, {{.*}}]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" mov x7, #0x0")
+__jited(" ldp x25, x27, [sp], {{.*}}")
 __naked void private_stack_callback(void)
 {
 	asm volatile ("			\
@@ -154,6 +200,28 @@ __arch_x86_64
 __jited(" pushq %r9")
 __jited(" callq")
 __jited(" popq %r9")
+__arch_arm64
+__jited(" stp x29, x30, [sp, #-0x10]!")
+__jited(" mov x29, sp")
+__jited(" stp xzr, x26, [sp, #-0x10]!")
+__jited(" mov x26, sp")
+__jited(" stp x19, x20, [sp, #-0x10]!")
+__jited(" stp x21, x22, [sp, #-0x10]!")
+__jited(" stp x23, x24, [sp, #-0x10]!")
+__jited(" stp x25, x26, [sp, #-0x10]!")
+__jited(" stp x27, x28, [sp, #-0x10]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" mov x0, #0x0")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" ldp x27, x28, [sp], #0x10")
 int private_stack_exception_main_prog(void)
 {
 	asm volatile ("			\
@@ -179,6 +247,19 @@ __jited(" movq %rdi, -0x200(%r9)")
 __jited(" pushq %r9")
 __jited(" callq")
 __jited(" popq %r9")
+__arch_arm64
+__jited(" stp x27, x28, [sp, #-0x10]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" ldp x27, x28, [sp], #0x10")
 int private_stack_exception_sub_prog(void)
 {
 	asm volatile ("			\
@@ -220,6 +301,10 @@ __description("Private stack, async callback, not nested")
 __success __retval(0)
 __arch_x86_64
 __jited(" movabsq $0x{{.*}}, %r9")
+__arch_arm64
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
 int private_stack_async_callback_1(void)
 {
 	struct bpf_timer *arr_timer;
@@ -241,6 +326,8 @@ __description("Private stack, async callback, potential nesting")
 __success __retval(0)
 __arch_x86_64
 __jited(" subq $0x100, %rsp")
+__arch_arm64
+__jited(" sub sp, sp, #0x100")
 int private_stack_async_callback_2(void)
 {
 	struct bpf_timer *arr_timer;