32
32
#include <asm/set_memory.h>
33
33
#include <asm/text-patching.h>
34
34
#include <asm/unwind.h>
35
- #include "bpf_jit.h"
36
35
37
36
struct bpf_jit {
38
37
u32 seen ; /* Flags to remember seen eBPF instructions */
@@ -54,6 +53,7 @@ struct bpf_jit {
54
53
int prologue_plt ; /* Start of prologue hotpatch PLT */
55
54
int kern_arena ; /* Pool offset of kernel arena address */
56
55
u64 user_arena ; /* User arena address */
56
+ u32 frame_off ; /* Offset of struct bpf_prog from %r15 */
57
57
};
58
58
59
59
#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
@@ -425,12 +425,26 @@ static void jit_fill_hole(void *area, unsigned int size)
425
425
memset (area , 0 , size );
426
426
}
427
427
428
+ /*
429
+ * Caller-allocated part of the frame.
430
+ * Thanks to packed stack, its otherwise unused initial part can be used for
431
+ * the BPF stack and for the next frame.
432
+ */
433
+ struct prog_frame {
434
+ u64 unused [8 ];
435
+ /* BPF stack starts here and grows towards 0 */
436
+ u32 tail_call_cnt ;
437
+ u32 pad ;
438
+ u64 r6 [10 ]; /* r6 - r15 */
439
+ u64 backchain ;
440
+ } __packed ;
441
+
428
442
/*
429
443
* Save registers from "rs" (register start) to "re" (register end) on stack
430
444
*/
431
445
static void save_regs (struct bpf_jit * jit , u32 rs , u32 re )
432
446
{
433
- u32 off = STK_OFF_R6 + (rs - 6 ) * 8 ;
447
+ u32 off = offsetof( struct prog_frame , r6 ) + (rs - 6 ) * 8 ;
434
448
435
449
if (rs == re )
436
450
/* stg %rs,off(%r15) */
@@ -443,12 +457,9 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
443
457
/*
444
458
* Restore registers from "rs" (register start) to "re" (register end) on stack
445
459
*/
446
- static void restore_regs (struct bpf_jit * jit , u32 rs , u32 re , u32 stack_depth )
460
+ static void restore_regs (struct bpf_jit * jit , u32 rs , u32 re )
447
461
{
448
- u32 off = STK_OFF_R6 + (rs - 6 ) * 8 ;
449
-
450
- if (jit -> seen & SEEN_STACK )
451
- off += STK_OFF + stack_depth ;
462
+ u32 off = jit -> frame_off + offsetof(struct prog_frame , r6 ) + (rs - 6 ) * 8 ;
452
463
453
464
if (rs == re )
454
465
/* lg %rs,off(%r15) */
@@ -492,8 +503,7 @@ static int get_end(u16 seen_regs, int start)
492
503
* Save and restore clobbered registers (6-15) on stack.
493
504
* We save/restore registers in chunks with gap >= 2 registers.
494
505
*/
495
- static void save_restore_regs (struct bpf_jit * jit , int op , u32 stack_depth ,
496
- u16 extra_regs )
506
+ static void save_restore_regs (struct bpf_jit * jit , int op , u16 extra_regs )
497
507
{
498
508
u16 seen_regs = jit -> seen_regs | extra_regs ;
499
509
const int last = 15 , save_restore_size = 6 ;
@@ -516,7 +526,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth,
516
526
if (op == REGS_SAVE )
517
527
save_regs (jit , rs , re );
518
528
else
519
- restore_regs (jit , rs , re , stack_depth );
529
+ restore_regs (jit , rs , re );
520
530
re ++ ;
521
531
} while (re <= last );
522
532
}
@@ -573,20 +583,22 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
573
583
* Emit function prologue
574
584
*
575
585
* Save registers and create stack frame if necessary.
576
- * See stack frame layout description in "bpf_jit.h"!
586
+ * Stack frame layout is described by struct prog_frame.
577
587
*/
578
- static void bpf_jit_prologue (struct bpf_jit * jit , struct bpf_prog * fp ,
579
- u32 stack_depth )
588
+ static void bpf_jit_prologue (struct bpf_jit * jit , struct bpf_prog * fp )
580
589
{
590
+ BUILD_BUG_ON (sizeof (struct prog_frame ) != STACK_FRAME_OVERHEAD );
591
+
581
592
/* No-op for hotpatching */
582
593
/* brcl 0,prologue_plt */
583
594
EMIT6_PCREL_RILC (0xc0040000 , 0 , jit -> prologue_plt );
584
595
jit -> prologue_plt_ret = jit -> prg ;
585
596
586
597
if (!bpf_is_subprog (fp )) {
587
598
/* Initialize the tail call counter in the main program. */
588
- /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
589
- _EMIT6 (0xd703f000 | STK_OFF_TCCNT , 0xf000 | STK_OFF_TCCNT );
599
+ /* xc tail_call_cnt(4,%r15),tail_call_cnt(%r15) */
600
+ _EMIT6 (0xd703f000 | offsetof(struct prog_frame , tail_call_cnt ),
601
+ 0xf000 | offsetof(struct prog_frame , tail_call_cnt ));
590
602
} else {
591
603
/*
592
604
* Skip the tail call counter initialization in subprograms.
@@ -609,7 +621,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
609
621
jit -> seen_regs |= NVREGS ;
610
622
} else {
611
623
/* Save registers */
612
- save_restore_regs (jit , REGS_SAVE , stack_depth ,
624
+ save_restore_regs (jit , REGS_SAVE ,
613
625
fp -> aux -> exception_boundary ? NVREGS : 0 );
614
626
}
615
627
/* Setup literal pool */
@@ -629,13 +641,15 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
629
641
if (is_first_pass (jit ) || (jit -> seen & SEEN_STACK )) {
630
642
/* lgr %w1,%r15 (backchain) */
631
643
EMIT4 (0xb9040000 , REG_W1 , REG_15 );
632
- /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
633
- EMIT4_DISP (0x41000000 , BPF_REG_FP , REG_15 , STK_160_UNUSED );
634
- /* aghi %r15,-STK_OFF */
635
- EMIT4_IMM (0xa70b0000 , REG_15 , - (STK_OFF + stack_depth ));
636
- /* stg %w1,152(%r15) (backchain) */
644
+ /* la %bfp,unused_end(%r15) (BPF frame pointer) */
645
+ EMIT4_DISP (0x41000000 , BPF_REG_FP , REG_15 ,
646
+ offsetofend (struct prog_frame , unused ));
647
+ /* aghi %r15,-frame_off */
648
+ EMIT4_IMM (0xa70b0000 , REG_15 , - jit -> frame_off );
649
+ /* stg %w1,backchain(%r15) */
637
650
EMIT6_DISP_LH (0xe3000000 , 0x0024 , REG_W1 , REG_0 ,
638
- REG_15 , 152 );
651
+ REG_15 ,
652
+ offsetof(struct prog_frame , backchain ));
639
653
}
640
654
}
641
655
@@ -669,13 +683,13 @@ static void call_r1(struct bpf_jit *jit)
669
683
/*
670
684
* Function epilogue
671
685
*/
672
- static void bpf_jit_epilogue (struct bpf_jit * jit , u32 stack_depth )
686
+ static void bpf_jit_epilogue (struct bpf_jit * jit )
673
687
{
674
688
jit -> exit_ip = jit -> prg ;
675
689
/* Load exit code: lgr %r2,%b0 */
676
690
EMIT4 (0xb9040000 , REG_2 , BPF_REG_0 );
677
691
/* Restore registers */
678
- save_restore_regs (jit , REGS_RESTORE , stack_depth , 0 );
692
+ save_restore_regs (jit , REGS_RESTORE , 0 );
679
693
EMIT_JUMP_REG (14 );
680
694
681
695
jit -> prg = ALIGN (jit -> prg , 8 );
@@ -857,7 +871,7 @@ static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags)
857
871
* stack space for the large switch statement.
858
872
*/
859
873
static noinline int bpf_jit_insn (struct bpf_jit * jit , struct bpf_prog * fp ,
860
- int i , bool extra_pass , u32 stack_depth )
874
+ int i , bool extra_pass )
861
875
{
862
876
struct bpf_insn * insn = & fp -> insnsi [i ];
863
877
s32 branch_oc_off = insn -> off ;
@@ -1778,9 +1792,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
1778
1792
* Note 2: We assume that the verifier does not let us call the
1779
1793
* main program, which clears the tail call counter on entry.
1780
1794
*/
1781
- /* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */
1782
- _EMIT6 (0xd203f000 | STK_OFF_TCCNT ,
1783
- 0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth ));
1795
+ /* mvc tail_call_cnt(4,%r15),frame_off+tail_call_cnt(%r15) */
1796
+ _EMIT6 (0xd203f000 | offsetof(struct prog_frame , tail_call_cnt ),
1797
+ 0xf000 | (jit -> frame_off +
1798
+ offsetof(struct prog_frame , tail_call_cnt )));
1784
1799
1785
1800
/* Sign-extend the kfunc arguments. */
1786
1801
if (insn -> src_reg == BPF_PSEUDO_KFUNC_CALL ) {
@@ -1831,10 +1846,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
1831
1846
* goto out;
1832
1847
*/
1833
1848
1834
- if (jit -> seen & SEEN_STACK )
1835
- off = STK_OFF_TCCNT + STK_OFF + stack_depth ;
1836
- else
1837
- off = STK_OFF_TCCNT ;
1849
+ off = jit -> frame_off +
1850
+ offsetof(struct prog_frame , tail_call_cnt );
1838
1851
/* lhi %w0,1 */
1839
1852
EMIT4_IMM (0xa7080000 , REG_W0 , 1 );
1840
1853
/* laal %w1,%w0,off(%r15) */
@@ -1864,7 +1877,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
1864
1877
/*
1865
1878
* Restore registers before calling function
1866
1879
*/
1867
- save_restore_regs (jit , REGS_RESTORE , stack_depth , 0 );
1880
+ save_restore_regs (jit , REGS_RESTORE , 0 );
1868
1881
1869
1882
/*
1870
1883
* goto *(prog->bpf_func + tail_call_start);
@@ -2157,7 +2170,7 @@ static int bpf_set_addr(struct bpf_jit *jit, int i)
2157
2170
* Compile eBPF program into s390x code
2158
2171
*/
2159
2172
static int bpf_jit_prog (struct bpf_jit * jit , struct bpf_prog * fp ,
2160
- bool extra_pass , u32 stack_depth )
2173
+ bool extra_pass )
2161
2174
{
2162
2175
int i , insn_count , lit32_size , lit64_size ;
2163
2176
u64 kern_arena ;
@@ -2166,24 +2179,30 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
2166
2179
jit -> lit64 = jit -> lit64_start ;
2167
2180
jit -> prg = 0 ;
2168
2181
jit -> excnt = 0 ;
2182
+ if (is_first_pass (jit ) || (jit -> seen & SEEN_STACK ))
2183
+ jit -> frame_off = sizeof (struct prog_frame ) -
2184
+ offsetofend (struct prog_frame , unused ) +
2185
+ round_up (fp -> aux -> stack_depth , 8 );
2186
+ else
2187
+ jit -> frame_off = 0 ;
2169
2188
2170
2189
kern_arena = bpf_arena_get_kern_vm_start (fp -> aux -> arena );
2171
2190
if (kern_arena )
2172
2191
jit -> kern_arena = _EMIT_CONST_U64 (kern_arena );
2173
2192
jit -> user_arena = bpf_arena_get_user_vm_start (fp -> aux -> arena );
2174
2193
2175
- bpf_jit_prologue (jit , fp , stack_depth );
2194
+ bpf_jit_prologue (jit , fp );
2176
2195
if (bpf_set_addr (jit , 0 ) < 0 )
2177
2196
return -1 ;
2178
2197
for (i = 0 ; i < fp -> len ; i += insn_count ) {
2179
- insn_count = bpf_jit_insn (jit , fp , i , extra_pass , stack_depth );
2198
+ insn_count = bpf_jit_insn (jit , fp , i , extra_pass );
2180
2199
if (insn_count < 0 )
2181
2200
return -1 ;
2182
2201
/* Next instruction address */
2183
2202
if (bpf_set_addr (jit , i + insn_count ) < 0 )
2184
2203
return -1 ;
2185
2204
}
2186
- bpf_jit_epilogue (jit , stack_depth );
2205
+ bpf_jit_epilogue (jit );
2187
2206
2188
2207
lit32_size = jit -> lit32 - jit -> lit32_start ;
2189
2208
lit64_size = jit -> lit64 - jit -> lit64_start ;
@@ -2259,7 +2278,6 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
2259
2278
*/
2260
2279
struct bpf_prog * bpf_int_jit_compile (struct bpf_prog * fp )
2261
2280
{
2262
- u32 stack_depth = round_up (fp -> aux -> stack_depth , 8 );
2263
2281
struct bpf_prog * tmp , * orig_fp = fp ;
2264
2282
struct bpf_binary_header * header ;
2265
2283
struct s390_jit_data * jit_data ;
@@ -2312,7 +2330,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
2312
2330
* - 3: Calculate program size and addrs array
2313
2331
*/
2314
2332
for (pass = 1 ; pass <= 3 ; pass ++ ) {
2315
- if (bpf_jit_prog (& jit , fp , extra_pass , stack_depth )) {
2333
+ if (bpf_jit_prog (& jit , fp , extra_pass )) {
2316
2334
fp = orig_fp ;
2317
2335
goto free_addrs ;
2318
2336
}
@@ -2326,7 +2344,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
2326
2344
goto free_addrs ;
2327
2345
}
2328
2346
skip_init_ctx :
2329
- if (bpf_jit_prog (& jit , fp , extra_pass , stack_depth )) {
2347
+ if (bpf_jit_prog (& jit , fp , extra_pass )) {
2330
2348
bpf_jit_binary_free (header );
2331
2349
fp = orig_fp ;
2332
2350
goto free_addrs ;
@@ -2646,9 +2664,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
2646
2664
/* stg %r1,backchain_off(%r15) */
2647
2665
EMIT6_DISP_LH (0xe3000000 , 0x0024 , REG_1 , REG_0 , REG_15 ,
2648
2666
tjit -> backchain_off );
2649
- /* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT (%r15) */
2667
+ /* mvc tccnt_off(4,%r15),stack_size+tail_call_cnt(%r15) */
2650
2668
_EMIT6 (0xd203f000 | tjit -> tccnt_off ,
2651
- 0xf000 | (tjit -> stack_size + STK_OFF_TCCNT ));
2669
+ 0xf000 | (tjit -> stack_size +
2670
+ offsetof(struct prog_frame , tail_call_cnt )));
2652
2671
/* stmg %r2,%rN,fwd_reg_args_off(%r15) */
2653
2672
if (nr_reg_args )
2654
2673
EMIT6_DISP_LH (0xeb000000 , 0x0024 , REG_2 ,
@@ -2785,8 +2804,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
2785
2804
(nr_stack_args * sizeof (u64 ) - 1 ) << 16 |
2786
2805
tjit -> stack_args_off ,
2787
2806
0xf000 | tjit -> orig_stack_args_off );
2788
- /* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
2789
- _EMIT6 (0xd203f000 | STK_OFF_TCCNT , 0xf000 | tjit -> tccnt_off );
2807
+ /* mvc tail_call_cnt(4,%r15),tccnt_off(%r15) */
2808
+ _EMIT6 (0xd203f000 | offsetof(struct prog_frame , tail_call_cnt ),
2809
+ 0xf000 | tjit -> tccnt_off );
2790
2810
/* lgr %r1,%r8 */
2791
2811
EMIT4 (0xb9040000 , REG_1 , REG_8 );
2792
2812
/* %r1() */
@@ -2843,8 +2863,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
2843
2863
if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET ))
2844
2864
EMIT6_DISP_LH (0xe3000000 , 0x0004 , REG_2 , REG_0 , REG_15 ,
2845
2865
tjit -> retval_off );
2846
- /* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
2847
- _EMIT6 (0xd203f000 | (tjit -> stack_size + STK_OFF_TCCNT ),
2866
+ /* mvc stack_size+tail_call_cnt(4,%r15),tccnt_off(%r15) */
2867
+ _EMIT6 (0xd203f000 | (tjit -> stack_size +
2868
+ offsetof(struct prog_frame , tail_call_cnt )),
2848
2869
0xf000 | tjit -> tccnt_off );
2849
2870
/* aghi %r15,stack_size */
2850
2871
EMIT4_IMM (0xa70b0000 , REG_15 , tjit -> stack_size );
0 commit comments