@@ -28,7 +28,6 @@
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCCNT_PTR (MAX_BPF_JIT_REG + 2)
 #define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
-#define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
 #define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
 
 #define check_imm(bits, imm) do { \
@@ -67,7 +66,6 @@ static const int bpf2a64[] = {
	[TCCNT_PTR] = A64_R(26),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
-	[FP_BOTTOM] = A64_R(27),
	/* callee saved register for kern_vm_start address */
	[ARENA_VM_START] = A64_R(28),
 };
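
For orientation, a summary of the register assignments around this hunk; the BPF_REG_FP to x25 mapping is assumed from the "Restore fs (x25)" epilogue comment further down:

/* Registers relevant to this change, per the bpf2a64 map:
 *   BPF_REG_FP     -> x25  (BPF frame pointer, assumed)
 *   TCCNT_PTR      -> x26  (tail-call counter pointer)
 *   FP_BOTTOM      -> x27  (removed; x27 is no longer reserved)
 *   ARENA_VM_START -> x28  (kern_vm_start address)
 */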
@@ -81,7 +79,6 @@ struct jit_ctx {
	__le32 *image;
	__le32 *ro_image;
	u32 stack_size;
-	int fpb_offset;
	u64 user_vm_start;
 };
 
@@ -330,7 +327,6 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
-	const u8 fpb = bpf2a64[FP_BOTTOM];
	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
	const int idx0 = ctx->idx;
	int cur_offset;
@@ -381,7 +377,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
		emit(A64_PUSH(r6, r7, A64_SP), ctx);
		emit(A64_PUSH(r8, r9, A64_SP), ctx);
		prepare_bpf_tail_call_cnt(ctx);
-		emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
+		emit(A64_PUSH(A64_R(27), A64_R(28), A64_SP), ctx);
	} else {
		/*
		 * Exception callback receives FP of Main Program as third
@@ -427,8 +423,6 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
		emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx);
	}
 
-	emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx);
-
	/* Stack must be multiples of 16B */
	ctx->stack_size = round_up(prog->aux->stack_depth, 16);
 
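
With fpb gone, the prologue no longer materializes a second pointer below the BPF stack. The rewrite instead leans on the fixed distance between the BPF frame pointer and the arm64 stack pointer; a sketch of that invariant, assuming the usual prologue sequence of setting FP to SP before carving out the stack:

/*
 * With stack_size = round_up(prog->aux->stack_depth, 16):
 *
 *   mov x25, sp                    // BPF FP := SP (assumed sequence)
 *   sub sp, sp, #ctx->stack_size   // carve out the BPF stack
 *
 * the program body sees FP == SP + stack_size, so every FP-relative
 * access can be rewritten SP-relative:
 *
 *   FP + off == SP + (off + stack_size)
 *
 * For off in [-stack_size, 0), the adjusted offset is non-negative,
 * which is the property the old fpb pointer existed to provide for
 * the unsigned-immediate STR/LDR encodings.
 */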
@@ -745,7 +739,6 @@ static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 ptr = bpf2a64[TCCNT_PTR];
-	const u8 fpb = bpf2a64[FP_BOTTOM];
 
	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
@@ -760,7 +753,7 @@ static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
	emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx);
 
	/* Restore x27 and x28 */
-	emit(A64_POP(fpb, A64_R(28), A64_SP), ctx);
+	emit(A64_POP(A64_R(27), A64_R(28), A64_SP), ctx);
	/* Restore fs (x25) and x26 */
	emit(A64_POP(ptr, fp, A64_SP), ctx);
	emit(A64_POP(ptr, fp, A64_SP), ctx);
@@ -887,7 +880,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const u8 fp = bpf2a64[BPF_REG_FP];
-	const u8 fpb = bpf2a64[FP_BOTTOM];
	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
@@ -1339,9 +1331,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx);
			src = tmp2;
		}
-		if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
-			src_adj = fpb;
-			off_adj = off + ctx->fpb_offset;
+		if (src == fp) {
+			src_adj = A64_SP;
+			off_adj = off + ctx->stack_size;
		} else {
			src_adj = src;
			off_adj = off;
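
A worked example of the new path (illustrative values; the r0 to x7 mapping is the JIT's standard one). Note that the BPF_PROBE_MEM32 guard is no longer needed: arena accesses already replaced src with tmp2 just above, so src == fp cannot hold for them.

/*
 * Say prog->aux->stack_depth = 24, so ctx->stack_size = 32.
 * The BPF load   r0 = *(u64 *)(r10 - 8)   has src == fp, off = -8:
 *
 *   old:  ldr x7, [x27, #(-8 + fpb_offset)]  // via fpb, scan-dependent
 *   new:  ldr x7, [sp, #24]                  // -8 + 32 = 24, static
 */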
@@ -1432,9 +1424,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
			dst = tmp2;
		}
-		if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
-			dst_adj = fpb;
-			off_adj = off + ctx->fpb_offset;
+		if (dst == fp) {
+			dst_adj = A64_SP;
+			off_adj = off + ctx->stack_size;
		} else {
			dst_adj = dst;
			off_adj = off;
@@ -1494,9 +1486,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
			dst = tmp2;
		}
-		if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
-			dst_adj = fpb;
-			off_adj = off + ctx->fpb_offset;
+		if (dst == fp) {
+			dst_adj = A64_SP;
+			off_adj = off + ctx->stack_size;
		} else {
			dst_adj = dst;
			off_adj = off;
@@ -1565,79 +1557,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
	return 0;
 }
 
-/*
- * Return 0 if FP may change at runtime, otherwise find the minimum negative
- * offset to FP, converts it to positive number, and align down to 8 bytes.
- */
-static int find_fpb_offset(struct bpf_prog *prog)
-{
-	int i;
-	int offset = 0;
-
-	for (i = 0; i < prog->len; i++) {
-		const struct bpf_insn *insn = &prog->insnsi[i];
-		const u8 class = BPF_CLASS(insn->code);
-		const u8 mode = BPF_MODE(insn->code);
-		const u8 src = insn->src_reg;
-		const u8 dst = insn->dst_reg;
-		const s32 imm = insn->imm;
-		const s16 off = insn->off;
-
-		switch (class) {
-		case BPF_STX:
-		case BPF_ST:
-			/* fp holds atomic operation result */
-			if (class == BPF_STX && mode == BPF_ATOMIC &&
-			    ((imm == BPF_XCHG ||
-			      imm == (BPF_FETCH | BPF_ADD) ||
-			      imm == (BPF_FETCH | BPF_AND) ||
-			      imm == (BPF_FETCH | BPF_XOR) ||
-			      imm == (BPF_FETCH | BPF_OR)) &&
-			     src == BPF_REG_FP))
-				return 0;
-
-			if (mode == BPF_MEM && dst == BPF_REG_FP &&
-			    off < offset)
-				offset = insn->off;
-			break;
-
-		case BPF_JMP32:
-		case BPF_JMP:
-			break;
-
-		case BPF_LDX:
-		case BPF_LD:
-			/* fp holds load result */
-			if (dst == BPF_REG_FP)
-				return 0;
-
-			if (class == BPF_LDX && mode == BPF_MEM &&
-			    src == BPF_REG_FP && off < offset)
-				offset = off;
-			break;
-
-		case BPF_ALU:
-		case BPF_ALU64:
-		default:
-			/* fp holds ALU result */
-			if (dst == BPF_REG_FP)
-				return 0;
-		}
-	}
-
-	if (offset < 0) {
-		/*
-		 * safely be converted to a positive 'int', since insn->off
-		 * is 's16'
-		 */
-		offset = -offset;
-		/* align down to 8 bytes */
-		offset = ALIGN_DOWN(offset, 8);
-	}
-
-	return offset;
-}
-
 static int build_body(struct jit_ctx *ctx, bool extra_pass)
 {
	const struct bpf_prog *prog = ctx->prog;
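
For context, what the deleted scan computed (a paraphrase of the code removed above):

/*
 * find_fpb_offset() walked every instruction to find the most
 * negative FP-relative offset, returning 0 whenever the program
 * could overwrite BPF_REG_FP at runtime (as the destination of a
 * load or ALU op, or via a fetching atomic), then negated the
 * result and aligned it down to 8 bytes so that
 * fpb = fp - fpb_offset made all FP-relative offsets positive.
 * SP-relative addressing derives the same guarantee from
 * ctx->stack_size alone, so the scan and the reserved x27 both
 * go away.
 */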
@@ -1774,7 +1693,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
		goto out_off;
	}
 
-	ctx.fpb_offset = find_fpb_offset(prog);
	ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
 
	/*