Skip to content

Commit dbcd7f5

Browse files
Youling Tang authored and Huacai Chen committed
LoongArch: BPF: Add BPF exception tables
Inspired by commit 8008342 ("bpf, arm64: Add BPF exception tables"), do the same for LoongArch to add BPF exception tables. When a tracing BPF program attempts to read memory without using the bpf_probe_read() helper, the verifier marks the load instruction with the BPF_PROBE_MEM flag. Since the LoongArch JIT does not currently recognize this flag, it falls back to the interpreter. Add support for BPF_PROBE_MEM by appending an exception table to the BPF program. If the load instruction causes a data abort, the fixup infrastructure finds the exception table and fixes up the fault, by clearing the destination register and jumping over the faulting instruction. To keep the compact exception table entry format, inspect the pc in fixup_exception(). A more generic solution would add a "handler" field to the table entry, as on x86, s390, arm64, etc. Signed-off-by: Youling Tang <[email protected]> Signed-off-by: Huacai Chen <[email protected]>
1 parent 912bcfa commit dbcd7f5

File tree

5 files changed

+96
-5
lines changed

5 files changed

+96
-5
lines changed

arch/loongarch/include/asm/asm-extable.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
#define EX_TYPE_NONE 0
66
#define EX_TYPE_FIXUP 1
77
#define EX_TYPE_UACCESS_ERR_ZERO 2
8+
#define EX_TYPE_BPF 3
89

910
#ifdef __ASSEMBLY__
1011

arch/loongarch/include/asm/extable.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,16 @@ do { \
3232
(b)->data = (tmp).data; \
3333
} while (0)
3434

35+
#ifdef CONFIG_BPF_JIT
/* Fixup handler for faults in JITed BPF_PROBE_MEM loads; defined in net/bpf_jit.c */
bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs);
#else
/* Without the BPF JIT there are no EX_TYPE_BPF entries; never claim the fault */
static inline
bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs)
{
	return false;
}
#endif /* !CONFIG_BPF_JIT */
44+
3545
bool fixup_exception(struct pt_regs *regs);
3646

3747
#endif

arch/loongarch/mm/extable.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,8 @@ bool fixup_exception(struct pt_regs *regs)
5555
return ex_handler_fixup(ex, regs);
5656
case EX_TYPE_UACCESS_ERR_ZERO:
5757
return ex_handler_uaccess_err_zero(ex, regs);
58+
case EX_TYPE_BPF:
59+
return ex_handler_bpf(ex, regs);
5860
}
5961

6062
BUG();

arch/loongarch/net/bpf_jit.c

Lines changed: 81 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -387,6 +387,65 @@ static bool is_signed_bpf_cond(u8 cond)
387387
cond == BPF_JSGE || cond == BPF_JSLE;
388388
}
389389

390+
/*
 * Layout of the packed 32-bit ex->fixup word: the BPF destination register
 * number lives in bits [31:27], the (positive) distance from the faulting
 * instruction's next pc to &ex->fixup in bits [26:0].
 */
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)

/*
 * Exception fixup for a faulting JITed BPF_PROBE_MEM load: emulate the
 * bpf_probe_read() failure semantics by zeroing the destination register
 * and resuming at the instruction after the faulting load.
 */
bool ex_handler_bpf(const struct exception_table_entry *ex,
		    struct pt_regs *regs)
{
	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);

	/* The load "returned" 0 */
	regs->regs[dst_reg] = 0;
	/*
	 * offset was stored as (&ex->fixup - (fault_pc + 4)) negated into a
	 * positive value (see add_exception_handler()), so subtracting it
	 * from &ex->fixup yields the instruction following the fault.
	 */
	regs->csr_era = (unsigned long)&ex->fixup - offset;

	return true;
}
404+
405+
/*
 * For accesses to BTF pointers, add an entry to the exception table.
 *
 * Called right after a load instruction has been emitted. Records a
 * compact extable entry mapping the load's pc to a fixup encoding
 * (destination register + resume offset) consumed by ex_handler_bpf().
 * Returns 0 on success (or when no entry is needed), negative errno on
 * encoding overflow or table overrun.
 */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct jit_ctx *ctx,
				 int dst_reg)
{
	unsigned long pc;
	off_t offset;
	struct exception_table_entry *ex;

	/*
	 * Entries are only written during the final pass (ctx->image set),
	 * only if the verifier allocated an extable, and only for loads
	 * flagged BPF_PROBE_MEM.
	 */
	if (!ctx->image || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
		return 0;

	/* Overrunning the verifier-sized table would be a JIT bug */
	if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->num_exentries];
	/* pc of the load instruction just emitted (idx already advanced) */
	pc = (unsigned long)&ctx->image[ctx->idx - 1];

	/*
	 * ex->insn holds the pc-relative offset of the faulting instruction;
	 * since the extable follows the image, it must be negative and fit
	 * in an int.
	 */
	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;

	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->type = EX_TYPE_BPF;
	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

	ctx->num_exentries++;

	return 0;
}
448+
390449
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
391450
{
392451
u8 tm = -1;
@@ -816,6 +875,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
816875
case BPF_LDX | BPF_MEM | BPF_H:
817876
case BPF_LDX | BPF_MEM | BPF_W:
818877
case BPF_LDX | BPF_MEM | BPF_DW:
878+
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
879+
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
880+
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
881+
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
819882
switch (BPF_SIZE(code)) {
820883
case BPF_B:
821884
if (is_signed_imm12(off)) {
@@ -854,6 +917,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
854917
}
855918
break;
856919
}
920+
921+
ret = add_exception_handler(insn, ctx, dst);
922+
if (ret)
923+
return ret;
857924
break;
858925

859926
/* *(size *)(dst + off) = imm */
@@ -1018,14 +1085,17 @@ static int validate_code(struct jit_ctx *ctx)
10181085
return -1;
10191086
}
10201087

1088+
if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries))
1089+
return -1;
1090+
10211091
return 0;
10221092
}
10231093

10241094
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
10251095
{
10261096
bool tmp_blinded = false, extra_pass = false;
10271097
u8 *image_ptr;
1028-
int image_size;
1098+
int image_size, prog_size, extable_size;
10291099
struct jit_ctx ctx;
10301100
struct jit_data *jit_data;
10311101
struct bpf_binary_header *header;
@@ -1066,7 +1136,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
10661136
image_ptr = jit_data->image;
10671137
header = jit_data->header;
10681138
extra_pass = true;
1069-
image_size = sizeof(u32) * ctx.idx;
1139+
prog_size = sizeof(u32) * ctx.idx;
10701140
goto skip_init_ctx;
10711141
}
10721142

@@ -1088,12 +1158,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
10881158
ctx.epilogue_offset = ctx.idx;
10891159
build_epilogue(&ctx);
10901160

1161+
extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry);
1162+
10911163
/* Now we know the actual image size.
10921164
* As each LoongArch instruction is of length 32bit,
10931165
* we are translating number of JITed intructions into
10941166
* the size required to store these JITed code.
10951167
*/
1096-
image_size = sizeof(u32) * ctx.idx;
1168+
prog_size = sizeof(u32) * ctx.idx;
1169+
image_size = prog_size + extable_size;
10971170
/* Now we know the size of the structure to make */
10981171
header = bpf_jit_binary_alloc(image_size, &image_ptr,
10991172
sizeof(u32), jit_fill_hole);
@@ -1104,9 +1177,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
11041177

11051178
/* 2. Now, the actual pass to generate final JIT code */
11061179
ctx.image = (union loongarch_instruction *)image_ptr;
1180+
if (extable_size)
1181+
prog->aux->extable = (void *)image_ptr + prog_size;
11071182

11081183
skip_init_ctx:
11091184
ctx.idx = 0;
1185+
ctx.num_exentries = 0;
11101186

11111187
build_prologue(&ctx);
11121188
if (build_body(&ctx, extra_pass)) {
@@ -1125,7 +1201,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
11251201

11261202
/* And we're done */
11271203
if (bpf_jit_enable > 1)
1128-
bpf_jit_dump(prog->len, image_size, 2, ctx.image);
1204+
bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
11291205

11301206
/* Update the icache */
11311207
flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));
@@ -1147,7 +1223,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
11471223
jit_data->header = header;
11481224
}
11491225
prog->jited = 1;
1150-
prog->jited_len = image_size;
1226+
prog->jited_len = prog_size;
11511227
prog->bpf_func = (void *)ctx.image;
11521228

11531229
if (!prog->is_func || extra_pass) {

arch/loongarch/net/bpf_jit.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
*
55
* Copyright (C) 2022 Loongson Technology Corporation Limited
66
*/
7+
#include <linux/bitfield.h>
78
#include <linux/bpf.h>
89
#include <linux/filter.h>
910
#include <asm/cacheflush.h>
@@ -15,6 +16,7 @@ struct jit_ctx {
1516
unsigned int flags;
1617
unsigned int epilogue_offset;
1718
u32 *offset;
19+
int num_exentries;
1820
union loongarch_instruction *image;
1921
u32 stack_size;
2022
};

0 commit comments

Comments
 (0)