@@ -387,6 +387,65 @@ static bool is_signed_bpf_cond(u8 cond)
387
387
cond == BPF_JSGE || cond == BPF_JSLE ;
388
388
}
389
389
390
+ #define BPF_FIXUP_REG_MASK GENMASK(31, 27)
391
+ #define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
392
+
393
+ bool ex_handler_bpf (const struct exception_table_entry * ex ,
394
+ struct pt_regs * regs )
395
+ {
396
+ int dst_reg = FIELD_GET (BPF_FIXUP_REG_MASK , ex -> fixup );
397
+ off_t offset = FIELD_GET (BPF_FIXUP_OFFSET_MASK , ex -> fixup );
398
+
399
+ regs -> regs [dst_reg ] = 0 ;
400
+ regs -> csr_era = (unsigned long )& ex -> fixup - offset ;
401
+
402
+ return true;
403
+ }
404
+
405
/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct jit_ctx *ctx,
				 int dst_reg)
{
	unsigned long pc;
	off_t offset;
	struct exception_table_entry *ex;

	/*
	 * Only BPF_PROBE_MEM loads need a fixup, and only once the final
	 * image (with the extable placed after it) has been allocated.
	 */
	if (!ctx->image || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
		return 0;

	if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->num_exentries];
	/* pc of the just-emitted, potentially faulting, load instruction */
	pc = (unsigned long)&ctx->image[ctx->idx - 1];

	/*
	 * ex->insn is the pc-relative offset from the extable entry back to
	 * the faulting instruction; since the extable follows the program it
	 * must be negative, and it must fit in ex->insn's int field.
	 */
	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;

	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->type = EX_TYPE_BPF;
	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

	ctx->num_exentries++;

	return 0;
}
448
+
390
449
static int build_insn (const struct bpf_insn * insn , struct jit_ctx * ctx , bool extra_pass )
391
450
{
392
451
u8 tm = -1 ;
@@ -816,6 +875,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
816
875
case BPF_LDX | BPF_MEM | BPF_H :
817
876
case BPF_LDX | BPF_MEM | BPF_W :
818
877
case BPF_LDX | BPF_MEM | BPF_DW :
878
+ case BPF_LDX | BPF_PROBE_MEM | BPF_DW :
879
+ case BPF_LDX | BPF_PROBE_MEM | BPF_W :
880
+ case BPF_LDX | BPF_PROBE_MEM | BPF_H :
881
+ case BPF_LDX | BPF_PROBE_MEM | BPF_B :
819
882
switch (BPF_SIZE (code )) {
820
883
case BPF_B :
821
884
if (is_signed_imm12 (off )) {
@@ -854,6 +917,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
854
917
}
855
918
break ;
856
919
}
920
+
921
+ ret = add_exception_handler (insn , ctx , dst );
922
+ if (ret )
923
+ return ret ;
857
924
break ;
858
925
859
926
/* *(size *)(dst + off) = imm */
@@ -1018,14 +1085,17 @@ static int validate_code(struct jit_ctx *ctx)
1018
1085
return -1 ;
1019
1086
}
1020
1087
1088
+ if (WARN_ON_ONCE (ctx -> num_exentries != ctx -> prog -> aux -> num_exentries ))
1089
+ return -1 ;
1090
+
1021
1091
return 0 ;
1022
1092
}
1023
1093
1024
1094
struct bpf_prog * bpf_int_jit_compile (struct bpf_prog * prog )
1025
1095
{
1026
1096
bool tmp_blinded = false, extra_pass = false;
1027
1097
u8 * image_ptr ;
1028
- int image_size ;
1098
+ int image_size , prog_size , extable_size ;
1029
1099
struct jit_ctx ctx ;
1030
1100
struct jit_data * jit_data ;
1031
1101
struct bpf_binary_header * header ;
@@ -1066,7 +1136,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1066
1136
image_ptr = jit_data -> image ;
1067
1137
header = jit_data -> header ;
1068
1138
extra_pass = true;
1069
- image_size = sizeof (u32 ) * ctx .idx ;
1139
+ prog_size = sizeof (u32 ) * ctx .idx ;
1070
1140
goto skip_init_ctx ;
1071
1141
}
1072
1142
@@ -1088,12 +1158,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1088
1158
ctx .epilogue_offset = ctx .idx ;
1089
1159
build_epilogue (& ctx );
1090
1160
1161
+ extable_size = prog -> aux -> num_exentries * sizeof (struct exception_table_entry );
1162
+
1091
1163
/* Now we know the actual image size.
1092
1164
* As each LoongArch instruction is of length 32bit,
1093
1165
* we are translating number of JITed intructions into
1094
1166
* the size required to store these JITed code.
1095
1167
*/
1096
- image_size = sizeof (u32 ) * ctx .idx ;
1168
+ prog_size = sizeof (u32 ) * ctx .idx ;
1169
+ image_size = prog_size + extable_size ;
1097
1170
/* Now we know the size of the structure to make */
1098
1171
header = bpf_jit_binary_alloc (image_size , & image_ptr ,
1099
1172
sizeof (u32 ), jit_fill_hole );
@@ -1104,9 +1177,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1104
1177
1105
1178
/* 2. Now, the actual pass to generate final JIT code */
1106
1179
ctx .image = (union loongarch_instruction * )image_ptr ;
1180
+ if (extable_size )
1181
+ prog -> aux -> extable = (void * )image_ptr + prog_size ;
1107
1182
1108
1183
skip_init_ctx :
1109
1184
ctx .idx = 0 ;
1185
+ ctx .num_exentries = 0 ;
1110
1186
1111
1187
build_prologue (& ctx );
1112
1188
if (build_body (& ctx , extra_pass )) {
@@ -1125,7 +1201,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1125
1201
1126
1202
/* And we're done */
1127
1203
if (bpf_jit_enable > 1 )
1128
- bpf_jit_dump (prog -> len , image_size , 2 , ctx .image );
1204
+ bpf_jit_dump (prog -> len , prog_size , 2 , ctx .image );
1129
1205
1130
1206
/* Update the icache */
1131
1207
flush_icache_range ((unsigned long )header , (unsigned long )(ctx .image + ctx .idx ));
@@ -1147,7 +1223,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1147
1223
jit_data -> header = header ;
1148
1224
}
1149
1225
prog -> jited = 1 ;
1150
- prog -> jited_len = image_size ;
1226
+ prog -> jited_len = prog_size ;
1151
1227
prog -> bpf_func = (void * )ctx .image ;
1152
1228
1153
1229
if (!prog -> is_func || extra_pass ) {
0 commit comments