Commit 3ba026f

liu-song-6 authored and Alexei Starovoitov committed
x86, bpf: Use bpf_prog_pack for bpf trampoline
There are three major changes here:

1. Add arch_[alloc|free]_bpf_trampoline based on bpf_prog_pack;

2. Let arch_prepare_bpf_trampoline handle a ROX input image, which
   requires arch_prepare_bpf_trampoline to allocate a temporary RW
   buffer;

3. Update __arch_prepare_bpf_trampoline() to handle a RW buffer
   (rw_image) and a ROX buffer (image). This part is similar to the
   image/rw_image logic in bpf_int_jit_compile().

Signed-off-by: Song Liu <[email protected]>
Acked-by: Ilya Leoshkevich <[email protected]>
Acked-by: Jiri Olsa <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 26ef208 commit 3ba026f
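
Editor's note: the recurring idiom in this diff is that instructions are emitted into a writable scratch buffer (rw_image) while relative calls and jumps must be encoded against the address each instruction will occupy in the final read-only-executable (ROX) image. A minimal sketch of that translation follows, assuming nothing beyond what the diff shows; translate_to_rox() is a hypothetical helper for illustration, which the patch open-codes as image + (prog - (u8 *)rw_image).

#include <stddef.h>
#include <stdint.h>

/*
 * Sketch only, not kernel code. 'prog' is the emit cursor inside the RW
 * scratch buffer; the same byte offset locates the instruction in the
 * final ROX image, so that is the address from which relative
 * displacements must be computed.
 */
static void *translate_to_rox(void *image, void *rw_image, uint8_t *prog)
{
        ptrdiff_t off = prog - (uint8_t *)rw_image; /* bytes emitted so far */

        return (uint8_t *)image + off;              /* same offset in the ROX image */
}

This is why each emit_rsb_call(&prog, target, prog) below becomes emit_rsb_call(&prog, target, image + (prog - (u8 *)rw_image)): the last argument is the instruction's eventual runtime address, from which the relative displacement to target is derived.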

File tree

1 file changed: +72 −26 lines changed

arch/x86/net/bpf_jit_comp.c

Lines changed: 72 additions & 26 deletions
@@ -2198,7 +2198,8 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog,
 
 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 			   struct bpf_tramp_link *l, int stack_size,
-			   int run_ctx_off, bool save_ret)
+			   int run_ctx_off, bool save_ret,
+			   void *image, void *rw_image)
 {
 	u8 *prog = *pprog;
 	u8 *jmp_insn;
@@ -2226,7 +2227,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	else
 		EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
 
-	if (emit_rsb_call(&prog, bpf_trampoline_enter(p), prog))
+	if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
 		return -EINVAL;
 	/* remember prog start time returned by __bpf_prog_enter */
 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@ -2250,7 +2251,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 			 (long) p->insnsi >> 32,
 			 (u32) (long) p->insnsi);
 	/* call JITed bpf program or interpreter */
-	if (emit_rsb_call(&prog, p->bpf_func, prog))
+	if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
 		return -EINVAL;
 
 	/*
@@ -2277,7 +2278,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 		EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
 	else
 		EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
-	if (emit_rsb_call(&prog, bpf_trampoline_exit(p), prog))
+	if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
 		return -EINVAL;
 
 	*pprog = prog;
@@ -2312,14 +2313,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
 
 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
 		      struct bpf_tramp_links *tl, int stack_size,
-		      int run_ctx_off, bool save_ret)
+		      int run_ctx_off, bool save_ret,
+		      void *image, void *rw_image)
 {
 	int i;
 	u8 *prog = *pprog;
 
 	for (i = 0; i < tl->nr_links; i++) {
 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
-				    run_ctx_off, save_ret))
+				    run_ctx_off, save_ret, image, rw_image))
 			return -EINVAL;
 	}
 	*pprog = prog;
@@ -2328,7 +2330,8 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
 
 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 			      struct bpf_tramp_links *tl, int stack_size,
-			      int run_ctx_off, u8 **branches)
+			      int run_ctx_off, u8 **branches,
+			      void *image, void *rw_image)
 {
 	u8 *prog = *pprog;
 	int i;
@@ -2339,7 +2342,8 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 	for (i = 0; i < tl->nr_links; i++) {
-		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
+		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
+				    image, rw_image))
 			return -EINVAL;
 
 		/* mod_ret prog stored return value into [rbp - 8]. Emit:
@@ -2422,7 +2426,8 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
  * add rsp, 8			// skip eth_type_trans's frame
  * ret				// return to its caller
  */
-static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
+static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
+					 void *rw_image_end, void *image,
 					 const struct btf_func_model *m, u32 flags,
 					 struct bpf_tramp_links *tlinks,
 					 void *func_addr)
@@ -2521,7 +2526,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image
 		orig_call += X86_PATCH_SIZE;
 	}
 
-	prog = image;
+	prog = rw_image;
 
 	EMIT_ENDBR();
 	/*
@@ -2563,15 +2568,16 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		/* arg1: mov rdi, im */
 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
-		if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) {
+		if (emit_rsb_call(&prog, __bpf_tramp_enter,
+				  image + (prog - (u8 *)rw_image))) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
 	}
 
 	if (fentry->nr_links)
 		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
-			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
+			       flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
 			return -EINVAL;
 
 	if (fmod_ret->nr_links) {
@@ -2581,7 +2587,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image
 			return -ENOMEM;
 
 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
-				       run_ctx_off, branches)) {
+				       run_ctx_off, branches, image, rw_image)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -2602,14 +2608,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image
 			EMIT2(0xff, 0xd3); /* call *rbx */
 		} else {
 			/* call original function */
-			if (emit_rsb_call(&prog, orig_call, prog)) {
+			if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
 				ret = -EINVAL;
 				goto cleanup;
 			}
 		}
 		/* remember return value in a stack for bpf prog to access */
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
-		im->ip_after_call = prog;
+		im->ip_after_call = image + (prog - (u8 *)rw_image);
 		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
 		prog += X86_PATCH_SIZE;
 	}
@@ -2625,12 +2631,13 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image
 		 * aligned address of do_fexit.
 		 */
 		for (i = 0; i < fmod_ret->nr_links; i++)
-			emit_cond_near_jump(&branches[i], prog, branches[i],
-					    X86_JNE);
+			emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
+					    image + (branches[i] - (u8 *)rw_image), X86_JNE);
 	}
 
 	if (fexit->nr_links)
-		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) {
+		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
+			       false, image, rw_image)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -2643,10 +2650,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image
 	 * restored to R0.
 	 */
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
-		im->ip_epilogue = prog;
+		im->ip_epilogue = image + (prog - (u8 *)rw_image);
 		/* arg1: mov rdi, im */
 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
-		if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) {
+		if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -2665,25 +2672,64 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image
 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
 		/* skip our return address and return to parent */
 		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
-	emit_return(&prog, prog);
+	emit_return(&prog, image + (prog - (u8 *)rw_image));
 	/* Make sure the trampoline generation logic doesn't overflow */
-	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
+	if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
 		ret = -EFAULT;
 		goto cleanup;
 	}
-	ret = prog - (u8 *)image + BPF_INSN_SAFETY;
+	ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
 
 cleanup:
 	kfree(branches);
 	return ret;
 }
 
+void *arch_alloc_bpf_trampoline(unsigned int size)
+{
+	return bpf_prog_pack_alloc(size, jit_fill_hole);
+}
+
+void arch_free_bpf_trampoline(void *image, unsigned int size)
+{
+	bpf_prog_pack_free(image, size);
+}
+
+void arch_protect_bpf_trampoline(void *image, unsigned int size)
+{
+}
+
+void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+{
+}
+
 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
 				const struct btf_func_model *m, u32 flags,
 				struct bpf_tramp_links *tlinks,
 				void *func_addr)
 {
-	return __arch_prepare_bpf_trampoline(im, image, image_end, m, flags, tlinks, func_addr);
+	void *rw_image, *tmp;
+	int ret;
+	u32 size = image_end - image;
+
+	/* rw_image doesn't need to be in module memory range, so we can
+	 * use kvmalloc.
+	 */
+	rw_image = kvmalloc(size, GFP_KERNEL);
+	if (!rw_image)
+		return -ENOMEM;
+
+	ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
+					    flags, tlinks, func_addr);
+	if (ret < 0)
+		goto out;
+
+	tmp = bpf_arch_text_copy(image, rw_image, size);
+	if (IS_ERR(tmp))
+		ret = PTR_ERR(tmp);
+out:
+	kvfree(rw_image);
+	return ret;
 }
 
 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
@@ -2704,8 +2750,8 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
 	if (!image)
 		return -ENOMEM;
 
-	ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, m, flags,
-					    tlinks, func_addr);
+	ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
+					    m, flags, tlinks, func_addr);
 	bpf_jit_free_exec(image);
 	return ret;
 }
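
Editor's note: taken together, the new hooks imply the following lifecycle on the generic side. The driver function below is a hypothetical sketch, not code from this patch (the generic caller is outside this diff); it only illustrates the order of operations the new arch_prepare_bpf_trampoline assumes, and it presumes the usual kernel headers for the bpf types it names.

/* Hypothetical sketch of the caller-side flow implied by this patch. */
static int build_trampoline(struct bpf_tramp_image *im,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_links *tlinks, void *func_addr,
			    unsigned int size, void **res)
{
	/* ROX memory from bpf_prog_pack, pre-filled by jit_fill_hole() */
	void *image = arch_alloc_bpf_trampoline(size);
	int ret;

	if (!image)
		return -ENOMEM;

	/*
	 * Internally: kvmalloc() a RW buffer, emit code into it with all
	 * call/jump targets computed against 'image', then publish the
	 * bytes with bpf_arch_text_copy().
	 */
	ret = arch_prepare_bpf_trampoline(im, image, image + size, m, flags,
					  tlinks, func_addr);
	if (ret < 0) {
		arch_free_bpf_trampoline(image, size);
		return ret;
	}
	*res = image;
	return 0;
}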
