
Commit 6a53745

joanbm authored and Alexei Starovoitov committed
x86/bpf: Fix IP for relocating call depth accounting
The commit:

  59bec00 ("x86/percpu: Introduce %rip-relative addressing to PER_CPU_VAR()")

made PER_CPU_VAR() use %rip-relative addressing, so the INCREMENT_CALL_DEPTH macro and skl_call_thunk_template now contain %rip-relative asm code.

A follow-up commit:

  17bce3b ("x86/callthunks: Handle %rip-relative relocations in call thunk template")

changed x86_call_depth_emit_accounting() to use apply_relocation(), but mistakenly assumed that the code is patched in place (i.e. that the destination of the relocation matches the address of the code), using *pprog as the destination IP.

This is not true for the call depth accounting emitted by the BPF JIT, so the calculated address was wrong: JIT-ed BPF programs on kernels with call depth tracking were broken and usually caused a page fault.

Pass the destination IP when the BPF JIT emits call depth accounting.

Fixes: 17bce3b ("x86/callthunks: Handle %rip-relative relocations in call thunk template")
Signed-off-by: Joan Bruguera Micó <[email protected]>
Reviewed-by: Uros Bizjak <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Cc: Alexei Starovoitov <[email protected]>
Cc: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 9d98aa0 commit 6a53745

3 files changed: +12 −15 lines changed
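The fix turns on how a %rip-relative displacement must be rebound when code is copied to a new address. Below is a minimal, self-contained C sketch of that fix-up; fixup_rip_disp32 is a hypothetical helper for illustration, not the kernel's apply_relocation():

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Hypothetical helper (not kernel code): rebind one disp32 of a
 * %rip-relative operand after copying instructions from their original
 * address 'src' to the address 'dest' where they will actually execute.
 */
static void fixup_rip_disp32(uint8_t *buf, size_t disp_off,
                             const uint8_t *src, const uint8_t *dest)
{
        int32_t disp;

        memcpy(&disp, buf + disp_off, sizeof(disp));
        /*
         * A %rip-relative operand encodes "target - next_insn_address".
         * Moving the code by (dest - src) moves next_insn_address by the
         * same delta, so the displacement must shrink by that delta for
         * the operand to keep resolving to the same absolute target.
         */
        disp -= (int32_t)(dest - src);
        memcpy(buf + disp_off, &disp, sizeof(disp));
}

This is why the choice of destination matters: compute the delta against the wrong 'dest' (as the buggy *pprog did for JIT-emitted thunks) and the copied instruction resolves to a bogus address at runtime.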

arch/x86/include/asm/alternative.h

Lines changed: 2 additions & 2 deletions

@@ -117,7 +117,7 @@ extern void callthunks_patch_builtin_calls(void);
 extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
                                           struct module *mod);
 extern void *callthunks_translate_call_dest(void *dest);
-extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
+extern int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip);
 #else
 static __always_inline void callthunks_patch_builtin_calls(void) {}
 static __always_inline void
@@ -128,7 +128,7 @@ static __always_inline void *callthunks_translate_call_dest(void *dest)
        return dest;
 }
 static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
-                                                          void *func)
+                                                          void *func, void *ip)
 {
        return 0;
 }

arch/x86/kernel/callthunks.c

Lines changed: 2 additions & 2 deletions

@@ -314,7 +314,7 @@ static bool is_callthunk(void *addr)
        return !bcmp(pad, insn_buff, tmpl_size);
 }
 
-int x86_call_depth_emit_accounting(u8 **pprog, void *func)
+int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
 {
        unsigned int tmpl_size = SKL_TMPL_SIZE;
        u8 insn_buff[MAX_PATCH_LEN];
@@ -327,7 +327,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func)
                return 0;
 
        memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
-       apply_relocation(insn_buff, tmpl_size, *pprog,
+       apply_relocation(insn_buff, tmpl_size, ip,
                         skl_call_thunk_template, tmpl_size);
 
        memcpy(*pprog, insn_buff, tmpl_size);
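To see the failure mode concretely, here is an illustrative numeric sketch (all addresses made up, not kernel code) contrasting the old *pprog destination with the new ip:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t tmpl    = 0x401000; /* template's own link-time address      */
        uint64_t target  = 0x401040; /* what the %rip-relative insn refers to */
        uint64_t rw_buf  = 0x700000; /* *pprog: writable buffer being filled  */
        uint64_t runtime = 0x900000; /* ip: final executable address          */
        uint64_t insn_end = 7;       /* disp32 is measured from here          */

        int32_t disp_tmpl = (int32_t)(target - (tmpl + insn_end));
        int32_t disp_bad  = disp_tmpl - (int32_t)(rw_buf - tmpl);  /* old bug */
        int32_t disp_good = disp_tmpl - (int32_t)(runtime - tmpl); /* the fix */

        /* Only the 'good' displacement resolves back to 'target'. */
        printf("bad:  0x%llx\n",
               (unsigned long long)(runtime + insn_end + disp_bad));
        printf("good: 0x%llx\n",
               (unsigned long long)(runtime + insn_end + disp_good));
        return 0;
}

With these made-up addresses the buggy displacement lands at 0x601040 instead of 0x401040, which on a real kernel typically means a page fault the first time the JIT-ed program runs.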

arch/x86/net/bpf_jit_comp.c

Lines changed: 8 additions & 11 deletions

@@ -480,7 +480,7 @@ static int emit_call(u8 **pprog, void *func, void *ip)
 static int emit_rsb_call(u8 **pprog, void *func, void *ip)
 {
        OPTIMIZER_HIDE_VAR(func);
-       ip += x86_call_depth_emit_accounting(pprog, func);
+       ip += x86_call_depth_emit_accounting(pprog, func, ip);
        return emit_patch(pprog, func, ip, 0xE8);
 }
 
@@ -1972,20 +1972,17 @@ st:                    if (is_imm8(insn->off))
 
                        /* call */
                case BPF_JMP | BPF_CALL: {
-                       int offs;
+                       u8 *ip = image + addrs[i - 1];
 
                        func = (u8 *) __bpf_call_base + imm32;
                        if (tail_call_reachable) {
                                RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
-                               if (!imm32)
-                                       return -EINVAL;
-                               offs = 7 + x86_call_depth_emit_accounting(&prog, func);
-                       } else {
-                               if (!imm32)
-                                       return -EINVAL;
-                               offs = x86_call_depth_emit_accounting(&prog, func);
+                               ip += 7;
                        }
-                       if (emit_call(&prog, func, image + addrs[i - 1] + offs))
+                       if (!imm32)
+                               return -EINVAL;
+                       ip += x86_call_depth_emit_accounting(&prog, func, ip);
+                       if (emit_call(&prog, func, ip))
                                return -EINVAL;
                        break;
                }
@@ -2835,7 +2832,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
                 * Direct-call fentry stub, as such it needs accounting for the
                 * __fentry__ call.
                 */
-               x86_call_depth_emit_accounting(&prog, NULL);
+               x86_call_depth_emit_accounting(&prog, NULL, image);
        }
        EMIT1(0x55);             /* push rbp */
        EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
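For reference, a minimal sketch of the IP bookkeeping the fixed BPF_JMP | BPF_CALL path performs. insn_start, thunk_size and call_rel32 are illustrative names (not kernel symbols); the 7-byte RESTORE_TAIL_CALL_CNT size and the 5-byte E8 call follow the hunk above:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch: 'insn_start' stands in for addrs[i - 1] and 'thunk_size' for
 * the return value of x86_call_depth_emit_accounting().
 */
static int32_t call_rel32(uint8_t *image, int insn_start,
                          bool tail_call_reachable, int thunk_size,
                          uint8_t *func)
{
        uint8_t *ip = image + insn_start; /* runtime address of this insn */

        if (tail_call_reachable)
                ip += 7;                  /* RESTORE_TAIL_CALL_CNT bytes  */
        ip += thunk_size;                 /* call depth accounting thunk  */

        /* E8 rel32 is relative to the end of the 5-byte call insn. */
        return (int32_t)(func - (ip + 5));
}

Note that the old code tracked the same offsets in 'offs', so the call's own rel32 was already correct; the bug was that x86_call_depth_emit_accounting() relocated the thunk's internal %rip-relative bytes against *pprog instead of this ip.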
