Skip to content

Commit 8c3fe02

Browse files
author
Alexei Starovoitov
committed
Merge branch 'x86-bpf-fixes-for-the-bpf-jit-with-retbleed-stuff'
Joan Bruguera Micó says: ==================== x86/bpf: Fixes for the BPF JIT with retbleed=stuff From: Joan Bruguera Micó <[email protected]> Fixes two issues that cause kernel panics when using the BPF JIT with the call depth tracking / stuffing mitigation for Skylake processors (`retbleed=stuff`). Both issues can be triggered by running simple BPF programs (e.g. running the test suite should trigger both). The first (resubmit) fixes a trivial issue related to calculating the destination IP for call instructions with call depth tracking. The second is related to using the correct IP for relocations, related to the recently introduced %rip-relative addressing for PER_CPU_VAR. Cc: Alexei Starovoitov <[email protected]> Cc: Daniel Borkmann <[email protected]> --- v2: Simplify calculation of "ip". Add more details to the commit message. Joan Bruguera Micó (1): x86/bpf: Fix IP for relocating call depth accounting ==================== Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]>
2 parents 6dae957 + 6a53745 commit 8c3fe02

File tree

3 files changed

+12
-15
lines changed

3 files changed

+12
-15
lines changed

arch/x86/include/asm/alternative.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@ extern void callthunks_patch_builtin_calls(void);
117117
extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
118118
struct module *mod);
119119
extern void *callthunks_translate_call_dest(void *dest);
120-
extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
120+
extern int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip);
121121
#else
122122
static __always_inline void callthunks_patch_builtin_calls(void) {}
123123
static __always_inline void
@@ -128,7 +128,7 @@ static __always_inline void *callthunks_translate_call_dest(void *dest)
128128
return dest;
129129
}
130130
static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
131-
void *func)
131+
void *func, void *ip)
132132
{
133133
return 0;
134134
}

arch/x86/kernel/callthunks.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -314,7 +314,7 @@ static bool is_callthunk(void *addr)
314314
return !bcmp(pad, insn_buff, tmpl_size);
315315
}
316316

317-
int x86_call_depth_emit_accounting(u8 **pprog, void *func)
317+
int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
318318
{
319319
unsigned int tmpl_size = SKL_TMPL_SIZE;
320320
u8 insn_buff[MAX_PATCH_LEN];
@@ -327,7 +327,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func)
327327
return 0;
328328

329329
memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
330-
apply_relocation(insn_buff, tmpl_size, *pprog,
330+
apply_relocation(insn_buff, tmpl_size, ip,
331331
skl_call_thunk_template, tmpl_size);
332332

333333
memcpy(*pprog, insn_buff, tmpl_size);

arch/x86/net/bpf_jit_comp.c

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -480,7 +480,7 @@ static int emit_call(u8 **pprog, void *func, void *ip)
480480
static int emit_rsb_call(u8 **pprog, void *func, void *ip)
481481
{
482482
OPTIMIZER_HIDE_VAR(func);
483-
x86_call_depth_emit_accounting(pprog, func);
483+
ip += x86_call_depth_emit_accounting(pprog, func, ip);
484484
return emit_patch(pprog, func, ip, 0xE8);
485485
}
486486

@@ -1972,20 +1972,17 @@ st: if (is_imm8(insn->off))
19721972

19731973
/* call */
19741974
case BPF_JMP | BPF_CALL: {
1975-
int offs;
1975+
u8 *ip = image + addrs[i - 1];
19761976

19771977
func = (u8 *) __bpf_call_base + imm32;
19781978
if (tail_call_reachable) {
19791979
RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
1980-
if (!imm32)
1981-
return -EINVAL;
1982-
offs = 7 + x86_call_depth_emit_accounting(&prog, func);
1983-
} else {
1984-
if (!imm32)
1985-
return -EINVAL;
1986-
offs = x86_call_depth_emit_accounting(&prog, func);
1980+
ip += 7;
19871981
}
1988-
if (emit_call(&prog, func, image + addrs[i - 1] + offs))
1982+
if (!imm32)
1983+
return -EINVAL;
1984+
ip += x86_call_depth_emit_accounting(&prog, func, ip);
1985+
if (emit_call(&prog, func, ip))
19891986
return -EINVAL;
19901987
break;
19911988
}
@@ -2835,7 +2832,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
28352832
* Direct-call fentry stub, as such it needs accounting for the
28362833
* __fentry__ call.
28372834
*/
2838-
x86_call_depth_emit_accounting(&prog, NULL);
2835+
x86_call_depth_emit_accounting(&prog, NULL, image);
28392836
}
28402837
EMIT1(0x55); /* push rbp */
28412838
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */

0 commit comments

Comments
 (0)