Commit cbba5d1
Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Pull bpf fixes from Alexei Starovoitov:

 - Fix interaction between livepatch and BPF fexit programs (Song Liu)
   With Steven and Masami acks.

 - Fix stack ORC unwind from BPF kprobe_multi (Jiri Olsa)
   With Steven and Masami acks.

 - Fix out of bounds access in widen_imprecise_scalars() in the verifier (Eduard Zingerman)

 - Fix conflicts between MPTCP and BPF sockmap (Jiayuan Chen)

 - Fix net_sched storage collision with BPF data_meta/data_end (Eric Dumazet)

 - Add _impl suffix to BPF kfuncs with implicit args to avoid breaking them in bpf-next when KF_IMPLICIT_ARGS is added (Mykyta Yatsenko)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: Test widen_imprecise_scalars() with different stack depth
  bpf: account for current allocated stack depth in widen_imprecise_scalars()
  bpf: Add bpf_prog_run_data_pointers()
  selftests/bpf: Add mptcp test with sockmap
  mptcp: Fix proto fallback detection with BPF
  mptcp: Disallow MPTCP subflows from sockmap
  selftests/bpf: Add stacktrace ips test for raw_tp
  selftests/bpf: Add stacktrace ips test for kprobe_multi/kretprobe_multi
  x86/fgraph,bpf: Fix stack ORC unwind from kprobe_multi return probe
  Revert "perf/x86: Always store regs->ip in perf_callchain_kernel()"
  bpf: add _impl suffix for bpf_stream_vprintk() kfunc
  bpf: add _impl suffix for bpf_task_work_schedule* kfuncs
  selftests/bpf: Add tests for livepatch + bpf trampoline
  ftrace: bpf: Fix IPMODIFY + DIRECT in modify_ftrace_direct()
  ftrace: Fix BPF fexit with livepatch
2 parents a752782 + 6c76261 commit cbba5d1

File tree

29 files changed: +762 −84 lines changed

arch/x86/events/core.c

Lines changed: 5 additions & 5 deletions

@@ -2789,13 +2789,13 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
 		return;
 	}
 
-	if (perf_callchain_store(entry, regs->ip))
-		return;
-
-	if (perf_hw_regs(regs))
+	if (perf_hw_regs(regs)) {
+		if (perf_callchain_store(entry, regs->ip))
+			return;
 		unwind_start(&state, current, regs, NULL);
-	else
+	} else {
 		unwind_start(&state, current, NULL, (void *)regs->sp);
+	}
 
 	for (; !unwind_done(&state); unwind_next_frame(&state)) {
 		addr = unwind_get_return_address(&state);

arch/x86/include/asm/ftrace.h

Lines changed: 5 additions & 0 deletions

@@ -56,6 +56,11 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs)
 	return &arch_ftrace_regs(fregs)->regs;
 }
 
+#define arch_ftrace_partial_regs(regs) do {	\
+	regs->flags &= ~X86_EFLAGS_FIXED;	\
+	regs->cs = __KERNEL_CS;			\
+} while (0)
+
 #define arch_ftrace_fill_perf_regs(fregs, _regs) do {	\
 	(_regs)->ip = arch_ftrace_regs(fregs)->regs.ip;	\
 	(_regs)->sp = arch_ftrace_regs(fregs)->regs.sp;	\

arch/x86/kernel/ftrace_64.S

Lines changed: 7 additions & 1 deletion

@@ -354,12 +354,17 @@ SYM_CODE_START(return_to_handler)
 	UNWIND_HINT_UNDEFINED
 	ANNOTATE_NOENDBR
 
+	/* Restore return_to_handler value that got eaten by previous ret instruction. */
+	subq $8, %rsp
+	UNWIND_HINT_FUNC
+
 	/* Save ftrace_regs for function exit context  */
 	subq $(FRAME_SIZE), %rsp
 
 	movq %rax, RAX(%rsp)
 	movq %rdx, RDX(%rsp)
 	movq %rbp, RBP(%rsp)
+	movq %rsp, RSP(%rsp)
 	movq %rsp, %rdi
 
 	call ftrace_return_to_handler
@@ -368,7 +373,8 @@ SYM_CODE_START(return_to_handler)
 	movq RDX(%rsp), %rdx
 	movq RAX(%rsp), %rax
 
-	addq $(FRAME_SIZE), %rsp
+	addq $(FRAME_SIZE) + 8, %rsp
+
 	/*
 	 * Jump back to the old return address. This cannot be JMP_NOSPEC rdi
 	 * since IBT would demand that contain ENDBR, which simply isn't so for
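A brief note on why the extra 8 bytes matter: the ret that transfers control into return_to_handler consumes the fake return-address slot, so the ORC unwinder previously had no frame to describe there. Re-reserving the slot and marking it with UNWIND_HINT_FUNC makes the stack look like an ordinary function frame again; the matching addq $(FRAME_SIZE) + 8, %rsp releases it on exit. The layout sketch below is illustrative, not taken from the patch:

/*
 * Stack seen by the ORC unwinder inside ftrace_return_to_handler()
 * after this change (sketch, addresses illustrative):
 *
 *   rsp + FRAME_SIZE      slot re-reserved by "subq $8, %rsp", standing
 *                         in for the return address eaten by the prior
 *                         ret; UNWIND_HINT_FUNC describes it as a normal
 *                         function frame
 *   rsp .. FRAME_SIZE-1   saved ftrace_regs, now including RSP(%rsp) so
 *                         the exit handler sees a usable stack pointer
 */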

include/linux/filter.h

Lines changed: 20 additions & 0 deletions

@@ -901,6 +901,26 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
 	cb->data_end = skb->data + skb_headlen(skb);
 }
 
+static inline int bpf_prog_run_data_pointers(
+	const struct bpf_prog *prog,
+	struct sk_buff *skb)
+{
+	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+	void *save_data_meta, *save_data_end;
+	int res;
+
+	save_data_meta = cb->data_meta;
+	save_data_end = cb->data_end;
+
+	bpf_compute_data_pointers(skb);
+	res = bpf_prog_run(prog, skb);
+
+	cb->data_meta = save_data_meta;
+	cb->data_end = save_data_end;
+
+	return res;
+}
+
 /* Similar to bpf_compute_data_pointers(), except that save orginal
  * data in cb->data and cb->meta_data for restore.
  */
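For context, the series uses this helper to fix the net_sched collision: a caller that previously computed data pointers in place can now run the program without leaving data_meta/data_end behind in skb->cb. A minimal sketch of such a call site (the function name is illustrative, not part of this diff):

/* Illustrative call site. Previously a caller would do:
 *
 *	bpf_compute_data_pointers(skb);
 *	ret = bpf_prog_run(prog, skb);
 *
 * which leaves data_meta/data_end written into skb->cb, colliding with
 * the state net_sched keeps in the same control block.  The helper
 * brackets the program run with a save/restore of those two slots.
 */
static int example_run_filter(const struct bpf_prog *prog, struct sk_buff *skb)
{
	return bpf_prog_run_data_pointers(prog, skb);
}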

include/linux/ftrace.h

Lines changed: 9 additions & 1 deletion

@@ -193,6 +193,10 @@ static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs
 #if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) || \
 	defined(CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS)
 
+#ifndef arch_ftrace_partial_regs
+#define arch_ftrace_partial_regs(regs) do {} while (0)
+#endif
+
 static __always_inline struct pt_regs *
 ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
 {
@@ -202,7 +206,11 @@ ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
 	 * Since arch_ftrace_get_regs() will check some members and may return
 	 * NULL, we can not use it.
 	 */
-	return &arch_ftrace_regs(fregs)->regs;
+	regs = &arch_ftrace_regs(fregs)->regs;
+
+	/* Allow arch specific updates to regs. */
+	arch_ftrace_partial_regs(regs);
+	return regs;
 }
 
 #endif /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS || CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
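Taken together with the x86 macro above, this gives architectures a hook to fix up the partial pt_regs before anyone unwinds through them. A hedged sketch of a consumer (the handler below is hypothetical; kprobe_multi's entry path is the real user in this series):

/* Hypothetical handler converting ftrace_regs to pt_regs. */
static void example_probe_handler(struct ftrace_regs *fregs)
{
	struct pt_regs storage;
	struct pt_regs *regs = ftrace_partial_regs(fregs, &storage);

	/* On x86 the hook cleared X86_EFLAGS_FIXED, so perf_hw_regs()
	 * (see the arch/x86/events/core.c hunk above) reports these as
	 * partial regs and perf unwinds from regs->sp rather than
	 * trusting regs->ip; cs = __KERNEL_CS keeps user_mode(regs)
	 * false. */
	(void)regs;
}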

kernel/bpf/helpers.c

Lines changed: 15 additions & 11 deletions

@@ -4169,7 +4169,8 @@ static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work
 }
 
 /**
- * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL mode
+ * bpf_task_work_schedule_signal_impl - Schedule BPF callback using task_work_add with TWA_SIGNAL
+ * mode
  * @task: Task struct for which callback should be scheduled
  * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
  * @map__map: bpf_map that embeds struct bpf_task_work in the values
@@ -4178,15 +4179,17 @@ static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work
  *
  * Return: 0 if task work has been scheduled successfully, negative error code otherwise
  */
-__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
-					      void *map__map, bpf_task_work_callback_t callback,
-					      void *aux__prog)
+__bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
+						   struct bpf_task_work *tw, void *map__map,
+						   bpf_task_work_callback_t callback,
+						   void *aux__prog)
 {
 	return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
 }
 
 /**
- * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME mode
+ * bpf_task_work_schedule_resume_impl - Schedule BPF callback using task_work_add with TWA_RESUME
+ * mode
  * @task: Task struct for which callback should be scheduled
  * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
  * @map__map: bpf_map that embeds struct bpf_task_work in the values
@@ -4195,9 +4198,10 @@ __bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct b
 *
 * Return: 0 if task work has been scheduled successfully, negative error code otherwise
 */
-__bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw,
-					      void *map__map, bpf_task_work_callback_t callback,
-					      void *aux__prog)
+__bpf_kfunc int bpf_task_work_schedule_resume_impl(struct task_struct *task,
+						   struct bpf_task_work *tw, void *map__map,
+						   bpf_task_work_callback_t callback,
+						   void *aux__prog)
 {
 	return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME);
 }
@@ -4376,9 +4380,9 @@ BTF_ID_FLAGS(func, bpf_strnstr);
 #if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS)
 BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU)
 #endif
-BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_TRUSTED_ARGS)
-BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_TRUSTED_ARGS)
-BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_stream_vprintk_impl, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_resume_impl, KF_TRUSTED_ARGS)
 BTF_KFUNCS_END(common_btf_ids)
 
 static const struct btf_kfunc_id_set common_kfunc_set = {
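The _impl renames keep the BPF-visible call name stable: programs are expected to reach the kfunc through a wrapper that supplies the implicit aux__prog argument. A plausible program-side shim, sketched under the assumption that it lives in a bpf_experimental.h-style header with a NULL default (neither is shown in this diff):

/* Assumed BPF-program-side declaration (hypothetical placement). */
extern int bpf_task_work_schedule_signal_impl(struct task_struct *task,
					      struct bpf_task_work *tw,
					      void *map__map,
					      bpf_task_work_callback_t callback,
					      void *aux__prog) __ksym;

/* Old name preserved as a macro; the verifier fills in aux__prog. */
#define bpf_task_work_schedule_signal(task, tw, map, callback)	\
	bpf_task_work_schedule_signal_impl(task, tw, map, callback, NULL)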

kernel/bpf/stream.c

Lines changed: 2 additions & 1 deletion

@@ -355,7 +355,8 @@ __bpf_kfunc_start_defs();
  * Avoid using enum bpf_stream_id so that kfunc users don't have to pull in the
  * enum in headers.
  */
-__bpf_kfunc int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args, u32 len__sz, void *aux__prog)
+__bpf_kfunc int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args,
+					u32 len__sz, void *aux__prog)
 {
 	struct bpf_bprintf_data data = {
 		.get_bin_args = true,

kernel/bpf/trampoline.c

Lines changed: 0 additions & 5 deletions

@@ -479,11 +479,6 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
 	 * BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the
 	 * trampoline again, and retry register.
 	 */
-	/* reset fops->func and fops->trampoline for re-register */
-	tr->fops->func = NULL;
-	tr->fops->trampoline = 0;
-
-	/* free im memory and reallocate later */
 	bpf_tramp_image_free(im);
 	goto again;
 }

kernel/bpf/verifier.c

Lines changed: 10 additions & 8 deletions

@@ -8866,7 +8866,7 @@ static int widen_imprecise_scalars(struct bpf_verifier_env *env,
 				   struct bpf_verifier_state *cur)
 {
 	struct bpf_func_state *fold, *fcur;
-	int i, fr;
+	int i, fr, num_slots;
 
 	reset_idmap_scratch(env);
 	for (fr = old->curframe; fr >= 0; fr--) {
@@ -8879,7 +8879,9 @@
 					  &fcur->regs[i],
 					  &env->idmap_scratch);
 
-		for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) {
+		num_slots = min(fold->allocated_stack / BPF_REG_SIZE,
+				fcur->allocated_stack / BPF_REG_SIZE);
+		for (i = 0; i < num_slots; i++) {
 			if (!is_spilled_reg(&fold->stack[i]) ||
 			    !is_spilled_reg(&fcur->stack[i]))
 				continue;
@@ -12259,8 +12261,8 @@ enum special_kfunc_type {
 	KF_bpf_res_spin_lock_irqsave,
 	KF_bpf_res_spin_unlock_irqrestore,
 	KF___bpf_trap,
-	KF_bpf_task_work_schedule_signal,
-	KF_bpf_task_work_schedule_resume,
+	KF_bpf_task_work_schedule_signal_impl,
+	KF_bpf_task_work_schedule_resume_impl,
 };
 
 BTF_ID_LIST(special_kfunc_list)
@@ -12331,13 +12333,13 @@ BTF_ID(func, bpf_res_spin_unlock)
 BTF_ID(func, bpf_res_spin_lock_irqsave)
 BTF_ID(func, bpf_res_spin_unlock_irqrestore)
 BTF_ID(func, __bpf_trap)
-BTF_ID(func, bpf_task_work_schedule_signal)
-BTF_ID(func, bpf_task_work_schedule_resume)
+BTF_ID(func, bpf_task_work_schedule_signal_impl)
+BTF_ID(func, bpf_task_work_schedule_resume_impl)
 
 static bool is_task_work_add_kfunc(u32 func_id)
 {
-	return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
-	       func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume];
+	return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal_impl] ||
+	       func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume_impl];
 }
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)