Skip to content

Commit 02ffd6f

Browse files
committed
Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Pull bpf fixes from Alexei Starovoitov: "A number of fixes accumulated due to summer vacations - Fix out-of-bounds dynptr write in bpf_crypto_crypt() kfunc which was misidentified as a security issue (Daniel Borkmann) - Update the list of BPF selftests maintainers (Eduard Zingerman) - Fix selftests warnings with icecc compiler (Ilya Leoshkevich) - Disable XDP/cpumap direct return optimization (Jesper Dangaard Brouer) - Fix unexpected get_helper_proto() result in unusual configuration BPF_SYSCALL=y and BPF_EVENTS=n (Jiri Olsa) - Allow fallback to interpreter when JIT support is limited (KaFai Wan) - Fix rqspinlock and choose trylock fallback for NMI waiters. Pick the simplest fix. A more involved fix is targeted at bpf-next (Kumar Kartikeya Dwivedi) - Fix cleanup when tcp_bpf_send_verdict() fails to allocate psock->cork (Kuniyuki Iwashima) - Disallow bpf_timer in PREEMPT_RT for now. Proper solution is being discussed for bpf-next. (Leon Hwang) - Fix XSK cq descriptor production (Maciej Fijalkowski) - Tell memcg to use allow_spinning=false path in bpf_timer_init() to avoid lockup in cgroup_file_notify() (Peilin Ye) - Fix bpf_strnstr() to handle suffix match cases (Rong Tao)" * tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf: selftests/bpf: Skip timer cases when bpf_timer is not supported bpf: Reject bpf_timer for PREEMPT_RT tcp_bpf: Call sk_msg_free() when tcp_bpf_send_verdict() fails to allocate psock->cork. 
bpf: Tell memcg to use allow_spinning=false path in bpf_timer_init() bpf: Allow fall back to interpreter for programs with stack size <= 512 rqspinlock: Choose trylock fallback for NMI waiters xsk: Fix immature cq descriptor production bpf: Update the list of BPF selftests maintainers selftests/bpf: Add tests for bpf_strnstr selftests/bpf: Fix "expression result unused" warnings with icecc bpf: Fix bpf_strnstr() to handle suffix match cases better selftests/bpf: Extend crypto_sanity selftest with invalid dst buffer bpf: Fix out-of-bounds dynptr write in bpf_crypto_crypt bpf: Check the helper function is valid in get_helper_proto bpf, cpumap: Disable page_pool direct xdp_return need larger scope
2 parents 4f553c1 + 91f34aa commit 02ffd6f

File tree

20 files changed

+213
-53
lines changed

20 files changed

+213
-53
lines changed

MAINTAINERS

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4683,7 +4683,6 @@ F: security/bpf/
46834683
BPF [SELFTESTS] (Test Runners & Infrastructure)
46844684
M: Andrii Nakryiko <[email protected]>
46854685
M: Eduard Zingerman <[email protected]>
4686-
R: Mykola Lysenko <[email protected]>
46874686
46884687
S: Maintained
46894688
F: tools/testing/selftests/bpf/

kernel/bpf/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,3 +62,4 @@ CFLAGS_REMOVE_bpf_lru_list.o = $(CC_FLAGS_FTRACE)
6262
CFLAGS_REMOVE_queue_stack_maps.o = $(CC_FLAGS_FTRACE)
6363
CFLAGS_REMOVE_lpm_trie.o = $(CC_FLAGS_FTRACE)
6464
CFLAGS_REMOVE_ringbuf.o = $(CC_FLAGS_FTRACE)
65+
CFLAGS_REMOVE_rqspinlock.o = $(CC_FLAGS_FTRACE)

kernel/bpf/core.c

Lines changed: 13 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2366,8 +2366,7 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
23662366
const struct bpf_insn *insn)
23672367
{
23682368
/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2369-
* is not working properly, or interpreter is being used when
2370-
* prog->jit_requested is not 0, so warn about it!
2369+
* is not working properly, so warn about it!
23712370
*/
23722371
WARN_ON_ONCE(1);
23732372
return 0;
@@ -2468,8 +2467,9 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
24682467
return ret;
24692468
}
24702469

2471-
static void bpf_prog_select_func(struct bpf_prog *fp)
2470+
static bool bpf_prog_select_interpreter(struct bpf_prog *fp)
24722471
{
2472+
bool select_interpreter = false;
24732473
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
24742474
u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
24752475
u32 idx = (round_up(stack_depth, 32) / 32) - 1;
@@ -2478,15 +2478,16 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
24782478
* But for non-JITed programs, we don't need bpf_func, so no bounds
24792479
* check needed.
24802480
*/
2481-
if (!fp->jit_requested &&
2482-
!WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
2481+
if (idx < ARRAY_SIZE(interpreters)) {
24832482
fp->bpf_func = interpreters[idx];
2483+
select_interpreter = true;
24842484
} else {
24852485
fp->bpf_func = __bpf_prog_ret0_warn;
24862486
}
24872487
#else
24882488
fp->bpf_func = __bpf_prog_ret0_warn;
24892489
#endif
2490+
return select_interpreter;
24902491
}
24912492

24922493
/**
@@ -2505,7 +2506,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
25052506
/* In case of BPF to BPF calls, verifier did all the prep
25062507
* work with regards to JITing, etc.
25072508
*/
2508-
bool jit_needed = fp->jit_requested;
2509+
bool jit_needed = false;
25092510

25102511
if (fp->bpf_func)
25112512
goto finalize;
@@ -2514,7 +2515,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
25142515
bpf_prog_has_kfunc_call(fp))
25152516
jit_needed = true;
25162517

2517-
bpf_prog_select_func(fp);
2518+
if (!bpf_prog_select_interpreter(fp))
2519+
jit_needed = true;
25182520

25192521
/* eBPF JITs can rewrite the program in case constant
25202522
* blinding is active. However, in case of error during
@@ -3024,7 +3026,10 @@ EXPORT_SYMBOL_GPL(bpf_event_output);
30243026

30253027
/* Always built-in helper functions. */
30263028
const struct bpf_func_proto bpf_tail_call_proto = {
3027-
.func = NULL,
3029+
/* func is unused for tail_call, we set it to pass the
3030+
* get_helper_proto check
3031+
*/
3032+
.func = BPF_PTR_POISON,
30283033
.gpl_only = false,
30293034
.ret_type = RET_VOID,
30303035
.arg1_type = ARG_PTR_TO_CTX,

kernel/bpf/cpumap.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -186,7 +186,6 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
186186
struct xdp_buff xdp;
187187
int i, nframes = 0;
188188

189-
xdp_set_return_frame_no_direct();
190189
xdp.rxq = &rxq;
191190

192191
for (i = 0; i < n; i++) {
@@ -231,7 +230,6 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
231230
}
232231
}
233232

234-
xdp_clear_return_frame_no_direct();
235233
stats->pass += nframes;
236234

237235
return nframes;
@@ -255,6 +253,7 @@ static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
255253

256254
rcu_read_lock();
257255
bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
256+
xdp_set_return_frame_no_direct();
258257

259258
ret->xdp_n = cpu_map_bpf_prog_run_xdp(rcpu, frames, ret->xdp_n, stats);
260259
if (unlikely(ret->skb_n))
@@ -264,6 +263,7 @@ static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
264263
if (stats->redirect)
265264
xdp_do_flush();
266265

266+
xdp_clear_return_frame_no_direct();
267267
bpf_net_ctx_clear(bpf_net_ctx);
268268
rcu_read_unlock();
269269

kernel/bpf/crypto.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -278,7 +278,7 @@ static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
278278
siv_len = siv ? __bpf_dynptr_size(siv) : 0;
279279
src_len = __bpf_dynptr_size(src);
280280
dst_len = __bpf_dynptr_size(dst);
281-
if (!src_len || !dst_len)
281+
if (!src_len || !dst_len || src_len > dst_len)
282282
return -EINVAL;
283283

284284
if (siv_len != ctx->siv_len)

kernel/bpf/helpers.c

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1274,8 +1274,11 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u
12741274
goto out;
12751275
}
12761276

1277-
/* allocate hrtimer via map_kmalloc to use memcg accounting */
1278-
cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
1277+
/* Allocate via bpf_map_kmalloc_node() for memcg accounting. Until
1278+
* kmalloc_nolock() is available, avoid locking issues by using
1279+
* __GFP_HIGH (GFP_ATOMIC & ~__GFP_RECLAIM).
1280+
*/
1281+
cb = bpf_map_kmalloc_node(map, size, __GFP_HIGH, map->numa_node);
12791282
if (!cb) {
12801283
ret = -ENOMEM;
12811284
goto out;
@@ -3664,10 +3667,17 @@ __bpf_kfunc int bpf_strnstr(const char *s1__ign, const char *s2__ign, size_t len
36643667

36653668
guard(pagefault)();
36663669
for (i = 0; i < XATTR_SIZE_MAX; i++) {
3667-
for (j = 0; i + j < len && j < XATTR_SIZE_MAX; j++) {
3670+
for (j = 0; i + j <= len && j < XATTR_SIZE_MAX; j++) {
36683671
__get_kernel_nofault(&c2, s2__ign + j, char, err_out);
36693672
if (c2 == '\0')
36703673
return i;
3674+
/*
3675+
* We allow reading an extra byte from s2 (note the
3676+
* `i + j <= len` above) to cover the case when s2 is
3677+
* a suffix of the first len chars of s1.
3678+
*/
3679+
if (i + j == len)
3680+
break;
36713681
__get_kernel_nofault(&c1, s1__ign + j, char, err_out);
36723682
if (c1 == '\0')
36733683
return -ENOENT;

kernel/bpf/rqspinlock.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -471,7 +471,7 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
471471
* any MCS node. This is not the most elegant solution, but is
472472
* simple enough.
473473
*/
474-
if (unlikely(idx >= _Q_MAX_NODES)) {
474+
if (unlikely(idx >= _Q_MAX_NODES || in_nmi())) {
475475
lockevent_inc(lock_no_node);
476476
RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT);
477477
while (!queued_spin_trylock(lock)) {

kernel/bpf/verifier.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8547,6 +8547,10 @@ static int process_timer_func(struct bpf_verifier_env *env, int regno,
85478547
verifier_bug(env, "Two map pointers in a timer helper");
85488548
return -EFAULT;
85498549
}
8550+
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
8551+
verbose(env, "bpf_timer cannot be used for PREEMPT_RT.\n");
8552+
return -EOPNOTSUPP;
8553+
}
85508554
meta->map_uid = reg->map_uid;
85518555
meta->map_ptr = map;
85528556
return 0;
@@ -11354,7 +11358,7 @@ static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
1135411358
return -EINVAL;
1135511359

1135611360
*ptr = env->ops->get_func_proto(func_id, env->prog);
11357-
return *ptr ? 0 : -EINVAL;
11361+
return *ptr && (*ptr)->func ? 0 : -EINVAL;
1135811362
}
1135911363

1136011364
static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,

net/ipv4/tcp_bpf.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -408,8 +408,11 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
408408
if (!psock->cork) {
409409
psock->cork = kzalloc(sizeof(*psock->cork),
410410
GFP_ATOMIC | __GFP_NOWARN);
411-
if (!psock->cork)
411+
if (!psock->cork) {
412+
sk_msg_free(sk, msg);
413+
*copied = 0;
412414
return -ENOMEM;
415+
}
413416
}
414417
memcpy(psock->cork, msg, sizeof(*msg));
415418
return 0;

0 commit comments

Comments
 (0)