Skip to content

Commit e28c444

Browse files
committed
Alexei Starovoitov says: ==================== pull-request: bpf 2022-10-23 We've added 7 non-merge commits during the last 18 day(s) which contain a total of 8 files changed, 69 insertions(+), 5 deletions(-). The main changes are: 1) Wait for busy refill_work when destroying bpf memory allocator, from Hou. 2) Allow bpf_user_ringbuf_drain() callbacks to return 1, from David. 3) Fix dispatcher patchable function entry to 5 bytes nop, from Jiri. 4) Prevent decl_tag from being referenced in func_proto, from Stanislav. * tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf: bpf: Use __llist_del_all() whenever possible during memory draining bpf: Wait for busy refill_work when destroying bpf memory allocator bpf: Fix dispatcher patchable function entry to 5 bytes nop bpf: prevent decl_tag from being referenced in func_proto selftests/bpf: Add reproducer for decl_tag in func_proto return type selftests/bpf: Make bpf_user_ringbuf_drain() selftest callback return 1 bpf: Allow bpf_user_ringbuf_drain() callbacks to return 1 ==================== Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
2 parents ec791d8 + bed54ae commit e28c444

File tree

8 files changed

+69
-5
lines changed

8 files changed

+69
-5
lines changed

arch/x86/net/bpf_jit_comp.c

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
#include <linux/bpf.h>
1212
#include <linux/memory.h>
1313
#include <linux/sort.h>
14+
#include <linux/init.h>
1415
#include <asm/extable.h>
1516
#include <asm/set_memory.h>
1617
#include <asm/nospec-branch.h>
@@ -388,6 +389,18 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
388389
return ret;
389390
}
390391

392+
int __init bpf_arch_init_dispatcher_early(void *ip)
393+
{
394+
const u8 *nop_insn = x86_nops[5];
395+
396+
if (is_endbr(*(u32 *)ip))
397+
ip += ENDBR_INSN_SIZE;
398+
399+
if (memcmp(ip, nop_insn, X86_PATCH_SIZE))
400+
text_poke_early(ip, nop_insn, X86_PATCH_SIZE);
401+
return 0;
402+
}
403+
391404
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
392405
void *old_addr, void *new_addr)
393406
{

include/linux/bpf.h

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
#include <linux/bpfptr.h>
2828
#include <linux/btf.h>
2929
#include <linux/rcupdate_trace.h>
30+
#include <linux/init.h>
3031

3132
struct bpf_verifier_env;
3233
struct bpf_verifier_log;
@@ -970,6 +971,8 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
970971
struct bpf_attach_target_info *tgt_info);
971972
void bpf_trampoline_put(struct bpf_trampoline *tr);
972973
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
974+
int __init bpf_arch_init_dispatcher_early(void *ip);
975+
973976
#define BPF_DISPATCHER_INIT(_name) { \
974977
.mutex = __MUTEX_INITIALIZER(_name.mutex), \
975978
.func = &_name##_func, \
@@ -983,6 +986,13 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
983986
}, \
984987
}
985988

989+
#define BPF_DISPATCHER_INIT_CALL(_name) \
990+
static int __init _name##_init(void) \
991+
{ \
992+
return bpf_arch_init_dispatcher_early(_name##_func); \
993+
} \
994+
early_initcall(_name##_init)
995+
986996
#ifdef CONFIG_X86_64
987997
#define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
988998
#else
@@ -1000,7 +1010,9 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
10001010
} \
10011011
EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
10021012
struct bpf_dispatcher bpf_dispatcher_##name = \
1003-
BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
1013+
BPF_DISPATCHER_INIT(bpf_dispatcher_##name); \
1014+
BPF_DISPATCHER_INIT_CALL(bpf_dispatcher_##name);
1015+
10041016
#define DECLARE_BPF_DISPATCHER(name) \
10051017
unsigned int bpf_dispatcher_##name##_func( \
10061018
const void *ctx, \

kernel/bpf/btf.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4436,6 +4436,11 @@ static int btf_func_proto_check(struct btf_verifier_env *env,
44364436
return -EINVAL;
44374437
}
44384438

4439+
if (btf_type_is_resolve_source_only(ret_type)) {
4440+
btf_verifier_log_type(env, t, "Invalid return type");
4441+
return -EINVAL;
4442+
}
4443+
44394444
if (btf_type_needs_resolve(ret_type) &&
44404445
!env_type_is_resolved(env, ret_type_id)) {
44414446
err = btf_resolve(env, ret_type, ret_type_id);

kernel/bpf/dispatcher.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
#include <linux/hash.h>
55
#include <linux/bpf.h>
66
#include <linux/filter.h>
7+
#include <linux/init.h>
78

89
/* The BPF dispatcher is a multiway branch code generator. The
910
* dispatcher is a mechanism to avoid the performance penalty of an
@@ -90,6 +91,11 @@ int __weak arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int n
9091
return -ENOTSUPP;
9192
}
9293

94+
int __weak __init bpf_arch_init_dispatcher_early(void *ip)
95+
{
96+
return -ENOTSUPP;
97+
}
98+
9399
static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *buf)
94100
{
95101
s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0];

kernel/bpf/memalloc.c

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -418,14 +418,17 @@ static void drain_mem_cache(struct bpf_mem_cache *c)
418418
/* No progs are using this bpf_mem_cache, but htab_map_free() called
419419
* bpf_mem_cache_free() for all remaining elements and they can be in
420420
* free_by_rcu or in waiting_for_gp lists, so drain those lists now.
421+
*
422+
* Except for waiting_for_gp list, there are no concurrent operations
423+
* on these lists, so it is safe to use __llist_del_all().
421424
*/
422425
llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
423426
free_one(c, llnode);
424427
llist_for_each_safe(llnode, t, llist_del_all(&c->waiting_for_gp))
425428
free_one(c, llnode);
426-
llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist))
429+
llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist))
427430
free_one(c, llnode);
428-
llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
431+
llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist_extra))
429432
free_one(c, llnode);
430433
}
431434

@@ -493,6 +496,16 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
493496
rcu_in_progress = 0;
494497
for_each_possible_cpu(cpu) {
495498
c = per_cpu_ptr(ma->cache, cpu);
499+
/*
500+
* refill_work may be unfinished for PREEMPT_RT kernel
501+
* in which irq work is invoked in a per-CPU RT thread.
502+
* It is also possible for kernel with
503+
* arch_irq_work_has_interrupt() being false and irq
504+
* work is invoked in timer interrupt. So waiting for
505+
* the completion of irq work to ease the handling of
506+
* concurrency.
507+
*/
508+
irq_work_sync(&c->refill_work);
496509
drain_mem_cache(c);
497510
rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
498511
}
@@ -507,6 +520,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
507520
cc = per_cpu_ptr(ma->caches, cpu);
508521
for (i = 0; i < NUM_CACHES; i++) {
509522
c = &cc->cache[i];
523+
irq_work_sync(&c->refill_work);
510524
drain_mem_cache(c);
511525
rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
512526
}

kernel/bpf/verifier.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6946,6 +6946,7 @@ static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
69466946
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
69476947

69486948
callee->in_callback_fn = true;
6949+
callee->callback_ret_range = tnum_range(0, 1);
69496950
return 0;
69506951
}
69516952

tools/testing/selftests/bpf/prog_tests/btf.c

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3935,6 +3935,19 @@ static struct btf_raw_test raw_tests[] = {
39353935
.btf_load_err = true,
39363936
.err_str = "Invalid type_id",
39373937
},
3938+
{
3939+
.descr = "decl_tag test #16, func proto, return type",
3940+
.raw_types = {
3941+
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3942+
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
3943+
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), 2), (-1), /* [3] */
3944+
BTF_FUNC_PROTO_ENC(3, 0), /* [4] */
3945+
BTF_END_RAW,
3946+
},
3947+
BTF_STR_SEC("\0local\0tag1"),
3948+
.btf_load_err = true,
3949+
.err_str = "Invalid return type",
3950+
},
39383951
{
39393952
.descr = "type_tag test #1",
39403953
.raw_types = {

tools/testing/selftests/bpf/progs/user_ringbuf_success.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,14 +47,14 @@ record_sample(struct bpf_dynptr *dynptr, void *context)
4747
if (status) {
4848
bpf_printk("bpf_dynptr_read() failed: %d\n", status);
4949
err = 1;
50-
return 0;
50+
return 1;
5151
}
5252
} else {
5353
sample = bpf_dynptr_data(dynptr, 0, sizeof(*sample));
5454
if (!sample) {
5555
bpf_printk("Unexpectedly failed to get sample\n");
5656
err = 2;
57-
return 0;
57+
return 1;
5858
}
5959
stack_sample = *sample;
6060
}

0 commit comments

Comments
 (0)