@@ -11987,6 +11987,16 @@ static bool is_kfunc_arg_res_spin_lock(const struct btf *btf, const struct btf_p
11987
11987
return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RES_SPIN_LOCK_ID);
11988
11988
}
11989
11989
11990
+ static bool is_rbtree_node_type(const struct btf_type *t)
11991
+ {
11992
+ return t == btf_type_by_id(btf_vmlinux, kf_arg_btf_ids[KF_ARG_RB_NODE_ID]);
11993
+ }
11994
+
11995
+ static bool is_list_node_type(const struct btf_type *t)
11996
+ {
11997
+ return t == btf_type_by_id(btf_vmlinux, kf_arg_btf_ids[KF_ARG_LIST_NODE_ID]);
11998
+ }
11999
+
11990
12000
static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
11991
12001
const struct btf_param *arg)
11992
12002
{
@@ -12069,13 +12079,18 @@ enum special_kfunc_type {
12069
12079
KF_bpf_list_push_back_impl,
12070
12080
KF_bpf_list_pop_front,
12071
12081
KF_bpf_list_pop_back,
12082
+ KF_bpf_list_front,
12083
+ KF_bpf_list_back,
12072
12084
KF_bpf_cast_to_kern_ctx,
12073
12085
KF_bpf_rdonly_cast,
12074
12086
KF_bpf_rcu_read_lock,
12075
12087
KF_bpf_rcu_read_unlock,
12076
12088
KF_bpf_rbtree_remove,
12077
12089
KF_bpf_rbtree_add_impl,
12078
12090
KF_bpf_rbtree_first,
12091
+ KF_bpf_rbtree_root,
12092
+ KF_bpf_rbtree_left,
12093
+ KF_bpf_rbtree_right,
12079
12094
KF_bpf_dynptr_from_skb,
12080
12095
KF_bpf_dynptr_from_xdp,
12081
12096
KF_bpf_dynptr_slice,
@@ -12111,11 +12126,16 @@ BTF_ID(func, bpf_list_push_front_impl)
12111
12126
BTF_ID(func, bpf_list_push_back_impl)
12112
12127
BTF_ID(func, bpf_list_pop_front)
12113
12128
BTF_ID(func, bpf_list_pop_back)
12129
+ BTF_ID(func, bpf_list_front)
12130
+ BTF_ID(func, bpf_list_back)
12114
12131
BTF_ID(func, bpf_cast_to_kern_ctx)
12115
12132
BTF_ID(func, bpf_rdonly_cast)
12116
12133
BTF_ID(func, bpf_rbtree_remove)
12117
12134
BTF_ID(func, bpf_rbtree_add_impl)
12118
12135
BTF_ID(func, bpf_rbtree_first)
12136
+ BTF_ID(func, bpf_rbtree_root)
12137
+ BTF_ID(func, bpf_rbtree_left)
12138
+ BTF_ID(func, bpf_rbtree_right)
12119
12139
#ifdef CONFIG_NET
12120
12140
BTF_ID(func, bpf_dynptr_from_skb)
12121
12141
BTF_ID(func, bpf_dynptr_from_xdp)
@@ -12144,13 +12164,18 @@ BTF_ID(func, bpf_list_push_front_impl)
12144
12164
BTF_ID(func, bpf_list_push_back_impl)
12145
12165
BTF_ID(func, bpf_list_pop_front)
12146
12166
BTF_ID(func, bpf_list_pop_back)
12167
+ BTF_ID(func, bpf_list_front)
12168
+ BTF_ID(func, bpf_list_back)
12147
12169
BTF_ID(func, bpf_cast_to_kern_ctx)
12148
12170
BTF_ID(func, bpf_rdonly_cast)
12149
12171
BTF_ID(func, bpf_rcu_read_lock)
12150
12172
BTF_ID(func, bpf_rcu_read_unlock)
12151
12173
BTF_ID(func, bpf_rbtree_remove)
12152
12174
BTF_ID(func, bpf_rbtree_add_impl)
12153
12175
BTF_ID(func, bpf_rbtree_first)
12176
+ BTF_ID(func, bpf_rbtree_root)
12177
+ BTF_ID(func, bpf_rbtree_left)
12178
+ BTF_ID(func, bpf_rbtree_right)
12154
12179
#ifdef CONFIG_NET
12155
12180
BTF_ID(func, bpf_dynptr_from_skb)
12156
12181
BTF_ID(func, bpf_dynptr_from_xdp)
@@ -12579,14 +12604,19 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
12579
12604
return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
12580
12605
btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
12581
12606
btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
12582
- btf_id == special_kfunc_list[KF_bpf_list_pop_back];
12607
+ btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
12608
+ btf_id == special_kfunc_list[KF_bpf_list_front] ||
12609
+ btf_id == special_kfunc_list[KF_bpf_list_back];
12583
12610
}
12584
12611
12585
12612
/* Return true if @btf_id names one of the bpf_rbtree_* API kfuncs
 * (add/remove/traversal on a bpf_rb_root).
 */
static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
{
	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
	       btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
	       btf_id == special_kfunc_list[KF_bpf_rbtree_first] ||
	       btf_id == special_kfunc_list[KF_bpf_rbtree_root] ||
	       btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
	       btf_id == special_kfunc_list[KF_bpf_rbtree_right];
}
12591
12621
12592
12622
static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
@@ -12686,7 +12716,9 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
12686
12716
break;
12687
12717
case BPF_RB_NODE:
12688
12718
ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
12689
- kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]);
12719
+ kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
12720
+ kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
12721
+ kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_right]);
12690
12722
break;
12691
12723
default:
12692
12724
verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
@@ -13200,22 +13232,22 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
13200
13232
return ret;
13201
13233
break;
13202
13234
case KF_ARG_PTR_TO_RB_NODE:
13203
- if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove ]) {
13204
- if (!type_is_non_owning_ref( reg->type) || reg->ref_obj_id ) {
13205
- verbose(env, "rbtree_remove node input must be non-owning ref \n");
13235
+ if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl ]) {
13236
+ if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC) ) {
13237
+ verbose(env, "arg#%d expected pointer to allocated object \n", i );
13206
13238
return -EINVAL;
13207
13239
}
13208
- if (in_rbtree_lock_required_cb(env) ) {
13209
- verbose(env, "rbtree_remove not allowed in rbtree cb \n");
13240
+ if (!reg->ref_obj_id ) {
13241
+ verbose(env, "allocated object must be referenced \n");
13210
13242
return -EINVAL;
13211
13243
}
13212
13244
} else {
13213
- if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC) ) {
13214
- verbose(env, "arg#%d expected pointer to allocated object \n", i );
13245
+ if (!type_is_non_owning_ref( reg->type) && !reg->ref_obj_id ) {
13246
+ verbose(env, "%s can only take non-owning or refcounted bpf_rb_node pointer \n", func_name );
13215
13247
return -EINVAL;
13216
13248
}
13217
- if (!reg->ref_obj_id ) {
13218
- verbose(env, "allocated object must be referenced \n");
13249
+ if (in_rbtree_lock_required_cb(env) ) {
13250
+ verbose(env, "%s not allowed in rbtree cb \n", func_name );
13219
13251
return -EINVAL;
13220
13252
}
13221
13253
}
@@ -13745,13 +13777,11 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
13745
13777
insn_aux->kptr_struct_meta =
13746
13778
btf_find_struct_meta(meta.arg_btf,
13747
13779
meta.arg_btf_id);
13748
- } else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
13749
- meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
13780
+ } else if (is_list_node_type(ptr_type)) {
13750
13781
struct btf_field *field = meta.arg_list_head.field;
13751
13782
13752
13783
mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
13753
- } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
13754
- meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
13784
+ } else if (is_rbtree_node_type(ptr_type)) {
13755
13785
struct btf_field *field = meta.arg_rbtree_root.field;
13756
13786
13757
13787
mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
@@ -13881,7 +13911,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
13881
13911
if (is_kfunc_ret_null(&meta))
13882
13912
regs[BPF_REG_0].id = id;
13883
13913
regs[BPF_REG_0].ref_obj_id = id;
13884
- } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first] ) {
13914
+ } else if (is_rbtree_node_type(ptr_type) || is_list_node_type(ptr_type) ) {
13885
13915
ref_set_non_owning(env, ®s[BPF_REG_0]);
13886
13916
}
13887
13917
0 commit comments