 
 #ifdef CONFIG_BPF_JIT
 static struct bpf_struct_ops bpf_mptcp_sched_ops;
-static const struct btf_type *mptcp_sock_type, *mptcp_subflow_type __read_mostly;
-static u32 mptcp_sock_id, mptcp_subflow_id;
+static u32 mptcp_sock_id,
+	   mptcp_subflow_id;
+
+/* MPTCP BPF packet scheduler */
 
 static const struct bpf_func_proto *
 bpf_mptcp_sched_get_func_proto(enum bpf_func_id func_id,
@@ -43,12 +45,10 @@ static int bpf_mptcp_sched_btf_struct_access(struct bpf_verifier_log *log,
 					      const struct bpf_reg_state *reg,
 					      int off, int size)
 {
-	const struct btf_type *t;
+	u32 id = reg->btf_id;
 	size_t end;
 
-	t = btf_type_by_id(reg->btf, reg->btf_id);
-
-	if (t == mptcp_sock_type) {
+	if (id == mptcp_sock_id) {
 		switch (off) {
 		case offsetof(struct mptcp_sock, snd_burst):
 			end = offsetofend(struct mptcp_sock, snd_burst);
@@ -58,7 +58,7 @@ static int bpf_mptcp_sched_btf_struct_access(struct bpf_verifier_log *log,
 				off);
 			return -EACCES;
 		}
-	} else if (t == mptcp_subflow_type) {
+	} else if (id == mptcp_subflow_id) {
 		switch (off) {
 		case offsetof(struct mptcp_subflow_context, avg_pacing_rate):
 			end = offsetofend(struct mptcp_subflow_context, avg_pacing_rate);
@@ -75,7 +75,7 @@ static int bpf_mptcp_sched_btf_struct_access(struct bpf_verifier_log *log,
 
 	if (off + size > end) {
 		bpf_log(log, "access beyond %s at off %u size %u ended at %zu",
-			t == mptcp_sock_type ? "mptcp_sock" : "mptcp_subflow_context",
+			id == mptcp_sock_id ? "mptcp_sock" : "mptcp_subflow_context",
 			off, size, end);
 		return -EACCES;
 	}
@@ -113,7 +113,6 @@ static int bpf_mptcp_sched_init_member(const struct btf_type *t,
 	const struct mptcp_sched_ops *usched;
 	struct mptcp_sched_ops *sched;
 	u32 moff;
-	int ret;
 
 	usched = (const struct mptcp_sched_ops *)udata;
 	sched = (struct mptcp_sched_ops *)kdata;
@@ -124,12 +123,7 @@ static int bpf_mptcp_sched_init_member(const struct btf_type *t,
 		if (bpf_obj_name_cpy(sched->name, usched->name,
 				     sizeof(sched->name)) <= 0)
 			return -EINVAL;
-
-		rcu_read_lock();
-		ret = mptcp_sched_find(usched->name) ? -EEXIST : 1;
-		rcu_read_unlock();
-
-		return ret;
+		return 1;
 	}
 
 	return 0;
@@ -144,18 +138,21 @@ static int bpf_mptcp_sched_init(struct btf *btf)
 	if (type_id < 0)
 		return -EINVAL;
 	mptcp_sock_id = type_id;
-	mptcp_sock_type = btf_type_by_id(btf, mptcp_sock_id);
 
 	type_id = btf_find_by_name_kind(btf, "mptcp_subflow_context",
 					BTF_KIND_STRUCT);
 	if (type_id < 0)
 		return -EINVAL;
 	mptcp_subflow_id = type_id;
-	mptcp_subflow_type = btf_type_by_id(btf, mptcp_subflow_id);
 
 	return 0;
 }
 
+static int bpf_mptcp_sched_validate(void *kdata)
+{
+	return mptcp_validate_scheduler(kdata);
+}
+
 static int __bpf_mptcp_sched_get_send(struct mptcp_sock *msk)
 {
 	return 0;
@@ -188,6 +185,7 @@ static struct bpf_struct_ops bpf_mptcp_sched_ops = {
 	.check_member	= bpf_mptcp_sched_check_member,
 	.init_member	= bpf_mptcp_sched_init_member,
 	.init		= bpf_mptcp_sched_init,
+	.validate	= bpf_mptcp_sched_validate,
 	.name		= "mptcp_sched_ops",
 	.cfi_stubs	= &__bpf_mptcp_sched_ops,
 };
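
For context, this struct_ops is what a BPF packet scheduler registers against from the BPF program side. The sketch below is a minimal, hypothetical example modelled on the MPTCP BPF selftests, not part of this commit: the get_send hook name is inferred from the __bpf_mptcp_sched_get_send() stub above, and the bpf_mptcp_subflow_ctx() / mptcp_subflow_set_scheduled() kfuncs plus the "bpf_first" naming are assumed from the selftest conventions.

/* Hypothetical BPF-side scheduler sketch (assumptions noted above):
 * always schedule the first subflow, similar to the "first" selftest.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* kfuncs assumed to be exported by the MPTCP BPF scheduler series */
extern struct mptcp_subflow_context *
bpf_mptcp_subflow_ctx(const struct sock *sk) __ksym;
extern void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
					bool scheduled) __ksym;

SEC("struct_ops")
int BPF_PROG(bpf_first_get_send, struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	/* Pick the initial subflow and mark it as scheduled */
	subflow = bpf_mptcp_subflow_ctx(msk->first);
	if (!subflow)
		return -1;

	mptcp_subflow_set_scheduled(subflow, true);
	return 0;
}

SEC(".struct_ops")
struct mptcp_sched_ops first = {
	.get_send	= (void *)bpf_first_get_send,
	.name		= "bpf_first",
};

Once such a program is loaded, the scheduler would presumably be selected per namespace (e.g. via the net.mptcp.scheduler sysctl used by the MPTCP tree), with the new .validate callback rejecting schedulers that fail mptcp_validate_scheduler().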