@@ -3014,12 +3014,16 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
 }
 
 #ifdef CONFIG_BPF_SYSCALL
+union bpf_tcp_iter_batch_item {
+	struct sock *sk;
+};
+
 struct bpf_tcp_iter_state {
 	struct tcp_iter_state state;
 	unsigned int cur_sk;
 	unsigned int end_sk;
 	unsigned int max_sk;
-	struct sock **batch;
+	union bpf_tcp_iter_batch_item *batch;
 };
 
 struct bpf_iter__tcp {
@@ -3045,13 +3049,13 @@ static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
 	unsigned int cur_sk = iter->cur_sk;
 
 	while (cur_sk < iter->end_sk)
-		sock_gen_put(iter->batch[cur_sk++]);
+		sock_gen_put(iter->batch[cur_sk++].sk);
 }
 
 static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
 				      unsigned int new_batch_sz, gfp_t flags)
 {
-	struct sock **new_batch;
+	union bpf_tcp_iter_batch_item *new_batch;
 
 	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
 			     flags | __GFP_NOWARN);
@@ -3075,15 +3079,15 @@ static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
 	struct sock *sk;
 
 	sock_hold(*start_sk);
-	iter->batch[iter->end_sk++] = *start_sk;
+	iter->batch[iter->end_sk++].sk = *start_sk;
 
 	sk = sk_nulls_next(*start_sk);
 	*start_sk = NULL;
 	sk_nulls_for_each_from(sk, node) {
 		if (seq_sk_match(seq, sk)) {
 			if (iter->end_sk < iter->max_sk) {
 				sock_hold(sk);
-				iter->batch[iter->end_sk++] = sk;
+				iter->batch[iter->end_sk++].sk = sk;
 			} else if (!*start_sk) {
 				/* Remember where we left off. */
 				*start_sk = sk;
@@ -3104,15 +3108,15 @@ static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
 	struct sock *sk;
 
 	sock_hold(*start_sk);
-	iter->batch[iter->end_sk++] = *start_sk;
+	iter->batch[iter->end_sk++].sk = *start_sk;
 
 	sk = sk_nulls_next(*start_sk);
 	*start_sk = NULL;
 	sk_nulls_for_each_from(sk, node) {
 		if (seq_sk_match(seq, sk)) {
 			if (iter->end_sk < iter->max_sk) {
 				sock_hold(sk);
-				iter->batch[iter->end_sk++] = sk;
+				iter->batch[iter->end_sk++].sk = sk;
 			} else if (!*start_sk) {
 				/* Remember where we left off. */
 				*start_sk = sk;
@@ -3216,7 +3220,7 @@ static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
 	WARN_ON_ONCE(iter->end_sk != expected);
 done:
 	bpf_iter_tcp_unlock_bucket(seq);
-	return iter->batch[0];
+	return iter->batch[0].sk;
 }
 
 static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3251,11 +3255,11 @@ static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		 * st->bucket. See tcp_seek_last_pos().
 		 */
 		st->offset++;
-		sock_gen_put(iter->batch[iter->cur_sk++]);
+		sock_gen_put(iter->batch[iter->cur_sk++].sk);
 	}
 
 	if (iter->cur_sk < iter->end_sk)
-		sk = iter->batch[iter->cur_sk];
+		sk = iter->batch[iter->cur_sk].sk;
 	else
 		sk = bpf_iter_tcp_batch(seq);
 
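The change above is mechanical: the flat array of struct sock pointers becomes an array of one-member unions, and every read or write of a slot gains a ".sk". Below is a minimal, self-contained userspace sketch of that same wrapping pattern. All names here (item, demo_batch, demo_put_batch) are hypothetical stand-ins for illustration only, not kernel APIs, and free() stands in for sock_gen_put().

#include <stdio.h>
#include <stdlib.h>

struct sock { int id; };            /* stand-in for the kernel's struct sock */

union item {                        /* mirrors union bpf_tcp_iter_batch_item */
	struct sock *sk;
};

struct demo_batch {
	unsigned int cur_sk;
	unsigned int end_sk;
	unsigned int max_sk;
	union item *batch;          /* was: struct sock **batch */
};

/* Release every remaining entry, like bpf_iter_tcp_put_batch() does. */
static void demo_put_batch(struct demo_batch *b)
{
	unsigned int cur_sk = b->cur_sk;

	while (cur_sk < b->end_sk)
		free(b->batch[cur_sk++].sk);   /* kernel uses sock_gen_put() */
}

int main(void)
{
	struct demo_batch b = { .max_sk = 4 };
	unsigned int i;

	/* sizeof(*batch) keeps working unchanged after the type switch. */
	b.batch = malloc(sizeof(*b.batch) * b.max_sk);
	if (!b.batch)
		return 1;

	for (i = 0; i < b.max_sk; i++) {
		struct sock *sk = malloc(sizeof(*sk));

		if (!sk)
			break;
		sk->id = (int)i;
		b.batch[b.end_sk++].sk = sk;   /* note the .sk, as in the diff */
	}

	for (i = b.cur_sk; i < b.end_sk; i++)
		printf("sk %d\n", b.batch[i].sk->id);

	demo_put_batch(&b);
	free(b.batch);
	return 0;
}

Because every slot is accessed through the union member, only the declaration and the ".sk" accessors change; the indexing, sizing, and reallocation logic stays as it was.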