Skip to content

Commit 44537b8

Browse files
Geliang Tang authored and intel-lab-lkp committed
Squash to "selftests/bpf: Add bpf_burst scheduler & test"
Use the newly added bpf_for_each() helper to walk the conn_list. Use the bpf_mptcp_send_info_to_ssk() helper. Drop bpf_subflow_send_info; use subflow_send_info instead.

Signed-off-by: Geliang Tang <[email protected]>
1 parent a0515d3 commit 44537b8

File tree

1 file changed

+24
-38
lines changed

1 file changed

+24
-38
lines changed

tools/testing/selftests/bpf/progs/mptcp_bpf_burst.c

Lines changed: 24 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -11,11 +11,6 @@ char _license[] SEC("license") = "GPL";
1111

1212
#define min(a, b) ((a) < (b) ? (a) : (b))
1313

14-
struct bpf_subflow_send_info {
15-
__u8 subflow_id;
16-
__u64 linger_time;
17-
};
18-
1914
extern bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) __ksym;
2015
extern void mptcp_set_timeout(struct sock *sk) __ksym;
2116
extern __u64 mptcp_wnd_end(const struct mptcp_sock *msk) __ksym;
@@ -70,7 +65,7 @@ void BPF_PROG(mptcp_sched_burst_release, struct mptcp_sock *msk)
7065
static int bpf_burst_get_send(struct mptcp_sock *msk,
7166
struct mptcp_sched_data *data)
7267
{
73-
struct bpf_subflow_send_info send_info[SSK_MODE_MAX];
68+
struct subflow_send_info send_info[SSK_MODE_MAX];
7469
struct mptcp_subflow_context *subflow;
7570
struct sock *sk = (struct sock *)msk;
7671
__u32 pace, burst, wmem;
@@ -80,20 +75,14 @@ static int bpf_burst_get_send(struct mptcp_sock *msk,
8075

8176
/* pick the subflow with the lower wmem/wspace ratio */
8277
for (i = 0; i < SSK_MODE_MAX; ++i) {
83-
send_info[i].subflow_id = MPTCP_SUBFLOWS_MAX;
78+
send_info[i].ssk = NULL;
8479
send_info[i].linger_time = -1;
8580
}
8681

87-
for (i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
88-
bool backup;
89-
90-
subflow = bpf_mptcp_subflow_ctx_by_pos(data, i);
91-
if (!subflow)
92-
break;
82+
bpf_for_each(mptcp_subflow, subflow, msk) {
83+
bool backup = subflow->backup || subflow->request_bkup;
9384

94-
backup = subflow->backup || subflow->request_bkup;
95-
96-
ssk = mptcp_subflow_tcp_sock(subflow);
85+
ssk = bpf_mptcp_subflow_tcp_sock(subflow);
9786
if (!mptcp_subflow_active(subflow))
9887
continue;
9988

@@ -109,23 +98,24 @@ static int bpf_burst_get_send(struct mptcp_sock *msk,
10998

11099
linger_time = div_u64((__u64)ssk->sk_wmem_queued << 32, pace);
111100
if (linger_time < send_info[backup].linger_time) {
112-
send_info[backup].subflow_id = i;
101+
send_info[backup].ssk = ssk;
113102
send_info[backup].linger_time = linger_time;
114103
}
115104
}
116105
mptcp_set_timeout(sk);
117106

118107
/* pick the best backup if no other subflow is active */
119108
if (!nr_active)
120-
send_info[SSK_MODE_ACTIVE].subflow_id = send_info[SSK_MODE_BACKUP].subflow_id;
109+
send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk;
121110

122-
subflow = bpf_mptcp_subflow_ctx_by_pos(data, send_info[SSK_MODE_ACTIVE].subflow_id);
123-
if (!subflow)
124-
return -1;
125-
ssk = mptcp_subflow_tcp_sock(subflow);
111+
ssk = bpf_mptcp_send_info_to_ssk(&send_info[SSK_MODE_ACTIVE]);
126112
if (!ssk || !sk_stream_memory_free(ssk))
127113
return -1;
128114

115+
subflow = bpf_mptcp_subflow_ctx(ssk);
116+
if (!subflow)
117+
return -1;
118+
129119
burst = min(MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
130120
wmem = ssk->sk_wmem_queued;
131121
if (!burst)
@@ -144,20 +134,16 @@ static int bpf_burst_get_send(struct mptcp_sock *msk,
144134
static int bpf_burst_get_retrans(struct mptcp_sock *msk,
145135
struct mptcp_sched_data *data)
146136
{
147-
int backup = MPTCP_SUBFLOWS_MAX, pick = MPTCP_SUBFLOWS_MAX, subflow_id;
137+
struct sock *backup = NULL, *pick = NULL;
148138
struct mptcp_subflow_context *subflow;
149139
int min_stale_count = INT_MAX;
150-
struct sock *ssk;
151140

152-
for (int i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
153-
subflow = bpf_mptcp_subflow_ctx_by_pos(data, i);
154-
if (!subflow)
155-
break;
141+
bpf_for_each(mptcp_subflow, subflow, msk) {
142+
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
156143

157144
if (!mptcp_subflow_active(subflow))
158145
continue;
159146

160-
ssk = mptcp_subflow_tcp_sock(subflow);
161147
/* still data outstanding at TCP level? skip this */
162148
if (!tcp_rtx_and_write_queues_empty(ssk)) {
163149
mptcp_pm_subflow_chk_stale(msk, ssk);
@@ -166,23 +152,23 @@ static int bpf_burst_get_retrans(struct mptcp_sock *msk,
166152
}
167153

168154
if (subflow->backup || subflow->request_bkup) {
169-
if (backup == MPTCP_SUBFLOWS_MAX)
170-
backup = i;
155+
if (!backup)
156+
backup = ssk;
171157
continue;
172158
}
173159

174-
if (pick == MPTCP_SUBFLOWS_MAX)
175-
pick = i;
160+
if (!pick)
161+
pick = ssk;
176162
}
177163

178-
if (pick < MPTCP_SUBFLOWS_MAX) {
179-
subflow_id = pick;
164+
if (pick)
180165
goto out;
181-
}
182-
subflow_id = min_stale_count > 1 ? backup : MPTCP_SUBFLOWS_MAX;
166+
pick = min_stale_count > 1 ? backup : NULL;
183167

184168
out:
185-
subflow = bpf_mptcp_subflow_ctx_by_pos(data, subflow_id);
169+
if (!pick)
170+
return -1;
171+
subflow = bpf_mptcp_subflow_ctx(pick);
186172
if (!subflow)
187173
return -1;
188174
mptcp_subflow_set_scheduled(subflow, true);

0 commit comments

Comments (0)