Commit def5b7b

Paolo Abeni authored and kuba-moo committed
mptcp: plug races between subflow fail and subflow creation
We have races similar to the one addressed by the previous patch between subflow failing and additional subflow creation; they are just harder to trigger.

The solution is similar: use a separate flag to track the condition "the socket state prevents any additional subflow creation", protected by the fallback lock. The socket fallback sets such a flag, and so does receiving or sending an MP_FAIL option.

Since the field 'allow_infinite_fallback' is now always touched under the relevant lock, we can drop the ONCE annotation on write.

Fixes: 478d770 ("mptcp: send out MP_FAIL when data checksum fails")
Cc: [email protected]
Signed-off-by: Paolo Abeni <[email protected]>
Reviewed-by: Matthieu Baerts (NGI0) <[email protected]>
Signed-off-by: Matthieu Baerts (NGI0) <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent f8a1d9b commit def5b7b
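Below is a minimal user-space sketch of the locking scheme the commit message describes: subflow creation and the MP_FAIL/fallback paths serialize on the same lock, and the fail path only proceeds while infinite fallback is still allowed. The struct, function and field names (conn, try_add_subflow, handle_mp_fail) and the pthread mutex are simplified stand-ins for illustration; they are not the kernel code shown in the diff below.

/* Illustrative user-space model of the scheme described above; names are
 * simplified stand-ins, and a pthread mutex replaces the kernel spinlock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct conn {
	pthread_mutex_t fallback_lock;	/* models msk->fallback_lock */
	bool allow_infinite_fallback;	/* may still fall back to plain TCP */
	bool allow_subflows;		/* new subflows may still be created */
};

/* A new subflow is accepted only if the socket state still allows it; the
 * check happens under the same lock the fallback/MP_FAIL paths take.
 */
static bool try_add_subflow(struct conn *c)
{
	bool ok;

	pthread_mutex_lock(&c->fallback_lock);
	ok = c->allow_subflows;
	pthread_mutex_unlock(&c->fallback_lock);
	return ok;
}

/* Handling an MP_FAIL: only meaningful while infinite fallback is still
 * possible, and from then on no further subflows are allowed.
 */
static bool handle_mp_fail(struct conn *c)
{
	pthread_mutex_lock(&c->fallback_lock);
	if (!c->allow_infinite_fallback) {
		pthread_mutex_unlock(&c->fallback_lock);
		return false;
	}
	c->allow_subflows = false;
	pthread_mutex_unlock(&c->fallback_lock);
	return true;
}

int main(void)
{
	struct conn c = {
		.fallback_lock = PTHREAD_MUTEX_INITIALIZER,
		.allow_infinite_fallback = true,
		.allow_subflows = true,
	};

	printf("join before fail: %d\n", try_add_subflow(&c));	/* 1 */
	printf("mp_fail accepted: %d\n", handle_mp_fail(&c));	/* 1 */
	printf("join after fail:  %d\n", try_add_subflow(&c));	/* 0 */
	return 0;
}

Built with -pthread, the sketch prints 1, 1, 0: a join attempt succeeds before the fail, the first MP_FAIL is accepted, and any later join attempt is refused.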

4 files changed: +32 -13 lines

net/mptcp/pm.c

Lines changed: 7 additions & 1 deletion
@@ -765,8 +765,14 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
 
 	pr_debug("fail_seq=%llu\n", fail_seq);
 
-	if (!READ_ONCE(msk->allow_infinite_fallback))
+	/* After accepting the fail, we can't create any other subflows */
+	spin_lock_bh(&msk->fallback_lock);
+	if (!msk->allow_infinite_fallback) {
+		spin_unlock_bh(&msk->fallback_lock);
 		return;
+	}
+	msk->allow_subflows = false;
+	spin_unlock_bh(&msk->fallback_lock);
 
 	if (!subflow->fail_tout) {
 		pr_debug("send MP_FAIL response and infinite map\n");

net/mptcp/protocol.c

Lines changed: 6 additions & 5 deletions
@@ -791,7 +791,7 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
 {
 	mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
-	WRITE_ONCE(msk->allow_infinite_fallback, false);
+	msk->allow_infinite_fallback = false;
 	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
 }
 
@@ -803,7 +803,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
 		return false;
 
 	spin_lock_bh(&msk->fallback_lock);
-	if (__mptcp_check_fallback(msk)) {
+	if (!msk->allow_subflows) {
 		spin_unlock_bh(&msk->fallback_lock);
 		return false;
 	}
@@ -2625,7 +2625,7 @@ static void __mptcp_retrans(struct sock *sk)
 			len = max(copied, len);
 			tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
 				 info.size_goal);
-			WRITE_ONCE(msk->allow_infinite_fallback, false);
+			msk->allow_infinite_fallback = false;
 		}
 		spin_unlock_bh(&msk->fallback_lock);
 
@@ -2753,7 +2753,8 @@ static void __mptcp_init_sock(struct sock *sk)
 	WRITE_ONCE(msk->first, NULL);
 	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
-	WRITE_ONCE(msk->allow_infinite_fallback, true);
+	msk->allow_infinite_fallback = true;
+	msk->allow_subflows = true;
 	msk->recovery = false;
 	msk->subflow_id = 1;
 	msk->last_data_sent = tcp_jiffies32;
@@ -3549,7 +3550,7 @@ bool mptcp_finish_join(struct sock *ssk)
 	/* active subflow, already present inside the conn_list */
 	if (!list_empty(&subflow->node)) {
 		spin_lock_bh(&msk->fallback_lock);
-		if (__mptcp_check_fallback(msk)) {
+		if (!msk->allow_subflows) {
 			spin_unlock_bh(&msk->fallback_lock);
 			return false;
 		}

net/mptcp/protocol.h

Lines changed: 5 additions & 2 deletions
@@ -346,13 +346,15 @@ struct mptcp_sock {
 		u64	rtt_us; /* last maximum rtt of subflows */
 	} rcvq_space;
 	u8	scaling_ratio;
+	bool	allow_subflows;
 
 	u32	subflow_id;
 	u32	setsockopt_seq;
 	char	ca_name[TCP_CA_NAME_MAX];
 
-	spinlock_t	fallback_lock;	/* protects fallback and
-					 * allow_infinite_fallback
+	spinlock_t	fallback_lock;	/* protects fallback,
+					 * allow_infinite_fallback and
+					 * allow_join
 					 */
 };
 
@@ -1232,6 +1234,7 @@ static inline bool __mptcp_try_fallback(struct mptcp_sock *msk)
 		return false;
 	}
 
+	msk->allow_subflows = false;
 	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
 	spin_unlock_bh(&msk->fallback_lock);
 	return true;

net/mptcp/subflow.c

Lines changed: 14 additions & 5 deletions
@@ -1302,20 +1302,29 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
 		mptcp_schedule_work(sk);
 }
 
-static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
+static bool mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 	unsigned long fail_tout;
 
+	/* we are really failing, prevent any later subflow join */
+	spin_lock_bh(&msk->fallback_lock);
+	if (!msk->allow_infinite_fallback) {
+		spin_unlock_bh(&msk->fallback_lock);
+		return false;
+	}
+	msk->allow_subflows = false;
+	spin_unlock_bh(&msk->fallback_lock);
+
 	/* graceful failure can happen only on the MPC subflow */
 	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
-		return;
+		return false;
 
 	/* since the close timeout take precedence on the fail one,
 	 * no need to start the latter when the first is already set
 	 */
 	if (sock_flag((struct sock *)msk, SOCK_DEAD))
-		return;
+		return true;
 
 	/* we don't need extreme accuracy here, use a zero fail_tout as special
 	 * value meaning no fail timeout at all;
@@ -1327,6 +1336,7 @@ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
 	tcp_send_ack(ssk);
 
 	mptcp_reset_tout_timer(msk, subflow->fail_tout);
+	return true;
 }
 
 static bool subflow_check_data_avail(struct sock *ssk)
@@ -1387,12 +1397,11 @@ static bool subflow_check_data_avail(struct sock *ssk)
 		    (subflow->mp_join || subflow->valid_csum_seen)) {
 			subflow->send_mp_fail = 1;
 
-			if (!READ_ONCE(msk->allow_infinite_fallback)) {
+			if (!mptcp_subflow_fail(msk, ssk)) {
 				subflow->reset_transient = 0;
 				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
 				goto reset;
 			}
-			mptcp_subflow_fail(msk, ssk);
 			WRITE_ONCE(subflow->data_avail, true);
 			return true;
 		}