Skip to content

Commit ee024dd

Browse files
Paolo Abeni authored and intel-lab-lkp committed
mptcp: consolidate subflow cleanup
Consolidate all the cleanup actions requiring the worker in a single helper and ensure the dummy data fin creation for fallback socket is performed only when the tcp rx queue is empty. There are no functional changes intended, but this will simplify the next patch, when the tcp rx queue spooling could be delayed at release_cb time. Signed-off-by: Paolo Abeni <[email protected]>
1 parent e0d5da1 commit ee024dd

File tree

1 file changed

+18
-15
lines changed

1 file changed

+18
-15
lines changed

net/mptcp/subflow.c

Lines changed: 18 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1271,7 +1271,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
12711271
subflow->map_valid = 0;
12721272
}
12731273

1274-
/* sched mptcp worker to remove the subflow if no more data is pending */
1274+
static bool subflow_is_done(const struct sock *sk)
1275+
{
1276+
return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
1277+
}
1278+
1279+
/* sched mptcp worker for subflow cleanup if no more data is pending */
12751280
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
12761281
{
12771282
struct sock *sk = (struct sock *)msk;
@@ -1281,8 +1286,18 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
12811286
inet_sk_state_load(sk) != TCP_ESTABLISHED)))
12821287
return;
12831288

1284-
if (skb_queue_empty(&ssk->sk_receive_queue) &&
1285-
!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
1289+
if (!skb_queue_empty(&ssk->sk_receive_queue))
1290+
return;
1291+
1292+
if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
1293+
mptcp_schedule_work(sk);
1294+
1295+
/* when the fallback subflow closes the rx side, trigger a 'dummy'
1296+
* ingress data fin, so that the msk state will follow along
1297+
*/
1298+
if (__mptcp_check_fallback(msk) && subflow_is_done(ssk) &&
1299+
msk->first == ssk &&
1300+
mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
12861301
mptcp_schedule_work(sk);
12871302
}
12881303

@@ -1842,11 +1857,6 @@ static void __subflow_state_change(struct sock *sk)
18421857
rcu_read_unlock();
18431858
}
18441859

1845-
static bool subflow_is_done(const struct sock *sk)
1846-
{
1847-
return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
1848-
}
1849-
18501860
static void subflow_state_change(struct sock *sk)
18511861
{
18521862
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
@@ -1873,13 +1883,6 @@ static void subflow_state_change(struct sock *sk)
18731883
subflow_error_report(sk);
18741884

18751885
subflow_sched_work_if_closed(mptcp_sk(parent), sk);
1876-
1877-
/* when the fallback subflow closes the rx side, trigger a 'dummy'
1878-
* ingress data fin, so that the msk state will follow along
1879-
*/
1880-
if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
1881-
mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
1882-
mptcp_schedule_work(parent);
18831886
}
18841887

18851888
void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)

0 commit comments

Comments
 (0)