@@ -892,6 +892,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
        mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
        mptcp_sockopt_sync_locked(msk, ssk);
        mptcp_subflow_joined(msk, ssk);
+       mptcp_stop_tout_timer(sk);
        return true;
}
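Note: mptcp_stop_tout_timer() is referenced here but not defined in these hunks; it presumably lives alongside the other new helpers (likely in protocol.h). A minimal sketch of what it could look like, assuming icsk_mtup.probe_timestamp (unused by MPTCP for MTU probing) doubles as the close-timeout timestamp with 0 meaning "not armed", consistent with the checks in mptcp_reset_tout_timer() further down:

    /* Sketch only: assumed helper, not part of the hunks shown here. */
    static inline void mptcp_stop_tout_timer(struct sock *sk)
    {
            /* nothing to do if no close timeout was ever armed */
            if (!inet_csk(sk)->icsk_mtup.probe_timestamp)
                    return;

            sk_stop_timer(sk, &sk->sk_timer);
            inet_csk(sk)->icsk_mtup.probe_timestamp = 0;
    }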
@@ -2369,18 +2370,14 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
        bool dispose_it, need_push = false;

        /* If the first subflow moved to a close state before accept, e.g. due
-        * to an incoming reset, mptcp either:
-        * - if either the subflow or the msk are dead, destroy the context
-        *   (the subflow socket is deleted by inet_child_forget) and the msk
-        * - otherwise do nothing at the moment and take action at accept and/or
-        *   listener shutdown - user-space must be able to accept() the closed
-        *   socket.
+        * to an incoming reset or listener shutdown, the subflow socket is
+        * already deleted by inet_child_forget() and the mptcp socket can't
+        * survive too.
         */
-       if (msk->in_accept_queue && msk->first == ssk) {
-               if (!sock_flag(sk, SOCK_DEAD) && !sock_flag(ssk, SOCK_DEAD))
-                       return;
-
+       if (msk->in_accept_queue && msk->first == ssk &&
+           (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) {
                /* ensure later check in mptcp_worker() will dispose the msk */
+               mptcp_set_close_tout(sk, tcp_jiffies32 - (TCP_TIMEWAIT_LEN + 1));
                sock_set_flag(sk, SOCK_DEAD);
                lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
                mptcp_subflow_drop_ctx(ssk);
@@ -2443,6 +2440,22 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
out:
        if (need_push)
                __mptcp_push_pending(sk, 0);
+
+       /* Catch every 'all subflows closed' scenario, including peers silently
+        * closing them, e.g. due to timeout.
+        * For established sockets, allow an additional timeout before closing,
+        * as the protocol can still create more subflows.
+        */
+       if (list_is_singular(&msk->conn_list) && msk->first &&
+           inet_sk_state_load(msk->first) == TCP_CLOSE) {
+               if (sk->sk_state != TCP_ESTABLISHED ||
+                   msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
+                       inet_sk_state_store(sk, TCP_CLOSE);
+                       mptcp_close_wake_up(sk);
+               } else {
+                       mptcp_start_tout_timer(sk);
+               }
+       }
}

void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
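The two call sites above also rely on mptcp_set_close_tout() and mptcp_start_tout_timer(), again defined outside these hunks. A sketch under the same assumption (probe_timestamp stores a tcp_jiffies32 close timestamp, 0 reserved for "no timeout"); note how __mptcp_close_ssk() passes a timestamp already TCP_TIMEWAIT_LEN + 1 ticks in the past, so mptcp_close_tout_expired() below reports expiry on the next worker run:

    /* Sketch only: assumed helpers, not part of the hunks shown here. */
    static inline void mptcp_set_close_tout(struct sock *sk, unsigned long tout)
    {
            /* never store 0, which is reserved for "no close timeout pending" */
            inet_csk(sk)->icsk_mtup.probe_timestamp = tout ? : 1;
    }

    static inline void mptcp_start_tout_timer(struct sock *sk)
    {
            /* record "now" and (re)arm sk->sk_timer via mptcp_reset_tout_timer() */
            mptcp_set_close_tout(sk, tcp_jiffies32);
            mptcp_reset_tout_timer(mptcp_sk(sk), 0);
    }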
@@ -2486,23 +2499,14 @@ static void __mptcp_close_subflow(struct sock *sk)

}

-static bool mptcp_should_close(const struct sock *sk)
+static bool mptcp_close_tout_expired(const struct sock *sk)
{
-       s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
-       struct mptcp_subflow_context *subflow;
-
-       if (delta >= TCP_TIMEWAIT_LEN || mptcp_sk(sk)->in_accept_queue)
-               return true;
+       if (!inet_csk(sk)->icsk_mtup.probe_timestamp ||
+           sk->sk_state == TCP_CLOSE)
+               return false;

-       /* if all subflows are in closed status don't bother with additional
-        * timeout
-        */
-       mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
-               if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) !=
-                   TCP_CLOSE)
-                       return false;
-       }
-       return true;
+       return time_after32(tcp_jiffies32,
+                           inet_csk(sk)->icsk_mtup.probe_timestamp + TCP_TIMEWAIT_LEN);
}

static void mptcp_check_fastclose(struct mptcp_sock *msk)
@@ -2641,15 +2645,16 @@ void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
        struct sock *sk = (struct sock *)msk;
        unsigned long timeout, close_timeout;

-       if (!fail_tout && !sock_flag(sk, SOCK_DEAD))
+       if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
                return;

-       close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies + TCP_TIMEWAIT_LEN;
+       close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies +
+                       TCP_TIMEWAIT_LEN;

        /* the close timeout takes precedence on the fail one, and here at least one of
         * them is active
         */
-       timeout = sock_flag(sk, SOCK_DEAD) ? close_timeout : fail_tout;
+       timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;

        sk_reset_timer(sk, &sk->sk_timer, timeout);
}
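The close_timeout computation above rebases the stored timestamp between clocks: probe_timestamp is a 32-bit tcp_jiffies32 value, while sk_reset_timer() wants an absolute deadline on the (unsigned long) jiffies clock. Read as a worked expression (an illustration of the intent, not code from the patch):

    /* Sketch only: intent of the close_timeout rebasing in mptcp_reset_tout_timer().
     *
     *   elapsed       = tcp_jiffies32 - probe_timestamp;       // ticks since the close timestamp
     *   close_timeout = jiffies - elapsed + TCP_TIMEWAIT_LEN;  // same deadline, jiffies clock
     *
     * i.e. the timer still fires TCP_TIMEWAIT_LEN after the moment recorded by
     * mptcp_set_close_tout(), however late mptcp_reset_tout_timer() is invoked.
     */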
@@ -2668,8 +2673,6 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
        mptcp_subflow_reset(ssk);
        WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
        unlock_sock_fast(ssk, slow);
-
-       mptcp_reset_tout_timer(msk, 0);
}

static void mptcp_do_fastclose(struct sock *sk)
@@ -2706,18 +2709,14 @@ static void mptcp_worker(struct work_struct *work)
        if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
                __mptcp_close_subflow(sk);

-       /* There is no point in keeping around an orphaned sk timedout or
-        * closed, but we need the msk around to reply to incoming DATA_FIN,
-        * even if it is orphaned and in FIN_WAIT2 state
-        */
-       if (sock_flag(sk, SOCK_DEAD)) {
-               if (mptcp_should_close(sk))
-                       mptcp_do_fastclose(sk);
+       if (mptcp_close_tout_expired(sk)) {
+               mptcp_do_fastclose(sk);
+               mptcp_close_wake_up(sk);
+       }

-               if (sk->sk_state == TCP_CLOSE) {
-                       __mptcp_destroy_sock(sk);
-                       goto unlock;
-               }
+       if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) {
+               __mptcp_destroy_sock(sk);
+               goto unlock;
        }

        if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
@@ -3016,7 +3015,6 @@ bool __mptcp_close(struct sock *sk, long timeout)

cleanup:
        /* orphan all the subflows */
-       inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                bool slow = lock_sock_fast_nested(ssk);
@@ -3053,7 +3051,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
                __mptcp_destroy_sock(sk);
                do_cancel_work = true;
        } else {
-               mptcp_reset_tout_timer(msk, 0);
+               mptcp_start_tout_timer(sk);
        }

        return do_cancel_work;
@@ -3117,7 +3115,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
        inet_sk_state_store(sk, TCP_CLOSE);

        mptcp_stop_rtx_timer(sk);
-       sk_stop_timer(sk, &sk->sk_timer);
+       mptcp_stop_tout_timer(sk);

        if (msk->token)
                mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);