@@ -28,22 +28,23 @@ LOG_MODULE_REGISTER(net_tcp, CONFIG_NET_TCP_LOG_LEVEL);
 
 #define ACK_TIMEOUT_MS CONFIG_NET_TCP_ACK_TIMEOUT
 #define ACK_TIMEOUT K_MSEC(ACK_TIMEOUT_MS)
-/* Allow for (tcp_retries + 1) transmissions
- * 1 + 2 + ... + 2^n < 2^(n+1)
- * The last retransmission cycle takes only 1 tcp_rto
- */
-#define FIN_TIMEOUT_MS (tcp_rto * (1 + (1 << (tcp_retries + 1))))
-#define FIN_TIMEOUT K_MSEC(FIN_TIMEOUT_MS)
+#define FIN_TIMEOUT K_MSEC(tcp_fin_timeout_ms)
 #define ACK_DELAY K_MSEC(100)
 
 static int tcp_rto = CONFIG_NET_TCP_INIT_RETRANSMISSION_TIMEOUT;
 static int tcp_retries = CONFIG_NET_TCP_RETRY_COUNT;
+static int tcp_fin_timeout_ms;
 static int tcp_window =
 #if (CONFIG_NET_TCP_MAX_RECV_WINDOW_SIZE != 0)
 	CONFIG_NET_TCP_MAX_RECV_WINDOW_SIZE;
 #else
 	(CONFIG_NET_BUF_RX_COUNT * CONFIG_NET_BUF_DATA_SIZE) / 3;
 #endif
+#ifdef CONFIG_NET_TCP_RANDOMIZED_RTO
+#define TCP_RTO_MS (conn->rto)
+#else
+#define TCP_RTO_MS (tcp_rto)
+#endif
 
 static sys_slist_t tcp_conns = SYS_SLIST_STATIC_INIT(&tcp_conns);
 
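Note (not part of the diff): when CONFIG_NET_TCP_RANDOMIZED_RTO is enabled, `TCP_RTO_MS` expands to `conn->rto`, so it only compiles inside functions that have a `struct tcp *conn` in scope, which every use site in this commit does. A minimal usage sketch, with a hypothetical helper name:

```c
/* Hypothetical helper, for illustration only */
static void example_restart_send_timer(struct tcp *conn)
{
	/* Expands to conn->rto (per-connection, randomized) or tcp_rto (global) */
	k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer,
				    K_MSEC(TCP_RTO_MS));
}
```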
@@ -352,6 +353,28 @@ static void tcp_send(struct net_pkt *pkt)
 	tcp_pkt_unref(pkt);
 }
 
+static void tcp_derive_rto(struct tcp *conn)
+{
+#ifdef CONFIG_NET_TCP_RANDOMIZED_RTO
+	/* Compute a randomized rto between 1 and 1.5 times tcp_rto */
+	uint32_t gain;
+	uint8_t gain8;
+	uint32_t rto;
+
+	/* Getting random data is computationally expensive, so only use 8 bits */
+	sys_rand_get(&gain8, sizeof(uint8_t));
+
+	gain = (uint32_t)gain8;
+	gain += 1 << 9;
+
+	rto = (uint32_t)tcp_rto;
+	rto = (gain * rto) >> 9;
+	conn->rto = (uint16_t)rto;
+#else
+	ARG_UNUSED(conn);
+#endif
+}
+
 static void tcp_send_queue_flush(struct tcp *conn)
 {
 	struct net_pkt *pkt;
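The randomization in `tcp_derive_rto()` above is plain Q9 fixed-point arithmetic: `gain` ends up in [512, 767], so `(gain * rto) >> 9` scales `tcp_rto` by a factor between 1.0 and roughly 1.498. A standalone host-side sketch of the same computation, assuming the default `tcp_rto` of 200 ms:

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch of the Q9 fixed-point scaling used by tcp_derive_rto() */
int main(void)
{
	uint32_t tcp_rto = 200; /* ms; assumed CONFIG_NET_TCP_INIT_RETRANSMISSION_TIMEOUT */

	for (uint32_t gain8 = 0; gain8 <= 255; gain8 += 85) {
		uint32_t gain = gain8 + (1 << 9); /* 512..767, i.e. 1.0..~1.498 in Q9 */
		uint32_t rto = (gain * tcp_rto) >> 9;

		printf("gain8 = %3u -> rto = %u ms\n", gain8, rto);
	}
	/* Prints 200, 233, 266 and 299 ms */
	return 0;
}
```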
@@ -517,7 +540,7 @@ static bool tcp_send_process_no_lock(struct tcp *conn)
 
 	if (conn->in_retransmission) {
 		k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer,
-					    K_MSEC(tcp_rto));
+					    K_MSEC(TCP_RTO_MS));
 	} else if (local && !sys_slist_is_empty(&conn->send_queue)) {
 		k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer,
 					    K_NO_WAIT);
@@ -566,7 +589,7 @@ static void tcp_send_timer_cancel(struct tcp *conn)
 	} else {
 		conn->send_retries = tcp_retries;
 		k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer,
-					    K_MSEC(tcp_rto));
+					    K_MSEC(TCP_RTO_MS));
 	}
 }
 
@@ -1182,7 +1205,7 @@ static int tcp_send_queued_data(struct tcp *conn)
 	if (subscribe) {
 		conn->send_data_retries = 0;
 		k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer,
-					    K_MSEC(tcp_rto));
+					    K_MSEC(TCP_RTO_MS));
 	}
 out:
 	return ret;
@@ -1232,7 +1255,7 @@ static void tcp_resend_data(struct k_work *work)
 	if (conn->in_close && conn->send_data_total == 0) {
 		NET_DBG("TCP connection in active close, "
 			"not disposing yet (waiting %dms)",
-			FIN_TIMEOUT_MS);
+			tcp_fin_timeout_ms);
 		k_work_reschedule_for_queue(&tcp_work_q,
 					    &conn->fin_timer,
 					    FIN_TIMEOUT);
@@ -1255,11 +1278,13 @@ static void tcp_resend_data(struct k_work *work)
 		NET_ERR("TCP failed to allocate buffer in retransmission");
 	}
 
-	/* Every retransmit, the retransmission timeout increases by a factor 2 */
-	exp_tcp_rto = tcp_rto << conn->send_data_retries;
+	exp_tcp_rto = TCP_RTO_MS;
 	/* The last retransmit does not need to wait that long */
-	if (conn->send_data_retries >= tcp_retries) {
-		exp_tcp_rto = tcp_rto;
+	if (conn->send_data_retries < tcp_retries) {
+		/* With every retransmission, the timeout increases by a factor of 1.5 */
+		for (int i = 0; i < conn->send_data_retries; i++) {
+			exp_tcp_rto += exp_tcp_rto >> 1;
+		}
 	}
 
 	k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer,
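The loop above replaces the former `tcp_rto << send_data_retries` doubling with a gentler 1.5x geometric backoff. A host-side sketch of the delays it produces, assuming the defaults `tcp_rto` = 200 ms and `tcp_retries` = 9:

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch of the 1.5x backoff loop in tcp_resend_data() */
int main(void)
{
	uint32_t tcp_rto = 200; /* ms; assumed default */
	int tcp_retries = 9;    /* assumed CONFIG_NET_TCP_RETRY_COUNT */

	for (int retries = 0; retries < tcp_retries; retries++) {
		uint32_t exp_tcp_rto = tcp_rto;

		for (int i = 0; i < retries; i++) {
			exp_tcp_rto += exp_tcp_rto >> 1;
		}
		printf("retry %d: wait %u ms\n", retries, exp_tcp_rto);
	}
	/* Prints 200, 300, 450, 675, 1012, 1518, 2277, 3415, 5122 ms */
	return 0;
}
```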
@@ -1302,7 +1327,7 @@ static void tcp_fin_timeout(struct k_work *work)
 		return;
 	}
 
-	NET_DBG("Did not receive %s in %dms", "FIN", FIN_TIMEOUT_MS);
+	NET_DBG("Did not receive %s in %dms", "FIN", tcp_fin_timeout_ms);
 	NET_DBG("conn: %p %s", conn, tcp_conn_state(conn, NULL));
 
 	/* Extra unref from net_tcp_put() */
@@ -1318,9 +1343,11 @@ static void tcp_send_zwp(struct k_work *work)
 
 	(void)tcp_out_ext(conn, ACK, NULL, conn->seq - 1);
 
+	tcp_derive_rto(conn);
+
 	if (conn->send_win == 0) {
 		(void)k_work_reschedule_for_queue(
-			&tcp_work_q, &conn->persist_timer, K_MSEC(tcp_rto));
+			&tcp_work_q, &conn->persist_timer, K_MSEC(TCP_RTO_MS));
 	}
 
 	k_mutex_unlock(&conn->lock);
@@ -1644,6 +1671,7 @@ static struct tcp *tcp_conn_new(struct net_pkt *pkt)
 
 	conn = context->tcp;
 	conn->iface = pkt->iface;
+	tcp_derive_rto(conn);
 
 	net_context_set_family(conn->context, net_pkt_family(pkt));
 
@@ -1966,7 +1994,7 @@ static enum net_verdict tcp_in(struct tcp *conn, struct net_pkt *pkt)
 
 	if (conn->send_win == 0) {
 		(void)k_work_reschedule_for_queue(
-			&tcp_work_q, &conn->persist_timer, K_MSEC(tcp_rto));
+			&tcp_work_q, &conn->persist_timer, K_MSEC(TCP_RTO_MS));
 	} else {
 		(void)k_work_cancel_delayable(&conn->persist_timer);
 	}
@@ -2138,6 +2166,7 @@ static enum net_verdict tcp_in(struct tcp *conn, struct net_pkt *pkt)
 	k_work_cancel_delayable(&conn->send_data_timer);
 	if (conn->data_mode == TCP_DATA_MODE_RESEND) {
 		conn->unacked_len = 0;
+		tcp_derive_rto(conn);
 	}
 	conn->data_mode = TCP_DATA_MODE_SEND;
 
@@ -2325,12 +2354,12 @@ int net_tcp_put(struct net_context *context)
 		 */
 		k_work_reschedule_for_queue(&tcp_work_q,
 					    &conn->send_data_timer,
-					    K_MSEC(tcp_rto));
+					    K_MSEC(TCP_RTO_MS));
 	} else {
 		int ret;
 
 		NET_DBG("TCP connection in active close, not "
-			"disposing yet (waiting %dms)", FIN_TIMEOUT_MS);
+			"disposing yet (waiting %dms)", tcp_fin_timeout_ms);
 		k_work_reschedule_for_queue(&tcp_work_q,
 					    &conn->fin_timer,
 					    FIN_TIMEOUT);
@@ -2542,6 +2571,7 @@ int net_tcp_connect(struct net_context *context,
 
 	conn = context->tcp;
 	conn->iface = net_context_get_iface(context);
+	tcp_derive_rto(conn);
 
 	switch (net_context_get_family(context)) {
 	const struct in_addr *ip4;
@@ -3162,6 +3192,8 @@ struct k_sem *net_tcp_tx_sem_get(struct net_context *context)
 
 void net_tcp_init(void)
 {
+	int i;
+	int rto;
 #if defined(CONFIG_NET_TEST_PROTOCOL)
 	/* Register inputs for TTCN-3 based TCP sanity check */
 	test_cb_register(AF_INET, IPPROTO_TCP, 4242, 4242, tcp_input);
@@ -3184,6 +3216,21 @@ void net_tcp_init(void)
 			   K_KERNEL_STACK_SIZEOF(work_q_stack), THREAD_PRIORITY,
 			   NULL);
 
+	/* Compute the largest possible retransmission timeout */
+	tcp_fin_timeout_ms = 0;
+	rto = tcp_rto;
+	for (i = 0; i < tcp_retries; i++) {
+		tcp_fin_timeout_ms += rto;
+		rto += rto >> 1;
+	}
+	/* The last retransmission cycle takes only one tcp_rto */
+	tcp_fin_timeout_ms += tcp_rto;
+
+	/* When CONFIG_NET_TCP_RANDOMIZED_RTO is active, the worst case is 1.5 times larger */
+	if (IS_ENABLED(CONFIG_NET_TCP_RANDOMIZED_RTO)) {
+		tcp_fin_timeout_ms += tcp_fin_timeout_ms >> 1;
+	}
+
 	k_thread_name_set(&tcp_work_q.thread, "tcp_work");
 	NET_DBG("Workq started. Thread ID: %p", &tcp_work_q.thread);
 }
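For a sense of scale, a host-side sketch of the same computation, assuming the defaults `tcp_rto` = 200 ms and `tcp_retries` = 9: the geometric series sums to 15169 ms, or 22753 ms in the randomized worst case.

```c
#include <stdio.h>

/* Sketch of the worst-case FIN timeout computed in net_tcp_init() */
int main(void)
{
	int tcp_rto = 200;   /* ms; assumed default */
	int tcp_retries = 9; /* assumed default */
	int tcp_fin_timeout_ms = 0;
	int rto = tcp_rto;

	for (int i = 0; i < tcp_retries; i++) {
		tcp_fin_timeout_ms += rto; /* 200 + 300 + 450 + ... + 5122 */
		rto += rto >> 1;
	}
	tcp_fin_timeout_ms += tcp_rto; /* last cycle waits one plain tcp_rto */

	printf("fixed RTO: %d ms\n", tcp_fin_timeout_ms); /* 15169 */
	printf("randomized RTO, worst case: %d ms\n",
	       tcp_fin_timeout_ms + (tcp_fin_timeout_ms >> 1)); /* 22753 */
	return 0;
}
```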