@@ -41,6 +41,16 @@ static void tcp_in(struct tcp *conn, struct net_pkt *pkt);
 int (*tcp_send_cb)(struct net_pkt *pkt) = NULL;
 size_t (*tcp_recv_cb)(struct tcp *conn, struct net_pkt *pkt) = NULL;

+static uint32_t tcp_get_seq(struct net_buf *buf)
+{
+	return *(uint32_t *)net_buf_user_data(buf);
+}
+
+static void tcp_set_seq(struct net_buf *buf, uint32_t seq)
+{
+	*(uint32_t *)net_buf_user_data(buf) = seq;
+}
+
 static int tcp_pkt_linearize(struct net_pkt *pkt, size_t pos, size_t len)
 {
 	struct net_buf *buf, *first = pkt->cursor.buf, *second = first->frags;
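
The helpers added above stash a TCP sequence number in the net_buf user-data area, so each queued fragment remembers which byte of the stream it starts at. As a rough sketch (not part of the patch), assuming CONFIG_NET_BUF_USER_DATA_SIZE is at least sizeof(uint32_t), a whole fragment chain could be tagged the same way tcp_queue_recv_data() does further down; tag_frag_chain() is a hypothetical name:

/* Sketch only: tag every fragment in a chain with the sequence number of
 * its first byte, mirroring how the patch uses tcp_set_seq(). Assumes the
 * net_buf user-data area can hold a uint32_t.
 */
#include <net/buf.h>

static void tag_frag_chain(struct net_buf *frag, uint32_t seq)
{
	while (frag) {
		*(uint32_t *)net_buf_user_data(frag) = seq; /* tcp_set_seq() */
		seq += frag->len;        /* next fragment starts right after */
		frag = frag->frags;
	}
}
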
@@ -378,6 +388,10 @@ static int tcp_conn_unref(struct tcp *conn)
 	k_delayed_work_cancel(&conn->send_data_timer);
 	tcp_pkt_unref(conn->send_data);

+	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
+		tcp_pkt_unref(conn->queue_recv_data);
+	}
+
 	k_delayed_work_cancel(&conn->timewait_timer);
 	k_delayed_work_cancel(&conn->fin_timer);
 
@@ -627,7 +641,35 @@ static bool tcp_options_check(struct tcp_options *recv_options,
 	return result;
 }

-static int tcp_data_get(struct tcp *conn, struct net_pkt *pkt, size_t len)
+static size_t tcp_check_pending_data(struct tcp *conn, struct net_pkt *pkt,
+				     size_t len)
+{
+	size_t pending_len = 0;
+
+	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT &&
+	    !net_pkt_is_empty(conn->queue_recv_data)) {
+		struct tcphdr *th = th_get(pkt);
+		uint32_t expected_seq = th_seq(th) + len;
+		uint32_t pending_seq;
+
+		pending_seq = tcp_get_seq(conn->queue_recv_data->buffer);
+		if (pending_seq == expected_seq) {
+			pending_len = net_pkt_get_len(conn->queue_recv_data);
+
+			NET_DBG("Found pending data seq %u len %zd",
+				pending_seq, pending_len);
+			net_buf_frag_add(pkt->buffer,
+					 conn->queue_recv_data->buffer);
+			conn->queue_recv_data->buffer = NULL;
+
+			k_delayed_work_cancel(&conn->recv_queue_timer);
+		}
+	}
+
+	return pending_len;
+}
+
+static int tcp_data_get(struct tcp *conn, struct net_pkt *pkt, size_t *len)
 {
 	int ret = 0;
 
@@ -644,10 +686,15 @@ static int tcp_data_get(struct tcp *conn, struct net_pkt *pkt, size_t len)
 			goto out;
 		}

+		/* If there is any out-of-order pending data, then pass it
+		 * to the application here.
+		 */
+		*len += tcp_check_pending_data(conn, up, *len);
+
 		net_pkt_cursor_init(up);
 		net_pkt_set_overwrite(up, true);

-		net_pkt_skip(up, net_pkt_get_len(up) - len);
+		net_pkt_skip(up, net_pkt_get_len(up) - *len);

 		/* Do not pass data to application with TCP conn
 		 * locked as there could be an issue when the app tries
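
tcp_data_get() now reports the delivered length through a pointer because tcp_check_pending_data() may splice previously queued out-of-order buffers onto the packet when they start exactly where the in-order segment ends. A standalone sketch of that contiguity test (hypothetical names, not the Zephyr API); plain uint32_t arithmetic makes sequence-number wraparound work out naturally:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch only: does the queued data at queued_seq become contiguous once
 * the in-order segment [seg_seq, seg_seq + seg_len) has been received?
 * All arithmetic is modulo 2^32, matching TCP sequence numbers.
 */
static bool fills_gap(uint32_t seg_seq, uint32_t seg_len, uint32_t queued_seq)
{
	return (uint32_t)(seg_seq + seg_len) == queued_seq;
}

int main(void)
{
	printf("%d\n", fills_gap(1000, 100, 1100));          /* 1: contiguous */
	printf("%d\n", fills_gap(1000, 100, 1200));          /* 0: still a gap */
	printf("%d\n", fills_gap(UINT32_MAX - 49, 100, 50)); /* 1: wraps around */
	return 0;
}
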
@@ -934,6 +981,22 @@ static int tcp_send_queued_data(struct tcp *conn)
 	return ret;
 }

+static void tcp_cleanup_recv_queue(struct k_work *work)
+{
+	struct tcp *conn = CONTAINER_OF(work, struct tcp, recv_queue_timer);
+
+	k_mutex_lock(&conn->lock, K_FOREVER);
+
+	NET_DBG("Cleanup recv queue conn %p len %zd seq %u", conn,
+		net_pkt_get_len(conn->queue_recv_data),
+		tcp_get_seq(conn->queue_recv_data->buffer));
+
+	net_buf_unref(conn->queue_recv_data->buffer);
+	conn->queue_recv_data->buffer = NULL;
+
+	k_mutex_unlock(&conn->lock);
+}
+
 static void tcp_resend_data(struct k_work *work)
 {
 	struct tcp *conn = CONTAINER_OF(work, struct tcp, send_data_timer);
@@ -1042,8 +1105,13 @@ static struct tcp *tcp_conn_alloc(void)
 	k_delayed_work_init(&conn->timewait_timer, tcp_timewait_timeout);
 	k_delayed_work_init(&conn->fin_timer, tcp_fin_timeout);

+	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
+		conn->queue_recv_data = tcp_rx_pkt_alloc(conn, 0);
+	}
+
 	conn->send_data = tcp_pkt_alloc(conn, 0);
 	k_delayed_work_init(&conn->send_data_timer, tcp_resend_data);
+	k_delayed_work_init(&conn->recv_queue_timer, tcp_cleanup_recv_queue);

 	k_sem_init(&conn->connect_sem, 0, UINT_MAX);
 	conn->in_connect = false;
@@ -1259,6 +1327,127 @@ static bool tcp_validate_seq(struct tcp *conn, struct tcphdr *hdr)
 		(net_tcp_seq_cmp(th_seq(hdr), conn->ack + conn->recv_win) < 0);
 }

+static void print_seq_list(struct net_buf *buf)
+{
+	struct net_buf *tmp = buf;
+	uint32_t seq;
+
+	while (tmp) {
+		seq = tcp_get_seq(tmp);
+
+		NET_DBG("buf %p seq %u len %d", tmp, seq, tmp->len);
+
+		tmp = tmp->frags;
+	}
+}
+
+static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
+				size_t len, uint32_t seq)
+{
+	uint32_t seq_start = seq;
+	bool inserted = false;
+	struct net_buf *tmp;
+
+	NET_DBG("conn: %p len %zd seq %u ack %u", conn, len, seq, conn->ack);
+
+	tmp = pkt->buffer;
+
+	tcp_set_seq(tmp, seq);
+	seq += tmp->len;
+	tmp = tmp->frags;
+
+	while (tmp) {
+		tcp_set_seq(tmp, seq);
+		seq += tmp->len;
+		tmp = tmp->frags;
+	}
+
+	if (IS_ENABLED(CONFIG_NET_TCP_LOG_LEVEL_DBG)) {
+		NET_DBG("Queuing data: conn %p", conn);
+		print_seq_list(pkt->buffer);
+	}
+
+	if (!net_pkt_is_empty(conn->queue_recv_data)) {
+		/* Place the data to correct place in the list. If the data
+		 * would not be sequential, then drop this packet.
+		 */
+		uint32_t pending_seq;
+
+		pending_seq = tcp_get_seq(conn->queue_recv_data->buffer);
+		if (pending_seq == seq) {
+			/* Put new data before the pending data */
+			net_buf_frag_add(pkt->buffer,
+					 conn->queue_recv_data->buffer);
+			conn->queue_recv_data->buffer = pkt->buffer;
+			inserted = true;
+		} else {
+			struct net_buf *last;
+
+			last = net_buf_frag_last(conn->queue_recv_data->buffer);
+			pending_seq = tcp_get_seq(last);
+
+			if ((pending_seq + last->len) == seq_start) {
+				/* Put new data after pending data */
+				last->frags = pkt->buffer;
+				inserted = true;
+			}
+		}
+
+		if (IS_ENABLED(CONFIG_NET_TCP_LOG_LEVEL_DBG)) {
+			if (inserted) {
+				NET_DBG("All pending data: conn %p", conn);
+				print_seq_list(conn->queue_recv_data->buffer);
+			} else {
+				NET_DBG("Cannot add new data to queue");
+			}
+		}
+	} else {
+		net_pkt_append_buffer(conn->queue_recv_data, pkt->buffer);
+		inserted = true;
+	}
+
+	if (inserted) {
+		/* We need to keep the received data but free the pkt */
+		pkt->buffer = NULL;
+
+		if (!k_delayed_work_pending(&conn->recv_queue_timer)) {
+			k_delayed_work_submit(&conn->recv_queue_timer,
+				K_MSEC(CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT));
+		}
+	}
+}
+
+static bool tcp_data_received(struct tcp *conn, struct net_pkt *pkt,
+			      size_t *len)
+{
+	if (tcp_data_get(conn, pkt, len) < 0) {
+		return false;
+	}
+
+	net_stats_update_tcp_seg_recv(conn->iface);
+	conn_ack(conn, *len);
+	tcp_out(conn, ACK);
+
+	return true;
+}
+
+static void tcp_out_of_order_data(struct tcp *conn, struct net_pkt *pkt,
+				  size_t data_len, uint32_t seq)
+{
+	size_t headers_len;
+
+	headers_len = net_pkt_get_len(pkt) - data_len;
+
+	/* Get rid of protocol headers from the data */
+	if (tcp_pkt_pull(pkt, headers_len) < 0) {
+		return;
+	}
+
+	/* We received out-of-order data. Try to queue it.
+	 */
+	tcp_queue_recv_data(conn, pkt, data_len, seq);
+}
+
 /* TCP state machine, everything happens here */
 static void tcp_in(struct tcp *conn, struct net_pkt *pkt)
 {
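
tcp_queue_recv_data() above only keeps a new out-of-order segment if it lines up with what is already queued: it is prepended when it ends exactly where the queue starts, appended when it begins exactly where the queue ends, and dropped otherwise. A standalone model of that decision with hypothetical names (the real code walks net_buf chains rather than comparing plain integers):

#include <stdint.h>
#include <stdio.h>

enum queue_action { QUEUE_PREPEND, QUEUE_APPEND, QUEUE_DROP };

/* Sketch only: new segment covers [new_seq, new_seq + new_len), queued data
 * covers [q_seq, q_seq + q_len). Arithmetic is modulo 2^32 on purpose.
 */
static enum queue_action place(uint32_t new_seq, uint32_t new_len,
			       uint32_t q_seq, uint32_t q_len)
{
	if ((uint32_t)(new_seq + new_len) == q_seq) {
		return QUEUE_PREPEND;  /* new data ends where the queue begins */
	} else if ((uint32_t)(q_seq + q_len) == new_seq) {
		return QUEUE_APPEND;   /* new data starts where the queue ends */
	}

	return QUEUE_DROP;             /* non-adjacent: drop, as the patch does */
}

int main(void)
{
	printf("%d\n", place(1000, 100, 1100, 50)); /* 0: prepend */
	printf("%d\n", place(1150, 100, 1100, 50)); /* 1: append  */
	printf("%d\n", place(2000, 100, 1100, 50)); /* 2: drop    */
	return 0;
}
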
@@ -1373,7 +1562,7 @@ static void tcp_in(struct tcp *conn, struct net_pkt *pkt)
 			}

 			if (len) {
-				if (tcp_data_get(conn, pkt, len) < 0) {
+				if (tcp_data_get(conn, pkt, &len) < 0) {
 					break;
 				}
 				conn_ack(conn, + len);
@@ -1390,7 +1579,7 @@ static void tcp_in(struct tcp *conn, struct net_pkt *pkt)
 			tcp_send_timer_cancel(conn);
 			conn_ack(conn, th_seq(th) + 1);
 			if (len) {
-				if (tcp_data_get(conn, pkt, len) < 0) {
+				if (tcp_data_get(conn, pkt, &len) < 0) {
 					break;
 				}
 				conn_ack(conn, + len);
@@ -1423,7 +1612,7 @@ static void tcp_in(struct tcp *conn, struct net_pkt *pkt)
 		} else if (th && FL(&fl, ==, (FIN | ACK | PSH),
 				    th_seq(th) == conn->ack)) {
 			if (len) {
-				if (tcp_data_get(conn, pkt, len) < 0) {
+				if (tcp_data_get(conn, pkt, &len) < 0) {
 					break;
 				}
 			}
@@ -1490,17 +1679,16 @@ static void tcp_in(struct tcp *conn, struct net_pkt *pkt)
 
 		if (th && len) {
 			if (th_seq(th) == conn->ack) {
-				if (tcp_data_get(conn, pkt, len) < 0) {
+				if (!tcp_data_received(conn, pkt, &len)) {
 					break;
 				}
-
-				net_stats_update_tcp_seg_recv(conn->iface);
-				conn_ack(conn, + len);
-				tcp_out(conn, ACK);
 			} else if (net_tcp_seq_greater(conn->ack, th_seq(th))) {
 				tcp_out(conn, ACK); /* peer has resent */

 				net_stats_update_tcp_seg_ackerr(conn->iface);
+			} else if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
+				tcp_out_of_order_data(conn, pkt, len,
+						      th_seq(th));
 			}
 		}
 		break;
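
With these changes the ESTABLISHED-state receive path splits into three cases: data at conn->ack is delivered and ACKed via tcp_data_received(), data already covered by conn->ack is treated as a retransmission and only re-ACKed, and anything further ahead goes to tcp_out_of_order_data() when CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT is enabled. A standalone sketch of that three-way split (hypothetical names; seq_greater() assumes the usual modulo-2^32 comparison that net_tcp_seq_greater() appears to provide):

#include <stdint.h>
#include <stdio.h>

enum rx_action { RX_DELIVER, RX_REACK, RX_QUEUE };

/* Sketch only: "a is greater than b" in modulo-2^32 sequence space. */
static int seq_greater(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

static enum rx_action classify(uint32_t expected, uint32_t th_seq)
{
	if (th_seq == expected) {
		return RX_DELIVER; /* in order: deliver, bump ack, send ACK */
	} else if (seq_greater(expected, th_seq)) {
		return RX_REACK;   /* peer resent old data: just ACK again */
	}

	return RX_QUEUE;           /* ahead of expected: queue as out-of-order */
}

int main(void)
{
	printf("%d\n", classify(5000, 5000)); /* 0: deliver */
	printf("%d\n", classify(5000, 4000)); /* 1: re-ACK  */
	printf("%d\n", classify(5000, 6000)); /* 2: queue   */
	return 0;
}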