@@ -797,7 +797,10 @@ static void tcp_conn_release(struct k_work *work)
 	tcp_pkt_unref(conn->send_data);
 
 	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
-		tcp_pkt_unref(conn->queue_recv_data);
+		if (conn->queue_recv_data != NULL) {
+			net_buf_unref(conn->queue_recv_data);
+			conn->queue_recv_data = NULL;
+		}
 	}
 
 	(void)k_work_cancel_delayable(&conn->timewait_timer);
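
Note on the hunk above: `queue_recv_data` is now a bare `struct net_buf *` chain rather than a pre-allocated `struct net_pkt`, so the release path frees it with `net_buf_unref()` and only when something is actually queued. A minimal sketch of the pattern, assuming Zephyr's `net_buf` semantics (dropping the head reference of an otherwise unowned chain releases its fragments too); the helper name is hypothetical:

    /* Hypothetical helper mirroring the NULL-guarded teardown above. */
    static void release_recv_queue(struct tcp *conn)
    {
            if (conn->queue_recv_data != NULL) {
                    /* Unref the head; the fragment chain goes with it
                     * once nothing else holds a reference.
                     */
                    net_buf_unref(conn->queue_recv_data);
                    conn->queue_recv_data = NULL; /* no dangling pointer, no double unref */
            }
    }
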
@@ -1136,8 +1139,7 @@ static size_t tcp_check_pending_data(struct tcp *conn, struct net_pkt *pkt,
 {
 	size_t pending_len = 0;
 
-	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT &&
-	    !net_pkt_is_empty(conn->queue_recv_data)) {
+	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT && conn->queue_recv_data != NULL) {
 		/* Some potential cases:
 		 * Note: MI = MAX_INT
 		 * Packet | Queued | End off | Gap size | Required handling
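
The guard changes with the representation: a pre-allocated packet had to be tested with `!net_pkt_is_empty()`, whereas for a bare pointer `NULL` itself means "nothing queued". A sketch of the resulting invariant (hypothetical helper; `CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT` is the existing Kconfig timeout, non-zero when the feature is enabled):

    /* Hypothetical predicate: out-of-order data is pending iff the
     * feature is enabled and the chain pointer is set.
     */
    static inline bool recv_queue_pending(const struct tcp *conn)
    {
            return (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT != 0) &&
                   (conn->queue_recv_data != NULL);
    }
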
@@ -1159,10 +1161,10 @@ static size_t tcp_check_pending_data(struct tcp *conn, struct net_pkt *pkt,
 		int32_t gap_size;
 		uint32_t end_offset;
 
-		pending_seq = tcp_get_seq(conn->queue_recv_data->buffer);
+		pending_seq = tcp_get_seq(conn->queue_recv_data);
 		end_offset = expected_seq - pending_seq;
 		gap_size = (int32_t)(pending_seq - th_seq(th) - ((uint32_t)len));
-		pending_len = net_pkt_get_len(conn->queue_recv_data);
+		pending_len = net_buf_frags_len(conn->queue_recv_data);
 		if (end_offset < pending_len) {
 			if (end_offset) {
 				net_pkt_remove_tail(pkt, end_offset);
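
The `end_offset`/`gap_size` arithmetic relies on unsigned wraparound: when the queued data starts beyond what the packet covers, the subtraction wraps to a huge value (the MI entries in the case table above) and the `<` comparisons correctly reject the overlap. A standalone demonstration with made-up sequence numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t expected_seq = 1000; /* next in-order byte we want */
            uint32_t pending_seq = 900;   /* first byte already queued  */
            size_t pending_len = 300;     /* bytes sitting in the queue */

            /* 100: the first 100 queued bytes duplicate the packet. */
            uint32_t end_offset = expected_seq - pending_seq;
            printf("overlap: %s\n", end_offset < pending_len ? "yes" : "no");

            /* Queue starts past the packet: the subtraction wraps to
             * ~4.29e9 ("MI"), so the overlap test fails as intended.
             */
            pending_seq = 2000;
            end_offset = expected_seq - pending_seq;
            printf("overlap: %s\n", end_offset < pending_len ? "yes" : "no");

            return 0;
    }
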
@@ -1173,15 +1175,15 @@ static size_t tcp_check_pending_data(struct tcp *conn, struct net_pkt *pkt,
 				expected_seq, pending_len);
 
 			net_buf_frag_add(pkt->buffer,
-					 conn->queue_recv_data->buffer);
-			conn->queue_recv_data->buffer = NULL;
+					 conn->queue_recv_data);
+			conn->queue_recv_data = NULL;
 
 			k_work_cancel_delayable(&conn->recv_queue_timer);
 		} else {
 			/* Check if the queued data is just a section of the incoming data */
 			if (gap_size <= 0) {
-				net_buf_unref(conn->queue_recv_data->buffer);
-				conn->queue_recv_data->buffer = NULL;
+				net_buf_unref(conn->queue_recv_data);
+				conn->queue_recv_data = NULL;
 
 				k_work_cancel_delayable(&conn->recv_queue_timer);
 			}
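
When the queued chain lines up with the incoming packet it is spliced onto `pkt->buffer` and the connection's pointer is cleared, so exactly one owner remains. A sketch of that hand-off, assuming `net_buf_frag_add()`'s append-to-chain behavior (the helper framing is hypothetical):

    /* Hypothetical helper: move the pending chain onto the packet.
     * After net_buf_frag_add() the packet owns the fragments, so the
     * source pointer must be cleared to avoid a later double unref.
     */
    static void splice_recv_queue(struct tcp *conn, struct net_pkt *pkt)
    {
            if (conn->queue_recv_data != NULL) {
                    net_buf_frag_add(pkt->buffer, conn->queue_recv_data);
                    conn->queue_recv_data = NULL;
                    k_work_cancel_delayable(&conn->recv_queue_timer);
            }
    }
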
@@ -1770,11 +1772,11 @@ static void tcp_cleanup_recv_queue(struct k_work *work)
 	k_mutex_lock(&conn->lock, K_FOREVER);
 
 	NET_DBG("Cleanup recv queue conn %p len %zd seq %u", conn,
-		net_pkt_get_len(conn->queue_recv_data),
-		tcp_get_seq(conn->queue_recv_data->buffer));
+		net_buf_frags_len(conn->queue_recv_data),
+		tcp_get_seq(conn->queue_recv_data));
 
-	net_buf_unref(conn->queue_recv_data->buffer);
-	conn->queue_recv_data->buffer = NULL;
+	net_buf_unref(conn->queue_recv_data);
+	conn->queue_recv_data = NULL;
 
 	k_mutex_unlock(&conn->lock);
 }
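
`tcp_cleanup_recv_queue()` is the handler for `recv_queue_timer`, which bounds how long out-of-order data may sit in the queue. A sketch of the delayable-work wiring, using the standard Zephyr `k_work` API (the field names are from the diff; where the init call lives and the millisecond unit of the Kconfig value are assumptions):

    /* At connection setup (placement assumed): */
    k_work_init_delayable(&conn->recv_queue_timer, tcp_cleanup_recv_queue);

    /* When the first out-of-order segment is queued: */
    k_work_schedule(&conn->recv_queue_timer,
                    K_MSEC(CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT));

    /* When the gap is filled or the data is dropped, as in the
     * hunks above:
     */
    k_work_cancel_delayable(&conn->recv_queue_timer);
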
@@ -2022,15 +2024,6 @@ static struct tcp *tcp_conn_alloc(void)
 	memset(conn, 0, sizeof(*conn));
 
-	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
-		conn->queue_recv_data = tcp_rx_pkt_alloc(conn, 0);
-		if (conn->queue_recv_data == NULL) {
-			NET_ERR("Cannot allocate %s queue for conn %p", "recv",
-				conn);
-			goto fail;
-		}
-	}
-
 	conn->send_data = tcp_pkt_alloc(conn, 0);
 	if (conn->send_data == NULL) {
 		NET_ERR("Cannot allocate %s queue for conn %p", "send", conn);
@@ -2089,11 +2082,6 @@ static struct tcp *tcp_conn_alloc(void)
 	return conn;
 
 fail:
-	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT && conn->queue_recv_data) {
-		tcp_pkt_unref(conn->queue_recv_data);
-		conn->queue_recv_data = NULL;
-	}
-
 	k_mem_slab_free(&tcp_conns_slab, (void *)conn);
 	return NULL;
 }
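
This hunk and the previous one are two halves of the same simplification: since a `NULL` pointer now encodes the empty queue, `tcp_conn_alloc()` no longer pre-allocates a placeholder packet (the `memset()` already leaves the field `NULL`), and the `fail:` path has nothing extra to undo. This removes a failure path and an RX allocation per idle connection; the queue is instead populated lazily, roughly:

    /* Lazy population replaces the removed eager allocation (sketch;
     * tcp_queue_recv_data() below takes over the incoming packet's
     * buffer directly on the first out-of-order segment):
     */
    if (conn->queue_recv_data == NULL) {
            conn->queue_recv_data = pkt->buffer; /* first queued chunk */
    }
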
@@ -2545,7 +2533,7 @@ static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
 		NET_DBG("Queuing data: conn %p", conn);
 	}
 
-	if (!net_pkt_is_empty(conn->queue_recv_data)) {
+	if (conn->queue_recv_data != NULL) {
 		/* Place the data in the correct place in the list. If the data
 		 * would not be sequential, then drop this packet.
 		 *
@@ -2575,9 +2563,9 @@ static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
 		uint32_t end_offset;
 		size_t pending_len;
 
-		pending_seq = tcp_get_seq(conn->queue_recv_data->buffer);
+		pending_seq = tcp_get_seq(conn->queue_recv_data);
 		end_offset = seq - pending_seq;
-		pending_len = net_pkt_get_len(conn->queue_recv_data);
+		pending_len = net_buf_frags_len(conn->queue_recv_data);
 		if (end_offset < pending_len) {
 			if (end_offset < len) {
 				if (end_offset) {
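
`net_pkt_get_len()` is replaced by `net_buf_frags_len()` throughout, since there is no longer a `net_pkt` wrapper to measure. For clarity, the replacement amounts to walking the fragment chain (a sketch of the behavior, not Zephyr's actual implementation):

    /* Illustrative equivalent of net_buf_frags_len(): total used
     * bytes across a fragment chain linked via buf->frags.
     */
    static size_t frags_len(const struct net_buf *buf)
    {
            size_t total = 0;

            while (buf != NULL) {
                    total += buf->len; /* bytes used in this fragment */
                    buf = buf->frags;  /* next fragment, NULL at end  */
            }

            return total;
    }
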
@@ -2586,16 +2574,16 @@ static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
 
 				/* Put new data before the pending data */
 				net_buf_frag_add(pkt->buffer,
-						 conn->queue_recv_data->buffer);
+						 conn->queue_recv_data);
 				NET_DBG("Adding before queue, end_offset %i, pending_len %zu",
 					end_offset, pending_len);
-				conn->queue_recv_data->buffer = pkt->buffer;
+				conn->queue_recv_data = pkt->buffer;
 				inserted = true;
 			}
 		} else {
 			struct net_buf *last;
 
-			last = net_buf_frag_last(conn->queue_recv_data->buffer);
+			last = net_buf_frag_last(conn->queue_recv_data);
 			pending_seq = tcp_get_seq(last);
 
 			start_offset = pending_seq - seq_start;
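
`tcp_get_seq()` now takes the queued `net_buf` directly (here via `net_buf_frag_last()` to reach the tail fragment). In this file the per-fragment sequence number is kept in the buffer's user data; a sketch of accessors consistent with that usage (an assumption about the storage scheme, not a verbatim copy of the source helpers):

    /* Assumed scheme: each queued fragment carries its TCP sequence
     * number in the net_buf user-data area.
     */
    static uint32_t tcp_get_seq(struct net_buf *buf)
    {
            return *(uint32_t *)net_buf_user_data(buf);
    }

    static void tcp_set_seq(struct net_buf *buf, uint32_t seq)
    {
            *(uint32_t *)net_buf_user_data(buf) = seq;
    }
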
@@ -2607,20 +2595,20 @@ static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
 			/* The queued data is irrelevant since the new packet
 			 * overlaps the queued data; take the new packet as contents
 			 */
-			net_buf_unref(conn->queue_recv_data->buffer);
-			conn->queue_recv_data->buffer = pkt->buffer;
+			net_buf_unref(conn->queue_recv_data);
+			conn->queue_recv_data = pkt->buffer;
 			inserted = true;
 		} else {
 			if (end_offset < len) {
 				if (end_offset) {
-					net_pkt_remove_tail(conn->queue_recv_data,
-							    end_offset);
+					net_buf_remove_mem(conn->queue_recv_data,
+							   end_offset);
				}
 
 				/* Put new data after pending data */
 				NET_DBG("Adding at end of queue, start %i, end %i, len %zu",
 					start_offset, end_offset, len);
-				net_buf_frag_add(conn->queue_recv_data->buffer,
+				net_buf_frag_add(conn->queue_recv_data,
 						 pkt->buffer);
 				inserted = true;
 			}
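
Appending to the queue first trims the `end_offset` bytes that both sides cover; with no `net_pkt` wrapper, `net_buf_remove_mem()` now does the trimming (the Zephyr helper that removes data from the end of a buffer). The resulting order of operations, sketched with the names from the hunk above:

    /* Trim the overlap from the queued data, then hang the new
     * packet's buffers after it.
     */
    if (end_offset) {
            net_buf_remove_mem(conn->queue_recv_data, end_offset);
    }
    net_buf_frag_add(conn->queue_recv_data, pkt->buffer);
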
@@ -2629,18 +2617,18 @@ static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
 
 		if (inserted) {
 			NET_DBG("All pending data: conn %p", conn);
-			if (check_seq_list(conn->queue_recv_data->buffer) == false) {
+			if (check_seq_list(conn->queue_recv_data) == false) {
 				NET_ERR("Incorrect order in out of order sequence for conn %p",
 					conn);
 				/* error in sequence list, drop it */
-				net_buf_unref(conn->queue_recv_data->buffer);
-				conn->queue_recv_data->buffer = NULL;
+				net_buf_unref(conn->queue_recv_data);
+				conn->queue_recv_data = NULL;
 			}
 		} else {
 			NET_DBG("Cannot add new data to queue");
 		}
 	} else {
-		net_pkt_append_buffer(conn->queue_recv_data, pkt->buffer);
+		conn->queue_recv_data = pkt->buffer;
 		inserted = true;
 	}
 
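
After every insertion the queue is validated with `check_seq_list()` and dropped wholesale on failure, which keeps a mis-ordered chain from ever reaching the application. An illustrative checker under the user-data scheme sketched earlier (not the source's implementation):

    /* Verify the chain is contiguous: each fragment must start
     * exactly where the previous one ended.
     */
    static bool seq_list_ok(struct net_buf *buf)
    {
            uint32_t expected = tcp_get_seq(buf);

            while (buf != NULL) {
                    if (tcp_get_seq(buf) != expected) {
                            return false; /* gap or reordering */
                    }
                    expected += buf->len;
                    buf = buf->frags;
            }

            return true;
    }
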