@@ -869,7 +869,10 @@ static void tcp_conn_release(struct k_work *work)
869
869
tcp_pkt_unref (conn -> send_data );
870
870
871
871
if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT ) {
872
- tcp_pkt_unref (conn -> queue_recv_data );
872
+ if (conn -> queue_recv_data != NULL ) {
873
+ net_buf_unref (conn -> queue_recv_data );
874
+ conn -> queue_recv_data = NULL ;
875
+ }
873
876
}
874
877
875
878
(void )k_work_cancel_delayable (& conn -> timewait_timer );
@@ -1208,8 +1211,7 @@ static size_t tcp_check_pending_data(struct tcp *conn, struct net_pkt *pkt,
1208
1211
{
1209
1212
size_t pending_len = 0 ;
1210
1213
1211
- if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT &&
1212
- !net_pkt_is_empty (conn -> queue_recv_data )) {
1214
+ if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT && conn -> queue_recv_data != NULL ) {
1213
1215
/* Some potential cases:
1214
1216
* Note: MI = MAX_INT
1215
1217
* Packet | Queued| End off | Gap size | Required handling
@@ -1231,10 +1233,10 @@ static size_t tcp_check_pending_data(struct tcp *conn, struct net_pkt *pkt,
1231
1233
int32_t gap_size ;
1232
1234
uint32_t end_offset ;
1233
1235
1234
- pending_seq = tcp_get_seq (conn -> queue_recv_data -> buffer );
1236
+ pending_seq = tcp_get_seq (conn -> queue_recv_data );
1235
1237
end_offset = expected_seq - pending_seq ;
1236
1238
gap_size = (int32_t )(pending_seq - th_seq (th ) - ((uint32_t )len ));
1237
- pending_len = net_pkt_get_len (conn -> queue_recv_data );
1239
+ pending_len = net_buf_frags_len (conn -> queue_recv_data );
1238
1240
if (end_offset < pending_len ) {
1239
1241
if (end_offset ) {
1240
1242
net_pkt_remove_tail (pkt , end_offset );
@@ -1245,15 +1247,15 @@ static size_t tcp_check_pending_data(struct tcp *conn, struct net_pkt *pkt,
1245
1247
expected_seq , pending_len );
1246
1248
1247
1249
net_buf_frag_add (pkt -> buffer ,
1248
- conn -> queue_recv_data -> buffer );
1249
- conn -> queue_recv_data -> buffer = NULL ;
1250
+ conn -> queue_recv_data );
1251
+ conn -> queue_recv_data = NULL ;
1250
1252
1251
1253
k_work_cancel_delayable (& conn -> recv_queue_timer );
1252
1254
} else {
1253
1255
/* Check if the queued data is just a section of the incoming data */
1254
1256
if (gap_size <= 0 ) {
1255
- net_buf_unref (conn -> queue_recv_data -> buffer );
1256
- conn -> queue_recv_data -> buffer = NULL ;
1257
+ net_buf_unref (conn -> queue_recv_data );
1258
+ conn -> queue_recv_data = NULL ;
1257
1259
1258
1260
k_work_cancel_delayable (& conn -> recv_queue_timer );
1259
1261
}
@@ -1899,11 +1901,11 @@ static void tcp_cleanup_recv_queue(struct k_work *work)
1899
1901
k_mutex_lock (& conn -> lock , K_FOREVER );
1900
1902
1901
1903
NET_DBG ("[%p] cleanup recv queue len %zd seq %u" , conn ,
1902
- net_pkt_get_len (conn -> queue_recv_data ),
1903
- tcp_get_seq (conn -> queue_recv_data -> buffer ));
1904
+ net_buf_frags_len (conn -> queue_recv_data ),
1905
+ tcp_get_seq (conn -> queue_recv_data ));
1904
1906
1905
- net_buf_unref (conn -> queue_recv_data -> buffer );
1906
- conn -> queue_recv_data -> buffer = NULL ;
1907
+ net_buf_unref (conn -> queue_recv_data );
1908
+ conn -> queue_recv_data = NULL ;
1907
1909
1908
1910
k_mutex_unlock (& conn -> lock );
1909
1911
}
@@ -2154,15 +2156,6 @@ static struct tcp *tcp_conn_alloc(void)
2154
2156
2155
2157
memset (conn , 0 , sizeof (* conn ));
2156
2158
2157
- if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT ) {
2158
- conn -> queue_recv_data = tcp_rx_pkt_alloc (conn , 0 );
2159
- if (conn -> queue_recv_data == NULL ) {
2160
- NET_ERR ("Cannot allocate %s queue for conn %p" , "recv" ,
2161
- conn );
2162
- goto fail ;
2163
- }
2164
- }
2165
-
2166
2159
conn -> send_data = tcp_pkt_alloc (conn , 0 );
2167
2160
if (conn -> send_data == NULL ) {
2168
2161
NET_ERR ("Cannot allocate %s queue for conn %p" , "send" , conn );
@@ -2223,11 +2216,6 @@ static struct tcp *tcp_conn_alloc(void)
2223
2216
return conn ;
2224
2217
2225
2218
fail :
2226
- if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT && conn -> queue_recv_data ) {
2227
- tcp_pkt_unref (conn -> queue_recv_data );
2228
- conn -> queue_recv_data = NULL ;
2229
- }
2230
-
2231
2219
k_mem_slab_free (& tcp_conns_slab , (void * )conn );
2232
2220
return NULL ;
2233
2221
}
@@ -2688,7 +2676,7 @@ static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
2688
2676
NET_DBG ("[%p] Queuing data" , conn );
2689
2677
}
2690
2678
2691
- if (! net_pkt_is_empty ( conn -> queue_recv_data ) ) {
2679
+ if (conn -> queue_recv_data != NULL ) {
2692
2680
/* Place the data to correct place in the list. If the data
2693
2681
* would not be sequential, then drop this packet.
2694
2682
*
@@ -2718,9 +2706,9 @@ static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
2718
2706
uint32_t end_offset ;
2719
2707
size_t pending_len ;
2720
2708
2721
- pending_seq = tcp_get_seq (conn -> queue_recv_data -> buffer );
2709
+ pending_seq = tcp_get_seq (conn -> queue_recv_data );
2722
2710
end_offset = seq - pending_seq ;
2723
- pending_len = net_pkt_get_len (conn -> queue_recv_data );
2711
+ pending_len = net_buf_frags_len (conn -> queue_recv_data );
2724
2712
if (end_offset < pending_len ) {
2725
2713
if (end_offset < len ) {
2726
2714
if (end_offset ) {
@@ -2729,17 +2717,17 @@ static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
2729
2717
2730
2718
/* Put new data before the pending data */
2731
2719
net_buf_frag_add (pkt -> buffer ,
2732
- conn -> queue_recv_data -> buffer );
2720
+ conn -> queue_recv_data );
2733
2721
NET_DBG ("[%p] Adding at before queue, "
2734
2722
"end_offset %i, pending_len %zu" ,
2735
2723
conn , end_offset , pending_len );
2736
- conn -> queue_recv_data -> buffer = pkt -> buffer ;
2724
+ conn -> queue_recv_data = pkt -> buffer ;
2737
2725
inserted = true;
2738
2726
}
2739
2727
} else {
2740
2728
struct net_buf * last ;
2741
2729
2742
- last = net_buf_frag_last (conn -> queue_recv_data -> buffer );
2730
+ last = net_buf_frag_last (conn -> queue_recv_data );
2743
2731
pending_seq = tcp_get_seq (last );
2744
2732
2745
2733
start_offset = pending_seq - seq_start ;
@@ -2751,21 +2739,21 @@ static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
2751
2739
/* The queued data is irrelevant since the new packet overlaps the
2752
2740
* queued data, take the new packet as contents
2753
2741
*/
2754
- net_buf_unref (conn -> queue_recv_data -> buffer );
2755
- conn -> queue_recv_data -> buffer = pkt -> buffer ;
2742
+ net_buf_unref (conn -> queue_recv_data );
2743
+ conn -> queue_recv_data = pkt -> buffer ;
2756
2744
inserted = true;
2757
2745
} else {
2758
2746
if (end_offset < len ) {
2759
2747
if (end_offset ) {
2760
- net_pkt_remove_tail (conn -> queue_recv_data ,
2761
- end_offset );
2748
+ net_buf_remove_mem (conn -> queue_recv_data ,
2749
+ end_offset );
2762
2750
}
2763
2751
2764
2752
/* Put new data after pending data */
2765
2753
NET_DBG ("[%p] Adding at end of queue, "
2766
2754
"start %i, end %i, len %zu" ,
2767
2755
conn , start_offset , end_offset , len );
2768
- net_buf_frag_add (conn -> queue_recv_data -> buffer ,
2756
+ net_buf_frag_add (conn -> queue_recv_data ,
2769
2757
pkt -> buffer );
2770
2758
inserted = true;
2771
2759
}
@@ -2774,18 +2762,18 @@ static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
2774
2762
2775
2763
if (inserted ) {
2776
2764
NET_DBG ("[%p] All pending data" , conn );
2777
- if (check_seq_list (conn -> queue_recv_data -> buffer ) == false) {
2765
+ if (check_seq_list (conn -> queue_recv_data ) == false) {
2778
2766
NET_ERR ("Incorrect order in out of order sequence for conn %p" ,
2779
2767
conn );
2780
2768
/* error in sequence list, drop it */
2781
- net_buf_unref (conn -> queue_recv_data -> buffer );
2782
- conn -> queue_recv_data -> buffer = NULL ;
2769
+ net_buf_unref (conn -> queue_recv_data );
2770
+ conn -> queue_recv_data = NULL ;
2783
2771
}
2784
2772
} else {
2785
2773
NET_DBG ("[%p] Cannot add new data to queue" , conn );
2786
2774
}
2787
2775
} else {
2788
- net_pkt_append_buffer ( conn -> queue_recv_data , pkt -> buffer ) ;
2776
+ conn -> queue_recv_data = pkt -> buffer ;
2789
2777
inserted = true;
2790
2778
}
2791
2779
0 commit comments