Commit a5f9520

jukkar authored and nashif committed
net: tcp2: Cleanup properly if running out of mem
If we cannot allocate net_pkt or net_buf, then check this condition properly and release other resources that were already allocated.

Signed-off-by: Jukka Rissanen <[email protected]>
1 parent e9b5be3 commit a5f9520
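
The fix follows the standard goto-based unwind pattern: each allocation is checked, and a failure releases only the resources acquired before it. A minimal sketch of the pattern, with hypothetical names standing in for the Zephyr allocators:

/* Minimal sketch of the cleanup-on-failure pattern this commit applies.
 * The types and helpers here are hypothetical stand-ins, not Zephyr APIs.
 */
#include <stdlib.h>

struct conn {
	void *recv_q;
	void *send_q;
};

struct conn *conn_alloc(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	if (!c) {
		return NULL;
	}

	c->recv_q = malloc(64);
	if (!c->recv_q) {
		goto fail;          /* nothing else to free yet */
	}

	c->send_q = malloc(64);
	if (!c->send_q) {
		goto fail;          /* must also release recv_q */
	}

	return c;

fail:
	free(c->recv_q);            /* free(NULL) is a no-op */
	free(c);
	return NULL;
}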

File tree: 1 file changed, +43 -22 lines


subsys/net/ip/tcp2.c

Lines changed: 43 additions & 22 deletions
@@ -304,13 +304,19 @@ static void tcp_send(struct net_pkt *pkt)
 	if (is_6lo_technology(pkt)) {
 		struct net_pkt *new_pkt;
 
-		new_pkt = net_pkt_clone(pkt, TCP_PKT_ALLOC_TIMEOUT);
+		new_pkt = tcp_pkt_clone(pkt);
 		if (!new_pkt) {
+			/* The caller of this func assumes that the net_pkt
+			 * is consumed by this function. We call unref here
+			 * so that the unref at the end of the func will
+			 * free the net_pkt.
+			 */
+			tcp_pkt_unref(pkt);
 			goto out;
 		}
 
 		if (net_send_data(new_pkt) < 0) {
-			net_pkt_unref(new_pkt);
+			tcp_pkt_unref(new_pkt);
 		}
 
 		/* We simulate sending of the original pkt and unref it like
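
The comment added in this hunk captures a reference-counting contract: tcp_send() always consumes the caller's reference to pkt, even when cloning fails. A small stand-alone illustration of that contract, with hypothetical pkt_* helpers (not the Zephyr API):

/* Hypothetical refcounted packet; illustrates the "callee always
 * consumes the caller's reference" contract from the hunk above. */
#include <stdlib.h>

struct pkt {
	int refs;
};

static void pkt_unref(struct pkt *p)
{
	if (--p->refs == 0) {
		free(p);
	}
}

static struct pkt *pkt_clone(const struct pkt *p)
{
	(void)p;
	return NULL; /* simulate allocation failure */
}

static void send_consuming(struct pkt *p)
{
	struct pkt *copy = pkt_clone(p);

	if (!copy) {
		/* Without this unref the caller's reference would leak,
		 * since the caller never touches p again. */
		pkt_unref(p);
		return;
	}

	/* ... hand copy to the driver, then drop the original ... */
	pkt_unref(p);
}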
@@ -354,16 +360,15 @@ static int tcp_conn_unref(struct tcp *conn)
 	}
 #endif /* CONFIG_NET_TEST_PROTOCOL */
 
-	ref_count = atomic_dec(&conn->ref_count) - 1;
+	k_mutex_lock(&tcp_lock, K_FOREVER);
 
+	ref_count = atomic_dec(&conn->ref_count) - 1;
 	if (ref_count) {
 		tp_out(net_context_get_family(conn->context), conn->iface,
 		       "TP_TRACE", "event", "CONN_DELETE");
-		goto out;
+		goto unlock;
 	}
 
-	k_mutex_lock(&tcp_lock, K_FOREVER);
-
 	/* If there is any pending data, pass that to application */
 	while ((pkt = k_fifo_get(&conn->recv_data, K_NO_WAIT)) != NULL) {
 		net_context_packet_received(
@@ -403,6 +408,7 @@ static int tcp_conn_unref(struct tcp *conn)
 
 	k_mem_slab_free(&tcp_conns_slab, (void **)&conn);
 
+unlock:
 	k_mutex_unlock(&tcp_lock);
 out:
 	return ref_count;
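
Moving the k_mutex_lock() ahead of the atomic_dec() closes a window in which another thread could look up the connection in the global list while this thread had already committed to freeing it. A sketch of the idea, using pthreads as a stand-in for Zephyr's k_mutex (all names illustrative):

/* Sketch of the tcp_conn_unref() locking change; pthreads stands in
 * for k_mutex, and the list is a simplified singly linked list. */
#include <pthread.h>
#include <stdlib.h>

struct conn {
	int refs;
	struct conn *next;             /* global list linkage */
};

static pthread_mutex_t conns_lock = PTHREAD_MUTEX_INITIALIZER;
static struct conn *conns;             /* head of the global list */

static void list_remove(struct conn *c)
{
	struct conn **p;

	for (p = &conns; *p; p = &(*p)->next) {
		if (*p == c) {
			*p = c->next;
			break;
		}
	}
}

int conn_unref(struct conn *c)
{
	int refs;

	/* Decrement under the lock: a concurrent lookup that holds the
	 * lock either sees the connection fully alive or not at all.
	 * Decrementing first (the old order) leaves a window where a
	 * lookup can return a connection that is about to be freed. */
	pthread_mutex_lock(&conns_lock);

	refs = --c->refs;
	if (refs > 0) {
		goto unlock;
	}

	list_remove(c);                /* unreachable from now on */
	free(c);

unlock:
	pthread_mutex_unlock(&conns_lock);
	return refs;
}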
@@ -681,7 +687,7 @@ static int tcp_data_get(struct tcp *conn, struct net_pkt *pkt, size_t *len)
 	}
 
 	if (conn->context->recv_cb) {
-		struct net_pkt *up = net_pkt_clone(pkt, TCP_PKT_ALLOC_TIMEOUT);
+		struct net_pkt *up = tcp_pkt_clone(pkt);
 
 		if (!up) {
 			ret = -ENOBUFS;
@@ -915,11 +921,11 @@ static int tcp_send_data(struct tcp *conn)
 		conn->unacked_len += len;
 
 		if (conn->data_mode == TCP_DATA_MODE_RESEND) {
-			net_stats_update_tcp_resent(net_pkt_iface(pkt), len);
+			net_stats_update_tcp_resent(conn->iface, len);
 			net_stats_update_tcp_seg_rexmit(conn->iface);
 		} else {
-			net_stats_update_tcp_sent(net_pkt_iface(pkt), len);
-			net_stats_update_tcp_seg_sent(net_pkt_iface(pkt));
+			net_stats_update_tcp_sent(conn->iface, len);
+			net_stats_update_tcp_seg_sent(conn->iface);
 		}
 	}
 
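
The stats calls switch from net_pkt_iface(pkt) to conn->iface because the packet may already have been consumed by the send path by the time the statistics are updated, while the connection's cached interface pointer outlives any single packet. A contrived stand-alone illustration (hypothetical types, not the Zephyr structs):

/* Contrived illustration of the use-after-free the stats change avoids;
 * the structs and helpers are hypothetical, not the Zephyr ones. */
#include <stddef.h>
#include <stdlib.h>

struct iface {
	unsigned long tx_bytes;
};

struct pkt {
	struct iface *iface;
};

struct conn {
	struct iface *iface;           /* cached at connection setup */
};

static void lower_send(struct pkt *p)
{
	free(p);                       /* the send path consumes the pkt */
}

static void send_and_count(struct conn *c, struct pkt *p, size_t len)
{
	lower_send(p);

	/* Wrong: p->iface would read freed memory at this point.
	 * Right: the connection's cached pointer outlives the packet. */
	c->iface->tx_bytes += len;
}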
@@ -1098,46 +1104,61 @@ static struct tcp *tcp_conn_alloc(void)
 
 	ret = k_mem_slab_alloc(&tcp_conns_slab, (void **)&conn, K_NO_WAIT);
 	if (ret) {
+		NET_ERR("Cannot allocate slab");
 		goto out;
 	}
 
 	memset(conn, 0, sizeof(*conn));
 
+	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
+		conn->queue_recv_data = tcp_rx_pkt_alloc(conn, 0);
+		if (conn->queue_recv_data == NULL) {
+			NET_ERR("Cannot allocate %s queue for conn %p", "recv",
+				conn);
+			goto fail;
+		}
+	}
+
+	conn->send_data = tcp_pkt_alloc(conn, 0);
+	if (conn->send_data == NULL) {
+		NET_ERR("Cannot allocate %s queue for conn %p", "send", conn);
+		goto fail;
+	}
+
 	k_mutex_init(&conn->lock);
 	k_fifo_init(&conn->recv_data);
+	k_sem_init(&conn->connect_sem, 0, UINT_MAX);
 
+	conn->in_connect = false;
 	conn->state = TCP_LISTEN;
-
 	conn->recv_win = tcp_window;
-
 	conn->seq = (IS_ENABLED(CONFIG_NET_TEST_PROTOCOL) ||
 		     IS_ENABLED(CONFIG_NET_TEST)) ? 0 : sys_rand32_get();
 
 	sys_slist_init(&conn->send_queue);
 
 	k_delayed_work_init(&conn->send_timer, tcp_send_process);
-
 	k_delayed_work_init(&conn->timewait_timer, tcp_timewait_timeout);
 	k_delayed_work_init(&conn->fin_timer, tcp_fin_timeout);
-
-	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
-		conn->queue_recv_data = tcp_rx_pkt_alloc(conn, 0);
-	}
-
-	conn->send_data = tcp_pkt_alloc(conn, 0);
 	k_delayed_work_init(&conn->send_data_timer, tcp_resend_data);
 	k_delayed_work_init(&conn->recv_queue_timer, tcp_cleanup_recv_queue);
 
-	k_sem_init(&conn->connect_sem, 0, UINT_MAX);
-	conn->in_connect = false;
-
 	tcp_conn_ref(conn);
 
 	sys_slist_append(&tcp_conns, &conn->next);
 out:
 	NET_DBG("conn: %p", conn);
 
 	return conn;
+
+fail:
+	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT && conn->queue_recv_data) {
+		tcp_pkt_unref(conn->queue_recv_data);
+		conn->queue_recv_data = NULL;
+	}
+
+	k_mem_slab_free(&tcp_conns_slab, (void **)&conn);
+	return NULL;
 }
 
 int net_tcp_get(struct net_context *context)
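
With these changes tcp_conn_alloc() either returns a fully initialized connection or returns NULL with every partial allocation already released, so callers need only a NULL check. A hedged sketch of caller-side handling (the function body below is illustrative, not the actual net_tcp_get() implementation):

/* Illustrative caller: relies only on the contract that
 * tcp_conn_alloc() cleans up after itself and returns NULL on
 * failure, so there is nothing to roll back here. */
int net_tcp_get_example(struct net_context *context)
{
	struct tcp *conn = tcp_conn_alloc();

	if (conn == NULL) {
		return -ENOMEM;
	}

	conn->context = context;       /* wire the two objects together */

	return 0;
}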
