Skip to content

Commit 1094c6f

Browse files
Paolo Abeni authored and kuba-moo committed
mptcp: fix possible divide by zero
Florian noted that if mptcp_alloc_tx_skb() allocation fails in __mptcp_push_pending(), we can end up invoking mptcp_push_release()/tcp_push() with a zero mss, causing a divide-by-0 error. This change addresses the issue by refactoring the skb allocation code: it checks whether skb collapsing will happen for sure, and performs the skb allocation only after that check. Skb allocation will now happen only after the call to tcp_send_mss(), which correctly initializes mss_now. As side bonuses, we now fill the skb tx cache only when needed, and this also cleans up the output path a bit. v1 -> v2: - use lockdep_assert_held_once() - Jakub - fix indentation - Jakub Reported-by: Florian Westphal <[email protected]> Fixes: 724cfd2 ("mptcp: allocate TX skbs in msk context") Signed-off-by: Paolo Abeni <[email protected]> Signed-off-by: Mat Martineau <[email protected]> Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 780aa12 commit 1094c6f

File tree

1 file changed

+35
-41
lines changed

1 file changed

+35
-41
lines changed

net/mptcp/protocol.c

Lines changed: 35 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -1003,6 +1003,13 @@ static void mptcp_wmem_uncharge(struct sock *sk, int size)
10031003
msk->wmem_reserved += size;
10041004
}
10051005

1006+
static void __mptcp_mem_reclaim_partial(struct sock *sk)
1007+
{
1008+
lockdep_assert_held_once(&sk->sk_lock.slock);
1009+
__mptcp_update_wmem(sk);
1010+
sk_mem_reclaim_partial(sk);
1011+
}
1012+
10061013
static void mptcp_mem_reclaim_partial(struct sock *sk)
10071014
{
10081015
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1094,12 +1101,8 @@ static void __mptcp_clean_una(struct sock *sk)
10941101
msk->recovery = false;
10951102

10961103
out:
1097-
if (cleaned) {
1098-
if (tcp_under_memory_pressure(sk)) {
1099-
__mptcp_update_wmem(sk);
1100-
sk_mem_reclaim_partial(sk);
1101-
}
1102-
}
1104+
if (cleaned && tcp_under_memory_pressure(sk))
1105+
__mptcp_mem_reclaim_partial(sk);
11031106

11041107
if (snd_una == READ_ONCE(msk->snd_nxt) && !msk->recovery) {
11051108
if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
@@ -1179,6 +1182,7 @@ struct mptcp_sendmsg_info {
11791182
u16 limit;
11801183
u16 sent;
11811184
unsigned int flags;
1185+
bool data_lock_held;
11821186
};
11831187

11841188
static int mptcp_check_allowed_size(struct mptcp_sock *msk, u64 data_seq,
@@ -1250,17 +1254,17 @@ static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
12501254
return false;
12511255
}
12521256

1253-
static bool mptcp_must_reclaim_memory(struct sock *sk, struct sock *ssk)
1257+
static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
12541258
{
1255-
return !ssk->sk_tx_skb_cache &&
1256-
tcp_under_memory_pressure(sk);
1257-
}
1259+
gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
12581260

1259-
static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk)
1260-
{
1261-
if (unlikely(mptcp_must_reclaim_memory(sk, ssk)))
1262-
mptcp_mem_reclaim_partial(sk);
1263-
return __mptcp_alloc_tx_skb(sk, ssk, sk->sk_allocation);
1261+
if (unlikely(tcp_under_memory_pressure(sk))) {
1262+
if (data_lock_held)
1263+
__mptcp_mem_reclaim_partial(sk);
1264+
else
1265+
mptcp_mem_reclaim_partial(sk);
1266+
}
1267+
return __mptcp_alloc_tx_skb(sk, ssk, gfp);
12641268
}
12651269

12661270
/* note: this always recompute the csum on the whole skb, even
@@ -1284,7 +1288,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
12841288
bool zero_window_probe = false;
12851289
struct mptcp_ext *mpext = NULL;
12861290
struct sk_buff *skb, *tail;
1287-
bool can_collapse = false;
1291+
bool must_collapse = false;
12881292
int size_bias = 0;
12891293
int avail_size;
12901294
size_t ret = 0;
@@ -1304,16 +1308,24 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
13041308
* SSN association set here
13051309
*/
13061310
mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
1307-
can_collapse = (info->size_goal - skb->len > 0) &&
1308-
mptcp_skb_can_collapse_to(data_seq, skb, mpext);
1309-
if (!can_collapse) {
1311+
if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
13101312
TCP_SKB_CB(skb)->eor = 1;
1311-
} else {
1313+
goto alloc_skb;
1314+
}
1315+
1316+
must_collapse = (info->size_goal - skb->len > 0) &&
1317+
(skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags);
1318+
if (must_collapse) {
13121319
size_bias = skb->len;
13131320
avail_size = info->size_goal - skb->len;
13141321
}
13151322
}
13161323

1324+
alloc_skb:
1325+
if (!must_collapse && !ssk->sk_tx_skb_cache &&
1326+
!mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held))
1327+
return 0;
1328+
13171329
/* Zero window and all data acked? Probe. */
13181330
avail_size = mptcp_check_allowed_size(msk, data_seq, avail_size);
13191331
if (avail_size == 0) {
@@ -1343,7 +1355,6 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
13431355
if (skb == tail) {
13441356
TCP_SKB_CB(tail)->tcp_flags &= ~TCPHDR_PSH;
13451357
mpext->data_len += ret;
1346-
WARN_ON_ONCE(!can_collapse);
13471358
WARN_ON_ONCE(zero_window_probe);
13481359
goto out;
13491360
}
@@ -1530,15 +1541,6 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
15301541
if (ssk != prev_ssk)
15311542
lock_sock(ssk);
15321543

1533-
/* keep it simple and always provide a new skb for the
1534-
* subflow, even if we will not use it when collapsing
1535-
* on the pending one
1536-
*/
1537-
if (!mptcp_alloc_tx_skb(sk, ssk)) {
1538-
mptcp_push_release(sk, ssk, &info);
1539-
goto out;
1540-
}
1541-
15421544
ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
15431545
if (ret <= 0) {
15441546
mptcp_push_release(sk, ssk, &info);
@@ -1571,7 +1573,9 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
15711573
static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
15721574
{
15731575
struct mptcp_sock *msk = mptcp_sk(sk);
1574-
struct mptcp_sendmsg_info info;
1576+
struct mptcp_sendmsg_info info = {
1577+
.data_lock_held = true,
1578+
};
15751579
struct mptcp_data_frag *dfrag;
15761580
struct sock *xmit_ssk;
15771581
int len, copied = 0;
@@ -1597,13 +1601,6 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
15971601
goto out;
15981602
}
15991603

1600-
if (unlikely(mptcp_must_reclaim_memory(sk, ssk))) {
1601-
__mptcp_update_wmem(sk);
1602-
sk_mem_reclaim_partial(sk);
1603-
}
1604-
if (!__mptcp_alloc_tx_skb(sk, ssk, GFP_ATOMIC))
1605-
goto out;
1606-
16071604
ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
16081605
if (ret <= 0)
16091606
goto out;
@@ -2409,9 +2406,6 @@ static void __mptcp_retrans(struct sock *sk)
24092406
info.sent = 0;
24102407
info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : dfrag->already_sent;
24112408
while (info.sent < info.limit) {
2412-
if (!mptcp_alloc_tx_skb(sk, ssk))
2413-
break;
2414-
24152409
ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
24162410
if (ret <= 0)
24172411
break;

0 commit comments

Comments (0)