
Commit 20ab843

Merge branch 'sk-sk_forward_alloc-fixes'
Kuniyuki Iwashima says:

====================
sk->sk_forward_alloc fixes.

The first patch fixes a negative sk_forward_alloc by adding
sk_rmem_schedule() before skb_set_owner_r(), and the second patch removes
an unnecessary WARN_ON_ONCE().

v2: https://lore.kernel.org/netdev/[email protected]/
v1: https://lore.kernel.org/netdev/[email protected]/
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
2 parents 7fa0b52 + 62ec33b commit 20ab843
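For readers skimming the diffs below: skb_set_owner_r() charges skb->truesize against the socket's receive-memory accounting, so calling it on a freshly cloned skb without first scheduling forward allocation lets sk->sk_forward_alloc go negative. The following toy userspace model (not from the commit; the quantum size and the omission of protocol-wide limits are simplifying assumptions) illustrates the accounting difference between charging with and without a prior schedule step:

/*
 * Toy userspace model only -- not kernel code. It mimics the relevant
 * accounting: sk_forward_alloc is prepaid memory, topped up in
 * page-sized quanta by sk_rmem_schedule(), and skb_set_owner_r()
 * consumes it via sk_mem_charge().
 */
#include <stdio.h>

#define QUANTUM 4096	/* stand-in for PAGE_SIZE */

struct toy_sock {
	long forward_alloc;	/* like sk->sk_forward_alloc (prepaid bytes) */
	long rmem_alloc;	/* like sk->sk_rmem_alloc (bytes in rcv queue) */
};

/* Roughly what sk_rmem_schedule() does: prepay in whole quanta.
 * (The real helper can fail under memory pressure; ignored here.) */
static void toy_rmem_schedule(struct toy_sock *sk, long size)
{
	while (sk->forward_alloc < size)
		sk->forward_alloc += QUANTUM;
}

/* Roughly what skb_set_owner_r()/sk_mem_charge() do: consume prepaid bytes. */
static void toy_charge(struct toy_sock *sk, long truesize)
{
	sk->rmem_alloc += truesize;
	sk->forward_alloc -= truesize;
}

int main(void)
{
	struct toy_sock old_path = { 0, 0 }, new_path = { 0, 0 };
	long truesize = 1280;	/* arbitrary skb->truesize */

	/* Old pattern: charge without a prior schedule ->
	 * forward_alloc goes negative, the symptom the first patch fixes. */
	toy_charge(&old_path, truesize);
	printf("charge only:       forward_alloc = %ld\n", old_path.forward_alloc);

	/* New pattern (skb_clone_and_charge_r): schedule, then charge ->
	 * forward_alloc stays non-negative. */
	toy_rmem_schedule(&new_path, truesize);
	toy_charge(&new_path, truesize);
	printf("schedule + charge: forward_alloc = %ld\n", new_path.forward_alloc);
	return 0;
}

With scheduling first, the model ends at 4096 - 1280 = 2816 prepaid bytes instead of -1280.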

File tree: 5 files changed, 19 insertions(+), 13 deletions(-)

include/net/sock.h

13 additions, 0 deletions

@@ -2434,6 +2434,19 @@ static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
 	return false;
 }
 
+static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk)
+{
+	skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
+	if (skb) {
+		if (sk_rmem_schedule(sk, skb, skb->truesize)) {
+			skb_set_owner_r(skb, sk);
+			return skb;
+		}
+		__kfree_skb(skb);
+	}
+	return NULL;
+}
+
 static inline void skb_prepare_for_gro(struct sk_buff *skb)
 {
 	if (skb->destructor != sock_wfree) {

net/caif/caif_socket.c

1 addition, 0 deletions

@@ -1015,6 +1015,7 @@ static void caif_sock_destructor(struct sock *sk)
 		return;
 	}
 	sk_stream_kill_queues(&cf_sk->sk);
+	WARN_ON_ONCE(sk->sk_forward_alloc);
 	caif_free_client(&cf_sk->layer);
 }
 

net/core/stream.c

0 additions, 1 deletion

@@ -209,7 +209,6 @@ void sk_stream_kill_queues(struct sock *sk)
 	sk_mem_reclaim_final(sk);
 
 	WARN_ON_ONCE(sk->sk_wmem_queued);
-	WARN_ON_ONCE(sk->sk_forward_alloc);
 
 	/* It is _impossible_ for the backlog to contain anything
 	 * when we get here.  All user references to this socket

net/dccp/ipv6.c

2 additions, 5 deletions

@@ -551,11 +551,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
 	/* Clone pktoptions received with SYN, if we own the req */
 	if (*own_req && ireq->pktopts) {
-		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
+		newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
 		consume_skb(ireq->pktopts);
 		ireq->pktopts = NULL;
-		if (newnp->pktoptions)
-			skb_set_owner_r(newnp->pktoptions, newsk);
 	}
 
 	return newsk;
@@ -615,7 +613,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	   --ANK (980728)
 	 */
 	if (np->rxopt.all)
-		opt_skb = skb_clone(skb, GFP_ATOMIC);
+		opt_skb = skb_clone_and_charge_r(skb, sk);
 
 	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
 		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
@@ -679,7 +677,6 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
 		if (ipv6_opt_accepted(sk, opt_skb,
 				      &DCCP_SKB_CB(opt_skb)->header.h6)) {
-			skb_set_owner_r(opt_skb, sk);
 			memmove(IP6CB(opt_skb),
 				&DCCP_SKB_CB(opt_skb)->header.h6,
 				sizeof(struct inet6_skb_parm));

net/ipv6/tcp_ipv6.c

3 additions, 7 deletions

@@ -1388,14 +1388,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 
 		/* Clone pktoptions received with SYN, if we own the req */
 		if (ireq->pktopts) {
-			newnp->pktoptions = skb_clone(ireq->pktopts,
-						      sk_gfp_mask(sk, GFP_ATOMIC));
+			newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
 			consume_skb(ireq->pktopts);
 			ireq->pktopts = NULL;
-			if (newnp->pktoptions) {
+			if (newnp->pktoptions)
 				tcp_v6_restore_cb(newnp->pktoptions);
-				skb_set_owner_r(newnp->pktoptions, newsk);
-			}
 		}
 	} else {
 		if (!req_unhash && found_dup_sk) {
@@ -1467,7 +1464,7 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	   --ANK (980728)
 	 */
 	if (np->rxopt.all)
-		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
+		opt_skb = skb_clone_and_charge_r(skb, sk);
 
 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
@@ -1553,7 +1550,6 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		if (np->repflow)
 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
-			skb_set_owner_r(opt_skb, sk);
 			tcp_v6_restore_cb(opt_skb);
 			opt_skb = xchg(&np->pktoptions, opt_skb);
 		} else {
