
Commit 0ff0faf

Merge branch 'udp-fix-two-integer-overflows-when-sk-sk_rcvbuf-is-close-to-int_max'
Kuniyuki Iwashima says:

====================
udp: Fix two integer overflows when sk->sk_rcvbuf is close to INT_MAX.

I got a report that UDP mem usage in /proc/net/sockstat did not drop
even after an application was terminated.

The issue could happen if sk->sk_rmem_alloc wraps around due to a large
sk->sk_rcvbuf, which was INT_MAX in our case.

Patch 2 fixes the issue, and patch 1 fixes yet another overflow I found
while investigating it.

v3: https://lore.kernel.org/[email protected]
v2: https://lore.kernel.org/[email protected]
v1: https://lore.kernel.org/[email protected]
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
2 parents: 1b7fdc7 + df207de
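To make the failure mode concrete, here is a minimal user-space sketch of the wraparound the cover letter describes. It is not kernel code: the variables stand in for sk->sk_rcvbuf, sk->sk_rmem_alloc and skb->truesize, and the values are made up. Signed overflow is undefined in standard C, but the kernel builds with -fno-strict-overflow, so compile this sketch with -fwrapv to mirror the wrapping behavior.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int rcvbuf = INT_MAX;	  /* sk->sk_rcvbuf in the report */
	int rmem = INT_MAX - 100; /* sk->sk_rmem_alloc, nearly full */
	int truesize = 1000;	  /* next skb's truesize */

	/* Old check: passes, because rmem is still <= rcvbuf. */
	if (!(rmem > rcvbuf))
		rmem += truesize; /* wraps to a large negative value */

	/* rmem is now negative, so every later "rmem > rcvbuf" check
	 * also passes, the counter keeps wrapping, and the per-socket
	 * charge no longer matches what was charged globally: the
	 * stuck "mem" figure seen in /proc/net/sockstat.
	 */
	printf("rmem after enqueue: %d\n", rmem);
	return 0;
}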

File tree

1 file changed: +24 -18 lines changed


net/ipv4/udp.c

Lines changed: 24 additions & 18 deletions
@@ -1625,12 +1625,12 @@ static bool udp_skb_has_head_state(struct sk_buff *skb)
 }
 
 /* fully reclaim rmem/fwd memory allocated for skb */
-static void udp_rmem_release(struct sock *sk, int size, int partial,
-			     bool rx_queue_lock_held)
+static void udp_rmem_release(struct sock *sk, unsigned int size,
+			     int partial, bool rx_queue_lock_held)
 {
 	struct udp_sock *up = udp_sk(sk);
 	struct sk_buff_head *sk_queue;
-	int amt;
+	unsigned int amt;
 
 	if (likely(partial)) {
 		up->forward_deficit += size;
@@ -1650,10 +1650,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
 	if (!rx_queue_lock_held)
 		spin_lock(&sk_queue->lock);
 
-
-	sk_forward_alloc_add(sk, size);
-	amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
-	sk_forward_alloc_add(sk, -amt);
+	amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
+	sk_forward_alloc_add(sk, size - amt);
 
 	if (amt)
 		__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
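The old and new formulations are algebraically the same; what changes is that the releasable amount is computed before touching sk->sk_forward_alloc, so the signed per-socket counter never holds the transient inflated sum. A small stand-alone sketch with made-up values (PAGE_SIZE fixed at 4096 here, counters simplified to locals):

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int size = 3u * PAGE_SIZE + 123; /* bytes being released */
	int fwd_old = 500, fwd_new = 500;         /* sk->sk_forward_alloc */
	int partial = 1;                          /* keep some slack */
	unsigned int amt_old, amt_new;

	/* Old: inflate fwd first, then round down to a page multiple
	 * and deflate again.
	 */
	fwd_old += size;
	amt_old = (fwd_old - partial) & ~(PAGE_SIZE - 1);
	fwd_old -= amt_old;

	/* New: compute the page-aligned amount up front. */
	amt_new = (size + fwd_new - partial) & ~(PAGE_SIZE - 1);
	fwd_new += size - amt_new;

	printf("amt: old=%u new=%u  fwd: old=%d new=%d\n",
	       amt_old, amt_new, fwd_old, fwd_new);
	return 0;
}

Both branches print the same amt and fwd. The difference only matters when size is enormous, for example a wrapped rmem counter read back as unsigned, where the old intermediate fwd + size could itself overflow the signed counter.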
@@ -1725,17 +1723,25 @@ static int udp_rmem_schedule(struct sock *sk, int size)
 int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct sk_buff_head *list = &sk->sk_receive_queue;
-	int rmem, err = -ENOMEM;
+	unsigned int rmem, rcvbuf;
 	spinlock_t *busy = NULL;
-	int size, rcvbuf;
+	int size, err = -ENOMEM;
 
-	/* Immediately drop when the receive queue is full.
-	 * Always allow at least one packet.
-	 */
 	rmem = atomic_read(&sk->sk_rmem_alloc);
 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
-	if (rmem > rcvbuf)
-		goto drop;
+	size = skb->truesize;
+
+	/* Immediately drop when the receive queue is full.
+	 * Cast to unsigned int performs the boundary check for INT_MAX.
+	 */
+	if (rmem + size > rcvbuf) {
+		if (rcvbuf > INT_MAX >> 1)
+			goto drop;
+
+		/* Always allow at least one packet for small buffer. */
+		if (rmem > rcvbuf)
+			goto drop;
+	}
 
 	/* Under mem pressure, it might be helpful to help udp_recvmsg()
 	 * having linear skbs :
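A user-space mirror of the new check, with illustrative values, shows why the unsigned arithmetic is safe: a sum past INT_MAX stays a huge positive value instead of wrapping negative, so it lands in the drop path.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int rcvbuf = INT_MAX;	   /* sk->sk_rcvbuf */
	unsigned int rmem = INT_MAX - 100; /* sk->sk_rmem_alloc */
	int size = 1000;		   /* skb->truesize */

	if (rmem + size > rcvbuf) {	/* 2147484547 > 2147483647 */
		if (rcvbuf > INT_MAX >> 1) {
			puts("drop: queue full near INT_MAX");
			return 0;
		}
		/* Small rcvbuf: still allow one packet if the queue
		 * was not already over the limit.
		 */
		if (rmem > rcvbuf) {
			puts("drop: small buffer full");
			return 0;
		}
	}
	puts("enqueue");
	return 0;
}

As far as the diff shows, restricting the "always allow one packet" grace to rcvbuf <= INT_MAX >> 1 is what keeps that one extra packet from ever pushing sk_rmem_alloc past INT_MAX.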
@@ -1745,10 +1751,10 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (rmem > (rcvbuf >> 1)) {
 		skb_condense(skb);
-
+		size = skb->truesize;
 		busy = busylock_acquire(sk);
 	}
-	size = skb->truesize;
+
 	udp_set_dev_scratch(skb);
 
 	atomic_add(size, &sk->sk_rmem_alloc);
@@ -1835,7 +1841,7 @@ EXPORT_IPV6_MOD_GPL(skb_consume_udp);
 
 static struct sk_buff *__first_packet_length(struct sock *sk,
 					     struct sk_buff_head *rcvq,
-					     int *total)
+					     unsigned int *total)
 {
 	struct sk_buff *skb;
 
@@ -1868,8 +1874,8 @@ static int first_packet_length(struct sock *sk)
 {
 	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
 	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
+	unsigned int total = 0;
 	struct sk_buff *skb;
-	int total = 0;
 	int res;
 
 	spin_lock_bh(&rcvq->lock);
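The last two hunks are type plumbing for the same fix. A comment-only sketch, paraphrased from the hunks above rather than quoted from the tree, of why total follows udp_rmem_release()'s new unsigned size parameter:

/* Call chain after this patch, paraphrased from the hunks above:
 *
 *   first_packet_length(sk)
 *     -> __first_packet_length(sk, rcvq, &total)  // unsigned int total
 *     -> udp_rmem_release(sk, total, 1, ...)      // unsigned int size
 *
 * "total" accumulates skb->truesize for every skb dropped while
 * scanning the reader queue; keeping it signed would reintroduce a
 * signed/unsigned mismatch at the udp_rmem_release() boundary that
 * the rest of this series just removed.
 */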
