@@ -1625,12 +1625,12 @@ static bool udp_skb_has_head_state(struct sk_buff *skb)
 }
 
 /* fully reclaim rmem/fwd memory allocated for skb */
-static void udp_rmem_release(struct sock *sk, int size, int partial,
-			     bool rx_queue_lock_held)
+static void udp_rmem_release(struct sock *sk, unsigned int size,
+			     int partial, bool rx_queue_lock_held)
 {
 	struct udp_sock *up = udp_sk(sk);
 	struct sk_buff_head *sk_queue;
-	int amt;
+	unsigned int amt;
 
 	if (likely(partial)) {
 		up->forward_deficit += size;
@@ -1650,10 +1650,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
 	if (!rx_queue_lock_held)
 		spin_lock(&sk_queue->lock);
 
-
-	sk_forward_alloc_add(sk, size);
-	amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
-	sk_forward_alloc_add(sk, -amt);
+	amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
+	sk_forward_alloc_add(sk, size - amt);
 
 	if (amt)
 		__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
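Note on the hunk above: the two-step forward_alloc update (charge size, carve off whole pages, hand them back) is folded into a single page-aligned computation on the widened unsigned types, so sk->sk_forward_alloc is written once instead of twice under the queue lock. A stand-alone user-space sketch of the arithmetic, with PAGE_SIZE assumed to be 4096 and the input values invented, shows both forms leave the same remainder on the socket:

/* sketch only, not kernel code */
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int size = 70000;		/* bytes being released */
	int partial = 1;			/* slack kept by the caller */
	int fwd_old = 3000, fwd_new = 3000;	/* plays sk->sk_forward_alloc */
	unsigned int amt_old, amt_new;

	/* old sequence: charge size, carve out whole pages, give them back */
	fwd_old += size;
	amt_old = (fwd_old - partial) & ~(PAGE_SIZE - 1);
	fwd_old -= amt_old;

	/* new sequence: one page-aligned computation, one adjustment */
	amt_new = (size + fwd_new - partial) & ~(PAGE_SIZE - 1);
	fwd_new += size - amt_new;

	printf("old: amt=%u fwd=%d\n", amt_old, fwd_old);	/* amt=69632 fwd=3368 */
	printf("new: amt=%u fwd=%d\n", amt_new, fwd_new);	/* amt=69632 fwd=3368 */
	return 0;
}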
@@ -1725,17 +1723,25 @@ static int udp_rmem_schedule(struct sock *sk, int size)
 int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct sk_buff_head *list = &sk->sk_receive_queue;
-	int rmem, err = -ENOMEM;
+	unsigned int rmem, rcvbuf;
 	spinlock_t *busy = NULL;
-	int size, rcvbuf;
+	int size, err = -ENOMEM;
 
-	/* Immediately drop when the receive queue is full.
-	 * Always allow at least one packet.
-	 */
 	rmem = atomic_read(&sk->sk_rmem_alloc);
 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
-	if (rmem > rcvbuf)
-		goto drop;
+	size = skb->truesize;
+
+	/* Immediately drop when the receive queue is full.
+	 * Cast to unsigned int performs the boundary check for INT_MAX.
+	 */
+	if (rmem + size > rcvbuf) {
+		if (rcvbuf > INT_MAX >> 1)
+			goto drop;
+
+		/* Always allow at least one packet for small buffer. */
+		if (rmem > rcvbuf)
+			goto drop;
+	}
 
 	/* Under mem pressure, it might be helpful to help udp_recvmsg()
 	 * having linear skbs :
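Note on the new admission check: rmem and rcvbuf are now unsigned, the incoming skb's truesize is part of the comparison, and a receive buffer above INT_MAX >> 1 gets no "one extra packet" slack. A stand-alone sketch (would_drop() is an invented helper that mirrors the hunk, not a kernel function) illustrates that the unsigned arithmetic stays well-defined near INT_MAX while small buffers still accept at least one packet:

/* sketch only, not kernel code */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool would_drop(unsigned int rmem, unsigned int rcvbuf, int size)
{
	if (rmem + size > rcvbuf) {
		if (rcvbuf > INT_MAX >> 1)
			return true;	/* huge buffer: no slack allowed */
		if (rmem > rcvbuf)
			return true;	/* small buffer: already full */
	}
	return false;			/* accept; at least one packet fits */
}

int main(void)
{
	/* near the top of the range: the unsigned sum does not wrap */
	printf("%d\n", would_drop(INT_MAX - 1000u, INT_MAX, 2048));	/* 1 */
	/* small buffer: the first packet is admitted even if it overshoots */
	printf("%d\n", would_drop(0, 4096, 65536));			/* 0 */
	/* small buffer, already over the limit: drop */
	printf("%d\n", would_drop(8192, 4096, 1500));			/* 1 */
	return 0;
}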
@@ -1745,10 +1751,10 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (rmem > (rcvbuf >> 1)) {
 		skb_condense(skb);
-
+		size = skb->truesize;
 		busy = busylock_acquire(sk);
 	}
-	size = skb->truesize;
+
 	udp_set_dev_scratch(skb);
 
 	atomic_add(size, &sk->sk_rmem_alloc);
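Note on the hunk above: size is now captured before the admission check, so it has to be refreshed after skb_condense(), which may shrink skb->truesize. Whatever is added to sk_rmem_alloc here must match what is subtracted when the skb is later released, otherwise the accounting drifts upward. A toy sketch with invented numbers:

/* sketch only, not kernel code */
#include <stdio.h>

int main(void)
{
	int rmem_alloc = 0;	/* plays sk->sk_rmem_alloc */
	int before = 2304;	/* truesize before skb_condense() */
	int after = 768;	/* truesize after condensing */

	/* stale charge: captured before condensing, released at the new size */
	rmem_alloc += before;
	rmem_alloc -= after;
	printf("stale charge leaves %d bytes accounted forever\n", rmem_alloc);

	/* refreshed charge, as in the hunk: charge and release agree */
	rmem_alloc = 0;
	rmem_alloc += after;
	rmem_alloc -= after;
	printf("refreshed charge leaves %d\n", rmem_alloc);
	return 0;
}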
@@ -1835,7 +1841,7 @@ EXPORT_IPV6_MOD_GPL(skb_consume_udp);
 
 static struct sk_buff *__first_packet_length(struct sock *sk,
 					     struct sk_buff_head *rcvq,
-					     int *total)
+					     unsigned int *total)
 {
 	struct sk_buff *skb;
 
@@ -1868,8 +1874,8 @@ static int first_packet_length(struct sock *sk)
 {
 	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
 	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
+	unsigned int total = 0;
 	struct sk_buff *skb;
-	int total = 0;
 	int res;
 
 	spin_lock_bh(&rcvq->lock);