@@ -387,7 +387,6 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	WARN_ON(skb->sk != NULL);
 	skb->sk = sk;
 	skb->destructor = netlink_skb_destructor;
-	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 	sk_mem_charge(sk, skb->truesize);
 }
 
@@ -1212,41 +1211,48 @@ struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast)
 int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
 		      long *timeo, struct sock *ssk)
 {
+	DECLARE_WAITQUEUE(wait, current);
 	struct netlink_sock *nlk;
+	unsigned int rmem;
 
 	nlk = nlk_sk(sk);
+	rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
 
-	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	     test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
-		DECLARE_WAITQUEUE(wait, current);
-		if (!*timeo) {
-			if (!ssk || netlink_is_kernel(ssk))
-				netlink_overrun(sk);
-			sock_put(sk);
-			kfree_skb(skb);
-			return -EAGAIN;
-		}
-
-		__set_current_state(TASK_INTERRUPTIBLE);
-		add_wait_queue(&nlk->wait, &wait);
+	if ((rmem == skb->truesize || rmem < READ_ONCE(sk->sk_rcvbuf)) &&
+	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
+		netlink_skb_set_owner_r(skb, sk);
+		return 0;
+	}
 
-		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
-		    !sock_flag(sk, SOCK_DEAD))
-			*timeo = schedule_timeout(*timeo);
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
 
-		__set_current_state(TASK_RUNNING);
-		remove_wait_queue(&nlk->wait, &wait);
+	if (!*timeo) {
+		if (!ssk || netlink_is_kernel(ssk))
+			netlink_overrun(sk);
 		sock_put(sk);
+		kfree_skb(skb);
+		return -EAGAIN;
+	}
 
-		if (signal_pending(current)) {
-			kfree_skb(skb);
-			return sock_intr_errno(*timeo);
-		}
-		return 1;
+	__set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue(&nlk->wait, &wait);
+	rmem = atomic_read(&sk->sk_rmem_alloc);
+
+	if (((rmem && rmem + skb->truesize > READ_ONCE(sk->sk_rcvbuf)) ||
+	     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
+	    !sock_flag(sk, SOCK_DEAD))
+		*timeo = schedule_timeout(*timeo);
+
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&nlk->wait, &wait);
+	sock_put(sk);
+
+	if (signal_pending(current)) {
+		kfree_skb(skb);
+		return sock_intr_errno(*timeo);
 	}
-	netlink_skb_set_owner_r(skb, sk);
-	return 0;
+
+	return 1;
 }
 
 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
@@ -1307,6 +1313,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
 	ret = -ECONNREFUSED;
 	if (nlk->netlink_rcv != NULL) {
 		ret = skb->len;
+		atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 		netlink_skb_set_owner_r(skb, sk);
 		NETLINK_CB(skb).sk = ssk;
 		netlink_deliver_tap_kernel(sk, ssk, skb);
@@ -1383,13 +1390,19 @@ EXPORT_SYMBOL_GPL(netlink_strict_get_check);
 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
+	unsigned int rmem, rcvbuf;
 
-	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+	rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
+	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+
+	if ((rmem == skb->truesize || rmem <= rcvbuf) &&
 	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
 		netlink_skb_set_owner_r(skb, sk);
 		__netlink_sendskb(sk, skb);
-		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
+		return rmem > (rcvbuf >> 1);
 	}
+
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
 	return -1;
 }
 
@@ -2249,6 +2262,7 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
 	struct module *module;
 	int err = -ENOBUFS;
 	int alloc_min_size;
+	unsigned int rmem;
 	int alloc_size;
 
 	if (!lock_taken)
@@ -2258,9 +2272,6 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
 		goto errout_skb;
 	}
 
-	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
-		goto errout_skb;
-
 	/* NLMSG_GOODSIZE is small to avoid high order allocations being
 	 * required, but it makes sense to _attempt_ a 32KiB allocation
 	 * to reduce number of system calls on dump operations, if user
@@ -2283,6 +2294,12 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
 	if (!skb)
 		goto errout_skb;
 
+	rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
+	if (rmem >= READ_ONCE(sk->sk_rcvbuf)) {
+		atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+		goto errout_skb;
+	}
+
 	/* Trim skb to allocated size. User is expected to provide buffer as
 	 * large as max(min_dump_alloc, 32KiB (max_recvmsg_len capped at
 	 * netlink_recvmsg())). dump will pack as many smaller messages as
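
Every hunk above applies the same pattern: charge sk_rmem_alloc up front with atomic_add_return(), test the returned total against the limit, and undo the charge with atomic_sub() when the skb is rejected, rather than reading the counter first and adding later. Below is a minimal userspace sketch of that charge-then-check-then-rollback idea, assuming C11 atomics in place of the kernel's atomic_t helpers; demo_sock, rmem_try_charge() and rmem_uncharge() are illustrative names only, not kernel API.

/* Sketch of the charge-then-check-then-rollback pattern from the diff above.
 * C11 atomics stand in for the kernel's atomic_t helpers.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_sock {
	atomic_uint rmem_alloc;	/* bytes currently charged, like sk->sk_rmem_alloc */
	unsigned int rcvbuf;	/* receive buffer limit, like sk->sk_rcvbuf */
};

/* Charge @truesize first, then validate the new total: accept if this is the
 * only charge (the queue was empty) or the total is still under the limit,
 * otherwise roll the charge back and report failure.
 */
static bool rmem_try_charge(struct demo_sock *sk, unsigned int truesize)
{
	unsigned int rmem = atomic_fetch_add(&sk->rmem_alloc, truesize) + truesize;

	if (rmem == truesize || rmem < sk->rcvbuf)
		return true;

	atomic_fetch_sub(&sk->rmem_alloc, truesize);
	return false;
}

/* Undo a successful charge once the buffer has been consumed or freed. */
static void rmem_uncharge(struct demo_sock *sk, unsigned int truesize)
{
	atomic_fetch_sub(&sk->rmem_alloc, truesize);
}

int main(void)
{
	struct demo_sock sk = { .rcvbuf = 4096 };

	atomic_init(&sk.rmem_alloc, 0);

	printf("8KiB into empty queue: %d\n", rmem_try_charge(&sk, 8192));	/* 1: first buffer always fits */
	printf("1KiB while over limit: %d\n", rmem_try_charge(&sk, 1024));	/* 0: rejected, charge undone */

	rmem_uncharge(&sk, 8192);
	printf("1KiB after uncharge:   %d\n", rmem_try_charge(&sk, 1024));	/* 1: under the limit again */

	return 0;
}

The point of ordering things this way is that the reservation happens before the limit check, so concurrent senders cannot all pass a stale read of the counter and then over-commit it together.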