@@ -67,6 +67,7 @@ struct nbd_sock {
 struct recv_thread_args {
         struct work_struct work;
         struct nbd_device *nbd;
+        struct nbd_sock *nsock;
         int index;
 };
 
@@ -395,6 +396,22 @@ static u32 req_to_nbd_cmd_type(struct request *req)
         }
 }
 
+static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
+{
+        if (refcount_inc_not_zero(&nbd->config_refs)) {
+                /*
+                 * Add smp_mb__after_atomic to ensure that reading nbd->config_refs
+                 * and reading nbd->config is ordered. The pair is the barrier in
+                 * nbd_alloc_and_init_config(), so nbd->config_refs cannot be
+                 * observed as set before nbd->config.
+                 */
+                smp_mb__after_atomic();
+                return nbd->config;
+        }
+
+        return NULL;
+}
+
 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
 {
         struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
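The helper above relies on a store/load pairing with nbd_alloc_and_init_config() further down in this diff. Below is a minimal standalone sketch of that pairing, using hypothetical names (struct dev, struct cfg, publish(), get()) rather than the driver's own; it is an illustration of the ordering discipline, not code from the patch.

#include <linux/refcount.h>
#include <linux/atomic.h>        /* smp_mb__before_atomic()/smp_mb__after_atomic() */

struct cfg;

struct dev {
        struct cfg *config;
        refcount_t config_refs;
};

/* Writer: publish the pointer first, then make it reachable via the refcount. */
static void publish(struct dev *d, struct cfg *c)
{
        d->config = c;
        smp_mb__before_atomic();        /* order the ->config store before refcount_set() */
        refcount_set(&d->config_refs, 1);
}

/* Reader: only dereference ->config after the refcount increment succeeds. */
static struct cfg *get(struct dev *d)
{
        if (refcount_inc_not_zero(&d->config_refs)) {
                smp_mb__after_atomic();        /* order the refcount read before the ->config load */
                return d->config;
        }
        return NULL;
}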
@@ -409,13 +426,13 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
                 return BLK_EH_DONE;
         }
 
-        if (!refcount_inc_not_zero(&nbd->config_refs)) {
+        config = nbd_get_config_unlocked(nbd);
+        if (!config) {
                 cmd->status = BLK_STS_TIMEOUT;
                 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
                 mutex_unlock(&cmd->lock);
                 goto done;
         }
-        config = nbd->config;
 
         if (config->num_connections > 1 ||
             (config->num_connections == 1 && nbd->tag_set.timeout)) {
@@ -489,15 +506,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
         return BLK_EH_DONE;
 }
 
-/*
- * Send or receive packet. Return a positive value on success and
- * negtive value on failue, and never return 0.
- */
-static int sock_xmit(struct nbd_device *nbd, int index, int send,
-                     struct iov_iter *iter, int msg_flags, int *sent)
+static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
+                       struct iov_iter *iter, int msg_flags, int *sent)
 {
-        struct nbd_config *config = nbd->config;
-        struct socket *sock = config->socks[index]->sock;
         int result;
         struct msghdr msg;
         unsigned int noreclaim_flag;
@@ -540,6 +551,19 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
         return result;
 }
 
+/*
+ * Send or receive packet. Return a positive value on success and
+ * negative value on failure, and never return 0.
+ */
+static int sock_xmit(struct nbd_device *nbd, int index, int send,
+                     struct iov_iter *iter, int msg_flags, int *sent)
+{
+        struct nbd_config *config = nbd->config;
+        struct socket *sock = config->socks[index]->sock;
+
+        return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
+}
+
 /*
  * Different settings for sk->sk_sndtimeo can result in different return values
  * if there is a signal pending when we enter sendmsg, because reasons?
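With the __sock_xmit()/sock_xmit() split, a caller that already holds a struct socket (as the reworked recv_work() does via nsock->sock) can transfer data without going through nbd->config at all. A hedged sketch of such a caller, assumed to live in the same file as the patch; the helper name recv_into() is hypothetical and not part of the change.

/*
 * Hypothetical receive helper: mirrors nbd_read_reply() but for an arbitrary
 * buffer. It never touches nbd->config, only the socket it was handed.
 */
static int recv_into(struct nbd_device *nbd, struct socket *sock,
                     void *buf, size_t len)
{
        struct kvec iov = { .iov_base = buf, .iov_len = len };
        struct iov_iter to;

        iov_iter_kvec(&to, ITER_DEST, &iov, 1, len);
        return __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
}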
@@ -696,7 +720,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
         return 0;
 }
 
-static int nbd_read_reply(struct nbd_device *nbd, int index,
+static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
                           struct nbd_reply *reply)
 {
         struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
@@ -705,7 +729,7 @@ static int nbd_read_reply(struct nbd_device *nbd, int index,
 
         reply->magic = 0;
         iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
-        result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
+        result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
         if (result < 0) {
                 if (!nbd_disconnected(nbd->config))
                         dev_err(disk_to_dev(nbd->disk),
@@ -829,14 +853,14 @@ static void recv_work(struct work_struct *work)
         struct nbd_device *nbd = args->nbd;
         struct nbd_config *config = nbd->config;
         struct request_queue *q = nbd->disk->queue;
-        struct nbd_sock *nsock;
+        struct nbd_sock *nsock = args->nsock;
         struct nbd_cmd *cmd;
         struct request *rq;
 
         while (1) {
                 struct nbd_reply reply;
 
-                if (nbd_read_reply(nbd, args->index, &reply))
+                if (nbd_read_reply(nbd, nsock->sock, &reply))
                         break;
 
                 /*
@@ -871,7 +895,6 @@ static void recv_work(struct work_struct *work)
                 percpu_ref_put(&q->q_usage_counter);
         }
 
-        nsock = config->socks[args->index];
         mutex_lock(&nsock->tx_lock);
         nbd_mark_nsock_dead(nbd, nsock, 1);
         mutex_unlock(&nsock->tx_lock);
@@ -977,12 +1000,12 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
         struct nbd_sock *nsock;
         int ret;
 
-        if (!refcount_inc_not_zero(&nbd->config_refs)) {
+        config = nbd_get_config_unlocked(nbd);
+        if (!config) {
                 dev_err_ratelimited(disk_to_dev(nbd->disk),
                                     "Socks array is empty\n");
                 return -EINVAL;
         }
-        config = nbd->config;
 
         if (index >= config->num_connections) {
                 dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -1215,6 +1238,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
                 INIT_WORK(&args->work, recv_work);
                 args->index = i;
                 args->nbd = nbd;
+                args->nsock = nsock;
                 nsock->cookie++;
                 mutex_unlock(&nsock->tx_lock);
                 sockfd_put(old);
@@ -1397,6 +1421,7 @@ static int nbd_start_device(struct nbd_device *nbd)
                 refcount_inc(&nbd->config_refs);
                 INIT_WORK(&args->work, recv_work);
                 args->nbd = nbd;
+                args->nsock = config->socks[i];
                 args->index = i;
                 queue_work(nbd->recv_workq, &args->work);
         }
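The recv_thread_args plumbing in the two hunks above follows the usual workqueue pattern of capturing everything the worker needs at queue time, so the worker never has to index config->socks[] again. A reduced sketch of that pattern with hypothetical names (struct conn, recv_fn(), queue_receiver()); it is not the driver's code.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct conn;                           /* stands in for struct nbd_sock */

struct recv_args {
        struct work_struct work;
        struct conn *conn;             /* captured while the setup lock is held */
        int index;
};

static void recv_fn(struct work_struct *work)
{
        struct recv_args *args = container_of(work, struct recv_args, work);

        /* Use args->conn directly; no lookup by args->index at run time. */
        (void)args->conn;
        kfree(args);
}

static int queue_receiver(struct workqueue_struct *wq, struct conn *c, int i)
{
        struct recv_args *args = kzalloc(sizeof(*args), GFP_KERNEL);

        if (!args)
                return -ENOMEM;
        INIT_WORK(&args->work, recv_fn);
        args->conn = c;
        args->index = i;
        queue_work(wq, &args->work);
        return 0;
}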
@@ -1530,30 +1555,45 @@ static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
         return error;
 }
 
-static struct nbd_config *nbd_alloc_config(void)
+static int nbd_alloc_and_init_config(struct nbd_device *nbd)
 {
         struct nbd_config *config;
 
+        if (WARN_ON(nbd->config))
+                return -EINVAL;
+
         if (!try_module_get(THIS_MODULE))
-                return ERR_PTR(-ENODEV);
+                return -ENODEV;
 
         config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
         if (!config) {
                 module_put(THIS_MODULE);
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
         }
 
         atomic_set(&config->recv_threads, 0);
         init_waitqueue_head(&config->recv_wq);
         init_waitqueue_head(&config->conn_wait);
         config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
         atomic_set(&config->live_connections, 0);
-        return config;
+
+        nbd->config = config;
+        /*
+         * Order the nbd->config assignment before refcount_set(&nbd->config_refs, 1).
+         * Its pair is the barrier in nbd_get_config_unlocked(), so
+         * nbd_get_config_unlocked() won't see nbd->config as NULL after
+         * refcount_inc_not_zero() succeeds.
+         */
+        smp_mb__before_atomic();
+        refcount_set(&nbd->config_refs, 1);
+
+        return 0;
 }
 
 static int nbd_open(struct gendisk *disk, blk_mode_t mode)
 {
         struct nbd_device *nbd;
+        struct nbd_config *config;
         int ret = 0;
 
         mutex_lock(&nbd_index_mutex);
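The barriers added above and in nbd_get_config_unlocked() exist to rule out one specific reordering. A sketch of the interleaving they forbid, written as a C comment; this is a hypothetical timeline used for illustration, not an observed trace.

/*
 * Without the pairing, the stores in nbd_alloc_and_init_config() (or the
 * loads in nbd_get_config_unlocked()) could be reordered:
 *
 *   CPU0: nbd_alloc_and_init_config()     CPU1: nbd_get_config_unlocked()
 *   refcount_set(&nbd->config_refs, 1);
 *                                          refcount_inc_not_zero() -> true
 *                                          return nbd->config;      // NULL!
 *   nbd->config = config;
 *
 * smp_mb__before_atomic() keeps the nbd->config store ahead of the
 * refcount_set(), and smp_mb__after_atomic() keeps the nbd->config load
 * behind the successful refcount_inc_not_zero(), so a reader that gets a
 * reference always sees the initialized pointer.
 */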
@@ -1566,27 +1606,25 @@ static int nbd_open(struct gendisk *disk, blk_mode_t mode)
                 ret = -ENXIO;
                 goto out;
         }
-        if (!refcount_inc_not_zero(&nbd->config_refs)) {
-                struct nbd_config *config;
 
+        config = nbd_get_config_unlocked(nbd);
+        if (!config) {
                 mutex_lock(&nbd->config_lock);
                 if (refcount_inc_not_zero(&nbd->config_refs)) {
                         mutex_unlock(&nbd->config_lock);
                         goto out;
                 }
-                config = nbd_alloc_config();
-                if (IS_ERR(config)) {
-                        ret = PTR_ERR(config);
+                ret = nbd_alloc_and_init_config(nbd);
+                if (ret) {
                         mutex_unlock(&nbd->config_lock);
                         goto out;
                 }
-                nbd->config = config;
-                refcount_set(&nbd->config_refs, 1);
+
                 refcount_inc(&nbd->refs);
                 mutex_unlock(&nbd->config_lock);
                 if (max_part)
                         set_bit(GD_NEED_PART_SCAN, &disk->state);
-        } else if (nbd_disconnected(nbd->config)) {
+        } else if (nbd_disconnected(config)) {
                 if (max_part)
                         set_bit(GD_NEED_PART_SCAN, &disk->state);
         }
@@ -1990,22 +2028,17 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
                 pr_err("nbd%d already in use\n", index);
                 return -EBUSY;
         }
-        if (WARN_ON(nbd->config)) {
-                mutex_unlock(&nbd->config_lock);
-                nbd_put(nbd);
-                return -EINVAL;
-        }
-        config = nbd_alloc_config();
-        if (IS_ERR(config)) {
+
+        ret = nbd_alloc_and_init_config(nbd);
+        if (ret) {
                 mutex_unlock(&nbd->config_lock);
                 nbd_put(nbd);
                 pr_err("couldn't allocate config\n");
-                return PTR_ERR(config);
+                return ret;
         }
-        nbd->config = config;
-        refcount_set(&nbd->config_refs, 1);
-        set_bit(NBD_RT_BOUND, &config->runtime_flags);
 
+        config = nbd->config;
+        set_bit(NBD_RT_BOUND, &config->runtime_flags);
         ret = nbd_genl_size_set(info, nbd);
         if (ret)
                 goto out;
@@ -2208,15 +2241,15 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
         }
         mutex_unlock(&nbd_index_mutex);
 
-        if (!refcount_inc_not_zero(&nbd->config_refs)) {
+        config = nbd_get_config_unlocked(nbd);
+        if (!config) {
                 dev_err(nbd_to_dev(nbd),
                         "not configured, cannot reconfigure\n");
                 nbd_put(nbd);
                 return -EINVAL;
         }
 
         mutex_lock(&nbd->config_lock);
-        config = nbd->config;
         if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
             !nbd->pid) {
                 dev_err(nbd_to_dev(nbd),