Skip to content

Commit bc893f7

Browse files
committed
Merge tag 'block-6.7-2023-11-23' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe: "A bit bigger than usual at this time, but nothing really earth shattering: - NVMe pull request via Keith: - TCP TLS fixes (Hannes) - Authentication fixes (Mark, Hannes) - Properly terminate target names (Christoph) - MD pull request via Song, fixing a raid5 corruption issue - Disentanglement of the dependency mess in nvme introduced with the tls additions. Now it should actually build on all configs (Arnd) - Series of bcache fixes (Coly) - Removal of a dead helper (Damien) - s390 dasd fix (Muhammad, Jan) - lockdep blk-cgroup fixes (Ming)" * tag 'block-6.7-2023-11-23' of git://git.kernel.dk/linux: (33 commits) nvme: tcp: fix compile-time checks for TLS mode nvme: target: fix Kconfig select statements nvme: target: fix nvme_keyring_id() references nvme: move nvme_stop_keep_alive() back to original position nbd: pass nbd_sock to nbd_read_reply() instead of index s390/dasd: protect device queue against concurrent access s390/dasd: resolve spelling mistake block/null_blk: Fix double blk_mq_start_request() warning nvmet-tcp: always initialize tls_handshake_tmo_work nvmet: nul-terminate the NQNs passed in the connect command nvme: blank out authentication fabrics options if not configured nvme: catch errors from nvme_configure_metadata() nvme-tcp: only evaluate 'tls' option if TLS is selected nvme-auth: set explanation code for failure2 msgs nvme-auth: unlock mutex in one place only block: Remove blk_set_runtime_active() nbd: fix null-ptr-dereference while accessing 'nbd->config' nbd: factor out a helper to get nbd_config without holding 'config_lock' nbd: fold nbd config initialization into nbd_alloc_config() bcache: avoid NULL checking to c->root in run_cache_set() ...
2 parents 0044423 + 0e6c4fe commit bc893f7

File tree

25 files changed

+210
-148
lines changed

25 files changed

+210
-148
lines changed

block/blk-cgroup.c

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -577,6 +577,7 @@ static void blkg_destroy_all(struct gendisk *disk)
577577
struct request_queue *q = disk->queue;
578578
struct blkcg_gq *blkg, *n;
579579
int count = BLKG_DESTROY_BATCH_SIZE;
580+
int i;
580581

581582
restart:
582583
spin_lock_irq(&q->queue_lock);
@@ -602,6 +603,18 @@ static void blkg_destroy_all(struct gendisk *disk)
602603
}
603604
}
604605

606+
/*
607+
* Mark policy deactivated since policy offline has been done, and
608+
* the free is scheduled, so future blkcg_deactivate_policy() can
609+
* be bypassed
610+
*/
611+
for (i = 0; i < BLKCG_MAX_POLS; i++) {
612+
struct blkcg_policy *pol = blkcg_policy[i];
613+
614+
if (pol)
615+
__clear_bit(pol->plid, q->blkcg_pols);
616+
}
617+
605618
q->root_blkg = NULL;
606619
spin_unlock_irq(&q->queue_lock);
607620
}

block/blk-cgroup.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -249,8 +249,6 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
249249
{
250250
struct blkcg_gq *blkg;
251251

252-
WARN_ON_ONCE(!rcu_read_lock_held());
253-
254252
if (blkcg == &blkcg_root)
255253
return q->root_blkg;
256254

block/blk-pm.c

Lines changed: 5 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -163,38 +163,15 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
163163
* @q: the queue of the device
164164
*
165165
* Description:
166-
* For historical reasons, this routine merely calls blk_set_runtime_active()
167-
* to do the real work of restarting the queue. It does this regardless of
168-
* whether the device's runtime-resume succeeded; even if it failed the
166+
* Restart the queue of a runtime suspended device. It does this regardless
167+
* of whether the device's runtime-resume succeeded; even if it failed the
169168
* driver or error handler will need to communicate with the device.
170169
*
171170
* This function should be called near the end of the device's
172-
* runtime_resume callback.
171+
* runtime_resume callback to correct queue runtime PM status and re-enable
172+
* peeking requests from the queue.
173173
*/
174174
void blk_post_runtime_resume(struct request_queue *q)
175-
{
176-
blk_set_runtime_active(q);
177-
}
178-
EXPORT_SYMBOL(blk_post_runtime_resume);
179-
180-
/**
181-
* blk_set_runtime_active - Force runtime status of the queue to be active
182-
* @q: the queue of the device
183-
*
184-
* If the device is left runtime suspended during system suspend the resume
185-
* hook typically resumes the device and corrects runtime status
186-
* accordingly. However, that does not affect the queue runtime PM status
187-
* which is still "suspended". This prevents processing requests from the
188-
* queue.
189-
*
190-
* This function can be used in driver's resume hook to correct queue
191-
* runtime PM status and re-enable peeking requests from the queue. It
192-
* should be called before first request is added to the queue.
193-
*
194-
* This function is also called by blk_post_runtime_resume() for
195-
* runtime resumes. It does everything necessary to restart the queue.
196-
*/
197-
void blk_set_runtime_active(struct request_queue *q)
198175
{
199176
int old_status;
200177

@@ -211,4 +188,4 @@ void blk_set_runtime_active(struct request_queue *q)
211188
if (old_status != RPM_ACTIVE)
212189
blk_clear_pm_only(q);
213190
}
214-
EXPORT_SYMBOL(blk_set_runtime_active);
191+
EXPORT_SYMBOL(blk_post_runtime_resume);

block/blk-throttle.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1320,6 +1320,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
13201320
tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
13211321
tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
13221322

1323+
rcu_read_lock();
13231324
/*
13241325
* Update has_rules[] flags for the updated tg's subtree. A tg is
13251326
* considered to have rules if either the tg itself or any of its
@@ -1347,6 +1348,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
13471348
this_tg->latency_target = max(this_tg->latency_target,
13481349
parent_tg->latency_target);
13491350
}
1351+
rcu_read_unlock();
13501352

13511353
/*
13521354
* We're already holding queue_lock and know @tg is valid. Let's

drivers/block/nbd.c

Lines changed: 75 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,7 @@ struct nbd_sock {
6767
struct recv_thread_args {
6868
struct work_struct work;
6969
struct nbd_device *nbd;
70+
struct nbd_sock *nsock;
7071
int index;
7172
};
7273

@@ -395,6 +396,22 @@ static u32 req_to_nbd_cmd_type(struct request *req)
395396
}
396397
}
397398

399+
static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
400+
{
401+
if (refcount_inc_not_zero(&nbd->config_refs)) {
402+
/*
403+
* Add smp_mb__after_atomic to ensure that reading nbd->config_refs
404+
* and reading nbd->config is ordered. The pair is the barrier in
405+
* nbd_alloc_and_init_config(), avoid nbd->config_refs is set
406+
* before nbd->config.
407+
*/
408+
smp_mb__after_atomic();
409+
return nbd->config;
410+
}
411+
412+
return NULL;
413+
}
414+
398415
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
399416
{
400417
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -409,13 +426,13 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
409426
return BLK_EH_DONE;
410427
}
411428

412-
if (!refcount_inc_not_zero(&nbd->config_refs)) {
429+
config = nbd_get_config_unlocked(nbd);
430+
if (!config) {
413431
cmd->status = BLK_STS_TIMEOUT;
414432
__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
415433
mutex_unlock(&cmd->lock);
416434
goto done;
417435
}
418-
config = nbd->config;
419436

420437
if (config->num_connections > 1 ||
421438
(config->num_connections == 1 && nbd->tag_set.timeout)) {
@@ -489,15 +506,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
489506
return BLK_EH_DONE;
490507
}
491508

492-
/*
493-
* Send or receive packet. Return a positive value on success and
494-
* negtive value on failue, and never return 0.
495-
*/
496-
static int sock_xmit(struct nbd_device *nbd, int index, int send,
497-
struct iov_iter *iter, int msg_flags, int *sent)
509+
static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
510+
struct iov_iter *iter, int msg_flags, int *sent)
498511
{
499-
struct nbd_config *config = nbd->config;
500-
struct socket *sock = config->socks[index]->sock;
501512
int result;
502513
struct msghdr msg;
503514
unsigned int noreclaim_flag;
@@ -540,6 +551,19 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
540551
return result;
541552
}
542553

554+
/*
555+
* Send or receive packet. Return a positive value on success and
556+
* negtive value on failure, and never return 0.
557+
*/
558+
static int sock_xmit(struct nbd_device *nbd, int index, int send,
559+
struct iov_iter *iter, int msg_flags, int *sent)
560+
{
561+
struct nbd_config *config = nbd->config;
562+
struct socket *sock = config->socks[index]->sock;
563+
564+
return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
565+
}
566+
543567
/*
544568
* Different settings for sk->sk_sndtimeo can result in different return values
545569
* if there is a signal pending when we enter sendmsg, because reasons?
@@ -696,7 +720,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
696720
return 0;
697721
}
698722

699-
static int nbd_read_reply(struct nbd_device *nbd, int index,
723+
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
700724
struct nbd_reply *reply)
701725
{
702726
struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
@@ -705,7 +729,7 @@ static int nbd_read_reply(struct nbd_device *nbd, int index,
705729

706730
reply->magic = 0;
707731
iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
708-
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
732+
result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
709733
if (result < 0) {
710734
if (!nbd_disconnected(nbd->config))
711735
dev_err(disk_to_dev(nbd->disk),
@@ -829,14 +853,14 @@ static void recv_work(struct work_struct *work)
829853
struct nbd_device *nbd = args->nbd;
830854
struct nbd_config *config = nbd->config;
831855
struct request_queue *q = nbd->disk->queue;
832-
struct nbd_sock *nsock;
856+
struct nbd_sock *nsock = args->nsock;
833857
struct nbd_cmd *cmd;
834858
struct request *rq;
835859

836860
while (1) {
837861
struct nbd_reply reply;
838862

839-
if (nbd_read_reply(nbd, args->index, &reply))
863+
if (nbd_read_reply(nbd, nsock->sock, &reply))
840864
break;
841865

842866
/*
@@ -871,7 +895,6 @@ static void recv_work(struct work_struct *work)
871895
percpu_ref_put(&q->q_usage_counter);
872896
}
873897

874-
nsock = config->socks[args->index];
875898
mutex_lock(&nsock->tx_lock);
876899
nbd_mark_nsock_dead(nbd, nsock, 1);
877900
mutex_unlock(&nsock->tx_lock);
@@ -977,12 +1000,12 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
9771000
struct nbd_sock *nsock;
9781001
int ret;
9791002

980-
if (!refcount_inc_not_zero(&nbd->config_refs)) {
1003+
config = nbd_get_config_unlocked(nbd);
1004+
if (!config) {
9811005
dev_err_ratelimited(disk_to_dev(nbd->disk),
9821006
"Socks array is empty\n");
9831007
return -EINVAL;
9841008
}
985-
config = nbd->config;
9861009

9871010
if (index >= config->num_connections) {
9881011
dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -1215,6 +1238,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
12151238
INIT_WORK(&args->work, recv_work);
12161239
args->index = i;
12171240
args->nbd = nbd;
1241+
args->nsock = nsock;
12181242
nsock->cookie++;
12191243
mutex_unlock(&nsock->tx_lock);
12201244
sockfd_put(old);
@@ -1397,6 +1421,7 @@ static int nbd_start_device(struct nbd_device *nbd)
13971421
refcount_inc(&nbd->config_refs);
13981422
INIT_WORK(&args->work, recv_work);
13991423
args->nbd = nbd;
1424+
args->nsock = config->socks[i];
14001425
args->index = i;
14011426
queue_work(nbd->recv_workq, &args->work);
14021427
}
@@ -1530,30 +1555,45 @@ static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
15301555
return error;
15311556
}
15321557

1533-
static struct nbd_config *nbd_alloc_config(void)
1558+
static int nbd_alloc_and_init_config(struct nbd_device *nbd)
15341559
{
15351560
struct nbd_config *config;
15361561

1562+
if (WARN_ON(nbd->config))
1563+
return -EINVAL;
1564+
15371565
if (!try_module_get(THIS_MODULE))
1538-
return ERR_PTR(-ENODEV);
1566+
return -ENODEV;
15391567

15401568
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
15411569
if (!config) {
15421570
module_put(THIS_MODULE);
1543-
return ERR_PTR(-ENOMEM);
1571+
return -ENOMEM;
15441572
}
15451573

15461574
atomic_set(&config->recv_threads, 0);
15471575
init_waitqueue_head(&config->recv_wq);
15481576
init_waitqueue_head(&config->conn_wait);
15491577
config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
15501578
atomic_set(&config->live_connections, 0);
1551-
return config;
1579+
1580+
nbd->config = config;
1581+
/*
1582+
* Order refcount_set(&nbd->config_refs, 1) and nbd->config assignment,
1583+
* its pair is the barrier in nbd_get_config_unlocked().
1584+
* So nbd_get_config_unlocked() won't see nbd->config as null after
1585+
* refcount_inc_not_zero() succeed.
1586+
*/
1587+
smp_mb__before_atomic();
1588+
refcount_set(&nbd->config_refs, 1);
1589+
1590+
return 0;
15521591
}
15531592

15541593
static int nbd_open(struct gendisk *disk, blk_mode_t mode)
15551594
{
15561595
struct nbd_device *nbd;
1596+
struct nbd_config *config;
15571597
int ret = 0;
15581598

15591599
mutex_lock(&nbd_index_mutex);
@@ -1566,27 +1606,25 @@ static int nbd_open(struct gendisk *disk, blk_mode_t mode)
15661606
ret = -ENXIO;
15671607
goto out;
15681608
}
1569-
if (!refcount_inc_not_zero(&nbd->config_refs)) {
1570-
struct nbd_config *config;
15711609

1610+
config = nbd_get_config_unlocked(nbd);
1611+
if (!config) {
15721612
mutex_lock(&nbd->config_lock);
15731613
if (refcount_inc_not_zero(&nbd->config_refs)) {
15741614
mutex_unlock(&nbd->config_lock);
15751615
goto out;
15761616
}
1577-
config = nbd_alloc_config();
1578-
if (IS_ERR(config)) {
1579-
ret = PTR_ERR(config);
1617+
ret = nbd_alloc_and_init_config(nbd);
1618+
if (ret) {
15801619
mutex_unlock(&nbd->config_lock);
15811620
goto out;
15821621
}
1583-
nbd->config = config;
1584-
refcount_set(&nbd->config_refs, 1);
1622+
15851623
refcount_inc(&nbd->refs);
15861624
mutex_unlock(&nbd->config_lock);
15871625
if (max_part)
15881626
set_bit(GD_NEED_PART_SCAN, &disk->state);
1589-
} else if (nbd_disconnected(nbd->config)) {
1627+
} else if (nbd_disconnected(config)) {
15901628
if (max_part)
15911629
set_bit(GD_NEED_PART_SCAN, &disk->state);
15921630
}
@@ -1990,22 +2028,17 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
19902028
pr_err("nbd%d already in use\n", index);
19912029
return -EBUSY;
19922030
}
1993-
if (WARN_ON(nbd->config)) {
1994-
mutex_unlock(&nbd->config_lock);
1995-
nbd_put(nbd);
1996-
return -EINVAL;
1997-
}
1998-
config = nbd_alloc_config();
1999-
if (IS_ERR(config)) {
2031+
2032+
ret = nbd_alloc_and_init_config(nbd);
2033+
if (ret) {
20002034
mutex_unlock(&nbd->config_lock);
20012035
nbd_put(nbd);
20022036
pr_err("couldn't allocate config\n");
2003-
return PTR_ERR(config);
2037+
return ret;
20042038
}
2005-
nbd->config = config;
2006-
refcount_set(&nbd->config_refs, 1);
2007-
set_bit(NBD_RT_BOUND, &config->runtime_flags);
20082039

2040+
config = nbd->config;
2041+
set_bit(NBD_RT_BOUND, &config->runtime_flags);
20092042
ret = nbd_genl_size_set(info, nbd);
20102043
if (ret)
20112044
goto out;
@@ -2208,15 +2241,15 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
22082241
}
22092242
mutex_unlock(&nbd_index_mutex);
22102243

2211-
if (!refcount_inc_not_zero(&nbd->config_refs)) {
2244+
config = nbd_get_config_unlocked(nbd);
2245+
if (!config) {
22122246
dev_err(nbd_to_dev(nbd),
22132247
"not configured, cannot reconfigure\n");
22142248
nbd_put(nbd);
22152249
return -EINVAL;
22162250
}
22172251

22182252
mutex_lock(&nbd->config_lock);
2219-
config = nbd->config;
22202253
if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
22212254
!nbd->pid) {
22222255
dev_err(nbd_to_dev(nbd),

0 commit comments

Comments
 (0)