Commit b74e58c

Merge branch 'nvme-5.6' of git://git.infradead.org/nvme into block-5.6

Pull NVMe fixes from Keith.

* 'nvme-5.6' of git://git.infradead.org/nvme:
  nvmet: update AEN list and array at one place
  nvmet: Fix controller use after free
  nvmet: Fix error print message at nvmet_install_queue function
  nvme-pci: remove nvmeq->tags
  nvmet: fix dsm failure when payload does not match sgl descriptor
  nvmet: Pass lockdep expression to RCU lists

2 parents c8ab422 + 0f5be6a, commit b74e58c

6 files changed: +72 -51 lines changed

drivers/nvme/host/pci.c

Lines changed: 8 additions & 15 deletions
@@ -167,7 +167,6 @@ struct nvme_queue {
         /* only used for poll queues: */
         spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
         volatile struct nvme_completion *cqes;
-        struct blk_mq_tags **tags;
         dma_addr_t sq_dma_addr;
         dma_addr_t cq_dma_addr;
         u32 __iomem *q_db;
@@ -376,29 +375,17 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 
         WARN_ON(hctx_idx != 0);
         WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
-        WARN_ON(nvmeq->tags);
 
         hctx->driver_data = nvmeq;
-        nvmeq->tags = &dev->admin_tagset.tags[0];
         return 0;
 }
 
-static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
-{
-        struct nvme_queue *nvmeq = hctx->driver_data;
-
-        nvmeq->tags = NULL;
-}
-
 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                 unsigned int hctx_idx)
 {
         struct nvme_dev *dev = data;
         struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
 
-        if (!nvmeq->tags)
-                nvmeq->tags = &dev->tagset.tags[hctx_idx];
-
         WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
         hctx->driver_data = nvmeq;
         return 0;
@@ -948,6 +935,13 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
         writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
 }
 
+static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
+{
+        if (!nvmeq->qid)
+                return nvmeq->dev->admin_tagset.tags[0];
+        return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
+}
+
 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 {
         volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
@@ -972,7 +966,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
                 return;
         }
 
-        req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
+        req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
         trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
         nvme_end_request(req, cqe->status, cqe->result);
 }
@@ -1572,7 +1566,6 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
         .queue_rq       = nvme_queue_rq,
         .complete       = nvme_pci_complete_rq,
         .init_hctx      = nvme_admin_init_hctx,
-        .exit_hctx      = nvme_admin_exit_hctx,
         .init_request   = nvme_init_request,
         .timeout        = nvme_timeout,
 };
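
For illustration only (not part of the commit): a minimal userspace sketch of the pattern the pci.c change adopts, looking the tag set up from the queue id on each completion instead of caching a per-queue pointer that init_hctx/exit_hctx had to manage. The struct and function names below are simplified stand-ins, not the driver's real types.

/*
 * Toy model: resolve the tag set from the queue id, mirroring the shape of
 * nvme_queue_tagset() above. No cached pointer means nothing to clear when
 * a hardware context goes away.
 */
#include <stdio.h>

struct tagset { const char *name; };

struct dev {
        struct tagset admin_tagset;     /* tags for the admin queue (qid 0) */
        struct tagset io_tagset[4];     /* tags for I/O queues (qid 1..4)   */
};

struct queue {
        struct dev *dev;
        unsigned int qid;               /* 0 = admin queue */
};

/* Lookup by qid on each completion instead of caching a pointer. */
static struct tagset *queue_tagset(struct queue *q)
{
        if (!q->qid)
                return &q->dev->admin_tagset;
        return &q->dev->io_tagset[q->qid - 1];
}

int main(void)
{
        struct dev d = {
                .admin_tagset = { "admin" },
                .io_tagset = { { "io0" }, { "io1" }, { "io2" }, { "io3" } },
        };
        struct queue admin = { &d, 0 }, io2 = { &d, 2 };

        printf("qid 0 -> %s\n", queue_tagset(&admin)->name);   /* admin */
        printf("qid 2 -> %s\n", queue_tagset(&io2)->name);     /* io1   */
        return 0;
}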

drivers/nvme/target/core.c

Lines changed: 51 additions & 29 deletions
@@ -129,27 +129,8 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
         return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
 }
 
-static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
-{
-        struct nvmet_req *req;
-
-        while (1) {
-                mutex_lock(&ctrl->lock);
-                if (!ctrl->nr_async_event_cmds) {
-                        mutex_unlock(&ctrl->lock);
-                        return;
-                }
-
-                req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
-                mutex_unlock(&ctrl->lock);
-                nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
-        }
-}
-
-static void nvmet_async_event_work(struct work_struct *work)
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
 {
-        struct nvmet_ctrl *ctrl =
-                container_of(work, struct nvmet_ctrl, async_event_work);
         struct nvmet_async_event *aen;
         struct nvmet_req *req;
 
@@ -159,18 +140,41 @@ static void nvmet_async_event_work(struct work_struct *work)
                                 struct nvmet_async_event, entry);
                 if (!aen || !ctrl->nr_async_event_cmds) {
                         mutex_unlock(&ctrl->lock);
-                        return;
+                        break;
                 }
 
                 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
-                nvmet_set_result(req, nvmet_async_event_result(aen));
+                if (status == 0)
+                        nvmet_set_result(req, nvmet_async_event_result(aen));
 
                 list_del(&aen->entry);
                 kfree(aen);
 
                 mutex_unlock(&ctrl->lock);
-                nvmet_req_complete(req, 0);
+                nvmet_req_complete(req, status);
+        }
+}
+
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+        struct nvmet_req *req;
+
+        mutex_lock(&ctrl->lock);
+        while (ctrl->nr_async_event_cmds) {
+                req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+                mutex_unlock(&ctrl->lock);
+                nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+                mutex_lock(&ctrl->lock);
         }
+        mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+        struct nvmet_ctrl *ctrl =
+                container_of(work, struct nvmet_ctrl, async_event_work);
+
+        nvmet_async_events_process(ctrl, 0);
 }
 
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
@@ -555,7 +559,8 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
         } else {
                 struct nvmet_ns *old;
 
-                list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
+                list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
+                                        lockdep_is_held(&subsys->lock)) {
                         BUG_ON(ns->nsid == old->nsid);
                         if (ns->nsid < old->nsid)
                                 break;
@@ -752,19 +757,24 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
 
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
+        u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
+        struct nvmet_ctrl *ctrl = sq->ctrl;
+
         /*
          * If this is the admin queue, complete all AERs so that our
          * queue doesn't have outstanding requests on it.
         */
-        if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
-                nvmet_async_events_free(sq->ctrl);
+        if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) {
+                nvmet_async_events_process(ctrl, status);
+                nvmet_async_events_free(ctrl);
+        }
         percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
         wait_for_completion(&sq->confirm_done);
         wait_for_completion(&sq->free_done);
         percpu_ref_exit(&sq->ref);
 
-        if (sq->ctrl) {
-                nvmet_ctrl_put(sq->ctrl);
+        if (ctrl) {
+                nvmet_ctrl_put(ctrl);
                 sq->ctrl = NULL; /* allows reusing the queue later */
         }
 }
@@ -938,6 +948,17 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
 }
 EXPORT_SYMBOL_GPL(nvmet_check_data_len);
 
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
+{
+        if (unlikely(data_len > req->transfer_len)) {
+                req->error_loc = offsetof(struct nvme_common_command, dptr);
+                nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+                return false;
+        }
+
+        return true;
+}
+
 int nvmet_req_alloc_sgl(struct nvmet_req *req)
 {
         struct pci_dev *p2p_dev = NULL;
@@ -1172,7 +1193,8 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
 
         ctrl->p2p_client = get_device(req->p2p_client);
 
-        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
+                                lockdep_is_held(&ctrl->subsys->lock))
                 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
 }
 
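
For illustration only (not part of the commit): a small userspace model of the drain loop nvmet_async_events_free() uses after this change, popping one pending entry at a time under the lock and dropping the lock around each completion callback. Names are simplified stand-ins, not the nvmet structures.

/*
 * Toy model of draining a small array of pending commands: the lock is held
 * only to pop an entry, and released around the completion callback, which
 * in the kernel (nvmet_req_complete) may take other locks or sleep.
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_PENDING 4

struct ctrl {
        pthread_mutex_t lock;
        int nr_pending;
        int pending[MAX_PENDING];       /* stand-in for async_event_cmds[] */
};

static void complete_cmd(int cmd, int status)
{
        printf("completed cmd %d with status %d\n", cmd, status);
}

static void drain_pending(struct ctrl *c, int status)
{
        pthread_mutex_lock(&c->lock);
        while (c->nr_pending) {
                int cmd = c->pending[--c->nr_pending];

                /* drop the lock around the completion, then re-take it */
                pthread_mutex_unlock(&c->lock);
                complete_cmd(cmd, status);
                pthread_mutex_lock(&c->lock);
        }
        pthread_mutex_unlock(&c->lock);
}

int main(void)
{
        struct ctrl c = { PTHREAD_MUTEX_INITIALIZER, 3, { 11, 12, 13 } };

        drain_pending(&c, -1);
        return 0;
}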

drivers/nvme/target/fabrics-cmd.c

Lines changed: 10 additions & 5 deletions
@@ -109,6 +109,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
         u16 qid = le16_to_cpu(c->qid);
         u16 sqsize = le16_to_cpu(c->sqsize);
         struct nvmet_ctrl *old;
+        u16 ret;
 
         old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
         if (old) {
@@ -119,7 +120,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
         if (!sqsize) {
                 pr_warn("queue size zero!\n");
                 req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
-                return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+                ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+                goto err;
         }
 
         /* note: convert queue size from 0's-based value to 1's-based value */
@@ -132,16 +134,19 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
         }
 
         if (ctrl->ops->install_queue) {
-                u16 ret = ctrl->ops->install_queue(req->sq);
-
+                ret = ctrl->ops->install_queue(req->sq);
                 if (ret) {
                         pr_err("failed to install queue %d cntlid %d ret %x\n",
-                                qid, ret, ctrl->cntlid);
-                        return ret;
+                                qid, ctrl->cntlid, ret);
+                        goto err;
                 }
         }
 
         return 0;
+
+err:
+        req->sq->ctrl = NULL;
+        return ret;
 }
 
 static void nvmet_execute_admin_connect(struct nvmet_req *req)
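
For illustration only (not part of the commit): a sketch of the error-path shape nvmet_install_queue() gains here, where any failure after the queue has been claimed funnels through a single err label that clears the claim so a failed Connect does not leave a half-installed queue behind. Types, helpers, and error values below are simplified stand-ins.

/*
 * Toy model: claim the queue up front, undo the claim on any later error
 * via one err label, mirroring the req->sq->ctrl = NULL added above.
 */
#include <stdio.h>

struct ctrl { int id; };
struct sq   { struct ctrl *ctrl; };

static int backend_install(struct sq *sq)
{
        (void)sq;
        return -1;              /* simulate a transport install failure */
}

static int install_queue(struct ctrl *ctrl, struct sq *sq, int sqsize)
{
        int ret;

        sq->ctrl = ctrl;        /* claim the queue */

        if (!sqsize) {
                ret = -22;      /* invalid parameter, stand-in value */
                goto err;
        }

        ret = backend_install(sq);
        if (ret) {
                fprintf(stderr, "install failed for ctrl %d: %d\n",
                        ctrl->id, ret);
                goto err;
        }

        return 0;

err:
        sq->ctrl = NULL;        /* undo the claim so the queue can be reused */
        return ret;
}

int main(void)
{
        struct ctrl c = { 1 };
        struct sq sq = { NULL };
        int ret = install_queue(&c, &sq, 32);

        printf("ret=%d, queue reusable=%s\n", ret, sq.ctrl == NULL ? "yes" : "no");
        return 0;
}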

drivers/nvme/target/io-cmd-bdev.c

Lines changed: 1 addition & 1 deletion
@@ -280,7 +280,7 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
 
 static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
 {
-        if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+        if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
                 return;
 
         switch (le32_to_cpu(req->cmd->dsm.attributes)) {

drivers/nvme/target/io-cmd-file.c

Lines changed: 1 addition & 1 deletion
@@ -336,7 +336,7 @@ static void nvmet_file_dsm_work(struct work_struct *w)
 
 static void nvmet_file_execute_dsm(struct nvmet_req *req)
 {
-        if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+        if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
                 return;
         INIT_WORK(&req->f.work, nvmet_file_dsm_work);
         schedule_work(&req->f.work);

drivers/nvme/target/nvmet.h

Lines changed: 1 addition & 0 deletions
@@ -374,6 +374,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                 struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
 bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 int nvmet_req_alloc_sgl(struct nvmet_req *req);
 void nvmet_req_free_sgl(struct nvmet_req *req);
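
For illustration only (not part of the commit): a toy check with the semantics of nvmet_check_data_len_lte(), which the DSM handlers above switch to. A DSM payload implied by the command may be smaller than the length described by the SGL, so the payload is rejected only when it exceeds the transfer length rather than requiring an exact match. The types below are userspace stand-ins, not the nvmet request structures.

/* Toy model: accept data_len <= transfer_len, reject only when larger. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct req { size_t transfer_len; };

static bool check_data_len_lte(const struct req *req, size_t data_len)
{
        return data_len <= req->transfer_len;
}

int main(void)
{
        struct req r = { .transfer_len = 4096 };

        /* e.g. 2 DSM ranges * 16 bytes each = 32 bytes, under the SGL length */
        printf("32 <= 4096:   %s\n", check_data_len_lte(&r, 32) ? "ok" : "reject");
        printf("8192 <= 4096: %s\n", check_data_len_lte(&r, 8192) ? "ok" : "reject");
        return 0;
}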
