Skip to content

Commit a692a61

Browse files
committed
Merge tag 'block-5.11-2021-01-24' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 - NVMe pull request from Christoph:
     - fix a status code in nvmet (Chaitanya Kulkarni)
     - avoid double completions in nvme-rdma/nvme-tcp (Chao Leng)
     - fix the CMB support to cope with NVMe 1.4 controllers (Klaus Jensen)
     - fix PRINFO handling in the passthrough ioctl (Revanth Rajashekar)
     - fix a double DMA unmap in nvme-pci

 - lightnvm error path leak fix (Pan)

 - MD pull request from Song:
     - Flush request fix (Xiao)

* tag 'block-5.11-2021-01-24' of git://git.kernel.dk/linux-block:
  lightnvm: fix memory leak when submit fails
  nvme-pci: fix error unwind in nvme_map_data
  nvme-pci: refactor nvme_unmap_data
  md: Set prev_flush_start and flush_bio in an atomic way
  nvmet: set right status on error in id-ns handler
  nvme-pci: allow use of cmb on v1.4 controllers
  nvme-tcp: avoid request double completion for concurrent nvme_tcp_timeout
  nvme-rdma: avoid request double completion for concurrent nvme_rdma_timeout
  nvme: check the PRINFO bit before deciding the host buffer length
2 parents 5130680 + 9778448 commit a692a61

File tree

8 files changed

+132
-52
lines changed

8 files changed

+132
-52
lines changed

drivers/lightnvm/core.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -844,11 +844,10 @@ static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
844844
rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
845845

846846
ret = nvm_submit_io_sync_raw(dev, &rqd);
847+
__free_page(page);
847848
if (ret)
848849
return ret;
849850

850-
__free_page(page);
851-
852851
return rqd.error;
853852
}
854853

drivers/md/md.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -639,8 +639,10 @@ static void md_submit_flush_data(struct work_struct *ws)
639639
* could wait for this and below md_handle_request could wait for those
640640
* bios because of suspend check
641641
*/
642+
spin_lock_irq(&mddev->lock);
642643
mddev->prev_flush_start = mddev->start_flush;
643644
mddev->flush_bio = NULL;
645+
spin_unlock_irq(&mddev->lock);
644646
wake_up(&mddev->sb_wait);
645647

646648
if (bio->bi_iter.bi_size == 0) {

drivers/nvme/host/core.c

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1543,8 +1543,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
15431543
}
15441544

15451545
length = (io.nblocks + 1) << ns->lba_shift;
1546-
meta_len = (io.nblocks + 1) * ns->ms;
1547-
metadata = nvme_to_user_ptr(io.metadata);
1546+
1547+
if ((io.control & NVME_RW_PRINFO_PRACT) &&
1548+
ns->ms == sizeof(struct t10_pi_tuple)) {
1549+
/*
1550+
* Protection information is stripped/inserted by the
1551+
* controller.
1552+
*/
1553+
if (nvme_to_user_ptr(io.metadata))
1554+
return -EINVAL;
1555+
meta_len = 0;
1556+
metadata = NULL;
1557+
} else {
1558+
meta_len = (io.nblocks + 1) * ns->ms;
1559+
metadata = nvme_to_user_ptr(io.metadata);
1560+
}
15481561

15491562
if (ns->features & NVME_NS_EXT_LBAS) {
15501563
length += meta_len;

drivers/nvme/host/pci.c

Lines changed: 81 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
#include <linux/t10-pi.h>
2424
#include <linux/types.h>
2525
#include <linux/io-64-nonatomic-lo-hi.h>
26+
#include <linux/io-64-nonatomic-hi-lo.h>
2627
#include <linux/sed-opal.h>
2728
#include <linux/pci-p2pdma.h>
2829

@@ -542,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
542543
return true;
543544
}
544545

545-
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
546+
static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
546547
{
547-
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
548548
const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
549-
dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
549+
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
550+
dma_addr_t dma_addr = iod->first_dma;
550551
int i;
551552

552-
if (iod->dma_len) {
553-
dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
554-
rq_dma_dir(req));
555-
return;
553+
for (i = 0; i < iod->npages; i++) {
554+
__le64 *prp_list = nvme_pci_iod_list(req)[i];
555+
dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
556+
557+
dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
558+
dma_addr = next_dma_addr;
556559
}
557560

558-
WARN_ON_ONCE(!iod->nents);
561+
}
559562

560-
if (is_pci_p2pdma_page(sg_page(iod->sg)))
561-
pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
562-
rq_dma_dir(req));
563-
else
564-
dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
563+
static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
564+
{
565+
const int last_sg = SGES_PER_PAGE - 1;
566+
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
567+
dma_addr_t dma_addr = iod->first_dma;
568+
int i;
565569

570+
for (i = 0; i < iod->npages; i++) {
571+
struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
572+
dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
566573

567-
if (iod->npages == 0)
568-
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
569-
dma_addr);
574+
dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
575+
dma_addr = next_dma_addr;
576+
}
570577

571-
for (i = 0; i < iod->npages; i++) {
572-
void *addr = nvme_pci_iod_list(req)[i];
578+
}
573579

574-
if (iod->use_sgl) {
575-
struct nvme_sgl_desc *sg_list = addr;
580+
static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
581+
{
582+
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
576583

577-
next_dma_addr =
578-
le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
579-
} else {
580-
__le64 *prp_list = addr;
584+
if (is_pci_p2pdma_page(sg_page(iod->sg)))
585+
pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
586+
rq_dma_dir(req));
587+
else
588+
dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
589+
}
581590

582-
next_dma_addr = le64_to_cpu(prp_list[last_prp]);
583-
}
591+
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
592+
{
593+
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
584594

585-
dma_pool_free(dev->prp_page_pool, addr, dma_addr);
586-
dma_addr = next_dma_addr;
595+
if (iod->dma_len) {
596+
dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
597+
rq_dma_dir(req));
598+
return;
587599
}
588600

601+
WARN_ON_ONCE(!iod->nents);
602+
603+
nvme_unmap_sg(dev, req);
604+
if (iod->npages == 0)
605+
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
606+
iod->first_dma);
607+
else if (iod->use_sgl)
608+
nvme_free_sgls(dev, req);
609+
else
610+
nvme_free_prps(dev, req);
589611
mempool_free(iod->sg, dev->iod_mempool);
590612
}
591613

@@ -661,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
661683
__le64 *old_prp_list = prp_list;
662684
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
663685
if (!prp_list)
664-
return BLK_STS_RESOURCE;
686+
goto free_prps;
665687
list[iod->npages++] = prp_list;
666688
prp_list[0] = old_prp_list[i - 1];
667689
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -681,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
681703
dma_addr = sg_dma_address(sg);
682704
dma_len = sg_dma_len(sg);
683705
}
684-
685706
done:
686707
cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
687708
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
688-
689709
return BLK_STS_OK;
690-
691-
bad_sgl:
710+
free_prps:
711+
nvme_free_prps(dev, req);
712+
return BLK_STS_RESOURCE;
713+
bad_sgl:
692714
WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
693715
"Invalid SGL for payload:%d nents:%d\n",
694716
blk_rq_payload_bytes(req), iod->nents);
@@ -760,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
760782

761783
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
762784
if (!sg_list)
763-
return BLK_STS_RESOURCE;
785+
goto free_sgls;
764786

765787
i = 0;
766788
nvme_pci_iod_list(req)[iod->npages++] = sg_list;
@@ -773,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
773795
} while (--entries > 0);
774796

775797
return BLK_STS_OK;
798+
free_sgls:
799+
nvme_free_sgls(dev, req);
800+
return BLK_STS_RESOURCE;
776801
}
777802

778803
static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
@@ -841,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
841866
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
842867
iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
843868
if (!iod->nents)
844-
goto out;
869+
goto out_free_sg;
845870

846871
if (is_pci_p2pdma_page(sg_page(iod->sg)))
847872
nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
@@ -850,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
850875
nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
851876
rq_dma_dir(req), DMA_ATTR_NO_WARN);
852877
if (!nr_mapped)
853-
goto out;
878+
goto out_free_sg;
854879

855880
iod->use_sgl = nvme_pci_use_sgls(dev, req);
856881
if (iod->use_sgl)
857882
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
858883
else
859884
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
860-
out:
861885
if (ret != BLK_STS_OK)
862-
nvme_unmap_data(dev, req);
886+
goto out_unmap_sg;
887+
return BLK_STS_OK;
888+
889+
out_unmap_sg:
890+
nvme_unmap_sg(dev, req);
891+
out_free_sg:
892+
mempool_free(iod->sg, dev->iod_mempool);
863893
return ret;
864894
}
865895

@@ -1795,6 +1825,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
17951825
if (dev->cmb_size)
17961826
return;
17971827

1828+
if (NVME_CAP_CMBS(dev->ctrl.cap))
1829+
writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
1830+
17981831
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
17991832
if (!dev->cmbsz)
18001833
return;
@@ -1808,6 +1841,16 @@ static void nvme_map_cmb(struct nvme_dev *dev)
18081841
if (offset > bar_size)
18091842
return;
18101843

1844+
/*
1845+
* Tell the controller about the host side address mapping the CMB,
1846+
* and enable CMB decoding for the NVMe 1.4+ scheme:
1847+
*/
1848+
if (NVME_CAP_CMBS(dev->ctrl.cap)) {
1849+
hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
1850+
(pci_bus_address(pdev, bar) + offset),
1851+
dev->bar + NVME_REG_CMBMSC);
1852+
}
1853+
18111854
/*
18121855
* Controllers may support a CMB size larger than their BAR,
18131856
* for example, due to being behind a bridge. Reduce the CMB to

drivers/nvme/host/rdma.c

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,7 @@ struct nvme_rdma_queue {
9797
struct completion cm_done;
9898
bool pi_support;
9999
int cq_size;
100+
struct mutex queue_lock;
100101
};
101102

102103
struct nvme_rdma_ctrl {
@@ -579,6 +580,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
579580
int ret;
580581

581582
queue = &ctrl->queues[idx];
583+
mutex_init(&queue->queue_lock);
582584
queue->ctrl = ctrl;
583585
if (idx && ctrl->ctrl.max_integrity_segments)
584586
queue->pi_support = true;
@@ -598,7 +600,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
598600
if (IS_ERR(queue->cm_id)) {
599601
dev_info(ctrl->ctrl.device,
600602
"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
601-
return PTR_ERR(queue->cm_id);
603+
ret = PTR_ERR(queue->cm_id);
604+
goto out_destroy_mutex;
602605
}
603606

604607
if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
@@ -628,6 +631,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
628631
out_destroy_cm_id:
629632
rdma_destroy_id(queue->cm_id);
630633
nvme_rdma_destroy_queue_ib(queue);
634+
out_destroy_mutex:
635+
mutex_destroy(&queue->queue_lock);
631636
return ret;
632637
}
633638

@@ -639,9 +644,10 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
639644

640645
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
641646
{
642-
if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
643-
return;
644-
__nvme_rdma_stop_queue(queue);
647+
mutex_lock(&queue->queue_lock);
648+
if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
649+
__nvme_rdma_stop_queue(queue);
650+
mutex_unlock(&queue->queue_lock);
645651
}
646652

647653
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -651,6 +657,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
651657

652658
nvme_rdma_destroy_queue_ib(queue);
653659
rdma_destroy_id(queue->cm_id);
660+
mutex_destroy(&queue->queue_lock);
654661
}
655662

656663
static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)

drivers/nvme/host/tcp.c

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,7 @@ struct nvme_tcp_queue {
7676
struct work_struct io_work;
7777
int io_cpu;
7878

79+
struct mutex queue_lock;
7980
struct mutex send_mutex;
8081
struct llist_head req_list;
8182
struct list_head send_list;
@@ -1219,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
12191220

12201221
sock_release(queue->sock);
12211222
kfree(queue->pdu);
1223+
mutex_destroy(&queue->queue_lock);
12221224
}
12231225

12241226
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
@@ -1380,6 +1382,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
13801382
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
13811383
int ret, rcv_pdu_size;
13821384

1385+
mutex_init(&queue->queue_lock);
13831386
queue->ctrl = ctrl;
13841387
init_llist_head(&queue->req_list);
13851388
INIT_LIST_HEAD(&queue->send_list);
@@ -1398,7 +1401,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
13981401
if (ret) {
13991402
dev_err(nctrl->device,
14001403
"failed to create socket: %d\n", ret);
1401-
return ret;
1404+
goto err_destroy_mutex;
14021405
}
14031406

14041407
/* Single syn retry */
@@ -1507,6 +1510,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
15071510
err_sock:
15081511
sock_release(queue->sock);
15091512
queue->sock = NULL;
1513+
err_destroy_mutex:
1514+
mutex_destroy(&queue->queue_lock);
15101515
return ret;
15111516
}
15121517

@@ -1534,9 +1539,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
15341539
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
15351540
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
15361541

1537-
if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1538-
return;
1539-
__nvme_tcp_stop_queue(queue);
1542+
mutex_lock(&queue->queue_lock);
1543+
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1544+
__nvme_tcp_stop_queue(queue);
1545+
mutex_unlock(&queue->queue_lock);
15401546
}
15411547

15421548
static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)

drivers/nvme/target/admin-cmd.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -487,8 +487,10 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
487487

488488
/* return an all zeroed buffer if we can't find an active namespace */
489489
ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
490-
if (!ns)
490+
if (!ns) {
491+
status = NVME_SC_INVALID_NS;
491492
goto done;
493+
}
492494

493495
nvmet_ns_revalidate(ns);
494496

@@ -541,7 +543,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
541543
id->nsattr |= (1 << 0);
542544
nvmet_put_namespace(ns);
543545
done:
544-
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
546+
if (!status)
547+
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
548+
545549
kfree(id);
546550
out:
547551
nvmet_req_complete(req, status);

0 commit comments

Comments (0)