Skip to content

Commit 9275c20

Browse files
author
Christoph Hellwig
committed
nvme-pci: refactor nvme_unmap_data
Split out three helpers from nvme_unmap_data that will allow finer
grained unwinding from nvme_map_data.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Keith Busch <[email protected]>
Reviewed-by: Marc Orr <[email protected]>
1 parent bffcd50 commit 9275c20

File tree

1 file changed

+49
-28
lines changed

1 file changed

+49
-28
lines changed

drivers/nvme/host/pci.c

Lines changed: 49 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -543,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
543543
return true;
544544
}
545545

546-
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
546+
static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
547547
{
548-
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
549548
const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
550-
dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
549+
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
550+
dma_addr_t dma_addr = iod->first_dma;
551551
int i;
552552

553-
if (iod->dma_len) {
554-
dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
555-
rq_dma_dir(req));
556-
return;
553+
for (i = 0; i < iod->npages; i++) {
554+
__le64 *prp_list = nvme_pci_iod_list(req)[i];
555+
dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
556+
557+
dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
558+
dma_addr = next_dma_addr;
557559
}
558560

559-
WARN_ON_ONCE(!iod->nents);
561+
}
560562

561-
if (is_pci_p2pdma_page(sg_page(iod->sg)))
562-
pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
563-
rq_dma_dir(req));
564-
else
565-
dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
563+
static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
564+
{
565+
const int last_sg = SGES_PER_PAGE - 1;
566+
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
567+
dma_addr_t dma_addr = iod->first_dma;
568+
int i;
566569

570+
for (i = 0; i < iod->npages; i++) {
571+
struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
572+
dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
567573

568-
if (iod->npages == 0)
569-
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
570-
dma_addr);
574+
dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
575+
dma_addr = next_dma_addr;
576+
}
571577

572-
for (i = 0; i < iod->npages; i++) {
573-
void *addr = nvme_pci_iod_list(req)[i];
578+
}
574579

575-
if (iod->use_sgl) {
576-
struct nvme_sgl_desc *sg_list = addr;
580+
static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
581+
{
582+
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
577583

578-
next_dma_addr =
579-
le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
580-
} else {
581-
__le64 *prp_list = addr;
584+
if (is_pci_p2pdma_page(sg_page(iod->sg)))
585+
pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
586+
rq_dma_dir(req));
587+
else
588+
dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
589+
}
582590

583-
next_dma_addr = le64_to_cpu(prp_list[last_prp]);
584-
}
591+
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
592+
{
593+
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
585594

586-
dma_pool_free(dev->prp_page_pool, addr, dma_addr);
587-
dma_addr = next_dma_addr;
595+
if (iod->dma_len) {
596+
dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
597+
rq_dma_dir(req));
598+
return;
588599
}
589600

601+
WARN_ON_ONCE(!iod->nents);
602+
603+
nvme_unmap_sg(dev, req);
604+
if (iod->npages == 0)
605+
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
606+
iod->first_dma);
607+
else if (iod->use_sgl)
608+
nvme_free_sgls(dev, req);
609+
else
610+
nvme_free_prps(dev, req);
590611
mempool_free(iod->sg, dev->iod_mempool);
591612
}
592613

0 commit comments

Comments
 (0)