@@ -23,6 +23,7 @@
 #include <linux/t10-pi.h>
 #include <linux/types.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
 #include <linux/sed-opal.h>
 #include <linux/pci-p2pdma.h>
 
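Note on the new include: the CMBMSC write near the end of this diff uses hi_lo_writeq(). On 64-bit builds that is a single 64-bit MMIO store, but on 32-bit builds the generic fallback splits it into two 32-bit stores, high dword first. Since the CRE/CMSE enable bits of CMBMSC sit in the low dword, the high-then-low order ensures the controller never sees the enable bits before the full base address is in place. For reference, the generic fallback (as in include/linux/io-64-nonatomic-hi-lo.h) is roughly:

	static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
	{
		writel(val >> 32, addr + 4);	/* high dword first */
		writel(val, addr);		/* low dword (enable bits) last */
	}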
@@ -542,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 	return true;
 }
 
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
 {
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
-	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	dma_addr_t dma_addr = iod->first_dma;
 	int i;
 
-	if (iod->dma_len) {
-		dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
-			       rq_dma_dir(req));
-		return;
+	for (i = 0; i < iod->npages; i++) {
+		__le64 *prp_list = nvme_pci_iod_list(req)[i];
+		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+		dma_addr = next_dma_addr;
 	}
 
-	WARN_ON_ONCE(!iod->nents);
+}
 
-	if (is_pci_p2pdma_page(sg_page(iod->sg)))
-		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
-				    rq_dma_dir(req));
-	else
-		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+	const int last_sg = SGES_PER_PAGE - 1;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	dma_addr_t dma_addr = iod->first_dma;
+	int i;
 
+	for (i = 0; i < iod->npages; i++) {
+		struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+		dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
 
-	if (iod->npages == 0)
-		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
-			      dma_addr);
+		dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+		dma_addr = next_dma_addr;
+	}
 
-	for (i = 0; i < iod->npages; i++) {
-		void *addr = nvme_pci_iod_list(req)[i];
+}
 
-		if (iod->use_sgl) {
-			struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-			next_dma_addr =
-				le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
-		} else {
-			__le64 *prp_list = addr;
+	if (is_pci_p2pdma_page(sg_page(iod->sg)))
+		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+				    rq_dma_dir(req));
+	else
+		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
 
-			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
-		}
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
-		dma_addr = next_dma_addr;
+	if (iod->dma_len) {
+		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+			       rq_dma_dir(req));
+		return;
 	}
 
+	WARN_ON_ONCE(!iod->nents);
+
+	nvme_unmap_sg(dev, req);
+	if (iod->npages == 0)
+		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+			      iod->first_dma);
+	else if (iod->use_sgl)
+		nvme_free_sgls(dev, req);
+	else
+		nvme_free_prps(dev, req);
 	mempool_free(iod->sg, dev->iod_mempool);
 }
 
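The split works because both descriptor formats chain their pool pages the same way: the last slot of each page holds the bus address of the next page, so the free helpers can walk the chain by reading the next address before releasing the current page. The per-request page array they index comes from the existing nvme_pci_iod_list() helper, which in the driver as of this series stores the descriptor-page pointers right behind the scatterlist in the iod allocation, roughly:

	static void **nvme_pci_iod_list(struct request *req)
	{
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		/* descriptor-page pointers live after the scatterlist */
		return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
	}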
@@ -661,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 			__le64 *old_prp_list = prp_list;
 			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 			if (!prp_list)
-				return BLK_STS_RESOURCE;
+				goto free_prps;
 			list[iod->npages++] = prp_list;
 			prp_list[0] = old_prp_list[i - 1];
 			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -681,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 		dma_addr = sg_dma_address(sg);
 		dma_len = sg_dma_len(sg);
 	}
-
 done:
 	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
 	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
-
 	return BLK_STS_OK;
-
-bad_sgl:
+free_prps:
+	nvme_free_prps(dev, req);
+	return BLK_STS_RESOURCE;
+bad_sgl:
 	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
 			"Invalid SGL for payload:%d nents:%d\n",
 			blk_rq_payload_bytes(req), iod->nents);
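With the free_prps label in place, a mid-chain allocation failure no longer relies on the caller invoking the full nvme_unmap_data() to release the pages built so far; the setup helper unwinds its own allocations. The shape is the usual kernel goto unwind; a minimal standalone sketch (hypothetical helper, not from the driver), assuming each page's DMA handle is recorded at allocation time:

	static int build_chain(struct dma_pool *pool, void **pages,
			       dma_addr_t *dmas, int want)
	{
		int i;

		for (i = 0; i < want; i++) {
			pages[i] = dma_pool_alloc(pool, GFP_ATOMIC, &dmas[i]);
			if (!pages[i])
				goto free_pages;
		}
		return 0;

	free_pages:
		while (i--)			/* unwind only what was built */
			dma_pool_free(pool, pages[i], dmas[i]);
		return -ENOMEM;
	}

The driver itself avoids the side array by reusing each page's last slot to store the next page's DMA address, which is exactly the chain nvme_free_prps() walks.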
@@ -760,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 
 		sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
 		if (!sg_list)
-			return BLK_STS_RESOURCE;
+			goto free_sgls;
 
 		i = 0;
 		nvme_pci_iod_list(req)[iod->npages++] = sg_list;
@@ -773,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 	} while (--entries > 0);
 
 	return BLK_STS_OK;
+free_sgls:
+	nvme_free_sgls(dev, req);
+	return BLK_STS_RESOURCE;
 }
 
 static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
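nvme_free_sgls() can follow the same chain because the setup side, when a descriptor page fills up, turns the last SGE of the old page into a segment descriptor pointing at the new page. For context, the segment-descriptor helper already in the driver looks roughly like this (shown for reference, not part of this diff):

	static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
			dma_addr_t dma_addr, int entries)
	{
		sge->addr = cpu_to_le64(dma_addr);
		if (entries < SGES_PER_PAGE) {
			/* final page: a last-segment descriptor */
			sge->length = cpu_to_le32(entries * sizeof(*sge));
			sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
		} else {
			/* full page: an ordinary segment descriptor */
			sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
			sge->type = NVME_SGL_FMT_SEG_DESC << 4;
		}
	}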
@@ -841,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
 	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
 	if (!iod->nents)
-		goto out;
+		goto out_free_sg;
 
 	if (is_pci_p2pdma_page(sg_page(iod->sg)))
 		nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
@@ -850,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
 					     rq_dma_dir(req), DMA_ATTR_NO_WARN);
 	if (!nr_mapped)
-		goto out;
+		goto out_free_sg;
 
 	iod->use_sgl = nvme_pci_use_sgls(dev, req);
 	if (iod->use_sgl)
 		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
 	else
 		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-out:
 	if (ret != BLK_STS_OK)
-		nvme_unmap_data(dev, req);
+		goto out_unmap_sg;
+	return BLK_STS_OK;
+
+out_unmap_sg:
+	nvme_unmap_sg(dev, req);
+out_free_sg:
+	mempool_free(iod->sg, dev->iod_mempool);
 	return ret;
 }
 
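The rewritten tail separates the success return from two ordered error labels, so each failure point unwinds exactly what has been set up: a mapping failure only frees the scatterlist, while a setup failure also unmaps it (the descriptor pages having already been freed inside the setup helpers). All the map/unmap calls above take their direction from the request; for reference, rq_dma_dir() is defined in include/linux/blk-mq.h as:

	#define rq_dma_dir(rq) \
		(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)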
@@ -1795,6 +1825,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
 	if (dev->cmb_size)
 		return;
 
+	if (NVME_CAP_CMBS(dev->ctrl.cap))
+		writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
+
 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
 	if (!dev->cmbsz)
 		return;
@@ -1808,6 +1841,16 @@ static void nvme_map_cmb(struct nvme_dev *dev)
 	if (offset > bar_size)
 		return;
 
+	/*
+	 * Tell the controller about the host side address mapping the CMB,
+	 * and enable CMB decoding for the NVMe 1.4+ scheme:
+	 */
+	if (NVME_CAP_CMBS(dev->ctrl.cap)) {
+		hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
+			     (pci_bus_address(pdev, bar) + offset),
+			     dev->bar + NVME_REG_CMBMSC);
+	}
+
 	/*
 	 * Controllers may support a CMB size larger than their BAR,
 	 * for example, due to being behind a bridge. Reduce the CMB to
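Background for the two CMBMSC writes: NVMe 1.4 made the controller memory buffer opt-in from the host side. CAP.CMBS advertises the new scheme; the host must first set CMBMSC.CRE so that CMBLOC/CMBSZ read back as valid, and later set CMBMSC.CMSE together with the controller-visible bus address of the CMB before the buffer may be used. The definitions this diff relies on are added to include/linux/nvme.h by the same series; roughly:

	#define NVME_CAP_CMBS(cap)	(((cap) >> 57) & 0x1)

	enum {
		NVME_REG_CMBMSC	= 0x50,		/* Controller Memory Buffer
						 * Memory Space Control
						 */
	};

	enum {
		NVME_CMBMSC_CRE	 = 1 << 0,	/* capabilities registers enabled */
		NVME_CMBMSC_CMSE = 1 << 1,	/* controller memory space enable */
	};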