@@ -348,8 +348,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
  */
 static int nvme_npages(unsigned size, struct nvme_dev *dev)
 {
-	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
-				      dev->ctrl.page_size);
+	unsigned nprps = DIV_ROUND_UP(size + NVME_CTRL_PAGE_SIZE,
+				      NVME_CTRL_PAGE_SIZE);
 	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
 }
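This hunk assumes NVME_CTRL_PAGE_SIZE is now a compile-time constant; the companion nvme.h change presumably defines it as (1 << NVME_CTRL_PAGE_SHIFT) with a shift of 12, i.e. a fixed 4 KiB controller page, replacing the runtime dev->ctrl.page_size field. A minimal userspace sketch of the worst-case PRP accounting above, under that 4 KiB assumption (DIV_ROUND_UP reimplemented locally):

#include <stdio.h>

#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)
#define PAGE_SIZE		4096	/* host page size, assumed 4 KiB here */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Worst-case number of PRP list pages for a transfer of 'size' bytes. */
static int nvme_npages(unsigned size)
{
	/* +1 page: a misaligned transfer can need one extra PRP entry. */
	unsigned nprps = DIV_ROUND_UP(size + NVME_CTRL_PAGE_SIZE,
				      NVME_CTRL_PAGE_SIZE);

	/* Each list page holds PAGE_SIZE/8 entries, one lost to chaining. */
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

int main(void)
{
	printf("%d\n", nvme_npages(128 * 1024));	/* prints 1 */
	return 0;
}

The "- 8" reserves the last 8-byte slot of each full PRP list page, which chains to the next list page rather than mapping data.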
@@ -515,7 +515,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
+	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
 	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
 	int i;
@@ -582,34 +582,33 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 	struct scatterlist *sg = iod->sg;
 	int dma_len = sg_dma_len(sg);
 	u64 dma_addr = sg_dma_address(sg);
-	u32 page_size = dev->ctrl.page_size;
-	int offset = dma_addr & (page_size - 1);
+	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
 	__le64 *prp_list;
 	void **list = nvme_pci_iod_list(req);
 	dma_addr_t prp_dma;
 	int nprps, i;

-	length -= (page_size - offset);
+	length -= (NVME_CTRL_PAGE_SIZE - offset);
 	if (length <= 0) {
 		iod->first_dma = 0;
 		goto done;
 	}

-	dma_len -= (page_size - offset);
+	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
 	if (dma_len) {
-		dma_addr += (page_size - offset);
+		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
 	} else {
 		sg = sg_next(sg);
 		dma_addr = sg_dma_address(sg);
 		dma_len = sg_dma_len(sg);
 	}

-	if (length <= page_size) {
+	if (length <= NVME_CTRL_PAGE_SIZE) {
 		iod->first_dma = dma_addr;
 		goto done;
 	}

-	nprps = DIV_ROUND_UP(length, page_size);
+	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
 	if (nprps <= (256 / 8)) {
 		pool = dev->prp_small_pool;
 		iod->npages = 0;
@@ -628,7 +627,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 	iod->first_dma = prp_dma;
 	i = 0;
 	for (;;) {
-		if (i == page_size >> 3) {
+		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
 			__le64 *old_prp_list = prp_list;
 			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 			if (!prp_list)
@@ -639,9 +638,9 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 			i = 1;
 		}
 		prp_list[i++] = cpu_to_le64(dma_addr);
-		dma_len -= page_size;
-		dma_addr += page_size;
-		length -= page_size;
+		dma_len -= NVME_CTRL_PAGE_SIZE;
+		dma_addr += NVME_CTRL_PAGE_SIZE;
+		length -= NVME_CTRL_PAGE_SIZE;
 		if (length <= 0)
 			break;
 		if (dma_len > 0)
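The loop above emits one PRP entry per controller page; when a list page fills (NVME_CTRL_PAGE_SIZE >> 3 entries), its last slot becomes a chain pointer to a fresh list page and counting restarts at i = 1. A hedged standalone sketch of the per-page walk for one contiguous region (prp_walk is a hypothetical helper, 4 KiB controller pages assumed):

#include <stdint.h>
#include <stdio.h>

#define NVME_CTRL_PAGE_SIZE	4096	/* assumed, matching the macro above */

/* Count the PRP list entries emitted for one contiguous DMA region. */
static unsigned prp_walk(uint64_t dma_addr, int length)
{
	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned entries = 0;

	/* prp1 in the command covers the first (possibly partial) page. */
	length -= NVME_CTRL_PAGE_SIZE - offset;
	dma_addr += NVME_CTRL_PAGE_SIZE - offset;

	/* Every remaining controller page gets its own PRP list entry. */
	while (length > 0) {
		entries++;
		dma_addr += NVME_CTRL_PAGE_SIZE;
		length -= NVME_CTRL_PAGE_SIZE;
	}
	return entries;
}

int main(void)
{
	/* 64 KiB starting 512 bytes into a page: prp1 + 16 list entries. */
	printf("%u PRP list entries\n", prp_walk(0x1000200, 64 * 1024));
	return 0;
}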
@@ -751,8 +750,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
 		struct bio_vec *bv)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
-	unsigned int first_prp_len = dev->ctrl.page_size - offset;
+	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
+	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

 	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
 	if (dma_mapping_error(dev->dev, iod->first_dma))
@@ -794,7 +793,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		struct bio_vec bv = req_bvec(req);

 		if (!is_pci_p2pdma_page(bv.bv_page)) {
-			if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2)
+			if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
 				return nvme_setup_prp_simple(dev, req,
 							     &cmnd->rw, &bv);

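The check above routes small transfers to nvme_setup_prp_simple(): anything that fits within two controller pages can be described by prp1 and prp2 in the command itself, with no external PRP list. A quick illustration of the bound (fits_two_prps is a hypothetical helper, 4 KiB pages assumed):

#include <stdbool.h>
#include <stdio.h>

#define NVME_CTRL_PAGE_SIZE	4096	/* assumed 4 KiB, as above */

/* True if offset+len fits in prp1/prp2 without an external PRP list. */
static bool fits_two_prps(unsigned int bv_offset, unsigned int bv_len)
{
	return bv_offset + bv_len <= NVME_CTRL_PAGE_SIZE * 2;
}

int main(void)
{
	printf("%d\n", fits_two_prps(512, 7680));	/* 1: exactly 8192 bytes */
	printf("%d\n", fits_two_prps(512, 8000));	/* 0: spills into a third page */
	return 0;
}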
@@ -1396,12 +1395,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 {
 	int q_depth = dev->q_depth;
 	unsigned q_size_aligned = roundup(q_depth * entry_size,
-					  dev->ctrl.page_size);
+					  NVME_CTRL_PAGE_SIZE);

 	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
 		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);

-		mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
+		mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
 		q_depth = div_u64(mem_per_q, entry_size);

 		/*
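When the page-aligned queues would overflow the controller memory buffer, the depth is clamped to what a page-aligned per-queue share can hold. A worked example with assumed numbers (1024 requested entries of 64 bytes, eight queues, a hypothetical 256 KiB CMB, 4 KiB pages):

#include <stdio.h>

#define NVME_CTRL_PAGE_SIZE	4096	/* assumed 4 KiB, as above */
#define ROUNDUP(x, y)		((((x) + (y) - 1) / (y)) * (y))
#define ROUND_DOWN(x, y)	(((x) / (y)) * (y))

int main(void)
{
	unsigned long cmb_size = 256 * 1024;	/* hypothetical 256 KiB CMB */
	int nr_io_queues = 8, entry_size = 64, q_depth = 1024;
	unsigned long q_size_aligned = ROUNDUP(q_depth * entry_size,
					       NVME_CTRL_PAGE_SIZE);

	/* 8 * 64 KiB = 512 KiB of queues won't fit in 256 KiB of CMB. */
	if (q_size_aligned * nr_io_queues > cmb_size) {
		unsigned long mem_per_q = cmb_size / nr_io_queues;

		mem_per_q = ROUND_DOWN(mem_per_q, NVME_CTRL_PAGE_SIZE);
		q_depth = mem_per_q / entry_size;	/* clamped to 512 */
	}
	printf("q_depth = %d\n", q_depth);
	return 0;
}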
@@ -1816,6 +1815,7 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)

 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 {
+	u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
 	u64 dma_addr = dev->host_mem_descs_dma;
 	struct nvme_command c;
 	int ret;
@@ -1824,8 +1824,7 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 	c.features.opcode	= nvme_admin_set_features;
 	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
 	c.features.dword11	= cpu_to_le32(bits);
-	c.features.dword12	= cpu_to_le32(dev->host_mem_size >>
-					      ilog2(dev->ctrl.page_size));
+	c.features.dword12	= cpu_to_le32(host_mem_size);
 	c.features.dword13	= cpu_to_le32(lower_32_bits(dma_addr));
 	c.features.dword14	= cpu_to_le32(upper_32_bits(dma_addr));
 	c.features.dword15	= cpu_to_le32(dev->nr_host_mem_descs);
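Dword12 of the Set Features command for the host memory buffer (NVME_FEAT_HOST_MEM_BUF) carries the buffer size in controller-page units, so the conversion moves from a runtime ilog2() to the constant NVME_CTRL_PAGE_SHIFT hoisted into host_mem_size above. A one-line check of the unit conversion, assuming a shift of 12:

#include <stdint.h>
#include <stdio.h>

#define NVME_CTRL_PAGE_SHIFT	12	/* assumed, as above */

int main(void)
{
	uint64_t host_mem_size = 32ULL << 20;	/* hypothetical 32 MiB HMB */

	/* Dword12 is expressed in controller pages: 8192 here. */
	printf("HMB size = %llu pages\n",
	       (unsigned long long)(host_mem_size >> NVME_CTRL_PAGE_SHIFT));
	return 0;
}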
@@ -1845,7 +1844,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)

 	for (i = 0; i < dev->nr_host_mem_descs; i++) {
 		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
-		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
+		size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;

 		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
 			       le64_to_cpu(desc->addr),
@@ -1897,7 +1896,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
 			break;

 		descs[i].addr = cpu_to_le64(dma_addr);
-		descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
+		descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE);
 		i++;
 	}
@@ -1913,7 +1912,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,

 out_free_bufs:
 	while (--i >= 0) {
-		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
+		size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;

 		dma_free_attrs(dev->dev, size, bufs[i],
 			       le64_to_cpu(descs[i].addr),
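The HMB descriptor size fields are likewise stored in controller-page units, which is why both free paths multiply by NVME_CTRL_PAGE_SIZE to recover a byte count. A sketch of that round trip, using a stand-in for the driver's struct nvme_host_mem_buf_desc (field layout assumed):

#include <stdint.h>
#include <stdio.h>

#define NVME_CTRL_PAGE_SIZE	4096	/* assumed 4 KiB, as above */

/* Minimal stand-in for the driver's struct nvme_host_mem_buf_desc. */
struct hmb_desc {
	uint64_t addr;
	uint32_t size;	/* in controller pages, not bytes */
	uint32_t rsvd;
};

int main(void)
{
	size_t len = 2 * 1024 * 1024;			/* one 2 MiB chunk */
	struct hmb_desc desc = {
		.addr = 0x100000000ULL,			/* hypothetical DMA address */
		.size = len / NVME_CTRL_PAGE_SIZE,	/* stored as 512 pages */
	};

	/* Freeing scales back to bytes, as in nvme_free_host_mem(). */
	printf("free %zu bytes\n", (size_t)desc.size * NVME_CTRL_PAGE_SIZE);
	return 0;
}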