
Commit 6c3c05b

Chaitanya Kulkarni authored and Christoph Hellwig committed
nvme-core: replace ctrl page size with a macro
Saving the nvme controller's page size was from a time when the driver
tried to use different sized pages, but this value is always set to a
constant, and has been this way for some time. Remove the 'page_size'
field and replace its usage with the constant value.

This also lets the compiler make some micro-optimizations in the I/O
path, and that's always a good thing.

Signed-off-by: Chaitanya Kulkarni <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
1 parent 5887450 commit 6c3c05b
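
For context, a minimal stand-alone sketch of the micro-optimization the commit message refers to (hypothetical names, not driver code): with a compile-time constant, page arithmetic folds into shifts and masks, while the old per-controller field forced a memory load and a true division.

/* Hypothetical illustration, not part of the patch. */
#define CTRL_PAGE_SHIFT	12
#define CTRL_PAGE_SIZE	(1 << CTRL_PAGE_SHIFT)

/* Divisor is a known power of two: compiles to an add and a shift. */
static unsigned npages_const(unsigned long len)
{
	return (len + CTRL_PAGE_SIZE - 1) >> CTRL_PAGE_SHIFT;
}

/* Divisor is only known at run time: needs a real divide. */
static unsigned npages_field(unsigned long len, unsigned page_size)
{
	return (len + page_size - 1) / page_size;
}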

File tree: 3 files changed, +37 -38 lines changed

drivers/nvme/host/core.c

Lines changed: 6 additions & 13 deletions
@@ -2345,12 +2345,7 @@ EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
 
 int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
 {
-	/*
-	 * Default to a 4K page size, with the intention to update this
-	 * path in the future to accomodate architectures with differing
-	 * kernel and IO page sizes.
-	 */
-	unsigned dev_page_min, page_shift = 12;
+	unsigned dev_page_min;
 	int ret;
 
 	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
@@ -2360,20 +2355,18 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
 	}
 	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
 
-	if (page_shift < dev_page_min) {
+	if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
 		dev_err(ctrl->device,
 			"Minimum device page size %u too large for host (%u)\n",
-			1 << dev_page_min, 1 << page_shift);
+			1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
 		return -ENODEV;
 	}
 
-	ctrl->page_size = 1 << page_shift;
-
 	if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
 		ctrl->ctrl_config = NVME_CC_CSS_CSI;
 	else
 		ctrl->ctrl_config = NVME_CC_CSS_NVM;
-	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
+	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
 	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
 	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
 	ctrl->ctrl_config |= NVME_CC_ENABLE;
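
Per the NVMe spec, CC.MPS encodes the memory page size as 2^(12 + MPS) bytes, which is why the shift has 12 subtracted; a worked sketch of the value written above (NVME_CC_MPS_SHIFT is 7 in the driver's register definitions):

/* Worked example, illustration only:
 *   MPS = NVME_CTRL_PAGE_SHIFT - 12 = 0  ->  2^(12 + 0) = 4096-byte pages
 *   ctrl_config |= 0 << 7;               (folds away entirely)
 * A hypothetical 16K controller page would need MPS = 14 - 12 = 2.
 */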
@@ -2423,13 +2416,13 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 
 	if (ctrl->max_hw_sectors) {
 		u32 max_segments =
-			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
+			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
 
 		max_segments = min_not_zero(max_segments, ctrl->max_segments);
 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
 	}
-	blk_queue_virt_boundary(q, ctrl->page_size - 1);
+	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
 	blk_queue_dma_alignment(q, 7);
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 		vwc = true;
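
A runnable check of the segment-limit arithmetic above, with an assumed max_hw_sectors; NVME_CTRL_PAGE_SIZE >> 9 is the number of 512-byte sectors per controller page, and the +1 allows for a transfer that starts mid-page.

#include <stdio.h>

#define NVME_CTRL_PAGE_SIZE 4096

int main(void)
{
	unsigned int max_hw_sectors = 2048;	/* assumed: 1 MiB in 512 B sectors */
	unsigned int max_segments =
		(max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;

	/* 8 sectors per 4K page: 2048 / 8 + 1 = 257 */
	printf("max_segments = %u\n", max_segments);
	return 0;
}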

drivers/nvme/host/nvme.h

Lines changed: 8 additions & 1 deletion
@@ -37,6 +37,14 @@ extern unsigned int admin_timeout;
 #define NVME_INLINE_METADATA_SG_CNT 1
 #endif
 
+/*
+ * Default to a 4K page size, with the intention to update this
+ * path in the future to accommodate architectures with differing
+ * kernel and IO page sizes.
+ */
+#define NVME_CTRL_PAGE_SHIFT	12
+#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)
+
 extern struct workqueue_struct *nvme_wq;
 extern struct workqueue_struct *nvme_reset_wq;
 extern struct workqueue_struct *nvme_delete_wq;
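
A minimal usage sketch for the new macro pair, converting between byte counts and controller pages; these helpers are hypothetical and only illustrate the intended idiom:

#include <linux/types.h>

/* Hypothetical helpers, not part of the patch. */
static inline u32 nvme_bytes_to_ctrl_pages(u64 len)
{
	return len >> NVME_CTRL_PAGE_SHIFT;	/* whole 4K units */
}

static inline u64 nvme_ctrl_pages_to_bytes(u32 npages)
{
	return (u64)npages << NVME_CTRL_PAGE_SHIFT;
}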
@@ -234,7 +242,6 @@ struct nvme_ctrl {
 	u32 queue_count;
 
 	u64 cap;
-	u32 page_size;
 	u32 max_hw_sectors;
 	u32 max_segments;
 	u32 max_integrity_segments;

drivers/nvme/host/pci.c

Lines changed: 23 additions & 24 deletions
@@ -348,8 +348,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
  */
 static int nvme_npages(unsigned size, struct nvme_dev *dev)
 {
-	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
-				      dev->ctrl.page_size);
+	unsigned nprps = DIV_ROUND_UP(size + NVME_CTRL_PAGE_SIZE,
+				      NVME_CTRL_PAGE_SIZE);
 	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
 }
 
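Worked numbers for the sizing above (illustration only): the extra page added to the dividend covers a worst-case unaligned start.

/* Example: size = 65536 (64 KiB), 4 KiB controller pages.
 *   nprps = DIV_ROUND_UP(65536 + 4096, 4096) = 17
 * PRP entries are 8 bytes and the last slot of a list page chains to
 * the next one, leaving PAGE_SIZE - 8 usable bytes per list page:
 *   DIV_ROUND_UP(8 * 17, 4096 - 8) = 1 PRP-list page
 */
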
@@ -515,7 +515,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
+	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
 	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
 	int i;
 
@@ -582,34 +582,33 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 	struct scatterlist *sg = iod->sg;
 	int dma_len = sg_dma_len(sg);
 	u64 dma_addr = sg_dma_address(sg);
-	u32 page_size = dev->ctrl.page_size;
-	int offset = dma_addr & (page_size - 1);
+	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
 	__le64 *prp_list;
 	void **list = nvme_pci_iod_list(req);
 	dma_addr_t prp_dma;
 	int nprps, i;
 
-	length -= (page_size - offset);
+	length -= (NVME_CTRL_PAGE_SIZE - offset);
 	if (length <= 0) {
 		iod->first_dma = 0;
 		goto done;
 	}
 
-	dma_len -= (page_size - offset);
+	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
 	if (dma_len) {
-		dma_addr += (page_size - offset);
+		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
 	} else {
 		sg = sg_next(sg);
 		dma_addr = sg_dma_address(sg);
 		dma_len = sg_dma_len(sg);
 	}
 
-	if (length <= page_size) {
+	if (length <= NVME_CTRL_PAGE_SIZE) {
 		iod->first_dma = dma_addr;
 		goto done;
 	}
 
-	nprps = DIV_ROUND_UP(length, page_size);
+	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
 	if (nprps <= (256 / 8)) {
 		pool = dev->prp_small_pool;
 		iod->npages = 0;
@@ -628,7 +627,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 	iod->first_dma = prp_dma;
 	i = 0;
 	for (;;) {
-		if (i == page_size >> 3) {
+		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
 			__le64 *old_prp_list = prp_list;
 			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 			if (!prp_list)
@@ -639,9 +638,9 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 			i = 1;
 		}
 		prp_list[i++] = cpu_to_le64(dma_addr);
-		dma_len -= page_size;
-		dma_addr += page_size;
-		length -= page_size;
+		dma_len -= NVME_CTRL_PAGE_SIZE;
+		dma_addr += NVME_CTRL_PAGE_SIZE;
+		length -= NVME_CTRL_PAGE_SIZE;
 		if (length <= 0)
 			break;
 		if (dma_len > 0)
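
An illustrative trace of the PRP construction above, assuming a 12 KiB transfer whose first byte sits 512 bytes into a controller page:

/* Assumed input: length = 12288, dma_addr offset = 512.
 *   offset = 512; length -= 4096 - 512  ->  length = 8704
 *   8704 > NVME_CTRL_PAGE_SIZE, so a PRP list is built:
 *   nprps = DIV_ROUND_UP(8704, 4096) = 3  (<= 256/8, small pool used)
 * The loop stores three entries, stepping dma_addr by
 * NVME_CTRL_PAGE_SIZE each time: length 8704 -> 4608 -> 512 -> done.
 */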
@@ -751,8 +750,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
 		struct bio_vec *bv)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
-	unsigned int first_prp_len = dev->ctrl.page_size - offset;
+	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
+	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
 
 	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
 	if (dma_mapping_error(dev->dev, iod->first_dma))
@@ -794,7 +793,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		struct bio_vec bv = req_bvec(req);
 
 		if (!is_pci_p2pdma_page(bv.bv_page)) {
-			if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2)
+			if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
 				return nvme_setup_prp_simple(dev, req,
 							     &cmnd->rw, &bv);
 
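The condition above selects the two-PRP fast path whenever the bio_vec fits inside two controller pages; a worked example with assumed values:

/* Assumed: bv.bv_offset = 512, bv.bv_len = 7680.
 *   512 + 7680 = 8192 <= 2 * NVME_CTRL_PAGE_SIZE  ->  simple path
 * Inside nvme_setup_prp_simple():
 *   offset        = 512 & (4096 - 1) = 512
 *   first_prp_len = 4096 - 512      = 3584
 * PRP1 maps the first 3584 bytes and PRP2 the remaining 4096,
 * so no PRP-list allocation is needed.
 */
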
@@ -1396,12 +1395,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 {
 	int q_depth = dev->q_depth;
 	unsigned q_size_aligned = roundup(q_depth * entry_size,
-					  dev->ctrl.page_size);
+					  NVME_CTRL_PAGE_SIZE);
 
 	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
 		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
 
-		mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
+		mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
 		q_depth = div_u64(mem_per_q, entry_size);
 
 		/*
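
Illustrative arithmetic for the CMB queue-depth shrink above, with assumed sizes:

/* Assumed: cmb_size = 1 MiB, nr_io_queues = 8, entry_size = 64 (SQE),
 * q_depth = 4096.
 *   q_size_aligned = roundup(4096 * 64, 4096)  = 256 KiB
 *   256 KiB * 8 queues = 2 MiB > 1 MiB, so shrink each queue:
 *   mem_per_q = round_down(1 MiB / 8, 4096)    = 128 KiB
 *   q_depth   = 128 KiB / 64                   = 2048 entries
 */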
@@ -1816,6 +1815,7 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 
 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 {
+	u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
 	u64 dma_addr = dev->host_mem_descs_dma;
 	struct nvme_command c;
 	int ret;
@@ -1824,8 +1824,7 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 	c.features.opcode	= nvme_admin_set_features;
 	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
 	c.features.dword11	= cpu_to_le32(bits);
-	c.features.dword12	= cpu_to_le32(dev->host_mem_size >>
-				      ilog2(dev->ctrl.page_size));
+	c.features.dword12	= cpu_to_le32(host_mem_size);
 	c.features.dword13	= cpu_to_le32(lower_32_bits(dma_addr));
 	c.features.dword14	= cpu_to_le32(upper_32_bits(dma_addr));
 	c.features.dword15	= cpu_to_le32(dev->nr_host_mem_descs);
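
The new host_mem_size local expresses the Host Memory Buffer size in controller-page units, which is what Set Features dword12 carries; a worked conversion (buffer size assumed for illustration):

/* Assumed: dev->host_mem_size = 32 MiB.
 *   host_mem_size = (32 << 20) >> NVME_CTRL_PAGE_SHIFT = 8192
 * i.e. dword12 reports 8192 4-KiB units, identical to the old
 * dev->host_mem_size >> ilog2(dev->ctrl.page_size) result.
 */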
@@ -1845,7 +1844,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 
 	for (i = 0; i < dev->nr_host_mem_descs; i++) {
 		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
-		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
+		size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;
 
 		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
 			       le64_to_cpu(desc->addr),
@@ -1897,7 +1896,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
 			break;
 
 		descs[i].addr = cpu_to_le64(dma_addr);
-		descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
+		descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE);
 		i++;
 	}
 
@@ -1913,7 +1912,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
 
 out_free_bufs:
 	while (--i >= 0) {
-		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
+		size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
 
 		dma_free_attrs(dev->dev, size, bufs[i],
 			       le64_to_cpu(descs[i].addr),
