Skip to content

Commit fb857b0

Browse files
committed
Merge tag 'nvme-6.2-2022-12-22' of git://git.infradead.org/nvme into block-6.2
Pull NVMe fixes from Christoph: "nvme fixes for Linux 6.2 - fix doorbell buffer value endianness (Klaus Jensen) - fix Linux vs NVMe page size mismatch (Keith Busch) - fix a potential memory access beyond the allocation limit (Keith Busch) - fix a multipath vs blktrace NULL pointer dereference (Yanjun Zhang)" * tag 'nvme-6.2-2022-12-22' of git://git.infradead.org/nvme: nvme: fix multipath crash caused by flush request when blktrace is enabled nvme-pci: fix page size checks nvme-pci: fix mempool alloc size nvme-pci: fix doorbell buffer value endianness
2 parents 53eab8e + 3659fb5 commit fb857b0

File tree

2 files changed

+20
-19
lines changed

2 files changed

+20
-19
lines changed

drivers/nvme/host/nvme.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -893,7 +893,7 @@ static inline void nvme_trace_bio_complete(struct request *req)
893893
{
894894
struct nvme_ns *ns = req->q->queuedata;
895895

896-
if (req->cmd_flags & REQ_NVME_MPATH)
896+
if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
897897
trace_block_bio_complete(ns->head->disk->queue, req->bio);
898898
}
899899

drivers/nvme/host/pci.c

Lines changed: 19 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636
#define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
3737
#define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
3838

39-
#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
39+
#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
4040

4141
/*
4242
* These can be higher, but we need to ensure that any command doesn't
@@ -144,9 +144,9 @@ struct nvme_dev {
144144
mempool_t *iod_mempool;
145145

146146
/* shadow doorbell buffer support: */
147-
u32 *dbbuf_dbs;
147+
__le32 *dbbuf_dbs;
148148
dma_addr_t dbbuf_dbs_dma_addr;
149-
u32 *dbbuf_eis;
149+
__le32 *dbbuf_eis;
150150
dma_addr_t dbbuf_eis_dma_addr;
151151

152152
/* host memory buffer support: */
@@ -208,10 +208,10 @@ struct nvme_queue {
208208
#define NVMEQ_SQ_CMB 1
209209
#define NVMEQ_DELETE_ERROR 2
210210
#define NVMEQ_POLLED 3
211-
u32 *dbbuf_sq_db;
212-
u32 *dbbuf_cq_db;
213-
u32 *dbbuf_sq_ei;
214-
u32 *dbbuf_cq_ei;
211+
__le32 *dbbuf_sq_db;
212+
__le32 *dbbuf_cq_db;
213+
__le32 *dbbuf_sq_ei;
214+
__le32 *dbbuf_cq_ei;
215215
struct completion delete_done;
216216
};
217217

@@ -343,20 +343,20 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
343343
}
344344

345345
/* Update dbbuf and return true if an MMIO is required */
346-
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
347-
volatile u32 *dbbuf_ei)
346+
static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
347+
volatile __le32 *dbbuf_ei)
348348
{
349349
if (dbbuf_db) {
350-
u16 old_value;
350+
u16 old_value, event_idx;
351351

352352
/*
353353
* Ensure that the queue is written before updating
354354
* the doorbell in memory
355355
*/
356356
wmb();
357357

358-
old_value = *dbbuf_db;
359-
*dbbuf_db = value;
358+
old_value = le32_to_cpu(*dbbuf_db);
359+
*dbbuf_db = cpu_to_le32(value);
360360

361361
/*
362362
* Ensure that the doorbell is updated before reading the event
@@ -366,7 +366,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
366366
*/
367367
mb();
368368

369-
if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
369+
event_idx = le32_to_cpu(*dbbuf_ei);
370+
if (!nvme_dbbuf_need_event(event_idx, value, old_value))
370371
return false;
371372
}
372373

@@ -380,9 +381,9 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
380381
*/
381382
static int nvme_pci_npages_prp(void)
382383
{
383-
unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
384-
NVME_CTRL_PAGE_SIZE);
385-
return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
384+
unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
385+
unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
386+
return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
386387
}
387388

388389
/*
@@ -392,7 +393,7 @@ static int nvme_pci_npages_prp(void)
392393
static int nvme_pci_npages_sgl(void)
393394
{
394395
return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
395-
PAGE_SIZE);
396+
NVME_CTRL_PAGE_SIZE);
396397
}
397398

398399
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -708,7 +709,7 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
708709
sge->length = cpu_to_le32(entries * sizeof(*sge));
709710
sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
710711
} else {
711-
sge->length = cpu_to_le32(PAGE_SIZE);
712+
sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
712713
sge->type = NVME_SGL_FMT_SEG_DESC << 4;
713714
}
714715
}

0 commit comments

Comments
 (0)