Skip to content

Commit b75fd5f

Browse files
committed
block/nvme: keep BDRVNVMeState pointer in NVMeQueuePair
Passing around both BDRVNVMeState and NVMeQueuePair is unwieldy. Reduce the number of function arguments by keeping the BDRVNVMeState pointer in NVMeQueuePair. This will come in handy when a BH is introduced in a later patch and only one argument can be passed to it. Signed-off-by: Stefan Hajnoczi <[email protected]> Reviewed-by: Sergio Lopez <[email protected]> Reviewed-by: Philippe Mathieu-Daudé <[email protected]> Message-id: [email protected] Signed-off-by: Stefan Hajnoczi <[email protected]>
1 parent a5db74f commit b75fd5f

File tree

1 file changed

+38
-32
lines changed

1 file changed

+38
-32
lines changed

block/nvme.c

Lines changed: 38 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,8 @@
3939
*/
4040
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)
4141

42+
typedef struct BDRVNVMeState BDRVNVMeState;
43+
4244
typedef struct {
4345
int32_t head, tail;
4446
uint8_t *queue;
@@ -59,8 +61,11 @@ typedef struct {
5961
typedef struct {
6062
QemuMutex lock;
6163

64+
/* Read from I/O code path, initialized under BQL */
65+
BDRVNVMeState *s;
66+
int index;
67+
6268
/* Fields protected by BQL */
63-
int index;
6469
uint8_t *prp_list_pages;
6570

6671
/* Fields protected by @lock */
@@ -96,7 +101,7 @@ typedef volatile struct {
96101

97102
QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
98103

99-
typedef struct {
104+
struct BDRVNVMeState {
100105
AioContext *aio_context;
101106
QEMUVFIOState *vfio;
102107
NVMeRegs *regs;
@@ -130,7 +135,7 @@ typedef struct {
130135

131136
/* PCI address (required for nvme_refresh_filename()) */
132137
char *device;
133-
} BDRVNVMeState;
138+
};
134139

135140
#define NVME_BLOCK_OPT_DEVICE "device"
136141
#define NVME_BLOCK_OPT_NAMESPACE "namespace"
@@ -174,7 +179,7 @@ static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
174179
}
175180
}
176181

177-
static void nvme_free_queue_pair(BlockDriverState *bs, NVMeQueuePair *q)
182+
static void nvme_free_queue_pair(NVMeQueuePair *q)
178183
{
179184
qemu_vfree(q->prp_list_pages);
180185
qemu_vfree(q->sq.queue);
@@ -205,6 +210,7 @@ static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
205210
uint64_t prp_list_iova;
206211

207212
qemu_mutex_init(&q->lock);
213+
q->s = s;
208214
q->index = idx;
209215
qemu_co_queue_init(&q->free_req_queue);
210216
q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_NUM_REQS);
@@ -240,13 +246,15 @@ static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
240246

241247
return q;
242248
fail:
243-
nvme_free_queue_pair(bs, q);
249+
nvme_free_queue_pair(q);
244250
return NULL;
245251
}
246252

247253
/* With q->lock */
248-
static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
254+
static void nvme_kick(NVMeQueuePair *q)
249255
{
256+
BDRVNVMeState *s = q->s;
257+
250258
if (s->plugged || !q->need_kick) {
251259
return;
252260
}
@@ -295,21 +303,20 @@ static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
295303
}
296304

297305
/* With q->lock */
298-
static void nvme_wake_free_req_locked(BDRVNVMeState *s, NVMeQueuePair *q)
306+
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
299307
{
300308
if (!qemu_co_queue_empty(&q->free_req_queue)) {
301-
replay_bh_schedule_oneshot_event(s->aio_context,
309+
replay_bh_schedule_oneshot_event(q->s->aio_context,
302310
nvme_free_req_queue_cb, q);
303311
}
304312
}
305313

306314
/* Insert a request in the freelist and wake waiters */
307-
static void nvme_put_free_req_and_wake(BDRVNVMeState *s, NVMeQueuePair *q,
308-
NVMeRequest *req)
315+
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
309316
{
310317
qemu_mutex_lock(&q->lock);
311318
nvme_put_free_req_locked(q, req);
312-
nvme_wake_free_req_locked(s, q);
319+
nvme_wake_free_req_locked(q);
313320
qemu_mutex_unlock(&q->lock);
314321
}
315322

@@ -336,8 +343,9 @@ static inline int nvme_translate_error(const NvmeCqe *c)
336343
}
337344

338345
/* With q->lock */
339-
static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
346+
static bool nvme_process_completion(NVMeQueuePair *q)
340347
{
348+
BDRVNVMeState *s = q->s;
341349
bool progress = false;
342350
NVMeRequest *preq;
343351
NVMeRequest req;
@@ -386,7 +394,7 @@ static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
386394
/* Notify the device so it can post more completions. */
387395
smp_mb_release();
388396
*q->cq.doorbell = cpu_to_le32(q->cq.head);
389-
nvme_wake_free_req_locked(s, q);
397+
nvme_wake_free_req_locked(q);
390398
}
391399
q->busy = false;
392400
return progress;
@@ -403,8 +411,7 @@ static void nvme_trace_command(const NvmeCmd *cmd)
403411
}
404412
}
405413

406-
static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
407-
NVMeRequest *req,
414+
static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
408415
NvmeCmd *cmd, BlockCompletionFunc cb,
409416
void *opaque)
410417
{
@@ -413,15 +420,15 @@ static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
413420
req->opaque = opaque;
414421
cmd->cid = cpu_to_le32(req->cid);
415422

416-
trace_nvme_submit_command(s, q->index, req->cid);
423+
trace_nvme_submit_command(q->s, q->index, req->cid);
417424
nvme_trace_command(cmd);
418425
qemu_mutex_lock(&q->lock);
419426
memcpy((uint8_t *)q->sq.queue +
420427
q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
421428
q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
422429
q->need_kick++;
423-
nvme_kick(s, q);
424-
nvme_process_completion(s, q);
430+
nvme_kick(q);
431+
nvme_process_completion(q);
425432
qemu_mutex_unlock(&q->lock);
426433
}
427434

@@ -436,13 +443,12 @@ static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
436443
NvmeCmd *cmd)
437444
{
438445
NVMeRequest *req;
439-
BDRVNVMeState *s = bs->opaque;
440446
int ret = -EINPROGRESS;
441447
req = nvme_get_free_req(q);
442448
if (!req) {
443449
return -EBUSY;
444450
}
445-
nvme_submit_command(s, q, req, cmd, nvme_cmd_sync_cb, &ret);
451+
nvme_submit_command(q, req, cmd, nvme_cmd_sync_cb, &ret);
446452

447453
BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
448454
return ret;
@@ -554,7 +560,7 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
554560
}
555561

556562
qemu_mutex_lock(&q->lock);
557-
while (nvme_process_completion(s, q)) {
563+
while (nvme_process_completion(q)) {
558564
/* Keep polling */
559565
progress = true;
560566
}
@@ -592,7 +598,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
592598
};
593599
if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
594600
error_setg(errp, "Failed to create io queue [%d]", n);
595-
nvme_free_queue_pair(bs, q);
601+
nvme_free_queue_pair(q);
596602
return false;
597603
}
598604
cmd = (NvmeCmd) {
@@ -603,7 +609,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
603609
};
604610
if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
605611
error_setg(errp, "Failed to create io queue [%d]", n);
606-
nvme_free_queue_pair(bs, q);
612+
nvme_free_queue_pair(q);
607613
return false;
608614
}
609615
s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
@@ -798,7 +804,7 @@ static void nvme_close(BlockDriverState *bs)
798804
BDRVNVMeState *s = bs->opaque;
799805

800806
for (i = 0; i < s->nr_queues; ++i) {
801-
nvme_free_queue_pair(bs, s->queues[i]);
807+
nvme_free_queue_pair(s->queues[i]);
802808
}
803809
g_free(s->queues);
804810
aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
@@ -1028,10 +1034,10 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
10281034
r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
10291035
qemu_co_mutex_unlock(&s->dma_map_lock);
10301036
if (r) {
1031-
nvme_put_free_req_and_wake(s, ioq, req);
1037+
nvme_put_free_req_and_wake(ioq, req);
10321038
return r;
10331039
}
1034-
nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
1040+
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
10351041

10361042
data.co = qemu_coroutine_self();
10371043
while (data.ret == -EINPROGRESS) {
@@ -1131,7 +1137,7 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
11311137
assert(s->nr_queues > 1);
11321138
req = nvme_get_free_req(ioq);
11331139
assert(req);
1134-
nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
1140+
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
11351141

11361142
data.co = qemu_coroutine_self();
11371143
if (data.ret == -EINPROGRESS) {
@@ -1184,7 +1190,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
11841190
req = nvme_get_free_req(ioq);
11851191
assert(req);
11861192

1187-
nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
1193+
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
11881194

11891195
data.co = qemu_coroutine_self();
11901196
while (data.ret == -EINPROGRESS) {
@@ -1245,13 +1251,13 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
12451251
qemu_co_mutex_unlock(&s->dma_map_lock);
12461252

12471253
if (ret) {
1248-
nvme_put_free_req_and_wake(s, ioq, req);
1254+
nvme_put_free_req_and_wake(ioq, req);
12491255
goto out;
12501256
}
12511257

12521258
trace_nvme_dsm(s, offset, bytes);
12531259

1254-
nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
1260+
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
12551261

12561262
data.co = qemu_coroutine_self();
12571263
while (data.ret == -EINPROGRESS) {
@@ -1333,8 +1339,8 @@ static void nvme_aio_unplug(BlockDriverState *bs)
13331339
for (i = 1; i < s->nr_queues; i++) {
13341340
NVMeQueuePair *q = s->queues[i];
13351341
qemu_mutex_lock(&q->lock);
1336-
nvme_kick(s, q);
1337-
nvme_process_completion(s, q);
1342+
nvme_kick(q);
1343+
nvme_process_completion(q);
13381344
qemu_mutex_unlock(&q->lock);
13391345
}
13401346
}

0 commit comments

Comments
 (0)