  */
 #define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)
 
+typedef struct BDRVNVMeState BDRVNVMeState;
+
 typedef struct {
     int32_t head, tail;
     uint8_t *queue;
@@ -59,8 +61,11 @@ typedef struct {
 typedef struct {
     QemuMutex lock;
 
+    /* Read from I/O code path, initialized under BQL */
+    BDRVNVMeState *s;
+    int index;
+
     /* Fields protected by BQL */
-    int index;
     uint8_t *prp_list_pages;
 
     /* Fields protected by @lock */
@@ -96,7 +101,7 @@ typedef volatile struct {
 
 QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
 
-typedef struct {
+struct BDRVNVMeState {
     AioContext *aio_context;
     QEMUVFIOState *vfio;
     NVMeRegs *regs;
@@ -130,7 +135,7 @@ typedef struct {
 
     /* PCI address (required for nvme_refresh_filename()) */
     char *device;
-} BDRVNVMeState;
+};
 
 #define NVME_BLOCK_OPT_DEVICE "device"
 #define NVME_BLOCK_OPT_NAMESPACE "namespace"
@@ -174,7 +179,7 @@ static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
     }
 }
 
-static void nvme_free_queue_pair(BlockDriverState *bs, NVMeQueuePair *q)
+static void nvme_free_queue_pair(NVMeQueuePair *q)
 {
     qemu_vfree(q->prp_list_pages);
     qemu_vfree(q->sq.queue);
@@ -205,6 +210,7 @@ static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
     uint64_t prp_list_iova;
 
     qemu_mutex_init(&q->lock);
+    q->s = s;
     q->index = idx;
     qemu_co_queue_init(&q->free_req_queue);
     q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_NUM_REQS);
@@ -240,13 +246,15 @@ static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
 
     return q;
 fail:
-    nvme_free_queue_pair(bs, q);
+    nvme_free_queue_pair(q);
     return NULL;
 }
 
 /* With q->lock */
-static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
+static void nvme_kick(NVMeQueuePair *q)
 {
+    BDRVNVMeState *s = q->s;
+
     if (s->plugged || !q->need_kick) {
         return;
     }
@@ -295,21 +303,20 @@ static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
 }
 
 /* With q->lock */
-static void nvme_wake_free_req_locked(BDRVNVMeState *s, NVMeQueuePair *q)
+static void nvme_wake_free_req_locked(NVMeQueuePair *q)
 {
     if (!qemu_co_queue_empty(&q->free_req_queue)) {
-        replay_bh_schedule_oneshot_event(s->aio_context,
+        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                          nvme_free_req_queue_cb, q);
     }
 }
 
 /* Insert a request in the freelist and wake waiters */
-static void nvme_put_free_req_and_wake(BDRVNVMeState *s, NVMeQueuePair *q,
-                                       NVMeRequest *req)
+static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
 {
     qemu_mutex_lock(&q->lock);
     nvme_put_free_req_locked(q, req);
-    nvme_wake_free_req_locked(s, q);
+    nvme_wake_free_req_locked(q);
     qemu_mutex_unlock(&q->lock);
 }
 
@@ -336,8 +343,9 @@ static inline int nvme_translate_error(const NvmeCqe *c)
 }
 
 /* With q->lock */
-static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
+static bool nvme_process_completion(NVMeQueuePair *q)
 {
+    BDRVNVMeState *s = q->s;
     bool progress = false;
     NVMeRequest *preq;
     NVMeRequest req;
@@ -386,7 +394,7 @@ static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
         /* Notify the device so it can post more completions. */
         smp_mb_release();
         *q->cq.doorbell = cpu_to_le32(q->cq.head);
-        nvme_wake_free_req_locked(s, q);
+        nvme_wake_free_req_locked(q);
     }
     q->busy = false;
     return progress;
@@ -403,8 +411,7 @@ static void nvme_trace_command(const NvmeCmd *cmd)
     }
 }
 
-static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
-                                NVMeRequest *req,
+static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                 NvmeCmd *cmd, BlockCompletionFunc cb,
                                 void *opaque)
 {
@@ -413,15 +420,15 @@ static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
     req->opaque = opaque;
     cmd->cid = cpu_to_le32(req->cid);
 
-    trace_nvme_submit_command(s, q->index, req->cid);
+    trace_nvme_submit_command(q->s, q->index, req->cid);
     nvme_trace_command(cmd);
     qemu_mutex_lock(&q->lock);
     memcpy((uint8_t *)q->sq.queue +
            q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
     q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
     q->need_kick++;
-    nvme_kick(s, q);
-    nvme_process_completion(s, q);
+    nvme_kick(q);
+    nvme_process_completion(q);
     qemu_mutex_unlock(&q->lock);
 }
 
@@ -436,13 +443,12 @@ static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                          NvmeCmd *cmd)
 {
     NVMeRequest *req;
-    BDRVNVMeState *s = bs->opaque;
     int ret = -EINPROGRESS;
     req = nvme_get_free_req(q);
     if (!req) {
         return -EBUSY;
     }
-    nvme_submit_command(s, q, req, cmd, nvme_cmd_sync_cb, &ret);
+    nvme_submit_command(q, req, cmd, nvme_cmd_sync_cb, &ret);
 
     BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
     return ret;
@@ -554,7 +560,7 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
         }
 
         qemu_mutex_lock(&q->lock);
-        while (nvme_process_completion(s, q)) {
+        while (nvme_process_completion(q)) {
             /* Keep polling */
             progress = true;
         }
@@ -592,7 +598,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
     };
     if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
         error_setg(errp, "Failed to create io queue [%d]", n);
-        nvme_free_queue_pair(bs, q);
+        nvme_free_queue_pair(q);
         return false;
     }
     cmd = (NvmeCmd) {
@@ -603,7 +609,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
     };
     if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
         error_setg(errp, "Failed to create io queue [%d]", n);
-        nvme_free_queue_pair(bs, q);
+        nvme_free_queue_pair(q);
         return false;
     }
     s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
@@ -798,7 +804,7 @@ static void nvme_close(BlockDriverState *bs)
     BDRVNVMeState *s = bs->opaque;
 
     for (i = 0; i < s->nr_queues; ++i) {
-        nvme_free_queue_pair(bs, s->queues[i]);
+        nvme_free_queue_pair(s->queues[i]);
     }
     g_free(s->queues);
     aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
@@ -1028,10 +1034,10 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
     r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
     qemu_co_mutex_unlock(&s->dma_map_lock);
     if (r) {
-        nvme_put_free_req_and_wake(s, ioq, req);
+        nvme_put_free_req_and_wake(ioq, req);
         return r;
     }
-    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
+    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
 
     data.co = qemu_coroutine_self();
     while (data.ret == -EINPROGRESS) {
@@ -1131,7 +1137,7 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
     assert(s->nr_queues > 1);
     req = nvme_get_free_req(ioq);
     assert(req);
-    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
+    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
 
     data.co = qemu_coroutine_self();
     if (data.ret == -EINPROGRESS) {
@@ -1184,7 +1190,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
     req = nvme_get_free_req(ioq);
     assert(req);
 
-    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
+    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
 
     data.co = qemu_coroutine_self();
     while (data.ret == -EINPROGRESS) {
@@ -1245,13 +1251,13 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
     qemu_co_mutex_unlock(&s->dma_map_lock);
 
     if (ret) {
-        nvme_put_free_req_and_wake(s, ioq, req);
+        nvme_put_free_req_and_wake(ioq, req);
         goto out;
     }
 
     trace_nvme_dsm(s, offset, bytes);
 
-    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
+    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
 
     data.co = qemu_coroutine_self();
     while (data.ret == -EINPROGRESS) {
@@ -1333,8 +1339,8 @@ static void nvme_aio_unplug(BlockDriverState *bs)
     for (i = 1; i < s->nr_queues; i++) {
         NVMeQueuePair *q = s->queues[i];
         qemu_mutex_lock(&q->lock);
-        nvme_kick(s, q);
-        nvme_process_completion(s, q);
+        nvme_kick(q);
+        nvme_process_completion(q);
         qemu_mutex_unlock(&q->lock);
     }
 }
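The pattern applied throughout this diff, shown below as a minimal, self-contained sketch with hypothetical names (State, QueuePair, kick; not QEMU code): the queue pair stores a back-pointer to its owning state, set once at creation, so per-queue helpers such as nvme_kick() and nvme_process_completion() no longer need the state passed as a separate argument.

/*
 * Sketch only: the owning state is reachable through the queue pair itself,
 * mirroring the q->s back-pointer added by this commit.
 */
#include <stdio.h>

typedef struct State State;      /* forward declaration, as the patch adds */

typedef struct {
    State *s;                    /* back-pointer, initialized once at creation */
    int index;
} QueuePair;

struct State {
    const char *name;
};

/* Helper takes only the queue pair and reaches the state via q->s. */
static void kick(QueuePair *q)
{
    State *s = q->s;
    printf("kick queue %d of %s\n", q->index, s->name);
}

int main(void)
{
    State st = { .name = "nvme0" };
    QueuePair q = { .s = &st, .index = 1 };
    kick(&q);
    return 0;
}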