@@ -47,7 +47,7 @@ typedef struct VirtIOSCSIReq {
     /* Used for two-stage request submission and TMFs deferred to BH */
     QTAILQ_ENTRY(VirtIOSCSIReq) next;
 
-    /* Used for cancellation of request during TMFs */
+    /* Used for cancellation of request during TMFs. Atomic. */
     int remaining;
 
     SCSIRequest *sreq;
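
The `QTAILQ_ENTRY` field above is intrusive list linkage: QEMU's QTAILQ macros are modeled on the BSD `<sys/queue.h>` tail queues, so a request can sit on its device's list without any separate node allocation. A minimal standalone sketch using the BSD originals (illustration only, not QEMU code):

    #include <stdio.h>
    #include <sys/queue.h>

    struct req {
        int tag;
        TAILQ_ENTRY(req) next;      /* like QTAILQ_ENTRY(VirtIOSCSIReq) next */
    };

    TAILQ_HEAD(req_list, req);

    int main(void)
    {
        struct req_list list = TAILQ_HEAD_INITIALIZER(list);
        struct req a = { .tag = 1 }, b = { .tag = 2 };
        struct req *r;

        TAILQ_INSERT_TAIL(&list, &a, next);
        TAILQ_INSERT_TAIL(&list, &b, next);

        TAILQ_FOREACH(r, &list, next) {
            printf("tag %d\n", r->tag);     /* prints tag 1, then tag 2 */
        }
        return 0;
    }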
@@ -298,19 +298,23 @@ typedef struct {
     VirtIOSCSIReq *tmf_req;
 } VirtIOSCSICancelNotifier;
 
+static void virtio_scsi_tmf_dec_remaining(VirtIOSCSIReq *tmf)
+{
+    if (qatomic_fetch_dec(&tmf->remaining) == 1) {
+        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(tmf->req.tmf.lun),
+                                   tmf->req.tmf.tag, tmf->resp.tmf.response);
+
+        virtio_scsi_complete_req(tmf, &tmf->dev->ctrl_lock);
+    }
+}
+
 static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
 {
     VirtIOSCSICancelNotifier *n = container_of(notifier,
                                                VirtIOSCSICancelNotifier,
                                                notifier);
 
-    if (--n->tmf_req->remaining == 0) {
-        VirtIOSCSIReq *req = n->tmf_req;
-
-        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
-                                   req->req.tmf.tag, req->resp.tmf.response);
-        virtio_scsi_complete_req(req, &req->dev->ctrl_lock);
-    }
+    virtio_scsi_tmf_dec_remaining(n->tmf_req);
 
     g_free(n);
 }
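
The new helper centralizes the completion check that virtio_scsi_cancel_notify() used to open-code. The idiom to note: qatomic_fetch_dec() returns the value before the decrement, so exactly one caller, whoever sees 1, observes the count hit zero and completes the TMF. A minimal sketch with C11 `<stdatomic.h>` standing in for QEMU's qatomic_*() helpers (names here are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int remaining;

    /* Like virtio_scsi_tmf_dec_remaining(): complete only on the last drop */
    static void put_ref(const char *who)
    {
        /* atomic_fetch_sub() returns the value *before* the subtraction */
        if (atomic_fetch_sub(&remaining, 1) == 1) {
            printf("%s dropped the last reference: complete the TMF\n", who);
        }
    }

    int main(void)
    {
        atomic_store(&remaining, 3);
        put_ref("notifier A");
        put_ref("notifier B");
        put_ref("dispatcher");  /* only this final call prints */
        return 0;
    }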
@@ -416,7 +420,7 @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
     }
 }
 
-static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
+static void virtio_scsi_defer_tmf_to_main_loop(VirtIOSCSIReq *req)
 {
     VirtIOSCSI *s = req->dev;
 
@@ -430,13 +434,145 @@ static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
     }
 }
 
+static void virtio_scsi_tmf_cancel_req(VirtIOSCSIReq *tmf, SCSIRequest *r)
+{
+    VirtIOSCSICancelNotifier *notifier;
+
+    assert(r->ctx == qemu_get_current_aio_context());
+
+    /* Decremented in virtio_scsi_cancel_notify() */
+    qatomic_inc(&tmf->remaining);
+
+    notifier = g_new(VirtIOSCSICancelNotifier, 1);
+    notifier->notifier.notify = virtio_scsi_cancel_notify;
+    notifier->tmf_req = tmf;
+    scsi_req_cancel_async(r, &notifier->notifier);
+}
+
+/* Execute a TMF on the requests in the current AioContext */
+static void virtio_scsi_do_tmf_aio_context(void *opaque)
+{
+    AioContext *ctx = qemu_get_current_aio_context();
+    VirtIOSCSIReq *tmf = opaque;
+    VirtIOSCSI *s = tmf->dev;
+    SCSIDevice *d = virtio_scsi_device_get(s, tmf->req.tmf.lun);
+    SCSIRequest *r;
+    bool match_tag;
+
+    if (!d) {
+        tmf->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
+        virtio_scsi_tmf_dec_remaining(tmf);
+        return;
+    }
+
+    /*
+     * This function could handle other subtypes that need to be processed in
+     * the request's AioContext in the future, but for now only request
+     * cancellation subtypes are performed here.
+     */
+    switch (tmf->req.tmf.subtype) {
+    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
+        match_tag = true;
+        break;
+    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
+    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
+        match_tag = false;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+        QTAILQ_FOREACH(r, &d->requests, next) {
+            VirtIOSCSIReq *cmd_req = r->hba_private;
+            assert(cmd_req); /* request has hba_private while enqueued */
+
+            if (r->ctx != ctx) {
+                continue;
+            }
+            if (match_tag && cmd_req->req.cmd.tag != tmf->req.tmf.tag) {
+                continue;
+            }
+            virtio_scsi_tmf_cancel_req(tmf, r);
+        }
+    }
+
+    /* Incremented by virtio_scsi_do_tmf() */
+    virtio_scsi_tmf_dec_remaining(tmf);
+
+    object_unref(d);
+}
+
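
Note the counting discipline above: the BH begins with the reference taken in virtio_scsi_defer_tmf_to_aio_context(), each virtio_scsi_tmf_cancel_req() adds one per in-flight cancellation, and the final decrement balances the dispatcher's own reference. Even if every cancel notifier fires before the loop finishes, the count cannot reach zero early. A standalone sketch of the bracketing pattern (C11 atomics, hypothetical names):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int remaining = 1;    /* dispatcher's reference, held up front */

    static void complete_if_last(void)
    {
        if (atomic_fetch_sub(&remaining, 1) == 1) {
            printf("all cancellations finished: TMF completes\n");
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            atomic_fetch_add(&remaining, 1);    /* one per scheduled cancel */
            complete_if_last();  /* notifier may fire at once; count stays > 0 */
        }
        complete_if_last();      /* drop the dispatcher's reference: completes */
        return 0;
    }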
+static void dummy_bh(void *opaque)
+{
+    /* Do nothing */
+}
+
+/*
+ * Wait for pending virtio_scsi_defer_tmf_to_aio_context() BHs.
+ */
+static void virtio_scsi_flush_defer_tmf_to_aio_context(VirtIOSCSI *s)
+{
+    GLOBAL_STATE_CODE();
+
+    assert(!s->dataplane_started);
+
+    if (s->ctx) {
+        /* Our BH only runs after previously scheduled BHs */
+        aio_wait_bh_oneshot(s->ctx, dummy_bh, NULL);
+    }
+}
+
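
aio_wait_bh_oneshot() schedules dummy_bh() in s->ctx and waits for it to run. Bottom halves in a single AioContext run in FIFO order, so once the no-op has run, every BH scheduled before it, including any pending virtio_scsi_do_tmf_aio_context() call, must have run as well. A toy model of why a no-op sentinel flushes a FIFO queue (plain C, no QEMU APIs):

    #include <stdio.h>

    typedef void (*bh_fn)(void *);

    struct bh { bh_fn fn; void *opaque; };

    /* Stand-in for an AioContext's pending bottom-half list (FIFO) */
    static struct bh queue[16];
    static int head, tail;

    static void schedule_bh(bh_fn fn, void *opaque)
    {
        queue[tail++] = (struct bh){ fn, opaque };
    }

    static void dummy_bh(void *opaque) { (void)opaque; /* do nothing */ }

    static void work(void *opaque) { printf("work %s ran\n", (char *)opaque); }

    int main(void)
    {
        schedule_bh(work, "A");
        schedule_bh(work, "B");

        /* "Flush": queue a sentinel, then run until the sentinel executes */
        schedule_bh(dummy_bh, NULL);
        while (head < tail) {
            struct bh bh = queue[head++];
            bh.fn(bh.opaque);
            if (bh.fn == dummy_bh) {
                break;  /* everything scheduled before the sentinel has run */
            }
        }
        return 0;
    }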
+/*
+ * Run the TMF in a specific AioContext, handling only requests in that
+ * AioContext. This is necessary because requests can run in different
+ * AioContexts and it is only possible to cancel them from the AioContext
+ * where they are running.
+ */
+static void virtio_scsi_defer_tmf_to_aio_context(VirtIOSCSIReq *tmf,
+                                                 AioContext *ctx)
+{
+    /* Decremented in virtio_scsi_do_tmf_aio_context() */
+    qatomic_inc(&tmf->remaining);
+
+    /* See virtio_scsi_flush_defer_tmf_to_aio_context() cleanup during reset */
+    aio_bh_schedule_oneshot(ctx, virtio_scsi_do_tmf_aio_context, tmf);
+}
+
+/*
+ * Returns the AioContext for a given TMF's tag field or NULL. Note that the
+ * request identified by the tag may have completed by the time you can execute
+ * a BH in the AioContext, so don't assume the request still exists in your BH.
+ */
+static AioContext *find_aio_context_for_tmf_tag(SCSIDevice *d,
+                                                VirtIOSCSIReq *tmf)
+{
+    WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+        SCSIRequest *r;
+        SCSIRequest *next;
+
+        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
+            VirtIOSCSIReq *cmd_req = r->hba_private;
+
+            /* hba_private is non-NULL while the request is enqueued */
+            assert(cmd_req);
+
+            if (cmd_req->req.cmd.tag == tmf->req.tmf.tag) {
+                return r->ctx;
+            }
+        }
+    }
+    return NULL;
+}
+
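
find_aio_context_for_tmf_tag() returns straight out of WITH_QEMU_LOCK_GUARD() and the lock is still released, because the guard is built on `__attribute__((cleanup))`, which runs the unlock whenever the guard variable leaves scope, on any exit path. A simplified pthreads re-creation of the idiom (a sketch only; QEMU's real macro in include/qemu/lockable.h is more general):

    #include <pthread.h>
    #include <stdio.h>

    static void unlock_cleanup(pthread_mutex_t **m)
    {
        pthread_mutex_unlock(*m);
    }

    /* Lock on entry; the cleanup unlocks when guard_ goes out of scope */
    #define WITH_LOCK_GUARD(m)                                                 \
        for (pthread_mutex_t *guard_ __attribute__((cleanup(unlock_cleanup))) \
                 = (pthread_mutex_lock(m), (m)), *done_ = NULL;                \
             !done_; done_ = (void *)1)

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int counter;

    int main(void)   /* build with: gcc -pthread guard.c */
    {
        WITH_LOCK_GUARD(&lock) {
            counter++;
            break;      /* early exit: the cleanup still unlocks */
        }

        WITH_LOCK_GUARD(&lock) {    /* would deadlock if still locked */
            printf("counter=%d\n", counter);
        }
        return 0;
    }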
 /* Return 0 if the request is ready to be completed and return to guest;
  * -EINPROGRESS if the request is submitted and will be completed later, in the
  * case of async cancellation. */
 static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
 {
     SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
     SCSIRequest *r, *next;
+    AioContext *ctx;
     int ret = 0;
 
     virtio_scsi_ctx_check(s, d);
@@ -454,52 +590,72 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
                               req->req.tmf.tag, req->req.tmf.subtype);
 
     switch (req->req.tmf.subtype) {
-    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
-    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
+    case VIRTIO_SCSI_T_TMF_ABORT_TASK: {
         if (!d) {
             goto fail;
         }
         if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
             goto incorrect_lun;
         }
-        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
-            VirtIOSCSIReq *cmd_req = r->hba_private;
-            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
-                break;
-            }
+
+        ctx = find_aio_context_for_tmf_tag(d, req);
+        if (ctx) {
+            virtio_scsi_defer_tmf_to_aio_context(req, ctx);
+            ret = -EINPROGRESS;
         }
-        if (r) {
-            /*
-             * Assert that the request has not been completed yet, we
-             * check for it in the loop above.
-             */
-            assert(r->hba_private);
-            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
-                /* "If the specified command is present in the task set, then
-                 * return a service response set to FUNCTION SUCCEEDED".
-                 */
-                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
-            } else {
-                VirtIOSCSICancelNotifier *notifier;
-
-                req->remaining = 1;
-                notifier = g_new(VirtIOSCSICancelNotifier, 1);
-                notifier->tmf_req = req;
-                notifier->notifier.notify = virtio_scsi_cancel_notify;
-                scsi_req_cancel_async(r, &notifier->notifier);
-                ret = -EINPROGRESS;
+        break;
+    }
+
+    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
+        if (!d) {
+            goto fail;
+        }
+        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+            goto incorrect_lun;
+        }
+
+        WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+            QTAILQ_FOREACH(r, &d->requests, next) {
+                VirtIOSCSIReq *cmd_req = r->hba_private;
+                assert(cmd_req); /* request has hba_private while enqueued */
+
+                if (cmd_req->req.cmd.tag == req->req.tmf.tag) {
+                    /*
+                     * "If the specified command is present in the task set,
+                     * then return a service response set to FUNCTION
+                     * SUCCEEDED".
+                     */
+                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+                }
             }
         }
         break;
 
     case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
     case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
-        virtio_scsi_defer_tmf_to_bh(req);
+        virtio_scsi_defer_tmf_to_main_loop(req);
         ret = -EINPROGRESS;
         break;
 
     case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
-    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
+    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: {
+        if (!d) {
+            goto fail;
+        }
+        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+            goto incorrect_lun;
+        }
+
+        qatomic_inc(&req->remaining);
+
+        ctx = s->ctx ?: qemu_get_aio_context();
+        virtio_scsi_defer_tmf_to_aio_context(req, ctx);
+
+        virtio_scsi_tmf_dec_remaining(req);
+        ret = -EINPROGRESS;
+        break;
+    }
+
     case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
         if (!d) {
             goto fail;
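
One construct above worth flagging: `ctx = s->ctx ?: qemu_get_aio_context();` uses the GNU C conditional with an omitted middle operand. `a ?: b` means `a ? a : b` but evaluates `a` only once; it is a gcc/clang extension, not ISO C. A short demonstration:

    #include <stdio.h>

    static const char *fallback(void)
    {
        return "main-loop context";
    }

    int main(void)
    {
        const char *ctx = NULL;
        printf("%s\n", ctx ?: fallback());  /* NULL, so prints the fallback */

        ctx = "iothread context";
        printf("%s\n", ctx ?: fallback());  /* fallback() is not called */
        return 0;
    }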
@@ -508,34 +664,19 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
             goto incorrect_lun;
         }
 
-        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
-         * This way, if the bus starts calling back to the notifiers
-         * even before we finish the loop, virtio_scsi_cancel_notify
-         * will not complete the TMF too early.
-         */
-        req->remaining = 1;
-        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
-            if (r->hba_private) {
-                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
-                    /* "If there is any command present in the task set, then
-                     * return a service response set to FUNCTION SUCCEEDED".
-                     */
-                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
-                    break;
-                } else {
-                    VirtIOSCSICancelNotifier *notifier;
-
-                    req->remaining++;
-                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
-                    notifier->notifier.notify = virtio_scsi_cancel_notify;
-                    notifier->tmf_req = req;
-                    scsi_req_cancel_async(r, &notifier->notifier);
-                }
+        WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+            QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
+                /* Request has hba_private while enqueued */
+                assert(r->hba_private);
+
+                /*
+                 * "If there is any command present in the task set, then
+                 * return a service response set to FUNCTION SUCCEEDED".
+                 */
+                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+                break;
             }
         }
-        if (--req->remaining > 0) {
-            ret = -EINPROGRESS;
-        }
         break;
 
     case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
@@ -941,6 +1082,7 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
     assert(!s->dataplane_started);
 
     virtio_scsi_reset_tmf_bh(s);
+    virtio_scsi_flush_defer_tmf_to_aio_context(s);
 
     qatomic_inc(&s->resetting);
     bus_cold_reset(BUS(&s->bus));