#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
-#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

@@ -337,23 +336,25 @@ void blk_put_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_put_queue);

-void blk_set_queue_dying(struct request_queue *q)
+void blk_queue_start_drain(struct request_queue *q)
{
-	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
-
	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
-
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
-
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
+
+void blk_set_queue_dying(struct request_queue *q)
+{
+	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+	blk_queue_start_drain(q);
+}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
@@ -385,13 +386,8 @@ void blk_cleanup_queue(struct request_queue *q)
	 */
	blk_freeze_queue(q);

-	rq_qos_exit(q);
-
	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

-	/* for synchronous bio-based driver finish in-flight integrity i/o */
-	blk_flush_integrity();
-
	blk_sync_queue(q);
	if (queue_is_mq(q))
		blk_mq_exit_queue(q);
@@ -416,6 +412,30 @@ void blk_cleanup_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_cleanup_queue);

+static bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+	rcu_read_lock();
+	if (!percpu_ref_tryget_live(&q->q_usage_counter))
+		goto fail;
+
+	/*
+	 * The code that increments the pm_only counter must ensure that the
+	 * counter is globally visible before the queue is unfrozen.
+	 */
+	if (blk_queue_pm_only(q) &&
+	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+		goto fail_put;
+
+	rcu_read_unlock();
+	return true;
+
+fail_put:
+	percpu_ref_put(&q->q_usage_counter);
+fail:
+	rcu_read_unlock();
+	return false;
+}
+
/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
@@ -425,64 +445,62 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

-	while (true) {
-		bool success = false;
-
-		rcu_read_lock();
-		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
-			/*
-			 * The code that increments the pm_only counter is
-			 * responsible for ensuring that that counter is
-			 * globally visible before the queue is unfrozen.
-			 */
-			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
-			    !blk_queue_pm_only(q)) {
-				success = true;
-			} else {
-				percpu_ref_put(&q->q_usage_counter);
-			}
-		}
-		rcu_read_unlock();
-
-		if (success)
-			return 0;
-
+	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
-		 * read pair of barrier in blk_freeze_queue_start(),
-		 * we need to order reading __PERCPU_REF_DEAD flag of
-		 * .q_usage_counter and reading .mq_freeze_depth or
-		 * queue dying flag, otherwise the following wait may
-		 * never return if the two reads are reordered.
+		 * read pair of barrier in blk_freeze_queue_start(), we need to
+		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+		 * reading .mq_freeze_depth or queue dying flag, otherwise the
+		 * following wait may never return if the two reads are
+		 * reordered.
		 */
		smp_rmb();
-
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
+
+	return 0;
}

static inline int bio_queue_enter(struct bio *bio)
{
-	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-	bool nowait = bio->bi_opf & REQ_NOWAIT;
-	int ret;
+	struct gendisk *disk = bio->bi_bdev->bd_disk;
+	struct request_queue *q = disk->queue;

-	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
-	if (unlikely(ret)) {
-		if (nowait && !blk_queue_dying(q))
+	while (!blk_try_enter_queue(q, false)) {
+		if (bio->bi_opf & REQ_NOWAIT) {
+			if (test_bit(GD_DEAD, &disk->state))
+				goto dead;
			bio_wouldblock_error(bio);
-		else
-			bio_io_error(bio);
+			return -EBUSY;
+		}
+
+		/*
+		 * read pair of barrier in blk_freeze_queue_start(), we need to
+		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+		 * reading .mq_freeze_depth or queue dying flag, otherwise the
+		 * following wait may never return if the two reads are
+		 * reordered.
+		 */
+		smp_rmb();
+		wait_event(q->mq_freeze_wq,
+			   (!q->mq_freeze_depth &&
+			    blk_pm_resume_queue(false, q)) ||
+			   test_bit(GD_DEAD, &disk->state));
+		if (test_bit(GD_DEAD, &disk->state))
+			goto dead;
	}

-	return ret;
+	return 0;
+dead:
+	bio_io_error(bio);
+	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
@@ -899,11 +917,18 @@ static blk_qc_t __submit_bio(struct bio *bio)
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	blk_qc_t ret = BLK_QC_T_NONE;

-	if (blk_crypto_bio_prep(&bio)) {
-		if (!disk->fops->submit_bio)
-			return blk_mq_submit_bio(bio);
+	if (unlikely(bio_queue_enter(bio) != 0))
+		return BLK_QC_T_NONE;
+
+	if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
+		goto queue_exit;
+	if (disk->fops->submit_bio) {
		ret = disk->fops->submit_bio(bio);
+		goto queue_exit;
	}
+	return blk_mq_submit_bio(bio);
+
+queue_exit:
	blk_queue_exit(disk->queue);
	return ret;
}
@@ -941,9 +966,6 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
		struct request_queue *q = bio->bi_bdev->bd_disk->queue;
		struct bio_list lower, same;

-		if (unlikely(bio_queue_enter(bio) != 0))
-			continue;
-
		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
@@ -979,23 +1001,12 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };
-	blk_qc_t ret = BLK_QC_T_NONE;
+	blk_qc_t ret;

	current->bio_list = bio_list;

	do {
-		struct gendisk *disk = bio->bi_bdev->bd_disk;
-
-		if (unlikely(bio_queue_enter(bio) != 0))
-			continue;
-
-		if (!blk_crypto_bio_prep(&bio)) {
-			blk_queue_exit(disk->queue);
-			ret = BLK_QC_T_NONE;
-			continue;
-		}
-
-		ret = blk_mq_submit_bio(bio);
+		ret = __submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
@@ -1013,9 +1024,6 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
 */
blk_qc_t submit_bio_noacct(struct bio *bio)
{
-	if (!submit_bio_checks(bio))
-		return BLK_QC_T_NONE;
-
	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem. Use current->bio_list
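
For readers less familiar with the block layer's freeze machinery, the restructured enter path above follows a common "try the fast path, otherwise sleep until the gate reopens or the device is declared dead" pattern. Below is a minimal userspace sketch of that pattern using a mutex and condition variable; the gate struct and the gate_* helpers are invented names for illustration only and are not part of this commit, which uses a percpu_ref, mq_freeze_wq, and the QUEUE_FLAG_DYING/GD_DEAD flags instead.

/*
 * Illustrative analogy only -- not kernel code. Mirrors the structure of
 * blk_try_enter_queue()/blk_queue_enter()/blk_queue_exit()/blk_queue_start_drain().
 */
#include <pthread.h>
#include <stdbool.h>

struct gate {
	pthread_mutex_t lock;
	pthread_cond_t wq;	/* stands in for q->mq_freeze_wq */
	int users;		/* stands in for q->q_usage_counter */
	bool frozen;		/* stands in for mq_freeze_depth != 0 */
	bool dead;		/* stands in for QUEUE_FLAG_DYING / GD_DEAD */
};

/* Fast path, like blk_try_enter_queue(): only succeeds while the gate is open. */
static bool gate_try_enter(struct gate *g)
{
	bool ok = false;

	pthread_mutex_lock(&g->lock);
	if (!g->frozen && !g->dead) {
		g->users++;
		ok = true;
	}
	pthread_mutex_unlock(&g->lock);
	return ok;
}

/* Slow path, like the loops in blk_queue_enter() and bio_queue_enter(). */
static int gate_enter(struct gate *g)
{
	while (!gate_try_enter(g)) {
		pthread_mutex_lock(&g->lock);
		while (g->frozen && !g->dead)
			pthread_cond_wait(&g->wq, &g->lock);
		if (g->dead) {
			pthread_mutex_unlock(&g->lock);
			return -1;	/* the kernel code returns -ENODEV here */
		}
		pthread_mutex_unlock(&g->lock);
		/* Retry: the gate may have been frozen again before we got in. */
	}
	return 0;
}

/* Like blk_queue_exit(): drop the reference and wake anyone draining. */
static void gate_exit(struct gate *g)
{
	pthread_mutex_lock(&g->lock);
	g->users--;
	pthread_cond_broadcast(&g->wq);
	pthread_mutex_unlock(&g->lock);
}

/* Like blk_queue_start_drain(): block new entries and wake sleepers. */
static void gate_start_drain(struct gate *g)
{
	pthread_mutex_lock(&g->lock);
	g->frozen = true;
	pthread_cond_broadcast(&g->wq);
	pthread_mutex_unlock(&g->lock);
}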