@@ -416,6 +416,30 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
+static bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+	rcu_read_lock();
+	if (!percpu_ref_tryget_live(&q->q_usage_counter))
+		goto fail;
+
+	/*
+	 * The code that increments the pm_only counter must ensure that the
+	 * counter is globally visible before the queue is unfrozen.
+	 */
+	if (blk_queue_pm_only(q) &&
+	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+		goto fail_put;
+
+	rcu_read_unlock();
+	return true;
+
+fail_put:
+	percpu_ref_put(&q->q_usage_counter);
+fail:
+	rcu_read_unlock();
+	return false;
+}
+
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -425,47 +449,27 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
 	const bool pm = flags & BLK_MQ_REQ_PM;
 
-	while (true) {
-		bool success = false;
-
-		rcu_read_lock();
-		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
-			/*
-			 * The code that increments the pm_only counter is
-			 * responsible for ensuring that that counter is
-			 * globally visible before the queue is unfrozen.
-			 */
-			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
-			    !blk_queue_pm_only(q)) {
-				success = true;
-			} else {
-				percpu_ref_put(&q->q_usage_counter);
-			}
-		}
-		rcu_read_unlock();
-
-		if (success)
-			return 0;
-
+	while (!blk_try_enter_queue(q, pm)) {
 		if (flags & BLK_MQ_REQ_NOWAIT)
 			return -EBUSY;
 
 		/*
-		 * read pair of barrier in blk_freeze_queue_start(),
-		 * we need to order reading __PERCPU_REF_DEAD flag of
-		 * .q_usage_counter and reading .mq_freeze_depth or
-		 * queue dying flag, otherwise the following wait may
-		 * never return if the two reads are reordered.
+		 * read pair of barrier in blk_freeze_queue_start(), we need to
+		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+		 * reading .mq_freeze_depth or queue dying flag, otherwise the
+		 * following wait may never return if the two reads are
+		 * reordered.
 		 */
 		smp_rmb();
-
 		wait_event(q->mq_freeze_wq,
 			   (!q->mq_freeze_depth &&
 			    blk_pm_resume_queue(pm, q)) ||
 			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
 	}
+
+	return 0;
 }
 
 static inline int bio_queue_enter(struct bio *bio)
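For context on how this path is used from the caller side, here is a minimal sketch of the enter/exit pairing around a queue. submit_work_sketch() is a hypothetical placeholder and not part of this patch; blk_queue_enter(), blk_queue_exit() and BLK_MQ_REQ_NOWAIT are the existing block-layer interfaces referenced in the diff above.

/*
 * Illustrative sketch only: the enter/exit pairing that blk_queue_enter()
 * callers follow. submit_work_sketch() is a made-up placeholder.
 */
static int submit_work_sketch(struct request_queue *q, bool nowait)
{
	int ret;

	/* Take a q_usage_counter reference, or bail out if the queue is frozen/dying. */
	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
	if (ret)
		return ret;	/* -EBUSY with NOWAIT set, -ENODEV if the queue is dying */

	/* ... issue work against the queue while the reference is held ... */

	/* Drop the reference so a pending freeze can make progress. */
	blk_queue_exit(q);
	return 0;
}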