2 files changed: 35 additions, 27 deletions

--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -386,30 +386,6 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
-static bool blk_try_enter_queue(struct request_queue *q, bool pm)
-{
-	rcu_read_lock();
-	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
-		goto fail;
-
-	/*
-	 * The code that increments the pm_only counter must ensure that the
-	 * counter is globally visible before the queue is unfrozen.
-	 */
-	if (blk_queue_pm_only(q) &&
-	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
-		goto fail_put;
-
-	rcu_read_unlock();
-	return true;
-
-fail_put:
-	blk_queue_exit(q);
-fail:
-	rcu_read_unlock();
-	return false;
-}
-
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -442,10 +418,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 	return 0;
 }
 
-static inline int bio_queue_enter(struct bio *bio)
+int __bio_queue_enter(struct request_queue *q, struct bio *bio)
 {
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
 	while (!blk_try_enter_queue(q, false)) {
 		struct gendisk *disk = bio->bi_bdev->bd_disk;
--- a/block/blk.h
+++ b/block/blk.h
@@ -55,6 +55,40 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 void blk_freeze_queue(struct request_queue *q);
 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 void blk_queue_start_drain(struct request_queue *q);
+int __bio_queue_enter(struct request_queue *q, struct bio *bio);
+
+static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+	rcu_read_lock();
+	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
+		goto fail;
+
+	/*
+	 * The code that increments the pm_only counter must ensure that the
+	 * counter is globally visible before the queue is unfrozen.
+	 */
+	if (blk_queue_pm_only(q) &&
+	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+		goto fail_put;
+
+	rcu_read_unlock();
+	return true;
+
+fail_put:
+	blk_queue_exit(q);
+fail:
+	rcu_read_unlock();
+	return false;
+}
+
+static inline int bio_queue_enter(struct bio *bio)
+{
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+
+	if (blk_try_enter_queue(q, false))
+		return 0;
+	return __bio_queue_enter(q, bio);
+}
 
 #define BIO_INLINE_VECS 4
 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
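
Taken together, the two hunks perform a classic fast-path/slow-path split: the common case of bio_queue_enter() becomes a static inline in blk.h, so bio submission pays only the percpu-ref tryget with no function call, while the rare case of a frozen, dying, or pm_only queue falls through to the out-of-line __bio_queue_enter() in blk-core.c. Below is a minimal userspace sketch of that pattern, not kernel code; every name in it (struct refcnt, get_ref, __get_slow) is a hypothetical stand-in.

/*
 * sketch.c - fast-path/slow-path split, in the style of the diff above.
 * All names are hypothetical stand-ins, not kernel APIs.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct refcnt {
	atomic_int  users;  /* stand-in for q->q_usage_counter */
	atomic_bool frozen; /* stand-in for a draining/frozen queue */
};

/*
 * Out-of-line slow path: the kernel version would sleep until the
 * queue is unfrozen and retry; this sketch just reports failure.
 */
static int __get_slow(struct refcnt *r)
{
	(void)r;
	return -1;
}

/*
 * Inline fast path, as it would live in a header: one increment plus
 * one load in the common case, no function call for the caller.
 */
static inline int get_ref(struct refcnt *r)
{
	atomic_fetch_add(&r->users, 1);
	if (!atomic_load(&r->frozen))
		return 0;
	/* raced with a freeze: drop the ref and take the slow path */
	atomic_fetch_sub(&r->users, 1);
	return __get_slow(r);
}

int main(void)
{
	struct refcnt r = { .users = 0, .frozen = false };

	if (get_ref(&r) == 0)
		printf("fast path taken, users=%d\n", atomic_load(&r.users));
	return 0;
}

The design point mirrors the diff: the inline body must stay tiny (one increment and one flag test) for inlining at every call site to be a win, while everything that can sleep or loop lives behind the single exported out-of-line call.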