@@ -108,10 +108,10 @@ static struct cgroup_subsys_state *blkcg_css(void)
 	return task_css(current, io_cgrp_id);
 }
 
-static bool blkcg_policy_enabled(struct gendisk *disk,
+static bool blkcg_policy_enabled(struct request_queue *q,
 		const struct blkcg_policy *pol)
 {
-	return pol && test_bit(pol->plid, disk->blkcg_pols);
+	return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
 static void blkg_free_workfn(struct work_struct *work)
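Note: with this change blkcg_policy_enabled() keys off the policy bitmap on the request_queue rather than on the gendisk, and the later hunks flip that bit in blkcg_activate_policy()/blkcg_deactivate_policy(). A minimal userspace sketch of the same check, using invented stand-in types (fake_request_queue, fake_policy) and a plain unsigned long instead of the kernel's bitmap helpers:

#include <stdbool.h>
#include <stdio.h>

struct fake_request_queue {
	unsigned long pols;		/* stand-in for q->blkcg_pols */
};

struct fake_policy {
	int plid;			/* policy ID, the bit index */
};

/* mirrors blkcg_policy_enabled(): NULL policy or a cleared bit means "off" */
static bool policy_enabled(const struct fake_request_queue *q,
			   const struct fake_policy *pol)
{
	return pol && (q->pols & (1UL << pol->plid));
}

int main(void)
{
	struct fake_request_queue q = { .pols = 0 };
	struct fake_policy iolat = { .plid = 2 };

	printf("before activate: %d\n", policy_enabled(&q, &iolat));
	q.pols |= 1UL << iolat.plid;	/* what __set_bit() does on activation */
	printf("after activate:  %d\n", policy_enabled(&q, &iolat));
	return 0;
}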
@@ -123,18 +123,18 @@ static void blkg_free_workfn(struct work_struct *work)
 	/*
 	 * pd_free_fn() can also be called from blkcg_deactivate_policy(),
 	 * in order to make sure pd_free_fn() is called in order, the deletion
-	 * of the list blkg->entry is delayed to here from blkg_destroy(), and
+	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
 	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
 	 * blkcg_deactivate_policy().
 	 */
-	mutex_lock(&blkg->disk->blkcg_mutex);
+	mutex_lock(&blkg->disk->queue->blkcg_mutex);
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 	if (blkg->parent)
 		blkg_put(blkg->parent);
-	list_del_init(&blkg->entry);
-	mutex_unlock(&blkg->disk->blkcg_mutex);
+	list_del_init(&blkg->q_node);
+	mutex_unlock(&blkg->disk->queue->blkcg_mutex);
 
 	put_disk(blkg->disk);
 	free_percpu(blkg->iostat_cpu);
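Note: the comment in this hunk carries the ordering argument behind the list changes: blkg_destroy() no longer unlinks the group from the per-queue list; the unlink is deferred to the free worker, and both the free worker and blkcg_deactivate_policy() take blkcg_mutex, so pd_free_fn() calls stay ordered against policy deactivation. A rough userspace sketch of that shape, with invented names (fake_queue, fake_blkg, destroy, free_workfn) and a pthread mutex standing in for blkcg_mutex:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_blkg {
	struct fake_blkg *next;		/* node on the queue's group list */
	int online;			/* cleared at destroy time */
	int pd;				/* stand-in for per-policy data */
};

struct fake_queue {
	pthread_mutex_t blkcg_mutex;
	struct fake_blkg *blkg_list;	/* singly linked, head insertion */
};

/* like blkg_destroy(): take the group offline but keep it on the list */
static void destroy(struct fake_blkg *blkg)
{
	blkg->online = 0;
}

/* like blkg_free_workfn(): release policy data, and only now unlink */
static void free_workfn(struct fake_queue *q, struct fake_blkg *blkg)
{
	pthread_mutex_lock(&q->blkcg_mutex);
	blkg->pd = 0;				/* "pd_free_fn()" under the mutex */
	for (struct fake_blkg **p = &q->blkg_list; *p; p = &(*p)->next) {
		if (*p == blkg) {
			*p = blkg->next;	/* the deferred list_del() */
			break;
		}
	}
	pthread_mutex_unlock(&q->blkcg_mutex);
	free(blkg);
}

/* like blkcg_deactivate_policy(): walks the list under the same mutex */
static void deactivate(struct fake_queue *q)
{
	pthread_mutex_lock(&q->blkcg_mutex);
	for (struct fake_blkg *b = q->blkg_list; b; b = b->next)
		printf("still listed: online=%d pd=%d\n", b->online, b->pd);
	pthread_mutex_unlock(&q->blkcg_mutex);
}

int main(void)
{
	struct fake_queue q = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct fake_blkg *blkg = calloc(1, sizeof(*blkg));

	blkg->online = 1;
	blkg->pd = 42;
	blkg->next = q.blkg_list;
	q.blkg_list = blkg;	/* creation: head insertion */

	destroy(blkg);		/* offline, but still on the list */
	deactivate(&q);		/* deactivation can still see it */
	free_workfn(&q, blkg);	/* pd freed, then unlinked */
	deactivate(&q);		/* list is now empty */
	return 0;
}

Because deactivation and the deferred free serialize on the same mutex, a policy is never torn down while the free worker is halfway through releasing a group's data.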
@@ -269,7 +269,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 	get_device(disk_to_dev(disk));
 	blkg->disk = disk;
 
-	INIT_LIST_HEAD(&blkg->entry);
+	INIT_LIST_HEAD(&blkg->q_node);
 	spin_lock_init(&blkg->async_bio_lock);
 	bio_list_init(&blkg->async_bios);
 	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
@@ -285,7 +285,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 		struct blkcg_policy *pol = blkcg_policy[i];
 		struct blkg_policy_data *pd;
 
-		if (!blkcg_policy_enabled(disk, pol))
+		if (!blkcg_policy_enabled(disk->queue, pol))
 			continue;
 
 		/* alloc per-policy data and attach it to blkg */
@@ -371,7 +371,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
 	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
 	if (likely(!ret)) {
 		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
-		list_add(&blkg->entry, &disk->blkg_list);
+		list_add(&blkg->q_node, &disk->queue->blkg_list);
 
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
@@ -444,7 +444,7 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	while (true) {
 		struct blkcg *pos = blkcg;
 		struct blkcg *parent = blkcg_parent(blkcg);
-		struct blkcg_gq *ret_blkg = disk->root_blkg;
+		struct blkcg_gq *ret_blkg = q->root_blkg;
 
 		while (parent) {
 			blkg = blkg_lookup(parent, disk);
@@ -526,7 +526,7 @@ static void blkg_destroy_all(struct gendisk *disk)
 
 restart:
 	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry_safe(blkg, n, &disk->blkg_list, entry) {
+	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -545,7 +545,7 @@ static void blkg_destroy_all(struct gendisk *disk)
 		}
 	}
 
-	disk->root_blkg = NULL;
+	q->root_blkg = NULL;
 	spin_unlock_irq(&q->queue_lock);
 }
 
@@ -620,7 +620,7 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 		spin_lock_irq(&blkg->disk->queue->queue_lock);
-		if (blkcg_policy_enabled(blkg->disk, pol))
+		if (blkcg_policy_enabled(blkg->disk->queue, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
 		spin_unlock_irq(&blkg->disk->queue->queue_lock);
 	}
@@ -728,7 +728,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	rcu_read_lock();
 	spin_lock_irq(&q->queue_lock);
 
-	if (!blkcg_policy_enabled(disk, pol)) {
+	if (!blkcg_policy_enabled(q, pol)) {
 		ret = -EOPNOTSUPP;
 		goto fail_unlock;
 	}
@@ -771,7 +771,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		rcu_read_lock();
 		spin_lock_irq(&q->queue_lock);
 
-		if (!blkcg_policy_enabled(disk, pol)) {
+		if (!blkcg_policy_enabled(q, pol)) {
 			blkg_free(new_blkg);
 			ret = -EOPNOTSUPP;
 			goto fail_preloaded;
@@ -951,7 +951,7 @@ static void blkcg_fill_root_iostats(void)
 	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 	while ((dev = class_dev_iter_next(&iter))) {
 		struct block_device *bdev = dev_to_bdev(dev);
-		struct blkcg_gq *blkg = bdev->bd_disk->root_blkg;
+		struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
 		struct blkg_iostat tmp;
 		int cpu;
 		unsigned long flags;
@@ -1298,8 +1298,8 @@ int blkcg_init_disk(struct gendisk *disk)
 	bool preloaded;
 	int ret;
 
-	INIT_LIST_HEAD(&disk->blkg_list);
-	mutex_init(&disk->blkcg_mutex);
+	INIT_LIST_HEAD(&q->blkg_list);
+	mutex_init(&q->blkcg_mutex);
 
 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
 	if (!new_blkg)
@@ -1313,7 +1313,7 @@ int blkcg_init_disk(struct gendisk *disk)
 	blkg = blkg_create(&blkcg_root, disk, new_blkg);
 	if (IS_ERR(blkg))
 		goto err_unlock;
-	disk->root_blkg = blkg;
+	q->root_blkg = blkg;
 	spin_unlock_irq(&q->queue_lock);
 
 	if (preloaded)
@@ -1426,7 +1426,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 	struct blkcg_gq *blkg, *pinned_blkg = NULL;
 	int ret;
 
-	if (blkcg_policy_enabled(disk, pol))
+	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
 	if (queue_is_mq(q))
@@ -1435,7 +1435,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 	spin_lock_irq(&q->queue_lock);
 
 	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
-	list_for_each_entry_reverse(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
 		struct blkg_policy_data *pd;
 
 		if (blkg->pd[pol->plid])
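Note: the in-line comment explains the walk direction: blkg_create() adds new groups at the head of the per-queue list, so children (created after their parents) sit in front of them, and a reverse walk visits parents first. The next hunk keeps that order for the pd_init_fn()/pd_online_fn() passes and only publishes the policy bit in q->blkcg_pols once everything is set up. A tiny illustration of the head-insertion/reverse-walk property, with an invented node type and list helper:

#include <stdio.h>

struct node {
	const char *name;
	struct node *prev, *next;
};

/* insert right after the list head, like list_add() */
static void list_add_head(struct node *head, struct node *n)
{
	n->prev = head;
	n->next = head->next;
	head->next->prev = n;
	head->next = n;
}

int main(void)
{
	struct node head = { "head", &head, &head };
	struct node root = { "root" }, child = { "child" }, grandchild = { "grandchild" };

	/* creation order: root, then child, then grandchild, all head-inserted */
	list_add_head(&head, &root);
	list_add_head(&head, &child);
	list_add_head(&head, &grandchild);

	/* a forward walk sees the newest (deepest) groups first ... */
	for (struct node *n = head.next; n != &head; n = n->next)
		printf("forward: %s\n", n->name);

	/* ... a reverse walk sees every parent before its children */
	for (struct node *n = head.prev; n != &head; n = n->prev)
		printf("reverse: %s\n", n->name);
	return 0;
}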
@@ -1480,16 +1480,16 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 
 	/* all allocated, init in the same order */
 	if (pol->pd_init_fn)
-		list_for_each_entry_reverse(blkg, &disk->blkg_list, entry)
+		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
 			pol->pd_init_fn(blkg->pd[pol->plid]);
 
-	list_for_each_entry_reverse(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
 		if (pol->pd_online_fn)
 			pol->pd_online_fn(blkg->pd[pol->plid]);
 		blkg->pd[pol->plid]->online = true;
 	}
 
-	__set_bit(pol->plid, disk->blkcg_pols);
+	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
 
 	spin_unlock_irq(&q->queue_lock);
@@ -1505,7 +1505,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 enomem:
 	/* alloc failed, nothing's initialized yet, free everything */
 	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -1535,18 +1535,18 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 	struct request_queue *q = disk->queue;
 	struct blkcg_gq *blkg;
 
-	if (!blkcg_policy_enabled(disk, pol))
+	if (!blkcg_policy_enabled(q, pol))
 		return;
 
 	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 
-	mutex_lock(&disk->blkcg_mutex);
+	mutex_lock(&q->blkcg_mutex);
 	spin_lock_irq(&q->queue_lock);
 
-	__clear_bit(pol->plid, disk->blkcg_pols);
+	__clear_bit(pol->plid, q->blkcg_pols);
 
-	list_for_each_entry(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -1560,7 +1560,7 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 	}
 
 	spin_unlock_irq(&q->queue_lock);
-	mutex_unlock(&disk->blkcg_mutex);
+	mutex_unlock(&q->blkcg_mutex);
 
 	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
@@ -1957,7 +1957,7 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
  * Associate @bio with the blkg found by combining the css's blkg and the
  * request_queue of the @bio. An association failure is handled by walking up
  * the blkg tree. Therefore, the blkg associated can be anything between @blkg
- * and disk->root_blkg. This situation only happens when a cgroup is dying and
+ * and q->root_blkg. This situation only happens when a cgroup is dying and
  * then the remaining bios will spill to the closest alive blkg.
  *
  * A reference will be taken on the blkg and will be released when @bio is
@@ -1972,8 +1972,8 @@ void bio_associate_blkg_from_css(struct bio *bio,
 	if (css && css->parent) {
 		bio->bi_blkg = blkg_tryget_closest(bio, css);
 	} else {
-		blkg_get(bio->bi_bdev->bd_disk->root_blkg);
-		bio->bi_blkg = bio->bi_bdev->bd_disk->root_blkg;
+		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
+		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
 	}
 }
 EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
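Note: the kernel-doc above describes the fallback path: if the css's own group cannot be pinned because its cgroup is dying, association walks up the blkg tree and may end up anywhere between the requested group and q->root_blkg. A toy model of that "closest alive ancestor" walk, with invented types (struct grp, tryget) standing in for the percpu-refcount and RCU machinery of blkg_tryget_closest():

#include <stdbool.h>
#include <stdio.h>

struct grp {
	const char *name;
	struct grp *parent;
	bool dying;	/* stand-in for a failed reference grab */
	int refs;
};

static bool tryget(struct grp *g)
{
	if (g->dying)
		return false;
	g->refs++;
	return true;
}

/* walk toward the root until a live group accepts a reference */
static struct grp *tryget_closest(struct grp *g, struct grp *root)
{
	while (g && !tryget(g))
		g = g->parent;
	return g ? g : root;	/* spill to the root group as a last resort */
}

int main(void)
{
	struct grp root = { "root", NULL,  false, 0 };
	struct grp mid  = { "mid",  &root, true,  0 };	/* dying */
	struct grp leaf = { "leaf", &mid,  true,  0 };	/* dying */

	/* a bio aimed at "leaf" ends up associated with "root" here */
	printf("associated with: %s\n", tryget_closest(&leaf, &root)->name);
	return 0;
}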