
Commit 1231039

Christoph Hellwig authored and axboe committed
Revert "blk-cgroup: move the cgroup information to struct gendisk"
This reverts commit 3f13ab7 as a patch it depends on caused a few problems.

Signed-off-by: Christoph Hellwig <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent 2f1e07d commit 1231039

File tree

6 files changed: +50, -54 lines changed


block/bfq-cgroup.c

Lines changed: 2 additions & 2 deletions

@@ -999,7 +999,7 @@ void bfq_end_wr_async(struct bfq_data *bfqd)
 {
 	struct blkcg_gq *blkg;
 
-	list_for_each_entry(blkg, &bfqd->queue->disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
 		struct bfq_group *bfqg = blkg_to_bfqg(blkg);
 
 		bfq_end_wr_async_queues(bfqd, bfqg);
@@ -1293,7 +1293,7 @@ struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
 	if (ret)
 		return NULL;
 
-	return blkg_to_bfqg(bfqd->queue->disk->root_blkg);
+	return blkg_to_bfqg(bfqd->queue->root_blkg);
 }
 
 struct blkcg_policy blkcg_policy_bfq = {
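
Both hunks above revert the same pattern: blkg structures hang off an intrusive list, and list_for_each_entry() takes the name of the linkage member embedded in each element (q_node after this revert, entry before it). Below is a minimal userspace sketch of that pattern; the list type, container_of(), and the blkg_like struct are illustrative stand-ins, not the kernel implementations.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* recover the containing struct from a pointer to its embedded member */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct blkg_like {
	int id;
	struct list_head q_node;	/* linkage lives inside the object */
};

static void list_add_tail_(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head blkg_list = { &blkg_list, &blkg_list };
	struct blkg_like a = { .id = 1 }, b = { .id = 2 };

	list_add_tail_(&a.q_node, &blkg_list);
	list_add_tail_(&b.q_node, &blkg_list);

	/* hand-expanded list_for_each_entry(pos, &blkg_list, q_node) */
	for (struct list_head *p = blkg_list.next; p != &blkg_list; p = p->next) {
		struct blkg_like *pos = container_of(p, struct blkg_like, q_node);
		printf("blkg id %d\n", pos->id);
	}
	return 0;
}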

block/blk-cgroup.c

Lines changed: 33 additions & 33 deletions

@@ -108,10 +108,10 @@ static struct cgroup_subsys_state *blkcg_css(void)
 	return task_css(current, io_cgrp_id);
 }
 
-static bool blkcg_policy_enabled(struct gendisk *disk,
+static bool blkcg_policy_enabled(struct request_queue *q,
 				 const struct blkcg_policy *pol)
 {
-	return pol && test_bit(pol->plid, disk->blkcg_pols);
+	return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
 static void blkg_free_workfn(struct work_struct *work)
@@ -123,18 +123,18 @@ static void blkg_free_workfn(struct work_struct *work)
 	/*
 	 * pd_free_fn() can also be called from blkcg_deactivate_policy(),
 	 * in order to make sure pd_free_fn() is called in order, the deletion
-	 * of the list blkg->entry is delayed to here from blkg_destroy(), and
+	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
 	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
 	 * blkcg_deactivate_policy().
 	 */
-	mutex_lock(&blkg->disk->blkcg_mutex);
+	mutex_lock(&blkg->disk->queue->blkcg_mutex);
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 	if (blkg->parent)
 		blkg_put(blkg->parent);
-	list_del_init(&blkg->entry);
-	mutex_unlock(&blkg->disk->blkcg_mutex);
+	list_del_init(&blkg->q_node);
+	mutex_unlock(&blkg->disk->queue->blkcg_mutex);
 
 	put_disk(blkg->disk);
 	free_percpu(blkg->iostat_cpu);
@@ -269,7 +269,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 	get_device(disk_to_dev(disk));
 	blkg->disk = disk;
 
-	INIT_LIST_HEAD(&blkg->entry);
+	INIT_LIST_HEAD(&blkg->q_node);
 	spin_lock_init(&blkg->async_bio_lock);
 	bio_list_init(&blkg->async_bios);
 	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
@@ -285,7 +285,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 		struct blkcg_policy *pol = blkcg_policy[i];
 		struct blkg_policy_data *pd;
 
-		if (!blkcg_policy_enabled(disk, pol))
+		if (!blkcg_policy_enabled(disk->queue, pol))
 			continue;
 
 		/* alloc per-policy data and attach it to blkg */
@@ -371,7 +371,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
 	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
 	if (likely(!ret)) {
 		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
-		list_add(&blkg->entry, &disk->blkg_list);
+		list_add(&blkg->q_node, &disk->queue->blkg_list);
 
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
@@ -444,7 +444,7 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	while (true) {
 		struct blkcg *pos = blkcg;
 		struct blkcg *parent = blkcg_parent(blkcg);
-		struct blkcg_gq *ret_blkg = disk->root_blkg;
+		struct blkcg_gq *ret_blkg = q->root_blkg;
 
 		while (parent) {
 			blkg = blkg_lookup(parent, disk);
@@ -526,7 +526,7 @@ static void blkg_destroy_all(struct gendisk *disk)
 
 restart:
 	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry_safe(blkg, n, &disk->blkg_list, entry) {
+	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -545,7 +545,7 @@ static void blkg_destroy_all(struct gendisk *disk)
 		}
 	}
 
-	disk->root_blkg = NULL;
+	q->root_blkg = NULL;
 	spin_unlock_irq(&q->queue_lock);
 }
 
@@ -620,7 +620,7 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 		spin_lock_irq(&blkg->disk->queue->queue_lock);
-		if (blkcg_policy_enabled(blkg->disk, pol))
+		if (blkcg_policy_enabled(blkg->disk->queue, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
 		spin_unlock_irq(&blkg->disk->queue->queue_lock);
 	}
@@ -728,7 +728,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	rcu_read_lock();
 	spin_lock_irq(&q->queue_lock);
 
-	if (!blkcg_policy_enabled(disk, pol)) {
+	if (!blkcg_policy_enabled(q, pol)) {
 		ret = -EOPNOTSUPP;
 		goto fail_unlock;
 	}
@@ -771,7 +771,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	rcu_read_lock();
 	spin_lock_irq(&q->queue_lock);
 
-	if (!blkcg_policy_enabled(disk, pol)) {
+	if (!blkcg_policy_enabled(q, pol)) {
 		blkg_free(new_blkg);
 		ret = -EOPNOTSUPP;
 		goto fail_preloaded;
@@ -951,7 +951,7 @@ static void blkcg_fill_root_iostats(void)
 	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 	while ((dev = class_dev_iter_next(&iter))) {
 		struct block_device *bdev = dev_to_bdev(dev);
-		struct blkcg_gq *blkg = bdev->bd_disk->root_blkg;
+		struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
 		struct blkg_iostat tmp;
 		int cpu;
 		unsigned long flags;
@@ -1298,8 +1298,8 @@ int blkcg_init_disk(struct gendisk *disk)
 	bool preloaded;
 	int ret;
 
-	INIT_LIST_HEAD(&disk->blkg_list);
-	mutex_init(&disk->blkcg_mutex);
+	INIT_LIST_HEAD(&q->blkg_list);
+	mutex_init(&q->blkcg_mutex);
 
 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
 	if (!new_blkg)
@@ -1313,7 +1313,7 @@ int blkcg_init_disk(struct gendisk *disk)
 	blkg = blkg_create(&blkcg_root, disk, new_blkg);
 	if (IS_ERR(blkg))
 		goto err_unlock;
-	disk->root_blkg = blkg;
+	q->root_blkg = blkg;
 	spin_unlock_irq(&q->queue_lock);
 
 	if (preloaded)
@@ -1426,7 +1426,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 	struct blkcg_gq *blkg, *pinned_blkg = NULL;
 	int ret;
 
-	if (blkcg_policy_enabled(disk, pol))
+	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
 	if (queue_is_mq(q))
@@ -1435,7 +1435,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 	spin_lock_irq(&q->queue_lock);
 
 	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
-	list_for_each_entry_reverse(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
 		struct blkg_policy_data *pd;
 
 		if (blkg->pd[pol->plid])
@@ -1480,16 +1480,16 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 
 	/* all allocated, init in the same order */
 	if (pol->pd_init_fn)
-		list_for_each_entry_reverse(blkg, &disk->blkg_list, entry)
+		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
 			pol->pd_init_fn(blkg->pd[pol->plid]);
 
-	list_for_each_entry_reverse(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
 		if (pol->pd_online_fn)
 			pol->pd_online_fn(blkg->pd[pol->plid]);
 		blkg->pd[pol->plid]->online = true;
 	}
 
-	__set_bit(pol->plid, disk->blkcg_pols);
+	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
 
 	spin_unlock_irq(&q->queue_lock);
@@ -1505,7 +1505,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 enomem:
 	/* alloc failed, nothing's initialized yet, free everything */
 	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -1535,18 +1535,18 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 	struct request_queue *q = disk->queue;
 	struct blkcg_gq *blkg;
 
-	if (!blkcg_policy_enabled(disk, pol))
+	if (!blkcg_policy_enabled(q, pol))
 		return;
 
 	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 
-	mutex_lock(&disk->blkcg_mutex);
+	mutex_lock(&q->blkcg_mutex);
 	spin_lock_irq(&q->queue_lock);
 
-	__clear_bit(pol->plid, disk->blkcg_pols);
+	__clear_bit(pol->plid, q->blkcg_pols);
 
-	list_for_each_entry(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -1560,7 +1560,7 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 	}
 
 	spin_unlock_irq(&q->queue_lock);
-	mutex_unlock(&disk->blkcg_mutex);
+	mutex_unlock(&q->blkcg_mutex);
 
 	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
@@ -1957,7 +1957,7 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
  * Associate @bio with the blkg found by combining the css's blkg and the
  * request_queue of the @bio. An association failure is handled by walking up
  * the blkg tree. Therefore, the blkg associated can be anything between @blkg
- * and disk->root_blkg. This situation only happens when a cgroup is dying and
+ * and q->root_blkg. This situation only happens when a cgroup is dying and
  * then the remaining bios will spill to the closest alive blkg.
 *
 * A reference will be taken on the blkg and will be released when @bio is
@@ -1972,8 +1972,8 @@ void bio_associate_blkg_from_css(struct bio *bio,
 	if (css && css->parent) {
 		bio->bi_blkg = blkg_tryget_closest(bio, css);
 	} else {
-		blkg_get(bio->bi_bdev->bd_disk->root_blkg);
-		bio->bi_blkg = bio->bi_bdev->bd_disk->root_blkg;
+		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
+		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
 	}
 }
 EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
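
blkcg_policy_enabled(), __set_bit(), and __clear_bit() in the hunks above all manipulate a single per-queue bitmap with one bit per registered policy id. The sketch below re-implements DECLARE_BITMAP and the bit helpers in plain C purely for illustration; the kernel versions live in <linux/bitmap.h> and <linux/bitops.h>, and the plid value here is hypothetical.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BLKCG_MAX_POLS	6
#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

struct request_queue_like {
	DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS);	/* one bit per policy id */
};

static void set_bit_(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static bool test_bit_(int nr, const unsigned long *map)
{
	return map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

int main(void)
{
	struct request_queue_like q = { { 0 } };
	int plid = 2;				/* hypothetical policy id */

	set_bit_(plid, q.blkcg_pols);		/* as in blkcg_activate_policy() */
	printf("enabled: %d\n", test_bit_(plid, q.blkcg_pols));
	return 0;
}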

block/blk-cgroup.h

Lines changed: 2 additions & 2 deletions

@@ -54,7 +54,7 @@ struct blkg_iostat_set {
 /* association between a blk cgroup and a request queue */
 struct blkcg_gq {
 	struct gendisk			*disk;
-	struct list_head		entry;
+	struct list_head		q_node;
 	struct hlist_node		blkcg_node;
 	struct blkcg			*blkcg;
 
@@ -250,7 +250,7 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	if (blkcg == &blkcg_root)
-		return disk->root_blkg;
+		return disk->queue->root_blkg;
 
 	blkg = rcu_dereference(blkcg->blkg_hint);
 	if (blkg && blkg->disk == disk)
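
The blkg_lookup() hunk shows only the fast path: the root cgroup short-circuits to the queue's root_blkg, then a cached per-blkcg hint is tried. A simplified, RCU-free sketch of that shape follows; the *_like types are hypothetical stand-ins, plain pointer reads replace rcu_dereference(), and the radix-tree fallback is elided just as in the diff.

#include <stdio.h>

struct gendisk_like;

struct blkcg_gq_like {
	int id;
	struct gendisk_like *disk;
};

struct request_queue_like {
	struct blkcg_gq_like *root_blkg;
};

struct gendisk_like {
	struct request_queue_like *queue;
};

struct blkcg_like {
	int is_root;
	struct blkcg_gq_like *blkg_hint;	/* last successful lookup */
};

static struct blkcg_gq_like *
blkg_lookup_like(struct blkcg_like *blkcg, struct gendisk_like *disk)
{
	if (blkcg->is_root)			/* blkcg == &blkcg_root */
		return disk->queue->root_blkg;
	if (blkcg->blkg_hint && blkcg->blkg_hint->disk == disk)
		return blkcg->blkg_hint;	/* hint hit */
	return NULL;				/* radix-tree walk elided */
}

int main(void)
{
	struct blkcg_gq_like root_blkg = { .id = 0 };
	struct request_queue_like q = { .root_blkg = &root_blkg };
	struct gendisk_like disk = { .queue = &q };
	struct blkcg_like root_cg = { .is_root = 1 };

	printf("root blkg id: %d\n", blkg_lookup_like(&root_cg, &disk)->id);
	return 0;
}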

block/blk-iolatency.c

Lines changed: 1 addition & 1 deletion

@@ -665,7 +665,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(blkg, pos_css,
-				     blkiolat->rqos.disk->root_blkg) {
+				     blkiolat->rqos.disk->queue->root_blkg) {
 		struct iolatency_grp *iolat;
 		struct child_latency_info *lat_info;
 		unsigned long flags;

block/blk-throttle.c

Lines changed: 6 additions & 10 deletions

@@ -451,8 +451,7 @@ static void blk_throtl_update_limit_valid(struct throtl_data *td)
 	bool low_valid = false;
 
 	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css,
-				      td->queue->disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
 		struct throtl_grp *tg = blkg_to_tg(blkg);
 
 		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
@@ -1181,7 +1180,7 @@ static void throtl_pending_timer_fn(struct timer_list *t)
 
 	spin_lock_irq(&q->queue_lock);
 
-	if (!q->disk->root_blkg)
+	if (!q->root_blkg)
 		goto out_unlock;
 
 	if (throtl_can_upgrade(td, NULL))
@@ -1323,8 +1322,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
 	 * blk-throttle.
 	 */
 	blkg_for_each_descendant_pre(blkg, pos_css,
-			global ? tg->td->queue->disk->root_blkg :
-			tg_to_blkg(tg)) {
+			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
 		struct throtl_grp *this_tg = blkg_to_tg(blkg);
 		struct throtl_grp *parent_tg;
 
@@ -1719,7 +1717,7 @@ void blk_throtl_cancel_bios(struct gendisk *disk)
 	 * path need RCU protection and to prevent warning from lockdep.
 	 */
 	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css, disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
 		struct throtl_grp *tg = blkg_to_tg(blkg);
 		struct throtl_service_queue *sq = &tg->service_queue;
 
@@ -1873,8 +1871,7 @@ static bool throtl_can_upgrade(struct throtl_data *td,
 		return false;
 
 	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css,
-				      td->queue->disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
 		struct throtl_grp *tg = blkg_to_tg(blkg);
 
 		if (tg == this_tg)
@@ -1920,8 +1917,7 @@ static void throtl_upgrade_state(struct throtl_data *td)
 	td->low_upgrade_time = jiffies;
 	td->scale = 0;
 	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css,
-				      td->queue->disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
 		struct throtl_grp *tg = blkg_to_tg(blkg);
 		struct throtl_service_queue *sq = &tg->service_queue;
 
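
Nearly every hunk in this file sits inside a blkg_for_each_descendant_pre/post() walk. The post-order variant visits each child group before its parent, which is the order blk-throttle wants when it inspects or tears down per-group limits. The real macro iterates the css hierarchy under RCU; the toy recursion below, over a hypothetical fixed fan-out node type, only illustrates the visiting order.

#include <stdio.h>

struct tg_like {
	const char *name;
	struct tg_like *child[2];	/* toy fixed fan-out */
};

/* visit every descendant, then the node itself: "post" order */
static void walk_post(struct tg_like *tg, void (*fn)(struct tg_like *))
{
	if (!tg)
		return;
	for (int i = 0; i < 2; i++)
		walk_post(tg->child[i], fn);
	fn(tg);				/* parent after all children */
}

static void show(struct tg_like *tg)
{
	printf("%s\n", tg->name);
}

int main(void)
{
	struct tg_like leaf1 = { "leaf1" }, leaf2 = { "leaf2" };
	struct tg_like root = { "root", { &leaf1, &leaf2 } };

	walk_post(&root, show);		/* prints leaf1, leaf2, root */
	return 0;
}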

include/linux/blkdev.h

Lines changed: 6 additions & 6 deletions

@@ -163,12 +163,6 @@ struct gendisk {
 	struct timer_rand_state *random;
 	atomic_t sync_io;		/* RAID */
 	struct disk_events *ev;
-#ifdef CONFIG_BLK_CGROUP
-	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
-	struct blkcg_gq		*root_blkg;
-	struct list_head	blkg_list;
-	struct mutex		blkcg_mutex;
-#endif /* CONFIG_BLK_CGROUP */
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 	struct kobject integrity_kobj;
 #endif	/* CONFIG_BLK_DEV_INTEGRITY */
@@ -487,6 +481,12 @@ struct request_queue {
 	struct blk_mq_tags	*sched_shared_tags;
 
 	struct list_head	icq_list;
+#ifdef CONFIG_BLK_CGROUP
+	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
+	struct blkcg_gq		*root_blkg;
+	struct list_head	blkg_list;
+	struct mutex		blkcg_mutex;
+#endif
 
 	struct queue_limits	limits;
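
The net effect of this final hunk is that every consumer shown earlier reaches the cgroup state through the request_queue again rather than the gendisk. Taking the bio_associate_blkg_from_css() hunk as the example, the access path changes like this (both lines lifted from the diff above):

/* before the revert: cgroup state hung off the gendisk */
blkg_get(bio->bi_bdev->bd_disk->root_blkg);

/* after the revert: back on the request_queue */
blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);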
