
Commit a3166c5

YuKuai-huawei authored and axboe committed
blk-throttle: delay initialization until configuration
Other cgroup policies, like bfq and iocost, are lazily initialized when they are first configured for a device, but blk-throttle is initialized unconditionally from blkcg_init_disk(). Delay initialization of blk-throttle as well, to save some CPU and memory overhead when it is not configured. Note that once it is initialized, it can't be destroyed until disk removal, even if it is disabled.

Signed-off-by: Yu Kuai <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent bf20ab5 commit a3166c5

File tree (4 files changed, +88 −53 lines):

  block/blk-cgroup.c
  block/blk-sysfs.c
  block/blk-throttle.c
  block/blk-throttle.h

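The commit message describes a lazy-initialization pattern: leave the per-queue policy data unallocated until the first configuration write, and make the fast path a cheap NULL check. Below is a minimal userspace C sketch of that idea, not the kernel code itself; the two-field struct request_queue, throtl_activated(), set_limit(), and slice_us are illustrative stand-ins for the real API.

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-ins for the kernel's request_queue and throtl_data. */
    struct throtl_data { unsigned int slice_us; };

    struct request_queue {
        struct throtl_data *td;   /* NULL until the policy is first configured */
    };

    /* Mirrors blk_throtl_activated(): the hot path pays only a NULL check. */
    static int throtl_activated(struct request_queue *q)
    {
        return q->td != NULL;
    }

    /* Lazy init: allocate policy data on the first configuration write only. */
    static int throtl_init(struct request_queue *q)
    {
        struct throtl_data *td = calloc(1, sizeof(*td));
        if (!td)
            return -1;
        td->slice_us = 100000;    /* placeholder default, not a kernel value */
        q->td = td;
        return 0;
    }

    /* Configuration entry point, analogous to tg_set_conf()/tg_set_limit(). */
    static int set_limit(struct request_queue *q, unsigned int slice_us)
    {
        if (!throtl_activated(q) && throtl_init(q) != 0)
            return -1;
        q->td->slice_us = slice_us;
        return 0;
    }

    int main(void)
    {
        struct request_queue q = { .td = NULL };

        /* Before any configuration, the fast path bails out immediately. */
        printf("activated before config: %d\n", throtl_activated(&q));

        if (set_limit(&q, 250000) == 0)
            printf("activated after config: %d, slice=%u\n",
                   throtl_activated(&q), q.td->slice_us);

        free(q.td);
        return 0;
    }

As in the commit, initialization happens at most once per device: the sketch never tears the policy down after activation, matching the note that blk-throttle can't be destroyed until disk removal even if it is disabled.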

block/blk-cgroup.c

Lines changed: 0 additions & 6 deletions
@@ -1440,14 +1440,8 @@ int blkcg_init_disk(struct gendisk *disk)
 	if (ret)
 		goto err_destroy_all;
 
-	ret = blk_throtl_init(disk);
-	if (ret)
-		goto err_ioprio_exit;
-
 	return 0;
 
-err_ioprio_exit:
-	blk_ioprio_exit(disk);
 err_destroy_all:
 	blkg_destroy_all(disk);
 	return ret;

block/blk-sysfs.c

Lines changed: 0 additions & 1 deletion
@@ -807,7 +807,6 @@ int blk_register_queue(struct gendisk *disk)
 
 	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
 	wbt_enable_default(disk);
-	blk_throtl_register(disk);
 
 	/* Now everything is ready and send out KOBJ_ADD uevent */
 	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);

block/blk-throttle.c

Lines changed: 73 additions & 41 deletions
@@ -1211,6 +1211,53 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
 	}
 }
 
+static int blk_throtl_init(struct gendisk *disk)
+{
+	struct request_queue *q = disk->queue;
+	struct throtl_data *td;
+	int ret;
+
+	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
+	if (!td)
+		return -ENOMEM;
+
+	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
+	throtl_service_queue_init(&td->service_queue);
+
+	/*
+	 * Freeze queue before activating policy, to synchronize with IO path,
+	 * which is protected by 'q_usage_counter'.
+	 */
+	blk_mq_freeze_queue(disk->queue);
+	blk_mq_quiesce_queue(disk->queue);
+
+	q->td = td;
+	td->queue = q;
+
+	/* activate policy */
+	ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
+	if (ret) {
+		q->td = NULL;
+		kfree(td);
+		goto out;
+	}
+
+	if (blk_queue_nonrot(q))
+		td->throtl_slice = DFL_THROTL_SLICE_SSD;
+	else
+		td->throtl_slice = DFL_THROTL_SLICE_HD;
+	td->track_bio_latency = !queue_is_mq(q);
+	if (!td->track_bio_latency)
+		blk_stat_enable_accounting(q);
+
+out:
+	blk_mq_unquiesce_queue(disk->queue);
+	blk_mq_unfreeze_queue(disk->queue);
+
+	return ret;
+}
+
+
 static ssize_t tg_set_conf(struct kernfs_open_file *of,
 			   char *buf, size_t nbytes, loff_t off, bool is_u64)
 {
@@ -1222,6 +1269,16 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of,
 
 	blkg_conf_init(&ctx, buf);
 
+	ret = blkg_conf_open_bdev(&ctx);
+	if (ret)
+		goto out_finish;
+
+	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
+		ret = blk_throtl_init(ctx.bdev->bd_disk);
+		if (ret)
+			goto out_finish;
+	}
+
 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
 	if (ret)
 		goto out_finish;
@@ -1396,6 +1453,16 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
 
 	blkg_conf_init(&ctx, buf);
 
+	ret = blkg_conf_open_bdev(&ctx);
+	if (ret)
+		goto out_finish;
+
+	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
+		ret = blk_throtl_init(ctx.bdev->bd_disk);
+		if (ret)
+			goto out_finish;
+	}
+
 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
 	if (ret)
 		goto out_finish;
@@ -1488,6 +1555,9 @@ void blk_throtl_cancel_bios(struct gendisk *disk)
 	struct cgroup_subsys_state *pos_css;
 	struct blkcg_gq *blkg;
 
+	if (!blk_throtl_activated(q))
+		return;
+
 	spin_lock_irq(&q->queue_lock);
 	/*
 	 * queue_lock is held, rcu lock is not needed here technically.
@@ -1617,57 +1687,19 @@ bool __blk_throtl_bio(struct bio *bio)
 	return throttled;
 }
 
-int blk_throtl_init(struct gendisk *disk)
-{
-	struct request_queue *q = disk->queue;
-	struct throtl_data *td;
-	int ret;
-
-	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
-	if (!td)
-		return -ENOMEM;
-
-	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
-	throtl_service_queue_init(&td->service_queue);
-
-	q->td = td;
-	td->queue = q;
-
-	/* activate policy */
-	ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
-	if (ret)
-		kfree(td);
-	return ret;
-}
-
 void blk_throtl_exit(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
 
-	BUG_ON(!q->td);
+	if (!blk_throtl_activated(q))
+		return;
+
 	del_timer_sync(&q->td->service_queue.pending_timer);
 	throtl_shutdown_wq(q);
 	blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
 	kfree(q->td);
 }
 
-void blk_throtl_register(struct gendisk *disk)
-{
-	struct request_queue *q = disk->queue;
-	struct throtl_data *td;
-
-	td = q->td;
-	BUG_ON(!td);
-
-	if (blk_queue_nonrot(q))
-		td->throtl_slice = DFL_THROTL_SLICE_SSD;
-	else
-		td->throtl_slice = DFL_THROTL_SLICE_HD;
-	td->track_bio_latency = !queue_is_mq(q);
-	if (!td->track_bio_latency)
-		blk_stat_enable_accounting(q);
-}
-
 static int __init throtl_init(void)
 {
 	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);

block/blk-throttle.h

Lines changed: 15 additions & 5 deletions
@@ -150,23 +150,33 @@ static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
  * Internal throttling interface
  */
 #ifndef CONFIG_BLK_DEV_THROTTLING
-static inline int blk_throtl_init(struct gendisk *disk) { return 0; }
 static inline void blk_throtl_exit(struct gendisk *disk) { }
-static inline void blk_throtl_register(struct gendisk *disk) { }
 static inline bool blk_throtl_bio(struct bio *bio) { return false; }
 static inline void blk_throtl_cancel_bios(struct gendisk *disk) { }
 #else /* CONFIG_BLK_DEV_THROTTLING */
-int blk_throtl_init(struct gendisk *disk);
 void blk_throtl_exit(struct gendisk *disk);
-void blk_throtl_register(struct gendisk *disk);
 bool __blk_throtl_bio(struct bio *bio);
 void blk_throtl_cancel_bios(struct gendisk *disk);
 
+static inline bool blk_throtl_activated(struct request_queue *q)
+{
+	return q->td != NULL;
+}
+
 static inline bool blk_should_throtl(struct bio *bio)
 {
-	struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
+	struct throtl_grp *tg;
 	int rw = bio_data_dir(bio);
 
+	/*
+	 * This is called under bio_queue_enter(), and it's synchronized with
+	 * the activation of blk-throtl, which is protected by
+	 * blk_mq_freeze_queue().
+	 */
+	if (!blk_throtl_activated(bio->bi_bdev->bd_queue))
+		return false;
+
+	tg = blkg_to_tg(bio->bi_blkg);
 	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
 		if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
 			bio_set_flag(bio, BIO_CGROUP_ACCT);

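The new comment in blk_should_throtl() captures the synchronization that makes the lockless check safe: submitters test q->td while holding the queue usage gate (bio_queue_enter()), and blk_throtl_init() publishes q->td only while the queue is frozen, i.e. after every in-flight submitter has drained. The following is a toy userspace C11 model of that protocol, a sketch under simplified assumptions; should_throttle, activate_policy, in_flight, and frozen are illustrative stand-ins for bio_queue_enter()/q_usage_counter/blk_mq_freeze_queue(), not real kernel interfaces.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct queue {
        atomic_int  in_flight;   /* stands in for q_usage_counter */
        atomic_bool frozen;      /* stands in for the queue-frozen state */
        _Atomic(void *) td;      /* policy data, NULL until first configured */
    };

    /* Reader side, analogous to bio_queue_enter() + blk_should_throtl(). */
    static bool should_throttle(struct queue *q)
    {
        bool ret;

        /* enter the gate; back off while the queue is frozen */
        for (;;) {
            atomic_fetch_add(&q->in_flight, 1);
            if (!atomic_load(&q->frozen))
                break;
            atomic_fetch_sub(&q->in_flight, 1);
            while (atomic_load(&q->frozen))
                ;                          /* spin-wait: sketch of blocking */
        }

        /* policy pointer is stable while we hold the gate */
        ret = atomic_load(&q->td) != NULL;

        atomic_fetch_sub(&q->in_flight, 1);   /* leave the gate */
        return ret;
    }

    /* Writer side, analogous to blk_mq_freeze_queue() around activation. */
    static void activate_policy(struct queue *q, void *td)
    {
        atomic_store(&q->frozen, true);
        while (atomic_load(&q->in_flight) != 0)
            ;                              /* drain in-flight submitters */
        atomic_store(&q->td, td);          /* publish; no reader is inside */
        atomic_store(&q->frozen, false);
    }

    int main(void)
    {
        static int td_stub;                /* any non-NULL token works here */
        struct queue q = { 0 };

        /* single-threaded demo: not throttled before, throttled after */
        (void)should_throttle(&q);         /* false: td is still NULL */
        activate_policy(&q, &td_stub);
        return should_throttle(&q) ? 0 : 1;
    }

Because the writer drains the gate before storing the pointer, a reader that observes td == NULL is guaranteed the policy was not active when it entered, so the submission fast path needs no lock, only the NULL test that blk_throtl_activated() performs.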