@@ -1211,6 +1211,53 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
         }
 }

+static int blk_throtl_init(struct gendisk *disk)
+{
+        struct request_queue *q = disk->queue;
+        struct throtl_data *td;
+        int ret;
+
+        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
+        if (!td)
+                return -ENOMEM;
+
+        INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
+        throtl_service_queue_init(&td->service_queue);
+
+        /*
+         * Freeze queue before activating policy, to synchronize with IO path,
+         * which is protected by 'q_usage_counter'.
+         */
+        blk_mq_freeze_queue(disk->queue);
+        blk_mq_quiesce_queue(disk->queue);
+
+        q->td = td;
+        td->queue = q;
+
+        /* activate policy */
+        ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
+        if (ret) {
+                q->td = NULL;
+                kfree(td);
+                goto out;
+        }
+
+        if (blk_queue_nonrot(q))
+                td->throtl_slice = DFL_THROTL_SLICE_SSD;
+        else
+                td->throtl_slice = DFL_THROTL_SLICE_HD;
+        td->track_bio_latency = !queue_is_mq(q);
+        if (!td->track_bio_latency)
+                blk_stat_enable_accounting(q);
+
+out:
+        blk_mq_unquiesce_queue(disk->queue);
+        blk_mq_unfreeze_queue(disk->queue);
+
+        return ret;
+}
+
+
 static ssize_t tg_set_conf(struct kernfs_open_file *of,
                            char *buf, size_t nbytes, loff_t off, bool is_u64)
 {
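Note on blk_throtl_activated(): the helper is called throughout this patch but defined elsewhere (blk-throttle.h, not shown in this diff). Judging from the q->td = td / q->td = NULL pairing in blk_throtl_init() above, it presumably reduces to the sketch below; treat the exact definition as an assumption, not part of this commit.

/* Assumed helper, not part of this diff: throttling counts as
 * "activated" once blk_throtl_init() has published q->td. */
static inline bool blk_throtl_activated(struct request_queue *q)
{
        return q->td != NULL;
}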
@@ -1222,6 +1269,16 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of,

         blkg_conf_init(&ctx, buf);

+        ret = blkg_conf_open_bdev(&ctx);
+        if (ret)
+                goto out_finish;
+
+        if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
+                ret = blk_throtl_init(ctx.bdev->bd_disk);
+                if (ret)
+                        goto out_finish;
+        }
+
         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
         if (ret)
                 goto out_finish;
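The identical open-bdev-then-activate guard is added to tg_set_limit() in the next hunk. Condensed, the pattern both hunks introduce looks like the sketch below (error unwinding elided; names from the diff). blkg_conf_open_bdev() has to run before the blk_throtl_activated() check so that ctx.bdev is valid, and, as I read block/blk-cgroup.c, it returns early once ctx.bdev is already set, which keeps the second resolution inside blkg_conf_prep() harmless — verify against that file.

        /* Condensed shape of the lazy-activation pattern (sketch) */
        blkg_conf_init(&ctx, buf);
        ret = blkg_conf_open_bdev(&ctx);        /* resolve "MAJ:MIN ..." to ctx.bdev */
        if (!ret && !blk_throtl_activated(ctx.bdev->bd_queue))
                ret = blk_throtl_init(ctx.bdev->bd_disk);       /* first config write activates */
        if (!ret)
                ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);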
@@ -1396,6 +1453,16 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,

         blkg_conf_init(&ctx, buf);

+        ret = blkg_conf_open_bdev(&ctx);
+        if (ret)
+                goto out_finish;
+
+        if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
+                ret = blk_throtl_init(ctx.bdev->bd_disk);
+                if (ret)
+                        goto out_finish;
+        }
+
         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
         if (ret)
                 goto out_finish;
@@ -1488,6 +1555,9 @@ void blk_throtl_cancel_bios(struct gendisk *disk)
         struct cgroup_subsys_state *pos_css;
         struct blkcg_gq *blkg;

+        if (!blk_throtl_activated(q))
+                return;
+
         spin_lock_irq(&q->queue_lock);
         /*
          * queue_lock is held, rcu lock is not needed here technically.
@@ -1617,57 +1687,19 @@ bool __blk_throtl_bio(struct bio *bio)
         return throttled;
 }

-int blk_throtl_init(struct gendisk *disk)
-{
-        struct request_queue *q = disk->queue;
-        struct throtl_data *td;
-        int ret;
-
-        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
-        if (!td)
-                return -ENOMEM;
-
-        INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
-        throtl_service_queue_init(&td->service_queue);
-
-        q->td = td;
-        td->queue = q;
-
-        /* activate policy */
-        ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
-        if (ret)
-                kfree(td);
-        return ret;
-}
-
 void blk_throtl_exit(struct gendisk *disk)
 {
         struct request_queue *q = disk->queue;

-        BUG_ON(!q->td);
+        if (!blk_throtl_activated(q))
+                return;
+
         del_timer_sync(&q->td->service_queue.pending_timer);
         throtl_shutdown_wq(q);
         blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
         kfree(q->td);
 }

-void blk_throtl_register(struct gendisk *disk)
-{
-        struct request_queue *q = disk->queue;
-        struct throtl_data *td;
-
-        td = q->td;
-        BUG_ON(!td);
-
-        if (blk_queue_nonrot(q))
-                td->throtl_slice = DFL_THROTL_SLICE_SSD;
-        else
-                td->throtl_slice = DFL_THROTL_SLICE_HD;
-        td->track_bio_latency = !queue_is_mq(q);
-        if (!td->track_bio_latency)
-                blk_stat_enable_accounting(q);
-}
-
 static int __init throtl_init(void)
 {
         kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
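Taken together: blk_throtl_register() disappears because its body (slice defaults, latency tracking) moved into the new blk_throtl_init(), and the teardown paths become no-ops for queues that never saw a throttle configuration. A hypothetical caller, purely to illustrate the contract (the function name is made up, not from this diff):

/* Hypothetical teardown sketch: with lazy init, a disk that never had
 * limits written has q->td == NULL, so both calls below return early
 * via blk_throtl_activated() instead of tripping the old BUG_ON(). */
static void example_teardown(struct gendisk *disk)
{
        blk_throtl_cancel_bios(disk);   /* guarded, see the fourth hunk */
        blk_throtl_exit(disk);          /* frees q->td only when activated */
}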