Skip to content

Commit 1e4ab7b

Browse files
jthornber authored and Mike Snitzer committed
dm cache policy smq: ensure IO doesn't prevent cleaner policy progress
When using the cleaner policy to decommission the cache, there is never any writeback started from the cache, as it is constantly delayed due to normal I/O keeping the device busy. This meant @idle=false was always being passed to clean_target_met(). Fix this by adding a specific 'cleaner' flag that is set when the cleaner policy is configured. This flag serves to always allow the cleaner's writeback work to be queued until the cache is decommissioned (even if the cache isn't idle). Reported-by: David Jeffery <[email protected]> Fixes: b29d498 ("dm cache: significant rework to leverage dm-bio-prison-v2") Cc: [email protected] Signed-off-by: Joe Thornber <[email protected]> Signed-off-by: Mike Snitzer <[email protected]>
1 parent 7d5fff8 commit 1e4ab7b

File tree

1 file changed

+18
-10
lines changed

1 file changed

+18
-10
lines changed

drivers/md/dm-cache-policy-smq.c

Lines changed: 18 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -857,7 +857,13 @@ struct smq_policy {
857857

858858
struct background_tracker *bg_work;
859859

860-
bool migrations_allowed;
860+
bool migrations_allowed:1;
861+
862+
/*
863+
* If this is set the policy will try and clean the whole cache
864+
* even if the device is not idle.
865+
*/
866+
bool cleaner:1;
861867
};
862868

863869
/*----------------------------------------------------------------*/
@@ -1138,7 +1144,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
11381144
* Cache entries may not be populated. So we cannot rely on the
11391145
* size of the clean queue.
11401146
*/
1141-
if (idle) {
1147+
if (idle || mq->cleaner) {
11421148
/*
11431149
* We'd like to clean everything.
11441150
*/
@@ -1722,11 +1728,9 @@ static void calc_hotspot_params(sector_t origin_size,
17221728
*hotspot_block_size /= 2u;
17231729
}
17241730

1725-
static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
1726-
sector_t origin_size,
1727-
sector_t cache_block_size,
1728-
bool mimic_mq,
1729-
bool migrations_allowed)
1731+
static struct dm_cache_policy *
1732+
__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
1733+
bool mimic_mq, bool migrations_allowed, bool cleaner)
17301734
{
17311735
unsigned int i;
17321736
unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
@@ -1813,6 +1817,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
18131817
goto bad_btracker;
18141818

18151819
mq->migrations_allowed = migrations_allowed;
1820+
mq->cleaner = cleaner;
18161821

18171822
return &mq->policy;
18181823

@@ -1836,21 +1841,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
18361841
sector_t origin_size,
18371842
sector_t cache_block_size)
18381843
{
1839-
return __smq_create(cache_size, origin_size, cache_block_size, false, true);
1844+
return __smq_create(cache_size, origin_size, cache_block_size,
1845+
false, true, false);
18401846
}
18411847

18421848
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
18431849
sector_t origin_size,
18441850
sector_t cache_block_size)
18451851
{
1846-
return __smq_create(cache_size, origin_size, cache_block_size, true, true);
1852+
return __smq_create(cache_size, origin_size, cache_block_size,
1853+
true, true, false);
18471854
}
18481855

18491856
static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
18501857
sector_t origin_size,
18511858
sector_t cache_block_size)
18521859
{
1853-
return __smq_create(cache_size, origin_size, cache_block_size, false, false);
1860+
return __smq_create(cache_size, origin_size, cache_block_size,
1861+
false, false, true);
18541862
}
18551863

18561864
/*----------------------------------------------------------------*/

0 commit comments

Comments
 (0)