Skip to content

Commit

Permalink
dm cache policy smq: ensure IO doesn't prevent cleaner policy progress
Browse files Browse the repository at this point in the history
commit 1e4ab7b upstream.

When using the cleaner policy to decommission the cache, there is
never any writeback started from the cache as it is constantly delayed
due to normal I/O keeping the device busy. This meant @idle=false was
always being passed to clean_target_met().

Fix this by adding a specific 'cleaner' flag that is set when the
cleaner policy is configured. This flag serves to always allow the
cleaner's writeback work to be queued until the cache is
decommissioned (even if the cache isn't idle).

Reported-by: David Jeffery <djeffery@redhat.com>
Fixes: b29d498 ("dm cache: significant rework to leverage dm-bio-prison-v2")
Cc: stable@vger.kernel.org
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  • Loading branch information
jthornber authored and gregkh committed Aug 3, 2023
1 parent 507f70c commit 3d215ad
Showing 1 changed file with 18 additions and 10 deletions.
28 changes: 18 additions & 10 deletions drivers/md/dm-cache-policy-smq.c
Original file line number Diff line number Diff line change
Expand Up @@ -855,7 +855,13 @@ struct smq_policy {

struct background_tracker *bg_work;

bool migrations_allowed;
bool migrations_allowed:1;

/*
* If this is set the policy will try and clean the whole cache
* even if the device is not idle.
*/
bool cleaner:1;
};

/*----------------------------------------------------------------*/
Expand Down Expand Up @@ -1136,7 +1142,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
* Cache entries may not be populated. So we cannot rely on the
* size of the clean queue.
*/
if (idle) {
if (idle || mq->cleaner) {
/*
* We'd like to clean everything.
*/
Expand Down Expand Up @@ -1719,11 +1725,9 @@ static void calc_hotspot_params(sector_t origin_size,
*hotspot_block_size /= 2u;
}

static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size,
bool mimic_mq,
bool migrations_allowed)
static struct dm_cache_policy *
__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
bool mimic_mq, bool migrations_allowed, bool cleaner)
{
unsigned int i;
unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
Expand Down Expand Up @@ -1810,6 +1814,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
goto bad_btracker;

mq->migrations_allowed = migrations_allowed;
mq->cleaner = cleaner;

return &mq->policy;

Expand All @@ -1833,21 +1838,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size)
{
return __smq_create(cache_size, origin_size, cache_block_size, false, true);
return __smq_create(cache_size, origin_size, cache_block_size,
false, true, false);
}

/*
 * Create an smq policy instance that mimics the legacy "mq" policy.
 *
 * Per the __smq_create() signature: mimic_mq=true, migrations_allowed=true,
 * cleaner=false (normal writeback throttling applies).
 */
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size,
			    true, true, false);
}

/*
 * Create a "cleaner" policy instance, used to decommission the cache.
 *
 * Per the __smq_create() signature: mimic_mq=false, migrations_allowed=false,
 * cleaner=true — the cleaner flag keeps writeback work queued even when the
 * cache is not idle (see clean_target_met()).
 */
static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
					      sector_t origin_size,
					      sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size,
			    false, false, true);
}

/*----------------------------------------------------------------*/
Expand Down

0 comments on commit 3d215ad

Please sign in to comment.