Commit f5ff64c

boryas authored and kdave committed
btrfs: dynamic block_group reclaim threshold
We can currently recover allocated block_groups by:

- explicitly starting balance operations
- "auto reclaim" via bg_reclaim_threshold

The latter works by checking against a fixed threshold on frees. If we
pass from above the threshold to below, relocation triggers and the
block group will get reclaimed by the cleaner thread (assuming it is
still eligible).

Picking a threshold is challenging. Too high, and you end up trying to
reclaim very full block_groups, which is quite costly, and you don't do
reclaim on block_groups that don't get quite THAT full but could still
be quite fragmented, stranding a lot of space. Too low, and you
similarly miss out on reclaim even if you badly need it to avoid
running out of unallocated space, if you have heavily fragmented block
groups living above the threshold. No matter the threshold, it suffers
from a workload that happens to bounce around that threshold, which can
introduce arbitrary amounts of reclaim waste.

To improve this situation, introduce a dynamic threshold. The basic
idea behind this threshold is that it should be very lax when there is
plenty of unallocated space, and increasingly aggressive as we approach
zero unallocated space. To that end, it sets a target for unallocated
space (10 chunks) and then linearly increases the threshold as the
amount of space short of the target grows. The formula is:

    (target - unalloc) / target

I tested this by running it on three interesting workloads:

1. bounce allocations around X% full.
2. fill up all the way and introduce full fragmentation.
3. write in a fragmented way until the filesystem is just about full.

Workloads 1 and 2 attack the weaknesses of a fixed threshold; fixed
either works perfectly or fully falls apart, depending on the
threshold. Dynamic always handles these cases well.

Workload 3 attacks dynamic by checking whether it is too zealous to
reclaim in conditions with low unallocated and low unused space. It
tends to claw back 1GiB of unallocated fairly aggressively, but not
much more. Early versions of the dynamic threshold struggled on this
test.

Additional work could be done to intelligently ratchet up the urgency
of reclaim in very low unallocated conditions. Existing mechanisms are
already useless in that case anyway.

Reviewed-by: Josef Bacik <[email protected]>
Signed-off-by: Boris Burkov <[email protected]>
Signed-off-by: David Sterba <[email protected]>
1 parent 42f620a commit f5ff64c
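To make the ramp concrete, here is a minimal userspace sketch of the threshold formula described above. It is not the kernel implementation: the 1 GiB chunk size is an assumed value (the patch derives the effective data chunk size from the filesystem), and dynamic_threshold_pct() is a hypothetical helper name.

/*
 * Userspace sketch of the dynamic reclaim threshold formula; the 1 GiB
 * chunk size and the helper name are illustrative, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static int dynamic_threshold_pct(uint64_t unalloc, uint64_t target)
{
	/* (target - unalloc) / target as a percentage, clamped at 0 */
	uint64_t want = target > unalloc ? target - unalloc : 0;

	return (int)(100 * want / target);
}

int main(void)
{
	const uint64_t chunk = 1ULL << 30;	/* assumed 1 GiB data chunk */
	const uint64_t target = 10 * chunk;	/* 10-chunk unallocated target */

	for (int free_chunks = 10; free_chunks >= 0; free_chunks--)
		printf("%2d chunks unallocated -> threshold %3d%%\n", free_chunks,
		       dynamic_threshold_pct((uint64_t)free_chunks * chunk, target));
	return 0;
}

Under these assumptions the threshold climbs linearly by 10 points for each chunk the unallocated buffer falls short of the target.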

File tree

4 files changed, +169 -27 lines


fs/btrfs/block-group.c

Lines changed: 10 additions & 8 deletions
@@ -1764,24 +1764,21 @@ static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
 
 static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed)
 {
-	const struct btrfs_space_info *space_info = bg->space_info;
-	const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold);
+	const int thresh_pct = btrfs_calc_reclaim_threshold(bg->space_info);
+	u64 thresh_bytes = mult_perc(bg->length, thresh_pct);
 	const u64 new_val = bg->used;
 	const u64 old_val = new_val + bytes_freed;
-	u64 thresh;
 
-	if (reclaim_thresh == 0)
+	if (thresh_bytes == 0)
 		return false;
 
-	thresh = mult_perc(bg->length, reclaim_thresh);
-
 	/*
 	 * If we were below the threshold before don't reclaim, we are likely a
 	 * brand new block group and we don't want to relocate new block groups.
 	 */
-	if (old_val < thresh)
+	if (old_val < thresh_bytes)
 		return false;
-	if (new_val >= thresh)
+	if (new_val >= thresh_bytes)
 		return false;
 	return true;
 }
@@ -1843,6 +1840,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 		/* Don't race with allocators so take the groups_sem */
 		down_write(&space_info->groups_sem);
 
+		spin_lock(&space_info->lock);
 		spin_lock(&bg->lock);
 		if (bg->reserved || bg->pinned || bg->ro) {
 			/*
@@ -1852,6 +1850,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 			 * this block group.
 			 */
 			spin_unlock(&bg->lock);
+			spin_unlock(&space_info->lock);
 			up_write(&space_info->groups_sem);
 			goto next;
 		}
@@ -1870,6 +1869,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 			if (!btrfs_test_opt(fs_info, DISCARD_ASYNC))
 				btrfs_mark_bg_unused(bg);
 			spin_unlock(&bg->lock);
+			spin_unlock(&space_info->lock);
 			up_write(&space_info->groups_sem);
 			goto next;
 		}
@@ -1886,10 +1886,12 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 		 */
 		if (!should_reclaim_block_group(bg, bg->length)) {
 			spin_unlock(&bg->lock);
+			spin_unlock(&space_info->lock);
 			up_write(&space_info->groups_sem);
 			goto next;
 		}
 		spin_unlock(&bg->lock);
+		spin_unlock(&space_info->lock);
 
 		/*
 		 * Get out fast, in case we're read-only or unmounting the
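For intuition about when the rewritten should_reclaim_block_group() fires, here is a standalone sketch of the same crossing check with illustrative numbers; the MiB-scale sizes and the fixed 60% threshold are assumptions, whereas the kernel now computes the percentage dynamically.

/* Standalone sketch of the threshold-crossing check: reclaim triggers
 * only when a free moves usage from at-or-above the threshold to below
 * it, so brand-new (always-below) block groups are never relocated. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool crosses_threshold(uint64_t length, uint64_t used,
			      uint64_t bytes_freed, int thresh_pct)
{
	uint64_t thresh_bytes = length * thresh_pct / 100;
	uint64_t new_val = used;
	uint64_t old_val = new_val + bytes_freed;

	if (thresh_bytes == 0)
		return false;
	if (old_val < thresh_bytes)	/* was already below: likely a new bg */
		return false;
	if (new_val >= thresh_bytes)	/* still above after the free */
		return false;
	return true;
}

int main(void)
{
	/* 1024 MiB block group, 60% threshold = 614 MiB */
	printf("%d\n", crosses_threshold(1024, 500, 200, 60)); /* 1: 700 -> 500 crosses */
	printf("%d\n", crosses_threshold(1024, 700, 200, 60)); /* 0: 900 -> 700 stays above */
	printf("%d\n", crosses_threshold(1024, 100, 50, 60));  /* 0: was already below */
	return 0;
}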

fs/btrfs/space-info.c

Lines changed: 109 additions & 18 deletions
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include <linux/minmax.h>
 #include "misc.h"
 #include "ctree.h"
 #include "space-info.h"
@@ -190,6 +191,8 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
  */
 #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH			(75)
 
+#define BTRFS_UNALLOC_BLOCK_GROUP_TARGET			(10ULL)
+
 /*
  * Calculate chunk size depending on volume type (regular or zoned).
  */
@@ -341,11 +344,32 @@ struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
 	return NULL;
 }
 
+static u64 calc_effective_data_chunk_size(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_space_info *data_sinfo;
+	u64 data_chunk_size;
+
+	/*
+	 * Calculate the data_chunk_size, space_info->chunk_size is the
+	 * "optimal" chunk size based on the fs size. However when we actually
+	 * allocate the chunk we will strip this down further, making it no
+	 * more than 10% of the disk or 1G, whichever is smaller.
+	 *
+	 * On the zoned mode, we need to use zone_size (= data_sinfo->chunk_size)
+	 * as it is.
+	 */
+	data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
+	if (btrfs_is_zoned(fs_info))
+		return data_sinfo->chunk_size;
+	data_chunk_size = min(data_sinfo->chunk_size,
+			      mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
+	return min_t(u64, data_chunk_size, SZ_1G);
+}
+
 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
 				     struct btrfs_space_info *space_info,
 				     enum btrfs_reserve_flush_enum flush)
 {
-	struct btrfs_space_info *data_sinfo;
 	u64 profile;
 	u64 avail;
 	u64 data_chunk_size;
@@ -369,23 +393,7 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
 	if (avail == 0)
 		return 0;
 
-	/*
-	 * Calculate the data_chunk_size, space_info->chunk_size is the
-	 * "optimal" chunk size based on the fs size. However when we actually
-	 * allocate the chunk we will strip this down further, making it no more
-	 * than 10% of the disk or 1G, whichever is smaller.
-	 *
-	 * On the zoned mode, we need to use zone_size (=
-	 * data_sinfo->chunk_size) as it is.
-	 */
-	data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
-	if (!btrfs_is_zoned(fs_info)) {
-		data_chunk_size = min(data_sinfo->chunk_size,
-				      mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
-		data_chunk_size = min_t(u64, data_chunk_size, SZ_1G);
-	} else {
-		data_chunk_size = data_sinfo->chunk_size;
-	}
+	data_chunk_size = calc_effective_data_chunk_size(fs_info);
 
 	/*
 	 * Since data allocations immediately use block groups as part of the
@@ -1878,3 +1886,86 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
 
 	return free_bytes;
 }
+
+static u64 calc_pct_ratio(u64 x, u64 y)
+{
+	int err;
+
+	if (!y)
+		return 0;
+again:
+	err = check_mul_overflow(100, x, &x);
+	if (err)
+		goto lose_precision;
+	return div64_u64(x, y);
+lose_precision:
+	x >>= 10;
+	y >>= 10;
+	if (!y)
+		y = 1;
+	goto again;
+}
+
+/*
+ * A reasonable buffer for unallocated space is 10 data block_groups.
+ * If we claw this back repeatedly, we can still achieve efficient
+ * utilization when near full, and not do too much reclaim while
+ * always maintaining a solid buffer for workloads that quickly
+ * allocate and pressure the unallocated space.
+ */
+static u64 calc_unalloc_target(struct btrfs_fs_info *fs_info)
+{
+	return BTRFS_UNALLOC_BLOCK_GROUP_TARGET * calc_effective_data_chunk_size(fs_info);
+}
+
+/*
+ * The fundamental goal of automatic reclaim is to protect the filesystem's
+ * unallocated space and thus minimize the probability of the filesystem going
+ * read only when a metadata allocation failure causes a transaction abort.
+ *
+ * However, relocations happen into the space_info's unused space, therefore
+ * automatic reclaim must also back off as that space runs low. There is no
+ * value in doing trivial "relocations" of re-writing the same block group
+ * into a fresh one.
+ *
+ * Furthermore, we want to avoid doing too much reclaim even if there are good
+ * candidates. This is because the allocator is pretty good at filling up the
+ * holes with writes. So we want to do just enough reclaim to try and stay
+ * safe from running out of unallocated space but not be wasteful about it.
+ *
+ * Therefore, the dynamic reclaim threshold is calculated as follows:
+ * - calculate a target unallocated amount of 10 block group sized chunks
+ * - ratchet up the intensity of reclaim depending on how far we are from
+ *   that target by using a formula of (target - unalloc) / target to set the threshold.
+ *
+ * Typically with 10 block groups as the target, the discrete values this comes
+ * out to are 0, 10, 20, ... , 80, 90, and 99.
+ */
+static int calc_dynamic_reclaim_threshold(struct btrfs_space_info *space_info)
+{
+	struct btrfs_fs_info *fs_info = space_info->fs_info;
+	u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
+	u64 target = calc_unalloc_target(fs_info);
+	u64 alloc = space_info->total_bytes;
+	u64 used = btrfs_space_info_used(space_info, false);
+	u64 unused = alloc - used;
+	u64 want = target > unalloc ? target - unalloc : 0;
+	u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);
+	/* Cast to int is OK because want <= target */
+	int ratio = calc_pct_ratio(want, target);
+
+	/* If we have no unused space, don't bother, it won't work anyway */
+	if (unused < data_chunk_size)
+		return 0;
+
+	return ratio;
+}
+
+int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info)
+{
+	lockdep_assert_held(&space_info->lock);
+
+	if (READ_ONCE(space_info->dynamic_reclaim))
+		return calc_dynamic_reclaim_threshold(space_info);
+	return READ_ONCE(space_info->bg_reclaim_threshold);
+}
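One detail worth illustrating is calc_pct_ratio(): it computes 100 * x / y in u64 arithmetic and, if the multiply would overflow, drops 10 bits from both operands and retries, trading precision for safety. Below is a userspace sketch of that strategy, assuming the GCC/Clang __builtin_mul_overflow() builtin in place of the kernel's check_mul_overflow(); it also keeps the product in a temporary so a failed multiply leaves x intact for the retry.

/* Userspace sketch of calc_pct_ratio()'s overflow strategy. */
#include <stdint.h>
#include <stdio.h>

static uint64_t pct_ratio(uint64_t x, uint64_t y)
{
	uint64_t prod;

	if (!y)
		return 0;
again:
	if (__builtin_mul_overflow((uint64_t)100, x, &prod)) {
		/* Lose precision: drop 10 bits from both operands and retry. */
		x >>= 10;
		y >>= 10;
		if (!y)
			y = 1;
		goto again;
	}
	return prod / y;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)pct_ratio(3, 10));	/* 30 */
	printf("%llu\n", (unsigned long long)pct_ratio(UINT64_MAX / 2,
						       UINT64_MAX));	/* 49: ~50 after precision loss */
	return 0;
}

The precision loss only matters for inputs above roughly 2^57, where a percentage that is off by one is harmless for a reclaim heuristic.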

fs/btrfs/space-info.h

Lines changed: 8 additions & 0 deletions
@@ -184,6 +184,12 @@ struct btrfs_space_info {
 	 * Exposed in /sys/fs/<uuid>/allocation/<type>/reclaim_errors
 	 */
 	u64 reclaim_errors;
+
+	/*
+	 * If true, use the dynamic relocation threshold, instead of the
+	 * fixed bg_reclaim_threshold.
+	 */
+	bool dynamic_reclaim;
 };
 
 struct reserve_ticket {
@@ -266,4 +272,6 @@ void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info);
 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info);
 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
 
+int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info);
+
 #endif /* BTRFS_SPACE_INFO_H */

fs/btrfs/sysfs.c

Lines changed: 42 additions & 1 deletion
@@ -905,8 +905,12 @@ static ssize_t btrfs_sinfo_bg_reclaim_threshold_show(struct kobject *kobj,
 						     char *buf)
 {
 	struct btrfs_space_info *space_info = to_space_info(kobj);
+	ssize_t ret;
 
-	return sysfs_emit(buf, "%d\n", READ_ONCE(space_info->bg_reclaim_threshold));
+	spin_lock(&space_info->lock);
+	ret = sysfs_emit(buf, "%d\n", btrfs_calc_reclaim_threshold(space_info));
+	spin_unlock(&space_info->lock);
+	return ret;
 }
 
 static ssize_t btrfs_sinfo_bg_reclaim_threshold_store(struct kobject *kobj,
@@ -917,6 +921,9 @@ static ssize_t btrfs_sinfo_bg_reclaim_threshold_store(struct kobject *kobj,
 	int thresh;
 	int ret;
 
+	if (READ_ONCE(space_info->dynamic_reclaim))
+		return -EINVAL;
+
 	ret = kstrtoint(buf, 10, &thresh);
 	if (ret)
 		return ret;
@@ -933,6 +940,39 @@ BTRFS_ATTR_RW(space_info, bg_reclaim_threshold,
 	       btrfs_sinfo_bg_reclaim_threshold_show,
 	       btrfs_sinfo_bg_reclaim_threshold_store);
 
+static ssize_t btrfs_sinfo_dynamic_reclaim_show(struct kobject *kobj,
+						struct kobj_attribute *a,
+						char *buf)
+{
+	struct btrfs_space_info *space_info = to_space_info(kobj);
+
+	return sysfs_emit(buf, "%d\n", READ_ONCE(space_info->dynamic_reclaim));
+}
+
+static ssize_t btrfs_sinfo_dynamic_reclaim_store(struct kobject *kobj,
+						 struct kobj_attribute *a,
+						 const char *buf, size_t len)
+{
+	struct btrfs_space_info *space_info = to_space_info(kobj);
+	int dynamic_reclaim;
+	int ret;
+
+	ret = kstrtoint(buf, 10, &dynamic_reclaim);
+	if (ret)
+		return ret;
+
+	if (dynamic_reclaim < 0)
+		return -EINVAL;
+
+	WRITE_ONCE(space_info->dynamic_reclaim, dynamic_reclaim != 0);
+
+	return len;
+}
+
+BTRFS_ATTR_RW(space_info, dynamic_reclaim,
+	      btrfs_sinfo_dynamic_reclaim_show,
+	      btrfs_sinfo_dynamic_reclaim_store);
+
 /*
  * Allocation information about block group types.
  *
@@ -950,6 +990,7 @@ static struct attribute *space_info_attrs[] = {
 	BTRFS_ATTR_PTR(space_info, disk_used),
 	BTRFS_ATTR_PTR(space_info, disk_total),
 	BTRFS_ATTR_PTR(space_info, bg_reclaim_threshold),
+	BTRFS_ATTR_PTR(space_info, dynamic_reclaim),
 	BTRFS_ATTR_PTR(space_info, chunk_size),
 	BTRFS_ATTR_PTR(space_info, size_classes),
 	BTRFS_ATTR_PTR(space_info, reclaim_count),
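A possible usage sketch of the new knob from userspace, assuming the standard per-space_info sysfs layout; the <FSID> path component is a placeholder for the filesystem UUID and is left unresolved. With dynamic_reclaim enabled, bg_reclaim_threshold reads back the computed dynamic value, and writes to it fail with EINVAL per the store handler above.

/*
 * Sketch only: enable dynamic reclaim for the data space_info and read
 * back the effective threshold. Replace <FSID> with a real UUID.
 */
#include <stdio.h>

#define SINFO_DIR "/sys/fs/btrfs/<FSID>/allocation/data"

int main(void)
{
	char buf[16];
	FILE *f;

	f = fopen(SINFO_DIR "/dynamic_reclaim", "w");
	if (f) {
		fputs("1\n", f);	/* nonzero enables, 0 disables */
		fclose(f);
	}

	f = fopen(SINFO_DIR "/bg_reclaim_threshold", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("effective threshold: %s", buf);	/* percent */
		fclose(f);
	}
	return 0;
}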
