@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include "linux/spinlock.h"
 #include <linux/minmax.h>
 #include "misc.h"
 #include "ctree.h"
|
@@ -1915,7 +1916,9 @@ static u64 calc_pct_ratio(u64 x, u64 y)
  */
 static u64 calc_unalloc_target(struct btrfs_fs_info *fs_info)
 {
-	return BTRFS_UNALLOC_BLOCK_GROUP_TARGET * calc_effective_data_chunk_size(fs_info);
+	u64 chunk_sz = calc_effective_data_chunk_size(fs_info);
+
+	return BTRFS_UNALLOC_BLOCK_GROUP_TARGET * chunk_sz;
 }
 
 /*
@@ -1951,14 +1954,13 @@ static int calc_dynamic_reclaim_threshold(struct btrfs_space_info *space_info)
 	u64 unused = alloc - used;
 	u64 want = target > unalloc ? target - unalloc : 0;
 	u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);
-	/* Cast to int is OK because want <= target */
-	int ratio = calc_pct_ratio(want, target);
 
-	/* If we have no unused space, don't bother, it won't work anyway */
+	/* If we have no unused space, don't bother, it won't work anyway. */
 	if (unused < data_chunk_size)
 		return 0;
 
-	return ratio;
+	/* Cast to int is OK because want <= target. */
+	return calc_pct_ratio(want, target);
 }
 
 int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info)
@@ -2000,16 +2002,54 @@ static int do_reclaim_sweep(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
+void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
+{
+	u64 chunk_sz = calc_effective_data_chunk_size(space_info->fs_info);
+
+	lockdep_assert_held(&space_info->lock);
+	space_info->reclaimable_bytes += bytes;
+
+	if (space_info->reclaimable_bytes >= chunk_sz)
+		btrfs_set_periodic_reclaim_ready(space_info, true);
+}
+
+void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready)
+{
+	lockdep_assert_held(&space_info->lock);
+	if (!READ_ONCE(space_info->periodic_reclaim))
+		return;
+	if (ready != space_info->periodic_reclaim_ready) {
+		space_info->periodic_reclaim_ready = ready;
+		if (!ready)
+			space_info->reclaimable_bytes = 0;
+	}
+}
+
+bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
+{
+	bool ret;
+
+	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
+		return false;
+	if (!READ_ONCE(space_info->periodic_reclaim))
+		return false;
+
+	spin_lock(&space_info->lock);
+	ret = space_info->periodic_reclaim_ready;
+	btrfs_set_periodic_reclaim_ready(space_info, false);
+	spin_unlock(&space_info->lock);
+
+	return ret;
+}
+
 int btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info)
 {
 	int ret;
 	int raid;
 	struct btrfs_space_info *space_info;
 
 	list_for_each_entry(space_info, &fs_info->space_info, list) {
-		if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
-			continue;
-		if (!READ_ONCE(space_info->periodic_reclaim))
+		if (!btrfs_should_periodic_reclaim(space_info))
 			continue;
 		for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
 			ret = do_reclaim_sweep(fs_info, space_info, raid);