@@ -1022,6 +1022,13 @@ static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
 	}
 }
 
+static struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info)
+{
+	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE))
+		return fs_info->block_group_root;
+	return btrfs_extent_root(fs_info, 0);
+}
+
 static int remove_block_group_item(struct btrfs_trans_handle *trans,
 				   struct btrfs_path *path,
 				   struct btrfs_block_group *block_group)
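Context for the new helper: btrfs_block_group_root() picks the tree that stores block group items, i.e. the dedicated block group tree when the BLOCK_GROUP_TREE compat_ro feature is set, otherwise the extent root. The body of remove_block_group_item() is not part of this hunk, so the following is only a hedged sketch of how a caller like it would typically use the helper, following the usual btrfs search-and-delete pattern (btrfs_search_slot()/btrfs_del_item() and BTRFS_BLOCK_GROUP_ITEM_KEY are standard btrfs identifiers, not quoted from this commit):

	struct btrfs_root *root = btrfs_block_group_root(fs_info);
	struct btrfs_key key;
	int ret;

	/* Block group items are keyed by (start, BLOCK_GROUP_ITEM, length). */
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;
	return btrfs_del_item(trans, root, path);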
@@ -1757,24 +1764,21 @@ static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
 
 static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed)
 {
-	const struct btrfs_space_info *space_info = bg->space_info;
-	const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold);
+	const int thresh_pct = btrfs_calc_reclaim_threshold(bg->space_info);
+	u64 thresh_bytes = mult_perc(bg->length, thresh_pct);
 	const u64 new_val = bg->used;
 	const u64 old_val = new_val + bytes_freed;
-	u64 thresh;
 
-	if (reclaim_thresh == 0)
+	if (thresh_bytes == 0)
 		return false;
 
-	thresh = mult_perc(bg->length, reclaim_thresh);
-
 	/*
	 * If we were below the threshold before don't reclaim, we are likely a
	 * brand new block group and we don't want to relocate new block groups.
	 */
-	if (old_val < thresh)
+	if (old_val < thresh_bytes)
 		return false;
-	if (new_val >= thresh)
+	if (new_val >= thresh_bytes)
 		return false;
 	return true;
 }
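A standalone restatement of the decision above, compilable outside the kernel: mult_perc() (percentage of a value, from linux/math.h) is approximated with plain integer math, and the threshold percentage is a made-up input rather than the real btrfs_calc_reclaim_threshold() result.

#include <stdbool.h>
#include <stdint.h>

/* Reclaim only when a free makes usage cross the threshold downward. */
static bool crosses_reclaim_threshold(uint64_t length, uint64_t used,
				      uint64_t bytes_freed, int thresh_pct)
{
	uint64_t thresh_bytes = length / 100 * thresh_pct;	/* ~mult_perc() */
	uint64_t new_val = used;			/* usage after the free */
	uint64_t old_val = new_val + bytes_freed;	/* usage before the free */

	if (thresh_bytes == 0)		/* reclaim effectively disabled */
		return false;
	if (old_val < thresh_bytes)	/* already below: likely a fresh block group */
		return false;
	if (new_val >= thresh_bytes)	/* still above threshold after the free */
		return false;
	return true;
}

For a 1 GiB block group with a 30% threshold (~307 MiB), freeing 150 MiB that takes usage from 400 MiB down to 250 MiB returns true, while the same free from 700 MiB down to 550 MiB returns false because usage never drops below the threshold.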
@@ -1821,6 +1825,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 	list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
 	while (!list_empty(&fs_info->reclaim_bgs)) {
 		u64 zone_unusable;
+		u64 reclaimed;
 		int ret = 0;
 
 		bg = list_first_entry(&fs_info->reclaim_bgs,
@@ -1834,6 +1839,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 		/* Don't race with allocators so take the groups_sem */
 		down_write(&space_info->groups_sem);
 
+		spin_lock(&space_info->lock);
 		spin_lock(&bg->lock);
 		if (bg->reserved || bg->pinned || bg->ro) {
 			/*
@@ -1843,6 +1849,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 			 * this block group.
 			 */
 			spin_unlock(&bg->lock);
+			spin_unlock(&space_info->lock);
 			up_write(&space_info->groups_sem);
 			goto next;
 		}
@@ -1861,6 +1868,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 			if (!btrfs_test_opt(fs_info, DISCARD_ASYNC))
 				btrfs_mark_bg_unused(bg);
 			spin_unlock(&bg->lock);
+			spin_unlock(&space_info->lock);
 			up_write(&space_info->groups_sem);
 			goto next;
 
@@ -1877,10 +1885,12 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 		 */
 		if (!should_reclaim_block_group(bg, bg->length)) {
 			spin_unlock(&bg->lock);
+			spin_unlock(&space_info->lock);
 			up_write(&space_info->groups_sem);
 			goto next;
 		}
 		spin_unlock(&bg->lock);
+		spin_unlock(&space_info->lock);
 
 		/*
 		 * Get out fast, in case we're read-only or unmounting the
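Taken together, the locking hunks above nest the space_info spinlock outside the per-block-group spinlock, both under the write side of groups_sem, presumably so the threshold calculation and periodic-reclaim state are read consistently while allocators are held off. A minimal ordering sketch using only names that appear in the diff (not a full excerpt of the function):

	down_write(&space_info->groups_sem);	/* don't race with allocators */
	spin_lock(&space_info->lock);		/* added: space-info reclaim state */
	spin_lock(&bg->lock);			/* per-block-group counters */

	/* ... bail-out checks and should_reclaim_block_group(bg, bg->length) ... */

	spin_unlock(&bg->lock);
	spin_unlock(&space_info->lock);		/* added: released in reverse order */
	up_write(&space_info->groups_sem);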
@@ -1913,15 +1923,26 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 				div64_u64(bg->used * 100, bg->length),
 				div64_u64(zone_unusable * 100, bg->length));
 		trace_btrfs_reclaim_block_group(bg);
+		reclaimed = bg->used;
 		ret = btrfs_relocate_chunk(fs_info, bg->start);
 		if (ret) {
 			btrfs_dec_block_group_ro(bg);
 			btrfs_err(fs_info, "error relocating chunk %llu",
 				  bg->start);
+			spin_lock(&space_info->lock);
+			space_info->reclaim_count++;
+			if (READ_ONCE(space_info->periodic_reclaim))
+				space_info->periodic_reclaim_ready = false;
+			spin_unlock(&space_info->lock);
+		} else {
+			spin_lock(&space_info->lock);
+			space_info->reclaim_count++;
+			space_info->reclaim_bytes += reclaimed;
+			spin_unlock(&space_info->lock);
 		}
 
 next:
-		if (ret)
+		if (ret && !READ_ONCE(space_info->periodic_reclaim))
 			btrfs_mark_bg_to_reclaim(bg);
 		btrfs_put_block_group(bg);
 
@@ -1948,6 +1969,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 
 void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
 {
+	btrfs_reclaim_sweep(fs_info);
 	spin_lock(&fs_info->unused_bgs_lock);
 	if (!list_empty(&fs_info->reclaim_bgs))
 		queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
@@ -3646,9 +3668,12 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
 		old_val += num_bytes;
 		cache->used = old_val;
 		cache->reserved -= num_bytes;
+		cache->reclaim_mark = 0;
 		space_info->bytes_reserved -= num_bytes;
 		space_info->bytes_used += num_bytes;
 		space_info->disk_used += num_bytes * factor;
+		if (READ_ONCE(space_info->periodic_reclaim))
+			btrfs_space_info_update_reclaimable(space_info, -num_bytes);
 		spin_unlock(&cache->lock);
 		spin_unlock(&space_info->lock);
 	} else {
@@ -3658,8 +3683,10 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
 		btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes);
 		space_info->bytes_used -= num_bytes;
 		space_info->disk_used -= num_bytes * factor;
-
-		reclaim = should_reclaim_block_group(cache, num_bytes);
+		if (READ_ONCE(space_info->periodic_reclaim))
+			btrfs_space_info_update_reclaimable(space_info, num_bytes);
+		else
+			reclaim = should_reclaim_block_group(cache, num_bytes);
 
 		spin_unlock(&cache->lock);
 		spin_unlock(&space_info->lock);