@@ -2689,16 +2689,34 @@ static int fiemap_process_hole(struct btrfs_inode *inode,
 	 * it beyond i_size.
 	 */
 	while (cur_offset < end && cur_offset < i_size) {
+		struct extent_state *cached_state = NULL;
 		u64 delalloc_start;
 		u64 delalloc_end;
 		u64 prealloc_start;
+		u64 lockstart;
+		u64 lockend;
 		u64 prealloc_len = 0;
 		bool delalloc;
 
+		lockstart = round_down(cur_offset, inode->root->fs_info->sectorsize);
+		lockend = round_up(end, inode->root->fs_info->sectorsize);
+
+		/*
+		 * We are only locking for the delalloc range because that's the
+		 * only thing that can change here.  With fiemap we have a lock
+		 * on the inode, so no buffered or direct writes can happen.
+		 *
+		 * However mmaps and normal page writeback will cause this to
+		 * change arbitrarily.  We have to lock the extent lock here to
+		 * make sure that nobody messes with the tree while we're doing
+		 * btrfs_find_delalloc_in_range.
+		 */
+		lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 		delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
 							delalloc_cached_state,
 							&delalloc_start,
 							&delalloc_end);
+		unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 		if (!delalloc)
 			break;
 
@@ -2866,15 +2884,15 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
 			 u64 start, u64 len)
 {
 	const u64 ino = btrfs_ino(inode);
-	struct extent_state *cached_state = NULL;
 	struct extent_state *delalloc_cached_state = NULL;
 	struct btrfs_path *path;
 	struct fiemap_cache cache = { 0 };
 	struct btrfs_backref_share_check_ctx *backref_ctx;
 	u64 last_extent_end;
 	u64 prev_extent_end;
-	u64 lockstart;
-	u64 lockend;
+	u64 range_start;
+	u64 range_end;
+	const u64 sectorsize = inode->root->fs_info->sectorsize;
 	bool stopped = false;
 	int ret;
 
@@ -2885,20 +2903,19 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
 		goto out;
 	}
 
-	lockstart = round_down(start, inode->root->fs_info->sectorsize);
-	lockend = round_up(start + len, inode->root->fs_info->sectorsize);
-	prev_extent_end = lockstart;
+	range_start = round_down(start, sectorsize);
+	range_end = round_up(start + len, sectorsize);
+	prev_extent_end = range_start;
 
 	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
-	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 
 	ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
 	if (ret < 0)
 		goto out_unlock;
 	btrfs_release_path(path);
 
 	path->reada = READA_FORWARD;
-	ret = fiemap_search_slot(inode, path, lockstart);
+	ret = fiemap_search_slot(inode, path, range_start);
 	if (ret < 0) {
 		goto out_unlock;
 	} else if (ret > 0) {
@@ -2910,7 +2927,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
 		goto check_eof_delalloc;
 	}
 
-	while (prev_extent_end < lockend) {
+	while (prev_extent_end < range_end) {
 		struct extent_buffer *leaf = path->nodes[0];
 		struct btrfs_file_extent_item *ei;
 		struct btrfs_key key;
@@ -2933,19 +2950,19 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
 		 * The first iteration can leave us at an extent item that ends
 		 * before our range's start. Move to the next item.
 		 */
-		if (extent_end <= lockstart)
+		if (extent_end <= range_start)
 			goto next_item;
 
 		backref_ctx->curr_leaf_bytenr = leaf->start;
 
 		/* We have in implicit hole (NO_HOLES feature enabled). */
 		if (prev_extent_end < key.offset) {
-			const u64 range_end = min(key.offset, lockend) - 1;
+			const u64 hole_end = min(key.offset, range_end) - 1;
 
 			ret = fiemap_process_hole(inode, fieinfo, &cache,
 						  &delalloc_cached_state,
 						  backref_ctx, 0, 0, 0,
-						  prev_extent_end, range_end);
+						  prev_extent_end, hole_end);
 			if (ret < 0) {
 				goto out_unlock;
 			} else if (ret > 0) {
@@ -2955,7 +2972,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
 		}
 
 		/* We've reached the end of the fiemap range, stop. */
-		if (key.offset >= lockend) {
+		if (key.offset >= range_end) {
 			stopped = true;
 			break;
 		}
@@ -3049,29 +3066,41 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
 	btrfs_free_path(path);
 	path = NULL;
 
-	if (!stopped && prev_extent_end < lockend) {
+	if (!stopped && prev_extent_end < range_end) {
 		ret = fiemap_process_hole(inode, fieinfo, &cache,
 					  &delalloc_cached_state, backref_ctx,
-					  0, 0, 0, prev_extent_end, lockend - 1);
+					  0, 0, 0, prev_extent_end, range_end - 1);
 		if (ret < 0)
 			goto out_unlock;
-		prev_extent_end = lockend;
+		prev_extent_end = range_end;
 	}
 
 	if (cache.cached && cache.offset + cache.len >= last_extent_end) {
 		const u64 i_size = i_size_read(&inode->vfs_inode);
 
 		if (prev_extent_end < i_size) {
+			struct extent_state *cached_state = NULL;
 			u64 delalloc_start;
 			u64 delalloc_end;
+			u64 lockstart;
+			u64 lockend;
 			bool delalloc;
 
+			lockstart = round_down(prev_extent_end, sectorsize);
+			lockend = round_up(i_size, sectorsize);
+
+			/*
+			 * See the comment in fiemap_process_hole as to why
+			 * we're doing the locking here.
+			 */
+			lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 			delalloc = btrfs_find_delalloc_in_range(inode,
 								prev_extent_end,
 								i_size - 1,
 								&delalloc_cached_state,
 								&delalloc_start,
 								&delalloc_end);
+			unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 			if (!delalloc)
 				cache.flags |= FIEMAP_EXTENT_LAST;
 		} else {
@@ -3082,7 +3111,6 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
 	ret = emit_last_fiemap_cache(fieinfo, &cache);
 
 out_unlock:
-	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 out:
 	free_extent_state(delalloc_cached_state);
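
Note on the net effect: extent_fiemap() no longer holds the extent range locked for the whole call; each delalloc lookup now takes and drops the io_tree lock only around btrfs_find_delalloc_in_range(), which is why the unlock_extent() at out_unlock goes away while btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED) is still held for the duration. Below is a condensed, hedged sketch of that per-lookup locking pattern using the names from the hunks above; "sectorsize" stands in for inode->root->fs_info->sectorsize, the surrounding loop and the delalloc_start/delalloc_end declarations are assumed from context, and this is an illustration rather than the full function body:

	/*
	 * Lock only for the duration of the delalloc lookup: the shared inode
	 * lock already excludes buffered/direct writes, so only mmap writes
	 * and page writeback can change the io_tree underneath us here.
	 */
	struct extent_state *cached_state = NULL;
	const u64 lockstart = round_down(cur_offset, sectorsize);
	const u64 lockend = round_up(end, sectorsize);
	bool delalloc;

	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
	delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
						delalloc_cached_state,
						&delalloc_start, &delalloc_end);
	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);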