@@ -3689,6 +3689,8 @@ static struct extent_buffer *grab_extent_buffer(
 	struct folio *folio = page_folio(page);
 	struct extent_buffer *exists;
 
+	lockdep_assert_held(&page->mapping->i_private_lock);
+
 	/*
 	 * For subpage case, we completely rely on radix tree to ensure we
 	 * don't try to insert two ebs for the same bytenr. So here we always
@@ -3756,13 +3758,14 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
  * The caller needs to free the existing folios and retry using the same order.
  */
 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
+				      struct btrfs_subpage *prealloc,
 				      struct extent_buffer **found_eb_ret)
 {
 
 	struct btrfs_fs_info *fs_info = eb->fs_info;
 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
 	const unsigned long index = eb->start >> PAGE_SHIFT;
-	struct folio *existing_folio;
+	struct folio *existing_folio = NULL;
 	int ret;
 
 	ASSERT(found_eb_ret);
@@ -3774,12 +3777,14 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
 	ret = filemap_add_folio(mapping, eb->folios[i], index + i,
 				GFP_NOFS | __GFP_NOFAIL);
 	if (!ret)
-		return 0;
+		goto finish;
 
 	existing_folio = filemap_lock_folio(mapping, index + i);
 	/* The page cache only exists for a very short time, just retry. */
-	if (IS_ERR(existing_folio))
+	if (IS_ERR(existing_folio)) {
+		existing_folio = NULL;
 		goto retry;
+	}
 
 	/* For now, we should only have single-page folios for btree inode. */
 	ASSERT(folio_nr_pages(existing_folio) == 1);
@@ -3790,21 +3795,21 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
 		return -EAGAIN;
 	}
 
-	if (fs_info->nodesize < PAGE_SIZE) {
-		/*
-		 * We're going to reuse the existing page, can drop our page
-		 * and subpage structure now.
-		 */
+finish:
+	spin_lock(&mapping->i_private_lock);
+	if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
+		/* We're going to reuse the existing page, can drop our folio now. */
 		__free_page(folio_page(eb->folios[i], 0));
 		eb->folios[i] = existing_folio;
-	} else {
+	} else if (existing_folio) {
 		struct extent_buffer *existing_eb;
 
 		existing_eb = grab_extent_buffer(fs_info,
 						 folio_page(existing_folio, 0));
 		if (existing_eb) {
 			/* The extent buffer still exists, we can use it directly. */
 			*found_eb_ret = existing_eb;
+			spin_unlock(&mapping->i_private_lock);
 			folio_unlock(existing_folio);
 			folio_put(existing_folio);
 			return 1;
@@ -3813,6 +3818,22 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
 		__free_page(folio_page(eb->folios[i], 0));
 		eb->folios[i] = existing_folio;
 	}
+	eb->folio_size = folio_size(eb->folios[i]);
+	eb->folio_shift = folio_shift(eb->folios[i]);
+	/* Should not fail, as we have preallocated the memory. */
+	ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
+	ASSERT(!ret);
+	/*
+	 * To inform we have an extra eb under allocation, so that
+	 * detach_extent_buffer_page() won't release the folio private when the
+	 * eb hasn't been inserted into radix tree yet.
+	 *
+	 * The ref will be decreased when the eb releases the page, in
+	 * detach_extent_buffer_page(). Thus needs no special handling in the
+	 * error path.
+	 */
+	btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
+	spin_unlock(&mapping->i_private_lock);
 	return 0;
 }
 
@@ -3824,7 +3845,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	int attached = 0;
 	struct extent_buffer *eb;
 	struct extent_buffer *existing_eb = NULL;
-	struct address_space *mapping = fs_info->btree_inode->i_mapping;
 	struct btrfs_subpage *prealloc = NULL;
 	u64 lockdep_owner = owner_root;
 	bool page_contig = true;
@@ -3890,7 +3910,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	for (int i = 0; i < num_folios; i++) {
 		struct folio *folio;
 
-		ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
+		ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
 		if (ret > 0) {
 			ASSERT(existing_eb);
 			goto out;
@@ -3927,24 +3947,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 		 * and free the allocated page.
 		 */
 		folio = eb->folios[i];
-		eb->folio_size = folio_size(folio);
-		eb->folio_shift = folio_shift(folio);
-		spin_lock(&mapping->i_private_lock);
-		/* Should not fail, as we have preallocated the memory */
-		ret = attach_extent_buffer_folio(eb, folio, prealloc);
-		ASSERT(!ret);
-		/*
-		 * To inform we have extra eb under allocation, so that
-		 * detach_extent_buffer_page() won't release the folio private
-		 * when the eb hasn't yet been inserted into radix tree.
-		 *
-		 * The ref will be decreased when the eb released the page, in
-		 * detach_extent_buffer_page().
-		 * Thus needs no special handling in error path.
-		 */
-		btrfs_folio_inc_eb_refs(fs_info, folio);
-		spin_unlock(&mapping->i_private_lock);
-
 		WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
 
 		/*
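The core change above moves the check-then-attach of the folio private out of alloc_extent_buffer() and into attach_eb_folio_to_filemap(), so that grab_extent_buffer() and attach_extent_buffer_folio() run inside a single mapping->i_private_lock critical section, with grab_extent_buffer() now documenting that contract via lockdep_assert_held(). Below is a minimal userspace sketch of that pattern, not btrfs code: all names (struct mapping, grab_private, attach_private) are hypothetical, a plain assert() stands in for lockdep_assert_held(), and a pthread mutex stands in for i_private_lock.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mapping {
	pthread_mutex_t i_private_lock;
	void *private;		/* stands in for the folio private pointer */
	bool lock_held;		/* poor man's lockdep state for assert() */
};

/* Analogue of grab_extent_buffer(): caller must hold i_private_lock. */
static void *grab_private(struct mapping *m)
{
	assert(m->lock_held);	/* analogue of lockdep_assert_held() */
	return m->private;
}

/*
 * Analogue of the finish: path in attach_eb_folio_to_filemap(): the
 * "did someone else attach first?" check and our own attach happen in
 * one critical section, so no second attacher can slip in between.
 */
static void *attach_private(struct mapping *m, void *ours)
{
	void *winner;

	pthread_mutex_lock(&m->i_private_lock);
	m->lock_held = true;
	winner = grab_private(m);
	if (!winner) {
		m->private = ours;	/* we won the race: publish under the lock */
		winner = ours;
	}
	m->lock_held = false;
	pthread_mutex_unlock(&m->i_private_lock);
	return winner;
}

int main(void)
{
	struct mapping m = { .i_private_lock = PTHREAD_MUTEX_INITIALIZER };
	int eb1, eb2;

	printf("first attach kept %s\n",
	       attach_private(&m, &eb1) == &eb1 ? "ours" : "the existing one");
	printf("second attach kept %s\n",
	       attach_private(&m, &eb2) == &eb2 ? "ours" : "the existing one");
	return 0;
}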