@@ -378,13 +378,47 @@ int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
+/*
+ * Handle different locked folios:
+ *
+ * - Non-subpage folio
+ *   Just unlock it.
+ *
+ * - folio locked but without any subpage locked
+ *   This happens either before writepage_delalloc() or the delalloc range is
+ *   already handled by previous folio.
+ *   We can simply unlock it.
+ *
+ * - folio locked with subpage range locked.
+ *   We go through the locked sectors inside the range and clear their locked
+ *   bitmap, reduce the writer lock number, and unlock the page if that's
+ *   the last locked range.
+ */
 void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
 				 struct folio *folio, u64 start, u32 len)
 {
+	struct btrfs_subpage *subpage = folio_get_private(folio);
+
+	ASSERT(folio_test_locked(folio));
+
 	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
 		folio_unlock(folio);
 		return;
 	}
+
+	/*
+	 * For subpage case, there are two types of locked page.  With or
+	 * without writers number.
+	 *
+	 * Since we own the page lock, no one else could touch subpage::writers
+	 * and we are safe to do several atomic operations without spinlock.
+	 */
+	if (atomic_read(&subpage->writers) == 0) {
+		/* No writers, locked by plain lock_page(). */
+		folio_unlock(folio);
+		return;
+	}
+
 	btrfs_subpage_clamp_range(folio, &start, &len);
 	if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
 		folio_unlock(folio);
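
A minimal caller sketch (not part of the patch; finish_folio_range() and its
arguments are hypothetical) showing what the consolidated helper buys: one
call now covers a non-subpage folio, a subpage folio with no locked range,
and a subpage folio with locked sectors inside the range.

	/* Hypothetical helper; fs_info/folio/start/len assumed to come from writeback. */
	static void finish_folio_range(const struct btrfs_fs_info *fs_info,
				       struct folio *folio, u64 start, u32 len)
	{
		/*
		 * No need to check btrfs_is_subpage() or subpage::writers here;
		 * btrfs_folio_end_writer_lock() handles all three cases itself.
		 */
		btrfs_folio_end_writer_lock(fs_info, folio, start, len);
	}
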
@@ -702,53 +736,6 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
 	spin_unlock_irqrestore(&subpage->lock, flags);
 }
 
-/*
- * Handle different locked pages with different page sizes:
- *
- * - Page locked by plain lock_page()
- *   It should not have any subpage::writers count.
- *   Can be unlocked by unlock_page().
- *   This is the most common locked page for extent_writepage() called
- *   inside extent_write_cache_pages().
- *   Rarer cases include the @locked_page from extent_write_locked_range().
- *
- * - Page locked by lock_delalloc_pages()
- *   There is only one caller, all pages except @locked_page for
- *   extent_write_locked_range().
- *   In this case, we have to call subpage helper to handle the case.
- */
-void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
-			       struct folio *folio, u64 start, u32 len)
-{
-	struct btrfs_subpage *subpage;
-
-	ASSERT(folio_test_locked(folio));
-	/* For non-subpage case, we just unlock the page */
-	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
-		folio_unlock(folio);
-		return;
-	}
-
-	ASSERT(folio_test_private(folio) && folio_get_private(folio));
-	subpage = folio_get_private(folio);
-
-	/*
-	 * For subpage case, there are two types of locked page.  With or
-	 * without writers number.
-	 *
-	 * Since we own the page lock, no one else could touch subpage::writers
-	 * and we are safe to do several atomic operations without spinlock.
-	 */
-	if (atomic_read(&subpage->writers) == 0) {
-		/* No writers, locked by plain lock_page() */
-		folio_unlock(folio);
-		return;
-	}
-
-	/* Have writers, use proper subpage helper to end it */
-	btrfs_folio_end_writer_lock(fs_info, folio, start, len);
-}
-
 /*
  * This is for folio already locked by plain lock_page()/folio_lock(), which
  * doesn't have any subpage awareness.
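
For call sites, the removal above means the thin wrapper is gone; a minimal
sketch of the change at a hypothetical caller (the variables cur and cur_len
are illustrative, not taken from the patch):

	/* Before: the wrapper chose between plain unlock and the subpage helper. */
	btrfs_folio_unlock_writer(fs_info, folio, cur, cur_len);

	/* After: btrfs_folio_end_writer_lock() now makes that choice itself. */
	btrfs_folio_end_writer_lock(fs_info, folio, cur, cur_len);
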