@@ -613,17 +613,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * FIXME: may need to call ->reservepage here as well. That's rather up to the
  * address_space though.
  */
-int __set_page_dirty_buffers(struct page *page)
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-        int newly_dirty;
-        struct address_space *mapping = page_mapping(page);
-
-        if (unlikely(!mapping))
-                return !TestSetPageDirty(page);
+        struct buffer_head *head;
+        bool newly_dirty;
 
         spin_lock(&mapping->private_lock);
-        if (page_has_buffers(page)) {
-                struct buffer_head *head = page_buffers(page);
+        head = folio_buffers(folio);
+        if (head) {
                 struct buffer_head *bh = head;
 
                 do {
@@ -635,21 +632,21 @@ int __set_page_dirty_buffers(struct page *page)
          * Lock out page's memcg migration to keep PageDirty
          * synchronized with per-memcg dirty page counters.
          */
-        lock_page_memcg(page);
-        newly_dirty = !TestSetPageDirty(page);
+        folio_memcg_lock(folio);
+        newly_dirty = !folio_test_set_dirty(folio);
         spin_unlock(&mapping->private_lock);
 
         if (newly_dirty)
-                __set_page_dirty(page, mapping, 1);
+                __folio_mark_dirty(folio, mapping, 1);
 
-        unlock_page_memcg(page);
+        folio_memcg_unlock(folio);
 
         if (newly_dirty)
                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
         return newly_dirty;
 }
-EXPORT_SYMBOL(__set_page_dirty_buffers);
+EXPORT_SYMBOL(block_dirty_folio);
 
 /*
  * Write out and wait upon a list of buffers.
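(Not part of the patch.) As a hedged illustration of how this rename is consumed, a block-based filesystem now points the dirty_folio hook of its address_space_operations at the new helper, where it previously pointed set_page_dirty at __set_page_dirty_buffers; "examplefs" and the elided methods below are hypothetical placeholders, not anything introduced by this commit:

#include <linux/buffer_head.h>
#include <linux/fs.h>

/* Hypothetical filesystem: only the hooks relevant to this series are shown. */
static const struct address_space_operations examplefs_aops = {
        .dirty_folio      = block_dirty_folio,      /* was .set_page_dirty = __set_page_dirty_buffers */
        .invalidate_folio = block_invalidate_folio, /* see EXPORT_SYMBOL(block_invalidate_folio) below */
        /* .readpage, .writepage, etc. omitted -- filesystem specific */
};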
@@ -1548,7 +1545,7 @@ EXPORT_SYMBOL(block_invalidate_folio);
 
 /*
  * We attach and possibly dirty the buffers atomically wrt
- * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
+ * block_dirty_folio() via private_lock. try_to_free_buffers
  * is already excluded via the page lock.
  */
 void create_empty_buffers(struct page *page,
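(Not part of the patch.) To make the locking rule in that comment concrete, here is a minimal sketch of the usual caller pattern, with a made-up helper name: the caller holds the page lock (which excludes try_to_free_buffers()), and create_empty_buffers() takes mapping->private_lock internally, so attaching buffers cannot race with block_dirty_folio():

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

/* Hypothetical helper: ensure a locked page has buffer_heads attached. */
static void examplefs_attach_buffers(struct page *page, unsigned long blocksize)
{
        BUG_ON(!PageLocked(page));      /* page lock held by the caller */
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);
}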
@@ -1723,12 +1720,12 @@ int __block_write_full_page(struct inode *inode, struct page *page,
                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
 
         /*
-         * Be very careful. We have no exclusion from __set_page_dirty_buffers
+         * Be very careful. We have no exclusion from block_dirty_folio
          * here, and the (potentially unmapped) buffers may become dirty at
         * any time. If a buffer becomes dirty here after we've inspected it
         * then we just miss that fact, and the page stays dirty.
         *
-         * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+         * Buffers outside i_size may be dirtied by block_dirty_folio;
         * handle that here by just cleaning them.
         */
 
@@ -3182,7 +3179,7 @@ EXPORT_SYMBOL(sync_dirty_buffer);
  *
  * The same applies to regular filesystem pages: if all the buffers are
  * clean then we set the page clean and proceed. To do that, we require
- * total exclusion from __set_page_dirty_buffers(). That is obtained with
+ * total exclusion from block_dirty_folio(). That is obtained with
  * private_lock.
  *
  * try_to_free_buffers() is non-blocking.
@@ -3249,7 +3246,7 @@ int try_to_free_buffers(struct page *page)
         * the page also.
         *
         * private_lock must be held over this entire operation in order
-         * to synchronise against __set_page_dirty_buffers and prevent the
+         * to synchronise against block_dirty_folio and prevent the
         * dirty bit from being lost.
         */
         if (ret)