@@ -372,10 +372,10 @@ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
 }
 
 /*
- * Completion handler for block_write_full_folio() - pages which are unlocked
- * during I/O, and which have PageWriteback cleared upon I/O completion.
+ * Completion handler for block_write_full_folio() - folios which are unlocked
+ * during I/O, and which have the writeback flag cleared upon I/O completion.
  */
-void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
 	unsigned long flags;
 	struct buffer_head *first;
@@ -415,7 +415,6 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
 	return;
 }
-EXPORT_SYMBOL(end_buffer_async_write);
 
 /*
  * If a page's buffers are under async readin (end_buffer_async_read
@@ -1787,8 +1786,7 @@ static struct buffer_head *folio_create_buffers(struct folio *folio,
  * causes the writes to be flagged as synchronous writes.
  */
 int __block_write_full_folio(struct inode *inode, struct folio *folio,
-			get_block_t *get_block, struct writeback_control *wbc,
-			bh_end_io_t *handler)
+			get_block_t *get_block, struct writeback_control *wbc)
 {
 	int err;
 	sector_t block;
@@ -1867,7 +1865,8 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
 			continue;
 		}
 		if (test_clear_buffer_dirty(bh)) {
-			mark_buffer_async_write_endio(bh, handler);
+			mark_buffer_async_write_endio(bh,
+					end_buffer_async_write);
 		} else {
 			unlock_buffer(bh);
 		}
@@ -1920,7 +1919,8 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
 		    !buffer_delay(bh)) {
 			lock_buffer(bh);
-			mark_buffer_async_write_endio(bh, handler);
+			mark_buffer_async_write_endio(bh,
+					end_buffer_async_write);
 		} else {
 			/*
 			 * The buffer may have been set dirty during
@@ -2704,8 +2704,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
 
 	/* Is the folio fully inside i_size? */
 	if (folio_pos(folio) + folio_size(folio) <= i_size)
-		return __block_write_full_folio(inode, folio, get_block, wbc,
-						end_buffer_async_write);
+		return __block_write_full_folio(inode, folio, get_block, wbc);
 
 	/* Is the folio fully outside i_size? (truncate in progress) */
 	if (folio_pos(folio) >= i_size) {
@@ -2722,8 +2721,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
 	 */
 	folio_zero_segment(folio, offset_in_folio(folio, i_size),
 			   folio_size(folio));
-	return __block_write_full_folio(inode, folio, get_block, wbc,
-					end_buffer_async_write);
+	return __block_write_full_folio(inode, folio, get_block, wbc);
 }
 
 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
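
For reference, a minimal sketch (not part of the diff) of what a caller of __block_write_full_folio() looks like after this change: the end-io handler argument is gone, and completion is always handled internally by end_buffer_async_write(). The helper names myfs_write_folio and myfs_get_block below are hypothetical.

/*
 * Hedged illustration only: __block_write_full_folio() now takes just the
 * inode, folio, get_block callback and writeback control.
 */
static int myfs_write_folio(struct inode *inode, struct folio *folio,
		struct writeback_control *wbc)
{
	/* myfs_get_block is a hypothetical get_block_t implementation */
	return __block_write_full_folio(inode, folio, myfs_get_block, wbc);
}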