@@ -107,7 +107,7 @@ static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
 	 * don't make any buffers if there is only one buffer on
 	 * the folio and the folio just needs to be set up to date
 	 */
-	if (inode->i_blkbits == PAGE_SHIFT &&
+	if (inode->i_blkbits == folio_shift(folio) &&
 	    buffer_uptodate(bh)) {
 		folio_mark_uptodate(folio);
 		return;
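
For context: folio_shift(folio) is the log2 of the folio's size in bytes, i.e. PAGE_SHIFT plus the folio's order, so this fast path now fires whenever a single block covers the whole folio, not only for order-0 folios whose block size equals the page size. A minimal userspace sketch of that condition (PAGE_SHIFT, the block size, and the folio_shift() stand-in are assumptions for illustration, not kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB base pages for this sketch */

/*
 * Hypothetical stand-in for the kernel's folio_shift(): a folio of the
 * given order spans 2^order pages, so its size is 2^(PAGE_SHIFT + order).
 */
static unsigned int folio_shift(unsigned int order)
{
	return PAGE_SHIFT + order;
}

int main(void)
{
	unsigned int blkbits = 14;	/* 16 KiB filesystem blocks */

	for (unsigned int order = 0; order <= 3; order++) {
		unsigned int shift = folio_shift(order);

		if (shift < blkbits)
			printf("order %u: folio smaller than one block\n",
			       order);
		else
			printf("order %u: %u block(s) per folio%s\n", order,
			       1u << (shift - blkbits),
			       shift == blkbits ?
			       " <- single-buffer fast path" : "");
	}
	return 0;
}

With these assumed sizes, an order-2 folio (16 KiB) holds exactly one 16 KiB block and takes the fast path; an order-3 folio holds two blocks and falls through to per-buffer setup.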
@@ -153,15 +153,15 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 	struct folio *folio = args->folio;
 	struct inode *inode = folio->mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+	const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
 	const unsigned blocksize = 1 << blkbits;
 	struct buffer_head *map_bh = &args->map_bh;
 	sector_t block_in_file;
 	sector_t last_block;
 	sector_t last_block_in_file;
 	sector_t first_block;
 	unsigned page_block;
-	unsigned first_hole = blocks_per_page;
+	unsigned first_hole = blocks_per_folio;
 	struct block_device *bdev = NULL;
 	int length;
 	int fully_mapped = 1;
@@ -170,9 +170,6 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 	unsigned relative_block;
 	gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
 
-	/* MAX_BUF_PER_PAGE, for example */
-	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-
 	if (args->is_readahead) {
 		opf |= REQ_RAHEAD;
 		gfp |= __GFP_NORETRY | __GFP_NOWARN;
@@ -181,8 +178,8 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 	if (folio_buffers(folio))
 		goto confused;
 
-	block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
-	last_block = block_in_file + args->nr_pages * blocks_per_page;
+	block_in_file = folio_pos(folio) >> blkbits;
+	last_block = block_in_file + ((args->nr_pages * PAGE_SIZE) >> blkbits);
 	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
 	if (last_block > last_block_in_file)
 		last_block = last_block_in_file;
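
This is the core of the conversion: the old expression shifts the page index by (PAGE_SHIFT - blkbits), which assumes an order-0 folio and underflows once the block size exceeds the page size, while the new one starts from folio_pos(folio), the folio's byte offset in the file, and simply divides by the block size. A minimal sketch of the equivalence for the order-0 case (the values and PAGE_SHIFT are illustrative assumptions):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned int blkbits = 10;		/* 1 KiB blocks */
	uint64_t index = 5;			/* folio->index, in PAGE_SIZE units */
	uint64_t pos = index << PAGE_SHIFT;	/* byte offset; what folio_pos() returns */

	/* Old form: assumes an order-0 folio and blkbits <= PAGE_SHIFT;
	 * PAGE_SHIFT - blkbits would go negative for, say, 64 KiB blocks
	 * on a 4 KiB-page system. */
	uint64_t old_block = index << (PAGE_SHIFT - blkbits);

	/* New form: start from the byte offset, then convert to blocks.
	 * Works for any folio size and any block size. */
	uint64_t new_block = pos >> blkbits;

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old_block, (unsigned long long)new_block);
	return 0;
}

Both forms print block 20 here; only the byte-based form stays correct once folios span multiple pages.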
@@ -204,7 +201,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 				clear_buffer_mapped(map_bh);
 				break;
 			}
-			if (page_block == blocks_per_page)
+			if (page_block == blocks_per_folio)
 				break;
 			page_block++;
 			block_in_file++;
@@ -216,7 +213,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 	 * Then do more get_blocks calls until we are done with this folio.
 	 */
 	map_bh->b_folio = folio;
-	while (page_block < blocks_per_page) {
+	while (page_block < blocks_per_folio) {
 		map_bh->b_state = 0;
 		map_bh->b_size = 0;
 
@@ -229,7 +226,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 
 		if (!buffer_mapped(map_bh)) {
 			fully_mapped = 0;
-			if (first_hole == blocks_per_page)
+			if (first_hole == blocks_per_folio)
 				first_hole = page_block;
 			page_block++;
 			block_in_file++;
@@ -247,7 +244,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 			goto confused;
 		}
 
-		if (first_hole != blocks_per_page)
+		if (first_hole != blocks_per_folio)
 			goto confused;	/* hole -> non-hole */
 
 		/* Contiguous blocks? */
@@ -260,16 +257,16 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 			if (relative_block == nblocks) {
 				clear_buffer_mapped(map_bh);
 				break;
-			} else if (page_block == blocks_per_page)
+			} else if (page_block == blocks_per_folio)
 				break;
 			page_block++;
 			block_in_file++;
 		}
 		bdev = map_bh->b_bdev;
 	}
 
-	if (first_hole != blocks_per_page) {
-		folio_zero_segment(folio, first_hole << blkbits, PAGE_SIZE);
+	if (first_hole != blocks_per_folio) {
+		folio_zero_segment(folio, first_hole << blkbits, folio_size(folio));
 		if (first_hole == 0) {
 			folio_mark_uptodate(folio);
 			folio_unlock(folio);
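
When the folio ends in a hole, the unread tail must be zeroed before the folio can be marked uptodate, and for a large folio that tail runs to folio_size(folio), not PAGE_SIZE. A userspace sketch of the segment arithmetic (the buffer and the zero_segment() helper are illustrative stand-ins for the folio and folio_zero_segment()):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for folio_zero_segment(): zero bytes [from, to). */
static void zero_segment(unsigned char *buf, size_t from, size_t to)
{
	memset(buf + from, 0, to - from);
}

int main(void)
{
	enum { FOLIO_SIZE = 4 * 4096 };		/* an order-2 folio */
	unsigned int blkbits = 12;		/* 4 KiB blocks */
	unsigned int first_hole = 2;		/* first unmapped block */
	static unsigned char folio[FOLIO_SIZE];

	memset(folio, 0xaa, sizeof(folio));	/* pretend stale contents */

	/* Zero from the first hole to the end of the folio, not the page. */
	zero_segment(folio, (size_t)first_hole << blkbits, FOLIO_SIZE);

	printf("block 1 first byte: %#x (mapped, untouched)\n",
	       (unsigned)folio[1 << blkbits]);
	printf("block 3 first byte: %#x (hole, zeroed)\n",
	       (unsigned)folio[3 << blkbits]);
	return 0;
}

Zeroing only to PAGE_SIZE here would leave blocks 2 and 3 of the folio's later pages full of stale data.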
@@ -303,10 +300,10 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 	relative_block = block_in_file - args->first_logical_block;
 	nblocks = map_bh->b_size >> blkbits;
 	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
-	    (first_hole != blocks_per_page))
+	    (first_hole != blocks_per_folio))
 		args->bio = mpage_bio_submit_read(args->bio);
 	else
-		args->last_block_in_bio = first_block + blocks_per_page - 1;
+		args->last_block_in_bio = first_block + blocks_per_folio - 1;
 out:
 	return args->bio;
 
@@ -385,7 +382,7 @@ int mpage_read_folio(struct folio *folio, get_block_t get_block)
 {
 	struct mpage_readpage_args args = {
 		.folio = folio,
-		.nr_pages = 1,
+		.nr_pages = folio_nr_pages(folio),
 		.get_block = get_block,
 	};
 
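
mpage_read_folio() previously told do_mpage_readpage() to read one page; with .nr_pages = folio_nr_pages(folio), the last_block calculation shown earlier covers every page of a large folio. A small sketch with assumed values:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned int blkbits = 10;	/* 1 KiB blocks */
	uint64_t block_in_file = 20;	/* folio_pos(folio) >> blkbits */

	/* With .nr_pages = 1, the read would stop after one page: */
	uint64_t last_one_page = block_in_file + ((1 * PAGE_SIZE) >> blkbits);

	/* With .nr_pages = folio_nr_pages(folio), e.g. 4 for an order-2
	 * folio, the whole folio is read: */
	uint64_t last_whole_folio = block_in_file + ((4 * PAGE_SIZE) >> blkbits);

	printf("one page: blocks [%llu, %llu); whole folio: [%llu, %llu)\n",
	       (unsigned long long)block_in_file,
	       (unsigned long long)last_one_page,
	       (unsigned long long)block_in_file,
	       (unsigned long long)last_whole_folio);
	return 0;
}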
@@ -456,12 +453,12 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 	struct address_space *mapping = folio->mapping;
 	struct inode *inode = mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+	const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
 	sector_t last_block;
 	sector_t block_in_file;
 	sector_t first_block;
 	unsigned page_block;
-	unsigned first_unmapped = blocks_per_page;
+	unsigned first_unmapped = blocks_per_folio;
 	struct block_device *bdev = NULL;
 	int boundary = 0;
 	sector_t boundary_block = 0;
@@ -486,12 +483,12 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 			 */
 			if (buffer_dirty(bh))
 				goto confused;
-			if (first_unmapped == blocks_per_page)
+			if (first_unmapped == blocks_per_folio)
 				first_unmapped = page_block;
 			continue;
 		}
 
-		if (first_unmapped != blocks_per_page)
+		if (first_unmapped != blocks_per_folio)
 			goto confused;	/* hole -> non-hole */
 
 		if (!buffer_dirty(bh) || !buffer_uptodate(bh))
@@ -527,7 +524,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 	 * The page has no buffers: map it to disk
 	 */
 	BUG_ON(!folio_test_uptodate(folio));
-	block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
+	block_in_file = folio_pos(folio) >> blkbits;
 	/*
 	 * Whole page beyond EOF? Skip allocating blocks to avoid leaking
 	 * space.
@@ -536,7 +533,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 		goto page_is_mapped;
 	last_block = (i_size - 1) >> blkbits;
 	map_bh.b_folio = folio;
-	for (page_block = 0; page_block < blocks_per_page; ) {
+	for (page_block = 0; page_block < blocks_per_folio; ) {
 
 		map_bh.b_state = 0;
 		map_bh.b_size = 1 << blkbits;
@@ -618,14 +615,14 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 	BUG_ON(folio_test_writeback(folio));
 	folio_start_writeback(folio);
 	folio_unlock(folio);
-	if (boundary || (first_unmapped != blocks_per_page)) {
+	if (boundary || (first_unmapped != blocks_per_folio)) {
 		bio = mpage_bio_submit_write(bio);
 		if (boundary_block) {
 			write_boundary_block(boundary_bdev,
 					boundary_block, 1 << blkbits);
 		}
 	} else {
-		mpd->last_block_in_bio = first_block + blocks_per_page - 1;
+		mpd->last_block_in_bio = first_block + blocks_per_folio - 1;
 	}
 	goto out;
 