@@ -2340,6 +2340,75 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
 		return lblk < blocks;
 }
 
+/*
+ * mpage_process_page - update page buffers corresponding to changed extent and
+ *		       may submit fully mapped page for IO
+ *
+ * @mpd		- description of extent to map, on return next extent to map
+ * @m_lblk	- logical block mapping.
+ * @m_pblk	- corresponding physical mapping.
+ * @map_bh	- determines on return whether this page requires any further
+ *		  mapping or not.
+ * Scan given page buffers corresponding to changed extent and update buffer
+ * state according to new extent state.
+ * We map delalloc buffers to their physical location, clear unwritten bits.
+ * If the given page is not fully mapped, we update @map to the next extent in
+ * the given page that needs mapping & return @map_bh as true.
+ */
+static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
+			      ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
+			      bool *map_bh)
+{
+	struct buffer_head *head, *bh;
+	ext4_io_end_t *io_end = mpd->io_submit.io_end;
+	ext4_lblk_t lblk = *m_lblk;
+	ext4_fsblk_t pblock = *m_pblk;
+	int err = 0;
+	int blkbits = mpd->inode->i_blkbits;
+	ssize_t io_end_size = 0;
+	struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
+
+	bh = head = page_buffers(page);
+	do {
+		if (lblk < mpd->map.m_lblk)
+			continue;
+		if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
+			/*
+			 * Buffer after end of mapped extent.
+			 * Find next buffer in the page to map.
+			 */
+			mpd->map.m_len = 0;
+			mpd->map.m_flags = 0;
+			io_end_vec->size += io_end_size;
+			io_end_size = 0;
+
+			err = mpage_process_page_bufs(mpd, head, bh, lblk);
+			if (err > 0)
+				err = 0;
+			if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
+				io_end_vec = ext4_alloc_io_end_vec(io_end);
+				io_end_vec->offset = mpd->map.m_lblk << blkbits;
+			}
+			*map_bh = true;
+			goto out;
+		}
+		if (buffer_delay(bh)) {
+			clear_buffer_delay(bh);
+			bh->b_blocknr = pblock++;
+		}
+		clear_buffer_unwritten(bh);
+		io_end_size += (1 << blkbits);
+	} while (lblk++, (bh = bh->b_this_page) != head);
+
+	io_end_vec->size += io_end_size;
+	io_end_size = 0;
+	*map_bh = false;
+out:
+	*m_lblk = lblk;
+	*m_pblk = pblock;
+	return err;
+}
+
 /*
  * mpage_map_buffers - update buffers corresponding to changed extent and
  *		       submit fully mapped pages for IO
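The two helpers called above, ext4_last_io_end_vec() and ext4_alloc_io_end_vec(), are not defined in this hunk; they belong to the io_end_vec infrastructure introduced elsewhere in this series. A minimal sketch of the presumed shape, inferred only from the calls made above (the actual definitions live outside this diff, in fs/ext4/ext4.h and fs/ext4/page-io.c):

/*
 * Sketch only: each io_end carries a list of (offset, size) vectors
 * instead of a single pair.  Field names are assumptions inferred
 * from the uses in mpage_process_page() above.
 */
struct ext4_io_end_vec {
	struct list_head list;	/* member of io_end's vector list (assumed) */
	loff_t offset;		/* file offset this vector starts at */
	ssize_t size;		/* bytes covered by this vector */
};

/* Allocate a new vector, append it to io_end, and return it. */
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end);

/* Return the most recently allocated vector of io_end. */
struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end);

A list of vectors lets one io_end describe several discontiguous file ranges, which is exactly what a page spanning an extent boundary produces once blocksize < pagesize is allowed.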
@@ -2359,12 +2428,12 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 	struct pagevec pvec;
 	int nr_pages, i;
 	struct inode *inode = mpd->inode;
-	struct buffer_head *head, *bh;
 	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
 	pgoff_t start, end;
 	ext4_lblk_t lblk;
-	sector_t pblock;
+	ext4_fsblk_t pblock;
 	int err;
+	bool map_bh = false;
 
 	start = mpd->map.m_lblk >> bpp_bits;
 	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
@@ -2380,57 +2449,29 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
-			bh = head = page_buffers(page);
-			do {
-				if (lblk < mpd->map.m_lblk)
-					continue;
-				if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
-					/*
-					 * Buffer after end of mapped extent.
-					 * Find next buffer in the page to map.
-					 */
-					mpd->map.m_len = 0;
-					mpd->map.m_flags = 0;
-					/*
-					 * FIXME: If dioread_nolock supports
-					 * blocksize < pagesize, we need to make
-					 * sure we add size mapped so far to
-					 * io_end->size as the following call
-					 * can submit the page for IO.
-					 */
-					err = mpage_process_page_bufs(mpd, head,
-								      bh, lblk);
-					pagevec_release(&pvec);
-					if (err > 0)
-						err = 0;
-					return err;
-				}
-				if (buffer_delay(bh)) {
-					clear_buffer_delay(bh);
-					bh->b_blocknr = pblock++;
-				}
-				clear_buffer_unwritten(bh);
-			} while (lblk++, (bh = bh->b_this_page) != head);
-
+			err = mpage_process_page(mpd, page, &lblk, &pblock,
+						 &map_bh);
 			/*
-			 * FIXME: This is going to break if dioread_nolock
-			 * supports blocksize < pagesize as we will try to
-			 * convert potentially unmapped parts of inode.
+			 * If map_bh is true, the page may require further
+			 * buffer mapping, or it may have been submitted for
+			 * IO, so return to carry out further extent mapping.
 			 */
-			mpd->io_submit.io_end->size += PAGE_SIZE;
+			if (err < 0 || map_bh == true)
+				goto out;
 			/* Page fully mapped - let IO run! */
 			err = mpage_submit_page(mpd, page);
-			if (err < 0) {
-				pagevec_release(&pvec);
-				return err;
-			}
+			if (err < 0)
+				goto out;
 		}
 		pagevec_release(&pvec);
 	}
 	/* Extent fully mapped and matches with page boundary. We are done. */
 	mpd->map.m_len = 0;
 	mpd->map.m_flags = 0;
 	return 0;
+out:
+	pagevec_release(&pvec);
+	return err;
 }
 
 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
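To see why mpage_process_page() accumulates io_end_size one block at a time, consider blocksize < pagesize concretely. A standalone toy model of that accounting (illustrative numbers only, not taken from the patch):

#include <stdio.h>

/*
 * Toy model of the size accounting in mpage_process_page(): with 1k
 * blocks (blkbits = 10) on a 4k page, a page has four buffers.  If the
 * mapped extent covers only three of them, only 3072 bytes are added to
 * the current io_end_vec; the fourth buffer is left for the next
 * extent, which starts a fresh vector with its own offset.
 */
int main(void)
{
	int blkbits = 10;	/* assumed 1k blocksize */
	int mapped_blocks = 3;	/* extent ends inside the page */
	long io_end_size = 0;

	for (int i = 0; i < mapped_blocks; i++)
		io_end_size += 1L << blkbits;	/* mirrors the patch */

	printf("io_end_vec->size += %ld\n", io_end_size);	/* 3072 */
	return 0;
}

This is precisely the case the two removed FIXME comments warned about: the old code added a flat PAGE_SIZE per page, which over-counts when only part of a page is mapped.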
@@ -2510,9 +2551,10 @@ static int mpage_map_and_submit_extent(handle_t *handle,
 	int err;
 	loff_t disksize;
 	int progress = 0;
+	ext4_io_end_t *io_end = mpd->io_submit.io_end;
+	struct ext4_io_end_vec *io_end_vec = ext4_alloc_io_end_vec(io_end);
 
-	mpd->io_submit.io_end->offset =
-				((loff_t)map->m_lblk) << inode->i_blkbits;
+	io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
 	do {
 		err = mpage_map_one_extent(handle, mpd);
 		if (err < 0) {
@@ -3613,6 +3655,7 @@ static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
 				  ssize_t size, void *private)
 {
 	ext4_io_end_t *io_end = private;
+	struct ext4_io_end_vec *io_end_vec;
 
 	/* if not async direct IO just return */
 	if (!io_end)
@@ -3630,8 +3673,9 @@ static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
 		ext4_clear_io_unwritten_flag(io_end);
 		size = 0;
 	}
-	io_end->offset = offset;
-	io_end->size = size;
+	io_end_vec = ext4_alloc_io_end_vec(io_end);
+	io_end_vec->offset = offset;
+	io_end_vec->size = size;
 	ext4_put_io_end(io_end);
 
 	return 0;
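Once a single io_end can carry several vectors, the unwritten-to-written conversion at IO completion has to walk the whole list rather than convert one (offset, size) pair. A minimal sketch of that loop, assuming a list head on the io_end and using the in-tree helper ext4_convert_unwritten_extents(); the function name and list_vec member here are illustrative, not part of this diff:

/*
 * Sketch only: iterate every recorded vector and convert its range.
 * list_vec is the assumed list head added to ext4_io_end_t by this
 * series; ext4_convert_unwritten_extents() is the existing helper.
 */
static int convert_all_io_end_vecs(handle_t *handle, ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec;
	int err = 0;

	list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
		err = ext4_convert_unwritten_extents(handle, io_end->inode,
						     io_end_vec->offset,
						     io_end_vec->size);
		if (err < 0)
			break;
	}
	return err;
}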