@@ -275,6 +275,46 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
275
275
* lenp = plen ;
276
276
}
277
277
278
+ static inline bool iomap_block_needs_zeroing (const struct iomap_iter * iter ,
279
+ loff_t pos )
280
+ {
281
+ const struct iomap * srcmap = iomap_iter_srcmap (iter );
282
+
283
+ return srcmap -> type != IOMAP_MAPPED ||
284
+ (srcmap -> flags & IOMAP_F_NEW ) ||
285
+ pos >= i_size_read (iter -> inode );
286
+ }
287
+
288
/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	/* Bytes of valid inline data: from the mapping start up to EOF. */
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t offset = offset_in_folio(folio, iomap->offset);

	/* Nothing to do if the folio already holds valid data. */
	if (folio_test_uptodate(folio))
		return 0;

	/* The inline data must fit inside the mapping the fs reported. */
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	/*
	 * If the inline extent does not start at the beginning of the
	 * folio, per-block uptodate state is needed so only the inline
	 * range below is marked uptodate.
	 */
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	/* Copy the inline bytes and zero the remainder of the folio. */
	folio_fill_tail(folio, offset, iomap->inline_data, size);
	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
	return 0;
}
316
+
317
+ #ifdef CONFIG_BLOCK
278
318
static void iomap_finish_folio_read (struct folio * folio , size_t off ,
279
319
size_t len , int error )
280
320
{
@@ -314,45 +354,6 @@ struct iomap_readpage_ctx {
314
354
struct readahead_control * rac ;
315
355
};
316
356
317
- /**
318
- * iomap_read_inline_data - copy inline data into the page cache
319
- * @iter: iteration structure
320
- * @folio: folio to copy to
321
- *
322
- * Copy the inline data in @iter into @folio and zero out the rest of the folio.
323
- * Only a single IOMAP_INLINE extent is allowed at the end of each file.
324
- * Returns zero for success to complete the read, or the usual negative errno.
325
- */
326
- static int iomap_read_inline_data (const struct iomap_iter * iter ,
327
- struct folio * folio )
328
- {
329
- const struct iomap * iomap = iomap_iter_srcmap (iter );
330
- size_t size = i_size_read (iter -> inode ) - iomap -> offset ;
331
- size_t offset = offset_in_folio (folio , iomap -> offset );
332
-
333
- if (folio_test_uptodate (folio ))
334
- return 0 ;
335
-
336
- if (WARN_ON_ONCE (size > iomap -> length ))
337
- return - EIO ;
338
- if (offset > 0 )
339
- ifs_alloc (iter -> inode , folio , iter -> flags );
340
-
341
- folio_fill_tail (folio , offset , iomap -> inline_data , size );
342
- iomap_set_range_uptodate (folio , offset , folio_size (folio ) - offset );
343
- return 0 ;
344
- }
345
-
346
- static inline bool iomap_block_needs_zeroing (const struct iomap_iter * iter ,
347
- loff_t pos )
348
- {
349
- const struct iomap * srcmap = iomap_iter_srcmap (iter );
350
-
351
- return srcmap -> type != IOMAP_MAPPED ||
352
- (srcmap -> flags & IOMAP_F_NEW ) ||
353
- pos >= i_size_read (iter -> inode );
354
- }
355
-
356
357
static int iomap_readpage_iter (struct iomap_iter * iter ,
357
358
struct iomap_readpage_ctx * ctx )
358
359
{
@@ -545,6 +546,27 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
545
546
}
546
547
EXPORT_SYMBOL_GPL (iomap_readahead );
547
548
549
/*
 * Synchronously read @len bytes at @pos from the source mapping into
 * @folio.  The range lies within a single folio, so an on-stack bio with
 * a single bio_vec segment suffices; submit_bio_wait() blocks until the
 * read completes and returns its status.
 */
static int iomap_read_folio_range(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t len)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
	bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
	return submit_bio_wait(&bio);
}
#else
/*
 * !CONFIG_BLOCK stub: there is no block device to read from, so reaching
 * this path indicates a programming error.  Warn once and fail the read.
 */
static int iomap_read_folio_range(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t len)
{
	WARN_ON_ONCE(1);
	return -EIO;
}
#endif /* CONFIG_BLOCK */
569
+
548
570
/*
549
571
* iomap_is_partially_uptodate checks whether blocks within a folio are
550
572
* uptodate or not.
@@ -658,19 +680,6 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
658
680
pos + len - 1 );
659
681
}
660
682
661
- static int iomap_read_folio_range (const struct iomap_iter * iter ,
662
- struct folio * folio , loff_t pos , size_t len )
663
- {
664
- const struct iomap * srcmap = iomap_iter_srcmap (iter );
665
- struct bio_vec bvec ;
666
- struct bio bio ;
667
-
668
- bio_init (& bio , srcmap -> bdev , & bvec , 1 , REQ_OP_READ );
669
- bio .bi_iter .bi_sector = iomap_sector (srcmap , pos );
670
- bio_add_folio_nofail (& bio , folio , len , offset_in_folio (folio , pos ));
671
- return submit_bio_wait (& bio );
672
- }
673
-
674
683
static int __iomap_write_begin (const struct iomap_iter * iter ,
675
684
const struct iomap_write_ops * write_ops , size_t len ,
676
685
struct folio * folio )
0 commit comments