|
11 | 11 | #include <linux/iomap.h>
|
12 | 12 | #include <linux/backing-dev.h>
|
13 | 13 | #include <linux/uio.h>
|
| 14 | +#include <linux/set_memory.h> |
14 | 15 | #include <linux/task_io_accounting_ops.h>
|
15 | 16 | #include "trace.h"
|
16 | 17 |
|
|
27 | 28 | #define IOMAP_DIO_WRITE (1U << 30)
|
28 | 29 | #define IOMAP_DIO_DIRTY (1U << 31)
|
29 | 30 |
|
/*
 * Used for sub block zeroing in iomap_dio_zero()
 */
#define IOMAP_ZERO_PAGE_SIZE (SZ_64K)
#define IOMAP_ZERO_PAGE_ORDER (get_order(IOMAP_ZERO_PAGE_SIZE))
/*
 * Preallocated, zero-filled pages backing iomap_dio_zero(); allocated
 * once at boot by iomap_dio_init() and never freed.
 */
static struct page *zero_page;
30 | 38 | struct iomap_dio {
|
31 | 39 | struct kiocb *iocb;
|
32 | 40 | const struct iomap_dio_ops *dops;
|
@@ -232,22 +240,30 @@ void iomap_dio_bio_end_io(struct bio *bio)
|
232 | 240 | }
|
233 | 241 | EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);
|
234 | 242 |
|
235 |
/*
 * Zero the sub-block range [pos, pos + len) on disk by submitting a
 * write bio backed by the preallocated, read-only @zero_page.
 *
 * Used when a direct write is not aligned to the filesystem block size
 * and the head/tail of the block must be zeroed.  Since @zero_page is
 * IOMAP_ZERO_PAGE_SIZE (64k), @len larger than that cannot be served
 * from a single page and is rejected with -EINVAL.
 *
 * Returns 0 on success (including the no-op @len == 0 case) or a
 * negative errno.  I/O completion is signalled asynchronously via
 * iomap_dio_bio_end_io().
 */
static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
		loff_t pos, unsigned len)
{
	struct inode *inode = file_inode(dio->iocb->ki_filp);
	struct bio *bio;

	/* Nothing to zero: the write was already block aligned. */
	if (!len)
		return 0;
	/*
	 * Max block size supported is 64k
	 */
	if (WARN_ON_ONCE(len > IOMAP_ZERO_PAGE_SIZE))
		return -EINVAL;

	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
	/* Inline-encryption context; DUN derived from the file block index. */
	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
				  GFP_KERNEL);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	__bio_add_page(bio, zero_page, len, 0);
	iomap_dio_submit_bio(iter, dio, bio, pos);
	return 0;
}
|
252 | 268 |
|
253 | 269 | /*
|
@@ -356,8 +372,10 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
|
356 | 372 | if (need_zeroout) {
|
357 | 373 | /* zero out from the start of the block to the write offset */
|
358 | 374 | pad = pos & (fs_block_size - 1);
|
359 |
| - if (pad) |
360 |
| - iomap_dio_zero(iter, dio, pos - pad, pad); |
| 375 | + |
| 376 | + ret = iomap_dio_zero(iter, dio, pos - pad, pad); |
| 377 | + if (ret) |
| 378 | + goto out; |
361 | 379 | }
|
362 | 380 |
|
363 | 381 | /*
|
@@ -431,7 +449,8 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
|
431 | 449 | /* zero out from the end of the write to the end of the block */
|
432 | 450 | pad = pos & (fs_block_size - 1);
|
433 | 451 | if (pad)
|
434 |
| - iomap_dio_zero(iter, dio, pos, fs_block_size - pad); |
| 452 | + ret = iomap_dio_zero(iter, dio, pos, |
| 453 | + fs_block_size - pad); |
435 | 454 | }
|
436 | 455 | out:
|
437 | 456 | /* Undo iter limitation to current extent */
|
@@ -753,3 +772,17 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
|
753 | 772 | return iomap_dio_complete(dio);
|
754 | 773 | }
|
755 | 774 | EXPORT_SYMBOL_GPL(iomap_dio_rw);
|
| 775 | + |
/*
 * Boot-time setup: allocate the 64k zero page used by iomap_dio_zero()
 * for sub-block zeroing of direct writes.
 *
 * The pages are pre-zeroed via __GFP_ZERO and then marked read-only —
 * presumably to catch stray writes to the shared zero data.  The
 * allocation is permanent; it is never freed.
 *
 * NOTE(review): the return value of set_memory_ro() is ignored; on
 * architectures where it can fail the pages would silently remain
 * writable — confirm that is acceptable here.
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int __init iomap_dio_init(void)
{
	zero_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				IOMAP_ZERO_PAGE_ORDER);

	if (!zero_page)
		return -ENOMEM;

	set_memory_ro((unsigned long)page_address(zero_page),
			1U << IOMAP_ZERO_PAGE_ORDER);
	return 0;
}
fs_initcall(iomap_dio_init);
0 commit comments