Skip to content

Commit 7ccd606

Browse files
committed
Merge patch series "enable bs > ps in XFS"
Pankaj Raghav (Samsung) <[email protected]> says: This is the 13th version of the series that enables block size > page size (Large Block Size) experimental support in XFS. Please consider this for inclusion in 6.12. The context and motivation can be seen in the cover letter of the RFC v1 [0]. We also recorded a talk about this effort at LPC [1], if someone would like more context on this effort. Thanks to David Howells, the page cache changes have also been tested on top of AFS [2] with mapping_min_order set. A lot of emphasis has been put on testing using kdevops, starting with an XFS baseline [3]. The testing has been split into regression and progression. Regression testing: In regression testing, we ran the whole test suite to check for regressions on existing profiles due to the page cache changes. I also ran the split_huge_page_test selftest on an XFS filesystem to check that huge page splits in min order chunks are done correctly. No regressions were found with these patches added on top. Progression testing: For progression testing, we tested for 8k, 16k, 32k and 64k block sizes. To compare it with existing support, an ARM VM with a 64k base page size (without our patches) was used as a reference to check for actual failures due to LBS support in a 4k base page size system. No new failures were found with the LBS support. We've done some preliminary performance tests with fio on XFS on 4k block size against pmem and NVMe with buffered IO and Direct IO on vanilla vs. these patches applied, and detected no regressions. We ran sysbench on postgres and mysql for several hours on LBS XFS without any issues. We also wrote an eBPF tool called blkalgn [5] to see if IO sent to the device is aligned and at least filesystem block size in length. For those who want this in a git tree, we have this up on a kdevops large-block-minorder-for-next-v13 tag [6]. 
[0] https://lore.kernel.org/lkml/[email protected]/ [1] https://www.youtube.com/watch?v=ar72r5Xf7x4 [2] https://lore.kernel.org/linux-mm/[email protected]/ [3] https://github.com/linux-kdevops/kdevops/blob/master/docs/xfs-bugs.md 489 non-critical issues and 55 critical issues. We've determined and reported that the 55 critical issues all fall into 5 common XFS asserts or hung tasks and 2 memory management asserts. [4] https://github.com/linux-kdevops/fstests/tree/lbs-fixes [5] iovisor/bcc#4813 [6] https://github.com/linux-kdevops/linux/ [7] https://lore.kernel.org/linux-kernel/[email protected]/#t * patches from https://lore.kernel.org/r/[email protected]: (5979 commits) xfs: enable block size larger than page size support xfs: make the calculation generic in xfs_sb_validate_fsb_count() xfs: expose block size in stat xfs: use kvmalloc for xattr buffers iomap: fix iomap_dio_zero() for fs bs > system page size filemap: cap PTE range to be created to allowed zero fill in folio_map_range() mm: split a folio in minimum folio order chunks readahead: allocate folios with mapping_min_order in readahead filemap: allocate mapping_min_order folios in the page cache fs: Allow fine-grained control of folio sizes Add linux-next specific files for 20240821 l2tp: use skb_queue_purge in l2tp_ip_destroy_sock af_unix: Don't call skb_get() for OOB skb. dt-bindings: net: socionext,uniphier-ave4: add top-level constraints dt-bindings: net: renesas,etheravb: add top-level constraints dt-bindings: net: mediatek,net: add top-level constraints dt-bindings: net: mediatek,net: narrow interrupts per variants net: Silence false field-spanning write warning in metadata_dst memcpy net: hns3: Use ARRAY_SIZE() to improve readability selftests: net/forwarding: spawn sh inside vrf to speed up ping loop ... Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Christian Brauner <[email protected]>
2 parents 8400291 + 7df7c20 commit 7ccd606

File tree

14 files changed

+366
-85
lines changed

14 files changed

+366
-85
lines changed

fs/iomap/buffered-io.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2007,10 +2007,10 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
20072007
}
20082008
EXPORT_SYMBOL_GPL(iomap_writepages);
20092009

2010-
static int __init iomap_init(void)
2010+
static int __init iomap_buffered_init(void)
20112011
{
20122012
return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
20132013
offsetof(struct iomap_ioend, io_bio),
20142014
BIOSET_NEED_BVECS);
20152015
}
2016-
fs_initcall(iomap_init);
2016+
fs_initcall(iomap_buffered_init);

fs/iomap/direct-io.c

Lines changed: 39 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
#include <linux/iomap.h>
1212
#include <linux/backing-dev.h>
1313
#include <linux/uio.h>
14+
#include <linux/set_memory.h>
1415
#include <linux/task_io_accounting_ops.h>
1516
#include "trace.h"
1617

@@ -27,6 +28,13 @@
2728
#define IOMAP_DIO_WRITE (1U << 30)
2829
#define IOMAP_DIO_DIRTY (1U << 31)
2930

31+
/*
32+
* Used for sub block zeroing in iomap_dio_zero()
33+
*/
34+
#define IOMAP_ZERO_PAGE_SIZE (SZ_64K)
35+
#define IOMAP_ZERO_PAGE_ORDER (get_order(IOMAP_ZERO_PAGE_SIZE))
36+
static struct page *zero_page;
37+
3038
struct iomap_dio {
3139
struct kiocb *iocb;
3240
const struct iomap_dio_ops *dops;
@@ -232,22 +240,30 @@ void iomap_dio_bio_end_io(struct bio *bio)
232240
}
233241
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);
234242

235-
static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
243+
static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
236244
loff_t pos, unsigned len)
237245
{
238246
struct inode *inode = file_inode(dio->iocb->ki_filp);
239-
struct page *page = ZERO_PAGE(0);
240247
struct bio *bio;
241248

249+
if (!len)
250+
return 0;
251+
/*
252+
* Max block size supported is 64k
253+
*/
254+
if (WARN_ON_ONCE(len > IOMAP_ZERO_PAGE_SIZE))
255+
return -EINVAL;
256+
242257
bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
243258
fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
244259
GFP_KERNEL);
245260
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
246261
bio->bi_private = dio;
247262
bio->bi_end_io = iomap_dio_bio_end_io;
248263

249-
__bio_add_page(bio, page, len, 0);
264+
__bio_add_page(bio, zero_page, len, 0);
250265
iomap_dio_submit_bio(iter, dio, bio, pos);
266+
return 0;
251267
}
252268

253269
/*
@@ -356,8 +372,10 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
356372
if (need_zeroout) {
357373
/* zero out from the start of the block to the write offset */
358374
pad = pos & (fs_block_size - 1);
359-
if (pad)
360-
iomap_dio_zero(iter, dio, pos - pad, pad);
375+
376+
ret = iomap_dio_zero(iter, dio, pos - pad, pad);
377+
if (ret)
378+
goto out;
361379
}
362380

363381
/*
@@ -431,7 +449,8 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
431449
/* zero out from the end of the write to the end of the block */
432450
pad = pos & (fs_block_size - 1);
433451
if (pad)
434-
iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
452+
ret = iomap_dio_zero(iter, dio, pos,
453+
fs_block_size - pad);
435454
}
436455
out:
437456
/* Undo iter limitation to current extent */
@@ -753,3 +772,17 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
753772
return iomap_dio_complete(dio);
754773
}
755774
EXPORT_SYMBOL_GPL(iomap_dio_rw);
775+
776+
static int __init iomap_dio_init(void)
777+
{
778+
zero_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
779+
IOMAP_ZERO_PAGE_ORDER);
780+
781+
if (!zero_page)
782+
return -ENOMEM;
783+
784+
set_memory_ro((unsigned long)page_address(zero_page),
785+
1U << IOMAP_ZERO_PAGE_ORDER);
786+
return 0;
787+
}
788+
fs_initcall(iomap_dio_init);

fs/xfs/libxfs/xfs_attr_leaf.c

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1138,10 +1138,7 @@ xfs_attr3_leaf_to_shortform(
11381138

11391139
trace_xfs_attr_leaf_to_sf(args);
11401140

1141-
tmpbuffer = kmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
1142-
if (!tmpbuffer)
1143-
return -ENOMEM;
1144-
1141+
tmpbuffer = kvmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
11451142
memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
11461143

11471144
leaf = (xfs_attr_leafblock_t *)tmpbuffer;
@@ -1205,7 +1202,7 @@ xfs_attr3_leaf_to_shortform(
12051202
error = 0;
12061203

12071204
out:
1208-
kfree(tmpbuffer);
1205+
kvfree(tmpbuffer);
12091206
return error;
12101207
}
12111208

@@ -1613,7 +1610,7 @@ xfs_attr3_leaf_compact(
16131610

16141611
trace_xfs_attr_leaf_compact(args);
16151612

1616-
tmpbuffer = kmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
1613+
tmpbuffer = kvmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
16171614
memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
16181615
memset(bp->b_addr, 0, args->geo->blksize);
16191616
leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
@@ -1651,7 +1648,7 @@ xfs_attr3_leaf_compact(
16511648
*/
16521649
xfs_trans_log_buf(trans, bp, 0, args->geo->blksize - 1);
16531650

1654-
kfree(tmpbuffer);
1651+
kvfree(tmpbuffer);
16551652
}
16561653

16571654
/*
@@ -2330,7 +2327,7 @@ xfs_attr3_leaf_unbalance(
23302327
struct xfs_attr_leafblock *tmp_leaf;
23312328
struct xfs_attr3_icleaf_hdr tmphdr;
23322329

2333-
tmp_leaf = kzalloc(state->args->geo->blksize,
2330+
tmp_leaf = kvzalloc(state->args->geo->blksize,
23342331
GFP_KERNEL | __GFP_NOFAIL);
23352332

23362333
/*
@@ -2371,7 +2368,7 @@ xfs_attr3_leaf_unbalance(
23712368
}
23722369
memcpy(save_leaf, tmp_leaf, state->args->geo->blksize);
23732370
savehdr = tmphdr; /* struct copy */
2374-
kfree(tmp_leaf);
2371+
kvfree(tmp_leaf);
23752372
}
23762373

23772374
xfs_attr3_leaf_hdr_to_disk(state->args->geo, save_leaf, &savehdr);

fs/xfs/libxfs/xfs_ialloc.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3033,6 +3033,11 @@ xfs_ialloc_setup_geometry(
30333033
igeo->ialloc_align = mp->m_dalign;
30343034
else
30353035
igeo->ialloc_align = 0;
3036+
3037+
if (mp->m_sb.sb_blocksize > PAGE_SIZE)
3038+
igeo->min_folio_order = mp->m_sb.sb_blocklog - PAGE_SHIFT;
3039+
else
3040+
igeo->min_folio_order = 0;
30363041
}
30373042

30383043
/* Compute the location of the root directory inode that is laid out by mkfs. */

fs/xfs/libxfs/xfs_shared.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -224,6 +224,9 @@ struct xfs_ino_geometry {
224224
/* precomputed value for di_flags2 */
225225
uint64_t new_diflags2;
226226

227+
/* minimum folio order of a page cache allocation */
228+
unsigned int min_folio_order;
229+
227230
};
228231

229232
#endif /* __XFS_SHARED_H__ */

fs/xfs/xfs_icache.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,8 @@ xfs_inode_alloc(
8888

8989
/* VFS doesn't initialise i_mode! */
9090
VFS_I(ip)->i_mode = 0;
91-
mapping_set_large_folios(VFS_I(ip)->i_mapping);
91+
mapping_set_folio_min_order(VFS_I(ip)->i_mapping,
92+
M_IGEO(mp)->min_folio_order);
9293

9394
XFS_STATS_INC(mp, vn_active);
9495
ASSERT(atomic_read(&ip->i_pincount) == 0);
@@ -325,7 +326,8 @@ xfs_reinit_inode(
325326
inode->i_uid = uid;
326327
inode->i_gid = gid;
327328
inode->i_state = state;
328-
mapping_set_large_folios(inode->i_mapping);
329+
mapping_set_folio_min_order(inode->i_mapping,
330+
M_IGEO(mp)->min_folio_order);
329331
return error;
330332
}
331333

fs/xfs/xfs_iops.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -567,7 +567,7 @@ xfs_stat_blksize(
567567
return 1U << mp->m_allocsize_log;
568568
}
569569

570-
return PAGE_SIZE;
570+
return max_t(uint32_t, PAGE_SIZE, mp->m_sb.sb_blocksize);
571571
}
572572

573573
STATIC int

fs/xfs/xfs_mount.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -132,11 +132,15 @@ xfs_sb_validate_fsb_count(
132132
xfs_sb_t *sbp,
133133
uint64_t nblocks)
134134
{
135-
ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
135+
uint64_t max_bytes;
136+
136137
ASSERT(sbp->sb_blocklog >= BBSHIFT);
137138

139+
if (check_shl_overflow(nblocks, sbp->sb_blocklog, &max_bytes))
140+
return -EFBIG;
141+
138142
/* Limited by ULONG_MAX of page cache index */
139-
if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
143+
if (max_bytes >> PAGE_SHIFT > ULONG_MAX)
140144
return -EFBIG;
141145
return 0;
142146
}

fs/xfs/xfs_super.c

Lines changed: 20 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1638,16 +1638,28 @@ xfs_fs_fill_super(
16381638
goto out_free_sb;
16391639
}
16401640

1641-
/*
1642-
* Until this is fixed only page-sized or smaller data blocks work.
1643-
*/
16441641
if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1645-
xfs_warn(mp,
1646-
"File system with blocksize %d bytes. "
1647-
"Only pagesize (%ld) or less will currently work.",
1642+
size_t max_folio_size = mapping_max_folio_size_supported();
1643+
1644+
if (!xfs_has_crc(mp)) {
1645+
xfs_warn(mp,
1646+
"V4 Filesystem with blocksize %d bytes. Only pagesize (%ld) or less is supported.",
16481647
mp->m_sb.sb_blocksize, PAGE_SIZE);
1649-
error = -ENOSYS;
1650-
goto out_free_sb;
1648+
error = -ENOSYS;
1649+
goto out_free_sb;
1650+
}
1651+
1652+
if (mp->m_sb.sb_blocksize > max_folio_size) {
1653+
xfs_warn(mp,
1654+
"block size (%u bytes) not supported; Only block size (%zu) or less is supported",
1655+
mp->m_sb.sb_blocksize, max_folio_size);
1656+
error = -ENOSYS;
1657+
goto out_free_sb;
1658+
}
1659+
1660+
xfs_warn(mp,
1661+
"EXPERIMENTAL: V5 Filesystem with Large Block Size (%d bytes) enabled.",
1662+
mp->m_sb.sb_blocksize);
16511663
}
16521664

16531665
/* Ensure this filesystem fits in the page cache limits */

include/linux/huge_mm.h

Lines changed: 24 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,8 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
9696
#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
9797
(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
9898

99+
#define split_folio(f) split_folio_to_list(f, NULL)
100+
99101
#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
100102
#define HPAGE_PMD_SHIFT PMD_SHIFT
101103
#define HPAGE_PUD_SHIFT PUD_SHIFT
@@ -317,9 +319,24 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
317319
bool can_split_folio(struct folio *folio, int *pextra_pins);
318320
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
319321
unsigned int new_order);
322+
int min_order_for_split(struct folio *folio);
323+
int split_folio_to_list(struct folio *folio, struct list_head *list);
320324
static inline int split_huge_page(struct page *page)
321325
{
322-
return split_huge_page_to_list_to_order(page, NULL, 0);
326+
struct folio *folio = page_folio(page);
327+
int ret = min_order_for_split(folio);
328+
329+
if (ret < 0)
330+
return ret;
331+
332+
/*
333+
* split_huge_page() locks the page before splitting and
334+
* expects the same page that has been split to be locked when
335+
* returned. split_folio(page_folio(page)) cannot be used here
336+
* because it converts the page to folio and passes the head
337+
* page to be split.
338+
*/
339+
return split_huge_page_to_list_to_order(page, NULL, ret);
323340
}
324341
void deferred_split_folio(struct folio *folio);
325342

@@ -484,6 +501,12 @@ static inline int split_huge_page(struct page *page)
484501
{
485502
return 0;
486503
}
504+
505+
static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
506+
{
507+
return 0;
508+
}
509+
487510
static inline void deferred_split_folio(struct folio *folio) {}
488511
#define split_huge_pmd(__vma, __pmd, __address) \
489512
do { } while (0)
@@ -598,7 +621,4 @@ static inline int split_folio_to_order(struct folio *folio, int new_order)
598621
return split_folio_to_list_to_order(folio, NULL, new_order);
599622
}
600623

601-
#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
602-
#define split_folio(f) split_folio_to_order(f, 0)
603-
604624
#endif /* _LINUX_HUGE_MM_H */

0 commit comments

Comments
 (0)