
Commit a0102bd

jtlayton authored and idryomov committed
ceph: move sb->wb_pagevec_pool to be a global mempool
When doing some testing recently, I hit some page allocation failures on mount when creating the wb_pagevec_pool for the mount. That requires 128k (32 contiguous pages), and after thrashing the memory during an xfstests run, sometimes that would fail.

128k for each mount seems like a lot to hold in reserve for a rainy day, so let's change this to a global mempool that gets allocated when the module is plugged in.

Signed-off-by: Jeff Layton <[email protected]>
Reviewed-by: Ilya Dryomov <[email protected]>
Signed-off-by: Ilya Dryomov <[email protected]>
1 parent b748fc7 commit a0102bd
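
For background, a mempool guarantees a minimum number of preallocated elements so allocations under memory pressure can still make forward progress; moving the pool from per-superblock to module scope means the reserve is paid once at module load rather than once per mount. Below is a minimal sketch of that module-lifetime pattern, using hypothetical names (example_pool, example_init), not the actual ceph code:

#include <linux/module.h>
#include <linux/mempool.h>

/* Hypothetical module-scoped pool, mirroring the role of
 * ceph_wb_pagevec_pool: created once at load, shared by every
 * mount, freed at unload. */
static mempool_t *example_pool;

static int __init example_init(void)
{
	/* Keep a reserve of 10 elements of 128 bytes each;
	 * mempool_alloc() dips into this reserve when a regular
	 * kmalloc cannot satisfy the request. */
	example_pool = mempool_create_kmalloc_pool(10, 128);
	if (!example_pool)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* mempool_destroy() is NULL-safe. */
	mempool_destroy(example_pool);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");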

File tree: 4 files changed (+20, -28 lines)


fs/ceph/addr.c
Lines changed: 11 additions & 12 deletions

@@ -862,8 +862,7 @@ static void writepages_finish(struct ceph_osd_request *req)
 
 	osd_data = osd_req_op_extent_osd_data(req, 0);
 	if (osd_data->pages_from_pool)
-		mempool_free(osd_data->pages,
-			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
+		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
 	else
 		kfree(osd_data->pages);
 	ceph_osdc_put_request(req);
@@ -955,10 +954,10 @@ static int ceph_writepages_start(struct address_space *mapping,
 	int num_ops = 0, op_idx;
 	unsigned i, pvec_pages, max_pages, locked_pages = 0;
 	struct page **pages = NULL, **data_pages;
-	mempool_t *pool = NULL;	/* Becomes non-null if mempool used */
 	struct page *page;
 	pgoff_t strip_unit_end = 0;
 	u64 offset = 0, len = 0;
+	bool from_pool = false;
 
 	max_pages = wsize >> PAGE_SHIFT;
 
@@ -1057,16 +1056,16 @@ static int ceph_writepages_start(struct address_space *mapping,
 					       sizeof(*pages),
 					       GFP_NOFS);
 			if (!pages) {
-				pool = fsc->wb_pagevec_pool;
-				pages = mempool_alloc(pool, GFP_NOFS);
+				from_pool = true;
+				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
 				BUG_ON(!pages);
 			}
 
 			len = 0;
 		} else if (page->index !=
 			   (offset + len) >> PAGE_SHIFT) {
-			if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS :
-					CEPH_OSD_MAX_OPS)) {
+			if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS :
+					CEPH_OSD_MAX_OPS)) {
 				redirty_page_for_writepage(wbc, page);
 				unlock_page(page);
 				break;
@@ -1161,7 +1160,7 @@ static int ceph_writepages_start(struct address_space *mapping,
 							offset, len);
 			osd_req_op_extent_osd_data_pages(req, op_idx,
 							 data_pages, len, 0,
-							 !!pool, false);
+							 from_pool, false);
 			osd_req_op_extent_update(req, op_idx, len);
 
 			len = 0;
@@ -1188,12 +1187,12 @@ static int ceph_writepages_start(struct address_space *mapping,
 		dout("writepages got pages at %llu~%llu\n", offset, len);
 
 		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
-						 0, !!pool, false);
+						 0, from_pool, false);
 		osd_req_op_extent_update(req, op_idx, len);
 
 		BUG_ON(op_idx + 1 != req->r_num_ops);
 
-		pool = NULL;
+		from_pool = false;
 		if (i < locked_pages) {
 			BUG_ON(num_ops <= req->r_num_ops);
 			num_ops -= req->r_num_ops;
@@ -1204,8 +1203,8 @@ static int ceph_writepages_start(struct address_space *mapping,
 			pages = kmalloc_array(locked_pages, sizeof(*pages),
 					      GFP_NOFS);
 			if (!pages) {
-				pool = fsc->wb_pagevec_pool;
-				pages = mempool_alloc(pool, GFP_NOFS);
+				from_pool = true;
+				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
 				BUG_ON(!pages);
 			}
 			memcpy(pages, data_pages + i,
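
The net effect in addr.c: the writepages path no longer carries a pool pointer around; a from_pool flag records whether the pagevec came from the global mempool, and the free side picks mempool_free() or kfree() accordingly. A condensed sketch of that shape (alloc_pagevec/free_pagevec are hypothetical helpers for illustration, not functions in this patch):

static struct page **alloc_pagevec(size_t n, bool *from_pool)
{
	/* Prefer a plain allocation; fall back to the global mempool,
	 * which may sleep with GFP_NOFS but will eventually succeed. */
	struct page **pages = kmalloc_array(n, sizeof(*pages), GFP_NOFS);

	if (!pages) {
		*from_pool = true;
		pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
	}
	return pages;
}

static void free_pagevec(struct page **pages, bool from_pool)
{
	if (from_pool)
		mempool_free(pages, ceph_wb_pagevec_pool);
	else
		kfree(pages);
}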

fs/ceph/super.c
Lines changed: 8 additions & 14 deletions

@@ -637,8 +637,6 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
 					struct ceph_options *opt)
 {
 	struct ceph_fs_client *fsc;
-	int page_count;
-	size_t size;
 	int err;
 
 	fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
@@ -686,22 +684,12 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
 	if (!fsc->cap_wq)
 		goto fail_inode_wq;
 
-	/* set up mempools */
-	err = -ENOMEM;
-	page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
-	size = sizeof (struct page *) * (page_count ? page_count : 1);
-	fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
-	if (!fsc->wb_pagevec_pool)
-		goto fail_cap_wq;
-
 	spin_lock(&ceph_fsc_lock);
 	list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list);
 	spin_unlock(&ceph_fsc_lock);
 
 	return fsc;
 
-fail_cap_wq:
-	destroy_workqueue(fsc->cap_wq);
 fail_inode_wq:
 	destroy_workqueue(fsc->inode_wq);
 fail_client:
@@ -732,8 +720,6 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
 	destroy_workqueue(fsc->inode_wq);
 	destroy_workqueue(fsc->cap_wq);
 
-	mempool_destroy(fsc->wb_pagevec_pool);
-
 	destroy_mount_options(fsc->mount_options);
 
 	ceph_destroy_client(fsc->client);
@@ -752,6 +738,7 @@ struct kmem_cache *ceph_dentry_cachep;
 struct kmem_cache *ceph_file_cachep;
 struct kmem_cache *ceph_dir_file_cachep;
 struct kmem_cache *ceph_mds_request_cachep;
+mempool_t *ceph_wb_pagevec_pool;
 
 static void ceph_inode_init_once(void *foo)
 {
@@ -796,6 +783,10 @@ static int __init init_caches(void)
 	if (!ceph_mds_request_cachep)
 		goto bad_mds_req;
 
+	ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, (CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT) * sizeof(struct page *));
+	if (!ceph_wb_pagevec_pool)
+		goto bad_pagevec_pool;
+
 	error = ceph_fscache_register();
 	if (error)
 		goto bad_fscache;
@@ -804,6 +795,8 @@ static int __init init_caches(void)
 
 bad_fscache:
 	kmem_cache_destroy(ceph_mds_request_cachep);
+bad_pagevec_pool:
+	mempool_destroy(ceph_wb_pagevec_pool);
 bad_mds_req:
 	kmem_cache_destroy(ceph_dir_file_cachep);
 bad_dir_file:
@@ -834,6 +827,7 @@ static void destroy_caches(void)
 	kmem_cache_destroy(ceph_file_cachep);
 	kmem_cache_destroy(ceph_dir_file_cachep);
 	kmem_cache_destroy(ceph_mds_request_cachep);
+	mempool_destroy(ceph_wb_pagevec_pool);
 
 	ceph_fscache_unregister();
 }
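
Sizing note: assuming 4k pages and a 64M CEPH_MAX_WRITE_SIZE (defaults consistent with the 128k figure in the commit message), each pool element is an array of 16384 page pointers at 8 bytes apiece, i.e. 128k, so a min_nr of 10 reserves roughly 1.25M once for the whole module rather than 128k per mount.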

fs/ceph/super.h
Lines changed: 0 additions & 2 deletions

@@ -118,8 +118,6 @@ struct ceph_fs_client {
 
 	struct ceph_mds_client *mdsc;
 
-	/* writeback */
-	mempool_t *wb_pagevec_pool;
 	atomic_long_t writeback_count;
 
 	struct workqueue_struct *inode_wq;

include/linux/ceph/libceph.h
Lines changed: 1 addition & 0 deletions

@@ -282,6 +282,7 @@ extern struct kmem_cache *ceph_dentry_cachep;
 extern struct kmem_cache *ceph_file_cachep;
 extern struct kmem_cache *ceph_dir_file_cachep;
 extern struct kmem_cache *ceph_mds_request_cachep;
+extern mempool_t *ceph_wb_pagevec_pool;
 
 /* ceph_common.c */
 extern bool libceph_compatible(void *data);
