Commit dbda8fe

Barry Song authored and torvalds committed
mm/hugetlb: avoid hardcoding while checking if cma is enabled
hugetlb_cma[0] can be NULL for various reasons; for example, node 0 may have
no memory.  A NULL hugetlb_cma[0] therefore doesn't necessarily mean CMA is
not enabled: gigantic pages might have been reserved on other nodes.  This
patch fixes a possible double reservation and a CMA leak.

[[email protected]: fix CONFIG_CMA=n warning]
[[email protected]: better checks before using hugetlb_cma]
  Link: http://lkml.kernel.org/r/[email protected]

Fixes: cf11e85 ("mm: hugetlb: optionally allocate gigantic hugepages using cma")
Signed-off-by: Barry Song <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Acked-by: Roman Gushchin <[email protected]>
Cc: Jonathan Cameron <[email protected]>
Cc: <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent d38a2b7 commit dbda8fe
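To see the failure mode in miniature, consider the sketch below: plain
userspace C, not kernel code, with a hypothetical four-node layout and
hypothetical sizes.  When the first node has no memory, hugetlb_cma[0] stays
NULL even though an area was reserved on node 1, so testing slot 0 misreports
CMA as disabled, while testing the requested size does not.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_NUMNODES 4

    struct cma;                                  /* opaque, as in the kernel */
    static struct cma *hugetlb_cma[MAX_NUMNODES];
    static unsigned long hugetlb_cma_size;       /* set by hugetlb_cma= parsing */

    int main(void)
    {
            /* Suppose the admin booted with hugetlb_cma=1G ... */
            hugetlb_cma_size = 1UL << 30;

            /* ... but node 0 has no memory, so only node 1 received an area. */
            hugetlb_cma[1] = (struct cma *)0x1;  /* stand-in for a real area */

            bool old_check = hugetlb_cma[0] != NULL;  /* wrongly says "off" */
            bool new_check = hugetlb_cma_size != 0;   /* matches the request */

            printf("old check: %d, new check: %d\n", old_check, new_check);
            return 0;                            /* prints: old 0, new 1 */
    }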

1 file changed: +10 −5 lines changed


mm/hugetlb.c

Lines changed: 10 additions & 5 deletions
@@ -45,7 +45,10 @@ int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
 
+#ifdef CONFIG_CMA
 static struct cma *hugetlb_cma[MAX_NUMNODES];
+#endif
+static unsigned long hugetlb_cma_size __initdata;
 
 /*
  * Minimum page order among possible hugepage sizes, set to a proper value
@@ -1235,9 +1238,10 @@ static void free_gigantic_page(struct page *page, unsigned int order)
 	 * If the page isn't allocated using the cma allocator,
 	 * cma_release() returns false.
 	 */
-	if (IS_ENABLED(CONFIG_CMA) &&
-	    cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
+#ifdef CONFIG_CMA
+	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
 		return;
+#endif
 
 	free_contig_range(page_to_pfn(page), 1 << order);
 }
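Applied, this hunk leaves free_gigantic_page() reading as follows (a sketch
assembled purely from the diff context above; nothing else changes).  With
CONFIG_CMA=n, the cma_release() call and the hugetlb_cma[] reference now
disappear at preprocessing time; the earlier IS_ENABLED() form still required
the hugetlb_cma symbol to exist at compile time, which is presumably what the
bracketed "fix CONFIG_CMA=n warning" fixup in the changelog addresses.

    static void free_gigantic_page(struct page *page, unsigned int order)
    {
            /*
             * If the page isn't allocated using the cma allocator,
             * cma_release() returns false.
             */
    #ifdef CONFIG_CMA
            if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
                    return;
    #endif

            free_contig_range(page_to_pfn(page), 1 << order);
    }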
@@ -1248,7 +1252,8 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
 	unsigned long nr_pages = 1UL << huge_page_order(h);
 
-	if (IS_ENABLED(CONFIG_CMA)) {
+#ifdef CONFIG_CMA
+	{
 		struct page *page;
 		int node;
 
@@ -1262,6 +1267,7 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 			return page;
 		}
 	}
+#endif
 
 	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
 }
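alloc_gigantic_page() gets the same treatment: the IS_ENABLED(CONFIG_CMA)
condition becomes a preprocessor guard around a bare block, so the per-node
CMA path compiles out entirely when CONFIG_CMA=n.  Stitching the two hunks
together, the function now has roughly this shape; the unchanged per-node
cma_alloc() loop between them is elided, and the second line of the signature
is inferred from the final return statement rather than shown in the diff.

    static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
                    int nid, nodemask_t *nodemask)
    {
            unsigned long nr_pages = 1UL << huge_page_order(h);

    #ifdef CONFIG_CMA
            {
                    struct page *page;
                    int node;

                    /* ... unchanged per-node CMA allocation loop, elided ... */
            }
    #endif

            return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
    }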
@@ -2571,7 +2577,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 
 	for (i = 0; i < h->max_huge_pages; ++i) {
 		if (hstate_is_gigantic(h)) {
-			if (IS_ENABLED(CONFIG_CMA) && hugetlb_cma[0]) {
+			if (hugetlb_cma_size) {
 				pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
 				break;
 			}
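The new hugetlb_cma_size test works because the variable is filled in when
the hugetlb_cma= boot parameter is parsed, long before boot-time hstate
allocation runs, so it is nonzero exactly when the user requested a CMA
reservation, no matter which nodes actually received an area.  Moving its
definition out of the #ifdef CONFIG_CMA block (first hunk) is what keeps this
test buildable on CONFIG_CMA=n kernels.  For context, the parser introduced
by cf11e85 looks roughly like this; it is not part of this diff:

    static int __init cmdline_parse_hugetlb_cma(char *p)
    {
            /* e.g. "hugetlb_cma=4G" records a 4G request in hugetlb_cma_size */
            hugetlb_cma_size = memparse(p, &p);
            return 0;
    }

    early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);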
@@ -5654,7 +5660,6 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
 }
 
 #ifdef CONFIG_CMA
-static unsigned long hugetlb_cma_size __initdata;
 static bool cma_reserve_called __initdata;
 
 static int __init cmdline_parse_hugetlb_cma(char *p)
