
Commit b455f39

VMoola authored and akpm00 committed
mm/khugepaged: convert alloc_charge_hpage() to use folios
Also remove count_memcg_page_event now that its last caller no longer uses it, and rename hpage_collapse_alloc_page() to hpage_collapse_alloc_folio(). This removes one call to compound_head() and helps convert khugepaged to use folios throughout.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
Reviewed-by: Rik van Riel <[email protected]>
Reviewed-by: Yang Shi <[email protected]>
Cc: Kefeng Wang <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent dbf85c2 commit b455f39
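At a glance, the new pattern is: allocate a folio, charge it to the mm's memcg, count the event against the folio, and only then unwrap a struct page for callers that still expect one. A minimal sketch of that flow, assuming the usual khugepaged context (the function name and parameter plumbing here are illustrative, not the commit's code verbatim):

#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/memcontrol.h>

/*
 * Illustrative sketch of the flow this commit gives alloc_charge_hpage():
 * folio-based allocation, memcg charge, then folio-based event counting.
 */
static struct page *sketch_alloc_charge_thp(struct mm_struct *mm, gfp_t gfp,
					    int node, nodemask_t *nmask)
{
	struct folio *folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);

	if (!folio)
		return NULL;

	if (mem_cgroup_charge(folio, mm, gfp)) {
		folio_put(folio);
		return NULL;
	}

	/* A folio is never a tail page, so no compound_head() call here. */
	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);

	/* Callers still expect a struct page for now. */
	return folio_page(folio, 0);
}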

File tree

2 files changed: +10 -21 lines changed

include/linux/memcontrol.h

Lines changed: 0 additions & 14 deletions
@@ -1087,15 +1087,6 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
 	local_irq_restore(flags);
 }
 
-static inline void count_memcg_page_event(struct page *page,
-					  enum vm_event_item idx)
-{
-	struct mem_cgroup *memcg = page_memcg(page);
-
-	if (memcg)
-		count_memcg_events(memcg, idx, 1);
-}
-
 static inline void count_memcg_folio_events(struct folio *folio,
 		enum vm_event_item idx, unsigned long nr)
 {
@@ -1598,11 +1589,6 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
 {
 }
 
-static inline void count_memcg_page_event(struct page *page,
-					  int idx)
-{
-}
-
 static inline void count_memcg_folio_events(struct folio *folio,
 		enum vm_event_item idx, unsigned long nr)
 {
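For reference, the surviving folio-based helper, whose opening lines appear as context in both hunks above, is defined in the same header. Its body (reconstructed from this kernel series, so treat it as a sketch) is roughly:

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

Unlike the removed page variant, it starts from a folio, so looking up the memcg needs no compound_head() resolution.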

mm/khugepaged.c

Lines changed: 10 additions & 7 deletions
@@ -888,16 +888,16 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
 }
 #endif
 
-static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
+static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
 				      nodemask_t *nmask)
 {
-	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
-	if (unlikely(!*hpage)) {
+	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
+
+	if (unlikely(!*folio)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		return false;
 	}
 
-	folio_prep_large_rmappable((struct folio *)*hpage);
 	count_vm_event(THP_COLLAPSE_ALLOC);
 	return true;
 }
@@ -1064,17 +1064,20 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
 	int node = hpage_collapse_find_target_node(cc);
 	struct folio *folio;
 
-	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
+	if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
+		*hpage = NULL;
 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
+	}
 
-	folio = page_folio(*hpage);
 	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
 		folio_put(folio);
 		*hpage = NULL;
 		return SCAN_CGROUP_CHARGE_FAIL;
 	}
-	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
 
+	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
+
+	*hpage = folio_page(folio, 0);
 	return SCAN_SUCCEED;
 }
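Note the shape of the conversion: the folio is allocated and charged first, and only at the very end is a struct page unwrapped via folio_page(folio, 0), because alloc_charge_hpage()'s callers have not been converted yet. A hedged caller-side sketch (the surrounding variable names are illustrative, not taken from this commit):

	struct page *hpage;
	int result;

	/* cc is the per-scan collapse_control, as in the hunk header above. */
	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		return result;

	/* On success, hpage is the head page of the charged PMD-sized folio. */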
