Skip to content

Commit 5d91f31

Browse files
shakeelb authored and torvalds committed
mm: swap: fix vmstats for huge pages
Many of the callbacks called by pagevec_lru_move_fn() does not correctly update the vmstats for huge pages. Fix that. Also __pagevec_lru_add_fn() use the irq-unsafe alternative to update the stat as the irqs are already disabled. Signed-off-by: Shakeel Butt <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Acked-by: Johannes Weiner <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Linus Torvalds <[email protected]>
1 parent d483a5d commit 5d91f31

File tree

1 file changed

+8
-6
lines changed

1 file changed

+8
-6
lines changed

mm/swap.c

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -241,7 +241,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
241241
del_page_from_lru_list(page, lruvec, page_lru(page));
242242
ClearPageActive(page);
243243
add_page_to_lru_list_tail(page, lruvec, page_lru(page));
244-
(*pgmoved)++;
244+
(*pgmoved) += hpage_nr_pages(page);
245245
}
246246
}
247247

@@ -327,7 +327,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
327327
add_page_to_lru_list(page, lruvec, lru);
328328
trace_mm_lru_activate(page);
329329

330-
__count_vm_event(PGACTIVATE);
330+
__count_vm_events(PGACTIVATE, hpage_nr_pages(page));
331331
}
332332
}
333333

@@ -529,6 +529,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
529529
{
530530
int lru;
531531
bool active;
532+
int nr_pages = hpage_nr_pages(page);
532533

533534
if (!PageLRU(page))
534535
return;
@@ -561,11 +562,11 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
561562
* We moves tha page into tail of inactive.
562563
*/
563564
add_page_to_lru_list_tail(page, lruvec, lru);
564-
__count_vm_event(PGROTATED);
565+
__count_vm_events(PGROTATED, nr_pages);
565566
}
566567

567568
if (active)
568-
__count_vm_event(PGDEACTIVATE);
569+
__count_vm_events(PGDEACTIVATE, nr_pages);
569570
}
570571

571572
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
@@ -960,6 +961,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
960961
{
961962
enum lru_list lru;
962963
int was_unevictable = TestClearPageUnevictable(page);
964+
int nr_pages = hpage_nr_pages(page);
963965

964966
VM_BUG_ON_PAGE(PageLRU(page), page);
965967

@@ -995,13 +997,13 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
995997
if (page_evictable(page)) {
996998
lru = page_lru(page);
997999
if (was_unevictable)
998-
count_vm_event(UNEVICTABLE_PGRESCUED);
1000+
__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
9991001
} else {
10001002
lru = LRU_UNEVICTABLE;
10011003
ClearPageActive(page);
10021004
SetPageUnevictable(page);
10031005
if (!was_unevictable)
1004-
count_vm_event(UNEVICTABLE_PGCULLED);
1006+
__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
10051007
}
10061008

10071009
add_page_to_lru_list(page, lruvec, lru);

0 commit comments

Comments
 (0)