
Commit e52401e

shakeelbakpm00 authored and committed
memcg: make count_memcg_events re-entrant safe against irqs
Let's make count_memcg_events re-entrant safe against irqs. The only thing
needed is to convert the usage of __this_cpu_add() to this_cpu_add(). In
addition, with re-entrant safety, there is no need to disable irqs. Also add
warnings for in_nmi() as it is not safe against nmi context.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Shakeel Butt <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Alexei Starovoitov <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent: 8814e3b
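For context on why this one substitution is sufficient, here is an illustrative
sketch of the difference between the two per-CPU primitives. This is a
simplification, not the kernel's actual implementation; the real definitions
live in include/linux/percpu-defs.h and per-architecture headers.

/*
 * Illustrative sketch only.
 *
 * __this_cpu_add(var, n) may expand to a plain read-modify-write and
 * requires the caller to already exclude irqs:
 *
 *	tmp = var;		an irq that updates 'var' here...
 *	var = tmp + n;		...is silently lost on the store below
 *
 * this_cpu_add(var, n) must be re-entrant against irqs: on x86 it
 * compiles to a single gs-relative "add" instruction, and on
 * architectures without such an instruction the generic fallback wraps
 * the read-modify-write in raw_local_irq_save()/restore(). Either way
 * an interrupt cannot split the update, so callers no longer need
 * local_irq_save() themselves. None of this protects against NMIs,
 * which is why the message calls out in_nmi() as still unsafe.
 */
this_cpu_add(memcg->vmstats_percpu->events[i], count);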

File tree

5 files changed: +19 -36 lines

include/linux/memcontrol.h (2 additions, 19 deletions)

@@ -942,19 +942,8 @@ static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
 	local_irq_restore(flags);
 }
 
-void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
-			  unsigned long count);
-
-static inline void count_memcg_events(struct mem_cgroup *memcg,
-				      enum vm_event_item idx,
-				      unsigned long count)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__count_memcg_events(memcg, idx, count);
-	local_irq_restore(flags);
-}
+void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+			unsigned long count);
 
 static inline void count_memcg_folio_events(struct folio *folio,
 		enum vm_event_item idx, unsigned long nr)

@@ -1418,12 +1407,6 @@ static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
 }
 
 static inline void count_memcg_events(struct mem_cgroup *memcg,
-				      enum vm_event_item idx,
-				      unsigned long count)
-{
-}
-
-static inline void __count_memcg_events(struct mem_cgroup *memcg,
 					enum vm_event_item idx,
 					unsigned long count)
 {
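What the header change means for callers, as a hypothetical example.
charge_example() and its use of PGFAULT are invented for illustration and are
not part of this commit.

/* Hypothetical caller, for illustration only. */
static void charge_example(struct mem_cgroup *memcg)
{
	/*
	 * Previously the inline count_memcg_events() wrapper did
	 * local_irq_save()/__count_memcg_events()/local_irq_restore().
	 * After this commit the out-of-line function is itself
	 * re-entrant safe, so one direct call works from any non-NMI
	 * context, with irqs enabled or disabled.
	 */
	count_memcg_events(memcg, PGFAULT, 1);
}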

mm/memcontrol-v1.c (3 additions, 3 deletions)

@@ -512,9 +512,9 @@ static void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
 {
 	/* pagein of a big page is an event. So, ignore page size */
 	if (nr_pages > 0)
-		__count_memcg_events(memcg, PGPGIN, 1);
+		count_memcg_events(memcg, PGPGIN, 1);
 	else {
-		__count_memcg_events(memcg, PGPGOUT, 1);
+		count_memcg_events(memcg, PGPGOUT, 1);
 		nr_pages = -nr_pages; /* for event */
 	}
 
@@ -689,7 +689,7 @@ void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__count_memcg_events(memcg, PGPGOUT, pgpgout);
+	count_memcg_events(memcg, PGPGOUT, pgpgout);
 	__this_cpu_add(memcg->events_percpu->nr_page_events, nr_memory);
 	memcg1_check_events(memcg, nid);
 	local_irq_restore(flags);
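Worth noting about the second hunk: the local_irq_save()/restore() pair stays,
because the surrounding lines still require it; only the count_memcg_events()
call no longer does. The same lines again, with my annotations (the comments
are not in the source):

	local_irq_save(flags);				/* still needed... */
	count_memcg_events(memcg, PGPGOUT, pgpgout);	/* ...though no longer for this line */
	__this_cpu_add(memcg->events_percpu->nr_page_events, nr_memory);	/* ...but for this one */
	memcg1_check_events(memcg, nid);
	local_irq_restore(flags);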

mm/memcontrol.c (3 additions, 3 deletions)

@@ -823,12 +823,12 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 }
 
 /**
- * __count_memcg_events - account VM events in a cgroup
+ * count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
  * @idx: the event item
  * @count: the number of events that occurred
  */
-void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 			unsigned long count)
 {
 	int i = memcg_events_index(idx);

@@ -842,7 +842,7 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 
 	cpu = get_cpu();
 
-	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
+	this_cpu_add(memcg->vmstats_percpu->events[i], count);
 	memcg_rstat_updated(memcg, count, cpu);
 	trace_count_memcg_events(memcg, idx, count);
 
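As a reading aid for the second hunk, here are the surrounding calls with
added commentary; the code lines are from the diff, the comments are mine:

	cpu = get_cpu();	/* disables preemption, returns this CPU's id; irqs stay enabled */

	/* now safe even if an irq on this CPU performs the same update: */
	this_cpu_add(memcg->vmstats_percpu->events[i], count);
	memcg_rstat_updated(memcg, count, cpu);
	trace_count_memcg_events(memcg, idx, count);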

mm/swap.c (4 additions, 4 deletions)

@@ -309,7 +309,7 @@ static void lru_activate(struct lruvec *lruvec, struct folio *folio)
 	trace_mm_lru_activate(folio);
 
 	__count_vm_events(PGACTIVATE, nr_pages);
-	__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
+	count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
 }
 
 #ifdef CONFIG_SMP

@@ -581,7 +581,7 @@ static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
 
 	if (active) {
 		__count_vm_events(PGDEACTIVATE, nr_pages);
-		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
+		count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
 				nr_pages);
 	}
 }

@@ -599,7 +599,7 @@ static void lru_deactivate(struct lruvec *lruvec, struct folio *folio)
 	lruvec_add_folio(lruvec, folio);
 
 	__count_vm_events(PGDEACTIVATE, nr_pages);
-	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
+	count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
 }
 
 static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)

@@ -625,7 +625,7 @@ static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
 	lruvec_add_folio(lruvec, folio);
 
 	__count_vm_events(PGLAZYFREE, nr_pages);
-	__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
+	count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
 }
 
 /*

mm/vmscan.c (7 additions, 7 deletions)

@@ -2028,7 +2028,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 	item = PGSCAN_KSWAPD + reclaimer_offset(sc);
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(item, nr_scanned);
-	__count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
+	count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
 	__count_vm_events(PGSCAN_ANON + file, nr_scanned);
 
 	spin_unlock_irq(&lruvec->lru_lock);

@@ -2048,7 +2048,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 	item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(item, nr_reclaimed);
-	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
+	count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
 	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
 	spin_unlock_irq(&lruvec->lru_lock);
 

@@ -2138,7 +2138,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(PGREFILL, nr_scanned);
-	__count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
+	count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
 
 	spin_unlock_irq(&lruvec->lru_lock);
 

@@ -2195,7 +2195,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
 
 	__count_vm_events(PGDEACTIVATE, nr_deactivate);
-	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
+	count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
 
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
 	spin_unlock_irq(&lruvec->lru_lock);

@@ -4612,8 +4612,8 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
 		__count_vm_events(item, isolated);
 		__count_vm_events(PGREFILL, sorted);
 	}
-	__count_memcg_events(memcg, item, isolated);
-	__count_memcg_events(memcg, PGREFILL, sorted);
+	count_memcg_events(memcg, item, isolated);
+	count_memcg_events(memcg, PGREFILL, sorted);
 	__count_vm_events(PGSCAN_ANON + type, isolated);
 	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH,
 				    scanned, skipped, isolated,

@@ -4763,7 +4763,7 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 	item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(item, reclaimed);
-	__count_memcg_events(memcg, item, reclaimed);
+	count_memcg_events(memcg, item, reclaimed);
 	__count_vm_events(PGSTEAL_ANON + type, reclaimed);
 
 	spin_unlock_irq(&lruvec->lru_lock);

Comments (0)