
Commit c716353

shakeelb authored and akpm00 committed
memcg: move preempt disable to callers of memcg_rstat_updated
Move the explicit preempt-disable code to the callers of memcg_rstat_updated() and remove memcg_stats_lock() and the related helpers, which were there to ensure that preemption was disabled across the stats updates. Now that the stats update functions disable preemption explicitly themselves, those helpers are no longer needed.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Shakeel Butt <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Alexei Starovoitov <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 8a4b42b commit c716353

File tree: 1 file changed (+19, -55 lines)


mm/memcontrol.c

Lines changed: 19 additions & 55 deletions
@@ -555,48 +555,22 @@ static u64 flush_last_time;
 
 #define FLUSH_TIME (2UL*HZ)
 
-/*
- * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
- * not rely on this as part of an acquired spinlock_t lock. These functions are
- * never used in hardirq context on PREEMPT_RT and therefore disabling preemtion
- * is sufficient.
- */
-static void memcg_stats_lock(void)
-{
-	preempt_disable_nested();
-	VM_WARN_ON_IRQS_ENABLED();
-}
-
-static void __memcg_stats_lock(void)
-{
-	preempt_disable_nested();
-}
-
-static void memcg_stats_unlock(void)
-{
-	preempt_enable_nested();
-}
-
-
 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
 {
 	return atomic64_read(&vmstats->stats_updates) >
 		MEMCG_CHARGE_BATCH * num_online_cpus();
 }
 
-static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
+static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
+				       int cpu)
 {
 	struct memcg_vmstats_percpu __percpu *statc_pcpu;
 	struct memcg_vmstats_percpu *statc;
-	int cpu;
 	unsigned int stats_updates;
 
 	if (!val)
 		return;
 
-	/* Don't assume callers have preemption disabled. */
-	cpu = get_cpu();
-
 	cgroup_rstat_updated(memcg->css.cgroup, cpu);
 	statc_pcpu = memcg->vmstats_percpu;
 	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
@@ -617,7 +591,6 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 		stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
 		atomic64_add(stats_updates, &statc->vmstats->stats_updates);
 	}
-	put_cpu();
 }
 
 static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
@@ -715,19 +688,22 @@ void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
 		       int val)
 {
 	int i = memcg_stats_index(idx);
+	int cpu;
 
 	if (mem_cgroup_disabled())
 		return;
 
 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 		return;
 
-	memcg_stats_lock();
+	cpu = get_cpu();
+
 	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
 	val = memcg_state_val_in_pages(idx, val);
-	memcg_rstat_updated(memcg, val);
+	memcg_rstat_updated(memcg, val, cpu);
 	trace_mod_memcg_state(memcg, idx, val);
-	memcg_stats_unlock();
+
+	put_cpu();
 }
 
 #ifdef CONFIG_MEMCG_V1
@@ -756,31 +732,15 @@ static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
 	struct mem_cgroup_per_node *pn;
 	struct mem_cgroup *memcg;
 	int i = memcg_stats_index(idx);
+	int cpu;
 
 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 		return;
 
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 	memcg = pn->memcg;
 
-	/*
-	 * The caller from rmap relies on disabled preemption because they never
-	 * update their counter from in-interrupt context. For these two
-	 * counters we check that the update is never performed from an
-	 * interrupt context while other caller need to have disabled interrupt.
-	 */
-	__memcg_stats_lock();
-	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
-		switch (idx) {
-		case NR_ANON_MAPPED:
-		case NR_FILE_MAPPED:
-		case NR_ANON_THPS:
-			WARN_ON_ONCE(!in_task());
-			break;
-		default:
-			VM_WARN_ON_IRQS_ENABLED();
-		}
-	}
+	cpu = get_cpu();
 
 	/* Update memcg */
 	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
@@ -789,9 +749,10 @@ static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
 	__this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
 
 	val = memcg_state_val_in_pages(idx, val);
-	memcg_rstat_updated(memcg, val);
+	memcg_rstat_updated(memcg, val, cpu);
 	trace_mod_memcg_lruvec_state(memcg, idx, val);
-	memcg_stats_unlock();
+
+	put_cpu();
 }
 
 /**
@@ -871,18 +832,21 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 			  unsigned long count)
 {
 	int i = memcg_events_index(idx);
+	int cpu;
 
 	if (mem_cgroup_disabled())
 		return;
 
 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 		return;
 
-	memcg_stats_lock();
+	cpu = get_cpu();
+
 	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
-	memcg_rstat_updated(memcg, count);
+	memcg_rstat_updated(memcg, count, cpu);
 	trace_count_memcg_events(memcg, idx, count);
-	memcg_stats_unlock();
+
+	put_cpu();
 }
 
 unsigned long memcg_events(struct mem_cgroup *memcg, int event)
