@@ -555,48 +555,22 @@ static u64 flush_last_time;
 
 #define FLUSH_TIME (2UL*HZ)
 
-/*
- * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
- * not rely on this as part of an acquired spinlock_t lock. These functions are
- * never used in hardirq context on PREEMPT_RT and therefore disabling preemtion
- * is sufficient.
- */
-static void memcg_stats_lock(void)
-{
-	preempt_disable_nested();
-	VM_WARN_ON_IRQS_ENABLED();
-}
-
-static void __memcg_stats_lock(void)
-{
-	preempt_disable_nested();
-}
-
-static void memcg_stats_unlock(void)
-{
-	preempt_enable_nested();
-}
-
-
 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
 {
 	return atomic64_read(&vmstats->stats_updates) >
 		MEMCG_CHARGE_BATCH * num_online_cpus();
 }
 
-static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
+static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
+				       int cpu)
 {
 	struct memcg_vmstats_percpu __percpu *statc_pcpu;
 	struct memcg_vmstats_percpu *statc;
-	int cpu;
 	unsigned int stats_updates;
 
 	if (!val)
 		return;
 
-	/* Don't assume callers have preemption disabled. */
-	cpu = get_cpu();
-
 	cgroup_rstat_updated(memcg->css.cgroup, cpu);
 	statc_pcpu = memcg->vmstats_percpu;
 	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
@@ -617,7 +591,6 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 		stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
 		atomic64_add(stats_updates, &statc->vmstats->stats_updates);
 	}
-	put_cpu();
 }
 
 static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
@@ -715,19 +688,22 @@ void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
 		       int val)
 {
 	int i = memcg_stats_index(idx);
+	int cpu;
 
 	if (mem_cgroup_disabled())
 		return;
 
 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 		return;
 
-	memcg_stats_lock();
+	cpu = get_cpu();
+
 	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
 	val = memcg_state_val_in_pages(idx, val);
-	memcg_rstat_updated(memcg, val);
+	memcg_rstat_updated(memcg, val, cpu);
 	trace_mod_memcg_state(memcg, idx, val);
-	memcg_stats_unlock();
+
+	put_cpu();
 }
 
 #ifdef CONFIG_MEMCG_V1
@@ -756,31 +732,15 @@ static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
 	struct mem_cgroup_per_node *pn;
 	struct mem_cgroup *memcg;
 	int i = memcg_stats_index(idx);
+	int cpu;
 
 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 		return;
 
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 	memcg = pn->memcg;
 
-	/*
-	 * The caller from rmap relies on disabled preemption because they never
-	 * update their counter from in-interrupt context. For these two
-	 * counters we check that the update is never performed from an
-	 * interrupt context while other caller need to have disabled interrupt.
-	 */
-	__memcg_stats_lock();
-	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
-		switch (idx) {
-		case NR_ANON_MAPPED:
-		case NR_FILE_MAPPED:
-		case NR_ANON_THPS:
-			WARN_ON_ONCE(!in_task());
-			break;
-		default:
-			VM_WARN_ON_IRQS_ENABLED();
-		}
-	}
+	cpu = get_cpu();
 
 	/* Update memcg */
 	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
@@ -789,9 +749,10 @@ static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
 	__this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
 
 	val = memcg_state_val_in_pages(idx, val);
-	memcg_rstat_updated(memcg, val);
+	memcg_rstat_updated(memcg, val, cpu);
 	trace_mod_memcg_lruvec_state(memcg, idx, val);
-	memcg_stats_unlock();
+
+	put_cpu();
 }
 
 /**
@@ -871,18 +832,21 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 			  unsigned long count)
 {
 	int i = memcg_events_index(idx);
+	int cpu;
 
 	if (mem_cgroup_disabled())
 		return;
 
 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 		return;
 
-	memcg_stats_lock();
+	cpu = get_cpu();
+
 	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
-	memcg_rstat_updated(memcg, count);
+	memcg_rstat_updated(memcg, count, cpu);
 	trace_count_memcg_events(memcg, idx, count);
-	memcg_stats_unlock();
+
+	put_cpu();
 }
 
 unsigned long memcg_events(struct mem_cgroup *memcg, int event)
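
The caller pattern after this change is the same in __mod_memcg_state(), __mod_memcg_lruvec_state() and __count_memcg_events(): pin the current CPU with get_cpu(), do the per-CPU update, hand the pinned cpu down to memcg_rstat_updated() instead of letting it call get_cpu() itself, then put_cpu(). The userspace sketch below only mirrors that shape for reference; get_cpu()/put_cpu() are mapped onto sched_getcpu() and a no-op here, and stat_slot, rstat_updated() and mod_state() are made-up stand-ins, not kernel APIs.

	#define _GNU_SOURCE
	#include <sched.h>   /* sched_getcpu() */
	#include <stdio.h>

	/* Illustrative stand-ins: the real get_cpu() also disables preemption. */
	#define get_cpu()  sched_getcpu()
	#define put_cpu()  ((void)0)

	#define NSLOTS 512                         /* pretend per-CPU storage */
	struct stat_slot { long state; long pending; };
	static struct stat_slot slot[NSLOTS];

	/* Analogue of memcg_rstat_updated(memcg, val, cpu): cpu is passed in. */
	static void rstat_updated(int val, int cpu)
	{
		if (!val)
			return;
		slot[cpu % NSLOTS].pending += val;
	}

	/* Analogue of the reworked __mod_memcg_state(): pin a CPU, pass it down. */
	static void mod_state(int val)
	{
		int cpu = get_cpu();

		slot[cpu % NSLOTS].state += val;   /* __this_cpu_add() in the kernel */
		rstat_updated(val, cpu);           /* no get_cpu()/put_cpu() inside */

		put_cpu();
	}

	int main(void)
	{
		long state = 0, pending = 0;

		mod_state(3);
		mod_state(0);                      /* early-return path, nothing recorded */

		for (int i = 0; i < NSLOTS; i++) {
			state += slot[i].state;
			pending += slot[i].pending;
		}
		printf("state=%ld pending=%ld\n", state, pending);   /* prints 3 and 3 */
		return 0;
	}

The point of the shape is that the leaf helper no longer contains any preemption control of its own: whoever already holds the CPU passes it down, so nesting or batching callers do not pay for a second get_cpu()/put_cpu() pair.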
0 commit comments