@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 	rcu_read_unlock();
 }
 
-/*
- * mod_objcg_mlstate() may be called with irq enabled, so
- * mod_memcg_lruvec_state() should be used.
- */
-static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
-				     struct pglist_data *pgdat,
-				     enum node_stat_item idx, int nr)
-{
-	struct mem_cgroup *memcg;
-	struct lruvec *lruvec;
-
-	rcu_read_lock();
-	memcg = obj_cgroup_memcg(objcg);
-	lruvec = mem_cgroup_lruvec(memcg, pgdat);
-	mod_memcg_lruvec_state(lruvec, idx, nr);
-	rcu_read_unlock();
-}
-
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 }
 #endif
 
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
-	struct memcg_stock_pcp *stock;
-
-	if (likely(in_task())) {
-		*pflags = 0UL;
-		preempt_disable();
-		stock = this_cpu_ptr(&memcg_stock);
-		return &stock->task_obj;
-	}
-
-	local_irq_save(*pflags);
-	stock = this_cpu_ptr(&memcg_stock);
-	return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
-	if (likely(in_task()))
-		preempt_enable();
-	else
-		local_irq_restore(flags);
-}
-
 /**
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
@@ -2816,6 +2763,59 @@ static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
  */
 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
 
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access content from object stock is slow.
+ * To optimize for user context access, there are now two object stocks for
+ * task context and interrupt context access respectively.
+ *
+ * The task context object stock can be accessed by disabling preemption only
+ * which is cheap in non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupt. User context code can
+ * access interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+	struct memcg_stock_pcp *stock;
+
+	if (likely(in_task())) {
+		*pflags = 0UL;
+		preempt_disable();
+		stock = this_cpu_ptr(&memcg_stock);
+		return &stock->task_obj;
+	}
+
+	local_irq_save(*pflags);
+	stock = this_cpu_ptr(&memcg_stock);
+	return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+	if (likely(in_task()))
+		preempt_enable();
+	else
+		local_irq_restore(flags);
+}
+
+/*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+				     struct pglist_data *pgdat,
+				     enum node_stat_item idx, int nr)
+{
+	struct mem_cgroup *memcg;
+	struct lruvec *lruvec;
+
+	rcu_read_lock();
+	memcg = obj_cgroup_memcg(objcg);
+	lruvec = mem_cgroup_lruvec(memcg, pgdat);
+	mod_memcg_lruvec_state(lruvec, idx, nr);
+	rcu_read_unlock();
+}
+
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
 				 gfp_t gfp, bool new_page)
 {
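
Taken together, the helpers moved above establish a bracketing pattern: a caller obtains the context-appropriate per-CPU object stock with get_obj_stock(), updates it, and drops the protection with put_obj_stock(). The minimal sketch below illustrates that calling convention under the definitions in this file; the function name example_mod_objcg_state() is hypothetical and not part of this commit, and a real caller would batch updates into the returned obj_stock rather than issuing the slow path unconditionally.

/*
 * Hypothetical caller, for illustration only: bracket per-CPU object
 * stock access with get_obj_stock()/put_obj_stock(). In task context
 * this costs a preempt_disable()/preempt_enable() pair; in interrupt
 * context it falls back to local_irq_save()/local_irq_restore().
 */
static void example_mod_objcg_state(struct obj_cgroup *objcg,
				    struct pglist_data *pgdat,
				    enum node_stat_item idx, int nr)
{
	unsigned long flags;
	struct obj_stock *stock = get_obj_stock(&flags);

	/*
	 * A real caller would consult or update cached counters in
	 * *stock here; this sketch simply issues the uncached update.
	 * mod_objcg_mlstate() uses the irq-safe stat accessor, so it
	 * is valid from both task and interrupt context.
	 */
	(void)stock;
	mod_objcg_mlstate(objcg, pgdat, idx, nr);

	put_obj_stock(flags);
}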