@@ -2033,11 +2033,54 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
 	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
 }
 
+#else /* CONFIG_SLAB_OBJ_EXT */
+
+static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+			       gfp_t gfp, bool new_slab)
+{
+	return 0;
+}
+
+static inline void free_slab_obj_exts(struct slab *slab)
+{
+}
+
+static inline bool need_slab_obj_ext(void)
+{
+	return false;
+}
+
+static inline struct slabobj_ext *
+prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
+{
+	return NULL;
+}
+
+#endif /* CONFIG_SLAB_OBJ_EXT */
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
+static inline void
+alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
+{
+	if (need_slab_obj_ext()) {
+		struct slabobj_ext *obj_exts;
+
+		obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
+		/*
+		 * Currently obj_exts is used only for allocation profiling.
+		 * If other users appear then mem_alloc_profiling_enabled()
+		 * check should be added before alloc_tag_add().
+		 */
+		if (likely(obj_exts))
+			alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
+	}
+}
+
 static inline void
 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 			     int objects)
 {
-#ifdef CONFIG_MEM_ALLOC_PROFILING
 	struct slabobj_ext *obj_exts;
 	int i;
 
@@ -2053,30 +2096,13 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 
 		alloc_tag_sub(&obj_exts[off].ref, s->size);
 	}
-#endif
-}
-
-#else /* CONFIG_SLAB_OBJ_EXT */
-
-static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
-			       gfp_t gfp, bool new_slab)
-{
-	return 0;
-}
-
-static inline void free_slab_obj_exts(struct slab *slab)
-{
 }
 
-static inline bool need_slab_obj_ext(void)
-{
-	return false;
-}
+#else /* CONFIG_MEM_ALLOC_PROFILING */
 
-static inline struct slabobj_ext *
-prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
+static inline void
+alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 {
-	return NULL;
 }
 
 static inline void
@@ -2085,7 +2111,8 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 {
 }
 
-#endif /* CONFIG_SLAB_OBJ_EXT */
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
 
 #ifdef CONFIG_MEMCG_KMEM
 
@@ -3944,20 +3971,7 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, init_flags);
 		kmsan_slab_alloc(s, p[i], init_flags);
-#ifdef CONFIG_MEM_ALLOC_PROFILING
-		if (need_slab_obj_ext()) {
-			struct slabobj_ext *obj_exts;
-
-			obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
-			/*
-			 * Currently obj_exts is used only for allocation profiling.
-			 * If other users appear then mem_alloc_profiling_enabled()
-			 * check should be added before alloc_tag_add().
-			 */
-			if (likely(obj_exts))
-				alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
-		}
-#endif
+		alloc_tagging_slab_alloc_hook(s, p[i], flags);
 	}
 
 	return memcg_slab_post_alloc_hook(s, lru, flags, size, p);