@@ -962,19 +962,19 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 #ifdef CONFIG_STACKDEPOT
-static noinline depot_stack_handle_t set_track_prepare(void)
+static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
 {
 	depot_stack_handle_t handle;
 	unsigned long entries[TRACK_ADDRS_COUNT];
 	unsigned int nr_entries;
 
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
-	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+	handle = stack_depot_save(entries, nr_entries, gfp_flags);
 
 	return handle;
 }
 #else
-static inline depot_stack_handle_t set_track_prepare(void)
+static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
 {
 	return 0;
 }
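
Both definitions of set_track_prepare() change in lockstep here: the CONFIG_STACKDEPOT implementation stops hardcoding GFP_NOWAIT and forwards the caller's mask to stack_depot_save(), while the !CONFIG_STACKDEPOT stub adopts the same signature so every call site compiles unchanged either way. A minimal userspace sketch of that real-function-versus-stub pattern, with CONFIG_FEATURE and feature_prepare() as hypothetical stand-ins rather than kernel APIs:

    /* pattern.c - sketch of the "real function vs. same-signature stub"
     * idiom used above; all names here are illustrative, not from slub.c. */
    #include <stdio.h>

    #ifdef CONFIG_FEATURE
    static unsigned long feature_prepare(unsigned int flags)
    {
            /* the real variant would do work that honors the caller's flags */
            return flags ? 1UL : 0UL;
    }
    #else
    /* The stub keeps the exact signature, so callers need no #ifdef. */
    static inline unsigned long feature_prepare(unsigned int flags)
    {
            (void)flags;            /* unused in the stub */
            return 0;
    }
    #endif

    int main(void)
    {
            printf("handle = %lu\n", feature_prepare(1u));
            return 0;
    }

Building once plain and once with -DCONFIG_FEATURE exercises both branches, the same way slub.c builds with or without stack depot support.
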
@@ -996,9 +996,9 @@ static void set_track_update(struct kmem_cache *s, void *object,
 }
 
 static __always_inline void set_track(struct kmem_cache *s, void *object,
-				      enum track_item alloc, unsigned long addr)
+				      enum track_item alloc, unsigned long addr, gfp_t gfp_flags)
 {
-	depot_stack_handle_t handle = set_track_prepare();
+	depot_stack_handle_t handle = set_track_prepare(gfp_flags);
 
 	set_track_update(s, object, alloc, addr, handle);
 }
@@ -1140,7 +1140,12 @@ static void object_err(struct kmem_cache *s, struct slab *slab,
 		return;
 
 	slab_bug(s, reason);
-	print_trailer(s, slab, object);
+	if (!object || !check_valid_pointer(s, slab, object)) {
+		print_slab_info(slab);
+		pr_err("Invalid pointer 0x%p\n", object);
+	} else {
+		print_trailer(s, slab, object);
+	}
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 
 	WARN_ON(1);
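
A note on this hunk: print_trailer() dumps memory around the object (redzone and tracking areas), so reaching it with a NULL or out-of-slab pointer risked faulting inside the error reporter itself. The added branch validates the pointer with check_valid_pointer() first and, when that fails, reports only the slab metadata plus the raw pointer value. A small sketch of the same validate-before-dereference guard, where struct pool, check_range() and report() are invented stand-ins for the slab structures above:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct pool { char *base; size_t size; };   /* stand-in for struct slab */

    /* stand-in for check_valid_pointer(): is obj inside the pool? */
    static bool check_range(const struct pool *p, const void *obj)
    {
            const char *c = obj;
            return c >= p->base && c < p->base + p->size;
    }

    static void report(const struct pool *p, const void *obj)
    {
            if (!obj || !check_range(p, obj)) {
                    fprintf(stderr, "Invalid pointer %p\n", obj);
                    return;                 /* never dereference it */
            }
            fprintf(stderr, "object %p: safe to dump surroundings\n", obj);
    }

    int main(void)
    {
            char buf[64];
            struct pool p = { buf, sizeof(buf) };

            report(&p, buf + 8);            /* valid: inside the pool */
            report(&p, (void *)0x10);       /* rejected before any access */
            return 0;
    }
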
@@ -1921,9 +1926,9 @@ static inline bool free_debug_processing(struct kmem_cache *s,
 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
 static inline int check_object(struct kmem_cache *s, struct slab *slab,
 			void *object, u8 val) { return 1; }
-static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
+static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; }
 static inline void set_track(struct kmem_cache *s, void *object,
-			     enum track_item alloc, unsigned long addr) {}
+			     enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {}
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 			    struct slab *slab) {}
 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
@@ -3876,9 +3881,14 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			 * For debug caches here we had to go through
 			 * alloc_single_from_partial() so just store the
 			 * tracking info and return the object.
+			 *
+			 * Due to disabled preemption we need to disallow
+			 * blocking. The flags are further adjusted by
+			 * gfp_nested_mask() in stack_depot itself.
 			 */
 			if (s->flags & SLAB_STORE_USER)
-				set_track(s, freelist, TRACK_ALLOC, addr);
+				set_track(s, freelist, TRACK_ALLOC, addr,
+					  gfpflags & ~(__GFP_DIRECT_RECLAIM));
 
 			return freelist;
 		}
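
The new comment block carries the rationale: this path runs with preemption disabled, so the stack depot allocation must not block in direct reclaim. Clearing __GFP_DIRECT_RECLAIM preserves the rest of the caller's gfp context (waking kswapd remains permitted here, unlike in free_to_partial_list() further down), and stack depot's gfp_nested_mask() restricts the flags again internally. A trivial demonstration of the masking arithmetic, using made-up bit values rather than the kernel's real gfp definitions:

    #include <stdio.h>

    /* Hypothetical bit values for illustration only; the real masks live
     * in the kernel's gfp headers and differ across versions. */
    #define __GFP_DIRECT_RECLAIM 0x400u
    #define __GFP_KSWAPD_RECLAIM 0x800u
    #define GFP_KERNEL           (__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

    int main(void)
    {
            unsigned int gfpflags = GFP_KERNEL;

            /* mirror the diff: forbid blocking, keep the kswapd wakeup bit */
            unsigned int masked = gfpflags & ~__GFP_DIRECT_RECLAIM;

            printf("before %#x, after %#x\n", gfpflags, masked);
            return 0;
    }

Compiled and run, this prints "before 0xc00, after 0x800": only the blocking bit is gone.
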
@@ -3910,7 +3920,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			goto new_objects;
 
 		if (s->flags & SLAB_STORE_USER)
-			set_track(s, freelist, TRACK_ALLOC, addr);
+			set_track(s, freelist, TRACK_ALLOC, addr,
+				  gfpflags & ~(__GFP_DIRECT_RECLAIM));
 
 		return freelist;
 	}
@@ -4421,8 +4432,12 @@ static noinline void free_to_partial_list(
 	unsigned long flags;
 	depot_stack_handle_t handle = 0;
 
+	/*
+	 * We cannot use GFP_NOWAIT as there are callsites where waking up
+	 * kswapd could deadlock
+	 */
 	if (s->flags & SLAB_STORE_USER)
-		handle = set_track_prepare();
+		handle = set_track_prepare(__GFP_NOWARN);
 
 	spin_lock_irqsave(&n->list_lock, flags);
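
The free path is stricter than the allocation paths above: per the added comment, some callers reach free_to_partial_list() in contexts where even waking kswapd can deadlock, and GFP_NOWAIT still includes __GFP_KSWAPD_RECLAIM. Passing bare __GFP_NOWARN therefore hands stack depot a mask with no reclaim bits at all while keeping allocation-failure warnings suppressed. If the depot cannot allocate under those constraints, stack_depot_save() returns a zero handle and the tracking record is simply stored without a stack trace, degrading gracefully instead of deadlocking.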