@@ -726,25 +726,48 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 	return kasan_reset_tag(p + alloc);
 }
 
-static void noinline set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, unsigned long addr)
-{
-	struct track *p = get_track(s, object, alloc);
-
 #ifdef CONFIG_STACKDEPOT
+static noinline depot_stack_handle_t set_track_prepare(void)
+{
+	depot_stack_handle_t handle;
 	unsigned long entries[TRACK_ADDRS_COUNT];
 	unsigned int nr_entries;
 
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
-	p->handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+
+	return handle;
+}
+#else
+static inline depot_stack_handle_t set_track_prepare(void)
+{
+	return 0;
+}
 #endif
 
+static void set_track_update(struct kmem_cache *s, void *object,
+			     enum track_item alloc, unsigned long addr,
+			     depot_stack_handle_t handle)
+{
+	struct track *p = get_track(s, object, alloc);
+
+#ifdef CONFIG_STACKDEPOT
+	p->handle = handle;
+#endif
 	p->addr = addr;
 	p->cpu = smp_processor_id();
 	p->pid = current->pid;
 	p->when = jiffies;
 }
 
+static __always_inline void set_track(struct kmem_cache *s, void *object,
+				      enum track_item alloc, unsigned long addr)
+{
+	depot_stack_handle_t handle = set_track_prepare();
+
+	set_track_update(s, object, alloc, addr, handle);
+}
+
 static void init_tracking(struct kmem_cache *s, void *object)
 {
 	struct track *p;
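For context, the struct track that these helpers fill looks roughly like this in mm/slub.c of this era; this is a sketch from memory of the surrounding file, not part of the patch:

	struct track {
		unsigned long addr;	/* Called from address */
	#ifdef CONFIG_STACKDEPOT
		depot_stack_handle_t handle;	/* Handle for the saved stack trace */
	#endif
		int cpu;		/* Was running on cpu */
		int pid;		/* Pid context */
		unsigned long when;	/* When did the operation occur */
	};

With CONFIG_STACKDEPOT disabled, set_track_prepare() returns 0 and set_track_update() compiles out the handle store, so callers stay ifdef-free.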
@@ -1373,6 +1396,10 @@ static noinline int free_debug_processing(
 	int cnt = 0;
 	unsigned long flags, flags2;
 	int ret = 0;
+	depot_stack_handle_t handle = 0;
+
+	if (s->flags & SLAB_STORE_USER)
+		handle = set_track_prepare();
 
 	spin_lock_irqsave(&n->list_lock, flags);
 	slab_lock(slab, &flags2);
@@ -1391,7 +1418,7 @@ static noinline int free_debug_processing(
 	}
 
 	if (s->flags & SLAB_STORE_USER)
-		set_track(s, object, TRACK_FREE, addr);
+		set_track_update(s, object, TRACK_FREE, addr, handle);
 	trace(s, slab, object, 0);
 	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
 	init_object(s, object, SLUB_RED_INACTIVE);
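Taken together, the two hunks above reorder the work in free_debug_processing(): the stack trace is captured by set_track_prepare() before n->list_lock is taken, and only the precomputed handle is stored under the lock. The likely motivation is that stack_depot_save() can allocate (note the GFP_NOWAIT), which is better done outside the spinlocks. A minimal sketch of the resulting shape, using the names from the hunks above:

	depot_stack_handle_t handle = 0;

	if (s->flags & SLAB_STORE_USER)
		handle = set_track_prepare();	/* saves the stack; may allocate */

	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(slab, &flags2);
	/* ... consistency checks ... */
	if (s->flags & SLAB_STORE_USER)
		set_track_update(s, object, TRACK_FREE, addr, handle);	/* store only */

The plain set_track() wrapper keeps the old one-call behavior for the allocation paths, which do not hold the list lock.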
@@ -2936,6 +2963,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	if (!freelist) {
 		c->slab = NULL;
+		c->tid = next_tid(c->tid);
 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
@@ -2968,6 +2996,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	freelist = c->freelist;
 	c->slab = NULL;
 	c->freelist = NULL;
+	c->tid = next_tid(c->tid);
 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 	deactivate_slab(s, slab, freelist);
 
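The two one-line c->tid = next_tid(c->tid) additions close a race with the lockless fastpath: SLUB treats the (freelist, tid) pair as a transaction id, so any slow path that deactivates the cpu slab must bump the tid, or a concurrent fastpath that snapshotted the old freelist could still commit against it. A simplified sketch of the fastpath in question (condensed from slab_alloc_node(); illustrative, not the verbatim kernel code):

	redo:
		tid = READ_ONCE(c->tid);
		object = c->freelist;
		/* Succeeds only if neither freelist nor tid changed since the
		 * snapshot; bumping tid on deactivation forces a retry here. */
		if (unlikely(!this_cpu_cmpxchg_double(
				s->cpu_slab->freelist, s->cpu_slab->tid,
				object, tid,
				get_freepointer(s, object), next_tid(tid))))
			goto redo;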