@@ -892,6 +892,47 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	rcu_read_unlock();
 }
 
+static int perf_cgroup_ensure_storage(struct perf_event *event,
+				      struct cgroup_subsys_state *css)
+{
+	struct perf_cpu_context *cpuctx;
+	struct perf_event **storage;
+	int cpu, heap_size, ret = 0;
+
+	/*
+	 * Allow storage to have sufficient space for an iterator for each
+	 * possibly nested cgroup plus an iterator for events with no cgroup.
+	 */
+	for (heap_size = 1; css; css = css->parent)
+		heap_size++;
+
+	for_each_possible_cpu(cpu) {
+		cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu);
+		if (heap_size <= cpuctx->heap_size)
+			continue;
+
+		storage = kmalloc_node(heap_size * sizeof(struct perf_event *),
+				       GFP_KERNEL, cpu_to_node(cpu));
+		if (!storage) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		raw_spin_lock_irq(&cpuctx->ctx.lock);
+		if (cpuctx->heap_size < heap_size) {
+			swap(cpuctx->heap, storage);
+			if (storage == cpuctx->heap_default)
+				storage = NULL;
+			cpuctx->heap_size = heap_size;
+		}
+		raw_spin_unlock_irq(&cpuctx->ctx.lock);
+
+		kfree(storage);
+	}
+
+	return ret;
+}
+
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 				      struct perf_event_attr *attr,
 				      struct perf_event *group_leader)
@@ -911,6 +952,10 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 		goto out;
 	}
 
+	ret = perf_cgroup_ensure_storage(event, css);
+	if (ret)
+		goto out;
+
 	cgrp = container_of(css, struct perf_cgroup, css);
 	event->cgrp = cgrp;
 
@@ -3440,6 +3485,8 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
 			.nr = 0,
 			.size = cpuctx->heap_size,
 		};
+
+		lockdep_assert_held(&cpuctx->ctx.lock);
 	} else {
 		event_heap = (struct min_heap){
 			.data = itrs,
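
To make the heap sizing rule in perf_cgroup_ensure_storage() concrete: the first loop counts one slot per level of cgroup nesting (the css itself plus each ancestor) and starts at 1 so there is also a slot for the iterator over events with no cgroup. Below is a minimal user-space sketch of that arithmetic, with a mock css struct standing in for struct cgroup_subsys_state; all names here are illustrative, not kernel API.

#include <stdio.h>

/* mock stand-in for struct cgroup_subsys_state */
struct css { struct css *parent; };

static int heap_size_for(struct css *css)
{
	int heap_size;

	/* one slot for the no-cgroup iterator, plus one per cgroup level */
	for (heap_size = 1; css; css = css->parent)
		heap_size++;
	return heap_size;
}

int main(void)
{
	struct css root = { .parent = NULL };
	struct css child = { .parent = &root };
	struct css grandchild = { .parent = &child };

	/* three nested levels plus the no-cgroup iterator -> 4 slots */
	printf("%d\n", heap_size_for(&grandchild));	/* prints 4 */
	return 0;
}

So a cgroup nested three levels deep needs a four-entry heap, which is why the default inline storage can be outgrown as hierarchies deepen.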
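The allocation dance in the same function is a common lock-scope pattern: allocate the larger buffer without the lock held, re-check and swap it in under ctx.lock, then free whichever buffer lost the race, taking care never to kfree() the embedded heap_default storage. Here is a rough user-space sketch of the same pattern, assuming a pthread mutex in place of the raw spinlock and invented names throughout; it is not the kernel implementation.

#include <pthread.h>
#include <stdlib.h>

struct ctx {
	pthread_mutex_t lock;
	void **heap;		/* current heap storage */
	int heap_size;		/* its capacity in entries */
	void *heap_default[2];	/* embedded storage; must never be freed */
};

/* Grow c->heap to at least 'want' entries, mirroring the
 * swap-under-lock pattern above with mock types. */
static int grow_heap(struct ctx *c, int want)
{
	/* allocate outside the lock so the critical section stays short */
	void **storage = malloc(want * sizeof(*storage));

	if (!storage)
		return -1;

	pthread_mutex_lock(&c->lock);
	if (c->heap_size < want) {
		/* we won: install the new buffer, old one becomes the loser */
		void **old = c->heap;

		c->heap = storage;
		storage = old;
		if (storage == c->heap_default)
			storage = NULL;	/* embedded default: do not free */
		c->heap_size = want;
	}
	pthread_mutex_unlock(&c->lock);

	/* frees the displaced buffer, or our fresh one if we lost the race */
	free(storage);
	return 0;
}

Doing the allocation outside the lock keeps the critical section down to a pointer swap and a size update, which matters here because ctx.lock is a raw spinlock taken with interrupts disabled.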