Skip to content

Commit c2283c9

Browse files
captain5050 (Ian Rogers) authored; Ingo Molnar committed
perf/cgroup: Grow per perf_cpu_context heap storage
Allow the per-CPU min heap storage to have sufficient space for per-cgroup iterators.

Based-on-work-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ian Rogers <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 836196b commit c2283c9

File tree

1 file changed

+47
-0
lines changed

1 file changed

+47
-0
lines changed

kernel/events/core.c

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -892,6 +892,47 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
892892
rcu_read_unlock();
893893
}
894894

895+
static int perf_cgroup_ensure_storage(struct perf_event *event,
896+
struct cgroup_subsys_state *css)
897+
{
898+
struct perf_cpu_context *cpuctx;
899+
struct perf_event **storage;
900+
int cpu, heap_size, ret = 0;
901+
902+
/*
903+
* Allow storage to have sufficent space for an iterator for each
904+
* possibly nested cgroup plus an iterator for events with no cgroup.
905+
*/
906+
for (heap_size = 1; css; css = css->parent)
907+
heap_size++;
908+
909+
for_each_possible_cpu(cpu) {
910+
cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu);
911+
if (heap_size <= cpuctx->heap_size)
912+
continue;
913+
914+
storage = kmalloc_node(heap_size * sizeof(struct perf_event *),
915+
GFP_KERNEL, cpu_to_node(cpu));
916+
if (!storage) {
917+
ret = -ENOMEM;
918+
break;
919+
}
920+
921+
raw_spin_lock_irq(&cpuctx->ctx.lock);
922+
if (cpuctx->heap_size < heap_size) {
923+
swap(cpuctx->heap, storage);
924+
if (storage == cpuctx->heap_default)
925+
storage = NULL;
926+
cpuctx->heap_size = heap_size;
927+
}
928+
raw_spin_unlock_irq(&cpuctx->ctx.lock);
929+
930+
kfree(storage);
931+
}
932+
933+
return ret;
934+
}
935+
895936
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
896937
struct perf_event_attr *attr,
897938
struct perf_event *group_leader)
@@ -911,6 +952,10 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
911952
goto out;
912953
}
913954

955+
ret = perf_cgroup_ensure_storage(event, css);
956+
if (ret)
957+
goto out;
958+
914959
cgrp = container_of(css, struct perf_cgroup, css);
915960
event->cgrp = cgrp;
916961

@@ -3440,6 +3485,8 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
34403485
.nr = 0,
34413486
.size = cpuctx->heap_size,
34423487
};
3488+
3489+
lockdep_assert_held(&cpuctx->ctx.lock);
34433490
} else {
34443491
event_heap = (struct min_heap){
34453492
.data = itrs,

0 commit comments

Comments (0)