Commit 8f2221f

Peter Zijlstra authored and Ingo Molnar committed
perf/core: Simplify perf_event_alloc()
Using the previous simplifications, transition perf_event_alloc() to the cleanup way of things -- reducing error path magic.

[ mingo: Ported it to recent kernels. ]

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Reviewed-by: Ravi Bangoria <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent caf8b76 commit 8f2221f

File tree: 1 file changed (+22, -37)

kernel/events/core.c

Lines changed: 22 additions & 37 deletions
@@ -5410,6 +5410,8 @@ static void __free_event(struct perf_event *event)
 	call_rcu(&event->rcu_head, free_event_rcu);
 }
 
+DEFINE_FREE(__free_event, struct perf_event *, if (_T) __free_event(_T))
+
 /* vs perf_event_alloc() success */
 static void _free_event(struct perf_event *event)
 {
@@ -12291,7 +12293,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		 void *context, int cgroup_fd)
 {
 	struct pmu *pmu;
-	struct perf_event *event;
 	struct hw_perf_event *hwc;
 	long err = -EINVAL;
 	int node;
@@ -12306,8 +12307,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	}
 
 	node = (cpu >= 0) ? cpu_to_node(cpu) : -1;
-	event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO,
-				      node);
+	struct perf_event *event __free(__free_event) =
+		kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, node);
 	if (!event)
 		return ERR_PTR(-ENOMEM);
 
@@ -12414,65 +12415,53 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	 * See perf_output_read().
 	 */
 	if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
-		goto err;
+		return ERR_PTR(-EINVAL);
 
 	if (!has_branch_stack(event))
 		event->attr.branch_sample_type = 0;
 
 	pmu = perf_init_event(event);
-	if (IS_ERR(pmu)) {
-		err = PTR_ERR(pmu);
-		goto err;
-	}
+	if (IS_ERR(pmu))
+		return (void*)pmu;
 
 	/*
 	 * Disallow uncore-task events. Similarly, disallow uncore-cgroup
 	 * events (they don't make sense as the cgroup will be different
 	 * on other CPUs in the uncore mask).
 	 */
-	if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
-		err = -EINVAL;
-		goto err;
-	}
+	if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1))
+		return ERR_PTR(-EINVAL);
 
 	if (event->attr.aux_output &&
 	    (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) ||
-	     event->attr.aux_pause || event->attr.aux_resume)) {
-		err = -EOPNOTSUPP;
-		goto err;
-	}
+	     event->attr.aux_pause || event->attr.aux_resume))
+		return ERR_PTR(-EOPNOTSUPP);
 
-	if (event->attr.aux_pause && event->attr.aux_resume) {
-		err = -EINVAL;
-		goto err;
-	}
+	if (event->attr.aux_pause && event->attr.aux_resume)
+		return ERR_PTR(-EINVAL);
 
 	if (event->attr.aux_start_paused) {
-		if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) {
-			err = -EOPNOTSUPP;
-			goto err;
-		}
+		if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE))
+			return ERR_PTR(-EOPNOTSUPP);
 		event->hw.aux_paused = 1;
 	}
 
 	if (cgroup_fd != -1) {
 		err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
 		if (err)
-			goto err;
+			return ERR_PTR(err);
 	}
 
 	err = exclusive_event_init(event);
 	if (err)
-		goto err;
+		return ERR_PTR(err);
 
 	if (has_addr_filter(event)) {
 		event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
 						    sizeof(struct perf_addr_filter_range),
 						    GFP_KERNEL);
-		if (!event->addr_filter_ranges) {
-			err = -ENOMEM;
-			goto err;
-		}
+		if (!event->addr_filter_ranges)
+			return ERR_PTR(-ENOMEM);
 
 		/*
 		 * Clone the parent's vma offsets: they are valid until exec()
@@ -12496,23 +12485,19 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
 		err = get_callchain_buffers(attr->sample_max_stack);
 		if (err)
-			goto err;
+			return ERR_PTR(err);
 		event->attach_state |= PERF_ATTACH_CALLCHAIN;
 	}
 	}
 
 	err = security_perf_event_alloc(event);
 	if (err)
-		goto err;
+		return ERR_PTR(err);
 
 	/* symmetric to unaccount_event() in _free_event() */
 	account_event(event);
 
-	return event;
-
-err:
-	__free_event(event);
-	return ERR_PTR(err);
+	return_ptr(event);
 }
 
 static int perf_copy_attr(struct perf_event_attr __user *uattr,
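
For readers unfamiliar with the pattern: the "cleanup way of things" in the commit message refers to the scoped-cleanup helpers in include/linux/cleanup.h. DEFINE_FREE() declares a destructor; __free() attaches it to a local variable through the compiler's cleanup attribute so it runs on every scope exit; and return_ptr() (via no_free_ptr()) NULLs the variable to disarm the destructor on the success path. Below is a minimal userspace sketch of that mechanism, compilable with GCC or Clang (it relies on GNU extensions). The macro bodies are simplified stand-ins for the kernel's definitions, and struct widget is a hypothetical example type, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the <linux/cleanup.h> macros (assumption:
 * the real kernel versions add type checking and instrumentation). */
#define DEFINE_FREE(name, type, free_expr)				\
	static inline void __free_##name(void *p)			\
	{ type _T = *(type *)p; free_expr; }

/* Run the named destructor whenever the variable leaves scope. */
#define __free(name)	__attribute__((cleanup(__free_##name)))

/* Hand ownership to the caller: NULL the local so cleanup is a no-op. */
#define no_free_ptr(p)	({ __typeof__(p) __p = (p); (p) = NULL; __p; })
#define return_ptr(p)	return no_free_ptr(p)

/* Hypothetical example type, standing in for struct perf_event. */
struct widget { int id; };

DEFINE_FREE(widget, struct widget *, if (_T) free(_T))

static struct widget *widget_alloc(int id)
{
	/* The destructor runs on *every* scope exit, so error paths
	 * become bare returns instead of 'goto err' -- the shape this
	 * commit gives perf_event_alloc(). */
	struct widget *w __free(widget) = calloc(1, sizeof(*w));

	if (!w)
		return NULL;
	if (id < 0)		/* validation failure ...             */
		return NULL;	/* ... w is freed automatically here  */

	w->id = id;
	return_ptr(w);		/* success: disarm cleanup, return w  */
}

int main(void)
{
	struct widget *w = widget_alloc(1);

	printf("alloc(1)  -> %s\n", w ? "ok" : "failed");
	printf("alloc(-1) -> %s\n",
	       widget_alloc(-1) ? "ok" : "failed (freed internally)");
	free(w);
	return 0;
}

Once the allocation is guarded this way, every early return ERR_PTR(...) in perf_event_alloc() frees the half-initialized event automatically, which is what lets the commit delete the err: label and its explicit __free_event() call.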
