@@ -5410,6 +5410,8 @@ static void __free_event(struct perf_event *event)
 	call_rcu(&event->rcu_head, free_event_rcu);
 }
 
+DEFINE_FREE(__free_event, struct perf_event *, if (_T) __free_event(_T))
+
 /* vs perf_event_alloc() success */
 static void _free_event(struct perf_event *event)
 {
@@ -12291,7 +12293,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		 void *context, int cgroup_fd)
 {
 	struct pmu *pmu;
-	struct perf_event *event;
 	struct hw_perf_event *hwc;
 	long err = -EINVAL;
 	int node;
@@ -12306,8 +12307,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	}
 
 	node = (cpu >= 0) ? cpu_to_node(cpu) : -1;
-	event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO,
-				      node);
+	struct perf_event *event __free(__free_event) =
+		kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, node);
 	if (!event)
 		return ERR_PTR(-ENOMEM);
 
@@ -12414,65 +12415,53 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	 * See perf_output_read().
 	 */
 	if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
-		goto err;
+		return ERR_PTR(-EINVAL);
 
 	if (!has_branch_stack(event))
 		event->attr.branch_sample_type = 0;
 
 	pmu = perf_init_event(event);
-	if (IS_ERR(pmu)) {
-		err = PTR_ERR(pmu);
-		goto err;
-	}
+	if (IS_ERR(pmu))
+		return (void *)pmu;
 
 	/*
 	 * Disallow uncore-task events. Similarly, disallow uncore-cgroup
 	 * events (they don't make sense as the cgroup will be different
 	 * on other CPUs in the uncore mask).
 	 */
-	if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
-		err = -EINVAL;
-		goto err;
-	}
+	if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1))
+		return ERR_PTR(-EINVAL);
 
 	if (event->attr.aux_output &&
 	    (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) ||
-	     event->attr.aux_pause || event->attr.aux_resume)) {
-		err = -EOPNOTSUPP;
-		goto err;
-	}
+	     event->attr.aux_pause || event->attr.aux_resume))
+		return ERR_PTR(-EOPNOTSUPP);
 
-	if (event->attr.aux_pause && event->attr.aux_resume) {
-		err = -EINVAL;
-		goto err;
-	}
+	if (event->attr.aux_pause && event->attr.aux_resume)
+		return ERR_PTR(-EINVAL);
 
 	if (event->attr.aux_start_paused) {
-		if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) {
-			err = -EOPNOTSUPP;
-			goto err;
-		}
+		if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE))
+			return ERR_PTR(-EOPNOTSUPP);
 		event->hw.aux_paused = 1;
 	}
 
 	if (cgroup_fd != -1) {
 		err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
 		if (err)
-			goto err;
+			return ERR_PTR(err);
 	}
 
 	err = exclusive_event_init(event);
 	if (err)
-		goto err;
+		return ERR_PTR(err);
 
 	if (has_addr_filter(event)) {
 		event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
 						    sizeof(struct perf_addr_filter_range),
 						    GFP_KERNEL);
-		if (!event->addr_filter_ranges) {
-			err = -ENOMEM;
-			goto err;
-		}
+		if (!event->addr_filter_ranges)
+			return ERR_PTR(-ENOMEM);
 
 		/*
 		 * Clone the parent's vma offsets: they are valid until exec()
@@ -12496,23 +12485,19 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
 			err = get_callchain_buffers(attr->sample_max_stack);
 			if (err)
-				goto err;
+				return ERR_PTR(err);
 			event->attach_state |= PERF_ATTACH_CALLCHAIN;
 		}
 	}
 
 	err = security_perf_event_alloc(event);
 	if (err)
-		goto err;
+		return ERR_PTR(err);
 
 	/* symmetric to unaccount_event() in _free_event() */
 	account_event(event);
 
-	return event;
-
-err:
-	__free_event(event);
-	return ERR_PTR(err);
+	return_ptr(event);
 }
 
 static int perf_copy_attr(struct perf_event_attr __user *uattr,
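
For readers unfamiliar with the scope-based cleanup helpers this patch relies on: DEFINE_FREE(), __free() and return_ptr() come from the kernel's <linux/cleanup.h> and are built on the compiler's __attribute__((cleanup)). The userspace sketch below only illustrates that mechanism under simplified, stand-in macro definitions; struct widget and widget_alloc() are made-up names for illustration, not kernel code. It builds with gcc or clang, since the cleanup attribute and statement expressions are GNU extensions.

#include <stdio.h>
#include <stdlib.h>

/* DEFINE_FREE(name, type, free-expr): generate a cleanup helper for 'type'. */
#define DEFINE_FREE(name, type, free)					\
	static inline void __free_##name(type *p)			\
	{ type _T = *p; free; }

/* __free(name): run that helper when the annotated variable leaves scope. */
#define __free(name)	__attribute__((cleanup(__free_##name)))

/* no_free_ptr()/return_ptr(): transfer ownership out and disarm the cleanup. */
#define no_free_ptr(p)	({ __typeof__(p) _p = (p); (p) = NULL; _p; })
#define return_ptr(p)	return no_free_ptr(p)

struct widget { int id; };	/* made-up example type */

DEFINE_FREE(widget, struct widget *, if (_T) { printf("auto-freeing %d\n", _T->id); free(_T); })

static struct widget *widget_alloc(int id)
{
	/* Guarded allocation: the cleanup runs on every exit unless disarmed. */
	struct widget *w __free(widget) = calloc(1, sizeof(*w));

	if (!w)
		return NULL;

	w->id = id;
	if (id < 0)
		return NULL;	/* error path: w is freed automatically, no goto */

	return_ptr(w);		/* success path: disarm the cleanup, keep w alive */
}

int main(void)
{
	struct widget *ok = widget_alloc(1);	/* survives widget_alloc() */
	struct widget *bad = widget_alloc(-1);	/* freed inside widget_alloc() */

	printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
	free(ok);
	return 0;
}

The conversion above follows the same shape as the sketch: once the allocation is guarded by __free(__free_event), every early return ERR_PTR(...) path frees the event automatically, so the goto err unwinding label can be dropped and the success path hands ownership out with return_ptr().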