@@ -1546,19 +1546,18 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
         if (ret)
                 return ret;
 
+        guard(mutex)(&event_mutex);
+
         switch (val) {
         case 0:
         case 1:
-                mutex_lock(&event_mutex);
                 file = event_file_file(filp);
-                if (likely(file)) {
-                        ret = tracing_update_buffers(file->tr);
-                        if (ret >= 0)
-                                ret = ftrace_event_enable_disable(file, val);
-                } else {
-                        ret = -ENODEV;
-                }
-                mutex_unlock(&event_mutex);
+                if (!file)
+                        return -ENODEV;
+                ret = tracing_update_buffers(file->tr);
+                if (ret < 0)
+                        return ret;
+                ret = ftrace_event_enable_disable(file, val);
                 if (ret < 0)
                         return ret;
                 break;
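
Note: the conversion in this hunk relies on guard(mutex)() from the kernel's <linux/cleanup.h>/<linux/mutex.h>, which takes the lock when the guard is declared and releases it automatically when the enclosing scope is left, so the new early "return -ENODEV" and "return ret" paths need no explicit mutex_unlock(). As a rough, hedged illustration only (not the kernel implementation), the same scope-based unlock idea can be sketched in userspace C with the compiler's cleanup attribute and a pthread mutex; guard_mutex, mutex_guard_release, sketch_mutex and update_event below are made-up names for the sketch.

/*
 * Minimal userspace sketch of the scope-based unlock behind guard(mutex)().
 * Not the kernel implementation; it only mimics the pattern with the
 * GCC/Clang cleanup attribute and a pthread mutex.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sketch_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Called automatically when the guarded variable goes out of scope. */
static void mutex_guard_release(pthread_mutex_t **lock)
{
        pthread_mutex_unlock(*lock);
}

/* Declare a guard variable that locks now and unlocks at end of scope. */
#define guard_mutex(lock)                                                 \
        pthread_mutex_t *guard_var                                        \
                __attribute__((cleanup(mutex_guard_release))) = (lock);   \
        pthread_mutex_lock(guard_var)

static int update_event(int val)
{
        guard_mutex(&sketch_mutex);     /* unlocked on every return below */

        if (val < 0)
                return -1;              /* early return: unlock still runs */

        printf("event set to %d\n", val);
        return 0;
}

int main(void)
{
        update_event(1);
        update_event(-1);
        return 0;
}

In the kernel proper, guard(mutex)() is generated by the DEFINE_GUARD()/cleanup.h machinery and is built on the same compiler cleanup attribute, which is what makes the direct returns in the hunks below safe.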
@@ -2145,7 +2144,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
         if (ret < 0)
                 return ret;
 
-        mutex_lock(&event_mutex);
+        guard(mutex)(&event_mutex);
 
         if (type == TRACE_PIDS) {
                 filtered_pids = rcu_dereference_protected(tr->filtered_pids,
@@ -2161,7 +2160,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
 
         ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
         if (ret < 0)
-                goto out;
+                return ret;
 
         if (type == TRACE_PIDS)
                 rcu_assign_pointer(tr->filtered_pids, pid_list);
@@ -2186,11 +2185,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
          */
         on_each_cpu(ignore_task_cpu, tr, 1);
 
- out:
-        mutex_unlock(&event_mutex);
-
-        if (ret > 0)
-                *ppos += ret;
+        *ppos += ret;
 
         return ret;
 }
@@ -3257,13 +3252,13 @@ int trace_add_event_call(struct trace_event_call *call)
         int ret;
         lockdep_assert_held(&event_mutex);
 
-        mutex_lock(&trace_types_lock);
+        guard(mutex)(&trace_types_lock);
 
         ret = __register_event(call, NULL);
-        if (ret >= 0)
-                __add_event_to_tracers(call);
+        if (ret < 0)
+                return ret;
 
-        mutex_unlock(&trace_types_lock);
+        __add_event_to_tracers(call);
         return ret;
 }
 EXPORT_SYMBOL_GPL(trace_add_event_call);
@@ -3517,30 +3512,21 @@ struct trace_event_file *trace_get_event_file(const char *instance,
                 return ERR_PTR(ret);
         }
 
-        mutex_lock(&event_mutex);
+        guard(mutex)(&event_mutex);
 
         file = find_event_file(tr, system, event);
         if (!file) {
                 trace_array_put(tr);
-                ret = -EINVAL;
-                goto out;
+                return ERR_PTR(-EINVAL);
         }
 
         /* Don't let event modules unload while in use */
         ret = trace_event_try_get_ref(file->event_call);
         if (!ret) {
                 trace_array_put(tr);
-                ret = -EBUSY;
-                goto out;
+                return ERR_PTR(-EBUSY);
         }
 
-        ret = 0;
- out:
-        mutex_unlock(&event_mutex);
-
-        if (ret)
-                file = ERR_PTR(ret);
-
         return file;
 }
 EXPORT_SYMBOL_GPL(trace_get_event_file);
@@ -3778,12 +3764,11 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
 
         event = strsep(&param, ":");
 
-        mutex_lock(&event_mutex);
+        guard(mutex)(&event_mutex);
 
-        ret = -EINVAL;
         file = find_event_file(tr, system, event);
         if (!file)
-                goto out;
+                return -EINVAL;
 
         enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
 
@@ -3792,40 +3777,34 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
         else
                 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
 
-        if (glob[0] == '!') {
-                ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
-                goto out;
-        }
-
-        ret = -ENOMEM;
+        if (glob[0] == '!')
+                return unregister_ftrace_function_probe_func(glob+1, tr, ops);
 
         if (param) {
                 number = strsep(&param, ":");
 
-                ret = -EINVAL;
                 if (!strlen(number))
-                        goto out;
+                        return -EINVAL;
 
                 /*
                  * We use the callback data field (which is a pointer)
                  * as our counter.
                  */
                 ret = kstrtoul(number, 0, &count);
                 if (ret)
-                        goto out;
+                        return ret;
         }
 
         /* Don't let event modules unload while probe registered */
         ret = trace_event_try_get_ref(file->event_call);
-        if (!ret) {
-                ret = -EBUSY;
-                goto out;
-        }
+        if (!ret)
+                return -EBUSY;
 
         ret = __ftrace_event_enable_disable(file, 1, 1);
         if (ret < 0)
                 goto out_put;
 
+        ret = -ENOMEM;
         data = kzalloc(sizeof(*data), GFP_KERNEL);
         if (!data)
                 goto out_put;
@@ -3840,23 +3819,20 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
          * but if it didn't find any functions it returns zero.
          * Consider no functions a failure too.
          */
-        if (!ret) {
-                ret = -ENOENT;
-                goto out_disable;
-        } else if (ret < 0)
-                goto out_disable;
+
         /* Just return zero, not the number of enabled functions */
-        ret = 0;
- out:
-        mutex_unlock(&event_mutex);
-        return ret;
+        if (ret > 0)
+                return 0;
 
- out_disable:
         kfree(data);
+
+        if (!ret)
+                ret = -ENOENT;
+
         __ftrace_event_enable_disable(file, 0, 1);
  out_put:
         trace_event_put_ref(file->event_call);
-        goto out;
+        return ret;
 }
 
 static struct ftrace_func_command event_enable_cmd = {
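
Note: in the two event_enable_func() hunks above, the guard only manages event_mutex. The module reference taken with trace_event_try_get_ref() and the soft-enable from __ftrace_event_enable_disable() are separate resources, which is why the out_put: unwind label survives the conversion. The following userspace sketch (illustrative names only: scoped_lock, drop_lock, register_enable_probe; the lock and reference are modelled as plain counters) shows the same shape: a scope guard for the lock, plus one explicit label for a resource the guard does not cover.

/*
 * Sketch of a scope guard coexisting with one explicit cleanup label.
 * Userspace analogue only, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

static int lock_depth;   /* stand-in for event_mutex */
static int event_refs;   /* stand-in for the event module reference */

/* Runs on every exit from the guarded scope. */
static void drop_lock(int **lockp)
{
        --**lockp;
}

#define scoped_lock(lock)                                                 \
        int *lock_guard __attribute__((cleanup(drop_lock))) = (lock);     \
        ++*lock_guard

static int register_enable_probe(int want_data)
{
        void *data;
        int ret = 0;

        scoped_lock(&lock_depth);       /* replaces the lock/unlock pair */

        event_refs++;                   /* resource the guard does not manage */

        if (want_data) {
                data = malloc(32);
                if (!data) {
                        ret = -1;
                        goto out_put;   /* must still drop the reference */
                }
                free(data);
        }
        return 0;                       /* success keeps the reference */

 out_put:
        event_refs--;
        return ret;
}

int main(void)
{
        register_enable_probe(1);
        printf("lock_depth=%d event_refs=%d\n", lock_depth, event_refs);
        return 0;
}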
@@ -4079,20 +4055,17 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
 {
         int ret;
 
-        mutex_lock(&event_mutex);
+        guard(mutex)(&event_mutex);
 
         ret = create_event_toplevel_files(parent, tr);
         if (ret)
-                goto out_unlock;
+                return ret;
 
         down_write(&trace_event_sem);
         __trace_early_add_event_dirs(tr);
         up_write(&trace_event_sem);
 
- out_unlock:
-        mutex_unlock(&event_mutex);
-
-        return ret;
+        return 0;
 }
 
 /* Must be called with event_mutex held */