@@ -7161,36 +7161,34 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags, *subbuf_ids;
-	int err = 0;
+	int err;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return -EINVAL;
 
 	cpu_buffer = buffer->buffers[cpu];
 
-	mutex_lock(&cpu_buffer->mapping_lock);
+	guard(mutex)(&cpu_buffer->mapping_lock);
 
 	if (cpu_buffer->user_mapped) {
 		err = __rb_map_vma(cpu_buffer, vma);
 		if (!err)
 			err = __rb_inc_dec_mapped(cpu_buffer, true);
-		mutex_unlock(&cpu_buffer->mapping_lock);
 		return err;
 	}
 
 	/* prevent another thread from changing buffer/sub-buffer sizes */
-	mutex_lock(&buffer->mutex);
+	guard(mutex)(&buffer->mutex);
 
 	err = rb_alloc_meta_page(cpu_buffer);
 	if (err)
-		goto unlock;
+		return err;
 
 	/* subbuf_ids include the reader while nr_pages does not */
 	subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL);
 	if (!subbuf_ids) {
 		rb_free_meta_page(cpu_buffer);
-		err = -ENOMEM;
-		goto unlock;
+		return -ENOMEM;
 	}
 
 	atomic_inc(&cpu_buffer->resize_disabled);
@@ -7218,35 +7216,29 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
 		atomic_dec(&cpu_buffer->resize_disabled);
 	}
 
-unlock:
-	mutex_unlock(&buffer->mutex);
-	mutex_unlock(&cpu_buffer->mapping_lock);
-
-	return err;
+	return 0;
 }
 
 int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags;
-	int err = 0;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return -EINVAL;
 
 	cpu_buffer = buffer->buffers[cpu];
 
-	mutex_lock(&cpu_buffer->mapping_lock);
+	guard(mutex)(&cpu_buffer->mapping_lock);
 
 	if (!cpu_buffer->user_mapped) {
-		err = -ENODEV;
-		goto out;
+		return -ENODEV;
 	} else if (cpu_buffer->user_mapped > 1) {
 		__rb_inc_dec_mapped(cpu_buffer, false);
-		goto out;
+		return 0;
 	}
 
-	mutex_lock(&buffer->mutex);
+	guard(mutex)(&buffer->mutex);
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	/* This is the last user space mapping */
@@ -7261,12 +7253,7 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
 	rb_free_meta_page(cpu_buffer);
 	atomic_dec(&cpu_buffer->resize_disabled);
 
-	mutex_unlock(&buffer->mutex);
-
-out:
-	mutex_unlock(&cpu_buffer->mapping_lock);
-
-	return err;
+	return 0;
 }
 
 int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu)
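
The conversion above relies on the kernel's guard(mutex)() helper, built on the scope-based cleanup machinery in <linux/cleanup.h>: the mutex is released automatically when the guard variable goes out of scope, on every return path, which is what lets the patch drop the unlock:/out: labels and the explicit mutex_unlock() calls. Below is a minimal userspace sketch of that mechanism, assuming GCC/Clang's __attribute__((cleanup)); the names guard_t, GUARD(), mutex_guard_release() and the demo_* symbols are made up for illustration and are not part of the patch or of the kernel API.

#include <pthread.h>
#include <stdio.h>

/* Small wrapper object whose destructor releases the mutex. */
typedef struct { pthread_mutex_t *lock; } guard_t;

static void mutex_guard_release(guard_t *g)
{
	pthread_mutex_unlock(g->lock);	/* runs automatically on scope exit */
}

/* Take the lock now; the cleanup attribute drops it when the guard dies. */
#define GUARD(m) \
	guard_t __scope_guard __attribute__((cleanup(mutex_guard_release))) = { .lock = (m) }; \
	pthread_mutex_lock(m)

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_counter;

static int demo_increment(int fail)
{
	GUARD(&demo_lock);

	if (fail)
		return -1;	/* early return: the guard still unlocks */

	demo_counter++;
	return 0;
}

int main(void)
{
	demo_increment(0);
	demo_increment(1);
	printf("counter = %d\n", demo_counter);	/* prints 1 */
	return 0;
}

The early return in demo_increment() still drops the mutex when the guard leaves scope, which is the same property that allows ring_buffer_map() and ring_buffer_unmap() to return directly from the error branches instead of jumping to an unlock label.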