@@ -2360,133 +2360,91 @@ int is_tracing_stopped(void)
 	return global_trace.stop_count;
 }
 
-/**
- * tracing_start - quick start of the tracer
- *
- * If tracing is enabled but was stopped by tracing_stop,
- * this will start the tracer back up.
- */
-void tracing_start(void)
+static void tracing_start_tr(struct trace_array *tr)
 {
 	struct trace_buffer *buffer;
 	unsigned long flags;
 
 	if (tracing_disabled)
 		return;
 
-	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
-	if (--global_trace.stop_count) {
-		if (global_trace.stop_count < 0) {
+	raw_spin_lock_irqsave(&tr->start_lock, flags);
+	if (--tr->stop_count) {
+		if (WARN_ON_ONCE(tr->stop_count < 0)) {
 			/* Someone screwed up their debugging */
-			WARN_ON_ONCE(1);
-			global_trace.stop_count = 0;
+			tr->stop_count = 0;
 		}
 		goto out;
 	}
 
 	/* Prevent the buffers from switching */
-	arch_spin_lock(&global_trace.max_lock);
+	arch_spin_lock(&tr->max_lock);
 
-	buffer = global_trace.array_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	buffer = global_trace.max_buffer.buffer;
+	buffer = tr->max_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 #endif
 
-	arch_spin_unlock(&global_trace.max_lock);
-
- out:
-	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
-}
-
-static void tracing_start_tr(struct trace_array *tr)
-{
-	struct trace_buffer *buffer;
-	unsigned long flags;
-
-	if (tracing_disabled)
-		return;
-
-	/* If global, we need to also start the max tracer */
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-		return tracing_start();
-
-	raw_spin_lock_irqsave(&tr->start_lock, flags);
-
-	if (--tr->stop_count) {
-		if (tr->stop_count < 0) {
-			/* Someone screwed up their debugging */
-			WARN_ON_ONCE(1);
-			tr->stop_count = 0;
-		}
-		goto out;
-	}
-
-	buffer = tr->array_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_enable(buffer);
+	arch_spin_unlock(&tr->max_lock);
 
  out:
 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
 /**
- * tracing_stop - quick stop of the tracer
+ * tracing_start - quick start of the tracer
  *
- * Light weight way to stop tracing. Use in conjunction with
- * tracing_start.
+ * If tracing is enabled but was stopped by tracing_stop,
+ * this will start the tracer back up.
  */
-void tracing_stop(void)
+void tracing_start(void)
+
+{
+	return tracing_start_tr(&global_trace);
+}
+
+static void tracing_stop_tr(struct trace_array *tr)
 {
 	struct trace_buffer *buffer;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
-	if (global_trace.stop_count++)
+	raw_spin_lock_irqsave(&tr->start_lock, flags);
+	if (tr->stop_count++)
 		goto out;
 
 	/* Prevent the buffers from switching */
-	arch_spin_lock(&global_trace.max_lock);
+	arch_spin_lock(&tr->max_lock);
 
-	buffer = global_trace.array_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	buffer = global_trace.max_buffer.buffer;
+	buffer = tr->max_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 #endif
 
-	arch_spin_unlock(&global_trace.max_lock);
+	arch_spin_unlock(&tr->max_lock);
 
  out:
-	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
-static void tracing_stop_tr(struct trace_array *tr)
+/**
+ * tracing_stop - quick stop of the tracer
+ *
+ * Light weight way to stop tracing. Use in conjunction with
+ * tracing_start.
+ */
+void tracing_stop(void)
 {
-	struct trace_buffer *buffer;
-	unsigned long flags;
-
-	/* If global, we need to also stop the max tracer */
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-		return tracing_stop();
-
-	raw_spin_lock_irqsave(&tr->start_lock, flags);
-	if (tr->stop_count++)
-		goto out;
-
-	buffer = tr->array_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_disable(buffer);
-
- out:
-	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+	return tracing_stop_tr(&global_trace);
 }
 
 static int trace_save_cmdline(struct task_struct *tsk)
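
Note on the behavior this refactor keeps intact: stop_count is a nesting counter, as the diff shows. tracing_stop_tr() disables recording only on the 0 -> 1 transition, and tracing_start_tr() re-enables it only when the count drops back to 0, so stop/start pairs may nest safely. The standalone sketch below (illustrative only, not kernel code; the _sketch names are invented here) models just that nesting, leaving out the start_lock/max_lock locking and the WARN_ON_ONCE() underflow handling that the real functions have:

#include <stdio.h>

static int stop_count;	/* models tr->stop_count */

static void tracing_stop_sketch(void)
{
	if (stop_count++)		/* already stopped by an outer caller */
		return;
	printf("recording disabled\n");	/* stands in for ring_buffer_record_disable() */
}

static void tracing_start_sketch(void)
{
	if (--stop_count)		/* an outer stop is still pending */
		return;
	printf("recording enabled\n");	/* stands in for ring_buffer_record_enable() */
}

int main(void)
{
	tracing_stop_sketch();	/* 0 -> 1: disables recording  */
	tracing_stop_sketch();	/* 1 -> 2: no-op               */
	tracing_start_sketch();	/* 2 -> 1: still disabled      */
	tracing_start_sketch();	/* 1 -> 0: re-enables          */
	return 0;
}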