@@ -493,15 +493,17 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_export);
 	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |	\
 	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |		\
 	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |		\
-	 TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK)
+	 TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK |	\
+	 TRACE_ITER_COPY_MARKER)
 
 /* trace_options that are only supported by global_trace */
 #define TOP_LEVEL_TRACE_FLAGS	(TRACE_ITER_PRINTK |			\
	   TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
 
 /* trace_flags that are default zero for instances */
 #define ZEROED_TRACE_FLAGS \
-	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK)
+	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK | \
+	 TRACE_ITER_COPY_MARKER)
 
 /*
  * The global_trace is the descriptor that holds the top-level tracing
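
Note on the two macro changes above: adding TRACE_ITER_COPY_MARKER to the default flag mask exposes the new bit as a regular trace option, while also listing it in ZEROED_TRACE_FLAGS keeps it cleared by default in newly created instances; an instance must opt in explicitly before top-level trace_marker writes are copied into it.
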
@@ -513,6 +515,9 @@ static struct trace_array global_trace = {
 
 static struct trace_array *printk_trace = &global_trace;
 
+/* List of trace_arrays interested in the top level trace_marker */
+static LIST_HEAD(marker_copies);
+
 static __always_inline bool printk_binsafe(struct trace_array *tr)
 {
 	/*
@@ -534,6 +539,28 @@ static void update_printk_trace(struct trace_array *tr)
 	tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
 }
 
+/* Returns true if the status of tr changed */
+static bool update_marker_trace(struct trace_array *tr, int enabled)
+{
+	lockdep_assert_held(&event_mutex);
+
+	if (enabled) {
+		if (!list_empty(&tr->marker_list))
+			return false;
+
+		list_add_rcu(&tr->marker_list, &marker_copies);
+		tr->trace_flags |= TRACE_ITER_COPY_MARKER;
+		return true;
+	}
+
+	if (list_empty(&tr->marker_list))
+		return false;
+
+	list_del_init(&tr->marker_list);
+	tr->trace_flags &= ~TRACE_ITER_COPY_MARKER;
+	return true;
+}
+
 void trace_set_ring_buffer_expanded(struct trace_array *tr)
 {
 	if (!tr)
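
update_marker_trace() above is the writer side of a standard RCU-protected list: mutations of marker_copies are serialized by event_mutex (hence the lockdep assertion), while the trace_marker write path further down walks the list under nothing more than an RCU read lock. A self-contained sketch of that split follows; all names in it (copy_target, targets, targets_mutex) are hypothetical illustrations, not kernel symbols.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical sketch of the mutex-writer / RCU-reader list pattern. */
struct copy_target {
	struct list_head node;
};

static LIST_HEAD(targets);
static DEFINE_MUTEX(targets_mutex);

/* Writer side: all list mutations happen under a mutex. */
static void target_add(struct copy_target *t)
{
	mutex_lock(&targets_mutex);
	list_add_rcu(&t->node, &targets);
	mutex_unlock(&targets_mutex);
}

static void target_remove(struct copy_target *t)
{
	mutex_lock(&targets_mutex);
	list_del_rcu(&t->node);
	mutex_unlock(&targets_mutex);
	synchronize_rcu();	/* no reader can still see t after this */
	kfree(t);
}

/* Reader side: lockless traversal, never blocks the writers. */
static void target_visit_all(void (*fn)(struct copy_target *))
{
	struct copy_target *t;

	rcu_read_lock();
	list_for_each_entry_rcu(t, &targets, node)
		fn(t);
	rcu_read_unlock();
}

update_marker_trace() deviates from the sketch in two details: it uses list_del_init() so that list_empty(&tr->marker_list) can double as the "is this instance registered" test, and it leaves the grace period to the caller: __remove_instance() below calls synchronize_rcu() only when the status actually changed and the trace_array is about to be torn down.
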
@@ -5220,7 +5247,8 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 {
 	if ((mask == TRACE_ITER_RECORD_TGID) ||
 	    (mask == TRACE_ITER_RECORD_CMD) ||
-	    (mask == TRACE_ITER_TRACE_PRINTK))
+	    (mask == TRACE_ITER_TRACE_PRINTK) ||
+	    (mask == TRACE_ITER_COPY_MARKER))
 		lockdep_assert_held(&event_mutex);
 
 	/* do nothing if flag is already set */
@@ -5251,6 +5279,9 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 		}
 	}
 
+	if (mask == TRACE_ITER_COPY_MARKER)
+		update_marker_trace(tr, enabled);
+
 	if (enabled)
 		tr->trace_flags |= mask;
 	else
@@ -7134,11 +7165,9 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 
 #define TRACE_MARKER_MAX_SIZE 4096
 
-static ssize_t
-tracing_mark_write(struct file *filp, const char __user *ubuf,
-					size_t cnt, loff_t *fpos)
+static ssize_t write_marker_to_buffer(struct trace_array *tr, const char __user *ubuf,
+				      size_t cnt, unsigned long ip)
 {
-	struct trace_array *tr = filp->private_data;
 	struct ring_buffer_event *event;
 	enum event_trigger_type tt = ETT_NONE;
 	struct trace_buffer *buffer;
@@ -7152,18 +7181,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 #define FAULTED_STR "<faulted>"
 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
 
-	if (tracing_disabled)
-		return -EINVAL;
-
-	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
-		return -EINVAL;
-
-	if ((ssize_t)cnt < 0)
-		return -EINVAL;
-
-	if (cnt > TRACE_MARKER_MAX_SIZE)
-		cnt = TRACE_MARKER_MAX_SIZE;
-
 	meta_size = sizeof(*entry) + 2;	/* add '\0' and possible '\n' */
  again:
 	size = cnt + meta_size;
@@ -7196,7 +7213,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	}
 
 	entry = ring_buffer_event_data(event);
-	entry->ip = _THIS_IP_;
+	entry->ip = ip;
 
 	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
 	if (len) {
@@ -7229,29 +7246,55 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 }
 
 static ssize_t
-tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
 {
 	struct trace_array *tr = filp->private_data;
-	struct ring_buffer_event *event;
-	struct trace_buffer *buffer;
-	struct raw_data_entry *entry;
-	ssize_t written;
-	int size;
-	int len;
-
-#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
+	ssize_t written = -ENODEV;
+	unsigned long ip;
 
 	if (tracing_disabled)
 		return -EINVAL;
 
 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
 		return -EINVAL;
 
-	/* The marker must at least have a tag id */
-	if (cnt < sizeof(unsigned int))
+	if ((ssize_t)cnt < 0)
 		return -EINVAL;
 
+	if (cnt > TRACE_MARKER_MAX_SIZE)
+		cnt = TRACE_MARKER_MAX_SIZE;
+
+	/* The selftests expect this function to be the IP address */
+	ip = _THIS_IP_;
+
+	/* The global trace_marker can go to multiple instances */
+	if (tr == &global_trace) {
+		guard(rcu)();
+		list_for_each_entry_rcu(tr, &marker_copies, marker_list) {
+			written = write_marker_to_buffer(tr, ubuf, cnt, ip);
+			if (written < 0)
+				break;
+		}
+	} else {
+		written = write_marker_to_buffer(tr, ubuf, cnt, ip);
+	}
+
+	return written;
+}
+
+static ssize_t write_raw_marker_to_buffer(struct trace_array *tr,
+					  const char __user *ubuf, size_t cnt)
+{
+	struct ring_buffer_event *event;
+	struct trace_buffer *buffer;
+	struct raw_data_entry *entry;
+	ssize_t written;
+	int size;
+	int len;
+
+#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
+
 	size = sizeof(*entry) + cnt;
 	if (cnt < FAULT_SIZE_ID)
 		size += FAULT_SIZE_ID - cnt;
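
A note on guard(rcu)() in the new tracing_mark_write() above: it is the scope-based cleanup helper (the guard machinery lives in linux/cleanup.h, with the rcu guard defined in linux/rcupdate.h) that takes rcu_read_lock() on entry and drops it automatically when the enclosing block ends, which is why no explicit unlock appears before the function returns. The loop is equivalent to the traditional open-coded form:

	/* Open-coded equivalent of the guard(rcu)() scope above. */
	rcu_read_lock();
	list_for_each_entry_rcu(tr, &marker_copies, marker_list) {
		written = write_marker_to_buffer(tr, ubuf, cnt, ip);
		if (written < 0)
			break;
	}
	rcu_read_unlock();

Initializing written to -ENODEV means a write to the top-level trace_marker reports a sensible error if the copy list turns out to be empty, rather than returning an uninitialized value.
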
@@ -7282,6 +7325,40 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
 	return written;
 }
 
+static ssize_t
+tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+					size_t cnt, loff_t *fpos)
+{
+	struct trace_array *tr = filp->private_data;
+	ssize_t written = -ENODEV;
+
+#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
+
+	if (tracing_disabled)
+		return -EINVAL;
+
+	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
+		return -EINVAL;
+
+	/* The marker must at least have a tag id */
+	if (cnt < sizeof(unsigned int))
+		return -EINVAL;
+
+	/* The global trace_marker_raw can go to multiple instances */
+	if (tr == &global_trace) {
+		guard(rcu)();
+		list_for_each_entry_rcu(tr, &marker_copies, marker_list) {
+			written = write_raw_marker_to_buffer(tr, ubuf, cnt);
+			if (written < 0)
+				break;
+		}
+	} else {
+		written = write_raw_marker_to_buffer(tr, ubuf, cnt);
+	}
+
+	return written;
+}
+
 static int tracing_clock_show(struct seq_file *m, void *v)
 {
 	struct trace_array *tr = m->private;
@@ -9775,6 +9852,7 @@ trace_array_create_systems(const char *name, const char *systems,
 	INIT_LIST_HEAD(&tr->events);
 	INIT_LIST_HEAD(&tr->hist_vars);
 	INIT_LIST_HEAD(&tr->err_log);
+	INIT_LIST_HEAD(&tr->marker_list);
 
 #ifdef CONFIG_MODULES
 	INIT_LIST_HEAD(&tr->mod_events);
@@ -9934,6 +10012,9 @@ static int __remove_instance(struct trace_array *tr)
 	if (printk_trace == tr)
 		update_printk_trace(&global_trace);
 
+	if (update_marker_trace(tr, 0))
+		synchronize_rcu();
+
 	tracing_set_nop(tr);
 	clear_ftrace_function_probes(tr);
 	event_trace_del_tracer(tr);
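
The synchronize_rcu() here is what makes the lockless readers above safe against instance removal: once update_marker_trace(tr, 0) has unlinked the instance from marker_copies, waiting out one grace period guarantees that no writer is still iterating the list with a pointer to this trace_array before the teardown below frees it.
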
@@ -10999,6 +11080,7 @@ __init static int tracer_alloc_buffers(void)
 	INIT_LIST_HEAD(&global_trace.events);
 	INIT_LIST_HEAD(&global_trace.hist_vars);
 	INIT_LIST_HEAD(&global_trace.err_log);
+	list_add(&global_trace.marker_list, &marker_copies);
 	list_add(&global_trace.list, &ftrace_trace_arrays);
 
 	apply_trace_boot_options();
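
Taken together: global_trace is placed on marker_copies unconditionally at boot, so top-level trace_marker writes behave exactly as before until an instance opts in via set_tracer_flag(), at which point each write is duplicated into that instance's buffer as well. The options/ file name for the new flag comes from the TRACE_FLAGS table in trace.h, which is not part of this diff; assuming the conventional naming, it would be exercised roughly by writing 1 to /sys/kernel/tracing/instances/foo/options/copy_trace_marker and then writing into /sys/kernel/tracing/trace_marker.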