@@ -102,7 +102,7 @@ static bool ftrace_pids_enabled(struct ftrace_ops *ops)
 
         tr = ops->private;
 
-        return tr->function_pids != NULL;
+        return tr->function_pids != NULL || tr->function_no_pids != NULL;
 }
 
 static void ftrace_update_trampoline(struct ftrace_ops *ops);
@@ -6931,10 +6931,12 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
 {
         struct trace_array *tr = data;
         struct trace_pid_list *pid_list;
+        struct trace_pid_list *no_pid_list;
 
         pid_list = rcu_dereference_sched(tr->function_pids);
+        no_pid_list = rcu_dereference_sched(tr->function_no_pids);
 
-        if (trace_ignore_this_task(pid_list, next))
+        if (trace_ignore_this_task(pid_list, no_pid_list, next))
                 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
                                FTRACE_PID_IGNORE);
         else
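Note: the probe above now hands both lists to trace_ignore_this_task(), which lives in kernel/trace/trace.c and is outside these hunks. As a sketch of the intended two-list semantics — assuming trace_find_filtered_pid() reports whether a pid is set in a list — the decision looks roughly like this:

bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
                            struct trace_pid_list *filtered_no_pids,
                            struct task_struct *task)
{
        /* Ignore the task if it is explicitly listed in the "no" list,
         * or if a positive list exists and the task is not in it.
         */
        return (filtered_pids &&
                !trace_find_filtered_pid(filtered_pids, task->pid)) ||
               (filtered_no_pids &&
                trace_find_filtered_pid(filtered_no_pids, task->pid));
}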
@@ -6952,6 +6954,9 @@ ftrace_pid_follow_sched_process_fork(void *data,
 
         pid_list = rcu_dereference_sched(tr->function_pids);
         trace_filter_add_remove_task(pid_list, self, task);
+
+        pid_list = rcu_dereference_sched(tr->function_no_pids);
+        trace_filter_add_remove_task(pid_list, self, task);
 }
 
 static void
@@ -6962,6 +6967,9 @@ ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
 
         pid_list = rcu_dereference_sched(tr->function_pids);
         trace_filter_add_remove_task(pid_list, NULL, task);
+
+        pid_list = rcu_dereference_sched(tr->function_no_pids);
+        trace_filter_add_remove_task(pid_list, NULL, task);
 }
 
 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
@@ -6979,42 +6987,64 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
         }
 }
 
-static void clear_ftrace_pids(struct trace_array *tr)
+enum {
+        TRACE_PIDS              = BIT(0),
+        TRACE_NO_PIDS           = BIT(1),
+};
+
+static void clear_ftrace_pids(struct trace_array *tr, int type)
 {
         struct trace_pid_list *pid_list;
+        struct trace_pid_list *no_pid_list;
         int cpu;
 
         pid_list = rcu_dereference_protected(tr->function_pids,
                                              lockdep_is_held(&ftrace_lock));
-        if (!pid_list)
+        no_pid_list = rcu_dereference_protected(tr->function_no_pids,
                                                 lockdep_is_held(&ftrace_lock));
+
+        /* Make sure there's something to do */
+        if (!(((type & TRACE_PIDS) && pid_list) ||
+              ((type & TRACE_NO_PIDS) && no_pid_list)))
                 return;
 
-        unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
+        /* See if the pids still need to be checked after this */
+        if (!((!(type & TRACE_PIDS) && pid_list) ||
+              (!(type & TRACE_NO_PIDS) && no_pid_list))) {
+                unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
+                for_each_possible_cpu(cpu)
+                        per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
+        }
 
-        for_each_possible_cpu(cpu)
-                per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
+        if (type & TRACE_PIDS)
+                rcu_assign_pointer(tr->function_pids, NULL);
 
-        rcu_assign_pointer(tr->function_pids, NULL);
+        if (type & TRACE_NO_PIDS)
+                rcu_assign_pointer(tr->function_no_pids, NULL);
 
         /* Wait till all users are no longer using pid filtering */
         synchronize_rcu();
 
-        trace_free_pid_list(pid_list);
+        if ((type & TRACE_PIDS) && pid_list)
+                trace_free_pid_list(pid_list);
+
+        if ((type & TRACE_NO_PIDS) && no_pid_list)
+                trace_free_pid_list(no_pid_list);
 }
 
 void ftrace_clear_pids(struct trace_array *tr)
 {
         mutex_lock(&ftrace_lock);
 
-        clear_ftrace_pids(tr);
+        clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
 
         mutex_unlock(&ftrace_lock);
 }
 
-static void ftrace_pid_reset(struct trace_array *tr)
+static void ftrace_pid_reset(struct trace_array *tr, int type)
 {
         mutex_lock(&ftrace_lock);
-        clear_ftrace_pids(tr);
+        clear_ftrace_pids(tr, type);
 
         ftrace_update_pid_func();
         ftrace_startup_all(0);
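The two compound tests in clear_ftrace_pids() are easy to misread. Restated as hypothetical helpers (names are illustrative, not from the patch): the first bails out unless @type selects at least one list that is actually populated; the second tears down the sched_switch probe only when no populated list outside @type survives.

static bool pids_to_clear(int type, struct trace_pid_list *pids,
                          struct trace_pid_list *no_pids)
{
        /* Something to do: @type selects a list that exists */
        return ((type & TRACE_PIDS) && pids) ||
               ((type & TRACE_NO_PIDS) && no_pids);
}

static bool pids_still_needed(int type, struct trace_pid_list *pids,
                              struct trace_pid_list *no_pids)
{
        /* A populated list NOT selected by @type remains, so the
         * sched_switch probe must stay registered.
         */
        return (!(type & TRACE_PIDS) && pids) ||
               (!(type & TRACE_NO_PIDS) && no_pids);
}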
@@ -7078,9 +7108,45 @@ static const struct seq_operations ftrace_pid_sops = {
         .show = fpid_show,
 };
 
-static int
-ftrace_pid_open(struct inode *inode, struct file *file)
+static void *fnpid_start(struct seq_file *m, loff_t *pos)
+        __acquires(RCU)
+{
+        struct trace_pid_list *pid_list;
+        struct trace_array *tr = m->private;
+
+        mutex_lock(&ftrace_lock);
+        rcu_read_lock_sched();
+
+        pid_list = rcu_dereference_sched(tr->function_no_pids);
+
+        if (!pid_list)
+                return !(*pos) ? FTRACE_NO_PIDS : NULL;
+
+        return trace_pid_start(pid_list, pos);
+}
+
+static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
 {
+        struct trace_array *tr = m->private;
+        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
+
+        if (v == FTRACE_NO_PIDS) {
+                (*pos)++;
+                return NULL;
+        }
+        return trace_pid_next(pid_list, v, pos);
+}
+
+static const struct seq_operations ftrace_no_pid_sops = {
+        .start = fnpid_start,
+        .next = fnpid_next,
+        .stop = fpid_stop,
+        .show = fpid_show,
+};
+
+static int pid_open(struct inode *inode, struct file *file, int type)
+{
+        const struct seq_operations *seq_ops;
         struct trace_array *tr = inode->i_private;
         struct seq_file *m;
         int ret = 0;
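fnpid_start() hands back the FTRACE_NO_PIDS sentinel when no list is installed, so the shared fpid_show() can print a "no pid" placeholder and fnpid_next() knows to terminate the walk. The sentinel is presumably defined elsewhere in ftrace.c (outside these hunks), along these lines:

/* Assumption: a sentinel distinct from any valid pid_list pointer */
#define FTRACE_NO_PIDS          (void *)(PID_MAX_LIMIT + 1)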
@@ -7091,9 +7157,18 @@ ftrace_pid_open(struct inode *inode, struct file *file)
 
         if ((file->f_mode & FMODE_WRITE) &&
             (file->f_flags & O_TRUNC))
-                ftrace_pid_reset(tr);
+                ftrace_pid_reset(tr, type);
+
+        switch (type) {
+        case TRACE_PIDS:
+                seq_ops = &ftrace_pid_sops;
+                break;
+        case TRACE_NO_PIDS:
+                seq_ops = &ftrace_no_pid_sops;
+                break;
+        }
 
-        ret = seq_open(file, &ftrace_pid_sops);
+        ret = seq_open(file, seq_ops);
         if (ret < 0) {
                 trace_array_put(tr);
         } else {
@@ -7105,19 +7180,34 @@ ftrace_pid_open(struct inode *inode, struct file *file)
         return ret;
 }
 
+static int
+ftrace_pid_open(struct inode *inode, struct file *file)
+{
+        return pid_open(inode, file, TRACE_PIDS);
+}
+
+static int
+ftrace_no_pid_open(struct inode *inode, struct file *file)
+{
+        return pid_open(inode, file, TRACE_NO_PIDS);
+}
+
 static void ignore_task_cpu(void *data)
 {
         struct trace_array *tr = data;
         struct trace_pid_list *pid_list;
+        struct trace_pid_list *no_pid_list;
 
         /*
          * This function is called by on_each_cpu() while the
          * event_mutex is held.
          */
         pid_list = rcu_dereference_protected(tr->function_pids,
                                              mutex_is_locked(&ftrace_lock));
+        no_pid_list = rcu_dereference_protected(tr->function_no_pids,
+                                                mutex_is_locked(&ftrace_lock));
 
-        if (trace_ignore_this_task(pid_list, current))
+        if (trace_ignore_this_task(pid_list, no_pid_list, current))
                 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
                                FTRACE_PID_IGNORE);
         else
@@ -7126,12 +7216,13 @@ static void ignore_task_cpu(void *data)
 }
 
 static ssize_t
-ftrace_pid_write(struct file *filp, const char __user *ubuf,
-                 size_t cnt, loff_t *ppos)
+pid_write(struct file *filp, const char __user *ubuf,
+          size_t cnt, loff_t *ppos, int type)
 {
         struct seq_file *m = filp->private_data;
         struct trace_array *tr = m->private;
-        struct trace_pid_list *filtered_pids = NULL;
+        struct trace_pid_list *filtered_pids;
+        struct trace_pid_list *other_pids;
         struct trace_pid_list *pid_list;
         ssize_t ret;
 
@@ -7140,19 +7231,39 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
 
         mutex_lock(&ftrace_lock);
 
-        filtered_pids = rcu_dereference_protected(tr->function_pids,
+        switch (type) {
+        case TRACE_PIDS:
+                filtered_pids = rcu_dereference_protected(tr->function_pids,
                                              lockdep_is_held(&ftrace_lock));
+                other_pids = rcu_dereference_protected(tr->function_no_pids,
+                                             lockdep_is_held(&ftrace_lock));
+                break;
+        case TRACE_NO_PIDS:
+                filtered_pids = rcu_dereference_protected(tr->function_no_pids,
+                                             lockdep_is_held(&ftrace_lock));
+                other_pids = rcu_dereference_protected(tr->function_pids,
+                                             lockdep_is_held(&ftrace_lock));
+                break;
+        }
 
         ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
         if (ret < 0)
                 goto out;
 
-        rcu_assign_pointer(tr->function_pids, pid_list);
+        switch (type) {
+        case TRACE_PIDS:
+                rcu_assign_pointer(tr->function_pids, pid_list);
+                break;
+        case TRACE_NO_PIDS:
+                rcu_assign_pointer(tr->function_no_pids, pid_list);
+                break;
+        }
+
 
         if (filtered_pids) {
                 synchronize_rcu();
                 trace_free_pid_list(filtered_pids);
-        } else if (pid_list) {
+        } else if (pid_list && !other_pids) {
                 /* Register a probe to set whether to ignore the tracing of a task */
                 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
         }
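One subtlety in pid_write(): the sched_switch probe must be registered exactly once per trace_array, no matter which of the two files fills a list first. That is what the new !other_pids test guards. A hypothetical restatement of the rule (not in the patch):

static bool need_register_probe(struct trace_pid_list *filtered_pids,
                                struct trace_pid_list *pid_list,
                                struct trace_pid_list *other_pids)
{
        /* This file's list was empty, now has content, and the other
         * file's list had not already triggered registration.
         */
        return !filtered_pids && pid_list && !other_pids;
}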
@@ -7175,6 +7286,20 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
         return ret;
 }
 
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+                 size_t cnt, loff_t *ppos)
+{
+        return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
+}
+
+static ssize_t
+ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+{
+        return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
+}
+
 static int
 ftrace_pid_release(struct inode *inode, struct file *file)
 {
@@ -7193,10 +7318,20 @@ static const struct file_operations ftrace_pid_fops = {
         .release = ftrace_pid_release,
 };
 
+static const struct file_operations ftrace_no_pid_fops = {
+        .open = ftrace_no_pid_open,
+        .write = ftrace_no_pid_write,
+        .read = seq_read,
+        .llseek = tracing_lseek,
+        .release = ftrace_pid_release,
+};
+
 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 {
         trace_create_file("set_ftrace_pid", 0644, d_tracer,
                             tr, &ftrace_pid_fops);
+        trace_create_file("set_ftrace_notrace_pid", 0644, d_tracer,
+                            tr, &ftrace_no_pid_fops);
 }
 
 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
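With the patch applied, a task can opt itself out of function tracing by writing its pid to the new file. A minimal userspace sketch (not part of the patch; the tracefs mount point may differ on your system):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/tracing/set_ftrace_notrace_pid", "w");

        if (!f) {
                perror("set_ftrace_notrace_pid");
                return 1;
        }
        /* pids are written as text; the kernel parses them via trace_pid_write() */
        fprintf(f, "%d\n", (int)getpid());
        fclose(f);
        return 0;
}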