8
8
#include <linux/ftrace.h>
9
9
#include <linux/uaccess.h>
10
10
#include <linux/memory.h>
11
+ #include <linux/stop_machine.h>
11
12
#include <asm/cacheflush.h>
12
13
#include <asm/patch.h>
13
14
@@ -75,8 +76,7 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
75
76
make_call_t0 (hook_pos , target , call );
76
77
77
78
/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
78
- if (patch_text_nosync
79
- ((void * )hook_pos , enable ? call : nops , MCOUNT_INSN_SIZE ))
79
+ if (patch_insn_write ((void * )hook_pos , enable ? call : nops , MCOUNT_INSN_SIZE ))
80
80
return - EPERM ;
81
81
82
82
return 0 ;
@@ -88,7 +88,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
88
88
89
89
make_call_t0 (rec -> ip , addr , call );
90
90
91
- if (patch_text_nosync ((void * )rec -> ip , call , MCOUNT_INSN_SIZE ))
91
+ if (patch_insn_write ((void * )rec -> ip , call , MCOUNT_INSN_SIZE ))
92
92
return - EPERM ;
93
93
94
94
return 0 ;
@@ -99,7 +99,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
99
99
{
100
100
unsigned int nops [2 ] = {NOP4 , NOP4 };
101
101
102
- if (patch_text_nosync ((void * )rec -> ip , nops , MCOUNT_INSN_SIZE ))
102
+ if (patch_insn_write ((void * )rec -> ip , nops , MCOUNT_INSN_SIZE ))
103
103
return - EPERM ;
104
104
105
105
return 0 ;
@@ -134,6 +134,42 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
134
134
135
135
return ret ;
136
136
}
137
+
138
/*
 * Rendezvous state shared by all online CPUs during a stop_machine()
 * ftrace update; see __ftrace_modify_code().
 */
struct ftrace_modify_param {
	int command;		/* command forwarded to ftrace_modify_all_code() */
	atomic_t cpu_count;	/* rendezvous counter; released via a release-increment */
};
142
+
143
/*
 * stop_machine() callback run on every online CPU.
 *
 * The last CPU to arrive (the one whose increment makes the counter reach
 * num_online_cpus()) performs the actual code patching on behalf of all
 * CPUs; the others spin until the patching CPU bumps the counter past
 * num_online_cpus().  Every CPU then flushes its local icache so it
 * observes the freshly patched instructions.  Always returns 0.
 */
static int __ftrace_modify_code(void *data)
{
	struct ftrace_modify_param *param = data;

	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
		/* Last CPU in: apply the requested modification. */
		ftrace_modify_all_code(param->command);
		/*
		 * Make sure the patching store is effective *before* we
		 * increment the counter which releases all waiting CPUs
		 * by using the release variant of atomic increment. The
		 * release pairs with the call to local_flush_icache_all()
		 * on the waiting CPU.
		 */
		atomic_inc_return_release(&param->cpu_count);
	} else {
		/* Spin until the patching CPU signals completion above. */
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();
	}

	local_flush_icache_all();

	return 0;
}
166
+
167
+ void arch_ftrace_update_code (int command )
168
+ {
169
+ struct ftrace_modify_param param = { command , ATOMIC_INIT (0 ) };
170
+
171
+ stop_machine (__ftrace_modify_code , & param , cpu_online_mask );
172
+ }
137
173
#endif
138
174
139
175
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
0 commit comments