Skip to content

Commit 8391aa4

Browse files
committed
Merge tag 'trace-v6.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing fixes for 6.1-rc3: - Fixed NULL pointer dereference in the ring buffer wait-waiters code for machines that have less CPUs than what nr_cpu_ids returns. The buffer array is of size nr_cpu_ids, but only the online CPUs get initialized. - Fixed use after free call in ftrace_shutdown. - Fix accounting of whether a kprobe is enabled - Fix NULL pointer dereference on error path of fprobe rethook_alloc(). - Fix unregistering of fprobe_kprobe_handler - Fix memory leak in kprobe test module * tag 'trace-v6.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: tracing: kprobe: Fix memory leak in test_gen_kprobe/kretprobe_cmd() tracing/fprobe: Fix to check whether fprobe is registered correctly fprobe: Check rethook_alloc() return in rethook initialization kprobe: reverse kp->flags when arm_kprobe failed ftrace: Fix use-after-free for dynamic ftrace_ops ring-buffer: Check for NULL cpu_buffer in ring_buffer_wake_waiters()
2 parents 2f5065a + 66f0919 commit 8391aa4

File tree

5 files changed

+29
-26
lines changed

5 files changed

+29
-26
lines changed

kernel/kprobes.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2429,8 +2429,11 @@ int enable_kprobe(struct kprobe *kp)
24292429
if (!kprobes_all_disarmed && kprobe_disabled(p)) {
24302430
p->flags &= ~KPROBE_FLAG_DISABLED;
24312431
ret = arm_kprobe(p);
2432-
if (ret)
2432+
if (ret) {
24332433
p->flags |= KPROBE_FLAG_DISABLED;
2434+
if (p != kp)
2435+
kp->flags |= KPROBE_FLAG_DISABLED;
2436+
}
24342437
}
24352438
out:
24362439
mutex_unlock(&kprobe_mutex);

kernel/trace/fprobe.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -141,6 +141,8 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
141141
return -E2BIG;
142142

143143
fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
144+
if (!fp->rethook)
145+
return -ENOMEM;
144146
for (i = 0; i < size; i++) {
145147
struct fprobe_rethook_node *node;
146148

@@ -301,7 +303,8 @@ int unregister_fprobe(struct fprobe *fp)
301303
{
302304
int ret;
303305

304-
if (!fp || fp->ops.func != fprobe_handler)
306+
if (!fp || (fp->ops.saved_func != fprobe_handler &&
307+
fp->ops.saved_func != fprobe_kprobe_handler))
305308
return -EINVAL;
306309

307310
/*

kernel/trace/ftrace.c

Lines changed: 3 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -3028,18 +3028,8 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
30283028
command |= FTRACE_UPDATE_TRACE_FUNC;
30293029
}
30303030

3031-
if (!command || !ftrace_enabled) {
3032-
/*
3033-
* If these are dynamic or per_cpu ops, they still
3034-
* need their data freed. Since, function tracing is
3035-
* not currently active, we can just free them
3036-
* without synchronizing all CPUs.
3037-
*/
3038-
if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
3039-
goto free_ops;
3040-
3041-
return 0;
3042-
}
3031+
if (!command || !ftrace_enabled)
3032+
goto out;
30433033

30443034
/*
30453035
* If the ops uses a trampoline, then it needs to be
@@ -3076,6 +3066,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
30763066
removed_ops = NULL;
30773067
ops->flags &= ~FTRACE_OPS_FL_REMOVING;
30783068

3069+
out:
30793070
/*
30803071
* Dynamic ops may be freed, we must make sure that all
30813072
* callers are done before leaving this function.
@@ -3103,7 +3094,6 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
31033094
if (IS_ENABLED(CONFIG_PREEMPTION))
31043095
synchronize_rcu_tasks();
31053096

3106-
free_ops:
31073097
ftrace_trampoline_free(ops);
31083098
}
31093099

kernel/trace/kprobe_event_gen_test.c

Lines changed: 7 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -100,20 +100,20 @@ static int __init test_gen_kprobe_cmd(void)
100100
KPROBE_GEN_TEST_FUNC,
101101
KPROBE_GEN_TEST_ARG0, KPROBE_GEN_TEST_ARG1);
102102
if (ret)
103-
goto free;
103+
goto out;
104104

105105
/* Use kprobe_event_add_fields to add the rest of the fields */
106106

107107
ret = kprobe_event_add_fields(&cmd, KPROBE_GEN_TEST_ARG2, KPROBE_GEN_TEST_ARG3);
108108
if (ret)
109-
goto free;
109+
goto out;
110110

111111
/*
112112
* This actually creates the event.
113113
*/
114114
ret = kprobe_event_gen_cmd_end(&cmd);
115115
if (ret)
116-
goto free;
116+
goto out;
117117

118118
/*
119119
* Now get the gen_kprobe_test event file. We need to prevent
@@ -136,13 +136,11 @@ static int __init test_gen_kprobe_cmd(void)
136136
goto delete;
137137
}
138138
out:
139+
kfree(buf);
139140
return ret;
140141
delete:
141142
/* We got an error after creating the event, delete it */
142143
ret = kprobe_event_delete("gen_kprobe_test");
143-
free:
144-
kfree(buf);
145-
146144
goto out;
147145
}
148146

@@ -170,14 +168,14 @@ static int __init test_gen_kretprobe_cmd(void)
170168
KPROBE_GEN_TEST_FUNC,
171169
"$retval");
172170
if (ret)
173-
goto free;
171+
goto out;
174172

175173
/*
176174
* This actually creates the event.
177175
*/
178176
ret = kretprobe_event_gen_cmd_end(&cmd);
179177
if (ret)
180-
goto free;
178+
goto out;
181179

182180
/*
183181
* Now get the gen_kretprobe_test event file. We need to
@@ -201,13 +199,11 @@ static int __init test_gen_kretprobe_cmd(void)
201199
goto delete;
202200
}
203201
out:
202+
kfree(buf);
204203
return ret;
205204
delete:
206205
/* We got an error after creating the event, delete it */
207206
ret = kprobe_event_delete("gen_kretprobe_test");
208-
free:
209-
kfree(buf);
210-
211207
goto out;
212208
}
213209

kernel/trace/ring_buffer.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -937,6 +937,9 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
937937
struct ring_buffer_per_cpu *cpu_buffer;
938938
struct rb_irq_work *rbwork;
939939

940+
if (!buffer)
941+
return;
942+
940943
if (cpu == RING_BUFFER_ALL_CPUS) {
941944

942945
/* Wake up individual ones too. One level recursion */
@@ -945,7 +948,15 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
945948

946949
rbwork = &buffer->irq_work;
947950
} else {
951+
if (WARN_ON_ONCE(!buffer->buffers))
952+
return;
953+
if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
954+
return;
955+
948956
cpu_buffer = buffer->buffers[cpu];
957+
/* The CPU buffer may not have been initialized yet */
958+
if (!cpu_buffer)
959+
return;
949960
rbwork = &cpu_buffer->irq_work;
950961
}
951962

0 commit comments

Comments
 (0)