Commit af90e85

Authored and committed by Alexei Starovoitov

Merge branch 's390-bpf-fix-bpf_arch_text_poke-with-new_addr-null-again'
Ilya Leoshkevich says:

====================
s390/bpf: Fix bpf_arch_text_poke() with new_addr == NULL again

This series fixes a regression causing perf on s390 to trigger a kernel
panic. Patch 1 fixes the issue, patch 2 adds a test to make sure this
doesn't happen again.
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
2 parents bf4807c + d459dbb commit af90e85

2 files changed: 76 additions & 1 deletion

2 files changed

+76
-1
lines changed

arch/s390/net/bpf_jit_comp.c

Lines changed: 9 additions & 1 deletion
@@ -566,7 +566,15 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
 {
 	memcpy(plt, &bpf_plt, sizeof(*plt));
 	plt->ret = ret;
-	plt->target = target;
+	/*
+	 * (target == NULL) implies that the branch to this PLT entry was
+	 * patched and became a no-op. However, some CPU could have jumped
+	 * to this PLT entry before patching and may be still executing it.
+	 *
+	 * Since the intention in this case is to make the PLT entry a no-op,
+	 * make the target point to the return label instead of NULL.
+	 */
+	plt->target = target ?: ret;
 }
 
 /*
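For readers unfamiliar with the GNU "?:" (Elvis) extension used in the new line, `target ?: ret` evaluates to `target` when it is non-NULL and to `ret` otherwise. The following standalone userspace sketch illustrates the fallback the patch relies on; the names `fake_plt` and `fake_bpf_jit_plt` are hypothetical and this is not the kernel code itself.

/*
 * Minimal userspace sketch (not kernel code) of the fallback semantics
 * used above: with the GNU "?:" extension, `target ?: ret` is equivalent
 * to `target ? target : ret`, so a NULL target degrades into a branch to
 * the return label instead of a branch to NULL.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_plt {
	void *ret;	/* address of the return label */
	void *target;	/* where the PLT entry branches to */
};

static void fake_bpf_jit_plt(struct fake_plt *plt, void *ret, void *target)
{
	plt->ret = ret;
	/* Same pattern as the patched bpf_jit_plt(): never leave NULL here. */
	plt->target = target ?: ret;
}

int main(void)
{
	int ret_label, trampoline;
	struct fake_plt plt;

	fake_bpf_jit_plt(&plt, &ret_label, &trampoline);
	printf("attached: target == &trampoline? %d\n", plt.target == &trampoline);

	fake_bpf_jit_plt(&plt, &ret_label, NULL);
	printf("detached: target == &ret_label?  %d\n", plt.target == &ret_label);

	return 0;
}

Compiled with gcc or clang (the "?:" form is a GNU extension), both checks print 1, showing that a detached entry never ends up with a NULL branch target.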

tools/testing/selftests/bpf/prog_tests/recursive_attach.c

Lines changed: 67 additions & 0 deletions
@@ -149,3 +149,70 @@ void test_fentry_attach_btf_presence(void)
 	fentry_recursive_target__destroy(target_skel);
 	fentry_recursive__destroy(tracing_skel);
 }
+
+static void *fentry_target_test_run(void *arg)
+{
+	for (;;) {
+		int prog_fd = __atomic_load_n((int *)arg, __ATOMIC_SEQ_CST);
+		LIBBPF_OPTS(bpf_test_run_opts, topts);
+		int err;
+
+		if (prog_fd == -1)
+			break;
+		err = bpf_prog_test_run_opts(prog_fd, &topts);
+		if (!ASSERT_OK(err, "fentry_target test_run"))
+			break;
+	}
+
+	return NULL;
+}
+
+void test_fentry_attach_stress(void)
+{
+	struct fentry_recursive_target *target_skel = NULL;
+	struct fentry_recursive *tracing_skel = NULL;
+	struct bpf_program *prog;
+	int err, i, tgt_prog_fd;
+	pthread_t thread;
+
+	target_skel = fentry_recursive_target__open_and_load();
+	if (!ASSERT_OK_PTR(target_skel,
+			   "fentry_recursive_target__open_and_load"))
+		goto close_prog;
+	tgt_prog_fd = bpf_program__fd(target_skel->progs.fentry_target);
+	err = pthread_create(&thread, NULL,
+			     fentry_target_test_run, &tgt_prog_fd);
+	if (!ASSERT_OK(err, "bpf_program__set_attach_target"))
+		goto close_prog;
+
+	for (i = 0; i < 1000; i++) {
+		tracing_skel = fentry_recursive__open();
+		if (!ASSERT_OK_PTR(tracing_skel, "fentry_recursive__open"))
+			goto stop_thread;
+
+		prog = tracing_skel->progs.recursive_attach;
+		err = bpf_program__set_attach_target(prog, tgt_prog_fd,
+						     "fentry_target");
+		if (!ASSERT_OK(err, "bpf_program__set_attach_target"))
+			goto stop_thread;
+
+		err = fentry_recursive__load(tracing_skel);
+		if (!ASSERT_OK(err, "fentry_recursive__load"))
+			goto stop_thread;
+
+		err = fentry_recursive__attach(tracing_skel);
+		if (!ASSERT_OK(err, "fentry_recursive__attach"))
+			goto stop_thread;
+
+		fentry_recursive__destroy(tracing_skel);
+		tracing_skel = NULL;
+	}
+
+stop_thread:
+	__atomic_store_n(&tgt_prog_fd, -1, __ATOMIC_SEQ_CST);
+	err = pthread_join(thread, NULL);
+	ASSERT_OK(err, "pthread_join");
+close_prog:
+	fentry_recursive__destroy(tracing_skel);
+	fentry_recursive_target__destroy(target_skel);
+}
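The new test uses a simple handshake: a background thread keeps calling bpf_prog_test_run_opts() on the target program while the main thread repeatedly attaches and detaches the fentry program, and the worker is stopped by atomically storing -1 into the shared fd. Below is a minimal standalone sketch of that shutdown pattern only; the names `worker` and `shared_fd` are illustrative and not taken from the selftest.

/*
 * Standalone sketch of the stop-flag handshake used by the stress test:
 * the worker polls a shared int with __atomic_load_n() and exits once
 * the main thread stores -1 into it with __atomic_store_n().
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg)
{
	long iterations = 0;

	for (;;) {
		int fd = __atomic_load_n((int *)arg, __ATOMIC_SEQ_CST);

		if (fd == -1)		/* stop request from the main thread */
			break;
		iterations++;		/* stand-in for bpf_prog_test_run_opts() */
	}
	printf("worker looped %ld times before stopping\n", iterations);
	return NULL;
}

int main(void)
{
	int shared_fd = 42;		/* stand-in for the target prog fd */
	pthread_t thread;

	if (pthread_create(&thread, NULL, worker, &shared_fd))
		return 1;
	usleep(10 * 1000);		/* let the worker spin briefly */
	__atomic_store_n(&shared_fd, -1, __ATOMIC_SEQ_CST);
	return pthread_join(thread, NULL);
}

Build with -pthread; the seq-cst store of -1 is what the selftest's stop_thread label relies on to shut the worker down before the skeletons are destroyed.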
