|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
| 2 | +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ |
| 3 | +#include <test_progs.h> |
| 4 | +#include <string.h> |
| 5 | +#include <stdio.h> |
| 6 | +#include "task_work_stress.skel.h" |
| 7 | +#include <linux/bpf.h> |
| 8 | +#include <linux/perf_event.h> |
| 9 | +#include <sys/syscall.h> |
| 10 | +#include <time.h> |
| 11 | +#include <stdlib.h> |
| 12 | +#include <stdatomic.h> |
| 13 | + |
/* Shared state between the main test thread and one runner thread. */
struct test_data {
	int prog_fd;		/* fd of the BPF program to invoke via test-run */
	atomic_int exit;	/* set to 1 to ask the runner thread to stop */
};
| 18 | + |
| 19 | +void *runner(void *test_data) |
| 20 | +{ |
| 21 | + struct test_data *td = test_data; |
| 22 | + int err = 0; |
| 23 | + LIBBPF_OPTS(bpf_test_run_opts, opts); |
| 24 | + |
| 25 | + while (!err && !atomic_load(&td->exit)) |
| 26 | + err = bpf_prog_test_run_opts(td->prog_fd, &opts); |
| 27 | + |
| 28 | + return NULL; |
| 29 | +} |
| 30 | + |
/* Read a non-negative integer from environment variable @str.
 *
 * Returns @def when the variable is unset or empty, is not a fully
 * numeric string, is negative, or does not fit in an int.
 */
static int get_env_int(const char *str, int def)
{
	const char *s = getenv(str);
	char *end;
	long retval;

	if (!s || !*s)
		return def;
	errno = 0;
	retval = strtol(s, &end, 10);
	/* Reject trailing junk and out-of-range input. strtol() only sets
	 * ERANGE at long's limits, so values that fit in a long but not in
	 * an int must be range-checked explicitly before narrowing.
	 */
	if (errno || *end || retval < 0 || retval > INT_MAX)
		return def;
	return (int)retval;
}
| 45 | + |
| 46 | +static void task_work_run(bool enable_delete) |
| 47 | +{ |
| 48 | + struct task_work_stress *skel; |
| 49 | + struct bpf_program *scheduler, *deleter; |
| 50 | + int nthreads = 16; |
| 51 | + int test_time_s = get_env_int("BPF_TASK_WORK_TEST_TIME", 1); |
| 52 | + pthread_t tid[nthreads], tid_del; |
| 53 | + bool started[nthreads], started_del = false; |
| 54 | + struct test_data td_sched = { .exit = 0 }, td_del = { .exit = 1 }; |
| 55 | + int i, err; |
| 56 | + |
| 57 | + skel = task_work_stress__open(); |
| 58 | + if (!ASSERT_OK_PTR(skel, "task_work__open")) |
| 59 | + return; |
| 60 | + |
| 61 | + scheduler = bpf_object__find_program_by_name(skel->obj, "schedule_task_work"); |
| 62 | + bpf_program__set_autoload(scheduler, true); |
| 63 | + |
| 64 | + deleter = bpf_object__find_program_by_name(skel->obj, "delete_task_work"); |
| 65 | + bpf_program__set_autoload(deleter, true); |
| 66 | + |
| 67 | + err = task_work_stress__load(skel); |
| 68 | + if (!ASSERT_OK(err, "skel_load")) |
| 69 | + goto cleanup; |
| 70 | + |
| 71 | + for (i = 0; i < nthreads; ++i) |
| 72 | + started[i] = false; |
| 73 | + |
| 74 | + td_sched.prog_fd = bpf_program__fd(scheduler); |
| 75 | + for (i = 0; i < nthreads; ++i) { |
| 76 | + if (pthread_create(&tid[i], NULL, runner, &td_sched) != 0) { |
| 77 | + fprintf(stderr, "could not start thread"); |
| 78 | + goto cancel; |
| 79 | + } |
| 80 | + started[i] = true; |
| 81 | + } |
| 82 | + |
| 83 | + if (enable_delete) |
| 84 | + atomic_store(&td_del.exit, 0); |
| 85 | + |
| 86 | + td_del.prog_fd = bpf_program__fd(deleter); |
| 87 | + if (pthread_create(&tid_del, NULL, runner, &td_del) != 0) { |
| 88 | + fprintf(stderr, "could not start thread"); |
| 89 | + goto cancel; |
| 90 | + } |
| 91 | + started_del = true; |
| 92 | + |
| 93 | + /* Run stress test for some time */ |
| 94 | + sleep(test_time_s); |
| 95 | + |
| 96 | +cancel: |
| 97 | + atomic_store(&td_sched.exit, 1); |
| 98 | + atomic_store(&td_del.exit, 1); |
| 99 | + for (i = 0; i < nthreads; ++i) { |
| 100 | + if (started[i]) |
| 101 | + pthread_join(tid[i], NULL); |
| 102 | + } |
| 103 | + |
| 104 | + if (started_del) |
| 105 | + pthread_join(tid_del, NULL); |
| 106 | + |
| 107 | + ASSERT_GT(skel->bss->callback_scheduled, 0, "work scheduled"); |
| 108 | + /* Some scheduling attempts should have failed due to contention */ |
| 109 | + ASSERT_GT(skel->bss->schedule_error, 0, "schedule error"); |
| 110 | + |
| 111 | + if (enable_delete) { |
| 112 | + /* If delete thread is enabled, it has cancelled some callbacks */ |
| 113 | + ASSERT_GT(skel->bss->delete_success, 0, "delete success"); |
| 114 | + ASSERT_LT(skel->bss->callback_success, skel->bss->callback_scheduled, "callbacks"); |
| 115 | + } else { |
| 116 | + /* Without delete thread number of scheduled callbacks is the same as fired */ |
| 117 | + ASSERT_EQ(skel->bss->callback_success, skel->bss->callback_scheduled, "callbacks"); |
| 118 | + } |
| 119 | + |
| 120 | +cleanup: |
| 121 | + task_work_stress__destroy(skel); |
| 122 | +} |
| 123 | + |
/* Test entry point: run the stress scenario once without and once with
 * the concurrent delete thread, as separately selectable subtests.
 */
void test_task_work_stress(void)
{
	static const struct {
		const char *name;
		bool with_delete;
	} subtests[] = {
		{ "no_delete", false },
		{ "with_delete", true },
	};
	int i;

	for (i = 0; i < 2; i++) {
		if (test__start_subtest(subtests[i].name))
			task_work_run(subtests[i].with_delete);
	}
}
0 commit comments