diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h index 0ec1897a2f3e5..b284a0dcc01ca 100644 --- a/kernel/include/ksched.h +++ b/kernel/include/ksched.h @@ -64,6 +64,7 @@ void z_ready_thread(struct k_thread *thread); void z_requeue_current(struct k_thread *curr); struct k_thread *z_swap_next_thread(void); void z_thread_abort(struct k_thread *thread); +void z_thread_wake_joiners(struct k_thread *thread); static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, k_timeout_t timeout) { diff --git a/kernel/sched.c b/kernel/sched.c index 5cda295f73115..3fcdf1842567a 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1729,6 +1729,13 @@ static void end_thread(struct k_thread *thread) } } +void z_thread_wake_joiners(struct k_thread *thread) +{ + LOCKED(&sched_spinlock) { + unpend_all(&thread->join_queue); + } +} + void z_thread_abort(struct k_thread *thread) { k_spinlock_key_t key = k_spin_lock(&sched_spinlock); diff --git a/lib/posix/posix_internal.h b/lib/posix/posix_internal.h index 48999c5694ce6..0d9ef9072a28b 100644 --- a/lib/posix/posix_internal.h +++ b/lib/posix/posix_internal.h @@ -8,6 +8,7 @@ #define ZEPHYR_LIB_POSIX_POSIX_INTERNAL_H_ #include +#include /* * Bit used to mark a pthread object as initialized. Initialization status is @@ -31,14 +32,15 @@ enum pthread_state { PTHREAD_DETACHED = PTHREAD_CREATE_DETACHED, /* The thread is running and joinable. */ PTHREAD_JOINABLE = PTHREAD_CREATE_JOINABLE, + /* A joinable thread exited and its return code is available. */ + PTHREAD_EXITED, /* The thread structure is unallocated and available for reuse. */ PTHREAD_TERMINATED, - /* A joinable thread exited and its return code is available. 
*/ - PTHREAD_EXITED }; struct posix_thread { struct k_thread thread; + struct k_spinlock lock; /* List of keys that thread has called pthread_setspecific() on */ sys_slist_t key_list; @@ -49,12 +51,9 @@ struct posix_thread { /* Pthread cancellation */ int cancel_state; int cancel_pending; - struct k_spinlock cancel_lock; /* Pthread State */ enum pthread_state state; - pthread_mutex_t state_lock; - pthread_cond_t state_cond; }; typedef struct pthread_key_obj { diff --git a/lib/posix/pthread.c b/lib/posix/pthread.c index ab28b22e1ef7c..102883571daf3 100644 --- a/lib/posix/pthread.c +++ b/lib/posix/pthread.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include "posix_internal.h" @@ -37,22 +38,58 @@ static const struct pthread_attr init_pthread_attrs = { }; static struct posix_thread posix_thread_pool[CONFIG_MAX_PTHREAD_COUNT]; -static struct k_spinlock pthread_pool_lock; +SYS_BITARRAY_DEFINE_STATIC(posix_thread_bitarray, CONFIG_MAX_PTHREAD_COUNT); -pthread_t pthread_self(void) +/* + * We reserve the MSB to mark a pthread_t as initialized (from the + * perspective of the application). With a linear space, this means that + * the theoretical pthread_t range is [0,2147483647]. 
+ */ +BUILD_ASSERT(CONFIG_MAX_PTHREAD_COUNT < PTHREAD_OBJ_MASK_INIT, + "CONFIG_MAX_PTHREAD_COUNT is too high"); + +static inline size_t posix_thread_to_offset(struct posix_thread *t) +{ + return t - posix_thread_pool; +} + +static inline size_t get_posix_thread_idx(pthread_t pth) { - return (struct posix_thread *) - CONTAINER_OF(k_current_get(), struct posix_thread, thread) - - posix_thread_pool; + return mark_pthread_obj_uninitialized(pth); } -struct posix_thread *to_posix_thread(pthread_t pthread) +struct posix_thread *to_posix_thread(pthread_t pth) { - if (pthread >= CONFIG_MAX_PTHREAD_COUNT) { + int actually_initialized; + size_t bit = get_posix_thread_idx(pth); + + /* if the provided thread does not claim to be initialized, its invalid */ + if (!is_pthread_obj_initialized(pth)) { + return NULL; + } + + /* Mask off the MSB to get the actual bit index */ + if (sys_bitarray_test_bit(&posix_thread_bitarray, bit, &actually_initialized) < 0) { + return NULL; + } + + if (actually_initialized == 0) { + /* The thread claims to be initialized but is actually not */ return NULL; } - return &posix_thread_pool[pthread]; + return &posix_thread_pool[bit]; +} + +pthread_t pthread_self(void) +{ + size_t bit; + struct posix_thread *t; + + t = (struct posix_thread *)CONTAINER_OF(k_current_get(), struct posix_thread, thread); + bit = posix_thread_to_offset(t); + + return mark_pthread_obj_initialized(bit); } static bool is_posix_policy_prio_valid(uint32_t priority, int policy) @@ -135,12 +172,38 @@ int pthread_attr_setstack(pthread_attr_t *_attr, void *stackaddr, size_t stacksi return 0; } +FUNC_NORETURN static void zephyr_thread_wrapper(void *arg1, void *arg2, void *arg3) { - void * (*fun_ptr)(void *) = arg3; + void *(*fun_ptr)(void *) = arg3; + + pthread_exit(fun_ptr(arg1)); + CODE_UNREACHABLE; +} + +static bool pthread_attr_is_valid(const struct pthread_attr *attr) +{ + /* + * FIXME: Pthread attribute must be non-null and it provides stack + * pointer and stack size. 
So even though POSIX 1003.1 spec accepts + * attrib as NULL but zephyr needs it initialized with valid stack. + */ + if (attr == NULL || attr->initialized == 0U || attr->stack == NULL || + attr->stacksize == 0) { + return false; + } - fun_ptr(arg1); - pthread_exit(NULL); + /* require a valid scheduler policy */ + if (!valid_posix_policy(attr->schedpolicy)) { + return false; + } + + /* require a valid detachstate */ + if (!(attr->detachstate == PTHREAD_JOINABLE || attr->detachstate == PTHREAD_DETACHED)) { + return false; + } + + return true; } /** @@ -154,70 +217,39 @@ static void zephyr_thread_wrapper(void *arg1, void *arg2, void *arg3) int pthread_create(pthread_t *newthread, const pthread_attr_t *_attr, void *(*threadroutine)(void *), void *arg) { - int rv; + size_t bit; int32_t prio; k_spinlock_key_t key; - uint32_t pthread_num; - k_spinlock_key_t cancel_key; - pthread_condattr_t cond_attr; - struct posix_thread *thread; + struct posix_thread *t; const struct pthread_attr *attr = (const struct pthread_attr *)_attr; - /* - * FIXME: Pthread attribute must be non-null and it provides stack - * pointer and stack size. So even though POSIX 1003.1 spec accepts - * attrib as NULL but zephyr needs it initialized with valid stack. 
- */ - if ((attr == NULL) || (attr->initialized == 0U) - || (attr->stack == NULL) || (attr->stacksize == 0)) { + if (!pthread_attr_is_valid(attr)) { return EINVAL; } - key = k_spin_lock(&pthread_pool_lock); - for (pthread_num = 0; - pthread_num < CONFIG_MAX_PTHREAD_COUNT; pthread_num++) { - thread = &posix_thread_pool[pthread_num]; - if (thread->state == PTHREAD_EXITED || thread->state == PTHREAD_TERMINATED) { - thread->state = PTHREAD_JOINABLE; - break; - } - } - k_spin_unlock(&pthread_pool_lock, key); - - if (pthread_num >= CONFIG_MAX_PTHREAD_COUNT) { + if (sys_bitarray_alloc(&posix_thread_bitarray, 1, &bit) < 0) { + /* No threads left to allocate */ return EAGAIN; } - rv = pthread_mutex_init(&thread->state_lock, NULL); - if (rv != 0) { - key = k_spin_lock(&pthread_pool_lock); - thread->state = PTHREAD_EXITED; - k_spin_unlock(&pthread_pool_lock, key); - return rv; - } - + t = &posix_thread_pool[bit]; + key = k_spin_lock(&t->lock); + __ASSERT_NO_MSG(t->state == PTHREAD_TERMINATED); + t->state = attr->detachstate; prio = posix_to_zephyr_priority(attr->priority, attr->schedpolicy); + t->cancel_state = BIT(_PTHREAD_CANCEL_POS) & attr->flags; + t->cancel_pending = 0; + sys_slist_init(&t->key_list); + k_spin_unlock(&t->lock, key); - cancel_key = k_spin_lock(&thread->cancel_lock); - thread->cancel_state = (1 << _PTHREAD_CANCEL_POS) & attr->flags; - thread->cancel_pending = 0; - k_spin_unlock(&thread->cancel_lock, cancel_key); - - pthread_mutex_lock(&thread->state_lock); - thread->state = attr->detachstate; - pthread_mutex_unlock(&thread->state_lock); - - pthread_cond_init(&thread->state_cond, &cond_attr); - sys_slist_init(&thread->key_list); - - *newthread = pthread_num; - k_thread_create(&thread->thread, attr->stack, attr->stacksize, + k_thread_create(&t->thread, attr->stack, attr->stacksize, (k_thread_entry_t)zephyr_thread_wrapper, (void *)arg, NULL, threadroutine, prio, (~K_ESSENTIAL & attr->flags), K_MSEC(attr->delayedstart)); + *newthread = 
mark_pthread_obj_initialized(bit); + return 0; } - /** * @brief Set cancelability State. * @@ -226,22 +258,22 @@ int pthread_create(pthread_t *newthread, const pthread_attr_t *_attr, int pthread_setcancelstate(int state, int *oldstate) { bool cancel_pending; - k_spinlock_key_t cancel_key; - struct posix_thread *pthread = to_posix_thread(pthread_self()); + k_spinlock_key_t key; + struct posix_thread *t = to_posix_thread(pthread_self()); if (state != PTHREAD_CANCEL_ENABLE && state != PTHREAD_CANCEL_DISABLE) { return EINVAL; } - cancel_key = k_spin_lock(&pthread->cancel_lock); - *oldstate = pthread->cancel_state; - pthread->cancel_state = state; - cancel_pending = pthread->cancel_pending; - k_spin_unlock(&pthread->cancel_lock, cancel_key); + key = k_spin_lock(&t->lock); + *oldstate = t->cancel_state; + t->cancel_state = state; + cancel_pending = t->cancel_pending; + k_spin_unlock(&t->lock, key); if (state == PTHREAD_CANCEL_ENABLE && cancel_pending) { - pthread_exit((void *)PTHREAD_CANCELED); + pthread_exit(INT_TO_POINTER(PTHREAD_CANCELED)); } return 0; @@ -254,34 +286,46 @@ int pthread_setcancelstate(int state, int *oldstate) */ int pthread_cancel(pthread_t pthread) { - struct posix_thread *thread = to_posix_thread(pthread); - int cancel_state; - k_spinlock_key_t cancel_key; + int rv = 0; + k_spinlock_key_t key; + bool should_abort = false; + struct posix_thread *t = to_posix_thread(pthread); + size_t bit = get_posix_thread_idx(pthread); - if ((thread == NULL) || (thread->state == PTHREAD_TERMINATED)) { + if (t == NULL) { return ESRCH; } - cancel_key = k_spin_lock(&thread->cancel_lock); - thread->cancel_pending = 1; - cancel_state = thread->cancel_state; - k_spin_unlock(&thread->cancel_lock, cancel_key); - - if (cancel_state == PTHREAD_CANCEL_ENABLE) { - pthread_mutex_lock(&thread->state_lock); - if (thread->state == PTHREAD_DETACHED) { - thread->state = PTHREAD_TERMINATED; - } else { - thread->retval = PTHREAD_CANCELED; - thread->state = PTHREAD_EXITED; - 
pthread_cond_broadcast(&thread->state_cond); - } - pthread_mutex_unlock(&thread->state_lock); + key = k_spin_lock(&t->lock); + switch (t->state) { + case PTHREAD_DETACHED: + t->cancel_pending = 1; + t->state = PTHREAD_TERMINATED; + should_abort = true; + break; + case PTHREAD_JOINABLE: + t->retval = PTHREAD_CANCELED; + t->state = PTHREAD_EXITED; + should_abort = true; + break; + case PTHREAD_EXITED: + break; + case PTHREAD_TERMINATED: + rv = ESRCH; + break; + default: + __ASSERT(false, "bad thread state %d", t->state); + rv = ESRCH; + break; + } + k_spin_unlock(&t->lock, key); - k_thread_abort(&thread->thread); + if (should_abort) { + sys_bitarray_free(&posix_thread_bitarray, 1, bit); + k_thread_abort(&t->thread); } - return 0; + return rv; } /** @@ -375,47 +419,68 @@ int pthread_once(pthread_once_t *once, void (*init_func)(void)) * * See IEEE 1003.1 */ +FUNC_NORETURN void pthread_exit(void *retval) { - k_spinlock_key_t cancel_key; - struct posix_thread *self = to_posix_thread(pthread_self()); + sys_snode_t *node_l; + k_spinlock_key_t key; pthread_key_obj *key_obj; + bool should_free = false; + bool should_destruct = false; pthread_thread_data *thread_spec_data; - sys_snode_t *node_l; - - /* Make a thread as cancelable before exiting */ - cancel_key = k_spin_lock(&self->cancel_lock); - if (self->cancel_state == PTHREAD_CANCEL_DISABLE) { - self->cancel_state = PTHREAD_CANCEL_ENABLE; - } - - k_spin_unlock(&self->cancel_lock, cancel_key); - - pthread_mutex_lock(&self->state_lock); - if (self->state == PTHREAD_JOINABLE) { - self->state = PTHREAD_EXITED; - self->retval = retval; - pthread_cond_broadcast(&self->state_cond); - } else { - self->state = PTHREAD_TERMINATED; + struct posix_thread *t = to_posix_thread(pthread_self()); + size_t bit = posix_thread_to_offset(t); + + __ASSERT(t != NULL, "not a valid pthread (%p)", k_current_get()); + __ASSERT(&t->thread == k_current_get(), "mismatch! 
&self->thread: %p k_current_get(): %p", + &t->thread, k_current_get()); + + key = k_spin_lock(&t->lock); + switch (t->state) { + case PTHREAD_DETACHED: + t->state = PTHREAD_TERMINATED; + should_free = true; + should_destruct = true; + break; + case PTHREAD_JOINABLE: + z_thread_wake_joiners(&t->thread); + t->state = PTHREAD_EXITED; + should_destruct = true; + if (retval) { + t->retval = retval; + } + break; + case PTHREAD_EXITED: + /* fall through */ + case PTHREAD_TERMINATED: + /* fall through */ + default: + __ASSERT(false, "bad thread state %d", t->state); + break; } + k_spin_unlock(&t->lock, key); - SYS_SLIST_FOR_EACH_NODE(&self->key_list, node_l) { - thread_spec_data = (pthread_thread_data *)node_l; - if (thread_spec_data != NULL) { - key_obj = thread_spec_data->key; - if (key_obj->destructor != NULL) { - (key_obj->destructor)(thread_spec_data->spec_data); + if (should_destruct) { + SYS_SLIST_FOR_EACH_NODE(&t->key_list, node_l) { + thread_spec_data = (pthread_thread_data *)node_l; + if (thread_spec_data != NULL) { + key_obj = thread_spec_data->key; + if (key_obj->destructor != NULL) { + (key_obj->destructor)(thread_spec_data->spec_data); + } } } } - pthread_mutex_unlock(&self->state_lock); - pthread_mutex_destroy(&self->state_lock); + if (should_free) { + /* only free detached threads on exit */ + sys_bitarray_free(&posix_thread_bitarray, 1, bit); + } - pthread_cond_destroy(&self->state_cond); + /* caller will *ALWAYS* be aborted */ + k_thread_abort(k_current_get()); - k_thread_abort((k_tid_t)self); + CODE_UNREACHABLE; } /** @@ -423,38 +488,55 @@ void pthread_exit(void *retval) * * See IEEE 1003.1 */ -int pthread_join(pthread_t thread, void **status) +int pthread_join(pthread_t pthread, void **status) { - struct posix_thread *pthread = to_posix_thread(thread); - int ret = 0; + int ret; + k_spinlock_key_t key; + bool should_free = false; + struct posix_thread *t = to_posix_thread(pthread); + size_t bit = get_posix_thread_idx(pthread); - if (thread == 
pthread_self()) { + if (pthread == pthread_self()) { return EDEADLK; } - if (pthread == NULL) { + if (t == NULL) { return ESRCH; } - pthread_mutex_lock(&pthread->state_lock); - - if (pthread->state == PTHREAD_JOINABLE) { - pthread_cond_wait(&pthread->state_cond, &pthread->state_lock); - } - - if (pthread->state == PTHREAD_EXITED) { + key = k_spin_lock(&t->lock); + switch (t->state) { + case PTHREAD_DETACHED: + ret = EINVAL; + break; + case PTHREAD_JOINABLE: + k_spin_unlock(&t->lock, key); + ret = k_thread_join(&t->thread, K_FOREVER); + __ASSERT(ret == 0, "k_thread_join() failed: %d", ret); + key = k_spin_lock(&t->lock); + /* fall through */ + case PTHREAD_EXITED: + ret = 0; + should_free = true; + t->state = PTHREAD_TERMINATED; if (status != NULL) { - *status = pthread->retval; + *status = t->retval; } - } else if (pthread->state == PTHREAD_DETACHED) { - ret = EINVAL; - } else { + z_thread_wake_joiners(&t->thread); + break; + case PTHREAD_TERMINATED: ret = ESRCH; + break; + default: + __ASSERT(false, "bad pthread state %d", t->state); + ret = ESRCH; + break; } + k_spin_unlock(&t->lock, key); - pthread_mutex_unlock(&pthread->state_lock); - if (pthread->state == PTHREAD_EXITED) { - pthread_mutex_destroy(&pthread->state_lock); + if (should_free) { + /* only free joinable threads on join */ + sys_bitarray_free(&posix_thread_bitarray, 1, bit); } return ret; @@ -465,30 +547,25 @@ int pthread_join(pthread_t thread, void **status) * * See IEEE 1003.1 */ -int pthread_detach(pthread_t thread) +int pthread_detach(pthread_t pthread) { - struct posix_thread *pthread = to_posix_thread(thread); int ret = 0; + bool join = false; + k_spinlock_key_t key; + struct posix_thread *t = to_posix_thread(pthread); - if (pthread == NULL) { + if (t == NULL) { return ESRCH; } - pthread_mutex_lock(&pthread->state_lock); - - switch (pthread->state) { + key = k_spin_lock(&t->lock); + switch (t->state) { case PTHREAD_JOINABLE: - pthread->state = PTHREAD_DETACHED; - /* Broadcast the condition. 
- * This will make threads waiting to join this thread continue. - */ - pthread_cond_broadcast(&pthread->state_cond); + t->state = PTHREAD_DETACHED; + z_thread_wake_joiners(&t->thread); break; case PTHREAD_EXITED: - pthread->state = PTHREAD_TERMINATED; - /* THREAD has already exited. - * Pthread remained to provide exit status. - */ + join = true; break; case PTHREAD_TERMINATED: ret = ESRCH; @@ -497,8 +574,12 @@ int pthread_detach(pthread_t thread) ret = EINVAL; break; } + k_spin_unlock(&t->lock, key); + + if (join) { + pthread_join(pthread, NULL); + } - pthread_mutex_unlock(&pthread->state_lock); return ret; } @@ -665,16 +746,17 @@ int pthread_attr_destroy(pthread_attr_t *_attr) return EINVAL; } -int pthread_setname_np(pthread_t thread, const char *name) +int pthread_setname_np(pthread_t pthread, const char *name) { #ifdef CONFIG_THREAD_NAME k_tid_t kthread; + struct posix_thread *t = to_posix_thread(pthread); - if (thread >= CONFIG_MAX_PTHREAD_COUNT) { + if (t == NULL) { return ESRCH; } - kthread = &posix_thread_pool[thread].thread; + kthread = &posix_thread_pool[pthread].thread; if (name == NULL) { return EINVAL; @@ -682,18 +764,19 @@ int pthread_setname_np(pthread_t thread, const char *name) return k_thread_name_set(kthread, name); #else - ARG_UNUSED(thread); + ARG_UNUSED(pthread); ARG_UNUSED(name); return 0; #endif } -int pthread_getname_np(pthread_t thread, char *name, size_t len) +int pthread_getname_np(pthread_t pthread, char *name, size_t len) { #ifdef CONFIG_THREAD_NAME k_tid_t kthread; + struct posix_thread *t = to_posix_thread(pthread); - if (thread >= CONFIG_MAX_PTHREAD_COUNT) { + if (t == NULL) { return ESRCH; } @@ -702,10 +785,10 @@ int pthread_getname_np(pthread_t thread, char *name, size_t len) } memset(name, '\0', len); - kthread = &posix_thread_pool[thread].thread; + kthread = &posix_thread_pool[pthread].thread; return k_thread_name_copy(kthread, name, len-1); #else - ARG_UNUSED(thread); + ARG_UNUSED(pthread); ARG_UNUSED(name); ARG_UNUSED(len); 
return 0; @@ -718,7 +801,7 @@ static int posix_thread_pool_init(void) for (i = 0; i < CONFIG_MAX_PTHREAD_COUNT; ++i) { - posix_thread_pool[i].state = PTHREAD_EXITED; + posix_thread_pool[i].state = PTHREAD_TERMINATED; } return 0; diff --git a/tests/posix/common/src/pthread.c b/tests/posix/common/src/pthread.c index 9b53468f9fb38..3c20daf3fbec2 100644 --- a/tests/posix/common/src/pthread.c +++ b/tests/posix/common/src/pthread.c @@ -587,12 +587,15 @@ ZTEST(posix_apis, test_pthread_descriptor_leak) pthread_t pthread1; pthread_attr_t attr; + zassert_ok(pthread_attr_init(&attr)); + zassert_ok(pthread_attr_setstack(&attr, &stack_e[0][0], STACKS)); + /* If we are leaking descriptors, then this loop will never complete */ for (size_t i = 0; i < CONFIG_MAX_PTHREAD_COUNT * 2; ++i) { - zassert_ok(pthread_attr_init(&attr)); - zassert_ok(pthread_attr_setstack(&attr, &stack_e[0][0], STACKS)); zassert_ok(pthread_create(&pthread1, &attr, create_thread1, NULL), "unable to create thread %zu", i); + /* Small delay to prevent joining before the thread has been spawned */ + k_msleep(100); zassert_ok(pthread_join(pthread1, NULL), "unable to join thread %zu", i); } } diff --git a/tests/posix/pthread_pressure/CMakeLists.txt b/tests/posix/pthread_pressure/CMakeLists.txt new file mode 100644 index 0000000000000..efbb1bb00e2fd --- /dev/null +++ b/tests/posix/pthread_pressure/CMakeLists.txt @@ -0,0 +1,10 @@ +# Copyright (c) 2023, Meta +# +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) +find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE}) +project(pthread_pressure) + +FILE(GLOB app_sources src/*.c) +target_sources(app PRIVATE ${app_sources}) diff --git a/tests/posix/pthread_pressure/Kconfig b/tests/posix/pthread_pressure/Kconfig new file mode 100644 index 0000000000000..385e2d39b7f78 --- /dev/null +++ b/tests/posix/pthread_pressure/Kconfig @@ -0,0 +1,58 @@ +# Copyright (c) 2023, Meta +# +# SPDX-License-Identifier: Apache-2.0 + +source "Kconfig.zephyr" + +config 
TEST_NUM_CPUS
	int "Number of CPUs to use in parallel"
	range 1 MP_NUM_CPUS
	default MP_NUM_CPUS
	help
	  The number of parallel threads to run during the test. The test
	  thread itself yields so that all cores have some probability of
	  causing racy behaviour.

config TEST_DURATION_S
	int "Number of seconds to run the test"
	range 1 21600
	default 29
	help
	  Duration for the test, in seconds. The range has a relatively high
	  upper bound because we should expect that pthread_create() and
	  pthread_join() are stable enough to run for an arbitrarily long
	  period of time without encountering any race conditions.

	  Some exceptions apply, notably Qemu SMP targets.

config TEST_DELAY_US
	int "Microseconds to delay between pthread join and create"
	default 0
	help
	  If there is a race condition, a value of zero here should
	  cause a crash.

config TEST_STACK_SIZE
	int "Size of each thread stack in this test"
	default 2048 if !64_BIT
	default 4096 if 64_BIT
	help
	  The minimal stack size required to run a no-op thread.

config TEST_KTHREADS
	bool "Test k_threads"
	default n
	help
	  Run tests for k_threads

config TEST_PTHREADS
	bool "Test pthreads"
	default y
	help
	  Run tests for pthreads

config TEST_EXTRA_ASSERTIONS
	bool "Add extra assertions into the hot path"
	help
	  On Qemu SMP targets, this can potentially lead to "scheduler noise"
	  leaking in from the host system, which can cause the test to fail.
diff --git a/tests/posix/pthread_pressure/prj.conf b/tests/posix/pthread_pressure/prj.conf new file mode 100644 index 0000000000000..d189ad86a6805 --- /dev/null +++ b/tests/posix/pthread_pressure/prj.conf @@ -0,0 +1,3 @@ +CONFIG_ZTEST=y +CONFIG_ZTEST_NEW_API=y +CONFIG_POSIX_API=y diff --git a/tests/posix/pthread_pressure/src/main.c b/tests/posix/pthread_pressure/src/main.c new file mode 100644 index 0000000000000..7e0a78b33fb12 --- /dev/null +++ b/tests/posix/pthread_pressure/src/main.c @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2023, Meta + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include +#include + +#define STACK_SIZE K_THREAD_STACK_LEN(CONFIG_TEST_STACK_SIZE) + +/* update interval for printing stats */ +#if CONFIG_TEST_DURATION_S >= 60 +#define UPDATE_INTERVAL_S 10 +#elif CONFIG_TEST_DURATION_S >= 30 +#define UPDATE_INTERVAL_S 5 +#else +#define UPDATE_INTERVAL_S 1 +#endif + +/* 32 threads is mainly a limitation of find_lsb_set() */ +#define NUM_THREADS MIN(32, MIN(CONFIG_TEST_NUM_CPUS, CONFIG_MAX_PTHREAD_COUNT)) + +typedef int (*create_fn)(int i); +typedef int (*join_fn)(int i); + +static void *setup(void); +static void before(void *fixture); + +/* bitmask of available threads */ +static bool alive[NUM_THREADS]; + +/* array of thread stacks */ +static K_THREAD_STACK_ARRAY_DEFINE(thread_stacks, NUM_THREADS, STACK_SIZE); + +static struct k_thread k_threads[NUM_THREADS]; +static size_t counters[NUM_THREADS]; + +static void print_stats(uint64_t now, uint64_t end) +{ + printk("now (ms): %llu end (ms): %llu\n", now, end); + for (int i = 0; i < NUM_THREADS; ++i) { + printk("Thread %d created and joined %zu times\n", i, counters[i]); + } +} + +static void test_create_join_common(const char *tag, create_fn create, join_fn join) +{ + int i; + __unused int ret; + uint64_t now_ms = k_uptime_get(); + const uint64_t end_ms = now_ms + MSEC_PER_SEC * CONFIG_TEST_DURATION_S; + uint64_t update_ms = now_ms + MSEC_PER_SEC * UPDATE_INTERVAL_S; + + printk("BOARD: 
%s\n", CONFIG_BOARD); + printk("NUM_THREADS: %u\n", NUM_THREADS); + printk("TEST_NUM_CPUS: %u\n", CONFIG_TEST_NUM_CPUS); + printk("TEST_DURATION_S: %u\n", CONFIG_TEST_DURATION_S); + printk("TEST_DELAY_US: %u\n", CONFIG_TEST_DELAY_US); + + for (i = 0; i < NUM_THREADS; ++i) { + /* spawn thread i */ + ret = create(i); +#ifdef CONFIG_EXTRA_ASSERTIONS + zassert_ok(ret, "%s_create(%d)[%zu] failed: %d", tag, i, counters[i], ret); +#endif + } + + do { +#ifndef CONFIG_SMP + /* allow the test thread to be swapped-out */ + k_yield(); +#endif + + for (i = 0; i < NUM_THREADS; ++i) { + if (alive[i]) { + ret = join(i); +#ifdef CONFIG_EXTRA_ASSERTIONS + zassert_ok(ret, "%s_join(%d)[%zu] failed: %d", tag, i, counters[i], + ret); +#endif + alive[i] = false; + + /* update counter i after each (create,join) pair */ + ++counters[i]; + +#if CONFIG_TEST_DELAY_US > 0 + /* success with 0 delay means we are ~raceless */ + k_busy_wait(CONFIG_TEST_DELAY_US); +#endif + + /* re-spawn thread i */ + ret = create(i); +#ifdef CONFIG_TEST_EXTRA_ASSERTIONS + zassert_ok(ret, "%s_create(%d)[%zu] failed: %d", tag, i, + counters[i], ret); +#endif + } + } + + /* are we there yet? 
*/ + now_ms = k_uptime_get(); + + /* dump some stats periodically */ + if (now_ms > update_ms) { + update_ms += MSEC_PER_SEC * UPDATE_INTERVAL_S; + + /* at this point, we should have seen many context switches */ + for (i = 0; i < NUM_THREADS; ++i) { +#ifdef CONFIG_TEST_EXTRA_ASSERTIONS + zassert_true(counters[i] > 0, "%s %d was never scheduled", tag, i); +#endif + } + + print_stats(now_ms, end_ms); + } + } while (end_ms > now_ms); + + print_stats(now_ms, end_ms); +} + +/* + * Wrappers for k_threads + */ + +static void k_thread_fun(void *arg1, void *arg2, void *arg3) +{ + int i = POINTER_TO_INT(arg1); + + alive[i] = true; +} + +static int k_thread_create_wrapper(int i) +{ + k_thread_create(&k_threads[i], thread_stacks[i], STACK_SIZE, k_thread_fun, + INT_TO_POINTER(i), NULL, NULL, K_HIGHEST_APPLICATION_THREAD_PRIO, 0, + K_NO_WAIT); + + return 0; +} + +static int k_thread_join_wrapper(int i) +{ + return k_thread_join(&k_threads[i], K_FOREVER); +} + +ZTEST(pthread_pressure, test_k_thread_create_join) +{ + if (IS_ENABLED(CONFIG_TEST_KTHREADS)) { + test_create_join_common("k_thread", k_thread_create_wrapper, k_thread_join_wrapper); + } else { + ztest_test_skip(); + } +} + +/* + * Wrappers for pthreads + */ + +static pthread_t pthreads[NUM_THREADS]; +static pthread_attr_t pthread_attrs[NUM_THREADS]; + +static void *pthread_fun(void *arg) +{ + k_thread_fun(arg, NULL, NULL); + return NULL; +} + +static int pthread_create_wrapper(int i) +{ + return pthread_create(&pthreads[i], &pthread_attrs[i], pthread_fun, INT_TO_POINTER(i)); +} + +static int pthread_join_wrapper(int i) +{ + return pthread_join(pthreads[i], NULL); +} + +ZTEST(pthread_pressure, test_pthread_create_join) +{ + if (IS_ENABLED(CONFIG_TEST_PTHREADS)) { + test_create_join_common("pthread", pthread_create_wrapper, pthread_join_wrapper); + } else { + ztest_test_skip(); + } +} + +/* + * Test suite / fixture + */ + +ZTEST_SUITE(pthread_pressure, NULL, setup, before, NULL, NULL); + +static void *setup(void) +{ + if 
(IS_ENABLED(CONFIG_TEST_PTHREADS)) { + const struct sched_param param = { + .sched_priority = sched_get_priority_max(SCHED_FIFO), + }; + + /* setup pthread stacks */ + for (int i = 0; i < NUM_THREADS; ++i) { + zassert_ok(pthread_attr_init(&pthread_attrs[i])); + zassert_ok(pthread_attr_setstack(&pthread_attrs[i], thread_stacks[i], + STACK_SIZE)); + zassert_ok(pthread_attr_setschedpolicy(&pthread_attrs[i], SCHED_FIFO)); + zassert_ok(pthread_attr_setschedparam(&pthread_attrs[i], ¶m)); + } + } + + return NULL; +} + +static void before(void *fixture) +{ + ARG_UNUSED(before); + + for (int i = 0; i < NUM_THREADS; ++i) { + counters[i] = 0; + } +} diff --git a/tests/posix/pthread_pressure/testcase.yaml b/tests/posix/pthread_pressure/testcase.yaml new file mode 100644 index 0000000000000..05d3c40c3368b --- /dev/null +++ b/tests/posix/pthread_pressure/testcase.yaml @@ -0,0 +1,34 @@ +common: + arch_exclude: + - posix + tags: posix + min_ram: 64 + integration_platforms: + - qemu_riscv64_smp +tests: + portability.posix.pthread_pressure: + extra_configs: + - CONFIG_NEWLIB_LIBC=n + portability.posix.pthread_pressure.newlib: + filter: TOOLCHAIN_HAS_NEWLIB == 1 + extra_configs: + - CONFIG_NEWLIB_LIBC=y + portability.posix.pthread_pressure.tls: + filter: CONFIG_ARCH_HAS_THREAD_LOCAL_STORAGE and + CONFIG_TOOLCHAIN_SUPPORTS_THREAD_LOCAL_STORAGE + extra_configs: + - CONFIG_NEWLIB_LIBC=n + - CONFIG_THREAD_LOCAL_STORAGE=y + - CONFIG_MAIN_STACK_SIZE=4096 + portability.posix.pthread_pressure.tls.newlib: + filter: TOOLCHAIN_HAS_NEWLIB == 1 and CONFIG_ARCH_HAS_THREAD_LOCAL_STORAGE and + CONFIG_TOOLCHAIN_SUPPORTS_THREAD_LOCAL_STORAGE + extra_configs: + - CONFIG_NEWLIB_LIBC=y + - CONFIG_THREAD_LOCAL_STORAGE=y + - CONFIG_MAIN_STACK_SIZE=4096 + portability.posix.pthread_pressure.picolibc: + tags: picolibc + filter: CONFIG_PICOLIBC_SUPPORTED + extra_configs: + - CONFIG_PICOLIBC=y