
Commit adc901a

Andy Ross authored and nashif committed
subsys/ztest: Make 1cpu tests run on CPU 0 specifically
Some hardware has "interesting" configurations, like asymmetric default interrupt masking (the intel_adsp devices in particular, though x86's IO-APIC driver has tripped over this in the past too), that need special treatment if you want to run something on "core 1" specifically, and 1cpu test cases pretty much by definition were written without SMP details in mind.

Switch the logic around a tiny bit so that these test cases always run on CPU ID zero explicitly.

In practice this was ALMOST guaranteed to be true already: test setup happens serially, having been started on the main thread, which starts on CPU 0 by definition. The test teardown then aborts all the spawned threads that might have been running on CPUs 1+, so those reach idle, and the next test case starts synchronously on the same thread (and thus CPU) where the previous one started. But that was never actually enforced, and we've found at least one simulation environment where timing conspires to break things.

Signed-off-by: Andy Ross <[email protected]>
1 parent 47ddbc2 commit adc901a
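For context, a "1cpu" case is a test wrapped by ztest's 1cpu machinery: z_test_1cpu_start() runs before the test body (holding all other CPUs) and z_test_1cpu_stop() runs after. Below is a minimal sketch of such a test using the legacy ztest API of this era; the suite and test names are invented for illustration, and the arch_curr_cpu() check simply demonstrates the property this commit enforces.

#include <ztest.h>

/* Hypothetical test: after this commit, the body is guaranteed to
 * execute on CPU 0, with every other CPU parked in cpu_hold().
 */
static void test_timing_sensitive(void)
{
        zassert_equal(arch_curr_cpu()->id, 0, "expected to run on cpu 0");
}

void test_main(void)
{
        /* ztest_1cpu_unit_test() brackets the case with
         * z_test_1cpu_start()/z_test_1cpu_stop().
         */
        ztest_test_suite(one_cpu_suite,
                         ztest_1cpu_unit_test(test_timing_sensitive));
        ztest_run_test_suite(one_cpu_suite);
}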

File tree

1 file changed
+42 −19 lines changed

subsys/testsuite/ztest/src/ztest.c

Lines changed: 42 additions & 19 deletions
@@ -97,26 +97,28 @@ static int cleanup_test(struct unit_test *test)
 #endif
 #define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE)

-static struct k_thread cpuhold_threads[NUM_CPUHOLD];
-K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, NUM_CPUHOLD, CPUHOLD_STACK_SZ);
+#if defined(CONFIG_SMP) && (CONFIG_MP_NUM_CPUS > 1)
+static struct k_thread cpuhold_threads[CONFIG_MP_NUM_CPUS];
+K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, CONFIG_MP_NUM_CPUS, CPUHOLD_STACK_SZ);
 static struct k_sem cpuhold_sem;
+atomic_t cpuhold_holding;
 volatile int cpuhold_active;
+#endif

 /* "Holds" a CPU for use with the "1cpu" test cases. Note that we
  * can't use tools like the cpumask feature because we have tests that
  * may need to control that configuration themselves. We do this at
  * the lowest level, by locking interrupts directly and spinning.
  */
-static void cpu_hold(void *arg1, void *arg2, void *arg3)
+static inline void cpu_hold(void *arg1, void *arg2, void *arg3)
 {
        ARG_UNUSED(arg1);
        ARG_UNUSED(arg2);
        ARG_UNUSED(arg3);
+#if defined(CONFIG_SMP) && (CONFIG_MP_NUM_CPUS > 1)
        unsigned int key = arch_irq_lock();
        uint32_t dt, start_ms = k_uptime_get_32();

-       k_sem_give(&cpuhold_sem);
-
 #if defined(CONFIG_ARM64) && defined(CONFIG_FPU_SHARING)
        /*
         * We'll be spinning with IRQs disabled. The flush-your-FPU request
@@ -128,6 +130,26 @@ static void cpu_hold(void *arg1, void *arg2, void *arg3)
        z_arm64_flush_local_fpu();
 #endif

+       /* One of these threads will end up on cpu 0. Its job is to
+        * wait for the others to start holding their CPUs, then wake
+        * up the main test thread and exit, so that the test starts
+        * running on cpu 0. Note that the spinning here is a
+        * CPU-local loop, not k_busy_wait(), which tends to involve
+        * the timer driver and cause performance weirdness in SMP
+        * situations.
+        */
+       if (arch_curr_cpu()->id == 0) {
+               while (atomic_get(&cpuhold_holding) < (CONFIG_MP_NUM_CPUS - 1)) {
+                       for (volatile int i = 0; i < 10000; i++) {
+                       }
+               }
+
+               k_sem_give(&cpuhold_sem);
+               arch_irq_unlock(key);
+               return;
+       }
+
+       atomic_inc(&cpuhold_holding);
        while (cpuhold_active) {
                k_busy_wait(1000);
        }
@@ -141,24 +163,23 @@ static void cpu_hold(void *arg1, void *arg2, void *arg3)
        zassert_true(dt < 3000,
                     "1cpu test took too long (%d ms)", dt);
        arch_irq_unlock(key);
+#endif
 }

 void z_impl_z_test_1cpu_start(void)
 {
-       cpuhold_active = 1;
+#if defined(CONFIG_SMP) && (CONFIG_MP_NUM_CPUS > 1)
 #ifdef CONFIG_THREAD_NAME
        char tname[CONFIG_THREAD_MAX_NAME_LEN];
 #endif
+       cpuhold_active = 1;
        k_sem_init(&cpuhold_sem, 0, 999);
+       atomic_set(&cpuhold_holding, 0);

-       /* Spawn N-1 threads to "hold" the other CPUs, waiting for
-        * each to signal us that it's locked and spinning.
-        *
-        * Note that NUM_CPUHOLD can be a value that causes coverity
-        * to flag the following loop as DEADCODE so suppress the warning.
+       /* Spawn threads to "hold" the other CPUs, waiting for each to
+        * signal us that it's locked and spinning.
         */
-       /* coverity[DEADCODE] */
-       for (int i = 0; i < NUM_CPUHOLD; i++) {
+       for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
                k_thread_create(&cpuhold_threads[i],
                                cpuhold_stacks[i], CPUHOLD_STACK_SZ,
                                (k_thread_entry_t) cpu_hold, NULL, NULL, NULL,
@@ -167,21 +188,23 @@ void z_impl_z_test_1cpu_start(void)
                snprintk(tname, CONFIG_THREAD_MAX_NAME_LEN, "cpuhold%02d", i);
                k_thread_name_set(&cpuhold_threads[i], tname);
 #endif
-               k_sem_take(&cpuhold_sem, K_FOREVER);
        }
+
+       /* Sleep, waiting to be woken up on cpu0 */
+       k_sem_take(&cpuhold_sem, K_FOREVER);
+       __ASSERT(arch_curr_cpu()->id == 0, "1cpu case running on wrong cpu");
+#endif
 }

 void z_impl_z_test_1cpu_stop(void)
 {
+#if defined(CONFIG_SMP) && (CONFIG_MP_NUM_CPUS > 1)
        cpuhold_active = 0;

-       /* Note that NUM_CPUHOLD can be a value that causes coverity
-        * to flag the following loop as DEADCODE so suppress the warning.
-        */
-       /* coverity[DEADCODE] */
-       for (int i = 0; i < NUM_CPUHOLD; i++) {
+       for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
                k_thread_abort(&cpuhold_threads[i]);
        }
+#endif
 }

 #ifdef CONFIG_USERSPACE
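The handoff protocol itself is easy to model outside the kernel. The sketch below is a user-space analogue in plain C (C11 atomics plus POSIX threads and semaphores), not Zephyr code: thread 0 stands in for whichever holder lands on CPU 0, NCPU stands in for CONFIG_MP_NUM_CPUS, and all names are invented for illustration. The real code additionally pins physical CPUs with interrupts locked, which pthreads cannot express.

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPU 4                     /* stand-in for CONFIG_MP_NUM_CPUS */

static atomic_int holding;         /* counts "held" workers, like cpuhold_holding */
static atomic_int active = 1;      /* like cpuhold_active */
static sem_t handoff;              /* wakes the main thread, like cpuhold_sem */

static void *holder(void *arg)
{
        long id = (long)arg;

        if (id == 0) {
                /* The "leader" waits for the other holders, then wakes
                 * the main thread and exits, mirroring the CPU-0 branch
                 * in cpu_hold().
                 */
                while (atomic_load(&holding) < NCPU - 1) {
                        /* spin locally */
                }
                sem_post(&handoff);
                return NULL;
        }

        atomic_fetch_add(&holding, 1);
        while (atomic_load(&active)) {
                /* this thread is "held" until the test case ends */
        }
        return NULL;
}

int main(void)
{
        pthread_t t[NCPU];

        sem_init(&handoff, 0, 0);
        for (long i = 0; i < NCPU; i++) {
                pthread_create(&t[i], NULL, holder, (void *)i);
        }

        sem_wait(&handoff);        /* like k_sem_take() in z_test_1cpu_start() */
        printf("test body runs here, with the other threads held\n");

        atomic_store(&active, 0);  /* like z_test_1cpu_stop() */
        for (int i = 0; i < NCPU; i++) {
                pthread_join(t[i], NULL);
        }
        sem_destroy(&handoff);
        return 0;
}

Build with "cc -pthread" on a POSIX system; the observable behavior (main blocks until all holders are spinning, then runs alone) matches the kernel-side handshake.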
