
Commit c5cf46a

Andy Ross authored and nashif committed
subsys/ztest: Revert "Make 1cpu tests run on CPU 0 specifically"
This change introduced some weird failures on x86 that will take some
time to untangle, and wasn't a particularly important feature to merge.
Revert for now.

This reverts commit adc901a.

Signed-off-by: Andy Ross <[email protected]>
1 parent a10cb58 commit c5cf46a
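
For context, the hold mechanism that remains after this revert is simple: z_impl_z_test_1cpu_start() spawns one holder thread per CPU to be parked, and each holder masks interrupts, signals readiness, then spins until released. Below is a condensed sketch of that holder loop, assuming a Zephyr SMP build of this era; the names (cpuhold_sem, cpuhold_active, cpu_hold) mirror the diff, while the standalone scaffolding is illustrative only.

/* Sketch of the restored holder loop; not the exact file contents,
 * see the diff below for the real code.
 */
#include <zephyr.h>

static struct k_sem cpuhold_sem;
static volatile int cpuhold_active;

static void cpu_hold(void *arg1, void *arg2, void *arg3)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	/* Mask interrupts so the scheduler cannot reclaim this CPU. */
	unsigned int key = arch_irq_lock();

	/* Tell z_test_1cpu_start() this CPU is now parked... */
	k_sem_give(&cpuhold_sem);

	/* ...then spin until z_test_1cpu_stop() clears the flag. */
	while (cpuhold_active) {
		k_busy_wait(1000);
	}

	arch_irq_unlock(key);
}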

File tree: 1 file changed, +19 -42 lines


subsys/testsuite/ztest/src/ztest.c

Lines changed: 19 additions & 42 deletions
@@ -97,28 +97,26 @@ static int cleanup_test(struct unit_test *test)
 #endif
 #define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
 
-#if defined(CONFIG_SMP) && (CONFIG_MP_NUM_CPUS > 1)
-static struct k_thread cpuhold_threads[CONFIG_MP_NUM_CPUS];
-K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, CONFIG_MP_NUM_CPUS, CPUHOLD_STACK_SZ);
+static struct k_thread cpuhold_threads[NUM_CPUHOLD];
+K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, NUM_CPUHOLD, CPUHOLD_STACK_SZ);
 static struct k_sem cpuhold_sem;
-atomic_t cpuhold_holding;
 volatile int cpuhold_active;
-#endif
 
 /* "Holds" a CPU for use with the "1cpu" test cases. Note that we
  * can't use tools like the cpumask feature because we have tests that
  * may need to control that configuration themselves. We do this at
  * the lowest level, but locking interrupts directly and spinning.
  */
-static inline void cpu_hold(void *arg1, void *arg2, void *arg3)
+static void cpu_hold(void *arg1, void *arg2, void *arg3)
 {
 	ARG_UNUSED(arg1);
 	ARG_UNUSED(arg2);
 	ARG_UNUSED(arg3);
-#if defined(CONFIG_SMP) && (CONFIG_MP_NUM_CPUS > 1)
 	unsigned int key = arch_irq_lock();
 	uint32_t dt, start_ms = k_uptime_get_32();
 
+	k_sem_give(&cpuhold_sem);
+
 #if defined(CONFIG_ARM64) && defined(CONFIG_FPU_SHARING)
 	/*
 	 * We'll be spinning with IRQs disabled. The flush-your-FPU request
@@ -130,26 +128,6 @@ static inline void cpu_hold(void *arg1, void *arg2, void *arg3)
 	z_arm64_flush_local_fpu();
 #endif
 
-	/* One of these threads will end up on cpu 0. Its job is to
-	 * wait for the others to start holding their CPUs, then wake
-	 * up the main test thread and exit, so that the test starts
-	 * running on cpu 0. Note that the spinning here is a
-	 * CPU-local loop, not k_busy_wait(), which tends to involve
-	 * the timer driver and cause performance weirdness in SMP
-	 * situations.
-	 */
-	if (arch_curr_cpu()->id == 0) {
-		while (atomic_get(&cpuhold_holding) < (CONFIG_MP_NUM_CPUS - 1)) {
-			for (volatile int i = 0; i < 10000; i++) {
-			}
-		}
-
-		k_sem_give(&cpuhold_sem);
-		arch_irq_unlock(key);
-		return;
-	}
-
-	atomic_inc(&cpuhold_holding);
 	while (cpuhold_active) {
 		k_busy_wait(1000);
 	}
@@ -163,23 +141,24 @@ static inline void cpu_hold(void *arg1, void *arg2, void *arg3)
 	zassert_true(dt < 3000,
 		     "1cpu test took too long (%d ms)", dt);
 	arch_irq_unlock(key);
-#endif
 }
 
 void z_impl_z_test_1cpu_start(void)
 {
-#if defined(CONFIG_SMP) && (CONFIG_MP_NUM_CPUS > 1)
+	cpuhold_active = 1;
 #ifdef CONFIG_THREAD_NAME
 	char tname[CONFIG_THREAD_MAX_NAME_LEN];
 #endif
-	cpuhold_active = 1;
 	k_sem_init(&cpuhold_sem, 0, 999);
-	atomic_set(&cpuhold_holding, 0);
 
-	/* Spawn threads to "hold" the other CPUs, waiting for each to
-	 * signal us that it's locked and spinning.
+	/* Spawn N-1 threads to "hold" the other CPUs, waiting for
+	 * each to signal us that it's locked and spinning.
+	 *
+	 * Note that NUM_CPUHOLD can be a value that causes coverity
+	 * to flag the following loop as DEADCODE so suppress the warning.
 	 */
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	/* coverity[DEADCODE] */
+	for (int i = 0; i < NUM_CPUHOLD; i++) {
 		k_thread_create(&cpuhold_threads[i],
 				cpuhold_stacks[i], CPUHOLD_STACK_SZ,
 				(k_thread_entry_t) cpu_hold, NULL, NULL, NULL,
@@ -188,23 +167,21 @@ void z_impl_z_test_1cpu_start(void)
 		snprintk(tname, CONFIG_THREAD_MAX_NAME_LEN, "cpuhold%02d", i);
 		k_thread_name_set(&cpuhold_threads[i], tname);
 #endif
+		k_sem_take(&cpuhold_sem, K_FOREVER);
 	}
-
-	/* Sleep, waiting to be woken up on cpu0 */
-	k_sem_take(&cpuhold_sem, K_FOREVER);
-	__ASSERT(arch_curr_cpu()->id == 0, "1cpu case running on wrong cpu");
-#endif
 }
 
 void z_impl_z_test_1cpu_stop(void)
 {
-#if defined(CONFIG_SMP) && (CONFIG_MP_NUM_CPUS > 1)
 	cpuhold_active = 0;
 
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	/* Note that NUM_CPUHOLD can be a value that causes coverity
+	 * to flag the following loop as DEADCODE so suppress the warning.
+	 */
+	/* coverity[DEADCODE] */
+	for (int i = 0; i < NUM_CPUHOLD; i++) {
 		k_thread_abort(&cpuhold_threads[i]);
 	}
-#endif
 }
 
 #ifdef CONFIG_USERSPACE
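
For reference, test code of this era reaches z_impl_z_test_1cpu_start() and z_impl_z_test_1cpu_stop() through the legacy ztest suite macros rather than calling them directly. A minimal hypothetical suite, assuming the pre-ZTEST_SUITE API current at the time of this commit (the test name and body are illustrative):

#include <ztest.h>

/* Hypothetical test body: while it runs, the other CPUs sit in
 * cpu_hold() with interrupts masked, so nothing else is scheduled
 * concurrently.
 */
static void test_serialized_behavior(void)
{
	zassert_true(true, "runs with all other CPUs held");
}

void test_main(void)
{
	/* ztest_1cpu_unit_test() registers the case with setup and
	 * teardown hooks that invoke the 1cpu start/stop paths above.
	 */
	ztest_test_suite(one_cpu_suite,
			 ztest_1cpu_unit_test(test_serialized_behavior));
	ztest_run_test_suite(one_cpu_suite);
}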
