@@ -652,7 +652,7 @@ struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
 void z_unpend_thread(struct k_thread *thread)
 {
 	z_unpend_thread_no_timeout(thread);
-	(void)z_abort_thread_timeout(thread);
+	z_abort_thread_timeout(thread);
 }
 
 /* Priority set utility that does no rescheduling, it just changes the
@@ -1164,7 +1164,7 @@ void z_impl_k_wakeup(k_tid_t thread)
 {
 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
 
-	(void)z_abort_thread_timeout(thread);
+	z_abort_thread_timeout(thread);
 
 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
 
@@ -1212,7 +1212,7 @@ static inline void unpend_all(_wait_q_t *wait_q)
 
 	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
 		unpend_thread_no_timeout(thread);
-		(void)z_abort_thread_timeout(thread);
+		z_abort_thread_timeout(thread);
 		arch_thread_return_value_set(thread, 0);
 		ready_thread(thread);
 	}
@@ -1247,7 +1247,7 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state
 	if (thread->base.pended_on != NULL) {
 		unpend_thread_no_timeout(thread);
 	}
-	(void)z_abort_thread_timeout(thread);
+	z_abort_thread_timeout(thread);
 	unpend_all(&thread->join_queue);
 
 	/* Edge case: aborting arch_current_thread() from within an
@@ -1458,7 +1458,7 @@ bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
 						    swap_retval,
 						    swap_data);
 			unpend_thread_no_timeout(thread);
-			(void)z_abort_thread_timeout(thread);
+			z_abort_thread_timeout(thread);
 			ready_thread(thread);
 			ret = true;
 		}
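
Every hunk above makes the same edit: the (void) cast in front of z_abort_thread_timeout() is removed at each call site. Below is a minimal sketch of why such casts exist and why they can go, assuming the callee's return type was changed to void; the stub names abort_timeout_old/abort_timeout_new are hypothetical stand-ins, not Zephyr APIs.

/* Illustrative sketch only; the kernel-internal types are stubbed out. */
#include <stdbool.h>

struct k_thread;

/* Before: the callee returned a status, so callers that deliberately
 * ignored it cast the result to (void) to satisfy static-analysis
 * rules such as MISRA C:2012 Rule 17.7 (ignored return values).
 */
static bool abort_timeout_old(struct k_thread *thread)
{
	(void)thread;	/* stub: pretend a timeout was aborted */
	return true;
}

/* After: once no caller consumes the status, the return type can be
 * void, and every (void) cast at the call sites becomes dead weight.
 */
static void abort_timeout_new(struct k_thread *thread)
{
	(void)thread;	/* stub */
}

void wake_thread_example(struct k_thread *thread)
{
	(void)abort_timeout_old(thread);	/* old-style call site */
	abort_timeout_new(thread);		/* new-style call site */
}

Note that in C, dropping the casts without changing the callee would still compile; it would merely reintroduce the ignored-result diagnostics the casts were suppressing. That is why this kind of cleanup usually lands together with, or just after, making the function itself return void.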