@@ -71,7 +71,7 @@ static void (*rt_scheduler_switch_hook)(struct rt_thread *tid);
  * @param hook is the hook function.
  */
 void rt_scheduler_sethook(void (*hook)(struct rt_thread *from,
-                                       struct rt_thread *to))
+                                       struct rt_thread *to))
 {
     rt_scheduler_hook = hook;
 }
@@ -106,8 +106,7 @@ rt_inline void _scheduler_update_highest_priority(void)
 rt_inline struct rt_thread *_scheduler_get_priority_thread(rt_ubase_t priority)
 {
     /* get highest ready priority thread */
-    return RT_THREAD_LIST_NODE_ENTRY(
-        rt_thread_priority_table[priority].next);
+    return RT_THREAD_LIST_NODE_ENTRY(rt_thread_priority_table[priority].next);
 }
 
 rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
@@ -172,8 +171,8 @@ void rt_system_scheduler_start(void)
     struct rt_thread *to_thread;
 
     _scheduler_update_highest_priority();
-    to_thread = _scheduler_get_priority_thread(
-        rt_thread_ready_highest_priority);
+    to_thread =
+        _scheduler_get_priority_thread(rt_thread_ready_highest_priority);
 
     rt_cpu_self()->current_thread = to_thread;
 
@@ -200,26 +199,23 @@ rt_inline void _rt_sched_insert_thread(struct rt_thread *thread)
 {
     /* READY thread, insert to ready queue */
     RT_SCHED_CTX(thread).stat =
-        RT_THREAD_READY |
-        (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
+        RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
     /* there is no time slices left(YIELD), inserting thread before ready list*/
     if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0) {
         rt_list_insert_before(
-            &(rt_thread_priority_table[RT_SCHED_PRIV(thread)
-                  .current_priority]),
+            &(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
             &RT_THREAD_LIST_NODE(thread));
     }
     /* there are some time slices left, inserting thread after ready list to schedule it firstly at next time*/
     else {
         rt_list_insert_after(
-            &(rt_thread_priority_table[RT_SCHED_PRIV(thread)
-                  .current_priority]),
+            &(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
             &RT_THREAD_LIST_NODE(thread));
     }
 
     LOG_D("insert thread[%.*s], the priority: %d", RT_NAME_MAX,
-          thread->parent.name,
-          RT_SCHED_PRIV(rt_current_thread).current_priority);
+          thread->parent.name,
+          RT_SCHED_PRIV(rt_current_thread).current_priority);
 
     /* set priority mask */
 #if RT_THREAD_PRIORITY_MAX > 32
@@ -239,14 +235,13 @@ rt_inline void _rt_sched_insert_thread(struct rt_thread *thread)
 rt_inline void _rt_sched_remove_thread(struct rt_thread *thread)
 {
     LOG_D("remove thread[%.*s], the priority: %d", RT_NAME_MAX,
-          thread->parent.name,
-          RT_SCHED_PRIV(rt_current_thread).current_priority);
+          thread->parent.name,
+          RT_SCHED_PRIV(rt_current_thread).current_priority);
 
     /* remove thread from ready list */
     rt_list_remove(&RT_THREAD_LIST_NODE(thread));
-    if (rt_list_isempty(
-            &(rt_thread_priority_table[RT_SCHED_PRIV(thread)
-                  .current_priority]))) {
+    if (rt_list_isempty(&(
+        rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]))) {
 #if RT_THREAD_PRIORITY_MAX > 32
         rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &=
             ~RT_SCHED_PRIV(thread).high_mask;
@@ -255,8 +250,7 @@ rt_inline void _rt_sched_remove_thread(struct rt_thread *thread)
                 ~RT_SCHED_PRIV(thread).number_mask;
         }
 #else
-        rt_thread_ready_priority_group &=
-            ~RT_SCHED_PRIV(thread).number_mask;
+        rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
     }
 }
@@ -290,14 +284,14 @@ void rt_schedule(void)
         curr_thread = rt_thread_self();
 
         if ((RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_MASK) ==
-            RT_THREAD_RUNNING) {
+            RT_THREAD_RUNNING) {
             if (RT_SCHED_PRIV(curr_thread).current_priority <
-                rt_thread_ready_highest_priority) {
+                rt_thread_ready_highest_priority) {
                 to_thread = curr_thread;
             } else if (RT_SCHED_PRIV(curr_thread).current_priority ==
-                       rt_thread_ready_highest_priority &&
-                       (RT_SCHED_CTX(curr_thread).stat &
-                        RT_THREAD_STAT_YIELD_MASK) == 0) {
+                       rt_thread_ready_highest_priority &&
+                       (RT_SCHED_CTX(curr_thread).stat &
+                        RT_THREAD_STAT_YIELD_MASK) == 0) {
                 to_thread = curr_thread;
             } else {
                 to_thread = _scheduler_get_priority_thread(
@@ -311,54 +305,46 @@ void rt_schedule(void)
 
         if (to_thread != curr_thread) {
             /* if the destination thread is not the same as current thread */
-            rt_current_priority =
-                (rt_uint8_t)rt_thread_ready_highest_priority;
+            rt_current_priority = (rt_uint8_t)rt_thread_ready_highest_priority;
             from_thread = curr_thread;
             rt_cpu_self()->current_thread = to_thread;
 
-            RT_OBJECT_HOOK_CALL(rt_scheduler_hook,
-                                (from_thread, to_thread));
+            RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));
 
             if (need_insert_from_thread) {
                 _rt_sched_remove_thread(from_thread);
                 _rt_sched_insert_thread(from_thread);
             }
 
-            if ((RT_SCHED_CTX(from_thread).stat &
-                 RT_THREAD_STAT_YIELD_MASK) != 0) {
-                RT_SCHED_CTX(from_thread).stat &=
-                    ~RT_THREAD_STAT_YIELD_MASK;
+            if ((RT_SCHED_CTX(from_thread).stat & RT_THREAD_STAT_YIELD_MASK) !=
+                0) {
+                RT_SCHED_CTX(from_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
             }
 
             _rt_sched_remove_thread(to_thread);
             RT_SCHED_CTX(to_thread).stat =
                 RT_THREAD_RUNNING |
-                (RT_SCHED_CTX(to_thread).stat &
-                 ~RT_THREAD_STAT_MASK);
+                (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);
 
             _scheduler_update_highest_priority();
 
             /* switch to new thread */
             LOG_D("[%d]switch to priority#%d "
-                  "thread:%.*s(sp:0x%08x), "
-                  "from thread:%.*s(sp: 0x%08x)",
-                  rt_interrupt_nest, highest_ready_priority,
-                  RT_NAME_MAX, to_thread->parent.name,
-                  to_thread->sp, RT_NAME_MAX,
-                  from_thread->parent.name, from_thread->sp);
+                  "thread:%.*s(sp:0x%08x), "
+                  "from thread:%.*s(sp: 0x%08x)",
+                  rt_interrupt_nest, highest_ready_priority, RT_NAME_MAX,
+                  to_thread->parent.name, to_thread->sp, RT_NAME_MAX,
+                  from_thread->parent.name, from_thread->sp);
 
             RT_SCHEDULER_STACK_CHECK(to_thread);
 
             if (rt_interrupt_nest == 0) {
-                extern void rt_thread_handle_sig(
-                    rt_bool_t clean_state);
+                extern void rt_thread_handle_sig(rt_bool_t clean_state);
 
-                RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook,
-                                    (from_thread));
+                RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (from_thread));
 
-                rt_hw_context_switch(
-                    (rt_uintptr_t)&from_thread->sp,
-                    (rt_uintptr_t)&to_thread->sp);
+                rt_hw_context_switch((rt_uintptr_t)&from_thread->sp,
+                                     (rt_uintptr_t)&to_thread->sp);
 
                 /* enable interrupt */
                 rt_hw_interrupt_enable(level);
@@ -367,9 +353,8 @@ void rt_schedule(void)
                 /* check stat of thread for signal */
                 level = rt_hw_interrupt_disable();
                 if (RT_SCHED_CTX(curr_thread).stat &
-                    RT_THREAD_STAT_SIGNAL_PENDING) {
-                    extern void rt_thread_handle_sig(
-                        rt_bool_t clean_state);
+                    RT_THREAD_STAT_SIGNAL_PENDING) {
+                    extern void rt_thread_handle_sig(rt_bool_t clean_state);
 
                     RT_SCHED_CTX(curr_thread).stat &=
                         ~RT_THREAD_STAT_SIGNAL_PENDING;
@@ -386,10 +371,9 @@ void rt_schedule(void)
             } else {
                 LOG_D("switch in interrupt");
 
-                rt_hw_context_switch_interrupt(
-                    (rt_uintptr_t)&from_thread->sp,
-                    (rt_uintptr_t)&to_thread->sp,
-                    from_thread, to_thread);
+                rt_hw_context_switch_interrupt((rt_uintptr_t)&from_thread->sp,
+                                               (rt_uintptr_t)&to_thread->sp,
+                                               from_thread, to_thread);
             }
         }
     }
@@ -404,11 +388,10 @@ void rt_sched_thread_startup(struct rt_thread *thread)
 {
 #if RT_THREAD_PRIORITY_MAX > 32
     RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >>
-                                   3; /* 5bit */
+                                   3; /* 5bit */
     RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).number;
     RT_SCHED_PRIV(thread).high_mask =
-        1L
-        << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
+        1L << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
 #else
     RT_SCHED_PRIV(thread).number_mask =
         1L << RT_SCHED_PRIV(thread).current_priority;
@@ -432,7 +415,7 @@ void rt_sched_thread_startup(struct rt_thread *thread)
  * - Sets initial and remaining time slice ticks
  */
 void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick,
-                               rt_uint8_t priority)
+                               rt_uint8_t priority)
 {
     rt_list_init(&RT_THREAD_LIST_NODE(thread));
 
@@ -527,9 +510,8 @@ void rt_exit_critical_safe(rt_base_t critical_level)
         _critical_error_occurred = 1;
 
         rt_kprintf("%s: un-compatible critical level\n"
-                   "\tCurrent %d\n\tCaller %d\n",
-                   __func__, rt_scheduler_lock_nest,
-                   critical_level);
+                   "\tCurrent %d\n\tCaller %d\n",
+                   __func__, rt_scheduler_lock_nest, critical_level);
         rt_backtrace();
 
         while (dummy)
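For reference, a minimal usage sketch of the rt_scheduler_sethook() API touched by this change, assuming RT_USING_HOOK and component auto-initialization are enabled; trace_switch and trace_init are illustrative names, not part of this commit:

#include <rtthread.h>

/* called on every context switch; runs with the scheduler locked, so keep it short */
static void trace_switch(struct rt_thread *from, struct rt_thread *to)
{
    /* thread names are read via parent.name, matching the access used in the diff */
    rt_kprintf("switch: %.*s -> %.*s\n", RT_NAME_MAX, from->parent.name,
               RT_NAME_MAX, to->parent.name);
}

static int trace_init(void)
{
    rt_scheduler_sethook(trace_switch);
    return 0;
}
INIT_APP_EXPORT(trace_init);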