Commit 52cab87

Reformatting

1 parent 97d49e9 commit 52cab87
src/scheduler_up.c

Lines changed: 134 additions & 135 deletions
@@ -128,10 +128,10 @@ rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
 rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
 {
     if (rt_thread_self())
-    {
-        /* if scheduler is available */
-        rt_schedule();
-    }
+    {
+        /* if scheduler is available */
+        rt_schedule();
+    }
     rt_hw_interrupt_enable(level);
 
     return RT_EOK;
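
This hunk only re-indents the body of rt_sched_unlock_n_resched(); behaviorally, rt_schedule() still runs only when rt_thread_self() returns a thread, i.e. once the scheduler has started. A standalone sketch of that guard, with illustrative names rather than the RT-Thread implementation:

    /* Minimal model of the guard: before the first thread is started there
     * is no current thread, so a reschedule request must be a no-op. */
    #include <stdio.h>

    static const char *current_thread = NULL;  /* NULL until scheduler starts */

    static void schedule(void) { printf("reschedule from %s\n", current_thread); }

    static void unlock_and_resched(void)
    {
        if (current_thread)   /* mirrors the rt_thread_self() check */
            schedule();
        /* the real code re-enables interrupts here via rt_hw_interrupt_enable() */
    }

    int main(void)
    {
        unlock_and_resched();          /* early boot: nothing happens */
        current_thread = "main";
        unlock_and_resched();          /* scheduler started: switch occurs */
        return 0;
    }
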
@@ -148,9 +148,9 @@ void rt_system_scheduler_init(void)
     LOG_D("start scheduler: max priority 0x%02x", RT_THREAD_PRIORITY_MAX);
 
     for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; ++offset)
-    {
-        rt_list_init(&rt_thread_priority_table[offset]);
-    }
+    {
+        rt_list_init(&rt_thread_priority_table[offset]);
+    }
 
     /* initialize ready priority group */
     rt_thread_ready_priority_group = 0;
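
For context, this loop gives each priority level its own circular doubly linked ready list. A minimal stand-in for what the initialization produces, assuming rt_list_init() self-links the node as in RT-Thread's rtservice.h:

    #include <assert.h>

    #define PRIORITY_MAX 32   /* stand-in for RT_THREAD_PRIORITY_MAX */

    struct list_node { struct list_node *next, *prev; };

    static struct list_node priority_table[PRIORITY_MAX];

    static void list_init(struct list_node *l)          { l->next = l->prev = l; }
    static int  list_isempty(const struct list_node *l) { return l->next == l; }

    int main(void)
    {
        for (int offset = 0; offset < PRIORITY_MAX; ++offset)
            list_init(&priority_table[offset]);

        assert(list_isempty(&priority_table[0]));  /* every slot starts empty */
        return 0;
    }
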
@@ -199,16 +199,16 @@ rt_inline void _rt_sched_insert_thread(struct rt_thread *thread)
     RT_SCHED_CTX(thread).stat = RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
     /* there is no time slices left(YIELD), inserting thread before ready list*/
     if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
-    {
-        rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
-                              &RT_THREAD_LIST_NODE(thread));
-    }
+    {
+        rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
+                              &RT_THREAD_LIST_NODE(thread));
+    }
     /* there are some time slices left, inserting thread after ready list to schedule it firstly at next time*/
     else
-    {
-        rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
-                             &RT_THREAD_LIST_NODE(thread));
-    }
+    {
+        rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
+                             &RT_THREAD_LIST_NODE(thread));
+    }
 
     LOG_D("insert thread[%.*s], the priority: %d", RT_NAME_MAX, thread->parent.name,
           RT_SCHED_PRIV(rt_current_thread).current_priority);
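
The insert-before/insert-after pair re-indented here is what implements round-robin within one priority: on a circular list headed by the priority-table entry, inserting before the head queues a thread at the tail (it waits a full round after yielding), while inserting after the head makes it the next pick (it still owns time slices). A self-contained sketch with stand-in types, not RT-Thread's rt_list implementation:

    #include <stdio.h>

    struct node { const char *name; struct node *next, *prev; };

    /* insert n at the tail of the list headed by pos */
    static void insert_before(struct node *pos, struct node *n)
    {
        n->prev = pos->prev; n->next = pos;
        pos->prev->next = n; pos->prev = n;
    }

    /* insert n at the front of the list headed by pos */
    static void insert_after(struct node *pos, struct node *n)
    {
        n->next = pos->next; n->prev = pos;
        pos->next->prev = n; pos->next = n;
    }

    int main(void)
    {
        struct node head = { "head", &head, &head };
        struct node yielded = { "yielded", NULL, NULL };
        struct node fresh   = { "fresh",   NULL, NULL };

        insert_before(&head, &yielded);  /* no slice left: back of the queue */
        insert_after(&head, &fresh);     /* slice remaining: runs next */

        for (struct node *p = head.next; p != &head; p = p->next)
            printf("%s\n", p->name);     /* prints: fresh, yielded */
        return 0;
    }
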
@@ -235,17 +235,17 @@ rt_inline void _rt_sched_remove_thread(struct rt_thread *thread)
     /* remove thread from ready list */
     rt_list_remove(&RT_THREAD_LIST_NODE(thread));
     if (rt_list_isempty(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority])))
-    {
+    {
 #if RT_THREAD_PRIORITY_MAX > 32
-        rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
-        if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
-        {
-            rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
-        }
-#else
+        rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
+        if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
+        {
            rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        }
+#else
+        rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+    }
 }
 
 /**
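
The branches above maintain the two-level ready bitmap: with more than 32 priorities, each thread caches its group index (number), a group bit (number_mask), and a within-group bit (high_mask), and the bit in rt_thread_ready_priority_group is cleared only once the whole group's byte in rt_thread_ready_table drops to zero. A hedged sketch of that scheme; the mask layout is inferred from the identifiers, and POSIX ffs() stands in for RT-Thread's __rt_ffs():

    #include <stdio.h>
    #include <strings.h>                  /* ffs() */

    static unsigned ready_group;          /* one bit per group of 8 priorities */
    static unsigned char ready_table[32]; /* one bit per priority within a group */

    static void mark_ready(int prio)
    {
        ready_table[prio >> 3] |= 1u << (prio & 7);   /* high_mask */
        ready_group            |= 1u << (prio >> 3);  /* number_mask */
    }

    static void mark_unready(int prio)    /* what the removal path does */
    {
        ready_table[prio >> 3] &= ~(1u << (prio & 7));
        if (ready_table[prio >> 3] == 0)  /* group fully idle */
            ready_group &= ~(1u << (prio >> 3));
    }

    static int highest_ready(void)        /* lower number = higher priority */
    {
        int group = ffs((int)ready_group) - 1;
        return (group << 3) + ffs(ready_table[group]) - 1;
    }

    int main(void)
    {
        mark_ready(21); mark_ready(40);
        printf("%d\n", highest_ready());  /* 21 */
        mark_unready(21);
        printf("%d\n", highest_ready());  /* 40 */
        return 0;
    }
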
@@ -273,108 +273,107 @@ void rt_schedule(void)
 
     /* check the scheduler is enabled or not */
     if (rt_scheduler_lock_nest == 0 && rt_thread_ready_priority_group)
-    {
-        need_insert_from_thread = RT_FALSE;
-        curr_thread = rt_thread_self();
+    {
+        need_insert_from_thread = RT_FALSE;
+        curr_thread = rt_thread_self();
 
-        if ((RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
-        {
-            if (RT_SCHED_PRIV(curr_thread).current_priority < rt_thread_ready_highest_priority)
-            {
-                to_thread = curr_thread;
-            }
-            else if (RT_SCHED_PRIV(curr_thread).current_priority == rt_thread_ready_highest_priority &&
-                     (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
-            {
-                to_thread = curr_thread;
-            }
-            else
-            {
-                to_thread = _scheduler_get_priority_thread(rt_thread_ready_highest_priority);
-                need_insert_from_thread = RT_TRUE;
-            }
-        }
+        if ((RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
+        {
+            if (RT_SCHED_PRIV(curr_thread).current_priority < rt_thread_ready_highest_priority)
+            {
+                to_thread = curr_thread;
+            }
+            else if (RT_SCHED_PRIV(curr_thread).current_priority == rt_thread_ready_highest_priority &&
+                     (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
+            {
+                to_thread = curr_thread;
+            }
            else
-        {
-            to_thread = _scheduler_get_priority_thread(rt_thread_ready_highest_priority);
-        }
+            {
+                to_thread = _scheduler_get_priority_thread(rt_thread_ready_highest_priority);
+                need_insert_from_thread = RT_TRUE;
+            }
+        }
+        else
+        {
+            to_thread = _scheduler_get_priority_thread(rt_thread_ready_highest_priority);
+        }
 
-        if (to_thread != curr_thread)
-        {
-            /* if the destination thread is not the same as current thread */
-            rt_current_priority = (rt_uint8_t)rt_thread_ready_highest_priority;
-            from_thread = curr_thread;
-            rt_cpu_self()->current_thread = to_thread;
+        if (to_thread != curr_thread)
+        {
+            /* if the destination thread is not the same as current thread */
+            rt_current_priority = (rt_uint8_t)rt_thread_ready_highest_priority;
+            from_thread = curr_thread;
+            rt_cpu_self()->current_thread = to_thread;
 
-            RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));
+            RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));
 
-            if (need_insert_from_thread)
-            {
-                _rt_sched_remove_thread(from_thread);
-                _rt_sched_insert_thread(from_thread);
-            }
+            if (need_insert_from_thread)
+            {
+                _rt_sched_remove_thread(from_thread);
+                _rt_sched_insert_thread(from_thread);
+            }
 
-            if ((RT_SCHED_CTX(from_thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
-            {
-                RT_SCHED_CTX(from_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
-            }
+            if ((RT_SCHED_CTX(from_thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
+            {
+                RT_SCHED_CTX(from_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
+            }
 
-            _rt_sched_remove_thread(to_thread);
-            RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING |
-                                           (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);
+            _rt_sched_remove_thread(to_thread);
+            RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);
 
-            _scheduler_update_highest_priority();
+            _scheduler_update_highest_priority();
 
-            /* switch to new thread */
-            LOG_D("[%d]switch to priority#%d "
-                  "thread:%.*s(sp:0x%08x), "
-                  "from thread:%.*s(sp: 0x%08x)",
-                  rt_interrupt_nest, highest_ready_priority, RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
-                  RT_NAME_MAX, from_thread->parent.name, from_thread->sp);
+            /* switch to new thread */
+            LOG_D("[%d]switch to priority#%d "
+                  "thread:%.*s(sp:0x%08x), "
+                  "from thread:%.*s(sp: 0x%08x)",
+                  rt_interrupt_nest, highest_ready_priority, RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
+                  RT_NAME_MAX, from_thread->parent.name, from_thread->sp);
 
-            RT_SCHEDULER_STACK_CHECK(to_thread);
+            RT_SCHEDULER_STACK_CHECK(to_thread);
 
-            if (rt_interrupt_nest == 0)
-            {
-                extern void rt_thread_handle_sig(rt_bool_t clean_state);
+            if (rt_interrupt_nest == 0)
+            {
+                extern void rt_thread_handle_sig(rt_bool_t clean_state);
 
-                RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (from_thread));
+                RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (from_thread));
 
-                rt_hw_context_switch((rt_uintptr_t)&from_thread->sp, (rt_uintptr_t)&to_thread->sp);
+                rt_hw_context_switch((rt_uintptr_t)&from_thread->sp, (rt_uintptr_t)&to_thread->sp);
 
-                /* enable interrupt */
-                rt_hw_interrupt_enable(level);
+                /* enable interrupt */
+                rt_hw_interrupt_enable(level);
 
 #ifdef RT_USING_SIGNALS
-                /* check stat of thread for signal */
-                level = rt_hw_interrupt_disable();
-                if (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
-                {
-                    extern void rt_thread_handle_sig(rt_bool_t clean_state);
-
-                    RT_SCHED_CTX(curr_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
-
-                    rt_hw_interrupt_enable(level);
-
-                    /* check signal status */
-                    rt_thread_handle_sig(RT_TRUE);
-                }
-                else
-                {
-                    rt_hw_interrupt_enable(level);
-                }
-#endif /* RT_USING_SIGNALS */
-                goto __exit;
-            }
-            else
-            {
-                LOG_D("switch in interrupt");
-
-                rt_hw_context_switch_interrupt((rt_uintptr_t)&from_thread->sp, (rt_uintptr_t)&to_thread->sp,
-                                               from_thread, to_thread);
-            }
+                /* check stat of thread for signal */
+                level = rt_hw_interrupt_disable();
+                if (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
+                {
+                    extern void rt_thread_handle_sig(rt_bool_t clean_state);
+
+                    RT_SCHED_CTX(curr_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
+
+                    rt_hw_interrupt_enable(level);
+
+                    /* check signal status */
+                    rt_thread_handle_sig(RT_TRUE);
+                }
+                else
+                {
+                    rt_hw_interrupt_enable(level);
                 }
+#endif /* RT_USING_SIGNALS */
+                goto __exit;
+            }
+            else
+            {
+                LOG_D("switch in interrupt");
+
+                rt_hw_context_switch_interrupt((rt_uintptr_t)&from_thread->sp, (rt_uintptr_t)&to_thread->sp,
+                                               from_thread, to_thread);
+            }
         }
+    }
     /* enable interrupt */
     rt_hw_interrupt_enable(level);
 __exit:
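
Despite its size, this hunk is re-indentation plus one joined line (the RT_THREAD_RUNNING assignment); the selection logic itself is unchanged: a running thread keeps the CPU if it outranks the highest ready priority, or ties it with no yield pending; otherwise the head of the highest ready list wins and the preempted thread is re-queued. Condensed into a self-contained sketch with stand-in types and a stubbed ready queue, not the RT-Thread API:

    #include <assert.h>
    #include <stdbool.h>

    struct thread { int priority; bool yield_pending; };

    static struct thread ready_head = { 5, false };

    /* stub standing in for _scheduler_get_priority_thread() */
    static struct thread *pick_ready_head(int hp) { (void)hp; return &ready_head; }

    static struct thread *select_next(struct thread *curr, int highest_ready,
                                      bool *requeue_curr)
    {
        *requeue_curr = false;
        if (curr->priority < highest_ready)   /* numerically lower = higher */
            return curr;
        if (curr->priority == highest_ready && !curr->yield_pending)
            return curr;
        *requeue_curr = true;                 /* preempted thread re-enters ready list */
        return pick_ready_head(highest_ready);
    }

    int main(void)
    {
        bool requeue;
        struct thread curr = { 5, true };     /* equal priority, yield requested */
        assert(select_next(&curr, 5, &requeue) == &ready_head && requeue);

        curr.yield_pending = false;
        assert(select_next(&curr, 5, &requeue) == &curr && !requeue);
        return 0;
    }

The tail of the hunk keeps both switch paths intact: rt_hw_context_switch() when invoked at thread level (rt_interrupt_nest == 0), and rt_hw_context_switch_interrupt() to defer the actual switch until the interrupt unwinds.
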
@@ -449,10 +448,10 @@ void rt_sched_insert_thread(struct rt_thread *thread)
 
     /* it's current thread, it should be RUNNING thread */
     if (thread == rt_current_thread)
-    {
-        RT_SCHED_CTX(thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
-        goto __exit;
-    }
+    {
+        RT_SCHED_CTX(thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
+        goto __exit;
+    }
 
     _rt_sched_insert_thread(thread);
 
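
The stat assignment here uses the idiom seen throughout the file: the state field and sticky flag bits share one word, so the code swaps only the masked state portion and preserves the flags. A runnable sketch with illustrative mask values, not RT-Thread's actual constants:

    #include <assert.h>

    #define STAT_MASK     0x07u   /* state field (illustrative) */
    #define STATE_READY   0x01u
    #define STATE_RUNNING 0x03u
    #define FLAG_YIELD    0x08u   /* sticky flag outside the state field */

    int main(void)
    {
        unsigned stat = STATE_READY | FLAG_YIELD;

        /* same shape as: stat = RT_THREAD_RUNNING | (stat & ~RT_THREAD_STAT_MASK) */
        stat = STATE_RUNNING | (stat & ~STAT_MASK);

        assert((stat & STAT_MASK) == STATE_RUNNING);  /* state replaced */
        assert(stat & FLAG_YIELD);                    /* flag preserved */
        return 0;
    }
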
@@ -498,21 +497,21 @@ void rt_exit_critical_safe(rt_base_t critical_level)
     level = rt_hw_interrupt_disable();
 
     if (!_critical_error_occurred)
+    {
+        if (critical_level != rt_scheduler_lock_nest)
         {
-        if (critical_level != rt_scheduler_lock_nest)
-        {
-            int dummy = 1;
-            _critical_error_occurred = 1;
+            int dummy = 1;
+            _critical_error_occurred = 1;
 
-            rt_kprintf("%s: un-compatible critical level\n"
-                       "\tCurrent %d\n\tCaller %d\n",
-                       __func__, rt_scheduler_lock_nest, critical_level);
-            rt_backtrace();
+            rt_kprintf("%s: un-compatible critical level\n"
+                       "\tCurrent %d\n\tCaller %d\n",
+                       __func__, rt_scheduler_lock_nest, critical_level);
+            rt_backtrace();
 
-            while (dummy)
-                ;
-        }
+            while (dummy)
+                ;
         }
+    }
     rt_hw_interrupt_enable(level);
 
     rt_exit_critical();
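
As the reshaped block shows, rt_exit_critical_safe() compares the caller-supplied level against the live rt_scheduler_lock_nest and, on mismatch, prints both values, dumps a backtrace, and deliberately spins. A hedged usage sketch of the intended pairing, assuming the RT-Thread 5.x API in which rt_enter_critical() returns the current nesting level:

    #include <rtthread.h>

    static void update_scheduler_protected_state(void)
    {
        rt_base_t critical_level = rt_enter_critical();  /* returns nest level */

        /* ... touch data that must not be preempted by the scheduler ... */

        rt_exit_critical_safe(critical_level);  /* traps unbalanced nesting */
    }
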
@@ -564,22 +563,22 @@ void rt_exit_critical(void)
 
     --rt_scheduler_lock_nest;
     if (rt_scheduler_lock_nest <= 0)
-    {
-        rt_scheduler_lock_nest = 0;
-        /* enable interrupt */
-        rt_hw_interrupt_enable(level);
+    {
+        rt_scheduler_lock_nest = 0;
+        /* enable interrupt */
+        rt_hw_interrupt_enable(level);
 
-        if (rt_current_thread)
-        {
-            /* if scheduler is started, do a schedule */
-            rt_schedule();
-        }
-    }
-    else
+        if (rt_current_thread)
         {
-        /* enable interrupt */
-        rt_hw_interrupt_enable(level);
+            /* if scheduler is started, do a schedule */
+            rt_schedule();
         }
+    }
+    else
+    {
+        /* enable interrupt */
+        rt_hw_interrupt_enable(level);
+    }
 }
 RTM_EXPORT(rt_exit_critical);
 
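
For reference, the branch structure preserved above encodes the nesting rule: rt_enter_critical()/rt_exit_critical() pairs may nest, and only the outermost exit (when rt_scheduler_lock_nest reaches zero) takes the path that re-enables interrupts and calls rt_schedule(); inner exits merely decrement the count. A sketch of that nesting, assuming the standard RT-Thread API:

    #include <rtthread.h>

    static void inner(void)
    {
        rt_enter_critical();   /* nest: 1 -> 2 */
        /* ... */
        rt_exit_critical();    /* nest: 2 -> 1, no reschedule yet */
    }

    static void outer(void)
    {
        rt_enter_critical();   /* nest: 0 -> 1, scheduler locked */
        inner();
        rt_exit_critical();    /* nest: 1 -> 0, rt_schedule() may run here */
    }
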