
Commit 97d49e9

committed
Reformatting
1 parent b0fb809 commit 97d49e9

File tree

1 file changed: +149 -118 lines


src/scheduler_up.c

Lines changed: 149 additions & 118 deletions
@@ -127,10 +127,11 @@ rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
 
 rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
 {
-    if (rt_thread_self()) {
-        /* if scheduler is available */
-        rt_schedule();
-    }
+    if (rt_thread_self())
+    {
+        /* if scheduler is available */
+        rt_schedule();
+    }
     rt_hw_interrupt_enable(level);
 
     return RT_EOK;
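
For context: rt_sched_unlock_n_resched() only calls rt_schedule() when rt_thread_self() returns a thread, i.e. once the scheduler has been started. A minimal usage sketch, assuming the rt_sched_lock() counterpart declared alongside this API (illustrative only, not part of this commit):

    #include <rtthread.h>

    static void demo_sched_protected_update(void)
    {
        rt_sched_lock_level_t level;

        rt_sched_lock(&level);            /* lock the scheduler */
        /* ... modify scheduler-protected state ... */
        rt_sched_unlock_n_resched(level); /* unlock, then reschedule only if
                                             the scheduler has started */
    }
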
@@ -146,9 +147,10 @@ void rt_system_scheduler_init(void)
 
     LOG_D("start scheduler: max priority 0x%02x", RT_THREAD_PRIORITY_MAX);
 
-    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; ++offset) {
-        rt_list_init(&rt_thread_priority_table[offset]);
-    }
+    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; ++offset)
+    {
+        rt_list_init(&rt_thread_priority_table[offset]);
+    }
 
     /* initialize ready priority group */
     rt_thread_ready_priority_group = 0;
@@ -196,15 +198,17 @@ rt_inline void _rt_sched_insert_thread(struct rt_thread *thread)
     /* READY thread, insert to ready queue */
     RT_SCHED_CTX(thread).stat = RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
     /* there is no time slices left(YIELD), inserting thread before ready list*/
-    if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0) {
-        rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
-                              &RT_THREAD_LIST_NODE(thread));
-    }
+    if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
+    {
+        rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
+                              &RT_THREAD_LIST_NODE(thread));
+    }
     /* there are some time slices left, inserting thread after ready list to schedule it firstly at next time*/
-    else {
-        rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
-                             &RT_THREAD_LIST_NODE(thread));
-    }
+    else
+    {
+        rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
+                             &RT_THREAD_LIST_NODE(thread));
+    }
 
     LOG_D("insert thread[%.*s], the priority: %d", RT_NAME_MAX, thread->parent.name,
           RT_SCHED_PRIV(rt_current_thread).current_priority);
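
The two insertion directions above give round-robin behavior within one priority level: each entry of rt_thread_priority_table is the anchor of a circular doubly linked list, so rt_list_insert_before() on the anchor appends at the tail (a yielded thread runs after its peers) while rt_list_insert_after() prepends at the head (a thread with time slices left runs next). A minimal sketch of that convention, with hypothetical names:

    #include <rtthread.h>

    static rt_list_t demo_ready_queue = RT_LIST_OBJECT_INIT(demo_ready_queue);

    static void demo_queue_order(rt_list_t *yielded_node, rt_list_t *fresh_node)
    {
        rt_list_insert_before(&demo_ready_queue, yielded_node); /* tail: runs last */
        rt_list_insert_after(&demo_ready_queue, fresh_node);    /* head: runs next */
    }
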
@@ -230,16 +234,18 @@ rt_inline void _rt_sched_remove_thread(struct rt_thread *thread)
 
     /* remove thread from ready list */
     rt_list_remove(&RT_THREAD_LIST_NODE(thread));
-    if (rt_list_isempty(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]))) {
+    if (rt_list_isempty(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority])))
+    {
 #if RT_THREAD_PRIORITY_MAX > 32
-        rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
-        if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0) {
-            rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
-        }
+        rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
+        if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
+        {
+            rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
+        }
 #else
-        rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
+        rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
-    }
+    }
 }
 
 /**
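
The masks cleared above maintain the ready-priority bitmaps that the scheduler later scans for the highest ready priority. A hedged sketch of the conventional two-level lookup over this file's internal tables, assuming RT-Thread's __rt_ffs() (returns the 1-based index of the lowest set bit, 0 if none):

    /* sketch only: rt_thread_ready_table packs 8 priorities per byte,
     * so the group bit index is scaled by 8 (<< 3) */
    static rt_ubase_t demo_highest_ready_priority(void)
    {
    #if RT_THREAD_PRIORITY_MAX > 32
        rt_ubase_t number = __rt_ffs(rt_thread_ready_priority_group) - 1;
        return (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
    #else
        return __rt_ffs(rt_thread_ready_priority_group) - 1;
    #endif
    }
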
@@ -266,91 +272,109 @@ void rt_schedule(void)
     level = rt_hw_interrupt_disable();
 
     /* check the scheduler is enabled or not */
-    if (rt_scheduler_lock_nest == 0 && rt_thread_ready_priority_group) {
-        need_insert_from_thread = RT_FALSE;
-        curr_thread = rt_thread_self();
-
-        if ((RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING) {
-            if (RT_SCHED_PRIV(curr_thread).current_priority < rt_thread_ready_highest_priority) {
-                to_thread = curr_thread;
-            } else if (RT_SCHED_PRIV(curr_thread).current_priority == rt_thread_ready_highest_priority &&
-                       (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0) {
-                to_thread = curr_thread;
-            } else {
-                to_thread = _scheduler_get_priority_thread(rt_thread_ready_highest_priority);
-                need_insert_from_thread = RT_TRUE;
-            }
-        } else {
-            to_thread = _scheduler_get_priority_thread(rt_thread_ready_highest_priority);
-        }
+    if (rt_scheduler_lock_nest == 0 && rt_thread_ready_priority_group)
+    {
+        need_insert_from_thread = RT_FALSE;
+        curr_thread = rt_thread_self();
+
+        if ((RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
+        {
+            if (RT_SCHED_PRIV(curr_thread).current_priority < rt_thread_ready_highest_priority)
+            {
+                to_thread = curr_thread;
+            }
+            else if (RT_SCHED_PRIV(curr_thread).current_priority == rt_thread_ready_highest_priority &&
+                     (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
+            {
+                to_thread = curr_thread;
+            }
+            else
+            {
+                to_thread = _scheduler_get_priority_thread(rt_thread_ready_highest_priority);
+                need_insert_from_thread = RT_TRUE;
+            }
+        }
+        else
+        {
+            to_thread = _scheduler_get_priority_thread(rt_thread_ready_highest_priority);
+        }
 
-        if (to_thread != curr_thread) {
-            /* if the destination thread is not the same as current thread */
-            rt_current_priority = (rt_uint8_t)rt_thread_ready_highest_priority;
-            from_thread = curr_thread;
-            rt_cpu_self()->current_thread = to_thread;
+        if (to_thread != curr_thread)
+        {
+            /* if the destination thread is not the same as current thread */
+            rt_current_priority = (rt_uint8_t)rt_thread_ready_highest_priority;
+            from_thread = curr_thread;
+            rt_cpu_self()->current_thread = to_thread;
 
-            RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));
+            RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));
 
-            if (need_insert_from_thread) {
-                _rt_sched_remove_thread(from_thread);
-                _rt_sched_insert_thread(from_thread);
-            }
+            if (need_insert_from_thread)
+            {
+                _rt_sched_remove_thread(from_thread);
+                _rt_sched_insert_thread(from_thread);
+            }
 
-            if ((RT_SCHED_CTX(from_thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0) {
-                RT_SCHED_CTX(from_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
-            }
+            if ((RT_SCHED_CTX(from_thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
+            {
+                RT_SCHED_CTX(from_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
+            }
 
-            _rt_sched_remove_thread(to_thread);
-            RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);
+            _rt_sched_remove_thread(to_thread);
+            RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING |
+                                           (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);
 
-            _scheduler_update_highest_priority();
+            _scheduler_update_highest_priority();
 
-            /* switch to new thread */
-            LOG_D("[%d]switch to priority#%d "
-                  "thread:%.*s(sp:0x%08x), "
-                  "from thread:%.*s(sp: 0x%08x)",
-                  rt_interrupt_nest, highest_ready_priority,
-                  RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
-                  RT_NAME_MAX, from_thread->parent.name, from_thread->sp);
+            /* switch to new thread */
+            LOG_D("[%d]switch to priority#%d "
+                  "thread:%.*s(sp:0x%08x), "
+                  "from thread:%.*s(sp: 0x%08x)",
+                  rt_interrupt_nest, highest_ready_priority, RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
+                  RT_NAME_MAX, from_thread->parent.name, from_thread->sp);
 
-            RT_SCHEDULER_STACK_CHECK(to_thread);
+            RT_SCHEDULER_STACK_CHECK(to_thread);
 
-            if (rt_interrupt_nest == 0) {
-                extern void rt_thread_handle_sig(rt_bool_t clean_state);
+            if (rt_interrupt_nest == 0)
+            {
+                extern void rt_thread_handle_sig(rt_bool_t clean_state);
 
-                RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (from_thread));
+                RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (from_thread));
 
-                rt_hw_context_switch((rt_uintptr_t)&from_thread->sp, (rt_uintptr_t)&to_thread->sp);
+                rt_hw_context_switch((rt_uintptr_t)&from_thread->sp, (rt_uintptr_t)&to_thread->sp);
 
-                /* enable interrupt */
-                rt_hw_interrupt_enable(level);
+                /* enable interrupt */
+                rt_hw_interrupt_enable(level);
 
 #ifdef RT_USING_SIGNALS
-                /* check stat of thread for signal */
-                level = rt_hw_interrupt_disable();
-                if (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING) {
-                    extern void rt_thread_handle_sig(rt_bool_t clean_state);
-
-                    RT_SCHED_CTX(curr_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
-
-                    rt_hw_interrupt_enable(level);
-
-                    /* check signal status */
-                    rt_thread_handle_sig(RT_TRUE);
-                } else {
-                    rt_hw_interrupt_enable(level);
-                }
+                /* check stat of thread for signal */
+                level = rt_hw_interrupt_disable();
+                if (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
+                {
+                    extern void rt_thread_handle_sig(rt_bool_t clean_state);
+
+                    RT_SCHED_CTX(curr_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
+
+                    rt_hw_interrupt_enable(level);
+
+                    /* check signal status */
+                    rt_thread_handle_sig(RT_TRUE);
+                }
+                else
+                {
+                    rt_hw_interrupt_enable(level);
+                }
 #endif /* RT_USING_SIGNALS */
-                goto __exit;
-            } else {
-                LOG_D("switch in interrupt");
-
-                rt_hw_context_switch_interrupt((rt_uintptr_t)&from_thread->sp, (rt_uintptr_t)&to_thread->sp,
-                                               from_thread, to_thread);
-            }
+                goto __exit;
+            }
+            else
+            {
+                LOG_D("switch in interrupt");
+
+                rt_hw_context_switch_interrupt((rt_uintptr_t)&from_thread->sp, (rt_uintptr_t)&to_thread->sp,
+                                               from_thread, to_thread);
+            }
+        }
     }
-    }
     /* enable interrupt */
     rt_hw_interrupt_enable(level);
 __exit:
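
The decision tree reformatted above reduces to one rule: the running thread keeps the CPU unless a strictly higher-priority thread is ready, or an equal-priority thread is ready and the running thread has yielded. A self-contained restatement with illustrative names (not kernel API; a lower number is a higher priority):

    #include <stdbool.h>

    static bool current_keeps_cpu(unsigned curr_prio, unsigned highest_ready_prio,
                                  bool yield_pending)
    {
        if (curr_prio < highest_ready_prio)
            return true;   /* strictly higher priority than any ready thread */
        if (curr_prio == highest_ready_prio && !yield_pending)
            return true;   /* equal priority and no yield requested */
        return false;      /* preempted, or round-robin after a yield */
    }
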
@@ -424,10 +448,11 @@ void rt_sched_insert_thread(struct rt_thread *thread)
     level = rt_hw_interrupt_disable();
 
     /* it's current thread, it should be RUNNING thread */
-    if (thread == rt_current_thread) {
-        RT_SCHED_CTX(thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
-        goto __exit;
-    }
+    if (thread == rt_current_thread)
+    {
+        RT_SCHED_CTX(thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
+        goto __exit;
+    }
 
     _rt_sched_insert_thread(thread);
 
@@ -472,20 +497,22 @@ void rt_exit_critical_safe(rt_base_t critical_level)
     /* disable interrupt */
     level = rt_hw_interrupt_disable();
 
-    if (!_critical_error_occurred) {
-        if (critical_level != rt_scheduler_lock_nest) {
-            int dummy = 1;
-            _critical_error_occurred = 1;
+    if (!_critical_error_occurred)
+    {
+        if (critical_level != rt_scheduler_lock_nest)
+        {
+            int dummy = 1;
+            _critical_error_occurred = 1;
 
-            rt_kprintf("%s: un-compatible critical level\n"
-                       "\tCurrent %d\n\tCaller %d\n",
-                       __func__, rt_scheduler_lock_nest, critical_level);
-            rt_backtrace();
+            rt_kprintf("%s: un-compatible critical level\n"
+                       "\tCurrent %d\n\tCaller %d\n",
+                       __func__, rt_scheduler_lock_nest, critical_level);
+            rt_backtrace();
 
-            while (dummy)
-                ;
+            while (dummy)
+                ;
+        }
     }
-    }
     rt_hw_interrupt_enable(level);
 
     rt_exit_critical();
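
rt_exit_critical_safe() traps unbalanced critical sections by comparing the caller's remembered level against rt_scheduler_lock_nest. A usage sketch, assuming rt_enter_critical() returns the entry level as in recent RT-Thread versions (an assumption; not shown in this diff):

    #include <rtthread.h>

    static void demo_balanced_critical(void)
    {
        rt_base_t level = rt_enter_critical(); /* remember the entry level */
        /* ... work that must not be preempted ... */
        rt_exit_critical_safe(level);          /* traps if nesting is unbalanced */
    }
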
@@ -536,19 +563,23 @@ void rt_exit_critical(void)
     level = rt_hw_interrupt_disable();
 
     --rt_scheduler_lock_nest;
-    if (rt_scheduler_lock_nest <= 0) {
-        rt_scheduler_lock_nest = 0;
-        /* enable interrupt */
-        rt_hw_interrupt_enable(level);
-
-        if (rt_current_thread) {
-            /* if scheduler is started, do a schedule */
-            rt_schedule();
+    if (rt_scheduler_lock_nest <= 0)
+    {
+        rt_scheduler_lock_nest = 0;
+        /* enable interrupt */
+        rt_hw_interrupt_enable(level);
+
+        if (rt_current_thread)
+        {
+            /* if scheduler is started, do a schedule */
+            rt_schedule();
+        }
+    }
+    else
+    {
+        /* enable interrupt */
+        rt_hw_interrupt_enable(level);
     }
-    } else {
-        /* enable interrupt */
-        rt_hw_interrupt_enable(level);
-    }
 }
 RTM_EXPORT(rt_exit_critical);
 
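For reference, the branch reformatted above is what gives critical sections their nesting semantics: only the outermost exit drops rt_scheduler_lock_nest to zero and may reschedule. A minimal sketch:

    #include <rtthread.h>

    static void demo_nested_critical(void)
    {
        rt_enter_critical(); /* nest = 1 */
        rt_enter_critical(); /* nest = 2 */
        rt_exit_critical();  /* nest = 1: no reschedule */
        rt_exit_critical();  /* nest = 0: rt_schedule() runs if started */
    }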