Skip to content

Commit 8439296

Browse files
hujun260 authored and xiaoxiang781216 committed
irq: inline restore_critical_section
reason: In SMP mode, restore_critical_section is executed whenever a context switch occurs. To reduce the time taken for context switching, we inline the restore_critical_section function. Given that restore_critical_section is small in size and is called from only one location, inlining it does not increase the size of the image. Signed-off-by: hujun5 <[email protected]>
1 parent c9eef2d commit 8439296

File tree

3 files changed

+23
-44
lines changed

3 files changed

+23
-44
lines changed

include/nuttx/irq.h

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -312,7 +312,19 @@ void leave_critical_section(irqstate_t flags) noinstrument_function;
312312
****************************************************************************/
313313

314314
#ifdef CONFIG_SMP
315-
void restore_critical_section(void);
315+
# define restore_critical_section() \
316+
do { \
317+
FAR struct tcb_s *tcb; \
318+
int me = this_cpu(); \
319+
tcb = current_task(me); \
320+
if (tcb->irqcount <= 0) \
321+
{\
322+
if ((g_cpu_irqset & (1 << me)) != 0) \
323+
{ \
324+
cpu_irqlock_clear(); \
325+
} \
326+
} \
327+
} while (0)
316328
#else
317329
# define restore_critical_section()
318330
#endif

sched/irq/irq_csection.c

Lines changed: 0 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -644,47 +644,4 @@ inline_function void leave_critical_section_nonirq(irqstate_t flags)
644644
up_irq_restore(flags);
645645
}
646646
#endif
647-
648-
/****************************************************************************
649-
* Name: restore_critical_section
650-
*
651-
* Description:
652-
* Restore the critical_section
653-
*
654-
* Input Parameters:
655-
* None
656-
*
657-
* Returned Value:
658-
* None
659-
*
660-
****************************************************************************/
661-
662-
#ifdef CONFIG_SMP
663-
void restore_critical_section(void)
664-
{
665-
/* NOTE: The following logic for adjusting global IRQ controls were
666-
* derived from nxsched_add_readytorun() and sched_removedreadytorun()
667-
* Here, we only handles clearing logic to defer unlocking IRQ lock
668-
* followed by context switching.
669-
*/
670-
671-
FAR struct tcb_s *tcb;
672-
int me = this_cpu();
673-
674-
/* Adjust global IRQ controls. If irqcount is greater than zero,
675-
* then this task/this CPU holds the IRQ lock
676-
*/
677-
678-
tcb = current_task(me);
679-
DEBUGASSERT(g_cpu_nestcount[me] <= 0);
680-
if (tcb->irqcount <= 0)
681-
{
682-
if ((g_cpu_irqset & (1 << me)) != 0)
683-
{
684-
cpu_irqlock_clear();
685-
}
686-
}
687-
}
688-
#endif /* CONFIG_SMP */
689-
690647
#endif /* CONFIG_IRQCOUNT */

sched/sched/sched.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -291,6 +291,16 @@ extern volatile clock_t g_cpuload_total;
291291

292292
extern volatile cpu_set_t g_cpu_lockset;
293293

294+
/* This is the spinlock that enforces critical sections when interrupts are
295+
* disabled.
296+
*/
297+
298+
extern volatile spinlock_t g_cpu_irqlock;
299+
300+
/* Used to keep track of which CPU(s) hold the IRQ lock. */
301+
302+
extern volatile cpu_set_t g_cpu_irqset;
303+
294304
/* Used to lock tasklist to prevent from concurrent access */
295305

296306
extern volatile spinlock_t g_cpu_tasklistlock;

0 commit comments

Comments (0)