Skip to content

Commit 413e379

Browse files
committed
Kernel: Don't run non-essential trap exit steps for low-level exceptions
If there's a higher level trap, then that should take care of signal dispatching, scheduling, etc. when it exits its own trap frame.
1 parent 756058d commit 413e379

File tree

7 files changed

+46
-20
lines changed

7 files changed

+46
-20
lines changed

Kernel/Arch/Processor.cpp

Lines changed: 17 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -265,7 +265,7 @@ ErrorOr<Vector<FlatPtr, 32>> ProcessorBase::capture_stack_trace(Thread& thread,
265265
return stack_trace;
266266
}
267267

268-
void ProcessorBase::exit_trap(TrapFrame& trap)
268+
void ProcessorBase::exit_trap(TrapFrame& trap, bool handling_exception)
269269
{
270270
VERIFY_INTERRUPTS_DISABLED();
271271
VERIFY(&Processor::current() == this);
@@ -277,23 +277,26 @@ void ProcessorBase::exit_trap(TrapFrame& trap)
277277
// ScopedCritical here.
278278
m_in_critical = m_in_critical + 1;
279279

280-
m_in_irq = 0;
280+
bool only_update_state = handling_exception && trap.next_trap;
281281

282+
auto* current_thread = Processor::current_thread();
283+
if (!only_update_state) {
284+
m_in_irq = 0;
282285
#if ARCH(X86_64)
283-
auto* self = static_cast<Processor*>(this);
284-
if (is_smp_enabled())
285-
self->smp_process_pending_messages();
286+
auto* self = static_cast<Processor*>(this);
287+
if (is_smp_enabled())
288+
self->smp_process_pending_messages();
286289
#endif
287290

288-
auto* current_thread = Processor::current_thread();
289-
if (current_thread) {
290-
SpinlockLocker thread_lock(current_thread->get_lock());
291-
current_thread->check_dispatch_pending_signal(YieldBehavior::FlagYield);
292-
}
291+
if (current_thread) {
292+
SpinlockLocker thread_lock(current_thread->get_lock());
293+
current_thread->check_dispatch_pending_signal(YieldBehavior::FlagYield);
294+
}
293295

294-
// Process the deferred call queue. Among other things, this ensures
295-
// that any pending thread unblocks happen before we enter the scheduler.
296-
m_deferred_call_pool.execute_pending();
296+
// Process the deferred call queue. Among other things, this ensures
297+
// that any pending thread unblocks happen before we enter the scheduler.
298+
m_deferred_call_pool.execute_pending();
299+
}
297300

298301
if (current_thread) {
299302
auto& current_trap = current_thread->current_trap();
@@ -320,7 +323,7 @@ void ProcessorBase::exit_trap(TrapFrame& trap)
320323
// We don't want context switches to happen until we're explicitly
321324
// triggering a switch in check_invoke_scheduler.
322325
m_in_critical = m_in_critical - 1;
323-
if (!m_in_irq && !m_in_critical)
326+
if (!only_update_state && !m_in_irq && !m_in_critical)
324327
check_invoke_scheduler();
325328
}
326329

Kernel/Arch/Processor.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -113,7 +113,7 @@ class ProcessorBase {
113113
}
114114

115115
void enter_trap(TrapFrame& trap, bool raise_irq);
116-
void exit_trap(TrapFrame& trap);
116+
void exit_trap(TrapFrame& trap, bool handling_exception = false);
117117

118118
static void flush_entire_tlb_local();
119119

Kernel/Arch/TrapFrame.cpp

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -29,4 +29,10 @@ extern "C" void exit_trap(TrapFrame* trap)
2929
return Processor::current().exit_trap(*trap);
3030
}
3131

32+
extern "C" void exit_trap_exception(TrapFrame* trap)
33+
{
34+
InterruptDisabler disable;
35+
return Processor::current().exit_trap(*trap, true);
36+
}
37+
3238
}

Kernel/Arch/TrapFrame.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -31,5 +31,6 @@ static_assert(AssertSize<TrapFrame, TRAP_FRAME_SIZE>());
3131
extern "C" void enter_trap_no_irq(TrapFrame* trap) __attribute__((used));
3232
extern "C" void enter_trap(TrapFrame*) __attribute__((used));
3333
extern "C" void exit_trap(TrapFrame*) __attribute__((used));
34+
extern "C" void exit_trap_exception(TrapFrame*) __attribute__((used));
3435

3536
}

Kernel/Arch/aarch64/Interrupts.cpp

Lines changed: 4 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -93,6 +93,8 @@ extern "C" void exception_common(Kernel::TrapFrame* trap_frame)
9393
trap_frame->regs->set_ip(trap_frame->regs->ip() + 4);
9494
}
9595

96+
bool is_syscall = Aarch64::exception_class_is_svc_instruction_execution(esr_el1.EC);
97+
9698
if (Aarch64::exception_class_is_data_abort(esr_el1.EC) || Aarch64::exception_class_is_instruction_abort(esr_el1.EC)) {
9799
auto page_fault_or_error = page_fault_from_exception_syndrome_register(VirtualAddress(fault_address), esr_el1);
98100
if (page_fault_or_error.is_error()) {
@@ -101,7 +103,7 @@ extern "C" void exception_common(Kernel::TrapFrame* trap_frame)
101103
auto page_fault = page_fault_or_error.release_value();
102104
page_fault.handle(*trap_frame->regs);
103105
}
104-
} else if (Aarch64::exception_class_is_svc_instruction_execution(esr_el1.EC)) {
106+
} else if (is_syscall) {
105107
Processor::enable_interrupts();
106108
syscall_handler(trap_frame);
107109
Processor::disable_interrupts();
@@ -133,7 +135,7 @@ extern "C" void exception_common(Kernel::TrapFrame* trap_frame)
133135
}
134136

135137
VERIFY_INTERRUPTS_DISABLED();
136-
Processor::current().exit_trap(*trap_frame);
138+
Processor::current().exit_trap(*trap_frame, !is_syscall);
137139
}
138140

139141
// This spinlock is used to reserve IRQs that can be later used by interrupt mechanism such as MSIx

Kernel/Arch/riscv64/Interrupts.cpp

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -89,6 +89,8 @@ extern "C" void trap_handler(TrapFrame& trap_frame)
8989
// Exception
9090
Processor::current().enter_trap(trap_frame, false);
9191

92+
bool handled_syscall = false;
93+
9294
using enum RISCV64::CSR::SCAUSE;
9395
switch (scause) {
9496
case InstructionAddressMisaligned:
@@ -134,6 +136,7 @@ extern "C" void trap_handler(TrapFrame& trap_frame)
134136
Processor::enable_interrupts();
135137
syscall_handler(&trap_frame);
136138
Processor::disable_interrupts();
139+
handled_syscall = true;
137140
break;
138141

139142
case Breakpoint:
@@ -155,7 +158,7 @@ extern "C" void trap_handler(TrapFrame& trap_frame)
155158
};
156159

157160
Processor::disable_interrupts();
158-
Processor::current().exit_trap(trap_frame);
161+
Processor::current().exit_trap(trap_frame, !handled_syscall);
159162
}
160163
}
161164

Kernel/Arch/x86_64/Interrupts.cpp

Lines changed: 13 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -54,6 +54,17 @@ static EntropySource s_entropy_source_interrupts { EntropySource::Static::Interr
5454

5555
// clang-format off
5656

57+
asm(
58+
".globl trap_exit_exception \n"
59+
"trap_exit_exception: \n"
60+
// another thread may have handled this trap at this point, so don't
61+
// make assumptions about the stack other than there's a TrapFrame.
62+
" movq %rsp, %rdi \n"
63+
" call exit_trap_exception \n"
64+
" addq $" __STRINGIFY(TRAP_FRAME_SIZE) ", %rsp\n" // pop TrapFrame
65+
" jmp interrupt_common_asm_exit \n"
66+
);
67+
5768
#define EH_ENTRY(ec, title) \
5869
extern "C" void title##_asm_entry(); \
5970
extern "C" void title##_handler(TrapFrame*) __attribute__((used)); \
@@ -82,7 +93,7 @@ static EntropySource s_entropy_source_interrupts { EntropySource::Static::Interr
8293
" call enter_trap_no_irq \n" \
8394
" movq %rsp, %rdi \n" \
8495
" call " #title "_handler\n" \
85-
" jmp common_trap_exit \n" \
96+
" jmp trap_exit_exception \n" \
8697
); \
8798
}
8899

@@ -115,7 +126,7 @@ static EntropySource s_entropy_source_interrupts { EntropySource::Static::Interr
115126
" call enter_trap_no_irq \n" \
116127
" movq %rsp, %rdi \n" \
117128
" call " #title "_handler\n" \
118-
" jmp common_trap_exit \n" \
129+
" jmp trap_exit_exception \n" \
119130
); \
120131
}
121132

0 commit comments

Comments (0)