@@ -265,7 +265,7 @@ ErrorOr<Vector<FlatPtr, 32>> ProcessorBase::capture_stack_trace(Thread& thread,
265265 return stack_trace;
266266}
267267
268- void ProcessorBase::exit_trap(TrapFrame& trap)
268+ void ProcessorBase::exit_trap(TrapFrame& trap, bool handling_exception)
269269{
270270 VERIFY_INTERRUPTS_DISABLED();
271271 VERIFY(&Processor::current() == this);
@@ -277,23 +277,30 @@ void ProcessorBase::exit_trap(TrapFrame& trap)
277277 // ScopedCritical here.
278278 m_in_critical = m_in_critical + 1;
279279
280- m_in_irq = 0;
280+ // Don't clear m_in_irq yet if we're still handling a lower-level
281+ // exception.
282+ if (m_in_irq && !handling_exception)
283+ m_in_irq = 0;
281284
285+ bool only_update_state = handling_exception && trap.next_trap;
286+
287+ auto* current_thread = Processor::current_thread();
288+ if (!only_update_state) {
282289#if ARCH(X86_64)
283- auto* self = static_cast<Processor*>(this);
284- if (is_smp_enabled())
285- self->smp_process_pending_messages();
290+ auto* self = static_cast<Processor*>(this);
291+ if (is_smp_enabled())
292+ self->smp_process_pending_messages();
286293#endif
287294
288- auto* current_thread = Processor::current_thread();
289- if (current_thread) {
290- SpinlockLocker thread_lock(current_thread->get_lock());
291- current_thread->check_dispatch_pending_signal(YieldBehavior::FlagYield);
292- }
295+ if (current_thread) {
296+ SpinlockLocker thread_lock(current_thread->get_lock());
297+ current_thread->check_dispatch_pending_signal(YieldBehavior::FlagYield);
298+ }
293299
294- // Process the deferred call queue. Among other things, this ensures
295- // that any pending thread unblocks happen before we enter the scheduler.
296- m_deferred_call_pool.execute_pending();
300+ // Process the deferred call queue. Among other things, this ensures
301+ // that any pending thread unblocks happen before we enter the scheduler.
302+ m_deferred_call_pool.execute_pending();
303+ }
297304
298305 if (current_thread) {
299306 auto& current_trap = current_thread->current_trap();
@@ -320,7 +327,7 @@ void ProcessorBase::exit_trap(TrapFrame& trap)
320327 // We don't want context switches to happen until we're explicitly
321328 // triggering a switch in check_invoke_scheduler.
322329 m_in_critical = m_in_critical - 1;
323- if (!m_in_irq && !m_in_critical)
330+ if (!only_update_state && !m_in_irq && !m_in_critical)
324331 check_invoke_scheduler ();
325332}
326333
0 commit comments