Skip to content

Commit 20544ac

Browse files
Use EntryStoreContext to manage state when entering and exiting Wasm (#10626)
* Use EntryStoreContext to manage state for entering and exiting Wasm * use checked arithmetic for stack_pointer Co-authored-by: Nick Fitzgerald <[email protected]> * comment fixes * address review comments * remove FIXME --------- Co-authored-by: Nick Fitzgerald <[email protected]>
1 parent 1819edb commit 20544ac

File tree

2 files changed

+161
-121
lines changed

2 files changed

+161
-121
lines changed

crates/wasmtime/src/runtime/func.rs

Lines changed: 137 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
use crate::prelude::*;
22
use crate::runtime::vm::{
33
ExportFunction, InterpreterRef, SendSyncPtr, StoreBox, VMArrayCallHostFuncContext, VMContext,
4-
VMFuncRef, VMFunctionImport, VMOpaqueContext,
4+
VMFuncRef, VMFunctionImport, VMOpaqueContext, VMStoreContext,
55
};
66
use crate::runtime::Uninhabited;
77
use crate::store::{AutoAssertNoGc, StoreData, StoreOpaque, Stored};
@@ -1604,103 +1604,155 @@ pub(crate) fn invoke_wasm_and_catch_traps<T>(
16041604
closure: impl FnMut(NonNull<VMContext>, Option<InterpreterRef<'_>>) -> bool,
16051605
) -> Result<()> {
16061606
unsafe {
1607-
let exit = enter_wasm(store);
1607+
let previous_runtime_state = EntryStoreContext::enter_wasm(store);
16081608

16091609
if let Err(trap) = store.0.call_hook(CallHook::CallingWasm) {
1610-
exit_wasm(store, exit);
1610+
// `previous_runtime_state` implicitly dropped here
16111611
return Err(trap);
16121612
}
1613-
let result = crate::runtime::vm::catch_traps(store, closure);
1614-
exit_wasm(store, exit);
1613+
let result = crate::runtime::vm::catch_traps(store, &previous_runtime_state, closure);
1614+
core::mem::drop(previous_runtime_state);
16151615
store.0.call_hook(CallHook::ReturningFromWasm)?;
16161616
result.map_err(|t| crate::trap::from_runtime_box(store.0, t))
16171617
}
16181618
}
16191619

1620-
/// This function is called to register state within `Store` whenever
1621-
/// WebAssembly is entered within the `Store`.
1622-
///
1623-
/// This function sets up various limits such as:
1624-
///
1625-
/// * The stack limit. This is what ensures that we limit the stack space
1626-
/// allocated by WebAssembly code and it's relative to the initial stack
1627-
/// pointer that called into wasm.
1628-
///
1629-
/// This function may fail if the stack limit can't be set because an
1630-
/// interrupt already happened.
1631-
fn enter_wasm<T>(store: &mut StoreContextMut<'_, T>) -> Option<usize> {
1632-
// If this is a recursive call, e.g. our stack limit is already set, then
1633-
// we may be able to skip this function.
1634-
//
1635-
// For synchronous stores there's nothing else to do because all wasm calls
1636-
// happen synchronously and on the same stack. This means that the previous
1637-
// stack limit will suffice for the next recursive call.
1638-
//
1639-
// For asynchronous stores then each call happens on a separate native
1640-
// stack. This means that the previous stack limit is no longer relevant
1641-
// because we're on a separate stack.
1642-
if unsafe { *store.0.vm_store_context().stack_limit.get() } != usize::MAX
1643-
&& !store.0.async_support()
1644-
{
1645-
return None;
1646-
}
1647-
1648-
// Ignore this stack pointer business on miri since we can't execute wasm
1649-
// anyway and the concept of a stack pointer on miri is a bit nebulous
1650-
// regardless.
1651-
if cfg!(miri) {
1652-
return None;
1653-
}
1654-
1655-
// When Cranelift has support for the host then we might be running native
1656-
// compiled code meaning we need to read the actual stack pointer. If
1657-
// Cranelift can't be used though then we're guaranteed to be running pulley
1658-
// in which case this stack pointer isn't actually used as Pulley has custom
1659-
// mechanisms for stack overflow.
1660-
#[cfg(has_host_compiler_backend)]
1661-
let stack_pointer = crate::runtime::vm::get_stack_pointer();
1662-
#[cfg(not(has_host_compiler_backend))]
1663-
let stack_pointer = {
1664-
use wasmtime_environ::TripleExt;
1665-
debug_assert!(store.engine().target().is_pulley());
1666-
usize::MAX
1667-
};
1620+
/// This type helps manage the state of the runtime when entering and exiting
1621+
/// Wasm. To this end, it contains a subset of the data in `VMStoreContext`.
1622+
/// Upon entering Wasm, it updates various runtime fields and saves their
1623+
/// original values in this struct. Upon exiting Wasm, the previous values
1624+
/// are restored.
1625+
pub(crate) struct EntryStoreContext {
1626+
/// If set, contains the value of the `stack_limit` field to restore in
1627+
/// `VMStoreContext` when exiting Wasm.
1628+
pub stack_limit: Option<usize>,
1629+
/// Contains value of `last_wasm_exit_pc` field to restore in
1630+
/// `VMStoreContext` when exiting Wasm.
1631+
pub last_wasm_exit_pc: usize,
1632+
/// Contains value of `last_wasm_exit_fp` field to restore in
1633+
/// `VMStoreContext` when exiting Wasm.
1634+
pub last_wasm_exit_fp: usize,
1635+
/// Contains value of `last_wasm_entry_fp` field to restore in
1636+
/// `VMStoreContext` when exiting Wasm.
1637+
pub last_wasm_entry_fp: usize,
1638+
1639+
/// We need a pointer to the `VMStoreContext`, so we can update its fields
1640+
/// from `drop`/`exit_wasm`.
1641+
vm_store_context: *const VMStoreContext,
1642+
}
16681643

1669-
// Determine the stack pointer where, after which, any wasm code will
1670-
// immediately trap. This is checked on the entry to all wasm functions.
1671-
//
1672-
// Note that this isn't 100% precise. We are requested to give wasm
1673-
// `max_wasm_stack` bytes, but what we're actually doing is giving wasm
1674-
// probably a little less than `max_wasm_stack` because we're
1675-
// calculating the limit relative to this function's approximate stack
1676-
// pointer. Wasm will be executed on a frame beneath this one (or next
1677-
// to it). In any case it's expected to be at most a few hundred bytes
1678-
// of slop one way or another. When wasm is typically given a MB or so
1679-
// (a million bytes) the slop shouldn't matter too much.
1680-
//
1681-
// After we've got the stack limit then we store it into the `stack_limit`
1682-
// variable.
1683-
let wasm_stack_limit = stack_pointer - store.engine().config().max_wasm_stack;
1684-
let prev_stack = unsafe {
1685-
mem::replace(
1686-
&mut *store.0.vm_store_context().stack_limit.get(),
1687-
wasm_stack_limit,
1688-
)
1689-
};
1644+
impl EntryStoreContext {
1645+
/// This function is called to update and save state when
1646+
/// WebAssembly is entered within the `Store`.
1647+
///
1648+
/// This updates various fields such as:
1649+
///
1650+
/// * The stack limit. This is what ensures that we limit the stack space
1651+
/// allocated by WebAssembly code and it's relative to the initial stack
1652+
/// pointer that called into wasm.
1653+
///
1654+
/// It also saves the `last_wasm_*` values from the `VMStoreContext`.
1655+
pub fn enter_wasm<T>(store: &mut StoreContextMut<'_, T>) -> Self {
1656+
let stack_limit;
16901657

1691-
Some(prev_stack)
1692-
}
1658+
// If this is a recursive call, e.g. our stack limit is already set, then
1659+
// we may be able to skip this function.
1660+
//
1661+
// For synchronous stores there's nothing else to do because all wasm calls
1662+
// happen synchronously and on the same stack. This means that the previous
1663+
// stack limit will suffice for the next recursive call.
1664+
//
1665+
// For asynchronous stores then each call happens on a separate native
1666+
// stack. This means that the previous stack limit is no longer relevant
1667+
// because we're on a separate stack.
1668+
if unsafe { *store.0.vm_store_context().stack_limit.get() } != usize::MAX
1669+
&& !store.0.async_support()
1670+
{
1671+
stack_limit = None;
1672+
}
1673+
// Ignore this stack pointer business on miri since we can't execute wasm
1674+
// anyway and the concept of a stack pointer on miri is a bit nebulous
1675+
// regardless.
1676+
else if cfg!(miri) {
1677+
stack_limit = None;
1678+
} else {
1679+
// When Cranelift has support for the host then we might be running native
1680+
// compiled code meaning we need to read the actual stack pointer. If
1681+
// Cranelift can't be used though then we're guaranteed to be running pulley
1682+
// in which case this stack pointer isn't actually used as Pulley has custom
1683+
// mechanisms for stack overflow.
1684+
#[cfg(has_host_compiler_backend)]
1685+
let stack_pointer = crate::runtime::vm::get_stack_pointer();
1686+
#[cfg(not(has_host_compiler_backend))]
1687+
let stack_pointer = {
1688+
use wasmtime_environ::TripleExt;
1689+
debug_assert!(store.engine().target().is_pulley());
1690+
usize::MAX
1691+
};
16931692

1694-
fn exit_wasm<T>(store: &mut StoreContextMut<'_, T>, prev_stack: Option<usize>) {
1695-
// If we don't have a previous stack pointer to restore, then there's no
1696-
// cleanup we need to perform here.
1697-
let prev_stack = match prev_stack {
1698-
Some(stack) => stack,
1699-
None => return,
1700-
};
1693+
// Determine the stack pointer where, after which, any wasm code will
1694+
// immediately trap. This is checked on the entry to all wasm functions.
1695+
//
1696+
// Note that this isn't 100% precise. We are requested to give wasm
1697+
// `max_wasm_stack` bytes, but what we're actually doing is giving wasm
1698+
// probably a little less than `max_wasm_stack` because we're
1699+
// calculating the limit relative to this function's approximate stack
1700+
// pointer. Wasm will be executed on a frame beneath this one (or next
1701+
// to it). In any case it's expected to be at most a few hundred bytes
1702+
// of slop one way or another. When wasm is typically given a MB or so
1703+
// (a million bytes) the slop shouldn't matter too much.
1704+
//
1705+
// After we've got the stack limit then we store it into the `stack_limit`
1706+
// variable.
1707+
let wasm_stack_limit = stack_pointer
1708+
.checked_sub(store.engine().config().max_wasm_stack)
1709+
.unwrap();
1710+
let prev_stack = unsafe {
1711+
mem::replace(
1712+
&mut *store.0.vm_store_context().stack_limit.get(),
1713+
wasm_stack_limit,
1714+
)
1715+
};
1716+
stack_limit = Some(prev_stack);
1717+
}
17011718

1702-
unsafe {
1703-
*store.0.vm_store_context().stack_limit.get() = prev_stack;
1719+
unsafe {
1720+
let last_wasm_exit_pc = *store.0.vm_store_context().last_wasm_exit_pc.get();
1721+
let last_wasm_exit_fp = *store.0.vm_store_context().last_wasm_exit_fp.get();
1722+
let last_wasm_entry_fp = *store.0.vm_store_context().last_wasm_entry_fp.get();
1723+
1724+
let vm_store_context = store.0.vm_store_context();
1725+
1726+
Self {
1727+
stack_limit,
1728+
last_wasm_exit_pc,
1729+
last_wasm_exit_fp,
1730+
last_wasm_entry_fp,
1731+
vm_store_context,
1732+
}
1733+
}
1734+
}
1735+
1736+
/// This function restores the values stored in this struct. We invoke this
1737+
/// function through this type's `Drop` implementation. This ensures that we
1738+
/// restore the values even if we unwind the stack (e.g., because we are
1739+
/// panicking out of a Wasm execution).
1740+
fn exit_wasm(&mut self) {
1741+
unsafe {
1742+
if let Some(limit) = self.stack_limit {
1743+
*(&*self.vm_store_context).stack_limit.get() = limit;
1744+
}
1745+
1746+
*(*self.vm_store_context).last_wasm_exit_fp.get() = self.last_wasm_exit_fp;
1747+
*(*self.vm_store_context).last_wasm_exit_pc.get() = self.last_wasm_exit_pc;
1748+
*(*self.vm_store_context).last_wasm_entry_fp.get() = self.last_wasm_entry_fp;
1749+
}
1750+
}
1751+
}
1752+
1753+
impl Drop for EntryStoreContext {
1754+
fn drop(&mut self) {
1755+
self.exit_wasm();
17041756
}
17051757
}
17061758

crates/wasmtime/src/runtime/vm/traphandlers.rs

Lines changed: 24 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -15,11 +15,11 @@ mod signals;
1515
#[cfg(all(has_native_signals))]
1616
pub use self::signals::*;
1717

18-
use crate::prelude::*;
1918
use crate::runtime::module::lookup_code;
2019
use crate::runtime::store::{ExecutorRef, StoreOpaque};
2120
use crate::runtime::vm::sys::traphandlers;
2221
use crate::runtime::vm::{InterpreterRef, VMContext, VMStoreContext};
22+
use crate::{prelude::*, EntryStoreContext};
2323
use crate::{StoreContextMut, WasmBacktrace};
2424
use core::cell::Cell;
2525
use core::num::NonZeroU32;
@@ -365,14 +365,15 @@ impl From<wasmtime_environ::Trap> for TrapReason {
365365
/// longjmp'd over and none of its destructors on the stack may be run.
366366
pub unsafe fn catch_traps<T, F>(
367367
store: &mut StoreContextMut<'_, T>,
368+
old_state: &EntryStoreContext,
368369
mut closure: F,
369370
) -> Result<(), Box<Trap>>
370371
where
371372
F: FnMut(NonNull<VMContext>, Option<InterpreterRef<'_>>) -> bool,
372373
{
373374
let caller = store.0.default_caller();
374375

375-
let result = CallThreadState::new(store.0).with(|cx| match store.0.executor() {
376+
let result = CallThreadState::new(store.0, old_state).with(|cx| match store.0.executor() {
376377
// In interpreted mode directly invoke the host closure since we won't
377378
// be using host-based `setjmp`/`longjmp` as that's not going to save
378379
// the context we want.
@@ -424,6 +425,7 @@ where
424425
mod call_thread_state {
425426
use super::*;
426427
use crate::runtime::vm::Unwind;
428+
use crate::EntryStoreContext;
427429

428430
/// Temporary state stored on the stack which is registered in the `tls`
429431
/// module below for calls into wasm.
@@ -462,39 +464,33 @@ mod call_thread_state {
462464
#[cfg(all(has_native_signals, unix))]
463465
pub(crate) async_guard_range: Range<*mut u8>,
464466

465-
// The values of `VMStoreContext::last_wasm_{exit_{pc,fp},entry_sp}` for
466-
// the *previous* `CallThreadState` for this same store/limits. Our
467-
// *current* last wasm PC/FP/SP are saved in `self.vm_store_context`. We
468-
// save a copy of the old registers here because the `VMStoreContext`
469-
// typically doesn't change across nested calls into Wasm (i.e. they are
470-
// typically calls back into the same store and `self.vm_store_context
471-
// == self.prev.vm_store_context`) and we must to maintain the list of
472-
// contiguous-Wasm-frames stack regions for backtracing purposes.
473-
old_last_wasm_exit_fp: Cell<usize>,
474-
old_last_wasm_exit_pc: Cell<usize>,
475-
old_last_wasm_entry_fp: Cell<usize>,
467+
// The state of the runtime for the *previous* `CallThreadState` for
468+
// this same store. Our *current* state is saved in `self.vm_store_context`,
469+
// etc. We need access to the old values of these
470+
// fields because the `VMStoreContext` typically doesn't change across
471+
// nested calls into Wasm (i.e. they are typically calls back into the
472+
// same store and `self.vm_store_context == self.prev.vm_store_context`) and we must
473+
// maintain the list of contiguous-Wasm-frames stack regions for
474+
// backtracing purposes.
475+
old_state: *const EntryStoreContext,
476476
}
477477

478478
impl Drop for CallThreadState {
479479
fn drop(&mut self) {
480480
// Unwind information should not be present as it should have
481481
// already been processed.
482482
debug_assert!(self.unwind.replace(None).is_none());
483-
484-
unsafe {
485-
let cx = self.vm_store_context.as_ref();
486-
*cx.last_wasm_exit_fp.get() = self.old_last_wasm_exit_fp.get();
487-
*cx.last_wasm_exit_pc.get() = self.old_last_wasm_exit_pc.get();
488-
*cx.last_wasm_entry_fp.get() = self.old_last_wasm_entry_fp.get();
489-
}
490483
}
491484
}
492485

493486
impl CallThreadState {
494487
pub const JMP_BUF_INTERPRETER_SENTINEL: *mut u8 = 1 as *mut u8;
495488

496489
#[inline]
497-
pub(super) fn new(store: &mut StoreOpaque) -> CallThreadState {
490+
pub(super) fn new(
491+
store: &mut StoreOpaque,
492+
old_state: *const EntryStoreContext,
493+
) -> CallThreadState {
498494
// Don't try to plumb #[cfg] everywhere for this field, just pretend
499495
// we're using it on miri/windows to silence compiler warnings.
500496
let _: Range<_> = store.async_guard_range();
@@ -512,31 +508,23 @@ mod call_thread_state {
512508
#[cfg(all(has_native_signals, unix))]
513509
async_guard_range: store.async_guard_range(),
514510
prev: Cell::new(ptr::null()),
515-
old_last_wasm_exit_fp: Cell::new(unsafe {
516-
*store.vm_store_context().last_wasm_exit_fp.get()
517-
}),
518-
old_last_wasm_exit_pc: Cell::new(unsafe {
519-
*store.vm_store_context().last_wasm_exit_pc.get()
520-
}),
521-
old_last_wasm_entry_fp: Cell::new(unsafe {
522-
*store.vm_store_context().last_wasm_entry_fp.get()
523-
}),
511+
old_state,
524512
}
525513
}
526514

527515
/// Get the saved FP upon exit from Wasm for the previous `CallThreadState`.
528-
pub fn old_last_wasm_exit_fp(&self) -> usize {
529-
self.old_last_wasm_exit_fp.get()
516+
pub unsafe fn old_last_wasm_exit_fp(&self) -> usize {
517+
(&*self.old_state).last_wasm_exit_fp
530518
}
531519

532520
/// Get the saved PC upon exit from Wasm for the previous `CallThreadState`.
533-
pub fn old_last_wasm_exit_pc(&self) -> usize {
534-
self.old_last_wasm_exit_pc.get()
521+
pub unsafe fn old_last_wasm_exit_pc(&self) -> usize {
522+
(&*self.old_state).last_wasm_exit_pc
535523
}
536524

537525
/// Get the saved FP upon entry into Wasm for the previous `CallThreadState`.
538-
pub fn old_last_wasm_entry_fp(&self) -> usize {
539-
self.old_last_wasm_entry_fp.get()
526+
pub unsafe fn old_last_wasm_entry_fp(&self) -> usize {
527+
(&*self.old_state).last_wasm_entry_fp
540528
}
541529

542530
/// Get the previous `CallThreadState`.

0 commit comments

Comments
 (0)