1 | 1 | use crate::prelude::*; |
2 | 2 | use crate::runtime::vm::{ |
3 | 3 | ExportFunction, InterpreterRef, SendSyncPtr, StoreBox, VMArrayCallHostFuncContext, VMContext, |
4 | | - VMFuncRef, VMFunctionImport, VMOpaqueContext, |
| 4 | + VMFuncRef, VMFunctionImport, VMOpaqueContext, VMStoreContext, |
5 | 5 | }; |
6 | 6 | use crate::runtime::Uninhabited; |
7 | 7 | use crate::store::{AutoAssertNoGc, StoreData, StoreOpaque, Stored}; |
@@ -1604,103 +1604,155 @@ pub(crate) fn invoke_wasm_and_catch_traps<T>( |
1604 | 1604 | closure: impl FnMut(NonNull<VMContext>, Option<InterpreterRef<'_>>) -> bool, |
1605 | 1605 | ) -> Result<()> { |
1606 | 1606 | unsafe { |
1607 | | - let exit = enter_wasm(store); |
| 1607 | + let previous_runtime_state = EntryStoreContext::enter_wasm(store); |
1608 | 1608 |
1609 | 1609 | if let Err(trap) = store.0.call_hook(CallHook::CallingWasm) { |
1610 | | - exit_wasm(store, exit); |
| 1610 | + // `previous_runtime_state` implicitly dropped here |
1611 | 1611 | return Err(trap); |
1612 | 1612 | } |
1613 | | - let result = crate::runtime::vm::catch_traps(store, closure); |
1614 | | - exit_wasm(store, exit); |
| 1613 | + let result = crate::runtime::vm::catch_traps(store, &previous_runtime_state, closure); |
| 1614 | + core::mem::drop(previous_runtime_state); |
1615 | 1615 | store.0.call_hook(CallHook::ReturningFromWasm)?; |
1616 | 1616 | result.map_err(|t| crate::trap::from_runtime_box(store.0, t)) |
1617 | 1617 | } |
1618 | 1618 | } |
1619 | 1619 |
1620 | | -/// This function is called to register state within `Store` whenever |
1621 | | -/// WebAssembly is entered within the `Store`. |
1622 | | -/// |
1623 | | -/// This function sets up various limits such as: |
1624 | | -/// |
1625 | | -/// * The stack limit. This is what ensures that we limit the stack space |
1626 | | -/// allocated by WebAssembly code and it's relative to the initial stack |
1627 | | -/// pointer that called into wasm. |
1628 | | -/// |
1629 | | -/// This function may fail if the stack limit can't be set because an |
1630 | | -/// interrupt already happened. |
1631 | | -fn enter_wasm<T>(store: &mut StoreContextMut<'_, T>) -> Option<usize> { |
1632 | | - // If this is a recursive call, e.g. our stack limit is already set, then |
1633 | | - // we may be able to skip this function. |
1634 | | - // |
1635 | | - // For synchronous stores there's nothing else to do because all wasm calls |
1636 | | - // happen synchronously and on the same stack. This means that the previous |
1637 | | - // stack limit will suffice for the next recursive call. |
1638 | | - // |
1639 | | - // For asynchronous stores then each call happens on a separate native |
1640 | | - // stack. This means that the previous stack limit is no longer relevant |
1641 | | - // because we're on a separate stack. |
1642 | | - if unsafe { *store.0.vm_store_context().stack_limit.get() } != usize::MAX |
1643 | | - && !store.0.async_support() |
1644 | | - { |
1645 | | - return None; |
1646 | | - } |
1647 | | - |
1648 | | - // Ignore this stack pointer business on miri since we can't execute wasm |
1649 | | - // anyway and the concept of a stack pointer on miri is a bit nebulous |
1650 | | - // regardless. |
1651 | | - if cfg!(miri) { |
1652 | | - return None; |
1653 | | - } |
1654 | | - |
1655 | | - // When Cranelift has support for the host then we might be running native |
1656 | | - // compiled code meaning we need to read the actual stack pointer. If |
1657 | | - // Cranelift can't be used though then we're guaranteed to be running pulley |
1658 | | - // in which case this stack pointer isn't actually used as Pulley has custom |
1659 | | - // mechanisms for stack overflow. |
1660 | | - #[cfg(has_host_compiler_backend)] |
1661 | | - let stack_pointer = crate::runtime::vm::get_stack_pointer(); |
1662 | | - #[cfg(not(has_host_compiler_backend))] |
1663 | | - let stack_pointer = { |
1664 | | - use wasmtime_environ::TripleExt; |
1665 | | - debug_assert!(store.engine().target().is_pulley()); |
1666 | | - usize::MAX |
1667 | | - }; |
| 1620 | +/// This type helps manage the state of the runtime when entering and exiting |
| 1621 | +/// Wasm. To this end, it contains a subset of the data in `VMStoreContext`. |
| 1622 | +/// Upon entering Wasm, it updates various runtime fields and saves their |
| 1623 | +/// original values in this struct. Upon exiting Wasm, the previous values |
| 1624 | +/// are restored. |
| 1625 | +pub(crate) struct EntryStoreContext { |
| 1626 | + /// If set, contains the value of the `stack_limit` field to restore in |
| 1627 | + /// `VMStoreContext` when exiting Wasm. |
| 1628 | + pub stack_limit: Option<usize>, |
| 1629 | + /// Contains the value of the `last_wasm_exit_pc` field to restore in |
| 1630 | + /// `VMStoreContext` when exiting Wasm. |
| 1631 | + pub last_wasm_exit_pc: usize, |
| 1632 | + /// Contains the value of the `last_wasm_exit_fp` field to restore in |
| 1633 | + /// `VMStoreContext` when exiting Wasm. |
| 1634 | + pub last_wasm_exit_fp: usize, |
| 1635 | + /// Contains the value of the `last_wasm_entry_fp` field to restore in |
| 1636 | + /// `VMStoreContext` when exiting Wasm. |
| 1637 | + pub last_wasm_entry_fp: usize, |
| 1638 | + |
| 1639 | + /// We need a pointer to the `VMStoreContext` so we can restore the saved |
| 1640 | + /// fields from `drop`/`exit_wasm`. |
| 1641 | + vm_store_context: *const VMStoreContext, |
| 1642 | +} |
1668 | 1643 |
1669 | | - // Determine the stack pointer where, after which, any wasm code will |
1670 | | - // immediately trap. This is checked on the entry to all wasm functions. |
1671 | | - // |
1672 | | - // Note that this isn't 100% precise. We are requested to give wasm |
1673 | | - // `max_wasm_stack` bytes, but what we're actually doing is giving wasm |
1674 | | - // probably a little less than `max_wasm_stack` because we're |
1675 | | - // calculating the limit relative to this function's approximate stack |
1676 | | - // pointer. Wasm will be executed on a frame beneath this one (or next |
1677 | | - // to it). In any case it's expected to be at most a few hundred bytes |
1678 | | - // of slop one way or another. When wasm is typically given a MB or so |
1679 | | - // (a million bytes) the slop shouldn't matter too much. |
1680 | | - // |
1681 | | - // After we've got the stack limit then we store it into the `stack_limit` |
1682 | | - // variable. |
1683 | | - let wasm_stack_limit = stack_pointer - store.engine().config().max_wasm_stack; |
1684 | | - let prev_stack = unsafe { |
1685 | | - mem::replace( |
1686 | | - &mut *store.0.vm_store_context().stack_limit.get(), |
1687 | | - wasm_stack_limit, |
1688 | | - ) |
1689 | | - }; |
| 1644 | +impl EntryStoreContext { |
| 1645 | + /// This function is called to update and save state when |
| 1646 | + /// WebAssembly is entered within the `Store`. |
| 1647 | + /// |
| 1648 | + /// This updates various fields such as: |
| 1649 | + /// |
| 1650 | + /// * The stack limit. This is what ensures that we limit the stack space |
| 1651 | + /// allocated by WebAssembly code and it's relative to the initial stack |
| 1652 | + /// pointer that called into wasm. |
| 1653 | + /// |
| 1654 | + /// It also saves the various `last_wasm_*` values of the `VMStoreContext`. |
| 1655 | + pub fn enter_wasm<T>(store: &mut StoreContextMut<'_, T>) -> Self { |
| 1656 | + let stack_limit; |
1690 | 1657 |
1691 | | - Some(prev_stack) |
1692 | | -} |
| 1658 | + // If this is a recursive call, e.g. our stack limit is already set, then |
| 1659 | + // we may be able to skip this function. |
| 1660 | + // |
| 1661 | + // For synchronous stores there's nothing else to do because all wasm calls |
| 1662 | + // happen synchronously and on the same stack. This means that the previous |
| 1663 | + // stack limit will suffice for the next recursive call. |
| 1664 | + // |
| 1665 | + // For asynchronous stores then each call happens on a separate native |
| 1666 | + // stack. This means that the previous stack limit is no longer relevant |
| 1667 | + // because we're on a separate stack. |
| 1668 | + if unsafe { *store.0.vm_store_context().stack_limit.get() } != usize::MAX |
| 1669 | + && !store.0.async_support() |
| 1670 | + { |
| 1671 | + stack_limit = None; |
| 1672 | + } |
| 1673 | + // Ignore this stack pointer business on miri since we can't execute wasm |
| 1674 | + // anyway and the concept of a stack pointer on miri is a bit nebulous |
| 1675 | + // regardless. |
| 1676 | + else if cfg!(miri) { |
| 1677 | + stack_limit = None; |
| 1678 | + } else { |
| 1679 | + // When Cranelift has support for the host then we might be running native |
| 1680 | + // compiled code meaning we need to read the actual stack pointer. If |
| 1681 | + // Cranelift can't be used though then we're guaranteed to be running pulley |
| 1682 | + // in which case this stack pointer isn't actually used as Pulley has custom |
| 1683 | + // mechanisms for stack overflow. |
| 1684 | + #[cfg(has_host_compiler_backend)] |
| 1685 | + let stack_pointer = crate::runtime::vm::get_stack_pointer(); |
| 1686 | + #[cfg(not(has_host_compiler_backend))] |
| 1687 | + let stack_pointer = { |
| 1688 | + use wasmtime_environ::TripleExt; |
| 1689 | + debug_assert!(store.engine().target().is_pulley()); |
| 1690 | + usize::MAX |
| 1691 | + }; |
1693 | 1692 |
1694 | | -fn exit_wasm<T>(store: &mut StoreContextMut<'_, T>, prev_stack: Option<usize>) { |
1695 | | - // If we don't have a previous stack pointer to restore, then there's no |
1696 | | - // cleanup we need to perform here. |
1697 | | - let prev_stack = match prev_stack { |
1698 | | - Some(stack) => stack, |
1699 | | - None => return, |
1700 | | - }; |
| 1693 | + // Determine the stack pointer where, after which, any wasm code will |
| 1694 | + // immediately trap. This is checked on the entry to all wasm functions. |
| 1695 | + // |
| 1696 | + // Note that this isn't 100% precise. We are requested to give wasm |
| 1697 | + // `max_wasm_stack` bytes, but what we're actually doing is giving wasm |
| 1698 | + // probably a little less than `max_wasm_stack` because we're |
| 1699 | + // calculating the limit relative to this function's approximate stack |
| 1700 | + // pointer. Wasm will be executed on a frame beneath this one (or next |
| 1701 | + // to it). In any case it's expected to be at most a few hundred bytes |
| 1702 | + // of slop one way or another. When wasm is typically given a MB or so |
| 1703 | + // (a million bytes) the slop shouldn't matter too much. |
| 1704 | + // |
| 1705 | + // After we've got the stack limit then we store it into the `stack_limit` |
| 1706 | + // variable. |
| 1707 | + let wasm_stack_limit = stack_pointer |
| 1708 | + .checked_sub(store.engine().config().max_wasm_stack) |
| 1709 | + .unwrap(); |
| 1710 | + let prev_stack = unsafe { |
| 1711 | + mem::replace( |
| 1712 | + &mut *store.0.vm_store_context().stack_limit.get(), |
| 1713 | + wasm_stack_limit, |
| 1714 | + ) |
| 1715 | + }; |
| 1716 | + stack_limit = Some(prev_stack); |
| 1717 | + } |
1701 | 1718 |
1702 | | - unsafe { |
1703 | | - *store.0.vm_store_context().stack_limit.get() = prev_stack; |
| 1719 | + unsafe { |
| 1720 | + let last_wasm_exit_pc = *store.0.vm_store_context().last_wasm_exit_pc.get(); |
| 1721 | + let last_wasm_exit_fp = *store.0.vm_store_context().last_wasm_exit_fp.get(); |
| 1722 | + let last_wasm_entry_fp = *store.0.vm_store_context().last_wasm_entry_fp.get(); |
| 1723 | + |
| 1724 | + let vm_store_context = store.0.vm_store_context(); |
| 1725 | + |
| 1726 | + Self { |
| 1727 | + stack_limit, |
| 1728 | + last_wasm_exit_pc, |
| 1729 | + last_wasm_exit_fp, |
| 1730 | + last_wasm_entry_fp, |
| 1731 | + vm_store_context, |
| 1732 | + } |
| 1733 | + } |
| 1734 | + } |
| 1735 | + |
| 1736 | + /// This function restores the values stored in this struct. We invoke it |
| 1737 | + /// through this type's `Drop` implementation, which ensures that the values |
| 1738 | + /// are restored even if we unwind the stack (e.g., because we are |
| 1739 | + /// panicking out of a Wasm execution). |
| 1740 | + fn exit_wasm(&mut self) { |
| 1741 | + unsafe { |
| 1742 | + if let Some(limit) = self.stack_limit { |
| 1743 | + *(*self.vm_store_context).stack_limit.get() = limit; |
| 1744 | + } |
| 1745 | + |
| 1746 | + *(*self.vm_store_context).last_wasm_exit_fp.get() = self.last_wasm_exit_fp; |
| 1747 | + *(*self.vm_store_context).last_wasm_exit_pc.get() = self.last_wasm_exit_pc; |
| 1748 | + *(*self.vm_store_context).last_wasm_entry_fp.get() = self.last_wasm_entry_fp; |
| 1749 | + } |
| 1750 | + } |
| 1751 | +} |
| 1752 | + |
| 1753 | +impl Drop for EntryStoreContext { |
| 1754 | + fn drop(&mut self) { |
| 1755 | + self.exit_wasm(); |
1704 | 1756 | } |
1705 | 1757 | } |
1706 | 1758 |
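The net effect of this change is that the paired `enter_wasm`/`exit_wasm` calls become a guard value: `EntryStoreContext::enter_wasm` saves the current `stack_limit` and `last_wasm_*` fields, and the `Drop` impl restores them, so `invoke_wasm_and_catch_traps` no longer has to remember to call `exit_wasm` on every path (early return after a failed call hook, normal return, or a panic that unwinds through the frame). Below is a minimal, standalone sketch of that drop-guard shape; the `EntryGuard` type and the thread-local `STACK_LIMIT` are made-up stand-ins for illustration, not wasmtime's actual types:

```rust
use std::cell::Cell;
use std::panic;

// Stand-in for the store-owned runtime state. The real guard saves and
// restores `stack_limit` plus the `last_wasm_*` fields of `VMStoreContext`;
// a thread-local `Cell` is enough to show the shape.
thread_local! {
    static STACK_LIMIT: Cell<usize> = Cell::new(usize::MAX);
}

/// Hypothetical guard mirroring `EntryStoreContext`: save on entry,
/// restore in `Drop`.
struct EntryGuard {
    previous_limit: usize,
}

impl EntryGuard {
    fn enter(new_limit: usize) -> Self {
        // Install the new limit and remember the previous one.
        let previous_limit = STACK_LIMIT.with(|l| l.replace(new_limit));
        EntryGuard { previous_limit }
    }
}

impl Drop for EntryGuard {
    fn drop(&mut self) {
        // Runs on normal scope exit, early `return`, and panic unwind alike.
        STACK_LIMIT.with(|l| l.set(self.previous_limit));
    }
}

fn main() {
    let result = panic::catch_unwind(|| {
        let _guard = EntryGuard::enter(0x1000);
        // Simulate a host panic while "Wasm" is on the stack.
        panic!("trap while executing");
    });
    assert!(result.is_err());
    // The guard's `Drop` restored the previous value despite the unwind.
    assert_eq!(STACK_LIMIT.with(|l| l.get()), usize::MAX);
    println!("stack limit restored after unwind");
}
```

In the diff above, `previous_runtime_state` plays this role: it is dropped implicitly on the early-return path after a failed `CallingWasm` hook and explicitly via `core::mem::drop` once `catch_traps` returns.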
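The comments kept in `enter_wasm` explain the stack-limit computation itself: take an approximate stack pointer at the point of entry, subtract the configured `max_wasm_stack` budget, and have every Wasm function prologue trap once the stack pointer drops below that limit (hence the `checked_sub(...).unwrap()` rather than a plain subtraction that could wrap on underflow in release builds). A rough sketch of that arithmetic, assuming a downward-growing stack; the concrete values and the `would_trap` helper are hypothetical, while the budget corresponds to wasmtime's `Config::max_wasm_stack` setting:

```rust
/// Hypothetical helper: with a downward-growing stack, a Wasm frame must
/// trap once its stack pointer goes below the computed limit.
fn would_trap(current_stack_pointer: usize, stack_limit: usize) -> bool {
    current_stack_pointer < stack_limit
}

fn main() {
    // Made-up stack pointer at the host-to-Wasm entry point.
    let stack_pointer: usize = 0x7fff_0000;
    // Stack budget handed to Wasm, e.g. 512 KiB via `Config::max_wasm_stack`.
    let max_wasm_stack: usize = 512 * 1024;

    // Mirrors the `checked_sub(...).unwrap()` in the diff: panic loudly
    // instead of wrapping if the budget exceeds the current stack pointer.
    let stack_limit = stack_pointer.checked_sub(max_wasm_stack).unwrap();

    // A frame a few KiB below the entry point is still within budget...
    assert!(!would_trap(stack_pointer - 4096, stack_limit));
    // ...but dipping past the budget would trap with a stack overflow.
    assert!(would_trap(stack_limit - 1, stack_limit));
    println!("stack_limit = {stack_limit:#x}");
}
```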