diff --git a/Cargo.lock b/Cargo.lock index e3ac311bec6f..d79ac1d110a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4604,6 +4604,7 @@ dependencies = [ "wasmtime-internal-component-util", "wasmtime-internal-cranelift", "wasmtime-internal-explorer", + "wasmtime-internal-unwinder", "wasmtime-test-macros", "wasmtime-test-util", "wasmtime-wasi", @@ -4823,6 +4824,7 @@ dependencies = [ "wasmparser 0.236.0", "wasmtime-environ", "wasmtime-internal-math", + "wasmtime-internal-unwinder", "wasmtime-internal-versioned-export-macros", ] diff --git a/Cargo.toml b/Cargo.toml index bf2e5447b780..bfdce00b0668 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,6 +58,7 @@ wasmtime-wasi-tls = { workspace = true, optional = true } wasmtime-wasi-keyvalue = { workspace = true, optional = true } wasmtime-wasi-threads = { workspace = true, optional = true } wasmtime-wasi-http = { workspace = true, optional = true } +wasmtime-unwinder = { workspace = true } clap = { workspace = true } clap_complete = { workspace = true, optional = true } anyhow = { workspace = true, features = ['std'] } diff --git a/cranelift/codegen/src/isa/aarch64/inst/mod.rs b/cranelift/codegen/src/isa/aarch64/inst/mod.rs index 4e7ef989c700..e0fddfa1096a 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/mod.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/mod.rs @@ -976,7 +976,7 @@ impl MachInst for Inst { // // See the note in [crate::isa::aarch64::abi::is_caller_save_reg] for // more information on this ABI-implementation hack. 
- let caller_clobbers = AArch64MachineDeps::get_regs_clobbered_by_call(caller, is_exception); + let caller_clobbers = AArch64MachineDeps::get_regs_clobbered_by_call(caller, false); let callee_clobbers = AArch64MachineDeps::get_regs_clobbered_by_call(callee, is_exception); let mut all_clobbers = caller_clobbers; diff --git a/cranelift/codegen/src/machinst/abi.rs b/cranelift/codegen/src/machinst/abi.rs index e76abcf5e015..e15ec09a43d2 100644 --- a/cranelift/codegen/src/machinst/abi.rs +++ b/cranelift/codegen/src/machinst/abi.rs @@ -2495,7 +2495,9 @@ impl TryCallInfo { TryCallHandler::Default(label) => MachExceptionHandler::Default(*label), TryCallHandler::Context(reg) => { let loc = if let Some(spillslot) = reg.to_spillslot() { - let offset = layout.spillslot_offset(spillslot); + // The spillslot offset is relative to the "fixed + // storage area", which comes after outgoing args. + let offset = layout.spillslot_offset(spillslot) + i64::from(layout.outgoing_args_size); ExceptionContextLoc::SPOffset(u32::try_from(offset).expect("SP offset cannot be negative or larger than 4GiB")) } else if let Some(realreg) = reg.to_real_reg() { ExceptionContextLoc::GPR(realreg.hw_enc()) diff --git a/cranelift/filetests/src/function_runner.rs b/cranelift/filetests/src/function_runner.rs index 92833cc5d046..18fc626124a1 100644 --- a/cranelift/filetests/src/function_runner.rs +++ b/cranelift/filetests/src/function_runner.rs @@ -648,21 +648,32 @@ extern "C-unwind" fn __cranelift_throw( ) -> ! 
{ let compiled_test_file = unsafe { &*COMPILED_TEST_FILE.get() }; let unwind_host = wasmtime_unwinder::UnwindHost; - let module_lookup = |pc| { - compiled_test_file + let frame_handler = |frame: &wasmtime_unwinder::Frame| -> Option { + let (base, table) = compiled_test_file .module .as_ref() .unwrap() - .lookup_wasmtime_exception_data(pc) + .lookup_wasmtime_exception_data(frame.pc())?; + let relative_pc = u32::try_from( + frame + .pc() + .checked_sub(base) + .expect("module lookup did not return a module base below the PC"), + ) + .expect("module larger than 4GiB"); + + table.lookup_pc_tag(relative_pc, tag).map(|handler| { + base.checked_add(usize::try_from(handler).unwrap()) + .expect("Handler address computation overflowed") + }) }; unsafe { match wasmtime_unwinder::compute_throw_action( &unwind_host, - module_lookup, + frame_handler, exit_pc, exit_fp, entry_fp, - tag, ) { wasmtime_unwinder::ThrowAction::Handler { pc, sp, fp } => { wasmtime_unwinder::resume_to_exception_handler(pc, sp, fp, payload1, payload2); diff --git a/crates/cli-flags/src/lib.rs b/crates/cli-flags/src/lib.rs index c2130943b596..90f571c3627e 100644 --- a/crates/cli-flags/src/lib.rs +++ b/crates/cli-flags/src/lib.rs @@ -401,8 +401,6 @@ wasmtime_option_group! { pub extended_const: Option, /// Configure support for the exceptions proposal. pub exceptions: Option, - /// DEPRECATED: Configure support for the legacy exceptions proposal. - pub legacy_exceptions: Option, /// Whether or not any GC infrastructure in Wasmtime is enabled or not. pub gc_support: Option, } @@ -1035,13 +1033,6 @@ impl CommonOptions { if let Some(enable) = self.wasm.extended_const.or(all) { config.wasm_extended_const(enable); } - if let Some(enable) = self.wasm.exceptions.or(all) { - config.wasm_exceptions(enable); - } - if let Some(enable) = self.wasm.legacy_exceptions.or(all) { - #[expect(deprecated, reason = "forwarding CLI flag")] - config.wasm_legacy_exceptions(enable); - } macro_rules! 
handle_conditionally_compiled { ($(($feature:tt, $field:tt, $method:tt))*) => ($( @@ -1066,6 +1057,7 @@ impl CommonOptions { ("gc", gc, wasm_gc) ("gc", reference_types, wasm_reference_types) ("gc", function_references, wasm_function_references) + ("gc", exceptions, wasm_exceptions) ("stack-switching", stack_switching, wasm_stack_switching) } diff --git a/crates/cranelift/Cargo.toml b/crates/cranelift/Cargo.toml index 0eaea583361e..c3c1a95efceb 100644 --- a/crates/cranelift/Cargo.toml +++ b/crates/cranelift/Cargo.toml @@ -34,6 +34,7 @@ wasmtime-versioned-export-macros = { workspace = true } itertools = { workspace = true } pulley-interpreter = { workspace = true, optional = true } wasmtime-math = { workspace = true } +wasmtime-unwinder = { workspace = true, features = ["cranelift"] } [features] all-arch = ["cranelift-codegen/all-arch"] diff --git a/crates/cranelift/src/compiler.rs b/crates/cranelift/src/compiler.rs index f2d958b7fccd..ff3a9be4e976 100644 --- a/crates/cranelift/src/compiler.rs +++ b/crates/cranelift/src/compiler.rs @@ -14,7 +14,7 @@ use cranelift_codegen::isa::{ unwind::{UnwindInfo, UnwindInfoKind}, }; use cranelift_codegen::print_errors::pretty_error; -use cranelift_codegen::{CompiledCode, Context}; +use cranelift_codegen::{CompiledCode, Context, FinalizedMachCallSite}; use cranelift_entity::PrimaryMap; use cranelift_frontend::FunctionBuilder; use object::write::{Object, StandardSegment, SymbolId}; @@ -28,6 +28,7 @@ use std::ops::Range; use std::path; use std::sync::{Arc, Mutex}; use wasmparser::{FuncValidatorAllocations, FunctionBody}; +use wasmtime_environ::obj::ELF_WASMTIME_EXCEPTIONS; use wasmtime_environ::{ AddressMapSection, BuiltinFunctionIndex, CacheStore, CompileError, CompiledFunctionBody, DefinedFuncIndex, FlagValue, FuncKey, FunctionBodyData, FunctionLoc, HostCall, @@ -35,6 +36,7 @@ use wasmtime_environ::{ StaticModuleIndex, TrapEncodingBuilder, TrapSentinel, TripleExt, Tunables, VMOffsets, WasmFuncType, WasmValType, }; +use 
wasmtime_unwinder::ExceptionTableBuilder; #[cfg(feature = "component-model")] mod component; @@ -525,6 +527,7 @@ impl wasmtime_environ::Compiler for Compiler { let mut addrs = AddressMapSection::default(); let mut traps = TrapEncodingBuilder::default(); let mut stack_maps = StackMapSection::default(); + let mut exception_tables = ExceptionTableBuilder::default(); let mut ret = Vec::with_capacity(funcs.len()); for (i, (sym, func)) in funcs.iter().enumerate() { @@ -547,6 +550,11 @@ impl wasmtime_environ::Compiler for Compiler { ); traps.push(range.clone(), &func.traps().collect::>()); + clif_to_env_exception_tables( + &mut exception_tables, + range.clone(), + func.buffer.call_sites(), + )?; builder.append_padding(self.linkopts.padding_between_functions); let info = FunctionLoc { @@ -564,6 +572,15 @@ impl wasmtime_environ::Compiler for Compiler { stack_maps.append_to(obj); traps.append_to(obj); + let exception_section = obj.add_section( + obj.segment_name(StandardSegment::Data).to_vec(), + ELF_WASMTIME_EXCEPTIONS.as_bytes().to_vec(), + SectionKind::ReadOnlyData, + ); + exception_tables.serialize(|bytes| { + obj.append_section_data(exception_section, bytes, 1); + }); + Ok(ret) } @@ -1328,6 +1345,21 @@ fn clif_to_env_stack_maps( } } +/// Convert from Cranelift's representation of exception handler +/// metadata to Wasmtime's compiler-agnostic representation. +/// +/// Here `builder` is the wasmtime-unwinder exception section being +/// created and `range` is the range of the function being added. The +/// `call_sites` iterator is the raw iterator over callsite metadata +/// (including exception handlers) from Cranelift. 
+fn clif_to_env_exception_tables<'a>( + builder: &mut ExceptionTableBuilder, + range: Range, + call_sites: impl Iterator>, +) -> anyhow::Result<()> { + builder.add_func(CodeOffset::try_from(range.start).unwrap(), call_sites) +} + fn declare_and_call( builder: &mut FunctionBuilder, signature: ir::Signature, @@ -1417,25 +1449,17 @@ fn save_last_wasm_exit_fp_and_pc( ptr: &impl PtrSize, limits: Value, ) { - // Save the exit Wasm FP to the limits. We dereference the current FP to get - // the previous FP because the current FP is the trampoline's FP, and we - // want the Wasm function's FP, which is the caller of this trampoline. + // Save the trampoline FP to the limits. Exception unwind needs + // this so that it can know the SP (bottom of frame) for the very + // last Wasm frame. let trampoline_fp = builder.ins().get_frame_pointer(pointer_type); - let wasm_fp = builder.ins().load( - pointer_type, - MemFlags::trusted(), - trampoline_fp, - // The FP always points to the next older FP for all supported - // targets. See assertion in - // `crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs`. - 0, - ); builder.ins().store( MemFlags::trusted(), - wasm_fp, + trampoline_fp, limits, - ptr.vmstore_context_last_wasm_exit_fp(), + ptr.vmstore_context_last_wasm_exit_trampoline_fp(), ); + // Finally save the Wasm return address to the limits. 
let wasm_pc = builder.ins().get_return_address(pointer_type); builder.ins().store( diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs index 2dff25a3995f..3871f47b923d 100644 --- a/crates/cranelift/src/func_environ.rs +++ b/crates/cranelift/src/func_environ.rs @@ -10,23 +10,23 @@ use cranelift_codegen::cursor::FuncCursor; use cranelift_codegen::ir::condcodes::{FloatCC, IntCC}; use cranelift_codegen::ir::immediates::{Imm64, Offset32, V128Imm}; use cranelift_codegen::ir::pcc::Fact; -use cranelift_codegen::ir::types::*; -use cranelift_codegen::ir::{self, types}; +use cranelift_codegen::ir::{self, BlockArg, ExceptionTableData, ExceptionTableItem, types}; use cranelift_codegen::ir::{ArgumentPurpose, ConstantData, Function, InstBuilder, MemFlags}; +use cranelift_codegen::ir::{Block, ExceptionTag, types::*}; use cranelift_codegen::isa::{TargetFrontendConfig, TargetIsa}; use cranelift_entity::packed_option::{PackedOption, ReservedValue}; use cranelift_entity::{EntityRef, PrimaryMap, SecondaryMap}; use cranelift_frontend::Variable; use cranelift_frontend::{FuncInstBuilder, FunctionBuilder}; -use smallvec::SmallVec; +use smallvec::{SmallVec, smallvec}; use std::mem; use wasmparser::{Operator, WasmFeatures}; use wasmtime_environ::{ BuiltinFunctionIndex, DataIndex, DefinedFuncIndex, ElemIndex, EngineOrModuleTypeIndex, FuncIndex, FuncKey, GlobalIndex, IndexType, Memory, MemoryIndex, Module, ModuleInternedTypeIndex, ModuleTranslation, ModuleTypesBuilder, PtrSize, Table, TableIndex, - TripleExt, Tunables, TypeConvert, TypeIndex, VMOffsets, WasmCompositeInnerType, WasmFuncType, - WasmHeapTopType, WasmHeapType, WasmRefType, WasmResult, WasmValType, + TagIndex, TripleExt, Tunables, TypeConvert, TypeIndex, VMOffsets, WasmCompositeInnerType, + WasmFuncType, WasmHeapTopType, WasmHeapType, WasmRefType, WasmResult, WasmValType, }; use wasmtime_environ::{FUNCREF_INIT_BIT, FUNCREF_MASK}; use wasmtime_math::f64_cvt_to_int_bounds; @@ -399,7 +399,8 @@ 
impl<'module_environment> FuncEnvironment<'module_environment> { | Operator::Call { .. } | Operator::ReturnCall { .. } | Operator::ReturnCallRef { .. } - | Operator::ReturnCallIndirect { .. } => { + | Operator::ReturnCallIndirect { .. } + | Operator::Throw { .. } | Operator::ThrowRef => { self.fuel_increment_var(builder); self.fuel_save_from_var(builder); } @@ -1612,11 +1613,71 @@ impl FuncEnvironment<'_> { element_size, } } + + /// Get the type index associated with an exception object. + #[cfg(feature = "gc")] + pub(crate) fn exception_type_from_tag(&self, tag: TagIndex) -> EngineOrModuleTypeIndex { + self.module.tags[tag].exception + } + + /// Get the parameter arity of the associated function type for the given tag. + pub(crate) fn tag_param_arity(&self, tag: TagIndex) -> usize { + let func_ty = self.module.tags[tag].signature.unwrap_module_type_index(); + let func_ty = self + .types + .unwrap_func(func_ty) + .expect("already validated to refer to a function type"); + func_ty.params().len() + } + + /// Get the runtime instance ID and defined-tag ID in that + /// instance for a particular static tag ID. + #[cfg(feature = "gc")] + pub(crate) fn get_instance_and_tag( + &mut self, + builder: &mut FunctionBuilder<'_>, + tag_index: TagIndex, + ) -> (ir::Value, ir::Value) { + if let Some(defined_tag_index) = self.module.defined_tag_index(tag_index) { + // Our own tag -- we only need to get our instance ID. + let builtin = self.builtin_functions.get_instance_id(builder.func); + let vmctx = self.vmctx_val(&mut builder.cursor()); + let call = builder.ins().call(builtin, &[vmctx]); + let instance_id = builder.func.dfg.inst_results(call)[0]; + let tag_id = builder + .ins() + .iconst(I32, i64::from(defined_tag_index.as_u32())); + (instance_id, tag_id) + } else { + // An imported tag -- we need to load the VMTagImport struct. 
+ let vmctx_tag_vmctx_offset = self.offsets.vmctx_vmtag_import_vmctx(tag_index); + let vmctx_tag_index_offset = self.offsets.vmctx_vmtag_import_index(tag_index); + let vmctx = self.vmctx_val(&mut builder.cursor()); + let pointer_type = self.pointer_type(); + let from_vmctx = builder.ins().load( + pointer_type, + MemFlags::trusted().with_readonly(), + vmctx, + i32::try_from(vmctx_tag_vmctx_offset).unwrap(), + ); + let index = builder.ins().load( + I32, + MemFlags::trusted().with_readonly(), + vmctx, + i32::try_from(vmctx_tag_index_offset).unwrap(), + ); + let builtin = self.builtin_functions.get_instance_id(builder.func); + let call = builder.ins().call(builtin, &[from_vmctx]); + let from_instance_id = builder.func.dfg.inst_results(call)[0]; + (from_instance_id, index) + } + } } struct Call<'a, 'func, 'module_env> { builder: &'a mut FunctionBuilder<'func>, env: &'a mut FuncEnvironment<'module_env>, + handlers: Vec<(Option, Block)>, tail: bool, } @@ -1630,15 +1691,20 @@ enum CheckIndirectCallTypeSignature { StaticTrap, } +type CallRets = SmallVec<[ir::Value; 4]>; + impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { /// Create a new `Call` site that will do regular, non-tail calls. 
pub fn new( builder: &'a mut FunctionBuilder<'func>, env: &'a mut FuncEnvironment<'module_env>, + handlers: impl IntoIterator, Block)>, ) -> Self { + let handlers = handlers.into_iter().collect(); Call { builder, env, + handlers, tail: false, } } @@ -1651,6 +1717,7 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { Call { builder, env, + handlers: vec![], tail: true, } } @@ -1661,7 +1728,7 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { callee_index: FuncIndex, sig_ref: ir::SigRef, call_args: &[ir::Value], - ) -> WasmResult { + ) -> WasmResult { let mut real_call_args = Vec::with_capacity(call_args.len() + 2); let caller_vmctx = self .builder @@ -1745,7 +1812,7 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { sig_ref: ir::SigRef, callee: ir::Value, call_args: &[ir::Value], - ) -> WasmResult> { + ) -> WasmResult> { let (code_ptr, callee_vmctx) = match self.check_and_load_code_and_callee_vmctx( features, table_index, @@ -1960,11 +2027,11 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { /// Call a typed function reference. pub fn call_ref( - mut self, + self, sig_ref: ir::SigRef, callee: ir::Value, args: &[ir::Value], - ) -> WasmResult { + ) -> WasmResult { // FIXME: the wasm type system tracks enough information to know whether // `callee` is a null reference or not. In some situations it can be // statically known here that `callee` cannot be null in which case this @@ -1982,12 +2049,12 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { /// special callee/caller vmctxs. It is used by both call_indirect (which /// checks the signature) and call_ref (which doesn't). 
fn unchecked_call( - &mut self, + mut self, sig_ref: ir::SigRef, callee: ir::Value, callee_load_trap_code: Option, call_args: &[ir::Value], - ) -> WasmResult { + ) -> WasmResult { let (func_addr, callee_vmctx) = self.load_code_and_vmctx(callee, callee_load_trap_code); self.unchecked_call_impl(sig_ref, func_addr, callee_vmctx, call_args) } @@ -2031,22 +2098,25 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { (func_addr, callee_vmctx) } + fn caller_vmctx(&self) -> ir::Value { + self.builder + .func + .special_param(ArgumentPurpose::VMContext) + .unwrap() + } + /// This calls a function by reference without checking the /// signature, given the raw code pointer to the /// Wasm-calling-convention entry point and the callee vmctx. fn unchecked_call_impl( - &mut self, + mut self, sig_ref: ir::SigRef, func_addr: ir::Value, callee_vmctx: ir::Value, call_args: &[ir::Value], - ) -> WasmResult { + ) -> WasmResult { let mut real_call_args = Vec::with_capacity(call_args.len() + 2); - let caller_vmctx = self - .builder - .func - .special_param(ArgumentPurpose::VMContext) - .unwrap(); + let caller_vmctx = self.caller_vmctx(); // First append the callee and caller vmctx addresses. 
real_call_args.push(callee_vmctx); @@ -2058,28 +2128,86 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { Ok(self.indirect_call_inst(sig_ref, func_addr, &real_call_args)) } - fn direct_call_inst(&mut self, callee: ir::FuncRef, args: &[ir::Value]) -> ir::Inst { - if self.tail { - self.builder.ins().return_call(callee, args) - } else { - let inst = self.builder.ins().call(callee, args); - let results: SmallVec<[_; 4]> = self + fn exception_table( + &mut self, + sig: ir::SigRef, + ) -> Option<(ir::ExceptionTable, Block, CallRets)> { + if self.handlers.len() > 0 { + let continuation_block = self.builder.create_block(); + let mut args = vec![]; + let mut results = smallvec![]; + for i in 0..self.builder.func.dfg.signatures[sig].returns.len() { + let ty = self.builder.func.dfg.signatures[sig].returns[i].value_type; + results.push( + self.builder + .func + .dfg + .append_block_param(continuation_block, ty), + ); + args.push(BlockArg::TryCallRet(u32::try_from(i).unwrap())); + } + + let continuation = self .builder .func .dfg - .inst_results(inst) - .iter() - .copied() - .collect(); - for (i, val) in results.into_iter().enumerate() { - if self - .env - .func_ref_result_needs_stack_map(&self.builder.func, callee, i) - { - self.builder.declare_value_needs_stack_map(val); - } + .block_call(continuation_block, args.iter()); + let mut handlers = vec![ExceptionTableItem::Context(self.caller_vmctx())]; + for (tag, block) in &self.handlers { + let block_call = self + .builder + .func + .dfg + .block_call(*block, &[BlockArg::TryCallExn(0)]); + handlers.push(match tag { + Some(tag) => ExceptionTableItem::Tag(*tag, block_call), + None => ExceptionTableItem::Default(block_call), + }); + } + let etd = ExceptionTableData::new(sig, continuation, handlers); + let et = self.builder.func.dfg.exception_tables.push(etd); + Some((et, continuation_block, results)) + } else { + None + } + } + + fn results_from_call_inst(&self, inst: ir::Inst) -> CallRets { + self.builder + .func + 
.dfg + .inst_results(inst) + .iter() + .copied() + .collect() + } + + fn handle_call_result_stackmap(&mut self, results: &[ir::Value], sig_ref: ir::SigRef) { + for (i, &val) in results.iter().enumerate() { + if self.env.sig_ref_result_needs_stack_map(sig_ref, i) { + self.builder.declare_value_needs_stack_map(val); } - inst + } + } + + fn direct_call_inst(&mut self, callee: ir::FuncRef, args: &[ir::Value]) -> CallRets { + let sig_ref = self.builder.func.dfg.ext_funcs[callee].signature; + if self.tail { + self.builder.ins().return_call(callee, args); + smallvec![] + } else if let Some((exception_table, continuation_block, results)) = + self.exception_table(sig_ref) + { + self.builder.ins().try_call(callee, args, exception_table); + self.handle_call_result_stackmap(&results, sig_ref); + self.builder.switch_to_block(continuation_block); + self.builder.seal_block(continuation_block); + results + } else { + let inst = self.builder.ins().call(callee, args); + let results = self.results_from_call_inst(inst); + self.handle_call_result_stackmap(&results, sig_ref); + results } } @@ -2088,27 +2216,27 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { sig_ref: ir::SigRef, func_addr: ir::Value, args: &[ir::Value], - ) -> ir::Inst { + ) -> CallRets { if self.tail { self.builder .ins() - .return_call_indirect(sig_ref, func_addr, args) + .return_call_indirect(sig_ref, func_addr, args); + smallvec![] + } else if let Some((exception_table, continuation_block, results)) = + self.exception_table(sig_ref) + { + self.builder + .ins() + .try_call_indirect(func_addr, args, exception_table); + self.handle_call_result_stackmap(&results, sig_ref); + self.builder.switch_to_block(continuation_block); + self.builder.seal_block(continuation_block); + results } else { let inst = self.builder.ins().call_indirect(sig_ref, func_addr, args); - let results: SmallVec<[_; 4]> = self - .builder - .func - .dfg - .inst_results(inst) - .iter() - .copied() - .collect(); - for (i, val) in 
results.into_iter().enumerate() { - if self.env.sig_ref_result_needs_stack_map(sig_ref, i) { - self.builder.declare_value_needs_stack_map(val); - } - } - inst + let results = self.results_from_call_inst(inst); + self.handle_call_result_stackmap(&results, sig_ref); + results } } } @@ -2182,17 +2310,6 @@ impl FuncEnvironment<'_> { wasm_func_ty.returns()[index].is_vmgcref_type_and_not_i31() } - pub fn func_ref_result_needs_stack_map( - &self, - func: &ir::Function, - func_ref: ir::FuncRef, - index: usize, - ) -> bool { - let sig_ref = func.dfg.ext_funcs[func_ref].signature; - let wasm_func_ty = self.sig_ref_to_ty[sig_ref].as_ref().unwrap(); - wasm_func_ty.returns()[index].is_vmgcref_type_and_not_i31() - } - pub fn translate_table_grow( &mut self, builder: &mut FunctionBuilder<'_>, @@ -2431,6 +2548,34 @@ impl FuncEnvironment<'_> { ) } + pub fn translate_exn_unbox( + &mut self, + builder: &mut FunctionBuilder<'_>, + tag_index: TagIndex, + exn_ref: ir::Value, + ) -> WasmResult> { + gc::translate_exn_unbox(self, builder, tag_index, exn_ref) + } + + pub fn translate_exn_throw( + &mut self, + builder: &mut FunctionBuilder<'_>, + tag_index: TagIndex, + args: &[ir::Value], + handlers: impl IntoIterator, Block)>, + ) -> WasmResult<()> { + gc::translate_exn_throw(self, builder, tag_index, args, handlers) + } + + pub fn translate_exn_throw_ref( + &mut self, + builder: &mut FunctionBuilder<'_>, + exnref: ir::Value, + handlers: impl IntoIterator, Block)>, + ) -> WasmResult<()> { + gc::translate_exn_throw_ref(self, builder, exnref, handlers) + } + pub fn translate_array_new( &mut self, builder: &mut FunctionBuilder, @@ -2781,17 +2926,18 @@ impl FuncEnvironment<'_> { Ok(()) } - pub fn translate_call_indirect( + pub fn translate_call_indirect<'a>( &mut self, - builder: &mut FunctionBuilder, + builder: &'a mut FunctionBuilder, features: &WasmFeatures, table_index: TableIndex, ty_index: TypeIndex, sig_ref: ir::SigRef, callee: ir::Value, call_args: &[ir::Value], - ) -> WasmResult> { - 
Call::new(builder, self).indirect_call( + handlers: impl IntoIterator, Block)>, + ) -> WasmResult> { + Call::new(builder, self, handlers).indirect_call( features, table_index, ty_index, @@ -2801,24 +2947,26 @@ impl FuncEnvironment<'_> { ) } - pub fn translate_call( + pub fn translate_call<'a>( &mut self, - builder: &mut FunctionBuilder, + builder: &'a mut FunctionBuilder, callee_index: FuncIndex, sig_ref: ir::SigRef, call_args: &[ir::Value], - ) -> WasmResult { - Call::new(builder, self).direct_call(callee_index, sig_ref, call_args) + handlers: impl IntoIterator, Block)>, + ) -> WasmResult { + Call::new(builder, self, handlers).direct_call(callee_index, sig_ref, call_args) } - pub fn translate_call_ref( + pub fn translate_call_ref<'a>( &mut self, - builder: &mut FunctionBuilder, + builder: &'a mut FunctionBuilder, sig_ref: ir::SigRef, callee: ir::Value, call_args: &[ir::Value], - ) -> WasmResult { - Call::new(builder, self).call_ref(sig_ref, callee, call_args) + handlers: impl IntoIterator, Block)>, + ) -> WasmResult { + Call::new(builder, self, handlers).call_ref(sig_ref, callee, call_args) } pub fn translate_return_call( diff --git a/crates/cranelift/src/func_environ/gc.rs b/crates/cranelift/src/func_environ/gc.rs index 157c8dcac99a..87cc59a212e3 100644 --- a/crates/cranelift/src/func_environ/gc.rs +++ b/crates/cranelift/src/func_environ/gc.rs @@ -8,7 +8,7 @@ use crate::func_environ::FuncEnvironment; use cranelift_codegen::ir; use cranelift_frontend::FunctionBuilder; -use wasmtime_environ::{GcTypeLayouts, TypeIndex, WasmRefType, WasmResult}; +use wasmtime_environ::{GcTypeLayouts, TagIndex, TypeIndex, WasmRefType, WasmResult}; #[cfg(feature = "gc")] mod enabled; @@ -67,6 +67,24 @@ pub trait GcCompiler { fields: &[ir::Value], ) -> WasmResult; + /// Emit code to allocate a new exception object. + /// + /// The exception object should be of the given type and its + /// fields initialized to the given values. 
The tag field is left + /// uninitialized; that is the responsibility of generated code to + /// fill in. `tag_index` is used only to look up the appropriate + /// exception object type. + #[cfg_attr(not(feature = "gc"), allow(dead_code))] + fn alloc_exn( + &mut self, + func_env: &mut FuncEnvironment<'_>, + builder: &mut FunctionBuilder<'_>, + tag_index: TagIndex, + fields: &[ir::Value], + instance_id: ir::Value, + tag: ir::Value, + ) -> WasmResult; + /// Emit a read barrier for when we are cloning a GC reference onto the Wasm /// stack. /// diff --git a/crates/cranelift/src/func_environ/gc/disabled.rs b/crates/cranelift/src/func_environ/gc/disabled.rs index ee020047615d..9a0fb4e036ff 100644 --- a/crates/cranelift/src/func_environ/gc/disabled.rs +++ b/crates/cranelift/src/func_environ/gc/disabled.rs @@ -4,7 +4,8 @@ use super::GcCompiler; use crate::func_environ::{Extension, FuncEnvironment}; use cranelift_codegen::ir; use cranelift_frontend::FunctionBuilder; -use wasmtime_environ::{TypeIndex, WasmRefType, WasmResult, wasm_unsupported}; +use smallvec::SmallVec; +use wasmtime_environ::{TagIndex, TypeIndex, WasmRefType, WasmResult, wasm_unsupported}; fn disabled() -> WasmResult { Err(wasm_unsupported!( @@ -57,6 +58,34 @@ pub fn translate_struct_set( disabled() } +pub fn translate_exn_unbox( + _func_env: &mut FuncEnvironment<'_>, + _builder: &mut FunctionBuilder<'_>, + _tag_index: TagIndex, + _exn_ref: ir::Value, +) -> WasmResult> { + disabled() +} + +pub fn translate_exn_throw( + _func_env: &mut FuncEnvironment<'_>, + _builder: &mut FunctionBuilder<'_>, + _tag_index: TagIndex, + _args: &[ir::Value], + _handlers: impl IntoIterator, ir::Block)>, +) -> WasmResult<()> { + disabled() +} + +pub fn translate_exn_throw_ref( + _func_env: &mut FuncEnvironment<'_>, + _builder: &mut FunctionBuilder<'_>, + _exnref: ir::Value, + _handlers: impl IntoIterator, ir::Block)>, +) -> WasmResult<()> { + disabled() +} + pub fn translate_array_new( _func_env: &mut FuncEnvironment<'_>, 
_builder: &mut FunctionBuilder, diff --git a/crates/cranelift/src/func_environ/gc/enabled.rs b/crates/cranelift/src/func_environ/gc/enabled.rs index 59b2f036b1e7..e82ec7649a01 100644 --- a/crates/cranelift/src/func_environ/gc/enabled.rs +++ b/crates/cranelift/src/func_environ/gc/enabled.rs @@ -4,17 +4,20 @@ use crate::func_environ::{Extension, FuncEnvironment}; use crate::translate::{Heap, HeapData, StructFieldsVec, TargetEnvironment}; use crate::{Reachability, TRAP_INTERNAL_ASSERT}; use cranelift_codegen::ir::immediates::Offset32; +use cranelift_codegen::ir::{ + Block, BlockArg, ExceptionTableData, ExceptionTableItem, ExceptionTag, +}; use cranelift_codegen::{ cursor::FuncCursor, ir::{self, InstBuilder, condcodes::IntCC}, }; use cranelift_entity::packed_option::ReservedValue; use cranelift_frontend::FunctionBuilder; -use smallvec::SmallVec; +use smallvec::{SmallVec, smallvec}; use wasmtime_environ::{ Collector, GcArrayLayout, GcLayout, GcStructLayout, I31_DISCRIMINANT, ModuleInternedTypeIndex, - PtrSize, TypeIndex, VMGcKind, WasmHeapTopType, WasmHeapType, WasmRefType, WasmResult, - WasmStorageType, WasmValType, wasm_unsupported, + PtrSize, TagIndex, TypeIndex, VMGcKind, WasmCompositeInnerType, WasmHeapTopType, WasmHeapType, + WasmRefType, WasmResult, WasmStorageType, WasmValType, wasm_unsupported, }; #[cfg(feature = "gc-drc")] @@ -332,7 +335,7 @@ pub fn translate_struct_get( let field_index = usize::try_from(field_index).unwrap(); let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index(); - let struct_layout = func_env.struct_layout(interned_type_index); + let struct_layout = func_env.struct_or_exn_layout(interned_type_index); let struct_size = struct_layout.size; let field_offset = struct_layout.fields[field_index].offset; @@ -379,7 +382,7 @@ pub fn translate_struct_set( let field_index = usize::try_from(field_index).unwrap(); let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index(); - let 
struct_layout = func_env.struct_layout(interned_type_index); + let struct_layout = func_env.struct_or_exn_layout(interned_type_index); let struct_size = struct_layout.size; let field_offset = struct_layout.fields[field_index].offset; @@ -409,6 +412,119 @@ pub fn translate_struct_set( Ok(()) } +pub fn translate_exn_unbox( + func_env: &mut FuncEnvironment<'_>, + builder: &mut FunctionBuilder<'_>, + tag_index: TagIndex, + exn_ref: ir::Value, +) -> WasmResult> { + log::trace!("translate_exn_unbox({tag_index:?}, {exn_ref:?})"); + + // We know that the `exn_ref` is not null because we reach this + // operation only in catch blocks, and throws are initiated from + // runtime code that checks for nulls first. + + // Get the GcExceptionLayout associated with this tag's + // function type, and generate loads for each field. + let exception_ty_idx = func_env + .exception_type_from_tag(tag_index) + .unwrap_module_type_index(); + let exception_ty = func_env.types.unwrap_exn(exception_ty_idx)?; + let exn_layout = func_env.struct_or_exn_layout(exception_ty_idx); + let exn_size = exn_layout.size; + + // Gather accesses first because these require a borrow on + // `func_env`, which we later mutate below via + // `prepare_gc_ref_access()`. 
+ let mut accesses: SmallVec<[_; 4]> = smallvec![]; + for (field_ty, field_layout) in exception_ty.fields.iter().zip(exn_layout.fields.iter()) { + accesses.push((field_layout.offset, field_ty.element_type)); + } + + let mut result = smallvec![]; + for (field_offset, field_ty) in accesses { + let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty); + assert!(field_offset + field_size <= exn_size); + let field_addr = func_env.prepare_gc_ref_access( + builder, + exn_ref, + BoundsCheck::StaticObjectField { + offset: field_offset, + access_size: u8::try_from(field_size).unwrap(), + object_size: exn_size, + }, + ); + + let value = read_field_at_addr(func_env, builder, field_ty, field_addr, None)?; + result.push(value); + } + + log::trace!("translate_exn_unbox(..) -> {result:?}"); + Ok(result) +} + +pub fn translate_exn_throw( + func_env: &mut FuncEnvironment<'_>, + builder: &mut FunctionBuilder<'_>, + tag_index: TagIndex, + args: &[ir::Value], + handlers: impl IntoIterator, Block)>, +) -> WasmResult<()> { + let (instance_id, defined_tag_id) = func_env.get_instance_and_tag(builder, tag_index); + let exnref = gc_compiler(func_env)?.alloc_exn( + func_env, + builder, + tag_index, + args, + instance_id, + defined_tag_id, + )?; + translate_exn_throw_ref(func_env, builder, exnref, handlers) +} + +pub fn translate_exn_throw_ref( + func_env: &mut FuncEnvironment<'_>, + builder: &mut FunctionBuilder<'_>, + exnref: ir::Value, + handlers: impl IntoIterator, Block)>, +) -> WasmResult<()> { + let builtin = func_env.builtin_functions.throw_ref(builder.func); + let sig = builder.func.dfg.ext_funcs[builtin].signature; + let vmctx = func_env.vmctx_val(&mut builder.cursor()); + + // Generate a `try_call` with handlers from the current + // stack. This libcall is unique among libcall implementations of + // opcodes: we know the others will not throw, but `throw_ref`'s + // entire purpose is to throw. 
So if there are any handlers in the + // local function body, we need to attach them to this callsite + // like any other. + let continuation = builder.create_block(); + let current_block = builder.current_block().unwrap(); + builder.insert_block_after(continuation, current_block); + let continuation_call = builder.func.dfg.block_call(continuation, &[]); + let mut table_items = vec![ExceptionTableItem::Context(vmctx)]; + for (tag, block) in handlers { + let block_call = builder + .func + .dfg + .block_call(block, &[BlockArg::TryCallExn(0)]); + table_items.push(match tag { + Some(tag) => ExceptionTableItem::Tag(tag, block_call), + None => ExceptionTableItem::Default(block_call), + }); + } + let etd = ExceptionTableData::new(sig, continuation_call, table_items); + let et = builder.func.dfg.exception_tables.push(etd); + + builder.ins().try_call(builtin, &[vmctx, exnref], et); + + builder.switch_to_block(continuation); + builder.seal_block(continuation); + func_env.trap(builder, crate::TRAP_UNREACHABLE); + + Ok(()) +} + pub fn translate_array_new( func_env: &mut FuncEnvironment<'_>, builder: &mut FunctionBuilder, @@ -1219,17 +1335,19 @@ fn initialize_struct_fields( ir::Value, ) -> WasmResult<()>, ) -> WasmResult<()> { - let struct_layout = func_env.struct_layout(struct_ty); + let struct_layout = func_env.struct_or_exn_layout(struct_ty); let struct_size = struct_layout.size; let field_offsets: SmallVec<[_; 8]> = struct_layout.fields.iter().map(|f| f.offset).collect(); assert_eq!(field_offsets.len(), field_values.len()); assert!(!func_env.types[struct_ty].composite_type.shared); - let struct_ty = func_env.types[struct_ty] - .composite_type - .inner - .unwrap_struct(); - let field_types: SmallVec<[_; 8]> = struct_ty.fields.iter().cloned().collect(); + let fields = match &func_env.types[struct_ty].composite_type.inner { + WasmCompositeInnerType::Struct(s) => &s.fields, + WasmCompositeInnerType::Exn(e) => &e.fields, + _ => panic!("Not a struct or exception type"), + }; + + 
let field_types: SmallVec<[_; 8]> = fields.iter().cloned().collect(); assert_eq!(field_types.len(), field_values.len()); for ((ty, val), offset) in field_types.into_iter().zip(field_values).zip(field_offsets) { @@ -1263,9 +1381,10 @@ impl FuncEnvironment<'_> { self.gc_layout(type_index).unwrap_array() } - /// Get the `GcStructLayout` for the struct type at the given `type_index`. - fn struct_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcStructLayout { - self.gc_layout(type_index).unwrap_struct() + /// Get the `GcStructLayout` for the struct or exception type at the given `type_index`. + fn struct_or_exn_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcStructLayout { + let result = self.gc_layout(type_index).unwrap_struct(); + result } /// Get or create the global for our GC heap's base pointer. diff --git a/crates/cranelift/src/func_environ/gc/enabled/drc.rs b/crates/cranelift/src/func_environ/gc/enabled/drc.rs index 322bd1d4b537..d0abb2ee53b6 100644 --- a/crates/cranelift/src/func_environ/gc/enabled/drc.rs +++ b/crates/cranelift/src/func_environ/gc/enabled/drc.rs @@ -8,6 +8,7 @@ use cranelift_codegen::ir::condcodes::IntCC; use cranelift_codegen::ir::{self, InstBuilder}; use cranelift_frontend::FunctionBuilder; use smallvec::SmallVec; +use wasmtime_environ::drc::{EXCEPTION_TAG_DEFINED_OFFSET, EXCEPTION_TAG_INSTANCE_OFFSET}; use wasmtime_environ::{ GcTypeLayouts, ModuleInternedTypeIndex, PtrSize, TypeIndex, VMGcKind, WasmHeapTopType, WasmHeapType, WasmRefType, WasmResult, WasmStorageType, WasmValType, drc::DrcTypeLayouts, @@ -452,12 +453,9 @@ impl GcCompiler for DrcCompiler { struct_type_index: TypeIndex, field_vals: &[ir::Value], ) -> WasmResult { - // First, call the `gc_alloc_raw` builtin libcall to allocate the - // struct. 
let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index(); - - let struct_layout = func_env.struct_layout(interned_type_index); + let struct_layout = func_env.struct_or_exn_layout(interned_type_index); // Copy some stuff out of the struct layout to avoid borrowing issues. let struct_size = struct_layout.size; @@ -498,6 +496,82 @@ impl GcCompiler for DrcCompiler { Ok(struct_ref) } + fn alloc_exn( + &mut self, + func_env: &mut FuncEnvironment<'_>, + builder: &mut FunctionBuilder<'_>, + tag_index: TagIndex, + field_vals: &[ir::Value], + instance_id: ir::Value, + tag: ir::Value, + ) -> WasmResult<ir::Value> { + let interned_type_index = func_env.module.tags[tag_index] + .exception + .unwrap_module_type_index(); + let exn_layout = func_env.struct_or_exn_layout(interned_type_index); + + // Copy some stuff out of the exception layout to avoid borrowing issues. + let exn_size = exn_layout.size; + let exn_align = exn_layout.align; + let field_offsets: SmallVec<[_; 8]> = exn_layout.fields.iter().copied().collect(); + assert_eq!(field_vals.len(), field_offsets.len()); + + let exn_size_val = builder.ins().iconst(ir::types::I32, i64::from(exn_size)); + + let exn_ref = emit_gc_raw_alloc( + func_env, + builder, + VMGcKind::ExnRef, + interned_type_index, + exn_size_val, + exn_align, + ); + + // Second, initialize each of the newly-allocated exception + // object's fields. + // + // Note: we don't need to bounds-check the GC ref access here, since we + // trust the results of the allocation libcall.
+ let base = func_env.get_gc_heap_base(builder); + let extended_exn_ref = + uextend_i32_to_pointer_type(builder, func_env.pointer_type(), exn_ref); + let raw_ptr_to_exn = builder.ins().iadd(base, extended_exn_ref); + initialize_struct_fields( + func_env, + builder, + interned_type_index, + raw_ptr_to_exn, + field_vals, + |func_env, builder, ty, field_addr, val| { + self.init_field(func_env, builder, field_addr, ty, val) + }, + )?; + + // Finally, initialize the tag fields. + let instance_id_addr = builder + .ins() + .iadd_imm(raw_ptr_to_exn, i64::from(EXCEPTION_TAG_INSTANCE_OFFSET)); + self.init_field( + func_env, + builder, + instance_id_addr, + WasmStorageType::Val(WasmValType::I32), + instance_id, + )?; + let tag_addr = builder + .ins() + .iadd_imm(raw_ptr_to_exn, i64::from(EXCEPTION_TAG_DEFINED_OFFSET)); + self.init_field( + func_env, + builder, + tag_addr, + WasmStorageType::Val(WasmValType::I32), + tag, + )?; + + Ok(exn_ref) + } + fn translate_read_gc_reference( &mut self, func_env: &mut FuncEnvironment<'_>, diff --git a/crates/cranelift/src/func_environ/gc/enabled/null.rs b/crates/cranelift/src/func_environ/gc/enabled/null.rs index 1a48aff422a1..a0e938d55c0f 100644 --- a/crates/cranelift/src/func_environ/gc/enabled/null.rs +++ b/crates/cranelift/src/func_environ/gc/enabled/null.rs @@ -9,6 +9,7 @@ use crate::func_environ::FuncEnvironment; use cranelift_codegen::ir::{self, InstBuilder}; use cranelift_frontend::FunctionBuilder; use wasmtime_environ::VMSharedTypeIndex; +use wasmtime_environ::null::{EXCEPTION_TAG_DEFINED_OFFSET, EXCEPTION_TAG_INSTANCE_OFFSET}; use wasmtime_environ::{ GcTypeLayouts, ModuleInternedTypeIndex, PtrSize, TypeIndex, VMGcKind, WasmRefType, WasmResult, null::NullTypeLayouts, @@ -252,7 +253,7 @@ impl GcCompiler for NullCompiler { ) -> WasmResult { let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index(); - let struct_layout = func_env.struct_layout(interned_type_index); + let struct_layout = 
func_env.struct_or_exn_layout(interned_type_index); // Copy some stuff out of the struct layout to avoid borrowing issues. let struct_size = struct_layout.size; @@ -294,6 +295,80 @@ impl GcCompiler for NullCompiler { Ok(struct_ref) } + fn alloc_exn( + &mut self, + func_env: &mut FuncEnvironment<'_>, + builder: &mut FunctionBuilder<'_>, + tag_index: TagIndex, + field_vals: &[ir::Value], + instance_id: ir::Value, + tag: ir::Value, + ) -> WasmResult { + let interned_type_index = func_env.module.tags[tag_index] + .exception + .unwrap_module_type_index(); + let exn_layout = func_env.struct_or_exn_layout(interned_type_index); + + // Copy some stuff out of the exception layout to avoid borrowing issues. + let exn_size = exn_layout.size; + let exn_align = exn_layout.align; + + assert_eq!(VMGcKind::MASK & exn_size, 0); + assert_eq!(VMGcKind::UNUSED_MASK & exn_size, exn_size); + let exn_size_val = builder.ins().iconst(ir::types::I32, i64::from(exn_size)); + + let align = builder.ins().iconst(ir::types::I32, i64::from(exn_align)); + + let (exn_ref, raw_exn_pointer) = self.emit_inline_alloc( + func_env, + builder, + VMGcKind::ExnRef, + Some(interned_type_index), + exn_size_val, + align, + ); + + // Initialize the exception object's fields. + // + // Note: we don't need to bounds-check the GC ref access here, because + // the result of the inline allocation is trusted and we aren't reading + // any pointers or offsets out from the (untrusted) GC heap. + initialize_struct_fields( + func_env, + builder, + interned_type_index, + raw_exn_pointer, + field_vals, + |func_env, builder, ty, field_addr, val| { + write_field_at_addr(func_env, builder, ty, field_addr, val) + }, + )?; + + // Initialize the tag fields. 
+ let instance_id_addr = builder + .ins() + .iadd_imm(raw_exn_pointer, i64::from(EXCEPTION_TAG_INSTANCE_OFFSET)); + write_field_at_addr( + func_env, + builder, + WasmStorageType::Val(WasmValType::I32), + instance_id_addr, + instance_id, + )?; + let tag_addr = builder + .ins() + .iadd_imm(raw_exn_pointer, i64::from(EXCEPTION_TAG_DEFINED_OFFSET)); + write_field_at_addr( + func_env, + builder, + WasmStorageType::Val(WasmValType::I32), + tag_addr, + tag, + )?; + + Ok(exn_ref) + } + fn translate_read_gc_reference( &mut self, _func_env: &mut FuncEnvironment<'_>, diff --git a/crates/cranelift/src/translate/code_translator.rs b/crates/cranelift/src/translate/code_translator.rs index f1171685709a..23ffcd06fd25 100644 --- a/crates/cranelift/src/translate/code_translator.rs +++ b/crates/cranelift/src/translate/code_translator.rs @@ -74,6 +74,7 @@ use crate::Reachability; use crate::bounds_checks::{BoundsCheck, bounds_check_and_compute_addr}; use crate::func_environ::{Extension, FuncEnvironment}; +use crate::translate::TargetEnvironment; use crate::translate::environ::StructFieldsVec; use crate::translate::stack::{ControlStackFrame, ElseData, FuncTranslationStacks}; use crate::translate::translation_utils::{ @@ -82,7 +83,7 @@ use crate::translate::translation_utils::{ use cranelift_codegen::ir::condcodes::{FloatCC, IntCC}; use cranelift_codegen::ir::immediates::Offset32; use cranelift_codegen::ir::{ - self, AtomicRmwOp, InstBuilder, JumpTableData, MemFlags, Value, ValueLabel, + self, AtomicRmwOp, ExceptionTag, InstBuilder, JumpTableData, MemFlags, Value, ValueLabel, }; use cranelift_codegen::ir::{BlockArg, types::*}; use cranelift_codegen::packed_option::ReservedValue; @@ -93,8 +94,8 @@ use std::collections::{HashMap, hash_map}; use std::vec::Vec; use wasmparser::{FuncValidator, MemArg, Operator, WasmModuleResources}; use wasmtime_environ::{ - DataIndex, ElemIndex, FuncIndex, GlobalIndex, MemoryIndex, TableIndex, TypeConvert, TypeIndex, - WasmRefType, WasmResult, WasmValType, 
wasm_unsupported, + DataIndex, ElemIndex, FuncIndex, GlobalIndex, MemoryIndex, TableIndex, TagIndex, TypeConvert, + TypeIndex, WasmHeapType, WasmRefType, WasmResult, WasmValType, wasm_unsupported, }; /// Given a `Reachability`, unwrap the inner `T` or, when unreachable, set @@ -424,6 +425,8 @@ pub fn translate_operator( builder.seal_block(header) } + frame.restore_catch_handlers(&mut stack.handlers, builder); + frame.truncate_value_stack_to_original_size(&mut stack.stack); stack .stack @@ -569,18 +572,64 @@ pub fn translate_operator( stack.popn(return_count); stack.reachable = false; } - /********************************** Exception handing **********************************/ - Operator::Try { .. } - | Operator::Catch { .. } - | Operator::Throw { .. } + /********************************** Exception handling **********************************/ + Operator::Catch { .. } | Operator::Rethrow { .. } | Operator::Delegate { .. } | Operator::CatchAll => { return Err(wasm_unsupported!( - "proposed exception handling operator {:?}", - op + "legacy exception handling proposal is not supported" )); } + + Operator::TryTable { try_table } => { + // First, create a block on the control stack. This also + // updates the handler state that is attached to all calls + // made within this block. + let body = builder.create_block(); + let (params, results) = blocktype_params_results(validator, try_table.ty)?; + let next = block_with_params(builder, results.clone(), environ)?; + builder.ins().jump(body, []); + builder.seal_block(body); + + // For each catch clause, create a block with the + // equivalent of `br` to the target (unboxing the exnref + // into stack values or pushing it directly, depending on + // the kind of clause). + let ckpt = stack.handlers.take_checkpoint(); + let mut catch_blocks = vec![]; + // Process in *reverse* order: see the comment on + // [`HandlerState`]. 
In brief, this allows us to unify the + // left-to-right matching semantics of a single + // `try_table`'s catch clauses with the inside-out + // (deepest scope first) semantics of nested `try_table`s. + for catch in try_table.catches.iter().rev() { + // This will register the block in `state.handlers` + // under the appropriate tag. + catch_blocks.push(create_catch_block(builder, stack, catch, environ)?); + } + + stack.push_try_table_block(next, catch_blocks, params.len(), results.len(), ckpt); + + // Continue codegen into the main body block. + builder.switch_to_block(body); + } + + Operator::Throw { tag_index } => { + let tag_index = TagIndex::from_u32(*tag_index); + let arity = environ.tag_param_arity(tag_index); + let args = stack.peekn(arity); + environ.translate_exn_throw(builder, tag_index, args, stack.handlers.handlers())?; + stack.popn(arity); + stack.reachable = false; + } + + Operator::ThrowRef => { + let exnref = stack.pop1(); + environ.translate_exn_throw_ref(builder, exnref, stack.handlers.handlers())?; + stack.reachable = false; + } + /************************************ Calls **************************************** * The call instructions pop off their arguments from the stack and append their * return values to it. `call_indirect` needs environment support because there is an @@ -597,16 +646,23 @@ pub fn translate_operator( // Bitcast any vector arguments to their default type, I8X16, before calling. let args = stack.peekn_mut(num_args); bitcast_wasm_params(environ, sig_ref, args, builder); + let args = stack.peekn(num_args); // Re-borrow immutably. 
+ + let inst_results = environ.translate_call( + builder, + function_index, + sig_ref, + args, + stack.handlers.handlers(), + )?; - let call = environ.translate_call(builder, function_index, sig_ref, args)?; - let inst_results = builder.inst_results(call); debug_assert_eq!( inst_results.len(), builder.func.dfg.signatures[sig_ref].returns.len(), "translate_call results should match the call signature" ); stack.popn(num_args); - stack.pushn(inst_results); + stack.pushn(&inst_results); } Operator::CallIndirect { type_index, @@ -624,7 +680,7 @@ pub fn translate_operator( let args = stack.peekn_mut(num_args); bitcast_wasm_params(environ, sigref, args, builder); - let call = environ.translate_call_indirect( + let inst_results = environ.translate_call_indirect( builder, validator.features(), TableIndex::from_u32(*table_index), @@ -632,22 +688,23 @@ pub fn translate_operator( sigref, callee, stack.peekn(num_args), + stack.handlers.handlers(), )?; - let call = match call { - Some(call) => call, + let inst_results = match inst_results { + Some(results) => results, None => { stack.reachable = false; return Ok(()); } }; - let inst_results = builder.inst_results(call); + debug_assert_eq!( inst_results.len(), builder.func.dfg.signatures[sigref].returns.len(), "translate_call_indirect results should match the call signature" ); stack.popn(num_args); - stack.pushn(inst_results); + stack.pushn(&inst_results); } /******************************* Tail Calls ****************************************** * The tail call instructions pop their arguments from the stack and @@ -2457,17 +2514,21 @@ pub fn translate_operator( let args = stack.peekn_mut(num_args); bitcast_wasm_params(environ, sigref, args, builder); - let call = - environ.translate_call_ref(builder, sigref, callee, stack.peekn(num_args))?; + let inst_results = environ.translate_call_ref( + builder, + sigref, + callee, + stack.peekn(num_args), + stack.handlers.handlers(), + )?; - let inst_results = builder.inst_results(call); 
debug_assert_eq!( inst_results.len(), builder.func.dfg.signatures[sigref].returns.len(), "translate_call_ref results should match the call signature" ); stack.popn(num_args); - stack.pushn(inst_results); + stack.pushn(&inst_results); } Operator::RefAsNonNull => { let r = stack.pop1(); @@ -2574,12 +2635,6 @@ pub fn translate_operator( stack.push1(val); } - Operator::TryTable { .. } | Operator::ThrowRef => { - return Err(wasm_unsupported!( - "exception operators are not yet implemented" - )); - } - Operator::ArrayNew { array_type_index } => { let array_type_index = TypeIndex::from_u32(*array_type_index); let (elem, len) = stack.pop2(); @@ -3106,6 +3161,8 @@ fn translate_unreachable_operator( let control_stack = &mut stack.control_stack; let frame = control_stack.pop().unwrap(); + frame.restore_catch_handlers(&mut stack.handlers, builder); + // Pop unused parameters from stack. frame.truncate_value_stack_to_original_size(value_stack); @@ -4132,3 +4189,75 @@ fn bitcast_wasm_params( *arg = builder.ins().bitcast(t, flags, *arg); } } + +fn create_catch_block( + builder: &mut FunctionBuilder, + stacks: &mut FuncTranslationStacks, + catch: &wasmparser::Catch, + environ: &mut FuncEnvironment<'_>, +) -> WasmResult { + let (is_ref, tag, label) = match catch { + wasmparser::Catch::One { tag, label } => (false, Some(*tag), *label), + wasmparser::Catch::OneRef { tag, label } => (true, Some(*tag), *label), + wasmparser::Catch::All { label } => (false, None, *label), + wasmparser::Catch::AllRef { label } => (true, None, *label), + }; + + // We always create a handler block with one blockparam for the + // one exception payload value that we use (`exn0` block-call + // argument). This one payload value is the `exnref`. Note, + // however, that we carry it in a native host-pointer-sized + // payload (because this is what the exception ABI in Cranelift + // requires). 
We then generate the args for the actual branch to + // the handler block: we add unboxing code to load each value in + // the exception signature if a specific tag is expected (hence + // signature is known), and then append the `exnref` itself if we + // are compiling a `*Ref` variant. + + let (exn_ref_ty, needs_stack_map) = environ.reference_type(WasmHeapType::Exn); + let (exn_payload_wasm_ty, exn_payload_ty) = match environ.pointer_type().bits() { + 32 => (wasmparser::ValType::I32, I32), + 64 => (wasmparser::ValType::I64, I64), + _ => panic!("Unsupported pointer width"), + }; + let block = block_with_params(builder, [exn_payload_wasm_ty], environ)?; + builder.switch_to_block(block); + let exn_ref = builder.func.dfg.block_params(block)[0]; + debug_assert!(exn_ref_ty.bits() <= exn_payload_ty.bits()); + let exn_ref = if exn_ref_ty.bits() < exn_payload_ty.bits() { + builder.ins().ireduce(exn_ref_ty, exn_ref) + } else { + exn_ref + }; + + if needs_stack_map { + builder.declare_value_needs_stack_map(exn_ref); + } + + // We encode tag indices from the module directly as Cranelift + // `ExceptionTag`s. We will translate those to (instance, + // defined-tag-index) pairs during the unwind walk -- necessarily + // dynamic because tag imports are provided only at instantiation + // time. + let clif_tag = tag.map(|t| ExceptionTag::from_u32(t)); + + stacks.handlers.add_handler(clif_tag, block); + + let mut params = vec![]; + + if let Some(tag) = tag { + let tag = TagIndex::from_u32(tag); + params.extend(environ.translate_exn_unbox(builder, tag, exn_ref)?); + } + if is_ref { + params.push(exn_ref); + } + + // Generate the branch itself. 
+ let i = stacks.control_stack.len() - 1 - (label as usize); + let frame = &mut stacks.control_stack[i]; + frame.set_branched_to_exit(); + canonicalise_then_jump(builder, frame.br_destination(), &params); + + Ok(block) +} diff --git a/crates/cranelift/src/translate/stack.rs b/crates/cranelift/src/translate/stack.rs index a7848b717ef3..5c79231d7dcf 100644 --- a/crates/cranelift/src/translate/stack.rs +++ b/crates/cranelift/src/translate/stack.rs @@ -4,7 +4,8 @@ //! track of the WebAssembly value and control stacks during the translation of //! a single function. -use cranelift_codegen::ir::{self, Block, Inst, Value}; +use cranelift_codegen::ir::{self, Block, ExceptionTag, Inst, Value}; +use cranelift_frontend::FunctionBuilder; use std::vec::Vec; /// Information about the presence of an associated `else` for an `if`, or the @@ -74,6 +75,10 @@ pub enum ControlStackFrame { num_return_values: usize, original_stack_size: usize, exit_is_branched_to: bool, + /// If this block is a try-table block, the handler state + /// checkpoint to rewind to when we leave the block, and the + /// list of catch blocks to seal when done. + try_table_info: Option<(HandlerStateCheckpoint, Vec<Block>)>, }, Loop { destination: Block, @@ -209,6 +214,26 @@ impl ControlStackFrame { }; stack.truncate(self.original_stack_size() - num_duplicated_params); } + + /// Restore the catch-handlers as they were outside of this block. + pub fn restore_catch_handlers( + &self, + handlers: &mut HandlerState, + builder: &mut FunctionBuilder, + ) { + match self { + ControlStackFrame::Block { + try_table_info: Some((ckpt, catch_blocks)), + .. + } => { + handlers.restore_checkpoint(*ckpt); + for block in catch_blocks { + builder.seal_block(*block); + } + } + _ => {} + } + } } /// Keeps track of Wasm's operand and control stacks, as well as reachability @@ -219,6 +244,9 @@ pub struct FuncTranslationStacks { pub(crate) stack: Vec<Value>, /// A stack of active control flow operations at this point in the input wasm function.
pub(crate) control_stack: Vec<ControlStackFrame>, + /// Exception handler state, updated as we enter and exit + /// `try_table` scopes and attached to each call that we make. + pub(crate) handlers: HandlerState, /// Is the current translation state still reachable? This is false when translating operators /// like End, Return, or Unreachable. pub(crate) reachable: bool, @@ -239,6 +267,7 @@ impl FuncTranslationStacks { Self { stack: Vec::new(), control_stack: Vec::new(), + handlers: HandlerState::default(), reachable: true, } } @@ -246,6 +275,7 @@ impl FuncTranslationStacks { fn clear(&mut self) { debug_assert!(self.stack.is_empty()); debug_assert!(self.control_stack.is_empty()); + debug_assert!(self.handlers.is_empty()); self.reachable = true; } @@ -364,12 +394,12 @@ impl FuncTranslationStacks { &mut self.stack[len - n..] } - /// Push a block on the control stack. - pub(crate) fn push_block( + fn push_block_impl( &mut self, following_code: Block, num_param_types: usize, num_result_types: usize, + try_table_info: Option<(HandlerStateCheckpoint, Vec<Block>)>, ) { debug_assert!(num_param_types <= self.stack.len()); self.control_stack.push(ControlStackFrame::Block { @@ -378,9 +408,37 @@ impl FuncTranslationStacks { num_param_values: num_param_types, num_return_values: num_result_types, exit_is_branched_to: false, + try_table_info, }); } + /// Push a block on the control stack. + pub(crate) fn push_block( + &mut self, + following_code: Block, + num_param_types: usize, + num_result_types: usize, + ) { + self.push_block_impl(following_code, num_param_types, num_result_types, None); + } + + /// Push a try-table block on the control stack. + pub(crate) fn push_try_table_block( + &mut self, + following_code: Block, + catch_blocks: Vec<Block>, + num_param_types: usize, + num_result_types: usize, + checkpoint: HandlerStateCheckpoint, + ) { + self.push_block_impl( + following_code, + num_param_types, + num_result_types, + Some((checkpoint, catch_blocks)), + ); + } + /// Push a loop on the control stack.
pub(crate) fn push_loop( &mut self, @@ -434,3 +492,75 @@ impl FuncTranslationStacks { }); } } + +/// Exception handler state. +/// +/// We update this state as we enter and exit `try_table` scopes. When +/// we visit a call, we use this state to attach handler info to a +/// `try_call` CLIF instruction. +/// +/// Note that although handlers are lexically-scoped, and we could +/// optimize away shadowing, this is fairly subtle, because handler +/// order also matters (two *distinct* tag indices in our module are +/// not necessarily distinct: tag imports can create aliasing). Rather +/// than attempt to keep an ordered map and also remove shadowing, we +/// follow the Wasm spec more closely: handlers are on "the stack" and +/// inner handlers win over outer handlers. Within a single +/// `try_table`, we push handlers *in reverse*, because the semantics +/// of handler matching in `try_table` are left-to-right; this allows +/// us to *flatten* the LIFO stack of `try_table`s with left-to-right +/// scans within a table into a single stack we scan backward from the +/// end. +pub struct HandlerState { +    /// List of pairs mapping from CLIF-level exception tag to +    /// CLIF-level block. We will have already filled in these blocks +    /// with the appropriate branch implementation when we start the +    /// `try_table` scope. +    pub(crate) handlers: Vec<(Option<ExceptionTag>, Block)>, +} + +impl core::default::Default for HandlerState { +    fn default() -> Self { +        HandlerState { handlers: vec![] } +    } +} + +/// A checkpoint in the handler state. Can be restored in LIFO order +/// only: the last-taken checkpoint can be restored first, then the +/// one before it, etc. +#[derive(Clone, Copy, Debug)] +pub struct HandlerStateCheckpoint(usize); + +impl HandlerState { +    /// Set a given tag's handler to a given CLIF block. +    pub fn add_handler(&mut self, tag: Option<ExceptionTag>, block: Block) { +        self.handlers.push((tag, block)); +    } + +    /// Take a checkpoint.
+ pub fn take_checkpoint(&self) -> HandlerStateCheckpoint { + HandlerStateCheckpoint(self.handlers.len()) + } + + /// Restore to a checkpoint. + pub fn restore_checkpoint(&mut self, ckpt: HandlerStateCheckpoint) { + assert!(ckpt.0 <= self.handlers.len()); + self.handlers.truncate(ckpt.0); + } + + /// Get an iterator over handlers. The exception-matching + /// semantics are to take the *first* match in this sequence; that + /// is, this returns the sequence of handlers latest-first (top of + /// stack first). + pub fn handlers(&self) -> impl Iterator<Item = (Option<ExceptionTag>, Block)> + '_ { + self.handlers + .iter() + .map(|(tag, block)| (*tag, *block)) + .rev() + } + + /// Are there no handlers registered? + pub fn is_empty(&self) -> bool { + self.handlers.is_empty() + } +} diff --git a/crates/environ/src/builtin.rs b/crates/environ/src/builtin.rs index 5ccc346857bd..32cbe639a6a0 100644 --- a/crates/environ/src/builtin.rs +++ b/crates/environ/src/builtin.rs @@ -239,6 +239,14 @@ macro_rules! foreach_builtin_function { // the Option, as in previous libcall. #[cfg(feature = "stack-switching")] table_fill_cont_obj(vmctx: vmctx, table: u32, dst: u64, value_contref: pointer, value_revision: u64, len: u64) -> bool; + + // Return the instance ID for a given vmctx. + #[cfg(feature = "gc")] + get_instance_id(vmctx: vmctx) -> u32; + + // Throw an exception. + #[cfg(feature = "gc")] + throw_ref(vmctx: vmctx, exnref: u32) -> bool; } }; } @@ -422,6 +430,8 @@ impl BuiltinFunctionIndex { (@get cont_new pointer) => (TrapSentinel::Negative); + (@get get_instance_id u32) => (return None); + // Bool-returning functions use `false` as an indicator of a trap. (@get $name:ident bool) => (TrapSentinel::Falsy); diff --git a/crates/environ/src/gc.rs b/crates/environ/src/gc.rs index f263294248fc..92104ad181d1 100644 --- a/crates/environ/src/gc.rs +++ b/crates/environ/src/gc.rs @@ -211,11 +211,19 @@ pub trait GcTypeLayouts { /// element type.
fn array_length_field_offset(&self) -> u32; - /// The offset of an exception object's tag reference. + /// The offset of an exception object's tag reference: defining + /// instance index field. /// - /// This msut be the same for all exception objects in the heap, + /// This must be the same for all exception objects in the heap, /// regardless of their specific signature. - fn exception_tag_offset(&self) -> u32; + fn exception_tag_instance_offset(&self) -> u32; + + /// The offset of an exception object's tag reference: defined tag + /// index field. + /// + /// This must be the same for all exception objects in the heap, + /// regardless of their specific signature. + fn exception_tag_defined_offset(&self) -> u32; /// Get this collector's layout for the given composite type. /// diff --git a/crates/environ/src/gc/drc.rs b/crates/environ/src/gc/drc.rs index e79993d60390..ac1e6d573993 100644 --- a/crates/environ/src/gc/drc.rs +++ b/crates/environ/src/gc/drc.rs @@ -11,8 +11,11 @@ pub const HEADER_ALIGN: u32 = 8; /// The offset of the length field in a `VMDrcArrayHeader`. pub const ARRAY_LENGTH_OFFSET: u32 = HEADER_SIZE; -/// The offset of the tag fields in an exception header. -pub const EXCEPTION_TAG_OFFSET: u32 = HEADER_SIZE; +/// The offset of the tag-instance-index field in an exception header. +pub const EXCEPTION_TAG_INSTANCE_OFFSET: u32 = HEADER_SIZE; + +/// The offset of the tag-defined-index field in an exception header. +pub const EXCEPTION_TAG_DEFINED_OFFSET: u32 = HEADER_SIZE + 4; /// The bit within a `VMDrcHeader`'s reserved bits that is the mark /// bit. 
Collectively, this bit in all the heap's objects' headers implements @@ -32,8 +35,12 @@ impl GcTypeLayouts for DrcTypeLayouts { ARRAY_LENGTH_OFFSET } - fn exception_tag_offset(&self) -> u32 { - EXCEPTION_TAG_OFFSET + fn exception_tag_instance_offset(&self) -> u32 { + EXCEPTION_TAG_INSTANCE_OFFSET + } + + fn exception_tag_defined_offset(&self) -> u32 { + EXCEPTION_TAG_DEFINED_OFFSET } fn array_layout(&self, ty: &WasmArrayType) -> GcArrayLayout { diff --git a/crates/environ/src/gc/null.rs b/crates/environ/src/gc/null.rs index 860dc805fdd7..c16b33668ca0 100644 --- a/crates/environ/src/gc/null.rs +++ b/crates/environ/src/gc/null.rs @@ -11,8 +11,11 @@ pub const HEADER_ALIGN: u32 = 8; /// The offset of the length field in a `VMNullArrayHeader`. pub const ARRAY_LENGTH_OFFSET: u32 = HEADER_SIZE; -/// The offset of the tag fields in an exception header. -pub const EXCEPTION_TAG_OFFSET: u32 = HEADER_SIZE; +/// The offset of the tag-instance-index field in an exception header. +pub const EXCEPTION_TAG_INSTANCE_OFFSET: u32 = HEADER_SIZE; + +/// The offset of the tag-defined-index field in an exception header. +pub const EXCEPTION_TAG_DEFINED_OFFSET: u32 = HEADER_SIZE + 4; /// The layout of Wasm GC objects in the null collector. #[derive(Default)] @@ -23,8 +26,12 @@ impl GcTypeLayouts for NullTypeLayouts { ARRAY_LENGTH_OFFSET } - fn exception_tag_offset(&self) -> u32 { - EXCEPTION_TAG_OFFSET + fn exception_tag_instance_offset(&self) -> u32 { + EXCEPTION_TAG_INSTANCE_OFFSET + } + + fn exception_tag_defined_offset(&self) -> u32 { + EXCEPTION_TAG_DEFINED_OFFSET } fn array_layout(&self, ty: &WasmArrayType) -> GcArrayLayout { diff --git a/crates/environ/src/obj.rs b/crates/environ/src/obj.rs index 45b9293fe9a0..a846d667c863 100644 --- a/crates/environ/src/obj.rs +++ b/crates/environ/src/obj.rs @@ -98,6 +98,17 @@ pub const ELF_WASMTIME_STACK_MAP: &str = ".wasmtime.stackmap"; /// to the 32-bit encodings for offsets this doesn't support images >=4gb. 
pub const ELF_WASMTIME_TRAPS: &str = ".wasmtime.traps"; +/// A custom binary-encoded section of the wasmtime compilation +/// artifacts which encodes exception tables. +/// +/// This section is used at runtime to allow the unwinder to find +/// exception handler blocks active at particular callsites. +/// +/// This section's format is defined by the +/// [`wasmtime_unwinder::ExceptionTableBuilder`] data structure. Its +/// code offsets are relative to the start of the text segment. +pub const ELF_WASMTIME_EXCEPTIONS: &str = ".wasmtime.exceptions"; + /// A custom section which consists of just 1 byte which is either 0 or 1 as to /// whether BTI is enabled. pub const ELF_WASM_BTI: &str = ".wasmtime.bti"; diff --git a/crates/environ/src/vmoffsets.rs b/crates/environ/src/vmoffsets.rs index 05df1121a978..1b12ed9a6139 100644 --- a/crates/environ/src/vmoffsets.rs +++ b/crates/environ/src/vmoffsets.rs @@ -190,25 +190,26 @@ pub trait PtrSize { /// Return the offset of the `gc_heap.base` field within a `VMStoreContext`. fn vmstore_context_gc_heap_base(&self) -> u8 { let offset = self.vmstore_context_gc_heap() + self.vmmemory_definition_base(); - debug_assert!(offset < self.vmstore_context_last_wasm_exit_fp()); + debug_assert!(offset < self.vmstore_context_last_wasm_exit_trampoline_fp()); offset } /// Return the offset of the `gc_heap.current_length` field within a `VMStoreContext`. fn vmstore_context_gc_heap_current_length(&self) -> u8 { let offset = self.vmstore_context_gc_heap() + self.vmmemory_definition_current_length(); - debug_assert!(offset < self.vmstore_context_last_wasm_exit_fp()); + debug_assert!(offset < self.vmstore_context_last_wasm_exit_trampoline_fp()); offset } - /// Return the offset of the `last_wasm_exit_fp` field of `VMStoreContext`. - fn vmstore_context_last_wasm_exit_fp(&self) -> u8 { + /// Return the offset of the `last_wasm_exit_trampoline_fp` field + /// of `VMStoreContext`. 
+ fn vmstore_context_last_wasm_exit_trampoline_fp(&self) -> u8 { self.vmstore_context_gc_heap() + self.size_of_vmmemory_definition() } /// Return the offset of the `last_wasm_exit_pc` field of `VMStoreContext`. fn vmstore_context_last_wasm_exit_pc(&self) -> u8 { - self.vmstore_context_last_wasm_exit_fp() + self.size() + self.vmstore_context_last_wasm_exit_trampoline_fp() + self.size() } /// Return the offset of the `last_wasm_entry_fp` field of `VMStoreContext`. @@ -811,6 +812,18 @@ impl VMOffsets

{ 0 * self.pointer_size() } + /// The offset of the `vmctx` field. + #[inline] + pub fn vmtag_import_vmctx(&self) -> u8 { + 1 * self.pointer_size() + } + + /// The offset of the `index` field. + #[inline] + pub fn vmtag_import_index(&self) -> u8 { + 2 * self.pointer_size() + } + /// Return the size of `VMTagImport`. #[inline] pub fn size_of_vmtag_import(&self) -> u8 { @@ -1045,6 +1058,18 @@ impl VMOffsets

{ pub fn vmctx_vmtag_import_from(&self, index: TagIndex) -> u32 { self.vmctx_vmtag_import(index) + u32::from(self.vmtag_import_from()) } + + /// Return the offset to the `vmctx` field in `VMTagImport` index `index`. + #[inline] + pub fn vmctx_vmtag_import_vmctx(&self, index: TagIndex) -> u32 { + self.vmctx_vmtag_import(index) + u32::from(self.vmtag_import_vmctx()) + } + + /// Return the offset to the `index` field in `VMTagImport` index `index`. + #[inline] + pub fn vmctx_vmtag_import_index(&self, index: TagIndex) -> u32 { + self.vmctx_vmtag_import(index) + u32::from(self.vmtag_import_index()) + } } /// Offsets for `VMGcHeader`. diff --git a/crates/fuzzing/src/generators/config.rs b/crates/fuzzing/src/generators/config.rs index 0c129f38e09b..553bbea530c1 100644 --- a/crates/fuzzing/src/generators/config.rs +++ b/crates/fuzzing/src/generators/config.rs @@ -141,7 +141,7 @@ impl Config { component_model_gc, simd, exceptions, - legacy_exceptions, + legacy_exceptions: _, hogs_memory: _, nan_canonicalization: _, @@ -161,7 +161,6 @@ impl Config { component_model_async_stackful.unwrap_or(false); self.module_config.component_model_error_context = component_model_error_context.unwrap_or(false); - self.module_config.legacy_exceptions = legacy_exceptions.unwrap_or(false); self.module_config.component_model_gc = component_model_gc.unwrap_or(false); // Enable/disable proposals that wasm-smith has knobs for which will be @@ -305,7 +304,6 @@ impl Config { Some(self.module_config.config.shared_everything_threads_enabled); cfg.wasm.wide_arithmetic = Some(self.module_config.config.wide_arithmetic_enabled); cfg.wasm.exceptions = Some(self.module_config.config.exceptions_enabled); - cfg.wasm.legacy_exceptions = Some(self.module_config.legacy_exceptions); if !self.module_config.config.simd_enabled { cfg.wasm.relaxed_simd = Some(false); } diff --git a/crates/test-macros/src/wasmtime_test.rs b/crates/test-macros/src/wasmtime_test.rs index 1ededbffe9cb..d77e98e887a7 100644 --- 
a/crates/test-macros/src/wasmtime_test.rs +++ b/crates/test-macros/src/wasmtime_test.rs @@ -264,6 +264,12 @@ fn expand(test_config: &TestConfig, func: Fn) -> Result { #(#attrs)* #ignore_miri #asyncness fn #test_name() { + // Skip this test completely if the compiler doesn't support + // this host. + let compiler = wasmtime_test_util::wast::Compiler::#strategy_ident; + if !compiler.supports_host() { + return; + } let _ = env_logger::try_init(); let mut config = Config::new(); wasmtime_test_util::wasmtime_wast::apply_test_config( @@ -273,17 +279,17 @@ fn expand(test_config: &TestConfig, func: Fn) -> Result { wasmtime_test_util::wasmtime_wast::apply_wast_config( &mut config, &wasmtime_test_util::wast::WastConfig { - compiler: wasmtime_test_util::wast::Compiler::#strategy_ident, + compiler, pooling: false, collector: wasmtime_test_util::wast::Collector::Auto, }, ); let result = #func_name(&mut config) #await_; - if wasmtime_test_util::wast::Compiler::#strategy_ident.should_fail(&#test_config) { - assert!(result.is_err()); - } else { - result.unwrap(); - } + if compiler.should_fail(&#test_config) { + assert!(result.is_err()); + } else { + result.unwrap(); + } } }; diff --git a/crates/test-util/src/wast.rs b/crates/test-util/src/wast.rs index 0b2058bfd95e..cef52f1e206b 100644 --- a/crates/test-util/src/wast.rs +++ b/crates/test-util/src/wast.rs @@ -108,22 +108,8 @@ fn spec_test_config(test: &Path) -> TestConfig { ret.memory64 = Some(true); ret.tail_call = Some(true); ret.extended_const = Some(true); + ret.exceptions = Some(true); - // Wasmtime, at the current date, has incomplete support for the - // exceptions proposal. Instead of flagging the entire test suite - // as needing this proposal try to filter down per-test to what - // exactly needs this. Other tests aren't expected to need - // exceptions. 
- if test.ends_with("tag.wast") - || test.ends_with("instance.wast") - || test.ends_with("throw.wast") - || test.ends_with("throw_ref.wast") - || test.ends_with("try_table.wast") - || test.ends_with("ref_null.wast") - || test.ends_with("imports.wast") - { - ret.exceptions = Some(true); - } if test.parent().unwrap().ends_with("legacy") { ret.legacy_exceptions = Some(true); } @@ -336,7 +322,11 @@ impl Compiler { } Compiler::CraneliftPulley => { - config.threads() || config.legacy_exceptions() || config.stack_switching() + config.threads() + || config.legacy_exceptions() + // Pulley doesn't yet support exception unwinding. + || config.exceptions() + || config.stack_switching() } } } @@ -642,22 +632,6 @@ impl WastTest { } } - // For the exceptions proposal these tests use instructions and such - // which aren't implemented yet so these are expected to fail. - if self.config.exceptions() { - let unsupported = [ - "ref_null.wast", - "throw.wast", - "rethrow.wast", - "throw_ref.wast", - "try_table.wast", - "instance.wast", - ]; - if unsupported.iter().any(|part| self.path.ends_with(part)) { - return true; - } - } - false } } diff --git a/crates/unwinder/src/exception_table.rs b/crates/unwinder/src/exception_table.rs index 025df705bd6d..e61b87d17400 100644 --- a/crates/unwinder/src/exception_table.rs +++ b/crates/unwinder/src/exception_table.rs @@ -13,7 +13,8 @@ use object::{Bytes, LittleEndian, U32Bytes}; #[cfg(feature = "cranelift")] -use alloc::{vec, vec::Vec}; +use alloc::vec; +use alloc::vec::Vec; #[cfg(feature = "cranelift")] use cranelift_codegen::{ ExceptionContextLoc, FinalizedMachCallSite, FinalizedMachExceptionHandler, binemit::CodeOffset, @@ -214,14 +215,28 @@ pub struct ExceptionTable<'a> { callsites: &'a [U32Bytes], ranges: &'a [U32Bytes], tags: &'a [U32Bytes], - #[expect( - dead_code, - reason = "Will be used in subsequent PR for Wasm exception handling" - )] contexts: &'a [U32Bytes], handlers: &'a [U32Bytes], } +/// Wasmtime exception table item, after 
parsing. +/// +/// Note that this is separately defined from the equivalent type in +/// Cranelift, `cranelift_codegen::FinalizedMachExceptionHandler`, +/// because we need this in runtime-only builds when Cranelift is not +/// included. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ExceptionHandler { + /// A tag (arbitrary `u32` identifier from CLIF) or `None` for catch-all. + pub tag: Option, + /// Dynamic context, if provided, with which to interpret the + /// tag. Context is available at the given offset from SP in this + /// frame. + pub context_sp_offset: Option, + /// Handler code offset. + pub handler_offset: u32, +} + impl<'a> ExceptionTable<'a> { /// Parse exception tables from a byte-slice as produced by /// [`ExceptionTableBuilder::serialize`]. @@ -263,6 +278,26 @@ impl<'a> ExceptionTable<'a> { }) } + /// Look up the set of handlers, if any, for a given return + /// address (as an offset into the code section). + /// + /// The handler for `None` (the catch-all/default handler), if + /// any, will always come last. + /// + /// Note: we use raw `u32` types for code offsets here to avoid + /// dependencies on `cranelift-codegen` when this crate is built + /// without compiler backend support (runtime-only config). + pub fn lookup_pc(&self, pc: u32) -> impl Iterator + '_ { + let callsite_idx = self + .callsites + .binary_search_by_key(&pc, |callsite| callsite.get(LittleEndian)) + .ok(); + + callsite_idx + .into_iter() + .flat_map(|callsite_idx| self.handlers_for_callsite(callsite_idx)) + } + /// Look up the handler destination, if any, for a given return /// address (as an offset into the code section) and exception /// tag. @@ -270,26 +305,14 @@ impl<'a> ExceptionTable<'a> { /// Note: we use raw `u32` types for code offsets and tags here to /// avoid dependencies on `cranelift-codegen` when this crate is /// built without compiler backend support (runtime-only config). 
- pub fn lookup(&self, pc: u32, tag: u32) -> Option { + pub fn lookup_pc_tag(&self, pc: u32, tag: u32) -> Option { // First, look up the callsite in the sorted callsites list. let callsite_idx = self .callsites .binary_search_by_key(&pc, |callsite| callsite.get(LittleEndian)) .ok()?; - // Now get the range. - let end_idx = self.ranges[callsite_idx].get(LittleEndian); - let start_idx = if callsite_idx > 0 { - self.ranges[callsite_idx - 1].get(LittleEndian) - } else { - 0 - }; - // Take the subslices of `tags` and `handlers` corresponding - // to this callsite. - let start_idx = usize::try_from(start_idx).unwrap(); - let end_idx = usize::try_from(end_idx).unwrap(); - let tags = &self.tags[start_idx..end_idx]; - let handlers = &self.handlers[start_idx..end_idx]; + let (tags, _, handlers) = self.tags_contexts_handlers_for_callsite(callsite_idx); // Is there any handler with an exact tag match? if let Ok(handler_idx) = tags.binary_search_by_key(&tag, |tag| tag.get(LittleEndian)) { @@ -305,6 +328,64 @@ impl<'a> ExceptionTable<'a> { None } + + fn tags_contexts_handlers_for_callsite( + &self, + idx: usize, + ) -> ( + &[U32Bytes], + &[U32Bytes], + &[U32Bytes], + ) { + let end_idx = self.ranges[idx].get(LittleEndian); + let start_idx = if idx > 0 { + self.ranges[idx - 1].get(LittleEndian) + } else { + 0 + }; + + // Take the subslices of `tags`, `contexts`, and `handlers` + // corresponding to this callsite. 
+ let start_idx = usize::try_from(start_idx).unwrap(); + let end_idx = usize::try_from(end_idx).unwrap(); + let tags = &self.tags[start_idx..end_idx]; + let contexts = &self.contexts[start_idx..end_idx]; + let handlers = &self.handlers[start_idx..end_idx]; + (tags, contexts, handlers) + } + + fn handlers_for_callsite(&self, idx: usize) -> impl Iterator { + let (tags, contexts, handlers) = self.tags_contexts_handlers_for_callsite(idx); + tags.iter() + .zip(contexts.iter()) + .zip(handlers.iter()) + .map(|((tag, context), handler)| { + let tag = tag.get(LittleEndian); + let tag = if tag == u32::MAX { None } else { Some(tag) }; + let context = context.get(LittleEndian); + let context = if context == u32::MAX { + None + } else { + Some(context) + }; + let handler = handler.get(LittleEndian); + ExceptionHandler { + tag, + context_sp_offset: context, + handler_offset: handler, + } + }) + } + + /// Provide an iterator over callsites, and for each callsite, the + /// arrays of handlers. + pub fn into_iter(self) -> impl Iterator)> + 'a { + self.callsites + .iter() + .map(|pc| pc.get(LittleEndian)) + .enumerate() + .map(move |(i, pc)| (pc, self.handlers_for_callsite(i).collect())) + } } #[cfg(all(test, feature = "cranelift"))] @@ -341,10 +422,43 @@ mod test { let deserialized = ExceptionTable::parse(&bytes).unwrap(); - assert_eq!(deserialized.lookup(0x148, 1), None); - assert_eq!(deserialized.lookup(0x110, 1), Some(0x120)); - assert_eq!(deserialized.lookup(0x110, 2), Some(0x130)); - assert_eq!(deserialized.lookup(0x110, 42), Some(0x140)); - assert_eq!(deserialized.lookup(0x150, 100), Some(0x160)); + assert_eq!( + deserialized + .lookup_pc(0x148) + .collect::>(), + vec![] + ); + assert_eq!( + deserialized + .lookup_pc(0x110) + .collect::>(), + vec![ + ExceptionHandler { + tag: Some(1), + context_sp_offset: None, + handler_offset: 0x120 + }, + ExceptionHandler { + tag: Some(2), + context_sp_offset: None, + handler_offset: 0x130 + }, + ExceptionHandler { + tag: None, + 
context_sp_offset: None, + handler_offset: 0x140 + }, + ] + ); + assert_eq!( + deserialized + .lookup_pc(0x150) + .collect::>(), + vec![ExceptionHandler { + tag: None, + context_sp_offset: None, + handler_offset: 0x160 + },] + ); } } diff --git a/crates/unwinder/src/lib.rs b/crates/unwinder/src/lib.rs index d437146b608d..8b16a486aea6 100644 --- a/crates/unwinder/src/lib.rs +++ b/crates/unwinder/src/lib.rs @@ -10,7 +10,6 @@ #![doc = include_str!("../README.md")] #![no_std] -#[cfg(feature = "cranelift")] extern crate alloc; mod stackwalk; diff --git a/crates/unwinder/src/stackwalk.rs b/crates/unwinder/src/stackwalk.rs index 51bc9347949e..449db6632855 100644 --- a/crates/unwinder/src/stackwalk.rs +++ b/crates/unwinder/src/stackwalk.rs @@ -65,6 +65,9 @@ pub struct Frame { pc: usize, /// The frame pointer value corresponding to this frame. fp: usize, + /// The SP value at the callsite. This may not be known for the + /// first frame that is visited. + sp: Option, } impl Frame { @@ -77,6 +80,30 @@ impl Frame { pub fn fp(&self) -> usize { self.fp } + + /// Get this frame's stack pointer at the callsite, if known. + pub fn sp(&self) -> Option { + self.sp + } + + /// Read out a machine-word-sized value at the given offset from + /// SP in this frame, if the SP is known. + /// + /// # Safety + /// + /// Requires that this frame is a valid, active frame. A `Frame` + /// provided by `visit_frames()` will be valid for the duration of + /// the invoked closure. + /// + /// Requires that `offset` falls within the size of this + /// frame. This ordinarily requires knowledge passed from the + /// compiler that produced the running function, e.g., Cranelift. + pub unsafe fn read_slot(&self, offset: usize) -> Option { + // SAFETY: we required that this is a valid frame, and that + // `offset` is a valid offset within that frame. 
+ self.sp + .map(|sp| unsafe { *(sp.wrapping_add(offset) as *mut usize) }) + } } /// Walk through a contiguous sequence of Wasm frames starting with @@ -111,6 +138,8 @@ pub unsafe fn visit_frames( assert_ne!(fp, 0); assert_ne!(trampoline_fp, 0); + let mut last_fp = None; + // This loop will walk the linked list of frame pointers starting // at `fp` and going up until `trampoline_fp`. We know that both // `fp` and `trampoline_fp` are "trusted values" aka generated and @@ -170,7 +199,8 @@ pub unsafe fn visit_frames( log::trace!("pc = {:p}", pc as *const ()); log::trace!("fp = {:p}", fp as *const ()); - f(Frame { pc, fp })?; + let sp = last_fp.map(|last_fp| last_fp + unwind.next_older_sp_from_fp_offset()); + f(Frame { pc, fp, sp })?; // SAFETY: this unsafe traversal of the linked list on the stack is // reflected in the contract of this function where `pc`, `fp`, @@ -192,6 +222,7 @@ pub unsafe fn visit_frames( // Because the stack always grows down, the older FP must be greater // than the current FP. assert!(next_older_fp > fp, "{next_older_fp:#x} > {fp:#x}"); + last_fp = Some(fp); fp = next_older_fp; } } diff --git a/crates/unwinder/src/throw.rs b/crates/unwinder/src/throw.rs index 9b1c64381037..d7e548debe6a 100644 --- a/crates/unwinder/src/throw.rs +++ b/crates/unwinder/src/throw.rs @@ -1,4 +1,4 @@ -//! Generation of the throw-stub. +//! Throw action computation (handler search). //! //! In order to throw exceptions from within Cranelift-compiled code, //! we provide a runtime function helper meant to be called by host @@ -16,7 +16,7 @@ //! responsibility to invoke alternative behavior (e.g., abort the //! program or unwind all the way to initial Cranelift-code entry). -use crate::{ExceptionTable, Unwind}; +use crate::{Frame, Unwind}; use core::ops::ControlFlow; /// Throw action to perform. 
@@ -46,45 +46,43 @@ pub enum ThrowAction { /// The safety of this function is the same as [`crate::visit_frames`] where the /// values passed in configuring the frame pointer walk must be correct and /// Wasm-defined for this to not have UB. -pub unsafe fn compute_throw_action<'a, F: Fn(usize) -> Option<(usize, ExceptionTable<'a>)>>( +pub unsafe fn compute_throw_action<F: Fn(&Frame) -> Option<usize>>( unwind: &dyn Unwind, - module_lookup: F, + frame_handler: F, exit_pc: usize, - exit_frame: usize, + exit_trampoline_frame: usize, entry_frame: usize, - tag: u32, ) -> ThrowAction { - let mut last_fp = exit_frame; - // SAFETY: the safety of `visit_frames` relies on the correctness of the // parameters passed in which is forwarded as a contract to this function // itself. let result = unsafe { - crate::stackwalk::visit_frames(unwind, exit_pc, exit_frame, entry_frame, |frame| { - if let Some((base, table)) = module_lookup(frame.pc()) { - let relative_pc = u32::try_from( - frame - .pc() - .checked_sub(base) - .expect("module lookup did not return a module base below the PC"), - ) - .expect("module larger than 4GiB"); - - if let Some(handler) = table.lookup(relative_pc, tag) { - let abs_handler_pc = base - .checked_add(usize::try_from(handler).unwrap()) - .expect("Handler address computation overflowed"); + crate::stackwalk::visit_frames( + unwind, + exit_pc, + exit_trampoline_frame, + entry_frame, + |frame| { + log::trace!("visit_frame: frame {frame:?}"); + let Some(sp) = frame.sp() else { + // Cannot possibly unwind to this frame if SP is not + // known. This is only the case for the first + // (trampoline) frame; after that, we know SP at the + // callsite because we know the offset from the lower + // FP to the next frame's SP. 
+ return ControlFlow::Continue(()); + }; + if let Some(handler_pc) = frame_handler(&frame) { return ControlFlow::Break(ThrowAction::Handler { - pc: abs_handler_pc, - sp: last_fp + unwind.next_older_sp_from_fp_offset(), + pc: handler_pc, + sp, fp: frame.fp(), }); } - } - last_fp = frame.fp(); - ControlFlow::Continue(()) - }) + ControlFlow::Continue(()) + }, + ) }; match result { ControlFlow::Break(action) => action, diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs index d3c2ae0dbe60..347caf6f95ac 100644 --- a/crates/wasmtime/src/config.rs +++ b/crates/wasmtime/src/config.rs @@ -1186,7 +1186,10 @@ impl Config { self } - #[doc(hidden)] // FIXME(#3427) - if/when implemented then un-hide this + /// Configures whether the [Exception-handling proposal][proposal] is enabled or not. + /// + /// [proposal]: https://github.com/WebAssembly/exception-handling + #[cfg(feature = "gc")] pub fn wasm_exceptions(&mut self, enable: bool) -> &mut Self { self.wasm_feature(WasmFeatures::EXCEPTIONS, enable); self @@ -2080,6 +2083,13 @@ impl Config { unsupported |= WasmFeatures::STACK_SWITCHING; } + // Pulley also doesn't support exceptions, because we + // need to refactor the setjmp/longjmp emulation to + // also support resuming into Wasm. 
+ if self.compiler_target().is_pulley() { + unsupported |= WasmFeatures::EXCEPTIONS; + } + use target_lexicon::*; match self.compiler_target() { Triple { @@ -2234,6 +2244,10 @@ impl Config { bail!("support for GC was disabled at compile time") } + if !cfg!(feature = "gc") && features.contains(WasmFeatures::EXCEPTIONS) { + bail!("exceptions support requires garbage collection (GC) to be enabled in the build"); + } + let mut tunables = Tunables::default_for_target(&self.compiler_target())?; // If no target is explicitly specified then further refine `tunables` diff --git a/crates/wasmtime/src/runtime.rs b/crates/wasmtime/src/runtime.rs index 585130e5e8eb..ee6b1b5a2915 100644 --- a/crates/wasmtime/src/runtime.rs +++ b/crates/wasmtime/src/runtime.rs @@ -33,6 +33,8 @@ pub(crate) mod code; pub(crate) mod code_memory; #[cfg(feature = "debug-builtins")] pub(crate) mod debug; +#[cfg(feature = "gc")] +pub(crate) mod exception; pub(crate) mod externals; #[cfg(feature = "async")] pub(crate) mod fiber; @@ -72,6 +74,8 @@ cfg_if::cfg_if! 
{ } pub use code_memory::CodeMemory; +#[cfg(feature = "gc")] +pub use exception::*; pub use externals::*; pub use func::*; pub use gc::*; diff --git a/crates/wasmtime/src/runtime/code_memory.rs b/crates/wasmtime/src/runtime/code_memory.rs index 1ade0a672de9..da3e8cdafbba 100644 --- a/crates/wasmtime/src/runtime/code_memory.rs +++ b/crates/wasmtime/src/runtime/code_memory.rs @@ -9,6 +9,7 @@ use object::SectionFlags; use object::endian::Endianness; use object::read::{Object, ObjectSection, elf::ElfFile64}; use wasmtime_environ::{Trap, lookup_trap_code, obj}; +use wasmtime_unwinder::ExceptionTable; /// Management of executable memory within a `MmapVec` /// @@ -34,6 +35,7 @@ pub struct CodeMemory { wasm_data: Range, address_map_data: Range, stack_map_data: Range, + exception_data: Range, func_name_data: Range, info_data: Range, wasm_dwarf: Range, @@ -119,6 +121,7 @@ impl CodeMemory { #[cfg(feature = "debug-builtins")] let mut has_native_debug_info = false; let mut trap_data = 0..0; + let mut exception_data = 0..0; let mut wasm_data = 0..0; let mut address_map_data = 0..0; let mut stack_map_data = 0..0; @@ -168,6 +171,7 @@ impl CodeMemory { obj::ELF_WASMTIME_ADDRMAP => address_map_data = range, obj::ELF_WASMTIME_STACK_MAP => stack_map_data = range, obj::ELF_WASMTIME_TRAPS => trap_data = range, + obj::ELF_WASMTIME_EXCEPTIONS => exception_data = range, obj::ELF_NAME_DATA => func_name_data = range, obj::ELF_WASMTIME_INFO => info_data = range, obj::ELF_WASMTIME_DWARF => wasm_dwarf = range, @@ -182,6 +186,17 @@ impl CodeMemory { #[cfg(not(has_host_compiler_backend))] let _ = &mut unwind; + // Ensure that the exception table is well-formed. This parser + // construction is cheap: it reads the header and validates + // ranges but nothing else. 
We do this only in debug-assertion + // builds because we otherwise require for safety that the + // compiled artifact is as-produced-by this version of + // Wasmtime, and we should always produce a correct exception + // table (i.e., we are not expecting untrusted data here). + if cfg!(debug_assertions) { + let _ = ExceptionTable::parse(&mmap[exception_data.clone()])?; + } + Ok(Self { mmap, #[cfg(has_host_compiler_backend)] @@ -200,6 +215,7 @@ impl CodeMemory { trap_data, address_map_data, stack_map_data, + exception_data, func_name_data, wasm_dwarf, info_data, @@ -255,6 +271,12 @@ impl CodeMemory { &self.mmap[self.stack_map_data.clone()] } + /// Returns the encoded exception-tables section to pass to + /// `wasmtime_unwinder::ExceptionTable::parse`. + pub fn exception_tables(&self) -> &[u8] { + &self.mmap[self.exception_data.clone()] + } + /// Returns the contents of the `ELF_WASMTIME_INFO` section, or an empty /// slice if it wasn't found. #[inline] diff --git a/crates/wasmtime/src/runtime/exception.rs b/crates/wasmtime/src/runtime/exception.rs new file mode 100644 index 000000000000..c32568e34e92 --- /dev/null +++ b/crates/wasmtime/src/runtime/exception.rs @@ -0,0 +1,52 @@ +//! Types for the public API around exceptions. +//! +//! To allow host code to interact with exceptions, Wasmtime provides +//! two basic areas of API: +//! +//! - The [`crate::ExnRef`] type and associated types allow the host +//! to create exception objects. In the Wasm execution model, every +//! thrown exception is a unique instance of an exception object, +//! which carries a reference to the associated tag and any payload +//! values specified by the exception's signature. +//! +//! - The [`crate::Store::throw`] method to throw an exception, and +//! associated methods to take ([`crate::Store::take_exception`]) or +//! peek at ([`crate::Store::peek_exception`]) a thrown exception, +//! along with the `Error` type [`ThrownException`] that indicates +//! an exception is being thrown. 
This API allows access to a +//! "pending exception" slot on the `Store` which roots an exception +//! object and allows it to be propagated through Wasm and hostcall +//! layers. If Wasm code throws an uncaught exception, it will be +//! set as the pending exception and the call into Wasm will return +//! an `Err(ThrownException.into())`; if a hostcall wishes to throw +//! an exception to be caught by Wasm (or the outer call into Wasm +//! by the host), it can call `Store::throw` and return the +//! associated error. + +/// An error type that represents that a pending WebAssembly exception +/// is set on the associated `Store`. +/// +/// When used as an error type and returned from a Wasm-to-host call, +/// or host-to-Wasm call, it indicates that the caller should either +/// continue propagating the error upward, or take and handle the +/// exception using [`crate::Store::take_exception`] (or a helper such +/// as [`crate::Store::catch`]. +/// +/// Wasmtime uses an error type *without* payload, and stores the +/// exception itself on the store, to maintain proper GC rooting; +/// otherwise, it is difficult to get exception propagation up the +/// stack right in the presence of nested handle scopes. A pending +/// exception on the store is safely rooted as long as it is stored +/// there. +#[derive(Debug)] +pub struct ThrownException; + +/// We need to implement Error for `ThrownException` so it can be boxed up into an `anyhow::Error`. +impl core::error::Error for ThrownException {} + +/// `Error` requires `Display`. 
+impl core::fmt::Display for ThrownException { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "thrown Wasm exception") + } +} diff --git a/crates/wasmtime/src/runtime/func.rs b/crates/wasmtime/src/runtime/func.rs index 43f264ddc8c1..38bec8a3ea75 100644 --- a/crates/wasmtime/src/runtime/func.rs +++ b/crates/wasmtime/src/runtime/func.rs @@ -1514,7 +1514,7 @@ pub(crate) fn invoke_wasm_and_catch_traps( let result = crate::runtime::vm::catch_traps(store, &mut previous_runtime_state, closure); core::mem::drop(previous_runtime_state); store.0.call_hook(CallHook::ReturningFromWasm)?; - result.map_err(|t| crate::trap::from_runtime_box(store.0, t)) + result } } @@ -1530,9 +1530,9 @@ pub(crate) struct EntryStoreContext { /// Contains value of `last_wasm_exit_pc` field to restore in /// `VMStoreContext` when exiting Wasm. pub last_wasm_exit_pc: usize, - /// Contains value of `last_wasm_exit_fp` field to restore in + /// Contains value of `last_wasm_exit_trampoline_fp` field to restore in /// `VMStoreContext` when exiting Wasm. - pub last_wasm_exit_fp: usize, + pub last_wasm_exit_trampoline_fp: usize, /// Contains value of `last_wasm_entry_fp` field to restore in /// `VMStoreContext` when exiting Wasm. 
pub last_wasm_entry_fp: usize, @@ -1625,7 +1625,11 @@ impl EntryStoreContext { unsafe { let last_wasm_exit_pc = *store.0.vm_store_context().last_wasm_exit_pc.get(); - let last_wasm_exit_fp = *store.0.vm_store_context().last_wasm_exit_fp.get(); + let last_wasm_exit_trampoline_fp = *store + .0 + .vm_store_context() + .last_wasm_exit_trampoline_fp + .get(); let last_wasm_entry_fp = *store.0.vm_store_context().last_wasm_entry_fp.get(); let stack_chain = (*store.0.vm_store_context().stack_chain.get()).clone(); @@ -1638,7 +1642,7 @@ impl EntryStoreContext { Self { stack_limit, last_wasm_exit_pc, - last_wasm_exit_fp, + last_wasm_exit_trampoline_fp, last_wasm_entry_fp, stack_chain, vm_store_context, @@ -1657,7 +1661,8 @@ impl EntryStoreContext { *(&*self.vm_store_context).stack_limit.get() = limit; } - *(*self.vm_store_context).last_wasm_exit_fp.get() = self.last_wasm_exit_fp; + *(*self.vm_store_context).last_wasm_exit_trampoline_fp.get() = + self.last_wasm_exit_trampoline_fp; *(*self.vm_store_context).last_wasm_exit_pc.get() = self.last_wasm_exit_pc; *(*self.vm_store_context).last_wasm_entry_fp.get() = self.last_wasm_entry_fp; *(*self.vm_store_context).stack_chain.get() = self.stack_chain.clone(); diff --git a/crates/wasmtime/src/runtime/module.rs b/crates/wasmtime/src/runtime/module.rs index b2efaa5fd74c..0420f7ee4a1c 100644 --- a/crates/wasmtime/src/runtime/module.rs +++ b/crates/wasmtime/src/runtime/module.rs @@ -22,6 +22,8 @@ use wasmtime_environ::{ CompiledModuleInfo, EntityIndex, HostPtr, ModuleTypes, ObjectKind, TypeTrace, VMOffsets, VMSharedTypeIndex, }; +#[cfg(feature = "gc")] +use wasmtime_unwinder::ExceptionTable; mod registry; pub use registry::*; @@ -1126,6 +1128,13 @@ impl Module { let info = self.inner.code.code_memory().stack_map_data(); wasmtime_environ::StackMap::lookup(text_offset, info) } + + /// Obtain an exception-table parser on this module's exception metadata. 
+ #[cfg(feature = "gc")] + pub(crate) fn exception_table<'a>(&'a self) -> ExceptionTable<'a> { + ExceptionTable::parse(self.inner.code.code_memory().exception_tables()) + .expect("Exception tables were validated on module load") + } } /// Describes a function for a given module. diff --git a/crates/wasmtime/src/runtime/store.rs b/crates/wasmtime/src/runtime/store.rs index 1d9e8810c800..d403c2c72546 100644 --- a/crates/wasmtime/src/runtime/store.rs +++ b/crates/wasmtime/src/runtime/store.rs @@ -77,6 +77,8 @@ //! `wasmtime`, must uphold for the public interface to be safe. use crate::RootSet; +#[cfg(feature = "gc")] +use crate::ThrownException; #[cfg(feature = "component-model-async")] use crate::component::ComponentStoreData; #[cfg(feature = "component-model-async")] @@ -97,6 +99,8 @@ use crate::runtime::vm::{ }; use crate::trampoline::VMHostGlobalContext; use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry}; +#[cfg(feature = "gc")] +use crate::{ExnRef, Rooted}; use crate::{Global, Instance, Memory, Table, Uninhabited}; use alloc::sync::Arc; use core::fmt; @@ -122,6 +126,9 @@ pub(crate) use token::StoreToken; mod async_; #[cfg(all(feature = "async", feature = "call-hook"))] pub use self::async_::CallHookHandler; + +#[cfg(feature = "gc")] +use super::vm::VMExnRef; #[cfg(feature = "gc")] mod gc; @@ -416,6 +423,17 @@ pub struct StoreOpaque { // Types for which the embedder has created an allocator for. #[cfg(feature = "gc")] gc_host_alloc_types: crate::hash_set::HashSet, + /// Pending exception, if any. This is also a GC root, because it + /// needs to be rooted somewhere between the time that a pending + /// exception is set and the time that the handling code takes the + /// exception object. We use this rooting strategy rather than a + /// root in an `Err` branch of a `Result` on the host side because + /// it is less error-prone with respect to rooting behavior. 
See + /// `throw()`, `take_pending_exception()`, + /// `peek_pending_exception()`, `has_pending_exception()`, and + /// `catch()`. + #[cfg(feature = "gc")] + pending_exception: Option, // Numbers of resources instantiated in this store, and their limits instance_count: usize, @@ -648,6 +666,8 @@ impl Store { gc_roots_list: GcRootsList::default(), #[cfg(feature = "gc")] gc_host_alloc_types: Default::default(), + #[cfg(feature = "gc")] + pending_exception: None, modules: ModuleRegistry::default(), func_refs: FuncRefs::default(), host_globals: PrimaryMap::new(), @@ -1067,6 +1087,84 @@ impl Store { ) { self.inner.epoch_deadline_callback(Box::new(callback)); } + + /// Set an exception as the currently pending exception, and + /// return an error that propagates the throw. + /// + /// This method takes an exception object and stores it in the + /// `Store` as the currently pending exception. This is a special + /// rooted slot that holds the exception as long as it is + /// propagating. This method then returns a `ThrownException` + /// error, which is a special type that indicates a pending + /// exception exists. When this type propagates as an error + /// returned from a Wasm-to-host call, the pending exception is + /// thrown within the Wasm context, and either caught or + /// propagated further to the host-to-Wasm call boundary. If an + /// exception is thrown out of Wasm (or across Wasm from a + /// hostcall) back to the host-to-Wasm call boundary, *that* + /// invocation returns a `ThrownException`, and the pending + /// exception slot is again set. In other words, the + /// `ThrownException` error type should propagate upward exactly + /// and only when a pending exception is set. + /// + /// To inspect or take the pending exception, use + /// [`peek_pending_exception`] and [`take_pending_exception`]. 
For + /// a convenient wrapper that invokes a closure and provides any + /// caught exception from the closure to a separate handler + /// closure, see [`StoreContextMut::catch`]. + /// + /// This method is parameterized over `R` for convenience, but + /// will always return an `Err`. + /// + /// # Panics + /// + /// - Will panic if `exception` has been unrooted. + /// - Will panic if `exception` is a null reference. + /// - Will panic if a pending exception has already been set. + #[cfg(feature = "gc")] + pub fn throw(&mut self, exception: Rooted) -> Result { + self.inner.throw_impl(exception); + Err(ThrownException) + } + + /// Take the currently pending exception, if any, and return it, + /// removing it from the "pending exception" slot. + /// + /// If there is no pending exception, returns `None`. + /// + /// Note: the returned exception is a LIFO root (see + /// [`crate::Rooted`]), rooted in the current handle scope. Take + /// care to ensure that it is re-rooted or otherwise does not + /// escape this scope! It is usually best to allow an exception + /// object to be rooted in the store's "pending exception" slot + /// until the final consumer has taken it, rather than root it and + /// pass it up the callstack in some other way. + /// + /// This method is useful to implement ad-hoc exception plumbing + /// in various ways, but for the most idiomatic handling, see + /// [`StoreContextMut::catch`]. + #[cfg(feature = "gc")] + pub fn take_pending_exception(&mut self) -> Option> { + self.inner.take_pending_exception_rooted() + } + + /// Tests whether there is a pending exception. + /// + /// Ordinarily, a pending exception will be set on a store if and + /// only if a host-side callstack is propagating a + /// [`crate::ThrownException`] error. The final consumer that + /// catches the exception takes it; it may re-place it to re-throw + /// (using [`throw`]) if it chooses not to actually handle the + /// exception. 
+ /// + /// This method is useful to tell whether a store is in this + /// state, but should not be used as part of the ordinary + /// exception-handling flow. For the most idiomatic handling, see + /// [`StoreContextMut::catch`]. + #[cfg(feature = "gc")] + pub fn has_pending_exception(&self) -> bool { + self.inner.pending_exception.is_some() + } } impl<'a, T> StoreContext<'a, T> { @@ -1162,6 +1260,34 @@ impl<'a, T> StoreContextMut<'a, T> { pub fn epoch_deadline_trap(&mut self) { self.0.epoch_deadline_trap(); } + + /// Set an exception as the currently pending exception, and + /// return an error that propagates the throw. + /// + /// See [`Store::throw`] for more details. + #[cfg(feature = "gc")] + pub fn throw(&mut self, exception: Rooted) -> Result { + self.0.inner.throw_impl(exception); + Err(ThrownException) + } + + /// Take the currently pending exception, if any, and return it, + /// removing it from the "pending exception" slot. + /// + /// See [`Store::take_pending_exception`] for more details. + #[cfg(feature = "gc")] + pub fn take_pending_exception(&mut self) -> Option> { + self.0.inner.take_pending_exception_rooted() + } + + /// Tests whether there is a pending exception. + /// + /// + /// See [`Store::has_pending_exception`] for more details. 
+ #[cfg(feature = "gc")] + pub fn has_pending_exception(&self) -> bool { + self.0.inner.pending_exception.is_some() + } } impl StoreInner { @@ -1729,6 +1855,7 @@ impl StoreOpaque { vm::Yield::new().await; } self.trace_user_roots(gc_roots_list); + self.trace_pending_exception_roots(gc_roots_list); log::trace!("End trace GC roots") } @@ -1850,6 +1977,18 @@ impl StoreOpaque { log::trace!("End trace GC roots :: user"); } + #[cfg(feature = "gc")] + fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) { + log::trace!("Begin trace GC roots :: pending exception"); + if let Some(pending_exception) = self.pending_exception.as_mut() { + unsafe { + let root = pending_exception.as_gc_ref_mut(); + gc_roots_list.add_root(root.into(), "Pending exception"); + } + } + log::trace!("End trace GC roots :: pending exception"); + } + /// Insert a host-allocated GC type into this store. /// /// This makes it suitable for the embedder to allocate instances of this @@ -2286,6 +2425,38 @@ at https://bytecodealliance.org/security. Ok(id) } + /// Set a pending exception. The `exnref` is taken and held on + /// this store to be fetched later by an unwind. This method does + /// *not* set up an unwind request on the TLS call state; that + /// must be done separately. + #[cfg(feature = "gc")] + pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) { + self.pending_exception = Some(exnref); + } + + /// Take a pending exception, if any. 
+ #[cfg(feature = "gc")] + pub(crate) fn take_pending_exception(&mut self) -> Option { + self.pending_exception.take() + } + + #[cfg(feature = "gc")] + fn take_pending_exception_rooted(&mut self) -> Option> { + let vmexnref = self.take_pending_exception()?; + let mut nogc = AutoAssertNoGc::new(self); + Some(Rooted::new(&mut nogc, vmexnref.into())) + } + + #[cfg(feature = "gc")] + fn throw_impl(&mut self, exception: Rooted) { + let mut nogc = AutoAssertNoGc::new(self); + let exnref = exception._to_raw(&mut nogc).unwrap(); + let exnref = VMGcRef::from_raw_u32(exnref) + .expect("exception cannot be null") + .into_exnref_unchecked(); + nogc.set_pending_exception(exnref); + } + #[cfg(target_has_atomic = "64")] pub(crate) fn set_epoch_deadline(&mut self, delta: u64) { // Set a new deadline based on the "epoch deadline delta". diff --git a/crates/wasmtime/src/runtime/trap.rs b/crates/wasmtime/src/runtime/trap.rs index cc7fed5c24c2..8ac3d38d3330 100644 --- a/crates/wasmtime/src/runtime/trap.rs +++ b/crates/wasmtime/src/runtime/trap.rs @@ -1,5 +1,7 @@ #[cfg(feature = "coredump")] use super::coredump::WasmCoreDump; +#[cfg(feature = "gc")] +use crate::ThrownException; use crate::prelude::*; use crate::store::StoreOpaque; use crate::{AsContext, Module}; @@ -79,6 +81,8 @@ pub(crate) fn from_runtime_box( coredumpstack, } = *runtime_trap; let (mut error, pc) = match reason { + #[cfg(feature = "gc")] + crate::runtime::vm::TrapReason::Exception => (ThrownException.into(), None), // For user-defined errors they're already an `anyhow::Error` so no // conversion is really necessary here, but a `backtrace` may have // been captured so it's attempted to get inserted here. 
diff --git a/crates/wasmtime/src/runtime/vm.rs b/crates/wasmtime/src/runtime/vm.rs index 7f2709681b31..7ea114c97fc5 100644 --- a/crates/wasmtime/src/runtime/vm.rs +++ b/crates/wasmtime/src/runtime/vm.rs @@ -69,6 +69,8 @@ mod stack_switching; mod store_box; mod sys; mod table; +#[cfg(feature = "gc")] +mod throw; mod traphandlers; mod vmcontext; @@ -119,6 +121,8 @@ pub use crate::runtime::vm::sys::mmap::open_file_for_mmap; #[cfg(has_host_compiler_backend)] pub use crate::runtime::vm::sys::unwind::UnwindRegistration; pub use crate::runtime::vm::table::{Table, TableElementType}; +#[cfg(feature = "gc")] +pub use crate::runtime::vm::throw::*; pub use crate::runtime::vm::traphandlers::*; pub use crate::runtime::vm::vmcontext::{ VMArrayCallFunction, VMArrayCallHostFuncContext, VMContext, VMFuncRef, VMFunctionImport, diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/exnref.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/exnref.rs index 7d632648b561..122663fc0a65 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/exnref.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/exnref.rs @@ -100,6 +100,16 @@ impl VMExnRef { &self.0 } + /// Get a mutable borrow on the underlying `VMGcRef`. + /// + /// Requires that the mutation retains the reference's invariants, + /// namely: not null, and pointing to a valid exnref object. Doing + /// otherwise is memory safe, but will lead to general + /// incorrectness. + pub fn as_gc_ref_mut(&mut self) -> &mut VMGcRef { + &mut self.0 + } + /// Clone this `VMExnRef`, running any GC barriers as necessary. 
pub fn clone(&self, gc_store: &mut GcStore) -> Self { Self(gc_store.clone_gc_ref(&self.0)) @@ -178,37 +188,31 @@ impl VMExnRef { instance: InstanceId, tag: DefinedTagIndex, ) -> Result<()> { - let tag_offset = store - .engine() - .gc_runtime() - .unwrap() - .layouts() - .exception_tag_offset(); + let layouts = store.engine().gc_runtime().unwrap().layouts(); + let instance_offset = layouts.exception_tag_instance_offset(); + let tag_offset = layouts.exception_tag_defined_offset(); let store = store.require_gc_store_mut()?; store .gc_object_data(&self.0) - .write_u32(tag_offset, instance.as_u32()); + .write_u32(instance_offset, instance.as_u32()); store .gc_object_data(&self.0) - .write_u32(tag_offset + 4, tag.as_u32()); + .write_u32(tag_offset, tag.as_u32()); Ok(()) } /// Get the tag referenced by this exception object. pub fn tag(&self, store: &mut AutoAssertNoGc) -> Result<(InstanceId, DefinedTagIndex)> { - let tag_offset = store - .engine() - .gc_runtime() - .unwrap() - .layouts() - .exception_tag_offset(); + let layouts = store.engine().gc_runtime().unwrap().layouts(); + let instance_offset = layouts.exception_tag_instance_offset(); + let tag_offset = layouts.exception_tag_defined_offset(); let instance = store .require_gc_store_mut()? 
.gc_object_data(&self.0) - .read_u32(tag_offset); + .read_u32(instance_offset); let instance = InstanceId::from_u32(instance); let store = store.require_gc_store_mut()?; - let tag = store.gc_object_data(&self.0).read_u32(tag_offset + 4); + let tag = store.gc_object_data(&self.0).read_u32(tag_offset); let tag = DefinedTagIndex::from_u32(tag); Ok((instance, tag)) } diff --git a/crates/wasmtime/src/runtime/vm/instance.rs b/crates/wasmtime/src/runtime/vm/instance.rs index 899204e811a4..126390ecaf59 100644 --- a/crates/wasmtime/src/runtime/vm/instance.rs +++ b/crates/wasmtime/src/runtime/vm/instance.rs @@ -326,6 +326,33 @@ impl Instance { unsafe { Pin::new_unchecked(ptr.as_mut()) } } + /// Accessor from a raw `vmctx` to `&vm::Instance`, given a store. + /// + /// This is like the above `sibling_vmctx{,_mut}` accessors, but + /// takes the store explicitly rather than inferring it from an + /// existing instance in the store. + /// + /// # Safety + /// + /// The `vmctx` pointer must be a valid vmctx from an active + /// instance that belongs to the given `store`. + #[inline] + pub unsafe fn from_vmctx<'a>( + _store: &'a StoreOpaque, + vmctx: NonNull, + ) -> &'a Instance { + // SAFETY: The validity of this `byte_sub` relies on `vmctx` + // being a valid allocation which is itself a contract of this + // function. Likewise, the `.as_ref()` converts a valid `*mut + // Instance` to a `&Instance`. 
+ unsafe { + vmctx + .byte_sub(mem::size_of::()) + .cast::() + .as_ref() + } + } + pub(crate) fn env_module(&self) -> &Arc { self.runtime_info.env_module() } diff --git a/crates/wasmtime/src/runtime/vm/libcalls.rs b/crates/wasmtime/src/runtime/vm/libcalls.rs index f056e1132489..d1ff7d84b8c0 100644 --- a/crates/wasmtime/src/runtime/vm/libcalls.rs +++ b/crates/wasmtime/src/runtime/vm/libcalls.rs @@ -215,7 +215,7 @@ fn memory_grow( mut instance: Pin<&mut Instance>, delta: u64, memory_index: u32, -) -> Result, TrapReason> { +) -> Result> { let memory_index = DefinedMemoryIndex::from_u32(memory_index); let module = instance.env_module(); let page_size_log2 = module.memories[module.memory_index(memory_index)].page_size_log2; @@ -1661,19 +1661,22 @@ fn trap( )) } -fn raise(_store: &mut dyn VMStore, _instance: Pin<&mut Instance>) { +fn raise(store: &mut dyn VMStore, _instance: Pin<&mut Instance>) { // SAFETY: this is only called from compiled wasm so we know that wasm has // already been entered. It's a dynamic safety precondition that the trap // information has already been arranged to be present. #[cfg(has_host_compiler_backend)] unsafe { - crate::runtime::vm::traphandlers::raise_preexisting_trap() + crate::runtime::vm::traphandlers::raise_preexisting_trap(store) } // When Cranelift isn't in use then this is an unused libcall for Pulley, so // just insert a stub to catch bugs if it's accidentally called. #[cfg(not(has_host_compiler_backend))] - unreachable!() + { + let _ = store; + unreachable!() + } } // Builtins for continuations. 
These are thin wrappers around the @@ -1685,8 +1688,28 @@ fn cont_new( func: *mut u8, param_count: u32, result_count: u32, -) -> Result, TrapReason> { +) -> Result> { let ans = crate::vm::stack_switching::cont_new(store, instance, func, param_count, result_count)?; Ok(Some(AllocationSize(ans.cast::() as usize))) } + +#[cfg(feature = "gc")] +fn get_instance_id(_store: &mut dyn VMStore, instance: Pin<&mut Instance>) -> u32 { + instance.id().as_u32() +} + +#[cfg(feature = "gc")] +fn throw_ref( + store: &mut dyn VMStore, + _instance: Pin<&mut Instance>, + exnref: u32, +) -> Result<(), TrapReason> { + let exnref = VMGcRef::from_raw_u32(exnref).ok_or_else(|| Trap::NullReference)?; + let exnref = store.unwrap_gc_store_mut().clone_gc_ref(&exnref); + let exnref = exnref + .into_exnref(&*store.unwrap_gc_store().gc_heap) + .expect("gc ref should be an exception object"); + store.set_pending_exception(exnref); + Err(TrapReason::Exception) +} diff --git a/crates/wasmtime/src/runtime/vm/stack_switching.rs b/crates/wasmtime/src/runtime/vm/stack_switching.rs index c07430cf9b2a..c12dd1373ad8 100644 --- a/crates/wasmtime/src/runtime/vm/stack_switching.rs +++ b/crates/wasmtime/src/runtime/vm/stack_switching.rs @@ -306,7 +306,7 @@ pub fn cont_new( func: *mut u8, param_count: u32, result_count: u32, -) -> Result<*mut VMContRef, crate::vm::TrapReason> { +) -> anyhow::Result<*mut VMContRef> { let caller_vmctx = instance.vmctx(); let stack_size = store.engine().config().async_stack_size; diff --git a/crates/wasmtime/src/runtime/vm/throw.rs b/crates/wasmtime/src/runtime/vm/throw.rs new file mode 100644 index 000000000000..a6cf333a949a --- /dev/null +++ b/crates/wasmtime/src/runtime/vm/throw.rs @@ -0,0 +1,131 @@ +//! Exception-throw logic for Wasm exceptions. 
+ +use core::ptr::NonNull; + +use wasmtime_environ::TagIndex; +use wasmtime_unwinder::{Frame, ThrowAction}; + +use super::{VMContext, VMStore}; +use crate::{store::AutoAssertNoGc, vm::Instance}; + +/// Compute the target of the pending exception on the store. +/// +/// # Safety +/// +/// The stored last-exit state in `store` either must be valid, or +/// must have a zeroed exit FP if no Wasm is on the stack. +pub unsafe fn compute_throw_action(store: &mut dyn VMStore) -> ThrowAction { + let mut nogc = AutoAssertNoGc::new(store.store_opaque_mut()); + + // Get the tag identity relative to the store. + + // Temporarily take, to avoid borrowing issues. + let exnref = nogc + .take_pending_exception() + .expect("Only invoked when an exception is pending"); + let (throwing_tag_instance_id, throwing_tag_defined_tag_index) = + exnref.tag(&mut nogc).expect("cannot read tag"); + nogc.set_pending_exception(exnref); + log::trace!( + "throwing: tag defined in instance {throwing_tag_instance_id:?} defined-tag {throwing_tag_defined_tag_index:?}" + ); + + // Get the state needed for a stack walk. + let (exit_pc, exit_trampoline_fp, entry_fp) = unsafe { + ( + *nogc.vm_store_context().last_wasm_exit_pc.get(), + *nogc.vm_store_context().last_wasm_exit_trampoline_fp.get(), + *nogc.vm_store_context().last_wasm_entry_fp.get(), + ) + }; + + // Early out: if there is no exit FP -- which can happen if a host + // func, wrapped up as a `Func`, is called directly via + // `Func::call` -- then the only possible action we can take is + // `None` (i.e., no handler, unwind to entry from host). + if exit_trampoline_fp == 0 { + return ThrowAction::None; + } + + // Walk the stack, looking up the module with each PC, and using + // that module to resolve local tag indices into (instance, tag) + // tuples. 
+ let handler_lookup = |frame: &Frame| -> Option { + log::trace!( + "exception-throw stack walk: frame at FP={:x} SP={:x} PC={:x}", + frame.fp(), + frame.sp().unwrap(), + frame.pc() + ); + let module = nogc.modules().lookup_module_by_pc(frame.pc())?; + let base = module.code_object().code_memory().text().as_ptr() as usize; + let rel_pc = u32::try_from(frame.pc().wrapping_sub(base)).expect("Module larger than 4GiB"); + let et = module.exception_table(); + for handler in et.lookup_pc(rel_pc) { + log::trace!("-> checking handler: {handler:?}"); + let is_match = match handler.tag { + // Catch-all/default handler. Always come last in sequence. + None => true, + Some(module_local_tag_index) => { + let frame_vmctx = unsafe { + frame + .read_slot( + usize::try_from( + handler + .context_sp_offset + .expect("dynamic context not present for handler record"), + ) + .unwrap(), + ) + .unwrap() + }; + log::trace!("-> read vmctx from frame: {frame_vmctx:x}"); + let frame_vmctx = + NonNull::new(frame_vmctx as *mut VMContext).expect("null vmctx in frame"); + + // SAFETY: we use `Instance::from_vmctx` to get an + // `&Instance` from a raw vmctx we read off the + // stack frame. That method's safety requirements + // are that the `vmctx` is a valid vmctx belonging + // to an instance in the store (`nogc`). This is + // satisfied because every Wasm frame in this + // activation must belong to an instance in this + // store: we do not permit cross-store calls + // without exiting through host code. 
+ let (handler_tag_instance, handler_tag_index) = unsafe { + let store_id = nogc.id(); + let instance = Instance::from_vmctx(&nogc, frame_vmctx); + let tag = instance + .get_exported_tag(store_id, TagIndex::from_u32(module_local_tag_index)); + tag.to_raw_indices() + }; + log::trace!( + "-> handler's tag {module_local_tag_index:?} resolves to instance {handler_tag_instance:?} defined-tag {handler_tag_index:?}" + ); + + handler_tag_instance == throwing_tag_instance_id + && handler_tag_index == throwing_tag_defined_tag_index + } + }; + if is_match { + return Some(base.wrapping_add( + usize::try_from(handler.handler_offset).expect("Module larger than usize"), + )); + } + } + None + }; + let unwinder = nogc.unwinder(); + let action = unsafe { + wasmtime_unwinder::compute_throw_action( + unwinder, + handler_lookup, + exit_pc, + exit_trampoline_fp, + entry_fp, + ) + }; + + log::trace!("throw action: {action:?}"); + action +} diff --git a/crates/wasmtime/src/runtime/vm/traphandlers.rs b/crates/wasmtime/src/runtime/vm/traphandlers.rs index a5c659b4c9cc..d27c10a19870 100644 --- a/crates/wasmtime/src/runtime/vm/traphandlers.rs +++ b/crates/wasmtime/src/runtime/vm/traphandlers.rs @@ -15,6 +15,8 @@ mod signals; #[cfg(all(has_native_signals))] pub use self::signals::*; +#[cfg(feature = "gc")] +use crate::ThrownException; use crate::runtime::module::lookup_code; use crate::runtime::store::{ExecutorRef, StoreOpaque}; use crate::runtime::vm::sys::traphandlers; @@ -62,20 +64,27 @@ fn lazy_per_thread_init() { traphandlers::lazy_per_thread_init(); } -/// Raises a preexisting trap and unwinds. +/// Raises a preexisting trap or exception and unwinds. +/// +/// If the preexisting state has registered a trap, this function will +/// execute the `longjmp` to make its way back to the original +/// `setjmp` performed when Wasm was entered. 
If the state has +/// registered an exception, this function will perform the unwind +/// action registered: either resetting PC, FP, and SP to the handler +/// in the middle of the Wasm activation on the stack, or `longjmp` +/// back to the entry from the host, if the exception is uncaught. /// -/// This function will execute the `longjmp` to make its way back to the -/// original `setjmp` performed when wasm was entered. This is currently -/// only called from the `raise` builtin of Wasmtime. This builtin is only used -/// when the host returns back to wasm and indicates that a trap should be -/// raised. In this situation the host has already stored trap information -/// within the `CallThreadState` and this is the low-level operation to actually -/// perform an unwind. +/// This is currently only called from the `raise` builtin of +/// Wasmtime. This builtin is only used when the host returns back to +/// wasm and indicates that a trap or exception should be raised. In +/// this situation the host has already stored trap or exception +/// information within the `CallThreadState` and this is the low-level +/// operation to actually perform an unwind. /// -/// This function won't be use with Pulley, for example, as the interpreter +/// This function won't be used with Pulley, for example, as the interpreter /// halts differently than native code. Additionally one day this will ideally /// be implemented by Cranelift itself without need of a libcall when Cranelift -/// implements the exception handling proposal for example. +/// implements setjmp and longjmp operators itself. /// /// # Safety /// @@ -83,8 +92,8 @@ fn lazy_per_thread_init() { /// have been previously called. Additionally no Rust destructors can be on the /// stack. They will be skipped and not executed. #[cfg(has_host_compiler_backend)] -pub(super) unsafe fn raise_preexisting_trap() -> ! 
{ - tls::with(|info| unsafe { info.unwrap().unwind() }) +pub(super) unsafe fn raise_preexisting_trap(store: &mut dyn VMStore) -> ! { + tls::with(|info| unsafe { info.unwrap().unwind(store) }) } /// Invokes the closure `f` and handles any error/panic/trap that happens @@ -122,9 +131,9 @@ where // return value is always provided and if unwind information is provided // (e.g. `ret` is a "false"-y value) then it's recorded in TLS for the // unwind operation that's about to happen from Cranelift-generated code. - let (ret, unwind) = R::maybe_catch_unwind(|| f(store)); + let (ret, unwind) = R::maybe_catch_unwind(store, |store| f(store)); if let Some(unwind) = unwind { - tls::with(|info| info.unwrap().record_unwind(unwind)); + tls::with(|info| info.unwrap().record_unwind(store, unwind)); } ret } @@ -156,7 +165,10 @@ pub trait HostResult { /// back to wasm (which should be soon after calling this through /// `catch_unwind_and_record_trap`) then wasm will very quickly turn around /// and initiate an unwind (currently through `raise_preexisting_trap`). - fn maybe_catch_unwind(f: impl FnOnce() -> Self) -> (Self::Abi, Option); + fn maybe_catch_unwind( + store: &mut dyn VMStore, + f: impl FnOnce(&mut dyn VMStore) -> Self, + ) -> (Self::Abi, Option); } // Base case implementations that do not catch unwinds. These are for libcalls @@ -169,8 +181,11 @@ macro_rules! host_result_no_catch { $( impl HostResult for $t { type Abi = $t; - fn maybe_catch_unwind(f: impl FnOnce() -> $t) -> ($t, Option) { - (f(), None) + fn maybe_catch_unwind( + store: &mut dyn VMStore, + f: impl FnOnce(&mut dyn VMStore) -> $t, + ) -> ($t, Option) { + (f(store), None) } } )* @@ -192,8 +207,11 @@ host_result_no_catch! 
{ impl HostResult for NonNull { type Abi = *mut u8; - fn maybe_catch_unwind(f: impl FnOnce() -> Self) -> (*mut u8, Option) { - (f().as_ptr(), None) + fn maybe_catch_unwind( + store: &mut dyn VMStore, + f: impl FnOnce(&mut dyn VMStore) -> Self, + ) -> (*mut u8, Option) { + (f(store).as_ptr(), None) } } @@ -219,11 +237,14 @@ where { type Abi = T::Abi; - fn maybe_catch_unwind(f: impl FnOnce() -> Result) -> (T::Abi, Option) { + fn maybe_catch_unwind( + store: &mut dyn VMStore, + f: impl FnOnce(&mut dyn VMStore) -> Result, + ) -> (T::Abi, Option) { // First prepare the closure `f` as something that'll be invoked to // generate the return value of this function. This is the // conditionally, below, passed to `catch_unwind`. - let f = move || match f() { + let f = move || match f(store) { Ok(ret) => (ret.into_abi(), None), Err(reason) => (T::SENTINEL, Some(UnwindReason::Trap(reason.into()))), }; @@ -331,7 +352,8 @@ pub struct Trap { pub coredumpstack: Option, } -/// Enumeration of different methods of raising a trap. +/// Enumeration of different methods of raising a trap (or a sentinel +/// for an exception). #[derive(Debug)] pub enum TrapReason { /// A user-raised trap through `raise_user_trap`. @@ -362,11 +384,27 @@ pub enum TrapReason { /// A trap raised from a wasm libcall Wasm(wasmtime_environ::Trap), + + /// An exception. + /// + /// Note that internally, exceptions are rooted on the Store, while + /// when crossing the public API, exceptions are held in a + /// `wasmtime::Exception` which contains a boxed root and implements + /// `Error`. This choice is intentional, to keep the internal + /// implementation lightweight and ensure the types represent only + /// allowable states. 
+ #[cfg(feature = "gc")] + Exception, } impl From for TrapReason { - fn from(err: Error) -> Self { - TrapReason::User(err) + fn from(error: Error) -> Self { + #[cfg(feature = "gc")] + if error.is::() { + return TrapReason::Exception; + } + + TrapReason::User(error) } } @@ -387,7 +425,7 @@ pub unsafe fn catch_traps( store: &mut StoreContextMut<'_, T>, old_state: &mut EntryStoreContext, mut closure: F, -) -> Result<(), Box> +) -> Result<()> where F: FnMut(NonNull, Option>) -> bool, { @@ -433,16 +471,39 @@ where }, }); - return match result { + match result { Ok(x) => Ok(x), - Err((UnwindReason::Trap(reason), backtrace, coredumpstack)) => Err(Box::new(Trap { - reason, + #[cfg(feature = "gc")] + Err(UnwindState::UnwindToHost { + reason: UnwindReason::Trap(TrapReason::Exception), + backtrace: _, + coredump_stack: _, + }) => Err(ThrownException.into()), + Err(UnwindState::UnwindToHost { + reason: UnwindReason::Trap(reason), backtrace, - coredumpstack, - })), + coredump_stack, + }) => Err(crate::trap::from_runtime_box( + store.0, + Box::new(Trap { + reason, + backtrace, + coredumpstack: coredump_stack, + }), + )), #[cfg(all(feature = "std", panic = "unwind"))] - Err((UnwindReason::Panic(panic), _, _)) => std::panic::resume_unwind(panic), - }; + Err(UnwindState::UnwindToHost { + reason: UnwindReason::Panic(panic), + .. + }) => std::panic::resume_unwind(panic), + #[cfg(feature = "gc")] + Err(UnwindState::UnwindToWasm { .. }) => { + unreachable!("We should not have returned to the host with an UnwindToWasm state"); + } + Err(UnwindState::None) => { + unreachable!("We should not have gotten an error with no unwind state"); + } + } } // Module to hide visibility of the `CallThreadState::prev` field and force @@ -452,6 +513,46 @@ mod call_thread_state { use crate::EntryStoreContext; use crate::runtime::vm::{Unwind, VMStackChain}; + /// Queued-up unwinding on the CallThreadState, ready to be + /// enacted by `unwind()`. 
+ /// + /// This represents either a request to unwind to the entry point + /// from host (via longjmp), with associated data; or a request to + /// unwind into the middle of the Wasm action, e.g. when an + /// exception is caught. + pub enum UnwindState { + /// Unwind all the way to the entry from host to Wasm, using + /// `longjmp` to the `jmp_buf` on the `CallThreadState`. + UnwindToHost { + reason: UnwindReason, + backtrace: Option, + coredump_stack: Option, + }, + /// Unwind into Wasm. The exception destination has been + /// resolved. Note that the payload value is still not + /// specified, because it must remain rooted on the Store + /// until `unwind()` actually takes the value. The first + /// payload word in the underlying exception ABI is used to + /// send the raw `VMExnRef`. + #[cfg(feature = "gc")] + #[cfg_attr( + not(has_host_compiler_backend), + allow(dead_code, reason = "Unwind not yet implemented for Pulley") + )] + UnwindToWasm { pc: usize, fp: usize, sp: usize }, + /// Do not unwind. + None, + } + + impl UnwindState { + pub(super) fn is_none(&self) -> bool { + match self { + Self::None => true, + _ => false, + } + } + } + /// Temporary state stored on the stack which is registered in the `tls` /// module below for calls into wasm. /// @@ -474,7 +575,14 @@ mod call_thread_state { /// interior mutability here since that only gives access to /// `&CallThreadState`. pub struct CallThreadState { - pub(super) unwind: Cell, Option)>>, + /// Unwind state set when initiating an unwind and read when + /// the control transfer occurs (after the `longjmp` point is + /// reached for host-code destinations and right when + /// performing the jump for Wasm-code destinations). + pub(super) unwind: Cell, + /// Resume point established by `setjmp`, used when unwinding + /// all the way across the Wasm activation back to the entry + /// from host code. Traps and uncaught exceptions use this. 
pub(super) jmp_buf: Cell<*const u8>, #[cfg(all(has_native_signals))] pub(super) signal_handler: Option<*const SignalHandler>, @@ -502,7 +610,7 @@ mod call_thread_state { fn drop(&mut self) { // Unwind information should not be present as it should have // already been processed. - debug_assert!(self.unwind.replace(None).is_none()); + debug_assert!(self.unwind.replace(UnwindState::None).is_none()); } } @@ -515,7 +623,7 @@ mod call_thread_state { old_state: *mut EntryStoreContext, ) -> CallThreadState { CallThreadState { - unwind: Cell::new(None), + unwind: Cell::new(UnwindState::None), unwinder: store.unwinder(), jmp_buf: Cell::new(ptr::null()), #[cfg(all(has_native_signals))] @@ -530,8 +638,16 @@ mod call_thread_state { } /// Get the saved FP upon exit from Wasm for the previous `CallThreadState`. + /// + /// # Safety + /// + /// Requires that the saved last Wasm trampoline FP points to + /// a valid trampoline frame, or is null. pub unsafe fn old_last_wasm_exit_fp(&self) -> usize { - unsafe { (&*self.old_state).last_wasm_exit_fp } + let trampoline_fp = unsafe { (&*self.old_state).last_wasm_exit_trampoline_fp }; + // SAFETY: `trampoline_fp` is either a valid FP from an + // active trampoline frame or is null. + unsafe { VMStoreContext::wasm_exit_fp_from_trampoline_fp(trampoline_fp) } } /// Get the saved PC upon exit from Wasm for the previous `CallThreadState`. 
@@ -605,8 +721,8 @@ mod call_thread_state { unsafe { let cx = self.vm_store_context.as_ref(); swap( - &cx.last_wasm_exit_fp, - &mut (*self.old_state).last_wasm_exit_fp, + &cx.last_wasm_exit_trampoline_fp, + &mut (*self.old_state).last_wasm_exit_trampoline_fp, ); swap( &cx.last_wasm_exit_pc, @@ -623,18 +739,27 @@ mod call_thread_state { } pub use call_thread_state::*; +#[cfg(feature = "gc")] +use super::compute_throw_action; + pub enum UnwindReason { #[cfg(all(feature = "std", panic = "unwind"))] Panic(Box), Trap(TrapReason), } +impl From for UnwindReason +where + E: Into, +{ + fn from(value: E) -> UnwindReason { + UnwindReason::Trap(value.into()) + } +} + impl CallThreadState { #[inline] - fn with( - mut self, - closure: impl FnOnce(&CallThreadState) -> bool, - ) -> Result<(), (UnwindReason, Option, Option)> { + fn with(mut self, closure: impl FnOnce(&CallThreadState) -> bool) -> Result<(), UnwindState> { let succeeded = tls::set(&mut self, |me| closure(me)); if succeeded { Ok(()) @@ -644,8 +769,8 @@ impl CallThreadState { } #[cold] - fn read_unwind(&self) -> (UnwindReason, Option, Option) { - self.unwind.replace(None).unwrap() + fn read_unwind(&self) -> UnwindState { + self.unwind.replace(UnwindState::None) } /// Records the unwind information provided within this `CallThreadState`, @@ -662,35 +787,73 @@ impl CallThreadState { /// /// Panics if unwind information has already been recorded as that should /// have been processed first. - fn record_unwind(&self, reason: UnwindReason) { + fn record_unwind(&self, store: &mut dyn VMStore, reason: UnwindReason) { if cfg!(debug_assertions) { - let prev = self.unwind.replace(None); + let prev = self.unwind.replace(UnwindState::None); assert!(prev.is_none()); } - let (backtrace, coredump) = match &reason { - // Panics don't need backtraces. 
There is nowhere to attach the - // hypothetical backtrace to and it doesn't really make sense to try - // in the first place since this is a Rust problem rather than a - // Wasm problem. + let state = match reason { #[cfg(all(feature = "std", panic = "unwind"))] - UnwindReason::Panic(_) => (None, None), + UnwindReason::Panic(err) => { + // Panics don't need backtraces. There is nowhere to attach the + // hypothetical backtrace to and it doesn't really make sense to try + // in the first place since this is a Rust problem rather than a + // Wasm problem. + UnwindState::UnwindToHost { + reason: UnwindReason::Panic(err), + backtrace: None, + coredump_stack: None, + } + } + // An unwind due to an already-set pending exception + // triggers the handler-search stack-walk. We store the + // resolved handler if one exists. In either case, the + // exception remains rooted in the Store until we actually + // perform the unwind, and then gets taken and becomes the + // payload at that point. + #[cfg(feature = "gc")] + UnwindReason::Trap(TrapReason::Exception) => { + // SAFETY: we are invoking `compute_throw()` while + // Wasm is on the stack and we have re-entered via a + // trampoline, as required by its stack-walking logic. + let action = unsafe { compute_throw_action(store) }; + match action { + wasmtime_unwinder::ThrowAction::Handler { pc, sp, fp } => { + UnwindState::UnwindToWasm { pc, sp, fp } + } + wasmtime_unwinder::ThrowAction::None => UnwindState::UnwindToHost { + reason: UnwindReason::Trap(TrapReason::Exception), + backtrace: None, + coredump_stack: None, + }, + } + } // And if we are just propagating an existing trap that already has // a backtrace attached to it, then there is no need to capture a // new backtrace either. 
UnwindReason::Trap(TrapReason::User(err)) if err.downcast_ref::().is_some() => { - (None, None) + UnwindState::UnwindToHost { + reason: UnwindReason::Trap(TrapReason::User(err)), + backtrace: None, + coredump_stack: None, + } } UnwindReason::Trap(trap) => { log::trace!("Capturing backtrace and coredump for {trap:?}"); - ( - self.capture_backtrace(self.vm_store_context.as_ptr(), None), - self.capture_coredump(self.vm_store_context.as_ptr(), None), - ) + UnwindState::UnwindToHost { + reason: UnwindReason::Trap(trap), + backtrace: self.capture_backtrace(store.vm_store_context_mut(), None), + coredump_stack: self.capture_coredump(store.vm_store_context_mut(), None), + } } }; - self.unwind.set(Some((reason, backtrace, coredump))); + + // Avoid unused-variable warning in non-exceptions/GC build. + let _ = store; + + self.unwind.set(state); } /// Helper function to perform an actual unwinding operation. @@ -704,11 +867,43 @@ impl CallThreadState { /// called. Additionally this isn't safe as it will skip all Rust /// destructors on the stack, if there are any. #[cfg(has_host_compiler_backend)] - unsafe fn unwind(&self) -> ! { - debug_assert!(!self.jmp_buf.get().is_null()); - debug_assert!(self.jmp_buf.get() != CallThreadState::JMP_BUF_INTERPRETER_SENTINEL); - unsafe { - traphandlers::wasmtime_longjmp(self.jmp_buf.get()); + unsafe fn unwind(&self, store: &mut dyn VMStore) -> ! { + // Ensure used even in no-GC builds. + let _ = store; + + let unwind = self.unwind.replace(UnwindState::None); + match unwind { + UnwindState::UnwindToHost { .. } => { + // Keep the state around -- we will read it out again + // when we reach the entry-from-host side after the + // `longjmp`. 
+ self.unwind.set(unwind); + debug_assert!(!self.jmp_buf.get().is_null()); + debug_assert!(self.jmp_buf.get() != CallThreadState::JMP_BUF_INTERPRETER_SENTINEL); + unsafe { + traphandlers::wasmtime_longjmp(self.jmp_buf.get()); + } + } + #[cfg(feature = "gc")] + UnwindState::UnwindToWasm { pc, fp, sp } => { + // Take the pending exception at this time and use it as payload. + let payload1 = usize::try_from( + store + .take_pending_exception() + .unwrap() + .as_gc_ref() + .as_raw_u32(), + ) + .expect("GC ref does not fit in usize"); + // We only use one of the payload words. + let payload2 = 0; + unsafe { + wasmtime_unwinder::resume_to_exception_handler(pc, sp, fp, payload1, payload2); + } + } + UnwindState::None => { + panic!("Attempting to unwind with no unwind state set."); + } } } @@ -808,16 +1003,16 @@ impl CallThreadState { trap: wasmtime_environ::Trap, ) { let backtrace = self.capture_backtrace(self.vm_store_context.as_ptr(), Some((pc, fp))); - let coredump = self.capture_coredump(self.vm_store_context.as_ptr(), Some((pc, fp))); - self.unwind.set(Some(( - UnwindReason::Trap(TrapReason::Jit { + let coredump_stack = self.capture_coredump(self.vm_store_context.as_ptr(), Some((pc, fp))); + self.unwind.set(UnwindState::UnwindToHost { + reason: UnwindReason::Trap(TrapReason::Jit { pc, faulting_addr, trap, }), backtrace, - coredump, - ))) + coredump_stack, + }); } } diff --git a/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs b/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs index 29fdb5dd6ae6..055c4f2aff69 100644 --- a/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs +++ b/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs @@ -160,7 +160,7 @@ impl Backtrace { // through the Wasm-to-host trampoline. 
None => unsafe { let pc = *(*vm_store_context).last_wasm_exit_pc.get(); - let fp = *(*vm_store_context).last_wasm_exit_fp.get(); + let fp = (*vm_store_context).last_wasm_exit_fp(); (pc, fp) }, }; diff --git a/crates/wasmtime/src/runtime/vm/vmcontext.rs b/crates/wasmtime/src/runtime/vm/vmcontext.rs index e089543da316..818b08940873 100644 --- a/crates/wasmtime/src/runtime/vm/vmcontext.rs +++ b/crates/wasmtime/src/runtime/vm/vmcontext.rs @@ -355,6 +355,14 @@ mod test_vmtag_import { offset_of!(VMTagImport, from), usize::from(offsets.vmtag_import_from()) ); + assert_eq!( + offset_of!(VMTagImport, vmctx), + usize::from(offsets.vmtag_import_vmctx()) + ); + assert_eq!( + offset_of!(VMTagImport, index), + usize::from(offsets.vmtag_import_index()) + ); } } @@ -1101,18 +1109,23 @@ pub struct VMStoreContext { /// The `VMMemoryDefinition` for this store's GC heap. pub gc_heap: VMMemoryDefinition, - /// The value of the frame pointer register when we last called from Wasm to - /// the host. + /// The value of the frame pointer register in the trampoline used + /// to call from Wasm to the host. /// - /// Maintained by our Wasm-to-host trampoline, and cleared just before - /// calling into Wasm in `catch_traps`. + /// Maintained by our Wasm-to-host trampoline, and cleared just + /// before calling into Wasm in `catch_traps`. /// /// This member is `0` when Wasm is actively running and has not called out /// to the host. /// - /// Used to find the start of a contiguous sequence of Wasm frames when - /// walking the stack. - pub last_wasm_exit_fp: UnsafeCell, + /// Used to find the start of a contiguous sequence of Wasm frames + /// when walking the stack. Note that we record the FP of the + /// *trampoline*'s frame, not the last Wasm frame, because we need + /// to know the SP (bottom of frame) of the last Wasm frame as + /// well in case we need to resume to an exception handler in that + /// frame. 
The FP of the last Wasm frame can be recovered by + /// loading the saved FP value at this FP address. + pub last_wasm_exit_trampoline_fp: UnsafeCell, /// The last Wasm program counter before we called from Wasm to the host. /// @@ -1137,7 +1150,7 @@ pub struct VMStoreContext { /// called from the host, then this member has the sentinel value of `-1 as /// usize`, meaning that this contiguous sequence of Wasm frames is the /// empty sequence, and it is not safe to dereference the - /// `last_wasm_exit_fp`. + /// `last_wasm_exit_trampoline_fp`. /// /// Used to find the end of a contiguous sequence of Wasm frames when /// walking the stack. @@ -1160,6 +1173,59 @@ pub struct VMStoreContext { pub async_guard_range: Range<*mut u8>, } +impl VMStoreContext { + /// From the current saved trampoline FP, get the FP of the last + /// Wasm frame. If the current saved trampoline FP is null, return + /// null. + /// + /// We store only the trampoline FP, because (i) we need the + /// trampoline FP, so we know the size (bottom) of the last Wasm + /// frame; and (ii) the last Wasm frame, just above the trampoline + /// frame, can be recovered via the FP chain. + /// + /// # Safety + /// + /// This function requires that the `last_wasm_exit_trampoline_fp` + /// field either points to an active trampoline frame or is a null + /// pointer. + pub(crate) unsafe fn last_wasm_exit_fp(&self) -> usize { + // SAFETY: the unsafe cell is safe to load (no other threads + // will be writing our store when we have control), and the + // helper function's safety condition is the same as ours. + unsafe { + let trampoline_fp = *self.last_wasm_exit_trampoline_fp.get(); + Self::wasm_exit_fp_from_trampoline_fp(trampoline_fp) + } + } + + /// From any saved trampoline FP, get the FP of the last Wasm + /// frame. If the given trampoline FP is null, return null. + /// + /// This differs from `last_wasm_exit_fp()` above in that it + /// allows accessing activations further up the stack as well, + /// e.g. 
via `CallThreadState::old_state`. + /// + /// # Safety + /// + /// This function requires that the provided FP value is valid, + /// and points to an active trampoline frame, or is null. + /// + /// This function depends on the invariant that on all supported + /// architectures, we store the previous FP value under the + /// current FP. This is a property of our ABI that we control and + /// ensure. + pub(crate) unsafe fn wasm_exit_fp_from_trampoline_fp(trampoline_fp: usize) -> usize { + if trampoline_fp != 0 { + // SAFETY: We require that trampoline_fp points to a valid + // frame, which will (by definition) contain an old FP value + // that we can load. + unsafe { *(trampoline_fp as *const usize) } + } else { + 0 + } + } +} + // The `VMStoreContext` type is a pod-type with no destructor, and we don't // access any fields from other threads, so add in these trait impls which are // otherwise not available due to the `fuel_consumed` and `epoch_deadline` @@ -1180,7 +1246,7 @@ impl Default for VMStoreContext { base: NonNull::dangling().into(), current_length: AtomicUsize::new(0), }, - last_wasm_exit_fp: UnsafeCell::new(0), + last_wasm_exit_trampoline_fp: UnsafeCell::new(0), last_wasm_exit_pc: UnsafeCell::new(0), last_wasm_entry_fp: UnsafeCell::new(0), stack_chain: UnsafeCell::new(VMStackChain::Absent), @@ -1224,8 +1290,8 @@ mod test_vmstore_context { usize::from(offsets.ptr.vmstore_context_gc_heap_current_length()) ); assert_eq!( - offset_of!(VMStoreContext, last_wasm_exit_fp), - usize::from(offsets.ptr.vmstore_context_last_wasm_exit_fp()) + offset_of!(VMStoreContext, last_wasm_exit_trampoline_fp), + usize::from(offsets.ptr.vmstore_context_last_wasm_exit_trampoline_fp()) ); assert_eq!( offset_of!(VMStoreContext, last_wasm_exit_pc), diff --git a/crates/wast/src/core.rs b/crates/wast/src/core.rs index c8edec98c36e..23b5b64dd8ce 100644 --- a/crates/wast/src/core.rs +++ b/crates/wast/src/core.rs @@ -85,13 +85,17 @@ pub fn match_val(store: &mut Store, actual: &Val, 
expected: &CoreConst) -> (Val::V128(a), CoreConst::V128(value)) => match_v128(a.as_u128(), value), // Null references, or blanket "any reference" assertions - (Val::FuncRef(None) | Val::ExternRef(None) | Val::AnyRef(None), CoreConst::RefNull) + ( + Val::FuncRef(None) | Val::ExternRef(None) | Val::AnyRef(None) | Val::ExnRef(None), + CoreConst::RefNull, + ) | (Val::FuncRef(_), CoreConst::FuncRef { value: None }) | (Val::AnyRef(_), CoreConst::AnyRef { value: None }) | (Val::ExternRef(_), CoreConst::ExternRef { value: None }) | (Val::AnyRef(None), CoreConst::NullRef) | (Val::FuncRef(None), CoreConst::NullFuncRef) | (Val::ExternRef(None), CoreConst::NullExternRef) + | (Val::ExnRef(None), CoreConst::NullExnRef) | ( Val::FuncRef(None), CoreConst::FuncRef { @@ -109,6 +113,12 @@ pub fn match_val(store: &mut Store, actual: &Val, expected: &CoreConst) -> CoreConst::ExternRef { value: Some(json_from_wast::ExternRef::Null), }, + ) + | ( + Val::ExnRef(None), + CoreConst::ExnRef { + value: Some(json_from_wast::ExnRef::Null), + }, ) => Ok(()), // Ideally we'd compare the actual index, but Wasmtime doesn't expose diff --git a/crates/wast/src/wast.rs b/crates/wast/src/wast.rs index 35eee9ea99a8..70a366200f65 100644 --- a/crates/wast/src/wast.rs +++ b/crates/wast/src/wast.rs @@ -479,6 +479,21 @@ where bail!("expected '{}', got '{}'", expected, actual) } + fn assert_exception(&mut self, result: Outcome) -> Result<()> { + match result { + Outcome::Ok(values) => bail!("expected exception, got {values:?}"), + Outcome::Trap(err) if err.is::() => { + // Discard the thrown exception. + let _ = self + .store + .take_pending_exception() + .expect("there should be a pending exception on the store"); + Ok(()) + } + Outcome::Trap(err) => bail!("expected exception, got {err:?}"), + } + } + /// Run a wast script from a byte buffer. 
pub fn run_wast(&mut self, filename: &str, wast: &[u8]) -> Result<()> { let wast = str::from_utf8(wast)?; @@ -676,7 +691,10 @@ where bail!("assert_unlinkable: expected {text}, got {error_message}",) } } - AssertException { .. } => bail!("unimplemented assert_exception"), + AssertException { line: _, action } => { + let result = self.perform_action(&action)?; + self.assert_exception(result)?; + } Thread { name, diff --git a/docs/stability-tiers.md b/docs/stability-tiers.md index 7c6e9b7e2a71..22f002b5fc8d 100644 --- a/docs/stability-tiers.md +++ b/docs/stability-tiers.md @@ -83,6 +83,7 @@ For explanations of what each tier means see below. | WebAssembly Proposal | [`gc`] | production quality | | WebAssembly Proposal | [`wide-arithmetic`] | Unstable wasm proposal | | WebAssembly Proposal | [`custom-page-sizes`] | Unstable wasm proposal | +| WebAssembly Proposal | [`exception-handling`] | fuzzing, dependence on GC | | Execution Backend | Pulley | More time fuzzing/baking | [`memory64`]: https://github.com/WebAssembly/memory64/blob/master/proposals/memory64/Overview.md @@ -228,7 +229,7 @@ here is: | [`gc`] | ✅ | ❌ | | [`wide-arithmetic`] | ✅ | ✅ | | [`custom-page-sizes`] | ✅ | ✅ | -| [`exception-handling`] | 🚧 | ❌ | +| [`exception-handling`] | ✅ | ❌ | | [`stack-switching`] | 🚧 | ❌ | ##### aarch64 @@ -253,7 +254,7 @@ here is: | [`gc`] | ✅ | ❌ | | [`wide-arithmetic`] | ✅ | ❌ | | [`custom-page-sizes`] | ✅ | ✅ | -| [`exception-handling`] | 🚧 | ❌ | +| [`exception-handling`] | ✅ | ❌ | | [`stack-switching`] | ❌ | ❌ | ##### s390x @@ -278,7 +279,7 @@ here is: | [`gc`] | ✅ | ❌ | | [`wide-arithmetic`] | ✅ | ❌ | | [`custom-page-sizes`] | ✅ | ❌ | -| [`exception-handling`] | 🚧 | ❌ | +| [`exception-handling`] | ✅ | ❌ | | [`stack-switching`] | ❌ | ❌ | ##### riscv64 @@ -303,7 +304,7 @@ here is: | [`gc`] | ✅ | ❌ | | [`wide-arithmetic`] | ✅ | ❌ | | [`custom-page-sizes`] | ✅ | ❌ | -| [`exception-handling`] | 🚧 | ❌ | +| [`exception-handling`] | ✅ | ❌ | | [`stack-switching`] | ❌ | ❌ | ##### 
Pulley diff --git a/docs/stability-wasm-proposals.md b/docs/stability-wasm-proposals.md index e8612026da06..64ad6d739976 100644 --- a/docs/stability-wasm-proposals.md +++ b/docs/stability-wasm-proposals.md @@ -56,8 +56,8 @@ The emoji legend is: | [`gc`] [^6] | ✅ | ✅ | 🚧[^7] | 🚧[^8] | ✅ | ❌ | | [`wide-arithmetic`] | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | | [`custom-page-sizes`] | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | -| [`exception-handling`] [^10]| ✅ | ❌ | 🚧 | ❌ | 🚧 | ❌ | -| [`stack-switching`] [^11] | ❌ | 🚧 | 🚧 | ❌ | ❌ | ❌ | +| [`exception-handling`] | ✅ | ✅ | ✅ | ❌ | ✅ | ❌ | +| [`stack-switching`] [^10] | ❌ | 🚧 | 🚧 | ❌ | ❌ | ❌ | [^6]: There is also a [tracking issue](https://github.com/bytecodealliance/wasmtime/issues/5032) for the @@ -70,9 +70,7 @@ The emoji legend is: whole-module fuzz targets like `differential`, but we would like to additionally [extend the `table_ops` fuzz target to exercise more of the GC proposal](https://github.com/bytecodealliance/wasmtime/issues/10327). -[^10]: The exception-handling proposal is a work-in-progress being tracked - at [#3427](https://github.com/bytecodealliance/wasmtime/issues/3427) -[^11]: The stack-switching proposal is a work-in-progress being tracked +[^10]: The stack-switching proposal is a work-in-progress being tracked at [#9465](https://github.com/bytecodealliance/wasmtime/issues/9465). Currently the implementation is only for x86\_64 Linux. diff --git a/src/commands/objdump.rs b/src/commands/objdump.rs index cd9b5aa9c4be..f48bcc87e3db 100644 --- a/src/commands/objdump.rs +++ b/src/commands/objdump.rs @@ -15,6 +15,7 @@ use std::path::{Path, PathBuf}; use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}; use wasmtime::Engine; use wasmtime_environ::{FilePos, StackMap, Trap, obj}; +use wasmtime_unwinder::{ExceptionHandler, ExceptionTable}; /// A helper utility in wasmtime to explore the compiled object file format of /// a `*.cwasm` file. 
@@ -65,6 +66,10 @@ pub struct ObjdumpCommand { /// Whether or not to show information about stack maps. #[arg(long, require_equals = true, value_name = "true|false")] stack_maps: Option>, + + /// Whether or not to show information about exception tables. + #[arg(long, require_equals = true, value_name = "true|false")] + exception_tables: Option>, } fn optional_flag_with_default(flag: Option>, default: bool) -> bool { @@ -88,6 +93,10 @@ impl ObjdumpCommand { optional_flag_with_default(self.stack_maps, true) } + fn exception_tables(&self) -> bool { + optional_flag_with_default(self.exception_tables, true) + } + /// Executes the command. pub fn execute(self) -> Result<()> { // Setup stdout handling color options. Also build some variables used @@ -135,6 +144,12 @@ impl ObjdumpCommand { .and_then(|section| section.data().ok()) .and_then(|bytes| StackMap::iter(bytes)) .map(|i| (Box::new(i) as Box>).peekable()), + exception_tables: elf + .section_by_name(obj::ELF_WASMTIME_EXCEPTIONS) + .and_then(|section| section.data().ok()) + .and_then(|bytes| ExceptionTable::parse(bytes).ok()) + .map(|table| table.into_iter()) + .map(|i| (Box::new(i) as Box>).peekable()), objdump: &self, }; @@ -220,6 +235,65 @@ impl ObjdumpCommand { let inline_bytes = 9; let width = self.address_width; + // Collect any "decorations" or annotations for this + // instruction. This includes the address map, stack + // maps, exception handlers, etc. + // + // Once they're collected then we print them before or + // after the instruction attempting to use some + // unicode characters to make it easier to read/scan. + // + // Note that some decorations occur "before" an + // instruction: for example, exception handler entries + // logically occur at the return point after a call, + // so "before" the instruction following the call. 
+ let mut pre_decorations = Vec::new(); + let mut post_decorations = Vec::new(); + decorator.decorate(address, &mut pre_decorations, &mut post_decorations); + + let print_whitespace_to_decoration = |stdout: &mut StandardStream| -> Result<()> { + write!(stdout, "{:width$} ", "")?; + if self.bytes { + for _ in 0..inline_bytes + 1 { + write!(stdout, " ")?; + } + } + Ok(()) + }; + + let print_decorations = + |stdout: &mut StandardStream, decorations: Vec| -> Result<()> { + for (i, decoration) in decorations.iter().enumerate() { + print_whitespace_to_decoration(stdout)?; + let mut color = ColorSpec::new(); + color.set_fg(Some(Color::Cyan)); + stdout.set_color(&color)?; + let final_decoration = i == decorations.len() - 1; + if !final_decoration { + write!(stdout, "├")?; + } else { + write!(stdout, "╰")?; + } + for (i, line) in decoration.lines().enumerate() { + if i == 0 { + write!(stdout, "─╼ ")?; + } else { + print_whitespace_to_decoration(stdout)?; + if final_decoration { + write!(stdout, " ")?; + } else { + write!(stdout, "│ ")?; + } + } + writeln!(stdout, "{line}")?; + } + stdout.reset()?; + } + Ok(()) + }; + + print_decorations(&mut stdout, pre_decorations)?; + // Some instructions may disassemble to multiple lines, such as // `br_table` with Pulley. Handle separate lines per-instruction // here. @@ -285,52 +359,7 @@ impl ObjdumpCommand { } } - // And now finally after an instruction is printed try to - // collect any "decorations" or annotations for this - // instruction. This is for example the address map, stack maps, - // etc. - // - // Once they're collected then print them after the instruction - // attempting to use some unicode characters to make it easier - // to read/scan. 
- let mut decorations = Vec::new(); - decorator.decorate(address, &mut decorations); - - let print_whitespace_to_decoration = |stdout: &mut StandardStream| -> Result<()> { - write!(stdout, "{:width$} ", "")?; - if self.bytes { - for _ in 0..inline_bytes + 1 { - write!(stdout, " ")?; - } - } - Ok(()) - }; - for (i, decoration) in decorations.iter().enumerate() { - print_whitespace_to_decoration(&mut stdout)?; - let mut color = ColorSpec::new(); - color.set_fg(Some(Color::Cyan)); - stdout.set_color(&color)?; - let final_decoration = i == decorations.len() - 1; - if !final_decoration { - write!(stdout, "├")?; - } else { - write!(stdout, "╰")?; - } - for (i, line) in decoration.lines().enumerate() { - if i == 0 { - write!(stdout, "─╼ ")?; - } else { - print_whitespace_to_decoration(&mut stdout)?; - if final_decoration { - write!(stdout, " ")?; - } else { - write!(stdout, "│ ")?; - } - } - writeln!(stdout, "{line}")?; - } - stdout.reset()?; - } + print_decorations(&mut stdout, post_decorations)?; } } Ok(()) @@ -497,13 +526,15 @@ struct Decorator<'a> { addrmap: Option + 'a>>>, traps: Option + 'a>>>, stack_maps: Option)> + 'a>>>, + exception_tables: Option)> + 'a>>>, } impl Decorator<'_> { - fn decorate(&mut self, address: u64, list: &mut Vec) { - self.addrmap(address, list); - self.traps(address, list); - self.stack_maps(address, list); + fn decorate(&mut self, address: u64, pre_list: &mut Vec, post_list: &mut Vec) { + self.addrmap(address, post_list); + self.traps(address, post_list); + self.stack_maps(address, post_list); + self.exception_table(address, pre_list); } fn addrmap(&mut self, address: u64, list: &mut Vec) { @@ -558,4 +589,34 @@ impl Decorator<'_> { )); } } + + fn exception_table(&mut self, address: u64, list: &mut Vec) { + if !self.objdump.exception_tables() { + return; + } + let Some(exception_tables) = &mut self.exception_tables else { + return; + }; + while let Some((addr, handlers)) = + exception_tables.next_if(|(addr, _pos)| u64::from(*addr) <= 
address) + { + if u64::from(addr) != address { + continue; + } + for handler in &handlers { + let tag = match handler.tag { + Some(tag) => format!("tag={tag}"), + None => "default handler".to_string(), + }; + let context = match handler.context_sp_offset { + Some(offset) => format!("context at [SP+0x{offset:x}]"), + None => "no dynamic context".to_string(), + }; + list.push(format!( + "exception handler: {tag}, {context}, handler=0x{:x}", + handler.handler_offset + )); + } + } + } } diff --git a/tests/all/exceptions.rs b/tests/all/exceptions.rs new file mode 100644 index 000000000000..50bb9af37811 --- /dev/null +++ b/tests/all/exceptions.rs @@ -0,0 +1,210 @@ +use wasmtime::*; +use wasmtime_test_macros::wasmtime_test; + +#[wasmtime_test(wasm_features(exceptions))] +fn basic_throw(config: &mut Config) -> Result<()> { + let engine = Engine::new(config)?; + let mut store = Store::new(&engine, ()); + + let module = Module::new( + &engine, + r#" + (module + (tag $e0 (param i32 i64)) + + (func $throw (param i32 i64) + (throw $e0 (local.get 0) (local.get 1))) + + (func $catch (export "catch") (param i32 i64) (result i32 i64) + + (block $b (result i32 i64) + (try_table (result i32 i64) + (catch $e0 $b) + (call $throw (local.get 0) (local.get 1)) + (i32.const 42) + (i64.const 100))))) + "#, + )?; + + let instance = Instance::new(&mut store, &module, &[])?; + let func = instance.get_func(&mut store, "catch").unwrap(); + let mut results = [Val::I32(0), Val::I64(0)]; + func.call(&mut store, &[Val::I32(1), Val::I64(2)], &mut results[..])?; + assert!(matches!(results[0], Val::I32(1))); + assert!(matches!(results[1], Val::I64(2))); + + Ok(()) +} + +#[wasmtime_test(wasm_features(exceptions))] +fn dynamic_tags(config: &mut Config) -> Result<()> { + let engine = Engine::new(config)?; + let mut store = Store::new(&engine, ()); + + let module = Module::new( + &engine, + r#" + (module + (import "test" "e0" (tag $e0 (param i32 i64))) + (import "test" "e1" (tag $e1 (param i32 i64))) + + 
(func $throw_e1 (param i32 i64) + (throw $e1 (local.get 0) (local.get 1))) + + (func $catch (export "catch") (param i32 i64) (result i32 i64 i32) + (block $b1 (result i32 i64) + (block $b0 (result i32 i64) + (try_table (result i32 i64) + (catch $e0 $b0) + (catch $e1 $b1) + (call $throw_e1 (local.get 0) (local.get 1)) + (unreachable))) + (i32.const 0) + (return)) + (i32.const 1) + (return))) + "#, + )?; + + let functy = FuncType::new(&engine, [ValType::I32, ValType::I64], []); + let tagty = TagType::new(functy); + let tag0 = Tag::new(&mut store, &tagty)?; + let tag1 = Tag::new(&mut store, &tagty)?; + + // Instantiate with two different tags -- second catch-clause + // should match (on $e1). + let instance1 = Instance::new(&mut store, &module, &[Extern::Tag(tag0), Extern::Tag(tag1)])?; + let func1 = instance1.get_func(&mut store, "catch").unwrap(); + let mut results = [Val::I32(0), Val::I64(0), Val::I32(0)]; + func1.call(&mut store, &[Val::I32(1), Val::I64(2)], &mut results[..])?; + assert!(matches!(results[0], Val::I32(1))); + assert!(matches!(results[1], Val::I64(2))); + assert!(matches!(results[2], Val::I32(1))); + + // Instantiate with two imports of the same tag -- now first + // catch-clause should match (on $e0, since $e0 is an alias to + // $e1). 
+ let instance2 = Instance::new(&mut store, &module, &[Extern::Tag(tag0), Extern::Tag(tag0)])?; + let func2 = instance2.get_func(&mut store, "catch").unwrap(); + let mut results = [Val::I32(0), Val::I64(0), Val::I32(0)]; + func2.call(&mut store, &[Val::I32(1), Val::I64(2)], &mut results[..])?; + assert!(matches!(results[0], Val::I32(1))); + assert!(matches!(results[1], Val::I64(2))); + assert!(matches!(results[2], Val::I32(0))); + + Ok(()) +} + +#[wasmtime_test(wasm_features(exceptions))] +fn exception_escape_to_host(config: &mut Config) -> Result<()> { + let engine = Engine::new(config)?; + let mut store = Store::new(&engine, ()); + + let module = Module::new( + &engine, + r#" + (module + (import "test" "e0" (tag $e0 (param i32))) + + (func $throw (export "throw") + (throw $e0 (i32.const 42)))) + "#, + )?; + + let functy = FuncType::new(&engine, [ValType::I32], []); + let tagty = TagType::new(functy); + let tag = Tag::new(&mut store, &tagty)?; + let instance = Instance::new(&mut store, &module, &[Extern::Tag(tag)])?; + let func = instance.get_func(&mut store, "throw").unwrap(); + let mut results = []; + let result = func.call(&mut store, &[], &mut results[..]); + assert!(result.is_err()); + assert!(result.unwrap_err().is::()); + let exn = store.take_pending_exception().unwrap(); + let exntag = exn.tag(&mut store)?; + assert!(Tag::eq(&exntag, &tag, &store)); + + Ok(()) +} + +#[wasmtime_test(wasm_features(exceptions))] +fn exception_from_host(config: &mut Config) -> Result<()> { + let engine = Engine::new(config)?; + let mut store = Store::new(&engine, ()); + + let module = Module::new( + &engine, + r#" + (module + (import "test" "e0" (tag $e0 (param i32))) + (import "test" "f" (func $f (param i32))) + + (func $catch (export "catch") (result i32) + (block $b (result i32) + (try_table (result i32) (catch $e0 $b) + i32.const 42 + call $f + i32.const 0)))) + "#, + )?; + + let functy = FuncType::new(&engine, [ValType::I32], []); + let tagty = 
TagType::new(functy.clone()); + let exnty = ExnType::from_tag_type(&tagty).unwrap(); + let exnpre = ExnRefPre::new(&mut store, exnty); + let tag = Tag::new(&mut store, &tagty)?; + let extfunc = Func::new(&mut store, functy, move |mut caller, args, _rets| { + let exn = ExnRef::new( + &mut caller, + &exnpre, + &tag, + &[Val::I32(args[0].unwrap_i32())], + ) + .unwrap(); + caller.as_context_mut().throw(exn)?; + Ok(()) + }); + let instance = Instance::new( + &mut store, + &module, + &[Extern::Tag(tag), Extern::Func(extfunc)], + )?; + let func = instance.get_func(&mut store, "catch").unwrap(); + let mut results = [Val::null_any_ref()]; + func.call(&mut store, &[], &mut results[..])?; + assert_eq!(results[0].unwrap_i32(), 42); + + Ok(()) +} + +#[wasmtime_test(wasm_features(exceptions))] +fn exception_across_no_wasm(config: &mut Config) -> Result<()> { + let engine = Engine::new(config)?; + let mut store = Store::new(&engine, ()); + + let functy = FuncType::new(&engine, [ValType::I32], []); + let tagty = TagType::new(functy.clone()); + let exnty = ExnType::from_tag_type(&tagty).unwrap(); + let exnpre = ExnRefPre::new(&mut store, exnty); + let tag = Tag::new(&mut store, &tagty)?; + let extfunc = Func::new(&mut store, functy, move |mut caller, args, _rets| { + let exn = ExnRef::new( + &mut caller, + &exnpre, + &tag, + &[Val::I32(args[0].unwrap_i32())], + ) + .unwrap(); + caller.as_context_mut().throw(exn)?; + Ok(()) + }); + let mut results = []; + let result = extfunc.call(&mut store, &[Val::I32(42)], &mut results[..]); + assert!(result.is_err() && result.unwrap_err().downcast::().is_ok()); + let exn = store.take_pending_exception().unwrap(); + let exntag = exn.tag(&mut store)?; + assert!(Tag::eq(&exntag, &tag, &store)); + assert_eq!(exn.field(&mut store, 0)?.unwrap_i32(), 42); + + Ok(()) +} diff --git a/tests/all/main.rs b/tests/all/main.rs index b8ce683e93e9..d06392b74df2 100644 --- a/tests/all/main.rs +++ b/tests/all/main.rs @@ -13,6 +13,7 @@ mod custom_code_memory; mod 
debug; mod defaults; mod epoch_interruption; +mod exceptions; mod exnrefs; mod externals; mod fuel; diff --git a/tests/disas/epoch-interruption-x86.wat b/tests/disas/epoch-interruption-x86.wat index 0fa5f8821ddb..3d8e569d2a6a 100644 --- a/tests/disas/epoch-interruption-x86.wat +++ b/tests/disas/epoch-interruption-x86.wat @@ -28,12 +28,12 @@ ;; jae 0x64 ;; jmp 0x46 ;; 57: movq %r15, %rdi -;; callq 0xf6 +;; callq 0xf2 ;; jmp 0x46 ;; 64: movq 8(%r13), %rax ;; cmpq %rax, %r11 ;; jb 0x46 ;; 71: movq %r15, %rdi -;; callq 0xf6 +;; callq 0xf2 ;; jmp 0x46 ;; 7e: ud2 diff --git a/tests/disas/exceptions.wat b/tests/disas/exceptions.wat new file mode 100644 index 000000000000..fdb2d1758253 --- /dev/null +++ b/tests/disas/exceptions.wat @@ -0,0 +1,98 @@ +;;! target = "x86_64" +;;! test = "compile" +;;! flags = ["-Wexceptions=yes", "-Wgc=yes"] + +(module + (tag $e0 (param i32 i64)) + + (func $throw (param i32 i64) + (throw $e0 (local.get 0) (local.get 1))) + + (func $catch (export "catch") (param i32 i64) (result i32 i64) + + (block $b (result i32 i64) + (try_table (result i32 i64) + (catch $e0 $b) + (call $throw (local.get 0) (local.get 1)) + (i32.const 42) + (i64.const 100))))) +;; wasm[0]::function[0]::throw: +;; pushq %rbp +;; movq %rsp, %rbp +;; movq 8(%rdi), %r10 +;; movq 0x10(%r10), %r10 +;; addq $0x50, %r10 +;; cmpq %rsp, %r10 +;; ja 0xa2 +;; 19: subq $0x40, %rsp +;; movq %rbx, 0x10(%rsp) +;; movq %r12, 0x18(%rsp) +;; movq %r13, 0x20(%rsp) +;; movq %r14, 0x28(%rsp) +;; movq %r15, 0x30(%rsp) +;; movq %rdi, %r12 +;; movq %rcx, %r13 +;; movq %rdx, %r15 +;; callq 0x3c9 +;; movq %rax, %r14 +;; movl $0x4000000, %esi +;; movl $3, %edx +;; movl $0x30, %ecx +;; movl $8, %r8d +;; movq %r12, %rdi +;; callq 0x355 +;; movq 8(%r12), %r8 +;; movq 0x18(%r8), %r8 +;; movl %eax, %r9d +;; movq %r15, %rdx +;; movl %edx, 0x20(%r8, %r9) +;; movq %r13, %rcx +;; movq %rcx, 0x28(%r8, %r9) +;; movq %r14, %r10 +;; movl %r10d, 0x18(%r8, %r9) +;; movl $0, 0x1c(%r8, %r9) +;; movq %rax, %rsi +;; movq 
%r12, %rdi +;; movq %r12, (%rsp) +;; callq 0x405 +;; ud2 +;; ud2 +;; +;; wasm[0]::function[1]::catch: +;; pushq %rbp +;; movq %rsp, %rbp +;; movq 8(%rdi), %r10 +;; movq 0x10(%r10), %r10 +;; addq $0x50, %r10 +;; cmpq %rsp, %r10 +;; ja 0x14f +;; d9: subq $0x40, %rsp +;; movq %rbx, 0x10(%rsp) +;; movq %r12, 0x18(%rsp) +;; movq %r13, 0x20(%rsp) +;; movq %r14, 0x28(%rsp) +;; movq %r15, 0x30(%rsp) +;; movq %rdi, (%rsp) +;; movq (%rsp), %rsi +;; movq (%rsp), %rdi +;; callq 0 +;; ╰─╼ exception handler: tag=0, context at [SP+0x0], handler=0x116 +;; movl $0x2a, %eax +;; movl $0x64, %ecx +;; jmp 0x12d +;; 116: movq (%rsp), %rdi +;; movq 8(%rdi), %rcx +;; movq 0x18(%rcx), %rcx +;; movl %eax, %edx +;; movl 0x20(%rcx, %rdx), %eax +;; movq 0x28(%rcx, %rdx), %rcx +;; movq 0x10(%rsp), %rbx +;; movq 0x18(%rsp), %r12 +;; movq 0x20(%rsp), %r13 +;; movq 0x28(%rsp), %r14 +;; movq 0x30(%rsp), %r15 +;; addq $0x40, %rsp +;; movq %rbp, %rsp +;; popq %rbp +;; retq +;; 14f: ud2 diff --git a/tests/disas/gc/struct-new-stack-map.wat b/tests/disas/gc/struct-new-stack-map.wat index 98bb69b87d33..e2bc1a37416f 100644 --- a/tests/disas/gc/struct-new-stack-map.wat +++ b/tests/disas/gc/struct-new-stack-map.wat @@ -32,7 +32,7 @@ ;; movl $0x28, %ecx ;; movl $8, %r8d ;; movq %rdi, %r13 -;; callq 0x15f +;; callq 0x161 ;; movq 8(%r13), %r8 ;; ╰─╼ stack_map: frame_size=64, frame_offsets=[0] ;; movq 0x18(%r8), %r8 diff --git a/tests/disas/pulley/epoch-simple.wat b/tests/disas/pulley/epoch-simple.wat index 7853a0424631..83e9d1e58c35 100644 --- a/tests/disas/pulley/epoch-simple.wat +++ b/tests/disas/pulley/epoch-simple.wat @@ -14,5 +14,5 @@ ;; br_if_xulteq64 x7, x6, 0x9 // target = 0x26 ;; 24: pop_frame ;; ret -;; 26: call 0x8d // target = 0xb3 +;; 26: call 0x83 // target = 0xa9 ;; 2b: jump -0x7 // target = 0x24 diff --git a/tests/disas/riscv64-component-builtins-asm.wat b/tests/disas/riscv64-component-builtins-asm.wat index f482de9d1ecd..54992d202207 100644 --- a/tests/disas/riscv64-component-builtins-asm.wat 
+++ b/tests/disas/riscv64-component-builtins-asm.wat @@ -25,7 +25,7 @@ ;; beq a1, a3, 8 ;; .byte 0x00, 0x00, 0x00, 0x00 ;; ld a1, 0x10(a0) -;; ld a3, 0(s0) +;; mv a3, s0 ;; sd a3, 0x28(a1) ;; ld a3, 8(s0) ;; sd a3, 0x30(a1) diff --git a/tests/disas/riscv64-component-builtins.wat b/tests/disas/riscv64-component-builtins.wat index db5ccc3dbc16..98c3ffa79459 100644 --- a/tests/disas/riscv64-component-builtins.wat +++ b/tests/disas/riscv64-component-builtins.wat @@ -16,31 +16,30 @@ ;; ;; block0(v0: i64, v1: i64, v2: i32): ;; v3 = load.i32 notrap aligned little v0 -;; v17 = iconst.i32 0x706d_6f63 -;; v4 = icmp eq v3, v17 ; v17 = 0x706d_6f63 +;; v16 = iconst.i32 0x706d_6f63 +;; v4 = icmp eq v3, v16 ; v16 = 0x706d_6f63 ;; trapz v4, user1 ;; v5 = load.i64 notrap aligned v0+16 ;; v6 = get_frame_pointer.i64 -;; v7 = load.i64 notrap aligned v6 -;; store notrap aligned v7, v5+40 -;; v8 = get_return_address.i64 -;; store notrap aligned v8, v5+48 -;; v10 = load.i64 notrap aligned readonly v0+8 -;; v11 = load.i64 notrap aligned readonly v10+16 -;; v9 = iconst.i32 0 -;; v12 = call_indirect sig0, v11(v0, v9, v2) ; v9 = 0 -;; v13 = iconst.i64 -1 -;; v14 = icmp ne v12, v13 ; v13 = -1 -;; brif v14, block2, block1 +;; store notrap aligned v6, v5+40 +;; v7 = get_return_address.i64 +;; store notrap aligned v7, v5+48 +;; v9 = load.i64 notrap aligned readonly v0+8 +;; v10 = load.i64 notrap aligned readonly v9+16 +;; v8 = iconst.i32 0 +;; v11 = call_indirect sig0, v10(v0, v8, v2) ; v8 = 0 +;; v12 = iconst.i64 -1 +;; v13 = icmp ne v11, v12 ; v12 = -1 +;; brif v13, block2, block1 ;; ;; block1 cold: -;; v15 = load.i64 notrap aligned readonly v1+16 -;; v16 = load.i64 notrap aligned readonly v15+408 -;; call_indirect sig1, v16(v1) +;; v14 = load.i64 notrap aligned readonly v1+16 +;; v15 = load.i64 notrap aligned readonly v14+408 +;; call_indirect sig1, v15(v1) ;; trap user1 ;; ;; block2: -;; brif.i64 v12, block3, block4 +;; brif.i64 v11, block3, block4 ;; ;; block3: ;; jump block4 diff --git 
a/tests/disas/trunc.wat b/tests/disas/trunc.wat index 4eba4d8b4c34..1230649f1922 100644 --- a/tests/disas/trunc.wat +++ b/tests/disas/trunc.wat @@ -24,7 +24,7 @@ ;; jne 0x101 ;; 39: movq %r14, %rdi ;; movdqu (%rsp), %xmm0 -;; callq 0x224 +;; callq 0x221 ;; movabsq $13830554455654793216, %rax ;; movq %rax, %xmm6 ;; ucomisd %xmm0, %xmm6 @@ -55,27 +55,27 @@ ;; retq ;; d3: movl $6, %esi ;; d8: movq %r14, %rdi -;; db: callq 0x263 +;; db: callq 0x25d ;; e0: movq %r14, %rdi -;; e3: callq 0x2a6 +;; e3: callq 0x29d ;; e8: ud2 ;; ea: movl $6, %esi ;; ef: movq %r14, %rdi -;; f2: callq 0x263 +;; f2: callq 0x25d ;; f7: movq %r14, %rdi -;; fa: callq 0x2a6 +;; fa: callq 0x29d ;; ff: ud2 ;; 101: movl $8, %esi ;; 106: movq %r14, %rdi -;; 109: callq 0x263 +;; 109: callq 0x25d ;; 10e: movq %r14, %rdi -;; 111: callq 0x2a6 +;; 111: callq 0x29d ;; 116: ud2 ;; 118: xorl %esi, %esi ;; 11a: movq %r14, %rdi -;; 11d: callq 0x263 +;; 11d: callq 0x25d ;; 122: movq %r14, %rdi -;; 125: callq 0x2a6 +;; 125: callq 0x29d ;; 12a: ud2 ;; 12c: ud2 ;; 12e: ud2 diff --git a/tests/disas/trunc32.wat b/tests/disas/trunc32.wat index 064961cf2c63..c3025b5533c0 100644 --- a/tests/disas/trunc32.wat +++ b/tests/disas/trunc32.wat @@ -26,7 +26,7 @@ ;; jp 0xf6 ;; jne 0xf6 ;; 46: movq %r12, %rdi -;; callq 0x222 +;; callq 0x220 ;; movabsq $13830554455654793216, %r8 ;; movq %r8, %xmm1 ;; ucomisd %xmm0, %xmm1 @@ -56,27 +56,27 @@ ;; retq ;; c8: movl $6, %esi ;; cd: movq %r12, %rdi -;; d0: callq 0x261 +;; d0: callq 0x25c ;; d5: movq %r12, %rdi -;; d8: callq 0x2a4 +;; d8: callq 0x29c ;; dd: ud2 ;; df: movl $6, %esi ;; e4: movq %r12, %rdi -;; e7: callq 0x261 +;; e7: callq 0x25c ;; ec: movq %r12, %rdi -;; ef: callq 0x2a4 +;; ef: callq 0x29c ;; f4: ud2 ;; f6: movl $8, %esi ;; fb: movq %r12, %rdi -;; fe: callq 0x261 +;; fe: callq 0x25c ;; 103: movq %r12, %rdi -;; 106: callq 0x2a4 +;; 106: callq 0x29c ;; 10b: ud2 ;; 10d: xorl %esi, %esi ;; 10f: movq %r12, %rdi -;; 112: callq 0x261 +;; 112: callq 0x25c ;; 117: movq %r12, %rdi 
-;; 11a: callq 0x2a4 +;; 11a: callq 0x29c ;; 11f: ud2 ;; 121: ud2 ;; 123: ud2 diff --git a/tests/disas/winch/aarch64/call_indirect/call_indirect.wat b/tests/disas/winch/aarch64/call_indirect/call_indirect.wat index 44ee0f6c22eb..c5ffe288595c 100644 --- a/tests/disas/winch/aarch64/call_indirect/call_indirect.wat +++ b/tests/disas/winch/aarch64/call_indirect/call_indirect.wat @@ -85,7 +85,7 @@ ;; mov x0, x9 ;; mov x1, #0 ;; ldur w2, [x28] -;; bl #0x3f8 +;; bl #0x3f4 ;; e0: add x28, x28, #4 ;; mov sp, x28 ;; ldur x9, [x28, #0x14] @@ -153,7 +153,7 @@ ;; mov x0, x9 ;; mov x1, #0 ;; ldur w2, [x28, #0xc] -;; bl #0x3f8 +;; bl #0x3f4 ;; 1f0: add x28, x28, #0xc ;; mov sp, x28 ;; add x28, x28, #4 diff --git a/tests/disas/winch/aarch64/call_indirect/local_arg.wat b/tests/disas/winch/aarch64/call_indirect/local_arg.wat index 0c2eb73d3bb1..2272c814bdc8 100644 --- a/tests/disas/winch/aarch64/call_indirect/local_arg.wat +++ b/tests/disas/winch/aarch64/call_indirect/local_arg.wat @@ -91,7 +91,7 @@ ;; mov x0, x9 ;; mov x1, #0 ;; ldur w2, [x28] -;; bl #0x424 +;; bl #0x41c ;; 120: add x28, x28, #4 ;; mov sp, x28 ;; ldur x9, [x28, #0x14] diff --git a/tests/disas/winch/x64/atomic/notify/notify.wat b/tests/disas/winch/x64/atomic/notify/notify.wat index 2e26168cdcb5..581a89e20cf1 100644 --- a/tests/disas/winch/x64/atomic/notify/notify.wat +++ b/tests/disas/winch/x64/atomic/notify/notify.wat @@ -27,7 +27,7 @@ ;; movl $0, %esi ;; movq 8(%rsp), %rdx ;; movl 4(%rsp), %ecx -;; callq 0x17a +;; callq 0x177 ;; addq $4, %rsp ;; addq $0xc, %rsp ;; movq 8(%rsp), %r14 diff --git a/tests/disas/winch/x64/atomic/notify/notify_offset.wat b/tests/disas/winch/x64/atomic/notify/notify_offset.wat index 694b94095185..b6b123818857 100644 --- a/tests/disas/winch/x64/atomic/notify/notify_offset.wat +++ b/tests/disas/winch/x64/atomic/notify/notify_offset.wat @@ -28,7 +28,7 @@ ;; movl $0, %esi ;; movq 8(%rsp), %rdx ;; movl 4(%rsp), %ecx -;; callq 0x181 +;; callq 0x17e ;; addq $4, %rsp ;; addq $0xc, %rsp ;; movq 
8(%rsp), %r14 diff --git a/tests/disas/winch/x64/atomic/wait/wait32.wat b/tests/disas/winch/x64/atomic/wait/wait32.wat index 5256cd0af079..5c4ff0920869 100644 --- a/tests/disas/winch/x64/atomic/wait/wait32.wat +++ b/tests/disas/winch/x64/atomic/wait/wait32.wat @@ -30,7 +30,7 @@ ;; movq 0x18(%rsp), %rdx ;; movl 0x14(%rsp), %ecx ;; movq 0xc(%rsp), %r8 -;; callq 0x187 +;; callq 0x184 ;; addq $0xc, %rsp ;; addq $0x14, %rsp ;; movq 8(%rsp), %r14 diff --git a/tests/disas/winch/x64/atomic/wait/wait32_offset.wat b/tests/disas/winch/x64/atomic/wait/wait32_offset.wat index 97cca8b8e397..bbb4601c4867 100644 --- a/tests/disas/winch/x64/atomic/wait/wait32_offset.wat +++ b/tests/disas/winch/x64/atomic/wait/wait32_offset.wat @@ -34,7 +34,7 @@ ;; movq 0x18(%rsp), %rdx ;; movl 0x14(%rsp), %ecx ;; movq 0xc(%rsp), %r8 -;; callq 0x18e +;; callq 0x18b ;; addq $0xc, %rsp ;; addq $0x14, %rsp ;; movq 8(%rsp), %r14 diff --git a/tests/disas/winch/x64/atomic/wait/wait64.wat b/tests/disas/winch/x64/atomic/wait/wait64.wat index 4991344e0470..36e567234942 100644 --- a/tests/disas/winch/x64/atomic/wait/wait64.wat +++ b/tests/disas/winch/x64/atomic/wait/wait64.wat @@ -29,7 +29,7 @@ ;; movq 0x18(%rsp), %rdx ;; movq 0x10(%rsp), %rcx ;; movq 8(%rsp), %r8 -;; callq 0x17f +;; callq 0x17c ;; addq $8, %rsp ;; addq $0x18, %rsp ;; movq 8(%rsp), %r14 diff --git a/tests/disas/winch/x64/atomic/wait/wait64_offset.wat b/tests/disas/winch/x64/atomic/wait/wait64_offset.wat index ecc8df46ed44..83da4af2f4e4 100644 --- a/tests/disas/winch/x64/atomic/wait/wait64_offset.wat +++ b/tests/disas/winch/x64/atomic/wait/wait64_offset.wat @@ -33,7 +33,7 @@ ;; movq 0x18(%rsp), %rdx ;; movq 0x10(%rsp), %rcx ;; movq 8(%rsp), %r8 -;; callq 0x186 +;; callq 0x183 ;; addq $8, %rsp ;; addq $0x18, %rsp ;; movq 8(%rsp), %r14 diff --git a/tests/disas/winch/x64/call_indirect/call_indirect.wat b/tests/disas/winch/x64/call_indirect/call_indirect.wat index fec8809fc211..bc1e2ee551c6 100644 --- 
a/tests/disas/winch/x64/call_indirect/call_indirect.wat +++ b/tests/disas/winch/x64/call_indirect/call_indirect.wat @@ -76,7 +76,7 @@ ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 8(%rsp), %edx -;; callq 0x330 +;; callq 0x32e ;; addq $8, %rsp ;; addq $4, %rsp ;; movq 0x1c(%rsp), %r14 @@ -128,7 +128,7 @@ ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 4(%rsp), %edx -;; callq 0x330 +;; callq 0x32e ;; addq $4, %rsp ;; addq $4, %rsp ;; movq 0x20(%rsp), %r14 diff --git a/tests/disas/winch/x64/call_indirect/local_arg.wat b/tests/disas/winch/x64/call_indirect/local_arg.wat index 91fe846c9b0a..d2ef72ecb8e3 100644 --- a/tests/disas/winch/x64/call_indirect/local_arg.wat +++ b/tests/disas/winch/x64/call_indirect/local_arg.wat @@ -72,7 +72,7 @@ ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 8(%rsp), %edx -;; callq 0x313 +;; callq 0x30b ;; addq $8, %rsp ;; addq $4, %rsp ;; movq 0x1c(%rsp), %r14 diff --git a/tests/disas/winch/x64/epoch/func.wat b/tests/disas/winch/x64/epoch/func.wat index 3a500c8178c5..0d5b5f628981 100644 --- a/tests/disas/winch/x64/epoch/func.wat +++ b/tests/disas/winch/x64/epoch/func.wat @@ -23,7 +23,7 @@ ;; cmpq %rcx, %rdx ;; jb 0x54 ;; 47: movq %r14, %rdi -;; callq 0x12d +;; callq 0x129 ;; movq 8(%rsp), %r14 ;; addq $0x10, %rsp ;; popq %rbp diff --git a/tests/disas/winch/x64/epoch/loop.wat b/tests/disas/winch/x64/epoch/loop.wat index 3e8d0b684c3d..06ee1d6b26f8 100644 --- a/tests/disas/winch/x64/epoch/loop.wat +++ b/tests/disas/winch/x64/epoch/loop.wat @@ -25,7 +25,7 @@ ;; cmpq %rcx, %rdx ;; jb 0x54 ;; 47: movq %r14, %rdi -;; callq 0x157 +;; callq 0x153 ;; movq 8(%rsp), %r14 ;; movq 0x18(%r14), %rdx ;; movq (%rdx), %rdx @@ -34,7 +34,7 @@ ;; cmpq %rcx, %rdx ;; jb 0x79 ;; 6c: movq %r14, %rdi -;; callq 0x157 +;; callq 0x153 ;; movq 8(%rsp), %r14 ;; jmp 0x54 ;; 7e: addq $0x10, %rsp diff --git a/tests/disas/winch/x64/f32_ceil/f32_ceil_param.wat b/tests/disas/winch/x64/f32_ceil/f32_ceil_param.wat index ff6c02d5dd57..90586b776ee1 100644 --- 
a/tests/disas/winch/x64/f32_ceil/f32_ceil_param.wat +++ b/tests/disas/winch/x64/f32_ceil/f32_ceil_param.wat @@ -26,7 +26,7 @@ ;; subq $0xc, %rsp ;; movq %r14, %rdi ;; movss 0xc(%rsp), %xmm0 -;; callq 0xf8 +;; callq 0xf6 ;; addq $0xc, %rsp ;; addq $4, %rsp ;; movq 0x18(%rsp), %r14 diff --git a/tests/disas/winch/x64/f32_floor/f32_floor_param.wat b/tests/disas/winch/x64/f32_floor/f32_floor_param.wat index 7d4a34067845..3d6ae5c93aee 100644 --- a/tests/disas/winch/x64/f32_floor/f32_floor_param.wat +++ b/tests/disas/winch/x64/f32_floor/f32_floor_param.wat @@ -26,7 +26,7 @@ ;; subq $0xc, %rsp ;; movq %r14, %rdi ;; movss 0xc(%rsp), %xmm0 -;; callq 0xf8 +;; callq 0xf6 ;; addq $0xc, %rsp ;; addq $4, %rsp ;; movq 0x18(%rsp), %r14 diff --git a/tests/disas/winch/x64/f32_nearest/f32_nearest_param.wat b/tests/disas/winch/x64/f32_nearest/f32_nearest_param.wat index 15e9634d0d29..16eeb83fd531 100644 --- a/tests/disas/winch/x64/f32_nearest/f32_nearest_param.wat +++ b/tests/disas/winch/x64/f32_nearest/f32_nearest_param.wat @@ -26,7 +26,7 @@ ;; subq $0xc, %rsp ;; movq %r14, %rdi ;; movss 0xc(%rsp), %xmm0 -;; callq 0xf8 +;; callq 0xf6 ;; addq $0xc, %rsp ;; addq $4, %rsp ;; movq 0x18(%rsp), %r14 diff --git a/tests/disas/winch/x64/f32_trunc/f32_trunc_param.wat b/tests/disas/winch/x64/f32_trunc/f32_trunc_param.wat index 073fc0cb8be6..c693d6a44cce 100644 --- a/tests/disas/winch/x64/f32_trunc/f32_trunc_param.wat +++ b/tests/disas/winch/x64/f32_trunc/f32_trunc_param.wat @@ -26,7 +26,7 @@ ;; subq $0xc, %rsp ;; movq %r14, %rdi ;; movss 0xc(%rsp), %xmm0 -;; callq 0xf8 +;; callq 0xf6 ;; addq $0xc, %rsp ;; addq $4, %rsp ;; movq 0x18(%rsp), %r14 diff --git a/tests/disas/winch/x64/f64_ceil/f64_ceil_param.wat b/tests/disas/winch/x64/f64_ceil/f64_ceil_param.wat index 1bd04f4c9535..9003a2033394 100644 --- a/tests/disas/winch/x64/f64_ceil/f64_ceil_param.wat +++ b/tests/disas/winch/x64/f64_ceil/f64_ceil_param.wat @@ -26,7 +26,7 @@ ;; subq $8, %rsp ;; movq %r14, %rdi ;; movsd 8(%rsp), %xmm0 -;; callq 
0xf8 +;; callq 0xf6 ;; addq $8, %rsp ;; addq $8, %rsp ;; movq 0x18(%rsp), %r14 diff --git a/tests/disas/winch/x64/f64_floor/f64_floor_param.wat b/tests/disas/winch/x64/f64_floor/f64_floor_param.wat index 6d11df49e446..beaa52e39458 100644 --- a/tests/disas/winch/x64/f64_floor/f64_floor_param.wat +++ b/tests/disas/winch/x64/f64_floor/f64_floor_param.wat @@ -26,7 +26,7 @@ ;; subq $8, %rsp ;; movq %r14, %rdi ;; movsd 8(%rsp), %xmm0 -;; callq 0xf8 +;; callq 0xf6 ;; addq $8, %rsp ;; addq $8, %rsp ;; movq 0x18(%rsp), %r14 diff --git a/tests/disas/winch/x64/f64_nearest/f64_nearest_param.wat b/tests/disas/winch/x64/f64_nearest/f64_nearest_param.wat index 496450e939d2..5f090d98f9dc 100644 --- a/tests/disas/winch/x64/f64_nearest/f64_nearest_param.wat +++ b/tests/disas/winch/x64/f64_nearest/f64_nearest_param.wat @@ -26,7 +26,7 @@ ;; subq $8, %rsp ;; movq %r14, %rdi ;; movsd 8(%rsp), %xmm0 -;; callq 0xf8 +;; callq 0xf6 ;; addq $8, %rsp ;; addq $8, %rsp ;; movq 0x18(%rsp), %r14 diff --git a/tests/disas/winch/x64/f64_trunc/f64_trunc_param.wat b/tests/disas/winch/x64/f64_trunc/f64_trunc_param.wat index 9166f1c550f3..5e47af4bd6bf 100644 --- a/tests/disas/winch/x64/f64_trunc/f64_trunc_param.wat +++ b/tests/disas/winch/x64/f64_trunc/f64_trunc_param.wat @@ -26,7 +26,7 @@ ;; subq $8, %rsp ;; movq %r14, %rdi ;; movsd 8(%rsp), %xmm0 -;; callq 0xf8 +;; callq 0xf6 ;; addq $8, %rsp ;; addq $8, %rsp ;; movq 0x18(%rsp), %r14 diff --git a/tests/disas/winch/x64/fuel/call.wat b/tests/disas/winch/x64/fuel/call.wat index 679298ce4aa1..e19a3f14cf3c 100644 --- a/tests/disas/winch/x64/fuel/call.wat +++ b/tests/disas/winch/x64/fuel/call.wat @@ -28,7 +28,7 @@ ;; cmpq $0, %rcx ;; jl 0x5e ;; 51: movq %r14, %rdi -;; callq 0x1e7 +;; callq 0x1e3 ;; movq 8(%rsp), %r14 ;; movq 8(%r14), %rax ;; movq (%rax), %r11 @@ -74,7 +74,7 @@ ;; cmpq $0, %rcx ;; jl 0x10e ;; 101: movq %r14, %rdi -;; callq 0x1e7 +;; callq 0x1e3 ;; movq 8(%rsp), %r14 ;; addq $0x10, %rsp ;; popq %rbp diff --git 
a/tests/disas/winch/x64/fuel/func.wat b/tests/disas/winch/x64/fuel/func.wat index 2b7d6cfe423b..692ce02f36f0 100644 --- a/tests/disas/winch/x64/fuel/func.wat +++ b/tests/disas/winch/x64/fuel/func.wat @@ -24,7 +24,7 @@ ;; cmpq $0, %rcx ;; jl 0x5e ;; 51: movq %r14, %rdi -;; callq 0x137 +;; callq 0x133 ;; movq 8(%rsp), %r14 ;; addq $0x10, %rsp ;; popq %rbp diff --git a/tests/disas/winch/x64/fuel/loop.wat b/tests/disas/winch/x64/fuel/loop.wat index d83f077f0e05..226bf4dc7027 100644 --- a/tests/disas/winch/x64/fuel/loop.wat +++ b/tests/disas/winch/x64/fuel/loop.wat @@ -26,14 +26,14 @@ ;; cmpq $0, %rcx ;; jl 0x5e ;; 51: movq %r14, %rdi -;; callq 0x16b +;; callq 0x167 ;; movq 8(%rsp), %r14 ;; movq 8(%r14), %rcx ;; movq (%rcx), %rcx ;; cmpq $0, %rcx ;; jl 0x7c ;; 6f: movq %r14, %rdi -;; callq 0x16b +;; callq 0x167 ;; movq 8(%rsp), %r14 ;; movq 8(%r14), %rax ;; movq (%rax), %r11 diff --git a/tests/disas/winch/x64/load/grow_load.wat b/tests/disas/winch/x64/load/grow_load.wat index 4f568222ae8d..0dc925de8ece 100644 --- a/tests/disas/winch/x64/load/grow_load.wat +++ b/tests/disas/winch/x64/load/grow_load.wat @@ -65,7 +65,7 @@ ;; movq %r14, %rdi ;; movl 0xc(%rsp), %esi ;; movl $0, %edx -;; callq 0x2eb +;; callq 0x2f4 ;; addq $0xc, %rsp ;; addq $4, %rsp ;; movq 0x58(%rsp), %r14 diff --git a/tests/disas/winch/x64/table/fill.wat b/tests/disas/winch/x64/table/fill.wat index d2ace7d05e5b..1a4367ed037d 100644 --- a/tests/disas/winch/x64/table/fill.wat +++ b/tests/disas/winch/x64/table/fill.wat @@ -113,7 +113,7 @@ ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 0xc(%rsp), %edx -;; callq 0x497 +;; callq 0x490 ;; addq $0xc, %rsp ;; addq $4, %rsp ;; movq 0x28(%rsp), %r14 @@ -133,7 +133,7 @@ ;; movl 0xc(%rsp), %edx ;; movq 4(%rsp), %rcx ;; movl (%rsp), %r8d -;; callq 0x4d8 +;; callq 0x4cc ;; addq $0x10, %rsp ;; movq 0x28(%rsp), %r14 ;; addq $0x30, %rsp diff --git a/tests/disas/winch/x64/table/get.wat b/tests/disas/winch/x64/table/get.wat index c84491d7b4d1..d0f6daf74ebe 100644 --- 
a/tests/disas/winch/x64/table/get.wat +++ b/tests/disas/winch/x64/table/get.wat @@ -65,7 +65,7 @@ ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 0xc(%rsp), %edx -;; callq 0x2da +;; callq 0x2d3 ;; addq $0xc, %rsp ;; addq $4, %rsp ;; movq 0x18(%rsp), %r14 diff --git a/tests/disas/winch/x64/table/grow.wat b/tests/disas/winch/x64/table/grow.wat index 8fd6ebfdcd97..117aae3973c3 100644 --- a/tests/disas/winch/x64/table/grow.wat +++ b/tests/disas/winch/x64/table/grow.wat @@ -30,7 +30,7 @@ ;; movl $0, %esi ;; movl $0xa, %edx ;; movq 8(%rsp), %rcx -;; callq 0x173 +;; callq 0x170 ;; addq $8, %rsp ;; addq $8, %rsp ;; movq 0x18(%rsp), %r14 diff --git a/tests/disas/winch/x64/table/init_copy_drop.wat b/tests/disas/winch/x64/table/init_copy_drop.wat index 138863dea66f..22f55fb6a738 100644 --- a/tests/disas/winch/x64/table/init_copy_drop.wat +++ b/tests/disas/winch/x64/table/init_copy_drop.wat @@ -142,11 +142,11 @@ ;; movl $7, %ecx ;; movl $0, %r8d ;; movl $4, %r9d -;; callq 0x905 +;; callq 0x8f9 ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $1, %esi -;; callq 0x963 +;; callq 0x954 ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $0, %esi @@ -154,11 +154,11 @@ ;; movl $0xf, %ecx ;; movl $1, %r8d ;; movl $3, %r9d -;; callq 0x905 +;; callq 0x8f9 ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $3, %esi -;; callq 0x963 +;; callq 0x954 ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $0, %esi @@ -166,7 +166,7 @@ ;; movl $0x14, %ecx ;; movl $0xf, %r8d ;; movl $5, %r9d -;; callq 0x8a7 +;; callq 0x89e ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $0, %esi @@ -174,7 +174,7 @@ ;; movl $0x15, %ecx ;; movl $0x1d, %r8d ;; movl $1, %r9d -;; callq 0x8a7 +;; callq 0x89e ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $0, %esi @@ -182,7 +182,7 @@ ;; movl $0x18, %ecx ;; movl $0xa, %r8d ;; movl $1, %r9d -;; callq 0x8a7 +;; callq 0x89e ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $0, %esi @@ -190,7 +190,7 @@ ;; movl $0xd, %ecx ;; movl $0xb, %r8d ;; movl $4, %r9d -;; callq 0x8a7 +;; callq 
0x89e ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $0, %esi @@ -198,7 +198,7 @@ ;; movl $0x13, %ecx ;; movl $0x14, %r8d ;; movl $5, %r9d -;; callq 0x8a7 +;; callq 0x89e ;; movq 8(%rsp), %r14 ;; addq $0x10, %rsp ;; popq %rbp @@ -243,7 +243,7 @@ ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 0xc(%rsp), %edx -;; callq 0x9a1 +;; callq 0x98f ;; addq $0xc, %rsp ;; addq $4, %rsp ;; movq 0x18(%rsp), %r14 diff --git a/tests/disas/winch/x64/table/set.wat b/tests/disas/winch/x64/table/set.wat index b31d75ed3575..795d037f6522 100644 --- a/tests/disas/winch/x64/table/set.wat +++ b/tests/disas/winch/x64/table/set.wat @@ -109,7 +109,7 @@ ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 8(%rsp), %edx -;; callq 0x49c +;; callq 0x498 ;; addq $8, %rsp ;; addq $4, %rsp ;; movq 0x1c(%rsp), %r14