diff --git a/Sources/WAT/BinaryInstructionEncoder.swift b/Sources/WAT/BinaryInstructionEncoder.swift
index c9bfe7c2..8eac6eef 100644
--- a/Sources/WAT/BinaryInstructionEncoder.swift
+++ b/Sources/WAT/BinaryInstructionEncoder.swift
@@ -125,6 +125,13 @@ extension BinaryInstructionEncoder {
         case .i64Load16U: opcode = [0x33]
         case .i64Load32S: opcode = [0x34]
         case .i64Load32U: opcode = [0x35]
+        case .i32AtomicLoad: opcode = [0xFE, 0x10]
+        case .i64AtomicLoad: opcode = [0xFE, 0x11]
+        case .i32AtomicLoad8U: opcode = [0xFE, 0x12]
+        case .i32AtomicLoad16U: opcode = [0xFE, 0x13]
+        case .i64AtomicLoad8U: opcode = [0xFE, 0x14]
+        case .i64AtomicLoad16U: opcode = [0xFE, 0x15]
+        case .i64AtomicLoad32U: opcode = [0xFE, 0x16]
         }
 
         try encodeInstruction(opcode)
@@ -142,6 +149,13 @@ extension BinaryInstructionEncoder {
         case .i64Store8: opcode = [0x3C]
         case .i64Store16: opcode = [0x3D]
         case .i64Store32: opcode = [0x3E]
+        case .i32AtomicStore: opcode = [0xFE, 0x17]
+        case .i64AtomicStore: opcode = [0xFE, 0x18]
+        case .i32AtomicStore8: opcode = [0xFE, 0x19]
+        case .i32AtomicStore16: opcode = [0xFE, 0x1A]
+        case .i64AtomicStore8: opcode = [0xFE, 0x1B]
+        case .i64AtomicStore16: opcode = [0xFE, 0x1C]
+        case .i64AtomicStore32: opcode = [0xFE, 0x1D]
         }
 
         try encodeInstruction(opcode)
@@ -392,4 +406,5 @@ extension BinaryInstructionEncoder {
         try encodeInstruction([0xFC, 0x10])
         try encodeImmediates(table: table)
     }
+    mutating func visitAtomicFence() throws { try encodeInstruction([0xFE, 0x03, 0x00]) }
 }
diff --git a/Sources/WAT/ParseTextInstruction.swift b/Sources/WAT/ParseTextInstruction.swift
index fb57a2e3..b6d583cb 100644
--- a/Sources/WAT/ParseTextInstruction.swift
+++ b/Sources/WAT/ParseTextInstruction.swift
@@ -332,6 +332,49 @@ func parseTextInstruction(keyword: String, expressionPars
     case "i64.trunc_sat_f32_u": return { return try $0.visitConversion(.i64TruncSatF32U) }
     case "i64.trunc_sat_f64_s": return { return try $0.visitConversion(.i64TruncSatF64S) }
     case "i64.trunc_sat_f64_u": return { return try $0.visitConversion(.i64TruncSatF64U) }
+    case "atomic.fence": return { return try $0.visitAtomicFence() }
+    case "i32.atomic.load":
+        let (memarg) = try expressionParser.visitLoad(.i32AtomicLoad, wat: &wat)
+        return { return try $0.visitLoad(.i32AtomicLoad, memarg: memarg) }
+    case "i64.atomic.load":
+        let (memarg) = try expressionParser.visitLoad(.i64AtomicLoad, wat: &wat)
+        return { return try $0.visitLoad(.i64AtomicLoad, memarg: memarg) }
+    case "i32.atomic.load8_u":
+        let (memarg) = try expressionParser.visitLoad(.i32AtomicLoad8U, wat: &wat)
+        return { return try $0.visitLoad(.i32AtomicLoad8U, memarg: memarg) }
+    case "i32.atomic.load16_u":
+        let (memarg) = try expressionParser.visitLoad(.i32AtomicLoad16U, wat: &wat)
+        return { return try $0.visitLoad(.i32AtomicLoad16U, memarg: memarg) }
+    case "i64.atomic.load8_u":
+        let (memarg) = try expressionParser.visitLoad(.i64AtomicLoad8U, wat: &wat)
+        return { return try $0.visitLoad(.i64AtomicLoad8U, memarg: memarg) }
+    case "i64.atomic.load16_u":
+        let (memarg) = try expressionParser.visitLoad(.i64AtomicLoad16U, wat: &wat)
+        return { return try $0.visitLoad(.i64AtomicLoad16U, memarg: memarg) }
+    case "i64.atomic.load32_u":
+        let (memarg) = try expressionParser.visitLoad(.i64AtomicLoad32U, wat: &wat)
+        return { return try $0.visitLoad(.i64AtomicLoad32U, memarg: memarg) }
+    case "i32.atomic.store":
+        let (memarg) = try expressionParser.visitStore(.i32AtomicStore, wat: &wat)
+        return { return try $0.visitStore(.i32AtomicStore, memarg: memarg) }
+    case "i64.atomic.store":
+
let (memarg) = try expressionParser.visitStore(.i64AtomicStore, wat: &wat) + return { return try $0.visitStore(.i64AtomicStore, memarg: memarg) } + case "i32.atomic.store8": + let (memarg) = try expressionParser.visitStore(.i32AtomicStore8, wat: &wat) + return { return try $0.visitStore(.i32AtomicStore8, memarg: memarg) } + case "i32.atomic.store16": + let (memarg) = try expressionParser.visitStore(.i32AtomicStore16, wat: &wat) + return { return try $0.visitStore(.i32AtomicStore16, memarg: memarg) } + case "i64.atomic.store8": + let (memarg) = try expressionParser.visitStore(.i64AtomicStore8, wat: &wat) + return { return try $0.visitStore(.i64AtomicStore8, memarg: memarg) } + case "i64.atomic.store16": + let (memarg) = try expressionParser.visitStore(.i64AtomicStore16, wat: &wat) + return { return try $0.visitStore(.i64AtomicStore16, memarg: memarg) } + case "i64.atomic.store32": + let (memarg) = try expressionParser.visitStore(.i64AtomicStore32, wat: &wat) + return { return try $0.visitStore(.i64AtomicStore32, memarg: memarg) } default: return nil } } diff --git a/Sources/WasmKit/Execution/DispatchInstruction.swift b/Sources/WasmKit/Execution/DispatchInstruction.swift index 741a1eae..e21fa2f6 100644 --- a/Sources/WasmKit/Execution/DispatchInstruction.swift +++ b/Sources/WasmKit/Execution/DispatchInstruction.swift @@ -213,6 +213,20 @@ extension Execution { case 197: return self.execute_onEnter(sp: &sp, pc: &pc, md: &md, ms: &ms) case 198: return self.execute_onExit(sp: &sp, pc: &pc, md: &md, ms: &ms) case 199: return try self.execute_breakpoint(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 200: return try self.execute_i32AtomicLoad(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 201: return try self.execute_i64AtomicLoad(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 202: return try self.execute_i32AtomicLoad8U(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 203: return try self.execute_i32AtomicLoad16U(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 204: return try self.execute_i64AtomicLoad8U(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 205: return try self.execute_i64AtomicLoad16U(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 206: return try self.execute_i64AtomicLoad32U(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 207: return try self.execute_i32AtomicStore(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 208: return try self.execute_i64AtomicStore(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 209: return try self.execute_i32AtomicStore8(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 210: return try self.execute_i32AtomicStore16(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 211: return try self.execute_i64AtomicStore8(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 212: return try self.execute_i64AtomicStore16(sp: &sp, pc: &pc, md: &md, ms: &ms) + case 213: return try self.execute_i64AtomicStore32(sp: &sp, pc: &pc, md: &md, ms: &ms) default: preconditionFailure("Unknown instruction!?") } @@ -1802,6 +1816,118 @@ extension Execution { (pc.pointee, next) = try self.breakpoint(sp: &sp.pointee, pc: pc.pointee) return next } + @_silgen_name("wasmkit_execute_i32AtomicLoad") @inline(__always) + mutating func execute_i32AtomicLoad(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.LoadOperand.load(from: &pc.pointee) + try memoryLoad(sp: sp.pointee, md: md.pointee, ms: ms.pointee, loadOperand: immediate, loadAs: UInt32.self, castToValue: { .i32($0) }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } 
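All fourteen execute_* handlers in this hunk share the shape of the existing non-atomic load/store handlers: decode the memarg immediate from the instruction stream, perform the access through memoryLoad/memoryStore, then advance pc and return the next code slot. What the threads proposal adds beyond that shape is an alignment rule: an atomic access whose effective address is not aligned to the access size must trap, whereas plain loads and stores tolerate misalignment. The sketch below shows what such a guard could look like; the names (UnalignedAtomicAccess, checkAtomicAlignment) are illustrative assumptions, not WasmKit API, and this diff itself does not add the check.

// Sketch only: an atomic access of `accessSize` bytes (1, 2, 4, or 8) must have
// an effective address that is a multiple of that size, otherwise it traps.
struct UnalignedAtomicAccess: Error {
    let address: UInt64
    let accessSize: Int
}

@inline(__always)
func checkAtomicAlignment(effectiveAddress: UInt64, accessSize: Int) throws {
    // accessSize is a power of two, so (accessSize - 1) is the alignment mask.
    guard effectiveAddress & UInt64(accessSize - 1) == 0 else {
        throw UnalignedAtomicAccess(address: effectiveAddress, accessSize: accessSize)
    }
}

// i32.atomic.load at address 8 is fine; at address 6 it would trap.
try checkAtomicAlignment(effectiveAddress: 8, accessSize: 4)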
+ @_silgen_name("wasmkit_execute_i64AtomicLoad") @inline(__always) + mutating func execute_i64AtomicLoad(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.LoadOperand.load(from: &pc.pointee) + try memoryLoad(sp: sp.pointee, md: md.pointee, ms: ms.pointee, loadOperand: immediate, loadAs: UInt64.self, castToValue: { .i64($0) }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } + @_silgen_name("wasmkit_execute_i32AtomicLoad8U") @inline(__always) + mutating func execute_i32AtomicLoad8U(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.LoadOperand.load(from: &pc.pointee) + try memoryLoad(sp: sp.pointee, md: md.pointee, ms: ms.pointee, loadOperand: immediate, loadAs: UInt8.self, castToValue: { .i32(UInt32($0)) }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } + @_silgen_name("wasmkit_execute_i32AtomicLoad16U") @inline(__always) + mutating func execute_i32AtomicLoad16U(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.LoadOperand.load(from: &pc.pointee) + try memoryLoad(sp: sp.pointee, md: md.pointee, ms: ms.pointee, loadOperand: immediate, loadAs: UInt16.self, castToValue: { .i32(UInt32($0)) }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } + @_silgen_name("wasmkit_execute_i64AtomicLoad8U") @inline(__always) + mutating func execute_i64AtomicLoad8U(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.LoadOperand.load(from: &pc.pointee) + try memoryLoad(sp: sp.pointee, md: md.pointee, ms: ms.pointee, loadOperand: immediate, loadAs: UInt8.self, castToValue: { .i64(UInt64($0)) }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } + @_silgen_name("wasmkit_execute_i64AtomicLoad16U") @inline(__always) + mutating func execute_i64AtomicLoad16U(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.LoadOperand.load(from: &pc.pointee) + try memoryLoad(sp: sp.pointee, md: md.pointee, ms: ms.pointee, loadOperand: immediate, loadAs: UInt16.self, castToValue: { .i64(UInt64($0)) }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } + @_silgen_name("wasmkit_execute_i64AtomicLoad32U") @inline(__always) + mutating func execute_i64AtomicLoad32U(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.LoadOperand.load(from: &pc.pointee) + try memoryLoad(sp: sp.pointee, md: md.pointee, ms: ms.pointee, loadOperand: immediate, loadAs: UInt32.self, castToValue: { .i64(UInt64($0)) }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } + @_silgen_name("wasmkit_execute_i32AtomicStore") @inline(__always) + mutating func execute_i32AtomicStore(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.StoreOperand.load(from: &pc.pointee) + try memoryStore(sp: sp.pointee, md: 
md.pointee, ms: ms.pointee, storeOperand: immediate, castFromValue: { $0.i32 }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } + @_silgen_name("wasmkit_execute_i64AtomicStore") @inline(__always) + mutating func execute_i64AtomicStore(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.StoreOperand.load(from: &pc.pointee) + try memoryStore(sp: sp.pointee, md: md.pointee, ms: ms.pointee, storeOperand: immediate, castFromValue: { $0.i64 }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } + @_silgen_name("wasmkit_execute_i32AtomicStore8") @inline(__always) + mutating func execute_i32AtomicStore8(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.StoreOperand.load(from: &pc.pointee) + try memoryStore(sp: sp.pointee, md: md.pointee, ms: ms.pointee, storeOperand: immediate, castFromValue: { UInt8(truncatingIfNeeded: $0.i32) }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } + @_silgen_name("wasmkit_execute_i32AtomicStore16") @inline(__always) + mutating func execute_i32AtomicStore16(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.StoreOperand.load(from: &pc.pointee) + try memoryStore(sp: sp.pointee, md: md.pointee, ms: ms.pointee, storeOperand: immediate, castFromValue: { UInt16(truncatingIfNeeded: $0.i32) }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } + @_silgen_name("wasmkit_execute_i64AtomicStore8") @inline(__always) + mutating func execute_i64AtomicStore8(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.StoreOperand.load(from: &pc.pointee) + try memoryStore(sp: sp.pointee, md: md.pointee, ms: ms.pointee, storeOperand: immediate, castFromValue: { UInt8(truncatingIfNeeded: $0.i64) }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } + @_silgen_name("wasmkit_execute_i64AtomicStore16") @inline(__always) + mutating func execute_i64AtomicStore16(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.StoreOperand.load(from: &pc.pointee) + try memoryStore(sp: sp.pointee, md: md.pointee, ms: ms.pointee, storeOperand: immediate, castFromValue: { UInt16(truncatingIfNeeded: $0.i64) }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } + @_silgen_name("wasmkit_execute_i64AtomicStore32") @inline(__always) + mutating func execute_i64AtomicStore32(sp: UnsafeMutablePointer, pc: UnsafeMutablePointer, md: UnsafeMutablePointer, ms: UnsafeMutablePointer) throws -> CodeSlot { + let immediate = Instruction.StoreOperand.load(from: &pc.pointee) + try memoryStore(sp: sp.pointee, md: md.pointee, ms: ms.pointee, storeOperand: immediate, castFromValue: { UInt32(truncatingIfNeeded: $0.i64) }) + let next = pc.pointee.pointee + pc.pointee = pc.pointee.advanced(by: 1) + return next + } } extension Instruction { diff --git a/Sources/WasmKit/Execution/Instructions/Instruction.swift b/Sources/WasmKit/Execution/Instructions/Instruction.swift index 
4c4dd969..17bfe0b9 100644 --- a/Sources/WasmKit/Execution/Instructions/Instruction.swift +++ b/Sources/WasmKit/Execution/Instructions/Instruction.swift @@ -417,6 +417,34 @@ enum Instruction: Equatable { /// /// This instruction is used in debugging scenarios. case breakpoint + /// WebAssembly Core Instruction `i32.atomic.load` + case i32AtomicLoad(Instruction.LoadOperand) + /// WebAssembly Core Instruction `i64.atomic.load` + case i64AtomicLoad(Instruction.LoadOperand) + /// WebAssembly Core Instruction `i32.atomic.load8_u` + case i32AtomicLoad8U(Instruction.LoadOperand) + /// WebAssembly Core Instruction `i32.atomic.load16_u` + case i32AtomicLoad16U(Instruction.LoadOperand) + /// WebAssembly Core Instruction `i64.atomic.load8_u` + case i64AtomicLoad8U(Instruction.LoadOperand) + /// WebAssembly Core Instruction `i64.atomic.load16_u` + case i64AtomicLoad16U(Instruction.LoadOperand) + /// WebAssembly Core Instruction `i64.atomic.load32_u` + case i64AtomicLoad32U(Instruction.LoadOperand) + /// WebAssembly Core Instruction `i32.atomic.store` + case i32AtomicStore(Instruction.StoreOperand) + /// WebAssembly Core Instruction `i64.atomic.store` + case i64AtomicStore(Instruction.StoreOperand) + /// WebAssembly Core Instruction `i32.atomic.store8` + case i32AtomicStore8(Instruction.StoreOperand) + /// WebAssembly Core Instruction `i32.atomic.store16` + case i32AtomicStore16(Instruction.StoreOperand) + /// WebAssembly Core Instruction `i64.atomic.store8` + case i64AtomicStore8(Instruction.StoreOperand) + /// WebAssembly Core Instruction `i64.atomic.store16` + case i64AtomicStore16(Instruction.StoreOperand) + /// WebAssembly Core Instruction `i64.atomic.store32` + case i64AtomicStore32(Instruction.StoreOperand) } extension Instruction { @@ -1066,6 +1094,20 @@ extension Instruction { case .tableElementDrop(let immediate): return immediate case .onEnter(let immediate): return immediate case .onExit(let immediate): return immediate + case .i32AtomicLoad(let immediate): return immediate + case .i64AtomicLoad(let immediate): return immediate + case .i32AtomicLoad8U(let immediate): return immediate + case .i32AtomicLoad16U(let immediate): return immediate + case .i64AtomicLoad8U(let immediate): return immediate + case .i64AtomicLoad16U(let immediate): return immediate + case .i64AtomicLoad32U(let immediate): return immediate + case .i32AtomicStore(let immediate): return immediate + case .i64AtomicStore(let immediate): return immediate + case .i32AtomicStore8(let immediate): return immediate + case .i32AtomicStore16(let immediate): return immediate + case .i64AtomicStore8(let immediate): return immediate + case .i64AtomicStore16(let immediate): return immediate + case .i64AtomicStore32(let immediate): return immediate default: return nil } } @@ -1276,6 +1318,20 @@ extension Instruction { case .onEnter: return 197 case .onExit: return 198 case .breakpoint: return 199 + case .i32AtomicLoad: return 200 + case .i64AtomicLoad: return 201 + case .i32AtomicLoad8U: return 202 + case .i32AtomicLoad16U: return 203 + case .i64AtomicLoad8U: return 204 + case .i64AtomicLoad16U: return 205 + case .i64AtomicLoad32U: return 206 + case .i32AtomicStore: return 207 + case .i64AtomicStore: return 208 + case .i32AtomicStore8: return 209 + case .i32AtomicStore16: return 210 + case .i64AtomicStore8: return 211 + case .i64AtomicStore16: return 212 + case .i64AtomicStore32: return 213 } } } @@ -1487,6 +1543,20 @@ extension Instruction { case 197: return .onEnter(Instruction.OnEnterOperand.load(from: &pc)) case 198: return 
.onExit(Instruction.OnExitOperand.load(from: &pc)) case 199: return .breakpoint + case 200: return .i32AtomicLoad(Instruction.LoadOperand.load(from: &pc)) + case 201: return .i64AtomicLoad(Instruction.LoadOperand.load(from: &pc)) + case 202: return .i32AtomicLoad8U(Instruction.LoadOperand.load(from: &pc)) + case 203: return .i32AtomicLoad16U(Instruction.LoadOperand.load(from: &pc)) + case 204: return .i64AtomicLoad8U(Instruction.LoadOperand.load(from: &pc)) + case 205: return .i64AtomicLoad16U(Instruction.LoadOperand.load(from: &pc)) + case 206: return .i64AtomicLoad32U(Instruction.LoadOperand.load(from: &pc)) + case 207: return .i32AtomicStore(Instruction.StoreOperand.load(from: &pc)) + case 208: return .i64AtomicStore(Instruction.StoreOperand.load(from: &pc)) + case 209: return .i32AtomicStore8(Instruction.StoreOperand.load(from: &pc)) + case 210: return .i32AtomicStore16(Instruction.StoreOperand.load(from: &pc)) + case 211: return .i64AtomicStore8(Instruction.StoreOperand.load(from: &pc)) + case 212: return .i64AtomicStore16(Instruction.StoreOperand.load(from: &pc)) + case 213: return .i64AtomicStore32(Instruction.StoreOperand.load(from: &pc)) default: fatalError("Unknown instruction opcode: \(opcode)") } } @@ -1701,6 +1771,20 @@ extension Instruction { case 197: return "onEnter" case 198: return "onExit" case 199: return "breakpoint" + case 200: return "i32AtomicLoad" + case 201: return "i64AtomicLoad" + case 202: return "i32AtomicLoad8U" + case 203: return "i32AtomicLoad16U" + case 204: return "i64AtomicLoad8U" + case 205: return "i64AtomicLoad16U" + case 206: return "i64AtomicLoad32U" + case 207: return "i32AtomicStore" + case 208: return "i64AtomicStore" + case 209: return "i32AtomicStore8" + case 210: return "i32AtomicStore16" + case 211: return "i64AtomicStore8" + case 212: return "i64AtomicStore16" + case 213: return "i64AtomicStore32" default: fatalError("Unknown instruction index: \(opcode)") } } diff --git a/Sources/WasmKit/Translator.swift b/Sources/WasmKit/Translator.swift index 4af369c4..6f3e1116 100644 --- a/Sources/WasmKit/Translator.swift +++ b/Sources/WasmKit/Translator.swift @@ -1807,6 +1807,14 @@ struct InstructionTranslator: InstructionVisitor { case .i64Load16U: instruction = Instruction.i64Load16U case .i64Load32S: instruction = Instruction.i64Load32S case .i64Load32U: instruction = Instruction.i64Load32U + case .i32AtomicLoad: instruction = Instruction.i32AtomicLoad + case .i64AtomicLoad: instruction = Instruction.i64AtomicLoad + case .i32AtomicLoad8U: instruction = Instruction.i32AtomicLoad8U + case .i32AtomicLoad16U: instruction = Instruction.i32AtomicLoad16U + case .i64AtomicLoad8U: instruction = Instruction.i64AtomicLoad8U + case .i64AtomicLoad16U: instruction = Instruction.i64AtomicLoad16U + case .i64AtomicLoad32U: instruction = Instruction.i64AtomicLoad32U + } try visitLoad(memarg, load.type, load.naturalAlignment, instruction) } @@ -1823,6 +1831,13 @@ struct InstructionTranslator: InstructionVisitor { case .i64Store8: instruction = Instruction.i64Store8 case .i64Store16: instruction = Instruction.i64Store16 case .i64Store32: instruction = Instruction.i64Store32 + case .i32AtomicStore: instruction = Instruction.i32AtomicStore + case .i64AtomicStore: instruction = Instruction.i64AtomicStore + case .i32AtomicStore8: instruction = Instruction.i32AtomicStore8 + case .i32AtomicStore16: instruction = Instruction.i32AtomicStore16 + case .i64AtomicStore8: instruction = Instruction.i64AtomicStore8 + case .i64AtomicStore16: instruction = Instruction.i64AtomicStore16 + 
case .i64AtomicStore32: instruction = Instruction.i64AtomicStore32 } try visitStore(memarg, store.type, store.naturalAlignment, instruction) } diff --git a/Sources/WasmParser/BinaryInstructionDecoder.swift b/Sources/WasmParser/BinaryInstructionDecoder.swift index 1111b67d..5cc84908 100644 --- a/Sources/WasmParser/BinaryInstructionDecoder.swift +++ b/Sources/WasmParser/BinaryInstructionDecoder.swift @@ -9,6 +9,7 @@ protocol BinaryInstructionDecoder { /// Claim the next byte to be decoded @inlinable func claimNextByte() throws -> UInt8 + /// Throw an error due to unknown opcode. func throwUnknown(_ opcode: [UInt8]) throws -> Never /// Decode `block` immediates @inlinable mutating func visitBlock() throws -> BlockType @@ -565,6 +566,64 @@ func parseBinaryInstruction(visitor: inout some InstructionVisitor, decoder: ino default: if try !visitor.visitUnknown([opcode0, opcode1]) { try decoder.throwUnknown([opcode0, opcode1]) } } + case 0xFE: + + let opcode1 = try decoder.claimNextByte() + switch opcode1 { + case 0x03: + + let opcode2 = try decoder.claimNextByte() + switch opcode2 { + case 0x00: + try visitor.visitAtomicFence() + default: + if try !visitor.visitUnknown([opcode0, opcode1, opcode2]) { try decoder.throwUnknown([opcode0, opcode1, opcode2]) } + } + case 0x10: + let (memarg) = try decoder.visitLoad(.i32AtomicLoad) + try visitor.visitLoad(.i32AtomicLoad, memarg: memarg) + case 0x11: + let (memarg) = try decoder.visitLoad(.i64AtomicLoad) + try visitor.visitLoad(.i64AtomicLoad, memarg: memarg) + case 0x12: + let (memarg) = try decoder.visitLoad(.i32AtomicLoad8U) + try visitor.visitLoad(.i32AtomicLoad8U, memarg: memarg) + case 0x13: + let (memarg) = try decoder.visitLoad(.i32AtomicLoad16U) + try visitor.visitLoad(.i32AtomicLoad16U, memarg: memarg) + case 0x14: + let (memarg) = try decoder.visitLoad(.i64AtomicLoad8U) + try visitor.visitLoad(.i64AtomicLoad8U, memarg: memarg) + case 0x15: + let (memarg) = try decoder.visitLoad(.i64AtomicLoad16U) + try visitor.visitLoad(.i64AtomicLoad16U, memarg: memarg) + case 0x16: + let (memarg) = try decoder.visitLoad(.i64AtomicLoad32U) + try visitor.visitLoad(.i64AtomicLoad32U, memarg: memarg) + case 0x17: + let (memarg) = try decoder.visitStore(.i32AtomicStore) + try visitor.visitStore(.i32AtomicStore, memarg: memarg) + case 0x18: + let (memarg) = try decoder.visitStore(.i64AtomicStore) + try visitor.visitStore(.i64AtomicStore, memarg: memarg) + case 0x19: + let (memarg) = try decoder.visitStore(.i32AtomicStore8) + try visitor.visitStore(.i32AtomicStore8, memarg: memarg) + case 0x1A: + let (memarg) = try decoder.visitStore(.i32AtomicStore16) + try visitor.visitStore(.i32AtomicStore16, memarg: memarg) + case 0x1B: + let (memarg) = try decoder.visitStore(.i64AtomicStore8) + try visitor.visitStore(.i64AtomicStore8, memarg: memarg) + case 0x1C: + let (memarg) = try decoder.visitStore(.i64AtomicStore16) + try visitor.visitStore(.i64AtomicStore16, memarg: memarg) + case 0x1D: + let (memarg) = try decoder.visitStore(.i64AtomicStore32) + try visitor.visitStore(.i64AtomicStore32, memarg: memarg) + default: + if try !visitor.visitUnknown([opcode0, opcode1]) { try decoder.throwUnknown([opcode0, opcode1]) } + } default: if try !visitor.visitUnknown([opcode0]) { try decoder.throwUnknown([opcode0]) } } diff --git a/Sources/WasmParser/InstructionVisitor.swift b/Sources/WasmParser/InstructionVisitor.swift index 2b4f3c7f..2a6b0271 100644 --- a/Sources/WasmParser/InstructionVisitor.swift +++ b/Sources/WasmParser/InstructionVisitor.swift @@ -20,6 +20,13 @@ public enum 
Instruction: Equatable { case i64Load16U case i64Load32S case i64Load32U + case i32AtomicLoad + case i64AtomicLoad + case i32AtomicLoad8U + case i32AtomicLoad16U + case i64AtomicLoad8U + case i64AtomicLoad16U + case i64AtomicLoad32U } public enum Store: Equatable { case i32Store @@ -31,6 +38,13 @@ public enum Instruction: Equatable { case i64Store8 case i64Store16 case i64Store32 + case i32AtomicStore + case i64AtomicStore + case i32AtomicStore8 + case i32AtomicStore16 + case i64AtomicStore8 + case i64AtomicStore16 + case i64AtomicStore32 } public enum Cmp: Equatable { case i32Eq @@ -226,6 +240,7 @@ public enum Instruction: Equatable { case `tableSet`(table: UInt32) case `tableGrow`(table: UInt32) case `tableSize`(table: UInt32) + case `atomicFence` } /// A visitor that visits all instructions by a single visit method. @@ -287,6 +302,7 @@ extension AnyInstructionVisitor { public mutating func visitTableSet(table: UInt32) throws { return try self.visit(.tableSet(table: table)) } public mutating func visitTableGrow(table: UInt32) throws { return try self.visit(.tableGrow(table: table)) } public mutating func visitTableSize(table: UInt32) throws { return try self.visit(.tableSize(table: table)) } + public mutating func visitAtomicFence() throws { return try self.visit(.atomicFence) } } /// A visitor for WebAssembly instructions. @@ -398,6 +414,8 @@ public protocol InstructionVisitor { mutating func visitTableGrow(table: UInt32) throws /// Visiting `table.size` instruction. mutating func visitTableSize(table: UInt32) throws + /// Visiting `atomic.fence` instruction. + mutating func visitAtomicFence() throws /// Returns: `true` if the parser should silently proceed parsing. mutating func visitUnknown(_ opcode: [UInt8]) throws -> Bool } @@ -458,6 +476,7 @@ extension InstructionVisitor { case let .tableSet(table): return try visitTableSet(table: table) case let .tableGrow(table): return try visitTableGrow(table: table) case let .tableSize(table): return try visitTableSize(table: table) + case .atomicFence: return try visitAtomicFence() } } } @@ -516,6 +535,7 @@ extension InstructionVisitor { public mutating func visitTableSet(table: UInt32) throws {} public mutating func visitTableGrow(table: UInt32) throws {} public mutating func visitTableSize(table: UInt32) throws {} + public mutating func visitAtomicFence() throws {} public mutating func visitUnknown(_ opcode: [UInt8]) throws -> Bool { false } } diff --git a/Sources/WasmParser/WasmTypes.swift b/Sources/WasmParser/WasmTypes.swift index 2acd5212..df95b6c2 100644 --- a/Sources/WasmParser/WasmTypes.swift +++ b/Sources/WasmParser/WasmTypes.swift @@ -350,20 +350,20 @@ extension Instruction.Load { @_alwaysEmitIntoClient public var naturalAlignment: Int { switch self { - case .i32Load: return 2 - case .i64Load: return 3 + case .i32Load, .i32AtomicLoad: return 2 + case .i64Load, .i64AtomicLoad: return 3 case .f32Load: return 2 case .f64Load: return 3 case .i32Load8S: return 0 - case .i32Load8U: return 0 + case .i32Load8U, .i32AtomicLoad8U: return 0 case .i32Load16S: return 1 - case .i32Load16U: return 1 + case .i32Load16U, .i32AtomicLoad16U: return 1 case .i64Load8S: return 0 - case .i64Load8U: return 0 + case .i64Load8U, .i64AtomicLoad8U: return 0 case .i64Load16S: return 1 - case .i64Load16U: return 1 + case .i64Load16U, .i64AtomicLoad16U: return 1 case .i64Load32S: return 2 - case .i64Load32U: return 2 + case .i64Load32U, .i64AtomicLoad32U: return 2 } } @@ -371,20 +371,20 @@ extension Instruction.Load { @_alwaysEmitIntoClient public var type: 
ValueType { switch self { - case .i32Load: return .i32 - case .i64Load: return .i64 + case .i32Load, .i32AtomicLoad: return .i32 + case .i64Load, .i64AtomicLoad: return .i64 case .f32Load: return .f32 case .f64Load: return .f64 case .i32Load8S: return .i32 - case .i32Load8U: return .i32 + case .i32Load8U, .i32AtomicLoad8U: return .i32 case .i32Load16S: return .i32 - case .i32Load16U: return .i32 + case .i32Load16U, .i32AtomicLoad16U: return .i32 case .i64Load8S: return .i64 - case .i64Load8U: return .i64 + case .i64Load8U, .i64AtomicLoad8U: return .i64 case .i64Load16S: return .i64 - case .i64Load16U: return .i64 + case .i64Load16U, .i64AtomicLoad16U: return .i64 case .i64Load32S: return .i64 - case .i64Load32U: return .i64 + case .i64Load32U, .i64AtomicLoad32U: return .i64 } } } @@ -395,15 +395,15 @@ extension Instruction.Store { @_alwaysEmitIntoClient public var naturalAlignment: Int { switch self { - case .i32Store: return 2 - case .i64Store: return 3 + case .i32Store, .i32AtomicStore: return 2 + case .i64Store, .i64AtomicStore: return 3 case .f32Store: return 2 case .f64Store: return 3 - case .i32Store8: return 0 - case .i32Store16: return 1 - case .i64Store8: return 0 - case .i64Store16: return 1 - case .i64Store32: return 2 + case .i32Store8, .i32AtomicStore8: return 0 + case .i32Store16, .i32AtomicStore16: return 1 + case .i64Store8, .i64AtomicStore8: return 0 + case .i64Store16, .i64AtomicStore16: return 1 + case .i64Store32, .i64AtomicStore32: return 2 } } @@ -411,15 +411,15 @@ extension Instruction.Store { @_alwaysEmitIntoClient public var type: ValueType { switch self { - case .i32Store: return .i32 - case .i64Store: return .i64 + case .i32Store, .i32AtomicStore: return .i32 + case .i64Store, .i64AtomicStore: return .i64 case .f32Store: return .f32 case .f64Store: return .f64 - case .i32Store8: return .i32 - case .i32Store16: return .i32 - case .i64Store8: return .i64 - case .i64Store16: return .i64 - case .i64Store32: return .i64 + case .i32Store8, .i32AtomicStore8: return .i32 + case .i32Store16, .i32AtomicStore16: return .i32 + case .i64Store8, .i64AtomicStore8: return .i64 + case .i64Store16, .i64AtomicStore16: return .i64 + case .i64Store32, .i64AtomicStore32: return .i64 } } } diff --git a/Sources/_CWasmKit/include/DirectThreadedCode.inc b/Sources/_CWasmKit/include/DirectThreadedCode.inc index 71eb00a6..822c1d61 100644 --- a/Sources/_CWasmKit/include/DirectThreadedCode.inc +++ b/Sources/_CWasmKit/include/DirectThreadedCode.inc @@ -1269,6 +1269,104 @@ SWIFT_CC(swiftasync) static inline void wasmkit_tc_breakpoint(Sp sp, Pc pc, Md m if (error) return wasmkit_execution_state_set_error(error, sp, state); return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); } +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i32AtomicLoad(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i32AtomicLoad(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i32AtomicLoad(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i64AtomicLoad(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i64AtomicLoad(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable 
error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i64AtomicLoad(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i32AtomicLoad8U(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i32AtomicLoad8U(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i32AtomicLoad8U(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i32AtomicLoad16U(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i32AtomicLoad16U(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i32AtomicLoad16U(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i64AtomicLoad8U(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i64AtomicLoad8U(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i64AtomicLoad8U(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i64AtomicLoad16U(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i64AtomicLoad16U(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i64AtomicLoad16U(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i64AtomicLoad32U(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i64AtomicLoad32U(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i64AtomicLoad32U(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i32AtomicStore(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i32AtomicStore(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i32AtomicStore(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} 
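Each wasmkit_tc_* thunk here is mechanical: it declares the matching @_silgen_name Swift handler, calls it, routes any error into the execution state, and tail-calls whatever handler address the Swift side returned. The invariant to preserve when appending entries is that a handler's position in wasmkit_tc_exec_handlers matches the numeric index the instruction was given in Instruction.swift (200 through 213 for these atomics). A small self-contained Swift sketch of that table-indexed dispatch contract, using toy closures in place of the real C function pointers:

// Toy model: opcode N selects handler N in a flat table, mirroring how
// wasmkit_tc_exec_handlers is indexed by Instruction's numeric opcode.
typealias ToyHandler = (inout [String]) -> Void

let firstAtomicOpcode = 200
let atomicHandlers: [ToyHandler] = [
    { $0.append("i32AtomicLoad") },    // 200
    { $0.append("i64AtomicLoad") },    // 201
    { $0.append("i32AtomicLoad8U") },  // 202
]

func dispatch(opcode: Int, trace: inout [String]) {
    // If the table order ever drifts from the opcode numbering, the wrong
    // handler runs; keeping both lists in the same order is the contract.
    atomicHandlers[opcode - firstAtomicOpcode](&trace)
}

var trace: [String] = []
dispatch(opcode: 201, trace: &trace)
// trace == ["i64AtomicLoad"]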
+SWIFT_CC(swiftasync) static inline void wasmkit_tc_i64AtomicStore(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i64AtomicStore(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i64AtomicStore(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i32AtomicStore8(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i32AtomicStore8(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i32AtomicStore8(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i32AtomicStore16(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i32AtomicStore16(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i32AtomicStore16(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i64AtomicStore8(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i64AtomicStore8(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i64AtomicStore8(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i64AtomicStore16(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i64AtomicStore16(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i64AtomicStore16(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} +SWIFT_CC(swiftasync) static inline void wasmkit_tc_i64AtomicStore32(Sp sp, Pc pc, Md md, Ms ms, SWIFT_CONTEXT void *state) { + SWIFT_CC(swift) uint64_t wasmkit_execute_i64AtomicStore32(Sp *sp, Pc *pc, Md *md, Ms *ms, SWIFT_CONTEXT void *state, SWIFT_ERROR_RESULT void **error); + void * _Nullable error = NULL; uint64_t next; + INLINE_CALL next = wasmkit_execute_i64AtomicStore32(&sp, &pc, &md, &ms, state, &error); + if (error) return wasmkit_execution_state_set_error(error, sp, state); + return ((wasmkit_tc_exec)next)(sp, pc, md, ms, state); +} static const uintptr_t wasmkit_tc_exec_handlers[] = { (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_copyStack), (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_globalGet), @@ -1470,4 +1568,18 @@ static const uintptr_t wasmkit_tc_exec_handlers[] = { 
     (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_onEnter),
     (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_onExit),
     (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_breakpoint),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i32AtomicLoad),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i64AtomicLoad),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i32AtomicLoad8U),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i32AtomicLoad16U),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i64AtomicLoad8U),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i64AtomicLoad16U),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i64AtomicLoad32U),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i32AtomicStore),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i64AtomicStore),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i32AtomicStore8),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i32AtomicStore16),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i64AtomicStore8),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i64AtomicStore16),
+    (uintptr_t)((wasmkit_tc_exec)&wasmkit_tc_i64AtomicStore32),
 };
diff --git a/Sources/_CWasmKit/include/_CWasmKit.h b/Sources/_CWasmKit/include/_CWasmKit.h
index 307177ee..ad7e41d1 100644
--- a/Sources/_CWasmKit/include/_CWasmKit.h
+++ b/Sources/_CWasmKit/include/_CWasmKit.h
@@ -1,6 +1,7 @@
 #ifndef WASMKIT__CWASMKIT_H
 #define WASMKIT__CWASMKIT_H
 
+#include 
 #include 
 #include 
 #include 
diff --git a/Utilities/Instructions.json b/Utilities/Instructions.json
index 953a4073..e123e7cc 100644
--- a/Utilities/Instructions.json
+++ b/Utilities/Instructions.json
@@ -201,5 +201,13 @@
     ["saturatingFloatToInt", "i64.trunc_sat_f32_s" , ["0xFC", "0x04"], [] , "conversion"],
     ["saturatingFloatToInt", "i64.trunc_sat_f32_u" , ["0xFC", "0x05"], [] , "conversion"],
     ["saturatingFloatToInt", "i64.trunc_sat_f64_s" , ["0xFC", "0x06"], [] , "conversion"],
-    ["saturatingFloatToInt", "i64.trunc_sat_f64_u" , ["0xFC", "0x07"], [] , "conversion"]
+    ["saturatingFloatToInt", "i64.trunc_sat_f64_u" , ["0xFC", "0x07"], [] , "conversion"],
+    ["threads"             , "atomic.fence"        , ["0xFE", "0x03", "0x00"], []          , null  ],
+    ["threads"             , "i32.atomic.load"     , ["0xFE", "0x10"], [["memarg", "MemArg"]] , "load" ],
+    ["threads"             , "i64.atomic.load"     , ["0xFE", "0x11"], [["memarg", "MemArg"]] , "load" ],
+    ["threads"             , "i32.atomic.load8_u"  , ["0xFE", "0x12"], [["memarg", "MemArg"]] , "load" ],
+    ["threads"             , "i32.atomic.load16_u" , ["0xFE", "0x13"], [["memarg", "MemArg"]] , "load" ],
+    ["threads"             , "i64.atomic.load8_u"  , ["0xFE", "0x14"], [["memarg", "MemArg"]] , "load" ],
+    ["threads"             , "i64.atomic.load16_u" , ["0xFE", "0x15"], [["memarg", "MemArg"]] , "load" ],
+    ["threads"             , "i64.atomic.load32_u" , ["0xFE", "0x16"], [["memarg", "MemArg"]] , "load" ]
 ]
diff --git a/Utilities/Sources/VMGen.swift b/Utilities/Sources/VMGen.swift
index 210098f7..dabb0b5b 100644
--- a/Utilities/Sources/VMGen.swift
+++ b/Utilities/Sources/VMGen.swift
@@ -85,6 +85,17 @@ enum VMGen {
             """
         }
 
+        for op in memoryAtomicLoadOps {
+            inlineImpls[op.atomicInstruction.name] = """
+            try memoryLoad(sp: sp.pointee, md: md.pointee, ms: ms.pointee, loadOperand: immediate, loadAs: \(op.loadAs).self, castToValue: { \(op.castToValue) })
+            """
+        }
+        for op in memoryAtomicStoreOps {
+            inlineImpls[op.atomicInstruction.name] = """
+            try memoryStore(sp: sp.pointee, md: md.pointee, ms: ms.pointee, storeOperand: immediate, castFromValue: { \(op.castFromValue) })
+            """
+        }
+
         return inlineImpls
     }
diff --git a/Utilities/Sources/VMSpec/Instruction.swift b/Utilities/Sources/VMSpec/Instruction.swift
index dcff3f95..753f685a 100644
--- 
a/Utilities/Sources/VMSpec/Instruction.swift +++ b/Utilities/Sources/VMSpec/Instruction.swift @@ -353,54 +353,69 @@ extension VMGen { let op: String let loadAs: String let castToValue: String + let isSigned: Bool + let isFloatingPoint: Bool var instruction: Instruction { Instruction(name: "\(type)\(op)", documentation: "WebAssembly Core Instruction `\(type).\(VMGen.snakeCase(pascalCase: op))`", mayThrow: true, useCurrentMemory: .read, immediateLayout: .load) } + var atomicInstruction: Instruction { + Instruction(name: "\(type)Atomic\(op)", documentation: "WebAssembly Core Instruction `\(type).atomic.\(VMGen.snakeCase(pascalCase: op))`", + mayThrow: true, useCurrentMemory: .read, immediateLayout: .load) + } } static let memoryLoadOps: [LoadOpInfo] = [ - ("i32", "Load", "UInt32", ".i32($0)"), - ("i64", "Load", "UInt64", ".i64($0)"), - ("f32", "Load", "UInt32", ".rawF32($0)"), - ("f64", "Load", "UInt64", ".rawF64($0)"), - ("i32", "Load8S", "Int8", ".init(signed: Int32($0))"), - ("i32", "Load8U", "UInt8", ".i32(UInt32($0))"), - ("i32", "Load16S", "Int16", ".init(signed: Int32($0))"), - ("i32", "Load16U", "UInt16", ".i32(UInt32($0))"), - ("i64", "Load8S", "Int8", ".init(signed: Int64($0))"), - ("i64", "Load8U", "UInt8", ".i64(UInt64($0))"), - ("i64", "Load16S", "Int16", ".init(signed: Int64($0))"), - ("i64", "Load16U", "UInt16", ".i64(UInt64($0))"), - ("i64", "Load32S", "Int32", ".init(signed: Int64($0))"), - ("i64", "Load32U", "UInt32", ".i64(UInt64($0))"), - ].map { (type, op, loadAs, castToValue) in - return LoadOpInfo(type: type, op: op, loadAs: loadAs, castToValue: castToValue) + ("i32", "Load", "UInt32", ".i32($0)", false, false), + ("i64", "Load", "UInt64", ".i64($0)", false, false), + ("f32", "Load", "UInt32", ".rawF32($0)", false, true), + ("f64", "Load", "UInt64", ".rawF64($0)", false, true), + ("i32", "Load8S", "Int8", ".init(signed: Int32($0))", true, false), + ("i32", "Load8U", "UInt8", ".i32(UInt32($0))", false, false), + ("i32", "Load16S", "Int16", ".init(signed: Int32($0))", true, false), + ("i32", "Load16U", "UInt16", ".i32(UInt32($0))", false, false), + ("i64", "Load8S", "Int8", ".init(signed: Int64($0))", true, false), + ("i64", "Load8U", "UInt8", ".i64(UInt64($0))", false, false), + ("i64", "Load16S", "Int16", ".init(signed: Int64($0))", true, false), + ("i64", "Load16U", "UInt16", ".i64(UInt64($0))", false, false), + ("i64", "Load32S", "Int32", ".init(signed: Int64($0))", true, false), + ("i64", "Load32U", "UInt32", ".i64(UInt64($0))", false, false), + ].map { (type, op, loadAs, castToValue, isSigned, isFloatingPoint) in + return LoadOpInfo(type: type, op: op, loadAs: loadAs, castToValue: castToValue, isSigned: isSigned, isFloatingPoint: isFloatingPoint) } + static let memoryAtomicLoadOps = memoryLoadOps.filter { !$0.isFloatingPoint && !$0.isSigned } + struct StoreOpInfo { let type: String let op: String let castFromValue: String + let isFloatingPoint: Bool var instruction: Instruction { Instruction(name: "\(type)\(op)", documentation: "WebAssembly Core Instruction `\(type).\(VMGen.snakeCase(pascalCase: op))`", mayThrow: true, useCurrentMemory: .read, immediateLayout: .store) } + var atomicInstruction: Instruction { + Instruction(name: "\(type)Atomic\(op)", documentation: "WebAssembly Core Instruction `\(type).atomic.\(VMGen.snakeCase(pascalCase: op))`", + mayThrow: true, useCurrentMemory: .read, immediateLayout: .store) + } } static let memoryStoreOps: [StoreOpInfo] = [ - ("i32", "Store", "$0.i32"), - ("i64", "Store", "$0.i64"), - ("f32", "Store", "$0.rawF32"), - ("f64", 
"Store", "$0.rawF64"), - ("i32", "Store8", "UInt8(truncatingIfNeeded: $0.i32)"), - ("i32", "Store16", "UInt16(truncatingIfNeeded: $0.i32)"), - ("i64", "Store8", "UInt8(truncatingIfNeeded: $0.i64)"), - ("i64", "Store16", "UInt16(truncatingIfNeeded: $0.i64)"), - ("i64", "Store32", "UInt32(truncatingIfNeeded: $0.i64)"), - ].map { (type, op, castFromValue) in - return StoreOpInfo(type: type, op: op, castFromValue: castFromValue) + ("i32", "Store", "$0.i32", false), + ("i64", "Store", "$0.i64", false), + ("f32", "Store", "$0.rawF32", true), + ("f64", "Store", "$0.rawF64", true), + ("i32", "Store8", "UInt8(truncatingIfNeeded: $0.i32)", false), + ("i32", "Store16", "UInt16(truncatingIfNeeded: $0.i32)", false), + ("i64", "Store8", "UInt8(truncatingIfNeeded: $0.i64)", false), + ("i64", "Store16", "UInt16(truncatingIfNeeded: $0.i64)", false), + ("i64", "Store32", "UInt32(truncatingIfNeeded: $0.i64)", false), + ].map { (type, op, castFromValue, isFloatingPoint) in + return StoreOpInfo(type: type, op: op, castFromValue: castFromValue, isFloatingPoint: isFloatingPoint) } + static let memoryAtomicStoreOps = memoryStoreOps.filter { !$0.isFloatingPoint } static let memoryLoadStoreInsts: [Instruction] = memoryLoadOps.map(\.instruction) + memoryStoreOps.map(\.instruction) + static let memoryAtomicInsts: [Instruction] = memoryAtomicLoadOps.map(\.atomicInstruction) + memoryAtomicStoreOps.map(\.atomicInstruction) static let memoryOpInsts: [Instruction] = [ Instruction(name: "memorySize", documentation: "WebAssembly Core Instruction `memory.size`") { $0.field(name: "memoryIndex", type: .MemoryIndex) @@ -608,6 +623,7 @@ extension VMGen { instructions += floatBinOps.map(\.instruction) instructions += floatUnaryOps.map(\.instruction) instructions += miscInsts + instructions += memoryAtomicInsts return instructions }