diff --git a/src/audio_worklet.js b/src/audio_worklet.js
index 6ed827af22e7d..1cd582d762e22 100644
--- a/src/audio_worklet.js
+++ b/src/audio_worklet.js
@@ -29,14 +29,47 @@ function createWasmAudioWorkletProcessor(audioParams) {
       // Capture the Wasm function callback to invoke.
       let opts = args.processorOptions;
-      this.callbackFunction = Module['wasmTable'].get(opts['cb']);
-      this.userData = opts['ud'];
+      this.callbackFunction = Module['wasmTable'].get({{{ toIndexType("opts['cb']") }}});
+      this.userData = {{{ toIndexType("opts['ud']") }}};
+
       // Then the samples per channel to process, fixed for the lifetime of the
-      // context that created this processor. Note for when moving to Web Audio
-      // 1.1: the typed array passed to process() should be the same size as this
-      // 'render quantum size', and this exercise of passing in the value
-      // shouldn't be required (to be verified).
+      // context that created this processor. Even though this 'render quantum
+      // size' is fixed at 128 samples in the 1.0 spec, it will be variable in
+      // the 1.1 spec. It's passed in now, just to prove it's settable, but will
+      // eventually be a property of the AudioWorkletGlobalScope (globalThis).
       this.samplesPerChannel = opts['sc'];
+      this.bytesPerChannel = this.samplesPerChannel * {{{ getNativeTypeSize('float') }}};
+
+      // Create up-front as many typed views for marshalling the output data as
+      // may be required (with an arbitrary maximum of 10, for the case where a
+      // multi-MB stack is passed), allocated at the *top* of the worklet's
+      // stack (and whose addresses are fixed). The 'minimum alloc' both stops
+      // STACK_OVERFLOW_CHECK from failing (the stack would otherwise be full,
+      // and 16 is the minimum allocation size due to alignment) and leaves
+      // room for a single AudioSampleFrame as a minimum.
+      this.maxBuffers = Math.min(((Module['sz'] - /*minimum alloc*/ 16) / this.bytesPerChannel) | 0, /*sensible limit*/ 10);
+#if ASSERTIONS
+      console.assert(this.maxBuffers > 0, `AudioWorklet needs a larger stack (at least ${this.samplesPerChannel * 4} bytes)`);
+#endif
+      // These are still alloc'd to take advantage of the overflow checks, etc.
+      var oldStackPtr = stackSave();
+      var viewDataIdx = {{{ getHeapOffset('stackAlloc(this.maxBuffers * this.bytesPerChannel)', 'float') }}};
+#if WEBAUDIO_DEBUG
+      console.log(`AudioWorklet creating ${this.maxBuffers} buffer one-time views (for a stack size of ${Module['sz']} at address 0x${(viewDataIdx * 4).toString(16)})`);
+#endif
+      this.outputViews = [];
+      for (var i = this.maxBuffers; i > 0; i--) {
+        // Added in reverse so the lowest indices are closest to the stack top
+        this.outputViews.unshift(
+          HEAPF32.subarray(viewDataIdx, viewDataIdx += this.samplesPerChannel)
+        );
+      }
+      stackRestore(oldStackPtr);
+
+#if ASSERTIONS
+      // Explicitly verify this later in process()
+      this.ctorOldStackPtr = oldStackPtr;
+#endif
     }

     static get parameterDescriptors() {
@@ -51,75 +84,122 @@ function createWasmAudioWorkletProcessor(audioParams) {
       let numInputs = inputList.length,
         numOutputs = outputList.length,
         numParams = 0, i, j, k, dataPtr,
-        bytesPerChannel = this.samplesPerChannel * 4,
+        outputViewsNeeded = 0,
         stackMemoryNeeded = (numInputs + numOutputs) * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}},
         oldStackPtr = stackSave(),
-        inputsPtr, outputsPtr, outputDataPtr, paramsPtr,
+        inputsPtr, outputsPtr, paramsPtr,
         didProduceAudio, paramArray;

-      // Calculate how much stack space is needed.
-      for (i of inputList) stackMemoryNeeded += i.length * bytesPerChannel;
-      for (i of outputList) stackMemoryNeeded += i.length * bytesPerChannel;
+      // Calculate how much stack space is needed
+      for (i of inputList) stackMemoryNeeded += i.length * this.bytesPerChannel;
+      for (i of outputList) outputViewsNeeded += i.length;
+      stackMemoryNeeded += outputViewsNeeded * this.bytesPerChannel;
       for (i in parameters) stackMemoryNeeded += parameters[i].byteLength + {{{ C_STRUCTS.AudioParamFrame.__size__ }}}, ++numParams;

-      // Allocate the necessary stack space.
-      inputsPtr = stackAlloc(stackMemoryNeeded);
+#if ASSERTIONS
+      console.assert(oldStackPtr == this.ctorOldStackPtr, 'AudioWorklet stack address has unexpectedly moved');
+      console.assert(outputViewsNeeded <= this.outputViews.length, `Too many AudioWorklet outputs (need ${outputViewsNeeded} but have stack space for ${this.outputViews.length})`);
+#endif
+
+      // Allocate the necessary stack space (dataPtr is always in bytes, and
+      // advances as space for structs and data is taken, but note the switching
+      // between bytes and indices into the various heaps, usually in 'k'). This
+      // will be 16-byte aligned (from _emscripten_stack_alloc()), as were the
+      // output views, so we round up and advance the required bytes to ensure
+      // the addresses all work out at the end.
+      i = (stackMemoryNeeded + 15) & ~15;
+      dataPtr = stackAlloc(i) + (i - stackMemoryNeeded);

       // Copy input audio descriptor structs and data to Wasm
-      k = inputsPtr >> 2;
-      dataPtr = inputsPtr + numInputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
+      // Note: filling the structs was tried with makeSetValue() but it creates
+      // minor overhead (adds and shifts) that we can avoid (and no combination
+      // of optimisations will fold it away).
+      inputsPtr = dataPtr;
+      k = {{{ getHeapOffset('inputsPtr', 'u32') }}};
+      dataPtr += numInputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
       for (i of inputList) {
         // Write the AudioSampleFrame struct instance
-        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / 4 }}}] = i.length;
-        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.samplesPerChannel / 4 }}}] = this.samplesPerChannel;
-        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / 4 }}}] = dataPtr;
-        k += {{{ C_STRUCTS.AudioSampleFrame.__size__ / 4 }}};
+        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / getNativeTypeSize('u32') }}}] = i.length;
+        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.samplesPerChannel / getNativeTypeSize('u32') }}}] = this.samplesPerChannel;
+        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / getNativeTypeSize('u32') }}}] = dataPtr;
+#if MEMORY64
+        // See the note in the constructor for dealing with 64-bit addresses
+        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / getNativeTypeSize('u32') + 1 }}}] = dataPtr / 0x100000000;
+#endif
+        k += {{{ C_STRUCTS.AudioSampleFrame.__size__ / getNativeTypeSize('u32') }}};
         // Marshal the input audio sample data for each audio channel of this input
         for (j of i) {
-          HEAPF32.set(j, dataPtr>>2);
-          dataPtr += bytesPerChannel;
+          HEAPF32.set(j, {{{ getHeapOffset('dataPtr', 'float') }}});
+          dataPtr += this.bytesPerChannel;
         }
       }

-      // Copy output audio descriptor structs to Wasm
-      outputsPtr = dataPtr;
-      k = outputsPtr >> 2;
-      outputDataPtr = (dataPtr += numOutputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}}) >> 2;
-      for (i of outputList) {
-        // Write the AudioSampleFrame struct instance
-        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / 4 }}}] = i.length;
-        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.samplesPerChannel / 4 }}}] = this.samplesPerChannel;
-        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / 4 }}}] = dataPtr;
-        k += {{{ C_STRUCTS.AudioSampleFrame.__size__ / 4 }}};
-        // Reserve space for the output data
-        dataPtr += bytesPerChannel * i.length;
-      }
-
       // Copy parameters descriptor structs and data to Wasm
       paramsPtr = dataPtr;
-      k = paramsPtr >> 2;
+      k = {{{ getHeapOffset('paramsPtr', 'u32') }}};
       dataPtr += numParams * {{{ C_STRUCTS.AudioParamFrame.__size__ }}};
       for (i = 0; paramArray = parameters[i++];) {
         // Write the AudioParamFrame struct instance
-        HEAPU32[k + {{{ C_STRUCTS.AudioParamFrame.length / 4 }}}] = paramArray.length;
-        HEAPU32[k + {{{ C_STRUCTS.AudioParamFrame.data / 4 }}}] = dataPtr;
-        k += {{{ C_STRUCTS.AudioParamFrame.__size__ / 4 }}};
+        HEAPU32[k + {{{ C_STRUCTS.AudioParamFrame.length / getNativeTypeSize('u32') }}}] = paramArray.length;
+        HEAPU32[k + {{{ C_STRUCTS.AudioParamFrame.data / getNativeTypeSize('u32') }}}] = dataPtr;
+#if MEMORY64
+        HEAPU32[k + {{{ C_STRUCTS.AudioParamFrame.data / getNativeTypeSize('u32') + 1 }}}] = dataPtr / 0x100000000;
+#endif
+        k += {{{ C_STRUCTS.AudioParamFrame.__size__ / getNativeTypeSize('u32') }}};
         // Marshal the audio parameters array
-        HEAPF32.set(paramArray, dataPtr>>2);
-        dataPtr += paramArray.length*4;
+        HEAPF32.set(paramArray, {{{ getHeapOffset('dataPtr', 'float') }}});
+        dataPtr += paramArray.length * {{{ getNativeTypeSize('float') }}};
       }

+      // Copy output audio descriptor structs to Wasm (note that dataPtr after
+      // the struct offsets should now be 16-byte aligned).
+      outputsPtr = dataPtr;
+      k = {{{ getHeapOffset('outputsPtr', 'u32') }}};
+      dataPtr += numOutputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
+      for (i of outputList) {
+        // Write the AudioSampleFrame struct instance
+        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / getNativeTypeSize('u32') }}}] = i.length;
+        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.samplesPerChannel / getNativeTypeSize('u32') }}}] = this.samplesPerChannel;
+        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / getNativeTypeSize('u32') }}}] = dataPtr;
+#if MEMORY64
+        HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / getNativeTypeSize('u32') + 1 }}}] = dataPtr / 0x100000000;
+#endif
+        k += {{{ C_STRUCTS.AudioSampleFrame.__size__ / getNativeTypeSize('u32') }}};
+        // Advance the output pointer to the next output (matching the pre-allocated views)
+        dataPtr += this.bytesPerChannel * i.length;
+      }
+
+#if ASSERTIONS
+      // If all the maths worked out, we arrived at the original stack address
+      console.assert(dataPtr == oldStackPtr, `AudioWorklet stack mismatch (audio data finishes at ${dataPtr} instead of ${oldStackPtr})`);
+
+      // Sanity checks. If these trip, the most likely cause (beyond unforeseen
+      // stack shenanigans) is that the 'render quantum size' changed.
+      if (numOutputs) {
+        // First, check that the output view addresses match the stack positions.
+        k = dataPtr - this.bytesPerChannel;
+        for (i = 0; i < outputViewsNeeded; i++) {
+          console.assert(k == this.outputViews[i].byteOffset, 'AudioWorklet internal error in addresses of the output array views');
+          k -= this.bytesPerChannel;
+        }
+        // And that the views' sizes match the passed-in output buffers
+        for (i of outputList) {
+          for (j of i) {
+            console.assert(j.byteLength == this.bytesPerChannel, `AudioWorklet unexpected output buffer size (expected ${this.bytesPerChannel} got ${j.byteLength})`);
+          }
+        }
+      }
+#endif
+
       // Call out to Wasm callback to perform audio processing
-      if (didProduceAudio = this.callbackFunction(numInputs, inputsPtr, numOutputs, outputsPtr, numParams, paramsPtr, this.userData)) {
+      if (didProduceAudio = this.callbackFunction(numInputs, {{{ toIndexType('inputsPtr') }}}, numOutputs, {{{ toIndexType('outputsPtr') }}}, numParams, {{{ toIndexType('paramsPtr') }}}, this.userData)) {
         // Read back the produced audio data to all outputs and their channels.
-        // (A garbage-free function TypedArray.copy(dstTypedArray, dstOffset,
-        // srcTypedArray, srcOffset, count) would sure be handy.. but web does
-        // not have one, so manually copy all bytes in)
+        // The preallocated 'outputViews' already have the correct offsets and
+        // sizes into the stack (recall from the ctor that they run backwards).
+        k = outputViewsNeeded - 1;
         for (i of outputList) {
           for (j of i) {
-            for (k = 0; k < this.samplesPerChannel; ++k) {
-              j[k] = HEAPF32[outputDataPtr++];
-            }
+            j.set(this.outputViews[k--]);
           }
         }
       }
@@ -193,14 +273,9 @@ class BootstrapMessages extends AudioWorkletProcessor {
       // 'cb' the callback function
       // 'ch' the context handle
       // 'ud' the passed user data
-      p.postMessage({'_wsc': d['cb'], 'x': [d['ch'], 1/*EM_TRUE*/, d['ud']] });
+      p.postMessage({'_wsc': {{{ toIndexType("d['cb']") }}}, 'x': [d['ch'], 1/*EM_TRUE*/, {{{ toIndexType("d['ud']") }}}] });
     } else if (d['_wsc']) {
-#if MEMORY64
-      var ptr = BigInt(d['_wsc']);
-#else
-      var ptr = d['_wsc'];
-#endif
-      Module['wasmTable'].get(ptr)(...d['x']);
+      Module['wasmTable'].get({{{ toIndexType("d['_wsc']") }}})(...d['x']);
     };
   }
 }
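Note (illustrative, not part of the patch): the AudioSampleFrame/AudioParamFrame structs marshalled above are what the user's render callback receives on the Wasm side. A minimal C sketch of such a callback, assuming the struct layout from emscripten/webaudio.h and the callback shape used by emscripten_create_wasm_audio_worklet_node(); each input/output stores its channels contiguously, one samplesPerChannel block per channel, exactly as the marshalling code lays them out. The name ProcessAudio is made up:

#include <stdbool.h>
#include <string.h>
#include <emscripten/webaudio.h>

// Copies channel 0 of the first input to every output channel, or writes
// silence when there is no input. Returning true keeps the node producing audio.
bool ProcessAudio(int numInputs, const AudioSampleFrame *inputs,
                  int numOutputs, AudioSampleFrame *outputs,
                  int numParams, const AudioParamFrame *params,
                  void *userData) {
  for (int o = 0; o < numOutputs; o++) {
    int samples = outputs[o].samplesPerChannel;
    for (int ch = 0; ch < outputs[o].numberOfChannels; ch++) {
      // Channel data is contiguous: channel 'ch' begins at ch * samplesPerChannel
      float *dst = outputs[o].data + ch * samples;
      if (numInputs && inputs[0].numberOfChannels) {
        memcpy(dst, inputs[0].data, samples * sizeof(float));
      } else {
        memset(dst, 0, samples * sizeof(float));
      }
    }
  }
  return true;
}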
diff --git a/src/lib/libwebaudio.js b/src/lib/libwebaudio.js
index 159beae48c478..b07589e7e1ca2 100644
--- a/src/lib/libwebaudio.js
+++ b/src/lib/libwebaudio.js
@@ -78,7 +78,7 @@ let LibraryWebAudio = {
 #if WEBAUDIO_DEBUG
       console.log(`emscripten_resume_audio_context_async() callback: New audio state="${EmAudio[contextHandle].state}", ID=${state}`);
 #endif
-      {{{ makeDynCall('viii', 'callback') }}}(contextHandle, state, userData);
+      {{{ makeDynCall('viip', 'callback') }}}(contextHandle, state, userData);
     }
 #if WEBAUDIO_DEBUG
     console.log(`emscripten_resume_audio_context_async() resuming...`);
 #endif
@@ -162,9 +162,13 @@ let LibraryWebAudio = {
     console.log(`emscripten_start_wasm_audio_worklet_thread_async() adding audioworklet.js...`);
 #endif
-    let audioWorkletCreationFailed = () => {
+    let audioWorkletCreationFailed = (err) => {
 #if WEBAUDIO_DEBUG
-      console.error(`emscripten_start_wasm_audio_worklet_thread_async() addModule() failed!`);
+      // Note on Cross-Origin: missing Cross-Origin-Opener-Policy and
+      // Cross-Origin-Embedder-Policy headers on the response will result in
+      // the worklet file failing to load.
+      console.error(`emscripten_start_wasm_audio_worklet_thread_async() addModule() failed! Are the Cross-Origin headers being set?`);
+      if (err) console.error(err);
 #endif
       {{{ makeDynCall('viip', 'callback') }}}(contextHandle, 0/*EM_FALSE*/, userData);
     };
@@ -178,7 +182,7 @@ let LibraryWebAudio = {
       console.error(`AudioWorklets are not supported by current browser.`);
     }
 #endif
-      return audioWorkletCreationFailed();
+      return audioWorkletCreationFailed(null);
     }

     // TODO: In MINIMAL_RUNTIME builds, read this file off of a preloaded Blob,
@@ -222,7 +226,7 @@ let LibraryWebAudio = {
 #if WEBAUDIO_DEBUG
       console.log(`emscripten_start_wasm_audio_worklet_thread_async() addModule() of main application JS completed`);
 #endif
-      {{{ makeDynCall('viii', 'callback') }}}(contextHandle, 1/*EM_TRUE*/, userData);
+      {{{ makeDynCall('viip', 'callback') }}}(contextHandle, 1/*EM_TRUE*/, userData);
     }).catch(audioWorkletCreationFailed);
   },
@@ -241,24 +245,25 @@ let LibraryWebAudio = {
     assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_create_wasm_audio_worklet_processor_async() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`);
 #endif
-    options >>= 2;
     let audioParams = [],
-      numAudioParams = HEAPU32[options+1],
-      audioParamDescriptors = HEAPU32[options+2] >> 2,
+      processorName = UTF8ToString({{{ makeGetValue('options', C_STRUCTS.WebAudioWorkletProcessorCreateOptions.name, '*') }}}),
+      numAudioParams = {{{ makeGetValue('options', C_STRUCTS.WebAudioWorkletProcessorCreateOptions.numAudioParams, 'i32') }}},
+      audioParamDescriptors = {{{ makeGetValue('options', C_STRUCTS.WebAudioWorkletProcessorCreateOptions.audioParamDescriptors, '*') }}},
       i = 0;

     while (numAudioParams--) {
       audioParams.push({
         name: i++,
-        defaultValue: HEAPF32[audioParamDescriptors++],
-        minValue: HEAPF32[audioParamDescriptors++],
-        maxValue: HEAPF32[audioParamDescriptors++],
-        automationRate: ['a','k'][HEAPU32[audioParamDescriptors++]] + '-rate',
+        defaultValue: {{{ makeGetValue('audioParamDescriptors', C_STRUCTS.WebAudioParamDescriptor.defaultValue, 'float') }}},
+        minValue: {{{ makeGetValue('audioParamDescriptors', C_STRUCTS.WebAudioParamDescriptor.minValue, 'float') }}},
+        maxValue: {{{ makeGetValue('audioParamDescriptors', C_STRUCTS.WebAudioParamDescriptor.maxValue, 'float') }}},
+        automationRate: ({{{ makeGetValue('audioParamDescriptors', C_STRUCTS.WebAudioParamDescriptor.automationRate, 'i32') }}} ? 'k' : 'a') + '-rate'
       });
+      audioParamDescriptors += {{{ C_STRUCTS.WebAudioParamDescriptor.__size__ }}};
     }

 #if WEBAUDIO_DEBUG
-    console.log(`emscripten_create_wasm_audio_worklet_processor_async() creating a new AudioWorklet processor with name ${UTF8ToString(HEAPU32[options])}`);
+    console.log(`emscripten_create_wasm_audio_worklet_processor_async() creating a new AudioWorklet processor with name ${processorName}`);
 #endif

     EmAudio[contextHandle].audioWorklet.bootstrapMessage.port.postMessage({
@@ -266,7 +271,9 @@
       // Processor Name' used as a 'key' to verify the message type so as to
       // not get accidentally mixed with user submitted messages, the remainder
       // for space saving reasons, abbreviated from their variable names).
-      '_wpn': UTF8ToString(HEAPU32[options]),
+      // Note: we can only pass cloneable objects, so we need to pass the
+      // function pointer and not the Wasm function object.
+      '_wpn': processorName,
       'ap': audioParams,
       'ch': contextHandle,
       'cb': callback,
@@ -281,18 +288,20 @@
     assert(EmAudio[contextHandle], `Called emscripten_create_wasm_audio_worklet_node() with a nonexisting/already freed Web Audio Context handle ${contextHandle}!`);
     assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_create_wasm_audio_worklet_node() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`);
 #endif
-    options >>= 2;
     function readChannelCountArray(heapIndex, numOutputs) {
+      if (!heapIndex) return void 0;
+      heapIndex = {{{ getHeapOffset('heapIndex', 'i32') }}};
       let channelCounts = [];
       while (numOutputs--) channelCounts.push(HEAPU32[heapIndex++]);
       return channelCounts;
     }
+    let optionsOutputs = options ? {{{ makeGetValue('options', C_STRUCTS.EmscriptenAudioWorkletNodeCreateOptions.numberOfOutputs, 'i32') }}} : 0;
     let opts = options ? {
-      numberOfInputs: HEAP32[options],
-      numberOfOutputs: HEAP32[options+1],
-      outputChannelCount: HEAPU32[options+2] ? readChannelCountArray(HEAPU32[options+2]>>2, HEAP32[options+1]) : void 0,
+      numberOfInputs: {{{ makeGetValue('options', C_STRUCTS.EmscriptenAudioWorkletNodeCreateOptions.numberOfInputs, 'i32') }}},
+      numberOfOutputs: optionsOutputs,
+      outputChannelCount: readChannelCountArray({{{ makeGetValue('options', C_STRUCTS.EmscriptenAudioWorkletNodeCreateOptions.outputChannelCounts, 'i32*') }}}, optionsOutputs),
       processorOptions: {
         'cb': callback,
         'ud': userData,
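Note (illustrative, not part of the patch): the struct offsets read above via makeGetValue() correspond to the options structs declared in emscripten/webaudio.h. A hedged sketch of how the C side might fill them in; the API names (emscripten_create_wasm_audio_worklet_processor_async, emscripten_create_wasm_audio_worklet_node, WEBAUDIO_PARAM_K_RATE) come from the public header, while the processor name "my-processor", the parameter values and ProcessAudio (the callback sketched after the audio_worklet.js hunks) are made up:

#include <stdbool.h>
#include <emscripten/webaudio.h>

bool ProcessAudio(int, const AudioSampleFrame*, int, AudioSampleFrame*,
                  int, const AudioParamFrame*, void*);  // sketched earlier

// Once the processor exists, create a node with no inputs and one stereo output.
static void ProcessorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void *userData) {
  if (!success) return;
  int outputChannelCounts[1] = {2};  // one output carrying two channels
  EmscriptenAudioWorkletNodeCreateOptions nodeOptions = {
    .numberOfInputs = 0,
    .numberOfOutputs = 1,
    .outputChannelCounts = outputChannelCounts,
  };
  emscripten_create_wasm_audio_worklet_node(context, "my-processor", &nodeOptions,
                                            &ProcessAudio, userData);
}

// Registers the processor with a single k-rate parameter.
static void CreateProcessor(EMSCRIPTEN_WEBAUDIO_T context, void *userData) {
  WebAudioParamDescriptor params[1] = {{
    .defaultValue = 1.0f,
    .minValue = 0.0f,
    .maxValue = 2.0f,
    .automationRate = WEBAUDIO_PARAM_K_RATE,
  }};
  WebAudioWorkletProcessorCreateOptions options = {
    .name = "my-processor",
    .numAudioParams = 1,
    .audioParamDescriptors = params,
  };
  emscripten_create_wasm_audio_worklet_processor_async(context, &options,
                                                       &ProcessorCreated, userData);
}

The node handle returned by emscripten_create_wasm_audio_worklet_node() would then typically be connected to the context's destination, for example with emscripten_audio_node_connect().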
{ "__size__": 32, "charging": 24, @@ -1474,6 +1480,19 @@ "module": 8, "nextInChain": 0 }, + "WebAudioParamDescriptor": { + "__size__": 16, + "automationRate": 12, + "defaultValue": 0, + "maxValue": 8, + "minValue": 4 + }, + "WebAudioWorkletProcessorCreateOptions": { + "__size__": 24, + "audioParamDescriptors": 16, + "name": 0, + "numAudioParams": 8 + }, "__cxa_exception": { "__size__": 48, "adjustedPtr": 32, diff --git a/test/test_browser.py b/test/test_browser.py index c3daa051e7587..96447fc4e5d01 100644 --- a/test/test_browser.py +++ b/test/test_browser.py @@ -5470,8 +5470,6 @@ def test_full_js_library_strict(self): 'es6': (['-sEXPORT_ES6'],), 'strict': (['-sSTRICT'],), }) - @no_wasm64('https://github.com/emscripten-core/emscripten/pull/23508') - @no_2gb('https://github.com/emscripten-core/emscripten/pull/23508') @requires_sound_hardware def test_audio_worklet(self, args): self.btest_exit('webaudio/audioworklet.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '-DTEST_AND_EXIT'] + args) @@ -5494,8 +5492,6 @@ def test_audio_worklet_post_function(self, args): '': ([],), 'closure': (['--closure', '1', '-Oz'],), }) - @no_wasm64('https://github.com/emscripten-core/emscripten/pull/23508') - @no_2gb('https://github.com/emscripten-core/emscripten/pull/23508') @requires_sound_hardware def test_audio_worklet_modularize(self, args): self.btest_exit('webaudio/audioworklet.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '-sMODULARIZE=1', '-sEXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html'), '-DTEST_AND_EXIT'] + args) @@ -5507,8 +5503,6 @@ def test_audio_worklet_modularize(self, args): '': ([],), 'minimal_with_closure': (['-sMINIMAL_RUNTIME', '--closure=1', '-Oz'],), }) - @no_wasm64('https://github.com/emscripten-core/emscripten/pull/23508') - @no_2gb('https://github.com/emscripten-core/emscripten/pull/23508') @requires_sound_hardware def test_audio_worklet_params_mixing(self, args): os.mkdir('audio_files') @@ -5517,8 +5511,6 @@ def test_audio_worklet_params_mixing(self, args): self.btest_exit('webaudio/audioworklet_params_mixing.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '-DTEST_AND_EXIT'] + args) # Tests AudioWorklet with emscripten_lock_busyspin_wait_acquire() and friends - @no_wasm64('https://github.com/emscripten-core/emscripten/pull/23508') - @no_2gb('https://github.com/emscripten-core/emscripten/pull/23508') @requires_sound_hardware @also_with_minimal_runtime def test_audio_worklet_emscripten_locks(self):