diff --git a/test/test_browser.py b/test/test_browser.py
index 36f27b55b6534..c1ce997417fee 100644
--- a/test/test_browser.py
+++ b/test/test_browser.py
@@ -5495,17 +5495,21 @@ def test_audio_worklet_post_function(self, args):
   def test_audio_worklet_modularize(self, args):
     self.btest_exit('webaudio/audioworklet.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '-sMODULARIZE=1', '-sEXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')] + args)
 
-  # Tests multiple inputs, forcing a larger stack (note: passing BROWSER_TEST is
-  # specific to this test to allow it to exit rather than play forever).
+  # Tests an AudioWorklet with multiple stereo inputs mixed in the processor
+  # via a varying parameter to a single stereo output (exercising all of the
+  # struct copying in the API)
   @parameterized({
     '': ([],),
     'minimal_with_closure': (['-sMINIMAL_RUNTIME', '--closure=1', '-Oz'],),
   })
-  def test_audio_worklet_stereo_io(self, args):
+  @no_wasm64('https://github.com/emscripten-core/emscripten/pull/23508')
+  @no_2gb('https://github.com/emscripten-core/emscripten/pull/23508')
+  @requires_sound_hardware
+  def test_audio_worklet_params_mixing(self, args):
     os.mkdir('audio_files')
     shutil.copy(test_file('webaudio/audio_files/emscripten-beat.mp3'), 'audio_files/')
     shutil.copy(test_file('webaudio/audio_files/emscripten-bass.mp3'), 'audio_files/')
-    self.btest_exit('webaudio/audioworklet_in_out_stereo.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '-DBROWSER_TEST'] + args)
+    self.btest_exit('webaudio/audioworklet_params_mixing.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '-DTEST_AND_EXIT'] + args)
 
   def test_error_reporting(self):
     # Test catching/reporting Error objects
diff --git a/test/test_interactive.py b/test/test_interactive.py
index 78de7771cd5f7..e25bd12f4f9b2 100644
--- a/test/test_interactive.py
+++ b/test/test_interactive.py
@@ -334,10 +334,32 @@ def test_audio_worklet_2x_hard_pan_io(self):
     shutil.copy(test_file('webaudio/audio_files/emscripten-bass-mono.mp3'), 'audio_files/')
     self.btest_exit('webaudio/audioworklet_2x_in_hard_pan.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS'])
 
+  # Tests an AudioWorklet with multiple stereo inputs mixed in the processor via a parameter to a single stereo output (6kB stack)
+  def test_audio_worklet_params_mixing(self):
+    os.mkdir('audio_files')
+    shutil.copy(test_file('webaudio/audio_files/emscripten-beat.mp3'), 'audio_files/')
+    shutil.copy(test_file('webaudio/audio_files/emscripten-bass.mp3'), 'audio_files/')
+    self.btest_exit('webaudio/audioworklet_params_mixing.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS'])
+
 
 class interactive64(interactive):
   def setUp(self):
     super().setUp()
     self.set_setting('MEMORY64')
-    self.emcc_args.append('-Wno-experimental')
     self.require_wasm64()
+
+
+class interactive64_4gb(interactive):
+  def setUp(self):
+    super().setUp()
+    self.set_setting('MEMORY64')
+    self.set_setting('INITIAL_MEMORY', '4200mb')
+    self.set_setting('GLOBAL_BASE', '4gb')
+    self.require_wasm64()
+
+
+class interactive_2gb(interactive):
+  def setUp(self):
+    super().setUp()
+    self.set_setting('INITIAL_MEMORY', '2200mb')
+    self.set_setting('GLOBAL_BASE', '2gb')
diff --git a/test/webaudio/audioworklet_2x_in_hard_pan.c b/test/webaudio/audioworklet_2x_in_hard_pan.c
index 682a6ec053cef..636b44e777b78 100644
--- a/test/webaudio/audioworklet_2x_in_hard_pan.c
+++ b/test/webaudio/audioworklet_2x_in_hard_pan.c
@@ -1,6 +1,5 @@
 #include
 #include
-#include
 #include
 #include
 
@@ -16,30 +15,41 @@
 // Callback to process and copy the audio tracks
 bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, AudioSampleFrame* outputs, int numParams, const AudioParamFrame* params, void* data) {
+#ifdef TEST_AND_EXIT
   audioProcessedCount++;
+#endif
 
-  // Twin mono in, single stereo out
+  // Twin mono in (or disabled), single stereo out
   assert(numInputs == 2 && numOutputs == 1);
-  assert(inputs[0].numberOfChannels == 1 && inputs[1].numberOfChannels == 1);
+  assert(inputs[0].numberOfChannels == 0 || inputs[0].numberOfChannels == 1);
+  assert(inputs[1].numberOfChannels == 0 || inputs[1].numberOfChannels == 1);
   assert(outputs[0].numberOfChannels == 2);
   // All with the same number of samples
   assert(inputs[0].samplesPerChannel == inputs[1].samplesPerChannel);
   assert(inputs[0].samplesPerChannel == outputs[0].samplesPerChannel);
-  // Now with all known quantities we can memcpy the data
-  int samplesPerChannel = inputs[0].samplesPerChannel;
-  memcpy(outputs[0].data, inputs[0].data, samplesPerChannel * sizeof(float));
-  memcpy(outputs[0].data + samplesPerChannel, inputs[1].data, samplesPerChannel * sizeof(float));
+  // Now with all known quantities we can memcpy the L&R data (or zero it if the
+  // channels are disabled)
+  int bytesPerChannel = outputs[0].samplesPerChannel * sizeof(float);
+  float* outputData = outputs[0].data;
+  if (inputs[0].numberOfChannels > 0) {
+    memcpy(outputData, inputs[0].data, bytesPerChannel);
+  } else {
+    memset(outputData, 0, bytesPerChannel);
+  }
+  outputData += outputs[0].samplesPerChannel;
+  if (inputs[1].numberOfChannels > 0) {
+    memcpy(outputData, inputs[1].data, bytesPerChannel);
+  } else {
+    memset(outputData, 0, bytesPerChannel);
+  }
   return true;
 }
 
 // Audio processor created, now register the audio callback
 void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
-  if (!success) {
-    printf("Audio worklet node creation failed\n");
-    return;
-  }
-  printf("Audio worklet processor created\n");
-  printf("Click to toggle audio playback\n");
+  assert(success && "Audio worklet failed in processorCreated()");
+  emscripten_out("Audio worklet processor created");
+  emscripten_out("Click to toggle audio playback");
 
   // Stereo output, two inputs
   int outputChannelCounts[2] = { 2 };
@@ -65,6 +75,13 @@ void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
   // Register a click to start playback
   emscripten_set_click_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, WA_2_VOIDP(context), false, &onClick);
 
-  // Register the counter that exits the test after one second of mixing
+#ifdef TEST_AND_EXIT
+  // Register the counter that exits the test after one second of playback
   emscripten_set_timeout_loop(&playedAndMixed, 16, NULL);
+#endif
+}
+
+// This implementation has no custom start-up requirements
+EmscriptenStartWebAudioWorkletCallback getStartCallback(void) {
+  return &initialised;
 }
diff --git a/test/webaudio/audioworklet_2x_in_out_stereo.c b/test/webaudio/audioworklet_2x_in_out_stereo.c
index eb51bcdaa9db7..f9a773742812a 100644
--- a/test/webaudio/audioworklet_2x_in_out_stereo.c
+++ b/test/webaudio/audioworklet_2x_in_out_stereo.c
@@ -1,6 +1,5 @@
 #include
 #include
-#include
 #include
 #include
 
@@ -15,31 +14,41 @@
 // Callback to process and copy the audio tracks
 bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, AudioSampleFrame* outputs, int numParams, const AudioParamFrame* params, void* data) {
+#ifdef TEST_AND_EXIT
   audioProcessedCount++;
+#endif
 
   // Twin stereo in and out
   assert(numInputs == 2 && numOutputs == 2);
-  assert(inputs[0].numberOfChannels == 2 && inputs[1].numberOfChannels == 2);
-  assert(outputs[0].numberOfChannels == 2 && outputs[1].numberOfChannels == 2);
+  assert(inputs[0].numberOfChannels == 0 || inputs[0].numberOfChannels == 2);
+  assert(inputs[1].numberOfChannels == 0 || inputs[1].numberOfChannels == 2);
+  assert(outputs[0].numberOfChannels == 2);
+  assert(outputs[1].numberOfChannels == 2);
   // All with the same number of samples
   assert(inputs[0].samplesPerChannel == inputs[1].samplesPerChannel);
   assert(inputs[0].samplesPerChannel == outputs[0].samplesPerChannel);
   assert(outputs[0].samplesPerChannel == outputs[1].samplesPerChannel);
-  // Now with all known quantities we can memcpy the data
-  int totalSamples = outputs[0].samplesPerChannel * outputs[0].numberOfChannels;
-  memcpy(outputs[0].data, inputs[0].data, totalSamples * sizeof(float));
-  memcpy(outputs[1].data, inputs[1].data, totalSamples * sizeof(float));
+  // Now with all known quantities we can memcpy all the data (or zero it if the
+  // channels are disabled)
+  int totalBytes = outputs[0].samplesPerChannel * outputs[0].numberOfChannels * sizeof(float);
+  if (inputs[0].numberOfChannels > 0) {
+    memcpy(outputs[0].data, inputs[0].data, totalBytes);
+  } else {
+    memset(outputs[0].data, 0, totalBytes);
+  }
+  if (inputs[1].numberOfChannels > 0) {
+    memcpy(outputs[1].data, inputs[1].data, totalBytes);
+  } else {
+    memset(outputs[1].data, 0, totalBytes);
+  }
   return true;
 }
 
 // Audio processor created, now register the audio callback
 void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
-  if (!success) {
-    printf("Audio worklet node creation failed\n");
-    return;
-  }
-  printf("Audio worklet processor created\n");
-  printf("Click to toggle audio playback\n");
+  assert(success && "Audio worklet failed in processorCreated()");
+  emscripten_out("Audio worklet processor created");
+  emscripten_out("Click to toggle audio playback");
 
   // Two stereo outputs, two inputs
   int outputChannelCounts[2] = { 2, 2 };
@@ -67,6 +76,13 @@ void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
   // Register a click to start playback
   emscripten_set_click_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, WA_2_VOIDP(context), false, &onClick);
 
-  // Register the counter that exits the test after one second of mixing
+#ifdef TEST_AND_EXIT
+  // Register the counter that exits the test after one second of playback
   emscripten_set_timeout_loop(&playedAndMixed, 16, NULL);
+#endif
+}
+
+// This implementation has no custom start-up requirements
+EmscriptenStartWebAudioWorkletCallback getStartCallback(void) {
+  return &initialised;
 }
diff --git a/test/webaudio/audioworklet_in_out_mono.c b/test/webaudio/audioworklet_in_out_mono.c
index 56e405d95c2e4..bd76e1fc1c863 100644
--- a/test/webaudio/audioworklet_in_out_mono.c
+++ b/test/webaudio/audioworklet_in_out_mono.c
@@ -1,6 +1,5 @@
 #include
 #include
-#include
 #include
 #include
 
@@ -16,7 +15,9 @@
 // Callback to process and mix the audio tracks
 bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, AudioSampleFrame* outputs, int numParams, const AudioParamFrame* params, void* data) {
+#ifdef TEST_AND_EXIT
   audioProcessedCount++;
+#endif
 
   // Single mono output
   assert(numOutputs == 1 && outputs[0].numberOfChannels == 1);
@@ -29,11 +30,18 @@ bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, Audi
   // We can now do a quick mix since we know the layouts
   if (numInputs > 0) {
     int totalSamples = outputs[0].samplesPerChannel * outputs[0].numberOfChannels;
+    // Simple copy of the first input's audio data, checking that we have
+    // channels (since a muted input has zero channels).
     float* outputData = outputs[0].data;
-    memcpy(outputData, inputs[0].data, totalSamples * sizeof(float));
+    if (inputs[0].numberOfChannels > 0) {
+      memcpy(outputData, inputs[0].data, totalSamples * sizeof(float));
+    } else {
+      // And for muted we need to fill the buffer with zeroes, otherwise it repeats the last frame
+      memset(outputData, 0, totalSamples * sizeof(float));
+    }
+    // Now add in the other inputs
     for (int n = 1; n < numInputs; n++) {
-      // It's possible to have an input with no channels
-      if (inputs[n].numberOfChannels == 1) {
+      if (inputs[n].numberOfChannels > 0) {
        float* inputData = inputs[n].data;
        for (int i = totalSamples - 1; i >= 0; i--) {
          outputData[i] += inputData[i];
@@ -46,12 +54,9 @@ bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, Audi
 
 // Audio processor created, now register the audio callback
 void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
-  if (!success) {
-    printf("Audio worklet node creation failed\n");
-    return;
-  }
-  printf("Audio worklet processor created\n");
-  printf("Click to toggle audio playback\n");
+  assert(success && "Audio worklet failed in processorCreated()");
+  emscripten_out("Audio worklet processor created");
+  emscripten_out("Click to toggle audio playback");
 
   // Mono output, two inputs
   int outputChannelCounts[1] = { 1 };
@@ -77,6 +82,13 @@ void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
   // Register a click to start playback
   emscripten_set_click_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, WA_2_VOIDP(context), false, &onClick);
 
+#ifdef TEST_AND_EXIT
   // Register the counter that exits the test after one second of mixing
   emscripten_set_timeout_loop(&playedAndMixed, 16, NULL);
+#endif
+}
+
+// This implementation has no custom start-up requirements
+EmscriptenStartWebAudioWorkletCallback getStartCallback(void) {
+  return &initialised;
 }
diff --git a/test/webaudio/audioworklet_in_out_stereo.c b/test/webaudio/audioworklet_in_out_stereo.c
index 107e0638a9e2c..3c41a26c41e4a 100644
--- a/test/webaudio/audioworklet_in_out_stereo.c
+++ b/test/webaudio/audioworklet_in_out_stereo.c
@@ -1,6 +1,5 @@
 #include
 #include
-#include
 #include
 #include
 
@@ -16,7 +15,9 @@
 // Callback to process and mix the audio tracks
 bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, AudioSampleFrame* outputs, int numParams, const AudioParamFrame* params, void* data) {
+#ifdef TEST_AND_EXIT
   audioProcessedCount++;
+#endif
 
   // Single stereo output
   assert(numOutputs == 1 && outputs[0].numberOfChannels == 2);
@@ -29,11 +30,18 @@ bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, Audi
   // We can now do a quick mix since we know the layouts
   if (numInputs > 0) {
     int totalSamples = outputs[0].samplesPerChannel * outputs[0].numberOfChannels;
+    // Simple copy of the first input's audio data, checking that we have
+    // channels (since a muted input has zero channels).
     float* outputData = outputs[0].data;
-    memcpy(outputData, inputs[0].data, totalSamples * sizeof(float));
+    if (inputs[0].numberOfChannels > 0) {
+      memcpy(outputData, inputs[0].data, totalSamples * sizeof(float));
+    } else {
+      // And for muted we need to fill the buffer with zeroes, otherwise it repeats the last frame
+      memset(outputData, 0, totalSamples * sizeof(float));
+    }
+    // Now add in the other inputs
     for (int n = 1; n < numInputs; n++) {
-      // It's possible to have an input with no channels
-      if (inputs[n].numberOfChannels == 2) {
+      if (inputs[n].numberOfChannels > 0) {
        float* inputData = inputs[n].data;
        for (int i = totalSamples - 1; i >= 0; i--) {
          outputData[i] += inputData[i];
@@ -46,12 +54,9 @@ bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, Audi
 
 // Audio processor created, now register the audio callback
 void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
-  if (!success) {
-    printf("Audio worklet node creation failed\n");
-    return;
-  }
-  printf("Audio worklet processor created\n");
-  printf("Click to toggle audio playback\n");
+  assert(success && "Audio worklet failed in processorCreated()");
+  emscripten_out("Audio worklet processor created");
+  emscripten_out("Click to toggle audio playback");
 
   // Stereo output, two inputs
   int outputChannelCounts[1] = { 2 };
@@ -77,6 +82,13 @@ void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
   // Register a click to start playback
   emscripten_set_click_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, WA_2_VOIDP(context), false, &onClick);
 
+#ifdef TEST_AND_EXIT
   // Register the counter that exits the test after one second of mixing
   emscripten_set_timeout_loop(&playedAndMixed, 16, NULL);
+#endif
+}
+
+// This implementation has no custom start-up requirements
+EmscriptenStartWebAudioWorkletCallback getStartCallback(void) {
+  return &initialised;
 }
diff --git a/test/webaudio/audioworklet_params_mixing.c b/test/webaudio/audioworklet_params_mixing.c
new file mode 100644
index 0000000000000..446888dcea4cc
--- /dev/null
+++ b/test/webaudio/audioworklet_params_mixing.c
@@ -0,0 +1,174 @@
+#include
+#include
+
+#include
+#include
+
+// Tests processing two stereo audio inputs being mixed to a single stereo audio
+// output in process(), then applying a fade from the parameters.
+
+// This needs to be big enough for the stereo output, 2x inputs, 2x params and
+// the worker stack. Note that different browsers have different stack size
+// requirements (see notes in process() plus the expansion of the params).
+#define AUDIO_STACK_SIZE 6144
+
+// Shared file playback and bootstrap
+#include "audioworklet_test_shared.inc"
+
+// Callback to process and mix the audio tracks
+bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, AudioSampleFrame* outputs, int numParams, const AudioParamFrame* params, void* data) {
+#ifdef TEST_AND_EXIT
+  audioProcessedCount++;
+#endif
+
+  // Single stereo output
+  assert(numOutputs == 1 && outputs[0].numberOfChannels == 2);
+  int outSamplesPerChannel = outputs[0].samplesPerChannel;
+  for (int n = 0; n < numInputs; n++) {
+    // And all inputs are also stereo (or disabled)
+    assert(inputs[n].numberOfChannels == 2 || inputs[n].numberOfChannels == 0);
+    // This should always be the case
+    assert(inputs[n].samplesPerChannel == outSamplesPerChannel);
+  }
+  // Interestingly, params varies per browser. Chrome won't have a length > 1
+  // unless the value changes, and FF has all 128 entries even for a k-rate
+  // parameter. The only guarantee is that two params are incoming:
+  assert(numParams == 2);
+  assert(params[0].length == 1 || params[0].length == outSamplesPerChannel);
+  assert(params[1].length == 1 || params[1].length == outSamplesPerChannel);
+  // We can now do a quick mix since we know the layouts
+  if (numInputs > 0) {
+    int totalSamples = outSamplesPerChannel * outputs[0].numberOfChannels;
+    // Simple copy of the first input's audio data, checking that we have
+    // channels (since a muted input has zero channels).
+    float* outputData = outputs[0].data;
+    if (inputs[0].numberOfChannels > 0) {
+      memcpy(outputData, inputs[0].data, totalSamples * sizeof(float));
+    } else {
+      // And for muted we need to fill the buffer with zeroes, otherwise it repeats the last frame
+      memset(outputData, 0, totalSamples * sizeof(float));
+    }
+    // Grab the mix level parameter and expand it to have one entry per output
+    // sample. This simplifies the mixer and smooths out browser differences.
+    // Output and input buffers are stereo planar, so the mix data just repeats.
+    float* const mixLevel = alloca(totalSamples * sizeof(float));
+    if (params[0].length > 1) {
+      // This is the regular path, one entry per sample by number of channels
+      for (int ch = outputs[0].numberOfChannels - 1; ch >= 0; ch--) {
+        memcpy(mixLevel + ch * outSamplesPerChannel, params[0].data, outSamplesPerChannel * sizeof(float));
+      }
+    } else {
+      // Chrome will take this path when the k-rate parameter doesn't change
+      float singleLevel = params[0].data[0];
+      for (int n = totalSamples - 1; n >= 0; n--) {
+        mixLevel[n] = singleLevel;
+      }
+    }
+    // Now add in the other inputs with the mix level
+    for (int n = 1; n < numInputs; n++) {
+      if (inputs[n].numberOfChannels > 0) {
+        float* inputData = inputs[n].data;
+        for (int i = totalSamples - 1; i >= 0; i--) {
+          outputData[i] += inputData[i] * mixLevel[i];
+        }
+      }
+    }
+  }
+  return true;
+}
+
+// Grabs the known worklet parameter and fades it in or out (reversing the
+// fade direction depending on whether it's already fading up or down).
+EM_JS(void, doFade, (EMSCRIPTEN_AUDIO_WORKLET_NODE_T workletID), {
+  var worklet = emscriptenGetAudioObject(workletID);
+  if (worklet) {
+    // Emscripten's API creates these from a C array, indexing them instead of a
+    // name. Chrome and FF work with 0 but Safari requires the correct "0".
+    var param = worklet.parameters.get("0");
+    if (param) {
+      param.setTargetAtTime((param.value > 0.5) ? 0 : 1, 0 /* same as context.currentTime */, 0.5);
+    }
+  }
+})
+
+// Registered keypress event to call the JS doFade()
+bool onPress(int type, const EmscriptenKeyboardEvent* e, void* data) {
+  if (!e->repeat && data) {
+    emscripten_out("Toggling fade");
+    doFade(VOIDP_2_WA(data));
+  }
+  return false;
+}
+
+// Audio processor created, now register the audio callback
+void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
+  assert(success && "Audio worklet failed in processorCreated()");
+  emscripten_out("Audio worklet processor created");
+  emscripten_out("Click to toggle audio playback");
+  emscripten_out("Keypress to fade the beat in or out");
+
+  // Stereo output, two inputs
+  int outputChannelCounts[1] = { 2 };
+  EmscriptenAudioWorkletNodeCreateOptions opts = {
+    .numberOfInputs = 2,
+    .numberOfOutputs = 1,
+    .outputChannelCounts = outputChannelCounts
+  };
+  EMSCRIPTEN_AUDIO_WORKLET_NODE_T worklet = emscripten_create_wasm_audio_worklet_node(context, "mixer", &opts, &process, NULL);
+  emscripten_audio_node_connect(worklet, context, 0, 0);
+
+  // Create the two stereo source nodes and connect them to the two inputs
+  // Note: we can connect the sources to the same input and it'll get mixed for us, but that's not the point
+  beatID = createTrack(context, "audio_files/emscripten-beat.mp3", true);
+  if (beatID) {
+    emscripten_audio_node_connect(beatID, worklet, 0, 1);
+  }
+  bassID = createTrack(context, "audio_files/emscripten-bass.mp3", true);
+  if (bassID) {
+    emscripten_audio_node_connect(bassID, worklet, 0, 0);
+  }
+
+  // Register a click to start playback
+  emscripten_set_click_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, WA_2_VOIDP(context), false, &onClick);
+  // And a keypress to affect the fader
+  emscripten_set_keypress_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, WA_2_VOIDP(worklet), false, &onPress);
+
+#ifdef TEST_AND_EXIT
+  // Register the counter that exits the test after one second of mixing
+  emscripten_set_timeout_loop(&playedAndMixed, 16, NULL);
+#endif
+}
+
+// Worklet thread inited, now create the audio processor
+void initialisedWithParams(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
+  assert(success && "Audio worklet failed in initialisedWithParams()");
+  emscripten_out("Audio worklet initialised");
+
+  // Custom audio params we'll use as a fader
+  WebAudioParamDescriptor faderParam[] = {
+    {
+      // This a-rate parameter (one entry per sample) is used to set the mix level
+      .defaultValue = 1.0f,
+      .minValue = 0.0f,
+      .maxValue = 1.0f,
+      .automationRate = WEBAUDIO_PARAM_A_RATE
+    }, {
+      // This k-rate parameter (one entry per frame) is used just to test
+      .defaultValue = 0.0f,
+      .minValue = -100.0f,
+      .maxValue = 100.0f,
+      .automationRate = WEBAUDIO_PARAM_K_RATE
+    }
+  };
+  WebAudioWorkletProcessorCreateOptions opts = {
+    .name = "mixer",
+    .numAudioParams = 2,
+    .audioParamDescriptors = faderParam
+  };
+  emscripten_create_wasm_audio_worklet_processor_async(context, &opts, &processorCreated, NULL);
+}
+
+// This implementation has no custom start-up requirements
+EmscriptenStartWebAudioWorkletCallback getStartCallback(void) {
+  return &initialisedWithParams;
+}
diff --git a/test/webaudio/audioworklet_test_shared.inc b/test/webaudio/audioworklet_test_shared.inc
index 7077a6cb833be..777f106b1e57c 100644
--- a/test/webaudio/audioworklet_test_shared.inc
+++ b/test/webaudio/audioworklet_test_shared.inc
@@ -5,10 +5,13 @@
 // Helper for MEMORY64 to cast a void* to an audio context or type
 #define VOIDP_2_WA(ptr) ((EMSCRIPTEN_WEBAUDIO_T) (intptr_t) ptr)
 
-// Count the audio callbacks and return after 375 frames (1 second with the default 128 size)
+
+// Count the audio callbacks and return after 375 frames (1 second with the
+// default 128 size).
 //
 // *** Remove this in your own code ***
 //
+#ifdef TEST_AND_EXIT
 volatile int audioProcessedCount = 0;
 bool playedAndMixed(double time, void* data) {
   if (audioProcessedCount >= 375) {
@@ -17,6 +20,7 @@ bool playedAndMixed(double time, void* data) {
   }
   return true;
 }
+#endif
 
 // ID to the beat and bass loops
 EMSCRIPTEN_WEBAUDIO_T beatID = 0;
@@ -28,13 +32,15 @@ EM_JS(EMSCRIPTEN_WEBAUDIO_T, createTrack, (EMSCRIPTEN_WEBAUDIO_T ctxID, const ch
   var context = emscriptenGetAudioObject(ctxID);
   if (context) {
     var audio = document.createElement('audio');
-    audio.src = UTF8ToString(url);
+    // Number() wrapper is a workaround for UTF8ToString() needing a JS number
+    // and from64() not being available in EM_JS macros. Fix in UTF8ToString?
+    audio.src = UTF8ToString(Number(url));
     audio.loop = looping;
     var track = context.createMediaElementSource(audio);
     return emscriptenRegisterAudioObject(track);
   }
   return 0;
-});
+})
 
 // Toggles the play/pause of a MediaElementAudioSourceNode given its ID
 EM_JS(void, toggleTrack, (EMSCRIPTEN_WEBAUDIO_T srcID), {
@@ -50,16 +56,16 @@
       }
     }
   }
-});
+})
 
-// Registered click even to (1) enable audio playback and (2) toggle playing the tracks
+// Registered click event to (1) enable audio playback and (2) toggle playing the tracks
 bool onClick(int type, const EmscriptenMouseEvent* e, void* data) {
   EMSCRIPTEN_WEBAUDIO_T ctx = VOIDP_2_WA(data);
   if (emscripten_audio_context_state(ctx) != AUDIO_CONTEXT_STATE_RUNNING) {
-    printf("Resuming playback\n");
+    emscripten_out("Resuming playback");
     emscripten_resume_audio_context_sync(ctx);
   }
-  printf("Toggling audio playback\n");
+  emscripten_out("Toggling audio playback");
   toggleTrack(beatID);
   toggleTrack(bassID);
   return false;
@@ -70,26 +76,32 @@ void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data);
 
 // Worklet thread inited, now create the audio processor
 void initialised(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
-  if (!success) {
-    printf("Audio worklet failed to initialise\n");
-    return;
-  }
-  printf("Audio worklet initialised\n");
+  assert(success && "Audio worklet failed in initialised()");
+  emscripten_out("Audio worklet initialised");
 
   WebAudioWorkletProcessorCreateOptions opts = {
-    .name = "mixer",
+    .name = "mixer"
   };
   emscripten_create_wasm_audio_worklet_processor_async(context, &opts, &processorCreated, NULL);
 }
 
+// To be implemented by the test code, allowing initialised() to be changed.
+EmscriptenStartWebAudioWorkletCallback getStartCallback(void);
+
 // Common entry point for the mixer tests
-int main() {
-  static char workletStack[AUDIO_STACK_SIZE];
+int main(void) {
+  char* const workletStack = memalign(16, AUDIO_STACK_SIZE);
+  emscripten_outf("Audio worklet stack at %p", workletStack);
+  assert(workletStack);
+
   EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(NULL);
-  emscripten_start_wasm_audio_worklet_thread_async(context, workletStack, sizeof workletStack, &initialised, NULL);
-#ifndef BROWSER_TEST
-  // Special case: browser tests need to exit instantly, interactive tests need to wait
-  emscripten_runtime_keepalive_push();
+  emscripten_start_wasm_audio_worklet_thread_async(context, workletStack, AUDIO_STACK_SIZE, getStartCallback(), NULL);
+
+#ifdef TEST_AND_EXIT
+  // We're in the test harness and exiting is via playedAndMixed()
+  emscripten_out("In test mode, will exit after 1 second of playback");
 #endif
-  return 0;
+  emscripten_exit_with_live_runtime();
+
+  return EXIT_SUCCESS;
 }