@@ -28,11 +28,43 @@ function createWasmAudioWorkletProcessor(audioParams) {
       this.callback = {{{ makeDynCall('iipipipp', 'opts.callback') }}};
       this.userData = opts.userData;
       // Then the samples per channel to process, fixed for the lifetime of the
-      // context that created this processor. Note for when moving to Web Audio
-      // 1.1: the typed array passed to process() should be the same size as this
-      // 'render quantum size', and this exercise of passing in the value
-      // shouldn't be required (to be verified)
+      // context that created this processor. Even though this 'render quantum
+      // size' is fixed at 128 samples in the 1.0 spec, it will be variable in
+      // the 1.1 spec. It's passed in now, just to prove it's settable, but will
+      // eventually be a property of the AudioWorkletGlobalScope (globalThis).
       this.samplesPerChannel = opts.samplesPerChannel;
+      this.bytesPerChannel = this.samplesPerChannel * {{{ getNativeTypeSize('float') }}};
+
+      // Create up-front as many typed views for marshalling the output data as
+      // may be required (with an arbitrary maximum of 16, for the case where a
+      // multi-MB stack is passed), allocated at the *top* of the worklet's
+      // stack (and whose addresses are fixed). The 'minimum alloc' firstly
+      // stops STACK_OVERFLOW_CHECK failing (since the stack would otherwise be
+      // full, and 16 is the minimum allocation size due to alignment), and
+      // secondly leaves room for a single AudioSampleFrame as a minimum.
+      this.maxBuffers = Math.min(((wwParams.stackSize - /*minimum alloc*/ 16) / this.bytesPerChannel) | 0, /*sensible limit*/ 16);
+#if ASSERTIONS
+      console.assert(this.maxBuffers > 0, `AudioWorklet needs more stack allocating (at least ${this.bytesPerChannel})`);
+#endif
+      // These are still alloc'd to take advantage of the overflow checks, etc.
+      var oldStackPtr = stackSave();
+      var viewDataIdx = {{{ getHeapOffset('stackAlloc(this.maxBuffers * this.bytesPerChannel)', 'float') }}};
+#if WEBAUDIO_DEBUG
+      console.log(`AudioWorklet creating ${this.maxBuffers} buffer one-time views (for a stack size of ${wwParams.stackSize} at address 0x${(viewDataIdx * 4).toString(16)})`);
+#endif
+      this.outputViews = [];
+      for (var n = this.maxBuffers; n > 0; n--) {
+        // Added in reverse so the lowest indices are closest to the stack top
+        this.outputViews.unshift(
+          HEAPF32.subarray(viewDataIdx, viewDataIdx += this.samplesPerChannel)
+        );
+      }
+      stackRestore(oldStackPtr);
+
+#if ASSERTIONS
+      // Explicitly verify this later in process()
+      this.ctorOldStackPtr = oldStackPtr;
+#endif
     }

     static get parameterDescriptors() {
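The constructor hunk above hinges on one property: `HEAPF32.subarray()` returns a view that permanently aliases a fixed range of the Wasm heap, so views created once over the top of the worklet's stack can be reused on every `process()` call. Below is a minimal standalone sketch of that idea, not the Emscripten build itself; the heap size, buffer count, and the stand-in for the stack are invented for illustration.

```js
// Minimal standalone sketch (not Emscripten code): HEAPF32, the sizes and the
// two-buffer count are invented. The point: carve a region out of a larger
// Float32Array once, keep subarray views of it, and reuse those views on every
// callback instead of allocating anything per render quantum.
const HEAPF32 = new Float32Array(1024);   // stand-in for the Wasm heap
const samplesPerChannel = 128;
const maxBuffers = 2;

// Pretend the worklet stack ends at the top of this heap; reserve room for
// maxBuffers channel buffers there and wrap each one in a persistent view.
let viewDataIdx = HEAPF32.length - maxBuffers * samplesPerChannel;
const outputViews = [];
for (let n = maxBuffers; n > 0; n--) {
  outputViews.unshift(HEAPF32.subarray(viewDataIdx, viewDataIdx += samplesPerChannel));
}

// Each view aliases a fixed address in HEAPF32, so writes through it (as the
// Wasm callback's output would be) are visible with no further copying setup.
outputViews[0].fill(0.5);
console.log(HEAPF32[outputViews[0].byteOffset >> 2]); // 0.5
```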
@@ -53,26 +85,42 @@ function createWasmAudioWorkletProcessor(audioParams) {
       var entry;    // reused list entry or index
       var subentry; // reused channel or other array in each list entry or index

-      // Calculate how much stack space is needed.
-      var bytesPerChannel = this.samplesPerChannel * {{{ getNativeTypeSize('float') }}};
+      // Calculate the required stack and output buffer views
       var stackMemoryNeeded = (numInputs + numOutputs) * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
+      for (entry of inputList) {
+        stackMemoryNeeded += entry.length * this.bytesPerChannel;
+      }
+      var outputViewsNeeded = 0;
+      for (entry of outputList) {
+        outputViewsNeeded += entry.length;
+      }
+      stackMemoryNeeded += outputViewsNeeded * this.bytesPerChannel;
       var numParams = 0;
-      for (entry of inputList) stackMemoryNeeded += entry.length * bytesPerChannel;
-      for (entry of outputList) stackMemoryNeeded += entry.length * bytesPerChannel;
       for (entry in parameters) {
         stackMemoryNeeded += parameters[entry].byteLength + {{{ C_STRUCTS.AudioParamFrame.__size__ }}};
         ++numParams;
       }
+#if ASSERTIONS
+      console.assert(outputViewsNeeded <= this.outputViews.length, `Too many AudioWorklet outputs (need ${outputViewsNeeded} but have stack space for ${this.outputViews.length})`);
+#endif

-      // Allocate the necessary stack space.
       var oldStackPtr = stackSave();
-      var inputsPtr = stackAlloc(stackMemoryNeeded);
+      // Allocate the necessary stack space. All pointer variables are always
+      // in bytes; 'dataPtr' is the start of the data section, and advances as
+      // space for structs and data is taken; 'structPtr' is reused as the
+      // working start to each struct record.
+      // Here 'dataPtr' will be 16-byte aligned, from _emscripten_stack_alloc(),
+      // as were the output views, so we round up and advance the required bytes
+      // to ensure the addresses all work out at the end.
+      entry = (stackMemoryNeeded + 15) & ~15;
+      var dataPtr = stackAlloc(entry) + (entry - stackMemoryNeeded);

-      // Copy input audio descriptor structs and data to Wasm ('structPtr' is
-      // reused as the working start to each struct record, 'dataPtr' start of
-      // the data section, usually after all structs).
+      // Copy input audio descriptor structs and data to Wasm (recall, structs
+      // first, audio data after). 'inputsPtr' is the start of the C callback's
+      // input AudioSampleFrame.
+      var /*const*/ inputsPtr = dataPtr;
       var structPtr = inputsPtr;
-      var dataPtr = inputsPtr + numInputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
+      dataPtr += numInputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
       for (entry of inputList) {
         // Write the AudioSampleFrame struct instance
         {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.numberOfChannels, 'entry.length', 'u32') }}};
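The replacement allocation in this hunk relies on `stackAlloc()` returning 16-byte aligned pointers, which is why the size is rounded up and `dataPtr` is then advanced by the slack: the marshalled data is made to finish exactly on the saved stack pointer, where the constructor's fixed output views already sit. A toy model of that arithmetic, with an invented downward-growing allocator and made-up numbers:

```js
// Toy model (not Emscripten internals): a downward-growing stack whose
// allocator returns 16-byte aligned pointers, as stackAlloc() does.
let SP = 65536;                  // saved stack pointer, already 16-byte aligned
function stackAlloc(size) {
  SP = (SP - size) & ~15;        // grow downwards, keep 16-byte alignment
  return SP;
}

const stackMemoryNeeded = 100;   // some total that isn't a multiple of 16
const rounded = (stackMemoryNeeded + 15) & ~15;            // 112
const dataPtr = stackAlloc(rounded) + (rounded - stackMemoryNeeded);

// Writing exactly stackMemoryNeeded bytes from dataPtr finishes on the saved
// stack pointer, which is where the constructor's fixed output views live.
console.log(dataPtr + stackMemoryNeeded === 65536);        // true
```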
@@ -82,26 +130,13 @@ function createWasmAudioWorkletProcessor(audioParams) {
         // Marshal the input audio sample data for each audio channel of this input
         for (subentry of entry) {
           HEAPF32.set(subentry, {{{ getHeapOffset('dataPtr', 'float') }}});
-          dataPtr += bytesPerChannel;
+          dataPtr += this.bytesPerChannel;
         }
       }

-      // Copy output audio descriptor structs to Wasm
-      var outputsPtr = dataPtr;
-      structPtr = outputsPtr;
-      var outputDataPtr = (dataPtr += numOutputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}});
-      for (entry of outputList) {
-        // Write the AudioSampleFrame struct instance
-        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.numberOfChannels, 'entry.length', 'u32') }}};
-        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.samplesPerChannel, 'this.samplesPerChannel', 'u32') }}};
-        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.data, 'dataPtr', '*') }}};
-        structPtr += {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
-        // Reserve space for the output data
-        dataPtr += bytesPerChannel * entry.length;
-      }
-
-      // Copy parameters descriptor structs and data to Wasm
-      var paramsPtr = dataPtr;
+      // Copy parameters descriptor structs and data to Wasm. 'paramsPtr' is
+      // the start of the C callback's input AudioParamFrame.
+      var /*const*/ paramsPtr = dataPtr;
       structPtr = paramsPtr;
       dataPtr += numParams * {{{ C_STRUCTS.AudioParamFrame.__size__ }}};
       for (entry = 0; subentry = parameters[entry++];) {
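For context on the parameter loop this hunk feeds into: per the Web Audio spec, each entry in `process()`'s `parameters` object is a `Float32Array` holding either a single value (constant over the render quantum) or one value per sample, so each parameter's `byteLength` has to be measured rather than assumed. A hedged illustration with invented values (the numeric keys mirror how this processor registers its params by index):

```js
// Hedged illustration (values invented): what the 'parameters' argument can
// look like and why each entry's byteLength varies, so the stack space is
// summed per parameter rather than assumed.
const parameters = {
  0: new Float32Array(1),    // e.g. a gain that didn't change this quantum
  1: new Float32Array(128),  // e.g. a frequency automated sample by sample
};

let numParams = 0, paramBytes = 0;
for (const p in parameters) {
  paramBytes += parameters[p].byteLength;  // 4 for the first, 512 for the second
  ++numParams;
}
console.log(numParams, paramBytes);        // 2 516
```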
@@ -114,20 +149,54 @@ function createWasmAudioWorkletProcessor(audioParams) {
         dataPtr += subentry.length * {{{ getNativeTypeSize('float') }}};
       }

+      // Copy output audio descriptor structs to Wasm. 'outputsPtr' is the
+      // start of the C callback's output AudioSampleFrame.
+      // Note: dataPtr after the struct offsets should now be 16-byte aligned.
+      var /*const*/ outputsPtr = dataPtr;
+      structPtr = outputsPtr;
+      dataPtr += numOutputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
+      for (entry of outputList) {
+        // Write the AudioSampleFrame struct instance
+        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.numberOfChannels, 'entry.length', 'u32') }}};
+        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.samplesPerChannel, 'this.samplesPerChannel', 'u32') }}};
+        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.data, 'dataPtr', '*') }}};
+        structPtr += {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
+        // Advance the output pointer to the next output (matching the pre-allocated views)
+        dataPtr += this.bytesPerChannel * entry.length;
+      }
+
+#if ASSERTIONS
+      // If all the maths worked out, we arrived back at the original stack address
+      console.assert(dataPtr == oldStackPtr, `AudioWorklet stack mismatch (audio data finishes at ${dataPtr} instead of ${oldStackPtr})`);
+
+      // Sanity checks. If these trip, the most likely cause, beyond unforeseen
+      // stack shenanigans, is that the 'render quantum size' changed after
+      // construction (which shouldn't be possible).
+      if (numOutputs) {
+        // First check that the output view addresses match the stack positions
+        dataPtr -= this.bytesPerChannel;
+        for (entry = 0; entry < outputViewsNeeded; entry++) {
+          console.assert(dataPtr == this.outputViews[entry].byteOffset, 'AudioWorklet internal error in addresses of the output array views');
+          dataPtr -= this.bytesPerChannel;
+        }
+        // Then that the views' sizes match the passed-in output buffers
+        for (entry of outputList) {
+          for (subentry of entry) {
+            console.assert(subentry.byteLength == this.bytesPerChannel, `AudioWorklet unexpected output buffer size (expected ${this.bytesPerChannel} got ${subentry.byteLength})`);
+          }
+        }
+      }
+#endif
+
       // Call out to Wasm callback to perform audio processing
       var didProduceAudio = this.callback(numInputs, inputsPtr, numOutputs, outputsPtr, numParams, paramsPtr, this.userData);
       if (didProduceAudio) {
         // Read back the produced audio data to all outputs and their channels.
-        // (A garbage-free function TypedArray.copy(dstTypedArray, dstOffset,
-        // srcTypedArray, srcOffset, count) would sure be handy.. but web does
-        // not have one, so manually copy all bytes in)
-        outputDataPtr = {{{ getHeapOffset('outputDataPtr', 'float') }}};
+        // The preallocated 'outputViews' already have the correct offsets and
+        // sizes into the stack (recall from the ctor that they run backwards).
         for (entry of outputList) {
           for (subentry of entry) {
-            // repurposing structPtr for now
-            for (structPtr = 0; structPtr < this.samplesPerChannel; ++structPtr) {
-              subentry[structPtr] = HEAPF32[outputDataPtr++];
-            }
+            subentry.set(this.outputViews[--outputViewsNeeded]);
           }
         }
       }
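The net effect of the last hunk is that the read-back shrinks from a per-sample JS copy into a single bulk `TypedArray.prototype.set()` per channel through the pre-made view. A simplified sketch with toy buffers (sizes and contents invented) contrasting the two paths:

```js
// Simplified sketch (toy buffers, values invented): copying one produced
// channel from the heap back into the browser-provided output array.
const HEAPF32 = new Float32Array(256).map((_, i) => i);  // fake heap contents
const samplesPerChannel = 128;
const channelOut = new Float32Array(samplesPerChannel);  // what process() hands us
const view = HEAPF32.subarray(128, 256);                 // pre-made output view

// Old path: one element per iteration through a running heap index.
for (let i = 0, src = 128; i < samplesPerChannel; ++i) {
  channelOut[i] = HEAPF32[src++];
}

// New path: one bulk copy via the persistent view; no per-sample JS loop and
// no per-callback view construction.
channelOut.set(view);
console.log(channelOut[0], channelOut[127]); // 128 255
```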