@@ -38,6 +38,7 @@ function createWasmAudioWorkletProcessor(audioParams) {
38
38
// the 1.1 spec. It's passed in now, just to prove it's settable, but will
39
39
// eventually be a property of the AudioWorkletGlobalScope (globalThis).
40
40
this . samplesPerChannel = opts [ 'sc' ] ;
41
+ this . bytesPerChannel = this . samplesPerChannel * { { { getNativeTypeSize ( 'float' ) } } } ;
41
42
42
43
// Create up-front as many typed views for marshalling the output data as
43
44
// may be required (with an arbitrary maximum of 10, for the case where a
@@ -46,17 +47,13 @@ function createWasmAudioWorkletProcessor(audioParams) {
46
47
// stops STACK_OVERFLOW_CHECK failing (since the stack will be full, and
47
48
// 16 being the minimum allocation size due to alignments) and leaves room
48
49
// for a single AudioSampleFrame as a minimum.
49
- // Note: here and in the rest of the code the natural '>>> 2' unsigned
50
- // shifts for bytes to HEAPU32 offsets have been replaced with '/ 4',
51
- // otherwise the values are truncated to 32-bit addresses, which fails
52
- // when compiling with MEMORY64.
53
- this . maxBuffers = Math . min ( ( ( Module [ 'sz' ] - /*minimum alloc*/ 16 ) / ( this . samplesPerChannel * 4 ) ) | 0 , /*sensible limit*/ 10 ) ;
50
+ this . maxBuffers = Math . min ( ( ( Module [ 'sz' ] - /*minimum alloc*/ 16 ) / this . bytesPerChannel ) | 0 , /*sensible limit*/ 10 ) ;
54
51
#if ASSERTIONS
55
52
console . assert ( this . maxBuffers > 0 , `AudioWorklet needs more stack allocating (at least ${ this . samplesPerChannel * 4 } )` ) ;
56
53
#endif
57
54
// These are still alloc'd to take advantage of the overflow checks, etc.
58
55
var oldStackPtr = stackSave ( ) ;
59
- var viewDataIdx = stackAlloc ( this . maxBuffers * this . samplesPerChannel * 4 ) / 4 ;
56
+ var viewDataIdx = { { { getHeapOffset ( ' stackAlloc(this.maxBuffers * this.bytesPerChannel)' , 'float' ) } } } ;
60
57
#if WEBAUDIO_DEBUG
61
58
console . log ( `AudioWorklet creating ${ this . maxBuffers } buffer one-time views (for a stack size of ${ Module [ 'sz' ] } at address 0x${ ( viewDataIdx * 4 ) . toString ( 16 ) } )` ) ;
62
59
#endif
@@ -87,17 +84,16 @@ function createWasmAudioWorkletProcessor(audioParams) {
87
84
let numInputs = inputList . length ,
88
85
numOutputs = outputList . length ,
89
86
numParams = 0 , i , j , k , dataPtr ,
90
- bytesPerChannel = this . samplesPerChannel * 4 ,
91
87
outputViewsNeeded = 0 ,
92
88
stackMemoryNeeded = ( numInputs + numOutputs ) * { { { C_STRUCTS . AudioSampleFrame . __size__ } } } ,
93
89
oldStackPtr = stackSave ( ) ,
94
90
inputsPtr , outputsPtr , paramsPtr ,
95
91
didProduceAudio , paramArray ;
96
92
97
93
// Calculate how much stack space is needed
98
- for ( i of inputList ) stackMemoryNeeded += i . length * bytesPerChannel ;
94
+ for ( i of inputList ) stackMemoryNeeded += i . length * this . bytesPerChannel ;
99
95
for ( i of outputList ) outputViewsNeeded += i . length ;
100
- stackMemoryNeeded += outputViewsNeeded * bytesPerChannel ;
96
+ stackMemoryNeeded += outputViewsNeeded * this . bytesPerChannel ;
101
97
for ( i in parameters ) stackMemoryNeeded += parameters [ i ] . byteLength + { { { C_STRUCTS . AudioParamFrame . __size__ } } } , ++ numParams ;
102
98
103
99
#if ASSERTIONS
@@ -115,59 +111,62 @@ function createWasmAudioWorkletProcessor(audioParams) {
115
111
dataPtr = stackAlloc ( i ) + ( i - stackMemoryNeeded ) ;
116
112
117
113
// Copy input audio descriptor structs and data to Wasm
114
+ // Note: filling the structs was tried with makeSetValue() but it creates
115
+ // minor overhead (adds and shifts) that we can avoid (and no combination
116
+ // of optimisations will fold).
118
117
inputsPtr = dataPtr ;
119
- k = inputsPtr / 4 ;
118
+ k = { { { getHeapOffset ( ' inputsPtr' , 'u32' ) } } } ;
120
119
dataPtr += numInputs * { { { C_STRUCTS . AudioSampleFrame . __size__ } } } ;
121
120
for ( i of inputList ) {
122
121
// Write the AudioSampleFrame struct instance
123
- HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . numberOfChannels / 4 } } } ] = i . length ;
124
- HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . samplesPerChannel / 4 } } } ] = this . samplesPerChannel ;
125
- HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . data / 4 } } } ] = dataPtr ;
122
+ HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . numberOfChannels / getNativeTypeSize ( 'u32' ) } } } ] = i . length ;
123
+ HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . samplesPerChannel / getNativeTypeSize ( 'u32' ) } } } ] = this . samplesPerChannel ;
124
+ HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . data / getNativeTypeSize ( 'u32' ) } } } ] = dataPtr ;
126
125
#if MEMORY64
127
126
// See the note in the constructor for dealing with 64-bit addresses
128
- HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . data / 4 + 1 } } } ] = dataPtr / 0x100000000 ;
127
+ HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . data / getNativeTypeSize ( 'u32' ) + 1 } } } ] = dataPtr / 0x100000000 ;
129
128
#endif
130
- k += { { { C_STRUCTS . AudioSampleFrame . __size__ / 4 } } } ;
129
+ k += { { { C_STRUCTS . AudioSampleFrame . __size__ / getNativeTypeSize ( 'u32' ) } } } ;
131
130
// Marshal the input audio sample data for each audio channel of this input
132
131
for ( j of i ) {
133
- HEAPF32 . set ( j , dataPtr / 4 ) ;
134
- dataPtr += bytesPerChannel ;
132
+ HEAPF32 . set ( j , { { { getHeapOffset ( ' dataPtr' , 'float' ) } } } ) ;
133
+ dataPtr += this . bytesPerChannel ;
135
134
}
136
135
}
137
136
138
137
// Copy parameters descriptor structs and data to Wasm
139
138
paramsPtr = dataPtr ;
140
- k = paramsPtr / 4 ;
139
+ k = { { { getHeapOffset ( ' paramsPtr' , 'u32' ) } } } ;
141
140
dataPtr += numParams * { { { C_STRUCTS . AudioParamFrame . __size__ } } } ;
142
141
for ( i = 0 ; paramArray = parameters [ i ++ ] ; ) {
143
142
// Write the AudioParamFrame struct instance
144
- HEAPU32 [ k + { { { C_STRUCTS . AudioParamFrame . length / 4 } } } ] = paramArray . length ;
145
- HEAPU32 [ k + { { { C_STRUCTS . AudioParamFrame . data / 4 } } } ] = dataPtr ;
143
+ HEAPU32 [ k + { { { C_STRUCTS . AudioParamFrame . length / getNativeTypeSize ( 'u32' ) } } } ] = paramArray . length ;
144
+ HEAPU32 [ k + { { { C_STRUCTS . AudioParamFrame . data / getNativeTypeSize ( 'u32' ) } } } ] = dataPtr ;
146
145
#if MEMORY64
147
- HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . data / 4 + 1 } } } ] = dataPtr / 0x100000000 ;
146
+ HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . data / getNativeTypeSize ( 'u32' ) + 1 } } } ] = dataPtr / 0x100000000 ;
148
147
#endif
149
- k += { { { C_STRUCTS . AudioParamFrame . __size__ / 4 } } } ;
148
+ k += { { { C_STRUCTS . AudioParamFrame . __size__ / getNativeTypeSize ( 'u32' ) } } } ;
150
149
// Marshal the audio parameters array
151
- HEAPF32 . set ( paramArray , dataPtr / 4 ) ;
152
- dataPtr += paramArray . length * 4 ;
150
+ HEAPF32 . set ( paramArray , { { { getHeapOffset ( ' dataPtr' , 'float' ) } } } ) ;
151
+ dataPtr += paramArray . length * { { { getNativeTypeSize ( 'float' ) } } } ;
153
152
}
154
153
155
154
// Copy output audio descriptor structs to Wasm (note that dataPtr after
156
155
// the struct offsets should now be 16-byte aligned).
157
156
outputsPtr = dataPtr ;
158
- k = outputsPtr / 4 ;
157
+ k = { { { getHeapOffset ( ' outputsPtr' , 'u32' ) } } } ;
159
158
dataPtr += numOutputs * { { { C_STRUCTS . AudioSampleFrame . __size__ } } } ;
160
159
for ( i of outputList ) {
161
160
// Write the AudioSampleFrame struct instance
162
- HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . numberOfChannels / 4 } } } ] = i . length ;
163
- HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . samplesPerChannel / 4 } } } ] = this . samplesPerChannel ;
164
- HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . data / 4 } } } ] = dataPtr ;
161
+ HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . numberOfChannels / getNativeTypeSize ( 'u32' ) } } } ] = i . length ;
162
+ HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . samplesPerChannel / getNativeTypeSize ( 'u32' ) } } } ] = this . samplesPerChannel ;
163
+ HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . data / getNativeTypeSize ( 'u32' ) } } } ] = dataPtr ;
165
164
#if MEMORY64
166
- HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . data / 4 + 1 } } } ] = dataPtr / 0x100000000 ;
165
+ HEAPU32 [ k + { { { C_STRUCTS . AudioSampleFrame . data / getNativeTypeSize ( 'u32' ) + 1 } } } ] = dataPtr / 0x100000000 ;
167
166
#endif
168
- k += { { { C_STRUCTS . AudioSampleFrame . __size__ / 4 } } } ;
167
+ k += { { { C_STRUCTS . AudioSampleFrame . __size__ / getNativeTypeSize ( 'u32' ) } } } ;
169
168
// Advance the output pointer to the next output (matching the pre-allocated views)
170
- dataPtr += bytesPerChannel * i . length ;
169
+ dataPtr += this . bytesPerChannel * i . length ;
171
170
}
172
171
173
172
#if ASSERTIONS
@@ -178,15 +177,15 @@ function createWasmAudioWorkletProcessor(audioParams) {
178
177
// stack shenanigans, is that the 'render quantum size' changed.
179
178
if ( numOutputs ) {
180
179
// First that the output view addresses match the stack positions.
181
- k = dataPtr - bytesPerChannel ;
180
+ k = dataPtr - this . bytesPerChannel ;
182
181
for ( i = 0 ; i < outputViewsNeeded ; i ++ ) {
183
182
console . assert ( k == this . outputViews [ i ] . byteOffset , 'AudioWorklet internal error in addresses of the output array views' ) ;
184
- k -= bytesPerChannel ;
183
+ k -= this . bytesPerChannel ;
185
184
}
186
185
// And that the views' size match the passed in output buffers
187
186
for ( i of outputList ) {
188
187
for ( j of i ) {
189
- console . assert ( j . byteLength == bytesPerChannel , `AudioWorklet unexpected output buffer size (expected ${ bytesPerChannel } got ${ j . byteLength } )` ) ;
188
+ console . assert ( j . byteLength == this . bytesPerChannel , `AudioWorklet unexpected output buffer size (expected ${ this . bytesPerChannel } got ${ j . byteLength } )` ) ;
190
189
}
191
190
}
192
191
}
0 commit comments