@@ -5977,13 +5977,19 @@ var bufferFrom = __webpack_require__(15);
  *
  * @see https://developer.mozilla.org/en-US/docs/Web/API/Navigator/getUserMedia
  *
- * @param {MediaStream} stream https://developer.mozilla.org/en-US/docs/Web/API/MediaStream
  * @param {Object} [opts] options
+ * @param {MediaStream} [opts.stream] https://developer.mozilla.org/en-US/docs/Web/API/MediaStream - for iOS compatibility, it is recommended that you create the MicrophoneStream instance in response to the tap - before you have a MediaStream, and then later call setStream() with the MediaStream.
  * @param {Boolean} [opts.objectMode=false] Puts the stream into ObjectMode where it emits AudioBuffers instead of Buffers - see https://developer.mozilla.org/en-US/docs/Web/API/AudioBuffer
  * @param {Number|null} [opts.bufferSize=null] https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/createScriptProcessor
  * @constructor
  */
-function MicrophoneStream(stream, opts) {
+function MicrophoneStream(opts) {
+  // backwards compatibility - passing in the stream here will generally not work on iOS 11 Safari
+  if (typeof MediaStream !== 'undefined' && opts instanceof MediaStream) {
+    var stream = opts;
+    opts = arguments[1] || {};
+    opts.stream = stream;
+  }
   // "It is recommended for authors to not specify this buffer size and allow the implementation to pick a good
   // buffer size to balance between latency and audio quality."
   // https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/createScriptProcessor
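
The new constructor contract described in the JSDoc above is easiest to see from the caller's side. Below is a minimal usage sketch, not part of this diff: it assumes the module is required as `microphone-stream`, and the button id is illustrative only.

```js
// Usage sketch for the reworked constructor: construct synchronously inside
// the tap handler, then attach the MediaStream once getUserMedia resolves.
var MicrophoneStream = require('microphone-stream');

document.getElementById('record-button').addEventListener('click', function () {
  // creating the instance here lets it resume its AudioContext while still
  // inside the user-gesture call stack (required on iOS 11 Safari)
  var micStream = new MicrophoneStream({ objectMode: true, bufferSize: null });

  navigator.mediaDevices.getUserMedia({ video: false, audio: true })
    .then(function (mediaStream) {
      micStream.setStream(mediaStream); // attach the mic once permission is granted
    })
    .catch(function (err) {
      console.error('microphone access failed:', err);
    });

  micStream.on('data', function (chunk) {
    // AudioBuffers in objectMode, raw Buffers otherwise
  });
});
```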
@@ -6019,12 +6025,32 @@ function MicrophoneStream(stream, opts) {
 
   var AudioContext = window.AudioContext || window.webkitAudioContext;
   var context = new AudioContext();
-  var audioInput = context.createMediaStreamSource(stream);
   var recorder = context.createScriptProcessor(bufferSize, inputChannels, outputChannels);
 
-  recorder.onaudioprocess = recorderProcess;
+  // Workaround for Safari on iOS 11 - context starts out suspended, and the resume() call must be in response to a tap.
+  // This allows you to create the MicrophoneStream instance synchronously in response to the first tap,
+  // and then connect the MediaStream asynchronously, after the user has granted microphone access.
+  var audioInput;
+  if (context.state === 'suspended') {
+    context.resume();
+  }
 
-  audioInput.connect(recorder);
+  /**
+   * Set the MediaStream
+   *
+   * This was removed from the constructor to enable better compatibility with Safari on iOS 11.
+   *
+   * @param {MediaStream} stream https://developer.mozilla.org/en-US/docs/Web/API/MediaStream
+   */
+  this.setStream = function(stream) {
+    audioInput = context.createMediaStreamSource(stream);
+    audioInput.connect(recorder);
+    recorder.onaudioprocess = recorderProcess;
+  };
+
+  if (opts.stream) {
+    this.setStream(opts.stream);
+  }
 
   // other half of workaround for chrome bugs
   recorder.connect(context.destination);
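
The suspended-context workaround above relies on two Web Audio facts: an AudioContext created on iOS 11 Safari starts out `'suspended'` and can only be resumed from a user-gesture call stack, and a ScriptProcessorNode can be created and connected to the destination before any input node exists. Here is a standalone sketch of that pattern, independent of MicrophoneStream; the buffer size and helper name are illustrative, not taken from this diff.

```js
// Resume the context inside the tap, wire the ScriptProcessor immediately,
// and attach the microphone source later, once a MediaStream is available.
function createDeferredRecorder() {
  var AudioContext = window.AudioContext || window.webkitAudioContext;
  var context = new AudioContext();

  // iOS 11 Safari starts the context 'suspended'; resume() only takes effect
  // when called from a user gesture such as a tap handler
  if (context.state === 'suspended') {
    context.resume();
  }

  // the processor node can exist (and feed the destination) before any
  // input is connected to it
  var recorder = context.createScriptProcessor(4096, 1, 1);
  recorder.connect(context.destination);

  return {
    attach: function (mediaStream, onAudio) {
      var source = context.createMediaStreamSource(mediaStream);
      source.connect(recorder);
      recorder.onaudioprocess = onAudio;
    }
  };
}
```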
@@ -6039,7 +6065,9 @@ function MicrophoneStream(stream, opts) {
     // This fails in some older versions of chrome. Nothing we can do about it.
   }
   recorder.disconnect();
-  audioInput.disconnect();
+  if (audioInput) {
+    audioInput.disconnect();
+  }
   try {
     context.close(); // returns a promise;
   } catch (ex) {
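
The guard added above matters because setStream() may never run — for example when the user denies microphone access — so audioInput can still be undefined when the cleanup path executes. A short sketch of that failure mode (the surrounding structure is illustrative):

```js
// Before this change, stop() would throw a TypeError here because
// audioInput.disconnect() was called on a node that was never created;
// with the guard, cleanup simply skips the missing source node.
var micStream = new MicrophoneStream({ objectMode: true });

navigator.mediaDevices.getUserMedia({ video: false, audio: true })
  .then(function (mediaStream) {
    micStream.setStream(mediaStream);
  })
  .catch(function () {
    micStream.stop(); // safe even though setStream() was never called
  });
```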
@@ -8785,29 +8813,66 @@ module.exports = function recognizeMicrophone(options) {
   var recognizeStream = new RecognizeStream(rsOpts);
   var streams = [recognizeStream]; // collect all of the streams so that we can bundle up errors and send them to the last one
 
+  // set up the output first so that we have a place to emit errors
+  // if there's trouble with the input stream
+  var stream = recognizeStream;
+
   var keepMic = options.keepMicrophone;
-  var getMicStream;
+  var micStream;
   if (keepMic && preservedMicStream) {
     preservedMicStream.unpipe(bitBucket);
-    getMicStream = Promise.resolve(preservedMicStream);
+    micStream = preservedMicStream;
   } else {
+    // create the MicrophoneStream synchronously to allow it to resume the context in Safari on iOS 11
+    micStream = new MicrophoneStream({
+      objectMode: true,
+      bufferSize: options.bufferSize
+    });
     var pm = options.mediaStream ? Promise.resolve(options.mediaStream) : getUserMedia({ video: false, audio: true });
-
-    getMicStream = pm.then(function(mic) {
-      var micStream = new MicrophoneStream(mic, {
-        objectMode: true,
-        bufferSize: options.bufferSize
+    pm
+      .then(function(mediaStream) {
+        micStream.setStream(mediaStream);
+        if (keepMic) {
+          preservedMicStream = micStream;
+        }
+      })
+      .catch(function(err) {
+        stream.emit('error', err);
+        if (err.name === 'NotSupportedError') {
+          stream.end(); // end the stream
+        }
       });
-      if (keepMic) {
-        preservedMicStream = micStream;
-      }
-      return Promise.resolve(micStream);
-    });
   }
 
-  // set up the output first so that we have a place to emit errors
-  // if there's trouble with the input stream
-  var stream = recognizeStream;
+  var l16Stream = new L16({ writableObjectMode: true });
+
+  micStream.pipe(l16Stream).pipe(recognizeStream);
+
+  streams.push(micStream, l16Stream);
+
+  /**
+   * unpipes the mic stream to prevent any more audio from being sent over the wire
+   * temporarily re-pipes it to the bitBucket (basically /dev/null) because
+   * otherwise it will buffer the audio from in between calls and prepend it to the next one
+   *
+   * @private
+   */
+  function end() {
+    micStream.unpipe(l16Stream);
+    micStream.pipe(bitBucket);
+    l16Stream.end();
+  }
+  // trigger on both stop and end events:
+  // stop will not fire when a stream ends due to a timeout
+  // but when stop does fire, we want to honor it immediately
+  // end will always fire, but it may take a few moments after stop
+  if (keepMic) {
+    recognizeStream.on('end', end);
+    recognizeStream.on('stop', end);
+  } else {
+    recognizeStream.on('end', micStream.stop.bind(micStream));
+    recognizeStream.on('stop', micStream.stop.bind(micStream));
+  }
 
   if (options.resultsBySpeaker) {
     stream = stream.pipe(new SpeakerStream(options));
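
For orientation, a hedged caller-side sketch of the rewired pipeline above; the require path and the credential option name are assumptions for illustration, while the option names visible in the diff (keepMicrophone, mediaStream, bufferSize, resultsBySpeaker) come from the code itself. It shows why the output stream is now set up before the input: getUserMedia and MicrophoneStream failures surface as 'error' events on the returned stream.

```js
var recognizeMicrophone = require('watson-speech/speech-to-text/recognize-microphone');

var stream = recognizeMicrophone({
  token: '...',          // placeholder credentials - see the SDK docs
  keepMicrophone: true   // keep the mic open (piped to the bitBucket) between recognitions
});

stream.on('data', function (result) {
  console.log(result);
});

// microphone/getUserMedia errors are emitted here, on the last stream in the
// pipeline, because the output is wired up before the input stream is ready
stream.on('error', console.error);

// later, in response to a UI action:
// stream.stop();
```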
@@ -8829,49 +8894,6 @@ module.exports = function recognizeMicrophone(options) {
     streams.push(stream);
   }
 
-  getMicStream.catch(function(err) {
-    stream.emit('error', err);
-    if (err.name === 'NotSupportedError') {
-      stream.end(); // end the stream
-    }
-  });
-
-  getMicStream
-    .then(function(micStream) {
-      streams.push(micStream);
-
-      var l16Stream = new L16({ writableObjectMode: true });
-
-      micStream.pipe(l16Stream).pipe(recognizeStream);
-
-      streams.push(l16Stream);
-
-      /**
-       * unpipes the mic stream to prevent any more audio from being sent over the wire
-       * temporarily re-pipes it to the bitBucket (basically /dev/null) because
-       * otherwise it will buffer the audio from in between calls and prepend it to the next one
-       *
-       * @private
-       */
-      function end() {
-        micStream.unpipe(l16Stream);
-        micStream.pipe(bitBucket);
-        l16Stream.end();
-      }
-      // trigger on both stop and end events:
-      // stop will not fire when a stream ends due to a timeout
-      // but when stop does fire, we want to honor it immediately
-      // end will always fire, but it may take a few moments after stop
-      if (keepMic) {
-        recognizeStream.on('end', end);
-        recognizeStream.on('stop', end);
-      } else {
-        recognizeStream.on('end', micStream.stop.bind(micStream));
-        recognizeStream.on('stop', micStream.stop.bind(micStream));
-      }
-    })
-    .catch(recognizeStream.emit.bind(recognizeStream, 'error'));
-
   // Capture errors from any stream except the last one and emit them on the last one
   streams.forEach(function(prevStream) {
     if (prevStream !== stream) {