|
2 | 2 | author: eric-urban
|
3 | 3 | ms.service: cognitive-services
|
4 | 4 | ms.topic: include
|
5 |
| -ms.date: 07/02/2021 |
| 5 | +ms.date: 01/15/2022 |
6 | 6 | ms.author: eur
|
7 | 7 | ms.custom: devx-track-js
|
8 | 8 | ---
|
@@ -135,10 +135,10 @@ For many scenarios in speech application development, you likely need the result
|
135 | 135 | * Integrate the result with other APIs or services.
|
136 | 136 | * Modify the audio data, write custom `.wav` headers, etc.
|
137 | 137 |
|
138 |
| -It's simple to make this change from the previous example. First, remove the `AudioConfig` block, as you will manage the output behavior manually from this point onward for increased control. Then pass `undefined` for the `AudioConfig` in the `SpeechSynthesizer` constructor. |
| 138 | +It's simple to make this change from the previous example. First, remove the `AudioConfig` block, as you will manage the output behavior manually from this point onward for increased control. Then pass `null` for the `AudioConfig` in the `SpeechSynthesizer` constructor. |
139 | 139 |
|
140 | 140 | > [!NOTE]
|
141 |
| -> Passing `undefined` for the `AudioConfig`, rather than omitting it like in the speaker output example above, will not play the audio by default on the current active output device. |
| 141 | +> Passing `null` for the `AudioConfig`, rather than omitting it as in the speaker output example above, means the audio will not be played by default on the currently active output device. |
142 | 142 |
|
143 | 143 | This time, you save the result to a [`SpeechSynthesisResult`](/javascript/api/microsoft-cognitiveservices-speech-sdk/speechsynthesisresult) variable. The `SpeechSynthesisResult.audioData` property returns an `ArrayBuffer` of the output data, the default browser stream type. For server code, convert the `ArrayBuffer` to a buffer stream.
|
144 | 144 |
|
@@ -212,7 +212,7 @@ function synthesizeSpeech() {
|
212 | 212 | // Set the output format
|
213 | 213 | speechConfig.speechSynthesisOutputFormat = SpeechSynthesisOutputFormat.Riff24Khz16BitMonoPcm;
|
214 | 214 |
|
215 |
| - const synthesizer = new sdk.SpeechSynthesizer(speechConfig, undefined); |
| 215 | + const synthesizer = new sdk.SpeechSynthesizer(speechConfig, null); |
216 | 216 | synthesizer.speakTextAsync(
|
217 | 217 | "Customizing audio output format.",
|
218 | 218 | result => {
|
@@ -258,7 +258,7 @@ For more information on `readFileSync`, see <a href="https://nodejs.org/api/fs.h
|
258 | 258 | ```javascript
|
259 | 259 | function synthesizeSpeech() {
|
260 | 260 | const speechConfig = sdk.SpeechConfig.fromSubscription("<paste-your-speech-key-here>", "<paste-your-speech-location/region-here>");
|
261 |
| - const synthesizer = new sdk.SpeechSynthesizer(speechConfig, undefined); |
| 261 | + const synthesizer = new sdk.SpeechSynthesizer(speechConfig, null); |
262 | 262 |
|
263 | 263 | const ssml = xmlToString("ssml.xml");
|
264 | 264 | synthesizer.speakSsmlAsync(
|
|
0 commit comments