
Commit b8e522c

Merge pull request #285071 from solarrezaei11/speech-translation-variable-constistency
Speech translation variable consistency
2 parents 03c5c9f + a62891e commit b8e522c

6 files changed (+67 −67 lines)


articles/ai-services/speech-service/includes/how-to/recognize-speech/csharp.md

Lines changed: 10 additions & 10 deletions

@@ -62,8 +62,8 @@ class Program
         using var speechRecognizer = new SpeechRecognizer(speechConfig, audioConfig);

         Console.WriteLine("Speak into your microphone.");
-        var result = await speechRecognizer.RecognizeOnceAsync();
-        Console.WriteLine($"RECOGNIZED: Text={result.Text}");
+        var speechRecognitionResult = await speechRecognizer.RecognizeOnceAsync();
+        Console.WriteLine($"RECOGNIZED: Text={speechRecognitionResult.Text}");
     }

     async static Task Main(string[] args)
@@ -94,8 +94,8 @@ class Program
         using var audioConfig = AudioConfig.FromWavFileInput("PathToFile.wav");
         using var speechRecognizer = new SpeechRecognizer(speechConfig, audioConfig);

-        var result = await speechRecognizer.RecognizeOnceAsync();
-        Console.WriteLine($"RECOGNIZED: Text={result.Text}");
+        var speechRecognitionResult = await speechRecognizer.RecognizeOnceAsync();
+        Console.WriteLine($"RECOGNIZED: Text={speechRecognitionResult.Text}");
     }

     async static Task Main(string[] args)
@@ -137,8 +137,8 @@ class Program
             audioConfigStream.Write(readBytes, readBytes.Length);
         } while (readBytes.Length > 0);

-        var result = await speechRecognizer.RecognizeOnceAsync();
-        Console.WriteLine($"RECOGNIZED: Text={result.Text}");
+        var speechRecognitionResult = await speechRecognizer.RecognizeOnceAsync();
+        Console.WriteLine($"RECOGNIZED: Text={speechRecognitionResult.Text}");
     }

     async static Task Main(string[] args)
@@ -153,23 +153,23 @@ Using a push stream as input assumes that the audio data is raw PCM and skips an

 ## Handle errors

-The previous examples only get the recognized text from the `result.Text` property. To handle errors and other responses, you need to write some code to handle the result. The following code evaluates the [`result.Reason`](/dotnet/api/microsoft.cognitiveservices.speech.recognitionresult.reason) property and:
+The previous examples only get the recognized text from the `speechRecognitionResult.Text` property. To handle errors and other responses, you need to write some code to handle the result. The following code evaluates the [`speechRecognitionResult.Reason`](/dotnet/api/microsoft.cognitiveservices.speech.recognitionresult.reason) property and:

 * Prints the recognition result: `ResultReason.RecognizedSpeech`.
 * If there's no recognition match, it informs the user: `ResultReason.NoMatch`.
 * If an error is encountered, it prints the error message: `ResultReason.Canceled`.

 ```csharp
-switch (result.Reason)
+switch (speechRecognitionResult.Reason)
 {
     case ResultReason.RecognizedSpeech:
-        Console.WriteLine($"RECOGNIZED: Text={result.Text}");
+        Console.WriteLine($"RECOGNIZED: Text={speechRecognitionResult.Text}");
         break;
     case ResultReason.NoMatch:
         Console.WriteLine($"NOMATCH: Speech could not be recognized.");
         break;
     case ResultReason.Canceled:
-        var cancellation = CancellationDetails.FromResult(result);
+        var cancellation = CancellationDetails.FromResult(speechRecognitionResult);
         Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

         if (cancellation.Reason == CancellationReason.Error)

articles/ai-services/speech-service/includes/how-to/recognize-speech/java.md

Lines changed: 8 additions & 8 deletions

@@ -62,8 +62,8 @@ public class Program {

         System.out.println("Speak into your microphone.");
         Future<SpeechRecognitionResult> task = speechRecognizer.recognizeOnceAsync();
-        SpeechRecognitionResult result = task.get();
-        System.out.println("RECOGNIZED: Text=" + result.getText());
+        SpeechRecognitionResult speechRecognitionResult = task.get();
+        System.out.println("RECOGNIZED: Text=" + speechRecognitionResult.getText());
     }
 }
 ```
@@ -91,31 +91,31 @@ public class Program {
         SpeechRecognizer speechRecognizer = new SpeechRecognizer(speechConfig, audioConfig);

         Future<SpeechRecognitionResult> task = speechRecognizer.recognizeOnceAsync();
-        SpeechRecognitionResult result = task.get();
-        System.out.println("RECOGNIZED: Text=" + result.getText());
+        SpeechRecognitionResult speechRecognitionResult = task.get();
+        System.out.println("RECOGNIZED: Text=" + speechRecognitionResult.getText());
     }
 }
 ```

 ## Handle errors

-The previous examples only get the recognized text by using `result.getText()`. To handle errors and other responses, you need to write some code to handle the result. The following example evaluates [`result.getReason()`](/java/api/com.microsoft.cognitiveservices.speech.recognitionresult.getreason) and:
+The previous examples only get the recognized text by using `speechRecognitionResult.getText()`. To handle errors and other responses, you need to write some code to handle the result. The following example evaluates [`speechRecognitionResult.getReason()`](/java/api/com.microsoft.cognitiveservices.speech.recognitionresult.getreason) and:

 * Prints the recognition result: `ResultReason.RecognizedSpeech`.
 * If there's no recognition match, it informs the user: `ResultReason.NoMatch`.
 * If an error is encountered, it prints the error message: `ResultReason.Canceled`.

 ```java
-switch (result.getReason()) {
+switch (speechRecognitionResult.getReason()) {
     case ResultReason.RecognizedSpeech:
-        System.out.println("We recognized: " + result.getText());
+        System.out.println("We recognized: " + speechRecognitionResult.getText());
         exitCode = 0;
         break;
     case ResultReason.NoMatch:
         System.out.println("NOMATCH: Speech could not be recognized.");
         break;
     case ResultReason.Canceled: {
-        CancellationDetails cancellation = CancellationDetails.fromResult(result);
+        CancellationDetails cancellation = CancellationDetails.fromResult(speechRecognitionResult);
         System.out.println("CANCELED: Reason=" + cancellation.getReason());

         if (cancellation.getReason() == CancellationReason.Error) {

articles/ai-services/speech-service/includes/how-to/recognize-speech/python.md

Lines changed: 11 additions & 11 deletions

@@ -42,8 +42,8 @@ def from_mic():
     speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)

     print("Speak into your microphone.")
-    result = speech_recognizer.recognize_once_async().get()
-    print(result.text)
+    speech_recognition_result = speech_recognizer.recognize_once_async().get()
+    print(speech_recognition_result.text)

 from_mic()
 ```
@@ -62,27 +62,27 @@ def from_file():
     audio_config = speechsdk.AudioConfig(filename="your_file_name.wav")
     speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)

-    result = speech_recognizer.recognize_once_async().get()
-    print(result.text)
+    speech_recognition_result = speech_recognizer.recognize_once_async().get()
+    print(speech_recognition_result.text)

 from_file()
 ```

 ## Handle errors

-The previous examples only get the recognized text from the `result.text` property. To handle errors and other responses, you need to write some code to handle the result. The following code evaluates the [`result.reason`](/python/api/azure-cognitiveservices-speech/azure.cognitiveservices.speech.resultreason) property and:
+The previous examples only get the recognized text from the `speech_recognition_result.text` property. To handle errors and other responses, you need to write some code to handle the result. The following code evaluates the [`speech_recognition_result.reason`](/python/api/azure-cognitiveservices-speech/azure.cognitiveservices.speech.resultreason) property and:

 * Prints the recognition result: `speechsdk.ResultReason.RecognizedSpeech`.
 * If there's no recognition match, it informs the user: `speechsdk.ResultReason.NoMatch`.
 * If an error is encountered, it prints the error message: `speechsdk.ResultReason.Canceled`.

 ```Python
-if result.reason == speechsdk.ResultReason.RecognizedSpeech:
-    print("Recognized: {}".format(result.text))
-elif result.reason == speechsdk.ResultReason.NoMatch:
-    print("No speech could be recognized: {}".format(result.no_match_details))
-elif result.reason == speechsdk.ResultReason.Canceled:
-    cancellation_details = result.cancellation_details
+if speech_recognition_result.reason == speechsdk.ResultReason.RecognizedSpeech:
+    print("Recognized: {}".format(speech_recognition_result.text))
+elif speech_recognition_result.reason == speechsdk.ResultReason.NoMatch:
+    print("No speech could be recognized: {}".format(speech_recognition_result.no_match_details))
+elif speech_recognition_result.reason == speechsdk.ResultReason.Canceled:
+    cancellation_details = speech_recognition_result.cancellation_details
     print("Speech Recognition canceled: {}".format(cancellation_details.reason))
     if cancellation_details.reason == speechsdk.CancellationReason.Error:
         print("Error details: {}".format(cancellation_details.error_details))

articles/ai-services/speech-service/includes/how-to/translate-speech/java.md

Lines changed: 12 additions & 12 deletions

@@ -192,10 +192,10 @@ static void translateSpeech() throws ExecutionException, InterruptedException {
     try (TranslationRecognizer translationRecognizer = new TranslationRecognizer(speechTranslationConfig)) {
         System.out.printf("Say something in '%s' and we'll translate...", fromLanguage);

-        TranslationRecognitionResult result = translationRecognizer.recognizeOnceAsync().get();
-        if (result.getReason() == ResultReason.TranslatedSpeech) {
-            System.out.printf("Recognized: \"%s\"\n", result.getText());
-            for (Map.Entry<String, String> pair : result.getTranslations().entrySet()) {
+        TranslationRecognitionResult translationRecognitionResult = translationRecognizer.recognizeOnceAsync().get();
+        if (translationRecognitionResult.getReason() == ResultReason.TranslatedSpeech) {
+            System.out.printf("Recognized: \"%s\"\n", translationRecognitionResult.getText());
+            for (Map.Entry<String, String> pair : translationRecognitionResult.getTranslations().entrySet()) {
                 System.out.printf("Translated into '%s': %s\n", pair.getKey(), pair.getValue());
             }
         }
@@ -248,10 +248,10 @@ static void translateSpeech() throws ExecutionException, FileNotFoundException,

         System.out.printf("Say something in '%s' and we'll translate...", fromLanguage);

-        TranslationRecognitionResult result = translationRecognizer.recognizeOnceAsync().get();
-        if (result.getReason() == ResultReason.TranslatedSpeech) {
-            System.out.printf("Recognized: \"%s\"\n", result.getText());
-            for (Map.Entry<String, String> pair : result.getTranslations().entrySet()) {
+        TranslationRecognitionResult translationRecognitionResult = translationRecognizer.recognizeOnceAsync().get();
+        if (translationRecognitionResult.getReason() == ResultReason.TranslatedSpeech) {
+            System.out.printf("Recognized: \"%s\"\n", translationRecognitionResult.getText());
+            for (Map.Entry<String, String> pair : translationRecognitionResult.getTranslations().entrySet()) {
                 String language = pair.getKey();
                 String translation = pair.getValue();
                 System.out.printf("Translated into '%s': %s\n", language, translation);
@@ -282,8 +282,8 @@ static void translateSpeech() throws ExecutionException, InterruptedException {
     try (TranslationRecognizer translationRecognizer = new TranslationRecognizer(speechTranslationConfig)) {
         System.out.printf("Say something in '%s' and we'll translate...", fromLanguage);

-        TranslationRecognitionResult result = translationRecognizer.recognizeOnceAsync().get();
-        if (result.getReason() == ResultReason.TranslatedSpeech) {
+        TranslationRecognitionResult translationRecognitionResult = translationRecognizer.recognizeOnceAsync().get();
+        if (translationRecognitionResult.getReason() == ResultReason.TranslatedSpeech) {
             // See: https://aka.ms/speech/sdkregion#standard-and-neural-voices
             Map<String, String> languageToVoiceMap = new HashMap<String, String>();
             languageToVoiceMap.put("de", "de-DE-KatjaNeural");
@@ -292,8 +292,8 @@ static void translateSpeech() throws ExecutionException, InterruptedException {
             languageToVoiceMap.put("pt", "pt-BR-FranciscaNeural");
             languageToVoiceMap.put("zh-Hans", "zh-CN-XiaoxiaoNeural");

-            System.out.printf("Recognized: \"%s\"\n", result.getText());
-            for (Map.Entry<String, String> pair : result.getTranslations().entrySet()) {
+            System.out.printf("Recognized: \"%s\"\n", translationRecognitionResult.getText());
+            for (Map.Entry<String, String> pair : translationRecognitionResult.getTranslations().entrySet()) {
                 String language = pair.getKey();
                 String translation = pair.getValue();
                 System.out.printf("Translated into '%s': %s\n", language, translation);

articles/ai-services/speech-service/includes/how-to/translate-speech/python.md

Lines changed: 22 additions & 22 deletions

@@ -157,17 +157,17 @@ def translate_speech_to_text():
         translation_config=translation_config)

     print('Say something...')
-    result = translation_recognizer.recognize_once()
-    print(get_result_text(reason=result.reason, result=result))
+    translation_recognition_result = translation_recognizer.recognize_once()
+    print(get_result_text(reason=translation_recognition_result.reason, result=translation_recognition_result))

-def get_result_text(reason, result):
+def get_result_text(reason, translation_recognition_result):
     reason_format = {
         speechsdk.ResultReason.TranslatedSpeech:
-            f'RECOGNIZED "{from_language}": {result.text}\n' +
-            f'TRANSLATED into "{to_language}"": {result.translations[to_language]}',
-        speechsdk.ResultReason.RecognizedSpeech: f'Recognized: "{result.text}"',
-        speechsdk.ResultReason.NoMatch: f'No speech could be recognized: {result.no_match_details}',
-        speechsdk.ResultReason.Canceled: f'Speech Recognition canceled: {result.cancellation_details}'
+            f'RECOGNIZED "{from_language}": {translation_recognition_result.text}\n' +
+            f'TRANSLATED into "{to_language}"": {translation_recognition_result.translations[to_language]}',
+        speechsdk.ResultReason.RecognizedSpeech: f'Recognized: "{translation_recognition_result.text}"',
+        speechsdk.ResultReason.NoMatch: f'No speech could be recognized: {translation_recognition_result.no_match_details}',
+        speechsdk.ResultReason.Canceled: f'Speech Recognition canceled: {translation_recognition_result.cancellation_details}'
     }
     return reason_format.get(reason, 'Unable to recognize speech')

@@ -222,17 +222,17 @@ def translate_speech_to_text():

     print(f'Say something in "{from_language}" and we\'ll translate into "{to_language}".')

-    result = translation_recognizer.recognize_once()
-    print(get_result_text(reason=result.reason, result=result))
+    translation_recognition_result = translation_recognizer.recognize_once()
+    print(get_result_text(reason=translation_recognition_result.reason, result=translation_recognition_result))

-def get_result_text(reason, result):
+def get_result_text(reason, translation_recognition_result):
     reason_format = {
         speechsdk.ResultReason.TranslatedSpeech:
-            f'Recognized "{from_language}": {result.text}\n' +
-            f'Translated into "{to_language}"": {result.translations[to_language]}',
-        speechsdk.ResultReason.RecognizedSpeech: f'Recognized: "{result.text}"',
-        speechsdk.ResultReason.NoMatch: f'No speech could be recognized: {result.no_match_details}',
-        speechsdk.ResultReason.Canceled: f'Speech Recognition canceled: {result.cancellation_details}'
+            f'Recognized "{from_language}": {translation_recognition_result.text}\n' +
+            f'Translated into "{to_language}"": {translation_recognition_result.translations[to_language]}',
+        speechsdk.ResultReason.RecognizedSpeech: f'Recognized: "{translation_recognition_result.text}"',
+        speechsdk.ResultReason.NoMatch: f'No speech could be recognized: {translation_recognition_result.no_match_details}',
+        speechsdk.ResultReason.Canceled: f'Speech Recognition canceled: {translation_recognition_result.cancellation_details}'
     }
     return reason_format.get(reason, 'Unable to recognize speech')

@@ -264,21 +264,21 @@ def translate_speech_to_text():
         translation_config=translation_config)

     print('Say something...')
-    result = translation_recognizer.recognize_once()
-    synthesize_translations(result=result)
+    translation_recognition_result = translation_recognizer.recognize_once()
+    synthesize_translations(result=translation_recognition_result)

-def synthesize_translations(result):
+def synthesize_translations(translation_recognition_result):
     language_to_voice_map = {
         "de": "de-DE-KatjaNeural",
         "en": "en-US-AriaNeural",
         "it": "it-IT-ElsaNeural",
         "pt": "pt-BR-FranciscaNeural",
         "zh-Hans": "zh-CN-XiaoxiaoNeural"
     }
-    print(f'Recognized: "{result.text}"')
+    print(f'Recognized: "{translation_recognition_result.text}"')

-    for language in result.translations:
-        translation = result.translations[language]
+    for language in translation_recognition_result.translations:
+        translation = translation_recognition_result.translations[language]
         print(f'Translated into "{language}": {translation}')

 speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
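
Assembled into one runnable sketch, the renamed translation helper reads roughly as below. The environment-variable configuration and the `from_language`/`to_language` values are assumptions for this sketch (the include file defines them in earlier, unchanged lines), and the result is passed positionally so the call matches the renamed parameter.

```python
import os
import azure.cognitiveservices.speech as speechsdk

# Assumed configuration for this sketch; the include file defines these values elsewhere.
speech_key, service_region = os.environ.get('SPEECH_KEY'), os.environ.get('SPEECH_REGION')
from_language, to_language = 'en-US', 'it'

def get_result_text(reason, translation_recognition_result):
    reason_format = {
        speechsdk.ResultReason.TranslatedSpeech:
            f'Recognized "{from_language}": {translation_recognition_result.text}\n' +
            f'Translated into "{to_language}": {translation_recognition_result.translations[to_language]}',
        speechsdk.ResultReason.RecognizedSpeech: f'Recognized: "{translation_recognition_result.text}"',
        speechsdk.ResultReason.NoMatch: f'No speech could be recognized: {translation_recognition_result.no_match_details}',
        speechsdk.ResultReason.Canceled: f'Speech Recognition canceled: {translation_recognition_result.cancellation_details}'
    }
    return reason_format.get(reason, 'Unable to recognize speech')

def translate_speech_to_text():
    translation_config = speechsdk.translation.SpeechTranslationConfig(
        subscription=speech_key, region=service_region)
    translation_config.speech_recognition_language = from_language
    translation_config.add_target_language(to_language)

    # With no audio config supplied, the recognizer uses the default microphone.
    translation_recognizer = speechsdk.translation.TranslationRecognizer(
        translation_config=translation_config)

    print('Say something...')
    translation_recognition_result = translation_recognizer.recognize_once()
    print(get_result_text(translation_recognition_result.reason,
                          translation_recognition_result))

translate_speech_to_text()
```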

articles/ai-services/speech-service/includes/quickstarts/speech-translation-basics/python.md

Lines changed: 4 additions & 4 deletions

@@ -46,8 +46,8 @@ Follow these steps to create a new console application.
     speech_translation_config = speechsdk.translation.SpeechTranslationConfig(subscription=os.environ.get('SPEECH_KEY'), region=os.environ.get('SPEECH_REGION'))
     speech_translation_config.speech_recognition_language="en-US"

-    target_language="it"
-    speech_translation_config.add_target_language(target_language)
+    to_language ="it"
+    speech_translation_config.add_target_language(to_language)

     audio_config = speechsdk.audio.AudioConfig(use_default_microphone=True)
     translation_recognizer = speechsdk.translation.TranslationRecognizer(translation_config=speech_translation_config, audio_config=audio_config)
@@ -58,8 +58,8 @@ Follow these steps to create a new console application.
     if translation_recognition_result.reason == speechsdk.ResultReason.TranslatedSpeech:
         print("Recognized: {}".format(translation_recognition_result.text))
         print("""Translated into '{}': {}""".format(
-            target_language,
-            translation_recognition_result.translations[target_language]))
+            to_language,
+            translation_recognition_result.translations[to_language]))
     elif translation_recognition_result.reason == speechsdk.ResultReason.NoMatch:
         print("No speech could be recognized: {}".format(translation_recognition_result.no_match_details))
     elif translation_recognition_result.reason == speechsdk.ResultReason.Canceled:
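
Applied to the quickstart, the `to_language` rename yields an end-to-end script along these lines. It's a sketch, not the quickstart verbatim: the imports, the prompt, the `recognize_once_async()` call, and the cancellation-details handling aren't shown in this diff and are filled in here as assumptions consistent with the surrounding code.

```python
import os
import azure.cognitiveservices.speech as speechsdk

speech_translation_config = speechsdk.translation.SpeechTranslationConfig(
    subscription=os.environ.get('SPEECH_KEY'), region=os.environ.get('SPEECH_REGION'))
speech_translation_config.speech_recognition_language = "en-US"

# The target-language variable now matches the to_language naming used in the other includes.
to_language = "it"
speech_translation_config.add_target_language(to_language)

audio_config = speechsdk.audio.AudioConfig(use_default_microphone=True)
translation_recognizer = speechsdk.translation.TranslationRecognizer(
    translation_config=speech_translation_config, audio_config=audio_config)

print("Speak into your microphone.")
translation_recognition_result = translation_recognizer.recognize_once_async().get()

if translation_recognition_result.reason == speechsdk.ResultReason.TranslatedSpeech:
    print("Recognized: {}".format(translation_recognition_result.text))
    print("""Translated into '{}': {}""".format(
        to_language,
        translation_recognition_result.translations[to_language]))
elif translation_recognition_result.reason == speechsdk.ResultReason.NoMatch:
    print("No speech could be recognized: {}".format(translation_recognition_result.no_match_details))
elif translation_recognition_result.reason == speechsdk.ResultReason.Canceled:
    cancellation_details = translation_recognition_result.cancellation_details
    print("Speech Recognition canceled: {}".format(cancellation_details.reason))
    if cancellation_details.reason == speechsdk.CancellationReason.Error:
        print("Error details: {}".format(cancellation_details.error_details))
```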

0 commit comments
