
Commit 1aebda1

Change result to speech_recognition_result for consistency with quickstart, add link to GStreamer
1 parent 837cb2a commit 1aebda1

File tree

2 files changed: +19 -19 lines changed

  • articles/ai-services/speech-service/includes/how-to/recognize-speech


articles/ai-services/speech-service/includes/how-to/recognize-speech/csharp.md

Lines changed: 11 additions & 11 deletions
````diff
@@ -62,8 +62,8 @@ class Program
         using var speechRecognizer = new SpeechRecognizer(speechConfig, audioConfig);
 
         Console.WriteLine("Speak into your microphone.");
-        var result = await speechRecognizer.RecognizeOnceAsync();
-        Console.WriteLine($"RECOGNIZED: Text={result.Text}");
+        var speechRecognitionResult = await speechRecognizer.RecognizeOnceAsync();
+        Console.WriteLine($"RECOGNIZED: Text={speechRecognitionResult.Text}");
     }
 
     async static Task Main(string[] args)
@@ -94,8 +94,8 @@ class Program
         using var audioConfig = AudioConfig.FromWavFileInput("PathToFile.wav");
         using var speechRecognizer = new SpeechRecognizer(speechConfig, audioConfig);
 
-        var result = await speechRecognizer.RecognizeOnceAsync();
-        Console.WriteLine($"RECOGNIZED: Text={result.Text}");
+        var speechRecognitionResult = await speechRecognizer.RecognizeOnceAsync();
+        Console.WriteLine($"RECOGNIZED: Text={speechRecognitionResult.Text}");
     }
 
     async static Task Main(string[] args)
@@ -137,8 +137,8 @@ class Program
             audioConfigStream.Write(readBytes, readBytes.Length);
         } while (readBytes.Length > 0);
 
-        var result = await speechRecognizer.RecognizeOnceAsync();
-        Console.WriteLine($"RECOGNIZED: Text={result.Text}");
+        var speechRecognitionResult = await speechRecognizer.RecognizeOnceAsync();
+        Console.WriteLine($"RECOGNIZED: Text={speechRecognitionResult.Text}");
     }
 
     async static Task Main(string[] args)
@@ -149,27 +149,27 @@ class Program
 }
 ```
 
-Using a push stream as input assumes that the audio data is a raw PCM and skips any headers. The API still works in certain cases if the header hasn't been skipped. For the best results, consider implementing logic to read off the headers so that `byte[]` begins at the *start of the audio data*.
+Using a push stream as input assumes that the audio data is a raw PCM and skips any headers. The API still works in certain cases if the header hasn't been skipped. For the best results, consider implementing logic to read off the headers so that `byte[]` begins at the *start of the audio data*. To address this issue, you can utilize [GStreamer](./how-to-use-codec-compressed-audio-input-streams.md). GStreamer provides a flexible pipeline-based framework that allows you to implement logic to read off the headers, ensuring that the byte array begins at the start of the audio data.
 
 ## Handle errors
 
-The previous examples only get the recognized text from the `result.Text` property. To handle errors and other responses, you need to write some code to handle the result. The following code evaluates the [`result.Reason`](/dotnet/api/microsoft.cognitiveservices.speech.recognitionresult.reason) property and:
+The previous examples only get the recognized text from the `speechRecognitionResult.Text` property. To handle errors and other responses, you need to write some code to handle the result. The following code evaluates the [`speechRecognitionResult.Reason`](/dotnet/api/microsoft.cognitiveservices.speech.recognitionresult.reason) property and:
 
 * Prints the recognition result: `ResultReason.RecognizedSpeech`.
 * If there's no recognition match, it informs the user: `ResultReason.NoMatch`.
 * If an error is encountered, it prints the error message: `ResultReason.Canceled`.
 
 ```csharp
-switch (result.Reason)
+switch (speechRecognitionResult.Reason)
 {
     case ResultReason.RecognizedSpeech:
-        Console.WriteLine($"RECOGNIZED: Text={result.Text}");
+        Console.WriteLine($"RECOGNIZED: Text={speechRecognitionResult.Text}");
         break;
     case ResultReason.NoMatch:
         Console.WriteLine($"NOMATCH: Speech could not be recognized.");
         break;
     case ResultReason.Canceled:
-        var cancellation = CancellationDetails.FromResult(result);
+        var cancellation = CancellationDetails.FromResult(speechRecognitionResult);
         Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
 
         if (cancellation.Reason == CancellationReason.Error)
````
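The changed paragraph above recommends reading off the WAV header yourself so that the pushed `byte[]` begins at the start of the audio data. As a rough illustration of that suggestion (not part of this commit), here is a minimal C# sketch; the class and helper name (`WavHeaderExample`, `PushPcmWithoutHeader`) and the chunk-walking logic are assumptions, while the `audioConfigStream.Write(buffer, buffer.Length)` call matches the push-stream sample shown in the diff:

```csharp
using System.IO;
using System.Text;
using Microsoft.CognitiveServices.Speech.Audio;

class WavHeaderExample
{
    // Hypothetical helper: walks the RIFF chunks of a .wav file until the "data"
    // chunk is found, then pushes only the raw PCM bytes into the push stream,
    // so the byte[] passed to Write begins at the start of the audio data.
    static void PushPcmWithoutHeader(string wavPath, PushAudioInputStream audioConfigStream)
    {
        using var reader = new BinaryReader(File.OpenRead(wavPath));

        reader.ReadBytes(12); // "RIFF" + overall size + "WAVE"

        // Skip non-audio chunks ("fmt ", "LIST", ...) until "data" is reached.
        while (true)
        {
            string chunkId = Encoding.ASCII.GetString(reader.ReadBytes(4));
            int chunkSize = reader.ReadInt32();
            if (chunkId == "data")
            {
                break; // the next byte is the first audio sample
            }
            reader.ReadBytes(chunkSize);
        }

        // Same push loop as the article's sample, but starting after the header.
        byte[] buffer;
        do
        {
            buffer = reader.ReadBytes(1024);
            audioConfigStream.Write(buffer, buffer.Length);
        } while (buffer.Length > 0);
    }
}
```

For compressed formats or more robust parsing, the paragraph points to GStreamer as a more general, pipeline-based way to handle this.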

articles/ai-services/speech-service/includes/how-to/recognize-speech/java.md

Lines changed: 8 additions & 8 deletions
````diff
@@ -62,8 +62,8 @@ public class Program {
 
         System.out.println("Speak into your microphone.");
         Future<SpeechRecognitionResult> task = speechRecognizer.recognizeOnceAsync();
-        SpeechRecognitionResult result = task.get();
-        System.out.println("RECOGNIZED: Text=" + result.getText());
+        SpeechRecognitionResult speechRecognitionResult = task.get();
+        System.out.println("RECOGNIZED: Text=" + speechRecognitionResult.getText());
     }
 }
 ```
@@ -91,31 +91,31 @@ public class Program {
         SpeechRecognizer speechRecognizer = new SpeechRecognizer(speechConfig, audioConfig);
 
         Future<SpeechRecognitionResult> task = speechRecognizer.recognizeOnceAsync();
-        SpeechRecognitionResult result = task.get();
-        System.out.println("RECOGNIZED: Text=" + result.getText());
+        SpeechRecognitionResult speechRecognitionResult = task.get();
+        System.out.println("RECOGNIZED: Text=" + speechRecognitionResult.getText());
     }
 }
 ```
 
 ## Handle errors
 
-The previous examples only get the recognized text by using `result.getText()`. To handle errors and other responses, you need to write some code to handle the result. The following example evaluates [`result.getReason()`](/java/api/com.microsoft.cognitiveservices.speech.recognitionresult.getreason) and:
+The previous examples only get the recognized text by using `speechRecognitionResult.getText()`. To handle errors and other responses, you need to write some code to handle the result. The following example evaluates [`speechRecognitionResult.getReason()`](/java/api/com.microsoft.cognitiveservices.speech.recognitionresult.getreason) and:
 
 * Prints the recognition result: `ResultReason.RecognizedSpeech`.
 * If there's no recognition match, it informs the user: `ResultReason.NoMatch`.
 * If an error is encountered, it prints the error message: `ResultReason.Canceled`.
 
 ```java
-switch (result.getReason()) {
+switch (speechRecognitionResult.getReason()) {
     case ResultReason.RecognizedSpeech:
-        System.out.println("We recognized: " + result.getText());
+        System.out.println("We recognized: " + speechRecognitionResult.getText());
         exitCode = 0;
         break;
     case ResultReason.NoMatch:
         System.out.println("NOMATCH: Speech could not be recognized.");
         break;
     case ResultReason.Canceled: {
-        CancellationDetails cancellation = CancellationDetails.fromResult(result);
+        CancellationDetails cancellation = CancellationDetails.fromResult(speechRecognitionResult);
         System.out.println("CANCELED: Reason=" + cancellation.getReason());
 
         if (cancellation.getReason() == CancellationReason.Error) {
````
