@@ -19,7 +19,7 @@ The Speech SDK is available as a [NuGet package](https://www.nuget.org/packages/
19
19
20
20
### Set environment variables
21
21
22
- This example requires environment variables named ` OPEN_AI_KEY ` , ` OPEN_AI_ENDPOINT ` , ` SPEECH_KEY ` , and ` SPEECH_REGION ` .
22
+ This example requires environment variables named ` OPEN_AI_KEY ` , ` OPEN_AI_ENDPOINT ` , ` OPEN_AI_DEPLOYMENT_NAME ` , ` SPEECH_KEY ` , and ` SPEECH_REGION ` .
23
23
24
24
[ !INCLUDE [ Environment variables] ( ../../common/environment-variables-openai.md )]
25
25
@@ -42,156 +42,152 @@ Follow these steps to create a new console application.
42
42
1. Replace the contents of `Program.cs` with the following code.
43
43
44
44
```csharp
45
- using System;
46
- using System.IO;
47
- using System.Threading.Tasks;
45
+ using System.Text;
48
46
using Microsoft.CognitiveServices.Speech;
49
47
using Microsoft.CognitiveServices.Speech.Audio;
50
48
using Azure;
51
49
using Azure.AI.OpenAI;
52
- using static System.Environment;
53
-
54
- class Program
50
+
51
+ // This example requires environment variables named "OPEN_AI_KEY", "OPEN_AI_ENDPOINT" and "OPEN_AI_DEPLOYMENT_NAME"
52
+ // Your endpoint should look like the following https://YOUR_OPEN_AI_RESOURCE_NAME.openai.azure.com/
53
+ string openAIKey = Environment.GetEnvironmentVariable("OPEN_AI_KEY") ??
54
+ throw new ArgumentException("Missing OPEN_AI_KEY");
55
+ string openAIEndpoint = Environment.GetEnvironmentVariable("OPEN_AI_ENDPOINT") ??
56
+ throw new ArgumentException("Missing OPEN_AI_ENDPOINT");
57
+
58
+ // Enter the deployment name you chose when you deployed the model.
59
+ string engine = Environment.GetEnvironmentVariable("OPEN_AI_DEPLOYMENT_NAME") ??
60
+ throw new ArgumentException("Missing OPEN_AI_DEPLOYMENT_NAME");
61
+
62
+ // This example requires environment variables named "SPEECH_KEY" and "SPEECH_REGION"
63
+ string speechKey = Environment.GetEnvironmentVariable("SPEECH_KEY") ??
64
+ throw new ArgumentException("Missing SPEECH_KEY");
65
+ string speechRegion = Environment.GetEnvironmentVariable("SPEECH_REGION") ??
66
+ throw new ArgumentException("Missing SPEECH_REGION");
67
+
68
+ // Sentence end symbols for splitting the response into sentences.
69
+ List<string> sentenceSaperators = new() { ".", "!", "?", ";", "。", "!", "?", ";", "\n" };
70
+
71
+ try
72
+ {
73
+ await ChatWithOpenAI();
74
+ }
75
+ catch (Exception ex)
76
+ {
77
+ Console.WriteLine(ex);
78
+ }
79
+
80
+ // Prompts Azure OpenAI with a request and synthesizes the response.
81
+ async Task AskOpenAI(string prompt)
55
82
{
56
- // This example requires environment variables named "OPEN_AI_KEY" and "OPEN_AI_ENDPOINT"
57
- // Your endpoint should look like the following https://YOUR_OPEN_AI_RESOURCE_NAME.openai.azure.com/
58
- static string openAIKey = Environment.GetEnvironmentVariable("OPEN_AI_KEY");
59
- static string openAIEndpoint = Environment.GetEnvironmentVariable("OPEN_AI_ENDPOINT");
60
-
61
- // Enter the deployment name you chose when you deployed the model.
62
- static string engine = "gpt-35-turbo-instruct";
63
-
64
- // This example requires environment variables named "SPEECH_KEY" and "SPEECH_REGION"
65
- static string speechKey = Environment.GetEnvironmentVariable("SPEECH_KEY");
66
- static string speechRegion = Environment.GetEnvironmentVariable("SPEECH_REGION");
67
-
68
- // Sentence end symbols for splitting the response into sentences.
69
- static List<string> sentenceSaperators = new() { ".", "!", "?", ";", "。", "!", "?", ";", "\n" };
70
-
71
- private static object consoleLock = new();
72
-
73
- // Prompts Azure OpenAI with a request and synthesizes the response.
74
- async static Task AskOpenAI(string prompt)
83
+ object consoleLock = new();
84
+ var speechConfig = SpeechConfig.FromSubscription(speechKey, speechRegion);
85
+
86
+ // The language of the voice that speaks.
87
+ speechConfig.SpeechSynthesisVoiceName = "en-US-JennyMultilingualNeural";
88
+ var audioOutputConfig = AudioConfig.FromDefaultSpeakerOutput();
89
+ using var speechSynthesizer = new SpeechSynthesizer(speechConfig, audioOutputConfig);
90
+ speechSynthesizer.Synthesizing += (sender, args) =>
75
91
{
76
- var speechConfig = SpeechConfig.FromSubscription(speechKey, speechRegion);
77
- // The language of the voice that speaks.
78
- speechConfig.SpeechSynthesisVoiceName = "en-US-JennyMultilingualNeural";
79
- var audioOutputConfig = AudioConfig.FromDefaultSpeakerOutput();
80
- using var speechSynthesizer = new SpeechSynthesizer(speechConfig, audioOutputConfig);
81
- speechSynthesizer.Synthesizing += (sender, args) =>
92
+ lock (consoleLock)
82
93
{
83
- lock (consoleLock)
84
- {
85
- Console.ForegroundColor = ConsoleColor.Yellow;
86
- Console.Write($"[Audio]");
87
- Console.ResetColor();
88
- }
89
- };
90
-
91
- // Ask Azure OpenAI
92
- OpenAIClient client = new(new Uri(openAIEndpoint), new AzureKeyCredential(openAIKey));
93
- var completionsOptions = new CompletionsOptions()
94
+ Console.ForegroundColor = ConsoleColor.Yellow;
95
+ Console.Write($"[Audio]");
96
+ Console.ResetColor();
97
+ }
98
+ };
99
+
100
+ // Ask Azure OpenAI
101
+ OpenAIClient client = new(new Uri(openAIEndpoint), new AzureKeyCredential(openAIKey));
102
+ var completionsOptions = new ChatCompletionsOptions()
103
+ {
104
+ DeploymentName = engine,
105
+ Messages = { new ChatRequestUserMessage(prompt) },
106
+ MaxTokens = 100,
107
+ };
108
+ var responseStream = await client.GetChatCompletionsStreamingAsync(completionsOptions);
109
+
110
+ StringBuilder gptBuffer = new();
111
+ await foreach (var completionUpdate in responseStream)
112
+ {
113
+ var message = completionUpdate.ContentUpdate;
114
+ if (string.IsNullOrEmpty(message))
94
115
{
95
- Prompts = { prompt },
96
- MaxTokens = 100,
97
-
98
- };
99
- var responseStream = await client.GetCompletionsStreamingAsync(engine, completionsOptions);
100
- using var streamingCompletions = responseStream.Value;
101
- StringBuilder gptBuffer = new();
102
- await foreach (var choice in streamingCompletions.GetChoicesStreaming())
116
+ continue;
117
+ }
118
+
119
+ lock (consoleLock)
103
120
{
104
- await foreach (var message in choice.GetTextStreaming())
105
- {
106
- if (string.IsNullOrEmpty(message))
107
- {
108
- continue;
109
- }
110
-
111
- lock (consoleLock)
112
- {
113
- Console.ForegroundColor = ConsoleColor.DarkBlue;
114
- Console.Write($"{message}");
115
- Console.ResetColor();
116
- }
117
-
118
- gptBuffer.Append(message);
119
-
120
- if (sentenceSaperators.Any(message.Contains))
121
- {
122
- var sentence = gptBuffer.ToString().Trim();
123
- if (!string.IsNullOrEmpty(sentence))
124
- {
125
- await speechSynthesizer.SpeakTextAsync(sentence).ConfigureAwait(true);
126
- gptBuffer.Clear();
127
- }
128
- }
129
- }
121
+ Console.ForegroundColor = ConsoleColor.DarkBlue;
122
+ Console.Write($"{message}");
123
+ Console.ResetColor();
130
124
}
131
- }
132
-
133
- // Continuously listens for speech input to recognize and send as text to Azure OpenAI
134
- async static Task ChatWithOpenAI()
135
- {
136
- // Should be the locale for the speaker's language.
137
- var speechConfig = SpeechConfig.FromSubscription(speechKey, speechRegion);
138
- speechConfig.SpeechRecognitionLanguage = "en-US";
139
-
140
- using var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
141
- using var speechRecognizer = new SpeechRecognizer(speechConfig, audioConfig);
142
- var conversationEnded = false;
143
-
144
- while(!conversationEnded)
125
+
126
+ gptBuffer.Append(message);
127
+
128
+ if (sentenceSaperators.Any(message.Contains))
145
129
{
146
- Console.WriteLine("Azure OpenAI is listening. Say 'Stop' or press Ctrl-Z to end the conversation.");
147
-
148
- // Get audio from the microphone and then send it to the TTS service.
149
- var speechRecognitionResult = await speechRecognizer.RecognizeOnceAsync();
150
-
151
- switch (speechRecognitionResult.Reason)
130
+ var sentence = gptBuffer.ToString().Trim();
131
+ if (!string.IsNullOrEmpty(sentence))
152
132
{
153
- case ResultReason.RecognizedSpeech:
154
- if (speechRecognitionResult.Text == "Stop.")
155
- {
156
- Console.WriteLine("Conversation ended.");
157
- conversationEnded = true;
158
- }
159
- else
160
- {
161
- Console.WriteLine($"Recognized speech: {speechRecognitionResult.Text}");
162
- await AskOpenAI(speechRecognitionResult.Text).ConfigureAwait(true);
163
- }
164
- break;
165
- case ResultReason.NoMatch:
166
- Console.WriteLine($"No speech could be recognized: ");
167
- break;
168
- case ResultReason.Canceled:
169
- var cancellationDetails = CancellationDetails.FromResult(speechRecognitionResult);
170
- Console.WriteLine($"Speech Recognition canceled: {cancellationDetails.Reason}");
171
- if (cancellationDetails.Reason == CancellationReason.Error)
172
- {
173
- Console.WriteLine($"Error details={cancellationDetails.ErrorDetails}");
174
- }
175
- break;
133
+ await speechSynthesizer.SpeakTextAsync(sentence);
134
+ gptBuffer.Clear();
176
135
}
177
136
}
178
137
}
179
-
180
- async static Task Main(string[] args)
138
+ }
139
+
140
+ // Continuously listens for speech input to recognize and send as text to Azure OpenAI
141
+ async Task ChatWithOpenAI()
142
+ {
143
+ // Should be the locale for the speaker's language.
144
+ var speechConfig = SpeechConfig.FromSubscription(speechKey, speechRegion);
145
+ speechConfig.SpeechRecognitionLanguage = "en-US";
146
+
147
+ using var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
148
+ using var speechRecognizer = new SpeechRecognizer(speechConfig, audioConfig);
149
+ var conversationEnded = false;
150
+
151
+ while (!conversationEnded)
181
152
{
182
- try
183
- {
184
- await ChatWithOpenAI().ConfigureAwait(true);
185
- }
186
- catch (Exception ex)
153
+ Console.WriteLine("Azure OpenAI is listening. Say 'Stop' or press Ctrl-Z to end the conversation.");
154
+
155
+ // Get audio from the microphone and then send it to the TTS service.
156
+ var speechRecognitionResult = await speechRecognizer.RecognizeOnceAsync();
157
+
158
+ switch (speechRecognitionResult.Reason)
187
159
{
188
- Console.WriteLine(ex.Message);
160
+ case ResultReason.RecognizedSpeech:
161
+ if (speechRecognitionResult.Text == "Stop.")
162
+ {
163
+ Console.WriteLine("Conversation ended.");
164
+ conversationEnded = true;
165
+ }
166
+ else
167
+ {
168
+ Console.WriteLine($"Recognized speech: {speechRecognitionResult.Text}");
169
+ await AskOpenAI(speechRecognitionResult.Text);
170
+ }
171
+
172
+ break;
173
+ case ResultReason.NoMatch:
174
+ Console.WriteLine($"No speech could be recognized: ");
175
+ break;
176
+ case ResultReason.Canceled:
177
+ var cancellationDetails = CancellationDetails.FromResult(speechRecognitionResult);
178
+ Console.WriteLine($"Speech Recognition canceled: {cancellationDetails.Reason}");
179
+ if (cancellationDetails.Reason == CancellationReason.Error)
180
+ {
181
+ Console.WriteLine($"Error details={cancellationDetails.ErrorDetails}");
182
+ }
183
+
184
+ break;
189
185
}
190
186
}
191
187
}
192
188
```
193
189
194
- 1. To increase or decrease the number of tokens returned by Azure OpenAI, change the `MaxTokens` property in the `CompletionsOptions ` class instance. For more information tokens and cost implications, see [Azure OpenAI tokens](/azure/ai-services/openai/overview#tokens) and [Azure OpenAI pricing](https://azure.microsoft.com/pricing/details/cognitive-services/openai-service/).
190
+ 1. To increase or decrease the number of tokens returned by Azure OpenAI, change the `MaxTokens` property in the `ChatCompletionsOptions` class instance. For more information about tokens and cost implications, see [Azure OpenAI tokens](/azure/ai-services/openai/overview#tokens) and [Azure OpenAI pricing](https://azure.microsoft.com/pricing/details/cognitive-services/openai-service/).
195
191
196
192
Run your new console application to start speech recognition from a microphone:
197
193
@@ -200,7 +196,7 @@ dotnet run
200
196
```
201
197
202
198
> [ !IMPORTANT]
203
- > Make sure that you set the ` OPEN_AI_KEY ` , ` OPEN_AI_ENDPOINT ` , ` SPEECH_KEY ` and ` SPEECH_REGION ` environment variables as described [ previously] ( #set-environment-variables ) . If you don't set these variables, the sample will fail with an error message.
199
+ > Make sure that you set the ` OPEN_AI_KEY ` , ` OPEN_AI_ENDPOINT ` , ` OPEN_AI_DEPLOYMENT_NAME ` , ` SPEECH_KEY ` and ` SPEECH_REGION ` environment variables as described [ previously] ( #set-environment-variables ) . If you don't set these variables, the sample will fail with an error message.
204
200
205
201
Speak into your microphone when prompted. The console output includes the prompt for you to begin speaking, then your request as text, and then the response from Azure OpenAI as text. The response from Azure OpenAI should be converted from text to speech and then output to the default speaker.
206
202
0 commit comments