---
author: eric-urban
ms.service: cognitive-services
ms.topic: include
ms.date: 04/15/2022
ms.author: eur
---

[!INCLUDE [Header](../../common/csharp.md)]

[!INCLUDE [Introduction](intro.md)]

## Prerequisites

[!INCLUDE [Prerequisites](../../common/azure-prerequisites.md)]

## Set up the environment
The Speech SDK is available as a [NuGet package](https://www.nuget.org/packages/Microsoft.CognitiveServices.Speech) and implements .NET Standard 2.0. You install the Speech SDK later in this guide, but first check the [SDK installation guide](../../../quickstarts/setup-platform.md?pivots=programming-language-csharp) for any more requirements.

### Set environment variables

This example requires environment variables named `OPEN_AI_KEY`, `OPEN_AI_ENDPOINT`, `SPEECH_KEY`, and `SPEECH_REGION`.

[!INCLUDE [Environment variables](../../common/environment-variables-openai.md)]
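
Before you run the sample, you can optionally confirm that all four variables are visible to new console sessions. The following standalone check is only a convenience sketch and isn't part of the quickstart code; it assumes the variable names listed above.

```csharp
using System;

class EnvironmentCheck
{
    static void Main()
    {
        // The same variable names that the quickstart sample reads.
        string[] requiredVariables = { "OPEN_AI_KEY", "OPEN_AI_ENDPOINT", "SPEECH_KEY", "SPEECH_REGION" };

        foreach (var name in requiredVariables)
        {
            var value = Environment.GetEnvironmentVariable(name);
            Console.WriteLine(string.IsNullOrEmpty(value) ? $"{name} is NOT set." : $"{name} is set.");
        }
    }
}
```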

## Recognize speech from a microphone

Follow these steps to create a new console application.

1. Open a command prompt where you want the new project, and create a console application with the .NET CLI. The `Program.cs` file should be created in the project directory.
    ```dotnetcli
    dotnet new console
    ```
1. Install the Speech SDK in your new project with the .NET CLI.
    ```dotnetcli
    dotnet add package Microsoft.CognitiveServices.Speech
    ```
1. Install the Azure OpenAI SDK (prerelease) in your new project with the .NET CLI.
    ```dotnetcli
    dotnet add package Azure.AI.OpenAI --prerelease
    ```
1. Replace the contents of `Program.cs` with the following code.

    ```csharp
    using System;
    using System.IO;
    using System.Threading.Tasks;
    using Microsoft.CognitiveServices.Speech;
    using Microsoft.CognitiveServices.Speech.Audio;
    using Azure;
    using Azure.AI.OpenAI;
    using static System.Environment;

    class Program
    {
        // This example requires environment variables named "OPEN_AI_KEY" and "OPEN_AI_ENDPOINT"
        // Your endpoint should look like the following https://YOUR_OPEN_AI_RESOURCE_NAME.openai.azure.com/
        static string openAIKey = Environment.GetEnvironmentVariable("OPEN_AI_KEY");
        static string openAIEndpoint = Environment.GetEnvironmentVariable("OPEN_AI_ENDPOINT");

        // Enter the deployment name you chose when you deployed the model.
        static string engine = "text-davinci-002";

        // This example requires environment variables named "SPEECH_KEY" and "SPEECH_REGION"
        static string speechKey = Environment.GetEnvironmentVariable("SPEECH_KEY");
        static string speechRegion = Environment.GetEnvironmentVariable("SPEECH_REGION");

        // Prompts Azure OpenAI with a request and synthesizes the response.
        async static Task AskOpenAI(string prompt)
        {
            // Ask Azure OpenAI
            OpenAIClient client = new(new Uri(openAIEndpoint), new AzureKeyCredential(openAIKey));
            var completionsOptions = new CompletionsOptions()
            {
                Prompts = { prompt },
                MaxTokens = 100,
            };
            Response<Completions> completionsResponse = client.GetCompletions(engine, completionsOptions);
            string text = completionsResponse.Value.Choices[0].Text.Trim();
            Console.WriteLine($"Azure OpenAI response: {text}");

            var speechConfig = SpeechConfig.FromSubscription(speechKey, speechRegion);
            // The language of the voice that speaks.
            speechConfig.SpeechSynthesisVoiceName = "en-US-JennyMultilingualNeural";
            var audioOutputConfig = AudioConfig.FromDefaultSpeakerOutput();

            using (var speechSynthesizer = new SpeechSynthesizer(speechConfig, audioOutputConfig))
            {
                var speechSynthesisResult = await speechSynthesizer.SpeakTextAsync(text).ConfigureAwait(true);

                if (speechSynthesisResult.Reason == ResultReason.SynthesizingAudioCompleted)
                {
                    Console.WriteLine($"Speech synthesized to speaker for text: [{text}]");
                }
                else if (speechSynthesisResult.Reason == ResultReason.Canceled)
                {
                    var cancellationDetails = SpeechSynthesisCancellationDetails.FromResult(speechSynthesisResult);
                    Console.WriteLine($"Speech synthesis canceled: {cancellationDetails.Reason}");

                    if (cancellationDetails.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"Error details: {cancellationDetails.ErrorDetails}");
                    }
                }
            }
        }

        // Continuously listens for speech input to recognize and send as text to Azure OpenAI
        async static Task ChatWithOpenAI()
        {
            // Should be the locale for the speaker's language.
            var speechConfig = SpeechConfig.FromSubscription(speechKey, speechRegion);
            speechConfig.SpeechRecognitionLanguage = "en-US";

            using var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
            using var speechRecognizer = new SpeechRecognizer(speechConfig, audioConfig);
            var conversationEnded = false;

            while (!conversationEnded)
            {
                Console.WriteLine("Azure OpenAI is listening. Say 'Stop' or press Ctrl-Z to end the conversation.");

                // Get audio from the microphone and convert it to text with speech recognition.
                var speechRecognitionResult = await speechRecognizer.RecognizeOnceAsync();

                switch (speechRecognitionResult.Reason)
                {
                    case ResultReason.RecognizedSpeech:
                        if (speechRecognitionResult.Text == "Stop.")
                        {
                            Console.WriteLine("Conversation ended.");
                            conversationEnded = true;
                        }
                        else
                        {
                            Console.WriteLine($"Recognized speech: {speechRecognitionResult.Text}");
                            await AskOpenAI(speechRecognitionResult.Text).ConfigureAwait(true);
                        }
                        break;
                    case ResultReason.NoMatch:
                        Console.WriteLine("No speech could be recognized.");
                        break;
                    case ResultReason.Canceled:
                        var cancellationDetails = CancellationDetails.FromResult(speechRecognitionResult);
                        Console.WriteLine($"Speech Recognition canceled: {cancellationDetails.Reason}");
                        if (cancellationDetails.Reason == CancellationReason.Error)
                        {
                            Console.WriteLine($"Error details={cancellationDetails.ErrorDetails}");
                        }
                        break;
                }
            }
        }

        async static Task Main(string[] args)
        {
            try
            {
                await ChatWithOpenAI().ConfigureAwait(true);
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
    }
    ```

1. To increase or decrease the number of tokens returned by Azure OpenAI, change the `MaxTokens` property in the `CompletionsOptions` class instance, as shown in the sketch after this list. For more information about tokens and cost implications, see [Azure OpenAI tokens](/azure/cognitive-services/openai/overview#tokens) and [Azure OpenAI pricing](https://azure.microsoft.com/pricing/details/cognitive-services/openai-service/).

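
For example, to allow longer responses you only change the options object that the sample already builds in `AskOpenAI`. This is a sketch of that single adjustment; the value `200` is just an illustrative number, and higher values can increase cost because billing is per token.

```csharp
// Sketch only: the same options object as in AskOpenAI, with a larger MaxTokens value.
var completionsOptions = new CompletionsOptions()
{
    Prompts = { prompt },
    MaxTokens = 200, // was 100 in the sample
};
```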
Run your new console application to start speech recognition from a microphone:

```console
dotnet run
```

> [!IMPORTANT]
> Make sure that you set the `OPEN_AI_KEY`, `OPEN_AI_ENDPOINT`, `SPEECH_KEY`, and `SPEECH_REGION` environment variables as described [previously](#set-environment-variables). If you don't set these variables, the sample will fail with an error message.

Speak into your microphone when prompted. The console output includes the prompt for you to begin speaking, then your request as text, and then the response from Azure OpenAI as text. The response from Azure OpenAI should be converted from text to speech and then output to the default speaker.

```console
PS C:\dev\openai\csharp> dotnet run
Azure OpenAI is listening. Say 'Stop' or press Ctrl-Z to end the conversation.
Recognized speech: Make a comma separated list of all continents.
Azure OpenAI response: Africa, Antarctica, Asia, Australia, Europe, North America, South America
Speech synthesized to speaker for text: [Africa, Antarctica, Asia, Australia, Europe, North America, South America]
Azure OpenAI is listening. Say 'Stop' or press Ctrl-Z to end the conversation.
Recognized speech: Make a comma separated list of 1 Astronomical observatory for each continent. A list should include each continent name in parentheses.
Azure OpenAI response: Mauna Kea Observatories (North America), La Silla Observatory (South America), Tenerife Observatory (Europe), Siding Spring Observatory (Australia), Beijing Xinglong Observatory (Asia), Naukluft Plateau Observatory (Africa), Rutherford Appleton Laboratory (Antarctica)
Speech synthesized to speaker for text: [Mauna Kea Observatories (North America), La Silla Observatory (South America), Tenerife Observatory (Europe), Siding Spring Observatory (Australia), Beijing Xinglong Observatory (Asia), Naukluft Plateau Observatory (Africa), Rutherford Appleton Laboratory (Antarctica)]
Azure OpenAI is listening. Say 'Stop' or press Ctrl-Z to end the conversation.
Conversation ended.
PS C:\dev\openai\csharp>
```

## Remarks
Now that you've completed the quickstart, here are some more considerations:

- To change the speech recognition language, replace `en-US` with another [supported language](~/articles/cognitive-services/speech-service/supported-languages.md). For example, `es-ES` for Spanish (Spain). The default language is `en-US` if you don't specify a language. For details about how to identify one of multiple languages that might be spoken, see [language identification](~/articles/cognitive-services/speech-service/language-identification.md). The sketch after this list shows where the language, voice, and deployment settings appear in the sample.
- To change the voice that you hear, replace `en-US-JennyMultilingualNeural` with another [supported voice](~/articles/cognitive-services/speech-service/supported-languages.md#prebuilt-neural-voices). If the voice doesn't speak the language of the text returned from Azure OpenAI, the Speech service doesn't output synthesized audio.
- To use a different [model](/azure/cognitive-services/openai/concepts/models#model-summary-table-and-region-availability), replace `text-davinci-002` with the ID of another [deployment](/azure/cognitive-services/openai/how-to/create-resource#deploy-a-model). Keep in mind that the deployment ID isn't necessarily the same as the model name. You named your deployment when you created it in [Azure OpenAI Studio](https://oai.azure.com/).
- Azure OpenAI also performs content moderation on the prompt inputs and generated outputs. The prompts or responses may be filtered if harmful content is detected. For more information, see the [content filtering](/azure/cognitive-services/openai/concepts/content-filter) article.
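
Here's a minimal sketch of where the first three of those changes land in the sample above. The Spanish locale, voice, and deployment name are illustrative placeholders only, not recommendations:

```csharp
// In ChatWithOpenAI: the language the speaker uses at the microphone (illustrative value).
speechConfig.SpeechRecognitionLanguage = "es-ES";

// In AskOpenAI: the voice that reads the Azure OpenAI response aloud (illustrative value).
speechConfig.SpeechSynthesisVoiceName = "es-ES-ElviraNeural";

// At the top of Program: the deployment ID passed to GetCompletions (use your own deployment name).
static string engine = "my-gpt-deployment";
```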

## Clean up resources

[!INCLUDE [Delete resource](../../common/delete-resource.md)]
