Skip to content

Commit 8340270

Browse files
author
Brian Mouncer
committed
Merge branch 'brianem/1.43.0_updates' of https://github.com/BrianMouncer/azure-ai-docs-pr into brianem/1.43.0_updates
2 parents 97f1fa0 + b438f77 commit 8340270

File tree

7 files changed

+122
-126
lines changed

7 files changed

+122
-126
lines changed

articles/ai-services/speech-service/audio-processing-speech-sdk.md

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ This sample shows how to use MAS with all default enhancement options on input f
2828
### [C#](#tab/csharp)
2929

3030
```csharp
31-
var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
31+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
3232

3333
var audioProcessingOptions = AudioProcessingOptions.Create(AudioProcessingConstants.AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT);
3434
var audioInput = AudioConfig.FromDefaultMicrophoneInput(audioProcessingOptions);
@@ -39,7 +39,7 @@ var recognizer = new SpeechRecognizer(speechConfig, audioInput);
3939
### [C++](#tab/cpp)
4040

4141
```cpp
42-
auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");
42+
auto speechConfig = SpeechConfig::FromEndpoint("YourServiceEndpoint", "YourSubscriptionKey");
4343

4444
auto audioProcessingOptions = AudioProcessingOptions::Create(AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT);
4545
auto audioInput = AudioConfig::FromDefaultMicrophoneInput(audioProcessingOptions);
@@ -50,7 +50,7 @@ auto recognizer = SpeechRecognizer::FromConfig(speechConfig, audioInput);
5050
### [Java](#tab/java)
5151

5252
```java
53-
SpeechConfig speechConfig = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
53+
SpeechConfig speechConfig = SpeechConfig.fromEndpoint(new java.net.URI("YourSpeechEndpoint"), "YourSpeechKey");
5454

5555
AudioProcessingOptions audioProcessingOptions = AudioProcessingOptions.create(AudioProcessingConstants.AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT);
5656
AudioConfig audioInput = AudioConfig.fromDefaultMicrophoneInput(audioProcessingOptions);
@@ -69,7 +69,7 @@ This sample shows how to use MAS with a predefined microphone geometry on a spec
6969
### [C#](#tab/csharp)
7070

7171
```csharp
72-
var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
72+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
7373

7474
var audioProcessingOptions = AudioProcessingOptions.Create(AudioProcessingConstants.AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT, PresetMicrophoneArrayGeometry.Linear2);
7575
var audioInput = AudioConfig.FromMicrophoneInput("hw:0,1", audioProcessingOptions);
@@ -80,7 +80,7 @@ var recognizer = new SpeechRecognizer(speechConfig, audioInput);
8080
### [C++](#tab/cpp)
8181

8282
```cpp
83-
auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");
83+
auto speechConfig = SpeechConfig::FromEndpoint("YourServiceEndpoint", "YourSubscriptionKey");
8484

8585
auto audioProcessingOptions = AudioProcessingOptions::Create(AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT, PresetMicrophoneArrayGeometry::Linear2);
8686
auto audioInput = AudioConfig::FromMicrophoneInput("hw:0,1", audioProcessingOptions);
@@ -91,7 +91,7 @@ auto recognizer = SpeechRecognizer::FromConfig(speechConfig, audioInput);
9191
### [Java](#tab/java)
9292

9393
```java
94-
SpeechConfig speechConfig = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
94+
SpeechConfig speechConfig = SpeechConfig.fromEndpoint(new java.net.URI("YourSpeechEndpoint"), "YourSpeechKey");
9595

9696
AudioProcessingOptions audioProcessingOptions = AudioProcessingOptions.create(AudioProcessingConstants.AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT, PresetMicrophoneArrayGeometry.Linear2);
9797
AudioConfig audioInput = AudioConfig.fromMicrophoneInput("hw:0,1", audioProcessingOptions);
@@ -110,7 +110,7 @@ This sample shows how to use MAS with a custom microphone geometry on a specifie
110110
### [C#](#tab/csharp)
111111

112112
```csharp
113-
var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
113+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
114114

115115
MicrophoneCoordinates[] microphoneCoordinates = new MicrophoneCoordinates[7]
116116
{
@@ -132,7 +132,7 @@ var recognizer = new SpeechRecognizer(speechConfig, audioInput);
132132
### [C++](#tab/cpp)
133133

134134
```cpp
135-
auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");
135+
auto speechConfig = SpeechConfig::FromEndpoint("YourServiceEndpoint", "YourSubscriptionKey");
136136

137137
MicrophoneArrayGeometry microphoneArrayGeometry
138138
{
@@ -148,7 +148,7 @@ auto recognizer = SpeechRecognizer::FromConfig(speechConfig, audioInput);
148148
### [Java](#tab/java)
149149

150150
```java
151-
SpeechConfig speechConfig = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
151+
SpeechConfig speechConfig = SpeechConfig.fromEndpoint(new java.net.URI("YourSpeechEndpoint"), "YourSpeechKey");
152152

153153
MicrophoneCoordinates[] microphoneCoordinates = new MicrophoneCoordinates[7];
154154
microphoneCoordinates[0] = new MicrophoneCoordinates(0, 0, 0);
@@ -177,7 +177,7 @@ In this example:
177177
### [C#](#tab/csharp)
178178

179179
```csharp
180-
var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
180+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
181181

182182
var audioProcessingOptions = AudioProcessingOptions.Create(AudioProcessingConstants.AUDIO_INPUT_PROCESSING_DISABLE_ECHO_CANCELLATION | AudioProcessingConstants.AUDIO_INPUT_PROCESSING_DISABLE_NOISE_SUPPRESSION | AudioProcessingConstants.AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT);
183183
var audioInput = AudioConfig.FromDefaultMicrophoneInput(audioProcessingOptions);
@@ -188,7 +188,7 @@ var recognizer = new SpeechRecognizer(speechConfig, audioInput);
188188
### [C++](#tab/cpp)
189189

190190
```cpp
191-
auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");
191+
auto speechConfig = SpeechConfig::FromEndpoint("YourServiceEndpoint", "YourSubscriptionKey");
192192

193193
auto audioProcessingOptions = AudioProcessingOptions::Create(AUDIO_INPUT_PROCESSING_DISABLE_ECHO_CANCELLATION | AUDIO_INPUT_PROCESSING_DISABLE_NOISE_SUPPRESSION | AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT);
194194
auto audioInput = AudioConfig::FromDefaultMicrophoneInput(audioProcessingOptions);
@@ -199,7 +199,7 @@ auto recognizer = SpeechRecognizer::FromConfig(speechConfig, audioInput);
199199
### [Java](#tab/java)
200200

201201
```java
202-
SpeechConfig speechConfig = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
202+
SpeechConfig speechConfig = SpeechConfig.fromEndpoint(new java.net.URI("YourSpeechEndpoint"), "YourSpeechKey");
203203

204204
AudioProcessingOptions audioProcessingOptions = AudioProcessingOptions.create(AudioProcessingConstants.AUDIO_INPUT_PROCESSING_DISABLE_ECHO_CANCELLATION | AudioProcessingConstants.AUDIO_INPUT_PROCESSING_DISABLE_NOISE_SUPPRESSION | AudioProcessingConstants.AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT);
205205
AudioConfig audioInput = AudioConfig.fromDefaultMicrophoneInput(audioProcessingOptions);
@@ -221,7 +221,7 @@ In the following code example, the start angle is set to 70 degrees and the end
221221
### [C#](#tab/csharp)
222222

223223
```csharp
224-
var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
224+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
225225

226226
MicrophoneCoordinates[] microphoneCoordinates = new MicrophoneCoordinates[4]
227227
{
@@ -241,7 +241,7 @@ var recognizer = new SpeechRecognizer(speechConfig, audioInput);
241241
### [C++](#tab/cpp)
242242

243243
```cpp
244-
auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");
244+
auto speechConfig = SpeechConfig::FromEndpoint("YourServiceEndpoint", "YourSubscriptionKey");
245245

246246
MicrophoneArrayGeometry microphoneArrayGeometry
247247
{
@@ -260,7 +260,7 @@ auto recognizer = SpeechRecognizer::FromConfig(speechConfig, audioInput);
260260
### [Java](#tab/java)
261261

262262
```java
263-
SpeechConfig speechConfig = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
263+
SpeechConfig speechConfig = SpeechConfig.fromEndpoint(new java.net.URI("YourSpeechEndpoint"), "YourSpeechKey");
264264

265265
MicrophoneCoordinates[] microphoneCoordinates = new MicrophoneCoordinates[4];
266266
microphoneCoordinates[0] = new MicrophoneCoordinates(-60, 0, 0);

articles/ai-services/speech-service/embedded-speech.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,7 @@ All text to speech locales [here](language-support.md?tabs=tts) (except fa-IR, P
166166
167167
## Embedded speech configuration
168168
169-
For cloud connected applications, as shown in most Speech SDK samples, you use the `SpeechConfig` object with an API key and region. For embedded speech, you don't use an AI Services resource for Speech. Instead of a cloud resource, you use the [models and voices](#models-and-voices) that you download to your local device.
169+
For cloud-connected applications, as shown in most Speech SDK samples, you use the `SpeechConfig` object with an API key and endpoint. For embedded speech, you don't use an AI Services resource for Speech. Instead of a cloud resource, you use the [models and voices](#models-and-voices) that you download to your local device.
170170
171171
Use the `EmbeddedSpeechConfig` object to set the location of the models or voices. If your application is used for both speech to text and text to speech, you can use the same `EmbeddedSpeechConfig` object to set the location of the models and voices.
172172

articles/ai-services/speech-service/how-to-configure-azure-ad-auth.md

Lines changed: 52 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -175,11 +175,39 @@ With a Microsoft Entra access token, you can now create a Speech SDK configurati
175175

176176
The method of providing the token, and the method to construct the corresponding Speech SDK ```Config``` object varies by the object you're using.
177177

178-
### SpeechRecognizer, SpeechSynthesizer, IntentRecognizer, ConversationTranscriber
178+
::: zone pivot="programming-language-csharp"
179+
### SpeechRecognizer, SourceLanguageRecognizer, ConversationTranscriber
179180

180-
For ```SpeechRecognizer```, ```SpeechSynthesizer```, ```IntentRecognizer```, ```ConversationTranscriber``` objects, build the authorization token from the resource ID and the Microsoft Entra access token and then use it to create a ```SpeechConfig``` object.
181+
For ```SpeechRecognizer```, ```SourceLanguageRecognizer```, ```ConversationTranscriber``` objects, use an appropriate instance of [TokenCredential](https://learn.microsoft.com/dotnet/api/azure.core.tokencredential) for authentication, along with the endpoint that includes your [custom domain](https://learn.microsoft.com/azure/ai-services/speech-service/speech-services-private-link?tabs=portal#create-a-custom-domain-name), to create a ```SpeechConfig``` object.
182+
183+
```C#
184+
TokenCredential browserCredential = new InteractiveBrowserCredential();
185+
186+
// Define the custom domain endpoint for your Speech resource.
187+
var endpoint = "wss://{your custom name}.cognitiveservices.azure.com/stt/speech/universal/v2";
188+
189+
// Create the SpeechConfig object using the custom domain endpoint and TokenCredential.
190+
var speechConfig = SpeechConfig.FromEndpoint(new Uri(endpoint), browserCredential);
191+
```
192+
193+
### TranslationRecognizer
194+
195+
For the ```TranslationRecognizer``` object, use an appropriate instance of [TokenCredential](https://learn.microsoft.com/dotnet/api/azure.core.tokencredential) for authentication, along with the endpoint that includes your [custom domain](https://learn.microsoft.com/azure/ai-services/speech-service/speech-services-private-link?tabs=portal#create-a-custom-domain-name), to create a ```SpeechTranslationConfig``` object.
196+
197+
```C#
198+
TokenCredential browserCredential = new InteractiveBrowserCredential();
199+
200+
// Define the custom domain endpoint for your Speech resource
201+
var endpoint = "wss://{your custom name}.cognitiveservices.azure.com/stt/speech/universal/v2";
202+
203+
// Create the SpeechTranslationConfig object using the custom domain endpoint and TokenCredential.
204+
var speechConfig = SpeechTranslationConfig.FromEndpoint(new Uri(endpoint), browserCredential);
205+
```
206+
207+
### SpeechSynthesizer, IntentRecognizer
208+
209+
For ```SpeechSynthesizer```, ```IntentRecognizer``` objects, build the authorization token from the resource ID and the Microsoft Entra access token and then use it to create a ```SpeechConfig``` object.
181210

182-
::: zone pivot="programming-language-csharp"
183211
```C#
184212
string resourceId = "Your Resource ID";
185213
string aadToken = "Your Microsoft Entra access token";
@@ -192,6 +220,10 @@ var speechConfig = SpeechConfig.FromAuthorizationToken(authorizationToken, regio
192220
::: zone-end
193221

194222
::: zone pivot="programming-language-cpp"
223+
### SpeechRecognizer, SpeechSynthesizer, IntentRecognizer, ConversationTranscriber
224+
225+
For ```SpeechRecognizer```, ```SpeechSynthesizer```, ```IntentRecognizer```, ```ConversationTranscriber``` objects, build the authorization token from the resource ID and the Microsoft Entra access token and then use it to create a ```SpeechConfig``` object.
226+
195227
```C++
196228
std::string resourceId = "Your Resource ID";
197229
std::string aadToken = "Your Microsoft Entra access token";
@@ -204,6 +236,10 @@ auto speechConfig = SpeechConfig::FromAuthorizationToken(authorizationToken, reg
204236
::: zone-end
205237

206238
::: zone pivot="programming-language-java"
239+
### SpeechRecognizer, SpeechSynthesizer, IntentRecognizer, ConversationTranscriber
240+
241+
For ```SpeechRecognizer```, ```SpeechSynthesizer```, ```IntentRecognizer```, ```ConversationTranscriber``` objects, build the authorization token from the resource ID and the Microsoft Entra access token and then use it to create a ```SpeechConfig``` object.
242+
207243
```Java
208244
String resourceId = "Your Resource ID";
209245
String region = "Your Region";
@@ -215,6 +251,10 @@ SpeechConfig speechConfig = SpeechConfig.fromAuthorizationToken(authorizationTok
215251
::: zone-end
216252

217253
::: zone pivot="programming-language-python"
254+
### SpeechRecognizer, SpeechSynthesizer, IntentRecognizer, ConversationTranscriber
255+
256+
For ```SpeechRecognizer```, ```SpeechSynthesizer```, ```IntentRecognizer```, ```ConversationTranscriber``` objects, build the authorization token from the resource ID and the Microsoft Entra access token and then use it to create a ```SpeechConfig``` object.
257+
218258
```Python
219259
resourceId = "Your Resource ID"
220260
region = "Your Region"
@@ -224,23 +264,11 @@ speechConfig = SpeechConfig(auth_token=authorizationToken, region=region)
224264
```
225265
::: zone-end
226266

267+
::: zone pivot="programming-language-cpp"
227268
### TranslationRecognizer
228269

229270
For the ```TranslationRecognizer```, build the authorization token from the resource ID and the Microsoft Entra access token and then use it to create a ```SpeechTranslationConfig``` object.
230271

231-
::: zone pivot="programming-language-csharp"
232-
```C#
233-
string resourceId = "Your Resource ID";
234-
string aadToken = "Your Microsoft Entra access token";
235-
string region = "Your Speech Region";
236-
237-
// You need to include the "aad#" prefix and the "#" (hash) separator between resource ID and Microsoft Entra access token.
238-
var authorizationToken = $"aad#{resourceId}#{aadToken}";
239-
var speechConfig = SpeechTranslationConfig.FromAuthorizationToken(authorizationToken, region);
240-
```
241-
::: zone-end
242-
243-
::: zone pivot="programming-language-cpp"
244272
```cpp
245273
std::string resourceId = "Your Resource ID";
246274
std::string aadToken = "Your Microsoft Entra access token";
@@ -253,6 +281,10 @@ auto speechConfig = SpeechTranslationConfig::FromAuthorizationToken(authorizatio
253281
::: zone-end
254282

255283
::: zone pivot="programming-language-java"
284+
### TranslationRecognizer
285+
286+
For the ```TranslationRecognizer```, build the authorization token from the resource ID and the Microsoft Entra access token and then use it to create a ```SpeechTranslationConfig``` object.
287+
256288
```Java
257289
String resourceId = "Your Resource ID";
258290
String region = "Your Region";
@@ -264,6 +296,10 @@ SpeechTranslationConfig translationConfig = SpeechTranslationConfig.fromAuthoriz
264296
::: zone-end
265297

266298
::: zone pivot="programming-language-python"
299+
### TranslationRecognizer
300+
301+
For the ```TranslationRecognizer```, build the authorization token from the resource ID and the Microsoft Entra access token and then use it to create a ```SpeechTranslationConfig``` object.
302+
267303
```Python
268304
resourceId = "Your Resource ID"
269305
region = "Your Region"

articles/ai-services/speech-service/includes/how-to/recognize-speech/cpp.md

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -14,14 +14,14 @@ ms.author: eur
1414

1515
To call the Speech service using the Speech SDK, you need to create a [`SpeechConfig`](/cpp/cognitive-services/speech/speechconfig) instance. This class includes information about your subscription, like your key and associated region, endpoint, host, or authorization token.
1616

17-
1. Create an AI Services resource for Speech in the [Azure portal](https://portal.azure.com/#create/Microsoft.CognitiveServicesAIServices). Get the Speech resource key and region.
18-
1. Create a `SpeechConfig` instance by using the following code. Replace `YourSpeechKey` and `YourSpeechRegion` with your Speech resource key and region.
17+
1. Create an AI Services resource for Speech in the [Azure portal](https://portal.azure.com/#create/Microsoft.CognitiveServicesAIServices). Get the Speech resource key and endpoint.
18+
1. Create a `SpeechConfig` instance by using the following code. Replace `YourSpeechKey` and `YourSpeechEndpoint` with your Speech resource key and endpoint.
1919

2020
```cpp
2121
using namespace std;
2222
using namespace Microsoft::CognitiveServices::Speech;
2323

24-
auto speechConfig = SpeechConfig::FromSubscription("YourSpeechKey", "YourSpeechRegion");
24+
auto speechConfig = SpeechConfig::FromEndpoint("YourSpeechEndpoint", "YourSpeechKey");
2525
```
2626
2727
You can initialize `SpeechConfig` in a few other ways:
@@ -99,7 +99,7 @@ switch (result->Reason)
9999
if (cancellation->Reason == CancellationReason::Error) {
100100
cout << "CANCELED: ErrorCode= " << (int)cancellation->ErrorCode << std::endl;
101101
cout << "CANCELED: ErrorDetails=" << cancellation->ErrorDetails << std::endl;
102-
cout << "CANCELED: Did you set the speech resource key and region values?" << std::endl;
102+
cout << "CANCELED: Did you set the speech resource key and endpoint values?" << std::endl;
103103
}
104104
}
105105
break;
@@ -158,7 +158,7 @@ speechRecognizer->Canceled.Connect([&recognitionEnd](const SpeechRecognitionCanc
158158
{
159159
cout << "CANCELED: ErrorCode=" << (int)e.ErrorCode << "\n"
160160
<< "CANCELED: ErrorDetails=" << e.ErrorDetails << "\n"
161-
<< "CANCELED: Did you set the speech resource key and region values?" << std::endl;
161+
<< "CANCELED: Did you set the speech resource key and endpoint values?" << std::endl;
162162

163163
recognitionEnd.set_value(); // Notify to stop recognition.
164164
}
@@ -205,14 +205,14 @@ For a complete code sample, see [Language identification](../../../language-iden
205205
With [custom speech](../../../custom-speech-overview.md), you can upload your own data, test and train a custom model, compare accuracy between models, and deploy a model to a custom endpoint. The following example shows how to set a custom endpoint.
206206
207207
```cpp
208-
auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");
208+
auto speechConfig = SpeechConfig::FromEndpoint("YourServiceEndpoint", "YourSubscriptionKey");
209209
speechConfig->SetEndpointId("YourEndpointId");
210210
auto speechRecognizer = SpeechRecognizer::FromConfig(speechConfig);
211211
```
212212

213213
## Run and use a container
214214

215-
Speech containers provide websocket-based query endpoint APIs that are accessed through the Speech SDK and Speech CLI. By default, the Speech SDK and Speech CLI use the public Speech service. To use the container, you need to change the initialization method. Use a container host URL instead of key and region.
215+
Speech containers provide websocket-based query endpoint APIs that are accessed through the Speech SDK and Speech CLI. By default, the Speech SDK and Speech CLI use the public Speech service. To use the container, you need to change the initialization method. Use a container host URL instead of key and endpoint.
216216

217217
For more information about containers, see Host URLs in [Install and run Speech containers with Docker](../../../speech-container-howto.md#host-urls).
218218

0 commit comments

Comments
 (0)