Skip to content

Commit 39072c4

Browse files
rhureycqfu96
authored and committed
Switch docs to FromEndpoint (Part 1)
1 parent c5f2bb2 commit 39072c4

File tree

6 files changed

+70
-110
lines changed

6 files changed

+70
-110
lines changed

articles/ai-services/speech-service/audio-processing-speech-sdk.md

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ This sample shows how to use MAS with all default enhancement options on input f
2828
### [C#](#tab/csharp)
2929

3030
```csharp
31-
var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
31+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
3232

3333
var audioProcessingOptions = AudioProcessingOptions.Create(AudioProcessingConstants.AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT);
3434
var audioInput = AudioConfig.FromDefaultMicrophoneInput(audioProcessingOptions);
@@ -39,7 +39,7 @@ var recognizer = new SpeechRecognizer(speechConfig, audioInput);
3939
### [C++](#tab/cpp)
4040

4141
```cpp
42-
auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");
42+
auto speechConfig = SpeechConfig::FromEndpoint("YourSpeechEndpoint", "YourSpeechKey");
4343

4444
auto audioProcessingOptions = AudioProcessingOptions::Create(AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT);
4545
auto audioInput = AudioConfig::FromDefaultMicrophoneInput(audioProcessingOptions);
@@ -50,7 +50,7 @@ auto recognizer = SpeechRecognizer::FromConfig(speechConfig, audioInput);
5050
### [Java](#tab/java)
5151

5252
```java
53-
SpeechConfig speechConfig = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
53+
SpeechConfig speechConfig = SpeechConfig.fromEndpoint(new java.net.URI("YourSpeechEndpoint"), "YourSpeechKey");
5454

5555
AudioProcessingOptions audioProcessingOptions = AudioProcessingOptions.create(AudioProcessingConstants.AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT);
5656
AudioConfig audioInput = AudioConfig.fromDefaultMicrophoneInput(audioProcessingOptions);
@@ -69,7 +69,7 @@ This sample shows how to use MAS with a predefined microphone geometry on a spec
6969
### [C#](#tab/csharp)
7070

7171
```csharp
72-
var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
72+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
7373

7474
var audioProcessingOptions = AudioProcessingOptions.Create(AudioProcessingConstants.AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT, PresetMicrophoneArrayGeometry.Linear2);
7575
var audioInput = AudioConfig.FromMicrophoneInput("hw:0,1", audioProcessingOptions);
@@ -80,7 +80,7 @@ var recognizer = new SpeechRecognizer(speechConfig, audioInput);
8080
### [C++](#tab/cpp)
8181

8282
```cpp
83-
auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");
83+
auto speechConfig = SpeechConfig::FromEndpoint("YourSpeechEndpoint", "YourSpeechKey");
8484

8585
auto audioProcessingOptions = AudioProcessingOptions::Create(AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT, PresetMicrophoneArrayGeometry::Linear2);
8686
auto audioInput = AudioConfig::FromMicrophoneInput("hw:0,1", audioProcessingOptions);
@@ -91,7 +91,7 @@ auto recognizer = SpeechRecognizer::FromConfig(speechConfig, audioInput);
9191
### [Java](#tab/java)
9292

9393
```java
94-
SpeechConfig speechConfig = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
94+
SpeechConfig speechConfig = SpeechConfig.fromEndpoint(new java.net.URI("YourSpeechEndpoint"), "YourSpeechKey");
9595

9696
AudioProcessingOptions audioProcessingOptions = AudioProcessingOptions.create(AudioProcessingConstants.AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT, PresetMicrophoneArrayGeometry.Linear2);
9797
AudioConfig audioInput = AudioConfig.fromMicrophoneInput("hw:0,1", audioProcessingOptions);
@@ -110,7 +110,7 @@ This sample shows how to use MAS with a custom microphone geometry on a specifie
110110
### [C#](#tab/csharp)
111111

112112
```csharp
113-
var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
113+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
114114

115115
MicrophoneCoordinates[] microphoneCoordinates = new MicrophoneCoordinates[7]
116116
{
@@ -132,7 +132,7 @@ var recognizer = new SpeechRecognizer(speechConfig, audioInput);
132132
### [C++](#tab/cpp)
133133

134134
```cpp
135-
auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");
135+
auto speechConfig = SpeechConfig::FromEndpoint("YourSpeechEndpoint", "YourSpeechKey");
136136

137137
MicrophoneArrayGeometry microphoneArrayGeometry
138138
{
@@ -148,7 +148,7 @@ auto recognizer = SpeechRecognizer::FromConfig(speechConfig, audioInput);
148148
### [Java](#tab/java)
149149

150150
```java
151-
SpeechConfig speechConfig = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
151+
SpeechConfig speechConfig = SpeechConfig.fromEndpoint(new java.net.URI("YourSpeechEndpoint"), "YourSpeechKey");
152152

153153
MicrophoneCoordinates[] microphoneCoordinates = new MicrophoneCoordinates[7];
154154
microphoneCoordinates[0] = new MicrophoneCoordinates(0, 0, 0);
@@ -177,7 +177,7 @@ In this example:
177177
### [C#](#tab/csharp)
178178

179179
```csharp
180-
var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
180+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
181181

182182
var audioProcessingOptions = AudioProcessingOptions.Create(AudioProcessingConstants.AUDIO_INPUT_PROCESSING_DISABLE_ECHO_CANCELLATION | AudioProcessingConstants.AUDIO_INPUT_PROCESSING_DISABLE_NOISE_SUPPRESSION | AudioProcessingConstants.AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT);
183183
var audioInput = AudioConfig.FromDefaultMicrophoneInput(audioProcessingOptions);
@@ -188,7 +188,7 @@ var recognizer = new SpeechRecognizer(speechConfig, audioInput);
188188
### [C++](#tab/cpp)
189189

190190
```cpp
191-
auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");
191+
auto speechConfig = SpeechConfig::FromEndpoint("YourSpeechEndpoint", "YourSpeechKey");
192192

193193
auto audioProcessingOptions = AudioProcessingOptions::Create(AUDIO_INPUT_PROCESSING_DISABLE_ECHO_CANCELLATION | AUDIO_INPUT_PROCESSING_DISABLE_NOISE_SUPPRESSION | AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT);
194194
auto audioInput = AudioConfig::FromDefaultMicrophoneInput(audioProcessingOptions);
@@ -199,7 +199,7 @@ auto recognizer = SpeechRecognizer::FromConfig(speechConfig, audioInput);
199199
### [Java](#tab/java)
200200

201201
```java
202-
SpeechConfig speechConfig = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
202+
SpeechConfig speechConfig = SpeechConfig.fromEndpoint(new java.net.URI("YourSpeechEndpoint"), "YourSpeechKey");
203203

204204
AudioProcessingOptions audioProcessingOptions = AudioProcessingOptions.create(AudioProcessingConstants.AUDIO_INPUT_PROCESSING_DISABLE_ECHO_CANCELLATION | AudioProcessingConstants.AUDIO_INPUT_PROCESSING_DISABLE_NOISE_SUPPRESSION | AudioProcessingConstants.AUDIO_INPUT_PROCESSING_ENABLE_DEFAULT);
205205
AudioConfig audioInput = AudioConfig.fromDefaultMicrophoneInput(audioProcessingOptions);
@@ -221,7 +221,7 @@ In the following code example, the start angle is set to 70 degrees and the end
221221
### [C#](#tab/csharp)
222222

223223
```csharp
224-
var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
224+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
225225

226226
MicrophoneCoordinates[] microphoneCoordinates = new MicrophoneCoordinates[4]
227227
{
@@ -241,7 +241,7 @@ var recognizer = new SpeechRecognizer(speechConfig, audioInput);
241241
### [C++](#tab/cpp)
242242

243243
```cpp
244-
auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");
244+
auto speechConfig = SpeechConfig::FromEndpoint("YourSpeechEndpoint", "YourSpeechKey");
245245

246246
MicrophoneArrayGeometry microphoneArrayGeometry
247247
{
@@ -260,7 +260,7 @@ auto recognizer = SpeechRecognizer::FromConfig(speechConfig, audioInput);
260260
### [Java](#tab/java)
261261

262262
```java
263-
SpeechConfig speechConfig = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
263+
SpeechConfig speechConfig = SpeechConfig.fromEndpoint(new java.net.URI("YourSpeechEndpoint"), "YourSpeechKey");
264264

265265
MicrophoneCoordinates[] microphoneCoordinates = new MicrophoneCoordinates[4];
266266
microphoneCoordinates[0] = new MicrophoneCoordinates(-60, 0, 0);

articles/ai-services/speech-service/embedded-speech.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,7 @@ All text to speech locales [here](language-support.md?tabs=tts) (except fa-IR, P
166166
167167
## Embedded speech configuration
168168
169-
For cloud connected applications, as shown in most Speech SDK samples, you use the `SpeechConfig` object with an API key and region. For embedded speech, you don't use an AI Services resource for Speech. Instead of a cloud resource, you use the [models and voices](#models-and-voices) that you download to your local device.
169+
For cloud-connected applications, as shown in most Speech SDK samples, you use the `SpeechConfig` object with an API key and endpoint. For embedded speech, you don't use an AI Services resource for Speech. Instead of a cloud resource, you use the [models and voices](#models-and-voices) that you download to your local device.
170170
171171
Use the `EmbeddedSpeechConfig` object to set the location of the models or voices. If your application is used for both speech to text and text to speech, you can use the same `EmbeddedSpeechConfig` object to set the location of the models and voices.
172172

articles/ai-services/speech-service/includes/how-to/recognize-speech/cpp.md

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -14,14 +14,14 @@ ms.author: eur
1414

1515
To call the Speech service using the Speech SDK, you need to create a [`SpeechConfig`](/cpp/cognitive-services/speech/speechconfig) instance. This class includes information about your subscription, like your key and associated region, endpoint, host, or authorization token.
1616

17-
1. Create an AI Services resource for Speech in the [Azure portal](https://portal.azure.com/#create/Microsoft.CognitiveServicesAIServices). Get the Speech resource key and region.
18-
1. Create a `SpeechConfig` instance by using the following code. Replace `YourSpeechKey` and `YourSpeechRegion` with your Speech resource key and region.
17+
1. Create an AI Services resource for Speech in the [Azure portal](https://portal.azure.com/#create/Microsoft.CognitiveServicesAIServices). Get the Speech resource key and endpoint.
18+
1. Create a `SpeechConfig` instance by using the following code. Replace `YourSpeechKey` and `YourSpeechEndpoint` with your Speech resource key and endpoint.
1919

2020
```cpp
2121
using namespace std;
2222
using namespace Microsoft::CognitiveServices::Speech;
2323

24-
auto speechConfig = SpeechConfig::FromSubscription("YourSpeechKey", "YourSpeechRegion");
24+
auto speechConfig = SpeechConfig::FromEndpoint("YourSpeechEndpoint", "YourSpeechKey");
2525
```
2626
2727
You can initialize `SpeechConfig` in a few other ways:
@@ -99,7 +99,7 @@ switch (result->Reason)
9999
if (cancellation->Reason == CancellationReason::Error) {
100100
cout << "CANCELED: ErrorCode= " << (int)cancellation->ErrorCode << std::endl;
101101
cout << "CANCELED: ErrorDetails=" << cancellation->ErrorDetails << std::endl;
102-
cout << "CANCELED: Did you set the speech resource key and region values?" << std::endl;
102+
cout << "CANCELED: Did you set the speech resource key and endpoint values?" << std::endl;
103103
}
104104
}
105105
break;
@@ -158,7 +158,7 @@ speechRecognizer->Canceled.Connect([&recognitionEnd](const SpeechRecognitionCanc
158158
{
159159
cout << "CANCELED: ErrorCode=" << (int)e.ErrorCode << "\n"
160160
<< "CANCELED: ErrorDetails=" << e.ErrorDetails << "\n"
161-
<< "CANCELED: Did you set the speech resource key and region values?" << std::endl;
161+
<< "CANCELED: Did you set the speech resource key and endpoint values?" << std::endl;
162162

163163
recognitionEnd.set_value(); // Notify to stop recognition.
164164
}
@@ -205,14 +205,14 @@ For a complete code sample, see [Language identification](../../../language-iden
205205
With [custom speech](../../../custom-speech-overview.md), you can upload your own data, test and train a custom model, compare accuracy between models, and deploy a model to a custom endpoint. The following example shows how to set a custom endpoint.
206206
207207
```cpp
208-
auto speechConfig = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");
208+
auto speechConfig = SpeechConfig::FromEndpoint("YourSpeechEndpoint", "YourSpeechKey");
209209
speechConfig->SetEndpointId("YourEndpointId");
210210
auto speechRecognizer = SpeechRecognizer::FromConfig(speechConfig);
211211
```
212212

213213
## Run and use a container
214214

215-
Speech containers provide websocket-based query endpoint APIs that are accessed through the Speech SDK and Speech CLI. By default, the Speech SDK and Speech CLI use the public Speech service. To use the container, you need to change the initialization method. Use a container host URL instead of key and region.
215+
Speech containers provide websocket-based query endpoint APIs that are accessed through the Speech SDK and Speech CLI. By default, the Speech SDK and Speech CLI use the public Speech service. To use the container, you need to change the initialization method. Use a container host URL instead of a key and endpoint.
216216

217217
For more information about containers, see Host URLs in [Install and run Speech containers with Docker](../../../speech-container-howto.md#host-urls).
218218

articles/ai-services/speech-service/includes/how-to/recognize-speech/csharp.md

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,8 @@ ms.custom: devx-track-csharp
1515

1616
To call the Speech service by using the Speech SDK, you need to create a [`SpeechConfig`](/dotnet/api/microsoft.cognitiveservices.speech.speechconfig) instance. This class includes information about your subscription, like your key and associated region, endpoint, host, or authorization token.
1717

18-
1. Create an AI Services resource for Speech in the [Azure portal](https://portal.azure.com/#create/Microsoft.CognitiveServicesAIServices). Get the Speech resource key and region.
19-
1. Create a `SpeechConfig` instance by using the following code. Replace `YourSpeechKey` and `YourSpeechRegion` with your Speech resource key and region.
18+
1. Create an AI Services resource for Speech in the [Azure portal](https://portal.azure.com/#create/Microsoft.CognitiveServicesAIServices). Get the Speech resource key and endpoint.
19+
1. Create a `SpeechConfig` instance by using the following code. Replace `YourSpeechKey` and `YourSpeechEndpoint` with your Speech resource key and endpoint.
2020

2121
```csharp
2222
using System;
@@ -29,7 +29,7 @@ class Program
2929
{
3030
async static Task Main(string[] args)
3131
{
32-
var speechConfig = SpeechConfig.FromSubscription("YourSpeechKey", "YourSpeechRegion");
32+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
3333
}
3434
}
3535
```
@@ -68,7 +68,7 @@ class Program
6868

6969
async static Task Main(string[] args)
7070
{
71-
var speechConfig = SpeechConfig.FromSubscription("YourSpeechKey", "YourSpeechRegion");
71+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
7272
await FromMic(speechConfig);
7373
}
7474
}
@@ -100,7 +100,7 @@ class Program
100100

101101
async static Task Main(string[] args)
102102
{
103-
var speechConfig = SpeechConfig.FromSubscription("YourSpeechKey", "YourSpeechRegion");
103+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
104104
await FromFile(speechConfig);
105105
}
106106
}
@@ -143,7 +143,7 @@ class Program
143143

144144
async static Task Main(string[] args)
145145
{
146-
var speechConfig = SpeechConfig.FromSubscription("YourSpeechKey", "YourSpeechRegion");
146+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
147147
await FromStream(speechConfig);
148148
}
149149
}
@@ -176,7 +176,7 @@ switch (speechRecognitionResult.Reason)
176176
{
177177
Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
178178
Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
179-
Console.WriteLine($"CANCELED: Did you set the speech resource key and region values?");
179+
Console.WriteLine($"CANCELED: Did you set the speech resource key and endpoint values?");
180180
}
181181
break;
182182
}
@@ -234,7 +234,7 @@ speechRecognizer.Canceled += (s, e) =>
234234
{
235235
Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
236236
Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
237-
Console.WriteLine($"CANCELED: Did you set the speech resource key and region values?");
237+
Console.WriteLine($"CANCELED: Did you set the speech resource key and endpoint values?");
238238
}
239239

240240
stopRecognition.TrySetResult(0);
@@ -280,7 +280,7 @@ For a complete code sample, see [Language identification](../../../language-iden
280280
With [custom speech](../../../custom-speech-overview.md), you can upload your own data, test and train a custom model, compare accuracy between models, and deploy a model to a custom endpoint. The following example shows how to set a custom endpoint.
281281

282282
```csharp
283-
var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
283+
var speechConfig = SpeechConfig.FromEndpoint(new Uri("YourSpeechEndpoint"), "YourSpeechKey");
284284
speechConfig.EndpointId = "YourEndpointId";
285285
var speechRecognizer = new SpeechRecognizer(speechConfig);
286286
```

0 commit comments

Comments
 (0)