
Commit e251fca

Merge branch 'main' of https://github.com/MicrosoftDocs/azure-docs-pr into gen2
2 parents 4d0c5e4 + 2262dcb commit e251fca

427 files changed: +2863 additions, −1699 deletions


.openpublishing.redirection.json

Lines changed: 5 additions & 0 deletions
@@ -5079,6 +5079,11 @@
       "source_path_from_root": "/articles/xplat-cli-install.md",
       "redirect_url": "/cli/azure/install-azure-cli",
       "redirect_document_id": false
+    },
+    {
+      "source_path_from_root": "/articles/virtual-network/template-samples.md",
+      "redirect_url": "/samples/browse/?expanded=azure&products=azure-resource-manager&terms=virtual%20network",
+      "redirect_document_id": false
     }
   ]
 }

articles/ai-services/computer-vision/how-to/identity-access-token.md

Lines changed: 29 additions & 39 deletions
@@ -112,61 +112,51 @@ curl -X POST 'https://<client-endpoint>/face/v1.0/identify' \
 
 #### [C#](#tab/csharp)
 
-The following code snippets show you how to use an access token with the [Face SDK for C#](https://www.nuget.org/packages/Microsoft.Azure.CognitiveServices.Vision.Face).
+The following code snippets show you how to use an access token with the [Face SDK for C#](https://aka.ms/azsdk-csharp-face-pkg).
 
-The following class uses an access token to create a **ServiceClientCredentials** object that can be used to authenticate a Face API client object. It automatically adds the access token as a header in every request that the Face client will make.
+The following class uses an access token to create a **HttpPipelineSynchronousPolicy** object that can be used to authenticate a Face API client object. It automatically adds the access token as a header in every request that the Face client will make.
 
 ```csharp
-public class LimitedAccessTokenWithApiKeyClientCredential : ServiceClientCredentials
+public class LimitedAccessTokenPolicy : HttpPipelineSynchronousPolicy
 {
-    /// <summary>
-    /// Creates a new instance of the LimitedAccessTokenWithApiKeyClientCredential class
-    /// </summary>
-    /// <param name="apiKey">API Key for the Face API or CognitiveService endpoint</param>
-    /// <param name="limitedAccessToken">LimitedAccessToken to bypass the limited access program, requires ISV sponsership.</param>
-
-    public LimitedAccessTokenWithApiKeyClientCredential(string apiKey, string limitedAccessToken)
-    {
-        this.ApiKey = apiKey;
-        this.LimitedAccessToken = limitedAccessToken;
+    /// <summary>
+    /// Creates a new instance of the LimitedAccessTokenPolicy class
+    /// </summary>
+    /// <param name="limitedAccessToken">LimitedAccessToken to bypass the limited access program, requires ISV sponsorship.</param>
+    public LimitedAccessTokenPolicy(string limitedAccessToken)
+    {
+        _limitedAccessToken = limitedAccessToken;
     }
 
-    private readonly string ApiKey;
-    private readonly string LimitedAccesToken;
-
-    /// <summary>
-    /// Add the Basic Authentication Header to each outgoing request
-    /// </summary>
-    /// <param name="request">The outgoing request</param>
-    /// <param name="cancellationToken">A token to cancel the operation</param>
-    public override Task ProcessHttpRequestAsync(HttpRequestMessage request, CancellationToken cancellationToken)
-    {
-        if (request == null)
-            throw new ArgumentNullException("request");
-        request.Headers.Add("Ocp-Apim-Subscription-Key", ApiKey);
-        request.Headers.Add("LimitedAccessToken", $"Bearer {LimitedAccesToken}");
-
-        return Task.FromResult<object>(null);
-    }
-}
+    private readonly string _limitedAccessToken;
+
+    /// <summary>
+    /// Add the authentication header to each outgoing request
+    /// </summary>
+    /// <param name="message">The outgoing message</param>
+    public override void OnSendingRequest(HttpMessage message)
+    {
+        message.Request.Headers.Add("LimitedAccessToken", $"Bearer {_limitedAccessToken}");
+    }
+}
 ```
 
 In the client-side application, the helper class can be used like in this example:
 
 ```csharp
-static void Main(string[] args)
-{
+static void Main(string[] args)
+{
     // create Face client object
-    var faceClient = new FaceClient(new LimitedAccessTokenWithApiKeyClientCredential(apiKey: "<client-face-key>", limitedAccessToken: "<token>"));
-
-    faceClient.Endpoint = "https://mytest-eastus2.cognitiveservices.azure.com";
+    var clientOptions = new AzureAIVisionFaceClientOptions();
+    clientOptions.AddPolicy(new LimitedAccessTokenPolicy("<token>"), HttpPipelinePosition.PerCall);
+    FaceClient faceClient = new FaceClient(new Uri("<client-endpoint>"), new AzureKeyCredential("<client-face-key>"), clientOptions);
 
     // use Face client in an API call
-    using (var stream = File.OpenRead("photo.jpg"))
+    using (var stream = File.OpenRead("photo.jpg"))
     {
-        var result = faceClient.Face.DetectWithStreamAsync(stream, detectionModel: "Detection_03", recognitionModel: "Recognition_04", returnFaceId: true).Result;
+        var response = faceClient.Detect(BinaryData.FromStream(stream), FaceDetectionModel.Detection03, FaceRecognitionModel.Recognition04, returnFaceId: true);
 
-        Console.WriteLine(JsonConvert.SerializeObject(result));
+        Console.WriteLine(JsonConvert.SerializeObject(response.Value));
    }
 }
 ```
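The two headers shown in this article's curl tab can be attached from any HTTP client, not only the C# SDK. Here is a minimal sketch in Python with `requests`, reusing the article's `<client-endpoint>`, `<client-face-key>`, and `<token>` placeholders; the detect call mirrors the C# example above, but the snippet is an illustration rather than part of the documented change:

```python
import requests

endpoint = "https://<client-endpoint>"  # placeholder, as in the article

headers = {
    "Ocp-Apim-Subscription-Key": "<client-face-key>",
    # The ISV-issued access token rides in its own header, as in the curl tab.
    "LimitedAccessToken": "Bearer <token>",
    "Content-Type": "application/octet-stream",
}

with open("photo.jpg", "rb") as f:
    response = requests.post(
        f"{endpoint}/face/v1.0/detect",
        params={
            "detectionModel": "detection_03",
            "recognitionModel": "recognition_04",
            "returnFaceId": "true",
        },
        headers=headers,
        data=f,
    )

print(response.json())
```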

articles/ai-services/computer-vision/how-to/specify-detection-model.md

Lines changed: 3 additions & 2 deletions
@@ -73,7 +73,7 @@ var faces = response.Value;
 
 The Face service can extract face data from an image and associate it with a **Person** object through the [Add Person Group Person Face] API. In this API call, you can specify the detection model in the same way as in [Detect].
 
-See the following code example for the .NET client library.
+See the following .NET code example.
 
 ```csharp
 // Create a PersonGroup and add a person with face detected by "detection_03" model
@@ -110,7 +110,7 @@ This code creates a **PersonGroup** with ID `mypersongroupid` and adds a **Person**…
 
 ## Add face to FaceList with specified model
 
-You can also specify a detection model when you add a face to an existing **FaceList** object. See the following code example for the .NET client library.
+You can also specify a detection model when you add a face to an existing **FaceList** object. See the following .NET code example.
 
 ```csharp
 using (var content = new ByteArrayContent(Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(new Dictionary<string, object> { ["name"] = "My face collection", ["recognitionModel"] = "recognition_04" }))))
@@ -139,6 +139,7 @@ In this article, you learned how to specify the detection model to use with different…
 
 * [Face .NET SDK](../quickstarts-sdk/identity-client-library.md?pivots=programming-language-csharp%253fpivots%253dprogramming-language-csharp)
 * [Face Python SDK](../quickstarts-sdk/identity-client-library.md?pivots=programming-language-python%253fpivots%253dprogramming-language-python)
+* [Face Java SDK](../quickstarts-sdk/identity-client-library.md?pivots=programming-language-java%253fpivots%253dprogramming-language-java)
 * [Face JavaScript SDK](../quickstarts-sdk/identity-client-library.md?pivots=programming-language-javascript%253fpivots%253dprogramming-language-javascript)
 
 [Detect]: /rest/api/face/face-detection-operations/detect
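For readers working against the REST API directly rather than the .NET client, the `detectionModel` option discussed in the hunks above is passed as a query parameter. A minimal Python sketch, with placeholder resource names and IDs (illustrative only, not part of the commit):

```python
import requests

# Illustrative placeholders; substitute your own resource, group, and person IDs.
endpoint = "https://<your-resource>.cognitiveservices.azure.com"
person_group_id = "mypersongroupid"
person_id = "<person-id-returned-by-create-person>"

with open("photo.jpg", "rb") as f:
    response = requests.post(
        f"{endpoint}/face/v1.0/persongroups/{person_group_id}/persons/{person_id}/persistedFaces",
        # detectionModel is a query parameter here, just as in Detect.
        params={"detectionModel": "detection_03"},
        headers={
            "Ocp-Apim-Subscription-Key": "<your-face-key>",
            "Content-Type": "application/octet-stream",
        },
        data=f,
    )

print(response.json())  # contains the persistedFaceId on success
```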

articles/ai-services/computer-vision/how-to/specify-recognition-model.md

Lines changed: 1 addition & 0 deletions
@@ -132,6 +132,7 @@ In this article, you learned how to specify the recognition model to use with different…
 
 * [Face .NET SDK](../quickstarts-sdk/identity-client-library.md?pivots=programming-language-csharp%253fpivots%253dprogramming-language-csharp)
 * [Face Python SDK](../quickstarts-sdk/identity-client-library.md?pivots=programming-language-python%253fpivots%253dprogramming-language-python)
+* [Face Java SDK](../quickstarts-sdk/identity-client-library.md?pivots=programming-language-java%253fpivots%253dprogramming-language-java)
 * [Face JavaScript SDK](../quickstarts-sdk/identity-client-library.md?pivots=programming-language-javascript%253fpivots%253dprogramming-language-javascript)
 
 [Detect]: /rest/api/face/face-detection-operations/detect

articles/ai-services/speech-service/batch-transcription-create.md

Lines changed: 5 additions & 1 deletion
@@ -108,6 +108,10 @@ You can query the status of your transcriptions with the [Transcriptions_Get](/rest/api/speechtotext/transcriptions/get)…
 Call [Transcriptions_Delete](/rest/api/speechtotext/transcriptions/delete)
 regularly from the service, after you retrieve the results. Alternatively, set the `timeToLive` property to ensure the eventual deletion of the results.
 
+> [!TIP]
+> You can also try the Batch Transcription API using Python on [GitHub](https://github.com/Azure-Samples/cognitive-services-speech-sdk/blob/master/samples/batch/python/python-client/main.py).
+
+
 ::: zone-end
 
 ::: zone pivot="speech-cli"
@@ -168,7 +172,7 @@ spx help batch transcription
 
 ::: zone pivot="rest-api"
 
-Here are some property options that you can use to configure a transcription when you call the [Transcriptions_Create](/rest/api/speechtotext/transcriptions/create) operation.
+Here are some property options to configure a transcription when you call the [Transcriptions_Create](/rest/api/speechtotext/transcriptions/create) operation. You can find more examples on the same page, such as [creating a transcription with language identification](/rest/api/speechtotext/transcriptions/create/#create-a-transcription-with-language-identification).
 
 | Property | Description |
 |----------|-------------|
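To make those property options concrete, here is a minimal sketch of a Transcriptions_Create request in Python against the v3.1 REST path; the region, key, and audio URL are placeholders, and the properties shown are a small illustrative subset:

```python
import requests

region = "<your-region>"    # placeholder
key = "<your-speech-key>"   # placeholder

body = {
    "displayName": "My transcription",
    "locale": "en-US",
    # Audio the service can reach, such as a blob SAS URL (placeholder here).
    "contentUrls": ["https://<storage-account>.blob.core.windows.net/audio/sample.wav"],
    "properties": {
        "wordLevelTimestampsEnabled": True,
        "punctuationMode": "DictatedAndAutomatic",
        "timeToLive": "PT12H",  # have the service delete results after 12 hours
    },
}

response = requests.post(
    f"https://{region}.api.cognitive.microsoft.com/speechtotext/v3.1/transcriptions",
    json=body,
    headers={"Ocp-Apim-Subscription-Key": key},
)
response.raise_for_status()

# The response includes a self URL that you can poll with Transcriptions_Get.
print(response.json()["self"])
```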

articles/ai-services/speech-service/embedded-speech.md

Lines changed: 6 additions & 6 deletions
@@ -182,12 +182,12 @@ var embeddedSpeechConfig = EmbeddedSpeechConfig.FromPaths(paths.ToArray());
 // For speech to text
 embeddedSpeechConfig.SetSpeechRecognitionModel(
     "Microsoft Speech Recognizer en-US FP Model V8",
-    Environment.GetEnvironmentVariable("MODEL_KEY"));
+    Environment.GetEnvironmentVariable("EMBEDDED_SPEECH_MODEL_LICENSE"));
 
 // For text to speech
 embeddedSpeechConfig.SetSpeechSynthesisVoice(
     "Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)",
-    Environment.GetEnvironmentVariable("VOICE_KEY"));
+    Environment.GetEnvironmentVariable("EMBEDDED_SPEECH_MODEL_LICENSE"));
 embeddedSpeechConfig.SetSpeechSynthesisOutputFormat(SpeechSynthesisOutputFormat.Riff24Khz16BitMonoPcm);
 ```
 ::: zone-end
@@ -207,12 +207,12 @@ auto embeddedSpeechConfig = EmbeddedSpeechConfig::FromPaths(paths);
 // For speech to text
 embeddedSpeechConfig->SetSpeechRecognitionModel((
     "Microsoft Speech Recognizer en-US FP Model V8",
-    GetEnvironmentVariable("MODEL_KEY"));
+    GetEnvironmentVariable("EMBEDDED_SPEECH_MODEL_LICENSE"));
 
 // For text to speech
 embeddedSpeechConfig->SetSpeechSynthesisVoice(
     "Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)",
-    GetEnvironmentVariable("VOICE_KEY"));
+    GetEnvironmentVariable("EMBEDDED_SPEECH_MODEL_LICENSE"));
 embeddedSpeechConfig->SetSpeechSynthesisOutputFormat(SpeechSynthesisOutputFormat::Riff24Khz16BitMonoPcm);
 ```
 
@@ -230,12 +230,12 @@ var embeddedSpeechConfig = EmbeddedSpeechConfig.fromPaths(paths);
 // For speech to text
 embeddedSpeechConfig.setSpeechRecognitionModel(
     "Microsoft Speech Recognizer en-US FP Model V8",
-    System.getenv("MODEL_KEY"));
+    System.getenv("EMBEDDED_SPEECH_MODEL_LICENSE"));
 
 // For text to speech
 embeddedSpeechConfig.setSpeechSynthesisVoice(
     "Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)",
-    System.getenv("VOICE_KEY"));
+    System.getenv("EMBEDDED_SPEECH_MODEL_LICENSE"));
 embeddedSpeechConfig.setSpeechSynthesisOutputFormat(SpeechSynthesisOutputFormat.Riff24Khz16BitMonoPcm);
 ```
 

articles/ai-services/speech-service/includes/how-to/speech-synthesis/python.md

Lines changed: 17 additions & 7 deletions
@@ -36,6 +36,15 @@ The voice that speaks is determined in order of priority as follows:
 - If both `SpeechSynthesisVoiceName` and `SpeechSynthesisLanguage` are set, the `SpeechSynthesisLanguage` setting is ignored. The voice that you specify by using `SpeechSynthesisVoiceName` speaks.
 - If the voice element is set by using [Speech Synthesis Markup Language (SSML)](../../../speech-synthesis-markup.md), the `SpeechSynthesisVoiceName` and `SpeechSynthesisLanguage` settings are ignored.
 
+In summary, the order of priority can be described as:
+
+| `SpeechSynthesisVoiceName` | `SpeechSynthesisLanguage` | SSML | Outcome |
+|:--------------------------:|:-------------------------:|:----:|---------|
+|                            |                           |      | Default voice for `en-US` speaks |
+|                            | ✔️                        |      | Default voice for specified locale speaks. |
+| ✔️                         | ✔️                        |      | The voice that you specify by using `SpeechSynthesisVoiceName` speaks. |
+| ✔️                         | ✔️                        | ✔️   | The voice that you specify by using SSML speaks. |
+
 ## Synthesize speech to a file
 
 Create a [SpeechSynthesizer](/python/api/azure-cognitiveservices-speech/azure.cognitiveservices.speech.speechsynthesizer) object. This object runs text to speech conversions and outputs to speakers, files, or other output streams. `SpeechSynthesizer` accepts as parameters:
@@ -53,7 +62,8 @@ Create a [SpeechSynthesizer](/python/api/azure-cognitiveservices-speech/azure.co…
 
 ```python
 speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)
-speech_synthesizer.speak_text_async("I'm excited to try text to speech")
+speech_synthesis_result = speech_synthesizer.speak_text_async("I'm excited to try text to speech").get()
+
 ```
 
 When you run the program, it creates a synthesized *.wav* file, which is written to the location that you specify. This result is a good example of the most basic usage. Next, you can customize output and handle the output response as an in-memory stream for working with custom scenarios.
@@ -85,8 +95,8 @@ In this example, use the `AudioDataStream` constructor to get a stream from the…
 
 ```python
 speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=None)
-result = speech_synthesizer.speak_text_async("I'm excited to try text to speech").get()
-stream = speechsdk.AudioDataStream(result)
+speech_synthesis_result = speech_synthesizer.speak_text_async("I'm excited to try text to speech").get()
+stream = speechsdk.AudioDataStream(speech_synthesis_result)
 ```
 
 At this point, you can implement any custom behavior by using the resulting `stream` object.
@@ -112,8 +122,8 @@ This example specifies the high-fidelity RIFF format `Riff24Khz16BitMonoPcm` by…
 speech_config.set_speech_synthesis_output_format(speechsdk.SpeechSynthesisOutputFormat.Riff24Khz16BitMonoPcm)
 speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=None)
 
-result = speech_synthesizer.speak_text_async("I'm excited to try text to speech").get()
-stream = speechsdk.AudioDataStream(result)
+speech_synthesis_result = speech_synthesizer.speak_text_async("I'm excited to try text to speech").get()
+stream = speechsdk.AudioDataStream(speech_synthesis_result)
 stream.save_to_wav_file("path/to/write/file.wav")
 ```
 
@@ -146,9 +156,9 @@ To start using SSML for customization, make a minor change that switches the voi…
 speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=None)
 
 ssml_string = open("ssml.xml", "r").read()
-result = speech_synthesizer.speak_ssml_async(ssml_string).get()
+speech_synthesis_result = speech_synthesizer.speak_ssml_async(ssml_string).get()
 
-stream = speechsdk.AudioDataStream(result)
+stream = speechsdk.AudioDataStream(speech_synthesis_result)
 stream.save_to_wav_file("path/to/write/file.wav")
 ```
 
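The priority rules summarized in the new table are easy to confirm with the Python SDK: set both properties and the voice name wins. A small sketch with placeholder credentials (illustrative, not part of the commit):

```python
import azure.cognitiveservices.speech as speechsdk

# Placeholder credentials.
speech_config = speechsdk.SpeechConfig(subscription="<your-speech-key>", region="<your-region>")

# Language alone would select the default voice for the locale...
speech_config.speech_synthesis_language = "de-DE"
# ...but a voice name takes priority, so Jenny (en-US) speaks here.
speech_config.speech_synthesis_voice_name = "en-US-JennyNeural"

speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=None)
speech_synthesis_result = speech_synthesizer.speak_text_async("Voice priority demo.").get()

stream = speechsdk.AudioDataStream(speech_synthesis_result)
stream.save_to_wav_file("voice-priority-demo.wav")
```
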
articles/app-service/environment/index.yml

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@ metadata:
   description: Host fully isolated websites in a secure and scalable environment. Find quickstarts, tutorials, and read about concepts to help you succeed with App Service Environment.
   ms.topic: landing-page
   ms.author: mijacobs
-  ms.service: app-service
+  ms.service: azure-app-service
   ms.date: 11/15/2021
 
 landingContent:

articles/app-service/environment/side-by-side-migrate.md

Lines changed: 7 additions & 1 deletion
@@ -223,6 +223,8 @@ Once you're ready to redirect traffic, you can complete the final step of the migration…
 > [!NOTE]
 > It's important to complete this step as soon as possible. When your App Service Environment is in the hybrid state, it's unable to receive platform upgrades and security patches, which makes it more vulnerable to instability and security threats.
 >
+> **You have 14 days to complete this step. After 14 days, the platform will automatically complete the migration and delete your old environment. If you need more time, you can open a support case to discuss your options.**
+>
 
 If you discover any issues with your new App Service Environment v3, don't run the command to redirect customer traffic. This command also initiates the deletion of your App Service Environment v2. If you find an issue, contact support.
 
@@ -458,7 +460,11 @@ az rest --method get --uri "${ASE_ID}?api-version=2022-03-01" --query properties
 
 ### 11. Redirect customer traffic, validate your App Service Environment v3, and complete migration
 
-This step is your opportunity to test and validate your new App Service Environment v3.
+This step is your opportunity to test and validate your new App Service Environment v3.
+
+> [!IMPORTANT]
+> You have 14 days to complete this step. After 14 days, the platform will automatically complete the migration and delete your old environment. If you need more time, you can open a support case to discuss your options.
+>
 
 Once you confirm your apps are working as expected, you can finalize the migration by running the following command. This command also deletes your old environment.
 
articles/app-service/includes/configure-azure-storage/azure-storage-linux-container-pivot.md

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 ---
 author: msangapu-msft
-ms.service: app-service
+ms.service: azure-app-service
 ms.custom: linux-related-content
 ms.topic: include
 ms.date: 01/05/2024
