
Commit 2c45abe

Merge pull request #44053 from dotnet/main
Merge main into live
2 parents ee1e334 + ffb3a5c commit 2c45abe

18 files changed, +247 -70 lines changed

.devcontainer/devcontainer.json

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+{
+    "name": "C# (.NET)",
+    "image": "mcr.microsoft.com/devcontainers/dotnet:latest"
+
+    // Features to add to the dev container. More info: https://containers.dev/features.
+    // "features": {},
+
+    // Use 'forwardPorts' to make a list of ports inside the container available locally.
+    // "forwardPorts": [5000, 5001],
+    // "portsAttributes": {
+    //     "5001": {
+    //         "protocol": "https"
+    //     }
+    // }
+
+    // Use 'postCreateCommand' to run commands after the container is created.
+    // "postCreateCommand": "dotnet restore",
+
+    // Configure tool-specific properties.
+    // "customizations": {},
+}

docs/ai/azure-ai-for-dotnet-developers.md

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 ---
 title: Develop .NET apps that use Azure AI services
 description: This article provides an organized list of resources about Azure AI scenarios for .NET developers, including documentation and code samples.
-ms.date: 05/17/2024
+ms.date: 12/19/2024
 ms.topic: overview
 ms.custom: devx-track-dotnet, devx-track-dotnet-ai
 ---

docs/ai/conceptual/embeddings.md

Lines changed: 2 additions & 2 deletions
@@ -3,7 +3,7 @@ title: "How Embeddings Extend Your AI Model's Reach"
 description: "Learn how embeddings extend the limits and capabilities of AI models in .NET."
 author: catbutler
 ms.topic: concept-article #Don't change.
-ms.date: 05/14/2024
+ms.date: 12/19/2024
 
 #customer intent: As a .NET developer, I want to understand how embeddings extend LLM limits and capabilities in .NET so that I have more semantic context and better outcomes for my AI apps.
 
@@ -34,7 +34,7 @@ Use embeddings to help a model understand the meaning and context of text, and t
 
 Use audio embeddings to process audio files or inputs in your app.
 
-For example, [Speech service](/azure/ai-services/speech-service/) supports a range of audio embeddings, including [speech to text](/azure/ai-services/speech-service/speech-to-text) and [text to speech](/azure/ai-services/speech-service/text-to-speech). You can process audio in real-time or in batches.
+For example, [Azure AI Speech](/azure/ai-services/speech-service/) supports a range of audio embeddings, including [speech to text](/azure/ai-services/speech-service/speech-to-text) and [text to speech](/azure/ai-services/speech-service/text-to-speech). You can process audio in real-time or in batches.
 
 ### Turn text into images or images into text
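The updated paragraph above points readers at Azure AI Speech for audio processing. As a point of reference only (not part of this commit), here's a minimal C# sketch of a single-shot speech to text call with the Speech SDK (`Microsoft.CognitiveServices.Speech`); the `SPEECH_KEY` and `SPEECH_REGION` environment variable names are assumptions for illustration.

```csharp
using Microsoft.CognitiveServices.Speech;

// Assumes an Azure AI Speech resource; SPEECH_KEY and SPEECH_REGION are illustrative names.
var config = SpeechConfig.FromSubscription(
    Environment.GetEnvironmentVariable("SPEECH_KEY")!,
    Environment.GetEnvironmentVariable("SPEECH_REGION")!);

// Recognize a single utterance from the default microphone.
using var recognizer = new SpeechRecognizer(config);
SpeechRecognitionResult result = await recognizer.RecognizeOnceAsync();

Console.WriteLine($"Recognized: {result.Text}");
```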

docs/ai/conceptual/understanding-openai-functions.md

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ title: "Understanding OpenAI Function Calling"
 description: "Understand how function calling enables you to integrate external tools with your OpenAI application."
 author: haywoodsloan
 ms.topic: concept-article
-ms.date: 05/14/2024
+ms.date: 12/19/2024
 
 #customer intent: As a .NET developer, I want to understand OpenAI function calling so that I can integrate external tools with AI completions in my .NET project.
 
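The article touched by this diff covers function calling, which lets a model request calls into your own .NET code. For orientation only (not part of this commit), the following sketch shows one way to wire a local method into chat completions using the Microsoft.Extensions.AI preview packages referenced elsewhere in this commit; the endpoint, deployment name, and `GetWeather` helper are hypothetical, and the builder pattern should be treated as a sketch against those preview APIs.

```csharp
using System.ComponentModel;
using Azure.AI.OpenAI;
using Azure.Identity;
using Microsoft.Extensions.AI;

// A hypothetical local tool the model can call; name and behavior are illustrative.
[Description("Gets the current weather for a city.")]
static string GetWeather(string city) => $"It is 72 degrees and sunny in {city}.";

IChatClient innerClient =
    new AzureOpenAIClient(
        new Uri("YOUR_MODEL_ENDPOINT"),
        new DefaultAzureCredential()).AsChatClient("YOUR_MODEL_DEPLOYMENT_NAME");

// UseFunctionInvocation lets the pipeline invoke GetWeather automatically when the
// model requests it, then feed the result back to the model for a final answer.
IChatClient client = new ChatClientBuilder(innerClient)
    .UseFunctionInvocation()
    .Build();

ChatOptions options = new() { Tools = [AIFunctionFactory.Create(GetWeather)] };

ChatCompletion completion = await client.CompleteAsync("What's the weather in Seattle?", options);
Console.WriteLine(completion.Message);
```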
docs/ai/conceptual/understanding-tokens.md

Lines changed: 3 additions & 7 deletions
@@ -3,15 +3,15 @@ title: "Understanding tokens"
 description: "Understand how large language models (LLMs) use tokens to analyze semantic relationships and generate natural language outputs"
 author: haywoodsloan
 ms.topic: concept-article
-ms.date: 05/14/2024
+ms.date: 12/19/2024
 
 #customer intent: As a .NET developer, I want understand how large language models (LLMs) use tokens so I can add semantic analysis and text generation capabilities to my .NET projects.
 
 ---
 
 # Understand tokens
 
-Tokens are words, character sets, or combinations of words and punctuation that are used by large language models (LLMs) to decompose text into. Tokenization is the first step in training. The LLM analyzes the semantic relationships between tokens, such as how commonly they're used together or whether they're used in similar contexts. After training, the LLM uses those patterns and relationships to generate a sequence of output tokens based on the input sequence.
+Tokens are words, character sets, or combinations of words and punctuation that are generated by large language models (LLMs) when they decompose text. Tokenization is the first step in training. The LLM analyzes the semantic relationships between tokens, such as how commonly they're used together or whether they're used in similar contexts. After training, the LLM uses those patterns and relationships to generate a sequence of output tokens based on the input sequence.
 
 ## Turning text into tokens
 
@@ -89,11 +89,7 @@ Output generation is an iterative operation. The model appends the predicted tok
 
 ### Token limits
 
-LLMs have limitations regarding the maximum number of tokens that can be used as input or generated as output. This limitation often causes the input and output tokens to be combined into a maximum context window.
-
-For example, GPT-4 supports up to 8,192 tokens of context. The combined size of the input and output tokens can't exceed 8,192.
-
-Taken together, a model's token limit and tokenization method determine the maximum length of text that can be provided as input or generated as output.
+LLMs have limitations regarding the maximum number of tokens that can be used as input or generated as output. This limitation often causes the input and output tokens to be combined into a maximum context window. Taken together, a model's token limit and tokenization method determine the maximum length of text that can be provided as input or generated as output.
 
 For example, consider a model that has a maximum context window of 100 tokens. The model processes our example sentences as input text:
 
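As context for the token-limit paragraph consolidated above (not part of this commit), here's a minimal C# sketch of counting tokens locally before sending a prompt; it assumes the `Microsoft.ML.Tokenizers` NuGet package, and the model name and 100-token window are illustrative.

```csharp
using Microsoft.ML.Tokenizers;

// Tokenizer for an OpenAI-style model; the model name is illustrative.
Tokenizer tokenizer = TiktokenTokenizer.CreateForModel("gpt-4o");

string prompt = "Tokens are words, character sets, or combinations of words and punctuation.";
IReadOnlyList<int> ids = tokenizer.EncodeToIds(prompt);
Console.WriteLine($"Input token count: {ids.Count}");

// A model's context window bounds the input and output tokens combined.
const int contextWindow = 100; // hypothetical limit matching the article's example
Console.WriteLine($"Tokens available for the completion: {contextWindow - ids.Count}");
```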
docs/ai/get-started-app-chat-scaling-with-azure-container-apps.md

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 ---
 title: Scale Azure OpenAI for .NET chat sample using RAG
 description: Learn how to add load balancing to your application to extend the chat app beyond the Azure OpenAI token and model quota limits.
-ms.date: 05/16/2024
+ms.date: 12/19/2024
 ms.topic: get-started
 ms.custom: devx-track-dotnet, devx-track-dotnet-ai
 # CustomerIntent: As a .NET developer new to Azure OpenAI, I want to scale my Azure OpenAI capacity to avoid rate limit errors with Azure Container Apps.

docs/ai/get-started-app-chat-template.md

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 ---
 title: Get started with the chat using your own data sample for .NET
 description: Get started with .NET and search across your own data using a chat app sample implemented using Azure OpenAI Service and Retrieval Augmented Generation (RAG) in Azure AI Search. Easily deploy with Azure Developer CLI. This article uses the Azure AI Reference Template sample.
-ms.date: 05/16/2024
+ms.date: 12/19/2024
 ms.topic: get-started
 ms.custom: devx-track-dotnet, devx-track-dotnet-ai
 # CustomerIntent: As a .NET developer new to Azure OpenAI, I want deploy and use sample code to interact with app infused with my own business data so that learn from the sample code.

docs/ai/how-to/content-filtering.md

Lines changed: 9 additions & 19 deletions
@@ -5,13 +5,13 @@ ms.custom: devx-track-dotnet, devx-track-dotnet-ai
 author: alexwolfmsft
 ms.author: alexwolf
 ms.topic: how-to
-ms.date: 05/13/2024
+ms.date: 12/19/2024
 
 #customer intent: As a .NET developer, I want to manage OpenAI Content Filtering in a .NET app
 
 ---
 
-# Work with OpenAI content filtering in a .NET app
+# Work with Azure OpenAI content filtering in a .NET app
 
 This article demonstrates how to handle content filtering concerns in a .NET app. Azure OpenAI Service includes a content filtering system that works alongside core models. This system works by running both the prompt and completion through an ensemble of classification models aimed at detecting and preventing the output of harmful content. The content filtering system detects and takes action on specific categories of potentially harmful content in both input prompts and output completions. Variations in API configurations and application design might affect completions and thus filtering behavior.
 
@@ -27,33 +27,23 @@ The [Content Filtering](/azure/ai-services/openai/concepts/content-filter) docum
 
 To use the sample code in this article, you need to create and assign a content filter to your OpenAI model.
 
-1. [Create and assign a content filter](/azure/ai-services/openai/how-to/content-filters) to your provisioned GPT-35 or GPT-4 model.
+1. [Create and assign a content filter](/azure/ai-services/openai/how-to/content-filters) to your provisioned model.
 
 1. Add the [`Azure.AI.OpenAI`](https://www.nuget.org/packages/Azure.AI.OpenAI) NuGet package to your project.
 
     ```dotnetcli
     dotnet add package Azure.AI.OpenAI
     ```
 
-1. Create a simple chat completion flow in your .NET app using the `OpenAiClient`. Replace the `YOUR_OPENAI_ENDPOINT`, `YOUR_OPENAI_KEY`, and `YOUR_OPENAI_DEPLOYMENT` values with your own.
+1. Create a simple chat completion flow in your .NET app using the `AzureOpenAiClient`. Replace the `YOUR_MODEL_ENDPOINT` and `YOUR_MODEL_DEPLOYMENT_NAME` values with your own.
 
-    :::code language="csharp" source="./snippets/content-filtering/program.cs" id="chatCompletionFlow":::
+    :::code language="csharp" source="./snippets/content-filtering/program.cs" :::
 
-1. Print out the content filtering results for each category.
+1. Replace the `YOUR_PROMPT` placeholder with your own message and run the app to experiment with content filtering results. If you enter a prompt the AI considers unsafe, Azure OpenAI returns a `400 Bad Request` code. The app prints a message in the console similar to the following:
 
-    :::code language="csharp" source="./snippets/content-filtering/program.cs" id="printContentFilteringResult":::
-
-1. Replace the `YOUR_PROMPT` placeholder with your own message and run the app to experiment with content filtering results. The following output shows an example of a prompt that triggers a low severity content filtering result:
-
-    ```output
-    I am sorry if I have done anything to upset you.
-    Is there anything I can do to assist you and make things better?
-
-    Hate category is filtered: False with low severity.
-    SelfHarm category is filtered: False with safe severity.
-    Sexual category is filtered: False with safe severity.
-    Violence category is filtered: False with low severity.
-    ```
+    ```output
+    The response was filtered due to the prompt triggering Azure OpenAI's content management policy...
+    ```
 
 ## Related content
 
docs/ai/how-to/snippets/content-filtering/AIContentFiltering.csproj

Lines changed: 4 additions & 2 deletions
@@ -8,8 +8,10 @@
   </PropertyGroup>
 
   <ItemGroup>
-    <PackageReference Include="Azure.AI.OpenAI" Version="1.0.0-beta.17" />
-    <PackageReference Include="Azure.Core" Version="1.43.0" />
+    <PackageReference Include="Azure.AI.OpenAI" />
+    <PackageReference Include="Azure.Identity" />
+    <PackageReference Include="Microsoft.Extensions.AI" Version="9.0.1-preview.1.24570.5" />
+    <PackageReference Include="Microsoft.Extensions.AI.OpenAI" Version="9.0.1-preview.1.24570.5" />
   </ItemGroup>
 
 </Project>
docs/ai/how-to/snippets/content-filtering/program.cs

Lines changed: 13 additions & 32 deletions
@@ -1,38 +1,19 @@
-// <chatCompletionFlow>
-using Azure;
-using Azure.AI.OpenAI;
+using Azure.AI.OpenAI;
+using Azure.Identity;
+using Microsoft.Extensions.AI;
 
-string endpoint = "YOUR_OPENAI_ENDPOINT";
-string key = "YOUR_OPENAI_KEY";
+IChatClient client =
+    new AzureOpenAIClient(
+        new Uri("YOUR_MODEL_ENDPOINT"),
+        new DefaultAzureCredential()).AsChatClient("YOUR_MODEL_DEPLOYMENT_NAME");
 
-OpenAIClient client = new(new Uri(endpoint), new AzureKeyCredential(key));
-
-var chatCompletionsOptions = new ChatCompletionsOptions()
+try
 {
-    DeploymentName = "YOUR_DEPLOYMENT_NAME",
-    Messages =
-    {
-        new ChatRequestSystemMessage("You are a helpful assistant."),
-        new ChatRequestUserMessage("YOUR_PROMPT")
-    }
-};
-
-Response<ChatCompletions> response = client.GetChatCompletions(chatCompletionsOptions);
-Console.WriteLine(response.Value.Choices[0].Message.Content);
-Console.WriteLine();
-// </chatCompletionFlow>
+    ChatCompletion completion = await client.CompleteAsync("YOUR_PROMPT");
 
-// <printContentFilteringResult>
-foreach (var promptFilterResult in response.Value.PromptFilterResults)
+    Console.WriteLine(completion.Message);
+}
+catch (Exception e)
 {
-    var results = promptFilterResult.ContentFilterResults;
-    Console.WriteLine(@$"Hate category is filtered:
-        {results.Hate.Filtered} with {results.Hate.Severity} severity.");
-    Console.WriteLine(@$"Self-harm category is filtered:
-        {results.SelfHarm.Filtered} with {results.SelfHarm.Severity} severity.");
-    Console.WriteLine(@$"Sexual category is filtered:
-        {results.Sexual.Filtered} with {results.Sexual.Severity} severity.");
-    Console.WriteLine(@$"Violence category is filtered:
-        {results.Violence.Filtered} with {results.Violence.Severity} severity.");
+    Console.WriteLine(e.Message);
 }
-// </printContentFilteringResult>
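For readers following the updated snippet, a small variation (not part of this commit) shows how the generic catch could be narrowed to the content-filter case. It assumes the Azure OpenAI client surfaces failed requests as `System.ClientModel.ClientResultException` carrying the HTTP status; the placeholders match the snippet above.

```csharp
using System.ClientModel;
using Azure.AI.OpenAI;
using Azure.Identity;
using Microsoft.Extensions.AI;

IChatClient client =
    new AzureOpenAIClient(
        new Uri("YOUR_MODEL_ENDPOINT"),
        new DefaultAzureCredential()).AsChatClient("YOUR_MODEL_DEPLOYMENT_NAME");

try
{
    ChatCompletion completion = await client.CompleteAsync("YOUR_PROMPT");
    Console.WriteLine(completion.Message);
}
catch (ClientResultException ex) when (ex.Status == 400)
{
    // Azure OpenAI returns 400 Bad Request when the prompt trips the content filter.
    Console.WriteLine($"Blocked by content filtering: {ex.Message}");
}
```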
