
Commit 20f8a9c

Fix 127, 128, 177 (#190)
## Purpose

* ...

Fix #127 #128 #177

## Application Insights

<img width="1488" alt="image" src="https://github.com/Azure-Samples/azure-search-openai-demo-csharp/assets/16876986/923a2b35-7a67-4f78-b59d-ed65ca8c8599">

## Does this introduce a breaking change?

```
[ ] Yes
[ ] No
```

## Pull Request Type

What kind of change does this Pull Request introduce?

```
[ ] Bugfix
[ ] Feature
[ ] Code style update (formatting, local variables)
[ ] Refactoring (no functional changes, no api changes)
[ ] Documentation content changes
[ ] Other... Please describe:
```

## How to Test

* Get the code

```
git clone [repo-address]
cd [repo-name]
git checkout [branch-name]
npm install
```

* Test the code

```
```

## What to Check

Verify that the following are valid

* ...

## Other Information

---------

Co-authored-by: Luis Quintanilla <[email protected]>
1 parent 94ef11c · commit 20f8a9c

File tree: 5 files changed, +38 additions, -8 deletions


README.md

Lines changed: 1 addition & 1 deletion
```diff
@@ -200,4 +200,4 @@ Once in the web app:
 
 **_Question_**: Why do we need to break up the PDFs into chunks when Azure Cognitive Search supports searching large documents?
 
-**_Answer_**: Chunking allows us to limit the amount of information we send to OpenAI due to token limits. By breaking up the content, it allows us to easily find potential chunks of text that we can inject into OpenAI. The method of chunking we use leverages a sliding window of text such that sentences that end one chunk will start the next. This allows us to reduce the chance of losing the context of the text.
+**_Answer_**: Chunking allows us to limit the amount of information we send to OpenAI due to token limits. By breaking up the content, it allows us to easily find potential chunks of text that we can inject into OpenAI. The method of chunking we use leverages a sliding window of text such that sentences that end one chunk will start the next. This allows us to reduce the chance of losing the context of the text.
```
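
To make the sliding-window idea in that answer concrete, here is a minimal C# sketch. It is illustrative only and is not the chunker this repo ships: it assumes plain text and a character-based window with a fixed overlap, whereas the real pipeline works on text extracted from PDFs and on token budgets.

```csharp
using System;
using System.Collections.Generic;

internal static class TextChunker
{
    // Sliding-window split: consecutive chunks overlap by `overlap` characters,
    // so text that straddles a chunk boundary still appears intact in the next chunk.
    public static IEnumerable<string> ChunkWithOverlap(string text, int chunkSize = 1_000, int overlap = 100)
    {
        for (var start = 0; start < text.Length; start += chunkSize - overlap)
        {
            var length = Math.Min(chunkSize, text.Length - start);
            yield return text.Substring(start, length);

            if (start + length >= text.Length)
            {
                break; // the final window already reached the end of the text
            }
        }
    }
}
```

With the defaults above, calling `TextChunker.ChunkWithOverlap(content)` on a 2,500-character string yields windows starting at offsets 0, 900, and 1,800, so a sentence cut at one boundary is repeated at the start of the next chunk.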

app/backend/MinimalApi.csproj

Lines changed: 1 addition & 0 deletions
```diff
@@ -17,6 +17,7 @@
     <PackageReference Include="Azure.Storage.Blobs" Version="$(AzureStorageBlobsVersion)" />
     <PackageReference Include="Azure.AI.OpenAI" Version="$(AzureOpenAIVersion)" />
     <PackageReference Include="Azure.Extensions.AspNetCore.Configuration.Secrets" Version="1.2.2" />
+    <PackageReference Include="Microsoft.ApplicationInsights.AspNetCore" Version="2.22.0-beta3" />
     <PackageReference Include="Microsoft.Extensions.Caching.StackExchangeRedis" Version="7.0.10" />
     <PackageReference Include="Microsoft.VisualStudio.Azure.Containers.Tools.Targets" Version="1.19.5" />
     <PackageReference Include="PdfSharpCore" Version="1.3.60" />
```

app/backend/Program.cs

Lines changed: 13 additions & 1 deletion
```diff
@@ -19,14 +19,15 @@
 }
 else
 {
+    static string? GetEnvVar(string key) => Environment.GetEnvironmentVariable(key);
+
     builder.Services.AddStackExchangeRedisCache(options =>
     {
         var name = builder.Configuration["AzureRedisCacheName"] +
             ".redis.cache.windows.net";
         var key = builder.Configuration["AzureRedisCachePrimaryKey"];
         var ssl = "true";
 
-        static string? GetEnvVar(string key) => Environment.GetEnvironmentVariable(key);
 
         if (GetEnvVar("REDIS_HOST") is string redisHost)
         {
@@ -46,7 +47,18 @@
             {name},abortConnect=false,ssl={ssl},allowAdmin=true,password={key}
             """;
         options.InstanceName = "content";
+
+
     });
+
+    // set application telemetry
+    if (GetEnvVar("APPLICATIONINSIGHTS_CONNECTION_STRING") is string appInsightsConnectionString)
+    {
+        builder.Services.AddApplicationInsightsTelemetry((option) =>
+        {
+            option.ConnectionString = appInsightsConnectionString;
+        });
+    }
 }
 
 var app = builder.Build();
```
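
For context on what the new registration enables: `AddApplicationInsightsTelemetry` also places a `TelemetryClient` in the DI container. The snippet below is a hypothetical endpoint, not part of this commit, showing how that client could be resolved after the `var app = builder.Build();` line above to record a custom event alongside the automatic request telemetry.

```csharp
using Microsoft.ApplicationInsights;

// Hypothetical example only (not in this commit): a minimal API endpoint that
// receives the TelemetryClient registered by AddApplicationInsightsTelemetry
// and emits a custom event to Application Insights.
app.MapGet("/api/ping", (TelemetryClient telemetry) =>
{
    telemetry.TrackEvent("PingRequested");
    return Results.Ok("pong");
});
```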

infra/main.bicep

Lines changed: 14 additions & 6 deletions
```diff
@@ -10,6 +10,19 @@ param environmentName string
 param location string
 param tags string = ''
 
+@description('Location for the OpenAI resource group')
+@allowed(['canadaeast', 'eastus', 'eastus2', 'francecentral', 'switzerlandnorth', 'uksouth', 'japaneast', 'northcentralus'])
+@metadata({
+  azd: {
+    type: 'location'
+  }
+})
+param openAiResourceGroupLocation string
+
+@description('Name of the chat GPT model. Default: gpt-35-turbo')
+@allowed(['gpt-35-turbo', 'gpt-4', 'gpt-35-turbo-16k', 'gpt-4-16k'])
+param chatGptModelName string = 'gpt-35-turbo'
+
 @description('Name of the Azure Application Insights dashboard')
 param applicationInsightsDashboardName string = ''
 
@@ -22,9 +35,6 @@ param chatGptDeploymentCapacity int = 30
 @description('Name of the chat GPT deployment')
 param chatGptDeploymentName string = 'chat'
 
-@description('Name of the chat GPT model. Default: gpt-35-turbo')
-param chatGptModelName string = 'gpt-35-turbo'
-
 @description('Name of the embedding deployment. Default: embedding')
 param embeddingDeploymentName string = 'embedding'
 
@@ -67,9 +77,6 @@ param keyVaultResourceGroupName string = ''
 @description('Name of the Azure Log Analytics workspace')
 param logAnalyticsName string = ''
 
-@description('Location of the resource group for the OpenAI resources')
-param openAiResourceGroupLocation string = location
-
 @description('Name of the resource group for the OpenAI resources')
 param openAiResourceGroupName string = ''
 
@@ -493,6 +500,7 @@ output AZURE_KEY_VAULT_ENDPOINT string = keyVault.outputs.endpoint
 output AZURE_KEY_VAULT_NAME string = keyVault.outputs.name
 output AZURE_KEY_VAULT_RESOURCE_GROUP string = keyVaultResourceGroup.name
 output AZURE_LOCATION string = location
+output AZURE_OPENAI_RESOURCE_LOCATION string = openAiResourceGroupLocation
 output AZURE_OPENAI_CHATGPT_DEPLOYMENT string = chatGptDeploymentName
 output AZURE_OPENAI_EMBEDDING_DEPLOYMENT string = embeddingDeploymentName
 output AZURE_OPENAI_ENDPOINT string = openAi.outputs.endpoint
```

infra/main.parameters.json

Lines changed: 9 additions & 0 deletions
```diff
@@ -29,6 +29,15 @@
     "openAiResourceGroupName": {
       "value": "${AZURE_OPENAI_RESOURCE_GROUP}"
     },
+    "openAiResourceGroupLocation":{
+      "value": "${AZURE_OPENAI_RESOURCE_LOCATION=${AZURE_LOCATION}}"
+    },
+    "chatGptDeploymentName":{
+      "value": "${AZURE_OPENAI_CHATGPT_DEPLOYMENT=chat}"
+    },
+    "embeddingDeploymentName":{
+      "value": "${AZURE_OPENAI_EMBEDDING_DEPLOYMENT=embedding}"
+    },
     "openAiServiceName": {
       "value": "${AZURE_OPENAI_SERVICE}"
     },
```
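
Reading note (not stated in the commit, but this appears to follow azd's `${ENV_VAR=default}` substitution): `${AZURE_OPENAI_RESOURCE_LOCATION=${AZURE_LOCATION}}` uses `AZURE_OPENAI_RESOURCE_LOCATION` from the azd environment when it is set and otherwise falls back to `AZURE_LOCATION`, while `chat` and `embedding` act as literal defaults for the two deployment-name parameters.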
