
Commit 5b477d1

update model name from gpt4.1-mini to gpt-4.1-mini
1 parent 4c7e541 commit 5b477d1

File tree

6 files changed: 16 additions, 15 deletions


docs/CustomizingAzdParameters.md

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ By default this template will use the environment name as the prefix to prevent
 | `AZURE_ENV_NAME` | string | `azdtemp` | Used as a prefix for all resource names to ensure uniqueness across environments. |
 | `AZURE_LOCATION` | string | `<User selects during deployment>` | Sets the Azure region for resource deployment. |
 | `AZURE_OPENAI_MODEL_DEPLOYMENT_TYPE` | string | `GlobalStandard` | Change the Model Deployment Type (allowed values: Standard, GlobalStandard). |
-| `AZURE_OPENAI_DEPLOYMENT_MODEL` | string | `gpt4.1-mini` | Set the GPT model name (allowed values: `gpt4.1-mini`, `gpt-4`, `gpt-4o`). |
+| `AZURE_OPENAI_DEPLOYMENT_MODEL` | string | `gpt-4.1-mini` | Set the GPT model name (allowed values: `gpt-4.1-mini`, `gpt-4`, `gpt-4o`). |
 | `AZURE_OPENAI_API_VERSION` | string | `2025-04-14` | Set the Azure OpenAI model version. |
 | `AZURE_OPENAI_DEPLOYMENT_MODEL_CAPACITY` | integer | `30` | Set the model capacity for GPT deployment. Choose based on your Azure quota and usage needs. |
 | `AZURE_OPENAI_EMBEDDING_MODEL` | string | `text-embedding-ada-002` | Set the model name used for embeddings. |
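Since the allowed model names in the table above differ from the old default only by a hyphen, a small validation step can catch the stale spelling early. The snippet below is a minimal illustrative sketch in Python; the `ALLOWED_GPT_MODELS` set and `validate_model_name` helper are hypothetical and not part of this template.

import os

# Allowed values as documented in the table above.
ALLOWED_GPT_MODELS = {"gpt-4.1-mini", "gpt-4", "gpt-4o"}


def validate_model_name(name: str) -> str:
    # Reject anything outside the documented set, e.g. the old spelling "gpt4.1-mini".
    if name not in ALLOWED_GPT_MODELS:
        raise ValueError(
            f"Unsupported model name {name!r}; expected one of {sorted(ALLOWED_GPT_MODELS)}"
        )
    return name


if __name__ == "__main__":
    # Falls back to the corrected name when the variable is unset.
    print(validate_model_name(os.environ.get("AZURE_OPENAI_DEPLOYMENT_MODEL", "gpt-4.1-mini")))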

infra/main.bicep

Lines changed: 2 additions & 1 deletion
@@ -19,7 +19,7 @@ param location string = resourceGroup().location
 param gptModelDeploymentType string = 'GlobalStandard'
 
 @description('Optional. Name of the GPT model to deploy:')
-param gptModelName string = 'gpt4.1-mini'
+param gptModelName string = 'gpt-4.1-mini'
 
 @description('Optional. Version of the GPT model to deploy:')
 param gptModelVersion string = '2025-04-14'
@@ -149,6 +149,7 @@ resource resourceGroupTags 'Microsoft.Resources/tags@2021-04-01' = {
       ...tags
       TemplateName: 'Research Assistant'
       CreatedBy: createdBy
+      SecurityControl: 'Ignore'
     }
   }
 }

infra/main.json

Lines changed: 7 additions & 7 deletions
@@ -6,7 +6,7 @@
     "_generator": {
       "name": "bicep",
       "version": "0.37.4.10188",
-      "templateHash": "7824757243182327261"
+      "templateHash": "4472328872828310878"
     }
   },
   "parameters": {
@@ -43,7 +43,7 @@
     },
     "gptModelName": {
       "type": "string",
-      "defaultValue": "gpt4.1-mini",
+      "defaultValue": "gpt-4.1-mini",
       "metadata": {
         "description": "Optional. Name of the GPT model to deploy:"
       }
@@ -103,7 +103,7 @@
     },
     "containerRegistryHostname": {
       "type": "string",
-      "defaultValue": "byoaiacontainerreg.azurecr.io",
+      "defaultValue": "racontainerreg37.azurecr.io",
       "metadata": {
         "description": "Optional. The Container Registry hostname where the docker images for the webapp are located."
       }
@@ -310,7 +310,7 @@
       "apiVersion": "2021-04-01",
       "name": "default",
       "properties": {
-        "tags": "[shallowMerge(createArray(resourceGroup().tags, parameters('tags'), createObject('TemplateName', 'Research Assistant', 'CreatedBy', parameters('createdBy'))))]"
+        "tags": "[shallowMerge(createArray(resourceGroup().tags, parameters('tags'), createObject('TemplateName', 'Research Assistant', 'CreatedBy', parameters('createdBy'), 'SecurityControl', 'Ignore')))]"
       }
     },
     "cogEndpointSecret": {
@@ -22462,10 +22462,10 @@
       }
     },
     "dependsOn": [
-      "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').storageFile)]",
       "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').storageBlob)]",
-      "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').storageQueue)]",
+      "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').storageFile)]",
       "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').storageDfs)]",
+      "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').storageQueue)]",
       "userAssignedIdentity",
       "virtualNetwork"
     ]
@@ -38336,8 +38336,8 @@
       }
     },
     "dependsOn": [
-      "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').notebook)]",
       "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').machineLearningServices)]",
+      "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').notebook)]",
       "azOpenAI",
       "azSearchService",
       "existingOpenAI",

src/.env.sample

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@ AZURE_SEARCH_STRICTNESS=3
 AZURE_OPENAI_RESOURCE=
 AZURE_OPENAI_MODEL=
 AZURE_OPENAI_KEY=
-AZURE_OPENAI_MODEL_NAME=gpt4.1-mini
+AZURE_OPENAI_MODEL_NAME=gpt-4.1-mini
 AZURE_OPENAI_TEMPERATURE=0
 AZURE_OPENAI_TOP_P=1.0
 AZURE_OPENAI_MAX_TOKENS=1000
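To confirm the corrected value actually reaches the application's environment, one option is to load a local copy of this sample file and print the variable. The sketch below assumes the python-dotenv package and a src/.env file copied from src/.env.sample; neither assumption comes from this commit.

import os

from dotenv import load_dotenv  # assumes python-dotenv is installed

load_dotenv("src/.env")  # hypothetical path: copy src/.env.sample to src/.env first
print(os.environ.get("AZURE_OPENAI_MODEL_NAME"))  # expected output: gpt-4.1-mini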

src/app.py

Lines changed: 2 additions & 2 deletions
@@ -86,7 +86,7 @@ def assets(path):
 )
 AZURE_OPENAI_STREAM = os.environ.get("AZURE_OPENAI_STREAM", "true")
 AZURE_OPENAI_MODEL_NAME = os.environ.get(
-    "AZURE_OPENAI_MODEL_NAME", "gpt4.1-mini"
+    "AZURE_OPENAI_MODEL_NAME", "gpt-4.1-mini"
 )
 AZURE_OPENAI_EMBEDDING_ENDPOINT = os.environ.get("AZURE_OPENAI_EMBEDDING_ENDPOINT")
 AZURE_OPENAI_EMBEDDING_KEY = os.environ.get("AZURE_OPENAI_EMBEDDING_KEY")
@@ -108,7 +108,7 @@ def assets(path):
 def is_chat_model():
     if (
         "gpt-4" in AZURE_OPENAI_MODEL_NAME.lower()
-        or AZURE_OPENAI_MODEL_NAME.lower() in ["gpt-35-turbo-4k", "gpt4.1-mini"]
+        or AZURE_OPENAI_MODEL_NAME.lower() in ["gpt-35-turbo-4k", "gpt-4.1-mini"]
     ):
         return True
     return False
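One side effect of the rename: the old spelling gpt4.1-mini never matched the "gpt-4" substring test and was only recognised through the hard-coded list, whereas gpt-4.1-mini now satisfies both branches. A standalone sketch for illustration; the function is restated here with the model name as an argument rather than the module-level constant used in app.py.

def is_chat_model(model_name: str) -> bool:
    # Same condition as app.py, parameterised for illustration.
    return (
        "gpt-4" in model_name.lower()
        or model_name.lower() in ["gpt-35-turbo-4k", "gpt-4.1-mini"]
    )


# The hyphenated name matches the substring check on its own ...
assert is_chat_model("gpt-4.1-mini") is True
# ... while the old spelling would only have matched through the explicit list.
assert "gpt-4" not in "gpt4.1-mini"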

src/test_app.py

Lines changed: 3 additions & 3 deletions
@@ -48,7 +48,7 @@ def test_is_chat_model_with_gpt35_turbo_4k():
 
 
 def test_is_chat_model_with_gpt35_turbo_16k():
-    with patch("app.AZURE_OPENAI_MODEL_NAME", "gpt4.1-mini"):
+    with patch("app.AZURE_OPENAI_MODEL_NAME", "gpt-4.1-mini"):
         assert is_chat_model() is True
 
 
@@ -291,7 +291,7 @@ def test_stream_with_data_azure_success():
     with patch("requests.Session.post") as mock_post:
         mock_response = MagicMock()
         mock_response.iter_lines.return_value = [
-            b'data: {"id":"1","model":"gpt4.1-mini","created":1736397875,"object":"extensions.chat.completion.chunk","choices":[{"index":0,"delta":{"context":{"messages":[{"role":"tool","content":"hello","end_turn":false}]}},"end_turn":false,"finish_reason":"None"}]}'
+            b'data: {"id":"1","model":"gpt-4.1-mini","created":1736397875,"object":"extensions.chat.completion.chunk","choices":[{"index":0,"delta":{"context":{"messages":[{"role":"tool","content":"hello","end_turn":false}]}},"end_turn":false,"finish_reason":"None"}]}'
         ]
         mock_response.headers = {"apim-request-id": "test-request-id"}
         mock_post.return_value.__enter__.return_value = mock_response
@@ -381,7 +381,7 @@ def test_stream_with_data_azure_error():
         # body = mock_body
         mock_response = MagicMock()
         mock_response.iter_lines.return_value = [
-            b'data: {"id":"1","model":"gpt4.1-mini","created":1736397875,"object":"extensions.chat.completion.chunk","choices":[{"index":0,"delta":{"context":{"messages":[{"role":"tool","content":"hello","end_turn":false}]}},"end_turn":false,"finish_reason":"None"}]}'
+            b'data: {"id":"1","model":"gpt-4.1-mini","created":1736397875,"object":"extensions.chat.completion.chunk","choices":[{"index":0,"delta":{"context":{"messages":[{"role":"tool","content":"hello","end_turn":false}]}},"end_turn":false,"finish_reason":"None"}]}'
         ]
         mock_response.headers = {"apim-request-id": "test-request-id"}
         mock_post.return_value.__enter__.return_value = mock_response
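These tests patch the module-level constant directly, so the renamed value is exercised without setting any environment variables or reloading the module. A minimal sketch of that pattern, assuming app is importable (for example, when run from the src/ directory):

from unittest.mock import patch

from app import is_chat_model  # assumes src/ is on the import path

# Patch the constant that is_chat_model() reads at call time.
with patch("app.AZURE_OPENAI_MODEL_NAME", "gpt-4.1-mini"):
    assert is_chat_model() is True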
