Skip to content

Commit 6518797

Browse files
fix: Fix for the issue encountered when updating the .env file and modifying model configurations. (#1347)
Co-authored-by: Pavan Kumar <v-kupavan@microsoft.com> Co-authored-by: Francia Riesco <[email protected]>
1 parent 5f1efcd commit 6518797

File tree

18 files changed

+329
-333
lines changed

18 files changed

+329
-333
lines changed

.env.sample

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,9 +22,8 @@ AZURE_SEARCH_DATASOURCE_NAME=
2222
# Azure OpenAI for generating the answer and computing the embedding of the documents
2323
AZURE_OPENAI_RESOURCE=
2424
AZURE_OPENAI_API_KEY=
25-
AZURE_OPENAI_MODEL=gpt-35-turbo
26-
AZURE_OPENAI_MODEL_NAME=gpt-35-turbo
27-
AZURE_OPENAI_EMBEDDING_MODEL=text-embedding-ada-002
25+
AZURE_OPENAI_MODEL_INFO="{\"model\":\"gpt-35-turbo-16k\",\"modelName\":\"gpt-35-turbo-16k\",\"modelVersion\":\"0613\"}"
26+
AZURE_OPENAI_EMBEDDING_MODEL_INFO="{\"model\":\"text-embedding-ada-002\",\"modelName\":\"text-embedding-ada-002\",\"modelVersion\":\"2\"}"
2827
AZURE_OPENAI_TEMPERATURE=0
2928
AZURE_OPENAI_TOP_P=1.0
3029
AZURE_OPENAI_MAX_TOKENS=1000

code/backend/batch/utilities/helpers/env_helper.py

Lines changed: 29 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import json
12
import os
23
import logging
34
import threading
@@ -92,11 +93,19 @@ def __load_config(self, **kwargs) -> None:
9293

9394
self.AZURE_AUTH_TYPE = os.getenv("AZURE_AUTH_TYPE", "keys")
9495
# Azure OpenAI
96+
# Default model info
97+
default_azure_openai_model_info = '{"model":"gpt-35-turbo-16k","modelName":"gpt-35-turbo-16k","modelVersion":"0613"}'
98+
default_azure_openai_embedding_model_info = '{"model":"text-embedding-ada-002","modelName":"text-embedding-ada-002","modelVersion":"2"}'
99+
95100
self.AZURE_OPENAI_RESOURCE = os.getenv("AZURE_OPENAI_RESOURCE", "")
96-
self.AZURE_OPENAI_MODEL = os.getenv("AZURE_OPENAI_MODEL", "")
97-
self.AZURE_OPENAI_MODEL_NAME = os.getenv(
98-
"AZURE_OPENAI_MODEL_NAME", "gpt-35-turbo"
101+
102+
# Fetch and assign model info
103+
azure_openai_model_info = self.get_info_from_env(
104+
"AZURE_OPENAI_MODEL_INFO", default_azure_openai_model_info
99105
)
106+
self.AZURE_OPENAI_MODEL = azure_openai_model_info.get("model")
107+
self.AZURE_OPENAI_MODEL_NAME = azure_openai_model_info.get("modelName")
108+
100109
self.AZURE_OPENAI_VISION_MODEL = os.getenv("AZURE_OPENAI_VISION_MODEL", "gpt-4")
101110
self.AZURE_OPENAI_TEMPERATURE = os.getenv("AZURE_OPENAI_TEMPERATURE", "0")
102111
self.AZURE_OPENAI_TOP_P = os.getenv("AZURE_OPENAI_TOP_P", "1.0")
@@ -110,9 +119,16 @@ def __load_config(self, **kwargs) -> None:
110119
"AZURE_OPENAI_API_VERSION", "2024-02-01"
111120
)
112121
self.AZURE_OPENAI_STREAM = os.getenv("AZURE_OPENAI_STREAM", "true")
113-
self.AZURE_OPENAI_EMBEDDING_MODEL = os.getenv(
114-
"AZURE_OPENAI_EMBEDDING_MODEL", ""
122+
123+
# Fetch and assign embedding model info
124+
azure_openai_embedding_model_info = self.get_info_from_env(
125+
"AZURE_OPENAI_EMBEDDING_MODEL_INFO",
126+
default_azure_openai_embedding_model_info,
115127
)
128+
self.AZURE_OPENAI_EMBEDDING_MODEL = azure_openai_embedding_model_info.get(
129+
"model"
130+
)
131+
116132
self.SHOULD_STREAM = (
117133
True if self.AZURE_OPENAI_STREAM.lower() == "true" else False
118134
)
@@ -267,6 +283,14 @@ def get_env_var_float(self, var_name: str, default: float):
267283
def is_auth_type_keys(self):
268284
return self.AZURE_AUTH_TYPE == "keys"
269285

286+
def get_info_from_env(self, env_var: str, default_info: str) -> dict:
287+
# Fetch and parse model info from the environment variable.
288+
info_str = os.getenv(env_var, default_info)
289+
# Handle escaped characters in the JSON string by wrapping it in double quotes for parsing.
290+
if "\\" in info_str:
291+
info_str = json.loads(f'"{info_str}"')
292+
return {} if not info_str else json.loads(info_str)
293+
270294
@staticmethod
271295
def check_env():
272296
for attr, value in EnvHelper().__dict__.items():

code/tests/functional/app_config.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import base64
2+
import json
23
import logging
34
import os
45
from backend.batch.utilities.helpers.config.conversation_flow import ConversationFlow
@@ -24,11 +25,10 @@ class AppConfig:
2425
"AZURE_KEY_VAULT_ENDPOINT": "some-key-vault-endpoint",
2526
"AZURE_OPENAI_API_KEY": "some-azure-openai-api-key",
2627
"AZURE_OPENAI_API_VERSION": "2024-02-01",
27-
"AZURE_OPENAI_EMBEDDING_MODEL": "some-embedding-model",
28+
"AZURE_OPENAI_EMBEDDING_MODEL_INFO": '{"model":"some-embedding-model","modelName":"some-embedding-model-name","modelVersion":"some-embedding-model-version"}',
2829
"AZURE_OPENAI_ENDPOINT": "some-openai-endpoint",
2930
"AZURE_OPENAI_MAX_TOKENS": "1000",
30-
"AZURE_OPENAI_MODEL": "some-openai-model",
31-
"AZURE_OPENAI_MODEL_NAME": "some-openai-model-name",
31+
"AZURE_OPENAI_MODEL_INFO": '{"model":"some-openai-model","modelName":"some-openai-model-name","modelVersion":"some-openai-model-version"}',
3232
"AZURE_OPENAI_VISION_MODEL": "some-openai-vision-model",
3333
"AZURE_OPENAI_RESOURCE": "some-openai-resource",
3434
"AZURE_OPENAI_STREAM": "True",
@@ -95,6 +95,10 @@ def set(self, key: str, value: str | None) -> None:
9595
def get(self, key: str) -> str | None:
9696
return self.config[key]
9797

98+
def get_from_json(self, config_key: str, field: str) -> str | None:
99+
config_json = json.loads(self.config[config_key])
100+
return config_json.get(field)
101+
98102
def get_all(self) -> dict[str, str | None]:
99103
return self.config
100104

code/tests/functional/conftest.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ def setup_default_mocking(httpserver: HTTPServer, app_config: AppConfig):
2020
).respond_with_data()
2121

2222
httpserver.expect_request(
23-
f"/openai/deployments/{app_config.get('AZURE_OPENAI_EMBEDDING_MODEL')}/embeddings",
23+
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_EMBEDDING_MODEL_INFO','model')}/embeddings",
2424
method="POST",
2525
).respond_with_json(
2626
{
@@ -58,15 +58,15 @@ def setup_default_mocking(httpserver: HTTPServer, app_config: AppConfig):
5858

5959
httpserver.expect_request(
6060
re.compile(
61-
f"/openai/deployments/({app_config.get('AZURE_OPENAI_MODEL')}|{app_config.get('AZURE_OPENAI_VISION_MODEL')})/chat/completions"
61+
f"/openai/deployments/({app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}|{app_config.get('AZURE_OPENAI_VISION_MODEL')})/chat/completions"
6262
),
6363
method="POST",
6464
).respond_with_json(
6565
{
6666
"id": "chatcmpl-6v7mkQj980V1yBec6ETrKPRqFjNw9",
6767
"object": "chat.completion",
6868
"created": 1679072642,
69-
"model": app_config.get("AZURE_OPENAI_MODEL"),
69+
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
7070
"usage": {
7171
"prompt_tokens": 58,
7272
"completion_tokens": 68,
@@ -194,7 +194,7 @@ def setup_default_mocking(httpserver: HTTPServer, app_config: AppConfig):
194194
"inputs": [{"name": "text", "source": "/document/pages/*"}],
195195
"outputs": [{"name": "embedding", "targetName": "content_vector"}],
196196
"resourceUri": f"https://localhost:{httpserver.port}/",
197-
"deploymentId": f"{app_config.get('AZURE_OPENAI_EMBEDDING_MODEL')}",
197+
"deploymentId": f"{app_config.get_from_json('AZURE_OPENAI_EMBEDDING_MODEL_INFO','model')}",
198198
"apiKey": f"{app_config.get('AZURE_OPENAI_API_KEY')}",
199199
},
200200
],

code/tests/functional/tests/backend_api/default/test_advanced_image_processing.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
@pytest.fixture(autouse=True)
2828
def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
2929
httpserver.expect_oneshot_request(
30-
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
30+
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
3131
method="POST",
3232
).respond_with_json(
3333
{
@@ -48,7 +48,7 @@ def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
4848
],
4949
"created": 1714576877,
5050
"id": "chatcmpl-9K63hMvVH1DyQJqqM7rFE4oRPFCeR",
51-
"model": app_config.get("AZURE_OPENAI_MODEL"),
51+
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
5252
"object": "chat.completion",
5353
"prompt_filter_results": [
5454
{
@@ -72,7 +72,7 @@ def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
7272

7373
httpserver.expect_oneshot_request(
7474
re.compile(
75-
f"/openai/deployments/({app_config.get('AZURE_OPENAI_MODEL')}|{app_config.get('AZURE_OPENAI_VISION_MODEL')})/chat/completions"
75+
f"/openai/deployments/({app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}|{app_config.get('AZURE_OPENAI_VISION_MODEL')})/chat/completions"
7676
),
7777
method="POST",
7878
).respond_with_json(
@@ -95,7 +95,7 @@ def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
9595
],
9696
"created": 1714576891,
9797
"id": "chatcmpl-9K63vDGs3slJFynnpi2K6RcVPwgrT",
98-
"model": app_config.get("AZURE_OPENAI_MODEL"),
98+
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
9999
"object": "chat.completion",
100100
"prompt_filter_results": [
101101
{
@@ -167,7 +167,7 @@ def test_post_responds_successfully(app_url: str, app_config: AppConfig):
167167
],
168168
"created": "response.created",
169169
"id": "response.id",
170-
"model": app_config.get("AZURE_OPENAI_MODEL"),
170+
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
171171
"object": "response.object",
172172
}
173173
assert response.headers["Content-Type"] == "application/json"

code/tests/functional/tests/backend_api/default/test_conversation.py

Lines changed: 13 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -28,14 +28,14 @@
2828
@pytest.fixture(autouse=True)
2929
def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
3030
httpserver.expect_oneshot_request(
31-
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
31+
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
3232
method="POST",
3333
).respond_with_json(
3434
{
3535
"id": "chatcmpl-6v7mkQj980V1yBec6ETrKPRqFjNw9",
3636
"object": "chat.completion",
3737
"created": 1679072642,
38-
"model": app_config.get("AZURE_OPENAI_MODEL"),
38+
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
3939
"usage": {
4040
"prompt_tokens": 58,
4141
"completion_tokens": 68,
@@ -58,7 +58,7 @@ def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
5858
)
5959

6060
httpserver.expect_oneshot_request(
61-
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
61+
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
6262
method="POST",
6363
).respond_with_json(
6464
{
@@ -110,7 +110,7 @@ def test_post_responds_successfully(app_url: str, app_config: AppConfig):
110110
],
111111
"created": "response.created",
112112
"id": "response.id",
113-
"model": app_config.get("AZURE_OPENAI_MODEL"),
113+
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
114114
"object": "response.object",
115115
}
116116
assert response.headers["Content-Type"] == "application/json"
@@ -126,7 +126,7 @@ def test_post_makes_correct_calls_to_openai_embeddings_to_get_vector_dimensions(
126126
verify_request_made(
127127
mock_httpserver=httpserver,
128128
request_matcher=RequestMatcher(
129-
path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_EMBEDDING_MODEL')}/embeddings",
129+
path=f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_EMBEDDING_MODEL_INFO','model')}/embeddings",
130130
method="POST",
131131
json={
132132
"input": [[1199]],
@@ -155,13 +155,15 @@ def test_post_makes_correct_calls_to_openai_embeddings_to_embed_question_to_sear
155155
verify_request_made(
156156
mock_httpserver=httpserver,
157157
request_matcher=RequestMatcher(
158-
path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_EMBEDDING_MODEL')}/embeddings",
158+
path=f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_EMBEDDING_MODEL_INFO','model')}/embeddings",
159159
method="POST",
160160
json={
161161
"input": [
162162
[3923, 374, 279, 7438, 315, 2324, 30]
163163
], # Embedding of "What is the meaning of life?"
164-
"model": app_config.get("AZURE_OPENAI_EMBEDDING_MODEL"),
164+
"model": app_config.get_from_json(
165+
"AZURE_OPENAI_EMBEDDING_MODEL_INFO", "model"
166+
),
165167
"encoding_format": "base64",
166168
},
167169
headers={
@@ -188,7 +190,7 @@ def test_post_makes_correct_calls_to_openai_embeddings_to_embed_question_to_stor
188190
verify_request_made(
189191
mock_httpserver=httpserver,
190192
request_matcher=RequestMatcher(
191-
path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_EMBEDDING_MODEL')}/embeddings",
193+
path=f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_EMBEDDING_MODEL_INFO','model')}/embeddings",
192194
method="POST",
193195
json={
194196
"input": [
@@ -265,7 +267,7 @@ def test_post_makes_correct_call_to_openai_chat_completions_with_functions(
265267
verify_request_made(
266268
mock_httpserver=httpserver,
267269
request_matcher=RequestMatcher(
268-
path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
270+
path=f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
269271
method="POST",
270272
json={
271273
"messages": [
@@ -555,7 +557,7 @@ def test_post_makes_correct_call_to_openai_chat_completions_with_documents(
555557
verify_request_made(
556558
mock_httpserver=httpserver,
557559
request_matcher=RequestMatcher(
558-
path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
560+
path=f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
559561
method="POST",
560562
json={
561563
"messages": [
@@ -589,7 +591,7 @@ def test_post_makes_correct_call_to_openai_chat_completions_with_documents(
589591
"role": "user",
590592
},
591593
],
592-
"model": app_config.get("AZURE_OPENAI_MODEL"),
594+
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
593595
"max_tokens": int(app_config.get("AZURE_OPENAI_MAX_TOKENS")),
594596
"temperature": 0,
595597
},

code/tests/functional/tests/backend_api/default/test_post_prompt_tool.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -61,14 +61,14 @@ def setup_config_mocking(httpserver: HTTPServer):
6161
@pytest.fixture(autouse=True)
6262
def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
6363
httpserver.expect_oneshot_request(
64-
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
64+
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
6565
method="POST",
6666
).respond_with_json(
6767
{
6868
"id": "chatcmpl-6v7mkQj980V1yBec6ETrKPRqFjNw9",
6969
"object": "chat.completion",
7070
"created": 1679072642,
71-
"model": app_config.get("AZURE_OPENAI_MODEL"),
71+
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
7272
"usage": {
7373
"prompt_tokens": 58,
7474
"completion_tokens": 68,
@@ -92,7 +92,7 @@ def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
9292

9393
httpserver.expect_oneshot_request(
9494
re.compile(
95-
f"/openai/deployments/({app_config.get('AZURE_OPENAI_MODEL')}|{app_config.get('AZURE_OPENAI_VISION_MODEL')})/chat/completions"
95+
f"/openai/deployments/({app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}|{app_config.get('AZURE_OPENAI_VISION_MODEL')})/chat/completions"
9696
),
9797
method="POST",
9898
).respond_with_json(
@@ -125,7 +125,7 @@ def test_post_responds_successfully_when_not_filtered(
125125
):
126126
# given
127127
httpserver.expect_oneshot_request(
128-
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
128+
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
129129
method="POST",
130130
).respond_with_json(
131131
{
@@ -175,7 +175,7 @@ def test_post_responds_successfully_when_not_filtered(
175175
],
176176
"created": "response.created",
177177
"id": "response.id",
178-
"model": app_config.get("AZURE_OPENAI_MODEL"),
178+
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
179179
"object": "response.object",
180180
}
181181
assert response.headers["Content-Type"] == "application/json"
@@ -186,7 +186,7 @@ def test_post_responds_successfully_when_filtered(
186186
):
187187
# given
188188
httpserver.expect_oneshot_request(
189-
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
189+
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
190190
method="POST",
191191
).respond_with_json(
192192
{
@@ -236,7 +236,7 @@ def test_post_responds_successfully_when_filtered(
236236
],
237237
"created": "response.created",
238238
"id": "response.id",
239-
"model": app_config.get("AZURE_OPENAI_MODEL"),
239+
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
240240
"object": "response.object",
241241
}
242242
assert response.headers["Content-Type"] == "application/json"
@@ -247,7 +247,7 @@ def test_post_makes_correct_call_to_openai_from_post_prompt_tool(
247247
):
248248
# given
249249
httpserver.expect_oneshot_request(
250-
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
250+
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
251251
method="POST",
252252
).respond_with_json(
253253
{
@@ -280,7 +280,7 @@ def test_post_makes_correct_call_to_openai_from_post_prompt_tool(
280280
verify_request_made(
281281
mock_httpserver=httpserver,
282282
request_matcher=RequestMatcher(
283-
path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
283+
path=f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
284284
method="POST",
285285
json={
286286
"messages": [
@@ -289,7 +289,7 @@ def test_post_makes_correct_call_to_openai_from_post_prompt_tool(
289289
"role": "user",
290290
}
291291
],
292-
"model": app_config.get("AZURE_OPENAI_MODEL"),
292+
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
293293
"max_tokens": int(app_config.get("AZURE_OPENAI_MAX_TOKENS")),
294294
},
295295
headers={

0 commit comments

Comments (0)