Skip to content

Commit 8ed8a63

Browse files
committed
Update tests
1 parent 8a58ddf commit 8ed8a63

File tree

32 files changed

+471
-65
lines changed

32 files changed

+471
-65
lines changed

.github/copilot-instructions.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,3 +60,9 @@ If the feature is a UI element, add an e2e test for it.
6060
If it is an API endpoint, add an app integration test for it.
6161
If it is a function or method, add a unit test for it.
6262
Use mocks from conftest.py to mock external services.
63+
64+
When you're running tests, make sure you activate the .venv virtual environment first:
65+
66+
```bash
67+
source .venv/bin/activate
68+
```

app/backend/app.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -565,6 +565,7 @@ async def setup_clients():
565565
openai_organization=OPENAI_ORGANIZATION,
566566
)
567567

568+
user_blob_container_client = None
568569
if USE_USER_UPLOAD:
569570
current_app.logger.info("USE_USER_UPLOAD is true, setting up user upload feature")
570571
if not AZURE_USERSTORAGE_ACCOUNT or not AZURE_USERSTORAGE_CONTAINER:

app/backend/approaches/chatreadretrieveread.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -358,7 +358,7 @@ async def run_search_approach(
358358

359359
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
360360
text_sources, image_sources, citations = await self.get_sources_content(
361-
results, use_semantic_captions, use_image_sources=use_image_sources, user_oid=auth_claims["oid"]
361+
results, use_semantic_captions, use_image_sources=use_image_sources, user_oid=auth_claims.get("oid")
362362
)
363363

364364
extra_info = ExtraInfo(
@@ -430,7 +430,7 @@ async def run_agentic_retrieval_approach(
430430
use_text_sources = llm_inputs_enum in [LLMInputType.TEXT_AND_IMAGES, LLMInputType.TEXTS]
431431

432432
text_sources, image_sources, citations = await self.get_sources_content(
433-
results, use_semantic_captions=False, use_image_sources=use_image_sources, user_oid=auth_claims["oid"]
433+
results, use_semantic_captions=False, use_image_sources=use_image_sources, user_oid=auth_claims.get("oid")
434434
)
435435

436436
extra_info = ExtraInfo(

app/backend/approaches/retrievethenread.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -203,7 +203,7 @@ async def run_search_approach(
203203
)
204204

205205
text_sources, image_sources, citations = await self.get_sources_content(
206-
results, use_semantic_captions, use_image_sources=use_image_sources, user_oid=auth_claims["oid"]
206+
results, use_semantic_captions, use_image_sources=use_image_sources, user_oid=auth_claims.get("oid")
207207
)
208208

209209
return ExtraInfo(
@@ -264,7 +264,7 @@ async def run_agentic_retrieval_approach(
264264
use_image_sources = llm_inputs_enum in [LLMInputType.TEXT_AND_IMAGES, LLMInputType.IMAGES]
265265

266266
text_sources, image_sources, citations = await self.get_sources_content(
267-
results, use_semantic_captions=False, use_image_sources=use_image_sources, user_oid=auth_claims["oid"]
267+
results, use_semantic_captions=False, use_image_sources=use_image_sources, user_oid=auth_claims.get("oid")
268268
)
269269

270270
extra_info = ExtraInfo(

app/backend/prepdocs.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -235,6 +235,8 @@ def setup_openai_client(
235235
logger.info(
236236
"OPENAI_HOST is not azure, setting up OpenAI client using OPENAI_API_KEY and OPENAI_ORGANIZATION environment variables"
237237
)
238+
if openai_api_key is None:
239+
raise ValueError("OpenAI key is required when using the non-Azure OpenAI API")
238240
openai_client = AsyncOpenAI(
239241
api_key=openai_api_key,
240242
organization=openai_organization,

docs/multimodal.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ For more details on how this feature works, read [this blog post](https://techco
100100

101101
## Compatibility
102102

103-
* This feature is not fully compatible with the [agentic retrieval](./agentic_retrieval.md) feature.
103+
* This feature is **not** fully compatible with the [agentic retrieval](./agentic_retrieval.md) feature.
104104
The agent *will* perform the multimodal vector embedding search, but it will not return images in the response,
105105
so we cannot send the images to the chat completion model.
106-
* This feature is compatible with the [reasoning models](./reasoning.md) feature, as long as you use a model that [supports image inputs](https://learn.microsoft.com/azure/ai-services/openai/how-to/reasoning?tabs=python-secure%2Cpy#api--feature-support).
106+
* This feature *is* compatible with the [reasoning models](./reasoning.md) feature, as long as you use a model that [supports image inputs](https://learn.microsoft.com/azure/ai-services/openai/how-to/reasoning?tabs=python-secure%2Cpy#api--feature-support).

tests/conftest.py

Lines changed: 50 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -302,6 +302,17 @@ def mock_blob_container_client(monkeypatch):
302302
"AZURE_OPENAI_EMB_MODEL_NAME": "text-embedding-3-large",
303303
"AZURE_OPENAI_EMB_DIMENSIONS": "3072",
304304
},
305+
{
306+
"OPENAI_HOST": "azure",
307+
"AZURE_OPENAI_SERVICE": "test-openai-service",
308+
"AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt",
309+
"AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada",
310+
"AZURE_OPENAI_EMB_MODEL_NAME": "text-embedding-3-large",
311+
"AZURE_OPENAI_EMB_DIMENSIONS": "3072",
312+
},
313+
]
314+
315+
vision_envs = [
305316
{
306317
"OPENAI_HOST": "azure",
307318
"AZURE_OPENAI_SERVICE": "test-openai-service",
@@ -310,7 +321,7 @@ def mock_blob_container_client(monkeypatch):
310321
"AZURE_OPENAI_EMB_MODEL_NAME": "text-embedding-3-large",
311322
"AZURE_OPENAI_EMB_DIMENSIONS": "3072",
312323
"USE_MULTIMODAL": "true",
313-
"VISION_ENDPOINT": "https://testvision.cognitiveservices.azure.com/",
324+
"AZURE_VISION_ENDPOINT": "https://testvision.cognitiveservices.azure.com/",
314325
},
315326
]
316327

@@ -501,6 +512,25 @@ def mock_agent_auth_env(monkeypatch, request):
501512
yield
502513

503514

515+
@pytest.fixture(params=vision_envs, ids=["client0"])
516+
def mock_vision_env(monkeypatch, request):
517+
with mock.patch.dict(os.environ, clear=True):
518+
monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account")
519+
monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container")
520+
monkeypatch.setenv("AZURE_IMAGESTORAGE_CONTAINER", "test-image-container")
521+
monkeypatch.setenv("AZURE_STORAGE_RESOURCE_GROUP", "test-storage-rg")
522+
monkeypatch.setenv("AZURE_SUBSCRIPTION_ID", "test-storage-subid")
523+
monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index")
524+
monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service")
525+
monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-4.1-mini")
526+
for key, value in request.param.items():
527+
monkeypatch.setenv(key, value)
528+
529+
with mock.patch("app.AzureDeveloperCliCredential") as mock_default_azure_credential:
530+
mock_default_azure_credential.return_value = MockAzureCredential()
531+
yield
532+
533+
504534
@pytest_asyncio.fixture(scope="function")
505535
async def client(
506536
monkeypatch,
@@ -689,6 +719,25 @@ async def auth_public_documents_client(
689719
yield client
690720

691721

722+
@pytest_asyncio.fixture(scope="function")
723+
async def vision_client(
724+
monkeypatch,
725+
mock_vision_env,
726+
mock_openai_chatcompletion,
727+
mock_openai_embedding,
728+
mock_acs_search,
729+
mock_blob_container_client,
730+
mock_azurehttp_calls,
731+
):
732+
quart_app = app.create_app()
733+
734+
async with quart_app.test_app() as test_app:
735+
test_app.app.config.update({"TESTING": True})
736+
mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT])
737+
mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT])
738+
yield test_app.test_client()
739+
740+
692741
@pytest.fixture
693742
def mock_validate_token_success(monkeypatch):
694743
async def mock_validate_access_token(self, token):

tests/snapshots/test_app/test_ask_prompt_template/client1/result.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,8 @@
1515
"props": {
1616
"filter": null,
1717
"top": 3,
18-
"use_image_embeddings": true,
19-
"use_image_sources": true,
18+
"use_image_embeddings": false,
19+
"use_image_sources": false,
2020
"use_query_rewriting": false,
2121
"use_semantic_captions": false,
2222
"use_semantic_ranker": false,

tests/snapshots/test_app/test_ask_prompt_template_concat/client1/result.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,8 @@
1515
"props": {
1616
"filter": null,
1717
"top": 3,
18-
"use_image_embeddings": true,
19-
"use_image_sources": true,
18+
"use_image_embeddings": false,
19+
"use_image_sources": false,
2020
"use_query_rewriting": false,
2121
"use_semantic_captions": false,
2222
"use_semantic_ranker": false,

tests/snapshots/test_app/test_ask_rtr_text/client1/result.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,8 @@
1515
"props": {
1616
"filter": null,
1717
"top": 3,
18-
"use_image_embeddings": true,
19-
"use_image_sources": true,
18+
"use_image_embeddings": false,
19+
"use_image_sources": false,
2020
"use_query_rewriting": false,
2121
"use_semantic_captions": false,
2222
"use_semantic_ranker": false,

0 commit comments

Comments (0)