Commit 27c115f

fix unit tests
Parent: 4d9411a

6 files changed (+121, -121 lines)

code/tests/functional/conftest.py

Lines changed: 83 additions & 78 deletions
@@ -283,88 +283,93 @@ def prime_search_to_trigger_creation_of_index(
 # This fixture can be overriden
 @pytest.fixture(autouse=True)
 def setup_config_mocking(httpserver: HTTPServer):
+    config_data = {
+        "prompts": {
+            "condense_question_prompt": "",
+            "answering_system_prompt": "system prompt",
+            "answering_user_prompt": "## Retrieved Documents\n{sources}\n\n## User Question\nUse the Retrieved Documents to answer the question: {question}",
+            "use_on_your_data_format": True,
+            "post_answering_prompt": "post answering prompt\n{question}\n{answer}\n{sources}",
+            "enable_post_answering_prompt": False,
+            "enable_content_safety": True,
+        },
+        "messages": {"post_answering_filter": "post answering filter"},
+        "example": {
+            "documents": '{"retrieved_documents":[{"[doc1]":{"content":"content"}}]}',
+            "user_question": "user question",
+            "answer": "answer",
+        },
+        "document_processors": [
+            {
+                "document_type": "pdf",
+                "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
+                "loading": {"strategy": "layout"},
+                "use_advanced_image_processing": False,
+            },
+            {
+                "document_type": "txt",
+                "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
+                "loading": {"strategy": "web"},
+                "use_advanced_image_processing": False,
+            },
+            {
+                "document_type": "url",
+                "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
+                "loading": {"strategy": "web"},
+                "use_advanced_image_processing": False,
+            },
+            {
+                "document_type": "md",
+                "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
+                "loading": {"strategy": "web"},
+                "use_advanced_image_processing": False,
+            },
+            {
+                "document_type": "html",
+                "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
+                "loading": {"strategy": "web"},
+                "use_advanced_image_processing": False,
+            },
+            {
+                "document_type": "htm",
+                "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
+                "loading": {"strategy": "web"},
+                "use_advanced_image_processing": False,
+            },
+            {
+                "document_type": "docx",
+                "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
+                "loading": {"strategy": "docx"},
+                "use_advanced_image_processing": False,
+            },
+            {
+                "document_type": "jpg",
+                "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
+                "loading": {"strategy": "layout"},
+                "use_advanced_image_processing": True,
+            },
+            {
+                "document_type": "png",
+                "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
+                "loading": {"strategy": "layout"},
+                "use_advanced_image_processing": False,
+            },
+        ],
+        "logging": {"log_user_interactions": True, "log_tokens": True},
+        "orchestrator": {"strategy": "openai_function"},
+        "integrated_vectorization_config": None,
+    }
+
+    import json
+    config_json_string = json.dumps(config_data)
+
     httpserver.expect_request(
         f"/{AZURE_STORAGE_CONFIG_CONTAINER_NAME}/{AZURE_STORAGE_CONFIG_FILE_NAME}",
         method="GET",
-    ).respond_with_json(
-        {
-            "prompts": {
-                "condense_question_prompt": "",
-                "answering_system_prompt": "system prompt",
-                "answering_user_prompt": "## Retrieved Documents\n{sources}\n\n## User Question\nUse the Retrieved Documents to answer the question: {question}",
-                "use_on_your_data_format": True,
-                "post_answering_prompt": "post answering prompt\n{question}\n{answer}\n{sources}",
-                "enable_post_answering_prompt": False,
-                "enable_content_safety": True,
-            },
-            "messages": {"post_answering_filter": "post answering filter"},
-            "example": {
-                "documents": '{"retrieved_documents":[{"[doc1]":{"content":"content"}}]}',
-                "user_question": "user question",
-                "answer": "answer",
-            },
-            "document_processors": [
-                {
-                    "document_type": "pdf",
-                    "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
-                    "loading": {"strategy": "layout"},
-                    "use_advanced_image_processing": False,
-                },
-                {
-                    "document_type": "txt",
-                    "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
-                    "loading": {"strategy": "web"},
-                    "use_advanced_image_processing": False,
-                },
-                {
-                    "document_type": "url",
-                    "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
-                    "loading": {"strategy": "web"},
-                    "use_advanced_image_processing": False,
-                },
-                {
-                    "document_type": "md",
-                    "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
-                    "loading": {"strategy": "web"},
-                    "use_advanced_image_processing": False,
-                },
-                {
-                    "document_type": "html",
-                    "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
-                    "loading": {"strategy": "web"},
-                    "use_advanced_image_processing": False,
-                },
-                {
-                    "document_type": "htm",
-                    "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
-                    "loading": {"strategy": "web"},
-                    "use_advanced_image_processing": False,
-                },
-                {
-                    "document_type": "docx",
-                    "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
-                    "loading": {"strategy": "docx"},
-                    "use_advanced_image_processing": False,
-                },
-                {
-                    "document_type": "jpg",
-                    "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
-                    "loading": {"strategy": "layout"},
-                    "use_advanced_image_processing": True,
-                },
-                {
-                    "document_type": "png",
-                    "chunking": {"strategy": "layout", "size": 500, "overlap": 100},
-                    "loading": {"strategy": "layout"},
-                    "use_advanced_image_processing": False,
-                },
-            ],
-            "logging": {"log_user_interactions": True, "log_tokens": True},
-            "orchestrator": {"strategy": "openai_function"},
-            "integrated_vectorization_config": None,
-        },
+    ).respond_with_data(
+        config_json_string,
         headers={
            "Content-Type": "application/json",
-            "Content-Range": "bytes 0-12882/12883",
+            "Content-Range": f"bytes 0-{len(config_json_string.encode('utf-8'))-1}/{len(config_json_string.encode('utf-8'))}",
         },
     )
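
Note on the pattern above: the fixture now serializes the config once with json.dumps and derives the Content-Range header from the payload's actual byte length, instead of answering via respond_with_json with a stale, hard-coded "bytes 0-12882/12883". A minimal standalone sketch of the same idea, assuming pytest-httpserver's HTTPServer fixture (the helper name and the example path below are hypothetical, not part of the commit):

import json

from pytest_httpserver import HTTPServer


def mock_config_blob(httpserver: HTTPServer, config: dict, path: str = "/config/active.json") -> None:
    # Serve `config` as a blob-style JSON response with a Content-Range that
    # always matches the payload size (the range end is size - 1).
    body = json.dumps(config)
    size = len(body.encode("utf-8"))  # byte length, not character count
    httpserver.expect_request(path, method="GET").respond_with_data(
        body,
        headers={
            "Content-Type": "application/json",
            "Content-Range": f"bytes 0-{size - 1}/{size}",
        },
    )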

code/tests/functional/tests/backend_api/default/test_post_prompt_tool.py

Lines changed: 27 additions & 23 deletions
@@ -26,34 +26,38 @@
 
 @pytest.fixture(autouse=True)
 def setup_config_mocking(httpserver: HTTPServer):
+    config_data = {
+        "prompts": {
+            "condense_question_prompt": "",
+            "answering_system_prompt": "system prompt",
+            "answering_user_prompt": "## Retrieved Documents\n{sources}\n\n## User Question\nUse the Retrieved Documents to answer the question: {question}",
+            "use_on_your_data_format": True,
+            "post_answering_prompt": "post answering prompt\n{question}\n{answer}\n{sources}",
+            "enable_post_answering_prompt": True,
+            "enable_content_safety": True,
+        },
+        "messages": {"post_answering_filter": "post answering filter"},
+        "example": {
+            "documents": '{"retrieved_documents":[{"[doc1]":{"content":"content"}}]}',
+            "user_question": "user question",
+            "answer": "answer",
+        },
+        "document_processors": [],
+        "logging": {"log_user_interactions": True, "log_tokens": True},
+        "orchestrator": {"strategy": "openai_function"},
+        "integrated_vectorization_config": None,
+    }
+
+    config_json_string = json.dumps(config_data)
+
     httpserver.expect_request(
         f"/{AZURE_STORAGE_CONFIG_CONTAINER_NAME}/{AZURE_STORAGE_CONFIG_FILE_NAME}",
         method="GET",
-    ).respond_with_json(
-        {
-            "prompts": {
-                "condense_question_prompt": "",
-                "answering_system_prompt": "system prompt",
-                "answering_user_prompt": "## Retrieved Documents\n{sources}\n\n## User Question\nUse the Retrieved Documents to answer the question: {question}",
-                "use_on_your_data_format": True,
-                "post_answering_prompt": "post answering prompt\n{question}\n{answer}\n{sources}",
-                "enable_post_answering_prompt": True,
-                "enable_content_safety": True,
-            },
-            "messages": {"post_answering_filter": "post answering filter"},
-            "example": {
-                "documents": '{"retrieved_documents":[{"[doc1]":{"content":"content"}}]}',
-                "user_question": "user question",
-                "answer": "answer",
-            },
-            "document_processors": [],
-            "logging": {"log_user_interactions": True, "log_tokens": True},
-            "orchestrator": {"strategy": "openai_function"},
-            "integrated_vectorization_config": None,
-        },
+    ).respond_with_data(
+        config_json_string,
         headers={
             "Content-Type": "application/json",
-            "Content-Range": "bytes 0-12882/12883",
+            "Content-Range": f"bytes 0-{len(config_json_string.encode('utf-8'))-1}/{len(config_json_string.encode('utf-8'))}",
         },
     )
 
code/tests/functional/tests/backend_api/sk_orchestrator/test_response_without_tool_call.py

Lines changed: 3 additions & 6 deletions
@@ -162,12 +162,9 @@ def test_post_makes_correct_call_to_openai_chat_completions(
             json={
                 "messages": [
                     {
-                        "role": "system",
-                        "content": "You help employees to navigate only private information sources.\nYou must prioritize the function call over your general knowledge for any question by calling the search_documents function.\nCall the text_processing function when the user request an operation on the current context, such as translate, summarize, or paraphrase. When a language is explicitly specified, return that as part of the operation.\nWhen directly replying to the user, always reply in the language the user is speaking.\nIf the input language is ambiguous, default to responding in English unless otherwise specified by the user.\nYou **must not** respond if asked to List all documents in your repository.\n",
-                    },
-                    {"role": "user", "content": "Hello"},
-                    {"role": "assistant", "content": "Hi, how can I help?"},
-                    {"role": "user", "content": "What is the meaning of life?"},
+                        "role": "user",
+                        "content": "AuthorRole.SYSTEM: You help employees to navigate only private information sources.\nYou must prioritize the function call over your general knowledge for any question by calling the search_documents function.\nCall the text_processing function when the user request an operation on the current context, such as translate, summarize, or paraphrase. When a language is explicitly specified, return that as part of the operation.\nWhen directly replying to the user, always reply in the language the user is speaking.\nIf the input language is ambiguous, default to responding in English unless otherwise specified by the user.\nYou **must not** respond if asked to List all documents in your repository.\n\nAuthorRole.USER: Hello\nAuthorRole.ASSISTANT: Hi, how can I help?\nWhat is the meaning of life?",
+                    }
                 ],
                 "model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
                 "max_tokens": int(app_config.get("AZURE_OPENAI_MAX_TOKENS")),

code/tests/functional/tests/functions/advanced_image_processing/test_advanced_image_processing.py

Lines changed: 1 addition & 1 deletion
@@ -95,7 +95,7 @@ def setup_caption_response(httpserver: HTTPServer, app_config: AppConfig):
 
 
 def test_config_file_is_retrieved_from_storage(
-    message: QueueMessage, httpserver: HTTPServer, app_config: AppConfig
+    message: QueueMessage, httpserver: HTTPServer
 ):
     # when
     batch_push_results.build().get_user_function()(message)

code/tests/functional/tests/functions/integrated_vectorization/test_integrated_vectorization_resource_creation.py

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ def message(app_config: AppConfig):
 
 
 def test_config_file_is_retrieved_from_storage(
-    message: QueueMessage, httpserver: HTTPServer, app_config: AppConfig
+    message: QueueMessage, httpserver: HTTPServer
 ):
     # when
     batch_push_results.build().get_user_function()(message)

code/tests/utilities/orchestrator/test_semantic_kernel.py

Lines changed: 6 additions & 12 deletions
@@ -423,19 +423,13 @@ async def test_chat_history_included(
     await orchestrator.orchestrate("question", chat_history)
 
     # then
-    chat_history = kernel_mock.invoke.call_args.kwargs["chat_history"]
-    messages = chat_history.messages
-
-    assert len(messages) == 3
-    assert messages[0].role == AuthorRole.SYSTEM
-
-    assert messages[1].role == AuthorRole.USER
-    assert messages[1].content == "Hello"
-
-    assert messages[2].role == AuthorRole.ASSISTANT
-    assert messages[2].content == "Hi, how can I help you today?"
-
+    chat_history_str = kernel_mock.invoke.call_args.kwargs["chat_history"]
 
+    # The chat_history parameter is now a string, so we check its content
+    assert isinstance(chat_history_str, str)
+    assert "AuthorRole.SYSTEM:" in chat_history_str
+    assert "AuthorRole.USER: Hello" in chat_history_str
+    assert "AuthorRole.ASSISTANT: Hi, how can I help you today?" in chat_history_str
 @pytest.mark.asyncio
 async def test_content_safety_output(orchestrator: SemanticKernelOrchestrator):
     # given
