Commit 598cf36

refactor: remove complete_chat_request method and related tests from ChatService and TestChatService
1 parent 0061d11 commit 598cf36

File tree: 3 files changed, +8 / -80 lines

src/api/services/chat_service.py

Lines changed: 0 additions & 24 deletions
@@ -180,27 +180,3 @@ async def generate():
                 yield json.dumps({"error": "An error occurred while processing the request."}) + "\n\n"
 
         return generate()
-
-    async def complete_chat_request(self, query, last_rag_response=None):
-        """
-        Completes a chat request by generating a chart from the RAG response.
-        """
-        if not last_rag_response:
-            return {"error": "A previous RAG response is required to generate a chart."}
-
-        # Process RAG response to generate chart data
-        chart_data = await self.process_rag_response(last_rag_response, query)
-
-        if not chart_data or "error" in chart_data:
-            return {
-                "error": "Chart could not be generated from this data. Please ask a different question.",
-                "error_desc": str(chart_data),
-            }
-
-        logger.info("Successfully generated chart data.")
-        return {
-            "id": str(uuid.uuid4()),
-            "model": "azure-openai",
-            "created": int(time.time()),
-            "object": chart_data,
-        }
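
Note: the deleted method was a thin wrapper that packaged the chart data returned by process_rag_response into a completion-style envelope. For any caller that still needs that shape, a minimal sketch reconstructed from the deleted code (not from anything remaining in the repository) could look like this:

import time
import uuid

def wrap_chart_data(chart_data):
    # Rebuilds the envelope the removed complete_chat_request returned.
    if not chart_data or "error" in chart_data:
        return {
            "error": "Chart could not be generated from this data. Please ask a different question.",
            "error_desc": str(chart_data),
        }
    return {
        "id": str(uuid.uuid4()),
        "model": "azure-openai",
        "created": int(time.time()),
        "object": chart_data,
    }

# Illustrative usage with stand-in chart data taken from the removed tests.
print(wrap_chart_data({"type": "bar", "data": {"labels": ["A"], "datasets": [{"data": [1]}]}}))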

src/tests/api/services/test_chat_service.py

Lines changed: 6 additions & 53 deletions
@@ -5,7 +5,6 @@
 import pytest
 from fastapi import HTTPException, status
 from semantic_kernel.exceptions.agent_exceptions import AgentException as RealAgentException
-from azure.ai.agents.models import MessageRole
 
 
 
@@ -244,16 +243,8 @@ async def mock_invoke_stream(*args, **kwargs):
         assert "I cannot answer this question with the current data" in chunks[0]
 
     @pytest.mark.asyncio
-    @patch('services.chat_service.uuid.uuid4')
-    @patch('services.chat_service.time.time')
-    @patch('services.chat_service.format_stream_response')
-    async def test_stream_chat_request_success(self, mock_format_stream, mock_time, mock_uuid, chat_service):
+    async def test_stream_chat_request_success(self, chat_service):
         """Test successful stream chat request."""
-        # Setup mocks
-        mock_uuid.return_value = "test-uuid"
-        mock_time.return_value = 1234567890
-        mock_format_stream.return_value = {"formatted": "response"}
-
         # Mock stream_openai_text
         async def mock_stream_openai_text(conversation_id, query):
             yield "Hello"
@@ -272,7 +263,11 @@ async def mock_stream_openai_text(conversation_id, query):
         # Verify the chunks contain expected structure
         for chunk in chunks:
             chunk_data = json.loads(chunk.strip())
-            assert "formatted" in chunk_data
+            assert "choices" in chunk_data
+            assert len(chunk_data["choices"]) > 0
+            assert "messages" in chunk_data["choices"][0]
+            assert len(chunk_data["choices"][0]["messages"]) > 0
+            assert chunk_data["choices"][0]["messages"][0]["role"] == "assistant"
 
     @pytest.mark.asyncio
     async def test_stream_chat_request_agent_exception_rate_limit(self, chat_service):
@@ -345,46 +340,4 @@ async def mock_stream_openai_text_generic_error(conversation_id, query):
         error_data = json.loads(chunks[0].strip())
         assert "error" in error_data
         assert "An error occurred while processing the request." == error_data["error"]
-
-    @pytest.mark.asyncio
-    async def test_complete_chat_request_success(self, chat_service):
-        mock_chart_data = {
-            "type": "bar",
-            "data": {
-                "labels": ["A"],
-                "datasets": [{"data": [1]}]
-            }
-        }
-
-        chat_service.process_rag_response = AsyncMock(return_value=mock_chart_data)
-
-        result = await chat_service.complete_chat_request("Query", last_rag_response="RAG response")
-
-        assert result["object"]["type"] == "bar"
-
-
-    @pytest.mark.asyncio
-    async def test_complete_chat_request_no_rag_response(self, chat_service):
-        """Test complete chat request without RAG response."""
-        result = await chat_service.complete_chat_request("Query", last_rag_response=None)
-
-        assert "error" in result
-        assert result["error"] == "A previous RAG response is required to generate a chart."
-
-    @pytest.mark.asyncio
-    async def test_complete_chat_request_chart_error(self, chat_service):
-        chat_service.process_rag_response = AsyncMock(return_value={"error": "Chart generation failed"})
-
-        result = await chat_service.complete_chat_request("Query", last_rag_response="RAG response")
-
-        assert "error" in result
-
-
-    @pytest.mark.asyncio
-    async def test_complete_chat_request_empty_chart_data(self, chat_service):
-        chat_service.process_rag_response = AsyncMock(return_value=None)
-
-        result = await chat_service.complete_chat_request("Query", last_rag_response="RAG response")
-
-        assert "error" in result
 
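
Note: the reworked test asserts on the streaming envelope itself rather than on a patched format_stream_response return value. As a rough, hypothetical illustration of a chunk that satisfies those assertions (field names beyond "choices", "messages", and "role" are assumptions, not taken from the repository):

import json

# Build a stand-in chunk in the shape the updated assertions check for.
chunk = json.dumps({
    "choices": [
        {"messages": [{"role": "assistant", "content": "Hello"}]}
    ],
}) + "\n\n"

chunk_data = json.loads(chunk.strip())
assert "choices" in chunk_data
assert len(chunk_data["choices"]) > 0
assert "messages" in chunk_data["choices"][0]
assert len(chunk_data["choices"][0]["messages"]) > 0
assert chunk_data["choices"][0]["messages"][0]["role"] == "assistant"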

src/tests/api/services/test_history_service.py

Lines changed: 2 additions & 3 deletions
@@ -30,9 +30,8 @@ def history_service(mock_config_instance):
     with patch("services.history_service.Config", return_value=mock_config_instance):
         # Create patches for other dependencies used by HistoryService
         with patch("services.history_service.CosmosConversationClient"):
-            with patch("services.history_service.complete_chat_request"):
-                service = HistoryService()
-                return service
+            service = HistoryService()
+            return service
 
 
 @pytest.fixture

0 commit comments
