Commit 27dee37

Merge pull request #7 from grapeot/feature/skip-llm-tests
feat: add skip option for LLM tests when server not configured
2 parents 61b8624 + c27e876 commit 27dee37

File tree

2 files changed: 21 additions, 0 deletions

tests/test_llm_api.py

Lines changed: 19 additions & 0 deletions
@@ -1,6 +1,20 @@
 import unittest
 from unittest.mock import patch, MagicMock
 from tools.llm_api import create_llm_client, query_llm
+import os
+
+def is_llm_configured():
+    """Check if LLM is configured by trying to connect to the server"""
+    try:
+        client = create_llm_client()
+        response = query_llm("test", client)
+        return response is not None
+    except:
+        return False
+
+# Skip all LLM tests if LLM is not configured
+skip_llm_tests = not is_llm_configured()
+skip_message = "Skipping LLM tests as LLM is not configured. This is normal if you haven't set up a local LLM server."
 
 class TestLLMAPI(unittest.TestCase):
     def setUp(self):
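A note on the probe above: it issues a real completion at import time, so every test run pays one LLM round trip, and the bare except: also swallows KeyboardInterrupt (except Exception: is the usual narrower choice). If the round trip ever proves too slow, a cheaper check is possible. A minimal sketch, assuming the server listens on the host/port below; adjust them to whatever create_llm_client() actually targets:

import socket

def is_llm_server_reachable(host="localhost", port=8000, timeout=1.0):
    """Cheaper probe: just checks that something accepts TCP connections
    on the assumed server port, without issuing a completion request."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:  # connection refused, timeout, unreachable host, ...
        return False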
@@ -18,6 +32,7 @@ def setUp(self):
         # Set up the mock client's chat.completions.create method
         self.mock_client.chat.completions.create.return_value = self.mock_response
 
+    @unittest.skipIf(skip_llm_tests, skip_message)
     @patch('tools.llm_api.OpenAI')
     def test_create_llm_client(self, mock_openai):
         # Test client creation
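The same decorator is repeated on every test method in the hunks that follow. An equivalent variant would decorate the class once, since unittest.skipIf applied to a TestCase subclass skips all of its test methods:

# Sketch: one class-level skip instead of per-method decorators.
@unittest.skipIf(skip_llm_tests, skip_message)
class TestLLMAPI(unittest.TestCase):
    ...  # test methods unchanged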
@@ -32,6 +47,7 @@ def test_create_llm_client(self, mock_openai):
 
         self.assertEqual(client, self.mock_client)
 
+    @unittest.skipIf(skip_llm_tests, skip_message)
     @patch('tools.llm_api.create_llm_client')
     def test_query_llm_success(self, mock_create_client):
         # Set up mock
@@ -50,6 +66,7 @@ def test_query_llm_success(self, mock_create_client):
             temperature=0.7
         )
 
+    @unittest.skipIf(skip_llm_tests, skip_message)
     @patch('tools.llm_api.create_llm_client')
     def test_query_llm_with_custom_model(self, mock_create_client):
         # Set up mock
@@ -68,6 +85,7 @@ def test_query_llm_with_custom_model(self, mock_create_client):
             temperature=0.7
         )
 
+    @unittest.skipIf(skip_llm_tests, skip_message)
     @patch('tools.llm_api.create_llm_client')
     def test_query_llm_with_existing_client(self, mock_create_client):
         # Test query with provided client
@@ -79,6 +97,7 @@ def test_query_llm_with_existing_client(self, mock_create_client):
         # Verify create_client was not called
         mock_create_client.assert_not_called()
 
+    @unittest.skipIf(skip_llm_tests, skip_message)
     @patch('tools.llm_api.create_llm_client')
     def test_query_llm_error(self, mock_create_client):
         # Set up mock to raise an exception
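With the flag in place, the suite can be run as usual (e.g. python -m unittest tests.test_llm_api -v), and unittest reports each skipped test together with skip_message. A small sketch that does the same programmatically, assuming the module path matches this repo's layout:

import unittest

# Load the test module and run it; skipped tests land in result.skipped
# as (test, reason) pairs.
loader = unittest.TestLoader()
suite = loader.loadTestsFromName("tests.test_llm_api")
result = unittest.TextTestRunner(verbosity=2).run(suite)
print(f"{len(result.skipped)} test(s) skipped")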

tools/llm_api.py

Lines changed: 2 additions & 0 deletions
@@ -25,6 +25,8 @@ def query_llm(prompt, client=None, model="Qwen/Qwen2.5-32B-Instruct-AWQ"):
         return response.choices[0].message.content
     except Exception as e:
         print(f"Error querying LLM: {e}")
+        print("Note: If you haven't configured a local LLM server, this error is expected and can be ignored.")
+        print("The LLM functionality is optional and won't affect other features.")
         return None
 
 def main():
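For context, the surrounding function, reconstructed from the hunk header, the context lines, and the call shape the tests assert, presumably looks roughly like the sketch below; the message format and the client fallback are assumptions based on the usual OpenAI-compatible chat API:

def query_llm(prompt, client=None, model="Qwen/Qwen2.5-32B-Instruct-AWQ"):
    """Query the local LLM server; returns None on any failure."""
    if client is None:
        client = create_llm_client()  # assumed fallback when no client is passed
    try:
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],  # assumed shape
            temperature=0.7,  # the tests above assert temperature=0.7
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"Error querying LLM: {e}")
        print("Note: If you haven't configured a local LLM server, this error is expected and can be ignored.")
        print("The LLM functionality is optional and won't affect other features.")
        return None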
