Skip to content

Commit bb7ae95

Browse files
committed
Update default LLM model to 'tinyllama:latest', add test utilities for environment checks, and ensure test environment readiness before running tests.
1 parent e42610a commit bb7ae95

File tree

5 files changed

+164
-3
lines changed

5 files changed

+164
-3
lines changed

ragadoc/rag_system.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def __init__(
5656
self,
5757
ollama_base_url: str = "http://localhost:11434",
5858
embedding_model: str = DEFAULT_EMBEDDING_MODEL,
59-
llm_model: str = "olmo2:13b",
59+
llm_model: str = "tinyllama:latest",
6060
chunk_size: int = DEFAULT_CHUNK_SIZE,
6161
chunk_overlap: int = DEFAULT_CHUNK_OVERLAP,
6262
similarity_threshold: float = DEFAULT_SIMILARITY_THRESHOLD,

tests/conftest.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,12 @@
33
"""
44
import pytest
55
import fitz
6+
from .test_utils import check_test_environment
7+
8+
9+
# Ensure test environment is ready when tests start
10+
if not check_test_environment():
11+
pytest.exit("Test environment setup failed. Please check Ollama installation and service.", returncode=1)
612

713

814
@pytest.fixture

tests/test_ai_response.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414

1515
# Known available models
1616
EMBEDDING_MODEL = "nomic-embed-text:latest"
17-
LLM_MODEL = "olmo2:13b"
17+
LLM_MODEL = "tinyllama:latest"
1818

1919

2020
def is_ollama_available():

tests/test_rag_system.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717

1818
# Known available models
1919
EMBEDDING_MODEL = "nomic-embed-text:latest"
20-
LLM_MODEL = "olmo2:13b"
20+
LLM_MODEL = "tinyllama:latest"
2121

2222

2323
def is_ollama_available():

tests/test_utils.py

Lines changed: 155 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,155 @@
1+
"""
2+
Test utilities for ensuring proper test environment setup
3+
"""
4+
import subprocess
5+
import sys
6+
import time
7+
from typing import List
8+
from loguru import logger
9+
10+
11+
def is_ollama_running() -> bool:
    """Return True when the local Ollama service answers an `ollama list` call."""
    try:
        completed = subprocess.run(
            ["ollama", "list"],
            capture_output=True,
            text=True,
            timeout=10,
        )
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
        # Missing binary, hung daemon, or failed invocation all mean "not running".
        return False
    return completed.returncode == 0
23+
24+
25+
def get_available_models() -> List[str]:
    """Return the names of the locally available Ollama models.

    Parses the output of ``ollama list``: the header line is skipped and the
    model name is taken from the first whitespace-separated column of each
    remaining non-blank row.

    Returns:
        List[str]: Available model names, or an empty list when the command
        fails, times out, or the ``ollama`` binary is not installed.
    """
    try:
        result = subprocess.run(
            ["ollama", "list"],
            capture_output=True,
            text=True,
            check=True,
            timeout=10
        )
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as e:
        # FileNotFoundError covers a missing `ollama` binary; returning [] here
        # matches is_ollama_running() instead of letting the error propagate.
        logger.error(f"Failed to get model list: {e}")
        return []
    # Skip header line; model name is the first column of each row.
    lines = result.stdout.strip().split('\n')[1:]
    return [line.split()[0] for line in lines if line.strip()]
47+
48+
49+
def pull_model(model: str, timeout: int = 300) -> bool:
    """Pull a specific Ollama model, returning True on success.

    Args:
        model: Full model tag, e.g. ``"tinyllama:latest"``.
        timeout: Maximum number of seconds to wait for the download.

    Returns:
        bool: True when ``ollama pull`` exits with code 0; False on a
        non-zero exit, a timeout, or a missing ``ollama`` binary.
    """
    logger.info(f"Pulling model: {model}")
    try:
        result = subprocess.run(
            ["ollama", "pull", model],
            capture_output=True,
            text=True,
            timeout=timeout
        )
        if result.returncode == 0:
            logger.info(f"Successfully pulled {model}")
            return True
        else:
            logger.error(f"Failed to pull {model}: {result.stderr}")
            return False
    except subprocess.TimeoutExpired:
        logger.error(f"Timeout pulling {model} after {timeout} seconds")
        return False
    except (subprocess.CalledProcessError, FileNotFoundError) as e:
        # FileNotFoundError: `ollama` not on PATH -- report failure instead of
        # crashing, consistent with is_ollama_running().
        logger.error(f"Error pulling {model}: {e}")
        return False
71+
72+
73+
def ensure_test_models() -> bool:
    """
    Ensure all required models for testing are available.

    Returns:
        bool: True if all models are available, False otherwise
    """
    required_models = [
        "tinyllama:latest",
        "nomic-embed-text:latest"
    ]

    if not is_ollama_running():
        logger.error("Ollama is not running. Please start Ollama service first.")
        return False

    try:
        available_models = get_available_models()
        logger.info(f"Currently available models: {available_models}")

        # Partition required models into present and missing, logging each hit.
        missing_models = []
        for name in required_models:
            if name in available_models:
                logger.info(f"✓ Model {name} already available")
            else:
                missing_models.append(name)

        if not missing_models:
            logger.info("All required models are available!")
            return True

        logger.info(f"Missing models: {missing_models}")

        # Pull every missing model, tallying failures rather than bailing early.
        failures = 0
        for name in missing_models:
            if not pull_model(name):
                logger.error(f"Failed to pull required model: {name}")
                failures += 1

        if failures:
            logger.error(f"Failed to pull {failures} models")
            return False

        logger.info("All required models successfully pulled!")
        return True

    except Exception as e:
        logger.error(f"Error ensuring test models: {e}")
        return False
124+
125+
126+
def check_test_environment() -> bool:
    """
    Comprehensive check of the test environment.

    Verifies, in order: the Ollama CLI is installed, the Ollama service is
    running, and all models required by the tests are available (pulling any
    that are missing).

    Returns:
        bool: True if environment is ready for testing
    """
    logger.info("Checking test environment...")

    # Check if Ollama is installed.  The timeout keeps a hung binary from
    # stalling test startup, consistent with the other subprocess calls here.
    try:
        subprocess.run(["ollama", "--version"], capture_output=True, check=True, timeout=10)
        logger.info("✓ Ollama is installed")
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
        logger.error("✗ Ollama is not installed or not in PATH")
        return False

    # Check if Ollama is running
    if not is_ollama_running():
        logger.error("✗ Ollama service is not running")
        return False

    logger.info("✓ Ollama service is running")

    # Ensure required models
    if not ensure_test_models():
        return False

    logger.info("✓ Test environment is ready!")
    return True

0 commit comments

Comments
 (0)