Skip to content

Commit 7b6e674

Browse files
committed
test: add streaming E2E tests, complete coverage, and ddev Ollama integration
- Add 12 streaming E2E tests covering all streamAction() branches (rate limiting, validation, missing config, fallback, SSE chunked response, XSS escaping, exception handling, model override)
- Add 8 additional E2E tests for chat/complete rate limiting, validation boundaries, config-by-identifier, and invalid JSON
- Add 27 unit tests killing escaped mutants and covering all lines
- Add parseSseEvents() helper and createStreamingStack() factory
- Add ddev Ollama integration (docker-compose, host commands, SQL seed)
- Add Makefile targets for ollama, seed, ollama-pull
- Rewrite testing.yml with matrix-based per-suite coverage uploads
- Raise infection thresholds to minMsi:80, minCoveredMsi:85

Results: 515 tests, 100% line/method coverage, 96% MSI, 98% covered MSI

Signed-off-by: Sebastian Mendel <info@sebastianmendel.de>
1 parent 01799bf commit 7b6e674

File tree

14 files changed

+1864
-16
lines changed

14 files changed

+1864
-16
lines changed

.ddev/commands/host/ollama

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,67 @@
#!/bin/bash

## Description: Manage Ollama LLM models
## Usage: ollama [pull|list|run|chat|rm] [model]
## Example: ddev ollama pull qwen3:0.6b
## Example: ddev ollama list
## Example: ddev ollama chat qwen3:0.6b

# Fail fast: abort on the first failing docker invocation instead of
# continuing silently (original script had no error handling).
set -eo pipefail

# Container name follows the ddev add-on convention: ddev-<project>-<service>.
# ${DDEV_SITENAME:-} keeps the expansion safe even outside a ddev context.
OLLAMA_CONTAINER="ddev-${DDEV_SITENAME:-}-ollama"
# Default model is configurable via OLLAMA_MODEL (see docker-compose.ollama.yaml).
DEFAULT_MODEL="${OLLAMA_MODEL:-qwen3:0.6b}"

# "${1:-}" instead of "$1": the no-argument case must fall through to the
# usage text rather than trip on an unset positional parameter.
case "${1:-}" in
  pull)
    MODEL="${2:-$DEFAULT_MODEL}"
    echo "Pulling model: $MODEL..."
    docker exec -it "$OLLAMA_CONTAINER" ollama pull "$MODEL"
    ;;
  list)
    echo "Available models:"
    docker exec "$OLLAMA_CONTAINER" ollama list
    ;;
  run|chat)
    MODEL="${2:-$DEFAULT_MODEL}"
    echo "Starting chat with: $MODEL (Ctrl+D to exit)"
    docker exec -it "$OLLAMA_CONTAINER" ollama run "$MODEL"
    ;;
  rm|remove)
    if [ -z "${2:-}" ]; then
      echo "Usage: ddev ollama rm <model>"
      exit 1
    fi
    echo "Removing model: $2..."
    docker exec -it "$OLLAMA_CONTAINER" ollama rm "$2"
    ;;
  status)
    # "ollama list" doubles as a liveness probe: it only succeeds when the
    # server inside the container is answering.
    echo "Ollama container status:"
    docker exec "$OLLAMA_CONTAINER" ollama list
    ;;
  api)
    echo "Ollama API endpoint: http://ollama:11434"
    # The compose file publishes 11434 on an ephemeral host port; look it up.
    echo "From host: http://localhost:$(docker port "$OLLAMA_CONTAINER" 11434 | cut -d: -f2)"
    ;;
  *)
    echo "Ollama LLM Manager"
    echo ""
    echo "Usage: ddev ollama <command> [model]"
    echo ""
    echo "Commands:"
    echo "  pull [model]   Pull a model (default: $DEFAULT_MODEL)"
    echo "  list           List installed models"
    echo "  chat [model]   Start interactive chat"
    echo "  rm <model>     Remove a model"
    echo "  status         Show Ollama status"
    echo "  api            Show API endpoints"
    echo ""
    echo "Available small models:"
    echo "  smollm2:135m  - 135M params, ~400MB (absolute minimum)"
    echo "  smollm2:360m  - 360M params, ~700MB (tiny)"
    echo "  qwen3:0.6b    - 600M params, ~800MB (recommended, newest)"
    echo "  gemma3:1b     - 1B params, ~1.2GB (Google's latest)"
    echo "  qwen2.5:0.5b  - 0.5B params, ~1GB"
    echo "  qwen2.5:1.5b  - 1.5B params, ~2GB"
    echo "  phi3:mini     - 3.8B params, ~3GB"
    echo ""
    echo "Configure default: OLLAMA_MODEL=gemma3:1b ddev restart"
    ;;
esac

.ddev/commands/host/seed-ollama

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
#!/bin/bash

## Description: Import Ollama provider, models, and sample configurations
## Usage: seed-ollama [database]
## Example: ddev seed-ollama
## Example: ddev seed-ollama v14

# Propagate failures through pipelines (e.g. the mysql import below).
set -o pipefail

# Resolve the .ddev project root from this script's own location
# (.ddev/commands/host/ -> two levels up).
DDEV_APPROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
SQL_FILE="${DDEV_APPROOT}/sql/seed-ollama.sql"

# Default to v14 if no database specified
DATABASE="${1:-v14}"

if [ ! -f "$SQL_FILE" ]; then
  echo "Error: SQL file not found: $SQL_FILE"
  exit 1
fi

echo "Importing Ollama seed data into database: $DATABASE"
echo ""

# Feed the SQL file to mysql via stdin redirection (no useless cat) and
# branch on the command's status directly instead of inspecting $?.
if ddev mysql -d "$DATABASE" < "$SQL_FILE"; then
  echo ""
  echo "Seed data imported successfully!"
  echo ""
  echo "Pre-configured LLM setup:"
  echo "  Provider: Local Ollama (http://ollama:11434)"
  echo "  Models:   qwen3:0.6b (default), gemma3:1b, smollm2:360m"
  echo "  Configs:  General Purpose, Content Summarizer, Creative Writing"
  echo ""
  echo "You can now:"
  echo "  - Access Admin Tools > LLM in TYPO3 backend"
  echo "  - Use 'ddev ollama list' to verify the model is available"
  echo "  - Test with 'ddev ollama chat' for interactive chat"
else
  echo ""
  echo "Error importing seed data. Check database exists and tables are created."
  echo "Run: ddev exec -d /var/www/html/$DATABASE vendor/bin/typo3 database:updateschema"
  exit 1
fi

.ddev/docker-compose.ollama.yaml

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
# Ollama for local LLM inference
# Configure model via OLLAMA_MODEL env var (default: qwen3:0.6b)

services:
  ollama:
    # Must match the name computed by .ddev/commands/host/ollama:
    # ddev-<project>-ollama
    container_name: ddev-${DDEV_SITENAME}-ollama
    image: ollama/ollama:latest
    restart: unless-stopped
    ports:
      # Container port only — Docker maps it to an ephemeral host port;
      # discover it with "ddev ollama api".
      - "11434"
    volumes:
      # Persist pulled models across container restarts/rebuilds.
      - ollama-data:/root/.ollama
    environment:
      # Listen on all interfaces so other ddev containers can reach the API
      # at http://ollama:11434.
      - OLLAMA_HOST=0.0.0.0
      # Options: qwen3:0.6b, gemma3:1b, smollm2:360m, qwen2.5:0.5b
      - OLLAMA_MODEL=${OLLAMA_MODEL:-qwen3:0.6b}
    labels:
      # Standard ddev labels so the container is managed with the project.
      com.ddev.site-name: ${DDEV_SITENAME}
      com.ddev.approot: $DDEV_APPROOT
    healthcheck:
      # "ollama list" only succeeds once the server answers on its socket.
      test: ["CMD", "ollama", "list"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s

volumes:
  ollama-data:
    name: "${DDEV_SITENAME}-ollama-data"

.ddev/sql/seed-ollama.sql

Lines changed: 211 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,211 @@
-- Seed data for local Ollama LLM development with t3x-cowriter
-- This creates a pre-configured Ollama provider, models, and sample configurations
-- Run with: ddev seed-ollama
--
-- All INSERTs use ON DUPLICATE KEY UPDATE so the script is idempotent:
-- re-running it refreshes names/descriptions instead of failing.
-- NOTE(review): idempotency assumes a UNIQUE key on `identifier` in each
-- table — confirm against the extension's ext_tables.sql.

-- Provider: Local Ollama instance
INSERT INTO tx_nrllm_provider (
    pid, identifier, name, description, adapter_type, endpoint_url, api_key,
    api_timeout, max_retries, is_active, priority, sorting, tstamp, crdate
) VALUES (
    0,
    'ollama-local',
    'Local Ollama',
    'Local Ollama LLM server running in DDEV container. No API key required.',
    'ollama',
    'http://ollama:11434',
    '',                  -- api_key: Ollama needs none
    30,                  -- api_timeout
    3,                   -- max_retries
    1,                   -- is_active
    100,                 -- priority
    10,                  -- sorting
    UNIX_TIMESTAMP(),
    UNIX_TIMESTAMP()
) ON DUPLICATE KEY UPDATE
    name = VALUES(name),
    description = VALUES(description),
    endpoint_url = VALUES(endpoint_url),
    priority = VALUES(priority),
    tstamp = UNIX_TIMESTAMP();

-- Get the provider UID for the model relation
SET @ollama_provider_uid = (SELECT uid FROM tx_nrllm_provider WHERE identifier = 'ollama-local' AND deleted = 0 LIMIT 1);

-- Model: Qwen 3 0.6B (default small model)
INSERT INTO tx_nrllm_model (
    pid, identifier, name, description, provider_uid, model_id,
    context_length, max_output_tokens, capabilities, default_timeout,
    cost_input, cost_output, is_active, is_default, sorting, tstamp, crdate
) VALUES (
    0,
    'qwen3-0.6b',
    'Qwen 3 0.6B',
    'Alibaba Qwen 3 with 600M parameters. Fast, efficient for development and testing.',
    @ollama_provider_uid,
    'qwen3:0.6b',        -- model_id as known to Ollama
    32768,               -- context_length
    4096,                -- max_output_tokens
    'chat,completion,streaming',
    60,                  -- default_timeout
    0,                   -- cost_input: local inference is free
    0,                   -- cost_output
    1,                   -- is_active
    1,                   -- is_default: the default model
    10,                  -- sorting
    UNIX_TIMESTAMP(),
    UNIX_TIMESTAMP()
) ON DUPLICATE KEY UPDATE
    name = VALUES(name),
    description = VALUES(description),
    provider_uid = @ollama_provider_uid,
    model_id = VALUES(model_id),
    tstamp = UNIX_TIMESTAMP();

-- Model: Gemma 3 1B (alternative small model)
INSERT INTO tx_nrllm_model (
    pid, identifier, name, description, provider_uid, model_id,
    context_length, max_output_tokens, capabilities, default_timeout,
    cost_input, cost_output, is_active, is_default, sorting, tstamp, crdate
) VALUES (
    0,
    'gemma3-1b',
    'Gemma 3 1B',
    'Google Gemma 3 with 1B parameters. Good quality for its size.',
    @ollama_provider_uid,
    'gemma3:1b',
    32768,               -- context_length
    4096,                -- max_output_tokens
    'chat,completion,streaming',
    60,                  -- default_timeout
    0,                   -- cost_input
    0,                   -- cost_output
    1,                   -- is_active
    0,                   -- is_default: not the default
    20,                  -- sorting
    UNIX_TIMESTAMP(),
    UNIX_TIMESTAMP()
) ON DUPLICATE KEY UPDATE
    name = VALUES(name),
    description = VALUES(description),
    provider_uid = @ollama_provider_uid,
    model_id = VALUES(model_id),
    tstamp = UNIX_TIMESTAMP();

-- Model: SmolLM2 360M (ultra-small model)
-- Note: no 'streaming' capability, smaller context, shorter timeout.
INSERT INTO tx_nrllm_model (
    pid, identifier, name, description, provider_uid, model_id,
    context_length, max_output_tokens, capabilities, default_timeout,
    cost_input, cost_output, is_active, is_default, sorting, tstamp, crdate
) VALUES (
    0,
    'smollm2-360m',
    'SmolLM2 360M',
    'Hugging Face SmolLM2 with 360M parameters. Ultra-fast, minimal resources.',
    @ollama_provider_uid,
    'smollm2:360m',
    8192,                -- context_length
    2048,                -- max_output_tokens
    'chat,completion',
    30,                  -- default_timeout
    0,                   -- cost_input
    0,                   -- cost_output
    1,                   -- is_active
    0,                   -- is_default
    30,                  -- sorting
    UNIX_TIMESTAMP(),
    UNIX_TIMESTAMP()
) ON DUPLICATE KEY UPDATE
    name = VALUES(name),
    description = VALUES(description),
    provider_uid = @ollama_provider_uid,
    model_id = VALUES(model_id),
    tstamp = UNIX_TIMESTAMP();

-- Get the default model UID for configurations
SET @default_model_uid = (SELECT uid FROM tx_nrllm_model WHERE identifier = 'qwen3-0.6b' AND deleted = 0 LIMIT 1);

-- Configuration: General Purpose (default for Cowriter)
INSERT INTO tx_nrllm_configuration (
    pid, identifier, name, description, model_uid,
    system_prompt, temperature, max_tokens, top_p, timeout,
    is_active, is_default, sorting, tstamp, crdate
) VALUES (
    0,
    'local-general',
    'Local General Purpose',
    'General-purpose configuration for Cowriter content editing and generation.',
    @default_model_uid,
    'You are a professional writing assistant integrated into a CMS editor. Your task is to improve, enhance, or generate text based on the user''s request. Respond ONLY with the improved/generated text.',
    0.7,                 -- temperature: balanced
    2048,                -- max_tokens
    0.9,                 -- top_p
    0,                   -- timeout: fall back to model default
    1,                   -- is_active
    1,                   -- is_default
    10,                  -- sorting
    UNIX_TIMESTAMP(),
    UNIX_TIMESTAMP()
) ON DUPLICATE KEY UPDATE
    name = VALUES(name),
    description = VALUES(description),
    model_uid = @default_model_uid,
    tstamp = UNIX_TIMESTAMP();

-- Configuration: Content Summarizer
INSERT INTO tx_nrllm_configuration (
    pid, identifier, name, description, model_uid,
    system_prompt, temperature, max_tokens, top_p, timeout,
    is_active, is_default, sorting, tstamp, crdate
) VALUES (
    0,
    'local-summarizer',
    'Local Content Summarizer',
    'Optimized for summarizing articles, documents, and CMS content.',
    @default_model_uid,
    'You are a content summarizer. Create clear, concise summaries that capture the key points. Focus on the most important information and maintain the original meaning.',
    0.3,                 -- temperature: low, for factual summaries
    1024,                -- max_tokens
    0.85,                -- top_p
    0,                   -- timeout: fall back to model default
    1,                   -- is_active
    0,                   -- is_default
    20,                  -- sorting
    UNIX_TIMESTAMP(),
    UNIX_TIMESTAMP()
) ON DUPLICATE KEY UPDATE
    name = VALUES(name),
    description = VALUES(description),
    model_uid = @default_model_uid,
    tstamp = UNIX_TIMESTAMP();

-- Configuration: Creative Writing
INSERT INTO tx_nrllm_configuration (
    pid, identifier, name, description, model_uid,
    system_prompt, temperature, max_tokens, top_p, timeout,
    is_active, is_default, sorting, tstamp, crdate
) VALUES (
    0,
    'local-creative',
    'Local Creative Writing',
    'Higher temperature for creative content generation in CMS.',
    @default_model_uid,
    'You are a creative writing assistant. Help generate engaging, imaginative content with varied vocabulary and interesting prose.',
    0.9,                 -- temperature: high, for varied output
    2048,                -- max_tokens
    0.95,                -- top_p
    120,                 -- timeout: longer, creative runs can be slow
    1,                   -- is_active
    0,                   -- is_default
    30,                  -- sorting
    UNIX_TIMESTAMP(),
    UNIX_TIMESTAMP()
) ON DUPLICATE KEY UPDATE
    name = VALUES(name),
    description = VALUES(description),
    model_uid = @default_model_uid,
    tstamp = UNIX_TIMESTAMP();

-- Summary output so the import surfaces what was created.
SELECT 'Ollama seed data imported successfully!' AS status;
SELECT CONCAT('Provider: ', name, ' (', identifier, ')') AS created FROM tx_nrllm_provider WHERE identifier = 'ollama-local' AND deleted = 0;
SELECT CONCAT('Models: ', COUNT(*), ' configured') AS created FROM tx_nrllm_model WHERE provider_uid = @ollama_provider_uid AND deleted = 0;
SELECT CONCAT('Configurations: ', COUNT(*), ' configured') AS created FROM tx_nrllm_configuration WHERE model_uid = @default_model_uid AND deleted = 0;

0 commit comments

Comments
 (0)