Skip to content

Commit 2edd0f7

Browse files
authored
Merge pull request #1413 from jrobertboos/lcore-1251
LCORE-1251: Added TLS E2E Tests
2 parents 29ab42f + 4f45b06 commit 2edd0f7

File tree

10 files changed

+820
-3
lines changed

10 files changed

+820
-3
lines changed

docker-compose.yaml

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,12 +25,16 @@ services:
2525
container_name: llama-stack
2626
ports:
2727
- "8321:8321" # Expose llama-stack on 8321 (adjust if needed)
28+
depends_on:
29+
mock-tls-inference:
30+
condition: service_healthy
2831
volumes:
2932
- ./run.yaml:/opt/app-root/run.yaml:z
3033
- ${GCP_KEYS_PATH:-./tmp/.gcp-keys-dummy}:/opt/app-root/.gcp-keys:ro
3134
- ./lightspeed-stack.yaml:/opt/app-root/lightspeed-stack.yaml:ro
3235
- llama-storage:/opt/app-root/src/.llama/storage
3336
- ./tests/e2e/rag:/opt/app-root/src/.llama/storage/rag:z
37+
- mock-tls-certs:/certs:ro
3438
environment:
3539
- BRAVE_SEARCH_API_KEY=${BRAVE_SEARCH_API_KEY:-}
3640
- TAVILY_SEARCH_API_KEY=${TAVILY_SEARCH_API_KEY:-}
@@ -141,9 +145,27 @@ services:
141145
retries: 3
142146
start_period: 2s
143147

148+
# Mock TLS inference server for TLS E2E tests
149+
mock-tls-inference:
150+
build:
151+
context: ./tests/e2e/mock_tls_inference_server
152+
dockerfile: Dockerfile
153+
container_name: mock-tls-inference
154+
networks:
155+
- lightspeednet
156+
volumes:
157+
- mock-tls-certs:/certs
158+
healthcheck:
159+
test: ["CMD", "python", "-c", "import urllib.request,ssl;c=ssl.create_default_context();c.check_hostname=False;c.verify_mode=ssl.CERT_NONE;urllib.request.urlopen('https://localhost:8443/health',context=c)"]
160+
interval: 5s
161+
timeout: 3s
162+
retries: 3
163+
start_period: 5s
164+
144165

145166
volumes:
146167
llama-storage:
168+
mock-tls-certs:
147169

148170
networks:
149171
lightspeednet:
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
name: Lightspeed Core Service (LCS)
2+
service:
3+
host: 0.0.0.0
4+
port: 8080
5+
auth_enabled: false
6+
workers: 1
7+
color_log: true
8+
access_log: true
9+
llama_stack:
10+
use_as_library_client: false
11+
url: http://${env.E2E_LLAMA_HOSTNAME}:8321
12+
api_key: xyzzy
13+
user_data_collection:
14+
feedback_enabled: true
15+
feedback_storage: "/tmp/data/feedback"
16+
transcripts_enabled: true
17+
transcripts_storage: "/tmp/data/transcripts"
18+
authentication:
19+
module: "noop"
20+
inference:
21+
default_provider: tls-openai
22+
default_model: mock-tls-model
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
name: Lightspeed Core Service (LCS)
2+
service:
3+
host: 0.0.0.0
4+
port: 8080
5+
auth_enabled: false
6+
workers: 1
7+
color_log: true
8+
access_log: true
9+
llama_stack:
10+
use_as_library_client: false
11+
url: http://llama-stack:8321
12+
api_key: xyzzy
13+
user_data_collection:
14+
feedback_enabled: true
15+
feedback_storage: "/tmp/data/feedback"
16+
transcripts_enabled: true
17+
transcripts_storage: "/tmp/data/transcripts"
18+
authentication:
19+
module: "noop"
20+
inference:
21+
default_provider: tls-openai
22+
default_model: mock-tls-model

tests/e2e/features/environment.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -552,6 +552,17 @@ def after_feature(context: Context, feature: Feature) -> None:
552552
restart_container("lightspeed-stack")
553553
remove_config_backup(context.default_config_backup)
554554

555+
# Restore Lightspeed Stack config if the generic configure_service step switched it.
556+
# This cleanup intentionally runs for any feature (not tag-gated) - any feature that
557+
# leaves a backup file will trigger config restoration and container restarts.
558+
backup_path = "lightspeed-stack.yaml.backup"
559+
if os.path.exists(backup_path):
560+
switch_config(backup_path)
561+
remove_config_backup(backup_path)
562+
if not context.is_library_mode:
563+
restart_container("llama-stack")
564+
restart_container("lightspeed-stack")
565+
555566
# Clean up any proxy servers left from the last scenario
556567
if hasattr(context, "tunnel_proxy") or hasattr(context, "interception_proxy"):
557568
from tests.e2e.features.steps.proxy import _stop_proxy

tests/e2e/features/steps/proxy.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -142,9 +142,10 @@ def restore_if_modified(context: Context) -> None:
142142
_stop_proxy(context, "interception_proxy", "interception_proxy_loop")
143143

144144
if os.path.exists(_LLAMA_STACK_CONFIG_BACKUP):
145-
print("Restoring original Llama Stack config from backup...")
146-
shutil.copy(_LLAMA_STACK_CONFIG_BACKUP, _LLAMA_STACK_CONFIG)
147-
os.remove(_LLAMA_STACK_CONFIG_BACKUP)
145+
print(
146+
f"Restoring original Llama Stack config from {_LLAMA_STACK_CONFIG_BACKUP}..."
147+
)
148+
shutil.move(_LLAMA_STACK_CONFIG_BACKUP, _LLAMA_STACK_CONFIG)
148149
restart_container("llama-stack")
149150
restart_container("lightspeed-stack")
150151

tests/e2e/features/steps/tls.py

Lines changed: 224 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,224 @@
1+
"""Step definitions for TLS configuration e2e tests.
2+
3+
These tests configure Llama Stack's run.yaml with NetworkConfig TLS settings
4+
and verify the full pipeline works through the Lightspeed Stack.
5+
6+
Config switching uses the same pattern as other e2e tests: overwrite the
7+
host-mounted run.yaml and restart Docker containers. Cleanup is handled
8+
by a Background step that restores the backup before each scenario.
9+
"""
10+
11+
import copy
12+
from typing import Any, Optional
13+
14+
from behave import given # pyright: ignore[reportAttributeAccessIssue]
15+
from behave.runner import Context
16+
17+
from tests.e2e.features.steps.proxy import (
18+
_LLAMA_STACK_CONFIG,
19+
_backup_llama_config,
20+
_load_llama_config,
21+
_write_config,
22+
)
23+
24+
_TLS_PROVIDER_BASE: dict[str, Any] = {
25+
"provider_id": "tls-openai",
26+
"provider_type": "remote::openai",
27+
"config": {
28+
"api_key": "test-key",
29+
"base_url": "https://mock-tls-inference:8443/v1",
30+
"allowed_models": ["mock-tls-model"],
31+
},
32+
}
33+
34+
_TLS_MODEL_RESOURCE: dict[str, str] = {
35+
"model_id": "mock-tls-model",
36+
"provider_id": "tls-openai",
37+
"provider_model_id": "mock-tls-model",
38+
}
39+
40+
41+
def _ensure_tls_provider(config: dict[str, Any]) -> dict[str, Any]:
42+
"""Find or create the tls-openai inference provider in the config.
43+
44+
If the provider does not exist, it is added along with the
45+
mock-tls-model registered resource.
46+
47+
Parameters:
48+
config: The Llama Stack configuration dictionary.
49+
50+
Returns:
51+
The tls-openai provider configuration dictionary.
52+
"""
53+
providers = config.setdefault("providers", {})
54+
inference = providers.setdefault("inference", [])
55+
56+
for provider in inference:
57+
if provider.get("provider_id") == "tls-openai":
58+
return provider
59+
60+
# Provider not found — add it
61+
provider = copy.deepcopy(_TLS_PROVIDER_BASE)
62+
inference.append(provider)
63+
64+
# Also register the model resource
65+
resources = config.setdefault("registered_resources", {})
66+
models = resources.setdefault("models", [])
67+
if not any(m.get("model_id") == "mock-tls-model" for m in models):
68+
models.append(copy.deepcopy(_TLS_MODEL_RESOURCE))
69+
70+
return provider
71+
72+
73+
def _configure_tls(tls_config: dict[str, Any], base_url: Optional[str] = None) -> None:
    """Apply TLS settings to the tls-openai provider in run.yaml.

    Backs up the current Llama Stack config, ensures the tls-openai provider
    exists, writes *tls_config* under ``config.network.tls`` and optionally
    overrides the provider's base URL, then persists the result.

    Parameters:
        tls_config: The TLS configuration dictionary.
        base_url: Optional base URL override for the provider.
    """
    _backup_llama_config()
    config = _load_llama_config()
    provider = _ensure_tls_provider(config)
    provider_cfg = provider.setdefault("config", {})
    network_cfg = provider_cfg.setdefault("network", {})
    network_cfg["tls"] = tls_config
    if base_url is not None:
        provider_cfg["base_url"] = base_url
    _write_config(config, _LLAMA_STACK_CONFIG)
88+
89+
90+
# --- Background Steps ---
91+
# Restart steps ("The original Llama Stack config is restored if modified",
92+
# "Llama Stack is restarted", "Lightspeed Stack is restarted") are defined in
93+
# proxy.py and shared across features by behave.
94+
95+
96+
# --- TLS Configuration Steps ---
97+
98+
99+
@given("Llama Stack is configured with TLS verification disabled")
100+
def configure_tls_verify_false(context: Context) -> None:
101+
"""Configure run.yaml with TLS verify: false."""
102+
_configure_tls({"verify": False})
103+
104+
105+
@given("Llama Stack is configured with CA certificate verification")
106+
def configure_tls_verify_ca(context: Context) -> None:
107+
"""Configure run.yaml with TLS verify: /certs/ca.crt."""
108+
_configure_tls({"verify": "/certs/ca.crt", "min_version": "TLSv1.2"})
109+
110+
111+
@given("Llama Stack is configured with TLS verification enabled")
112+
def configure_tls_verify_true(context: Context) -> None:
113+
"""Configure run.yaml with TLS verify: true (fails with self-signed certs)."""
114+
_configure_tls({"verify": True})
115+
116+
117+
@given("Llama Stack is configured with mutual TLS authentication")
118+
def configure_tls_mtls(context: Context) -> None:
119+
"""Configure run.yaml with mutual TLS (client cert and key)."""
120+
_configure_tls(
121+
{
122+
"verify": "/certs/ca.crt",
123+
"client_cert": "/certs/client.crt",
124+
"client_key": "/certs/client.key",
125+
},
126+
base_url="https://mock-tls-inference:8444/v1",
127+
)
128+
129+
130+
@given('Llama Stack is configured with CA certificate path "{path}"')
def configure_tls_verify_ca_path(context: Context, path: str) -> None:
    """Set up run.yaml to verify against the CA certificate at *path*."""
    settings = {"verify": path}
    _configure_tls(settings)
134+
135+
136+
@given("Llama Stack is configured for mTLS without client certificate")
137+
def configure_mtls_no_client_cert(context: Context) -> None:
138+
"""Configure run.yaml for mTLS port without client cert (should fail)."""
139+
_configure_tls(
140+
{"verify": "/certs/ca.crt"},
141+
base_url="https://mock-tls-inference:8444/v1",
142+
)
143+
144+
145+
@given("Llama Stack is configured for mTLS with wrong client certificate")
146+
def configure_mtls_wrong_client_cert(context: Context) -> None:
147+
"""Configure run.yaml for mTLS with invalid client cert (CA cert as client cert)."""
148+
_configure_tls(
149+
{
150+
"verify": "/certs/ca.crt",
151+
"client_cert": "/certs/ca.crt",
152+
"client_key": "/certs/client.key",
153+
},
154+
base_url="https://mock-tls-inference:8444/v1",
155+
)
156+
157+
158+
@given("Llama Stack is configured for mTLS with untrusted client certificate")
159+
def configure_mtls_untrusted_client_cert(context: Context) -> None:
160+
"""Configure run.yaml for mTLS with client cert from untrusted CA."""
161+
_configure_tls(
162+
{
163+
"verify": "/certs/ca.crt",
164+
"client_cert": "/certs/untrusted-client.crt",
165+
"client_key": "/certs/untrusted-client.key",
166+
},
167+
base_url="https://mock-tls-inference:8444/v1",
168+
)
169+
170+
171+
@given("Llama Stack is configured for mTLS with expired client certificate")
172+
def configure_mtls_expired_client_cert(context: Context) -> None:
173+
"""Configure run.yaml for mTLS with an expired client certificate."""
174+
_configure_tls(
175+
{
176+
"verify": "/certs/ca.crt",
177+
"client_cert": "/certs/expired-client.crt",
178+
"client_key": "/certs/client.key",
179+
},
180+
base_url="https://mock-tls-inference:8444/v1",
181+
)
182+
183+
184+
@given("Llama Stack is configured with CA certificate and hostname mismatch server")
185+
def configure_tls_hostname_mismatch(context: Context) -> None:
186+
"""Configure run.yaml to connect to hostname-mismatch server (should fail)."""
187+
_configure_tls(
188+
{"verify": "/certs/ca.crt"},
189+
base_url="https://mock-tls-inference:8445/v1",
190+
)
191+
192+
193+
@given("Llama Stack is configured with mutual TLS and hostname mismatch server")
194+
def configure_mtls_hostname_mismatch(context: Context) -> None:
195+
"""Configure run.yaml for mTLS against hostname-mismatch server (should fail)."""
196+
_configure_tls(
197+
{
198+
"verify": "/certs/ca.crt",
199+
"client_cert": "/certs/client.crt",
200+
"client_key": "/certs/client.key",
201+
},
202+
base_url="https://mock-tls-inference:8445/v1",
203+
)
204+
205+
206+
@given(
    'Llama Stack is configured with TLS minimum version "{version}" and hostname mismatch server'
)
def configure_tls_min_version_hostname_mismatch(context: Context, version: str) -> None:
    """Point run.yaml at the hostname-mismatch port with a minimum TLS version."""
    settings = {"verify": "/certs/ca.crt", "min_version": version}
    _configure_tls(settings, base_url="https://mock-tls-inference:8445/v1")
215+
216+
217+
@given(
    'Llama Stack is configured with TLS minimum version "{version}" and CA certificate path "{path}"'
)
def configure_tls_min_version_with_ca_path(
    context: Context, version: str, path: str
) -> None:
    """Set up run.yaml with a minimum TLS version and the CA certificate at *path*."""
    settings = {"verify": path, "min_version": version}
    _configure_tls(settings)

0 commit comments

Comments
 (0)