|
| 1 | +"""Step definitions for TLS configuration e2e tests. |
| 2 | +
|
| 3 | +These tests configure Llama Stack's run.yaml with NetworkConfig TLS settings |
| 4 | +and verify the full pipeline works through the Lightspeed Stack. |
| 5 | +
|
| 6 | +Config switching uses the same pattern as other e2e tests: overwrite the |
| 7 | +host-mounted run.yaml and restart Docker containers. Cleanup is handled |
| 8 | +by a Background step that restores the backup before each scenario. |
| 9 | +""" |
| 10 | + |
| 11 | +import copy |
| 12 | +from typing import Any, Optional |
| 13 | + |
| 14 | +from behave import given # pyright: ignore[reportAttributeAccessIssue] |
| 15 | +from behave.runner import Context |
| 16 | + |
| 17 | +from tests.e2e.features.steps.proxy import ( |
| 18 | + _LLAMA_STACK_CONFIG, |
| 19 | + _backup_llama_config, |
| 20 | + _load_llama_config, |
| 21 | + _write_config, |
| 22 | +) |
| 23 | + |
| 24 | +_TLS_PROVIDER_BASE: dict[str, Any] = { |
| 25 | + "provider_id": "tls-openai", |
| 26 | + "provider_type": "remote::openai", |
| 27 | + "config": { |
| 28 | + "api_key": "test-key", |
| 29 | + "base_url": "https://mock-tls-inference:8443/v1", |
| 30 | + "allowed_models": ["mock-tls-model"], |
| 31 | + }, |
| 32 | +} |
| 33 | + |
| 34 | +_TLS_MODEL_RESOURCE: dict[str, str] = { |
| 35 | + "model_id": "mock-tls-model", |
| 36 | + "provider_id": "tls-openai", |
| 37 | + "provider_model_id": "mock-tls-model", |
| 38 | +} |
| 39 | + |
| 40 | + |
| 41 | +def _ensure_tls_provider(config: dict[str, Any]) -> dict[str, Any]: |
| 42 | + """Find or create the tls-openai inference provider in the config. |
| 43 | +
|
| 44 | + If the provider does not exist, it is added along with the |
| 45 | + mock-tls-model registered resource. |
| 46 | +
|
| 47 | + Parameters: |
| 48 | + config: The Llama Stack configuration dictionary. |
| 49 | +
|
| 50 | + Returns: |
| 51 | + The tls-openai provider configuration dictionary. |
| 52 | + """ |
| 53 | + providers = config.setdefault("providers", {}) |
| 54 | + inference = providers.setdefault("inference", []) |
| 55 | + |
| 56 | + for provider in inference: |
| 57 | + if provider.get("provider_id") == "tls-openai": |
| 58 | + return provider |
| 59 | + |
| 60 | + # Provider not found — add it |
| 61 | + provider = copy.deepcopy(_TLS_PROVIDER_BASE) |
| 62 | + inference.append(provider) |
| 63 | + |
| 64 | + # Also register the model resource |
| 65 | + resources = config.setdefault("registered_resources", {}) |
| 66 | + models = resources.setdefault("models", []) |
| 67 | + if not any(m.get("model_id") == "mock-tls-model" for m in models): |
| 68 | + models.append(copy.deepcopy(_TLS_MODEL_RESOURCE)) |
| 69 | + |
| 70 | + return provider |
| 71 | + |
| 72 | + |
def _configure_tls(tls_config: dict[str, Any], base_url: Optional[str] = None) -> None:
    """Write TLS settings for the tls-openai provider into run.yaml.

    Backs up the current Llama Stack config first (restored by the
    Background step), then installs *tls_config* under the provider's
    ``config.network.tls`` key and persists the result.

    Parameters:
        tls_config: The TLS configuration dictionary.
        base_url: Optional base URL override for the provider.
    """
    _backup_llama_config()
    stack_config = _load_llama_config()
    provider = _ensure_tls_provider(stack_config)
    provider_cfg = provider.setdefault("config", {})
    network_cfg = provider_cfg.setdefault("network", {})
    if base_url is not None:
        provider_cfg["base_url"] = base_url
    network_cfg["tls"] = tls_config
    _write_config(stack_config, _LLAMA_STACK_CONFIG)
| 88 | + |
| 89 | + |
| 90 | +# --- Background Steps --- |
| 91 | +# Restart steps ("The original Llama Stack config is restored if modified", |
| 92 | +# "Llama Stack is restarted", "Lightspeed Stack is restarted") are defined in |
| 93 | +# proxy.py and shared across features by behave. |
| 94 | + |
| 95 | + |
| 96 | +# --- TLS Configuration Steps --- |
| 97 | + |
| 98 | + |
@given("Llama Stack is configured with TLS verification disabled")
def configure_tls_verify_false(context: Context) -> None:
    """Set up run.yaml so the provider's TLS block carries verify: false."""
    settings: dict[str, Any] = {"verify": False}
    _configure_tls(settings)
| 103 | + |
| 104 | + |
@given("Llama Stack is configured with CA certificate verification")
def configure_tls_verify_ca(context: Context) -> None:
    """Set up run.yaml so TLS verifies against /certs/ca.crt with a TLS 1.2 floor."""
    ca_settings: dict[str, Any] = {"verify": "/certs/ca.crt", "min_version": "TLSv1.2"}
    _configure_tls(ca_settings)
| 109 | + |
| 110 | + |
@given("Llama Stack is configured with TLS verification enabled")
def configure_tls_verify_true(context: Context) -> None:
    """Set up run.yaml with TLS verify: true (expected to fail with self-signed certs)."""
    settings: dict[str, Any] = {"verify": True}
    _configure_tls(settings)
| 115 | + |
| 116 | + |
@given("Llama Stack is configured with mutual TLS authentication")
def configure_tls_mtls(context: Context) -> None:
    """Set up run.yaml for mutual TLS: CA verification plus client cert and key."""
    mtls_settings: dict[str, Any] = {
        "verify": "/certs/ca.crt",
        "client_cert": "/certs/client.crt",
        "client_key": "/certs/client.key",
    }
    # Port 8444 is the mock server's mTLS listener.
    _configure_tls(mtls_settings, base_url="https://mock-tls-inference:8444/v1")
| 128 | + |
| 129 | + |
@given('Llama Stack is configured with CA certificate path "{path}"')
def configure_tls_verify_ca_path(context: Context, path: str) -> None:
    """Point TLS verification at the CA certificate located at *path*."""
    ca_settings: dict[str, Any] = {"verify": path}
    _configure_tls(ca_settings)
| 134 | + |
| 135 | + |
@given("Llama Stack is configured for mTLS without client certificate")
def configure_mtls_no_client_cert(context: Context) -> None:
    """Target the mTLS port while omitting the client cert (expected to fail)."""
    settings: dict[str, Any] = {"verify": "/certs/ca.crt"}
    _configure_tls(settings, base_url="https://mock-tls-inference:8444/v1")
| 143 | + |
| 144 | + |
@given("Llama Stack is configured for mTLS with wrong client certificate")
def configure_mtls_wrong_client_cert(context: Context) -> None:
    """Set up mTLS with an invalid client cert (the CA cert stands in for it)."""
    settings: dict[str, Any] = {
        "verify": "/certs/ca.crt",
        # Deliberately wrong: CA certificate presented as the client certificate.
        "client_cert": "/certs/ca.crt",
        "client_key": "/certs/client.key",
    }
    _configure_tls(settings, base_url="https://mock-tls-inference:8444/v1")
| 156 | + |
| 157 | + |
@given("Llama Stack is configured for mTLS with untrusted client certificate")
def configure_mtls_untrusted_client_cert(context: Context) -> None:
    """Set up mTLS presenting a client certificate signed by an untrusted CA."""
    settings: dict[str, Any] = {
        "verify": "/certs/ca.crt",
        "client_cert": "/certs/untrusted-client.crt",
        "client_key": "/certs/untrusted-client.key",
    }
    _configure_tls(settings, base_url="https://mock-tls-inference:8444/v1")
| 169 | + |
| 170 | + |
@given("Llama Stack is configured for mTLS with expired client certificate")
def configure_mtls_expired_client_cert(context: Context) -> None:
    """Set up mTLS presenting an expired client certificate."""
    settings: dict[str, Any] = {
        "verify": "/certs/ca.crt",
        "client_cert": "/certs/expired-client.crt",
        "client_key": "/certs/client.key",
    }
    _configure_tls(settings, base_url="https://mock-tls-inference:8444/v1")
| 182 | + |
| 183 | + |
@given("Llama Stack is configured with CA certificate and hostname mismatch server")
def configure_tls_hostname_mismatch(context: Context) -> None:
    """Point at the hostname-mismatch listener with CA verification (expected to fail)."""
    settings: dict[str, Any] = {"verify": "/certs/ca.crt"}
    # Port 8445 serves a certificate whose hostname does not match.
    _configure_tls(settings, base_url="https://mock-tls-inference:8445/v1")
| 191 | + |
| 192 | + |
@given("Llama Stack is configured with mutual TLS and hostname mismatch server")
def configure_mtls_hostname_mismatch(context: Context) -> None:
    """Set up mTLS against the hostname-mismatch listener (expected to fail)."""
    settings: dict[str, Any] = {
        "verify": "/certs/ca.crt",
        "client_cert": "/certs/client.crt",
        "client_key": "/certs/client.key",
    }
    _configure_tls(settings, base_url="https://mock-tls-inference:8445/v1")
| 204 | + |
| 205 | + |
@given(
    'Llama Stack is configured with TLS minimum version "{version}" and hostname mismatch server'
)
def configure_tls_min_version_hostname_mismatch(context: Context, version: str) -> None:
    """Pin the TLS version floor while targeting the hostname-mismatch listener."""
    settings: dict[str, Any] = {"verify": "/certs/ca.crt", "min_version": version}
    _configure_tls(settings, base_url="https://mock-tls-inference:8445/v1")
| 215 | + |
| 216 | + |
@given(
    'Llama Stack is configured with TLS minimum version "{version}" and CA certificate path "{path}"'
)
def configure_tls_min_version_with_ca_path(
    context: Context, version: str, path: str
) -> None:
    """Pin the TLS version floor to *version* while verifying against the CA at *path*."""
    settings: dict[str, Any] = {"verify": path, "min_version": version}
    _configure_tls(settings)