#!/usr/bin/env python3
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

| 5 | +""" |
| 6 | +Determinism test for language model API using pytest. |
| 7 | +
|
| 8 | +This test suite checks if the model produces deterministic outputs |
| 9 | +when given the same inputs with fixed seed and temperature=0. |
| 10 | +
|
| 11 | +The test uses comprehensive server warmup (sending all test prompts |
| 12 | +before validation) to avoid server initialization effects that could |
| 13 | +impact determinism measurements. |
| 14 | +""" |

import logging
import os
import shutil

import pytest
import requests

from tests.utils.engine_process import FRONTEND_PORT
from tests.utils.managed_process import DynamoFrontendProcess, ManagedProcess
from tests.utils.payloads import check_models_api

logger = logging.getLogger(__name__)

# A small model is enough here: the goal is to validate the configuration,
# not to stress the system.
MODEL_PATH = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
SERVED_MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

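# NOTE (assumption): a long prompt is used, presumably so that the request spans
# multiple KV cache blocks and gives KVBM something to offload; the exact wording
# of the prompt does not matter for these tests.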
PROMPT = "In the heart of Eldoria, an ancient land of boundless magic and mysterious creatures, lies the long-forgotten city of Aeloria. Once a beacon of knowledge and power, Aeloria was buried beneath the shifting sands of time, lost to the world for centuries. You are an intrepid explorer, known for your unparalleled curiosity and courage, who has stumbled upon an ancient map hinting at ests that Aeloria holds a secret so profound that it has the potential to reshape the very fabric of reality. Your journey will take you through treacherous deserts, enchanted forests, and across perilous mountain ranges. Your Task: Character Background: Develop a detailed background for your character. Describe their motivations for seeking out Aeloria, their skills and weaknesses, and any personal connections to the ancient city or its legends. Are they driven by a quest for knowledge, a search for lost familt clue is hidden."


class DynamoWorkerProcess(ManagedProcess):
    """Process manager for Dynamo worker with TRTLLM backend"""

    def __init__(self, request, worker_id: str, engine_config: str):
        self.worker_id = worker_id

        command = [
            "python3",
            "-m",
            "dynamo.trtllm",
            "--model",
            MODEL_PATH,
            "--served-model-name",
            SERVED_MODEL_NAME,
            "--extra-engine-args",
            engine_config,
        ]

        # Configure debug logging, the system health endpoint, and KVBM cache settings.
        env = os.environ.copy()
        env["DYN_LOG"] = "debug"
        env["DYN_SYSTEM_ENABLED"] = "true"
        env["DYN_SYSTEM_USE_ENDPOINT_HEALTH_STATUS"] = '["generate"]'
        env["DYN_SYSTEM_PORT"] = "9345"
        env["DYN_KVBM_CPU_CACHE_GB"] = "20"
        env["DYN_KVBM_DISK_CACHE_GB"] = "60"
        env["DYN_KVBM_LEADER_WORKER_INIT_TIMEOUT_SECS"] = "1200"
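        # NOTE (assumption, based on the variable names): the KVBM settings above
        # reserve roughly 20 GB of host (CPU) memory and 60 GB of disk for offloaded
        # KV cache blocks, and allow leader/worker initialization up to 1200 seconds.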

        # TODO: Have the managed process take a command name explicitly to distinguish
        # between processes started with the same command.
        log_dir = f"{request.node.name}_{worker_id}"

        # Clean up any existing log directory from previous runs
        try:
            shutil.rmtree(log_dir)
            logger.info(f"Cleaned up existing log directory: {log_dir}")
        except FileNotFoundError:
            # Directory doesn't exist, which is fine
            pass

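        # Readiness is gated on two checks: the frontend's /v1/models endpoint
        # (the model must be registered there) and this worker's system health
        # endpoint on port 9345 (see DYN_SYSTEM_PORT above).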
        super().__init__(
            command=command,
            env=env,
            health_check_urls=[
                (f"http://localhost:{FRONTEND_PORT}/v1/models", check_models_api),
                ("http://localhost:9345/health", self.is_ready),
            ],
            timeout=300,
            display_output=True,
            terminate_existing=False,
            log_dir=log_dir,
        )

    def get_pid(self) -> int | None:
        """Get the PID of the worker process"""
        return self.proc.pid if hasattr(self, "proc") and self.proc else None

    def is_ready(self, response) -> bool:
        """Check the health of the worker process"""
        try:
            data = response.json()
            if data.get("status") == "ready":
                logger.info(
                    f"{self.__class__.__name__} {{ name: {self.worker_id} }} status is ready"
                )
                return True
            logger.warning(
                f"{self.__class__.__name__} {{ name: {self.worker_id} }} status is not ready: {data.get('status')}"
            )
        except ValueError:
            logger.warning(
                f"{self.__class__.__name__} {{ name: {self.worker_id} }} health response is not valid JSON"
            )
        return False


def send_completion_request(
    prompt: str, max_tokens: int, timeout: int = 120
) -> requests.Response:
    """Send a completion request to the frontend"""
    payload = {
        "model": SERVED_MODEL_NAME,
        "prompt": prompt,
        "stream": False,
        "max_tokens": max_tokens,
    }
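    # NOTE (assumption): no sampling parameters (e.g. temperature or seed) are set
    # here, so the server-side defaults apply; the tests below only check that the
    # request succeeds, not what text is generated.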

    headers = {"Content-Type": "application/json"}

    logger.info(
        f"Sending completion request with prompt: '{prompt[:50]}...' and max_tokens: {max_tokens}"
    )

    try:
        response = requests.post(
            f"http://localhost:{FRONTEND_PORT}/v1/completions",
            headers=headers,
            json=payload,
            timeout=timeout,
        )
        return response
    except requests.exceptions.Timeout:
        logger.error(f"Request timed out after {timeout} seconds")
        raise
    except requests.exceptions.RequestException as e:
        logger.error(f"Request failed with error: {e}")
        raise

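
# Illustrative sketch (not used by the tests below): one way to pull the generated
# text out of a non-streaming /v1/completions response. The field names assume the
# standard OpenAI-style completions schema.
def _completion_text(response: requests.Response) -> str:
    data = response.json()
    return data["choices"][0]["text"]
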

# Test markers to align with repository conventions.
# TODO: enable the rest once KVBM is built in CI.
@pytest.mark.kvbm
@pytest.mark.trtllm_marker
@pytest.mark.e2e
@pytest.mark.slow
@pytest.mark.gpu_1
@pytest.mark.skip(
    reason="Enable these tests once dynamo `main` upgrades to TRTLLM 1.2+"
)
def test_kvbm_without_cuda_graph_enabled(request, runtime_services):
    """
    End-to-end test for a TRTLLM worker with cuda_graph_config not defined and
    KVBM enabled.

    This test verifies that a TRTLLM worker is able to serve requests when
    CUDA graphs are not enabled in PyTorch. KVBM should be able to offload
    blocks regardless.
    """

    logger.info("Starting frontend...")
    with DynamoFrontendProcess(request):
        logger.info("Frontend started.")

        engine_config_without_cuda_graph_and_kvbm = (
            "tests/kvbm/engine_config_without_cuda_graph_and_kvbm.yaml"
        )
        logger.info("Starting worker...")
        with DynamoWorkerProcess(
            request, "decode", engine_config_without_cuda_graph_and_kvbm
        ) as worker:
            logger.info(f"Worker PID: {worker.get_pid()}")

            response = send_completion_request(PROMPT, 100, timeout=10)
            assert (
                response.ok
            ), f"Expected successful status, got {response.status_code}"
            logger.info(f"Completion request succeeded: {response.status_code}")


@pytest.mark.kvbm
@pytest.mark.trtllm_marker
@pytest.mark.e2e
@pytest.mark.slow
@pytest.mark.gpu_1
@pytest.mark.skip(
    reason="Enable these tests once dynamo `main` upgrades to TRTLLM 1.2+"
)
def test_kvbm_with_cuda_graph_enabled(request, runtime_services):
    """
    End-to-end test for a TRTLLM worker with cuda_graph_config defined and
    KVBM enabled.

    This test verifies that a TRTLLM worker is able to serve requests when
    CUDA graphs are enabled in PyTorch. KVBM should be able to offload
    blocks regardless.
    """

    logger.info("Starting frontend...")
    with DynamoFrontendProcess(request):
        logger.info("Frontend started.")

        engine_config_with_cuda_graph_and_kvbm = (
            "tests/kvbm/engine_config_with_cuda_graph_and_kvbm.yaml"
        )
        logger.info("Starting worker...")
        with DynamoWorkerProcess(
            request, "decode", engine_config_with_cuda_graph_and_kvbm
        ) as worker:
            logger.info(f"Worker PID: {worker.get_pid()}")

            response = send_completion_request(PROMPT, 100, timeout=10)
            assert (
                response.ok
            ), f"Expected successful status, got {response.status_code}"
            logger.info(f"Completion request succeeded: {response.status_code}")