diff --git a/README.md b/README.md
index 13953f58..4a7d2878 100644
--- a/README.md
+++ b/README.md
@@ -111,7 +111,8 @@ container with the following commands:

```
mkdir -p /opt/tritonserver/backends/vllm
-wget -P /opt/tritonserver/backends/vllm https://raw.githubusercontent.com/triton-inference-server/vllm_backend/main/src/model.py
+git clone https://github.com/triton-inference-server/vllm_backend.git /tmp/vllm_backend
+cp -r /tmp/vllm_backend/src/* /opt/tritonserver/backends/vllm
```

## Using the vLLM Backend
@@ -194,7 +195,6 @@ starting from 23.10 release.
You can use `pip install ...` within the container to upgrade vLLM version.

-
## Running Multiple Instances of Triton Server

If you are running multiple instances of Triton server with a Python-based backend,
@@ -202,6 +202,45 @@ you need to specify a different `shm-region-prefix-name` for each server. See
[here](https://github.com/triton-inference-server/python_backend#running-multiple-instances-of-triton-server)
for more information.

+## Triton Metrics
+Starting with the 24.08 release of Triton, users can obtain partial
+vLLM metrics by querying the Triton metrics endpoint (see the complete list of vLLM metrics
+[here](https://docs.vllm.ai/en/latest/serving/metrics.html)). To do so, launch
+a Triton server in any of the ways described above
+(ensuring the build code / container is 24.08 or later). Once the server is
+running, you can query the metrics endpoint by entering
+the following:
+```bash
+curl localhost:8002/metrics
+```
+vLLM stats are reported by the metrics endpoint in fields that
+are prefixed with `vllm:`. Your output for these fields should look
+similar to the following:
+```bash
+# HELP vllm:prompt_tokens_total Number of prefill tokens processed.
+# TYPE vllm:prompt_tokens_total counter
+vllm:prompt_tokens_total{model="vllm_model",version="1"} 10
+# HELP vllm:generation_tokens_total Number of generation tokens processed.
+# TYPE vllm:generation_tokens_total counter
+vllm:generation_tokens_total{model="vllm_model",version="1"} 16
+```
+To enable the vLLM engine to collect metrics, the "disable_log_stats" option must either be
+set to false or left unset (false by default) in [model.json](https://github.com/triton-inference-server/vllm_backend/blob/main/samples/model_repository/vllm_model/1/model.json).
+```bash
+"disable_log_stats": false
+```
+*Note:* vLLM metrics are not reported to the Triton metrics server by default
+due to potential performance slowdowns. To enable a vLLM model's metrics
+reporting, please add the following lines to its config.pbtxt as well.
+```bash
+parameters: {
+  key: "REPORT_CUSTOM_METRICS"
+  value: {
+    string_value:"yes"
+  }
+}
+```
+
## Referencing the Tutorial

You can read further in the
diff --git a/ci/L0_backend_vllm/metrics_test/test.sh b/ci/L0_backend_vllm/metrics_test/test.sh
new file mode 100755
index 00000000..0a8a96d6
--- /dev/null
+++ b/ci/L0_backend_vllm/metrics_test/test.sh
@@ -0,0 +1,248 @@
+#!/bin/bash
+# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#  * Neither the name of NVIDIA CORPORATION nor the names of its
+#    contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+source ../../common/util.sh
+
+TRITON_DIR=${TRITON_DIR:="/opt/tritonserver"}
+SERVER=${TRITON_DIR}/bin/tritonserver
+BACKEND_DIR=${TRITON_DIR}/backends
+SERVER_ARGS="--model-repository=$(pwd)/models --backend-directory=${BACKEND_DIR} --model-control-mode=explicit --load-model=vllm_opt --log-verbose=1"
+SERVER_LOG="./vllm_metrics_server.log"
+CLIENT_LOG="./vllm_metrics_client.log"
+TEST_RESULT_FILE='test_results.txt'
+CLIENT_PY="./vllm_metrics_test.py"
+SAMPLE_MODELS_REPO="../../../samples/model_repository"
+EXPECTED_NUM_TESTS=1
+
+# Helpers =======================================
+function copy_model_repository {
+    rm -rf models && mkdir -p models
+    cp -r ${SAMPLE_MODELS_REPO}/vllm_model models/vllm_opt
+    # The `vllm_opt` model will be loaded on server start and stay loaded
+    # throughout unit testing. We reduce its "gpu_memory_utilization" so that
+    # at least 60% of GPU memory remains available and vLLM's memory profiler
+    # does not error out if additional models are loaded.
+    sed -i 's/"gpu_memory_utilization": 0.5/"gpu_memory_utilization": 0.4/' models/vllm_opt/1/model.json
+}
+
+RET=0
+
+# Test disabling vLLM metrics reporting without parameter "REPORT_CUSTOM_METRICS" in config.pbtxt
+copy_model_repository
+run_server
+if [ "$SERVER_PID" == "0" ]; then
+    cat $SERVER_LOG
+    echo -e "\n***\n*** Failed to start $SERVER\n***"
+    exit 1
+fi
+
+set +e
+python3 $CLIENT_PY VLLMTritonMetricsTest.test_vllm_metrics_disabled -v > $CLIENT_LOG 2>&1
+
+if [ $? -ne 0 ]; then
+    cat $CLIENT_LOG
+    echo -e "\n***\n*** Running $CLIENT_PY VLLMTritonMetricsTest.test_vllm_metrics_disabled FAILED. \n***"
+    RET=1
+else
+    check_test_results $TEST_RESULT_FILE $EXPECTED_NUM_TESTS
+    if [ $? 
-ne 0 ]; then + cat $CLIENT_LOG + echo -e "\n***\n*** Test Result Verification FAILED.\n***" + RET=1 + fi +fi +set -e + +kill $SERVER_PID +wait $SERVER_PID + +# Test disabling vLLM metrics reporting with parameter "REPORT_CUSTOM_METRICS" set to "no" in config.pbtxt +copy_model_repository +echo -e " +parameters: { + key: \"REPORT_CUSTOM_METRICS\" + value: { + string_value:\"no\" + } +} +" >> models/vllm_opt/config.pbtxt + +run_server +if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 +fi + +set +e +python3 $CLIENT_PY VLLMTritonMetricsTest.test_vllm_metrics_disabled -v > $CLIENT_LOG 2>&1 + +if [ $? -ne 0 ]; then + cat $CLIENT_LOG + echo -e "\n***\n*** Running $CLIENT_PY VLLMTritonMetricsTest.test_vllm_metrics_disabled FAILED. \n***" + RET=1 +else + check_test_results $TEST_RESULT_FILE $EXPECTED_NUM_TESTS + if [ $? -ne 0 ]; then + cat $CLIENT_LOG + echo -e "\n***\n*** Test Result Verification FAILED.\n***" + RET=1 + fi +fi +set -e + +kill $SERVER_PID +wait $SERVER_PID + +# Test vLLM metrics reporting with parameter "REPORT_CUSTOM_METRICS" set to "yes" in config.pbtxt +copy_model_repository +cp ${SAMPLE_MODELS_REPO}/vllm_model/config.pbtxt models/vllm_opt +echo -e " +parameters: { + key: \"REPORT_CUSTOM_METRICS\" + value: { + string_value:\"yes\" + } +} +" >> models/vllm_opt/config.pbtxt + +run_server +if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 +fi + +set +e +python3 $CLIENT_PY VLLMTritonMetricsTest.test_vllm_metrics -v > $CLIENT_LOG 2>&1 + +if [ $? -ne 0 ]; then + cat $CLIENT_LOG + echo -e "\n***\n*** Running $CLIENT_PY VLLMTritonMetricsTest.test_vllm_metrics FAILED. \n***" + RET=1 +else + check_test_results $TEST_RESULT_FILE $EXPECTED_NUM_TESTS + if [ $? -ne 0 ]; then + cat $CLIENT_LOG + echo -e "\n***\n*** Test Result Verification FAILED.\n***" + RET=1 + fi +fi +set -e + +kill $SERVER_PID +wait $SERVER_PID + +# Test enabling vLLM metrics reporting in config.pbtxt but disabling in model.json +copy_model_repository +jq '. += {"disable_log_stats" : true}' models/vllm_opt/1/model.json > "temp.json" +mv temp.json models/vllm_opt/1/model.json +echo -e " +parameters: { + key: \"REPORT_CUSTOM_METRICS\" + value: { + string_value:\"yes\" + } +} +" >> models/vllm_opt/config.pbtxt + +run_server +if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 +fi + +set +e +python3 $CLIENT_PY VLLMTritonMetricsTest.test_vllm_metrics_disabled -v > $CLIENT_LOG 2>&1 + +if [ $? -ne 0 ]; then + cat $CLIENT_LOG + echo -e "\n***\n*** Running $CLIENT_PY VLLMTritonMetricsTest.test_vllm_metrics_disabled FAILED. \n***" + RET=1 +else + check_test_results $TEST_RESULT_FILE $EXPECTED_NUM_TESTS + if [ $? -ne 0 ]; then + cat $CLIENT_LOG + echo -e "\n***\n*** Test Result Verification FAILED.\n***" + RET=1 + fi +fi +set -e + +kill $SERVER_PID +wait $SERVER_PID + +# Test enabling vLLM metrics reporting in config.pbtxt while disabling in server option +copy_model_repository +echo -e " +parameters: { + key: \"REPORT_CUSTOM_METRICS\" + value: { + string_value:\"yes\" + } +} +" >> models/vllm_opt/config.pbtxt +SERVER_ARGS="${SERVER_ARGS} --allow-metrics=false" +run_server +if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 +fi + +set +e +python3 $CLIENT_PY VLLMTritonMetricsTest.test_vllm_metrics_refused -v > $CLIENT_LOG 2>&1 + +if [ $? 
-ne 0 ]; then + cat $CLIENT_LOG + echo -e "\n***\n*** Running $CLIENT_PY VLLMTritonMetricsTest.test_vllm_metrics_refused FAILED. \n***" + RET=1 +else + check_test_results $TEST_RESULT_FILE $EXPECTED_NUM_TESTS + if [ $? -ne 0 ]; then + cat $CLIENT_LOG + echo -e "\n***\n*** Test Result Verification FAILED.\n***" + RET=1 + fi +fi +set -e + +kill $SERVER_PID +wait $SERVER_PID +rm -rf "./models" "temp.json" + +if [ $RET -eq 1 ]; then + cat $CLIENT_LOG + cat $SERVER_LOG + echo -e "\n***\n*** vLLM test FAILED. \n***" +else + echo -e "\n***\n*** vLLM test PASSED. \n***" +fi + +collect_artifacts_from_subdir +exit $RET diff --git a/ci/L0_backend_vllm/metrics_test/vllm_metrics_test.py b/ci/L0_backend_vllm/metrics_test/vllm_metrics_test.py new file mode 100644 index 00000000..d2059057 --- /dev/null +++ b/ci/L0_backend_vllm/metrics_test/vllm_metrics_test.py @@ -0,0 +1,155 @@ +# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of NVIDIA CORPORATION nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import re +import sys +import unittest +from functools import partial + +import requests +import tritonclient.grpc as grpcclient +from tritonclient.utils import * + +sys.path.append("../../common") +from test_util import TestResultCollector, UserData, callback, create_vllm_request + + +class VLLMTritonMetricsTest(TestResultCollector): + def setUp(self): + self.triton_client = grpcclient.InferenceServerClient(url="localhost:8001") + self.tritonserver_ipaddr = os.environ.get("TRITONSERVER_IPADDR", "localhost") + self.vllm_model_name = "vllm_opt" + self.prompts = [ + "The most dangerous animal is", + "The capital of France is", + "The future of AI is", + ] + self.sampling_parameters = {"temperature": "0", "top_p": "1"} + + def get_vllm_metrics(self): + """ + Store vllm metrics in a dictionary. + """ + r = requests.get(f"http://{self.tritonserver_ipaddr}:8002/metrics") + r.raise_for_status() + + # Regular expression to match the pattern + pattern = r"^(vllm:[^ {]+)(?:{.*})? 
([0-9.-]+)$" + vllm_dict = {} + + # Find all matches in the text + matches = re.findall(pattern, r.text, re.MULTILINE) + + for match in matches: + key, value = match + vllm_dict[key] = float(value) if "." in value else int(value) + + return vllm_dict + + def vllm_infer( + self, + prompts, + sampling_parameters, + model_name, + ): + """ + Helper function to send async stream infer requests to vLLM. + """ + user_data = UserData() + number_of_vllm_reqs = len(prompts) + + self.triton_client.start_stream(callback=partial(callback, user_data)) + for i in range(number_of_vllm_reqs): + request_data = create_vllm_request( + prompts[i], + i, + False, + sampling_parameters, + model_name, + True, + ) + self.triton_client.async_stream_infer( + model_name=model_name, + inputs=request_data["inputs"], + request_id=request_data["request_id"], + outputs=request_data["outputs"], + parameters=sampling_parameters, + ) + + for _ in range(number_of_vllm_reqs): + result = user_data._completed_requests.get() + if type(result) is InferenceServerException: + print(result.message()) + self.assertIsNot(type(result), InferenceServerException, str(result)) + + output = result.as_numpy("text_output") + self.assertIsNotNone(output, "`text_output` should not be None") + + self.triton_client.stop_stream() + + def test_vllm_metrics(self): + # Test vLLM metrics + self.vllm_infer( + prompts=self.prompts, + sampling_parameters=self.sampling_parameters, + model_name=self.vllm_model_name, + ) + metrics_dict = self.get_vllm_metrics() + + # vllm:prompt_tokens_total + self.assertEqual(metrics_dict["vllm:prompt_tokens_total"], 18) + # vllm:generation_tokens_total + self.assertEqual(metrics_dict["vllm:generation_tokens_total"], 48) + + def test_vllm_metrics_disabled(self): + # Test vLLM metrics + self.vllm_infer( + prompts=self.prompts, + sampling_parameters=self.sampling_parameters, + model_name=self.vllm_model_name, + ) + metrics_dict = self.get_vllm_metrics() + + # No vLLM metric found + self.assertEqual(len(metrics_dict), 0) + + def test_vllm_metrics_refused(self): + # Test vLLM metrics + self.vllm_infer( + prompts=self.prompts, + sampling_parameters=self.sampling_parameters, + model_name=self.vllm_model_name, + ) + with self.assertRaises(requests.exceptions.ConnectionError): + self.get_vllm_metrics() + + def tearDown(self): + self.triton_client.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/ci/L0_backend_vllm/test.sh b/ci/L0_backend_vllm/test.sh index 93d065c8..a9f89894 100755 --- a/ci/L0_backend_vllm/test.sh +++ b/ci/L0_backend_vllm/test.sh @@ -26,7 +26,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. RET=0 -SUBTESTS="accuracy_test request_cancellation enabled_stream vllm_backend" +SUBTESTS="accuracy_test request_cancellation enabled_stream vllm_backend metrics_test" python3 -m pip install --upgrade pip && pip3 install tritonclient[grpc] diff --git a/ci/L0_backend_vllm/vllm_backend/test.sh b/ci/L0_backend_vllm/vllm_backend/test.sh index a6dd0aa7..43b20af7 100755 --- a/ci/L0_backend_vllm/vllm_backend/test.sh +++ b/ci/L0_backend_vllm/vllm_backend/test.sh @@ -50,7 +50,7 @@ function assert_curl_success { rm -rf models && mkdir -p models cp -r ${SAMPLE_MODELS_REPO}/vllm_model models/vllm_opt -# `vllm_opt`` model will be loaded on server start and stay loaded throughout +# `vllm_opt` model will be loaded on server start and stay loaded throughout # unittesting. To test vllm model load/unload we use a dedicated # `vllm_load_test`. 
To ensure that vllm's memory profiler will not error out
 # on `vllm_load_test` load, we reduce "gpu_memory_utilization" for `vllm_opt`,
diff --git a/samples/model_repository/vllm_model/1/model.json b/samples/model_repository/vllm_model/1/model.json
index 6eb5e070..8a32050d 100644
--- a/samples/model_repository/vllm_model/1/model.json
+++ b/samples/model_repository/vllm_model/1/model.json
@@ -1,6 +1,6 @@
 {
     "model":"facebook/opt-125m",
-    "disable_log_requests": "true",
+    "disable_log_requests": true,
     "gpu_memory_utilization": 0.5,
-    "enforce_eager": "true"
+    "enforce_eager": true
 }
diff --git a/src/model.py b/src/model.py
index 0ed0e454..cc52c3b5 100644
--- a/src/model.py
+++ b/src/model.py
@@ -41,6 +41,8 @@
 from vllm.sampling_params import SamplingParams
 from vllm.utils import random_uuid

+from utils.metrics import VllmStatLogger
+
 _VLLM_ENGINE_ARGS_FILENAME = "model.json"
 _MULTI_LORA_ARGS_FILENAME = "multi_lora.json"

@@ -155,9 +157,29 @@ def init_engine(self):
         self.setup_lora()

         # Create an AsyncLLMEngine from the config from JSON
-        self.llm_engine = AsyncLLMEngine.from_engine_args(
-            AsyncEngineArgs(**self.vllm_engine_config)
-        )
+        async_engine_args = AsyncEngineArgs(**self.vllm_engine_config)
+        self.llm_engine = AsyncLLMEngine.from_engine_args(async_engine_args)
+
+        # Create vLLM custom metrics
+        if (
+            "REPORT_CUSTOM_METRICS" in self.model_config["parameters"]
+            and self.model_config["parameters"]["REPORT_CUSTOM_METRICS"]["string_value"]
+            == "yes"
+            and not async_engine_args.disable_log_stats
+        ):
+            try:
+                labels = {
+                    "model": self.args["model_name"],
+                    "version": self.args["model_version"],
+                }
+                # Add vLLM custom metrics
+                self.llm_engine.add_logger("triton", VllmStatLogger(labels=labels))
+            except pb_utils.TritonModelException as e:
+                if "metrics not supported" in str(e):
+                    # Metrics are disabled at the server
+                    self.logger.log_info("[vllm] Metrics not supported")
+                else:
+                    raise e

     def setup_lora(self):
         self.enable_lora = False
diff --git a/src/utils/metrics.py b/src/utils/metrics.py
new file mode 100644
index 00000000..fc6e69bd
--- /dev/null
+++ b/src/utils/metrics.py
@@ -0,0 +1,99 @@
+# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#  * Neither the name of NVIDIA CORPORATION nor the names of its
+#    contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from typing import Dict, Union + +import triton_python_backend_utils as pb_utils +from vllm.engine.metrics import StatLoggerBase as VllmStatLoggerBase +from vllm.engine.metrics import Stats as VllmStats +from vllm.engine.metrics import SupportsMetricsInfo + + +class TritonMetrics: + def __init__(self, labels): + # Initialize metric families + # Iteration stats + self.counter_prompt_tokens_family = pb_utils.MetricFamily( + name="vllm:prompt_tokens_total", + description="Number of prefill tokens processed.", + kind=pb_utils.MetricFamily.COUNTER, + ) + self.counter_generation_tokens_family = pb_utils.MetricFamily( + name="vllm:generation_tokens_total", + description="Number of generation tokens processed.", + kind=pb_utils.MetricFamily.COUNTER, + ) + + # Initialize metrics + # Iteration stats + self.counter_prompt_tokens = self.counter_prompt_tokens_family.Metric( + labels=labels + ) + self.counter_generation_tokens = self.counter_generation_tokens_family.Metric( + labels=labels + ) + + +class VllmStatLogger(VllmStatLoggerBase): + """StatLogger is used as an adapter between vLLM stats collector and Triton metrics provider.""" + + # local_interval not used here. It's for vLLM logs to stdout. + def __init__(self, labels: Dict, local_interval: float = 0) -> None: + # Tracked stats over current local logging interval. + super().__init__(local_interval) + self.metrics = TritonMetrics(labels=labels) + + def info(self, type: str, obj: SupportsMetricsInfo) -> None: + pass + + def _log_counter(self, counter, data: Union[int, float]) -> None: + """Convenience function for logging to counter. + + Args: + counter: A counter metric instance. + data: An int or float to increment the count metric. + + Returns: + None + """ + if data != 0: + counter.increment(data) + + def log(self, stats: VllmStats) -> None: + """Report stats to Triton metrics server. + + Args: + stats: Created by LLMEngine for use by VllmStatLogger. + + Returns: + None + """ + self._log_counter( + self.metrics.counter_prompt_tokens, stats.num_prompt_tokens_iter + ) + self._log_counter( + self.metrics.counter_generation_tokens, stats.num_generation_tokens_iter + )
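
For reference, the `vllm:` counters exposed by this change can be consumed programmatically in the same way the new `vllm_metrics_test.py` does. The snippet below is a minimal sketch, assuming the server's metrics endpoint is reachable on the default port 8002 and that the `requests` package is available; the helper name `scrape_vllm_metrics` is illustrative and not part of the backend.

```python
import re

import requests


def scrape_vllm_metrics(host="localhost", port=8002):
    """Fetch Triton's Prometheus-format metrics text and keep only the vLLM counters."""
    response = requests.get(f"http://{host}:{port}/metrics")
    response.raise_for_status()
    # Matches lines such as:
    #   vllm:prompt_tokens_total{model="vllm_model",version="1"} 10
    pattern = r"^(vllm:[^ {]+)(?:{.*})? ([0-9.-]+)$"
    return {
        name: float(value)
        for name, value in re.findall(pattern, response.text, re.MULTILINE)
    }


if __name__ == "__main__":
    for name, value in sorted(scrape_vllm_metrics().items()):
        print(f"{name} = {value}")
```

This mirrors the `get_vllm_metrics` helper in the test above; once at least one inference request has been served with `REPORT_CUSTOM_METRICS` enabled, it should print non-zero values for `vllm:prompt_tokens_total` and `vllm:generation_tokens_total`.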