
Commit 0686a7c

Add first supported metrics

1 parent 05c5a8b commit 0686a7c

2 files changed: +308 −0 lines changed

src/metrics.py

Lines changed: 301 additions & 0 deletions
@@ -0,0 +1,301 @@
# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#  * Neither the name of NVIDIA CORPORATION nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from typing import Dict, Union

import triton_python_backend_utils as pb_utils
from vllm.engine.metrics import StatLoggerBase as VllmStatLoggerBase
from vllm.engine.metrics import Stats as VllmStats
from vllm.engine.metrics import SupportsMetricsInfo
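
# Note on the Triton metrics API used below: each metric is created in two
# steps. pb_utils.MetricFamily(...) registers the name, description, and kind
# once, and family.Metric(labels=...) then creates the per-label-set instance
# whose value is updated with set() (gauges) or increment() (counters).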


# begin-metrics-definitions
class TritonMetrics:
    def __init__(self, labels):
        # System stats
        # Scheduler State
        self.gauge_scheduler_running_family = pb_utils.MetricFamily(
            name="vllm:num_requests_running",
            description="Number of requests currently running on GPU.",
            kind=pb_utils.MetricFamily.GAUGE,
        )
        self.gauge_scheduler_waiting_family = pb_utils.MetricFamily(
            name="vllm:num_requests_waiting",
            description="Number of requests waiting to be processed.",
            kind=pb_utils.MetricFamily.GAUGE,
        )
        self.gauge_scheduler_swapped_family = pb_utils.MetricFamily(
            name="vllm:num_requests_swapped",
            description="Number of requests swapped to CPU.",
            kind=pb_utils.MetricFamily.GAUGE,
        )
        # KV Cache Usage in %
        self.gauge_gpu_cache_usage_family = pb_utils.MetricFamily(
            name="vllm:gpu_cache_usage_perc",
            description="GPU KV-cache usage. 1 means 100 percent usage.",
            kind=pb_utils.MetricFamily.GAUGE,
        )
        self.gauge_cpu_cache_usage_family = pb_utils.MetricFamily(
            name="vllm:cpu_cache_usage_perc",
            description="CPU KV-cache usage. 1 means 100 percent usage.",
            kind=pb_utils.MetricFamily.GAUGE,
        )
        # Iteration stats
        self.counter_num_preemption_family = pb_utils.MetricFamily(
            name="vllm:num_preemptions_total",
            description="Cumulative number of preemptions from the engine.",
            kind=pb_utils.MetricFamily.COUNTER,
        )
        self.counter_prompt_tokens_family = pb_utils.MetricFamily(
            name="vllm:prompt_tokens_total",
            description="Number of prefill tokens processed.",
            kind=pb_utils.MetricFamily.COUNTER,
        )
        self.counter_generation_tokens_family = pb_utils.MetricFamily(
            name="vllm:generation_tokens_total",
            description="Number of generation tokens processed.",
            kind=pb_utils.MetricFamily.COUNTER,
        )
        # self.histogram_time_to_first_token_family = pb_utils.MetricFamily(
        #     name="vllm:time_to_first_token_seconds",
        #     description="Histogram of time to first token in seconds.",
        #     kind=pb_utils.MetricFamily.HISTOGRAM,
        #     buckets=[
        #         0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.25, 0.5,
        #         0.75, 1.0, 2.5, 5.0, 7.5, 10.0
        #     ])
        # self.histogram_time_per_output_token_family = pb_utils.MetricFamily(
        #     name="vllm:time_per_output_token_seconds",
        #     description="Histogram of time per output token in seconds.",
        #     kind=pb_utils.MetricFamily.HISTOGRAM,
        #     buckets=[
        #         0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.75,
        #         1.0, 2.5
        #     ])
        # Request stats
        # Latency
        # self.histogram_e2e_time_request_family = pb_utils.MetricFamily(
        #     name="vllm:e2e_request_latency_seconds",
        #     description="Histogram of end to end request latency in seconds.",
        #     kind=pb_utils.MetricFamily.HISTOGRAM,
        #     buckets=[1.0, 2.5, 5.0, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, 60.0])
        # # Metadata
        # self.histogram_num_prompt_tokens_request_family = pb_utils.MetricFamily(
        #     name="vllm:request_prompt_tokens",
        #     description="Number of prefill tokens processed.",
        #     kind=pb_utils.MetricFamily.HISTOGRAM,
        #     buckets=build_1_2_5_buckets(max_model_len),
        # )
        # self.histogram_num_generation_tokens_request_family = \
        #     pb_utils.MetricFamily(
        #         name="vllm:request_generation_tokens",
        #         description="Number of generation tokens processed.",
        #         kind=pb_utils.MetricFamily.HISTOGRAM,
        #         buckets=build_1_2_5_buckets(max_model_len),
        #     )
        # self.histogram_best_of_request_family = pb_utils.MetricFamily(
        #     name="vllm:request_params_best_of",
        #     description="Histogram of the best_of request parameter.",
        #     kind=pb_utils.MetricFamily.HISTOGRAM,
        #     buckets=[1, 2, 5, 10, 20],
        # )
        # self.histogram_n_request_family = pb_utils.MetricFamily(
        #     name="vllm:request_params_n",
        #     description="Histogram of the n request parameter.",
        #     kind=pb_utils.MetricFamily.HISTOGRAM,
        #     buckets=[1, 2, 5, 10, 20],
        # )
        # self.counter_request_success_family = pb_utils.MetricFamily(
        #     name="vllm:request_success_total",
        #     description="Count of successfully processed requests.",
        #     kind=pb_utils.MetricFamily.COUNTER)
        # Speculative decoding stats
        # self.gauge_spec_decode_draft_acceptance_rate_family = pb_utils.MetricFamily(
        #     name="vllm:spec_decode_draft_acceptance_rate",
        #     description="Speculative token acceptance rate.",
        #     kind=pb_utils.MetricFamily.GAUGE)
        # self.gauge_spec_decode_efficiency_family = pb_utils.MetricFamily(
        #     name="vllm:spec_decode_efficiency",
        #     description="Speculative decoding system efficiency.",
        #     kind=pb_utils.MetricFamily.GAUGE)
        # self.counter_spec_decode_num_accepted_tokens_family = pb_utils.MetricFamily(
        #     name="vllm:spec_decode_num_accepted_tokens_total",
        #     description="Number of accepted tokens.",
        #     kind=pb_utils.MetricFamily.COUNTER)
        # self.counter_spec_decode_num_draft_tokens_family = pb_utils.MetricFamily(
        #     name="vllm:spec_decode_num_draft_tokens_total",
        #     description="Number of draft tokens.",
        #     kind=pb_utils.MetricFamily.COUNTER)
        # self.counter_spec_decode_num_emitted_tokens_family = pb_utils.MetricFamily(
        #     name="vllm:spec_decode_num_emitted_tokens_total",
        #     description="Number of emitted tokens.",
        #     kind=pb_utils.MetricFamily.COUNTER)
        # System stats
        # Scheduler State
        self.gauge_scheduler_running = self.gauge_scheduler_running_family.Metric(
            labels=labels
        )
        self.gauge_scheduler_waiting = self.gauge_scheduler_waiting_family.Metric(
            labels=labels
        )
        self.gauge_scheduler_swapped = self.gauge_scheduler_swapped_family.Metric(
            labels=labels
        )
        # KV Cache Usage in %
        self.gauge_gpu_cache_usage = self.gauge_gpu_cache_usage_family.Metric(
            labels=labels
        )
        self.gauge_cpu_cache_usage = self.gauge_cpu_cache_usage_family.Metric(
            labels=labels
        )

        # Iteration stats
        self.counter_num_preemption = self.counter_num_preemption_family.Metric(
            labels=labels
        )
        self.counter_prompt_tokens = self.counter_prompt_tokens_family.Metric(
            labels=labels
        )
        self.counter_generation_tokens = self.counter_generation_tokens_family.Metric(
            labels=labels
        )
        # self.histogram_time_to_first_token = self.histogram_time_to_first_token_family.Metric(
        #     labels=labels
        # )
        # self.histogram_time_per_output_token = self.histogram_time_per_output_token_family.Metric(
        #     labels=labels
        # )
        # Request stats
        # Latency
        # self.histogram_e2e_time_request = self.histogram_e2e_time_request_family.Metric(
        #     labels=labels
        # )
        # # Metadata
        # self.histogram_num_prompt_tokens_request = self.histogram_num_prompt_tokens_request_family.Metric(
        #     labels=labels
        # )
        # self.histogram_num_generation_tokens_request = self.histogram_num_generation_tokens_request_family.Metric(
        #     labels=labels
        # )
        # self.histogram_best_of_request = self.histogram_best_of_request_family.Metric(
        #     labels=labels
        # )
        # self.histogram_n_request = self.histogram_n_request_family.Metric(
        #     labels=labels
        # )
        # self.counter_request_success = self.counter_request_success_family.Metric(
        #     labels=labels
        # )
        # Speculative decoding stats
        # self.gauge_spec_decode_draft_acceptance_rate = self.gauge_spec_decode_draft_acceptance_rate_family.Metric(
        #     labels=labels
        # )
        # self.gauge_spec_decode_efficiency = self.gauge_spec_decode_efficiency_family.Metric(
        #     labels=labels
        # )
        # self.counter_spec_decode_num_accepted_tokens = self.counter_spec_decode_num_accepted_tokens_family.Metric(
        #     labels=labels
        # )
        # self.counter_spec_decode_num_draft_tokens = self.counter_spec_decode_num_draft_tokens_family.Metric(
        #     labels=labels
        # )
        # self.counter_spec_decode_num_emitted_tokens = self.counter_spec_decode_num_emitted_tokens_family.Metric(
        #     labels=labels
        # )

class VllmStatLogger(VllmStatLoggerBase):
    """StatLogger is used as an adapter between the vLLM stats collector and the Triton metrics provider."""

    # local_interval not used here. It's for vLLM logs to stdout.
    def __init__(self, labels: Dict, local_interval: float = 0) -> None:
        # Tracked stats over current local logging interval.
        super().__init__(local_interval)
        self.metrics = TritonMetrics(labels=labels)

    def info(self, type: str, obj: SupportsMetricsInfo) -> None:
        raise NotImplementedError

    def _log_gauge(self, gauge, data: Union[int, float]) -> None:
        # Convenience function for logging to gauge.
        gauge.set(data)

    def _log_counter(self, counter, data: Union[int, float]) -> None:
        # Convenience function for logging to counter.
        counter.increment(data)

    # def _log_histogram(self, histogram, data: Union[List[int],
    #                                                 List[float]]) -> None:
    #     # Convenience function for logging list to histogram.
    #     for datum in data:
    #         histogram.labels(**self.labels).observe(datum)
    def log(self, stats: VllmStats) -> None:
        # self.maybe_update_spec_decode_metrics(stats)

        # System state data
        self._log_gauge(self.metrics.gauge_scheduler_running, stats.num_running_sys)
        self._log_gauge(self.metrics.gauge_scheduler_waiting, stats.num_waiting_sys)
        self._log_gauge(self.metrics.gauge_scheduler_swapped, stats.num_swapped_sys)
        self._log_gauge(self.metrics.gauge_gpu_cache_usage, stats.gpu_cache_usage_sys)
        self._log_gauge(self.metrics.gauge_cpu_cache_usage, stats.cpu_cache_usage_sys)

        # Iteration level data
        self._log_counter(
            self.metrics.counter_num_preemption, stats.num_preemption_iter
        )
        self._log_counter(
            self.metrics.counter_prompt_tokens, stats.num_prompt_tokens_iter
        )
        self._log_counter(
            self.metrics.counter_generation_tokens, stats.num_generation_tokens_iter
        )
        # self._log_histogram(self.metrics.histogram_time_to_first_token, stats.time_to_first_tokens_iter)
        # self._log_histogram(self.metrics.histogram_time_per_output_token, stats.time_per_output_tokens_iter)

        # Request level data
        # Latency
        # self._log_histogram(self.metrics.histogram_e2e_time_request, stats.time_e2e_requests)
        # Metadata
        # self._log_histogram(self.metrics.histogram_num_prompt_tokens_request, stats.num_prompt_tokens_requests)
        # self._log_histogram(self.metrics.histogram_num_generation_tokens_request, stats.num_generation_tokens_requests)
        # self._log_histogram(self.metrics.histogram_best_of_request, stats.best_of_requests)
        # self._log_histogram(self.metrics.histogram_n_request, stats.n_requests)
        # self._log_histogram(self.metrics.counter_request_success, stats.finished_reason_requests)

        # Speculative decoding stats
        # if self.spec_decode_metrics is not None:
        #     self._log_gauge(self.metrics.gauge_spec_decode_draft_acceptance_rate, self.spec_decode_metrics.draft_acceptance_rate)
        #     self._log_gauge(self.metrics.gauge_spec_decode_efficiency, self.spec_decode_metrics.system_efficiency)
        #     self._log_counter(self.metrics.counter_spec_decode_num_accepted_tokens, self.spec_decode_metrics.accepted_tokens)
        #     self._log_counter(self.metrics.counter_spec_decode_num_draft_tokens, self.spec_decode_metrics.draft_tokens)
        #     self._log_counter(self.metrics.counter_spec_decode_num_emitted_tokens, self.spec_decode_metrics.emitted_tokens)
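For context beyond the diff: once these families are registered, they surface on Triton's Prometheus metrics endpoint alongside the server's built-in metrics. A minimal smoke-test sketch, assuming a running server, Triton's default metrics port (8002), and the requests package (none of which are part of this commit):

import requests

# Scrape the Prometheus exposition text and keep only the vLLM samples
# added by this commit.
body = requests.get("http://localhost:8002/metrics", timeout=5).text
for line in body.splitlines():
    if line.startswith("vllm:"):
        print(line)

# Expected shape of the output (values are illustrative only):
#   vllm:num_requests_running{model="vllm_metrics",version="1"} 0
#   vllm:prompt_tokens_total{model="vllm_metrics",version="1"} 18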

src/model.py

Lines changed: 7 additions & 0 deletions
@@ -39,6 +39,8 @@
 from vllm.sampling_params import SamplingParams
 from vllm.utils import random_uuid

+from metrics import VllmStatLogger
+
 _VLLM_ENGINE_ARGS_FILENAME = "model.json"
 _MULTI_LORA_ARGS_FILENAME = "multi_lora.json"

@@ -151,6 +153,11 @@ def init_engine(self):
             AsyncEngineArgs(**self.vllm_engine_config)
         )

+        # Create vLLM custom metrics
+        labels = {"model": "vllm_metrics", "version": "1"}
+        logger = VllmStatLogger(labels=labels)
+        self.llm_engine.add_logger("triton", logger)
+
     def setup_lora(self):
         self.enable_lora = False
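How the pieces connect: add_logger registers the stat logger with vLLM's AsyncLLMEngine, which then invokes log(stats) on every registered logger whenever engine stats are recorded; each call updates the Triton gauges and counters. A standalone sketch of that call path (not part of this commit), stubbing the Triton and vLLM imports with mocks so metrics.py can run outside the server; the stub classes and Stats field values are illustrative assumptions, and metrics.py is assumed to be on the import path:

import sys
from types import SimpleNamespace
from unittest.mock import MagicMock

# metrics.py imports Triton and vLLM symbols at module load time,
# so stub those modules before importing it.
sys.modules["triton_python_backend_utils"] = MagicMock()

class _StatLoggerBase:  # minimal stand-in for vllm.engine.metrics.StatLoggerBase
    def __init__(self, local_interval):
        self.local_interval = local_interval

vllm_metrics_stub = MagicMock()
vllm_metrics_stub.StatLoggerBase = _StatLoggerBase
vllm_metrics_stub.Stats = object
vllm_metrics_stub.SupportsMetricsInfo = object
sys.modules["vllm"] = MagicMock()
sys.modules["vllm.engine"] = MagicMock()
sys.modules["vllm.engine.metrics"] = vllm_metrics_stub

from metrics import VllmStatLogger

logger = VllmStatLogger(labels={"model": "vllm_metrics", "version": "1"})

# One fake stats snapshot covering just the fields log() reads.
stats = SimpleNamespace(
    num_running_sys=2, num_waiting_sys=1, num_swapped_sys=0,
    gpu_cache_usage_sys=0.35, cpu_cache_usage_sys=0.0,
    num_preemption_iter=0, num_prompt_tokens_iter=128,
    num_generation_tokens_iter=64,
)
logger.log(stats)

# The stubbed pb_utils metrics record the calls log() made.
logger.metrics.gauge_scheduler_running.set.assert_called_once_with(2)
logger.metrics.counter_prompt_tokens.increment.assert_called_once_with(128)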
