Skip to content

Commit 4122fb7

Browse files
committed
feat(metrics): add request prompt, generation, max_tokens and success metrics
Signed-off-by: googs1025 <[email protected]>
1 parent 202efc4 commit 4122fb7

File tree

7 files changed

+310
-45
lines changed

7 files changed

+310
-45
lines changed

manifests/config_with_fake.yaml

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,10 +7,18 @@ time-to-first-token: 2000
77
inter-token-latency: 1000
88
kv-cache-transfer-latency: 100
99
seed: 100100100
10-
fake-metrics:
10+
fake-metrics:
1111
running-requests: 16
1212
waiting-requests: 3
1313
kv-cache-usage: 0.3
14+
request-success-total:
15+
stop: 20
16+
length: 0
17+
tool_calls: 0
18+
remote_decode: 0
19+
request-prompt-tokens: [ 10, 20, 30, 15 ]
20+
request-generation-tokens: [ 50, 60, 40 ]
21+
request-params-max-tokens: [ 128, 256, 512 ]
1422
loras:
1523
- '{"running":"lora1,lora2","waiting":"lora3","timestamp":1257894567}'
1624
- '{"running":"lora1,lora3","waiting":"","timestamp":1257894569}'

pkg/common/config.go

Lines changed: 7 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -223,6 +223,13 @@ type Metrics struct {
223223
// 0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.75,
224224
// 1.0, 2.5, 5.0, 7.5, 10.0, 20.0, 40.0, 80.0, +Inf
225225
TPOTBucketValues []int `yaml:"tpot-buckets-values" json:"tpot-buckets-values"`
226+
// RequestPromptTokens RequestGenerationTokens RequestParamsMaxTokens Histogram fake-observation arrays for init.
227+
// Each value will be passed to Observe() once at start-up.
228+
RequestPromptTokens []float64 `yaml:"request-prompt-tokens" json:"request-prompt-tokens"` // prompt-length samples
229+
RequestGenerationTokens []float64 `yaml:"request-generation-tokens" json:"request-generation-tokens"` // generation-length samples
230+
RequestParamsMaxTokens []float64 `yaml:"request-params-max-tokens" json:"request-params-max-tokens"` // max_tokens parameter samples
231+
// RequestSuccessTotal is the number of successful requests, key: finish-reason (stop, length, etc.).
232+
RequestSuccessTotal map[string]int64 `yaml:"request-success-total" json:"request-success-total"`
226233
}
227234

228235
type LorasMetrics struct {
@@ -501,52 +508,14 @@ func (c *Configuration) validate() error {
501508
if c.FakeMetrics.KVCacheUsagePercentage < 0 || c.FakeMetrics.KVCacheUsagePercentage > 1 {
502509
return errors.New("fake metrics KV cache usage must be between 0 ans 1")
503510
}
504-
if c.FakeMetrics.TTFTBucketValues != nil {
505-
if len(c.FakeMetrics.TTFTBucketValues) > len(TTFTBucketsBoundaries)+1 {
506-
return errors.New("fake time-to-first-token array is too long")
507-
}
508-
for v := range c.FakeMetrics.TTFTBucketValues {
509-
if v < 0 {
510-
return errors.New("time-to-first-token fake metrics should contain only non-negative values")
511-
}
512-
}
513-
}
514-
if c.FakeMetrics.TPOTBucketValues != nil {
515-
if len(c.FakeMetrics.TPOTBucketValues) > len(TPOTBucketsBoundaries)+1 {
516-
return errors.New("fake time-per-output-token array is too long")
517-
}
518-
for v := range c.FakeMetrics.TPOTBucketValues {
519-
if v < 0 {
520-
return errors.New("time-per-output-token fake metrics should contain only non-negative values")
521-
}
522-
}
523-
}
524511
}
525512

526513
if c.DPSize < 1 || c.DPSize > 8 {
527514
return errors.New("data parallel size must be between 1 ans 8")
528515
}
529-
530-
if (c.SSLCertFile == "") != (c.SSLKeyFile == "") {
531-
return errors.New("both ssl-certfile and ssl-keyfile must be provided together")
532-
}
533-
534-
if c.SelfSignedCerts && (c.SSLCertFile != "" || c.SSLKeyFile != "") {
535-
return errors.New("cannot use both self-signed-certs and explicit ssl-certfile/ssl-keyfile")
536-
}
537-
538-
if c.DatasetPath == "" && c.DatasetURL != "" {
539-
return errors.New("dataset-path is required when dataset-url is set")
540-
}
541-
542516
return nil
543517
}
544518

545-
// SSLEnabled returns true if SSL is enabled either via certificate files or self-signed certificates
546-
func (c *Configuration) SSLEnabled() bool {
547-
return (c.SSLCertFile != "" && c.SSLKeyFile != "") || c.SelfSignedCerts
548-
}
549-
550519
func (c *Configuration) Copy() (*Configuration, error) {
551520
var dst Configuration
552521
data, err := json.Marshal(c)
@@ -631,10 +600,6 @@ func ParseCommandParamsAndLoadConfig() (*Configuration, error) {
631600
f.Var(&dummyFailureTypes, "failure-types", failureTypesDescription)
632601
f.Lookup("failure-types").NoOptDefVal = dummy
633602

634-
f.StringVar(&config.SSLCertFile, "ssl-certfile", config.SSLCertFile, "Path to SSL certificate file for HTTPS (optional)")
635-
f.StringVar(&config.SSLKeyFile, "ssl-keyfile", config.SSLKeyFile, "Path to SSL private key file for HTTPS (optional)")
636-
f.BoolVar(&config.SelfSignedCerts, "self-signed-certs", config.SelfSignedCerts, "Enable automatic generation of self-signed certificates for HTTPS")
637-
638603
// These values were manually parsed above in getParamValueFromArgs, we leave this in order to get these flags in --help
639604
var dummyString string
640605
f.StringVar(&dummyString, "config", "", "The path to a yaml configuration file. The command line values overwrite the configuration file values")

pkg/common/config_test.go

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -205,6 +205,15 @@ var _ = Describe("Simulator configuration", func() {
205205
},
206206
TTFTBucketValues: []int{10, 20, 30, 10},
207207
TPOTBucketValues: []int{0, 0, 10, 20, 30},
208+
RequestPromptTokens: []float64{10, 20, 30, 15},
209+
RequestGenerationTokens: []float64{50, 60, 40},
210+
RequestParamsMaxTokens: []float64{128, 256, 512},
211+
RequestSuccessTotal: map[string]int64{
212+
StopFinishReason: 20,
213+
LengthFinishReason: 0,
214+
ToolsFinishReason: 0,
215+
RemoteDecodeFinishReason: 0,
216+
},
208217
}
209218
test = testCase{
210219
name: "config with fake metrics file",

pkg/llm-d-inference-sim/metrics.go

Lines changed: 144 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ package llmdinferencesim
2020

2121
import (
2222
"context"
23+
"math"
2324
"strconv"
2425
"strings"
2526
"sync"
@@ -65,6 +66,7 @@ func (s *VllmSimulator) createAndRegisterPrometheus() error {
6566
return err
6667
}
6768

69+
// not supported for now, reports constant value
6870
s.waitingRequests = prometheus.NewGaugeVec(
6971
prometheus.GaugeOpts{
7072
Subsystem: "",
@@ -123,6 +125,61 @@ func (s *VllmSimulator) createAndRegisterPrometheus() error {
123125
return err
124126
}
125127

128+
s.requestPromptTokens = prometheus.NewHistogramVec(
129+
prometheus.HistogramOpts{
130+
Subsystem: "",
131+
Name: "vllm:request_prompt_tokens",
132+
Help: "Number of prefill tokens processed.",
133+
Buckets: build125Buckets(s.config.MaxModelLen),
134+
},
135+
[]string{vllmapi.PromLabelModelName},
136+
)
137+
if err := s.registry.Register(s.requestPromptTokens); err != nil {
138+
s.logger.Error(err, "Prometheus request_prompt_tokens histogram register failed")
139+
return err
140+
}
141+
142+
s.requestGenerationTokens = prometheus.NewHistogramVec(
143+
prometheus.HistogramOpts{
144+
Subsystem: "",
145+
Name: "vllm:request_generation_tokens",
146+
Help: "Number of generation tokens processed.",
147+
Buckets: build125Buckets(s.config.MaxModelLen),
148+
},
149+
[]string{vllmapi.PromLabelModelName},
150+
)
151+
if err := s.registry.Register(s.requestGenerationTokens); err != nil {
152+
s.logger.Error(err, "Prometheus request_generation_tokens histogram register failed")
153+
return err
154+
}
155+
156+
s.requestParamsMaxTokens = prometheus.NewHistogramVec(
157+
prometheus.HistogramOpts{
158+
Subsystem: "",
159+
Name: "vllm:request_params_max_tokens",
160+
Help: "Histogram of the max_tokens request parameter.",
161+
Buckets: build125Buckets(s.config.MaxModelLen),
162+
},
163+
[]string{vllmapi.PromLabelModelName},
164+
)
165+
if err := s.registry.Register(s.requestParamsMaxTokens); err != nil {
166+
s.logger.Error(err, "Prometheus request_params_max_tokens histogram register failed")
167+
return err
168+
}
169+
170+
s.requestSuccessTotal = prometheus.NewCounterVec(
171+
prometheus.CounterOpts{
172+
Subsystem: "",
173+
Name: "vllm:request_success_total",
174+
Help: "Count of successfully processed requests.",
175+
},
176+
[]string{vllmapi.PromLabelModelName, vllmapi.PromLabelFinishReason},
177+
)
178+
if err := s.registry.Register(s.requestSuccessTotal); err != nil {
179+
s.logger.Error(err, "Prometheus request_success_total counter register failed")
180+
return err
181+
}
182+
126183
s.setInitialPrometheusMetrics()
127184

128185
return nil
@@ -132,6 +189,7 @@ func (s *VllmSimulator) createAndRegisterPrometheus() error {
132189
// the fake metrics if set
133190
func (s *VllmSimulator) setInitialPrometheusMetrics() {
134191
var nRunningReqs, nWaitingReqs, kvCacheUsage float64
192+
modelName := s.getDisplayedModelName(s.config.Model)
135193
if s.config.FakeMetrics != nil {
136194
nRunningReqs = float64(s.config.FakeMetrics.RunningRequests)
137195
nWaitingReqs = float64(s.config.FakeMetrics.WaitingRequests)
@@ -144,9 +202,21 @@ func (s *VllmSimulator) setInitialPrometheusMetrics() {
144202
if s.config.FakeMetrics.TPOTBucketValues != nil {
145203
s.initFakeHistogram(s.tpot, common.TPOTBucketsBoundaries, s.config.FakeMetrics.TPOTBucketValues)
146204
}
205+
for _, requestPromptToken := range s.config.FakeMetrics.RequestPromptTokens {
206+
s.requestPromptTokens.WithLabelValues(modelName).Observe(requestPromptToken)
207+
}
208+
for _, requestGenerationToken := range s.config.FakeMetrics.RequestGenerationTokens {
209+
s.requestGenerationTokens.WithLabelValues(modelName).Observe(requestGenerationToken)
210+
}
211+
for _, requestParamsMaxToken := range s.config.FakeMetrics.RequestParamsMaxTokens {
212+
s.requestParamsMaxTokens.WithLabelValues(modelName).Observe(requestParamsMaxToken)
213+
}
214+
for reason, requestSuccessTotal := range s.config.FakeMetrics.RequestSuccessTotal {
215+
s.requestSuccessTotal.WithLabelValues(modelName, reason).Add(float64(requestSuccessTotal))
216+
}
217+
147218
}
148219

149-
modelName := s.getDisplayedModelName(s.config.Model)
150220
s.runningRequests.WithLabelValues(modelName).Set(nRunningReqs)
151221
s.waitingRequests.WithLabelValues(modelName).Set(nWaitingReqs)
152222
s.kvCacheUsagePercentage.WithLabelValues(modelName).Set(kvCacheUsage)
@@ -288,6 +358,7 @@ func (s *VllmSimulator) startMetricsUpdaters(ctx context.Context) {
288358
go s.kvCacheUsageUpdater(ctx)
289359
go s.ttftUpdater(ctx)
290360
go s.tpotUpdater(ctx)
361+
go s.recordRequestUpdater(ctx)
291362
}
292363

293364
// waitingRequestsUpdater updates the waiting requests metric by listening on the relevant channel
@@ -396,3 +467,75 @@ func (s *VllmSimulator) decrementLoraRefCount(lora string, theMap *sync.Map) {
396467
s.logger.Error(nil, "Zero model reference", "model", lora)
397468
}
398469
}
470+
471+
// recordRequestUpdater listens on requestSuccessChan and drives the Prometheus metric
472+
// for successfully completed requests.
473+
func (s *VllmSimulator) recordRequestUpdater(ctx context.Context) {
474+
for {
475+
select {
476+
case <-ctx.Done():
477+
return
478+
case event := <-s.requestSuccessChan:
479+
s.recordRequestMetricsOnSuccess(
480+
event.promptTokens,
481+
event.generationTokens,
482+
event.maxTokens,
483+
event.finishReason,
484+
)
485+
}
486+
}
487+
}
488+
489+
// requestSuccessEvent represents the data associated with a successfully completed request,
490+
// which is sent through the requestSuccessChan for asynchronous metrics recording.
491+
type requestSuccessEvent struct {
492+
// promptTokens is the number of input (prompt) tokens in the request
493+
promptTokens int
494+
// generationTokens is the number of generated (output) tokens in the response
495+
generationTokens int
496+
// maxTokens is the maximum number of tokens allowed for generation (if specified in the request)
497+
maxTokens *int64
498+
// finishReason indicates why the generation stopped (e.g., "stop", "length", "tool_calls")
499+
finishReason string
500+
}
501+
502+
// recordRequestMetricsOnSuccess records metrics for a successfully completed request
503+
func (s *VllmSimulator) recordRequestMetricsOnSuccess(promptTokens,
504+
generationTokens int, maxTokens *int64, finishReason string) {
505+
modelName := s.getDisplayedModelName(s.config.Model)
506+
s.requestPromptTokens.WithLabelValues(modelName).Observe(float64(promptTokens))
507+
s.requestGenerationTokens.WithLabelValues(modelName).Observe(float64(generationTokens))
508+
if maxTokens != nil {
509+
s.requestParamsMaxTokens.WithLabelValues(modelName).Observe(float64(*maxTokens))
510+
}
511+
s.requestSuccessTotal.WithLabelValues(modelName, finishReason).Inc()
512+
}
513+
514+
// build125Buckets generates histogram bucket boundaries in powers of 10
// scaled by [1, 2, 5]: 1, 2, 5, 10, 20, 50, ... up to and including maxValue.
// A non-positive maxValue yields an empty (non-nil) slice.
// This matches vLLM's build_1_2_5_buckets() in metrics.py.
//
// Reference: https://github.com/vllm-project/vllm/blob/main/vllm/engine/metrics.py#L175
func build125Buckets(maxValue int) []float64 {
	if maxValue <= 0 {
		return []float64{}
	}
	var buckets []float64
	// Grow the power of ten with integer multiplication rather than
	// math.Pow10: the float conversion can round, and for very large
	// maxValue the unchecked `m * int(math.Pow10(exponent))` could
	// overflow int, making the old `value <= maxValue` test spuriously
	// true and the loop run away.
	for power := 1; ; power *= 10 {
		for _, m := range []int{1, 2, 5} {
			// Equivalent to m*power <= maxValue, but tested without
			// the multiplication so it cannot overflow.
			if m <= maxValue/power {
				buckets = append(buckets, float64(m*power))
			}
		}
		// The smallest candidate of the next decade is 10*power; once
		// that exceeds maxValue there is nothing left to add (and power
		// must not be multiplied further, or it could overflow).
		if power > maxValue/10 {
			break
		}
	}
	return buckets
}

0 commit comments

Comments
 (0)