Commit b0b6ccc

fix(vllm): sanitize model reference in warning log for unsupported safetensors format
Parent: 20272f2

1 file changed (+2, -1)

pkg/inference/scheduling/scheduler.go

Lines changed: 2 additions & 1 deletion
@@ -18,6 +18,7 @@ import (
 	"github.com/docker/model-runner/pkg/inference/backends/vllm"
 	"github.com/docker/model-runner/pkg/inference/memory"
 	"github.com/docker/model-runner/pkg/inference/models"
+	"github.com/docker/model-runner/pkg/internal/utils"
 	"github.com/docker/model-runner/pkg/logging"
 	"github.com/docker/model-runner/pkg/metrics"
 	"github.com/docker/model-runner/pkg/middleware"
@@ -226,7 +227,7 @@ func (s *Scheduler) handleOpenAIInference(w http.ResponseWriter, r *http.Request
 		} else {
 			s.log.Warnf("Model %s is in safetensors format but vLLM backend is not available. "+
 				"Backend %s may not support this format and could fail at runtime.",
-				request.Model, backend.Name())
+				utils.SanitizeForLog(request.Model), backend.Name())
 		}
 	}
 }
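
The change wraps request.Model, a client-controlled value from the inference request, in utils.SanitizeForLog before it is interpolated into the warning. This closes a log-injection vector: a crafted model name containing newlines could otherwise forge extra log entries. The diff does not show the helper's body; below is a minimal sketch of what such a helper typically looks like, assuming it drops control characters. The function name comes from the diff, but this implementation is an assumption, not the actual code in pkg/internal/utils.

package utils

import (
	"strings"
	"unicode"
)

// SanitizeForLog is a sketch of a log-sanitization helper: it strips
// newlines, carriage returns, and other control characters from a
// user-supplied string so it cannot split or forge log lines.
// The real helper in pkg/internal/utils may differ.
func SanitizeForLog(s string) string {
	return strings.Map(func(r rune) rune {
		if unicode.IsControl(r) {
			return -1 // strings.Map drops runes mapped to a negative value
		}
		return r
	}, s)
}

Without sanitization, a request for a model named "foo\nlevel=error msg=..." would split the warning across two lines in the log, letting a client inject an entry that looks like a genuine error record.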
