Commit 9a6c8c4
fix: address security and code quality issues from review
- Fix potential API key leakage in error logging
- Add temperature support check before setting temperature
- Improve code consistency with RouterProvider patterns
1 parent 76ded7e

2 files changed: +22 -14 lines changed

src/api/providers/chutes.ts

Lines changed: 21 additions & 13 deletions
```diff
@@ -28,21 +28,22 @@ export class ChutesHandler extends RouterProvider implements SingleCompletionHandler
 		systemPrompt: string,
 		messages: Anthropic.Messages.MessageParam[],
 	): OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming {
-		const {
-			id: model,
-			info: { maxTokens: max_tokens },
-		} = this.getModel()
+		const { id: model, info } = this.getModel()
 
-		const temperature = this.options.modelTemperature ?? this.getModel().info.temperature
-
-		return {
+		const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 			model,
-			max_tokens,
-			temperature,
+			max_tokens: info.maxTokens,
 			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
 			stream: true,
 			stream_options: { include_usage: true },
 		}
+
+		// Only add temperature if model supports it
+		if (this.supportsTemperature(model)) {
+			params.temperature = this.options.modelTemperature ?? info.temperature
+		}
+
+		return params
 	}
 
 	override async *createMessage(
```
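Both this streaming path and the completion path in the next hunk gate on `this.supportsTemperature(...)`, a helper defined elsewhere in the provider hierarchy. A minimal sketch of what such a guard typically looks like, assuming it keys off a deny-list of model IDs (the entries below are placeholders, not actual Chutes models):

```ts
// Hypothetical sketch of a supportsTemperature()-style guard. The real
// helper lives on the provider base class; the IDs below are assumed
// placeholders, not actual Chutes model entries.
const NO_TEMPERATURE_MODEL_PREFIXES = ["example/reasoning-model"]

function supportsTemperature(modelId: string): boolean {
	// Some backends reject any request that carries a `temperature` field,
	// so the param is attached only when the model accepts it.
	return !NO_TEMPERATURE_MODEL_PREFIXES.some((prefix) => modelId.startsWith(prefix))
}
```

Building `params` first and attaching `temperature` afterwards means a user-configured temperature is never sent to a model that rejects it, rather than relying on the value happening to be undefined.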
```diff
@@ -112,15 +113,22 @@ export class ChutesHandler extends RouterProvider implements SingleCompletionHandler
 	}
 
 	async completePrompt(prompt: string): Promise<string> {
-		const { id: modelId, info } = await this.fetchModel()
+		const model = await this.fetchModel()
+		const { id: modelId, info } = this.getModel()
 
 		try {
-			const response = await this.client.chat.completions.create({
+			const requestParams: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
 				model: modelId,
 				messages: [{ role: "user", content: prompt }],
 				max_tokens: info.maxTokens,
-				temperature: this.options.modelTemperature ?? 0,
-			})
+			}
+
+			// Only add temperature if model supports it
+			if (this.supportsTemperature(modelId)) {
+				requestParams.temperature = this.options.modelTemperature ?? info.temperature ?? 0
+			}
+
+			const response = await this.client.chat.completions.create(requestParams)
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
 			if (error instanceof Error) {
```
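One detail worth noting in the new completion path: `this.options.modelTemperature ?? info.temperature ?? 0` resolves left to right to the first value that is neither null nor undefined, so an explicit user setting of 0 still wins over the model default. A small standalone illustration (values assumed):

```ts
// Nullish-coalescing fallback as used above: user override, then model
// default, then 0. Unlike `||`, `??` treats 0 as a real value.
function resolveTemperature(userSetting: number | undefined, modelDefault: number | undefined): number {
	return userSetting ?? modelDefault ?? 0
}

resolveTemperature(0, 0.7) // 0: an explicit user 0 is honored (`||` would yield 0.7)
resolveTemperature(undefined, 0.7) // 0.7: the model default applies
resolveTemperature(undefined, undefined) // 0: final fallback
```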

src/api/providers/fetchers/chutes.ts

Lines changed: 1 addition & 1 deletion
```diff
@@ -46,7 +46,7 @@ export async function getChutesModels(apiKey?: string): Promise<Record<string, ModelInfo>>
 			models[m.id] = info
 		}
 	} catch (error) {
-		console.error(`Error fetching Chutes models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`)
+		console.error(`Error fetching Chutes models: ${error instanceof Error ? error.message : String(error)}`)
 	}
 
 	return models
```
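This one-line change is the API-key fix called out in the commit message. `JSON.stringify(error, Object.getOwnPropertyNames(error), 2)` uses an array replacer, which serializes even non-enumerable own properties such as `message` and `stack`, and HTTP client errors frequently attach request details to the error object. A hedged illustration of the leak, assuming an error shape where the request's Authorization header was copied onto the error (the shape and key below are invented for the demo):

```ts
// Demo of the leak the logging change closes. The error shape is an
// assumption: some HTTP clients attach request details as own properties
// of the thrown error, and the array replacer serializes them all.
const apiKey = "cpk_live_secret" // placeholder, not a real key

const error = Object.assign(new Error("Request failed with status 401"), {
	authorization: `Bearer ${apiKey}`, // assumed: copied from the failed request
})

// Old logging: "authorization" is an own property name, so the bearer
// token is written to the log verbatim.
console.error(JSON.stringify(error, Object.getOwnPropertyNames(error), 2))

// New logging: only the human-readable message is emitted.
console.error(`Error fetching Chutes models: ${error instanceof Error ? error.message : String(error)}`)
```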
