@@ -9,6 +9,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import { getHuggingFaceModels, getCachedHuggingFaceModels } from "./fetchers/huggingface"
 import { handleOpenAIError } from "./utils/openai-error-handler"
+import { huggingFaceDefaultModelId } from "@roo-code/types"
 
 export class HuggingFaceHandler extends BaseProvider implements SingleCompletionHandler {
 	private client: OpenAI
@@ -50,7 +51,7 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
 		messages: Anthropic.Messages.MessageParam[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
-		const modelId = this.options.huggingFaceModelId || "meta-llama/Llama-3.3-70B-Instruct"
+		const modelId = this.options.huggingFaceModelId || huggingFaceDefaultModelId
 		const temperature = this.options.modelTemperature ?? 0.7
 
 		const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
@@ -94,7 +95,7 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
 	}
 
 	async completePrompt(prompt: string): Promise<string> {
-		const modelId = this.options.huggingFaceModelId || "meta-llama/Llama-3.3-70B-Instruct"
+		const modelId = this.options.huggingFaceModelId || huggingFaceDefaultModelId
 
 		try {
 			const response = await this.client.chat.completions.create({
@@ -109,7 +110,7 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
 	}
 
 	override getModel() {
-		const modelId = this.options.huggingFaceModelId || "meta-llama/Llama-3.3-70B-Instruct"
+		const modelId = this.options.huggingFaceModelId || huggingFaceDefaultModelId
 
 		// Try to get model info from cache
 		const modelInfo = this.modelCache?.[modelId]
0 commit comments