@@ -15,15 +15,37 @@ function getDeepSeekApiKey(): string | null {
     return apiKey;
 }
 
+/**
+ * Get the DeepSeek model configuration
+ * @returns { modelName: string, apiBaseURL: string }
+ */
+function getDeepSeekModelConfig(): { modelName: string, apiBaseURL: string } {
+    const config = vscode.workspace.getConfiguration('codeReDesign');
+    const modelConfig = config.get<string>('modelConfig') || 'deepseek-chat';
+
+    if (modelConfig === 'custom') {
+        const customModelName = config.get<string>('customModelName') || '';
+        const customApiBaseURL = config.get<string>('customApiBaseURL') || '';
+        return {
+            modelName: customModelName,
+            apiBaseURL: customApiBaseURL
+        };
+    }
 
-const model_name = "deepseek-chat";
-const apiBaseURL = "https://api.deepseek.com";
-
-// const model_name = "deepseek-reasoner";
-// const apiBaseURL = "https://api.deepseek.com";
+    // Default configurations
+    const defaultConfigs: { [key: string]: { modelName: string, apiBaseURL: string } } = {
+        'deepseek-chat': {
+            modelName: 'deepseek-chat',
+            apiBaseURL: 'https://api.deepseek.com'
+        },
+        'deepseek-reasoner': {
+            modelName: 'deepseek-reasoner',
+            apiBaseURL: 'https://api.deepseek.com'
+        }
+    };
 
-// const model_name = "deepseek-coder-v2";
-// const apiBaseURL = "http://10.11.39.58:31084";
+    return defaultConfigs[modelConfig] || defaultConfigs['deepseek-chat'];
+}
 
 /**
  * Call the DeepSeek API
@@ -49,6 +71,13 @@ async function callDeepSeekApi(
         return null;
     }
 
+    const { modelName, apiBaseURL } = getDeepSeekModelConfig();
+
+    if (!modelName || !apiBaseURL) {
+        vscode.window.showErrorMessage('DeepSeek Model Name or API Base URL is not configured.');
+        return null;
+    }
+
     try {
         const openai = new OpenAI({
             apiKey: apiKey,
@@ -73,7 +102,7 @@ async function callDeepSeekApi(
         while (attempts < maxAttempts) {
             attempts++;
             const response = await openai.chat.completions.create({
-                model: model_name,
+                model: modelName,
                 messages: messages_body,
                 stream: streamMode,
                 max_tokens: 8192,
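
Note: with this change the model is selected through the extension's 'codeReDesign' configuration section rather than hard-coded constants. A minimal sketch of what the corresponding user settings.json entries might look like for the 'custom' option, assuming the keys are declared in the extension's package.json (not part of this diff); the example values are taken from the removed comments above:

    {
        // Hypothetical user settings; key names follow the config.get() calls in getDeepSeekModelConfig()
        "codeReDesign.modelConfig": "custom",
        "codeReDesign.customModelName": "deepseek-coder-v2",
        "codeReDesign.customApiBaseURL": "http://10.11.39.58:31084"
    }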