 * @Author       : ai-business-hql [email protected]
 * @Date         : 2025-06-24 16:29:05
 * @LastEditors  : ai-business-hql [email protected]
 * @LastEditTime : 2025-10-15 14:49:15
 * @FilePath     : /comfyui_copilot/ui/src/apis/workflowChatApi.ts
 * @Description  : 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
 */
@@ -38,13 +38,15 @@ const getOpenAiConfig = () => {
3838 const rsaPublicKey = localStorage . getItem ( 'rsaPublicKey' ) ;
3939 const workflowLLMApiKey = localStorage . getItem ( 'workflowLLMApiKey' ) ;
4040 const workflowLLMBaseUrl = localStorage . getItem ( 'workflowLLMBaseUrl' ) ;
41+ const workflowLLMModel = localStorage . getItem ( 'workflowLLMModel' ) ;
4142
4243 return {
4344 openaiApiKey : openaiApiKey || '' ,
4445 openaiBaseUrl : openaiBaseUrl || '' ,
4546 rsaPublicKey,
4647 workflowLLMApiKey : workflowLLMApiKey || '' ,
4748 workflowLLMBaseUrl : workflowLLMBaseUrl || '' ,
49+ workflowLLMModel : workflowLLMModel || '' ,
4850 } ;
4951} ;
5052
@@ -145,7 +147,7 @@ export namespace WorkflowChatAPI {
145147 try {
146148 const apiKey = getApiKey ( ) ;
147149 const browserLanguage = app . extensionManager . setting . get ( 'Comfy.Locale' ) ;
148- const { openaiApiKey, openaiBaseUrl, rsaPublicKey, workflowLLMApiKey, workflowLLMBaseUrl } = getOpenAiConfig ( ) ;
150+ const { openaiApiKey, openaiBaseUrl, rsaPublicKey, workflowLLMApiKey, workflowLLMBaseUrl, workflowLLMModel } = getOpenAiConfig ( ) ;
149151 // Generate a unique message ID for this chat request
150152 const messageId = generateUUID ( ) ;
151153
@@ -269,6 +271,9 @@ export namespace WorkflowChatAPI {
269271 if ( workflowLLMApiKey ) {
270272 headers [ 'Workflow-LLM-Api-Key' ] = workflowLLMApiKey ;
271273 }
274+ if ( workflowLLMModel ) {
275+ headers [ 'Workflow-LLM-Model' ] = workflowLLMModel ;
276+ }
272277
273278 // Create controller and combine with external signal if provided
274279 const controller = new AbortController ( ) ;
@@ -600,12 +605,68 @@ export namespace WorkflowChatAPI {
600605 return result as { models : { label : string ; name : string ; image_enable : boolean } [ ] } ;
601606 }
602607
608+ // Fetch models directly from an OpenAI-compatible LLM server via its /models endpoint
609+ export async function listModelsFromLLM (
610+ baseUrl : string ,
611+ apiKey ?: string
612+ ) : Promise < string [ ] > {
613+ const headers : Record < string , string > = {
614+ 'accept' : 'application/json' ,
615+ } ;
616+
617+ if ( apiKey && apiKey . trim ( ) !== '' ) {
618+ headers [ 'Authorization' ] = `Bearer ${ apiKey } ` ;
619+ }
620+
621+ // Normalize base URL to avoid double slashes
622+ const normalizedBase = baseUrl . replace ( / \/ $ / , '' ) ;
623+ const url = `${ normalizedBase } /models` ;
624+
625+ const response = await fetch ( url , {
626+ method : 'GET' ,
627+ headers,
628+ } ) ;
629+
630+ if ( ! response . ok ) {
631+ throw new Error ( `Failed to fetch models from LLM: ${ response . status } ${ response . statusText } ` ) ;
632+ }
633+
634+ const result = await response . json ( ) ;
635+
636+ // Attempt to support multiple possible shapes
637+ // OpenAI style: { data: [{ id: string }, ...] }
638+ if ( Array . isArray ( result ?. data ) ) {
639+ const ids = result . data
640+ . map ( ( m : any ) => ( typeof m === 'string' ? m : ( m ?. id || m ?. name ) ) )
641+ . filter ( ( v : any ) => typeof v === 'string' && v . trim ( ) !== '' ) ;
642+ return Array . from ( new Set ( ids ) ) ;
643+ }
644+
645+ // Alternate style: { models: [{ id/name }, ...] } or [ ... ]
646+ const modelsField = result ?. models ?? result ;
647+ if ( Array . isArray ( modelsField ) ) {
648+ const ids = modelsField
649+ . map ( ( m : any ) => ( typeof m === 'string' ? m : ( m ?. id || m ?. name ) ) )
650+ . filter ( ( v : any ) => typeof v === 'string' && v . trim ( ) !== '' ) ;
651+ return Array . from ( new Set ( ids ) ) ;
652+ }
653+
654+ // Single object with id/name
655+ const single = result ?. id || result ?. name ;
656+ if ( typeof single === 'string' && single . trim ( ) !== '' ) {
657+ return [ single ] ;
658+ }
659+
660+ // Fallback to empty list if shape is unrecognized
661+ return [ ] ;
662+ }
663+
603664 export async function * streamDebugAgent (
604665 workflowData : any ,
605666 abortSignal ?: AbortSignal
606667 ) : AsyncGenerator < ChatResponse > {
607668 try {
608- const { openaiApiKey, openaiBaseUrl, workflowLLMApiKey, workflowLLMBaseUrl } = getOpenAiConfig ( ) ;
669+ const { openaiApiKey, openaiBaseUrl, workflowLLMApiKey, workflowLLMBaseUrl, workflowLLMModel } = getOpenAiConfig ( ) ;
609670 const browserLanguage = app . extensionManager . setting . get ( 'Comfy.Locale' ) ;
610671 const session_id = localStorage . getItem ( "sessionId" ) || null ;
611672 const apiKey = getApiKey ( ) ;
@@ -631,7 +692,9 @@ export namespace WorkflowChatAPI {
631692 if ( workflowLLMApiKey ) {
632693 headers [ 'Workflow-LLM-Api-Key' ] = workflowLLMApiKey ;
633694 }
634-
695+ if ( workflowLLMModel ) {
696+ headers [ 'Workflow-LLM-Model' ] = workflowLLMModel ;
697+ }
635698 // Create controller and combine with external signal if provided
636699 const controller = new AbortController ( ) ;
637700 const timeoutId = setTimeout ( ( ) => controller . abort ( ) , 120000 ) ; // 2 minutes timeout
0 commit comments