@@ -25,12 +25,103 @@ import { getMessageTextContent } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";

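+// Request payload shapes for ChatGLM's chat, image, and video endpoints.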
+interface BasePayload {
+  model: string;
+}
+
+interface ChatPayload extends BasePayload {
+  messages: ChatOptions["messages"];
+  stream?: boolean;
+  temperature?: number;
+  presence_penalty?: number;
+  frequency_penalty?: number;
+  top_p?: number;
+}
+
+interface ImageGenerationPayload extends BasePayload {
+  prompt: string;
+  size?: string;
+  user_id?: string;
+}
+
+interface VideoGenerationPayload extends BasePayload {
+  prompt: string;
+  duration?: number;
+  resolution?: string;
+  user_id?: string;
+}
+
+type ModelType = "chat" | "image" | "video";
+
 export class ChatGLMApi implements LLMApi {
   private disableListModels = true;

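+  // Classify a model by its name prefix: "cogview-" models generate images,
+  // "cogvideo-" models generate videos, everything else is handled as chat.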
+  private getModelType(model: string): ModelType {
+    if (model.startsWith("cogview-")) return "image";
+    if (model.startsWith("cogvideo-")) return "video";
+    return "chat";
+  }
+
+  private getModelPath(type: ModelType): string {
+    switch (type) {
+      case "image":
+        return ChatGLM.ImagePath;
+      case "video":
+        return ChatGLM.VideoPath;
+      default:
+        return ChatGLM.ChatPath;
+    }
+  }
+
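+  // Build the request body that matches the model's endpoint type.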
+  private createPayload(
+    messages: ChatOptions["messages"],
+    modelConfig: any,
+    options: ChatOptions,
+  ): BasePayload {
+    const modelType = this.getModelType(modelConfig.model);
+    const lastMessage = messages[messages.length - 1];
+    const prompt =
+      typeof lastMessage.content === "string"
+        ? lastMessage.content
+        : lastMessage.content.map((c) => c.text).join("\n");
+
+    switch (modelType) {
+      case "image":
+        return {
+          model: modelConfig.model,
+          prompt,
+          size: options.config.size,
+        } as ImageGenerationPayload;
+      default:
+        return {
+          messages,
+          stream: options.config.stream,
+          model: modelConfig.model,
+          temperature: modelConfig.temperature,
+          presence_penalty: modelConfig.presence_penalty,
+          frequency_penalty: modelConfig.frequency_penalty,
+          top_p: modelConfig.top_p,
+        } as ChatPayload;
+    }
+  }
+
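+  // Convert an endpoint-specific response into content the chat view can render.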
+  private parseResponse(modelType: ModelType, json: any): string {
+    switch (modelType) {
+      case "image": {
+        const imageUrl = json.data?.[0]?.url;
+        return imageUrl ? `![Generated Image](${imageUrl})` : "";
+      }
+      case "video": {
+        const videoUrl = json.data?.[0]?.url;
+        return videoUrl ? `<video controls src="${videoUrl}"></video>` : "";
+      }
+      default:
+        return this.extractMessage(json);
+    }
+  }
+
   path(path: string): string {
     const accessStore = useAccessStore.getState();
-
     let baseUrl = "";

     if (accessStore.useCustomConfig) {
@@ -51,7 +142,6 @@ export class ChatGLMApi implements LLMApi {
     }

     console.log("[Proxy Endpoint] ", baseUrl, path);
-
     return [baseUrl, path].join("/");
   }

@@ -79,53 +169,55 @@ export class ChatGLMApi implements LLMApi {
       },
     };

-    const requestPayload: RequestPayload = {
-      messages,
-      stream: options.config.stream,
-      model: modelConfig.model,
-      temperature: modelConfig.temperature,
-      presence_penalty: modelConfig.presence_penalty,
-      frequency_penalty: modelConfig.frequency_penalty,
-      top_p: modelConfig.top_p,
-    };
+    const modelType = this.getModelType(modelConfig.model);
+    const requestPayload = this.createPayload(messages, modelConfig, options);
+    const path = this.path(this.getModelPath(modelType));

-    console.log("[Request] glm payload: ", requestPayload);
+    console.log(`[Request] glm ${modelType} payload: `, requestPayload);

-    const shouldStream = !!options.config.stream;
     const controller = new AbortController();
     options.onController?.(controller);

     try {
-      const chatPath = this.path(ChatGLM.ChatPath);
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
         signal: controller.signal,
         headers: getHeaders(),
       };

-      // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
         REQUEST_TIMEOUT_MS,
       );

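+      // Image and video generation return a single JSON response, so bypass the streaming path.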
+      if (modelType === "image" || modelType === "video") {
+        const res = await fetch(path, chatPayload);
+        clearTimeout(requestTimeoutId);
+
+        const resJson = await res.json();
+        console.log(`[Response] glm ${modelType}:`, resJson);
+        const message = this.parseResponse(modelType, resJson);
+        options.onFinish(message, res);
+        return;
+      }
+
+      const shouldStream = !!options.config.stream;
       if (shouldStream) {
         const [tools, funcs] = usePluginStore
           .getState()
           .getAsTools(
             useChatStore.getState().currentSession().mask?.plugin || [],
           );
         return stream(
-          chatPath,
+          path,
           requestPayload,
           getHeaders(),
           tools as any,
           funcs,
           controller,
           // parseSSE
           (text: string, runTools: ChatMessageTool[]) => {
-            // console.log("parseSSE", text, runTools);
             const json = JSON.parse(text);
             const choices = json.choices as Array<{
               delta: {
@@ -154,7 +246,7 @@ export class ChatGLMApi implements LLMApi {
             }
             return choices[0]?.delta?.content;
           },
-          // processToolMessage, include tool_calls message and tool call results
+          // processToolMessage
           (
             requestPayload: RequestPayload,
             toolCallMessage: any,
@@ -172,7 +264,7 @@ export class ChatGLMApi implements LLMApi {
           options,
         );
       } else {
-        const res = await fetch(chatPath, chatPayload);
+        const res = await fetch(path, chatPayload);
         clearTimeout(requestTimeoutId);

         const resJson = await res.json();
@@ -184,6 +276,7 @@ export class ChatGLMApi implements LLMApi {
       options.onError?.(e as Error);
     }
   }
+
   async usage() {
     return {
       used: 0,