1
- import { Models , getUserConfig } from '../../config/index.mjs'
2
- import { pushRecord , setAbortController } from './shared.mjs'
3
- import { isEmpty } from 'lodash-es'
1
+ import { getUserConfig } from '../../config/index.mjs'
4
2
import { getToken } from '../../utils/jwt-token-generator.mjs'
5
- import { createParser } from '../../utils/eventsource-parser.mjs'
6
-
7
/**
 * Fetch `resource` and stream the response body as server-sent events.
 *
 * @param {string|URL|Request} resource - passed straight through to `fetch`
 * @param {Object} options - `fetch` options plus SSE callbacks:
 *   `onMessage(event)` invoked for every parsed SSE event,
 *   `onStart(text)` invoked once with the decoded first body chunk,
 *   `onEnd()` invoked after the stream is fully consumed,
 *   `onError(errOrResponse)` invoked with a network `Error` or a non-ok `Response`.
 */
async function fetchSSE(resource, options) {
  const { onMessage, onStart, onEnd, onError, ...fetchOptions } = options
  const resp = await fetch(resource, fetchOptions).catch(async (err) => {
    await onError(err)
  })
  // Network failure was already routed to onError above; nothing more to do.
  if (!resp) return
  if (!resp.ok) {
    await onError(resp)
    return
  }

  const parser = createParser((event) => {
    if (event.type === 'event') {
      onMessage(event)
    }
  })

  let hasStarted = false
  const reader = resp.body.getReader()
  try {
    let result
    while (!(result = await reader.read()).done) {
      const chunk = result.value
      if (!hasStarted) {
        hasStarted = true
        // NOTE(review): only the first chunk is decoded here; assumes it is not
        // split mid-multibyte-character — acceptable for a status peek.
        await onStart(new TextDecoder().decode(chunk))
      }
      parser.feed(chunk)
    }
  } finally {
    // Fix: release the reader even when a callback or the parser throws, so
    // the response body stream is not left permanently locked.
    reader.releaseLock()
  }
  await onEnd()
}
3
+ import { generateAnswersWithChatgptApiCompat } from './openai-api.mjs'
37
4
38
5
/**
 * Stream an answer for `question` from the ChatGLM (Zhipu BigModel) service.
 *
 * ChatGLM exposes an OpenAI-compatible v4 endpoint, so this simply resolves
 * the user's API key into a JWT and delegates to the shared compat path.
 *
 * @param {Runtime.Port} port - extension messaging port the answer is streamed to
 * @param {string} question - the user's prompt
 * @param {Session} session - conversation session (history, records)
 * @param {string} modelName - model identifier to request
 */
export async function generateAnswersWithChatGLMApi(port, question, session, modelName) {
  const { chatglmApiKey } = await getUserConfig()
  // The stored key is exchanged for a short-lived JWT expected by BigModel.
  const bearerToken = getToken(chatglmApiKey)
  return generateAnswersWithChatgptApiCompat(
    'https://open.bigmodel.cn/api/paas/v4',
    port,
    question,
    session,
    bearerToken,
    modelName,
  )
}
0 commit comments