@@ -1,11 +1,28 @@
 import express from 'express'
 import multer from 'multer'

-import { CourseChatRequest, AzureOptions, RequestWithUser } from '../types'
+import {
+  CourseChatRequest,
+  AzureOptions,
+  RequestWithUser,
+  AzureOptionsV2,
+} from '../types'
 import { isError } from '../util/parser'
-import { calculateUsage, incrementUsage, checkUsage, checkCourseUsage, incrementCourseUsage } from '../services/chatInstances/usage'
-import { getCompletionEvents, streamCompletion } from '../util/azure'
-import { getMessageContext, getModelContextLimit, getCourseModel, getAllowedModels } from '../util/util'
+import {
+  calculateUsage,
+  incrementUsage,
+  checkUsage,
+  checkCourseUsage,
+  incrementCourseUsage,
+} from '../services/chatInstances/usage'
+// import { getCompletionEvents, streamCompletion } from '../util/azure'
+import { getCompletionEventsV2, streamCompletionV2 } from '../util/azureV2'
+import {
+  getMessageContext,
+  getModelContextLimit,
+  getCourseModel,
+  getAllowedModels,
+} from '../util/util'
 import getEncoding from '../util/tiktoken'
 import logger from '../util/logger'
 import { inProduction, DEFAULT_TOKEN_LIMIT, FREE_MODEL } from '../../config'
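The v1 azure helpers are left commented out above in favour of '../util/azureV2'. Judging from the call sites later in this diff, getCompletionEventsV2 takes the chat options directly (the `as AzureOptions` cast is gone) and streamCompletionV2 drops the options argument. A minimal sketch of what the azureV2 module would need to export under those inferred signatures; the event type and the use of AzureOptionsV2 are assumptions, not confirmed by the diff:

    // Hypothetical sketch of '../util/azureV2', inferred from call sites in this
    // diff; the real module's types and internals may differ.
    import { Response } from 'express'

    import { AzureOptionsV2 } from '../types'
    import getEncoding from '../util/tiktoken'

    type Encoding = ReturnType<typeof getEncoding>

    // Assumed: resolves to a stream of completion events, or an Error on failure
    // (the router checks the result with isError and replies 424).
    export const getCompletionEventsV2 = async (
      options: AzureOptionsV2
    ): Promise<AsyncIterable<unknown> | Error> => {
      return new Error('not implemented in this sketch')
    }

    // Assumed: forwards events to `res` as an event stream and reports the
    // token count so the router can charge usage afterwards.
    export const streamCompletionV2 = async (
      events: AsyncIterable<unknown>,
      encoding: Encoding,
      res: Response
    ): Promise<{ tokenCount: number }> => {
      let tokenCount = 0
      for await (const event of events) {
        // Real implementation: write each chunk to `res`, counting its tokens
        // with `encoding`.
      }
      return { tokenCount }
    }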
@@ -20,7 +37,14 @@ const upload = multer({ storage })
 const fileParsing = async (options: any, req: any) => {
   let fileContent = ''

-  const textFileTypes = ['text/plain', 'text/html', 'text/css', 'text/csv', 'text/markdown', 'text/md']
+  const textFileTypes = [
+    'text/plain',
+    'text/html',
+    'text/css',
+    'text/csv',
+    'text/markdown',
+    'text/md',
+  ]
   if (textFileTypes.includes(req.file.mimetype)) {
     const fileBuffer = req.file.buffer
     fileContent = fileBuffer.toString('utf8')
@@ -56,7 +80,9 @@ openaiRouter.post('/stream', upload.single('file'), async (r, res) => {
     return
   }

-  const usageAllowed = courseId ? await checkCourseUsage(user, courseId) : model === FREE_MODEL || (await checkUsage(user, model))
+  const usageAllowed = courseId
+    ? await checkCourseUsage(user, courseId)
+    : model === FREE_MODEL || (await checkUsage(user, model))

   if (!usageAllowed) {
     res.status(403).send('Usage limit reached')
@@ -75,14 +101,22 @@ openaiRouter.post('/stream', upload.single('file'), async (r, res) => {
     return
   }

-  options.messages = getMessageContext(optionsMessagesWithFile || options.messages)
+  options.messages = getMessageContext(
+    optionsMessagesWithFile || options.messages
+  )
   options.stream = true

   const encoding = getEncoding(model)
   let tokenCount = calculateUsage(options, encoding)
-  const tokenUsagePercentage = Math.round((tokenCount / DEFAULT_TOKEN_LIMIT) * 100)
-
-  if (model !== FREE_MODEL && tokenCount > 0.1 * DEFAULT_TOKEN_LIMIT && !userConsent) {
+  const tokenUsagePercentage = Math.round(
+    (tokenCount / DEFAULT_TOKEN_LIMIT) * 100
+  )
+
+  if (
+    model !== FREE_MODEL &&
+    tokenCount > 0.1 * DEFAULT_TOKEN_LIMIT &&
+    !userConsent
+  ) {
     res.status(201).json({
       tokenConsumtionWarning: true,
       message: `You are about to use ${tokenUsagePercentage}% of your monthly CurreChat usage`,
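For intuition on the reshaped condition: the warning fires when a single request would consume more than 10% of the monthly allowance and the user has not yet consented. A worked example with made-up numbers:

    // Hypothetical numbers, for illustration only.
    const DEFAULT_TOKEN_LIMIT = 50_000
    const tokenCount = 6_000

    // 6000 / 50000 = 0.12, so the warning message reports 12%.
    const tokenUsagePercentage = Math.round((tokenCount / DEFAULT_TOKEN_LIMIT) * 100)

    // 6000 > 5000 (10% of the limit): a non-free model without prior consent
    // gets the 201 tokenConsumtionWarning response instead of a completion.
    const shouldWarn = tokenCount > 0.1 * DEFAULT_TOKEN_LIMIT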
@@ -98,7 +132,7 @@ openaiRouter.post('/stream', upload.single('file'), async (r, res) => {
     return
   }

-  const events = await getCompletionEvents(options as AzureOptions)
+  const events = await getCompletionEventsV2(options)

   if (isError(events)) {
     res.status(424)
@@ -107,7 +141,7 @@ openaiRouter.post('/stream', upload.single('file'), async (r, res) => {

   res.setHeader('content-type', 'text/event-stream')

-  const completion = await streamCompletion(events, options as AzureOptions, encoding, res)
+  const completion = await streamCompletionV2(events, encoding, res)

   tokenCount += completion.tokenCount

@@ -135,7 +169,8 @@ openaiRouter.post('/stream', upload.single('file'), async (r, res) => {
       where: { courseId },
     }))

-  const consentToSave = courseId && course.saveDiscussions && options.saveConsent
+  const consentToSave =
+    courseId && course.saveDiscussions && options.saveConsent

   console.log('consentToSave', options.saveConsent, user.username)

@@ -155,79 +190,83 @@ openaiRouter.post('/stream', upload.single('file'), async (r, res) => {
   return
 })

-openaiRouter.post('/stream/:courseId', upload.single('file'), async (r, res) => {
-  const { courseId } = r.params
-  const req = r as CourseChatRequest
-  const { options } = JSON.parse(r.body.data)
-  const { user } = req
-
-  if (!user.id) {
-    res.status(401).send('Unauthorized')
-    return
-  }
+openaiRouter.post(
+  '/stream/:courseId',
+  upload.single('file'),
+  async (r, res) => {
+    const { courseId } = r.params
+    const req = r as CourseChatRequest
+    const { options } = JSON.parse(r.body.data)
+    const { user } = req
+
+    if (!user.id) {
+      res.status(401).send('Unauthorized')
+      return
+    }

-  const usageAllowed = await checkCourseUsage(user, courseId)
-  if (!usageAllowed) {
-    res.status(403).send('Usage limit reached')
-    return
-  }
+    const usageAllowed = await checkCourseUsage(user, courseId)
+    if (!usageAllowed) {
+      res.status(403).send('Usage limit reached')
+      return
+    }

-  options.messages = getMessageContext(options.messages)
-  options.stream = true
+    options.messages = getMessageContext(options.messages)
+    options.stream = true

-  const model = await getCourseModel(courseId)
+    const model = await getCourseModel(courseId)

-  if (options.model) {
-    const allowedModels = getAllowedModels(model)
-    if (!allowedModels.includes(options.model)) {
-      res.status(403).send('Model not allowed')
-      return
+    if (options.model) {
+      const allowedModels = getAllowedModels(model)
+      if (!allowedModels.includes(options.model)) {
+        res.status(403).send('Model not allowed')
+        return
+      }
+    } else {
+      options.model = model
     }
-  } else {
-    options.model = model
-  }

-  const encoding = getEncoding(options.model)
-  let tokenCount = calculateUsage(options, encoding)
+    const encoding = getEncoding(options.model)
+    let tokenCount = calculateUsage(options, encoding)

-  const contextLimit = getModelContextLimit(options.model)
+    const contextLimit = getModelContextLimit(options.model)

-  if (tokenCount > contextLimit) {
-    logger.info('Maximum context reached')
-    res.status(403).send('Model maximum context reached')
-    return
-  }
+    if (tokenCount > contextLimit) {
+      logger.info('Maximum context reached')
+      res.status(403).send('Model maximum context reached')
+      return
+    }

-  const events = await getCompletionEvents(options as AzureOptions)
+    const events = await getCompletionEventsV2(options)

-  if (isError(events)) {
-    res.status(424).send(events)
-    return
-  }
+    if (isError(events)) {
+      res.status(424).send(events)
+      return
+    }

-  res.setHeader('content-type', 'text/event-stream')
+    res.setHeader('content-type', 'text/event-stream')

-  const completion = await streamCompletion(events, options as AzureOptions, encoding, res)
+    const completion = await streamCompletionV2(events, encoding, res)

-  tokenCount += completion.tokenCount
+    tokenCount += completion.tokenCount

-  let userToCharge = user
-  if (inProduction && req.hijackedBy) {
-    userToCharge = req.hijackedBy
-  }
+    let userToCharge = user
+    if (inProduction && req.hijackedBy) {
+      userToCharge = req.hijackedBy
+    }

-  await incrementCourseUsage(userToCharge, courseId, tokenCount)
-  logger.info(`Stream ended. Total tokens: ${tokenCount}`, {
-    tokenCount,
-    courseId,
-    model: options.model,
-    user: user.username,
-  })
+    await incrementCourseUsage(userToCharge, courseId, tokenCount)
+    logger.info(`Stream ended. Total tokens: ${tokenCount}`, {
+      tokenCount,
+      courseId,
+      model: options.model,
+      user: user.username,
+    })

-  encoding.free()
+    encoding.free()

-  res.end()
-  return
-})
+    res.end()
+    return
+  }
+)

 export default openaiRouter
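Both routes accept multipart form data: the JSON payload rides in a `data` field (hence `JSON.parse(r.body.data)`) and an optional upload in a `file` field. A sketch of a matching client call; the '/api/ai' base path and the course id are placeholders, not taken from this diff:

    // Hypothetical client call; adjust the base path to wherever openaiRouter
    // is actually mounted.
    const form = new FormData()
    form.append(
      'data',
      JSON.stringify({
        options: { messages: [{ role: 'user', content: 'Hello!' }] },
      })
    )
    // Optional: fileParsing reads text uploads (text/plain, text/csv, ...) as UTF-8.
    // form.append('file', new Blob(['notes'], { type: 'text/plain' }), 'notes.txt')

    const res = await fetch('/api/ai/stream/some-course-id', {
      method: 'POST',
      body: form,
    })

    // The handler replies with content-type: text/event-stream; read it incrementally.
    const reader = res.body!.getReader()
    const decoder = new TextDecoder()
    for (let chunk = await reader.read(); !chunk.done; chunk = await reader.read()) {
      console.log(decoder.decode(chunk.value))
    }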