// src/botservice.ts
import "isomorphic-fetch";
-import {
-  ChatCompletionRequestMessageRoleEnum as ChatCompletionRequestMessageRoleEnum3
-} from "openai";

// src/logging.ts
import { Log } from "debug-level";
@@ -13,38 +10,31 @@ var openAILog = new Log("open-ai");
var matterMostLog = new Log("mattermost");

// src/openai-wrapper.ts
-import {
-  ChatCompletionResponseMessageRoleEnum,
-  Configuration,
-  OpenAIApi
-} from "openai";
+import OpenAI from "openai";
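// openai v4 collapses the v3 Configuration/OpenAIApi pair (and its role enums)
// into a single OpenAI client class.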
var apiKey = process.env["OPENAI_API_KEY"];
-openAILog.trace({ apiKey });
-var configuration = new Configuration({ apiKey });
+var config = { apiKey };
var azureOpenAiApiKey = process.env["AZURE_OPENAI_API_KEY"];
if (azureOpenAiApiKey) {
-  configuration.baseOptions = {
-    headers: { "api-key": azureOpenAiApiKey },
-    params: {
-      "api-version": process.env["AZURE_OPENAI_API_VERSION"] ?? "2023-07-01-preview"
-    }
+  config = {
+    apiKey: azureOpenAiApiKey,
+    baseURL: `https://${process.env["AZURE_OPENAI_API_INSTANCE_NAME"]}.openai.azure.com/openai/deployments/${process.env["AZURE_OPENAI_API_DEPLOYMENT_NAME"] ?? "gpt-35-turbo"}`,
+    defaultQuery: { "api-version": process.env["AZURE_OPENAI_API_VERSION"] ?? "2023-08-01-preview" },
+    defaultHeaders: { "api-key": azureOpenAiApiKey }
  };
-  configuration.basePath = "https://" + process.env["AZURE_OPENAI_API_INSTANCE_NAME"] + ".openai.azure.com/openai/deployments/" + process.env["AZURE_OPENAI_API_DEPLOYMENT_NAME"];
}
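// For Azure, the v4 client targets the deployment URL via baseURL, with the
// api-version and api-key injected on every request through defaultQuery and
// defaultHeaders.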
-var openai = new OpenAIApi(configuration);
+var openai = new OpenAI(config);
var openaiImage;
if (azureOpenAiApiKey) {
-  const configuration2 = new Configuration({ apiKey });
  if (!apiKey) {
-    configuration2.baseOptions = {
-      headers: { "api-key": azureOpenAiApiKey },
-      params: {
-        "api-version": process.env["AZURE_OPENAI_API_VERSION"] ?? "2023-07-01-preview"
-      }
-    };
-    configuration2.basePath = "https://" + process.env["AZURE_OPENAI_API_INSTANCE_NAME"] + ".openai.azure.com/openai";
+    openaiImage = new OpenAI({
+      apiKey: azureOpenAiApiKey,
+      baseURL: `https://${process.env["AZURE_OPENAI_API_INSTANCE_NAME"]}.openai.azure.com/openai`,
+      defaultQuery: { "api-version": process.env["AZURE_OPENAI_API_VERSION"] ?? "2023-08-01-preview" },
+      defaultHeaders: { "api-key": azureOpenAiApiKey }
+    });
+  } else {
+    openaiImage = new OpenAI({ apiKey });
  }
-  openaiImage = new OpenAIApi(configuration2);
}
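// A separate image client is kept because Azure serves image generation from
// the instance root rather than a chat deployment; when a plain OpenAI key is
// also set, image requests fall back to that key instead.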
var model = process.env["OPENAI_MODEL_NAME"] ?? "gpt-3.5-turbo";
var MAX_TOKENS = Number(process.env["OPENAI_MAX_TOKENS"] ?? 2e3);
@@ -93,7 +83,8 @@ async function continueThread(messages, msgData) {
  openAILog.trace({ pluginResponse });
  if (pluginResponse.intermediate) {
    messages.push({
-      role: ChatCompletionResponseMessageRoleEnum.Function,
+      role: "function",
+      // ChatCompletionResponseMessageRoleEnum.Function,
      name: pluginName,
      content: pluginResponse.message
    });
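// v4 dropped the role enums entirely; plain string literals ("system", "user",
// "assistant", "function") are the accepted values, as in the hunks below.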
@@ -141,21 +132,25 @@ async function createChatCompletion(messages, functions2 = void 0) {
    chatCompletionOptions.function_call = "auto";
  }
  openAILog.trace({ chatCompletionOptions });
-  const chatCompletion = await openai.createChatCompletion(chatCompletionOptions);
+  const chatCompletion = await openai.chat.completions.create(chatCompletionOptions);
  openAILog.trace({ chatCompletion });
-  return { responseMessage: chatCompletion.data?.choices?.[0]?.message, usage: chatCompletion.data?.usage };
+  return { responseMessage: chatCompletion.choices?.[0]?.message, usage: chatCompletion.usage };
}
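// v4 methods resolve to the parsed response body itself, so the axios-style
// `.data` unwrapping from v3 is gone.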
async function createImage(prompt) {
  const createImageOptions = {
+    model: process.env["OPENAI_IMAGE_MODEL_NAME"] ?? "dall-e-2",
    prompt,
    n: 1,
-    size: "512x512",
+    size: "1024x1024",
+    // Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2; one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models.
+    quality: "standard",
+    // "hd" costs $0.080 per image (about 12 yen each), double the standard price
    response_format: "b64_json"
  };
  openAILog.trace({ createImageOptions });
-  const image = await (openaiImage ? openaiImage : openai).createImage(createImageOptions);
+  const image = await (openaiImage ? openaiImage : openai).images.generate(createImageOptions);
  openAILog.trace({ image });
-  return image.data?.data[0]?.b64_json;
+  return image.data[0]?.b64_json;
}
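// images.generate() likewise returns the body directly: `image.data` is the
// array of generated images, not an axios response wrapper.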

// src/mm-client.ts
@@ -238,7 +233,6 @@ var ExitPlugin = class extends PluginBase {
import FormData3 from "form-data";

// src/plugins/GraphPlugin.ts
-import { ChatCompletionRequestMessageRoleEnum } from "openai";
import FormData from "form-data";
import fetch2 from "node-fetch";
var GraphPlugin = class extends PluginBase {
@@ -263,11 +257,13 @@ var GraphPlugin = class extends PluginBase {
  };
  const chatmessages = [
    {
-      role: ChatCompletionRequestMessageRoleEnum.System,
+      role: "system",
+      // ChatCompletionRequestMessageRoleEnum.System,
      content: this.VISUALIZE_DIAGRAM_INSTRUCTIONS
    },
    {
-      role: ChatCompletionRequestMessageRoleEnum.User,
+      role: "user",
+      // ChatCompletionRequestMessageRoleEnum.User,
      content: args.graphPrompt
    }
  ];
@@ -336,7 +332,6 @@ ${graphContent}`);
};

// src/plugins/ImagePlugin.ts
-import { ChatCompletionRequestMessageRoleEnum as ChatCompletionRequestMessageRoleEnum2 } from "openai";
import FormData2 from "form-data";
var ImagePlugin = class extends PluginBase {
  GPT_INSTRUCTIONS = "You are a prompt engineer who helps a user to create good prompts for the image AI DALL-E. The user will provide you with a short image description and you transform this into a proper prompt text. When creating the prompt first describe the looks and structure of the image. Secondly, describe the photography style, like camera angle, camera position, lenses. Third, describe the lighting and specific colors. Your prompt have to focus on the overall image and not describe any details on it. Consider adding buzzwords, for example 'detailed', 'hyper-detailed', 'very realistic', 'sketchy', 'street-art', 'drawing', or similar words. Keep the prompt as simple as possible and never get longer than 400 characters. You may only answer with the resulting prompt and provide no description or explanations.";
@@ -380,11 +375,13 @@ ${args.imageDescription}`);
  async createImagePrompt(userInput) {
    const messages = [
      {
-        role: ChatCompletionRequestMessageRoleEnum2.System,
+        role: "system",
+        // ChatCompletionRequestMessageRoleEnum.System,
        content: this.GPT_INSTRUCTIONS
      },
      {
-        role: ChatCompletionRequestMessageRoleEnum2.User,
+        role: "user",
+        // ChatCompletionRequestMessageRoleEnum.User,
        content: userInput
      }
    ];
@@ -467,6 +464,9 @@ function tokenCount(content) {
if (!global.FormData) {
  global.FormData = FormData3;
}
+if (!global.FormData) {
+  global.FormData = FormData3;
+}
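// The second guard is a no-op once the first has installed the form-data
// polyfill; presumably two bundled source files each carried the same shim.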
var name = process.env["MATTERMOST_BOTNAME"] || "@chatgpt";
var contextMsgCount = Number(process.env["BOT_CONTEXT_MSG"] ?? 100);
var SYSTEM_MESSAGE_HEADER = "// BOT System Message: ";
@@ -490,21 +490,24 @@ async function onClientMessage(msg, meId) {
  }
  const chatmessages = [
    {
-      role: ChatCompletionRequestMessageRoleEnum3.System,
+      role: "system",
+      // ChatCompletionRequestMessageRoleEnum.System,
      content: botInstructions
    }
  ];
497
498
for ( const threadPost of posts . slice ( - contextMsgCount ) ) {
498
499
matterMostLog . trace ( { msg : threadPost } ) ;
499
500
if ( threadPost . user_id === meId ) {
500
501
chatmessages . push ( {
501
- role : ChatCompletionRequestMessageRoleEnum3 . Assistant ,
502
+ role : "assistant" ,
503
+ //ChatCompletionRequestMessageRoleEnum.Assistant,
502
504
content : threadPost . props . originalMessage ?? threadPost . message
503
505
} ) ;
504
506
} else {
505
507
chatmessages . push ( {
506
- role : ChatCompletionRequestMessageRoleEnum3 . User ,
507
- name : await userIdToName ( threadPost . user_id ) ,
508
+ role : "user" ,
509
+ //ChatCompletionRequestMessageRoleEnum.User,
510
+ //Not have openai V4 name: await userIdToName(threadPost.user_id),
508
511
content : threadPost . message
509
512
} ) ;
510
513
}
@@ -540,21 +543,33 @@ async function postMessage(msgData, messages) {
    }
    throw e;
  }
-  const lines = messages[1].content.split("\n");
-  if (lines.length < 1) {
+  let lines = typeof messages[1].content === "string" ? messages[1].content.split("\n") : void 0;
+  if (!lines) {
+    if (messages[1].content) {
+      lines = [];
+      for (let i = 0; messages[1].content.length > i; i++) {
+        if (messages[1].content[i].type === "text") {
+          lines.push(...messages[1].content[i].text.split("\n"));
+        }
+      }
+    }
+  }
+  if (!lines || lines.length < 1) {
    botLog.error("No contents", messages[1].content);
    answer += "No contents.";
    newPost(SYSTEM_MESSAGE_HEADER + answer, msgData.post, void 0, void 0);
    return;
  }
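// In v4, message content may be a plain string or an array of content parts,
// so text lines are collected from the "text" parts when it is not a string.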
  const linesCount = [];
  lines.forEach((line, i) => {
-    if (line === "") {
-      lines[i] = "\n";
-      linesCount[i] = 1;
-    } else {
-      lines[i] += "\n";
-      linesCount[i] = tokenCount(lines[i]);
+    if (lines) {
+      if (line === "") {
+        lines[i] = "\n";
+        linesCount[i] = 1;
+      } else {
+        lines[i] += "\n";
+        linesCount[i] = tokenCount(lines[i]);
+      }
    }
  });
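// The inner `if (lines)` looks redundant at runtime (lines is non-empty here);
// presumably it satisfies TypeScript's narrowing of `string[] | undefined`
// inside the callback.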
  if (messagesCount[0] + linesCount[0] >= LIMIT_TOKENS) {
@@ -574,7 +589,8 @@ async function postMessage(msgData, messages) {
    let systemMessage2 = SYSTEM_MESSAGE_HEADER;
    while (currentMessages.length > 1 && (sumCurrentMessagesCount + currentLinesCount + linesCount[i] >= LIMIT_TOKENS || sumCurrentMessagesCount + currentLinesCount > LIMIT_TOKENS / 2)) {
      botLog.info("Remove assistant message", currentMessages[1]);
-      systemMessage2 += "Forget previous message.\n```\n" + currentMessages[1].content.split("\n").slice(0, 3).join("\n") + "...\n```\n";
+      systemMessage2 += "Forget previous message.\n```\n" + (typeof messages[1].content === "string" ? messages[1].content.split("\n").slice(0, 3).join("\n") : currentMessages[1].content) + // the ChatCompletionContentPart case is not handled; TODO: should really extract only the text parts
+      "...\n```\n";
      sumCurrentMessagesCount -= currentMessagesCount[1];
      currentMessagesCount = [currentMessagesCount[0], ...currentMessagesCount.slice(2)];
      currentMessages = [currentMessages[0], ...currentMessages.slice(2)];
@@ -653,7 +669,7 @@ function expireMessages(messages, sumMessagesCount, messagesCount, systemMessage
  botLog.info("Remove message", messages[1]);
  systemMessage += `Forget old message.
~~~
-${messages[1].content.split("\n").slice(0, 3).join("\n")}
+${typeof messages[1].content === "string" ? messages[1].content.split("\n").slice(0, 3).join("\n") : messages[1].content}
...
~~~
`;
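// When content is an array of parts, the template literal stringifies it
// (e.g. "[object Object]"); only string content yields a clean three-line
// excerpt.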
@@ -667,7 +683,7 @@ function calcMessagesTokenCount(messages) {
  let sumMessagesCount = 0;
  const messagesCount = new Array(messages.length);
  messages.forEach((message, i) => {
-    messagesCount[i] = tokenCount(message.content);
+    messagesCount[i] = typeof message.content === "string" ? tokenCount(message.content) : 0;
    sumMessagesCount += messagesCount[i];
  });
  return { sumMessagesCount, messagesCount };
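// Non-string (content-part) messages are counted as zero tokens, so the token
// budget underestimates them.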
@@ -733,26 +749,6 @@ async function getOlderPosts(refPost, options) {
  }
  return posts;
}
-var usernameCache = {};
-async function userIdToName(userId) {
-  let username;
-  if (usernameCache[userId] && Date.now() < usernameCache[userId].expireTime) {
-    username = usernameCache[userId].username;
-  } else {
-    username = (await mmClient.getUser(userId)).username;
-    if (!/^[a-zA-Z0-9_-]{1,64}$/.test(username)) {
-      username = username.replace(/[.@!?]/g, "_").slice(0, 64);
-    }
-    if (!/^[a-zA-Z0-9_-]{1,64}$/.test(username)) {
-      username = [...username.matchAll(/[a-zA-Z0-9_-]/g)].join("").slice(0, 64);
-    }
-    usernameCache[userId] = {
-      username,
-      expireTime: Date.now() + 1e3 * 60 * 5
-    };
-  }
-  return username;
-}
async function main() {
  const meId = (await mmClient.getMe()).id;
  botLog.log("Connected to Mattermost.");