Skip to content

Commit 6c5599b

Browse files
committed
🔧 refactoring: Updated openai-node to v4 and updated environment variable samples
Updated the OpenAI API from v3 to v4. This allows us to take advantage of the new API features and improves the readability and maintainability of the code. Also added new environment variable samples to the .env-sample file, which makes it easier for users to configure the new API.

🔄 update(botservice.ts): Updated to OpenAI API v4. The OpenAI API has been updated to v4, and changes have been made to accommodate this. This allows the use of the new API features, improving the performance and functionality of the application. Unnecessary code has also been removed to improve code readability and maintainability.

🔧 refactor(ImagePlugin.ts, types.d.ts): Use string literals instead of OpenAI enum types directly. Using OpenAI types directly can lead to unexpected type changes due to library updates, so string literals are used to ensure type stability. In types.d.ts, the entire OpenAI module is now imported instead of individual OpenAI types, allowing the necessary types to be referenced directly and improving the readability of the code.
1 parent faf00a1 commit 6c5599b

File tree

10 files changed

+1031
-906
lines changed

10 files changed

+1031
-906
lines changed

.env-sample

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,9 @@
1-
MATTERMOST_URL=https://mattermost.server
1+
MATTERMOST_URL=https://mattermost.server.example
22
MATTERMOST_TOKEN=abababacdcdcd
33
OPENAI_API_KEY=sk-234234234234234234
4+
# OPENAI_MODEL_NAME=gpt-3.5-turbo
5+
# OPENAI_IMAGE_MODEL_NAME=dall-e-2
6+
47
#AZURE_OPENAI_API_KEY=0123456789abcdefghijklmno
58
#AZURE_OPENAI_API_INSTANCE_NAME=example-name
69
#AZURE_OPENAI_API_DEPLOYMENT_NAME=gpt-35-turbo

dist/botservice.mjs

Lines changed: 69 additions & 73 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,5 @@
11
// src/botservice.ts
22
import "isomorphic-fetch";
3-
import {
4-
ChatCompletionRequestMessageRoleEnum as ChatCompletionRequestMessageRoleEnum3
5-
} from "openai";
63

74
// src/logging.ts
85
import { Log } from "debug-level";
@@ -13,38 +10,31 @@ var openAILog = new Log("open-ai");
1310
var matterMostLog = new Log("mattermost");
1411

1512
// src/openai-wrapper.ts
16-
import {
17-
ChatCompletionResponseMessageRoleEnum,
18-
Configuration,
19-
OpenAIApi
20-
} from "openai";
13+
import OpenAI from "openai";
2114
var apiKey = process.env["OPENAI_API_KEY"];
22-
openAILog.trace({ apiKey });
23-
var configuration = new Configuration({ apiKey });
15+
var config = { apiKey };
2416
var azureOpenAiApiKey = process.env["AZURE_OPENAI_API_KEY"];
2517
if (azureOpenAiApiKey) {
26-
configuration.baseOptions = {
27-
headers: { "api-key": azureOpenAiApiKey },
28-
params: {
29-
"api-version": process.env["AZURE_OPENAI_API_VERSION"] ?? "2023-07-01-preview"
30-
}
18+
config = {
19+
apiKey: azureOpenAiApiKey,
20+
baseURL: `https://${process.env["AZURE_OPENAI_API_INSTANCE_NAME"]}.openai.azure.com/openai/deployments/${process.env["AZURE_OPENAI_API_DEPLOYMENT_NAME"] ?? "gpt-35-turbo"}`,
21+
defaultQuery: { "api-version": process.env["AZURE_OPENAI_API_VERSION"] ?? "2023-08-01-preview" },
22+
defaultHeaders: { "api-key": azureOpenAiApiKey }
3123
};
32-
configuration.basePath = "https://" + process.env["AZURE_OPENAI_API_INSTANCE_NAME"] + ".openai.azure.com/openai/deployments/" + process.env["AZURE_OPENAI_API_DEPLOYMENT_NAME"];
3324
}
34-
var openai = new OpenAIApi(configuration);
25+
var openai = new OpenAI(config);
3526
var openaiImage;
3627
if (azureOpenAiApiKey) {
37-
const configuration2 = new Configuration({ apiKey });
3828
if (!apiKey) {
39-
configuration2.baseOptions = {
40-
headers: { "api-key": azureOpenAiApiKey },
41-
params: {
42-
"api-version": process.env["AZURE_OPENAI_API_VERSION"] ?? "2023-07-01-preview"
43-
}
44-
};
45-
configuration2.basePath = "https://" + process.env["AZURE_OPENAI_API_INSTANCE_NAME"] + ".openai.azure.com/openai";
29+
openaiImage = new OpenAI({
30+
apiKey: azureOpenAiApiKey,
31+
baseURL: `https://${process.env["AZURE_OPENAI_API_INSTANCE_NAME"]}.openai.azure.com/openai`,
32+
defaultQuery: { "api-version": process.env["AZURE_OPENAI_API_VERSION"] ?? "2023-08-01-preview" },
33+
defaultHeaders: { "api-key": azureOpenAiApiKey }
34+
});
35+
} else {
36+
openaiImage = new OpenAI({ apiKey });
4637
}
47-
openaiImage = new OpenAIApi(configuration2);
4838
}
4939
var model = process.env["OPENAI_MODEL_NAME"] ?? "gpt-3.5-turbo";
5040
var MAX_TOKENS = Number(process.env["OPENAI_MAX_TOKENS"] ?? 2e3);
@@ -93,7 +83,8 @@ async function continueThread(messages, msgData) {
9383
openAILog.trace({ pluginResponse });
9484
if (pluginResponse.intermediate) {
9585
messages.push({
96-
role: ChatCompletionResponseMessageRoleEnum.Function,
86+
role: "function",
87+
//ChatCompletionResponseMessageRoleEnum.Function,
9788
name: pluginName,
9889
content: pluginResponse.message
9990
});
@@ -141,21 +132,25 @@ async function createChatCompletion(messages, functions2 = void 0) {
141132
chatCompletionOptions.function_call = "auto";
142133
}
143134
openAILog.trace({ chatCompletionOptions });
144-
const chatCompletion = await openai.createChatCompletion(chatCompletionOptions);
135+
const chatCompletion = await openai.chat.completions.create(chatCompletionOptions);
145136
openAILog.trace({ chatCompletion });
146-
return { responseMessage: chatCompletion.data?.choices?.[0]?.message, usage: chatCompletion.data?.usage };
137+
return { responseMessage: chatCompletion.choices?.[0]?.message, usage: chatCompletion.usage };
147138
}
148139
async function createImage(prompt) {
149140
const createImageOptions = {
141+
model: process.env["OPENAI_IMAGE_MODEL_NAME"] ?? "dall-e-2",
150142
prompt,
151143
n: 1,
152-
size: "512x512",
144+
size: "1024x1024",
145+
//Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models.
146+
quality: "standard",
147+
//"hd", $0.080/枚=1枚12円で倍額
153148
response_format: "b64_json"
154149
};
155150
openAILog.trace({ createImageOptions });
156-
const image = await (openaiImage ? openaiImage : openai).createImage(createImageOptions);
151+
const image = await (openaiImage ? openaiImage : openai).images.generate(createImageOptions);
157152
openAILog.trace({ image });
158-
return image.data?.data[0]?.b64_json;
153+
return image.data[0]?.b64_json;
159154
}
160155

161156
// src/mm-client.ts
@@ -238,7 +233,6 @@ var ExitPlugin = class extends PluginBase {
238233
import FormData3 from "form-data";
239234

240235
// src/plugins/GraphPlugin.ts
241-
import { ChatCompletionRequestMessageRoleEnum } from "openai";
242236
import FormData from "form-data";
243237
import fetch2 from "node-fetch";
244238
var GraphPlugin = class extends PluginBase {
@@ -263,11 +257,13 @@ var GraphPlugin = class extends PluginBase {
263257
};
264258
const chatmessages = [
265259
{
266-
role: ChatCompletionRequestMessageRoleEnum.System,
260+
role: "system",
261+
//ChatCompletionRequestMessageRoleEnum.System,
267262
content: this.VISUALIZE_DIAGRAM_INSTRUCTIONS
268263
},
269264
{
270-
role: ChatCompletionRequestMessageRoleEnum.User,
265+
role: "user",
266+
//hatCompletionRequestMessageRoleEnum.User,
271267
content: args.graphPrompt
272268
}
273269
];
@@ -336,7 +332,6 @@ ${graphContent}`);
336332
};
337333

338334
// src/plugins/ImagePlugin.ts
339-
import { ChatCompletionRequestMessageRoleEnum as ChatCompletionRequestMessageRoleEnum2 } from "openai";
340335
import FormData2 from "form-data";
341336
var ImagePlugin = class extends PluginBase {
342337
GPT_INSTRUCTIONS = "You are a prompt engineer who helps a user to create good prompts for the image AI DALL-E. The user will provide you with a short image description and you transform this into a proper prompt text. When creating the prompt first describe the looks and structure of the image. Secondly, describe the photography style, like camera angle, camera position, lenses. Third, describe the lighting and specific colors. Your prompt have to focus on the overall image and not describe any details on it. Consider adding buzzwords, for example 'detailed', 'hyper-detailed', 'very realistic', 'sketchy', 'street-art', 'drawing', or similar words. Keep the prompt as simple as possible and never get longer than 400 characters. You may only answer with the resulting prompt and provide no description or explanations.";
@@ -380,11 +375,13 @@ ${args.imageDescription}`);
380375
async createImagePrompt(userInput) {
381376
const messages = [
382377
{
383-
role: ChatCompletionRequestMessageRoleEnum2.System,
378+
role: "system",
379+
//ChatCompletionRequestMessageRoleEnum.System,
384380
content: this.GPT_INSTRUCTIONS
385381
},
386382
{
387-
role: ChatCompletionRequestMessageRoleEnum2.User,
383+
role: "user",
384+
//ChatCompletionRequestMessageRoleEnum.User,
388385
content: userInput
389386
}
390387
];
@@ -467,6 +464,9 @@ function tokenCount(content) {
467464
if (!global.FormData) {
468465
global.FormData = FormData3;
469466
}
467+
if (!global.FormData) {
468+
global.FormData = FormData3;
469+
}
470470
var name = process.env["MATTERMOST_BOTNAME"] || "@chatgpt";
471471
var contextMsgCount = Number(process.env["BOT_CONTEXT_MSG"] ?? 100);
472472
var SYSTEM_MESSAGE_HEADER = "// BOT System Message: ";
@@ -490,21 +490,24 @@ async function onClientMessage(msg, meId) {
490490
}
491491
const chatmessages = [
492492
{
493-
role: ChatCompletionRequestMessageRoleEnum3.System,
493+
role: "system",
494+
// ChatCompletionRequestMessageRoleEnum.System,
494495
content: botInstructions
495496
}
496497
];
497498
for (const threadPost of posts.slice(-contextMsgCount)) {
498499
matterMostLog.trace({ msg: threadPost });
499500
if (threadPost.user_id === meId) {
500501
chatmessages.push({
501-
role: ChatCompletionRequestMessageRoleEnum3.Assistant,
502+
role: "assistant",
503+
//ChatCompletionRequestMessageRoleEnum.Assistant,
502504
content: threadPost.props.originalMessage ?? threadPost.message
503505
});
504506
} else {
505507
chatmessages.push({
506-
role: ChatCompletionRequestMessageRoleEnum3.User,
507-
name: await userIdToName(threadPost.user_id),
508+
role: "user",
509+
//ChatCompletionRequestMessageRoleEnum.User,
510+
//Not have openai V4 name: await userIdToName(threadPost.user_id),
508511
content: threadPost.message
509512
});
510513
}
@@ -540,21 +543,33 @@ async function postMessage(msgData, messages) {
540543
}
541544
throw e;
542545
}
543-
const lines = messages[1].content.split("\n");
544-
if (lines.length < 1) {
546+
let lines = typeof messages[1].content === "string" ? messages[1].content.split("\n") : void 0;
547+
if (!lines) {
548+
if (messages[1].content) {
549+
lines = [];
550+
for (let i = 0; messages[1].content.length > i; i++) {
551+
if (messages[1].content[i].type === "text") {
552+
lines.push(...messages[1].content[i].text.split("\n"));
553+
}
554+
}
555+
}
556+
}
557+
if (!lines || lines.length < 1) {
545558
botLog.error("No contents", messages[1].content);
546559
answer += "No contents.";
547560
newPost(SYSTEM_MESSAGE_HEADER + answer, msgData.post, void 0, void 0);
548561
return;
549562
}
550563
const linesCount = [];
551564
lines.forEach((line, i) => {
552-
if (line === "") {
553-
lines[i] = "\n";
554-
linesCount[i] = 1;
555-
} else {
556-
lines[i] += "\n";
557-
linesCount[i] = tokenCount(lines[i]);
565+
if (lines) {
566+
if (line === "") {
567+
lines[i] = "\n";
568+
linesCount[i] = 1;
569+
} else {
570+
lines[i] += "\n";
571+
linesCount[i] = tokenCount(lines[i]);
572+
}
558573
}
559574
});
560575
if (messagesCount[0] + linesCount[0] >= LIMIT_TOKENS) {
@@ -574,7 +589,8 @@ async function postMessage(msgData, messages) {
574589
let systemMessage2 = SYSTEM_MESSAGE_HEADER;
575590
while (currentMessages.length > 1 && (sumCurrentMessagesCount + currentLinesCount + linesCount[i] >= LIMIT_TOKENS || sumCurrentMessagesCount + currentLinesCount > LIMIT_TOKENS / 2)) {
576591
botLog.info("Remove assistant message", currentMessages[1]);
577-
systemMessage2 += "Forget previous message.\n```\n" + currentMessages[1].content.split("\n").slice(0, 3).join("\n") + "...\n```\n";
592+
systemMessage2 += "Forget previous message.\n```\n" + (typeof messages[1].content === "string" ? messages[1].content.split("\n").slice(0, 3).join("\n") : currentMessages[1].content) + // ChatCompletionContentPartの場合は考えられていない TODO: 本当はtextを選んで出すべき
593+
"...\n```\n";
578594
sumCurrentMessagesCount -= currentMessagesCount[1];
579595
currentMessagesCount = [currentMessagesCount[0], ...currentMessagesCount.slice(2)];
580596
currentMessages = [currentMessages[0], ...currentMessages.slice(2)];
@@ -653,7 +669,7 @@ function expireMessages(messages, sumMessagesCount, messagesCount, systemMessage
653669
botLog.info("Remove message", messages[1]);
654670
systemMessage += `Forget old message.
655671
~~~
656-
${messages[1].content.split("\n").slice(0, 3).join("\n")}
672+
${typeof messages[1].content === "string" ? messages[1].content.split("\n").slice(0, 3).join("\n") : messages[1].content}
657673
...
658674
~~~
659675
`;
@@ -667,7 +683,7 @@ function calcMessagesTokenCount(messages) {
667683
let sumMessagesCount = 0;
668684
const messagesCount = new Array(messages.length);
669685
messages.forEach((message, i) => {
670-
messagesCount[i] = tokenCount(message.content);
686+
messagesCount[i] = typeof message.content === "string" ? tokenCount(message.content) : 0;
671687
sumMessagesCount += messagesCount[i];
672688
});
673689
return { sumMessagesCount, messagesCount };
@@ -733,26 +749,6 @@ async function getOlderPosts(refPost, options) {
733749
}
734750
return posts;
735751
}
736-
var usernameCache = {};
737-
async function userIdToName(userId) {
738-
let username;
739-
if (usernameCache[userId] && Date.now() < usernameCache[userId].expireTime) {
740-
username = usernameCache[userId].username;
741-
} else {
742-
username = (await mmClient.getUser(userId)).username;
743-
if (!/^[a-zA-Z0-9_-]{1,64}$/.test(username)) {
744-
username = username.replace(/[.@!?]/g, "_").slice(0, 64);
745-
}
746-
if (!/^[a-zA-Z0-9_-]{1,64}$/.test(username)) {
747-
username = [...username.matchAll(/[a-zA-Z0-9_-]/g)].join("").slice(0, 64);
748-
}
749-
usernameCache[userId] = {
750-
username,
751-
expireTime: Date.now() + 1e3 * 60 * 5
752-
};
753-
}
754-
return username;
755-
}
756752
async function main() {
757753
const meId = (await mmClient.getMe()).id;
758754
botLog.log("Connected to Mattermost.");

0 commit comments

Comments (0)