Commit af315f2

Better logging with more verbose trace options and channels

1 parent 8ef5c69 commit af315f2

4 files changed: +86, -46 lines


package.json

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 {
   "name": "chatgpt-mattermost-bot",
-  "version": "2.1.1",
+  "version": "2.1.2",
   "private": true,
   "scripts": {
     "start": "ts-node ./src/botservice.ts",

src/botservice.ts

Lines changed: 13 additions & 17 deletions
@@ -1,5 +1,4 @@
 import {continueThread, registerChatPlugin} from "./openai-wrapper";
-import {Log} from "debug-level"
 import {mmClient, wsClient} from "./mm-client";
 import 'babel-polyfill'
 import 'isomorphic-fetch'
@@ -13,6 +12,8 @@ import {JSONMessageData, MessageData} from "./types";
 import {ExitPlugin} from "./plugins/ExitPlugin";
 import {MessageCollectPlugin} from "./plugins/MessageCollectPlugin";
 
+import {botLog, matterMostLog} from "./logging";
+
 if (!global.FormData) {
     global.FormData = require('form-data')
 }
@@ -33,9 +34,9 @@ const botInstructions = "Your name is " + name + " and you are a helpful assista
     "provide them with succinct answers formatted using Markdown. You know the user's name as it is provided within the " +
     "meta data of the messages."
 
-async function onClientMessage(msg: WebSocketMessage<JSONMessageData>, meId: string, log: Log) {
+async function onClientMessage(msg: WebSocketMessage<JSONMessageData>, meId: string) {
     if (msg.event !== 'posted' || !meId) {
-        log.debug({msg: msg})
+        matterMostLog.debug({msg: msg})
         return
     }
 
@@ -55,7 +56,7 @@ async function onClientMessage(msg: WebSocketMessage<JSONMessageData>, meId: str
 
     // create the context
     for (const threadPost of posts.slice(-contextMsgCount)) {
-        log.trace({msg: threadPost})
+        matterMostLog.trace({msg: threadPost})
         if (threadPost.user_id === meId) {
             chatmessages.push({
                 role: ChatCompletionRequestMessageRoleEnum.Assistant,
@@ -76,9 +77,8 @@ async function onClientMessage(msg: WebSocketMessage<JSONMessageData>, meId: str
     const typingInterval = setInterval(typing, 2000)
 
     try {
-        log.trace({chatmessages})
         const {message, fileId, props} = await continueThread(chatmessages, msgData)
-        log.trace({message})
+        botLog.trace({message})
 
         // create answer response
         const newPost = await mmClient.createPost({
@@ -88,9 +88,9 @@ async function onClientMessage(msg: WebSocketMessage<JSONMessageData>, meId: str
             root_id: msgData.post.root_id || msgData.post.id,
             file_ids: fileId ? [fileId] : undefined
         })
-        log.trace({msg: newPost})
+        botLog.trace({msg: newPost})
     } catch (e) {
-        log.error(e)
+        botLog.error(e)
         await mmClient.createPost({
             message: "Sorry, but I encountered an internal error when trying to process your message",
             channel_id: msgData.post.channel_id,
@@ -208,28 +208,24 @@ async function userIdToName(userId: string): Promise<string> {
     return username
 }
 
-Log.options({json: true, colors: true})
-Log.wrapConsole('bot-ws', {level4log: 'INFO'})
-const log = new Log('bot')
-
 /* Entry point */
 async function main(): Promise<void> {
     const meId = (await mmClient.getMe()).id
 
-    log.log("Connected to Mattermost.")
+    botLog.log("Connected to Mattermost.")
 
     for (const plugin of plugins) {
         if (plugin.setup()) {
             registerChatPlugin(plugin)
-            log.trace("Registered plugin " + plugin.key)
+            botLog.trace("Registered plugin " + plugin.key)
        }
    }
 
-    wsClient.addMessageListener((e) => onClientMessage(e, meId, log))
-    log.trace("Listening to MM messages...")
+    wsClient.addMessageListener((e) => onClientMessage(e, meId))
+    botLog.trace("Listening to MM messages...")
 }
 
 main().catch(reason => {
-    log.error(reason);
+    botLog.error(reason);
    process.exit(-1)
 })
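
Note: the `Log` instance previously threaded through `onClientMessage` is gone; each module now imports the channel it reports on from `src/logging.ts`. A minimal sketch of the resulting pattern (the handler below is hypothetical; only the import path and logger names come from this commit):

    import {botLog, matterMostLog} from "./logging";

    // Hypothetical handler: raw Mattermost traffic is traced on the
    // 'mattermost' channel, bot lifecycle events on the 'bot' channel.
    function onExampleEvent(payload: unknown) {
        matterMostLog.trace({msg: payload})
        botLog.debug("handled one websocket event")
    }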

src/logging.ts

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
+import {Log} from "debug-level";
+
+Log.options({json: true, colors: true})
+Log.wrapConsole('bot-ws', {level4log: 'INFO'})
+export const botLog = new Log('bot')
+export const openAILog = new Log('open-ai')
+export const matterMostLog = new Log('mattermost')
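
With debug-level, each `new Log(namespace)` is an independently selectable channel, so `bot`, `open-ai`, and `mattermost` output can be filtered and leveled separately at runtime (selection typically happens through the `DEBUG` and `DEBUG_LEVEL` environment variables, following the conventions of the underlying `debug` package). A minimal sketch of a consumer, assuming the module resolves as `./logging`:

    import {openAILog} from "./logging";

    // Routine events on the 'open-ai' channel...
    openAILog.info("OpenAI client configured")
    // ...and per-request detail that only appears when the channel's
    // level is raised to TRACE.
    openAILog.trace({requestId: "hypothetical-id"})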

src/openai-wrapper.ts

Lines changed: 65 additions & 28 deletions
@@ -2,30 +2,36 @@ import {
     ChatCompletionFunctions,
     ChatCompletionRequestMessage,
     ChatCompletionResponseMessage, ChatCompletionResponseMessageRoleEnum,
-    Configuration,
+    Configuration, CreateChatCompletionRequest, CreateImageRequest,
     OpenAIApi
 } from "openai";
+import {openAILog as log} from "./logging"
+
 import {PluginBase} from "./plugins/PluginBase";
 import {AiResponse, MessageData} from "./types";
 
-const configuration = new Configuration({
-    apiKey: process.env['OPENAI_API_KEY']
-})
+const apiKey = process.env['OPENAI_API_KEY'];
+log.trace({apiKey})
+
+const configuration = new Configuration({ apiKey })
+
 const openai = new OpenAIApi(configuration)
 
 const model = process.env['OPENAI_MODEL_NAME'] ?? 'gpt-3.5-turbo'
 const max_tokens = Number(process.env['OPENAI_MAX_TOKENS'] ?? 2000)
 const temperature = Number(process.env['OPENAI_TEMPERATURE'] ?? 1)
 
-const plugins: Record<string, PluginBase<any>> = {}
+log.debug({model, max_tokens, temperature})
+
+const plugins: Map<string, PluginBase<any>> = new Map()
 const functions: ChatCompletionFunctions[] = []
 
 /**
  * Registers a plugin as a GPT function. These functions are sent to openAI when the user interacts with chatGPT.
  * @param plugin
  */
 export function registerChatPlugin(plugin: PluginBase<any>) {
-    plugins[plugin.key] = plugin
+    plugins.set(plugin.key, plugin)
     functions.push({
         name: plugin.key,
         description: plugin.description,
@@ -48,27 +54,52 @@ export async function continueThread(messages: ChatCompletionRequestMessage[], m
         message: 'Sorry, but it seems I found no valid response.'
     }
 
+    // the number of rounds we're going to run at maximum
+    let maxChainLength = 7;
+
+    // check whether ChatGPT hallucinates a plugin name.
+    const missingPlugins = new Set<string>()
+
     let isIntermediateResponse = true
-    while(isIntermediateResponse) {
+    while(isIntermediateResponse && maxChainLength-- > 0) {
         const responseMessage = await createChatCompletion(messages, functions)
+        log.trace(responseMessage)
         if(responseMessage) {
             // if the function_call is set, we have a plugin call
             if(responseMessage.function_call && responseMessage.function_call.name) {
+                const pluginName = responseMessage.function_call.name;
+                log.trace({pluginName})
                 try {
-                    const pluginResponse = await plugins[responseMessage.function_call!.name!].runPlugin((JSON.parse(responseMessage.function_call!.arguments!)), msgData)
-
-                    if(pluginResponse.intermediate) {
-                        messages.push({
-                            role: ChatCompletionResponseMessageRoleEnum.Function,
-                            name: responseMessage.function_call!.name!,
-                            content: pluginResponse.message
-                        })
-                        continue
-                    }
+                    const plugin = plugins.get(pluginName);
+                    if (plugin){
+                        const pluginArguments = JSON.parse(responseMessage.function_call.arguments ?? '[]');
+                        log.trace({plugin, pluginArguments})
+                        const pluginResponse = await plugin.runPlugin(pluginArguments, msgData)
+                        log.trace({pluginResponse})
 
-                    aiResponse = pluginResponse
+                        if(pluginResponse.intermediate) {
+                            messages.push({
+                                role: ChatCompletionResponseMessageRoleEnum.Function,
+                                name: pluginName,
+                                content: pluginResponse.message
+                            })
+                            continue
+                        }
+                        aiResponse = pluginResponse
+                    } else {
+                        if (!missingPlugins.has(pluginName)){
+                            missingPlugins.add(pluginName)
+                            log.debug({ error: 'Missing plugin ' + pluginName, pluginArguments: responseMessage.function_call.arguments})
+                            messages.push({ role: 'system', content: `There is no plugin named '${pluginName}' available. Try without using that plugin.`})
+                            continue
+                        } else {
+                            log.debug({ messages })
+                            aiResponse.message = `Sorry, but it seems there was an error when using the plugin \`\`\`${pluginName}\`\`\`.`
+                        }
+                    }
                 } catch (e) {
-                    aiResponse.message = `Sorry, but it seems there was an error when using the plugin \`\`\`${responseMessage.function_call!.name!}\`\`\`.`
+                    log.debug({ messages, error: e })
+                    aiResponse.message = `Sorry, but it seems there was an error when using the plugin \`\`\`${pluginName}\`\`\`.`
                 }
             } else if(responseMessage.content) {
                 aiResponse.message = responseMessage.content
@@ -87,18 +118,22 @@
  * @param functions Function calls which can be called by the openAI model
  */
 export async function createChatCompletion(messages: ChatCompletionRequestMessage[], functions: ChatCompletionFunctions[] | undefined = undefined): Promise<ChatCompletionResponseMessage | undefined> {
-    const options: any = {
+    const chatCompletionOptions: CreateChatCompletionRequest = {
         model: model,
         messages: messages,
         max_tokens: max_tokens,
         temperature: temperature,
     }
     if(functions) {
-        options.functions = functions
-        options.function_call = 'auto'
+        chatCompletionOptions.functions = functions
+        chatCompletionOptions.function_call = 'auto'
     }
 
-    const chatCompletion = await openai.createChatCompletion(options)
+    log.trace({chatCompletionOptions})
+
+    const chatCompletion = await openai.createChatCompletion(chatCompletionOptions)
+
+    log.trace({chatCompletion})
 
     return chatCompletion.data?.choices?.[0]?.message
 }
@@ -108,12 +143,14 @@ export async function createChatCompletion(messages: ChatCompletionRequestMessag
  * @param prompt The image description provided to DALL-E.
  */
 export async function createImage(prompt: string): Promise<string | undefined> {
-    const image = await openai.createImage({
-        prompt: prompt,
+    const createImageOptions: CreateImageRequest = {
+        prompt,
         n: 1,
         size: '512x512',
-        response_format: "b64_json"
-    })
-
+        response_format: 'b64_json'
+    };
+    log.trace({createImageOptions})
+    const image = await openai.createImage(createImageOptions)
+    log.trace({image})
     return image.data?.data[0]?.b64_json
 }
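
The reworked dispatch in `continueThread` does three things: it bounds the function-call loop at seven rounds via `maxChainLength`, resolves plugins through a `Map` so an unknown name yields `undefined` instead of a `TypeError` on `.runPlugin`, and corrects the model once per hallucinated plugin name before giving up. A standalone sketch of that guard logic with simplified types (not the bot's actual code):

    // Simplified stand-in for PluginBase.runPlugin
    type Plugin = (args: unknown) => Promise<string>

    const registry = new Map<string, Plugin>()
    const missingPlugins = new Set<string>()

    async function dispatch(name: string, rawArgs?: string): Promise<string> {
        const plugin = registry.get(name) // undefined for hallucinated names
        if (!plugin) {
            if (!missingPlugins.has(name)) {
                missingPlugins.add(name) // correct the model only once per name
                return `There is no plugin named '${name}' available.`
            }
            return "Sorry, plugin error."
        }
        return plugin(JSON.parse(rawArgs ?? '[]'))
    }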
