
Commit 6d17324

add comments to chat.ts
1 parent b4d6888 commit 6d17324

File tree

  • src/server/services/langchain

1 file changed: +42 -1 lines changed
src/server/services/langchain/chat.ts

Lines changed: 42 additions & 1 deletion
@@ -15,6 +15,14 @@ import { MockModel } from './MockModel'

 type ChatModel = Runnable<BaseLanguageModelInput, AIMessageChunk, BaseChatModelCallOptions>

+/**
+ * Gets a chat model instance based on the provided configuration.
+ * Can be a MockModel for testing or an AzureChatOpenAI model.
+ * @param modelConfig The configuration for the model.
+ * @param tools The structured tools the model can use.
+ * @param temperature The temperature for the model's responses.
+ * @returns A chat model instance.
+ */
 const getChatModel = (modelConfig: (typeof validModels)[number], tools: StructuredTool[], temperature: number): ChatModel => {
   const chatModel =
     modelConfig.name === 'mock'
@@ -31,7 +39,7 @@ const getChatModel = (modelConfig: (typeof validModels)[number], tools: Structur
           summary: null,
           generate_summary: null,
         },
-      }).bindTools(tools)
+      }).bindTools(tools) // Make tools available to the model.

   return chatModel
 }
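
For context, the .bindTools() call annotated above is LangChain's standard mechanism for exposing tool schemas to a chat model. A minimal sketch of that pattern, independent of this repository (the get_weather tool, temperature, and Azure deployment settings are placeholder assumptions; credentials are expected in the usual AZURE_OPENAI_API_* environment variables):

```ts
import { AzureChatOpenAI } from '@langchain/openai'
import { tool } from '@langchain/core/tools'
import { z } from 'zod'

// Placeholder tool used only for illustration.
const getWeather = tool(async ({ city }) => `Sunny in ${city}`, {
  name: 'get_weather',
  description: 'Look up the current weather for a city.',
  schema: z.object({ city: z.string() }),
})

// Construct the model, then bind the tool so the model can emit tool calls for it.
const model = new AzureChatOpenAI({
  temperature: 0.2,                       // assumed value
  azureOpenAIApiDeploymentName: 'gpt-4o', // assumed deployment name
  azureOpenAIApiVersion: '2024-06-01',    // assumed API version
}).bindTools([getWeather])
```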
@@ -40,6 +48,23 @@ type WriteEventFunction = (data: ChatEvent) => Promise<void>

 type ChatTool = StructuredTool<any, any, any, string>

+/**
+ * Handles the main chat streaming logic.
+ * It takes the chat history, model configuration, and a set of tools,
+ * and streams the response from the language model, handling tool calls
+ * and sending events back to the client.
+ *
+ * This function can perform two chat turns if the first one results in tool calls.
+ *
+ * @param model The name of the model to use.
+ * @param temperature The temperature for the model's responses.
+ * @param systemMessage The system message to prepend to the chat history.
+ * @param chatMessages The history of chat messages.
+ * @param tools The structured tools available to the model.
+ * @param writeEvent A function to write chat events to the client.
+ * @param user The user initiating the chat.
+ * @returns An object containing response statistics and the final response message.
+ */
 export const streamChat = async ({
   model,
   temperature,
@@ -77,6 +102,7 @@ export const streamChat = async ({

   const result = await chatTurn(chatModel, messages, toolsByName, writeEvent, user)

+  // If the model decided to call tools, execute them and send the results back to the model in a second turn.
   if (result.toolCalls.length > 0) {
     const result2 = await chatTurn(chatModel, messages, toolsByName, writeEvent, user)

@@ -102,6 +128,17 @@ export const streamChat = async ({
   }
 }

+/**
+ * Executes a single turn of the chat.
+ * It streams the model's response, handles tool calls, and sends events.
+ *
+ * @param model The chat model instance.
+ * @param messages The messages to send to the model.
+ * @param toolsByName A record of available tools, keyed by name.
+ * @param writeEvent A function to write chat events to the client.
+ * @param user The user for whom the tool results are stored.
+ * @returns An object with statistics about the chat turn and any tool calls made.
+ */
 const chatTurn = async (model: ChatModel, messages: BaseMessageLike[], toolsByName: Record<string, ChatTool>, writeEvent: WriteEventFunction, user: User) => {
   const stream = await model.stream(messages)

@@ -140,9 +177,11 @@ const chatTurn = async (model: ChatModel, messages: BaseMessageLike[], toolsByNa
       })
     }

+    // Append the chunk to the full response.
     fullOutput = fullOutput !== undefined ? concat(fullOutput, chunk) : chunk
   }

+  // Add the assistant's full response to the message history.
   messages.push(fullOutput as AIMessageChunk)

   const toolCalls = fullOutput?.tool_calls ?? []
@@ -162,6 +201,7 @@ const chatTurn = async (model: ChatModel, messages: BaseMessageLike[], toolsByNa
       const result = await tool.invoke(toolCall)
       const artifact = result.artifact as ChatToolOutput
       await ToolResultStore.saveResults(id, artifact, user)
+      // Add the tool's output to the message history for the next turn.
       messages.push(result)
       toolCallStatuses[id] = {
         status: 'completed',
@@ -182,6 +222,7 @@ const chatTurn = async (model: ChatModel, messages: BaseMessageLike[], toolsByNa
     }
   }

+  // Calculate statistics about the response generation.
   const tokenCount = fullOutput?.usage_metadata?.output_tokens ?? 0
   const inputTokenCount = fullOutput?.usage_metadata?.input_tokens ?? 0
   const tokenStreamingDuration = Date.now() - firstTokenTS
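
Taken together, the new comments describe a common LangChain streaming pattern: accumulate the streamed chunks into one AIMessageChunk, keep it in the message history, execute any tool calls, and run a second turn so the model can read the tool output. A rough sketch of that pattern follows; the runTurn and twoTurnChat helpers and their signatures are hypothetical (not the repository's chatTurn/streamChat), and event writing and result storage are omitted:

```ts
import { concat } from '@langchain/core/utils/stream'
import type { AIMessageChunk, BaseMessageLike } from '@langchain/core/messages'
import type { StructuredTool } from '@langchain/core/tools'

type StreamableModel = { stream: (msgs: BaseMessageLike[]) => Promise<AsyncIterable<AIMessageChunk>> }

// Hypothetical helper mirroring the flow the comments describe.
const runTurn = async (model: StreamableModel, messages: BaseMessageLike[], toolsByName: Record<string, StructuredTool>) => {
  let fullOutput: AIMessageChunk | undefined

  // Accumulate streamed chunks into a single assistant message.
  for await (const chunk of await model.stream(messages)) {
    fullOutput = fullOutput !== undefined ? concat(fullOutput, chunk) : chunk
  }

  // Keep the assistant reply in the history so a follow-up turn has context.
  messages.push(fullOutput as AIMessageChunk)

  // Invoking a tool with a ToolCall yields a ToolMessage, which also goes into the history.
  const toolCalls = fullOutput?.tool_calls ?? []
  for (const toolCall of toolCalls) {
    messages.push(await toolsByName[toolCall.name].invoke(toolCall))
  }

  return toolCalls
}

// Run a second turn only when the first turn asked for tools, as the comment above streamChat notes.
export const twoTurnChat = async (model: StreamableModel, messages: BaseMessageLike[], toolsByName: Record<string, StructuredTool>) => {
  const toolCalls = await runTurn(model, messages, toolsByName)
  if (toolCalls.length > 0) {
    await runTurn(model, messages, toolsByName)
  }
}
```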
