9
9
Message ,
10
10
} from "./types.js" ;
11
11
import { execSync } from "child_process" ;
12
-
12
+ import { Readable , Writable } from "stream" ;
13
13
import { getAnthropicApiKeyError } from "../utils/errors.js" ;
14
14
15
15
export interface ToolAgentResult {
@@ -21,6 +21,16 @@ export interface ToolAgentResult {
21
21
interactions : number ;
22
22
}
23
23
24
/**
 * Live handle on a running tool agent.
 *
 * Returned synchronously by `toolAgent` while the agent loop runs in the
 * background; consumers can stream progress, inspect partial state, and
 * await the final result.
 */
export interface ToolAgentState {
  /** Object-mode stream of agent events ({type, content} objects: "user",
   *  "assistant", "complete", "error"); ends (null push) when the agent stops. */
  outMessages: Readable;
  /** Object-mode sink for messages sent to the agent (currently a no-op,
   *  reserved for future interactive features). */
  inMessages: Writable;
  /** Final result text; undefined until the agent completes successfully. */
  result?: string;
  /** Error that terminated the agent; undefined unless the agent failed. */
  error?: Error;
  /** Cumulative input tokens consumed so far. */
  input_tokens: number;
  /** Cumulative output tokens produced so far. */
  output_tokens: number;
  /** Resolves with the final result when the agent finishes; rejects on error. */
  done: Promise<ToolAgentResult>;
}
24
34
const CONFIG = {
25
35
maxIterations : 50 ,
26
36
model : "claude-3-5-sonnet-20241022" ,
@@ -76,7 +86,7 @@ const CONFIG = {
76
86
"When you run into issues or unexpected results, take a step back and read the project documentation and configuration files and look at other source files in the project for examples of what works." ,
77
87
"" ,
78
88
"Use sub-agents for parallel tasks, providing them with specific context they need rather than having them rediscover it." ,
79
- ] . join ( "\\ n" ) ;
89
+ ] . join ( "\n" ) ;
80
90
} ,
81
91
} ;
82
92
@@ -155,128 +165,166 @@ async function executeTools(
155
165
return { sequenceCompleted, completionResult, toolResults } ;
156
166
}
157
167
158
- // eslint-disable-next-line max-lines-per- function
159
- export const toolAgent = async (
168
+ // The main toolAgent function that now returns a ToolAgentState
169
+ export const toolAgent = (
160
170
initialPrompt : string ,
161
171
tools : Tool [ ] ,
162
172
logger : Logger ,
163
173
config = CONFIG ,
164
- ) : Promise < ToolAgentResult > => {
165
- logger . verbose ( "Starting agent execution" ) ;
166
- logger . verbose ( "Initial prompt:" , initialPrompt ) ;
174
+ ) : ToolAgentState => {
175
+ // Create streams
176
+ const outMessages = new Readable ( {
177
+ objectMode : true ,
178
+ read ( ) { } , // No-op since we push data manually
179
+ } ) ;
180
+
181
+ const inMessages = new Writable ( {
182
+ objectMode : true ,
183
+ write ( chunk , encoding , callback ) {
184
+ // Handle incoming messages (for future interactive features)
185
+ callback ( ) ;
186
+ } ,
187
+ } ) ;
167
188
168
189
let totalInputTokens = 0 ;
169
190
let totalOutputTokens = 0 ;
170
191
let interactions = 0 ;
192
+ let currentResult : string | undefined ;
193
+ let currentError : Error | undefined ;
194
+
195
+ // Stream the initial user message
196
+ outMessages . push ( { type : "user" , content : initialPrompt } ) ;
197
+
198
+ // Create a promise that will resolve when the agent is done
199
+ const donePromise = new Promise < ToolAgentResult > ( async ( resolve , reject ) => {
200
+ try {
201
+ const apiKey = process . env . ANTHROPIC_API_KEY ;
202
+ if ( ! apiKey ) {
203
+ const error = new Error ( getAnthropicApiKeyError ( ) ) ;
204
+ currentError = error ;
205
+ outMessages . push ( { type : "error" , content : error } ) ;
206
+ outMessages . push ( null ) ;
207
+ reject ( error ) ;
208
+ return ;
209
+ }
171
210
172
- const apiKey = process . env . ANTHROPIC_API_KEY ;
173
- if ( ! apiKey ) throw new Error ( getAnthropicApiKeyError ( ) ) ;
174
-
175
- const client = new Anthropic ( { apiKey } ) ;
176
- const messages : Message [ ] = [
177
- {
178
- role : "user" ,
179
- content : [ { type : "text" , text : initialPrompt } ] ,
180
- } ,
181
- ] ;
182
-
183
- logger . debug ( "User message:" , initialPrompt ) ;
184
-
185
- // Get the system prompt once at the start
186
- const systemPrompt = await config . getSystemPrompt ( ) ;
187
-
188
- for ( let i = 0 ; i < config . maxIterations ; i ++ ) {
189
- logger . verbose (
190
- `Requesting completion ${ i + 1 } with ${ messages . length } messages with ${
191
- JSON . stringify ( messages ) . length
192
- } bytes`,
193
- ) ;
194
-
195
- interactions ++ ;
196
- const response = await client . messages . create ( {
197
- model : config . model ,
198
- max_tokens : config . maxTokens ,
199
- temperature : config . temperature ,
200
- messages,
201
- system : systemPrompt ,
202
- tools : tools . map ( ( t ) => ( {
203
- name : t . name ,
204
- description : t . description ,
205
- input_schema : t . parameters as Anthropic . Tool . InputSchema ,
206
- } ) ) ,
207
- tool_choice : { type : "auto" } ,
208
- } ) ;
209
-
210
- if ( ! response . content . length ) {
211
- const result = {
212
- result :
213
- "Agent returned empty message implying it is done its given task" ,
214
- tokens : {
215
- input : totalInputTokens ,
216
- output : totalOutputTokens ,
211
+ const client = new Anthropic ( { apiKey } ) ;
212
+ const messages : Message [ ] = [
213
+ {
214
+ role : "user" ,
215
+ content : [ { type : "text" , text : initialPrompt } ] ,
217
216
} ,
218
- interactions,
219
- } ;
220
- logger . verbose (
221
- `Agent completed with ${ result . tokens . input } input tokens, ${ result . tokens . output } output tokens in ${ result . interactions } interactions` ,
222
- ) ;
223
- return result ;
224
- }
225
-
226
- totalInputTokens += response . usage . input_tokens ;
227
- totalOutputTokens += response . usage . output_tokens ;
228
- logger . verbose (
229
- ` Token usage: ${ response . usage . input_tokens } input, ${ response . usage . output_tokens } output` ,
230
- ) ;
231
-
232
- const { content, toolCalls } = processResponse ( response ) ;
233
- messages . push ( { role : "assistant" , content } ) ;
234
-
235
- // Log the assistant's message
236
- const assistantMessage = content
237
- . filter ( ( c ) => c . type === "text" )
238
- . map ( ( c ) => ( c as TextContent ) . text )
239
- . join ( "\\n" ) ;
240
- if ( assistantMessage ) {
241
- logger . info ( assistantMessage ) ;
242
- }
243
-
244
- const { sequenceCompleted, completionResult } = await executeTools (
245
- toolCalls ,
246
- tools ,
247
- messages ,
248
- logger ,
249
- ) ;
217
+ ] ;
218
+
219
+ logger . debug ( "User message:" , initialPrompt ) ;
220
+
221
+ const systemPrompt = await config . getSystemPrompt ( ) ;
222
+
223
+ for ( let i = 0 ; i < config . maxIterations ; i ++ ) {
224
+ logger . verbose (
225
+ `Requesting completion ${ i + 1 } with ${ messages . length } messages` ,
226
+ ) ;
227
+
228
+ interactions ++ ;
229
+ const response = await client . messages . create ( {
230
+ model : config . model ,
231
+ max_tokens : config . maxTokens ,
232
+ temperature : config . temperature ,
233
+ messages,
234
+ system : systemPrompt ,
235
+ tools : tools . map ( ( t ) => ( {
236
+ name : t . name ,
237
+ description : t . description ,
238
+ input_schema : t . parameters as Anthropic . Tool . InputSchema ,
239
+ } ) ) ,
240
+ tool_choice : { type : "auto" } ,
241
+ } ) ;
242
+
243
+ if ( ! response . content . length ) {
244
+ currentResult = "Agent returned empty message implying it is done its given task" ;
245
+ const result = {
246
+ result : currentResult ,
247
+ tokens : {
248
+ input : totalInputTokens ,
249
+ output : totalOutputTokens ,
250
+ } ,
251
+ interactions,
252
+ } ;
253
+ outMessages . push ( { type : "complete" , content : result } ) ;
254
+ outMessages . push ( null ) ;
255
+ resolve ( result ) ;
256
+ return ;
257
+ }
258
+
259
+ totalInputTokens += response . usage . input_tokens ;
260
+ totalOutputTokens += response . usage . output_tokens ;
261
+
262
+ const { content, toolCalls } = processResponse ( response ) ;
263
+ messages . push ( { role : "assistant" , content } ) ;
264
+
265
+ // Stream assistant's messages
266
+ const assistantMessage = content
267
+ . filter ( ( c ) => c . type === "text" )
268
+ . map ( ( c ) => ( c as TextContent ) . text )
269
+ . join ( "\n" ) ;
270
+ if ( assistantMessage ) {
271
+ logger . info ( assistantMessage ) ;
272
+ outMessages . push ( { type : "assistant" , content : assistantMessage } ) ;
273
+ }
274
+
275
+ const { sequenceCompleted, completionResult } = await executeTools (
276
+ toolCalls ,
277
+ tools ,
278
+ messages ,
279
+ logger ,
280
+ ) ;
281
+
282
+ if ( sequenceCompleted ) {
283
+ currentResult = completionResult ?? "Sequence explicitly completed with an empty result" ;
284
+ const result = {
285
+ result : currentResult ,
286
+ tokens : {
287
+ input : totalInputTokens ,
288
+ output : totalOutputTokens ,
289
+ } ,
290
+ interactions,
291
+ } ;
292
+ outMessages . push ( { type : "complete" , content : result } ) ;
293
+ outMessages . push ( null ) ;
294
+ resolve ( result ) ;
295
+ return ;
296
+ }
297
+ }
250
298
251
- if ( sequenceCompleted ) {
299
+ logger . warn ( "Maximum iterations reached" ) ;
300
+ currentResult = "Maximum sub-agent iterations reached without successful completion" ;
252
301
const result = {
253
- result :
254
- completionResult ??
255
- "Sequence explicitly completed with an empty result" ,
302
+ result : currentResult ,
256
303
tokens : {
257
304
input : totalInputTokens ,
258
305
output : totalOutputTokens ,
259
306
} ,
260
307
interactions,
261
308
} ;
262
- logger . verbose (
263
- `Agent completed with ${ result . tokens . input } input tokens, ${ result . tokens . output } output tokens in ${ result . interactions } interactions` ,
264
- ) ;
265
- return result ;
309
+ outMessages . push ( { type : "complete" , content : result } ) ;
310
+ outMessages . push ( null ) ;
311
+ resolve ( result ) ;
312
+
313
+ } catch ( error ) {
314
+ currentError = error as Error ;
315
+ outMessages . push ( { type : "error" , content : currentError } ) ;
316
+ outMessages . push ( null ) ;
317
+ reject ( currentError ) ;
266
318
}
267
- }
268
-
269
- logger . warn ( "Maximum iterations reached" ) ;
270
- const result = {
271
- result : "Maximum sub-agent iterations reach without successful completion" ,
272
- tokens : {
273
- input : totalInputTokens ,
274
- output : totalOutputTokens ,
275
- } ,
276
- interactions ,
319
+ } ) ;
320
+
321
+ return {
322
+ outMessages ,
323
+ inMessages ,
324
+ get result ( ) { return currentResult ; } ,
325
+ get error ( ) { return currentError ; } ,
326
+ input_tokens : totalInputTokens ,
327
+ output_tokens : totalOutputTokens ,
328
+ done : donePromise ,
277
329
} ;
278
- logger . verbose (
279
- `Agent completed with ${ result . tokens . input } input tokens, ${ result . tokens . output } output tokens in ${ result . interactions } interactions` ,
280
- ) ;
281
- return result ;
282
330
} ;
0 commit comments