@@ -29,16 +29,10 @@ class ResilientLLM {
     this.topP = options?.topP || process.env.AI_TOP_P || 0.95;
     // Add rate limit config options if provided
     this.rateLimitConfig = options?.rateLimitConfig || { requestsPerMinute: 10, llmTokensPerMinute: 150000 };
-    // Instantiate ResilientOperation for LLM calls
-    this.resilientOperation = new ResilientOperation({
-      bucketId: this.aiService,
-      rateLimitConfig: this.rateLimitConfig,
-      retries: options?.retries || 3,
-      timeout: this.timeout,
-      backoffFactor: options?.backoffFactor || 2,
-      onRateLimitUpdate: options?.onRateLimitUpdate,
-      cacheStore: this.cacheStore
-    });
+    this.retries = options?.retries || 3;
+    this.backoffFactor = options?.backoffFactor || 2;
+    this.onRateLimitUpdate = options?.onRateLimitUpdate;
+    this._abortController = null;
   }

   getApiUrl(aiService) {
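
This hunk moves the retry/backoff configuration onto plain instance fields and defers creating the ResilientOperation until a request is actually made. A minimal construction sketch using only the options visible here (the import path is hypothetical, and other options such as aiService or timeout are not shown in this hunk):

// Sketch only: the module path below is an assumption, not part of this diff.
import ResilientLLM from './resilient-llm.js';

const llm = new ResilientLLM({
  retries: 5,            // stored as this.retries (defaults to 3)
  backoffFactor: 2,      // stored as this.backoffFactor (defaults to 2)
  rateLimitConfig: { requestsPerMinute: 10, llmTokensPerMinute: 150000 },
  onRateLimitUpdate: (state) => console.log('rate limit state:', state)
});
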
@@ -159,11 +153,24 @@ class ResilientLLM {
       throw new Error('Invalid provider specified. Use "anthropic" or "openai" or "gemini" or "ollama".');
     }
     try {
+      // Instantiate ResilientOperation for LLM calls
+      this.resilientOperation = new ResilientOperation({
+        bucketId: this.aiService,
+        rateLimitConfig: this.rateLimitConfig,
+        retries: this.retries,
+        timeout: this.timeout,
+        backoffFactor: this.backoffFactor,
+        onRateLimitUpdate: this.onRateLimitUpdate,
+        cacheStore: this.cacheStore
+      });
+      // Use a single AbortController instance for all operations
+      this._abortController = this._abortController || new AbortController();
       // Wrap the LLM API call in ResilientOperation for rate limiting, retries, etc.
       const { data, statusCode } = await this.resilientOperation
         .withTokens(estimatedLLMTokens)
         .withCache()
-        .execute(this._makeHttpRequest, apiUrl, requestBody, headers);
+        .withAbortControl(this._abortController)
+        .execute(this._makeHttpRequest, apiUrl, requestBody, headers, this._abortController.signal);
       /**
        * OpenAI chat completion response
        * {
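
The chain now threads a shared AbortController through the operation: withAbortControl() hands the controller to ResilientOperation, and the controller's signal is appended as the last execute() argument so it arrives as _makeHttpRequest's abortSignal parameter. A simplified stand-in for that contract (the real ResilientOperation is not part of this diff; retries, rate limiting, and caching are elided):

// Stand-in sketch only; illustrates the fluent contract, not the real class.
class OperationChainSketch {
  withTokens(n)        { this._tokens = n; return this; }
  withCache()          { this._useCache = true; return this; }
  withAbortControl(ac) { this._abortController = ac; return this; }
  async execute(fn, ...args) {
    // args already ends with the AbortSignal the caller appended,
    // so fn receives it as its trailing abortSignal parameter.
    return fn(...args);
  }
}
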
@@ -256,6 +263,8 @@ class ResilientLLM {
    * @returns {Promise<{data: any, statusCode: number}>}
    */
   async _makeHttpRequest(apiUrl, requestBody, headers, abortSignal) {
+    console.log("Making HTTP request to:", apiUrl);
+    console.log("You may cancel it by calling the abort() method on the ResilientLLM instance");
     const startTime = Date.now();

     try {
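
The abortSignal parameter logged here is what the chain above forwards. The body of _makeHttpRequest is not shown in this hunk; a typical fetch-based shape, assuming the request is a JSON POST, might look like:

// Hypothetical body; only the signature and logging lines appear in this diff.
async _makeHttpRequest(apiUrl, requestBody, headers, abortSignal) {
  const response = await fetch(apiUrl, {
    method: 'POST',
    headers,
    body: JSON.stringify(requestBody),
    signal: abortSignal   // fetch rejects with an AbortError when the signal fires
  });
  return { data: await response.json(), statusCode: response.status };
}
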
@@ -291,7 +300,8 @@ class ResilientLLM {

   /**
    * Parse errors from various LLM APIs to create uniform error communication
-   * @param {*} error
+   * @param {number|null} statusCode - HTTP status code or null for general errors
+   * @param {Error|Object|null} error - Error object
    * @reference https://platform.openai.com/docs/guides/error-codes/api-error-codes
    * @reference https://docs.anthropic.com/en/api/errors
    */
@@ -305,8 +315,6 @@ class ResilientLLM {
         throw new Error(error?.message || "Invalid API Key");
       case 403:
         throw new Error(error?.message || "You are not authorized to access this resource");
-      case 400:
-        throw new Error(error?.message || "Bad request");
       case 429:
         throw new Error(error?.message || "Rate limit exceeded");
       case 404:
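
With the dedicated 400 branch removed, a 400 response is no longer special-cased and flows to whatever follows the listed cases. Assuming the switch ends in a generic default (that branch is not visible in this diff), the resulting shape is roughly:

// Assumed shape; the default branch here is an illustration, not shown in the diff.
switch (statusCode) {
  case 401: throw new Error(error?.message || "Invalid API Key");
  case 403: throw new Error(error?.message || "You are not authorized to access this resource");
  case 429: throw new Error(error?.message || "Rate limit exceeded");
  // 400 and any other status now fall through to the generic handler
  default:  throw new Error(error?.message || `LLM API error (status ${statusCode})`);
}
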
@@ -380,7 +388,10 @@ class ResilientLLM {
     return data?.choices?.[0]?.message?.content;
   }

-
+  abort() {
+    this._abortController?.abort();
+    this._abortController = null;
+  }

   /**
    * Estimate the number of tokens in a text
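
Taken together, a caller can now cancel an in-flight request from outside. A usage sketch, assuming a public chat() entry point that drives the request path above (the method name is a guess; it does not appear in this diff):

const llm = new ResilientLLM({ retries: 3 });

// Start a request without awaiting it, then cancel after 5 seconds.
const pending = llm.chat([{ role: 'user', content: 'Summarize this document' }]);
setTimeout(() => llm.abort(), 5000);

try {
  console.log(await pending);
} catch (err) {
  if (err.name === 'AbortError') console.log('Request cancelled');  // fetch reports cancellation this way
  else throw err;
}

Note that abort() nulls the controller and the request path lazily recreates it (this._abortController || new AbortController()), so an aborted instance stays usable for subsequent calls.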