Commit b640ed3

test: add resilientllm unit tests
1 parent 9197858 commit b640ed3

2 files changed: +87 -48 lines

ResilientLLM.js

Lines changed: 9 additions & 2 deletions
@@ -4,6 +4,8 @@
  * const llm = new LLM({ aiService: "anthropic", model: "claude-3-5-sonnet-20240620", maxTokens: 2048, temperature: 0 });
  * const response = await llm.chat([{ role: "user", content: "Hello, world!" }]);
  * console.log(response);
+ * // You may cancel all llm operations (for the given instance) by calling abort() method on the ResilientLLM instance
+ * llm.abort();
  */
 import { Tiktoken } from "js-tiktoken/lite";
 import o200k_base from "js-tiktoken/ranks/o200k_base";
@@ -33,6 +35,7 @@ class ResilientLLM {
     this.backoffFactor = options?.backoffFactor || 2;
     this.onRateLimitUpdate = options?.onRateLimitUpdate;
     this._abortController = null;
+    this.resilientOperations = {}; // Store resilient operation instances for observability
   }
 
   getApiUrl(aiService) {
@@ -154,7 +157,7 @@ class ResilientLLM {
     }
     try{
       // Instantiate ResilientOperation for LLM calls
-      this.resilientOperation = new ResilientOperation({
+      const resilientOperation = new ResilientOperation({
        bucketId: this.aiService,
        rateLimitConfig: this.rateLimitConfig,
        retries: this.retries,
@@ -165,8 +168,9 @@ class ResilientLLM {
      });
      // Use single instance of abort controller for all operations
      this._abortController = this._abortController || new AbortController();
+     this.resilientOperations[resilientOperation.id] = resilientOperation;
      // Wrap the LLM API call in ResilientOperation for rate limiting, retries, etc.
-     const { data, statusCode } = await this.resilientOperation
+     const { data, statusCode } = await resilientOperation
        .withTokens(estimatedLLMTokens)
        .withCache()
        .withAbortControl(this._abortController)
@@ -230,6 +234,7 @@ class ResilientLLM {
          content = this.parseOllamaChatCompletion(data, llmOptions?.tools);
          break;
      }
+     delete this.resilientOperations[resilientOperation.id];
      return content;
    } catch (error) {
      console.error(`Error calling ${aiService} API:`, error);
@@ -391,6 +396,8 @@ class ResilientLLM {
   abort(){
     this._abortController?.abort();
     this._abortController = null;
+    this.resilientOperations = {};
+    this._abortController = null;
   }
 
   /**
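
For orientation, here is a minimal calling-side sketch of the cancellation flow this change documents. The constructor options and chat() call mirror the doc comment in the hunk above; the assumption that an in-flight chat() call rejects once abort() fires is not guaranteed by this diff and is flagged in the comments.

// Usage sketch only (assumptions: ESM top-level await is available, and an
// aborted chat() call rejects; the exact error shape is not specified by this commit).
import ResilientLLM from './ResilientLLM.js';

const llm = new ResilientLLM({ aiService: 'anthropic', model: 'claude-3-5-sonnet-20240620', maxTokens: 2048, temperature: 0 });

// Start a request, then cancel every in-flight operation on this instance.
const pending = llm.chat([{ role: 'user', content: 'Hello, world!' }]);
setTimeout(() => llm.abort(), 1000);

try {
  console.log(await pending);
} catch (error) {
  console.error('Request was aborted or failed:', error);
}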

test/resilient-llm.unit.test.js

Lines changed: 78 additions & 46 deletions
@@ -1,73 +1,105 @@
 import ResilientLLM from '../ResilientLLM.js';
-import { describe, it, beforeEach } from 'mocha';
+import { describe, it, beforeEach, afterEach } from 'mocha';
 import { expect, use } from 'chai';
 import chaiAsPromised from 'chai-as-promised';
+import sinon from 'sinon';
 
 // Configure chai to handle promises
 use(chaiAsPromised);
 
-describe('ResilientLLM Async Function Tests', () => {
-  let llm;
+describe('ResilientLLM Unit Tests', () => {
+  let resilientLLM;
+  let originalEnv;
+  let mockFetch;
+  let mockAnthropicResponse;
 
   beforeEach(() => {
-    llm = new ResilientLLM({
-      aiService: 'openai',
-      retries: 1
+    // Save original environment
+    originalEnv = { ...process.env };
+
+    // Set up test environment
+    process.env.ANTHROPIC_API_KEY = 'test-key';
+    process.env.MAX_INPUT_TOKENS = '100000';
+
+    resilientLLM = new ResilientLLM({
+      aiService: 'anthropic',
+      model: 'claude-3-5-sonnet-20240620',
+      maxTokens: 2048,
+      temperature: 0
     });
-  });
 
-  it('should execute simple async function and return correct value', async () => {
-    // Create a simple async function that returns a string
-    const simpleAsyncFunction = async () => {
-      return 'Hello, World!';
+    mockAnthropicResponse = {
+      content: [
+        { text: 'Hello! How can I help you today?' }
+      ]
     };
 
-    // Execute the async function using ResilientOperation
-    const result = await llm.resilientOperation.execute(simpleAsyncFunction);
+    mockFetch = sinon.stub().resolves({
+      json: () => Promise.resolve(mockAnthropicResponse),
+      status: 200
+    });
 
-    // Verify the result
-    expect(result).to.equal('Hello, World!');
+    global.fetch = mockFetch;
   });
 
-  it('should execute async function with parameters', async () => {
-    // Create an async function that takes parameters
-    const asyncAdd = async (a, b) => {
-      return a + b;
-    };
+  afterEach(() => {
+    // Restore original environment
+    process.env = originalEnv;
+    sinon.restore();
+  });
+
+  describe('Happy Path Tests', () => {
+    it('should successfully complete a chat request and return parsed response', async () => {
+      // Arrange
+      const conversationHistory = [
+        { role: 'user', content: 'Hello, world!' }
+      ];
 
-    // Execute with parameters
-    const result = await llm.resilientOperation.execute(asyncAdd, 5, 3);
+      // Act
+      const result = await resilientLLM.chat(conversationHistory);
 
-    // Verify the result
-    expect(result).to.equal(8);
+      // Assert
+      expect(result).to.equal(mockAnthropicResponse.content[0].text);
+      expect(mockFetch.callCount).to.be.equal(1);
+    });
   });
 
-  it('should execute async function that returns object', async () => {
-    // Create an async function that returns an object
-    const asyncObjectFunction = async () => {
-      return { status: 'success', data: [1, 2, 3] };
-    };
+  describe('Edge Case Tests', () => {
+    it('should throw error when input tokens exceed maximum limit', async () => {
+      // Arrange
+      const longText = 'a'.repeat(500000); // Very long text to exceed token limit
+      const conversationHistory = [
+        { role: 'user', content: longText }
+      ];
 
-    // Execute the function
-    const result = await llm.resilientOperation.execute(asyncObjectFunction);
+      // Act & Assert
+      await expect(resilientLLM.chat(conversationHistory))
+        .to.be.rejectedWith('Input tokens exceed the maximum limit of 100000');
+      expect(mockFetch.callCount).to.be.equal(0);
+    });
 
-    // Verify the result
-    expect(result).to.deep.equal({ status: 'success', data: [1, 2, 3] });
-    expect(result.status).to.equal('success');
-    expect(result.data).to.have.length(3);
-  });
+    it('should retry with alternate service when primary service returns rate limit error', async () => {
+      // Arrange
+      const conversationHistory = [
+        { role: 'user', content: 'Test message' }
+      ];
 
-  it('should execute async function with delay', async () => {
-    // Create an async function with a small delay
-    const asyncDelayFunction = async () => {
-      await new Promise(resolve => setTimeout(resolve, 10));
-      return 'Completed after delay';
-    };
+      // Update fetch to return rate limit error
+      mockFetch.resolves({
+        json: () => Promise.resolve({ error: { message: 'Rate limit exceeded' } }),
+        status: 429
+      });
+
+      // Mock the retry method to return success
+      sinon.stub(resilientLLM, 'retryChatWithAlternateService').resolves(mockAnthropicResponse.content[0].text);
 
-    // Execute the function
-    const result = await llm.resilientOperation.execute(asyncDelayFunction);
+      // Act
+      const result = await resilientLLM.chat(conversationHistory);
 
-    // Verify the result
-    expect(result).to.equal('Completed after delay');
+      // Assert
+      expect(result).to.equal(mockAnthropicResponse.content[0].text);
+      expect(resilientLLM.retryChatWithAlternateService.calledOnce).to.be.true;
+      expect(mockFetch.callCount).to.be.equal(1);
+    });
   });
 });
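
Assuming mocha, chai, chai-as-promised, and sinon are installed as dev dependencies (the package.json test script is not part of this commit), the new suite can presumably be run directly with:

npx mocha test/resilient-llm.unit.test.js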
