 import ResilientLLM from '../ResilientLLM.js';
-import { describe, it, beforeEach } from 'mocha';
+import { describe, it, beforeEach, afterEach } from 'mocha';
 import { expect, use } from 'chai';
 import chaiAsPromised from 'chai-as-promised';
+import sinon from 'sinon';
 
 // Configure chai to handle promises
 use(chaiAsPromised);
 
-describe('ResilientLLM Async Function Tests', () => {
-  let llm;
+describe('ResilientLLM Unit Tests', () => {
+  let resilientLLM;
+  let originalEnv;
+  let mockFetch;
+  let mockAnthropicResponse;
 
   beforeEach(() => {
-    llm = new ResilientLLM({
-      aiService: 'openai',
-      retries: 1
+    // Save original environment
+    originalEnv = { ...process.env };
+
+    // Set up test environment
+    process.env.ANTHROPIC_API_KEY = 'test-key';
+    process.env.MAX_INPUT_TOKENS = '100000';
+
+    resilientLLM = new ResilientLLM({
+      aiService: 'anthropic',
+      model: 'claude-3-5-sonnet-20240620',
+      maxTokens: 2048,
+      temperature: 0
     });
-  });
 
-  it('should execute simple async function and return correct value', async () => {
-    // Create a simple async function that returns a string
-    const simpleAsyncFunction = async () => {
-      return 'Hello, World!';
+    mockAnthropicResponse = {
+      content: [
+        { text: 'Hello! How can I help you today?' }
+      ]
     };
 
-    // Execute the async function using ResilientOperation
-    const result = await llm.resilientOperation.execute(simpleAsyncFunction);
+    mockFetch = sinon.stub().resolves({
+      json: () => Promise.resolve(mockAnthropicResponse),
+      status: 200
+    });
 
-    // Verify the result
-    expect(result).to.equal('Hello, World!');
+    global.fetch = mockFetch;
   });
 
-  it('should execute async function with parameters', async () => {
-    // Create an async function that takes parameters
-    const asyncAdd = async (a, b) => {
-      return a + b;
-    };
+  afterEach(() => {
+    // Restore original environment
+    process.env = originalEnv;
+    sinon.restore();
+  });
+
+  describe('Happy Path Tests', () => {
+    it('should successfully complete a chat request and return parsed response', async () => {
+      // Arrange
+      const conversationHistory = [
+        { role: 'user', content: 'Hello, world!' }
+      ];
 
-    // Execute with parameters
-    const result = await llm.resilientOperation.execute(asyncAdd, 5, 3);
+      // Act
+      const result = await resilientLLM.chat(conversationHistory);
 
-    // Verify the result
-    expect(result).to.equal(8);
+      // Assert
+      expect(result).to.equal(mockAnthropicResponse.content[0].text);
+      expect(mockFetch.callCount).to.be.equal(1);
+    });
   });
 
-  it('should execute async function that returns object', async () => {
-    // Create an async function that returns an object
-    const asyncObjectFunction = async () => {
-      return { status: 'success', data: [1, 2, 3] };
-    };
+  describe('Edge Case Tests', () => {
+    it('should throw error when input tokens exceed maximum limit', async () => {
+      // Arrange
+      const longText = 'a'.repeat(500000); // Very long text to exceed token limit
+      const conversationHistory = [
+        { role: 'user', content: longText }
+      ];
 
-    // Execute the function
-    const result = await llm.resilientOperation.execute(asyncObjectFunction);
+      // Act & Assert
+      await expect(resilientLLM.chat(conversationHistory))
+        .to.be.rejectedWith('Input tokens exceed the maximum limit of 100000');
+      expect(mockFetch.callCount).to.be.equal(0);
+    });
 
-    // Verify the result
-    expect(result).to.deep.equal({ status: 'success', data: [1, 2, 3] });
-    expect(result.status).to.equal('success');
-    expect(result.data).to.have.length(3);
-  });
+    it('should retry with alternate service when primary service returns rate limit error', async () => {
+      // Arrange
+      const conversationHistory = [
+        { role: 'user', content: 'Test message' }
+      ];
 
-  it('should execute async function with delay', async () => {
-    // Create an async function with a small delay
-    const asyncDelayFunction = async () => {
-      await new Promise(resolve => setTimeout(resolve, 10));
-      return 'Completed after delay';
-    };
+      // Update fetch to return rate limit error
+      mockFetch.resolves({
+        json: () => Promise.resolve({ error: { message: 'Rate limit exceeded' } }),
+        status: 429
+      });
+
+      // Mock the retry method to return success
+      sinon.stub(resilientLLM, 'retryChatWithAlternateService').resolves(mockAnthropicResponse.content[0].text);
 
-    // Execute the function
-    const result = await llm.resilientOperation.execute(asyncDelayFunction);
+      // Act
+      const result = await resilientLLM.chat(conversationHistory);
 
-    // Verify the result
-    expect(result).to.equal('Completed after delay');
+      // Assert
+      expect(result).to.equal(mockAnthropicResponse.content[0].text);
+      expect(resilientLLM.retryChatWithAlternateService.calledOnce).to.be.true;
+      expect(mockFetch.callCount).to.be.equal(1);
+    });
   });
 });