import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"

// TODO: Update imports for Chutes once defined in shared/api.ts
import { ChutesModelId, chutesDefaultModelId, chutesModels } from "../../../shared/api"
import { ChutesHandler } from "../chutes"

7+ // Mock OpenAI client
8+ jest . mock ( "openai" , ( ) => {
9+ const createMock = jest . fn ( )
10+ return jest . fn ( ( ) => ( {
11+ chat : {
12+ completions : {
13+ create : createMock ,
14+ } ,
15+ } ,
16+ } ) )
17+ } )
18+
19+ // Test suite for ChutesHandler
20+ describe ( "ChutesHandler" , ( ) => {
21+ let handler : ChutesHandler // Use ChutesHandler type
22+ let mockCreate : jest . Mock
23+
24+ beforeEach ( ( ) => {
25+ // Reset all mocks
26+ jest . clearAllMocks ( )
27+
28+ // Get the mock create function
29+ mockCreate = ( OpenAI as unknown as jest . Mock ) ( ) . chat . completions . create
30+
31+ // Create handler with mock
32+ handler = new ChutesHandler ( { } ) // Instantiate ChutesHandler
33+ } )
34+
35+ test ( "should use the correct Chutes base URL" , ( ) => {
36+ // Instantiate handler inside the test to ensure clean state for this check
37+ new ChutesHandler ( { } )
38+ expect ( OpenAI ) . toHaveBeenCalledWith (
39+ expect . objectContaining ( {
40+ baseURL : "https://llm.chutes.ai/v1" , // Verify Chutes base URL
41+ } ) ,
42+ )
43+ } )
44+
45+ test ( "should use the provided API key" , ( ) => {
46+ // Clear mocks before this specific test
47+ jest . clearAllMocks ( )
48+
49+ // Create a handler with our API key
50+ const chutesApiKey = "test-chutes-api-key" // Use chutesApiKey
51+ new ChutesHandler ( { chutesApiKey } ) // Instantiate ChutesHandler
52+
53+ // Verify the OpenAI constructor was called with our API key
54+ expect ( OpenAI ) . toHaveBeenCalledWith (
55+ expect . objectContaining ( {
56+ apiKey : chutesApiKey ,
57+ } ) ,
58+ )
59+ } )
60+
61+ test ( "should return default model when no model is specified" , ( ) => {
62+ const model = handler . getModel ( )
63+ expect ( model . id ) . toBe ( chutesDefaultModelId ) // Use chutesDefaultModelId
64+ expect ( model . info ) . toEqual ( chutesModels [ chutesDefaultModelId ] ) // Use chutesModels
65+ } )
66+
67+ test ( "should return specified model when valid model is provided" , ( ) => {
68+ // Using an actual model ID from the Chutes API response
69+ const testModelId : ChutesModelId = "Qwen/Qwen2.5-72B-Instruct"
70+ const handlerWithModel = new ChutesHandler ( { apiModelId : testModelId } ) // Instantiate ChutesHandler
71+ const model = handlerWithModel . getModel ( )
72+
73+ expect ( model . id ) . toBe ( testModelId )
74+ expect ( model . info ) . toEqual ( chutesModels [ testModelId ] ) // Use chutesModels
75+ } )
76+
77+ test ( "completePrompt method should return text from Chutes API" , async ( ) => {
78+ const expectedResponse = "This is a test response from Chutes"
79+
80+ mockCreate . mockResolvedValueOnce ( {
81+ choices : [
82+ {
83+ message : {
84+ content : expectedResponse ,
85+ } ,
86+ } ,
87+ ] ,
88+ } )
89+
90+ const result = await handler . completePrompt ( "test prompt" )
91+ expect ( result ) . toBe ( expectedResponse )
92+ } )
93+
94+ test ( "should handle errors in completePrompt" , async ( ) => {
95+ const errorMessage = "Chutes API error"
96+ mockCreate . mockRejectedValueOnce ( new Error ( errorMessage ) )
97+
98+ await expect ( handler . completePrompt ( "test prompt" ) ) . rejects . toThrow ( `Chutes AI completion error: ${ errorMessage } ` ) // Updated error message prefix
99+ } )
100+
101+ test ( "createMessage should yield text content from stream" , async ( ) => {
102+ const testContent = "This is test content from Chutes stream"
103+
104+ // Setup mock for streaming response
105+ mockCreate . mockImplementationOnce ( ( ) => {
106+ return {
107+ [ Symbol . asyncIterator ] : ( ) => ( {
108+ next : jest
109+ . fn ( )
110+ . mockResolvedValueOnce ( {
111+ done : false ,
112+ value : {
113+ choices : [ { delta : { content : testContent } } ] ,
114+ } ,
115+ } )
116+ . mockResolvedValueOnce ( { done : true } ) ,
117+ } ) ,
118+ }
119+ } )
120+
121+ // Create and consume the stream
122+ const stream = handler . createMessage ( "system prompt" , [ ] )
123+ const firstChunk = await stream . next ( )
124+
125+ // Verify the content
126+ expect ( firstChunk . done ) . toBe ( false )
127+ expect ( firstChunk . value ) . toEqual ( {
128+ type : "text" ,
129+ text : testContent ,
130+ } )
131+ } )
132+
133+ test ( "createMessage should yield usage data from stream" , async ( ) => {
134+ // Setup mock for streaming response that includes usage data
135+ mockCreate . mockImplementationOnce ( ( ) => {
136+ return {
137+ [ Symbol . asyncIterator ] : ( ) => ( {
138+ next : jest
139+ . fn ( )
140+ . mockResolvedValueOnce ( {
141+ done : false ,
142+ value : {
143+ choices : [ { delta : { } } ] , // Needs to have choices array to avoid error
144+ usage : { // Assuming standard OpenAI usage fields
145+ prompt_tokens : 10 ,
146+ completion_tokens : 20 ,
147+ } ,
148+ } ,
149+ } )
150+ . mockResolvedValueOnce ( { done : true } ) ,
151+ } ) ,
152+ }
153+ } )
154+
155+ // Create and consume the stream
156+ const stream = handler . createMessage ( "system prompt" , [ ] )
157+ const firstChunk = await stream . next ( )
158+
159+ // Verify the usage data
160+ expect ( firstChunk . done ) . toBe ( false )
161+ expect ( firstChunk . value ) . toEqual ( { // Updated expected usage structure
162+ type : "usage" ,
163+ inputTokens : 10 ,
164+ outputTokens : 20 ,
165+ cacheReadTokens : 0 , // Assuming 0 for Chutes
166+ cacheWriteTokens : 0 , // Assuming 0 for Chutes
167+ } )
168+ } )
169+
170+ test ( "createMessage should pass correct parameters to Chutes client" , async ( ) => {
171+ // Setup a handler with specific model
172+ const modelId : ChutesModelId = "deepseek-ai/DeepSeek-R1" // Use an actual Chutes model ID and type
173+ const modelInfo = chutesModels [ modelId ] // Use chutesModels
174+ const handlerWithModel = new ChutesHandler ( { apiModelId : modelId } ) // Instantiate ChutesHandler
175+
176+ // Setup mock for streaming response
177+ mockCreate . mockImplementationOnce ( ( ) => {
178+ return {
179+ [ Symbol . asyncIterator ] : ( ) => ( {
180+ async next ( ) {
181+ return { done : true }
182+ } ,
183+ } ) ,
184+ }
185+ } )
186+
187+ // System prompt and messages
188+ const systemPrompt = "Test system prompt for Chutes"
189+ const messages : Anthropic . Messages . MessageParam [ ] = [ { role : "user" , content : "Test message for Chutes" } ]
190+
191+ // Start generating a message
192+ const messageGenerator = handlerWithModel . createMessage ( systemPrompt , messages )
193+ await messageGenerator . next ( ) // Start the generator
194+
195+ // Check that all parameters were passed correctly
196+ expect ( mockCreate ) . toHaveBeenCalledWith (
197+ expect . objectContaining ( {
198+ model : modelId ,
199+ max_tokens : modelInfo . maxTokens , // Assuming standard max_tokens
200+ temperature : 0.5 , // Using CHUTES_DEFAULT_TEMPERATURE
201+ messages : expect . arrayContaining ( [ { role : "system" , content : systemPrompt } ] ) ,
202+ stream : true ,
203+ stream_options : { include_usage : true } , // Assuming standard support
204+ } ) ,
205+ )
206+ } )
207+ } )