// npx jest src/api/providers/__tests__/glama.test.ts

import { Anthropic } from "@anthropic-ai/sdk"
- import axios from "axios"

import { GlamaHandler } from "../glama"
import { ApiHandlerOptions } from "../../../shared/api"
@@ -20,31 +19,18 @@ jest.mock("openai", () => {
			const stream = {
				[Symbol.asyncIterator]: async function* () {
					yield {
- 						choices: [
- 							{
- 								delta: { content: "Test response" },
- 								index: 0,
- 							},
- 						],
+ 						choices: [{ delta: { content: "Test response" }, index: 0 }],
						usage: null,
					}
					yield {
- 						choices: [
- 							{
- 								delta: {},
- 								index: 0,
- 							},
- 						],
- 						usage: {
- 							prompt_tokens: 10,
- 							completion_tokens: 5,
- 							total_tokens: 15,
- 						},
+ 						choices: [{ delta: {}, index: 0 }],
+ 						usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
					}
				},
			}

			const result = mockCreate(...args)
+
			if (args[0].stream) {
				mockWithResponse.mockReturnValue(
					Promise.resolve({
@@ -59,6 +45,7 @@ jest.mock("openai", () => {
				)
				result.withResponse = mockWithResponse
			}
+
			return result
		},
	},
@@ -73,10 +60,10 @@ describe("GlamaHandler", () => {

	beforeEach(() => {
		mockOptions = {
- 			apiModelId: "anthropic/claude-3-7-sonnet",
- 			glamaModelId: "anthropic/claude-3-7-sonnet",
			glamaApiKey: "test-api-key",
+ 			glamaModelId: "anthropic/claude-3-7-sonnet",
		}
+
		handler = new GlamaHandler(mockOptions)
		mockCreate.mockClear()
		mockWithResponse.mockClear()
@@ -102,7 +89,7 @@ describe("GlamaHandler", () => {
	describe("constructor", () => {
		it("should initialize with provided options", () => {
			expect(handler).toBeInstanceOf(GlamaHandler)
- 			expect(handler.getModel().id).toBe(mockOptions.apiModelId)
+ 			expect(handler.getModel().id).toBe(mockOptions.glamaModelId)
		})
	})

@@ -116,40 +103,15 @@ describe("GlamaHandler", () => {
	]

	it("should handle streaming responses", async () => {
- 		// Mock axios for token usage request
- 		const mockAxios = jest.spyOn(axios, "get").mockResolvedValueOnce({
- 			data: {
- 				tokenUsage: {
- 					promptTokens: 10,
- 					completionTokens: 5,
- 					cacheCreationInputTokens: 0,
- 					cacheReadInputTokens: 0,
- 				},
- 				totalCostUsd: "0.00",
- 			},
- 		})
-
		const stream = handler.createMessage(systemPrompt, messages)
		const chunks: any[] = []
+
		for await (const chunk of stream) {
			chunks.push(chunk)
		}

- 		expect(chunks.length).toBe(2) // Text chunk and usage chunk
- 		expect(chunks[0]).toEqual({
- 			type: "text",
- 			text: "Test response",
- 		})
- 		expect(chunks[1]).toEqual({
- 			type: "usage",
- 			inputTokens: 10,
- 			outputTokens: 5,
- 			cacheWriteTokens: 0,
- 			cacheReadTokens: 0,
- 			totalCost: 0,
- 		})
-
- 		mockAxios.mockRestore()
+ 		expect(chunks.length).toBe(1)
+ 		expect(chunks[0]).toEqual({ type: "text", text: "Test response" })
	})

	it("should handle API errors", async () => {
@@ -178,7 +140,7 @@ describe("GlamaHandler", () => {
		expect(result).toBe("Test response")
		expect(mockCreate).toHaveBeenCalledWith(
			expect.objectContaining({
- 				model: mockOptions.apiModelId,
+ 				model: mockOptions.glamaModelId,
				messages: [{ role: "user", content: "Test prompt" }],
				temperature: 0,
				max_tokens: 8192,
@@ -204,22 +166,16 @@ describe("GlamaHandler", () => {
		mockCreate.mockClear()

		const nonAnthropicOptions = {
- 			apiModelId: "openai/gpt-4",
- 			glamaModelId: "openai/gpt-4",
			glamaApiKey: "test-key",
- 			glamaModelInfo: {
- 				maxTokens: 4096,
- 				contextWindow: 8192,
- 				supportsImages: true,
- 				supportsPromptCache: false,
- 			},
+ 			glamaModelId: "openai/gpt-4o",
		}
+
		const nonAnthropicHandler = new GlamaHandler(nonAnthropicOptions)

		await nonAnthropicHandler.completePrompt("Test prompt")
		expect(mockCreate).toHaveBeenCalledWith(
			expect.objectContaining({
- 				model: "openai/gpt-4",
+ 				model: "openai/gpt-4o",
				messages: [{ role: "user", content: "Test prompt" }],
				temperature: 0,
			}),
@@ -228,13 +184,20 @@ describe("GlamaHandler", () => {
		})
	})

- 	describe("getModel", () => {
- 		it("should return model info", () => {
- 			const modelInfo = handler.getModel()
- 			expect(modelInfo.id).toBe(mockOptions.apiModelId)
+ 	describe("fetchModel", () => {
+ 		it("should return model info", async () => {
+ 			const modelInfo = await handler.fetchModel()
+ 			expect(modelInfo.id).toBe(mockOptions.glamaModelId)
			expect(modelInfo.info).toBeDefined()
			expect(modelInfo.info.maxTokens).toBe(8192)
			expect(modelInfo.info.contextWindow).toBe(200_000)
		})
+
+ 		it("should return default model when invalid model provided", async () => {
+ 			const handlerWithInvalidModel = new GlamaHandler({ ...mockOptions, glamaModelId: "invalid/model" })
+ 			const modelInfo = await handlerWithInvalidModel.fetchModel()
+ 			expect(modelInfo.id).toBe("anthropic/claude-3-7-sonnet")
+ 			expect(modelInfo.info).toBeDefined()
+ 		})
	})
})
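
For context, a minimal sketch of the behavior the updated streaming test now assumes: the handler iterates the mocked OpenAI-style chunks and yields only text chunks, so the usage chunk produced by the old axios-based token accounting no longer appears (hence `chunks.length` is now 1). The helper name `toTextChunks` and the chunk shape below are illustrative assumptions, not GlamaHandler's actual internals.

```ts
// Illustrative sketch only; the real handler may differ.
type TextChunk = { type: "text"; text: string }

async function* toTextChunks(
	stream: AsyncIterable<{ choices: Array<{ delta: { content?: string } }> }>,
): AsyncGenerator<TextChunk> {
	for await (const chunk of stream) {
		const content = chunk.choices[0]?.delta?.content
		// Chunks with an empty delta (like the mock's final usage-bearing chunk)
		// yield nothing, which is why the test expects exactly one chunk.
		if (content) {
			yield { type: "text", text: content }
		}
	}
}
```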