diff --git a/src/api/providers/fetchers/__tests__/ollama.test.ts b/src/api/providers/fetchers/__tests__/ollama.test.ts
index cada0a4b60..bf1bf3c6b2 100644
--- a/src/api/providers/fetchers/__tests__/ollama.test.ts
+++ b/src/api/providers/fetchers/__tests__/ollama.test.ts
@@ -31,6 +31,31 @@ describe("Ollama Fetcher", () => {
 				description: "Family: qwen3, Context: 40960, Size: 32.8B",
 			})
 		})
+
+		it("should handle models with null families field", () => {
+			const modelDataWithNullFamilies = {
+				...ollamaModelsData["qwen3-2to16:latest"],
+				details: {
+					...ollamaModelsData["qwen3-2to16:latest"].details,
+					families: null,
+				},
+			}
+
+			const parsedModel = parseOllamaModel(modelDataWithNullFamilies as any)
+
+			expect(parsedModel).toEqual({
+				maxTokens: 40960,
+				contextWindow: 40960,
+				supportsImages: false,
+				supportsComputerUse: false,
+				supportsPromptCache: true,
+				inputPrice: 0,
+				outputPrice: 0,
+				cacheWritesPrice: 0,
+				cacheReadsPrice: 0,
+				description: "Family: qwen3, Context: 40960, Size: 32.8B",
+			})
+		})
 	})
 
 	describe("getOllamaModels", () => {
@@ -129,5 +154,69 @@ describe("Ollama Fetcher", () => {
 
 			consoleInfoSpy.mockRestore() // Restore original console.info
 		})
+
+		it("should handle models with null families field in API response", async () => {
+			const baseUrl = "http://localhost:11434"
+			const modelName = "test-model:latest"
+
+			const mockApiTagsResponse = {
+				models: [
+					{
+						name: modelName,
+						model: modelName,
+						modified_at: "2025-06-03T09:23:22.610222878-04:00",
+						size: 14333928010,
+						digest: "6a5f0c01d2c96c687d79e32fdd25b87087feb376bf9838f854d10be8cf3c10a5",
+						details: {
+							family: "llama",
+							families: null, // This is the case we're testing
+							format: "gguf",
+							parameter_size: "23.6B",
+							parent_model: "",
+							quantization_level: "Q4_K_M",
+						},
+					},
+				],
+			}
+			const mockApiShowResponse = {
+				license: "Mock License",
+				modelfile: "FROM /path/to/blob\nTEMPLATE {{ .Prompt }}",
+				parameters: "num_ctx 4096\nstop_token ",
+				template: "{{ .System }}USER: {{ .Prompt }}ASSISTANT:",
+				modified_at: "2025-06-03T09:23:22.610222878-04:00",
+				details: {
+					parent_model: "",
+					format: "gguf",
+					family: "llama",
+					families: null, // This is the case we're testing
+					parameter_size: "23.6B",
+					quantization_level: "Q4_K_M",
+				},
+				model_info: {
+					"ollama.context_length": 4096,
+					"some.other.info": "value",
+				},
+				capabilities: ["completion"],
+			}
+
+			mockedAxios.get.mockResolvedValueOnce({ data: mockApiTagsResponse })
+			mockedAxios.post.mockResolvedValueOnce({ data: mockApiShowResponse })
+
+			const result = await getOllamaModels(baseUrl)
+
+			expect(mockedAxios.get).toHaveBeenCalledTimes(1)
+			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`)
+
+			expect(mockedAxios.post).toHaveBeenCalledTimes(1)
+			expect(mockedAxios.post).toHaveBeenCalledWith(`${baseUrl}/api/show`, { model: modelName })
+
+			expect(typeof result).toBe("object")
+			expect(result).not.toBeInstanceOf(Array)
+			expect(Object.keys(result).length).toBe(1)
+			expect(result[modelName]).toBeDefined()
+
+			// Verify the model was parsed correctly despite null families
+			expect(result[modelName].description).toBe("Family: llama, Context: 4096, Size: 23.6B")
+		})
 	})
 })
diff --git a/src/api/providers/fetchers/ollama.ts b/src/api/providers/fetchers/ollama.ts
index 8de2c1a918..8e1e3f7f07 100644
--- a/src/api/providers/fetchers/ollama.ts
+++ b/src/api/providers/fetchers/ollama.ts
@@ -4,26 +4,26 @@ import { z } from "zod"
 const OllamaModelDetailsSchema = z.object({
 	family: z.string(),
-	families: z.array(z.string()),
-	format: z.string(),
+	families: z.array(z.string()).nullable().optional(),
+	format: z.string().optional(),
 	parameter_size: z.string(),
-	parent_model: z.string(),
-	quantization_level: z.string(),
+	parent_model: z.string().optional(),
+	quantization_level: z.string().optional(),
 })
 
 const OllamaModelSchema = z.object({
 	details: OllamaModelDetailsSchema,
-	digest: z.string(),
+	digest: z.string().optional(),
 	model: z.string(),
-	modified_at: z.string(),
+	modified_at: z.string().optional(),
 	name: z.string(),
-	size: z.number(),
+	size: z.number().optional(),
 })
 
 const OllamaModelInfoResponseSchema = z.object({
-	modelfile: z.string(),
-	parameters: z.string(),
-	template: z.string(),
+	modelfile: z.string().optional(),
+	parameters: z.string().optional(),
+	template: z.string().optional(),
 	details: OllamaModelDetailsSchema,
 	model_info: z.record(z.string(), z.any()),
 	capabilities: z.array(z.string()).optional(),