import { describe, expect, it, vi } from 'vitest';

import { DEFAULT_OPENAI_BASE_URL, PROVIDER_OPENAI } from './constants.js';
import { createOpenAIProvider } from './openai.js';
import { createProvider } from './index.js';

| 7 | +const buildFetchMock = (responsePayload, options = {}) => { |
| 8 | + const ok = options.ok ?? true; |
| 9 | + return vi.fn(async () => ({ |
| 10 | + ok, |
| 11 | + status: options.status ?? (ok ? 200 : 400), |
| 12 | + async json() { |
| 13 | + return responsePayload; |
| 14 | + } |
| 15 | + })); |
| 16 | +}; |
| 17 | + |
| 18 | +describe('OpenAI provider', () => { |
| 19 | + it('requires an API key', () => { |
| 20 | + expect(() => createOpenAIProvider()).toThrow(/api key/i); |
| 21 | + }); |
| 22 | + |
| 23 | + it('requires a fetch implementation when global fetch is unavailable', () => { |
| 24 | + const originalFetch = globalThis.fetch; |
| 25 | + try { |
| 26 | + // eslint-disable-next-line no-global-assign |
| 27 | + globalThis.fetch = undefined; |
| 28 | + expect(() => createOpenAIProvider({ apiKey: 'test-key' })).toThrow(/fetch/i); |
| 29 | + } finally { |
| 30 | + // eslint-disable-next-line no-global-assign |
| 31 | + globalThis.fetch = originalFetch; |
| 32 | + } |
| 33 | + }); |
| 34 | + |
| 35 | + it('calls the chat completions endpoint with provided payload', async () => { |
| 36 | + const fetch = buildFetchMock({ |
| 37 | + id: 'chatcmpl-123', |
| 38 | + model: 'gpt-test', |
| 39 | + choices: [ |
| 40 | + { |
| 41 | + index: 0, |
| 42 | + message: { role: 'assistant', content: 'Hello world' } |
| 43 | + } |
| 44 | + ], |
| 45 | + usage: { total_tokens: 10 } |
| 46 | + }); |
| 47 | + |
| 48 | + const provider = createOpenAIProvider({ |
| 49 | + apiKey: 'key', |
| 50 | + fetch |
| 51 | + }); |
| 52 | + |
| 53 | + const result = await provider.chatCompletion({ |
| 54 | + messages: [{ role: 'user', content: 'Hi' }], |
| 55 | + model: 'gpt-test', |
| 56 | + temperature: 0.2, |
| 57 | + maxTokens: 100 |
| 58 | + }); |
| 59 | + |
| 60 | + expect(fetch).toHaveBeenCalledTimes(1); |
| 61 | + const [url, request] = fetch.mock.calls[0]; |
| 62 | + expect(url).toBe(`${DEFAULT_OPENAI_BASE_URL}/chat/completions`); |
| 63 | + const body = JSON.parse(request.body); |
| 64 | + expect(body.model).toBe('gpt-test'); |
| 65 | + expect(body.temperature).toBe(0.2); |
| 66 | + expect(body.max_tokens).toBe(100); |
| 67 | + |
| 68 | + expect(result.content).toBe('Hello world'); |
| 69 | + expect(result.model).toBe('gpt-test'); |
| 70 | + expect(result.usage).toEqual({ total_tokens: 10 }); |
| 71 | + }); |
| 72 | + |
| 73 | + it('surfaces API errors with response payload details', async () => { |
| 74 | + const errorPayload = { |
| 75 | + error: { |
| 76 | + message: 'Invalid request' |
| 77 | + } |
| 78 | + }; |
| 79 | + const fetch = buildFetchMock(errorPayload, { ok: false, status: 400 }); |
| 80 | + |
| 81 | + const provider = createOpenAIProvider({ |
| 82 | + apiKey: 'key', |
| 83 | + fetch |
| 84 | + }); |
| 85 | + |
| 86 | + await expect( |
| 87 | + provider.chatCompletion({ messages: [{ role: 'user', content: 'hi' }] }) |
| 88 | + ).rejects.toMatchObject({ |
| 89 | + message: 'Invalid request', |
| 90 | + status: 400, |
| 91 | + payload: errorPayload |
| 92 | + }); |
| 93 | + }); |
| 94 | +}); |
| 95 | + |
| 96 | +describe('providers registry', () => { |
| 97 | + it('creates providers via registry defaults', () => { |
| 98 | + const fetch = buildFetchMock({ |
| 99 | + choices: [{ message: { content: 'ok' } }] |
| 100 | + }); |
| 101 | + |
| 102 | + const provider = createProvider({ |
| 103 | + apiKey: 'key', |
| 104 | + fetch |
| 105 | + }); |
| 106 | + |
| 107 | + expect(provider.id).toBe(PROVIDER_OPENAI); |
| 108 | + }); |
| 109 | + |
| 110 | + it('throws for unknown providers', () => { |
| 111 | + expect(() => |
| 112 | + createProvider({ provider: 'unknown', apiKey: 'key' }) |
| 113 | + ).toThrow(/unknown provider/i); |
| 114 | + }); |
| 115 | +}); |