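// Unit tests for the OpenAIModel provider: API key resolution in the constructor,
// config handling via getConfig/updateConfig, and the currently stubbed stream() method.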
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'
import OpenAI from 'openai'
import { OpenAIModel } from '../openai'
import type { Message } from '../../types/messages'

// Mock the OpenAI SDK
vi.mock('openai', () => {
  const mockConstructor = vi.fn().mockImplementation(() => ({}))
  return {
    default: mockConstructor,
  }
})
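// The factory above replaces the SDK's default export with a vi.fn(), so the
// `OpenAI` import in this file resolves to the mock constructor. Tests can
// therefore assert on the options it was constructed with
// (expect(OpenAI).toHaveBeenCalledWith(...)) without creating a real client.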

describe('OpenAIModel', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Set default env var for most tests using Vitest's stubEnv
    vi.stubEnv('OPENAI_API_KEY', 'sk-test-env')
  })

  afterEach(() => {
    vi.clearAllMocks()
    // Restore all environment variables to their original state
    vi.unstubAllEnvs()
  })

  describe('constructor', () => {
    it('creates an instance with required modelId', () => {
      const provider = new OpenAIModel({ modelId: 'gpt-4o', apiKey: 'sk-test' })
      const config = provider.getConfig()
      expect(config.modelId).toBe('gpt-4o')
    })

    it('uses custom model ID', () => {
      const customModelId = 'gpt-3.5-turbo'
      const provider = new OpenAIModel({ modelId: customModelId, apiKey: 'sk-test' })
      expect(provider.getConfig()).toStrictEqual({
        modelId: customModelId,
      })
    })

    it('uses API key from constructor parameter', () => {
      const apiKey = 'sk-explicit'
      new OpenAIModel({ modelId: 'gpt-4o', apiKey })
      expect(OpenAI).toHaveBeenCalledWith(
        expect.objectContaining({
          apiKey: apiKey,
        })
      )
    })

    it('uses API key from environment variable', () => {
      vi.stubEnv('OPENAI_API_KEY', 'sk-from-env')
      new OpenAIModel({ modelId: 'gpt-4o' })
      // Only assert that a client was constructed; the key is expected to be
      // resolved from OPENAI_API_KEY rather than passed explicitly here
      expect(OpenAI).toHaveBeenCalled()
    })

    it('explicit API key takes precedence over environment variable', () => {
      vi.stubEnv('OPENAI_API_KEY', 'sk-from-env')
      const explicitKey = 'sk-explicit'
      new OpenAIModel({ modelId: 'gpt-4o', apiKey: explicitKey })
      expect(OpenAI).toHaveBeenCalledWith(
        expect.objectContaining({
          apiKey: explicitKey,
        })
      )
    })

    it('throws error when no API key is available', () => {
      vi.stubEnv('OPENAI_API_KEY', '')
      expect(() => new OpenAIModel({ modelId: 'gpt-4o' })).toThrow(
        "OpenAI API key is required. Provide it via the 'apiKey' option or set the OPENAI_API_KEY environment variable."
      )
    })

    it('uses custom client configuration', () => {
      const timeout = 30000
      new OpenAIModel({ modelId: 'gpt-4o', apiKey: 'sk-test', clientConfig: { timeout } })
      expect(OpenAI).toHaveBeenCalledWith(
        expect.objectContaining({
          timeout: timeout,
        })
      )
    })

    it('uses provided client instance', () => {
      vi.clearAllMocks()
      const mockClient = {} as OpenAI
      const provider = new OpenAIModel({ modelId: 'gpt-4o', client: mockClient })
      // Should not create a new OpenAI client
      expect(OpenAI).not.toHaveBeenCalled()
      expect(provider).toBeDefined()
    })

    it('provided client takes precedence over apiKey and clientConfig', () => {
      vi.clearAllMocks()
      const mockClient = {} as OpenAI
      new OpenAIModel({
        modelId: 'gpt-4o',
        apiKey: 'sk-test',
        client: mockClient,
        clientConfig: { timeout: 30000 },
      })
      // Should not create a new OpenAI client when client is provided
      expect(OpenAI).not.toHaveBeenCalled()
    })

    it('does not require API key when client is provided', () => {
      vi.clearAllMocks()
      vi.stubEnv('OPENAI_API_KEY', '')
      const mockClient = {} as OpenAI
      expect(() => new OpenAIModel({ modelId: 'gpt-4o', client: mockClient })).not.toThrow()
    })
  })

  describe('updateConfig', () => {
    it('merges new config with existing config', () => {
      const provider = new OpenAIModel({ modelId: 'gpt-4o', apiKey: 'sk-test', temperature: 0.5 })
      provider.updateConfig({ modelId: 'gpt-4o', temperature: 0.8, maxTokens: 2048 })
      expect(provider.getConfig()).toStrictEqual({
        modelId: 'gpt-4o',
        temperature: 0.8,
        maxTokens: 2048,
      })
    })

    it('preserves fields not included in the update', () => {
      const provider = new OpenAIModel({
        apiKey: 'sk-test',
        modelId: 'gpt-3.5-turbo',
        temperature: 0.5,
        maxTokens: 1024,
      })
      provider.updateConfig({ modelId: 'gpt-3.5-turbo', temperature: 0.8 })
      expect(provider.getConfig()).toStrictEqual({
        modelId: 'gpt-3.5-turbo',
        temperature: 0.8,
        maxTokens: 1024,
      })
    })
  })

  describe('getConfig', () => {
    it('returns the current configuration', () => {
      const provider = new OpenAIModel({
        modelId: 'gpt-4o',
        apiKey: 'sk-test',
        maxTokens: 1024,
        temperature: 0.7,
      })
      expect(provider.getConfig()).toStrictEqual({
        modelId: 'gpt-4o',
        maxTokens: 1024,
        temperature: 0.7,
      })
    })
  })

  describe('stream', () => {
    it('throws not yet implemented error', async () => {
      const provider = new OpenAIModel({ modelId: 'gpt-4o' })
      const messages: Message[] = [{ role: 'user', content: [{ type: 'textBlock', text: 'Hello' }] }]

      await expect(async () => {
        // Async generators are lazy, so the error only surfaces once we start consuming events
        // eslint-disable-next-line @typescript-eslint/no-unused-vars
        for await (const _event of provider.stream(messages)) {
          // Should not reach here
        }
      }).rejects.toThrow('Not yet implemented - will be completed in Task 04.2')
    })
  })
})