Skip to content

Commit 507db87

Browse files
committed
feat: add AI provider abstraction with OpenAI support
1 parent 4a9e6ed commit 507db87

File tree

7 files changed

+282
-5
lines changed

7 files changed

+282
-5
lines changed

TODO.md

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,10 +27,11 @@
2727
-**Day 8:** Assemble AI feature backlog with user stories; prioritise RAG, eval, fine-tuning governance; publish roadmap entries.
2828
-**Day 9:** Implement `frai rag index` command, vector store abstraction, and RAG helper modules; cover with CLI tests.
2929
-**Day 10:** Scaffold `frai eval` baseline metrics leveraging open-source models; ensure configurable thresholds and reporting.
30-
- **Day 11:** Draft fine-tuning governance spec (dataset audit, training hooks, bias evals); prototype config schema.
31-
- **Day 12:** Develop VS Code MCP endpoint + extension scaffold reading data from `frai-core`; document protocol usage.
32-
- **Day 13:** Author problem-solution playbooks (RAG compliance, LLM agent docs) in `/docs`; tie features to user pain points.
33-
- **Day 14:** Add opt-in telemetry, gather KPIs (usage of new commands/tests), and produce sprint retrospective summary.
30+
-**Day 11:** Implement AI provider abstraction (`frai-core/src/providers`) supporting OpenAI + future adapters with comprehensive test coverage.
31+
- **Day 12:** Draft fine-tuning governance spec (dataset audit, training hooks, bias evals); prototype config schema.
32+
- **Day 13:** Develop VS Code MCP endpoint + extension scaffold reading data from `frai-core`; document protocol usage.
33+
- **Day 14:** Author problem-solution playbooks (RAG compliance, LLM agent docs) in `/docs`; tie features to user pain points.
34+
- **Day 15:** Add opt-in telemetry, gather KPIs (usage of new commands/tests), and produce sprint retrospective summary.
3435

3536
## Workstreams (Weeks 3-6)
3637

docs/refactor-plan.md

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,12 @@
1717
- ✅ Extract questionnaire prompts, answer mapping, and validation into `frai-core/src/questionnaire`.
1818
- ✅ Relocate document templates/builders into `frai-core/src/documents` with override support.
1919
- ✅ Refactor scanning heuristics into `frai-core/src/scanners` with plugin registry pattern.
20-
- [ ] Implement AI provider abstraction (`frai-core/src/providers`) supporting OpenAI + future adapters.
20+
- ✅ Implement AI provider abstraction (`frai-core/src/providers`) supporting OpenAI + future adapters with comprehensive test coverage.
21+
- Provider registry system with factory pattern
22+
- OpenAI provider with chat completion API
23+
- Error handling and response parsing
24+
- Full test coverage with mocked fetch
25+
- Export integration in main index.js
2126

2227
### 3. AI Feature Delivery
2328
- ✅ Assemble AI feature backlog (Day 8) with prioritised user stories (RAG, eval, fine-tune).

packages/frai-core/src/index.js

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,3 +5,4 @@ export * as Scanners from './scanners/index.js';
55
export * as Rag from './rag/index.js';
66

77
export * as Eval from './eval/index.js';
8+
export * as Providers from './providers/index.js';
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
// Registry identifier for the built-in OpenAI provider (see providers/index.js).
export const PROVIDER_OPENAI = 'openai';

// Root URL of OpenAI's v1 REST API; callers may override per provider instance.
export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1';
// Chat model used when the caller does not pass an explicit `model` option.
export const DEFAULT_OPENAI_CHAT_MODEL = 'gpt-4.1-nano-2025-04-14';
Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
import { PROVIDER_OPENAI } from './constants.js';
2+
import { createOpenAIProvider } from './openai.js';
3+
4+
const registry = new Map([[PROVIDER_OPENAI, createOpenAIProvider]]);
5+
6+
export function registerProvider(id, factory) {
7+
if (!id || typeof id !== 'string') {
8+
throw new Error('Provider id must be a non-empty string');
9+
}
10+
if (typeof factory !== 'function') {
11+
throw new Error('Provider factory must be a function');
12+
}
13+
registry.set(id, factory);
14+
}
15+
16+
export function createProvider({ provider = PROVIDER_OPENAI, ...options } = {}) {
17+
const factory = registry.get(provider);
18+
if (!factory) {
19+
throw new Error(`Unknown provider "${provider}"`);
20+
}
21+
return factory(options);
22+
}
23+
24+
export { PROVIDER_OPENAI, createOpenAIProvider };
Lines changed: 127 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,127 @@
1+
import {
2+
DEFAULT_OPENAI_BASE_URL,
3+
DEFAULT_OPENAI_CHAT_MODEL,
4+
PROVIDER_OPENAI
5+
} from './constants.js';
6+
7+
// Resolve a usable fetch implementation: an explicitly supplied function wins,
// otherwise fall back to the global fetch (bound so `this` stays the global
// object). Returns null when neither is available.
const ensureFetch = (providedFetch) => {
  if (typeof providedFetch === 'function') {
    return providedFetch;
  }
  const globalFetch =
    typeof globalThis !== 'undefined' ? globalThis.fetch : undefined;
  return typeof globalFetch === 'function' ? globalFetch.bind(globalThis) : null;
};
16+
17+
// Assemble the HTTP headers for an OpenAI request. The bearer token is always
// present; the OpenAI-Organization header is attached only when an
// organization id was supplied.
const buildHeaders = ({ apiKey, organization }) => {
  const base = {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${apiKey}`
  };
  return organization ? { ...base, 'OpenAI-Organization': organization } : base;
};
27+
28+
// Build the JSON body for a chat-completion request. `messages` is mandatory;
// each tuning knob (temperature, maxTokens, responseFormat, seed) is included
// only when explicitly supplied, keeping the request body minimal. Note the
// camelCase -> snake_case mapping for maxTokens/responseFormat.
const buildChatPayload = ({
  messages,
  model = DEFAULT_OPENAI_CHAT_MODEL,
  temperature,
  maxTokens,
  responseFormat,
  seed
}) => {
  if (!Array.isArray(messages) || messages.length === 0) {
    throw new Error('messages array is required for chat completion');
  }

  // Spreading `false` is a no-op, so each optional key appears only when its
  // guard holds; key order matches the canonical request shape.
  return {
    model,
    messages,
    ...(typeof temperature === 'number' && { temperature }),
    ...(typeof maxTokens === 'number' && { max_tokens: maxTokens }),
    ...(responseFormat ? { response_format: responseFormat } : {}),
    ...(typeof seed === 'number' && { seed })
  };
};
63+
64+
// Normalize a successful chat-completion HTTP response into
// { content, model, usage, raw }. Only the first choice is surfaced; missing
// fields degrade to '' (content) or null (model/usage). `raw` keeps the full
// decoded body for callers that need more detail.
const parseChatResponse = async (response) => {
  const body = await response.json();
  const firstChoice = body?.choices?.[0];

  return {
    content: firstChoice?.message?.content ?? '',
    model: body.model ?? null,
    usage: body.usage ?? null,
    raw: body
  };
};
76+
77+
/**
 * Convert a non-OK HTTP response into an Error (returned, not thrown, so the
 * caller decides when to throw).
 *
 * On a JSON body, the API's own error message is used and the decoded payload
 * is attached as `error.payload`; on a non-JSON body a generic message is
 * produced. `error.status` always carries the HTTP status code.
 *
 * Fix: the original `catch (error)` binding was unused (discarding why the
 * body failed to parse); the parse failure is now preserved as `cause`.
 *
 * @param {Response} response - Failed fetch response (response.ok === false).
 * @returns {Promise<Error>} Error decorated with `status`, and `payload` when
 *   the body was valid JSON.
 */
const parseErrorResponse = async (response) => {
  try {
    const payload = await response.json();
    const message = payload?.error?.message ?? `OpenAI request failed with status ${response.status}`;
    const error = new Error(message);
    error.status = response.status;
    error.payload = payload;
    return error;
  } catch (cause) {
    // Body was not valid JSON — keep the parse failure for debugging.
    const generic = new Error(`OpenAI request failed with status ${response.status}`, { cause });
    generic.status = response.status;
    return generic;
  }
};
91+
92+
/**
 * Create an OpenAI chat provider.
 *
 * @param {object} [config]
 * @param {string} config.apiKey - Required OpenAI API key.
 * @param {string} [config.baseUrl] - API root; defaults to the public v1 endpoint.
 * @param {string} [config.organization] - Optional OpenAI organization id.
 * @param {Function} [config.fetch] - Fetch override (e.g. for tests); falls
 *   back to the global fetch when omitted.
 * @returns {{ id: string, chatCompletion: Function }} Provider instance.
 * @throws {Error} When apiKey is missing or no fetch implementation exists.
 */
export function createOpenAIProvider({
  apiKey,
  baseUrl = DEFAULT_OPENAI_BASE_URL,
  organization,
  fetch: providedFetch
} = {}) {
  if (!apiKey) {
    throw new Error('OpenAI provider requires an API key');
  }

  const fetchFn = ensureFetch(providedFetch);
  if (fetchFn === null) {
    throw new Error('OpenAI provider requires a fetch implementation');
  }

  // Headers are constant for the lifetime of the provider instance.
  const headers = buildHeaders({ apiKey, organization });

  // POST to /chat/completions; returns the normalized response on success and
  // throws a decorated Error (status/payload) on a non-OK status.
  const chatCompletion = async (options) => {
    const body = JSON.stringify(buildChatPayload(options ?? {}));
    const response = await fetchFn(`${baseUrl}/chat/completions`, {
      method: 'POST',
      headers,
      body,
      signal: options?.signal
    });

    if (response.ok) {
      return parseChatResponse(response);
    }
    throw await parseErrorResponse(response);
  };

  return {
    id: PROVIDER_OPENAI,
    chatCompletion
  };
}
Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,115 @@
1+
import { describe, expect, it, vi } from 'vitest';
2+
3+
import { DEFAULT_OPENAI_BASE_URL, PROVIDER_OPENAI } from './constants.js';
4+
import { createOpenAIProvider } from './openai.js';
5+
import { createProvider } from './index.js';
6+
7+
// Build a vi.fn() fetch stub that resolves to a minimal Response-like object
// (ok / status / json) wrapping the supplied payload. `options.ok` defaults to
// true; `options.status` defaults to 200 when ok and 400 otherwise.
const buildFetchMock = (responsePayload, options = {}) => {
  const ok = options.ok ?? true;
  const status = options.status ?? (ok ? 200 : 400);
  return vi.fn(async () => ({
    ok,
    status,
    json: async () => responsePayload
  }));
};
17+
18+
// Behavioral tests for the OpenAI provider factory and its chatCompletion method.
describe('OpenAI provider', () => {
  // Constructor guard: a missing API key must fail fast.
  it('requires an API key', () => {
    expect(() => createOpenAIProvider()).toThrow(/api key/i);
  });

  // Constructor guard: with no injected fetch AND no global fetch, the
  // factory must throw. The global is removed and restored in finally so the
  // mutation cannot leak into other tests.
  it('requires a fetch implementation when global fetch is unavailable', () => {
    const originalFetch = globalThis.fetch;
    try {
      // eslint-disable-next-line no-global-assign
      globalThis.fetch = undefined;
      expect(() => createOpenAIProvider({ apiKey: 'test-key' })).toThrow(/fetch/i);
    } finally {
      // eslint-disable-next-line no-global-assign
      globalThis.fetch = originalFetch;
    }
  });

  // Happy path: verifies the request URL, the camelCase -> snake_case payload
  // mapping (maxTokens -> max_tokens), and the normalized response shape
  // (content / model / usage).
  it('calls the chat completions endpoint with provided payload', async () => {
    const fetch = buildFetchMock({
      id: 'chatcmpl-123',
      model: 'gpt-test',
      choices: [
        {
          index: 0,
          message: { role: 'assistant', content: 'Hello world' }
        }
      ],
      usage: { total_tokens: 10 }
    });

    const provider = createOpenAIProvider({
      apiKey: 'key',
      fetch
    });

    const result = await provider.chatCompletion({
      messages: [{ role: 'user', content: 'Hi' }],
      model: 'gpt-test',
      temperature: 0.2,
      maxTokens: 100
    });

    expect(fetch).toHaveBeenCalledTimes(1);
    // Inspect the raw call the provider made to fetch.
    const [url, request] = fetch.mock.calls[0];
    expect(url).toBe(`${DEFAULT_OPENAI_BASE_URL}/chat/completions`);
    const body = JSON.parse(request.body);
    expect(body.model).toBe('gpt-test');
    expect(body.temperature).toBe(0.2);
    expect(body.max_tokens).toBe(100);

    expect(result.content).toBe('Hello world');
    expect(result.model).toBe('gpt-test');
    expect(result.usage).toEqual({ total_tokens: 10 });
  });

  // Error path: a non-OK response must reject with the API's own error
  // message, the HTTP status, and the decoded payload attached.
  it('surfaces API errors with response payload details', async () => {
    const errorPayload = {
      error: {
        message: 'Invalid request'
      }
    };
    const fetch = buildFetchMock(errorPayload, { ok: false, status: 400 });

    const provider = createOpenAIProvider({
      apiKey: 'key',
      fetch
    });

    await expect(
      provider.chatCompletion({ messages: [{ role: 'user', content: 'hi' }] })
    ).rejects.toMatchObject({
      message: 'Invalid request',
      status: 400,
      payload: errorPayload
    });
  });
});
95+
96+
// Tests for the provider registry entry point (createProvider).
describe('providers registry', () => {
  // Omitting `provider` must fall back to the pre-registered OpenAI adapter.
  it('creates providers via registry defaults', () => {
    const fetch = buildFetchMock({
      choices: [{ message: { content: 'ok' } }]
    });

    const provider = createProvider({
      apiKey: 'key',
      fetch
    });

    expect(provider.id).toBe(PROVIDER_OPENAI);
  });

  // An unregistered provider id must produce a descriptive error.
  it('throws for unknown providers', () => {
    expect(() =>
      createProvider({ provider: 'unknown', apiKey: 'key' })
    ).toThrow(/unknown provider/i);
  });
});

0 commit comments

Comments
 (0)