
Commit 700618d

stamp: cleaning & polishing

1 parent bb32436

File tree

5 files changed: +23 -27 lines

ext/ai/js/ai.ts

Lines changed: 0 additions & 2 deletions

@@ -84,7 +84,6 @@ export class Session<T extends SessionType> {
     }
   }

-  // /** @param {string | object} prompt Either a String (ollama) or an OpenAI chat completion body object (openaicompatible): https://platform.openai.com/docs/api-reference/chat/create */
   async run<O extends SessionInputOptions<T>>(
     input: SessionInput<T>,
     options: O,
@@ -117,7 +116,6 @@ export class Session<T extends SessionType> {
     const normalize = opts.normalize ?? true;

     const result = await core.ops.op_ai_run_model(
-      // @ts-ignore
       this.#model,
       prompt,
       mean_pool,
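
For context, `Session.run` is the public entry point that ends in `op_ai_run_model`, and the `mean_pool` and `normalize` options seen in this hunk surface directly in user code. A minimal sketch, assuming the Supabase Edge Runtime embeddings API (`Supabase.ai.Session` with the `gte-small` model):

// Inside a Supabase Edge Function (Deno); `Supabase.ai` is a runtime global.
const session = new Supabase.ai.Session("gte-small");

Deno.serve(async (req: Request) => {
  const { input } = await req.json();
  // `mean_pool` and `normalize` map onto the options normalized in this hunk.
  const embedding = await session.run(input, {
    mean_pool: true,
    normalize: true,
  });
  return Response.json({ embedding });
});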

ext/ai/js/llm/llm_session.ts

Lines changed: 0 additions & 5 deletions

@@ -1,9 +1,6 @@
 import { OllamaLLMSession } from "./providers/ollama.ts";
 import { OpenAILLMSession } from "./providers/openai.ts";

-// @ts-ignore deno_core environment
-const core = globalThis.Deno.core;
-
 export type LLMRunInput = {
   /**
    * Stream response from model. Applies only for LLMs like `mistral` (default: false)
@@ -45,8 +42,6 @@ export interface ILLMProviderOutput<T = object> {
 }

 export interface ILLMProvider {
-  // TODO:(kallebysantos) remove 'any'
-  // TODO: (kallebysantos) standardised output format
   getStream(
     input: ILLMProviderInput,
     signal: AbortSignal,
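
Both removed TODOs are resolved elsewhere in this commit: the `any` generator becomes `AsyncGenerator<OpenAIResponse>`, and providers share the `ILLMProviderOutput` shape. A consumer-side sketch of the typed interface, with signatures abridged (`string` stands in for `ILLMProviderInput`, and the return type is an assumption):

// Hypothetical reduction of the interfaces in this file.
interface ILLMProviderOutput<T = object> {
  value?: string; // standardised text chunk
  inner: T; // raw provider message
}

interface ILLMProvider {
  getStream(
    input: string,
    signal: AbortSignal,
  ): Promise<AsyncGenerator<ILLMProviderOutput>>;
}

// Because the generator is typed, consumers read `output.value` without casts.
async function collect(provider: ILLMProvider, prompt: string): Promise<string> {
  const stream = await provider.getStream(prompt, new AbortController().signal);
  let text = "";
  for await (const output of stream) {
    text += output.value ?? "";
  }
  return text;
}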

ext/ai/js/llm/providers/ollama.ts

Lines changed: 1 addition & 1 deletion

@@ -45,8 +45,8 @@ export class OllamaLLMSession implements ILLMProvider, ILLMProviderMeta {
       true,
     ) as AsyncGenerator<OllamaMessage>;

+    // NOTE:(kallebysantos) we need to clone the lambda parser to avoid `this` conflicts inside the local function*
     const parser = this.parse;
-
     const stream = async function* () {
       for await (const message of generator) {
         if ("error" in message) {

ext/ai/js/llm/providers/openai.ts

Lines changed: 14 additions & 11 deletions

@@ -11,10 +11,13 @@ export type OpenAIProviderOptions = ILLMProviderOptions & {
   apiKey?: string;
 };

+// NOTE:(kallebysantos) we define all types here for better development as well avoid `"npm:openai"` import
 // TODO:(kallebysantos) need to double check theses AI generated types
 export type OpenAIRequest = {
   model: string;
   messages: {
+    // NOTE:(kallebysantos) using role as union type is great for intellisense suggestions
+    // but at same time it forces users to `{} satisfies Supabase.ai.OpenAICompatibleInput`
     role: "system" | "user" | "assistant" | "tool";
     content: string;
     name?: string;
@@ -39,7 +42,7 @@ export type OpenAIRequest = {
     function: {
       name: string;
       description?: string;
-      parameters: any; // Can be refined based on your function definition
+      parameters: unknown;
     };
   }[];
   tool_choice?: "none" | "auto" | {
@@ -53,14 +56,14 @@ export type OpenAIResponseUsage = {
   completion_tokens: number;
   total_tokens: number;
   prompt_tokens_details: {
-    cached_tokens: 0;
-    audio_tokens: 0;
+    cached_tokens: number;
+    audio_tokens: number;
   };
   completion_tokens_details: {
-    reasoning_tokens: 0;
-    audio_tokens: 0;
-    accepted_prediction_tokens: 0;
-    rejected_prediction_tokens: 0;
+    reasoning_tokens: number;
+    audio_tokens: number;
+    accepted_prediction_tokens: number;
+    rejected_prediction_tokens: number;
   };
 };

@@ -117,12 +120,12 @@ export class OpenAILLMSession implements ILLMProvider, ILLMProviderMeta {
       prompt,
       signal,
       true,
-    ) as AsyncGenerator<any>; // TODO:(kallebysantos) remove any
+    ) as AsyncGenerator<OpenAIResponse>;

+    // NOTE:(kallebysantos) we need to clone the lambda parser to avoid `this` conflicts inside the local function*
     const parser = this.parse;
     const stream = async function* () {
       for await (const message of generator) {
-        // TODO:(kallebysantos) Simplify duplicated code for stream error checking
         if ("error" in message) {
           if (message.error instanceof Error) {
             throw message.error;
@@ -176,7 +179,7 @@ export class OpenAILLMSession implements ILLMProvider, ILLMProviderMeta {
           value: message.choices.at(0)?.message.content ?? undefined,
           inner: message,
           usage: {
-            // Usage maybe 'null' while streaming, but the final message will include it
+            // NOTE:(kallebysantos) usage maybe 'null' while streaming, but the final message will include it
             inputTokens: usage?.prompt_tokens ?? 0,
             outputTokens: usage?.completion_tokens ?? 0,
             totalTokens: usage?.total_tokens ?? 0,
@@ -218,7 +221,7 @@ export class OpenAILLMSession implements ILLMProvider, ILLMProviderMeta {
     }

     if (stream) {
-      return parseJSONOverEventStream(res.body, signal);
+      return parseJSONOverEventStream<OpenAIResponse>(res.body, signal);
     }

     const result: OpenAIResponse = await res.json();
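
Two of these type changes deserve a quick illustration: `parameters: any` becoming `unknown` keeps the field permissive at construction time but forces narrowing before use, and the literal-union `role` is what motivates the `satisfies` note above. A sketch over an abridged `OpenAIRequest` (the `isRecord` helper and the abridged type are hypothetical):

type OpenAIRequestAbridged = {
  model: string;
  messages: { role: "system" | "user" | "assistant" | "tool"; content: string }[];
};

// `satisfies` checks the literal union without widening `role` to `string`.
const req = {
  model: "gpt-4o-mini",
  messages: [{ role: "user", content: "Hello" }],
} satisfies OpenAIRequestAbridged;

// Hypothetical narrowing helper for the `parameters: unknown` field.
function isRecord(v: unknown): v is Record<string, unknown> {
  return typeof v === "object" && v !== null;
}

function toolArgNames(parameters: unknown): string[] {
  // With `any`, `parameters.properties` would compile unchecked;
  // with `unknown`, the compiler demands this runtime guard first.
  if (isRecord(parameters) && isRecord(parameters.properties)) {
    return Object.keys(parameters.properties);
  }
  return [];
}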

ext/ai/js/llm/utils/json_parser.ts

Lines changed: 8 additions & 8 deletions

@@ -1,14 +1,14 @@
-import EventSourceStream from './event_source_stream.mjs';
+import EventSourceStream from "./event_source_stream.mjs";

 // Adapted from https://github.com/ollama/ollama-js/blob/6a4bfe3ab033f611639dfe4249bdd6b9b19c7256/src/utils.ts#L262
 // TODO:(kallebysantos) need to simplify it
 export async function* parseJSON<T extends object>(
   itr: ReadableStream<Uint8Array>,
   signal: AbortSignal,
 ) {
-  let buffer = '';
+  let buffer = "";

-  const decoder = new TextDecoder('utf-8');
+  const decoder = new TextDecoder("utf-8");
   const reader = itr.getReader();

   while (true) {
@@ -27,9 +27,9 @@ export async function* parseJSON<T extends object>(

     buffer += decoder.decode(value);

-    const parts = buffer.split('\n');
+    const parts = buffer.split("\n");

-    buffer = parts.pop() ?? '';
+    buffer = parts.pop() ?? "";

     for (const part of parts) {
       yield JSON.parse(part) as T;
@@ -39,7 +39,7 @@ export async function* parseJSON<T extends object>(
     }
   }

-  for (const part of buffer.split('\n').filter((p) => p !== '')) {
+  for (const part of buffer.split("\n").filter((p) => p !== "")) {
     try {
       yield JSON.parse(part) as T;
     } catch (error) {
@@ -49,7 +49,7 @@ export async function* parseJSON<T extends object>(
   }
 }

 // TODO:(kallebysantos) need to simplify it
-export async function* parseJSONOverEventStream(
+export async function* parseJSONOverEventStream<T extends object>(
   itr: ReadableStream<Uint8Array>,
   signal: AbortSignal,
 ) {
@@ -74,7 +74,7 @@ export async function* parseJSONOverEventStream(
         break;
       }

-      yield JSON.parse(value.data);
+      yield JSON.parse(value.data) as T;
     } catch (error) {
       yield { error };
     }
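
The new type parameter lets callers pin the element type at the call site, as the OpenAI provider now does, instead of casting each chunk afterwards. Note that parse failures are yielded as `{ error }` values rather than thrown. A usage sketch with an abridged response type (the import path and `OpenAIResponseAbridged` are assumptions):

import { parseJSONOverEventStream } from "./json_parser.ts";

type OpenAIResponseAbridged = {
  choices: { message: { content: string } }[];
};

async function readAll(body: ReadableStream<Uint8Array>, signal: AbortSignal) {
  for await (
    const chunk of parseJSONOverEventStream<OpenAIResponseAbridged>(body, signal)
  ) {
    // The generator yields `T | { error }`, so narrow before use.
    if ("error" in chunk) throw chunk.error;
    console.log(chunk.choices.at(0)?.message.content);
  }
}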
