-
Notifications
You must be signed in to change notification settings - Fork 32
Expand file tree
/
Copy pathcohere.ts
More file actions
85 lines (79 loc) · 2.39 KB
/
cohere.ts
File metadata and controls
85 lines (79 loc) · 2.39 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import OpenAI from "openai";
import { LLMHandle, LLMToolResult, ToolDefinition } from "./types";
import {
createOpenAICompatibleTools,
parseOpenAICompatibleResponse,
} from "./utils.js";
/**
 * Optional generation parameters. The whole object is spread verbatim into
 * each chat-completions request body, so every field is passed through to
 * Cohere's OpenAI-compatible Chat API unchanged.
 */
export type CohereAIOptions = {
  // Sampling temperature.
  temperature?: number;
  // Maximum number of tokens to generate.
  max_tokens?: number;
  // Sequences that stop generation when produced.
  stop_sequences?: string[];
  // Seed for best-effort deterministic sampling.
  seed?: number;
  frequency_penalty?: number;
  presence_penalty?: number;
  // NOTE(review): presumably Cohere's top-k / top-p sampling parameters —
  // confirm against https://docs.cohere.com/reference/chat
  k?: number;
  p?: number;
};
/**
 * Configuration for {@link llmCohere}.
 */
export type CohereConfig = {
  // Cohere API key, forwarded to the underlying OpenAI-SDK client.
  apiKey: string;
  // Model identifier (e.g. 'command-a-03-2025'). Required — llmCohere throws
  // if this is missing.
  model: string;
  // Override for the API endpoint; defaults to Cohere's OpenAI Compatibility
  // API ("https://api.cohere.ai/compatibility/v1") when omitted.
  baseUrl?: string;
  // Extra generation parameters spread into every request.
  options?: CohereAIOptions;
};
/**
 * Creates an {@link LLMHandle} backed by a Cohere model, accessed through
 * Cohere's OpenAI Compatibility API via the OpenAI SDK.
 *
 * @param cfg - API key, model name, optional endpoint and generation options.
 * @returns A handle exposing `gen`, `genWithTools`, and `genStream`.
 * @throws Error if `cfg.model` is missing or empty.
 */
export function llmCohere(cfg: CohereConfig): LLMHandle {
  if (!cfg.model) {
    throw new Error(
      "llmCohere: Missing required 'model' parameter. " +
        "Please specify which Cohere model to use. " +
        // Fixed: the usage example was missing its closing parenthesis.
        "Example: llmCohere({ apiKey: '<YOUR-API-KEY>', model: 'command-a-03-2025' })"
    );
  }
  const model = cfg.model;
  const id = `Cohere-${model}`;
  // Default to Cohere's OpenAI Compatibility API endpoint.
  // Use a local instead of assigning back onto cfg — the original mutated
  // the caller's config object.
  // see: https://docs.cohere.com/reference/chat
  const baseURL = cfg.baseUrl ?? "https://api.cohere.ai/compatibility/v1";
  const client = new OpenAI({ apiKey: cfg.apiKey, baseURL });
  const options = cfg.options ?? {};
  return {
    id,
    client,
    model,
    // Single-turn text generation; returns "" when no content is present.
    gen: async (prompt: string) => {
      const r = await client.chat.completions.create({
        model,
        messages: [{ role: "user", content: prompt }],
        ...options,
      });
      return r.choices?.[0]?.message?.content ?? "";
    },
    // Generation with tool calling; tool names are mapped to an
    // OpenAI-compatible form and restored via nameMap when parsing.
    genWithTools: async (
      prompt: string,
      tools: ToolDefinition[]
    ): Promise<LLMToolResult> => {
      const { nameMap, formattedTools } = createOpenAICompatibleTools(tools);
      const r = await client.chat.completions.create({
        model,
        messages: [{ role: "user", content: prompt }],
        tools: formattedTools,
        tool_choice: "auto",
        ...options,
      });
      const message = r.choices?.[0]?.message;
      return parseOpenAICompatibleResponse(message, nameMap);
    },
    // Streaming generation: yields non-empty content deltas as they arrive.
    genStream: async function* (prompt: string) {
      const stream = await client.chat.completions.create({
        model,
        messages: [{ role: "user", content: prompt }],
        stream: true,
        ...options,
      });
      // NOTE(review): 'as any' retained from original — the SDK's
      // stream/non-stream overloads make the return type non-iterable here.
      for await (const chunk of stream as any) {
        const delta = chunk?.choices?.[0]?.delta?.content;
        if (typeof delta === "string" && delta.length > 0) {
          yield delta;
        }
      }
    },
  };
}