Skip to content

Commit 3f41129

Browse files
committed
feat: alternative LLMs, incl. anthropic
Signed-off-by: Mike Ralphson <[email protected]>
1 parent fef1bc8 commit 3f41129

File tree

5 files changed

+127
-59
lines changed

5 files changed

+127
-59
lines changed

README.md

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# 🕵️🔗 BingChain
22

3-
This is an evolution of [langchain-mini](https://github.com/ColinEberhardt/langchain-mini), a very simple re-implementation of [LangChain](https://github.com/hwchase17/langchain), in ~300 lines of core code. In essence, it is a multi-model LLM-powered chat application that is able to use tools (Microsoft **Bing** search, URL retrieval, API plugin installation, API calls, a Javascript sandbox, JsFiddle creation, image and video preview, and a scientific calculator, as well as meta-tools such as `list`, `disable`, `reset` and `debug`) in order to build a **chain** of thought to hold conversations and answer questions.
3+
This is an evolution of [langchain-mini](https://github.com/ColinEberhardt/langchain-mini), a very simple re-implementation of [LangChain](https://github.com/hwchase17/langchain), in ~350 lines of core code. In essence, it is a multi-model LLM-powered chat application that is able to use tools (Microsoft **Bing** search, URL retrieval, API plugin installation, API calls, a Javascript sandbox, JsFiddle creation, image and video preview, and a scientific calculator, as well as meta-tools such as `list`, `disable`, `reset` and `debug`) in order to build a **chain** of thought to hold conversations and answer questions.
44

55
Here's an example:
66

@@ -44,6 +44,8 @@ GUI=1
4444
#PROMPT_OVERRIDE=Riddle me this! ${question}
4545
```
4646

47+
You can also set `PROVIDER=anthropic` (with a relevant `ANTHROPIC_API_KEY`, `MODEL` and `TOKEN_LIMIT`) to use an alternative LLM/API provider.
48+
4749
Set the token limit to the advertised limit of the model you are using, so 32768 for `gpt-4`, 4096 for `text-davinci-003` and 2048 for `text-curie-001`.
4850

4951
The clever part is the default initial prompt, which is held in [`prompt.txt`](https://raw.githubusercontent.com/postman-open-technologies/bingchain/main/prompt.txt), unless overridden by the `PROMPT_OVERRIDE` environment variable.

index.mjs

Lines changed: 59 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,9 @@ import https from "node:https";
66
import * as readline from "node:readline/promises";
77
import { stdin as input, stdout as output } from "node:process";
88

9+
import { openaiCompletion } from "./lib/openai.mjs";
10+
import { anthropicCompletion } from "./lib/anthropic.mjs";
11+
912
import Koa from 'koa';
1013
import serve from 'koa-static';
1114
import Router from 'koa-router';
@@ -45,9 +48,41 @@ setResponseLimit(RESPONSE_LIMIT);
4548
const agent = new https.Agent({ keepAlive: true, keepAliveMsecs: 120000, scheduling: 'lifo', family: 0, noDelay: false, zonread: { buffer: Buffer.alloc(RESPONSE_LIMIT * 2.75) } });
4649

4750
let completion = "";
51+
let partial = "";
52+
let carry = "{";
4853
let apiServer = "";
4954
let booting = true;
5055

56+
// Parse one streamed chunk (a fragment of the provider's SSE JSON) and
// append any extracted text to the module-level `completion` buffer.
// Incomplete JSON fragments are accumulated in `carry` until a complete
// object can be parsed. Returns the extracted text, or undefined when the
// chunk was partial or carried no text (e.g. a '["DONE"]' sentinel).
const extractText = (chunk) => {
  let json;
  try {
    json = JSON5.parse(`${carry}${chunk}}`);
    carry = "{";
    if (parseInt(debug(), 10) >= 3) console.log(`${colour.cyan}${chunk}${colour.normal}`);
  }
  catch (ex) {
    // Not yet parseable: stash the fragment and wait for the next chunk.
    carry += chunk;
    return;
  }
  let text;
  // Guard with optional chaining: an event with an empty `choices` array
  // previously threw an uncaught TypeError here.
  const choice = json.data?.choices?.[0];
  if (choice) {
    // OpenAI: chat models stream `delta.content`; completion models stream `text`.
    text = choice.delta ? choice.delta.content : choice.text;
  }
  else if (json.data?.stop && json.data?.completion) {
    // Anthropic-style event: text arrives in `completion` alongside `stop`.
    text = json.data.completion;
  }
  if (text) {
    if (!booting) process.stdout.write(text);
    completion += text;
  }
  return text;
}
85+
5186
const app = new Koa();
5287
app.use(serve('.'));
5388
router.get('/', '/', (ctx) => {
@@ -87,25 +122,11 @@ const consume = async (value, chunkNo) => {
87122
for (let chunk of chunks) {
88123
if (booting && chunkNo % 20 === 1) process.stdout.write('.')
89124
chunk = chunk.replaceAll('[DONE]', '["DONE"]');
90-
let json = {};
91-
try {
92-
if (parseInt(debug(),10) >= 3) console.log(`${colour.cyan}${chunk}${colour.normal}`);
93-
json = JSON5.parse(`{${chunk}}`)?.data?.choices?.[0];
94-
const text = clean((json && json.delta ? json.delta.content : json?.text) || '');
95-
if (!booting) process.stdout.write(text);
96-
completion += text;
97-
}
98-
catch (ex) {
99-
if (json.error) {
100-
return json.error;
101-
}
102-
return `(Stutter: ${ex.message})`;
103-
}
125+
extractText(chunk);
104126
}
105127
}
106128

107129
async function fetchStream(url, options) {
108-
completion = "";
109130
let chunkNo = 0;
110131
let response = { ok: false, status: 418 }
111132
try {
@@ -118,14 +139,18 @@ async function fetchStream(url, options) {
118139
process.stdout.write(`${colour.red}`);
119140
let text = await response.text();
120141
try {
121-
let json = JSON5.parse(text);
142+
let json = JSON5.parse(partial+text);
122143
if (json.error && json.error.message) {
123-
completion = json.error.message;
144+
completion += json.error.message;
124145
return text;
125146
}
147+
else {
148+
partial = "";
149+
}
150+
}
151+
catch (ex) {
152+
partial = text;
126153
}
127-
catch (ex) {}
128-
completion = text;
129154
return text;
130155
}
131156
const reader = response.body.getReader();
@@ -156,44 +181,20 @@ async function fetchStream(url, options) {
156181
return text;
157182
}
158183

159-
// use the given model to complete a given prompts
160-
const completePrompt = async (prompt) => {
161-
let res = { ok: false, status: 418 };
162-
const timeout = "I took too long thinking about that.";
163-
const body = {
164-
model: MODEL,
165-
max_tokens: RESPONSE_LIMIT,
166-
temperature: TEMPERATURE,
167-
stream: true,
168-
user: 'BingChain',
169-
//frequency_penalty: 0.25,
170-
n: 1,
171-
stop: ["Observation:", "Question:"]
172-
};
173-
if (MODEL.startsWith('text')) {
174-
body.prompt = prompt;
184+
// Ask the configured LLM provider to complete `prompt`, streaming the
// output into the module-level `completion` buffer, and return the
// cleaned result. Provider selection is driven by the PROVIDER env var.
const getCompletion = async (prompt) => {
  process.stdout.write(colour.grey);
  completion = "";
  const complete = process.env.PROVIDER === "anthropic"
    ? anthropicCompletion
    : openaiCompletion;
  await complete(prompt, fetchStream, agent);
  // Ensure the accumulated text ends with a newline before cleaning.
  if (!completion.endsWith('\n\n')) completion += '\n';
  return clean(completion);
}
197198

198199
const answerQuestion = async (question) => {
199200
// construct the prompt, with our question and the tools that the chain can use
@@ -214,7 +215,7 @@ const answerQuestion = async (question) => {
214215

215216
// allow the LLM to iterate until it finds a final answer
216217
while (true) {
217-
const response = await completePrompt(prompt);
218+
const response = await getCompletion(prompt);
218219

219220
// add this to the prompt
220221
prompt += response;
@@ -231,12 +232,15 @@ const answerQuestion = async (question) => {
231232
actionInput = actionInput.replace(/```.+/gi, "```");
232233
actionInput = actionInput.split("```")[1];
233234
}
234-
else if (actionInput.indexOf(')()') >= 0) {
235+
if (actionInput.indexOf(')()') >= 0) {
235236
actionInput = actionInput.split(')()')[0]+')()'.trim();
236237
}
237-
else if (actionInput.indexOf('```') >= 0) {
238+
if (actionInput.indexOf('```') >= 0) {
238239
actionInput = actionInput.split('```\n')[0].trim();
239240
}
241+
else if (actionInput.startsWith('|')) {
242+
actionInput = actionInput.substring(1).trim();
243+
}
240244
else {
241245
actionInput = actionInput.split('\n\n')[0].trim();
242246
}
@@ -266,7 +270,7 @@ const mergeHistory = async (question, history) => {
266270
const prompt = mergeTemplate
267271
.replace("${question}", question)
268272
.replace("${history}", history);
269-
return await completePrompt(prompt);
273+
return await getCompletion(prompt);
270274
};
271275

272276
process.stdout.write(`${colour.cyan}Initialising built-in tools: `);

lib/anthropic.mjs

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
// Stream a completion for `prompt` from the Anthropic text-completion API.
// `fetchStream` performs the streaming POST and accumulates the response;
// `agent` is the shared keep-alive https agent passed through to it.
// Required environment: ANTHROPIC_API_KEY, MODEL, RESPONSE_LIMIT, TEMPERATURE.
export const anthropicCompletion = async (prompt, fetchStream, agent) => {
  const body = {
    prompt,
    model: process.env.MODEL,
    // The API expects an integer; the raw env-var value is a string.
    max_tokens_to_sample: parseInt(process.env.RESPONSE_LIMIT, 10),
    stream: true,
    temperature: parseFloat(process.env.TEMPERATURE),
    //top_p: TOP_P,
    //top_k: TOP_K,
    metadata: {
      user_id: 'BingChain',
    },
    stop_sequences: ["Observation:", "Question:"]
  };

  return await fetchStream(`https://api.anthropic.com/v1/complete`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      // Pin the API version; required by Anthropic's versioned endpoints.
      "anthropic-version": "2023-06-01",
      "X-API-Key": process.env.ANTHROPIC_API_KEY
    },
    body: JSON.stringify(body),
    redirect: 'follow',
    agent
  });
};

lib/openai.mjs

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
// Stream a completion for `prompt` from the OpenAI API, using the legacy
// completions endpoint for `text-*` models and the chat endpoint otherwise.
// `fetchStream` performs the streaming POST and accumulates the response;
// `agent` is the shared keep-alive https agent passed through to it.
// Required environment: OPENAI_API_KEY, MODEL, RESPONSE_LIMIT, TEMPERATURE.
export const openaiCompletion = async (prompt, fetchStream, agent) => {
  // Hoist the model-family check so endpoint and payload stay in agreement.
  const isLegacy = process.env.MODEL.startsWith('text');
  const body = {
    model: process.env.MODEL,
    max_tokens: parseInt(process.env.RESPONSE_LIMIT, 10),
    temperature: parseFloat(process.env.TEMPERATURE),
    stream: true,
    user: 'BingChain',
    //frequency_penalty: 0.25,
    n: 1,
    stop: ["Observation:", "Question:"]
  };
  if (isLegacy) {
    body.prompt = prompt;
  }
  else {
    body.messages = [ { role: "system", content: "You are a helpful assistant who tries to answer all questions accurately and comprehensively." }, { role: "user", content: prompt } ];
  }

  const url = `https://api.openai.com/v1/${isLegacy ? '' : 'chat/'}completions`;
  return await fetchStream(url, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: "Bearer " + process.env.OPENAI_API_KEY
    },
    body: JSON.stringify(body),
    redirect: 'follow',
    agent
  });
};

lib/tools.mjs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,7 @@ const bingSearch = async (question) => {
139139
results += 'None found.';
140140
}
141141
results += '\nEnd of results.'
142-
if (debug()) console.log(`${colour.yellow}${results}${colour.normal}`)
142+
if (parseInt(debug(), 10) > 0) console.log(`${colour.yellow}${results}${colour.normal}`)
143143
else console.log(`${colour.cyan}Found ${hits} search results.${colour.normal}`);
144144
return results;
145145
});
@@ -472,7 +472,7 @@ const readRemotePdf = async (url, metadata) => {
472472
let res = { ok: false, status: 418 };
473473
let result = 'No results.';
474474
try {
475-
res = await fetch(url, { redirect: 'follow' });
475+
res = await fetch(url, { redirect: 'follow', headers: { 'Accept': 'application/pdf' } } );
476476
}
477477
catch (ex) {
478478
console.log(`${colour.red}${ex.message}${colour.normal}`);
@@ -505,7 +505,7 @@ const readRemoteDoc = async (url) => {
505505
let result = 'No results.';
506506
let res = { ok: false, status: 418 };
507507
try {
508-
res = await fetch(url, { redirect: 'follow' });
508+
res = await fetch(url, { headers: { 'Accept': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' }, redirect: 'follow' });
509509
}
510510
catch (ex) {
511511
console.log(`${colour.red}${ex.message}${colour.normal}`);

0 commit comments

Comments
 (0)