Skip to content

Commit 1009ced

Browse files
authored
Merge branch 'main' into docs-chatbot
2 parents 17a7c24 + 8cab7ca commit 1009ced

File tree

7 files changed

+134
-99
lines changed

7 files changed

+134
-99
lines changed

baseai/memory/docs/index.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ const memoryDocs = (): MemoryI => ({
77
enabled: true,
88
include: ['**/*.mdx'],
99
gitignore: true,
10-
deployedAt: '39f2778ad2dce348bb762a85f765c21453cec4fe',
10+
deployedAt: 'dd4e4714696856a7160aded10308b08304ca3739',
1111
embeddedAt: ''
1212
},
1313
documents: {

docs-chatbot.md

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
# How we use the Docs Chatbot
2+
3+
Contact the docs team for any more questions on this.
4+
5+
## To sync all the latest docs changes from the `main` branch, check out the `main` branch and run
6+
7+
```sh
8+
pnpm langbase-sync
9+
```
10+
11+
This will verify all the changes since the last sync, update these files, and then write the commit hash to the `baseai/memory/docs/index.ts` file, which you should commit to keep track.

docs/admin/deploy/docker-compose/upgrade.mdx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ To perform a multi-version upgrade via migrators [upgrade](/admin/updates/migrat
9393
> *Note: you may add the `--dry-run` flag to the `command:` to test things out before altering the dbs*
9494
3. Run migrator with `docker-compose up migrator`
9595

96-
- Migrator `depends_on:` will ensure the databases are ready before attempting to run the migrator. Ensuring that database entry point scripts are run before the migrator attempts to connect to the databases. For users upgrading from a version earlier than `5.10.0`, a PostgreSQL version is required and will be performed automatically here. For more details, see [Upgradeing PostgreSQL](/admin/postgresql#upgrading-postgresql).
96+
- Migrator `depends_on:` will ensure the databases are ready before attempting to run the migrator. Ensuring that database entry point scripts are run before the migrator attempts to connect to the databases. For users upgrading from a version earlier than `5.10.0`, a PostgreSQL version is required and will be performed automatically here. For more details, see [Upgrading PostgreSQL](/admin/postgresql#upgrading-postgresql).
9797

9898
**Example:**
9999
```sh

docs/code-search/code-navigation/precise_code_navigation.mdx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ Precise code navigation relies on the open source [SCIP Code Intelligence Protoc
2222

2323
## Setting up code navigation for your codebase
2424

25-
There are several options for setting up precise code navigation:
25+
<Callout type="info">There are several options for setting up precise code navigation listed below. However, we always recommend you start by manually indexing your repo locally using the [appropriate indexer](/code-navigation/writing_an_indexer#quick-reference) for your language. Code and build systems can vary by project and ensuring you can first successfully run the indexer locally leads to a smoother experience since it is vastly easier to debug and iterate on any issues locally before trying to do so in CI/CD or in Auto-Indexing.</Callout>
2626

2727
1. **Manual indexing**. Index a repository and upload it to your Sourcegraph instance:
2828

docs/cody/core-concepts/token-limits.mdx

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -50,16 +50,16 @@ The Enterprise tier supports the token limits for the LLM models on Free and Pro
5050

5151
| **Model** | **Conversation Context** | **@-mention Context** | **Output** |
5252
| ------------------------------------ | ------------------------ | --------------------- | ---------- |
53-
| gpt-4o-mini | 7,000 | shared | 1,000 |
54-
| gpt-o3-mini-medium | 7,000 | shared | 1,000 |
55-
| gpt-4-turbo | 7,000 | shared | 1,000 |
56-
| gpt-4o | 7,000 | shared | 1,000 |
57-
| o1 | 7,000 | shared | 1,000 |
58-
| o3-mini-high | 7,000 | shared | 1,000 |
59-
| claude-3.5 Haiku | 7,000 | shared | 1,000 |
53+
| gpt-4o-mini | 7,000 | shared | 4,000 |
54+
| gpt-o3-mini-medium | 7,000 | shared | 4,000 |
55+
| gpt-4-turbo | 7,000 | shared | 4,000 |
56+
| gpt-4o | 7,000 | shared | 4,000 |
57+
| o1 | 7,000 | shared | 4,000 |
58+
| o3-mini-high | 7,000 | shared | 4,000 |
59+
| claude-3.5 Haiku | 7,000 | shared | 4,000 |
6060
| **claude-3.5 Sonnet (New)** | **15,000** | **30,000** | **4,000** |
61-
| Google Gemini 2.0 Flash | 7,000 | shared | 1,000 |
62-
| Google Gemini 2.0 Flash-Lite Preview | 7,000 | shared | 1,000 |
61+
| Google Gemini 2.0 Flash | 7,000 | shared | 4,000 |
62+
| Google Gemini 2.0 Flash-Lite Preview | 7,000 | shared | 4,000 |
6363

6464
</Tab>
6565
</Tabs>
Lines changed: 111 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,111 @@
1+
import { Langbase } from 'langbase';
2+
import { NextRequest } from 'next/server';
3+
4+
const apiKey = process.env.LANGBASE_API_KEY!;
5+
6+
const langbase = new Langbase({
7+
apiKey
8+
});
9+
10+
export async function POST(req: NextRequest) {
11+
const options = await req.json();
12+
13+
// Check if relevant
14+
let parsedResponse;
15+
let attempts = 0;
16+
const maxAttempts = 5;
17+
18+
while (attempts < maxAttempts) {
19+
try {
20+
const router = await langbase.pipe.run({
21+
...options,
22+
name: 'agent-router-related-to-sourcegraph',
23+
stream: false,
24+
variables: [{ name: 'userQuery', value: options.messages[0].content }],
25+
});
26+
27+
// @ts-expect-error — TODO: Fix by reporting to Langbase
28+
parsedResponse = JSON.parse(router.completion);
29+
break;
30+
} catch (error) {
31+
attempts++;
32+
if (attempts === maxAttempts) {
33+
return new Response(JSON.stringify({ error: "Service not working at the moment. Please refresh and try again." }), {
34+
status: 500,
35+
headers: {
36+
'Content-Type': 'application/json'
37+
}
38+
});
39+
}
40+
}
41+
}
42+
43+
// console.log("🚀 ~ parsedResponse:", parsedResponse)
44+
45+
46+
// If not relevant, return a stream mimicking Langbase's structure
47+
if (!parsedResponse.relevant) {
48+
49+
// console.log("🚀 Asking non relevant agent")
50+
51+
// Ask not relevant questions from agent ask-sourcegraph-docs-unrelated-queries
52+
const { stream, threadId } = await langbase.pipe.run({
53+
...options,
54+
name: 'ask-sourcegraph-docs-unrelated-queries'
55+
});
56+
57+
return new Response(stream, {
58+
status: 200,
59+
headers: {
60+
'lb-thread-id': threadId ?? ''
61+
}
62+
});
63+
64+
}
65+
66+
// Handle relevant question
67+
// console.log("🚀 Asking relevant agent")
68+
69+
const { stream, threadId } = await langbase.pipe.run({
70+
...options,
71+
name: 'ask-sourcegraph-docs'
72+
});
73+
74+
return new Response(stream, {
75+
status: 200,
76+
headers: {
77+
'lb-thread-id': threadId ?? ''
78+
}
79+
});
80+
}
81+
82+
83+
84+
85+
// Pretend answer is not relevant.
86+
// const encoder = new TextEncoder();
87+
// const stream = new ReadableStream({
88+
// start(controller) {
89+
// const chunk = {
90+
// id: `cmpl-${Math.random().toString(36).substr(2, 10)}`,
91+
// object: 'chat.completion.chunk' as const,
92+
// created: Math.floor(Date.now() / 1000),
93+
// model: 'gpt-3.5-turbo',
94+
// choices: [{
95+
// index: 0,
96+
// delta: { content: 'Please ask a relevant question to Sourcegraph.' },
97+
// logprobs: null,
98+
// finish_reason: 'stop'
99+
// }]
100+
// };
101+
// controller.enqueue(encoder.encode(JSON.stringify(chunk)));
102+
// controller.close();
103+
// }
104+
// });
105+
106+
// return new Response(stream, {
107+
// status: 200,
108+
// headers: {
109+
// // Omit 'Content-Type' to allow stream processing
110+
// }
111+
// });

src/app/api/chat/route.ts

Lines changed: 0 additions & 87 deletions
Original file line numberDiff line numberDiff line change
@@ -10,62 +10,6 @@ const langbase = new Langbase({
1010
export async function POST(req: NextRequest) {
1111
const options = await req.json();
1212

13-
// Check if relevant
14-
let parsedResponse;
15-
let attempts = 0;
16-
const maxAttempts = 5;
17-
18-
while (attempts < maxAttempts) {
19-
try {
20-
const router = await langbase.pipe.run({
21-
...options,
22-
name: 'agent-router-related-to-sourcegraph',
23-
stream: false,
24-
variables: [{ name: 'userQuery', value: options.messages[0].content }],
25-
});
26-
27-
// @ts-expect-error — TODO: Fix by reporting to Langbase
28-
parsedResponse = JSON.parse(router.completion);
29-
break;
30-
} catch (error) {
31-
attempts++;
32-
if (attempts === maxAttempts) {
33-
return new Response(JSON.stringify({ error: "Service not working at the moment. Please refresh and try again." }), {
34-
status: 500,
35-
headers: {
36-
'Content-Type': 'application/json'
37-
}
38-
});
39-
}
40-
}
41-
}
42-
43-
// console.log("🚀 ~ parsedResponse:", parsedResponse)
44-
45-
46-
// If not relevant, return a stream mimicking Langbase's structure
47-
if (!parsedResponse.relevant) {
48-
49-
// console.log("🚀 Asking non relevant agent")
50-
51-
// Ask not relevant questions from agent ask-sourcegraph-docs-unrelated-queries
52-
const { stream, threadId } = await langbase.pipe.run({
53-
...options,
54-
name: 'ask-sourcegraph-docs-unrelated-queries'
55-
});
56-
57-
return new Response(stream, {
58-
status: 200,
59-
headers: {
60-
'lb-thread-id': threadId ?? ''
61-
}
62-
});
63-
64-
}
65-
66-
// Handle relevant question
67-
// console.log("🚀 Asking relevant agent")
68-
6913
const { stream, threadId } = await langbase.pipe.run({
7014
...options,
7115
name: 'ask-sourcegraph-docs'
@@ -78,34 +22,3 @@ export async function POST(req: NextRequest) {
7822
}
7923
});
8024
}
81-
82-
83-
84-
85-
// Pretend answer is not relevant.
86-
// const encoder = new TextEncoder();
87-
// const stream = new ReadableStream({
88-
// start(controller) {
89-
// const chunk = {
90-
// id: `cmpl-${Math.random().toString(36).substr(2, 10)}`,
91-
// object: 'chat.completion.chunk' as const,
92-
// created: Math.floor(Date.now() / 1000),
93-
// model: 'gpt-3.5-turbo',
94-
// choices: [{
95-
// index: 0,
96-
// delta: { content: 'Please ask a relevant question to Sourcegraph.' },
97-
// logprobs: null,
98-
// finish_reason: 'stop'
99-
// }]
100-
// };
101-
// controller.enqueue(encoder.encode(JSON.stringify(chunk)));
102-
// controller.close();
103-
// }
104-
// });
105-
106-
// return new Response(stream, {
107-
// status: 200,
108-
// headers: {
109-
// // Omit 'Content-Type' to allow stream processing
110-
// }
111-
// });

0 commit comments

Comments
 (0)