Skip to content

Commit 3c56346

Browse files
authored
feat: enhance context handling by adding code context selection and implementing summary generation (#1091) #release
* feat: add context annotation types and enhance file handling in LLM processing * feat: enhance context handling by adding chatId to annotations and implementing summary generation * removed useless changes * feat: updated token counts to include optimization requests * prompt fix * logging added * useless logs removed
1 parent 2ae897a commit 3c56346

File tree

16 files changed

+1154
-223
lines changed

16 files changed

+1154
-223
lines changed

app/components/chat/AssistantMessage.tsx

Lines changed: 19 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
import { memo } from 'react';
22
import { Markdown } from './Markdown';
33
import type { JSONValue } from 'ai';
4+
import type { ProgressAnnotation } from '~/types/context';
5+
import Popover from '~/components/ui/Popover';
46

57
interface AssistantMessageProps {
68
content: string;
@@ -10,7 +12,12 @@ interface AssistantMessageProps {
1012
export const AssistantMessage = memo(({ content, annotations }: AssistantMessageProps) => {
1113
const filteredAnnotations = (annotations?.filter(
1214
(annotation: JSONValue) => annotation && typeof annotation === 'object' && Object.keys(annotation).includes('type'),
13-
) || []) as { type: string; value: any }[];
15+
) || []) as { type: string; value: any } & { [key: string]: any }[];
16+
17+
let progressAnnotation: ProgressAnnotation[] = filteredAnnotations.filter(
18+
(annotation) => annotation.type === 'progress',
19+
) as ProgressAnnotation[];
20+
progressAnnotation = progressAnnotation.sort((a, b) => b.value - a.value);
1421

1522
const usage: {
1623
completionTokens: number;
@@ -20,11 +27,18 @@ export const AssistantMessage = memo(({ content, annotations }: AssistantMessage
2027

2128
return (
2229
<div className="overflow-hidden w-full">
23-
{usage && (
24-
<div className="text-sm text-bolt-elements-textSecondary mb-2">
25-
Tokens: {usage.totalTokens} (prompt: {usage.promptTokens}, completion: {usage.completionTokens})
30+
<>
31+
<div className=" flex gap-2 items-center text-sm text-bolt-elements-textSecondary mb-2">
32+
{progressAnnotation.length > 0 && (
33+
<Popover trigger={<div className="i-ph:info" />}>{progressAnnotation[0].message}</Popover>
34+
)}
35+
{usage && (
36+
<div>
37+
Tokens: {usage.totalTokens} (prompt: {usage.promptTokens}, completion: {usage.completionTokens})
38+
</div>
39+
)}
2640
</div>
27-
)}
41+
</>
2842
<Markdown html>{content}</Markdown>
2943
</div>
3044
);

app/components/ui/Popover.tsx

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
import * as Popover from '@radix-ui/react-popover';
2+
import type { PropsWithChildren, ReactNode } from 'react';
3+
4+
export default ({ children, trigger }: PropsWithChildren<{ trigger: ReactNode }>) => (
5+
<Popover.Root>
6+
<Popover.Trigger asChild>{trigger}</Popover.Trigger>
7+
<Popover.Anchor />
8+
<Popover.Portal>
9+
<Popover.Content
10+
sideOffset={10}
11+
side="top"
12+
align="center"
13+
className="bg-bolt-elements-background-depth-2 text-bolt-elements-item-contentAccent p-2 rounded-md shadow-xl z-workbench"
14+
>
15+
{children}
16+
<Popover.Arrow className="bg-bolt-elements-item-background-depth-2" />
17+
</Popover.Content>
18+
</Popover.Portal>
19+
</Popover.Root>
20+
);

app/entry.server.tsx

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import type { AppLoadContext, EntryContext } from '@remix-run/cloudflare';
1+
import type { AppLoadContext } from '@remix-run/cloudflare';
22
import { RemixServer } from '@remix-run/react';
33
import { isbot } from 'isbot';
44
import { renderToReadableStream } from 'react-dom/server';
@@ -10,7 +10,7 @@ export default async function handleRequest(
1010
request: Request,
1111
responseStatusCode: number,
1212
responseHeaders: Headers,
13-
remixContext: EntryContext,
13+
remixContext: any,
1414
_loadContext: AppLoadContext,
1515
) {
1616
// await initializeModelList({});

app/lib/.server/llm/constants.ts

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,3 +3,36 @@ export const MAX_TOKENS = 8000;
33

44
// limits the number of model responses that can be returned in a single request
55
export const MAX_RESPONSE_SEGMENTS = 2;
6+
7+
// A file entry in the virtual file map. `content` carries the file body as a
// string and `isBinary` flags non-text payloads — presumably so binary files
// can be excluded from LLM context; confirm at call sites.
export interface File {
  type: 'file';
  content: string;
  isBinary: boolean;
}

// A directory entry; folders carry no payload of their own.
export interface Folder {
  type: 'folder';
}

// Directory entry: discriminated union over the literal `type` tag.
type Dirent = File | Folder;

// Path → entry map; `undefined` values mark absent/removed paths, so lookups
// must be null-checked before use.
export type FileMap = Record<string, Dirent | undefined>;

// Glob patterns for paths that should be skipped when collecting project
// files (dependency dirs, build output, VCS/IDE metadata, logs, lockfiles).
// NOTE(review): assumed to feed a glob matcher such as minimatch/ignore —
// confirm the consumer supports the `**` syntax used here.
export const IGNORE_PATTERNS = [
  'node_modules/**',
  '.git/**',
  'dist/**',
  'build/**',
  '.next/**',
  'coverage/**',
  '.cache/**',
  '.vscode/**',
  '.idea/**',
  '**/*.log',
  '**/.DS_Store',
  '**/npm-debug.log*',
  '**/yarn-debug.log*',
  '**/yarn-error.log*',
  '**/*lock.json',
  '**/*lock.yml',
];
Lines changed: 138 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,138 @@
1+
import { generateText, type CoreTool, type GenerateTextResult, type Message } from 'ai';
2+
import type { IProviderSetting } from '~/types/model';
3+
import { DEFAULT_MODEL, DEFAULT_PROVIDER, PROVIDER_LIST } from '~/utils/constants';
4+
import { extractCurrentContext, extractPropertiesFromMessage, simplifyBoltActions } from './utils';
5+
import { createScopedLogger } from '~/utils/logger';
6+
import { LLMManager } from '~/lib/modules/llm/manager';
7+
8+
const logger = createScopedLogger('create-summary');
9+
10+
export async function createSummary(props: {
11+
messages: Message[];
12+
env?: Env;
13+
apiKeys?: Record<string, string>;
14+
providerSettings?: Record<string, IProviderSetting>;
15+
promptId?: string;
16+
contextOptimization?: boolean;
17+
onFinish?: (resp: GenerateTextResult<Record<string, CoreTool<any, any>>, never>) => void;
18+
}) {
19+
const { messages, env: serverEnv, apiKeys, providerSettings, contextOptimization, onFinish } = props;
20+
let currentModel = DEFAULT_MODEL;
21+
let currentProvider = DEFAULT_PROVIDER.name;
22+
const processedMessages = messages.map((message) => {
23+
if (message.role === 'user') {
24+
const { model, provider, content } = extractPropertiesFromMessage(message);
25+
currentModel = model;
26+
currentProvider = provider;
27+
28+
return { ...message, content };
29+
} else if (message.role == 'assistant') {
30+
let content = message.content;
31+
32+
if (contextOptimization) {
33+
content = simplifyBoltActions(content);
34+
}
35+
36+
return { ...message, content };
37+
}
38+
39+
return message;
40+
});
41+
42+
const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;
43+
const staticModels = LLMManager.getInstance().getStaticModelListFromProvider(provider);
44+
let modelDetails = staticModels.find((m) => m.name === currentModel);
45+
46+
if (!modelDetails) {
47+
const modelsList = [
48+
...(provider.staticModels || []),
49+
...(await LLMManager.getInstance().getModelListFromProvider(provider, {
50+
apiKeys,
51+
providerSettings,
52+
serverEnv: serverEnv as any,
53+
})),
54+
];
55+
56+
if (!modelsList.length) {
57+
throw new Error(`No models found for provider ${provider.name}`);
58+
}
59+
60+
modelDetails = modelsList.find((m) => m.name === currentModel);
61+
62+
if (!modelDetails) {
63+
// Fallback to first model
64+
logger.warn(
65+
`MODEL [${currentModel}] not found in provider [${provider.name}]. Falling back to first model. ${modelsList[0].name}`,
66+
);
67+
modelDetails = modelsList[0];
68+
}
69+
}
70+
71+
let slicedMessages = processedMessages;
72+
const { summary } = extractCurrentContext(processedMessages);
73+
let summaryText: string | undefined = undefined;
74+
let chatId: string | undefined = undefined;
75+
76+
if (summary && summary.type === 'chatSummary') {
77+
chatId = summary.chatId;
78+
summaryText = `Below is the Chat Summary till now, this is chat summary before the conversation provided by the user
79+
you should also use this as historical message while providing the response to the user.
80+
${summary.summary}`;
81+
82+
if (chatId) {
83+
let index = 0;
84+
85+
for (let i = 0; i < processedMessages.length; i++) {
86+
if (processedMessages[i].id === chatId) {
87+
index = i;
88+
break;
89+
}
90+
}
91+
slicedMessages = processedMessages.slice(index + 1);
92+
}
93+
}
94+
95+
const extractTextContent = (message: Message) =>
96+
Array.isArray(message.content)
97+
? (message.content.find((item) => item.type === 'text')?.text as string) || ''
98+
: message.content;
99+
100+
// select files from the list of code file from the project that might be useful for the current request from the user
101+
const resp = await generateText({
102+
system: `
103+
You are a software engineer. You are working on a project. tou need to summarize the work till now and provide a summary of the chat till now.
104+
105+
${summaryText}
106+
107+
RULES:
108+
* Only provide the summary of the chat till now.
109+
* Do not provide any new information.
110+
`,
111+
prompt: `
112+
please provide a summary of the chat till now.
113+
below is the latest chat:
114+
115+
---
116+
${slicedMessages
117+
.map((x) => {
118+
return `---\n[${x.role}] ${extractTextContent(x)}\n---`;
119+
})
120+
.join('\n')}
121+
---
122+
`,
123+
model: provider.getModelInstance({
124+
model: currentModel,
125+
serverEnv,
126+
apiKeys,
127+
providerSettings,
128+
}),
129+
});
130+
131+
const response = resp.text;
132+
133+
if (onFinish) {
134+
onFinish(resp);
135+
}
136+
137+
return response;
138+
}

0 commit comments

Comments
 (0)