Commit f7bef67 (1 parent: b98a452)

frontend/llm-history: only use one dstream to save resources
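In short: instead of one dstream per history type ("llm-history-general", "llm-history-formula", ...), the frontend now keeps a single "llm-history" stream whose messages carry their type. A minimal sketch of the new entry shape, taken from the diff below:

    // One shared stream; each message is a typed entry instead of a bare string.
    interface LLMHistoryEntry {
      type: "general" | "formula" | "generate"; // "generate" is new in this commit
      prompt: string;
    }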

File tree: 2 files changed, +88 -67 lines

src/packages/frontend/frame-editors/llm/use-llm-history.ts (+57, -50)
@@ -7,16 +7,13 @@
 // To debug LLM history in the browser console:
 c = cc.client.conat_client
 // Get the shared LLM history streams
-generalStream = await c.dstream({account_id: cc.client.account_id, name: 'llm-history-general'})
-formulaStream = await c.dstream({account_id: cc.client.account_id, name: 'llm-history-formula'})
-// View general prompts
-console.log('General LLM prompts:', generalStream.getAll())
-// View formula prompts
-console.log('Formula prompts:', formulaStream.getAll())
+stream = await c.dstream({account_id: cc.client.account_id, name: 'llm-history'})
+// View prompts
+console.log('LLM prompts:', stream.getAll())
 // Add a prompt to general
-generalStream.push("New prompt")
+stream.push("New prompt")
 // Listen to changes
-generalStream.on('change', (prompt) => console.log('New general prompt:', prompt))
+stream.on('change', (prompt) => console.log('New prompt:', prompt))
 */

 import { useState } from "react";
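Since entries are now objects rather than bare strings, inspecting a single history type from the console takes one extra filter step (a sketch, with `stream` obtained as above):

    // Show only the formula prompts from the shared stream:
    stream.getAll().filter((e) => e.type === 'formula').map((e) => e.prompt)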
@@ -32,16 +29,20 @@ import { reuseInFlight } from "@cocalc/util/reuse-in-flight";
 const MAX_PROMPTS_NUM = 1000;
 const MAX_PROMPTS_BYTES = 1024 * 1024;

-export type LLMHistoryType = "general" | "formula";
+export type LLMHistoryType = "general" | "formula" | "generate";

-// Cache for dstream instances per type
-const streamCache = new Map<LLMHistoryType, DStream<string>>();
+interface LLMHistoryEntry {
+  type: LLMHistoryType;
+  prompt: string;
+}
+
+// Single cache for the shared dstream
+let streamCache: DStream<LLMHistoryEntry> | null = null;

-// Get or create dstream for a specific history type
-const getDStream = reuseInFlight(async (type: LLMHistoryType) => {
-  const cachedStream = streamCache.get(type);
-  if (cachedStream) {
-    return cachedStream;
+// Get or create the single shared dstream
+const getDStream = reuseInFlight(async () => {
+  if (streamCache) {
+    return streamCache;
   }

   try {
@@ -53,17 +54,17 @@ const getDStream = reuseInFlight(async (type: LLMHistoryType) => {
     });

     const account_id = store.get_account_id();
-    const stream = await webapp_client.conat_client.dstream<string>({
+    const stream = await webapp_client.conat_client.dstream<LLMHistoryEntry>({
       account_id,
-      name: `${CONAT_LLM_HISTORY_KEY}-${type}`,
+      name: CONAT_LLM_HISTORY_KEY,
       config: {
         discard_policy: "old",
         max_msgs: MAX_PROMPTS_NUM,
         max_bytes: MAX_PROMPTS_BYTES,
       },
     });

-    streamCache.set(type, stream);
+    streamCache = stream;
     return stream;
   } catch (err) {
     console.warn(`dstream LLM history initialization error -- ${err}`);
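The stream config is unchanged, but it now bounds one stream instead of one per type. Assuming the usual conat dstream limit semantics, discard_policy "old" evicts the oldest messages once a limit is hit, which is why the manual-cleanup warning removed further down was never needed:

    // Assumed eviction behavior of discard_policy "old" (illustration only):
    // stream = [e1, e2, ..., e1000]    -- at max_msgs (MAX_PROMPTS_NUM)
    // stream.push(e1001)               -- limit exceeded on push
    // stream = [e2, ..., e1000, e1001] -- oldest entry e1 is discarded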
@@ -75,22 +76,31 @@ const getDStream = reuseInFlight(async (type: LLMHistoryType) => {
 export function useLLMHistory(type: LLMHistoryType = "general") {
   const [prompts, setPrompts] = useState<string[]>([]);

+  // Filter prompts by type and extract just the prompt strings (newest first)
+  function filterPromptsByType(entries: LLMHistoryEntry[]): string[] {
+    return entries
+      .filter((entry) => entry.type === type)
+      .map((entry) => entry.prompt)
+      .reverse();
+  }
+
   // Initialize dstream and set up listeners
   useAsyncEffect(async () => {
     try {
-      const stream = await getDStream(type);
-
-      // Load existing prompts from stream (newest first)
-      const allPrompts = stream.getAll().reverse();
-      setPrompts(allPrompts);
+      const stream = await getDStream();
+      const allEntries = stream.getAll();
+      setPrompts(filterPromptsByType(allEntries));

       // Listen for new prompts being added
-      const handleChange = (newPrompt: string) => {
-        setPrompts((prev) => {
-          // Remove duplicate if exists, then add to front
-          const filtered = prev.filter((p) => p !== newPrompt);
-          return [newPrompt, ...filtered];
-        });
+      const handleChange = (newEntry: LLMHistoryEntry) => {
+        // Only update if the new entry matches our type
+        if (newEntry.type === type) {
+          setPrompts((prev) => {
+            // Remove duplicate if exists, then add to front
+            const filtered = prev.filter((p) => p !== newEntry.prompt);
+            return [newEntry.prompt, ...filtered];
+          });
+        }
       };

       stream.on("change", handleChange);
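To make the filtering concrete, a hypothetical run of filterPromptsByType for a hook created with type === "formula" (prompt texts invented for illustration):

    const entries: LLMHistoryEntry[] = [
      { type: "general", prompt: "Summarize this file" }, // oldest
      { type: "formula", prompt: "x^2 + y^2 = 1" },
      { type: "formula", prompt: "sin(x)/x" },            // newest
    ];
    filterPromptsByType(entries);
    // => ["sin(x)/x", "x^2 + y^2 = 1"]  -- other types dropped, newest first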
@@ -105,35 +115,32 @@ export function useLLMHistory(type: LLMHistoryType = "general") {
   }, [type]);

   async function addPrompt(prompt: string) {
-    if (!prompt.trim()) {
-      console.warn("Empty prompt provided");
+    const trimmedPrompt = prompt.trim();
+
+    if (!trimmedPrompt) {
+      console.warn("use-llm-history: ignoring empty prompt");
       return;
     }

     try {
-      const stream = await getDStream(type);
-      const trimmedPrompt = prompt.trim();
+      const stream = await getDStream();

-      // Add prompt to stream - this will trigger change event
-      stream.push(trimmedPrompt);
+      // Create entry object with type and prompt
+      const entry: LLMHistoryEntry = {
+        type,
+        prompt: trimmedPrompt,
+      };

-      // Clean up old prompts if we exceed MAX_PROMPTS
-      const currentLength = stream.length;
-      if (currentLength > MAX_PROMPTS_NUM) {
-        // Note: dstream doesn't have a built-in way to remove old entries
-        // but we limit the display to MAX_PROMPTS in the UI
-        console.warn(
-          `LLM history has ${currentLength} entries, exceeding MAX_PROMPTS=${MAX_PROMPTS_NUM}`,
-        );
-      }
+      // Add entry to stream - this will trigger a change event
+      stream.push(entry);
     } catch (err) {
       console.warn(`Error adding prompt to LLM history -- ${err}`);
     }
   }

   async function clearHistory() {
     try {
-      const stream = await getDStream(type);
+      const stream = await getDStream();

       // Clear local state immediately
       setPrompts([]);
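So a push now writes a small object rather than a bare string. For a hook with type === "generate", addPrompt sends roughly this (prompt text hypothetical):

    stream.push({ type: "generate", prompt: "Plot a sine wave" });
    // the "change" handler above then moves this prompt to the front of the list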
@@ -142,14 +149,14 @@ export function useLLMHistory(type: LLMHistoryType = "general") {
       await stream.delete();

       // Remove from cache so a new stream will be created
-      streamCache.delete(type);
+      streamCache = null;
     } catch (err) {
       console.warn(`Error clearing LLM history -- ${err}`);
       // Reload prompts on error
       try {
-        const stream = await getDStream(type);
-        const allPrompts = stream.getAll().slice(-MAX_PROMPTS_NUM).reverse();
-        setPrompts(allPrompts);
+        const stream = await getDStream();
+        const allEntries = stream.getAll();
+        setPrompts(filterPromptsByType(allEntries));
       } catch (reloadErr) {
         console.warn(
           `Error reloading prompts after clear failure -- ${reloadErr}`,
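One consequence of the consolidation worth noting (not called out in the diff): clearHistory() deletes the single shared stream, so clearing history from one context removes the prompts of every history type, not just the hook's own type.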

src/packages/frontend/project/page/home-page/ai-generate-document.tsx (+31, -17)
@@ -25,6 +25,7 @@ import { delay } from "awaiting";
 import { debounce, isEmpty, throttle } from "lodash";
 import { useEffect, useRef, useState } from "react";
 import { FormattedMessage, useIntl } from "react-intl";
+
 import { useLanguageModelSetting } from "@cocalc/frontend/account/useLanguageModelSetting";
 import {
   CSS,
@@ -51,10 +52,12 @@ import { file_options } from "@cocalc/frontend/editor-tmp";
 import { Actions as CodeEditorActions } from "@cocalc/frontend/frame-editors/code-editor/actions";
 import { JupyterEditorActions } from "@cocalc/frontend/frame-editors/jupyter-editor/actions";
 import { Actions as LatexActions } from "@cocalc/frontend/frame-editors/latex-editor/actions";
+import { LLMHistorySelector } from "@cocalc/frontend/frame-editors/llm/llm-history-selector";
 import { LLMQueryDropdownButton } from "@cocalc/frontend/frame-editors/llm/llm-query-dropdown";
 import LLMSelector, {
   modelToName,
 } from "@cocalc/frontend/frame-editors/llm/llm-selector";
+import { useLLMHistory } from "@cocalc/frontend/frame-editors/llm/use-llm-history";
 import { Actions as RmdActions } from "@cocalc/frontend/frame-editors/rmd-editor/actions";
 import { dialogs, labels } from "@cocalc/frontend/i18n";
 import getKernelSpec from "@cocalc/frontend/jupyter/kernelspecs";
@@ -156,6 +159,7 @@ function AIGenerateDocument({
   const [paperSize, setPaperSize] = useState<string | null>(null);
   // User's description of document they want to generate.
   const [prompt, setPrompt] = useState<string>("");
+  const { prompts: historyPrompts, addPrompt } = useLLMHistory("generate");
   const [querying, setQuerying] = useState<boolean>(false);
   const [saving, setSaving] = useState<boolean>(false);
   const [error, setError] = useState<string>("");
@@ -362,6 +366,9 @@
   };
   projectActions?.log(event);

+  // Add prompt to history
+  addPrompt(prompt);
+
   const what = intl.formatMessage(
     {
       id: "project.page.ai-generate-document.create_document.what",
@@ -578,7 +585,7 @@
       if (cancel.current) {
         // we abort this
         llmStream.removeAllListeners();
-        // singal "finalization"
+        // signal "finalization"
         processTokens(answer, true);
         return;
       }
@@ -645,7 +652,7 @@
     case "ipynb":
     case "ipynb-sagemath":
       return spec != null
-        ? (JUPYTER[spec.language?.toLowerCase()] ?? [])
+        ? JUPYTER[spec.language?.toLowerCase()] ?? []
         : [];
     default:
       return DOCUMENT[ext];
@@ -836,21 +843,28 @@
           />
         </Paragraph>
         <Paragraph>
-          <Input.TextArea
-            ref={promptRef}
-            allowClear
-            autoSize={{ minRows: 3, maxRows: 6 }}
-            maxLength={3000}
-            placeholder={placeholder}
-            value={prompt}
-            disabled={querying}
-            onChange={({ target: { value } }) => setPrompt(value)}
-            onPressEnter={(e) => {
-              if (e.shiftKey) {
-                generate();
-              }
-            }}
-          />
+          <Space.Compact style={{ width: "100%" }}>
+            <Input.TextArea
+              ref={promptRef}
+              allowClear
+              autoSize={{ minRows: 3, maxRows: 6 }}
+              maxLength={3000}
+              placeholder={placeholder}
+              value={prompt}
+              disabled={querying}
+              onChange={({ target: { value } }) => setPrompt(value)}
+              onPressEnter={(e) => {
+                if (e.shiftKey) {
+                  generate();
+                }
+              }}
+            />
+            <LLMHistorySelector
+              prompts={historyPrompts}
+              onSelect={setPrompt}
+              disabled={querying}
+            />
+          </Space.Compact>
         </Paragraph>
         {!error ? renderExamples() : undefined}
         {!error ? renderPaperSize() : undefined}
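For context, the interface LLMHistorySelector appears to expect, inferred from the props passed above (its source is not part of this commit, so treat this as an assumption):

    interface LLMHistorySelectorProps {
      prompts: string[];                  // filtered history, newest first
      onSelect: (prompt: string) => void; // here: setPrompt fills the textarea
      disabled?: boolean;                 // here: true while a query is running
    }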
