 // To debug LLM history in the browser console:
 c = cc.client.conat_client
 // Get the shared LLM history streams
-generalStream = await c.dstream({account_id: cc.client.account_id, name: 'llm-history-general'})
-formulaStream = await c.dstream({account_id: cc.client.account_id, name: 'llm-history-formula'})
-// View general prompts
-console.log('General LLM prompts:', generalStream.getAll())
-// View formula prompts
-console.log('Formula prompts:', formulaStream.getAll())
+stream = await c.dstream({account_id: cc.client.account_id, name: 'llm-history'})
+// View prompts
+console.log('LLM prompts:', stream.getAll())
 // Add a prompt to general
-generalStream.push("New prompt")
+stream.push("New prompt")
 // Listen to changes
-generalStream.on('change', (prompt) => console.log('New general prompt:', prompt))
+stream.on('change', (prompt) => console.log('New prompt:', prompt))
 */

 import { useState } from "react";
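With the single shared stream, each item is an LLMHistoryEntry object rather than a bare string, so the console inspection above now returns objects. A minimal sketch of what that looks like (the entry values are illustrative, not taken from this diff):

// stream.getAll() now yields typed entries, e.g.:
//   [{type: "general", prompt: "Explain this error"}, {type: "formula", prompt: "x^2 + y^2 = 1"}]
entries = stream.getAll()
// Keep only one history type:
entries.filter((e) => e.type === "general").map((e) => e.prompt)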
@@ -32,16 +29,20 @@ import { reuseInFlight } from "@cocalc/util/reuse-in-flight";
 const MAX_PROMPTS_NUM = 1000;
 const MAX_PROMPTS_BYTES = 1024 * 1024;

-export type LLMHistoryType = "general" | "formula";
+export type LLMHistoryType = "general" | "formula" | "generate";

-// Cache for dstream instances per type
-const streamCache = new Map<LLMHistoryType, DStream<string>>();
+interface LLMHistoryEntry {
+  type: LLMHistoryType;
+  prompt: string;
+}
+
+// Single cache for the shared dstream
+let streamCache: DStream<LLMHistoryEntry> | null = null;

-// Get or create dstream for a specific history type
-const getDStream = reuseInFlight(async (type: LLMHistoryType) => {
-  const cachedStream = streamCache.get(type);
-  if (cachedStream) {
-    return cachedStream;
+// Get or create the single shared dstream
+const getDStream = reuseInFlight(async () => {
+  if (streamCache) {
+    return streamCache;
   }

   try {
@@ -53,17 +54,17 @@ const getDStream = reuseInFlight(async (type: LLMHistoryType) => {
     });

     const account_id = store.get_account_id();
-    const stream = await webapp_client.conat_client.dstream<string>({
+    const stream = await webapp_client.conat_client.dstream<LLMHistoryEntry>({
       account_id,
-      name: `${CONAT_LLM_HISTORY_KEY}-${type}`,
+      name: CONAT_LLM_HISTORY_KEY,
       config: {
         discard_policy: "old",
         max_msgs: MAX_PROMPTS_NUM,
         max_bytes: MAX_PROMPTS_BYTES,
       },
     });

-    streamCache.set(type, stream);
+    streamCache = stream;
     return stream;
   } catch (err) {
     console.warn(`dstream LLM history initialization error -- ${err}`);
@@ -75,22 +76,31 @@ const getDStream = reuseInFlight(async (type: LLMHistoryType) => {
 export function useLLMHistory(type: LLMHistoryType = "general") {
   const [prompts, setPrompts] = useState<string[]>([]);

+  // Filter prompts by type and extract just the prompt strings (newest first)
+  function filterPromptsByType(entries: LLMHistoryEntry[]): string[] {
+    return entries
+      .filter((entry) => entry.type === type)
+      .map((entry) => entry.prompt)
+      .reverse();
+  }
+
   // Initialize dstream and set up listeners
   useAsyncEffect(async () => {
     try {
-      const stream = await getDStream(type);
-
-      // Load existing prompts from stream (newest first)
-      const allPrompts = stream.getAll().reverse();
-      setPrompts(allPrompts);
+      const stream = await getDStream();
+      const allEntries = stream.getAll();
+      setPrompts(filterPromptsByType(allEntries));

       // Listen for new prompts being added
-      const handleChange = (newPrompt: string) => {
-        setPrompts((prev) => {
-          // Remove duplicate if exists, then add to front
-          const filtered = prev.filter((p) => p !== newPrompt);
-          return [newPrompt, ...filtered];
-        });
+      const handleChange = (newEntry: LLMHistoryEntry) => {
+        // Only update if the new entry matches our type
+        if (newEntry.type === type) {
+          setPrompts((prev) => {
+            // Remove duplicate if exists, then add to front
+            const filtered = prev.filter((p) => p !== newEntry.prompt);
+            return [newEntry.prompt, ...filtered];
+          });
+        }
       };

       stream.on("change", handleChange);
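To make the new filtering concrete: for a hook instantiated with type "formula", filterPromptsByType keeps only formula entries and returns their prompt strings newest first. A small sketch with hypothetical entries:

// Hypothetical stream contents, oldest first:
const entries: LLMHistoryEntry[] = [
  { type: "general", prompt: "Summarize this file" },
  { type: "formula", prompt: "\\int_0^1 x^2 dx" },
  { type: "formula", prompt: "e^{i\\pi} + 1 = 0" },
];
// With type === "formula", filterPromptsByType(entries) returns
// ["e^{i\\pi} + 1 = 0", "\\int_0^1 x^2 dx"] -- newest first, general prompts dropped.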
@@ -105,35 +115,32 @@ export function useLLMHistory(type: LLMHistoryType = "general") {
   }, [type]);

   async function addPrompt(prompt: string) {
-    if (!prompt.trim()) {
-      console.warn("Empty prompt provided");
+    const trimmedPrompt = prompt.trim();
+
+    if (!trimmedPrompt) {
+      console.warn("use-llm-history: ignoring empty prompt");
       return;
     }

     try {
-      const stream = await getDStream(type);
-      const trimmedPrompt = prompt.trim();
+      const stream = await getDStream();

-      // Add prompt to stream - this will trigger change event
-      stream.push(trimmedPrompt);
+      // Create entry object with type and prompt
+      const entry: LLMHistoryEntry = {
+        type,
+        prompt: trimmedPrompt,
+      };

-      // Clean up old prompts if we exceed MAX_PROMPTS
-      const currentLength = stream.length;
-      if (currentLength > MAX_PROMPTS_NUM) {
-        // Note: dstream doesn't have a built-in way to remove old entries
-        // but we limit the display to MAX_PROMPTS in the UI
-        console.warn(
-          `LLM history has ${currentLength} entries, exceeding MAX_PROMPTS=${MAX_PROMPTS_NUM}`,
-        );
-      }
+      // Add entry to stream - this will trigger a change event
+      stream.push(entry);
     } catch (err) {
       console.warn(`Error adding prompt to LLM history -- ${err}`);
     }
   }

   async function clearHistory() {
     try {
-      const stream = await getDStream(type);
+      const stream = await getDStream();

       // Clear local state immediately
       setPrompts([]);
@@ -142,14 +149,14 @@ export function useLLMHistory(type: LLMHistoryType = "general") {
       await stream.delete();

       // Remove from cache so a new stream will be created
-      streamCache.delete(type);
+      streamCache = null;
     } catch (err) {
       console.warn(`Error clearing LLM history -- ${err}`);
       // Reload prompts on error
       try {
-        const stream = await getDStream(type);
-        const allPrompts = stream.getAll().slice(-MAX_PROMPTS_NUM).reverse();
-        setPrompts(allPrompts);
+        const stream = await getDStream();
+        const allEntries = stream.getAll();
+        setPrompts(filterPromptsByType(allEntries));
       } catch (reloadErr) {
         console.warn(
           `Error reloading prompts after clear failure -- ${reloadErr}`,
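For orientation, a minimal sketch of how a component might consume the hook after this change. It assumes the hook returns { prompts, addPrompt, clearHistory }, which lies outside the hunks shown here, and the import path is hypothetical:

import { useLLMHistory } from "./use-llm-history"; // path assumed

function FormulaPromptHistory() {
  // All instances share the single dstream, but this one only sees "formula" prompts.
  const { prompts, addPrompt, clearHistory } = useLLMHistory("formula");
  return (
    <div>
      {prompts.map((p, i) => (
        <div key={i}>{p}</div>
      ))}
      <button onClick={() => addPrompt("New formula prompt")}>Add</button>
      <button onClick={clearHistory}>Clear</button>
    </div>
  );
}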