1
1
import { Button , Descriptions , Divider , Input , Modal , Space } from "antd" ;
2
+ import { debounce } from "lodash" ;
2
3
3
4
import { useLanguageModelSetting } from "@cocalc/frontend/account/useLanguageModelSetting" ;
4
5
import {
8
9
useState ,
9
10
useTypedRedux ,
10
11
} from "@cocalc/frontend/app-framework" ;
12
+ import type { Message } from "@cocalc/frontend/client/types" ;
11
13
import {
12
14
HelpIcon ,
13
15
Icon ,
@@ -20,12 +22,11 @@ import AIAvatar from "@cocalc/frontend/components/ai-avatar";
20
22
import { LLMModelName } from "@cocalc/frontend/components/llm-name" ;
21
23
import LLMSelector from "@cocalc/frontend/frame-editors/llm/llm-selector" ;
22
24
import { show_react_modal } from "@cocalc/frontend/misc" ;
25
+ import { LLMCostEstimation } from "@cocalc/frontend/misc/llm-cost-estimation" ;
23
26
import track from "@cocalc/frontend/user-tracking" ;
24
27
import { webapp_client } from "@cocalc/frontend/webapp-client" ;
25
28
import { isFreeModel } from "@cocalc/util/db-schema/llm-utils" ;
26
29
import { unreachable } from "@cocalc/util/misc" ;
27
- import { LLMCostEstimation } from "../../misc/llm-cost-estimation" ;
28
- import { debounce } from "lodash" ;
29
30
30
31
type Mode = "tex" | "md" ;
31
32
@@ -64,13 +65,18 @@ function AiGenFormula({ mode, text = "", project_id, cb }: Props) {
64
65
useAsyncEffect (
65
66
debounce (
66
67
async ( ) => {
67
- const prompt = getPrompt ( ) ?? "" ;
68
+ const { input , history , system } = getPrompt ( ) ?? "" ;
68
69
// compute the number of tokens (this MUST be a lazy import):
69
70
const { getMaxTokens, numTokensUpperBound } = await import (
70
71
"@cocalc/frontend/misc/llm"
71
72
) ;
72
73
73
- setTokens ( numTokensUpperBound ( prompt , getMaxTokens ( model ) ) ) ;
74
+ const all = [
75
+ input ,
76
+ history . map ( ( { content } ) => content ) . join ( " " ) ,
77
+ system ,
78
+ ] . join ( " " ) ;
79
+ setTokens ( numTokensUpperBound ( all , getMaxTokens ( model ) ) ) ;
74
80
} ,
75
81
1000 ,
76
82
{ leading : true , trailing : true } ,
@@ -83,20 +89,47 @@ function AiGenFormula({ mode, text = "", project_id, cb }: Props) {
83
89
. getStore ( "projects" )
84
90
. hasLanguageModelEnabled ( project_id , LLM_USAGE_TAG ) ;
85
91
86
- function getPrompt ( ) {
87
- const description = input || text ;
88
- const p1 = `Convert the following plain-text description of a formula to a LaTeX formula` ;
89
- const p2 = `Return the LaTeX formula, and only the formula. Enclose the formula in a single snippet delimited by $. Do not add any explanations.` ;
92
+ function getSystemPrompt ( ) : string {
93
+ const p1 = `Typset the plain-text description of a mathematical formula as a LaTeX formula. The formula will be` ;
94
+ const p2 = `Return only the LaTeX formula, ready to be inserted into the document. Do not add any explanations.` ;
90
95
switch ( mode ) {
91
96
case "tex" :
92
- return `${ p1 } in a *.tex file. Assume the package "amsmath" is available. ${ p2 } :\n\n ${ description } ` ;
97
+ return `${ p1 } in a *.tex file. Assume the package "amsmath" is available. ${ p2 } ` ;
93
98
case "md" :
94
- return `${ p1 } in a markdown file. ${ p2 } \n\n ${ description } ` ;
99
+ return `${ p1 } in a markdown file. Formulas are inside of $ or $$. ${ p2 } ` ;
95
100
default :
96
101
unreachable ( mode ) ;
102
+ return p1 ;
97
103
}
98
104
}
99
105
106
+ function getPrompt ( ) : { input : string ; history : Message [ ] ; system : string } {
107
+ const system = getSystemPrompt ( ) ;
108
+ // 3-shot examples
109
+ const history : Message [ ] = [
110
+ { role : "user" , content : "equation e^(i pi) = -1" } ,
111
+ { role : "assistant" , content : "$$e^{i \\pi} = -1$$" } ,
112
+ {
113
+ role : "user" ,
114
+ content : "integral 0 to 2 pi sin(x)^2" ,
115
+ } ,
116
+ {
117
+ role : "assistant" ,
118
+ content : "$\\int_{0}^{2\\pi} \\sin(x)^2 \\, \\mathrm{d}x$" ,
119
+ } ,
120
+ {
121
+ role : "user" ,
122
+ content : "equation system: [ 1 + x^2 = a, 1 - y^2 = ln(a) ]" ,
123
+ } ,
124
+ {
125
+ role : "assistant" ,
126
+ content :
127
+ "\\begin{cases}\n1 + x^2 = a \\\n1 - y^2 = \\ln(a)\n\\end{cases}" ,
128
+ } ,
129
+ ] ;
130
+ return { input : input || text , system, history } ;
131
+ }
132
+
100
133
function wrapFormula ( tex : string = "" ) {
101
134
// wrap single-line formulas in $...$
102
135
// if it is multiline, wrap in \begin{equation}...\end{equation}
@@ -170,12 +203,14 @@ function AiGenFormula({ mode, text = "", project_id, cb }: Props) {
170
203
type : "generate" ,
171
204
model,
172
205
} ) ;
206
+ const { system, input, history } = getPrompt ( ) ;
173
207
const reply = await webapp_client . openai_client . query ( {
174
- input : getPrompt ( ) ,
208
+ input,
209
+ history,
210
+ system,
211
+ model,
175
212
project_id,
176
213
tag : LLM_USAGE_TAG ,
177
- model,
178
- system : "" ,
179
214
} ) ;
180
215
const tex = processFormula ( reply ) ;
181
216
// significant difference? Also show the full reply
0 commit comments