Skip to content

Commit 4da6a39

Browse files
authored
New AI Thinking Control (#2543)
Create an AI Thinking Dropdown in Wave AI with three modes — Quick, Balanced, or Deep — which map to gpt-5-mini, gpt-5 (low thinking), and gpt-5 (medium thinking) respectively. Also defaults down to Quick when no premium requests remain.
1 parent 0da0a64 commit 4da6a39

File tree

10 files changed

+213
-29
lines changed

10 files changed

+213
-29
lines changed

frontend/app/aipanel/aipanel-contextmenu.ts

Lines changed: 21 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,8 @@
33

44
import { waveAIHasSelection } from "@/app/aipanel/waveai-focus-utils";
55
import { ContextMenuModel } from "@/app/store/contextmenu";
6-
import { isDev } from "@/app/store/global";
6+
import { atoms, isDev } from "@/app/store/global";
7+
import { globalStore } from "@/app/store/jotaiStore";
78
import { RpcApi } from "@/app/store/wshclientapi";
89
import { TabRpcClient } from "@/app/store/wshrpcutil";
910
import { WaveAIModel } from "./waveai-model";
@@ -38,41 +39,47 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo
3839
oref: model.orefContext,
3940
});
4041

41-
const currentThinkingLevel = rtInfo?.["waveai:thinkinglevel"] ?? "medium";
42+
const rateLimitInfo = globalStore.get(atoms.waveAIRateLimitInfoAtom);
43+
const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0;
44+
const currentThinkingMode = rtInfo?.["waveai:thinkingmode"] ?? (hasPremium ? "balanced" : "quick");
4245
const defaultTokens = model.inBuilder ? 24576 : 4096;
4346
const currentMaxTokens = rtInfo?.["waveai:maxoutputtokens"] ?? defaultTokens;
4447

45-
const thinkingLevelSubmenu: ContextMenuItem[] = [
48+
const thinkingModeSubmenu: ContextMenuItem[] = [
4649
{
47-
label: "Low",
50+
label: "Quick (gpt-5-mini)",
4851
type: "checkbox",
49-
checked: currentThinkingLevel === "low",
52+
checked: currentThinkingMode === "quick",
5053
click: () => {
5154
RpcApi.SetRTInfoCommand(TabRpcClient, {
5255
oref: model.orefContext,
53-
data: { "waveai:thinkinglevel": "low" },
56+
data: { "waveai:thinkingmode": "quick" },
5457
});
5558
},
5659
},
5760
{
58-
label: "Medium",
61+
label: hasPremium ? "Balanced (gpt-5, low thinking)" : "Balanced (premium)",
5962
type: "checkbox",
60-
checked: currentThinkingLevel === "medium",
63+
checked: currentThinkingMode === "balanced",
64+
enabled: hasPremium,
6165
click: () => {
66+
if (!hasPremium) return;
6267
RpcApi.SetRTInfoCommand(TabRpcClient, {
6368
oref: model.orefContext,
64-
data: { "waveai:thinkinglevel": "medium" },
69+
data: { "waveai:thinkingmode": "balanced" },
6570
});
6671
},
6772
},
6873
{
69-
label: "High",
74+
label: hasPremium ? "Deep (gpt-5, full thinking)" : "Deep (premium)",
7075
type: "checkbox",
71-
checked: currentThinkingLevel === "high",
76+
checked: currentThinkingMode === "deep",
77+
enabled: hasPremium,
7278
click: () => {
79+
if (!hasPremium) return;
7380
RpcApi.SetRTInfoCommand(TabRpcClient, {
7481
oref: model.orefContext,
75-
data: { "waveai:thinkinglevel": "high" },
82+
data: { "waveai:thinkingmode": "deep" },
7683
});
7784
},
7885
},
@@ -157,8 +164,8 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo
157164
}
158165

159166
menu.push({
160-
label: "Thinking Level",
161-
submenu: thinkingLevelSubmenu,
167+
label: "Thinking Mode",
168+
submenu: thinkingModeSubmenu,
162169
});
163170

164171
menu.push({

frontend/app/aipanel/aipanel.tsx

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ import { AIPanelInput } from "./aipanelinput";
2121
import { AIPanelMessages } from "./aipanelmessages";
2222
import { AIRateLimitStrip } from "./airatelimitstrip";
2323
import { TelemetryRequiredMessage } from "./telemetryrequired";
24+
import { ThinkingLevelDropdown } from "./thinkingmode";
2425
import { WaveAIModel } from "./waveai-model";
2526

2627
const AIBlockMask = memo(() => {
@@ -493,9 +494,12 @@ const AIPanelComponentInner = memo(() => {
493494
<>
494495
{messages.length === 0 && initialLoadDone ? (
495496
<div
496-
className="flex-1 overflow-y-auto p-2"
497+
className="flex-1 overflow-y-auto p-2 relative"
497498
onContextMenu={(e) => handleWaveAIContextMenu(e, true)}
498499
>
500+
<div className="absolute top-2 right-2 z-10">
501+
<ThinkingLevelDropdown />
502+
</div>
499503
{model.inBuilder ? <AIBuilderWelcomeMessage /> : <AIWelcomeMessage />}
500504
</div>
501505
) : (

frontend/app/aipanel/aipanelmessages.tsx

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
import { useAtomValue } from "jotai";
55
import { memo, useEffect, useRef } from "react";
66
import { AIMessage } from "./aimessage";
7+
import { ThinkingLevelDropdown } from "./thinkingmode";
78
import { WaveAIModel } from "./waveai-model";
89

910
interface AIPanelMessagesProps {
@@ -41,7 +42,14 @@ export const AIPanelMessages = memo(({ messages, status, onContextMenu }: AIPane
4142
}, [isPanelOpen]);
4243

4344
return (
44-
<div ref={messagesContainerRef} className="flex-1 overflow-y-auto p-2 space-y-4" onContextMenu={onContextMenu}>
45+
<div
46+
ref={messagesContainerRef}
47+
className="flex-1 overflow-y-auto p-2 space-y-4 relative"
48+
onContextMenu={onContextMenu}
49+
>
50+
<div className="absolute top-2 right-2 z-10">
51+
<ThinkingLevelDropdown />
52+
</div>
4553
{messages.map((message, index) => {
4654
const isLastMessage = index === messages.length - 1;
4755
const isStreaming = status === "streaming" && isLastMessage && message.role === "assistant";
Lines changed: 119 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
// Copyright 2025, Command Line Inc.
2+
// SPDX-License-Identifier: Apache-2.0
3+
4+
import { atoms } from "@/app/store/global";
5+
import { useAtomValue } from "jotai";
6+
import { memo, useRef, useState } from "react";
7+
import { WaveAIModel } from "./waveai-model";
8+
9+
type ThinkingMode = "quick" | "balanced" | "deep";
10+
11+
interface ThinkingModeMetadata {
12+
icon: string;
13+
name: string;
14+
desc: string;
15+
premium: boolean;
16+
}
17+
18+
const ThinkingModeData: Record<ThinkingMode, ThinkingModeMetadata> = {
19+
quick: {
20+
icon: "fa-bolt",
21+
name: "Quick",
22+
desc: "Fastest responses (gpt-5-mini)",
23+
premium: false,
24+
},
25+
balanced: {
26+
icon: "fa-sparkles",
27+
name: "Balanced",
28+
desc: "Good mix of speed and accuracy\n(gpt-5 with minimal thinking)",
29+
premium: true,
30+
},
31+
deep: {
32+
icon: "fa-lightbulb",
33+
name: "Deep",
34+
desc: "Slower but most capable\n(gpt-5 with full reasoning)",
35+
premium: true,
36+
},
37+
};
38+
39+
/**
 * Dropdown for picking the Wave AI thinking mode (Quick / Balanced / Deep).
 * Reads the persisted mode from the model atom and writes changes back via
 * model.setThinkingMode. Premium-only modes are disabled (and the effective
 * mode falls back to "quick") when the user has no premium requests left.
 */
export const ThinkingLevelDropdown = memo(() => {
    const model = WaveAIModel.getInstance();
    const thinkingMode = useAtomValue(model.thinkingMode);
    const rateLimitInfo = useAtomValue(atoms.waveAIRateLimitInfoAtom);
    const [isOpen, setIsOpen] = useState(false);
    // NOTE: the original also created a dropdownRef that was attached but never
    // read (outside-click closing is handled by the backdrop div below); removed.

    // Treat missing/unknown rate-limit info as premium; otherwise require remaining premium requests.
    const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0;

    // Resolve the effective mode FIRST, then look up its metadata. The original
    // looked metadata up before the non-premium downgrade, so a non-premium user
    // with "balanced"/"deep" stored saw the premium mode's icon/name on the
    // trigger button while "quick" was actually in effect.
    let currentMode = (thinkingMode as ThinkingMode) || "balanced";
    if (!hasPremium && ThinkingModeData[currentMode].premium) {
        currentMode = "quick";
    }
    const currentMetadata = ThinkingModeData[currentMode];

    // Persist the chosen mode and close the menu; premium-gated modes are ignored without premium.
    const handleSelect = (mode: ThinkingMode) => {
        if (!hasPremium && ThinkingModeData[mode].premium) {
            return;
        }
        model.setThinkingMode(mode);
        setIsOpen(false);
    };

    // Hoisted once; also fixes recomputing Object.keys() on every map iteration for isLast.
    const modes = Object.keys(ThinkingModeData) as ThinkingMode[];

    return (
        <div className="relative">
            <button
                onClick={() => setIsOpen(!isOpen)}
                className="flex items-center gap-1.5 px-2 py-1 text-xs text-gray-300 hover:text-white bg-gray-800/50 hover:bg-gray-700/50 rounded transition-colors cursor-pointer border border-gray-600/50"
                title={`Thinking: ${currentMetadata.name}`}
            >
                <i className={`fa ${currentMetadata.icon} text-[10px]`}></i>
                <span className="text-[11px]">{currentMetadata.name}</span>
                <i className="fa fa-chevron-down text-[8px]"></i>
            </button>

            {isOpen && (
                <>
                    {/* Invisible full-screen backdrop: clicking anywhere outside closes the menu. */}
                    <div className="fixed inset-0 z-40" onClick={() => setIsOpen(false)} />
                    <div className="absolute top-full right-0 mt-1 bg-gray-800 border border-gray-600 rounded shadow-lg z-50 min-w-[280px]">
                        {modes.map((mode, index) => {
                            const metadata = ThinkingModeData[mode];
                            const isFirst = index === 0;
                            const isLast = index === modes.length - 1;
                            const isDisabled = !hasPremium && metadata.premium;
                            const isSelected = currentMode === mode;
                            return (
                                <button
                                    key={mode}
                                    onClick={() => handleSelect(mode)}
                                    disabled={isDisabled}
                                    className={`w-full flex flex-col gap-0.5 px-3 ${
                                        isFirst ? "pt-1 pb-0.5" : isLast ? "pt-0.5 pb-1" : "pt-0.5 pb-0.5"
                                    } ${
                                        isDisabled
                                            ? "text-gray-500 cursor-not-allowed"
                                            : "text-gray-300 hover:bg-gray-700 cursor-pointer"
                                    } transition-colors text-left`}
                                >
                                    <div className="flex items-center gap-2 w-full">
                                        <i className={`fa ${metadata.icon}`}></i>
                                        <span className={`text-sm ${isSelected ? "font-bold" : ""}`}>
                                            {metadata.name}
                                            {isDisabled && " (premium)"}
                                        </span>
                                        {isSelected && <i className="fa fa-check ml-auto"></i>}
                                    </div>
                                    <div className="text-xs text-muted pl-5" style={{ whiteSpace: "pre-line" }}>
                                        {metadata.desc}
                                    </div>
                                </button>
                            );
                        })}
                    </div>
                </>
            )}
        </div>
    );
});

ThinkingLevelDropdown.displayName = "ThinkingLevelDropdown";

frontend/app/aipanel/waveai-model.tsx

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,7 @@ export class WaveAIModel {
5757
widgetAccessAtom!: jotai.Atom<boolean>;
5858
droppedFiles: jotai.PrimitiveAtom<DroppedFile[]> = jotai.atom([]);
5959
chatId!: jotai.PrimitiveAtom<string>;
60+
thinkingMode: jotai.PrimitiveAtom<string> = jotai.atom("balanced");
6061
errorMessage: jotai.PrimitiveAtom<string> = jotai.atom(null) as jotai.PrimitiveAtom<string>;
6162
modelAtom!: jotai.Atom<string>;
6263
containerWidth: jotai.PrimitiveAtom<number> = jotai.atom(0);
@@ -332,6 +333,14 @@ export class WaveAIModel {
332333
});
333334
}
334335

336+
setThinkingMode(mode: string) {
337+
globalStore.set(this.thinkingMode, mode);
338+
RpcApi.SetRTInfoCommand(TabRpcClient, {
339+
oref: this.orefContext,
340+
data: { "waveai:thinkingmode": mode },
341+
});
342+
}
343+
335344
async loadInitialChat(): Promise<WaveUIMessage[]> {
336345
const rtInfo = await RpcApi.GetRTInfoCommand(TabRpcClient, {
337346
oref: this.orefContext,
@@ -346,6 +355,9 @@ export class WaveAIModel {
346355
}
347356
globalStore.set(this.chatId, chatIdValue);
348357

358+
const thinkingModeValue = rtInfo?.["waveai:thinkingmode"] ?? "balanced";
359+
globalStore.set(this.thinkingMode, thinkingModeValue);
360+
349361
try {
350362
const chatData = await RpcApi.GetWaveAIChatCommand(TabRpcClient, { chatid: chatIdValue });
351363
const messages: UIMessage[] = chatData?.messages ?? [];

frontend/types/gotypes.d.ts

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -872,7 +872,7 @@ declare global {
872872
"builder:appid"?: string;
873873
"builder:env"?: {[key: string]: string};
874874
"waveai:chatid"?: string;
875-
"waveai:thinkinglevel"?: string;
875+
"waveai:thinkingmode"?: string;
876876
"waveai:maxoutputtokens"?: number;
877877
};
878878

@@ -1171,6 +1171,8 @@ declare global {
11711171
"waveai:firstbytems"?: number;
11721172
"waveai:requestdurms"?: number;
11731173
"waveai:widgetaccess"?: boolean;
1174+
"waveai:thinkinglevel"?: string;
1175+
"waveai:thinkingmode"?: string;
11741176
"waveai:feedback"?: "good" | "bad";
11751177
"waveai:action"?: string;
11761178
$set?: TEventUserProps;

pkg/aiusechat/uctypes/usechat-types.go

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,12 @@ const (
123123
ThinkingLevelHigh = "high"
124124
)
125125

126+
const (
127+
ThinkingModeQuick = "quick"
128+
ThinkingModeBalanced = "balanced"
129+
ThinkingModeDeep = "deep"
130+
)
131+
126132
const (
127133
ToolUseStatusPending = "pending"
128134
ToolUseStatusError = "error"
@@ -212,6 +218,7 @@ type AIOptsType struct {
212218
MaxTokens int `json:"maxtokens,omitempty"`
213219
TimeoutMs int `json:"timeoutms,omitempty"`
214220
ThinkingLevel string `json:"thinkinglevel,omitempty"` // ThinkingLevelLow, ThinkingLevelMedium, or ThinkingLevelHigh
221+
ThinkingMode string `json:"thinkingmode,omitempty"` // quick, balanced, or deep
215222
}
216223

217224
func (opts AIOptsType) IsWaveProxy() bool {
@@ -254,6 +261,8 @@ type AIMetrics struct {
254261
FirstByteLatency int `json:"firstbytelatency"` // ms
255262
RequestDuration int `json:"requestduration"` // ms
256263
WidgetAccess bool `json:"widgetaccess"`
264+
ThinkingLevel string `json:"thinkinglevel,omitempty"`
265+
ThinkingMode string `json:"thinkingmode,omitempty"`
257266
}
258267

259268
// GenAIMessage interface for messages stored in conversations

0 commit comments

Comments
 (0)