Skip to content

Commit db7a424

Browse files
authored
Update models: GPT-5, Claude 4, Gemini 2.5 (#452)
* feat: Update anthropic models and add thinking toggle in model settings
* feat: refactor OpenAI reasoning feature with improved UX and GPT-5 support
  - Replace simple reasoning_effort field with structured reasoning object
  - Add reasoning.enabled toggle for better user control
  - Support GPT-5, GPT-5-mini, and GPT-5-nano models with pricing
* feat: Update Gemini models
  - Update Gemini models: remove 1.5 versions, add 2.5-pro and 2.5-flash-lite
  - Remove Groq pricing constants and cost calculation logic
* feat: Add model fallback
  - Add automatic fallback to first available chat model when selected model is not found
  - Create LLMModelNotFoundException for better error handling
* feat: Add migration from v10 to v11 with new model structure
  - Add migration from v10 to v11 with new reasoning/thinking structure
  - Transform OpenAI models with reasoning_effort to new reasoning object structure
  - Transform Anthropic models with thinking.budget_tokens to new thinking structure
  - Add new default models: claude-sonnet-4.0, claude-opus-4.1, gpt-5 series, gemini-2.5 series
* refactor: Centralize default model ids and update schema fallbacks/recommendations
  - Add DEFAULT_CHAT_MODEL_ID ('claude-sonnet-4.0') and DEFAULT_APPLY_MODEL_ID ('gpt-4.1-mini') in src/constants.ts
  - Update RECOMMENDED_MODELS_FOR_CHAT to ['claude-sonnet-4.0', 'gpt-4.1'] and RECOMMENDED_MODELS_FOR_APPLY to ['gpt-4.1-mini']
  - Update smartComposerSettingsSchema to use the new default IDs as fallbacks (src/settings/schema/setting.types.ts)
  - Adjust tests to assert defaults via constants (src/settings/schema/settings.test.ts)
* fix: Resolve CodeRabbit review
1 parent 85a5f37 commit db7a424

File tree

17 files changed

+684
-143
lines changed

17 files changed

+684
-143
lines changed

src/components/chat-view/useChatStreamManager.ts

Lines changed: 34 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ import {
99
LLMAPIKeyInvalidException,
1010
LLMAPIKeyNotSetException,
1111
LLMBaseUrlNotSetException,
12+
LLMModelNotFoundException,
1213
} from '../../core/llm/exception'
1314
import { getChatModelClient } from '../../core/llm/manager'
1415
import { ChatMessage } from '../../types/chat'
@@ -37,7 +38,7 @@ export function useChatStreamManager({
3738
promptGenerator,
3839
}: UseChatStreamManagerParams): UseChatStreamManager {
3940
const app = useApp()
40-
const { settings } = useSettings()
41+
const { settings, setSettings } = useSettings()
4142
const { getMcpManager } = useMcp()
4243

4344
const activeStreamAbortControllersRef = useRef<AbortController[]>([])
@@ -50,11 +51,38 @@ export function useChatStreamManager({
5051
}, [])
5152

5253
const { providerClient, model } = useMemo(() => {
53-
return getChatModelClient({
54-
settings,
55-
modelId: settings.chatModelId,
56-
})
57-
}, [settings])
54+
try {
55+
return getChatModelClient({
56+
settings,
57+
modelId: settings.chatModelId,
58+
})
59+
} catch (error) {
60+
if (error instanceof LLMModelNotFoundException) {
61+
if (settings.chatModels.length === 0) {
62+
throw error
63+
}
64+
// Fallback to the first chat model if the selected chat model is not found
65+
const firstChatModel = settings.chatModels[0]
66+
setSettings({
67+
...settings,
68+
chatModelId: firstChatModel.id,
69+
chatModels: settings.chatModels.map((model) =>
70+
model.id === firstChatModel.id
71+
? {
72+
...model,
73+
enable: true,
74+
}
75+
: model,
76+
),
77+
})
78+
return getChatModelClient({
79+
settings,
80+
modelId: firstChatModel.id,
81+
})
82+
}
83+
throw error
84+
}
85+
}, [settings, setSettings])
5886

5987
const submitChatMutation = useMutation({
6088
mutationFn: async ({

src/components/common/ObsidianTextInput.tsx

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,12 +7,14 @@ type ObsidianTextInputProps = {
77
value: string
88
placeholder?: string
99
onChange: (value: string) => void
10+
type?: 'text' | 'number'
1011
}
1112

1213
export function ObsidianTextInput({
1314
value,
1415
placeholder,
1516
onChange,
17+
type,
1618
}: ObsidianTextInputProps) {
1719
const containerRef = useRef<HTMLDivElement>(null)
1820
const { setting } = useObsidianSetting()
@@ -45,7 +47,9 @@ export function ObsidianTextInput({
4547
textComponent.setValue(value)
4648
if (placeholder) textComponent.setPlaceholder(placeholder)
4749
textComponent.onChange(onChange)
48-
}, [textComponent, value, onChange, placeholder])
50+
51+
if (type) textComponent.inputEl.type = type
52+
}, [textComponent, value, onChange, placeholder, type])
4953

5054
return <div ref={containerRef} />
5155
}

src/components/settings/sections/models/ChatModelSettings.tsx

Lines changed: 90 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import { ObsidianButton } from '../../../common/ObsidianButton'
77
import { ObsidianDropdown } from '../../../common/ObsidianDropdown'
88
import { ObsidianSetting } from '../../../common/ObsidianSetting'
99
import { ObsidianTextInput } from '../../../common/ObsidianTextInput'
10+
import { ObsidianToggle } from '../../../common/ObsidianToggle'
1011
import { ReactModal } from '../../../common/ReactModal'
1112

1213
type SettingsComponentProps = {
@@ -36,27 +37,27 @@ type ModelSettingsRegistry = {
3637
SettingsComponent: React.FC<SettingsComponentProps>
3738
}
3839

39-
const OPEN_AI_REASONING_MODEL_IDS = [
40-
'o1',
41-
'o1-mini',
42-
'o3',
43-
'o3-mini',
44-
'o4-mini',
45-
]
46-
47-
// Registry of available model settings
40+
/**
41+
* Registry of available model settings.
42+
*
43+
* The check function is used to determine if the model settings should be displayed.
44+
* The SettingsComponent is the component that will be displayed when the model settings are opened.
45+
*/
4846
const MODEL_SETTINGS_REGISTRY: ModelSettingsRegistry[] = [
49-
// OpenAI reasoning model settings
47+
/**
48+
* OpenAI model settings
49+
*/
5050
{
51-
check: (model) =>
52-
model.providerType === 'openai' &&
53-
OPEN_AI_REASONING_MODEL_IDS.includes(model.model),
51+
check: (model) => model.providerType === 'openai',
5452

5553
SettingsComponent: (props: SettingsComponentProps) => {
5654
const { model, plugin, onClose } = props
5755
const typedModel = model as ChatModel & { providerType: 'openai' }
56+
const [reasoningEnabled, setReasoningEnabled] = useState<boolean>(
57+
typedModel.reasoning?.enabled ?? false,
58+
)
5859
const [reasoningEffort, setReasoningEffort] = useState<string>(
59-
typedModel.reasoning_effort ?? 'medium',
60+
typedModel.reasoning?.reasoning_effort ?? 'medium',
6061
)
6162

6263
const handleSubmit = async () => {
@@ -67,8 +68,20 @@ const MODEL_SETTINGS_REGISTRY: ModelSettingsRegistry[] = [
6768

6869
const updatedModel = {
6970
...typedModel,
70-
reasoning_effort: reasoningEffort,
71+
reasoning: {
72+
enabled: reasoningEnabled,
73+
reasoning_effort: reasoningEffort,
74+
},
75+
}
76+
77+
const validationResult = chatModelSchema.safeParse(updatedModel)
78+
if (!validationResult.success) {
79+
new Notice(
80+
validationResult.error.issues.map((v) => v.message).join('\n'),
81+
)
82+
return
7183
}
84+
7285
await plugin.setSettings({
7386
...plugin.settings,
7487
chatModels: plugin.settings.chatModels.map((m) =>
@@ -81,19 +94,32 @@ const MODEL_SETTINGS_REGISTRY: ModelSettingsRegistry[] = [
8194
return (
8295
<>
8396
<ObsidianSetting
84-
name="Reasoning Effort"
85-
desc={`Controls how much thinking the model does before responding. Default is "medium".`}
97+
name="Reasoning"
98+
desc="Enable reasoning for the model. Available for o-series models (e.g., o3, o4-mini) and GPT-5 models."
8699
>
87-
<ObsidianDropdown
88-
value={reasoningEffort}
89-
options={{
90-
low: 'low',
91-
medium: 'medium',
92-
high: 'high',
93-
}}
94-
onChange={(value: string) => setReasoningEffort(value)}
100+
<ObsidianToggle
101+
value={reasoningEnabled}
102+
onChange={(value: boolean) => setReasoningEnabled(value)}
95103
/>
96104
</ObsidianSetting>
105+
{reasoningEnabled && (
106+
<ObsidianSetting
107+
name="Reasoning Effort"
108+
desc={`Controls how much thinking the model does before responding. Default is "medium".`}
109+
className="smtcmp-setting-item--nested"
110+
required
111+
>
112+
<ObsidianDropdown
113+
value={reasoningEffort}
114+
options={{
115+
low: 'low',
116+
medium: 'medium',
117+
high: 'high',
118+
}}
119+
onChange={(value: string) => setReasoningEffort(value)}
120+
/>
121+
</ObsidianSetting>
122+
)}
97123

98124
<ObsidianSetting>
99125
<ObsidianButton text="Save" onClick={handleSubmit} cta />
@@ -104,17 +130,26 @@ const MODEL_SETTINGS_REGISTRY: ModelSettingsRegistry[] = [
104130
},
105131
},
106132

107-
// Claude 3.7 Sonnet Thinking settings
133+
/**
134+
* Claude model settings
135+
*
136+
* For extended thinking, see:
137+
* @see https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking
138+
*/
108139
{
109-
check: (model) =>
110-
model.providerType === 'anthropic' &&
111-
model.id === 'claude-3.7-sonnet-thinking',
112-
140+
check: (model) => model.providerType === 'anthropic',
113141
SettingsComponent: (props: SettingsComponentProps) => {
142+
const DEFAULT_THINKING_BUDGET_TOKENS = 8192
143+
114144
const { model, plugin, onClose } = props
115145
const typedModel = model as ChatModel & { providerType: 'anthropic' }
146+
const [thinkingEnabled, setThinkingEnabled] = useState<boolean>(
147+
typedModel.thinking?.enabled ?? false,
148+
)
116149
const [budgetTokens, setBudgetTokens] = useState(
117-
(typedModel.thinking?.budget_tokens ?? 8192).toString(),
150+
(
151+
typedModel.thinking?.budget_tokens ?? DEFAULT_THINKING_BUDGET_TOKENS
152+
).toString(),
118153
)
119154

120155
const handleSubmit = async () => {
@@ -131,7 +166,10 @@ const MODEL_SETTINGS_REGISTRY: ModelSettingsRegistry[] = [
131166

132167
const updatedModel = {
133168
...typedModel,
134-
thinking: { budget_tokens: parsedTokens },
169+
thinking: {
170+
enabled: thinkingEnabled,
171+
budget_tokens: parsedTokens,
172+
},
135173
}
136174

137175
const validationResult = chatModelSchema.safeParse(updatedModel)
@@ -154,16 +192,29 @@ const MODEL_SETTINGS_REGISTRY: ModelSettingsRegistry[] = [
154192
return (
155193
<>
156194
<ObsidianSetting
157-
name="Budget Tokens"
158-
desc="The maximum number of tokens that Claude can use for thinking. Must be at least 1024."
159-
required
195+
name="Extended Thinking"
196+
desc="Enable extended thinking for Claude. Available for Claude Sonnet 3.7+ and Claude Opus 4.0+."
160197
>
161-
<ObsidianTextInput
162-
value={budgetTokens}
163-
placeholder="Number of tokens"
164-
onChange={(value: string) => setBudgetTokens(value)}
198+
<ObsidianToggle
199+
value={thinkingEnabled}
200+
onChange={(value: boolean) => setThinkingEnabled(value)}
165201
/>
166202
</ObsidianSetting>
203+
{thinkingEnabled && (
204+
<ObsidianSetting
205+
name="Budget Tokens"
206+
desc="The maximum number of tokens that Claude can use for thinking. Must be at least 1024."
207+
className="smtcmp-setting-item--nested"
208+
required
209+
>
210+
<ObsidianTextInput
211+
value={budgetTokens}
212+
placeholder="Number of tokens"
213+
onChange={(value: string) => setBudgetTokens(value)}
214+
type="number"
215+
/>
216+
</ObsidianSetting>
217+
)}
167218

168219
<ObsidianSetting>
169220
<ObsidianButton text="Save" onClick={handleSubmit} cta />

0 commit comments

Comments (0)