Skip to content

Commit 20e4d1d

Browse files
Merge pull request #157 from acara-app/feat/new-ai-model-option-for-gemini-3.1-pro
feat: Add Gemini 3.1 Pro model and refactor chat input to manage mode and model state externally.
2 parents 59132af + e3188bd commit 20e4d1d

File tree

11 files changed

+61
-69
lines changed

11 files changed

+61
-69
lines changed

app/Enums/ModelName.php

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ enum ModelName: string
1010
case GPT_5_NANO = 'gpt-5-nano';
1111
case GEMINI_2_5_FLASH = 'gemini-2.5-flash';
1212
case GEMINI_3_FLASH = 'gemini-3-flash-preview';
13+
case GEMINI_3_1_PRO = 'gemini-3.1-pro-preview';
1314

1415
/**
1516
* @return array{id: string, name: string, description: string, provider: string}[]
@@ -29,6 +30,7 @@ public function getName(): string
2930
self::GPT_5_NANO => 'GPT-5 Nano',
3031
self::GEMINI_2_5_FLASH => 'Gemini 2.5 Flash',
3132
self::GEMINI_3_FLASH => 'Gemini 3 Flash',
33+
self::GEMINI_3_1_PRO => 'Gemini 3.1 Pro',
3234
};
3335
}
3436

@@ -39,14 +41,15 @@ public function getDescription(): string
3941
self::GPT_5_NANO => 'Cheapest model, best for simpler tasks',
4042
self::GEMINI_2_5_FLASH => 'Fast and versatile performance across a variety of tasks',
4143
self::GEMINI_3_FLASH => 'Google\'s latest model with frontier intelligence built for speed that helps everyone learn, build, and plan anything — faster',
44+
self::GEMINI_3_1_PRO => "Google's latest Pro model with advanced reasoning and frontier capabilities",
4245
};
4346
}
4447

4548
public function getProvider(): string
4649
{
4750
return match ($this) {
4851
self::GPT_5_MINI, self::GPT_5_NANO => 'openai',
49-
self::GEMINI_2_5_FLASH, self::GEMINI_3_FLASH => 'google',
52+
self::GEMINI_2_5_FLASH, self::GEMINI_3_FLASH, self::GEMINI_3_1_PRO => 'google',
5053
};
5154
}
5255

@@ -56,7 +59,7 @@ public function getProvider(): string
5659
public function requiresThinkingMode(): bool
5760
{
5861
return match ($this) {
59-
self::GEMINI_3_FLASH => true,
62+
self::GEMINI_3_FLASH, self::GEMINI_3_1_PRO => true,
6063
default => false,
6164
};
6265
}
@@ -68,7 +71,7 @@ public function requiresThinkingMode(): bool
6871
public function getThinkingBudget(): ?int
6972
{
7073
return match ($this) {
71-
self::GEMINI_3_FLASH => 8192,
74+
self::GEMINI_3_FLASH, self::GEMINI_3_1_PRO => 8192,
7275
default => null,
7376
};
7477
}
@@ -92,7 +95,7 @@ public function supportsTemperature(): bool
9295
public function getRecommendedTemperature(): float
9396
{
9497
return match ($this) {
95-
self::GEMINI_3_FLASH => 1.0,
98+
self::GEMINI_3_FLASH, self::GEMINI_3_1_PRO => 1.0,
9699
default => 0.7,
97100
};
98101
}
@@ -104,7 +107,7 @@ public function getRecommendedTemperature(): float
104107
public function getMinMaxTokens(): int
105108
{
106109
return match ($this) {
107-
self::GEMINI_3_FLASH => 16384,
110+
self::GEMINI_3_FLASH, self::GEMINI_3_1_PRO => 16384,
108111
default => 8000,
109112
};
110113
}

app/Http/Requests/StoreAgentConversationRequest.php

Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ public function rules(): array
3535
'messages.*.parts.*.type' => ['required', 'string'],
3636
'messages.*.parts.*.text' => ['required_if:messages.*.parts.*.type,text', 'string'],
3737

38-
// Query params
38+
// Body params (sent by AI SDK transport)
3939
'mode' => ['required', Rule::enum(AgentMode::class)],
4040
'model' => ['required', Rule::enum(ModelName::class)],
4141
];
@@ -94,16 +94,4 @@ public function messages(): array
9494
'model.required' => 'Model is required',
9595
];
9696
}
97-
98-
/**
99-
* Prepare the data for validation.
100-
* Merge query parameters into the request data.
101-
*/
102-
protected function prepareForValidation(): void
103-
{
104-
$this->merge([
105-
'mode' => $this->query('mode'),
106-
'model' => $this->query('model'),
107-
]);
108-
}
10997
}

lang/en/common.php

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -963,6 +963,7 @@
963963
],
964964
'models' => [
965965
'gemini_3_flash' => 'Gemini 3 Flash',
966+
'gemini_3_1_pro' => 'Gemini 3.1 Pro',
966967
],
967968
],
968969
];

lang/fr/common.php

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -962,6 +962,7 @@
962962
],
963963
'models' => [
964964
'gemini_3_flash' => 'Gemini 3 Flash',
965+
'gemini_3_1_pro' => 'Gemini 3.1 Pro',
965966
],
966967
],
967968
];

lang/mn/common.php

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -963,6 +963,7 @@
963963
],
964964
'models' => [
965965
'gemini_3_flash' => 'Gemini 3 Flash',
966+
'gemini_3_1_pro' => 'Gemini 3.1 Pro',
966967
],
967968
],
968969
];

resources/js/hooks/use-chat-stream.ts

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ import { stream } from '@/routes/chat';
33
import type { ChatStatus } from '@/types/chat';
44
import { useChat, type UIMessage } from '@ai-sdk/react';
55
import { DefaultChatTransport } from 'ai';
6-
import { useMemo } from 'react';
6+
import { useMemo, useRef } from 'react';
77

88
interface UseChatStreamOptions {
99
conversationId?: string;
@@ -28,14 +28,18 @@ export function useChatStream({
2828
model,
2929
initialMessages,
3030
}: UseChatStreamOptions): UseChatStreamReturn {
31+
const modelRef = useRef({ model, mode });
32+
modelRef.current = { model, mode };
33+
3134
const transport = useMemo(
3235
() =>
3336
new DefaultChatTransport({
3437
api: stream.url({
35-
query: { mode, model, conversationId },
38+
query: { conversationId },
3639
}),
40+
body: () => modelRef.current,
3741
}),
38-
[mode, model, conversationId],
42+
[conversationId],
3943
);
4044

4145
const { messages, sendMessage, status, error } = useChat({

resources/js/pages/chat/chat-input.tsx

Lines changed: 17 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ import { useTranslation } from 'react-i18next';
1919

2020
export const AI_MODELS = {
2121
'gemini-3-flash-preview': 'chat.models.gemini_3_flash',
22+
'gemini-3.1-pro-preview': 'chat.models.gemini_3_1_pro',
2223
} as const;
2324

2425
export type AIModel = keyof typeof AI_MODELS;
@@ -37,30 +38,32 @@ export const CHAT_MODES = {
3738
export type ChatMode = keyof typeof CHAT_MODES;
3839

3940
interface Props {
40-
onSubmit: (message: string, mode: ChatMode, model: AIModel) => void;
41+
onSubmit: (message: string) => void;
42+
onModeChange: (mode: ChatMode) => void;
43+
onModelChange: (model: AIModel) => void;
4144
className?: string;
4245
disabled?: boolean;
4346
isLoading?: boolean;
4447
mode: ChatMode;
48+
model: AIModel;
4549
}
4650

4751
export default function ChatInput({
4852
className,
4953
onSubmit,
54+
onModeChange,
55+
onModelChange,
5056
disabled = false,
5157
isLoading = false,
5258
mode,
59+
model,
5360
}: Props) {
5461
const { t } = useTranslation('common');
5562
const [message, setMessage] = useState('');
56-
const [selectedMode, setSelectedMode] = useState<ChatMode>(mode || 'ask');
57-
const [selectedModel, setSelectedModel] = useState<AIModel>(
58-
'gemini-3-flash-preview',
59-
);
6063

6164
const handleSubmit = () => {
6265
if (message.trim()) {
63-
onSubmit(message, selectedMode, selectedModel);
66+
onSubmit(message);
6467
setMessage('');
6568
}
6669
};
@@ -72,7 +75,7 @@ export default function ChatInput({
7275
}
7376
};
7477

75-
const SelectedModeIcon = CHAT_MODES[selectedMode].icon;
78+
const SelectedModeIcon = CHAT_MODES[mode].icon;
7679

7780
return (
7881
<div className="mx-auto flex w-full max-w-3xl items-end bg-background p-0.5 md:px-4 md:py-2">
@@ -103,10 +106,8 @@ export default function ChatInput({
103106
className="gap-1.5"
104107
>
105108
<SelectedModeIcon className="size-4" />
106-
{selectedMode !== 'create-meal-plan' && (
107-
<span>
108-
{t(CHAT_MODES[selectedMode].label)}
109-
</span>
109+
{mode !== 'create-meal-plan' && (
110+
<span>{t(CHAT_MODES[mode].label)}</span>
110111
)}
111112
<ChevronDown className="size-3.5 opacity-60" />
112113
</Button>
@@ -117,11 +118,11 @@ export default function ChatInput({
117118
<DropdownMenuItem
118119
key={key}
119120
onClick={() =>
120-
setSelectedMode(key as ChatMode)
121+
onModeChange(key as ChatMode)
121122
}
122123
className={cn(
123124
'gap-2',
124-
selectedMode === key &&
125+
mode === key &&
125126
'bg-accent text-accent-foreground',
126127
)}
127128
>
@@ -152,7 +153,7 @@ export default function ChatInput({
152153
className="gap-1.5 text-muted-foreground hover:text-foreground"
153154
>
154155
<span className="max-w-[80px] truncate sm:max-w-[90px]">
155-
{t(AI_MODELS[selectedModel])}
156+
{t(AI_MODELS[model])}
156157
</span>
157158
<ChevronDown className="size-3.5 opacity-60" />
158159
</Button>
@@ -163,10 +164,10 @@ export default function ChatInput({
163164
<DropdownMenuItem
164165
key={key}
165166
onClick={() =>
166-
setSelectedModel(key as AIModel)
167+
onModelChange(key as AIModel)
167168
}
168169
className={cn(
169-
selectedModel === key &&
170+
model === key &&
170171
'bg-accent text-accent-foreground',
171172
)}
172173
>

resources/js/pages/chat/create-chat.tsx

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import type { ChatPageProps, UIMessage } from '@/types/chat';
77
import { Head, router, usePage } from '@inertiajs/react';
88
import { useEffect, useRef, useState } from 'react';
99
import ChatInput, { type AIModel, type ChatMode } from './chat-input';
10+
1011
import ChatMessages, { ChatErrorBanner } from './chat-messages';
1112

1213
const breadcrumbs: BreadcrumbItem[] = [
@@ -47,11 +48,7 @@ export default function CreateChat() {
4748
}
4849
}, [messages]);
4950

50-
function handleSubmit(
51-
message: string,
52-
chatMode: ChatMode,
53-
aiModel: AIModel,
54-
) {
51+
function handleSubmit(message: string) {
5552
if (!message.trim()) {
5653
return;
5754
}
@@ -65,8 +62,6 @@ export default function CreateChat() {
6562
});
6663
}
6764

68-
setMode(chatMode);
69-
setModel(aiModel);
7065
sendMessage({ text: message });
7166
}
7267

@@ -92,9 +87,12 @@ export default function CreateChat() {
9287
<ChatInput
9388
className="w-full"
9489
onSubmit={handleSubmit}
90+
onModeChange={setMode}
91+
onModelChange={setModel}
9592
disabled={isStreaming || isSubmitting}
9693
isLoading={isStreaming || isSubmitting}
9794
mode={mode}
95+
model={model}
9896
/>
9997
<p className="px-4 pb-4 text-center text-xs text-muted-foreground">
10098
For informational purposes only. Not a substitute for

tests/Feature/Controllers/ChatControllerTest.php

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -71,13 +71,13 @@
7171
it('accepts valid stream request', function (): void {
7272
$user = User::factory()->create();
7373

74-
$url = route('chat.stream').'?mode='.AgentMode::Ask->value.'&model='.ModelName::GPT_5_MINI->value;
75-
7674
actingAs($user)
77-
->post($url, [
75+
->post(route('chat.stream'), [
7876
'messages' => [
7977
['role' => 'user', 'parts' => [['type' => 'text', 'text' => 'Hello API']]],
8078
],
79+
'mode' => AgentMode::Ask->value,
80+
'model' => ModelName::GPT_5_MINI->value,
8181
])
8282
->assertOk();
8383
});

tests/Unit/Enums/ModelNameTest.php

Lines changed: 17 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -8,32 +8,37 @@
88
expect(ModelName::GPT_5_MINI->value)->toBe('gpt-5-mini')
99
->and(ModelName::GPT_5_NANO->value)->toBe('gpt-5-nano')
1010
->and(ModelName::GEMINI_2_5_FLASH->value)->toBe('gemini-2.5-flash')
11-
->and(ModelName::GEMINI_3_FLASH->value)->toBe('gemini-3-flash-preview');
11+
->and(ModelName::GEMINI_3_FLASH->value)->toBe('gemini-3-flash-preview')
12+
->and(ModelName::GEMINI_3_1_PRO->value)->toBe('gemini-3.1-pro-preview');
1213
});
1314

1415
it('returns correct names', function (): void {
1516
expect(ModelName::GPT_5_MINI->getName())->toBe('GPT-5 mini')
1617
->and(ModelName::GPT_5_NANO->getName())->toBe('GPT-5 Nano')
1718
->and(ModelName::GEMINI_2_5_FLASH->getName())->toBe('Gemini 2.5 Flash')
18-
->and(ModelName::GEMINI_3_FLASH->getName())->toBe('Gemini 3 Flash');
19+
->and(ModelName::GEMINI_3_FLASH->getName())->toBe('Gemini 3 Flash')
20+
->and(ModelName::GEMINI_3_1_PRO->getName())->toBe('Gemini 3.1 Pro');
1921
});
2022

2123
it('returns correct descriptions', function (): void {
2224
expect(ModelName::GPT_5_MINI->getDescription())->toBe('Cheapest model, best for smarter tasks')
2325
->and(ModelName::GPT_5_NANO->getDescription())->toBe('Cheapest model, best for simpler tasks')
2426
->and(ModelName::GEMINI_2_5_FLASH->getDescription())->toBe('Fast and versatile performance across a variety of tasks')
25-
->and(ModelName::GEMINI_3_FLASH->getDescription())->toBe("Google's latest model with frontier intelligence built for speed that helps everyone learn, build, and plan anything — faster");
27+
->and(ModelName::GEMINI_3_FLASH->getDescription())->toBe("Google's latest model with frontier intelligence built for speed that helps everyone learn, build, and plan anything — faster")
28+
->and(ModelName::GEMINI_3_1_PRO->getDescription())->toBe("Google's latest Pro model with advanced reasoning and frontier capabilities");
2629
});
2730

2831
it('returns correct providers', function (): void {
2932
expect(ModelName::GPT_5_MINI->getProvider())->toBe('openai')
3033
->and(ModelName::GPT_5_NANO->getProvider())->toBe('openai')
3134
->and(ModelName::GEMINI_2_5_FLASH->getProvider())->toBe('google')
32-
->and(ModelName::GEMINI_3_FLASH->getProvider())->toBe('google');
35+
->and(ModelName::GEMINI_3_FLASH->getProvider())->toBe('google')
36+
->and(ModelName::GEMINI_3_1_PRO->getProvider())->toBe('google');
3337
});
3438

3539
it('identifies models that require thinking mode', function (): void {
3640
expect(ModelName::GEMINI_3_FLASH->requiresThinkingMode())->toBeTrue()
41+
->and(ModelName::GEMINI_3_1_PRO->requiresThinkingMode())->toBeTrue()
3742
->and(ModelName::GEMINI_2_5_FLASH->requiresThinkingMode())->toBeFalse()
3843
->and(ModelName::GPT_5_MINI->requiresThinkingMode())->toBeFalse()
3944
->and(ModelName::GPT_5_NANO->requiresThinkingMode())->toBeFalse();
@@ -43,23 +48,27 @@
4348
expect(ModelName::GPT_5_MINI->supportsTemperature())->toBeFalse()
4449
->and(ModelName::GPT_5_NANO->supportsTemperature())->toBeFalse()
4550
->and(ModelName::GEMINI_2_5_FLASH->supportsTemperature())->toBeTrue()
46-
->and(ModelName::GEMINI_3_FLASH->supportsTemperature())->toBeTrue();
51+
->and(ModelName::GEMINI_3_FLASH->supportsTemperature())->toBeTrue()
52+
->and(ModelName::GEMINI_3_1_PRO->supportsTemperature())->toBeTrue();
4753
});
4854

4955
it('returns correct thinking budget for thinking models', function (): void {
5056
expect(ModelName::GEMINI_3_FLASH->getThinkingBudget())->toBe(8192)
57+
->and(ModelName::GEMINI_3_1_PRO->getThinkingBudget())->toBe(8192)
5158
->and(ModelName::GEMINI_2_5_FLASH->getThinkingBudget())->toBeNull()
5259
->and(ModelName::GPT_5_MINI->getThinkingBudget())->toBeNull();
5360
});
5461

5562
it('returns correct recommended temperature', function (): void {
5663
expect(ModelName::GEMINI_3_FLASH->getRecommendedTemperature())->toBe(1.0)
64+
->and(ModelName::GEMINI_3_1_PRO->getRecommendedTemperature())->toBe(1.0)
5765
->and(ModelName::GEMINI_2_5_FLASH->getRecommendedTemperature())->toBe(0.7)
5866
->and(ModelName::GPT_5_MINI->getRecommendedTemperature())->toBe(0.7);
5967
});
6068

6169
it('returns correct minimum max tokens', function (): void {
6270
expect(ModelName::GEMINI_3_FLASH->getMinMaxTokens())->toBe(16384)
71+
->and(ModelName::GEMINI_3_1_PRO->getMinMaxTokens())->toBe(16384)
6372
->and(ModelName::GEMINI_2_5_FLASH->getMinMaxTokens())->toBe(8000)
6473
->and(ModelName::GPT_5_MINI->getMinMaxTokens())->toBe(8000);
6574
});
@@ -79,10 +88,11 @@
7988
$models = ModelName::getAvailableModels();
8089

8190
expect($models)->toBeArray()
82-
->and($models)->toHaveCount(4)
91+
->and($models)->toHaveCount(5)
8392
->and($models[0])->toHaveKeys(['id', 'name', 'description', 'provider'])
8493
->and($models[0]['id'])->toBe('gpt-5-mini')
8594
->and($models[1]['id'])->toBe('gpt-5-nano')
8695
->and($models[2]['id'])->toBe('gemini-2.5-flash')
87-
->and($models[3]['id'])->toBe('gemini-3-flash-preview');
96+
->and($models[3]['id'])->toBe('gemini-3-flash-preview')
97+
->and($models[4]['id'])->toBe('gemini-3.1-pro-preview');
8898
});

0 commit comments

Comments (0)