Skip to content

Commit a07ed10

Browse files
authored
Merge pull request #71 from OpenAgentPlatform/gemini
feat: support google gemini & mistral ai models settings
2 parents 6c4f136 + a017c23 commit a07ed10

File tree

16 files changed

+460
-54
lines changed

16 files changed

+460
-54
lines changed

electron/main/ipc/llm.ts

Lines changed: 23 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ import { Anthropic } from "@anthropic-ai/sdk"
22
import { ipcMain } from "electron"
33
import { Ollama } from "ollama"
44
import OpenAI from "openai"
5+
import { Mistral } from "@mistralai/mistralai"
56

67
export function ipcLlmHandler() {
78
ipcMain.handle("llm:openaiModelList", async (_, apiKey: string) => {
@@ -43,4 +44,25 @@ export function ipcLlmHandler() {
4344
return []
4445
}
4546
})
46-
}
47+
48+
ipcMain.handle("llm:googleGenaiModelList", async (_, apiKey: string) => {
49+
try {
50+
const url = `https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`
51+
const response = await fetch(url)
52+
const data = await response.json() as { models: { name: string }[] }
53+
return data.models.map((model) => model.name)
54+
} catch (error) {
55+
return []
56+
}
57+
})
58+
59+
ipcMain.handle("llm:mistralaiModelList", async (_, apiKey: string) => {
60+
try {
61+
const client = new Mistral({ apiKey })
62+
const models = await client.models.list()
63+
return models.data?.map((model) => model.id) ?? []
64+
} catch (error) {
65+
return []
66+
}
67+
})
68+
}

electron/preload/index.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,8 @@ contextBridge.exposeInMainWorld("ipcRenderer", {
3030
openaiCompatibleModelList: (apiKey: string, baseURL: string) => ipcRenderer.invoke("llm:openaiCompatibleModelList", apiKey, baseURL),
3131
anthropicModelList: (apiKey: string, baseURL: string) => ipcRenderer.invoke("llm:anthropicModelList", apiKey, baseURL),
3232
ollamaModelList: (baseURL: string) => ipcRenderer.invoke("llm:ollamaModelList", baseURL),
33+
googleGenaiModelList: (apiKey: string) => ipcRenderer.invoke("llm:googleGenaiModelList", apiKey),
34+
mistralaiModelList: (apiKey: string) => ipcRenderer.invoke("llm:mistralaiModelList", apiKey),
3335

3436
// context menu
3537
showSelectionContextMenu: () => ipcRenderer.invoke("show-selection-context-menu"),

package-lock.json

Lines changed: 57 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package.json

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,8 +56,11 @@
5656
"@langchain/anthropic": "^0.3.12",
5757
"@langchain/community": "^0.3.0",
5858
"@langchain/core": "^0.3.0",
59+
"@langchain/google-genai": "^0.1.10",
60+
"@langchain/mistralai": "^0.2.0",
5961
"@langchain/ollama": "^0.1.4",
6062
"@langchain/openai": "^0.3.17",
63+
"@mistralai/mistralai": "^1.5.1",
6164
"@modelcontextprotocol/inspector": "^0.4.1",
6265
"@modelcontextprotocol/sdk": "^1.4.1",
6366
"@radix-ui/react-dropdown-menu": "^2.1.6",

public/image/model_gemini.svg

Lines changed: 1 addition & 0 deletions
Loading

public/image/model_mistral-ai.svg

Lines changed: 40 additions & 0 deletions
Loading

services/processQuery.ts

Lines changed: 48 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,9 @@ import { Client } from "@modelcontextprotocol/sdk/client/index.js";
1111
import { ModelManager } from "./models/index.js";
1212
import { imageToBase64 } from "./utils/image.js";
1313
import logger from "./utils/logger.js";
14-
import { iQueryInput, iStreamMessage } from "./utils/types.js";
14+
import { iQueryInput, iStreamMessage, ModelSettings } from "./utils/types.js";
15+
import { openAIConvertToGeminiTools } from "./utils/toolHandler.js";
16+
import { ToolDefinition } from "@langchain/core/language_models/base";
1517

1618
// Map to store abort controllers
1719
export const abortControllerMap = new Map<string, AbortController>();
@@ -31,7 +33,7 @@ interface TokenUsage {
3133

3234
export async function handleProcessQuery(
3335
toolToClientMap: Map<string, Client>,
34-
availableTools: BindToolsInput[],
36+
availableTools: ToolDefinition[],
3537
model: BaseChatModel | null,
3638
input: string | iQueryInput,
3739
history: BaseMessage[],
@@ -121,12 +123,16 @@ export async function handleProcessQuery(
121123

122124
let hasToolCalls = true;
123125

124-
const runModel = modelManager.enableTools ? model.bindTools?.(availableTools) || model : model;
126+
const tools = currentModelSettings?.modelProvider === "google-genai" ? openAIConvertToGeminiTools(availableTools) : availableTools;
127+
128+
const runModel = modelManager.enableTools ? model.bindTools?.(tools) || model : model;
125129

126130
const isOllama = currentModelSettings?.modelProvider === "ollama";
127131
const isDeepseek =
128132
currentModelSettings?.configuration?.baseURL?.toLowerCase().includes("deepseek") ||
129133
currentModelSettings?.model?.toLowerCase().includes("deepseek");
134+
const isMistralai = currentModelSettings?.modelProvider === "mistralai";
135+
const isBedrock = currentModelSettings?.modelProvider === "bedrock";
130136

131137
logger.debug(`[${chatId}] Start to process LLM query`);
132138

@@ -141,7 +147,7 @@ export async function handleProcessQuery(
141147
try {
142148
// Track token usage if available
143149
for await (const chunk of stream) {
144-
caculateTokenUsage(tokenUsage, chunk, currentModelSettings!.modelProvider!);
150+
caculateTokenUsage(tokenUsage, chunk, currentModelSettings!);
145151

146152
if (chunk.content) {
147153
let chunkMessage = "";
@@ -235,6 +241,8 @@ export async function handleProcessQuery(
235241
throw error;
236242
}
237243

244+
logger.debug(`[${chatId}] Chunk collected`);
245+
238246
// filter empty tool calls
239247
toolCalls = toolCalls.filter((call) => call);
240248

@@ -247,6 +255,7 @@ export async function handleProcessQuery(
247255
break;
248256
}
249257

258+
logger.debug(`[${chatId}] Tool calls: ${JSON.stringify(toolCalls, null, 2)}`);
250259
// support anthropic multiple tool calls version but other not sure
251260
messages.push(
252261
new AIMessage({
@@ -257,14 +266,23 @@ export async function handleProcessQuery(
257266
text: currentContent || ".",
258267
},
259268
// Deepseek will recursive when tool_use exist in content
260-
...(isDeepseek
269+
...(isDeepseek || isMistralai || isBedrock
261270
? []
262-
: toolCalls.map((toolCall) => ({
271+
: toolCalls.map((toolCall) => {
272+
let parsedArgs = {}
273+
try {
274+
parsedArgs = toolCall.function.arguments === "" ? {} : JSON.parse(toolCall.function.arguments);
275+
} catch (error) {
276+
toolCall.function.arguments = "{}";
277+
logger.error(`[${chatId}] Error parsing tool call ${toolCall.function.name} args: ${error}`);
278+
}
279+
return {
263280
type: "tool_use",
264281
id: toolCall.id,
265282
name: toolCall.function.name,
266-
input: toolCall.function.arguments === "" ? {} : JSON.parse(toolCall.function.arguments),
267-
}))),
283+
input: parsedArgs,
284+
}
285+
})),
268286
],
269287
additional_kwargs: {
270288
tool_calls: toolCalls.map((toolCall) => ({
@@ -297,6 +315,8 @@ export async function handleProcessQuery(
297315
);
298316
}
299317

318+
logger.debug(`[${chatId}] Tool calls collected`);
319+
300320
// Execute all tool calls in parallel
301321
const toolResults = await Promise.all(
302322
toolCalls.map(async (toolCall) => {
@@ -407,10 +427,14 @@ export async function handleProcessQuery(
407427
})
408428
);
409429

430+
logger.debug(`[${chatId}] Tool results collected`);
431+
410432
// Add tool results to conversation
411433
if (toolResults.length > 0) {
412434
messages.push(...toolResults.map((result) => new ToolMessage(result)));
413435
}
436+
437+
logger.debug(`[${chatId}] Messages collected and ready to next round`);
414438
}
415439

416440
// Log token usage at the end of processing
@@ -437,8 +461,20 @@ export async function handleProcessQuery(
437461
}
438462
}
439463

440-
function caculateTokenUsage(tokenUsage: TokenUsage, chunk: AIMessageChunk, currentModelProvider: string) {
441-
switch (currentModelProvider) {
464+
function caculateTokenUsage(tokenUsage: TokenUsage, chunk: AIMessageChunk, currentModelSettings: ModelSettings) {
465+
if (!currentModelSettings) {
466+
return;
467+
}
468+
469+
if (currentModelSettings.configuration?.baseURL?.toLowerCase().includes("silicon")) {
470+
const usage = chunk.response_metadata.usage;
471+
tokenUsage.totalInputTokens = usage?.prompt_tokens || 0;
472+
tokenUsage.totalOutputTokens = usage?.completion_tokens || 0;
473+
tokenUsage.totalTokens = usage?.total_tokens || 0;
474+
return;
475+
}
476+
477+
switch (currentModelSettings.modelProvider) {
442478
case "openai":
443479
if (chunk.response_metadata?.usage) {
444480
const usage = chunk.response_metadata.usage;
@@ -455,7 +491,8 @@ function caculateTokenUsage(tokenUsage: TokenUsage, chunk: AIMessageChunk, curre
455491
tokenUsage.totalOutputTokens += usage?.output_tokens || 0;
456492
tokenUsage.totalTokens += usage?.total_tokens || 0;
457493
}
494+
break;
458495
default:
459496
break;
460497
}
461-
}
498+
}

services/routes/modelVerify.ts

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,7 @@ class ModelVerificationService {
7474
result: fullResponse,
7575
};
7676
} catch (error) {
77+
logger.error(`[ModelVerify] error: ${JSON.stringify(error, null, 2)}`);
7778
if ((error as { name?: string })?.name === "AbortError") {
7879
logger.warn(`Model tools test aborted for ${modelName}`);
7980
}
@@ -246,30 +247,25 @@ const testTools = [
246247
page: {
247248
type: "number",
248249
description: "Page number (default 1)",
249-
default: 1,
250250
},
251251
language: {
252252
type: "string",
253253
description: "Search language code (e.g. 'en', 'zh', 'jp', 'all')",
254-
default: "all",
255254
},
256255
categories: {
257256
type: "array",
258257
items: {
259258
type: "string",
260259
enum: ["general", "news", "science", "files", "images", "videos", "music", "social media", "it"],
261260
},
262-
default: ["general"],
263261
},
264262
time_range: {
265263
type: "string",
266-
enum: ["", "day", "week", "month", "year"],
267-
default: "",
264+
enum: ["none", "day", "week", "month", "year"],
268265
},
269266
safesearch: {
270267
type: "number",
271268
description: "0: None, 1: Moderate, 2: Strict",
272-
default: 1,
273269
},
274270
},
275271
required: ["query"],

0 commit comments

Comments (0)