Commit cf140d4

Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web into ali_bytedance_reasoning_content
2 parents 476d946 + 48cd4b1 commit cf140d4

File tree (4 files changed: 72 additions, 31 deletions)

app/client/platforms/siliconflow.ts
app/components/emoji.tsx
app/components/exporter.tsx
app/constant.ts

app/client/platforms/siliconflow.ts

Lines changed: 53 additions & 5 deletions
@@ -1,14 +1,19 @@
 "use client";
 // azure and openai, using same models. so using same LLMApi.
-import { ApiPath, SILICONFLOW_BASE_URL, SiliconFlow } from "@/app/constant";
+import {
+  ApiPath,
+  SILICONFLOW_BASE_URL,
+  SiliconFlow,
+  DEFAULT_MODELS,
+} from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
   useChatStore,
   ChatMessageTool,
   usePluginStore,
 } from "@/app/store";
-import { streamWithThink } from "@/app/utils/chat";
+import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
 import {
   ChatOptions,
   getHeaders,
@@ -20,13 +25,23 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  isVisionModel,
   getTimeoutMSByModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
+
 import { fetch } from "@/app/utils/stream";
+export interface SiliconFlowListModelResponse {
+  object: string;
+  data: Array<{
+    id: string;
+    object: string;
+    root: string;
+  }>;
+}

 export class SiliconflowApi implements LLMApi {
-  private disableListModels = true;
+  private disableListModels = false;

   path(path: string): string {
     const accessStore = useAccessStore.getState();
@@ -67,13 +82,16 @@ export class SiliconflowApi implements LLMApi {
   }

   async chat(options: ChatOptions) {
+    const visionModel = isVisionModel(options.config.model);
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
       if (v.role === "assistant") {
         const content = getMessageTextContentWithoutThinking(v);
         messages.push({ role: v.role, content });
       } else {
-        const content = getMessageTextContent(v);
+        const content = visionModel
+          ? await preProcessImageContent(v.content)
+          : getMessageTextContent(v);
         messages.push({ role: v.role, content });
       }
     }
@@ -234,6 +252,36 @@
   }

   async models(): Promise<LLMModel[]> {
-    return [];
+    if (this.disableListModels) {
+      return DEFAULT_MODELS.slice();
+    }
+
+    const res = await fetch(this.path(SiliconFlow.ListModelPath), {
+      method: "GET",
+      headers: {
+        ...getHeaders(),
+      },
+    });
+
+    const resJson = (await res.json()) as SiliconFlowListModelResponse;
+    const chatModels = resJson.data;
+    console.log("[Models]", chatModels);
+
+    if (!chatModels) {
+      return [];
+    }
+
+    let seq = 1000; // keep consistent with the sort order in Constant.ts
+    return chatModels.map((m) => ({
+      name: m.id,
+      available: true,
+      sorted: seq++,
+      provider: {
+        id: "siliconflow",
+        providerName: "SiliconFlow",
+        providerType: "siliconflow",
+        sorted: 14,
+      },
+    }));
   }
 }
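
For context (not part of the commit): with disableListModels now false, models() fetches SiliconFlow.ListModelPath and maps each returned entry onto an LLMModel. A minimal sketch of that mapping under an assumed response (the sample ids below are illustrative placeholders, not values guaranteed by the SiliconFlow API):

// Sketch only; the response shape follows the SiliconFlowListModelResponse interface added above.
const sampleResponse = {
  object: "list",
  data: [
    { id: "deepseek-ai/DeepSeek-R1", object: "model", root: "deepseek-ai/DeepSeek-R1" },
    { id: "Qwen/Qwen2.5-7B-Instruct", object: "model", root: "Qwen/Qwen2.5-7B-Instruct" },
  ],
};

let seq = 1000; // same starting sequence as in the diff, matching the sort order in Constant.ts
const models = sampleResponse.data.map((m) => ({
  name: m.id, // the raw API model id becomes the model name
  available: true,
  sorted: seq++, // 1000, 1001, ... preserves the API's ordering
  provider: {
    id: "siliconflow",
    providerName: "SiliconFlow",
    providerType: "siliconflow",
    sorted: 14,
  },
}));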

app/components/emoji.tsx

Lines changed: 3 additions & 3 deletions
@@ -66,11 +66,11 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     LlmIcon = BotIconGemma;
   } else if (modelName.startsWith("claude")) {
     LlmIcon = BotIconClaude;
-  } else if (modelName.startsWith("llama")) {
+  } else if (modelName.toLowerCase().includes("llama")) {
     LlmIcon = BotIconMeta;
   } else if (modelName.startsWith("mixtral")) {
     LlmIcon = BotIconMistral;
-  } else if (modelName.startsWith("deepseek")) {
+  } else if (modelName.toLowerCase().includes("deepseek")) {
     LlmIcon = BotIconDeepseek;
   } else if (modelName.startsWith("moonshot")) {
     LlmIcon = BotIconMoonshot;
@@ -85,7 +85,7 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
   } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
     LlmIcon = BotIconDoubao;
   } else if (
-    modelName.startsWith("glm") ||
+    modelName.toLowerCase().includes("glm") ||
     modelName.startsWith("cogview-") ||
     modelName.startsWith("cogvideox-")
   ) {
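
For context (not part of the commit): switching from startsWith to toLowerCase().includes lets the icon lookup match provider-prefixed or mixed-case model ids. A small illustration with a hypothetical id (the id is an assumption, not taken from the diff):

const modelName = "deepseek-ai/DeepSeek-R1"; // hypothetical provider-prefixed id

modelName.startsWith("deepseek"); // false, the old check misses it and no DeepSeek icon is chosen
modelName.toLowerCase().includes("deepseek"); // true, the new check picks the DeepSeek icon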

app/components/exporter.tsx

Lines changed: 14 additions & 23 deletions
@@ -23,7 +23,6 @@ import CopyIcon from "../icons/copy.svg";
 import LoadingIcon from "../icons/three-dots.svg";
 import ChatGptIcon from "../icons/chatgpt.png";
 import ShareIcon from "../icons/share.svg";
-import BotIcon from "../icons/bot.png";

 import DownloadIcon from "../icons/download.svg";
 import { useEffect, useMemo, useRef, useState } from "react";
@@ -33,13 +32,13 @@ import dynamic from "next/dynamic";
 import NextImage from "next/image";

 import { toBlob, toPng } from "html-to-image";
-import { DEFAULT_MASK_AVATAR } from "../store/mask";

 import { prettyObject } from "../utils/format";
 import { EXPORT_MESSAGE_CLASS_NAME } from "../constant";
 import { getClientConfig } from "../config/client";
 import { type ClientApi, getClientApi } from "../client/api";
 import { getMessageTextContent } from "../utils";
+import { MaskAvatar } from "./mask";
 import clsx from "clsx";

 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
@@ -407,22 +406,6 @@ export function PreviewActions(props: {
   );
 }

-function ExportAvatar(props: { avatar: string }) {
-  if (props.avatar === DEFAULT_MASK_AVATAR) {
-    return (
-      <img
-        src={BotIcon.src}
-        width={30}
-        height={30}
-        alt="bot"
-        className="user-avatar"
-      />
-    );
-  }
-
-  return <Avatar avatar={props.avatar} />;
-}
-
 export function ImagePreviewer(props: {
   messages: ChatMessage[];
   topic: string;
@@ -546,9 +529,12 @@ export function ImagePreviewer(props: {
           github.com/ChatGPTNextWeb/ChatGPT-Next-Web
         </div>
         <div className={styles["icons"]}>
-          <ExportAvatar avatar={config.avatar} />
+          <MaskAvatar avatar={config.avatar} />
           <span className={styles["icon-space"]}>&</span>
-          <ExportAvatar avatar={mask.avatar} />
+          <MaskAvatar
+            avatar={mask.avatar}
+            model={session.mask.modelConfig.model}
+          />
         </div>
       </div>
       <div>
@@ -576,9 +562,14 @@
             key={i}
           >
             <div className={styles["avatar"]}>
-              <ExportAvatar
-                avatar={m.role === "user" ? config.avatar : mask.avatar}
-              />
+              {m.role === "user" ? (
+                <Avatar avatar={config.avatar}></Avatar>
+              ) : (
+                <MaskAvatar
+                  avatar={session.mask.avatar}
+                  model={m.model || session.mask.modelConfig.model}
+                />
+              )}
             </div>

             <div className={styles["body"]}>

app/constant.ts

Lines changed: 2 additions & 0 deletions
@@ -258,6 +258,7 @@ export const ChatGLM = {
 export const SiliconFlow = {
   ExampleEndpoint: SILICONFLOW_BASE_URL,
   ChatPath: "v1/chat/completions",
+  ListModelPath: "v1/models?&sub_type=chat",
 };

 export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
@@ -462,6 +463,7 @@ export const VISION_MODEL_REGEXES = [
   /gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview"
   /^dall-e-3$/, // Matches exactly "dall-e-3"
   /glm-4v/,
+  /vl/i,
 ];

 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
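
For context (not part of the commit): the new /vl/i entry treats any model id containing "vl", in any case, as vision-capable, which covers Qwen-VL-style ids without listing them individually. A quick check against hypothetical ids (the ids are assumptions for illustration):

const visionRegexExcerpt = [/glm-4v/, /vl/i]; // excerpt of VISION_MODEL_REGEXES

visionRegexExcerpt.some((r) => r.test("Qwen/Qwen2-VL-72B-Instruct")); // true, "VL" matches /vl/i
visionRegexExcerpt.some((r) => r.test("deepseek-ai/DeepSeek-V3")); // false, no "vl" substring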
