Skip to content

Commit b98e2a7

Browse files
committed
frontend/llm/custom: finish custom icon implementation
1 parent a7b3b01 commit b98e2a7

File tree

6 files changed: +58 additions, −28 deletions

src/packages/frontend/components/language-model-icon.tsx

Lines changed: 38 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,20 @@
1-
import { CSS } from "@cocalc/frontend/app-framework";
1+
import { CSS, useTypedRedux } from "@cocalc/frontend/app-framework";
22
import {
33
LanguageModel,
4-
isCustomOpenAI,
4+
fromCustomOpenAIModel,
5+
fromOllamaModel,
56
isGoogleModel,
67
isLanguageModel,
7-
isOllamaLLM,
88
model2vendor,
99
} from "@cocalc/util/db-schema/llm-utils";
1010
import { unreachable } from "@cocalc/util/misc";
1111
import AIAvatar from "./ai-avatar";
12+
import AnthropicAvatar from "./anthropic-avatar";
1213
import GoogleGeminiLogo from "./google-gemini-avatar";
1314
import GooglePalmLogo from "./google-palm-avatar";
1415
import MistralAvatar from "./mistral-avatar";
1516
import OllamaAvatar from "./ollama-avatar";
1617
import OpenAIAvatar from "./openai-avatar";
17-
import AnthropicAvatar from "./anthropic-avatar";
1818

1919
export function LanguageModelVendorAvatar(
2020
props: Readonly<{
@@ -25,6 +25,9 @@ export function LanguageModelVendorAvatar(
2525
) {
2626
const { model, size = 20 } = props;
2727

28+
const ollama = useTypedRedux("customize", "ollama");
29+
const custom_openai = useTypedRedux("customize", "custom_openai");
30+
2831
const style: CSS = {
2932
marginRight: "5px",
3033
...props.style,
@@ -38,13 +41,35 @@ export function LanguageModelVendorAvatar(
3841
return fallback();
3942
}
4043

44+
function renderImgIcon(icon: string) {
45+
return (
46+
<img
47+
width={size}
48+
height={size}
49+
src={icon}
50+
style={{ display: "inline-block", ...style }}
51+
/>
52+
);
53+
}
54+
4155
if (isLanguageModel(model)) {
4256
const vendorName = model2vendor(model).name;
4357
switch (vendorName) {
4458
case "openai":
45-
case "custom_openai":
4659
return <OpenAIAvatar size={size} style={style} />;
4760

61+
case "custom_openai": {
62+
const icon = custom_openai?.getIn([
63+
fromCustomOpenAIModel(model),
64+
"icon",
65+
]);
66+
if (typeof icon === "string") {
67+
return renderImgIcon(icon);
68+
} else {
69+
return <OpenAIAvatar size={size} style={style} />;
70+
}
71+
}
72+
4873
case "google": {
4974
if (model === "chat-bison-001") {
5075
// Palm2, no longer supported, just for backwards compatibility
@@ -59,8 +84,14 @@ export function LanguageModelVendorAvatar(
5984
case "mistralai":
6085
return <MistralAvatar size={size} style={style} />;
6186

62-
case "ollama":
63-
return <OllamaAvatar size={size} style={style} />;
87+
case "ollama": {
88+
const icon = ollama?.getIn([fromOllamaModel(model), "icon"]);
89+
if (typeof icon === "string") {
90+
return renderImgIcon(icon);
91+
} else {
92+
return <OllamaAvatar size={size} style={style} />;
93+
}
94+
}
6495

6596
case "anthropic":
6697
return <AnthropicAvatar size={size} style={style} />;
@@ -71,13 +102,5 @@ export function LanguageModelVendorAvatar(
71102
}
72103
}
73104

74-
if (isOllamaLLM(model)) {
75-
return <OllamaAvatar size={size} style={style} />;
76-
}
77-
78-
if (isCustomOpenAI(model)) {
79-
return <OpenAIAvatar size={size} style={style} />;
80-
}
81-
82105
return fallback();
83106
}

src/packages/frontend/editors/markdown-input/mentionable-users.tsx

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@ import { redux, useMemo, useTypedRedux } from "@cocalc/frontend/app-framework";
1313
import AnthropicAvatar from "@cocalc/frontend/components/anthropic-avatar";
1414
import GoogleGeminiLogo from "@cocalc/frontend/components/google-gemini-avatar";
1515
import MistralAvatar from "@cocalc/frontend/components/mistral-avatar";
16-
import OllamaAvatar from "@cocalc/frontend/components/ollama-avatar";
1716
import OpenAIAvatar from "@cocalc/frontend/components/openai-avatar";
1817
import { LLMModelPrice } from "@cocalc/frontend/frame-editors/llm/llm-selector";
1918
import { useProjectContext } from "@cocalc/frontend/project/context";
@@ -37,6 +36,7 @@ import {
3736
} from "@cocalc/util/db-schema/llm-utils";
3837
import { cmp, timestamp_cmp, trunc_middle } from "@cocalc/util/misc";
3938
import { CustomLLMPublic } from "@cocalc/util/types/llm";
39+
import { LanguageModelVendorAvatar } from "../../components/language-model-icon";
4040
import { Item as CompleteItem } from "./complete";
4141

4242
// we make the show_llm_main_menu field required, to avoid forgetting to set it ;-)
@@ -259,8 +259,8 @@ function mentionableUsers({
259259
value,
260260
label: (
261261
<span>
262-
<OllamaAvatar size={size} /> {conf.display}{" "}
263-
<LLMModelPrice model={m} floatRight />
262+
<LanguageModelVendorAvatar model={value} size={size} />{" "}
263+
{conf.display} <LLMModelPrice model={m} floatRight />
264264
</span>
265265
),
266266
search: search_term,
@@ -283,8 +283,8 @@ function mentionableUsers({
283283
value,
284284
label: (
285285
<span>
286-
<OpenAIAvatar size={size} /> {conf.display}{" "}
287-
<LLMModelPrice model={m} floatRight />
286+
<LanguageModelVendorAvatar model={value} size={size} />{" "}
287+
{conf.display} <LLMModelPrice model={m} floatRight />
288288
</span>
289289
),
290290
search: search_term,

src/packages/frontend/frame-editors/llm/components.tsx

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,10 @@ export function getCustomLLMGroup() {
66
const site_name = customize.get("site_name");
77
const organization_name = customize.get("organization_name") ?? "";
88
return {
9-
title: `These language models on ${site_name} are managed by ${organization_name}`,
9+
title: `Managed by ${organization_name || site_name}`,
1010
label: (
1111
<>
12-
<Text strong>{site_name} language models</Text> – managed by{" "}
13-
{organization_name}
12+
<Text strong>{site_name} language models</Text>
1413
</>
1514
),
1615
};

src/packages/frontend/frame-editors/llm/llm-selector.tsx

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,15 @@ export default function LLMSelector({
109109

110110
if (service === "custom") {
111111
const { title, label } = getCustomLLMGroup();
112-
ret.push({ label, title, options });
112+
ret.push({
113+
label: (
114+
<>
115+
{label}{title}
116+
</>
117+
),
118+
title: "These language models are configured by the administrators.",
119+
options,
120+
});
113121
} else {
114122
const { name, desc, short } = LLM_PROVIDER[service];
115123
const label = (

src/packages/util/db-schema/llm-utils.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
// this contains bits and pieces from the wrongly named openai.ts file
22

3+
import { isEmpty } from "lodash";
34
import LRU from "lru-cache";
45

56
import { unreachable } from "@cocalc/util/misc";
6-
import { isEmpty } from "lodash";
77

88
// "Client LLMs" are defined in the user's account settings
99
// They directly query an external LLM service.

src/packages/util/db-schema/site-settings-extras.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ function custom_llm_valid(value: string): boolean {
140140
// Ollama and Custom OpenAI have the same schema
141141
function custom_llm_display(value: string): string {
142142
const structure =
143-
"Must be {[key : string] : {model: string, baseUrl: string, cocalc?: {display?: string, desc?: string, ...}, ...}";
143+
"Must be {[key : string] : {model: string, baseUrl: string, cocalc?: {display?: string, desc?: string, icon?: string, ...}, ...}";
144144
if (isEmpty(value)) {
145145
return `Empty. ${structure}`;
146146
}
@@ -312,7 +312,7 @@ export const EXTRAS: SettingsExtras = {
312312
},
313313
ollama_configuration: {
314314
name: "Ollama Configuration",
315-
desc: 'Configure Ollama endpoints. e.g. Ollama has "gemma" installed and is available at localhost:11434: `{"gemma" : {"baseUrl": "http://localhost:11434/" , cocalc: {display: "Gemma", desc: "Google\'s Gemma Model"}}`',
315+
desc: 'Configure Ollama endpoints. e.g. Ollama has "gemma" installed and is available at localhost:11434: `{"gemma" : {"baseUrl": "http://localhost:11434/" , cocalc: {display: "Gemma", desc: "Google\'s Gemma Model", icon: "https://.../...png"}}`',
316316
default: "{}",
317317
multiline: 5,
318318
show: ollama_enabled,
@@ -324,7 +324,7 @@ export const EXTRAS: SettingsExtras = {
324324
// This is very similar to the ollama config, but there are small differences in the details.
325325
custom_openai_configuration: {
326326
name: "Custom OpenAI Endpoints",
327-
desc: 'Configure OpenAI endpoints, queried via [@langchain/openai (Node.js)](https://js.langchain.com/v0.1/docs/integrations/llms/openai/). e.g. `{"myllm" : {"baseUrl": "http://1.2.3.4:5678/" , apiKey: "key...", cocalc: {display: "My LLM", desc: "My custom LLM"}}, "gpt-4o-high": {baseUrl: "https://api.openai.com/v1", temperature: 2, "openAIApiKey": "sk-...", "model": "gpt-4o", cocalc: {display: "High GPT-4 Omni", desc: "GPT 4 Omni High Temp"}}}`',
327+
desc: 'Configure OpenAI endpoints, queried via [@langchain/openai (Node.js)](https://js.langchain.com/v0.1/docs/integrations/llms/openai/). e.g. `{"myllm" : {"baseUrl": "http://1.2.3.4:5678/" , apiKey: "key...", cocalc: {display: "My LLM", desc: "My custom LLM", icon: "https://.../...png"}}, "gpt-4o-high": {baseUrl: "https://api.openai.com/v1", temperature: 1.5, "openAIApiKey": "sk-...", "model": "gpt-4o", cocalc: {display: "High GPT-4 Omni", desc: "GPT 4 Omni High Temp"}}}`',
328328
default: "{}",
329329
multiline: 5,
330330
show: custom_openai_enabled,

0 commit comments

Comments (0)