Skip to content

Commit 8e6cae7

Browse files
CodeYHJ authored and zerob13 committed
feat: add o3.fan provider (#1259)
* feat: add o3.fan provider * fix: update o3.fan provider apiType and official URL * chore: provider orders and defaultBaseUrl --------- Co-authored-by: zerob13 <[email protected]>
1 parent b5ba2de commit 8e6cae7

File tree

6 files changed

+246
-2
lines changed

6 files changed

+246
-2
lines changed

resources/model-db/providers.json

Lines changed: 130 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61720,7 +61720,7 @@
6172061720
},
6172161721
"burncloud": {
6172261722
"id": "burncloud",
61723-
"name": "burncloud",
61723+
"name": "BurnCloud",
6172461724
"display_name": "burncloud",
6172561725
"models": [
6172661726
{
@@ -95153,6 +95153,135 @@
9515395153
"type": "chat"
9515495154
}
9515595155
]
95156+
},
95157+
"o3fan": {
95158+
"id": "o3fan",
95159+
"name": "o3.fan",
95160+
"display_name": "o3.fan",
95161+
"models": [
95162+
{
95163+
"id": "claude-sonnet-4-5-20250929",
95164+
"name": "Anthropic: Claude Sonnet 4.5 (20250929)",
95165+
"display_name": "Anthropic: Claude Sonnet 4.5 (20250929)",
95166+
"modalities": {
95167+
"input": [
95168+
"text",
95169+
"image"
95170+
],
95171+
"output": [
95172+
"text"
95173+
]
95174+
},
95175+
"limit": {
95176+
"context": 200000,
95177+
"output": 64000
95178+
},
95179+
"temperature": true,
95180+
"tool_call": true,
95181+
"reasoning": {
95182+
"supported": true,
95183+
"default": true
95184+
},
95185+
"attachment": false,
95186+
"open_weights": false,
95187+
"cost": {
95188+
"input": 3,
95189+
"output": 15
95190+
}
95191+
},
95192+
{
95193+
"id": "gemini-3-pro-preview",
95194+
"name": "Gemini 3 Pro Preview",
95195+
"display_name": "Gemini 3 Pro Preview",
95196+
"modalities": {
95197+
"input": [
95198+
"text",
95199+
"image",
95200+
"audio",
95201+
"video"
95202+
],
95203+
"output": [
95204+
"text"
95205+
]
95206+
},
95207+
"limit": {
95208+
"context": 1000000,
95209+
"output": 64000
95210+
},
95211+
"temperature": true,
95212+
"tool_call": true,
95213+
"reasoning": {
95214+
"supported": true,
95215+
"default": true
95216+
},
95217+
"attachment": true,
95218+
"open_weights": false,
95219+
"cost": {
95220+
"input": 0,
95221+
"output": 0
95222+
}
95223+
},
95224+
{
95225+
"id": "gpt-5.2",
95226+
"name": "GPT-5.2",
95227+
"display_name": "GPT-5.2",
95228+
"modalities": {
95229+
"input": [
95230+
"text",
95231+
"image"
95232+
],
95233+
"output": [
95234+
"text"
95235+
]
95236+
},
95237+
"limit": {
95238+
"context": 128000,
95239+
"output": 64000
95240+
},
95241+
"temperature": false,
95242+
"tool_call": true,
95243+
"reasoning": {
95244+
"supported": true,
95245+
"default": true
95246+
},
95247+
"attachment": true,
95248+
"open_weights": false,
95249+
"knowledge": "2025-08-31",
95250+
"release_date": "2025-12-11",
95251+
"last_updated": "2025-12-11",
95252+
"cost": {
95253+
"input": 0,
95254+
"output": 0
95255+
}
95256+
},
95257+
{
95258+
"id": "kimi-k2-0711-preview",
95259+
"name": "Kimi K2 0711",
95260+
"display_name": "Kimi K2 0711",
95261+
"modalities": {
95262+
"input": [
95263+
"text"
95264+
],
95265+
"output": [
95266+
"text"
95267+
]
95268+
},
95269+
"limit": {
95270+
"context": 128000
95271+
},
95272+
"temperature": true,
95273+
"tool_call": true,
95274+
"reasoning": {
95275+
"supported": false
95276+
},
95277+
"attachment": false,
95278+
"open_weights": true,
95279+
"cost": {
95280+
"input": 3.5,
95281+
"output": 14
95282+
}
95283+
}
95284+
]
9515695285
}
9515795286
}
9515895287
}

src/main/presenter/configPresenter/providers.ts

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -746,5 +746,20 @@ export const DEFAULT_PROVIDERS: LLM_PROVIDER_BASE[] = [
746746
models: 'https://platform.xiaomimimo.com/#/docs',
747747
defaultBaseUrl: 'https://api.xiaomimimo.com/v1'
748748
}
749+
},
750+
{
751+
id: 'o3fan',
752+
name: 'o3.fan',
753+
apiType: 'o3fan',
754+
apiKey: '',
755+
baseUrl: 'https://api.o3.fan/v1',
756+
enable: false,
757+
websites: {
758+
official: 'https://o3.fan',
759+
apiKey: 'https://o3.fan/token',
760+
docs: 'https://o3.fan',
761+
models: 'https://o3.fan/info/models',
762+
defaultBaseUrl: 'https://api.o3.fan/v1'
763+
}
749764
}
750765
]

src/main/presenter/llmProviderPresenter/managers/providerInstanceManager.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@ import { VercelAIGatewayProvider } from '../providers/vercelAIGatewayProvider'
3434
import { PoeProvider } from '../providers/poeProvider'
3535
import { JiekouProvider } from '../providers/jiekouProvider'
3636
import { ZenmuxProvider } from '../providers/zenmuxProvider'
37+
import { O3fanProvider } from '../providers/o3fanProvider'
3738
import { RateLimitManager } from './rateLimitManager'
3839
import { StreamState } from '../types'
3940
import { AcpSessionPersistence } from '../../agentPresenter/acp'
@@ -95,6 +96,7 @@ export class ProviderInstanceManager {
9596
['aws-bedrock', AwsBedrockProvider],
9697
['jiekou', JiekouProvider],
9798
['zenmux', ZenmuxProvider],
99+
['o3fan', O3fanProvider],
98100
['acp', AcpProvider]
99101
])
100102
}
@@ -127,7 +129,8 @@ export class ProviderInstanceManager {
127129
['aws-bedrock', AwsBedrockProvider],
128130
['jiekou', JiekouProvider],
129131
['zenmux', ZenmuxProvider],
130-
['acp', AcpProvider]
132+
['acp', AcpProvider],
133+
['o3fan', O3fanProvider]
131134
])
132135
}
133136

Lines changed: 95 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,95 @@
1+
import {
2+
LLM_PROVIDER,
3+
LLMResponse,
4+
MODEL_META,
5+
ChatMessage,
6+
IConfigPresenter
7+
} from '@shared/presenter'
8+
import { ModelType } from '@shared/model'
9+
import { OpenAICompatibleProvider } from './openAICompatibleProvider'
10+
import { providerDbLoader } from '../../configPresenter/providerDbLoader'
11+
import { modelCapabilities } from '../../configPresenter/modelCapabilities'
12+
13+
export class O3fanProvider extends OpenAICompatibleProvider {
14+
constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
15+
super(provider, configPresenter)
16+
}
17+
18+
protected async fetchOpenAIModels(): Promise<MODEL_META[]> {
19+
const resolvedId = modelCapabilities.resolveProviderId(this.provider.id) || this.provider.id
20+
const provider = providerDbLoader.getProvider(resolvedId)
21+
if (!provider || !Array.isArray(provider.models)) {
22+
return []
23+
}
24+
25+
return provider.models.map((model) => {
26+
const inputs = model.modalities?.input
27+
const outputs = model.modalities?.output
28+
const hasImageInput = Array.isArray(inputs) && inputs.includes('image')
29+
const hasImageOutput = Array.isArray(outputs) && outputs.includes('image')
30+
const modelType = hasImageOutput ? ModelType.ImageGeneration : ModelType.Chat
31+
32+
return {
33+
id: model.id,
34+
name: model.display_name || model.name || model.id,
35+
group: 'o3fan',
36+
providerId: this.provider.id,
37+
isCustom: false,
38+
contextLength: model.limit?.context ?? 8192,
39+
maxTokens: model.limit?.output ?? 4096,
40+
vision: hasImageInput,
41+
functionCall: Boolean(model.tool_call),
42+
reasoning: Boolean(model.reasoning?.supported),
43+
enableSearch: Boolean(model.search?.supported),
44+
type: modelType
45+
}
46+
})
47+
}
48+
49+
async completions(
50+
messages: ChatMessage[],
51+
modelId: string,
52+
temperature?: number,
53+
maxTokens?: number
54+
): Promise<LLMResponse> {
55+
return this.openAICompletion(messages, modelId, temperature, maxTokens)
56+
}
57+
58+
async summaries(
59+
text: string,
60+
modelId: string,
61+
temperature?: number,
62+
maxTokens?: number
63+
): Promise<LLMResponse> {
64+
return this.openAICompletion(
65+
[
66+
{
67+
role: 'user',
68+
content: `You need to summarize the user's conversation into a title of no more than 10 words, with the title language matching the user's primary language, without using punctuation or other special symbols:\n${text}`
69+
}
70+
],
71+
modelId,
72+
temperature,
73+
maxTokens
74+
)
75+
}
76+
77+
async generateText(
78+
prompt: string,
79+
modelId: string,
80+
temperature?: number,
81+
maxTokens?: number
82+
): Promise<LLMResponse> {
83+
return this.openAICompletion(
84+
[
85+
{
86+
role: 'user',
87+
content: prompt
88+
}
89+
],
90+
modelId,
91+
temperature,
92+
maxTokens
93+
)
94+
}
95+
}
3.24 KB
Loading

src/renderer/src/components/icons/ModelIcon.vue

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,12 +67,14 @@ import jiekouColorIcon from '@/assets/llm-icons/jiekou-color.svg?url'
6767
import zenmuxColorIcon from '@/assets/llm-icons/zenmux-color.svg?url'
6868
import burncloudColorIcon from '@/assets/llm-icons/burncloud-color.svg?url'
6969
import xiaomiColorIcon from '@/assets/llm-icons/xiaomi.png?url'
70+
import o3fanColorIcon from '@/assets/llm-icons/o3-fan.png?url'
7071
7172
// 导入所有图标
7273
const icons = {
7374
'kimi-cli': moonshotColorIcon,
7475
'claude-code-acp': claudeColorIcon,
7576
'codex-acp': openaiColorIcon,
77+
o3fan: o3fanColorIcon,
7678
cherryin: cherryinColorIcon,
7779
modelscope: modelscopeColorIcon,
7880
'302ai': _302aiIcon,

0 commit comments

Comments
 (0)