131 changes: 130 additions & 1 deletion resources/model-db/providers.json
@@ -61720,7 +61720,7 @@
},
"burncloud": {
"id": "burncloud",
"name": "burncloud",
"name": "BurnCloud",
"display_name": "burncloud",
"models": [
{
@@ -95153,6 +95153,135 @@
"type": "chat"
}
]
},
"o3fan": {
"id": "o3fan",
"name": "o3.fan",
"display_name": "o3.fan",
"models": [
{
"id": "claude-sonnet-4-5-20250929",
"name": "Anthropic: Claude Sonnet 4.5 (20250929)",
"display_name": "Anthropic: Claude Sonnet 4.5 (20250929)",
"modalities": {
"input": [
"text",
"image"
],
"output": [
"text"
]
},
"limit": {
"context": 200000,
"output": 64000
},
"temperature": true,
"tool_call": true,
"reasoning": {
"supported": true,
"default": true
},
"attachment": false,
"open_weights": false,
"cost": {
"input": 3,
"output": 15
}
},
{
"id": "gemini-3-pro-preview",
"name": "Gemini 3 Pro Preview",
"display_name": "Gemini 3 Pro Preview",
"modalities": {
"input": [
"text",
"image",
"audio",
"video"
],
"output": [
"text"
]
},
"limit": {
"context": 1000000,
"output": 64000
},
"temperature": true,
"tool_call": true,
"reasoning": {
"supported": true,
"default": true
},
"attachment": true,
"open_weights": false,
"cost": {
"input": 0,
"output": 0
}
},
{
"id": "gpt-5.2",
"name": "GPT-5.2",
"display_name": "GPT-5.2",
"modalities": {
"input": [
"text",
"image"
],
"output": [
"text"
]
},
"limit": {
"context": 128000,
"output": 64000
},
"temperature": false,
"tool_call": true,
"reasoning": {
"supported": true,
"default": true
},
"attachment": true,
"open_weights": false,
"knowledge": "2025-08-31",
"release_date": "2025-12-11",
"last_updated": "2025-12-11",
"cost": {
"input": 0,
"output": 0
}
},
{
"id": "kimi-k2-0711-preview",
"name": "Kimi K2 0711",
"display_name": "Kimi K2 0711",
"modalities": {
"input": [
"text"
],
"output": [
"text"
]
},
"limit": {
"context": 128000
},
"temperature": true,
"tool_call": true,
"reasoning": {
"supported": false
},
"attachment": false,
"open_weights": true,
"cost": {
"input": 3.5,
"output": 14
}
}
]
}
}
}
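For reference, each model entry added here follows the same schema as the rest of the model DB. A minimal TypeScript sketch of that shape, inferred only from the fields visible in this diff (optionality is an assumption, not the canonical definition):

```ts
// Sketch of the model-db entry shape implied by the JSON above — not the canonical type.
interface ModelDbEntry {
  id: string
  name: string
  display_name: string
  modalities?: { input: string[]; output: string[] }
  limit?: { context: number; output?: number } // kimi-k2-0711-preview omits `output`
  temperature?: boolean // presumably whether a temperature parameter is supported (false for gpt-5.2)
  tool_call?: boolean
  reasoning?: { supported: boolean; default?: boolean }
  attachment?: boolean
  open_weights?: boolean
  knowledge?: string // knowledge cutoff, e.g. "2025-08-31"
  release_date?: string
  last_updated?: string
  cost?: { input: number; output: number } // USD per million tokens is assumed
}
```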
15 changes: 15 additions & 0 deletions src/main/presenter/configPresenter/providers.ts
@@ -746,5 +746,20 @@ export const DEFAULT_PROVIDERS: LLM_PROVIDER_BASE[] = [
models: 'https://platform.xiaomimimo.com/#/docs',
defaultBaseUrl: 'https://api.xiaomimimo.com/v1'
}
},
{
id: 'o3fan',
name: 'o3.fan',
apiType: 'o3fan',
apiKey: '',
baseUrl: 'https://api.o3.fan/v1',
enable: false,
websites: {
official: 'https://o3.fan',
apiKey: 'https://o3.fan/token',
docs: 'https://o3.fan',
models: 'https://o3.fan/info/models',
defaultBaseUrl: 'https://api.o3.fan/v1'
}
}
]
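The new entry matches the `LLM_PROVIDER_BASE` shape used by the rest of the array. A sketch inferred from the fields in this diff (the real type lives in the shared presenter types and may carry more members):

```ts
// Inferred sketch only — see the actual LLM_PROVIDER_BASE definition in the shared types.
interface LLM_PROVIDER_BASE_Sketch {
  id: string
  name: string
  apiType: string // keys into ProviderInstanceManager's class maps below
  apiKey: string
  baseUrl: string
  enable: boolean
  websites?: {
    official: string
    apiKey: string
    docs: string
    models: string
    defaultBaseUrl: string
  }
}
```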
@@ -34,6 +34,7 @@ import { VercelAIGatewayProvider } from '../providers/vercelAIGatewayProvider'
import { PoeProvider } from '../providers/poeProvider'
import { JiekouProvider } from '../providers/jiekouProvider'
import { ZenmuxProvider } from '../providers/zenmuxProvider'
import { O3fanProvider } from '../providers/o3fanProvider'
import { RateLimitManager } from './rateLimitManager'
import { StreamState } from '../types'
import { AcpSessionPersistence } from '../../agentPresenter/acp'
@@ -95,6 +96,7 @@ export class ProviderInstanceManager {
['aws-bedrock', AwsBedrockProvider],
['jiekou', JiekouProvider],
['zenmux', ZenmuxProvider],
['o3fan', O3fanProvider],
['acp', AcpProvider]
])
}
@@ -127,7 +129,8 @@ export class ProviderInstanceManager {
['aws-bedrock', AwsBedrockProvider],
['jiekou', JiekouProvider],
['zenmux', ZenmuxProvider],
['acp', AcpProvider]
['acp', AcpProvider],
['o3fan', O3fanProvider]
])
}
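Both maps key provider classes by `apiType`, so the `'o3fan'` registrations above are what route the new config entry to `O3fanProvider`. A hypothetical consumption sketch — names other than those imported in this file are illustrative, not the manager's real API:

```ts
// Hypothetical sketch; BaseLLMProvider stands in for whatever common base class the app uses.
type ProviderCtor = new (provider: LLM_PROVIDER, config: IConfigPresenter) => BaseLLMProvider

function instantiate(
  registry: Map<string, ProviderCtor>,
  provider: LLM_PROVIDER,
  config: IConfigPresenter
): BaseLLMProvider {
  const Ctor = registry.get(provider.apiType) // 'o3fan' → O3fanProvider
  if (!Ctor) throw new Error(`No provider class registered for apiType "${provider.apiType}"`)
  return new Ctor(provider, config)
}
```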

95 changes: 95 additions & 0 deletions src/main/presenter/llmProviderPresenter/providers/o3fanProvider.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
import {
LLM_PROVIDER,
LLMResponse,
MODEL_META,
ChatMessage,
IConfigPresenter
} from '@shared/presenter'
import { ModelType } from '@shared/model'
import { OpenAICompatibleProvider } from './openAICompatibleProvider'
import { providerDbLoader } from '../../configPresenter/providerDbLoader'
import { modelCapabilities } from '../../configPresenter/modelCapabilities'

export class O3fanProvider extends OpenAICompatibleProvider {
constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
super(provider, configPresenter)
}

protected async fetchOpenAIModels(): Promise<MODEL_META[]> {
const resolvedId = modelCapabilities.resolveProviderId(this.provider.id) || this.provider.id
const provider = providerDbLoader.getProvider(resolvedId)
if (!provider || !Array.isArray(provider.models)) {
return []
}

return provider.models.map((model) => {
const inputs = model.modalities?.input
const outputs = model.modalities?.output
const hasImageInput = Array.isArray(inputs) && inputs.includes('image')
const hasImageOutput = Array.isArray(outputs) && outputs.includes('image')
const modelType = hasImageOutput ? ModelType.ImageGeneration : ModelType.Chat

return {
id: model.id,
name: model.display_name || model.name || model.id,
group: 'o3fan',
providerId: this.provider.id,
isCustom: false,
contextLength: model.limit?.context ?? 8192,
maxTokens: model.limit?.output ?? 4096,
vision: hasImageInput,
functionCall: Boolean(model.tool_call),
reasoning: Boolean(model.reasoning?.supported),
enableSearch: Boolean(model.search?.supported),
type: modelType
}
})
}

async completions(
messages: ChatMessage[],
modelId: string,
temperature?: number,
maxTokens?: number
): Promise<LLMResponse> {
return this.openAICompletion(messages, modelId, temperature, maxTokens)
}

async summaries(
text: string,
modelId: string,
temperature?: number,
maxTokens?: number
): Promise<LLMResponse> {
return this.openAICompletion(
[
{
role: 'user',
content: `You need to summarize the user's conversation into a title of no more than 10 words, with the title language matching the user's primary language, without using punctuation or other special symbols:\n${text}`
}
],
modelId,
temperature,
maxTokens
)
}

async generateText(
prompt: string,
modelId: string,
temperature?: number,
maxTokens?: number
): Promise<LLMResponse> {
return this.openAICompletion(
[
{
role: 'user',
content: prompt
}
],
modelId,
temperature,
maxTokens
)
}
}
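Design note: instead of hitting the provider's `/v1/models` endpoint as a generic OpenAI-compatible provider would, this override builds `MODEL_META` entries from the static model DB added above, so capability flags (vision, tool calls, reasoning) come straight from `providers.json`. A hedged usage sketch — in the app the instance comes from `ProviderInstanceManager`, and the object literal below may omit fields the real `LLM_PROVIDER` type requires:

```ts
// Illustrative only — real instances are created by ProviderInstanceManager.
const o3fan = new O3fanProvider(
  {
    id: 'o3fan',
    name: 'o3.fan',
    apiType: 'o3fan',
    apiKey: process.env.O3FAN_API_KEY ?? '', // placeholder env var, not from the PR
    baseUrl: 'https://api.o3.fan/v1',
    enable: true
  } as LLM_PROVIDER, // cast: the real type may require more fields
  configPresenter // an existing IConfigPresenter instance is assumed in scope
)

// Plain completion against one of the models defined in the model DB.
const reply = await o3fan.completions(
  [{ role: 'user', content: 'Say hello in one word.' }],
  'gpt-5.2',
  0.7,
  256
)
```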
Binary file added src/renderer/src/assets/llm-icons/o3-fan.png
2 changes: 2 additions & 0 deletions src/renderer/src/components/icons/ModelIcon.vue
Original file line number Diff line number Diff line change
@@ -67,12 +67,14 @@ import jiekouColorIcon from '@/assets/llm-icons/jiekou-color.svg?url'
import zenmuxColorIcon from '@/assets/llm-icons/zenmux-color.svg?url'
import burncloudColorIcon from '@/assets/llm-icons/burncloud-color.svg?url'
import xiaomiColorIcon from '@/assets/llm-icons/xiaomi.png?url'
import o3fanColorIcon from '@/assets/llm-icons/o3-fan.png?url'

// Import all icons
const icons = {
'kimi-cli': moonshotColorIcon,
'claude-code-acp': claudeColorIcon,
'codex-acp': openaiColorIcon,
o3fan: o3fanColorIcon,
cherryin: cherryinColorIcon,
modelscope: modelscopeColorIcon,
'302ai': _302aiIcon,