4 changes: 2 additions & 2 deletions package.json
@@ -447,14 +447,14 @@
     "antd@5.27.0": "patches/antd-npm-5.27.0-aa91c36546.patch",
     "electron-updater@6.7.0": "patches/electron-updater-npm-6.7.0-47b11bb0d4.patch",
     "epub@1.3.0": "patches/epub-npm-1.3.0-8325494ffe.patch",
-    "ollama-ai-provider-v2@1.5.5": "patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch",
     "atomically@1.7.0": "patches/atomically-npm-1.7.0-e742e5293b.patch",
     "file-stream-rotator@0.6.1": "patches/file-stream-rotator-npm-0.6.1-eab45fb13d.patch",
     "libsql@0.4.7": "patches/libsql-npm-0.4.7-444e260fb1.patch",
     "pdf-parse@1.1.1": "patches/pdf-parse-npm-1.1.1-04a6109b2a.patch",
     "@ai-sdk/openai-compatible@1.0.28": "patches/@ai-sdk__openai-compatible@1.0.28.patch",
     "@anthropic-ai/claude-agent-sdk@0.1.76": "patches/@anthropic-ai__claude-agent-sdk@0.1.76.patch",
-    "@openrouter/ai-sdk-provider": "patches/@openrouter__ai-sdk-provider.patch"
+    "@openrouter/ai-sdk-provider": "patches/@openrouter__ai-sdk-provider.patch",
+    "ollama-ai-provider-v2@1.5.5": "patches/ollama-ai-provider-v2@1.5.5.patch"
   },
   "onlyBuiltDependencies": [
     "@j178/prek",
patches/ollama-ai-provider-v2@1.5.5.patch
@@ -1,9 +1,9 @@
diff --git a/dist/index.d.ts b/dist/index.d.ts
index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..ed644349c9d38fe2a66b2fb44214f7c18eb97f89 100644
index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..4ff1e193580906ad46f49f7c2dd033613f844915 100644
--- a/dist/index.d.ts
+++ b/dist/index.d.ts
@@ -4,7 +4,7 @@ import { z } from 'zod/v4';

type OllamaChatModelId = "athene-v2" | "athene-v2:72b" | "aya-expanse" | "aya-expanse:8b" | "aya-expanse:32b" | "codegemma" | "codegemma:2b" | "codegemma:7b" | "codellama" | "codellama:7b" | "codellama:13b" | "codellama:34b" | "codellama:70b" | "codellama:code" | "codellama:python" | "command-r" | "command-r:35b" | "command-r-plus" | "command-r-plus:104b" | "command-r7b" | "command-r7b:7b" | "deepseek-r1" | "deepseek-r1:1.5b" | "deepseek-r1:7b" | "deepseek-r1:8b" | "deepseek-r1:14b" | "deepseek-r1:32b" | "deepseek-r1:70b" | "deepseek-r1:671b" | "deepseek-coder-v2" | "deepseek-coder-v2:16b" | "deepseek-coder-v2:236b" | "deepseek-v3" | "deepseek-v3:671b" | "devstral" | "devstral:24b" | "dolphin3" | "dolphin3:8b" | "exaone3.5" | "exaone3.5:2.4b" | "exaone3.5:7.8b" | "exaone3.5:32b" | "falcon2" | "falcon2:11b" | "falcon3" | "falcon3:1b" | "falcon3:3b" | "falcon3:7b" | "falcon3:10b" | "firefunction-v2" | "firefunction-v2:70b" | "gemma" | "gemma:2b" | "gemma:7b" | "gemma2" | "gemma2:2b" | "gemma2:9b" | "gemma2:27b" | "gemma3" | "gemma3:1b" | "gemma3:4b" | "gemma3:12b" | "gemma3:27b" | "granite3-dense" | "granite3-dense:2b" | "granite3-dense:8b" | "granite3-guardian" | "granite3-guardian:2b" | "granite3-guardian:8b" | "granite3-moe" | "granite3-moe:1b" | "granite3-moe:3b" | "granite3.1-dense" | "granite3.1-dense:2b" | "granite3.1-dense:8b" | "granite3.1-moe" | "granite3.1-moe:1b" | "granite3.1-moe:3b" | "llama2" | "llama2:7b" | "llama2:13b" | "llama2:70b" | "llama3" | "llama3:8b" | "llama3:70b" | "llama3-chatqa" | "llama3-chatqa:8b" | "llama3-chatqa:70b" | "llama3-gradient" | "llama3-gradient:8b" | "llama3-gradient:70b" | "llama3.1" | "llama3.1:8b" | "llama3.1:70b" | "llama3.1:405b" | "llama3.2" | "llama3.2:1b" | "llama3.2:3b" | "llama3.2-vision" | "llama3.2-vision:11b" | "llama3.2-vision:90b" | "llama3.3" | "llama3.3:70b" | "llama4" | "llama4:16x17b" | "llama4:128x17b" | "llama-guard3" | "llama-guard3:1b" | "llama-guard3:8b" | "llava" | "llava:7b" | "llava:13b" | "llava:34b" | "llava-llama3" | "llava-llama3:8b" | "llava-phi3" | "llava-phi3:3.8b" | "marco-o1" | "marco-o1:7b" | "mistral" | "mistral:7b" | "mistral-large" | "mistral-large:123b" | "mistral-nemo" | "mistral-nemo:12b" | "mistral-small" | "mistral-small:22b" | "mixtral" | "mixtral:8x7b" | "mixtral:8x22b" | "moondream" | "moondream:1.8b" | "openhermes" | "openhermes:v2.5" | "nemotron" | "nemotron:70b" | "nemotron-mini" | "nemotron-mini:4b" | "olmo" | "olmo:7b" | "olmo:13b" | "opencoder" | "opencoder:1.5b" | "opencoder:8b" | "phi3" | "phi3:3.8b" | "phi3:14b" | "phi3.5" | "phi3.5:3.8b" | "phi4" | "phi4:14b" | "qwen" | "qwen:7b" | "qwen:14b" | "qwen:32b" | "qwen:72b" | "qwen:110b" | "qwen2" | "qwen2:0.5b" | "qwen2:1.5b" | "qwen2:7b" | "qwen2:72b" | "qwen2.5" | "qwen2.5:0.5b" | "qwen2.5:1.5b" | "qwen2.5:3b" | "qwen2.5:7b" | "qwen2.5:14b" | "qwen2.5:32b" | "qwen2.5:72b" | "qwen2.5-coder" | "qwen2.5-coder:0.5b" | "qwen2.5-coder:1.5b" | "qwen2.5-coder:3b" | "qwen2.5-coder:7b" | "qwen2.5-coder:14b" | "qwen2.5-coder:32b" | "qwen3" | "qwen3:0.6b" | "qwen3:1.7b" | "qwen3:4b" | "qwen3:8b" | "qwen3:14b" | "qwen3:30b" | "qwen3:32b" | "qwen3:235b" | "qwq" | "qwq:32b" | "sailor2" | "sailor2:1b" | "sailor2:8b" | "sailor2:20b" | "shieldgemma" | "shieldgemma:2b" | "shieldgemma:9b" | "shieldgemma:27b" | "smallthinker" | "smallthinker:3b" | "smollm" | "smollm:135m" | "smollm:360m" | "smollm:1.7b" | "tinyllama" | "tinyllama:1.1b" | "tulu3" | "tulu3:8b" | "tulu3:70b" | (string & {});
declare const ollamaProviderOptions: z.ZodObject<{
- think: z.ZodOptional<z.ZodBoolean>;
@@ -24,29 +24,44 @@ index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..ed644349c9d38fe2a66b2fb44214f7c1
/**
* Echo back the prompt in addition to the completion.
*/
@@ -146,7 +148,7 @@ declare const ollamaEmbeddingProviderOptions: z.ZodObject<{
@@ -146,11 +148,11 @@ declare const ollamaEmbeddingProviderOptions: z.ZodObject<{
type OllamaEmbeddingProviderOptions = z.infer<typeof ollamaEmbeddingProviderOptions>;

declare const ollamaCompletionProviderOptions: z.ZodObject<{
- think: z.ZodOptional<z.ZodBoolean>;
+ think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodLiteral<"low">, z.ZodLiteral<"medium">, z.ZodLiteral<"high">]>>;
user: z.ZodOptional<z.ZodString>;
suffix: z.ZodOptional<z.ZodString>;
echo: z.ZodOptional<z.ZodBoolean>;
}, z.core.$strip>;
type OllamaCompletionProviderOptions = z.infer<typeof ollamaCompletionProviderOptions>;

-export { type OllamaCompletionProviderOptions, type OllamaEmbeddingProviderOptions, type OllamaProvider, type OllamaProviderSettings, createOllama, ollama };
+export { type OllamaCompletionProviderOptions, type OllamaEmbeddingProviderOptions, type OllamaProvider, type OllamaProviderOptions, type OllamaProviderSettings, createOllama, ollama, ollamaProviderOptions };
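With the widened schema, think accepts a reasoning-effort level as well as a boolean. A minimal sketch of how a caller reaches this through the AI SDK's providerOptions — the model id, base URL, and prompt are illustrative, not from this PR:

// Hypothetical usage of the patched provider: 'think' may now be
// true/false or 'low' | 'medium' | 'high' for effort-aware models.
import { generateText } from 'ai'
import { createOllama } from 'ollama-ai-provider-v2'

const ollama = createOllama({ baseURL: 'http://localhost:11434/api' })

const { text } = await generateText({
  model: ollama('qwen3:8b'), // illustrative model id
  prompt: 'Why is the sky blue?',
  providerOptions: { ollama: { think: 'high' } }
})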
diff --git a/dist/index.js b/dist/index.js
index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a8309a5a69f 100644
index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..06a0c593d117e3bb444256c481bc16a8b60bcc11 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -158,7 +158,7 @@ function getResponseMetadata({

@@ -21,7 +21,8 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
var index_exports = {};
__export(index_exports, {
createOllama: () => createOllama,
- ollama: () => ollama
+ ollama: () => ollama,
+ ollamaProviderOptions: () => ollamaProviderOptions
});
module.exports = __toCommonJS(index_exports);

@@ -158,7 +159,7 @@ function getResponseMetadata({

// src/completion/ollama-completion-language-model.ts
var ollamaCompletionProviderOptions = import_v42.z.object({
- think: import_v42.z.boolean().optional(),
+ think: import_v42.z.union([import_v42.z.boolean(), import_v42.z.literal('low'), import_v42.z.literal('medium'), import_v42.z.literal('high')]).optional(),
user: import_v42.z.string().optional(),
suffix: import_v42.z.string().optional(),
echo: import_v42.z.boolean().optional()
@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({
@@ -662,7 +663,7 @@ function convertToOllamaChatMessages({
const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data);
messages.push({
role: "user",
@@ -55,7 +70,7 @@ index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a83
images: images.length > 0 ? images : void 0
});
break;
@@ -813,9 +813,11 @@ var ollamaProviderOptions = import_v44.z.object({
@@ -813,9 +814,11 @@ var ollamaProviderOptions = import_v44.z.object({
* the model's thinking from the model's output. When disabled, the model will not think
* and directly output the content.
*
@@ -68,7 +83,7 @@ index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a83
options: import_v44.z.object({
num_ctx: import_v44.z.number().optional(),
repeat_last_n: import_v44.z.number().optional(),
@@ -929,14 +931,16 @@ var OllamaRequestBuilder = class {
@@ -929,14 +932,16 @@ var OllamaRequestBuilder = class {
prompt,
systemMessageMode: "system"
}),
@@ -81,19 +96,29 @@ index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a83
think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false,
- options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0
+ options: {
+ ...temperature !== void 0 && { temperature },
+ ...topP !== void 0 && { top_p: topP },
+ temperature,
+ top_p: topP,
+ ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {})
+ }
};
}
};
@@ -1374,6 +1379,7 @@ var ollama = createOllama();
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
createOllama,
- ollama
+ ollama,
+ ollamaProviderOptions
});
//# sourceMappingURL=index.js.map
\ No newline at end of file
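The request-builder change above is the behavioral core of the patch: the AI SDK's standard call settings (temperature, topP) were previously dropped, because options forwarded only the Ollama-specific provider options. A small sketch of the merge semantics now in place — values are made up:

// Sketch of the merged request options: standard settings are mapped in
// first, so explicit provider options win because they are spread last.
const temperature = 0.2 // standard AI SDK call setting
const topP = 0.9
const ollamaOptions = { options: { num_ctx: 8192, temperature: 0.7 } }

const options = {
  temperature,
  top_p: topP,
  ...(ollamaOptions?.options ?? {})
}
// => { temperature: 0.7, top_p: 0.9, num_ctx: 8192 }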
diff --git a/dist/index.mjs b/dist/index.mjs
index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..67b23efce3c1cf4f026693d3ff9246988a3ef26e 100644
index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..f176c20d518495e4d9c07ca3b780a0d3af7a26c6 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -144,7 +144,7 @@ function getResponseMetadata({

// src/completion/ollama-completion-language-model.ts
var ollamaCompletionProviderOptions = z2.object({
- think: z2.boolean().optional(),
@@ -136,10 +161,19 @@ index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..67b23efce3c1cf4f026693d3ff924698
think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false,
- options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0
+ options: {
+ ...temperature !== void 0 && { temperature },
+ ...topP !== void 0 && { top_p: topP },
+ temperature,
+ top_p: topP,
+ ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {})
+ }
};
}
};
@@ -1377,6 +1381,7 @@ function createOllama(options = {}) {
var ollama = createOllama();
export {
createOllama,
- ollama
+ ollama,
+ ollamaProviderOptions
};
//# sourceMappingURL=index.mjs.map
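Because ollamaProviderOptions is now exported from both the CJS and ESM builds, callers can validate options up front with the same zod schema the provider applies internally. A hedged sketch, assuming the patched package — the field values are illustrative:

import { ollamaProviderOptions } from 'ollama-ai-provider-v2'

const candidate = { think: 'medium', options: { num_ctx: 4096 } }
const parsed = ollamaProviderOptions.safeParse(candidate)
if (!parsed.success) {
  console.error(parsed.error.issues) // schema mismatch details
}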
22 changes: 11 additions & 11 deletions pnpm-lock.yaml

Some generated files are not rendered by default.

10 changes: 10 additions & 0 deletions src/renderer/src/aiCore/middleware/AiSdkMiddlewareBuilder.ts
@@ -12,6 +12,7 @@ import { getAiSdkProviderId } from '../provider/factory'
 import { isOpenRouterGeminiGenerateImageModel } from '../utils/image'
 import { anthropicCacheMiddleware } from './anthropicCacheMiddleware'
 import { noThinkMiddleware } from './noThinkMiddleware'
+import { ollamaReasoningOrderMiddleware } from './ollamaReasoningOrderMiddleware'
 import { openrouterGenerateImageMiddleware } from './openrouterGenerateImageMiddleware'
 import { openrouterReasoningMiddleware } from './openrouterReasoningMiddleware'
 import { qwenThinkingMiddleware } from './qwenThinkingMiddleware'
@@ -203,6 +204,15 @@ function addProviderSpecificMiddlewares(builder: AiSdkMiddlewareBuilder, config:
     case 'aws-bedrock': {
       break
     }
+    case 'ollama': {
+      if (config.enableReasoning) {
+        builder.add({
+          name: 'ollama-reasoning-order',
+          middleware: ollamaReasoningOrderMiddleware()
+        })
+      }
+      break
+    }
     default:
       // Generic handling for other providers
       break
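Middlewares collected by the builder are ultimately attached with the AI SDK's wrapLanguageModel. A sketch of what the registration above amounts to for an Ollama model with reasoning enabled — the model construction is illustrative:

// Assumes AI SDK v5 and the middleware file added in this PR.
import { wrapLanguageModel } from 'ai'
import { createOllama } from 'ollama-ai-provider-v2'
import { ollamaReasoningOrderMiddleware } from './ollamaReasoningOrderMiddleware'

const ollama = createOllama()
const model = wrapLanguageModel({
  model: ollama('deepseek-r1:8b'), // illustrative model id
  middleware: ollamaReasoningOrderMiddleware()
})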
src/renderer/src/aiCore/middleware/ollamaReasoningOrderMiddleware.ts
@@ -0,0 +1,71 @@
import type { LanguageModelV2StreamPart } from '@ai-sdk/provider'
import type { LanguageModelMiddleware } from 'ai'

const isReasoningPart = (chunk: LanguageModelV2StreamPart) =>
chunk.type === 'reasoning-start' || chunk.type === 'reasoning-delta' || chunk.type === 'reasoning-end'

const isTextPart = (chunk: LanguageModelV2StreamPart) =>
chunk.type === 'text-start' || chunk.type === 'text-delta' || chunk.type === 'text-end'

export function ollamaReasoningOrderMiddleware(): LanguageModelMiddleware {
return {
middlewareVersion: 'v2',
wrapGenerate: async ({ doGenerate }) => {
const { content, ...rest } = await doGenerate()
if (!Array.isArray(content)) {
return { content, ...rest }
}
const reasoningParts = content.filter((part) => part.type === 'reasoning')
if (reasoningParts.length === 0) {
return { content, ...rest }
}
const otherParts = content.filter((part) => part.type !== 'reasoning')
return { content: [...reasoningParts, ...otherParts], ...rest }
},
wrapStream: async ({ doStream }) => {
const { stream, ...rest } = await doStream()
let hasReasoning = false
let bufferedText: LanguageModelV2StreamPart[] = []

const flushBufferedText = (controller: TransformStreamDefaultController<LanguageModelV2StreamPart>) => {
if (bufferedText.length === 0) {
return
}
for (const part of bufferedText) {
controller.enqueue(part)
}
bufferedText = []
}

return {
stream: stream.pipeThrough(
new TransformStream<LanguageModelV2StreamPart, LanguageModelV2StreamPart>({
transform(chunk, controller) {
if (isReasoningPart(chunk)) {
hasReasoning = true
controller.enqueue(chunk)
flushBufferedText(controller)
return
Copilot AI Jan 20, 2026

The buffering logic has an issue: when text chunks are buffered and then reasoning chunks arrive, the buffered text is flushed immediately after the first reasoning chunk is enqueued. This causes the text to appear between reasoning chunks rather than after all reasoning chunks. For example, if the order is text-start, text-delta, reasoning-start, reasoning-delta, reasoning-end, the output will be reasoning-start, text-start, text-delta, reasoning-delta, reasoning-end. The flush should happen after all reasoning chunks, not after the first one. Consider moving the flushBufferedText call to only execute after reasoning-end or tracking when reasoning has fully completed.
}

if (!hasReasoning && isTextPart(chunk)) {
bufferedText.push(chunk)
return
}

if (!hasReasoning && (chunk.type === 'finish' || chunk.type === 'error')) {
flushBufferedText(controller)
}

controller.enqueue(chunk)
},
flush(controller) {
flushBufferedText(controller)
}
})
Comment on lines 48 to 109
Copilot AI Jan 20, 2026

Superfluous argument passed to function TransformStream.
),
...rest
}
}
}
}
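To make the stream path concrete: the middleware holds text parts back until it knows whether reasoning parts will arrive, then emits reasoning first. A standalone sketch of the same buffering idea on plain tagged values — not the project's code — which also reproduces the interleaving the review comment describes:

type Part = { kind: 'reasoning' | 'text' | 'finish'; value?: string }

// Buffers text seen before any reasoning; flushes when reasoning starts
// (after the FIRST reasoning part, as in the middleware above) or on finish.
function reorder(parts: Part[]): Part[] {
  const out: Part[] = []
  const buffered: Part[] = []
  let hasReasoning = false
  for (const part of parts) {
    if (part.kind === 'reasoning') {
      hasReasoning = true
      out.push(part)
      out.push(...buffered.splice(0))
      continue
    }
    if (!hasReasoning && part.kind === 'text') {
      buffered.push(part)
      continue
    }
    if (part.kind === 'finish') out.push(...buffered.splice(0))
    out.push(part)
  }
  out.push(...buffered) // mirror of the middleware's flush()
  return out
}

// reorder([text 'a', reasoning 'r1', reasoning 'r2']) yields
// r1, a, r2 — text lands between reasoning parts, the reviewer's point.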