5 changes: 3 additions & 2 deletions mycoder.config.js
@@ -9,12 +9,13 @@ export default {
pageFilter: 'none', // 'simple', 'none', or 'readability'

// Model settings
provider: 'anthropic',
model: 'claude-3-7-sonnet-20250219',
//provider: 'anthropic',
//model: 'claude-3-7-sonnet-20250219',
//provider: 'openai',
//model: 'gpt-4o',
//provider: 'ollama',
//model: 'medragondot/Sky-T1-32B-Preview:latest',
//model: 'llama3.2:3b',
maxTokens: 4096,
temperature: 0.7,

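For context, a minimal sketch of the same config with the Ollama pair uncommented, which is what this change sets up (the model tag is illustrative; any model already pulled into the local Ollama instance should work):

export default {
  pageFilter: 'none', // 'simple', 'none', or 'readability'
  provider: 'ollama',
  model: 'llama3.2:3b',
  maxTokens: 4096,
  temperature: 0.7,
};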
18 changes: 18 additions & 0 deletions packages/agent/CHANGELOG.md
@@ -1,3 +1,21 @@
# [mycoder-agent-v1.3.0](https://github.com/drivecore/mycoder/compare/mycoder-agent-v1.2.0...mycoder-agent-v1.3.0) (2025-03-12)

### Features

- implement MCP tools support ([2d99ac8](https://github.com/drivecore/mycoder/commit/2d99ac8cefaa770e368d469355a509739aafe6a3))

# [mycoder-agent-v1.2.0](https://github.com/drivecore/mycoder/compare/mycoder-agent-v1.1.0...mycoder-agent-v1.2.0) (2025-03-12)

### Bug Fixes

- Fix TypeScript errors in MCP implementation ([f5837d3](https://github.com/drivecore/mycoder/commit/f5837d3a5dd219efc8e1d811e467f4bb695a1d94))

### Features

- Add basic Model Context Protocol (MCP) support ([8ec9619](https://github.com/drivecore/mycoder/commit/8ec9619c3cc63df8f14222762f5da0bcabe273a5))
- **agent:** implement incremental resource cleanup for agent lifecycle ([576436e](https://github.com/drivecore/mycoder/commit/576436ef2c7c5f234f088b7dba2e7fd65590738f)), closes [#236](https://github.com/drivecore/mycoder/issues/236)
- background tools are now scoped to agents ([e55817f](https://github.com/drivecore/mycoder/commit/e55817f32b373fdbff8bb1ac90105b272044d33f))

# [mycoder-agent-v1.1.0](https://github.com/drivecore/mycoder/compare/mycoder-agent-v1.0.0...mycoder-agent-v1.1.0) (2025-03-12)

### Bug Fixes
3 changes: 2 additions & 1 deletion packages/agent/package.json
@@ -1,6 +1,6 @@
{
"name": "mycoder-agent",
"version": "1.1.0",
"version": "1.3.0",
"description": "Agent module for mycoder - an AI-powered software development assistant",
"type": "module",
"main": "dist/index.js",
@@ -52,6 +52,7 @@
"chalk": "^5.4.1",
"dotenv": "^16",
"jsdom": "^26.0.0",
"ollama": "^0.5.14",
"openai": "^4.87.3",
"playwright": "^1.50.1",
"uuid": "^11",
5 changes: 4 additions & 1 deletion packages/agent/src/core/executeToolCall.ts
@@ -14,7 +14,10 @@
): Promise<string> => {
const tool = tools.find((t) => t.name === toolCall.name);
if (!tool) {
throw new Error(`No tool with the name '${toolCall.name}' exists.`);
return JSON.stringify({
error: true,
message: `No tool with the name '${toolCall.name}' exists.`,
});
}

const logger = new Logger({
@@ -25,7 +28,7 @@

const toolContext: ToolContext = { ...context, logger };

let parsedJson: any;

Check warning on line 31 (GitHub Actions / ci): Unexpected any. Specify a different type
try {
parsedJson = JSON.parse(toolCall.content);
} catch (err) {
@@ -46,7 +49,7 @@
}

// validate JSON schema for input
let validatedJson: any;

Check warning on line 52 (GitHub Actions / ci): Unexpected any. Specify a different type
try {
validatedJson = tool.parameters.parse(parsedJson);
} catch (err) {
@@ -76,7 +79,7 @@
});
}

let output: any;

Check warning on line 82 (GitHub Actions / ci): Unexpected any. Specify a different type
try {
output = await tool.execute(validatedJson, toolContext);
} catch (err) {
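With this change, a missing tool no longer throws; the caller receives a stringified error payload instead. A minimal sketch of how a caller might surface it, assuming the payload shape matches the JSON.stringify call above (the logger here is a stand-in):

const result = await executeToolCall(toolCall, tools, toolContext);
let parsed: { error?: boolean; message?: string } | undefined;
try {
  parsed = JSON.parse(result);
} catch {
  // result was not JSON; treat it as plain tool output
}
if (parsed?.error) {
  // e.g. "No tool with the name 'fetch_page' exists." (hypothetical tool name)
  logger.warn(`Tool call failed: ${parsed.message}`);
}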
265 changes: 159 additions & 106 deletions packages/agent/src/core/llm/providers/ollama.ts
@@ -1,14 +1,25 @@
/**
* Ollama provider implementation
* Ollama provider implementation using the official Ollama npm package
*/

import {
ChatRequest as OllamaChatRequest,
ChatResponse as OllamaChatResponse,
Ollama,
ToolCall as OllamaToolCall,
Tool as OllamaTool,
Message as OllamaMessage,
} from 'ollama';

import { TokenUsage } from '../../tokens.js';
import { ToolCall } from '../../types.js';
import { LLMProvider } from '../provider.js';
import {
GenerateOptions,
LLMResponse,
Message,
ProviderOptions,
FunctionDefinition,
} from '../types.js';

/**
@@ -19,29 +30,26 @@ export interface OllamaOptions extends ProviderOptions {
}

/**
* Ollama provider implementation
* Ollama provider implementation using the official Ollama npm package
*/
export class OllamaProvider implements LLMProvider {
name: string = 'ollama';
provider: string = 'ollama.chat';
model: string;
private baseUrl: string;
private client: Ollama;

constructor(model: string, options: OllamaOptions = {}) {
this.model = model;
this.baseUrl =
const baseUrl =
options.baseUrl ||
process.env.OLLAMA_BASE_URL ||
'http://localhost:11434';

// Ensure baseUrl doesn't end with a slash
if (this.baseUrl.endsWith('/')) {
this.baseUrl = this.baseUrl.slice(0, -1);
}
this.client = new Ollama({ host: baseUrl });
}

/**
* Generate text using Ollama API
* Generate text using Ollama API via the official npm package
*/
async generateText(options: GenerateOptions): Promise<LLMResponse> {
const {
@@ -52,126 +60,171 @@ export class OllamaProvider implements LLMProvider {
topP,
frequencyPenalty,
presencePenalty,
stopSequences,
} = options;

// Format messages for Ollama API
const formattedMessages = this.formatMessages(messages);

try {
// Prepare request options
const requestOptions: any = {
model: this.model,
messages: formattedMessages,
stream: false,
options: {
temperature: temperature,
// Ollama uses top_k instead of top_p, but we'll include top_p if provided
...(topP !== undefined && { top_p: topP }),
...(frequencyPenalty !== undefined && {
frequency_penalty: frequencyPenalty,
}),
...(presencePenalty !== undefined && {
presence_penalty: presencePenalty,
}),
},
// Prepare request options
const requestOptions: OllamaChatRequest = {
model: this.model,
messages: formattedMessages,
stream: false,
options: {
temperature: temperature,
...(topP !== undefined && { top_p: topP }),
...(frequencyPenalty !== undefined && {
frequency_penalty: frequencyPenalty,
}),
...(presencePenalty !== undefined && {
presence_penalty: presencePenalty,
}),
...(stopSequences &&
stopSequences.length > 0 && { stop: stopSequences }),
},
};

// Add max_tokens if provided
if (maxTokens !== undefined) {
requestOptions.options = {
...requestOptions.options,
num_predict: maxTokens,
};
}

// Add max_tokens if provided
if (maxTokens !== undefined) {
requestOptions.options.num_predict = maxTokens;
}
// Add functions/tools if provided
if (functions && functions.length > 0) {
requestOptions.tools = this.convertFunctionsToTools(functions);
}

// Add functions/tools if provided
if (functions && functions.length > 0) {
requestOptions.tools = functions.map((fn) => ({
name: fn.name,
description: fn.description,
parameters: fn.parameters,
}));
}
// Make the API request using the Ollama client
const response: OllamaChatResponse = await this.client.chat({
...requestOptions,
stream: false,
});

// Make the API request
const response = await fetch(`${this.baseUrl}/api/chat`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(requestOptions),
});

if (!response.ok) {
const errorText = await response.text();
throw new Error(`Ollama API error: ${response.status} ${errorText}`);
}
// Extract content and tool calls
const content = response.message?.content || '';

// Handle tool calls if present
const toolCalls = this.extractToolCalls(response);

// Create token usage from response data
const tokenUsage = new TokenUsage();
tokenUsage.output = response.eval_count || 0;
tokenUsage.input = response.prompt_eval_count || 0;

const data = await response.json();
return {
text: content,
toolCalls: toolCalls,
tokenUsage: tokenUsage,
};
}

/*
interface Tool {
type: string;
function: {
name: string;
description: string;
parameters: {
type: string;
required: string[];
properties: {
[key: string]: {
type: string;
description: string;
enum?: string[];
};
};
};
};
}*/

// Extract content and tool calls
const content = data.message?.content || '';
const toolCalls =
data.message?.tool_calls?.map((toolCall: any) => ({
id:
toolCall.id ||
`tool-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`,
name: toolCall.name,
content: JSON.stringify(toolCall.args || toolCall.arguments || {}),
})) || [];
/**
* Convert our function definitions to Ollama tool format
*/
private convertFunctionsToTools(
functions: FunctionDefinition[],
): OllamaTool[] {
return functions.map(
(fn) =>
({
type: 'function',
function: {
name: fn.name,
description: fn.description,
parameters: fn.parameters,
},
}) as OllamaTool,
);
}

// Create token usage from response data
const tokenUsage = new TokenUsage();
tokenUsage.input = data.prompt_eval_count || 0;
tokenUsage.output = data.eval_count || 0;
/**
* Extract tool calls from Ollama response
*/
private extractToolCalls(response: OllamaChatResponse): ToolCall[] {
if (!response.message?.tool_calls) {
return [];
}

return response.message.tool_calls.map((toolCall: OllamaToolCall) => {
//console.log('ollama tool call', toolCall);
return {
text: content,
toolCalls: toolCalls,
tokenUsage: tokenUsage,
id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`,
name: toolCall.function?.name,
content:
typeof toolCall.function?.arguments === 'string'
? toolCall.function.arguments
: JSON.stringify(toolCall.function?.arguments || {}),
};
} catch (error) {
throw new Error(`Error calling Ollama API: ${(error as Error).message}`);
}
});
}

/**
* Format messages for Ollama API
*/
private formatMessages(messages: Message[]): any[] {
return messages.map((msg) => {
if (
msg.role === 'user' ||
msg.role === 'assistant' ||
msg.role === 'system'
) {
return {
role: msg.role,
content: msg.content,
};
} else if (msg.role === 'tool_result') {
// Ollama expects tool results as a 'tool' role
return {
role: 'tool',
content: msg.content,
tool_call_id: msg.tool_use_id,
};
} else if (msg.role === 'tool_use') {
// We'll convert tool_use to assistant messages with tool_calls
return {
role: 'assistant',
content: '',
tool_calls: [
private formatMessages(messages: Message[]): OllamaMessage[] {
const output: OllamaMessage[] = [];

messages.forEach((msg) => {
switch (msg.role) {
case 'user':
case 'assistant':
case 'system':
output.push({
role: msg.role,
content: msg.content,
} satisfies OllamaMessage);
break;
case 'tool_result':
// Ollama expects tool results as a 'tool' role
output.push({
role: 'tool',
content:
typeof msg.content === 'string'
? msg.content
: JSON.stringify(msg.content),
} as OllamaMessage);
break;
case 'tool_use': {
// Ollama expects tool calls to be part of the assistant message itself,
// so attach this tool call to the last message pushed to the output
const lastMessage: OllamaMessage = output[output.length - 1]!;
lastMessage.tool_calls = [
{
id: msg.id,
name: msg.name,
arguments: msg.content,
function: {
name: msg.name,
arguments: JSON.parse(msg.content),
},
},
],
};
];
break;
}
}
// Default fallback for unknown message types
return {
role: 'user',
content: (msg as any).content || '',
};
});

return output;
}
}
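Finally, a minimal usage sketch of the rewritten provider, assuming a local Ollama server on the default port and an already-pulled model (the prompt is illustrative; the GenerateOptions fields mirror the destructuring at the top of generateText):

const provider = new OllamaProvider('llama3.2:3b');

const response = await provider.generateText({
  messages: [{ role: 'user', content: 'Summarize the MCP changes in one line.' }],
  temperature: 0.7,
  maxTokens: 256,
});

console.log(response.text);
// Tool calls, if any, come back normalized with stringified arguments
for (const call of response.toolCalls) {
  console.log(call.id, call.name, call.content);
}
console.log('tokens in/out:', response.tokenUsage.input, response.tokenUsage.output);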