Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -83,8 +83,8 @@ export default {
profile: false,
tokenCache: true,

// Ollama configuration (if using local models)
ollamaBaseUrl: 'http://localhost:11434',
// Base URL configuration (for providers that need it)
baseUrl: 'http://localhost:11434', // Example for Ollama
};
```

Expand Down
3 changes: 3 additions & 0 deletions mycoder.config.js
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,9 @@ export default {
//model: 'llama3.2:3b',
//provider: 'xai',
//model: 'grok-2-latest',
//provider: 'openai',
//model: 'qwen2.5-coder:14b',
//baseUrl: 'http://192.168.2.66:80/v1-openai',
maxTokens: 4096,
temperature: 0.7,

Expand Down
2 changes: 1 addition & 1 deletion packages/agent/src/core/llm/__tests__/openai.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,7 @@ describe('OpenAIProvider', () => {
'role' in toolUseMessage
) {
expect(toolUseMessage.role).toBe('assistant');
expect(toolUseMessage.content).toBe(null);
expect(toolUseMessage.content).toBe(''); // required by gpustack's implementation of the OpenAI SDK.

if (
'tool_calls' in toolUseMessage &&
Expand Down
6 changes: 0 additions & 6 deletions packages/agent/src/core/llm/provider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -57,12 +57,6 @@ export const providerConfig: Record<string, ProviderConfig> = {
model: 'gpt-4o-2024-05-13',
factory: (model, options) => new OpenAIProvider(model, options),
},
gpustack: {
docsUrl: 'https://mycoder.ai/docs/provider/local-openai',
model: 'llama3.2',
baseUrl: 'http://localhost:80',
factory: (model, options) => new OpenAIProvider(model, options),
},
ollama: {
docsUrl: 'https://mycoder.ai/docs/provider/ollama',
model: 'llama3.2',
Expand Down
2 changes: 1 addition & 1 deletion packages/agent/src/core/llm/providers/openai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ export class OpenAIProvider implements LLMProvider {
// so we'll include it as a function call in an assistant message
return {
role: 'assistant',
content: null,
content: '',
tool_calls: [
{
id: msg.id,
Expand Down
3 changes: 1 addition & 2 deletions packages/agent/src/tools/getTools.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@ import { fetchTool } from './io/fetch.js';
import { textEditorTool } from './io/textEditor.js';
import { createMcpTool } from './mcp.js';
import { listBackgroundToolsTool } from './system/listBackgroundTools.js';
import { respawnTool } from './system/respawn.js';
import { sequenceCompleteTool } from './system/sequenceComplete.js';
import { shellMessageTool } from './system/shellMessage.js';
import { shellStartTool } from './system/shellStart.js';
Expand Down Expand Up @@ -39,7 +38,7 @@ export function getTools(options?: GetToolsOptions): Tool[] {
shellMessageTool as unknown as Tool,
browseStartTool as unknown as Tool,
browseMessageTool as unknown as Tool,
respawnTool as unknown as Tool,
//respawnTool as unknown as Tool, this is a confusing tool for now.
sleepTool as unknown as Tool,
listBackgroundToolsTool as unknown as Tool,
];
Expand Down
3 changes: 1 addition & 2 deletions packages/agent/src/tools/system/respawn.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,8 @@ import { respawnTool } from './respawn';
const toolContext: ToolContext = getMockToolContext();

describe('respawnTool', () => {
it('should have correct name and description', () => {
it('should have correct name', () => {
expect(respawnTool.name).toBe('respawn');
expect(respawnTool.description).toContain('Resets the agent context');
});

it('should execute and return confirmation message', async () => {
Expand Down
2 changes: 1 addition & 1 deletion packages/agent/src/tools/system/respawn.ts
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ const returnSchema = z.object({
export const respawnTool: Tool = {
name: 'respawn',
description:
'Resets the agent context to just the system prompt and provided context',
'Resets the current conversation to just the system prompt and provided input context to this tool.',
logPrefix: '🔄',
parameters: parameterSchema,
returns: returnSchema,
Expand Down
15 changes: 11 additions & 4 deletions packages/cli/src/commands/$default.ts
Original file line number Diff line number Diff line change
Expand Up @@ -114,18 +114,25 @@ export async function executePrompt(
throw new Error(`Unknown provider: ${config.provider}`);
}

const { keyName } = providerSettings;
// only validate key if baseUrl is not set, otherwise we assume the user is using a local provider
let apiKey: string | undefined = undefined;
const { keyName } = providerSettings;
if (keyName) {
// Then fall back to environment variable
logger.info(`Looking API key in env: ${keyName}`);
apiKey = process.env[keyName];
if (!apiKey) {
logger.error(getProviderApiKeyError(config.provider));
throw new Error(`${config.provider} API key not found`);
if (!config.baseUrl) {
if (!apiKey) {
logger.error(getProviderApiKeyError(config.provider));
throw new Error(`${config.provider} API key not found`);
}
}
}

logger.info(`LLM: ${config.provider}/${config.model}`);
if (apiKey) {
logger.info(`Using API key: ${apiKey.slice(0, 4)}...`);
}
if (config.baseUrl) {
// Log the custom base URL if one is configured (e.g. Ollama or an OpenAI-compatible endpoint)
logger.info(`Using base url: ${config.baseUrl}`);
Expand Down
3 changes: 0 additions & 3 deletions packages/docs/docs/providers/anthropic.md
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,6 @@ export default {
provider: 'anthropic',
model: 'claude-3-7-sonnet-20250219',

// Optional: Set API key directly (environment variable is preferred)
// anthropicApiKey: 'your_api_key_here',

// Other MyCoder settings
maxTokens: 4096,
temperature: 0.7,
Expand Down
4 changes: 3 additions & 1 deletion packages/docs/docs/providers/index.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,9 @@ MyCoder supports multiple Language Model (LLM) providers, giving you flexibility
MyCoder currently supports the following LLM providers:

- [**Anthropic**](./anthropic.md) - Claude models from Anthropic
- [**OpenAI**](./openai.md) - GPT models from OpenAI
- [**OpenAI**](./openai.md) - GPT models from OpenAI (and OpenAI compatible providers)
- [**Ollama**](./ollama.md) - Self-hosted open-source models via Ollama
- [**xAI**](./xai.md) - Grok models from xAI

## Configuring Providers

Expand Down Expand Up @@ -52,3 +53,4 @@ For detailed instructions on setting up each provider, see the provider-specific
- [Anthropic Configuration](./anthropic.md)
- [OpenAI Configuration](./openai.md)
- [Ollama Configuration](./ollama.md)
- [xAI Configuration](./xai.md)
2 changes: 1 addition & 1 deletion packages/docs/docs/providers/ollama.md
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ export default {
model: 'medragondot/Sky-T1-32B-Preview:latest',

// Optional: Custom base URL (defaults to http://localhost:11434)
// ollamaBaseUrl: 'http://localhost:11434',
// baseUrl: 'http://localhost:11434',

// Other MyCoder settings
maxTokens: 4096,
Expand Down
22 changes: 18 additions & 4 deletions packages/docs/docs/providers/openai.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,10 +38,6 @@ export default {
provider: 'openai',
model: 'gpt-4o',

// Optional: Set API key directly (environment variable is preferred)
// openaiApiKey: 'your_api_key_here',
// openaiOrganization: 'your_organization_id',

// Other MyCoder settings
maxTokens: 4096,
temperature: 0.7,
Expand All @@ -60,6 +56,24 @@ MyCoder supports all OpenAI models that have tool/function calling capabilities.

You can use any other OpenAI model that supports function calling with MyCoder. The OpenAI provider is not limited to just these listed models.

## Using OpenAI Compatible Providers

A number of providers offer OpenAI-compatible REST API endpoints, such as xAI and [GPUStack](https://gpustack.ai). To point the OpenAI provider at a different provider's REST API, set the `baseUrl` and also, if applicable, set `OPENAI_API_KEY` to the key that provider requires. For example:

```javascript
export default {
// Provider selection
provider: 'openai',
model: 'qwen2.5',
baseUrl: 'http://localhost/v1-openai',

// Other MyCoder settings
maxTokens: 4096,
temperature: 0.7,
// ...
};
```

## Best Practices

- GPT-4o provides the best balance of performance and cost for most MyCoder tasks
Expand Down
77 changes: 77 additions & 0 deletions packages/docs/docs/providers/xai.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
---
sidebar_position: 6
---

# xAI (Grok)

[xAI](https://x.ai/) is the company behind Grok, a powerful large language model designed to be helpful, harmless, and honest. Grok models offer strong reasoning capabilities and support for tool calling.

## Setup

To use Grok models with MyCoder, you need an xAI API key:

1. Create an account at [xAI](https://x.ai/)
2. Navigate to the API Keys section and create a new API key
3. Set the API key as an environment variable or in your configuration file

### Environment Variables

You can set the xAI API key as an environment variable:

```bash
export XAI_API_KEY=your_api_key_here
```

### Configuration

Configure MyCoder to use xAI's Grok in your `mycoder.config.js` file:

```javascript
export default {
// Provider selection
provider: 'xai',
model: 'grok-2-latest',

// Other MyCoder settings
maxTokens: 4096,
temperature: 0.7,
// ...
};
```

## Supported Models

xAI offers several Grok models with different capabilities:

- `grok-2-latest` (recommended) - The latest Grok-2 model with strong reasoning and tool-calling capabilities
- `grok-1` - The original Grok model

## Best Practices

- Grok models excel at coding tasks and technical problem-solving
- They have strong tool-calling capabilities, making them suitable for MyCoder workflows
- For complex programming tasks, use Grok-2 models for best results
- Provide clear, specific instructions for optimal results

## Custom Base URL

If you need to use a different base URL for the xAI API (for example, if you're using a proxy or if xAI changes their API endpoint), you can specify it in your configuration:

```javascript
export default {
provider: 'xai',
model: 'grok-2-latest',
baseUrl: 'https://api.x.ai/v1', // Default xAI API URL
};
```

## Troubleshooting

If you encounter issues with xAI's Grok:

- Verify your API key is correct and has sufficient quota
- Check that you're using a supported model name
- For tool-calling issues, ensure your functions are properly formatted
- Monitor your token usage to avoid unexpected costs

For more information, visit the [xAI Documentation](https://x.ai/docs).
Loading