Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,7 @@
"@langchain/mistralai": "^0.2.1",
"@langchain/ollama": "^0.2.3",
"@langchain/openai": "^0.5.16",
"@langchain/xai": "^0.0.3",
"@lumino/coreutils": "^2.1.2",
"@lumino/polling": "^2.1.2",
"@lumino/signaling": "^2.1.2",
Expand Down
4 changes: 4 additions & 0 deletions scripts/settings-checker.js
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,10 @@ const providers = {
type: 'ChatOpenAIFields',
excludedProps: ['configuration']
},
Grok: {
path: 'node_modules/@langchain/xai/dist/chat_models.d.ts',
type: 'ChatXAIInput'
},
WebLLM: {
path: 'node_modules/@langchain/community/chat_models/webllm.d.ts',
type: 'WebLLMInputs',
Expand Down
49 changes: 49 additions & 0 deletions src/default-providers/Grok/completer.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
import {
CompletionHandler,
IInlineCompletionContext
} from '@jupyterlab/completer';
import { ChatXAI } from '@langchain/xai';
import { AIMessage, SystemMessage } from '@langchain/core/messages';

import { BaseCompleter } from '../../base-completer';

export class GrokCompleter extends BaseCompleter {
constructor(options: BaseCompleter.IOptions) {
super(options);

this._completer = new ChatXAI({
model: 'grok-4',
...options.settings
});
}

async fetch(
request: CompletionHandler.IRequest,
context: IInlineCompletionContext
) {
const { text, offset: cursorOffset } = request;
const prompt = text.slice(0, cursorOffset).trim();

const messages = [
new SystemMessage(this.systemPrompt),
new AIMessage(prompt)
];

try {
const response = await this._completer.invoke(messages);
return {
items: [
{
insertText: response.content,
filterText: prompt
}
]
};
} catch (error) {
console.error('Error fetching Grok completions', error);
return { items: [] };
}
}

protected _completer: ChatXAI;
}
11 changes: 11 additions & 0 deletions src/default-providers/Grok/instructions.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
/**
 * Setup instructions for the Grok (xAI) provider, rendered as HTML in the
 * provider settings panel. Exported as a raw template string; do not edit
 * the wording here without checking how it is displayed to users.
 */
export default `
<i class="fas fa-exclamation-triangle"></i> This extension is experimental and not affiliated with xAI.

1. Visit <https://console.x.ai/> and sign in to access your API key.
2. In JupyterLab, go to **Settings → AI Providers → Grok**.
3. Enter your API key in the \`apiKey\` field (required).
4. Choose a model like \`grok-4\` (default).
5. Open the chat or use inline completions to interact with Grok.

**Note:** *This extension uses the xAI API to provide AI capabilities. Ensure you have an API key to use this extension with credits.*
`;
26 changes: 26 additions & 0 deletions src/default-providers/Grok/settings-schema.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"apiKey": {
"type": "string",
"description": "Your xAI (Grok) API key"
},
"model": {
"type": "string",
"description": "Model name to use (e.g. grok-4)",
"default": "grok-4"
},
"temperature": {
"type": "number",
"description": "Sampling temperature",
"default": 0.7
},
"maxTokens": {
"type": "number",
"description": "Maximum number of tokens to generate"
}
},
"additionalProperties": false
}

11 changes: 11 additions & 0 deletions src/default-providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { ChatMistralAI } from '@langchain/mistralai';
import { ChatOllama } from '@langchain/ollama';
import { ChatOpenAI } from '@langchain/openai';
import { ChatXAI } from '@langchain/xai';

// Import completers
import { AnthropicCompleter } from './Anthropic/completer';
Expand All @@ -20,6 +21,7 @@ import { CodestralCompleter } from './MistralAI/completer';
import { OllamaCompleter } from './Ollama/completer';
import { OpenAICompleter } from './OpenAI/completer';
import { WebLLMCompleter } from './WebLLM/completer';
import { GrokCompleter } from './Grok/completer';

// Import Settings
import AnthropicSettings from './Anthropic/settings-schema.json';
Expand All @@ -29,6 +31,7 @@ import MistralAISettings from './MistralAI/settings-schema.json';
import OllamaAISettings from './Ollama/settings-schema.json';
import OpenAISettings from './OpenAI/settings-schema.json';
import WebLLMSettings from './WebLLM/settings-schema.json';
import GrokSettings from './Grok/settings-schema.json';

// Import instructions
import ChromeAIInstructions, {
Expand All @@ -40,6 +43,7 @@ import OllamaInstructions from './Ollama/instructions';
import WebLLMInstructions, {
compatibilityCheck as webLLMCompatibilityCheck
} from './WebLLM/instructions';
import GrokInstructions from './Grok/instructions';

import { prebuiltAppConfig } from '@mlc-ai/web-llm';

Expand Down Expand Up @@ -90,6 +94,13 @@ const AIProviders: IAIProvider[] = [
chat: ChatOpenAI,
completer: OpenAICompleter,
settingsSchema: OpenAISettings
},
{
name: 'Grok',
chat: ChatXAI,
completer: GrokCompleter,
settingsSchema: GrokSettings,
instructions: GrokInstructions
}
];

Expand Down
Loading