Skip to content

Commit bc079fe

Browse files
authored
📦 NEW: XAI models support (#76)
* 📦 NEW: XAI models support * 🐛 FIX: Import
1 parent e39854d commit bc079fe

File tree

13 files changed

+115
-5
lines changed

13 files changed

+115
-5
lines changed

examples/nextjs/.env.baseai.example

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,3 +19,4 @@ GROQ_API_KEY=
1919
MISTRAL_API_KEY=
2020
PERPLEXITY_API_KEY=
2121
TOGETHER_API_KEY=
22+
XAI_API_KEY=

packages/baseai/src/data/models.ts

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,8 @@ export const PERPLEXITY: string = 'Perplexity';
9090
export const DEEPINFRA: string = 'deepinfra';
9191
export const BEDROCK: string = 'bedrock';
9292
export const AZURE_OPEN_AI: string = 'azure-openai';
93+
export const X_AI: string = 'xAI';
94+
export const OLLAMA: string = 'ollama';
9395

9496
interface Model {
9597
id: string;
@@ -570,6 +572,18 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
570572
promptCost: 0.2,
571573
completionCost: 0.2
572574
}
575+
],
576+
[X_AI]: [
577+
{
578+
id: 'grok-beta',
579+
provider: X_AI,
580+
promptCost: 5,
581+
completionCost: 15,
582+
toolSupport: {
583+
toolChoice: true,
584+
parallelToolCalls: false
585+
}
586+
}
573587
]
574588
};
575589

packages/baseai/src/dev/data/models.ts

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ export const DEEPINFRA: string = 'deepinfra';
1111
export const BEDROCK: string = 'bedrock';
1212
export const AZURE_OPEN_AI: string = 'azure-openai';
1313
export const OLLAMA: string = 'ollama';
14+
export const X_AI: string = 'xAI';
1415

1516
interface Model {
1617
id: string;
@@ -511,6 +512,14 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
511512
promptCost: 1,
512513
completionCost: 3
513514
}
515+
],
516+
[X_AI]: [
517+
{
518+
id: 'grok-beta',
519+
provider: X_AI,
520+
promptCost: 5,
521+
completionCost: 15
522+
}
514523
]
515524
};
516525

packages/baseai/src/dev/llms/call-llm.ts

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,8 @@ import {
77
OLLAMA,
88
OPEN_AI,
99
PERPLEXITY,
10-
TOGETHER_AI
10+
TOGETHER_AI,
11+
X_AI
1112
} from '@/dev/data/models';
1213

1314
import { addContextFromMemory } from '@/utils/memory/lib';
@@ -25,6 +26,7 @@ import { callOllama } from './call-ollama';
2526
import { callOpenAI } from './call-openai';
2627
import { callPerplexity } from './call-perplexity';
2728
import { callTogether } from './call-together';
29+
import { callXAI } from './call-xai';
2830

2931
export async function callLLM({
3032
pipe,
@@ -110,6 +112,16 @@ export async function callLLM({
110112
});
111113
}
112114

115+
if (modelProvider === X_AI) {
116+
dlog('XAI', 'βœ…');
117+
return await callXAI({
118+
pipe,
119+
messages,
120+
llmApiKey,
121+
stream
122+
});
123+
}
124+
113125
if (modelProvider === COHERE) {
114126
dlog('COHERE', 'βœ…');
115127
return await callCohere({
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
import OpenAI from 'openai';
2+
import { dlog } from '../utils/dlog';
3+
import { X_AI } from '../data/models';
4+
import { handleLlmError } from './utils';
5+
import type { Message } from 'types/pipe';
6+
import type { ModelParams } from 'types/providers';
7+
import { addToolsToParams } from '../utils/add-tools-to-params';
8+
9+
export async function callXAI({
10+
pipe,
11+
stream,
12+
llmApiKey,
13+
messages
14+
}: {
15+
pipe: any;
16+
stream: boolean;
17+
llmApiKey: string;
18+
messages: Message[];
19+
}) {
20+
try {
21+
const modelParams = buildModelParams(pipe, stream, messages);
22+
23+
// LLM.
24+
const groq = new OpenAI({
25+
apiKey: llmApiKey,
26+
baseURL: 'https://api.x.ai/v1'
27+
});
28+
29+
// Add tools (functions) to modelParams
30+
addToolsToParams(modelParams, pipe);
31+
dlog('modelParams', modelParams);
32+
33+
return await groq.chat.completions.create(modelParams as any);
34+
} catch (error: any) {
35+
handleLlmError({ error, provider: X_AI });
36+
}
37+
}
38+
39+
function buildModelParams(
40+
pipe: any,
41+
stream: boolean,
42+
messages: Message[]
43+
): ModelParams {
44+
return {
45+
messages,
46+
stream,
47+
model: pipe.model.name,
48+
...pipe.model.params
49+
};
50+
}

packages/baseai/src/dev/utils/get-llm-api-key.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,8 @@ import {
66
GROQ,
77
OPEN_AI,
88
PERPLEXITY,
9-
TOGETHER_AI
9+
TOGETHER_AI,
10+
X_AI
1011
} from '@/dev/data/models';
1112

1213
export function getLLMApiKey(modelProvider: string): string {
@@ -27,6 +28,8 @@ export function getLLMApiKey(modelProvider: string): string {
2728
return process.env.FIREWORKS_API_KEY || '';
2829
case modelProvider.includes(PERPLEXITY):
2930
return process.env.PERPLEXITY_API_KEY || '';
31+
case modelProvider.includes(X_AI):
32+
return process.env.XAI_API_KEY || '';
3033
default:
3134
throw new Error(`Unsupported model provider: ${modelProvider}`);
3235
}

packages/baseai/src/init/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -292,6 +292,7 @@ GROQ_API_KEY=
292292
MISTRAL_API_KEY=
293293
PERPLEXITY_API_KEY=
294294
TOGETHER_API_KEY=
295+
XAI_API_KEY=
295296
`;
296297

297298
try {

packages/baseai/src/utils/to-old-pipe-format.ts

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,15 @@
1-
import { OLLAMA } from '@/dev/data/models';
21
import type { Pipe, PipeOld } from './../../types/pipe';
32
import {
43
ANTHROPIC,
54
COHERE,
65
FIREWORKS_AI,
76
GOOGLE,
87
GROQ,
8+
OLLAMA,
99
OPEN_AI,
1010
PERPLEXITY,
11-
TOGETHER_AI
11+
TOGETHER_AI,
12+
X_AI
1213
} from './../data/models';
1314

1415
type Provider =
@@ -102,7 +103,8 @@ function getProvider(providerString: string): Provider {
102103
cohere: COHERE,
103104
fireworks: FIREWORKS_AI,
104105
perplexity: PERPLEXITY,
105-
ollama: OLLAMA
106+
ollama: OLLAMA,
107+
xai: X_AI
106108
};
107109

108110
const provider = providerMap[providerString.toLowerCase()];

packages/core/src/data/models.ts

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ export const DEEPINFRA: string = 'deepinfra';
1111
export const BEDROCK: string = 'bedrock';
1212
export const AZURE_OPEN_AI: string = 'azure-openai';
1313
export const OLLAMA: string = 'ollama';
14+
export const X_AI: string = 'xAI';
1415

1516
interface Model {
1617
id: string;
@@ -400,6 +401,14 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
400401
completionCost: 3,
401402
},
402403
],
404+
[X_AI]: [
405+
{
406+
id: 'grok-beta',
407+
provider: X_AI,
408+
promptCost: 5,
409+
completionCost: 15,
410+
},
411+
],
403412
};
404413

405414
export const jsonModeModels = [

packages/core/src/utils/get-llm-api-key.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import {
88
OPEN_AI,
99
PERPLEXITY,
1010
TOGETHER_AI,
11+
X_AI,
1112
} from '../data/models';
1213

1314
export function getLLMApiKey(modelProvider: string): string {
@@ -30,6 +31,8 @@ export function getLLMApiKey(modelProvider: string): string {
3031
return process.env.PERPLEXITY_API_KEY || '';
3132
case modelProvider.includes(OLLAMA):
3233
return process.env.OLLAMA_API_KEY || '';
34+
case modelProvider.includes(X_AI):
35+
return process.env.XAI_API_KEY || '';
3336

3437
default:
3538
throw new Error(`Unsupported model provider: ${modelProvider}`);

0 commit comments

Comments
(0)