Skip to content

Commit 26ebfb4

Browse files
feat(engine): add support for MLX AI provider (#437)
* docs(CONTRIBUTING.md): update `TODO.md` reference (#435) Signed-off-by: Emmanuel Ferdman <[email protected]> * feat(engine): add support for MLX AI provider docs/engine: update documentation to include new engine providers * fix(mlx.ts): add repetition_penalty option to generateCommitMessage method for improved model behavior --------- Signed-off-by: Emmanuel Ferdman <[email protected]> Co-authored-by: Emmanuel Ferdman <[email protected]>
1 parent dd65b9c commit 26ebfb4

File tree

7 files changed

+145
-16
lines changed

7 files changed

+145
-16
lines changed

.github/CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ To get started, follow these steps:
1818
1. Clone the project repository locally.
1919
2. Install dependencies with `npm install`.
2020
3. Run the project with `npm run dev`.
21-
4. See [issues](https://github.com/di-sukharev/opencommit/issues) or [TODO.md](../TODO.md) to help the project.
21+
4. See [issues](https://github.com/di-sukharev/opencommit/issues) or [TODO.md](TODO.md) to help the project.
2222

2323
## Commit message guidelines
2424

out/cli.cjs

Lines changed: 45 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -431,8 +431,8 @@ var require_escape = __commonJS({
431431
}
432432
function escapeArgument(arg, doubleEscapeMetaChars) {
433433
arg = `${arg}`;
434-
arg = arg.replace(/(\\*)"/g, '$1$1\\"');
435-
arg = arg.replace(/(\\*)$/, "$1$1");
434+
arg = arg.replace(/(?=(\\+?)?)\1"/g, '$1$1\\"');
435+
arg = arg.replace(/(?=(\\+?)?)\1$/, "$1$1");
436436
arg = `"${arg}"`;
437437
arg = arg.replace(metaCharsRegExp, "^$1");
438438
if (doubleEscapeMetaChars) {
@@ -578,7 +578,7 @@ var require_enoent = __commonJS({
578578
const originalEmit = cp.emit;
579579
cp.emit = function(name, arg1) {
580580
if (name === "exit") {
581-
const err = verifyENOENT(arg1, parsed, "spawn");
581+
const err = verifyENOENT(arg1, parsed);
582582
if (err) {
583583
return originalEmit.call(cp, "error", err);
584584
}
@@ -27389,7 +27389,8 @@ var package_default = {
2738927389
"test:unit:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:unit",
2739027390
"test:e2e": "npm run test:e2e:setup && jest test/e2e",
2739127391
"test:e2e:setup": "sh test/e2e/setup.sh",
27392-
"test:e2e:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:e2e"
27392+
"test:e2e:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:e2e",
27393+
"mlx:start": "OCO_AI_PROVIDER='mlx' node ./out/cli.cjs"
2739327394
},
2739427395
devDependencies: {
2739527396
"@commitlint/types": "^17.4.4",
@@ -29933,6 +29934,8 @@ var getDefaultModel = (provider) => {
2993329934
switch (provider) {
2993429935
case "ollama":
2993529936
return "";
29937+
case "mlx":
29938+
return "";
2993629939
case "anthropic":
2993729940
return MODEL_LIST.anthropic[0];
2993829941
case "gemini":
@@ -29964,7 +29967,7 @@ var configValidators = {
2996429967
validateConfig(
2996529968
"OCO_API_KEY",
2996629969
value,
29967-
'You need to provide the OCO_API_KEY when OCO_AI_PROVIDER set to "openai" (default) or "ollama" or "azure" or "gemini" or "flowise" or "anthropic". Run `oco config set OCO_API_KEY=your_key OCO_AI_PROVIDER=openai`'
29970+
'You need to provide the OCO_API_KEY when OCO_AI_PROVIDER set to "openai" (default) or "ollama" or "mlx" or "azure" or "gemini" or "flowise" or "anthropic". Run `oco config set OCO_API_KEY=your_key OCO_AI_PROVIDER=openai`'
2996829971
);
2996929972
return value;
2997029973
},
@@ -30070,8 +30073,8 @@ var configValidators = {
3007030073
"test",
3007130074
"flowise",
3007230075
"groq"
30073-
].includes(value) || value.startsWith("ollama"),
30074-
`${value} is not supported yet, use 'ollama', 'anthropic', 'azure', 'gemini', 'flowise' or 'openai' (default)`
30076+
].includes(value) || value.startsWith("ollama") || value.startsWith("mlx"),
30077+
`${value} is not supported yet, use 'ollama', 'mlx', 'anthropic', 'azure', 'gemini', 'flowise' or 'openai' (default)`
3007530078
);
3007630079
return value;
3007730080
},
@@ -30111,6 +30114,7 @@ var OCO_AI_PROVIDER_ENUM = /* @__PURE__ */ ((OCO_AI_PROVIDER_ENUM2) => {
3011130114
OCO_AI_PROVIDER_ENUM2["TEST"] = "test";
3011230115
OCO_AI_PROVIDER_ENUM2["FLOWISE"] = "flowise";
3011330116
OCO_AI_PROVIDER_ENUM2["GROQ"] = "groq";
30117+
OCO_AI_PROVIDER_ENUM2["MLX"] = "mlx";
3011430118
return OCO_AI_PROVIDER_ENUM2;
3011530119
})(OCO_AI_PROVIDER_ENUM || {});
3011630120
var defaultConfigPath = (0, import_path.join)((0, import_os.homedir)(), ".opencommit");
@@ -44524,6 +44528,38 @@ var GroqEngine = class extends OpenAiEngine {
4452444528
}
4452544529
};
4452644530

44531+
// src/engine/mlx.ts
44532+
var MLXEngine = class {
44533+
constructor(config7) {
44534+
this.config = config7;
44535+
this.client = axios_default.create({
44536+
url: config7.baseURL ? `${config7.baseURL}/${config7.apiKey}` : "http://localhost:8080/v1/chat/completions",
44537+
headers: { "Content-Type": "application/json" }
44538+
});
44539+
}
44540+
async generateCommitMessage(messages) {
44541+
const params = {
44542+
messages,
44543+
temperature: 0,
44544+
top_p: 0.1,
44545+
repetition_penalty: 1.5,
44546+
stream: false
44547+
};
44548+
try {
44549+
const response = await this.client.post(
44550+
this.client.getUri(this.config),
44551+
params
44552+
);
44553+
const choices = response.data.choices;
44554+
const message = choices[0].message;
44555+
return message?.content;
44556+
} catch (err) {
44557+
const message = err.response?.data?.error ?? err.message;
44558+
throw new Error(`MLX provider error: ${message}`);
44559+
}
44560+
}
44561+
};
44562+
4452744563
// src/utils/engine.ts
4452844564
function getEngine() {
4452944565
const config7 = getConfig();
@@ -44550,6 +44586,8 @@ function getEngine() {
4455044586
return new FlowiseEngine(DEFAULT_CONFIG2);
4455144587
case "groq" /* GROQ */:
4455244588
return new GroqEngine(DEFAULT_CONFIG2);
44589+
case "mlx" /* MLX */:
44590+
return new MLXEngine(DEFAULT_CONFIG2);
4455344591
default:
4455444592
return new OpenAiEngine(DEFAULT_CONFIG2);
4455544593
}

out/github-action.cjs

Lines changed: 39 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -48745,6 +48745,8 @@ var getDefaultModel = (provider) => {
4874548745
switch (provider) {
4874648746
case "ollama":
4874748747
return "";
48748+
case "mlx":
48749+
return "";
4874848750
case "anthropic":
4874948751
return MODEL_LIST.anthropic[0];
4875048752
case "gemini":
@@ -48776,7 +48778,7 @@ var configValidators = {
4877648778
validateConfig(
4877748779
"OCO_API_KEY",
4877848780
value,
48779-
'You need to provide the OCO_API_KEY when OCO_AI_PROVIDER set to "openai" (default) or "ollama" or "azure" or "gemini" or "flowise" or "anthropic". Run `oco config set OCO_API_KEY=your_key OCO_AI_PROVIDER=openai`'
48781+
'You need to provide the OCO_API_KEY when OCO_AI_PROVIDER set to "openai" (default) or "ollama" or "mlx" or "azure" or "gemini" or "flowise" or "anthropic". Run `oco config set OCO_API_KEY=your_key OCO_AI_PROVIDER=openai`'
4878048782
);
4878148783
return value;
4878248784
},
@@ -48882,8 +48884,8 @@ var configValidators = {
4888248884
"test",
4888348885
"flowise",
4888448886
"groq"
48885-
].includes(value) || value.startsWith("ollama"),
48886-
`${value} is not supported yet, use 'ollama', 'anthropic', 'azure', 'gemini', 'flowise' or 'openai' (default)`
48887+
].includes(value) || value.startsWith("ollama") || value.startsWith("mlx"),
48888+
`${value} is not supported yet, use 'ollama', 'mlx', 'anthropic', 'azure', 'gemini', 'flowise' or 'openai' (default)`
4888748889
);
4888848890
return value;
4888948891
},
@@ -63325,6 +63327,38 @@ var GroqEngine = class extends OpenAiEngine {
6332563327
}
6332663328
};
6332763329

63330+
// src/engine/mlx.ts
63331+
var MLXEngine = class {
63332+
constructor(config6) {
63333+
this.config = config6;
63334+
this.client = axios_default.create({
63335+
url: config6.baseURL ? `${config6.baseURL}/${config6.apiKey}` : "http://localhost:8080/v1/chat/completions",
63336+
headers: { "Content-Type": "application/json" }
63337+
});
63338+
}
63339+
async generateCommitMessage(messages) {
63340+
const params = {
63341+
messages,
63342+
temperature: 0,
63343+
top_p: 0.1,
63344+
repetition_penalty: 1.5,
63345+
stream: false
63346+
};
63347+
try {
63348+
const response = await this.client.post(
63349+
this.client.getUri(this.config),
63350+
params
63351+
);
63352+
const choices = response.data.choices;
63353+
const message = choices[0].message;
63354+
return message?.content;
63355+
} catch (err) {
63356+
const message = err.response?.data?.error ?? err.message;
63357+
throw new Error(`MLX provider error: ${message}`);
63358+
}
63359+
}
63360+
};
63361+
6332863362
// src/utils/engine.ts
6332963363
function getEngine() {
6333063364
const config6 = getConfig();
@@ -63351,6 +63385,8 @@ function getEngine() {
6335163385
return new FlowiseEngine(DEFAULT_CONFIG2);
6335263386
case "groq" /* GROQ */:
6335363387
return new GroqEngine(DEFAULT_CONFIG2);
63388+
case "mlx" /* MLX */:
63389+
return new MLXEngine(DEFAULT_CONFIG2);
6335463390
default:
6335563391
return new OpenAiEngine(DEFAULT_CONFIG2);
6335663392
}

package.json

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,8 @@
5858
"test:unit:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:unit",
5959
"test:e2e": "npm run test:e2e:setup && jest test/e2e",
6060
"test:e2e:setup": "sh test/e2e/setup.sh",
61-
"test:e2e:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:e2e"
61+
"test:e2e:docker": "npm run test:docker-build && DOCKER_CONTENT_TRUST=0 docker run --rm oco-test npm run test:e2e",
62+
"mlx:start": "OCO_AI_PROVIDER='mlx' node ./out/cli.cjs"
6263
},
6364
"devDependencies": {
6465
"@commitlint/types": "^17.4.4",

src/commands/config.ts

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,8 @@ const getDefaultModel = (provider: string | undefined): string => {
9393
switch (provider) {
9494
case 'ollama':
9595
return '';
96+
case 'mlx':
97+
return '';
9698
case 'anthropic':
9799
return MODEL_LIST.anthropic[0];
98100
case 'gemini':
@@ -138,7 +140,7 @@ export const configValidators = {
138140
validateConfig(
139141
'OCO_API_KEY',
140142
value,
141-
'You need to provide the OCO_API_KEY when OCO_AI_PROVIDER set to "openai" (default) or "ollama" or "azure" or "gemini" or "flowise" or "anthropic". Run `oco config set OCO_API_KEY=your_key OCO_AI_PROVIDER=openai`'
143+
'You need to provide the OCO_API_KEY when OCO_AI_PROVIDER set to "openai" (default) or "ollama" or "mlx" or "azure" or "gemini" or "flowise" or "anthropic". Run `oco config set OCO_API_KEY=your_key OCO_AI_PROVIDER=openai`'
142144
);
143145

144146
return value;
@@ -261,8 +263,8 @@ export const configValidators = {
261263
'test',
262264
'flowise',
263265
'groq'
264-
].includes(value) || value.startsWith('ollama'),
265-
`${value} is not supported yet, use 'ollama', 'anthropic', 'azure', 'gemini', 'flowise' or 'openai' (default)`
266+
].includes(value) || value.startsWith('ollama') || value.startsWith('mlx'),
267+
`${value} is not supported yet, use 'ollama', 'mlx', 'anthropic', 'azure', 'gemini', 'flowise' or 'openai' (default)`
266268
);
267269

268270
return value;
@@ -307,7 +309,8 @@ export enum OCO_AI_PROVIDER_ENUM {
307309
AZURE = 'azure',
308310
TEST = 'test',
309311
FLOWISE = 'flowise',
310-
GROQ = 'groq'
312+
GROQ = 'groq',
313+
MLX = 'mlx'
311314
}
312315

313316
export type ConfigType = {

src/engine/mlx.ts

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
import axios, { AxiosInstance } from 'axios';
2+
import { OpenAI } from 'openai';
3+
import { AiEngine, AiEngineConfig } from './Engine';
4+
import { chown } from 'fs';
5+
6+
interface MLXConfig extends AiEngineConfig {}
7+
8+
export class MLXEngine implements AiEngine {
9+
config: MLXConfig;
10+
client: AxiosInstance;
11+
12+
constructor(config) {
13+
this.config = config;
14+
this.client = axios.create({
15+
url: config.baseURL
16+
? `${config.baseURL}/${config.apiKey}`
17+
: 'http://localhost:8080/v1/chat/completions',
18+
headers: { 'Content-Type': 'application/json' }
19+
});
20+
}
21+
22+
async generateCommitMessage(
23+
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>):
24+
Promise<string | undefined> {
25+
const params = {
26+
messages,
27+
temperature: 0,
28+
top_p: 0.1,
29+
repetition_penalty: 1.5,
30+
stream: false
31+
};
32+
try {
33+
const response = await this.client.post(
34+
this.client.getUri(this.config),
35+
params
36+
);
37+
38+
const choices = response.data.choices;
39+
const message = choices[0].message;
40+
41+
return message?.content;
42+
} catch (err: any) {
43+
const message = err.response?.data?.error ?? err.message;
44+
throw new Error(`MLX provider error: ${message}`);
45+
}
46+
}
47+
}

src/utils/engine.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import { OllamaEngine } from '../engine/ollama';
88
import { OpenAiEngine } from '../engine/openAi';
99
import { TestAi, TestMockType } from '../engine/testAi';
1010
import { GroqEngine } from '../engine/groq';
11+
import { MLXEngine } from '../engine/mlx';
1112

1213
export function getEngine(): AiEngine {
1314
const config = getConfig();
@@ -43,6 +44,9 @@ export function getEngine(): AiEngine {
4344
case OCO_AI_PROVIDER_ENUM.GROQ:
4445
return new GroqEngine(DEFAULT_CONFIG);
4546

47+
case OCO_AI_PROVIDER_ENUM.MLX:
48+
return new MLXEngine(DEFAULT_CONFIG);
49+
4650
default:
4751
return new OpenAiEngine(DEFAULT_CONFIG);
4852
}

0 commit comments

Comments
 (0)