
Commit 91d220f

new components
1 parent 1d8a832 commit 91d220f

File tree
13 files changed (+587, -7 lines)

Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
+import mistralAI from "../../mistral_ai.app.mjs";
+import constants from "../../common/constants.mjs";
+import { parseObj } from "../../common/utils.mjs";
+
+export default {
+  key: "mistral_ai-create-batch-job",
+  name: "Create Batch Job",
+  description: "Create a new batch job; it will be queued for processing. [See the Documentation](https://docs.mistral.ai/api/#tag/batch/operation/jobs_api_routes_batch_get_batch_jobs)",
+  version: "0.0.1",
+  type: "action",
+  props: {
+    mistralAI,
+    inputFiles: {
+      propDefinition: [
+        mistralAI,
+        "fileIds",
+      ],
+    },
+    modelId: {
+      propDefinition: [
+        mistralAI,
+        "modelId",
+      ],
+    },
+    endpoint: {
+      type: "string",
+      label: "Endpoint",
+      description: "The endpoint to use for the batch job",
+      options: constants.BATCH_JOB_ENDPOINT_OPTIONS,
+    },
+    metadata: {
+      type: "object",
+      label: "Metadata",
+      description: "Optional metadata for the batch job in JSON format.",
+      optional: true,
+    },
+    timeoutHours: {
+      type: "integer",
+      label: "Timeout Hours",
+      description: "Optional timeout duration for the batch job in hours.",
+      optional: true,
+      default: 24,
+    },
+  },
+  async run({ $ }) {
+    const response = await this.mistralAI.createBatchJob({
+      $,
+      data: {
+        input_files: this.inputFiles,
+        endpoint: this.endpoint,
+        model: this.modelId,
+        metadata: parseObj(this.metadata),
+        timeout_hours: this.timeoutHours,
+      },
+    });
+    if (response?.id) {
+      $.export("$summary", `Successfully created batch job with ID: ${response.id}`);
+    }
+    return response;
+  },
+};
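
For context, this is roughly the payload run() assembles for POST /v1/batch/jobs. The file UUID, model, and metadata values below are placeholders for illustration, not values from this commit:

```js
// Illustrative payload shape only — IDs and metadata are made up.
const data = {
  input_files: ["a1b2c3d4-0000-0000-0000-000000000000"], // from the fileIds prop
  endpoint: "/v1/chat/completions", // one of BATCH_JOB_ENDPOINT_OPTIONS
  model: "mistral-small-latest",    // from the modelId prop
  metadata: { job: "nightly-eval" },  // parseObj() result of the metadata prop
  timeout_hours: 24,                // default of the timeoutHours prop
};
```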
Lines changed: 32 additions & 0 deletions
@@ -0,0 +1,32 @@
+import mistralAI from "../../mistral_ai.app.mjs";
+import { parseArray } from "../../common/utils.mjs";
+import constants from "../../common/constants.mjs";
+
+export default {
+  key: "mistral_ai-create-embeddings",
+  name: "Create Embeddings",
+  description: "Create a new embedding in Mistral AI. [See the Documentation](https://docs.mistral.ai/api/#tag/embeddings)",
+  version: "0.0.1",
+  type: "action",
+  props: {
+    mistralAI,
+    input: {
+      type: "string",
+      label: "Input",
+      description: "The input text for which to create an embedding. May be a string or an array of strings.",
+    },
+  },
+  async run({ $ }) {
+    const response = await this.mistralAI.createEmbeddings({
+      $,
+      data: {
+        model: constants.EMBEDDINGS_MODEL,
+        input: parseArray(this.input),
+      },
+    });
+    if (response?.id) {
+      $.export("$summary", `Successfully created embedding with ID: ${response.id}`);
+    }
+    return response;
+  },
+};
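
Since input is a plain string prop, parseArray() decides whether the user supplied raw text or a JSON-encoded array. A minimal sketch of both paths, using the helper added in this commit's utils file (import path illustrative):

```js
import { parseArray } from "./common/utils.mjs";

// Plain text fails JSON.parse() and is passed through unchanged:
parseArray("The quick brown fox"); // => "The quick brown fox"

// A JSON-encoded array is decoded into a real array of strings:
parseArray('["first text", "second text"]'); // => ["first text", "second text"]
```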
Lines changed: 76 additions & 0 deletions
@@ -0,0 +1,76 @@
+import mistralAI from "../../mistral_ai.app.mjs";
+
+export default {
+  key: "mistral_ai-generate-text",
+  name: "Generate Text",
+  description: "Send a prompt to a Mistral AI model and generate a chat completion. [See the Documentation](https://docs.mistral.ai/api/#tag/chat)",
+  version: "0.0.1",
+  type: "action",
+  props: {
+    mistralAI,
+    message: {
+      type: "string",
+      label: "Message",
+      description: "The prompt message to send",
+    },
+    modelId: {
+      propDefinition: [
+        mistralAI,
+        "modelId",
+      ],
+    },
+    temperature: {
+      type: "string",
+      label: "Temperature",
+      description: "The sampling temperature to use; we recommend values between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. The default value varies depending on the model you are targeting.",
+      optional: true,
+    },
+    topP: {
+      type: "string",
+      label: "Top P",
+      description: "Nucleus sampling, where the model considers the tokens with top_p probability mass. A value of 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.",
+      optional: true,
+    },
+    maxTokens: {
+      type: "integer",
+      label: "Max Tokens",
+      description: "The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model's context length.",
+      optional: true,
+    },
+    randomSeed: {
+      type: "integer",
+      label: "Random Seed",
+      description: "The seed to use for random sampling. If set, different calls will generate deterministic results.",
+      optional: true,
+    },
+    n: {
+      type: "integer",
+      label: "N",
+      description: "Number of completions to return for each request; input tokens are only billed once.",
+      optional: true,
+    },
+  },
+  async run({ $ }) {
+    const response = await this.mistralAI.sendPrompt({
+      $,
+      data: {
+        model: this.modelId,
+        messages: [
+          {
+            content: this.message,
+            role: "user",
+          },
+        ],
+        temperature: this.temperature && +this.temperature,
+        top_p: this.topP && +this.topP,
+        max_tokens: this.maxTokens,
+        random_seed: this.randomSeed,
+        n: this.n,
+      },
+    });
+    if (response?.id) {
+      $.export("$summary", `Successfully retrieved response with ID: ${response.id}`);
+    }
+    return response;
+  },
+};
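
Note that temperature and topP are string props coerced with a unary plus, so unset values stay undefined and are dropped when the request body is serialized. A quick sketch of the `value && +value` idiom (the helper name is ours, for illustration):

```js
// Behavior of the `this.temperature && +this.temperature` pattern above:
const coerce = (v) => v && +v;

coerce("0.7");     // => 0.7 — string prop becomes a number for the API
coerce(undefined); // => undefined — key is omitted from the serialized body
```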
Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
+import mistralAI from "../../mistral_ai.app.mjs";
+
+export default {
+  key: "mistral_ai-list-models",
+  name: "List Models",
+  description: "Retrieve a list of available Mistral AI models that the user is authorized to access. [See the Documentation](https://docs.mistral.ai/api/#tag/models)",
+  version: "0.0.1",
+  type: "action",
+  props: {
+    mistralAI,
+  },
+  async run({ $ }) {
+    const { data } = await this.mistralAI.listModels({
+      $,
+    });
+    if (data?.length) {
+      $.export("$summary", `Successfully retrieved ${data.length} model${data.length === 1
+        ? ""
+        : "s"}`);
+    }
+    return data;
+  },
+};
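
The destructuring in run() assumes a list-style response from GET /v1/models, roughly like the following; field values are illustrative, not taken from this commit:

```js
// Assumed response shape — the action returns only the `data` array.
const exampleResponse = {
  object: "list",
  data: [
    { id: "mistral-small-latest", object: "model" },
    { id: "mistral-embed", object: "model" },
  ],
};
```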
Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
+const DEFAULT_PAGE_SIZE = 100;
+
+const EMBEDDINGS_MODEL = "mistral-embed";
+
+const BATCH_JOB_STATUS_OPTIONS = [
+  "SUCCESS",
+  "FAILED",
+  "TIMEOUT_EXCEEDED",
+  "CANCELLATION_REQUESTED",
+  "CANCELLED",
+];
+
+const BATCH_JOB_ENDPOINT_OPTIONS = [
+  "/v1/chat/completions",
+  "/v1/embeddings",
+  "/v1/fim/completions",
+  "/v1/moderations",
+  "/v1/chat/moderations",
+];
+
+export default {
+  DEFAULT_PAGE_SIZE,
+  EMBEDDINGS_MODEL,
+  BATCH_JOB_STATUS_OPTIONS,
+  BATCH_JOB_ENDPOINT_OPTIONS,
+};
Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@
+function parseArray(arr) {
+  if (!arr) {
+    return undefined;
+  }
+
+  if (typeof arr === "string") {
+    try {
+      return JSON.parse(arr);
+    } catch {
+      return arr;
+    }
+  }
+
+  return arr;
+}
+
+function parseObj(obj) {
+  if (!obj) {
+    return undefined;
+  }
+
+  if (typeof obj === "string") {
+    try {
+      return JSON.parse(obj);
+    } catch {
+      return obj;
+    }
+  }
+
+  return obj;
+}
+
+export {
+  parseArray,
+  parseObj,
+};
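
Both helpers tolerate values that arrive either as live objects/arrays or as JSON strings, and fall back to returning the raw string when parsing fails. A short sketch of parseObj() across the cases it handles (import path illustrative):

```js
import { parseObj } from "./common/utils.mjs";

parseObj(undefined);          // => undefined — empty optional prop
parseObj({ team: "data" });   // => { team: "data" } — already an object
parseObj('{"team": "data"}'); // => { team: "data" } — JSON string decoded
parseObj("not json");         // => "not json" — fallback to the raw string
```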
Lines changed: 119 additions & 5 deletions
@@ -1,11 +1,125 @@
+import { axios } from "@pipedream/platform";
+import constants from "./common/constants.mjs";
+
 export default {
   type: "app",
   app: "mistral_ai",
-  propDefinitions: {},
+  propDefinitions: {
+    fileIds: {
+      type: "string[]",
+      label: "File IDs",
+      description: "Array of input file UUIDs for batch processing",
+      async options({ page }) {
+        const { data } = await this.listFiles({
+          params: {
+            page,
+            page_size: constants.DEFAULT_PAGE_SIZE,
+          },
+        });
+        return data?.map(({
+          id: value, filename: label,
+        }) => ({
+          value,
+          label,
+        })) || [];
+      },
+    },
+    modelId: {
+      type: "string",
+      label: "Model ID",
+      description: "The identifier of the model to use",
+      async options() {
+        const { data } = await this.listModels();
+        return data?.map(({
+          id: value, name: label,
+        }) => ({
+          value,
+          label,
+        })) || [];
+      },
+    },
+  },
   methods: {
-    // this.$auth contains connected account data
-    authKeys() {
-      console.log(Object.keys(this.$auth));
+    _baseUrl() {
+      return "https://api.mistral.ai/v1";
+    },
+    _makeRequest({
+      $ = this,
+      path,
+      ...otherOpts
+    }) {
+      return axios($, {
+        ...otherOpts,
+        url: `${this._baseUrl()}${path}`,
+        headers: {
+          "Authorization": `Bearer ${this.$auth.api_key}`,
+          "Content-Type": "application/json",
+        },
+      });
+    },
+    listModels(opts = {}) {
+      return this._makeRequest({
+        path: "/models",
+        ...opts,
+      });
+    },
+    listBatchJobs(opts = {}) {
+      return this._makeRequest({
+        path: "/batch/jobs",
+        ...opts,
+      });
+    },
+    listFiles(opts = {}) {
+      return this._makeRequest({
+        path: "/files",
+        ...opts,
+      });
+    },
+    createEmbeddings(opts = {}) {
+      return this._makeRequest({
+        method: "POST",
+        path: "/embeddings",
+        ...opts,
+      });
+    },
+    sendPrompt(opts = {}) {
+      return this._makeRequest({
+        method: "POST",
+        path: "/chat/completions",
+        ...opts,
+      });
+    },
+    createBatchJob(opts = {}) {
+      return this._makeRequest({
+        method: "POST",
+        path: "/batch/jobs",
+        ...opts,
+      });
+    },
+    async *paginate({
+      fn,
+      params = {},
+      max,
+    }) {
+      params = {
+        ...params,
+        page: 0,
+        page_size: constants.DEFAULT_PAGE_SIZE,
+      };
+      let total, count = 0;
+      do {
+        const { data } = await fn({
+          params,
+        });
+        for (const item of data) {
+          yield item;
+          if (max && ++count >= max) {
+            return;
+          }
+        }
+        total = data?.length;
+        params.page++;
+      } while (total);
     },
   },
-};
+};
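
None of the actions in this commit consume the new paginate() generator yet. A hedged sketch of how a future component might drive it — the key, name, and max cap are hypothetical, and we assume Pipedream's usual binding of app methods accessed through the app prop:

```js
// Hypothetical future action consuming the paginate() generator above.
import mistralAI from "../../mistral_ai.app.mjs";

export default {
  key: "mistral_ai-list-batch-jobs", // hypothetical, not part of this commit
  name: "List Batch Jobs",
  version: "0.0.1",
  type: "action",
  props: {
    mistralAI,
  },
  async run({ $ }) {
    const jobs = [];
    const items = this.mistralAI.paginate({
      fn: this.mistralAI.listBatchJobs,
      max: 50, // illustrative cap on items to collect
    });
    for await (const job of items) {
      jobs.push(job);
    }
    $.export("$summary", `Retrieved ${jobs.length} batch job(s)`);
    return jobs;
  },
};
```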
