Skip to content

Commit 9787c89

Browse files
committed
Merge branch 'master' into issue-16657
2 parents 932e9c5 + cb64910 commit 9787c89

File tree

45 files changed

+2151
-40
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

45 files changed

+2151
-40
lines changed
Lines changed: 139 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,139 @@
import cerebras from "../../cerebras.app.mjs";

export default {
  name: "Create Chat Completion",
  key: "cerebras-create-chat-completion",
  description: "Create a chat completion with Cerebras AI. [See the documentation](https://inference-docs.cerebras.ai/api-reference/chat-completions)",
  version: "0.0.1",
  type: "action",
  props: {
    cerebras,
    model: {
      propDefinition: [
        cerebras,
        "model",
      ],
    },
    message: {
      type: "string",
      label: "Message",
      description: "The message to send to the model",
    },
    maxCompletionTokens: {
      type: "integer",
      label: "Max Completion Tokens",
      description: "The maximum number of tokens that can be generated in the completion. The total length of input tokens and generated tokens is limited by the model's context length.",
      optional: true,
    },
    stream: {
      propDefinition: [
        cerebras,
        "stream",
      ],
    },
    seed: {
      propDefinition: [
        cerebras,
        "seed",
      ],
    },
    stop: {
      propDefinition: [
        cerebras,
        "stop",
      ],
    },
    temperature: {
      propDefinition: [
        cerebras,
        "temperature",
      ],
    },
    topP: {
      propDefinition: [
        cerebras,
        "topP",
      ],
    },
    toolChoice: {
      type: "string",
      label: "Tool Choice",
      description: "Controls which (if any) tool is called by the model",
      optional: true,
      options: [
        "none",
        "auto",
        "required",
      ],
    },
    tools: {
      type: "object",
      label: "Tools",
      description: "A list of tools the model may call. [See the documentation](https://inference-docs.cerebras.ai/api-reference/chat-completions#tool-choice) for more information",
      optional: true,
    },
    user: {
      propDefinition: [
        cerebras,
        "user",
      ],
    },
    logprobs: {
      type: "boolean",
      label: "Log Probabilities",
      description: "Whether to return log probabilities of the output tokens or not",
      optional: true,
      default: false,
    },
    topLogprobs: {
      type: "integer",
      label: "Top Log Probabilities",
      description: "An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability",
      optional: true,
    },
  },
  /**
   * Sends the user's message to the Cerebras chat-completions endpoint and
   * returns the raw API response.
   */
  async run({ $ }) {
    const {
      model,
      message,
      maxCompletionTokens,
      stream,
      seed,
      stop,
      temperature,
      topP,
      toolChoice,
      tools,
      user,
      logprobs,
      topLogprobs,
    } = this;

    // Object-type props can arrive as a JSON-encoded string from the UI;
    // the API expects an actual array of tool definitions, so parse it.
    let parsedTools = tools;
    if (typeof tools === "string") {
      try {
        parsedTools = JSON.parse(tools);
      } catch (err) {
        throw new Error(`Invalid JSON in "Tools": ${err.message}`);
      }
    }

    const response = await this.cerebras.chatCompletion({
      $,
      data: {
        model,
        messages: [
          {
            role: "user",
            content: message,
          },
        ],
        max_completion_tokens: maxCompletionTokens,
        stream,
        seed,
        stop,
        // `temperature` and `topP` are string props (the platform has no
        // float prop type); the API expects numbers, so convert when set.
        temperature: temperature == null
          ? undefined
          : Number(temperature),
        top_p: topP == null
          ? undefined
          : Number(topP),
        tool_choice: toolChoice,
        tools: parsedTools,
        user,
        logprobs,
        top_logprobs: topLogprobs,
      },
    });

    $.export("$summary", "Successfully created chat completion");
    return response;
  },
};
Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,128 @@
import cerebras from "../../cerebras.app.mjs";
import { parseObject } from "../../common/utils.mjs";
import { ConfigurationError } from "@pipedream/platform";

export default {
  name: "Create Completion",
  key: "cerebras-create-completion",
  description: "Create a completion with Cerebras AI. [See the documentation](https://inference-docs.cerebras.ai/api-reference/completions)",
  version: "0.0.1",
  type: "action",
  props: {
    cerebras,
    model: {
      propDefinition: [
        cerebras,
        "model",
      ],
    },
    prompt: {
      type: "string",
      label: "Prompt",
      description: "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays",
    },
    stream: {
      propDefinition: [
        cerebras,
        "stream",
      ],
    },
    returnRawTokens: {
      type: "boolean",
      label: "Return Raw Tokens",
      description: "Return raw tokens instead of text",
      optional: true,
      default: false,
    },
    maxTokens: {
      type: "integer",
      label: "Max Tokens",
      description: "The maximum number of tokens that can be generated in the completion. The total length of input tokens and generated tokens is limited by the model's context length",
      optional: true,
    },
    minTokens: {
      type: "integer",
      label: "Min Tokens",
      description: "The minimum number of tokens to generate for a completion. If not specified or set to 0, the model will generate as many tokens as it deems necessary. Setting to -1 sets to max sequence length",
      optional: true,
    },
    seed: {
      propDefinition: [
        cerebras,
        "seed",
      ],
    },
    stop: {
      propDefinition: [
        cerebras,
        "stop",
      ],
    },
    temperature: {
      propDefinition: [
        cerebras,
        "temperature",
      ],
    },
    topP: {
      propDefinition: [
        cerebras,
        "topP",
      ],
    },
    echo: {
      type: "boolean",
      label: "Echo",
      description: "Echo back the prompt in addition to the completion. Incompatible with return_raw_tokens=True",
      optional: true,
      default: false,
    },
    user: {
      propDefinition: [
        cerebras,
        "user",
      ],
    },
  },
  /**
   * Sends the prompt to the Cerebras completions endpoint and returns the
   * raw API response.
   *
   * @throws {ConfigurationError} when `echo` and `returnRawTokens` are both
   *   enabled, which the API does not support.
   */
  async run({ $ }) {
    const {
      model,
      prompt,
      stream,
      returnRawTokens,
      maxTokens,
      minTokens,
      seed,
      stop,
      temperature,
      topP,
      echo,
      user,
    } = this;

    // Fail fast on the documented echo / raw-tokens incompatibility instead
    // of surfacing an opaque API error.
    if (returnRawTokens && echo) {
      throw new ConfigurationError("The 'echo' option is incompatible with 'returnRawTokens=true'. Please disable one of these options.");
    }

    const response = await this.cerebras.completion({
      $,
      data: {
        model,
        // `prompt` may be a JSON-encoded array of strings/tokens.
        prompt: parseObject(prompt),
        stream,
        return_raw_tokens: returnRawTokens,
        max_tokens: maxTokens,
        min_tokens: minTokens,
        seed,
        stop,
        // `temperature` and `topP` are string props (the platform has no
        // float prop type); the API expects numbers, so convert when set.
        temperature: temperature == null
          ? undefined
          : Number(temperature),
        top_p: topP == null
          ? undefined
          : Number(topP),
        echo,
        user,
      },
    });

    $.export("$summary", "Successfully created completion");
    return response;
  },
};
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
import cerebras from "../../cerebras.app.mjs";

export default {
  name: "List Models",
  key: "cerebras-list-models",
  description: "List all available Cerebras models. [See the documentation](https://inference-docs.cerebras.ai/api-reference/models)",
  version: "0.0.1",
  type: "action",
  props: {
    cerebras,
  },
  /**
   * Fetches the available models and returns the list (the API wraps the
   * list in a `data` envelope, which is unwrapped here).
   */
  async run({ $ }) {
    const { data: models } = await this.cerebras.listModels({
      $,
    });

    $.export("$summary", `Successfully retrieved ${models.length} models`);
    return models;
  },
};
Lines changed: 88 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,94 @@
import { axios } from "@pipedream/platform";

export default {
  type: "app",
  app: "cerebras",
  propDefinitions: {
    model: {
      type: "string",
      label: "Model",
      description: "The model to use for the request",
      // Populate options dynamically from the live /models endpoint.
      async options() {
        const { data: models } = await this.listModels();
        return models.map((model) => model.id);
      },
    },
    stream: {
      type: "boolean",
      label: "Stream",
      description: "If set, partial message deltas will be sent. Tokens will be sent as data-only server-sent events as they become available",
      optional: true,
      default: false,
    },
    seed: {
      type: "integer",
      label: "Seed",
      description: "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result",
      optional: true,
    },
    stop: {
      type: "string",
      label: "Stop",
      description: "Up to 4 sequences, separated by commas, where the API will stop generating further tokens. The returned text will not contain the stop sequence",
      optional: true,
    },
    // NOTE: `temperature` and `topP` are string-typed because the platform
    // prop system has no float type; consumers convert before calling the API.
    temperature: {
      type: "string",
      label: "Temperature",
      description: "What sampling temperature to use, between 0 and 1.5. Higher values (e.g., 0.8) will make the output more random, while lower values (e.g., 0.2) will make it more focused and deterministic",
      optional: true,
      default: "1.0",
    },
    topP: {
      type: "string",
      label: "Top P",
      description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the tokens with top_p probability mass",
      optional: true,
      default: "1.0",
    },
    user: {
      type: "string",
      label: "User",
      description: "A unique identifier representing your end-user, which can help Cerebras to monitor and detect abuse",
      optional: true,
    },
  },
  methods: {
    // Base URL for all Cerebras inference API calls.
    _baseUrl() {
      return "https://api.cerebras.ai/v1";
    },
    /**
     * Shared request helper: prefixes the path with the base URL and attaches
     * the bearer token from the connected account.
     */
    _makeRequest({
      $ = this, path, ...otherOpts
    }) {
      const url = `${this._baseUrl()}${path}`;
      return axios($, {
        url,
        headers: {
          "Authorization": `Bearer ${this.$auth.api_key}`,
        },
        ...otherOpts,
      });
    },
    // GET /models — list the available models.
    listModels(opts = {}) {
      return this._makeRequest({
        path: "/models",
        ...opts,
      });
    },
    // POST /chat/completions — create a chat completion.
    chatCompletion(opts = {}) {
      return this._makeRequest({
        method: "POST",
        path: "/chat/completions",
        ...opts,
      });
    },
    // POST /completions — create a (non-chat) completion.
    completion(opts = {}) {
      return this._makeRequest({
        method: "POST",
        path: "/completions",
        ...opts,
      });
    },
  },
};

0 commit comments

Comments
 (0)