Skip to content

Commit 2e84ee6

Browse files
authored
Merge branch 'master' into master
2 parents 5974fab + 18c004b commit 2e84ee6

File tree

673 files changed

+23654
-1060
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

673 files changed

+23654
-1060
lines changed

components/_302_ai/_302_ai.app.mjs

Lines changed: 115 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,121 @@
1+
import { axios } from "@pipedream/platform";
2+
13
export default {
  type: "app",
  app: "_302_ai",
  propDefinitions: {
    modelId: {
      type: "string",
      label: "Model",
      description: "The ID of the model to use",
      async options() {
        // Any model the account can see is a valid choice here.
        const models = await this.listModels();
        return models.map(({ id }) => ({
          label: id,
          value: id,
        }));
      },
    },
    chatCompletionModelId: {
      type: "string",
      label: "Model",
      description: "The ID of the model to use for chat completions",
      async options() {
        // Filter for chat models (similar to OpenAI).
        const models = await this.listModels();
        const chatModels = models.filter(({ id }) =>
          id.match(/gpt|claude|gemini|llama|mistral|deepseek/gi));
        return chatModels.map(({ id }) => ({
          label: id,
          value: id,
        }));
      },
    },
    embeddingsModelId: {
      type: "string",
      label: "Model",
      description: "The ID of the embeddings model to use",
      async options() {
        // Filter for embedding models.
        const models = await this.listModels();
        const embeddingModels = models.filter(({ id }) => id.match(/embedding/gi));
        return embeddingModels.map(({ id }) => ({
          label: id,
          value: id,
        }));
      },
    },
  },
  methods: {
    // API key of the connected 302.AI account.
    _apiKey() {
      return this.$auth.api_key;
    },
    // Root URL shared by every 302.AI REST endpoint.
    _baseApiUrl() {
      return "https://api.302.ai/v1";
    },
    /**
     * Thin wrapper around the platform axios client that prepends the base
     * URL and injects bearer auth plus a JSON content type. Caller-supplied
     * headers are kept, but auth/content-type always win.
     */
    _makeRequest({
      $ = this,
      path,
      ...args
    } = {}) {
      const headers = {
        ...args.headers,
        "Authorization": `Bearer ${this._apiKey()}`,
        "Content-Type": "application/json",
      };
      return axios($, {
        ...args,
        url: `${this._baseApiUrl()}${path}`,
        headers,
      });
    },
    /**
     * Fetches the models available to the account.
     * Returns an empty array when the response carries no `data` list.
     */
    async listModels({ $ } = {}) {
      const { data } = await this._makeRequest({
        $,
        path: "/models",
      });
      return data || [];
    },
    /**
     * POSTs to a completion-style endpoint and hoists the first choice to
     * the top level of the returned object for caller convenience.
     */
    async _makeCompletion({
      path, ...args
    }) {
      const data = await this._makeRequest({
        path,
        method: "POST",
        ...args,
      });

      // For completions, surface the text of the first choice at the
      // top-level; for chat completions, surface the assistant message.
      let generated_text;
      let generated_message;
      if (path === "/completions") {
        generated_text = data.choices?.[0]?.text;
      } else if (path === "/chat/completions") {
        generated_message = data.choices?.[0]?.message;
      }

      return {
        generated_text,
        generated_message,
        ...data,
      };
    },
    // Convenience wrapper for POST /chat/completions.
    createChatCompletion(args = {}) {
      return this._makeCompletion({
        path: "/chat/completions",
        ...args,
      });
    },
    // Convenience wrapper for POST /embeddings.
    createEmbeddings(args = {}) {
      return this._makeRequest({
        path: "/embeddings",
        method: "POST",
        ...args,
      });
    },
  },
};
Lines changed: 211 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,211 @@
1+
import _302_ai from "../../_302_ai.app.mjs";
2+
import constants from "../common/constants.mjs";
3+
4+
export default {
  name: "Chat using Functions",
  // Bumped from 0.0.1: fixes the forced tool_choice payload shape.
  version: "0.0.2",
  annotations: {
    destructiveHint: false,
    openWorldHint: true,
    readOnlyHint: false,
  },
  key: "_302_ai-chat-using-functions",
  description: "Enable your 302.AI model to invoke user-defined functions. Useful for conditional logic, workflow orchestration, and tool invocation within conversations. [See documentation](https://doc.302.ai/211560247e0)",
  type: "action",
  props: {
    _302_ai,
    modelId: {
      propDefinition: [
        _302_ai,
        "chatCompletionModelId",
      ],
    },
    input: {
      type: "string",
      label: "Chat Input",
      description: "Text input to the model used to generate a response",
    },
    functions: {
      type: "string",
      label: "Functions",
      description: "A valid JSON array of tools/functions using the OpenAI function schema definition. Each tool must have a `type` property set to \"function\" and a `function` object with `name`, `description`, and `parameters`.",
      default:
        `[
  {
    "type": "function",
    "function": {
      "name": "get_current_weather",
      "description": "Get the current weather in a given location",
      "parameters": {
        "type": "object",
        "properties": {
          "location": {
            "type": "string",
            "description": "The city and state, e.g. San Francisco, CA"
          },
          "unit": {
            "type": "string",
            "enum": ["celsius", "fahrenheit"]
          }
        },
        "required": ["location"]
      }
    }
  }
]`,
    },
    instructions: {
      type: "string",
      label: "Instructions",
      description: "System instructions for the model",
      optional: true,
    },
    toolChoice: {
      type: "string",
      label: "Tool Choice",
      description: "- **auto**: The model decides whether and how many functions to call.\n- **required**: The model must call one or more functions.\n- **function_name**: Enter a custom function name to force the model to call this specific function.",
      optional: true,
      default: "auto",
      options: [
        "auto",
        "required",
      ],
    },
    parallelToolCalls: {
      type: "string",
      label: "Parallel Function Calling",
      description: "Allow or prevent the model to call multiple functions in a single turn",
      optional: true,
      default: "1",
      options: [
        {
          label: "Enabled",
          value: "1",
        },
        {
          label: "Disabled",
          value: "0",
        },
      ],
    },
    maxTokens: {
      label: "Max Tokens",
      description: "The maximum number of tokens to generate in the completion.",
      type: "string",
      optional: true,
    },
    temperature: {
      label: "Temperature",
      description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
      type: "string",
      optional: true,
    },
    responseFormat: {
      type: "string",
      label: "Response Format",
      description: "- **Text**: Returns unstructured text output.\n- **JSON Schema**: Enables you to define a specific structure for the model's output using a JSON schema.",
      options: [
        constants.CHAT_RESPONSE_FORMAT.TEXT.value,
        constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value,
      ],
      default: constants.CHAT_RESPONSE_FORMAT.TEXT.value,
      optional: true,
      reloadProps: true,
    },
  },
  // Expose the JSON Schema field only when that response format is selected.
  additionalProps() {
    const props = {};

    if (this.responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value) {
      props.jsonSchema = {
        type: "string",
        label: "JSON Schema",
        description: "Define the schema that the model's output must adhere to.",
      };
    }

    return props;
  },
  /**
   * Builds an OpenAI-compatible chat-completions request with user-supplied
   * tools and sends it through the 302.AI app.
   *
   * @param {object} opts - Pipedream run context.
   * @param {object} opts.$ - Step execution context used for exports.
   * @returns {Promise<object>} The raw chat-completion response (with
   *   `generated_message` hoisted by the app's `_makeCompletion`).
   * @throws {Error} When the Functions or JSON Schema props contain
   *   invalid JSON.
   */
  async run({ $ }) {
    const messages = [];

    // Optional system prompt goes first so the model treats it as instructions.
    if (this.instructions) {
      messages.push({
        role: "system",
        content: this.instructions,
      });
    }

    messages.push({
      role: "user",
      content: this.input,
    });

    const data = {
      model: this.modelId,
      messages,
      // The prop stores "1"/"0" strings; the API expects a boolean.
      parallel_tool_calls: parseInt(this.parallelToolCalls, 10) === 1,
      tools: [],
    };

    if (this.maxTokens) {
      data.max_tokens = parseInt(this.maxTokens, 10);
    }

    if (this.temperature) {
      data.temperature = parseFloat(this.temperature);
    }

    // Accept either a JSON string (from the UI) or an already-parsed value.
    let functions = this.functions;
    if (typeof functions === "string") {
      try {
        functions = JSON.parse(functions);
      } catch (error) {
        throw new Error("Invalid JSON format in the provided Functions Schema");
      }
    }

    if (Array.isArray(functions)) {
      data.tools.push(...functions);
    } else {
      data.tools.push(functions);
    }

    if (this.toolChoice) {
      if (this.toolChoice === "auto" || this.toolChoice === "required") {
        data.tool_choice = this.toolChoice;
      } else {
        // FIX: forcing a specific function must use the nested shape
        // { type: "function", function: { name } } required by the
        // OpenAI-compatible chat-completions API; the previous flat
        // { type, name } form is rejected by the endpoint.
        data.tool_choice = {
          type: "function",
          function: {
            name: this.toolChoice,
          },
        };
      }
    }

    if (this.responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value
      && this.jsonSchema) {
      try {
        data.response_format = {
          type: this.responseFormat,
          // NOTE(review): the API expects `json_schema` to be an object with
          // `name` and `schema` keys — presumably the user supplies that
          // wrapper in the prop; confirm against the 302.AI docs.
          json_schema: typeof this.jsonSchema === "string"
            ? JSON.parse(this.jsonSchema)
            : this.jsonSchema,
        };
      } catch (error) {
        throw new Error("Invalid JSON format in the provided JSON Schema");
      }
    }

    const response = await this._302_ai.createChatCompletion({
      $,
      data,
    });

    if (response) {
      $.export("$summary", `Successfully sent chat with id ${response.id}`);
    }

    return response;
  },
};
211+

0 commit comments

Comments
 (0)