120 changes: 115 additions & 5 deletions components/_302_ai/_302_ai.app.mjs
@@ -1,11 +1,121 @@
import { axios } from "@pipedream/platform";

export default {
type: "app",
app: "_302_ai",
propDefinitions: {
modelId: {
type: "string",
label: "Model",
description: "The ID of the model to use",
async options() {
const models = await this.listModels();
return models.map((model) => ({
label: model.id,
value: model.id,
}));
},
},
chatCompletionModelId: {
type: "string",
label: "Model",
description: "The ID of the model to use for chat completions",
async options() {
const models = await this.listModels();
// Filter for chat models (similar to OpenAI)
return models
.filter((model) => model.id.match(/gpt|claude|gemini|llama|mistral|deepseek/gi))
.map((model) => ({
label: model.id,
value: model.id,
}));
},
},
embeddingsModelId: {
type: "string",
label: "Model",
description: "The ID of the embeddings model to use",
async options() {
const models = await this.listModels();
// Filter for embedding models
return models
.filter((model) => model.id.match(/embedding/gi))
.map((model) => ({
label: model.id,
value: model.id,
}));
},
},
},
methods: {
// this.$auth contains connected account data
_apiKey() {
return this.$auth.api_key;
},
_baseApiUrl() {
return "https://api.302.ai/v1";
},
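// Shared request helper: prefixes each path with the 302.AI base URL and injects the Bearer API key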
_makeRequest({
$ = this,
path,
...args
} = {}) {
return axios($, {
...args,
url: `${this._baseApiUrl()}${path}`,
headers: {
...args.headers,
"Authorization": `Bearer ${this._apiKey()}`,
"Content-Type": "application/json",
},
});
},
async listModels({ $ } = {}) {
const { data: models } = await this._makeRequest({
$,
path: "/models",
});
return models || [];
},
async _makeCompletion({
path, ...args
}) {
const data = await this._makeRequest({
path,
method: "POST",
...args,
});

// For completions, return the text of the first choice at the top-level
let generated_text;
if (path === "/completions") {
const { choices } = data;
generated_text = choices?.[0]?.text;
}
// For chat completions, return the assistant message at the top-level
let generated_message;
if (path === "/chat/completions") {
const { choices } = data;
generated_message = choices?.[0]?.message;
}

return {
generated_text,
generated_message,
...data,
};
},
createChatCompletion(args = {}) {
return this._makeCompletion({
path: "/chat/completions",
...args,
});
},
createEmbeddings(args = {}) {
return this._makeRequest({
path: "/embeddings",
method: "POST",
...args,
});
},
},
};
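For reference, a minimal sketch of an action consuming these app methods (the key, name, and prompt are illustrative; prop wiring follows Pipedream's standard component API):

import _302_ai from "../../_302_ai.app.mjs";

export default {
  key: "_302_ai-example-chat", // hypothetical key for illustration
  name: "Example Chat",
  version: "0.0.1",
  type: "action",
  props: {
    _302_ai,
    modelId: {
      propDefinition: [
        _302_ai,
        "chatCompletionModelId",
      ],
    },
  },
  async run({ $ }) {
    // createChatCompletion posts to /chat/completions and hoists the
    // assistant reply to `generated_message` alongside the raw response
    const response = await this._302_ai.createChatCompletion({
      $,
      data: {
        model: this.modelId,
        messages: [
          {
            role: "user",
            content: "Hello!",
          },
        ],
      },
    });
    return response.generated_message;
  },
};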
@@ -0,0 +1,211 @@
import _302_ai from "../../_302_ai.app.mjs";
import constants from "../common/constants.mjs";

export default {
name: "Chat using Functions",
version: "0.0.1",
annotations: {
destructiveHint: false,
openWorldHint: true,
readOnlyHint: false,
},
key: "_302_ai-chat-using-functions",
description: "Enable your 302.AI model to invoke user-defined functions. Useful for conditional logic, workflow orchestration, and tool invocation within conversations. [See documentation](https://doc.302.ai/211560247e0)",
type: "action",
props: {
_302_ai,
modelId: {
propDefinition: [
_302_ai,
"chatCompletionModelId",
],
},
input: {
type: "string",
label: "Chat Input",
description: "Text input to the model used to generate a response",
},
functions: {
type: "string",
label: "Functions",
description: "A valid JSON array of tools/functions using the OpenAI function schema definition. Each tool must have a `type` property set to \"function\" and a `function` object with `name`, `description`, and `parameters`.",
default:
`[
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
}
]`,
},
instructions: {
type: "string",
label: "Instructions",
description: "System instructions for the model",
optional: true,
},
toolChoice: {
type: "string",
label: "Tool Choice",
description: "- **auto**: The model decides whether and how many functions to call.\n- **required**: The model must call one or more functions.\n- **function_name**: Enter a custom function name to force the model to call this specific function.",
optional: true,
default: "auto",
options: [
"auto",
"required",
],
},
parallelToolCalls: {
type: "string",
label: "Parallel Function Calling",
description: "Allow or prevent the model to call multiple functions in a single turn",
optional: true,
default: "1",
options: [
{
label: "Enabled",
value: "1",
},
{
label: "Disabled",
value: "0",
},
],
},
maxTokens: {
label: "Max Tokens",
description: "The maximum number of tokens to generate in the completion.",
type: "string",
optional: true,
},
temperature: {
label: "Temperature",
description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
type: "string",
optional: true,
},
responseFormat: {
type: "string",
label: "Response Format",
description: "- **Text**: Returns unstructured text output.\n- **JSON Schema**: Enables you to define a specific structure for the model's output using a JSON schema.",
options: [
constants.CHAT_RESPONSE_FORMAT.TEXT.value,
constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value,
],
default: constants.CHAT_RESPONSE_FORMAT.TEXT.value,
optional: true,
reloadProps: true,
},
},
additionalProps() {
const props = {};

if (this.responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value) {
props.jsonSchema = {
type: "string",
label: "JSON Schema",
description: "Define the schema that the model's output must adhere to.",
};
}

return props;
},
async run({ $ }) {
const messages = [];

if (this.instructions) {
messages.push({
role: "system",
content: this.instructions,
});
}

messages.push({
role: "user",
content: this.input,
});

const data = {
model: this.modelId,
messages,
parallel_tool_calls: parseInt(this.parallelToolCalls, 10) === 1,
tools: [],
};

if (this.maxTokens) {
data.max_tokens = parseInt(this.maxTokens, 10);
}

if (this.temperature) {
data.temperature = parseFloat(this.temperature);
}

let functions = this.functions;
if (typeof functions === "string") {
try {
functions = JSON.parse(functions);
} catch (error) {
throw new Error("Invalid JSON format in the provided Functions Schema");
}
}

if (Array.isArray(functions)) {
data.tools.push(...functions);
} else {
data.tools.push(functions);
}

if (this.toolChoice) {
if (this.toolChoice === "auto" || this.toolChoice === "required") {
data.tool_choice = this.toolChoice;
} else {
// Chat completions expects the forced function name nested under `function`
data.tool_choice = {
type: "function",
function: {
name: this.toolChoice,
},
};
}
}

if (this.responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value
&& this.jsonSchema) {
try {
data.response_format = {
type: this.responseFormat,
json_schema: typeof this.jsonSchema === "string"
? JSON.parse(this.jsonSchema)
: this.jsonSchema,
};
} catch (error) {
throw new Error("Invalid JSON format in the provided JSON Schema");
}
}

const response = await this._302_ai.createChatCompletion({
$,
data,
});

if (response) {
$.export("$summary", `Successfully sent chat with id ${response.id}`);
}

return response;
},
};
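Because `_makeCompletion` hoists the assistant message to the top level, a function invocation surfaces directly under `generated_message.tool_calls` in the action's return value. A sketch of the shape, following the OpenAI-compatible chat completions schema (the ID and arguments are illustrative):

{
  "generated_message": {
    "role": "assistant",
    "content": null,
    "tool_calls": [
      {
        "id": "call_abc123",
        "type": "function",
        "function": {
          "name": "get_current_weather",
          "arguments": "{\"location\": \"San Francisco, CA\"}"
        }
      }
    ]
  }
}

The raw response fields (`id`, `choices`, `usage`) are spread alongside `generated_message`, so the full completion remains available to downstream steps.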
