-
Notifications
You must be signed in to change notification settings - Fork 5.5k
302 ai momps #18843
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
michelle0927
merged 12 commits into
PipedreamHQ:master
from
sergio-eliot-rodriguez:302_ai-momps
Oct 24, 2025
Merged
302 ai momps #18843
Changes from 4 commits
Commits
Show all changes
12 commits
Select commit
Hold shift + click to select a range
4ca205c
new 302 AI components, getting started
sergio-eliot-rodriguez ec53f11
302.AI new actions
sergio-eliot-rodriguez 67439e0
updates pnpm-lock.yaml
sergio-eliot-rodriguez 34398de
removed context file for llms not intended to push
sergio-eliot-rodriguez a42de94
Delete mdc_pipedream-context.mdc
sergio-eliot-rodriguez fe23910
Delete components/memory.mdc
sergio-eliot-rodriguez 42b1763
Delete components/ticketsauce/ticketsauce_components.mdc
sergio-eliot-rodriguez 6ad14ba
Delete components/pipedream-context.mdc
sergio-eliot-rodriguez bb7743e
Delete mdc_memory.mdc
sergio-eliot-rodriguez 3af7a6e
restores .husky/pre-push
sergio-eliot-rodriguez 611a0de
Update components/_302_ai/actions/classify-items/classify-items.mjs
sergio-eliot-rodriguez 473a798
running pnpm install
sergio-eliot-rodriguez File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,11 +1,121 @@ | ||
| import { axios } from "@pipedream/platform"; | ||
|
|
||
| export default { | ||
| type: "app", | ||
| app: "_302_ai", | ||
| propDefinitions: {}, | ||
| propDefinitions: { | ||
| modelId: { | ||
| type: "string", | ||
| label: "Model", | ||
| description: "The ID of the model to use", | ||
| async options() { | ||
| const models = await this.listModels(); | ||
| return models.map((model) => ({ | ||
| label: model.id, | ||
| value: model.id, | ||
| })); | ||
| }, | ||
| }, | ||
| chatCompletionModelId: { | ||
| type: "string", | ||
| label: "Model", | ||
| description: "The ID of the model to use for chat completions", | ||
| async options() { | ||
| const models = await this.listModels(); | ||
| // Filter for chat models (similar to OpenAI) | ||
| return models | ||
| .filter((model) => model.id.match(/gpt|claude|gemini|llama|mistral|deepseek/gi)) | ||
| .map((model) => ({ | ||
| label: model.id, | ||
| value: model.id, | ||
| })); | ||
| }, | ||
| }, | ||
| embeddingsModelId: { | ||
| type: "string", | ||
| label: "Model", | ||
| description: "The ID of the embeddings model to use", | ||
| async options() { | ||
| const models = await this.listModels(); | ||
| // Filter for embedding models | ||
| return models | ||
| .filter((model) => model.id.match(/embedding/gi)) | ||
| .map((model) => ({ | ||
| label: model.id, | ||
| value: model.id, | ||
| })); | ||
| }, | ||
| }, | ||
| }, | ||
| methods: { | ||
| // this.$auth contains connected account data | ||
| authKeys() { | ||
| console.log(Object.keys(this.$auth)); | ||
| _apiKey() { | ||
| return this.$auth.api_key; | ||
| }, | ||
| _baseApiUrl() { | ||
| return "https://api.302.ai/v1"; | ||
| }, | ||
| _makeRequest({ | ||
| $ = this, | ||
| path, | ||
| ...args | ||
| } = {}) { | ||
| return axios($, { | ||
| ...args, | ||
| url: `${this._baseApiUrl()}${path}`, | ||
| headers: { | ||
| ...args.headers, | ||
| "Authorization": `Bearer ${this._apiKey()}`, | ||
| "Content-Type": "application/json", | ||
| }, | ||
| }); | ||
| }, | ||
| async listModels({ $ } = {}) { | ||
| const { data: models } = await this._makeRequest({ | ||
| $, | ||
| path: "/models", | ||
| }); | ||
| return models || []; | ||
| }, | ||
| async _makeCompletion({ | ||
| path, ...args | ||
| }) { | ||
| const data = await this._makeRequest({ | ||
| path, | ||
| method: "POST", | ||
| ...args, | ||
| }); | ||
|
|
||
| // For completions, return the text of the first choice at the top-level | ||
| let generated_text; | ||
| if (path === "/completions") { | ||
| const { choices } = data; | ||
| generated_text = choices?.[0]?.text; | ||
| } | ||
| // For chat completions, return the assistant message at the top-level | ||
| let generated_message; | ||
| if (path === "/chat/completions") { | ||
| const { choices } = data; | ||
| generated_message = choices?.[0]?.message; | ||
| } | ||
|
|
||
| return { | ||
| generated_text, | ||
| generated_message, | ||
| ...data, | ||
| }; | ||
| }, | ||
| createChatCompletion(args = {}) { | ||
| return this._makeCompletion({ | ||
| path: "/chat/completions", | ||
| ...args, | ||
| }); | ||
| }, | ||
| createEmbeddings(args = {}) { | ||
| return this._makeRequest({ | ||
| path: "/embeddings", | ||
| method: "POST", | ||
| ...args, | ||
| }); | ||
| }, | ||
| }, | ||
| }; | ||
| }; |
211 changes: 211 additions & 0 deletions
211
components/_302_ai/actions/chat-using-functions/chat-using-functions.mjs
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,211 @@ | ||
| import _302_ai from "../../_302_ai.app.mjs"; | ||
| import constants from "../common/constants.mjs"; | ||
|
|
||
// Action: let a 302.AI chat model call user-defined functions (tools),
// following the OpenAI Chat Completions function-calling schema.
export default {
  name: "Chat using Functions",
  version: "0.0.1",
  annotations: {
    destructiveHint: false,
    openWorldHint: true,
    readOnlyHint: false,
  },
  key: "_302_ai-chat-using-functions",
  description: "Enable your 302.AI model to invoke user-defined functions. Useful for conditional logic, workflow orchestration, and tool invocation within conversations. [See documentation](https://doc.302.ai/211560247e0)",
  type: "action",
  props: {
    _302_ai,
    modelId: {
      propDefinition: [
        _302_ai,
        "chatCompletionModelId",
      ],
    },
    input: {
      type: "string",
      label: "Chat Input",
      description: "Text input to the model used to generate a response",
    },
    functions: {
      type: "string",
      label: "Functions",
      description: "A valid JSON array of tools/functions using the OpenAI function schema definition. Each tool must have a `type` property set to \"function\" and a `function` object with `name`, `description`, and `parameters`.",
      default:
        `[
  {
    "type": "function",
    "function": {
      "name": "get_current_weather",
      "description": "Get the current weather in a given location",
      "parameters": {
        "type": "object",
        "properties": {
          "location": {
            "type": "string",
            "description": "The city and state, e.g. San Francisco, CA"
          },
          "unit": {
            "type": "string",
            "enum": ["celsius", "fahrenheit"]
          }
        },
        "required": ["location"]
      }
    }
  }
]`,
    },
    instructions: {
      type: "string",
      label: "Instructions",
      description: "System instructions for the model",
      optional: true,
    },
    toolChoice: {
      type: "string",
      label: "Tool Choice",
      description: "- **auto**: The model decides whether and how many functions to call.\n- **required**: The model must call one or more functions.\n- **function_name**: Enter a custom function name to force the model to call this specific function.",
      optional: true,
      default: "auto",
      options: [
        "auto",
        "required",
      ],
    },
    parallelToolCalls: {
      type: "string",
      label: "Parallel Function Calling",
      description: "Allow or prevent the model to call multiple functions in a single turn",
      optional: true,
      default: "1",
      options: [
        {
          label: "Enabled",
          value: "1",
        },
        {
          label: "Disabled",
          value: "0",
        },
      ],
    },
    maxTokens: {
      label: "Max Tokens",
      description: "The maximum number of tokens to generate in the completion.",
      type: "string",
      optional: true,
    },
    temperature: {
      label: "Temperature",
      description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
      type: "string",
      optional: true,
    },
    responseFormat: {
      type: "string",
      label: "Response Format",
      description: "- **Text**: Returns unstructured text output.\n- **JSON Schema**: Enables you to define a specific structure for the model's output using a JSON schema.",
      options: [
        constants.CHAT_RESPONSE_FORMAT.TEXT.value,
        constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value,
      ],
      default: constants.CHAT_RESPONSE_FORMAT.TEXT.value,
      optional: true,
      reloadProps: true,
    },
  },
  // Only show the JSON Schema input when the user selects that response format.
  additionalProps() {
    const props = {};

    if (this.responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value) {
      props.jsonSchema = {
        type: "string",
        label: "JSON Schema",
        description: "Define the schema that the model's output must adhere to.",
      };
    }

    return props;
  },
  /**
   * Build the chat-completions payload (messages, tools, tool_choice,
   * sampling options, response format) and send it to 302.AI.
   *
   * @param {object} ctx
   * @param {object} ctx.$ - Pipedream execution context
   * @returns {Promise<object>} the chat completion response (with
   *   `generated_message` surfaced by the app's _makeCompletion helper)
   * @throws {Error} when the Functions or JSON Schema props are not valid JSON
   */
  async run({ $ }) {
    const messages = [];

    // Optional system prompt goes first so it frames the conversation.
    if (this.instructions) {
      messages.push({
        role: "system",
        content: this.instructions,
      });
    }

    messages.push({
      role: "user",
      content: this.input,
    });

    const data = {
      model: this.modelId,
      messages,
      // Prop is a string ("1"/"0"); the API expects a boolean.
      parallel_tool_calls: parseInt(this.parallelToolCalls, 10) === 1,
      tools: [],
    };

    if (this.maxTokens) {
      data.max_tokens = parseInt(this.maxTokens, 10);
    }

    if (this.temperature) {
      data.temperature = parseFloat(this.temperature);
    }

    let functions = this.functions;
    if (typeof functions === "string") {
      try {
        functions = JSON.parse(functions);
      } catch (error) {
        throw new Error("Invalid JSON format in the provided Functions Schema");
      }
    }

    // Accept either a JSON array of tools or a single tool object.
    if (Array.isArray(functions)) {
      data.tools.push(...functions);
    } else {
      data.tools.push(functions);
    }

    if (this.toolChoice) {
      if (this.toolChoice === "auto" || this.toolChoice === "required") {
        data.tool_choice = this.toolChoice;
      } else {
        // Forcing a specific function: the Chat Completions API expects the
        // name nested under a `function` object, i.e.
        // { "type": "function", "function": { "name": "..." } }.
        data.tool_choice = {
          type: "function",
          function: {
            name: this.toolChoice,
          },
        };
      }
    }

    if (this.responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value
      && this.jsonSchema) {
      try {
        data.response_format = {
          type: this.responseFormat,
          json_schema: typeof this.jsonSchema === "string"
            ? JSON.parse(this.jsonSchema)
            : this.jsonSchema,
        };
      } catch (error) {
        throw new Error("Invalid JSON format in the provided JSON Schema");
      }
    }

    const response = await this._302_ai.createChatCompletion({
      $,
      data,
    });

    if (response) {
      $.export("$summary", `Successfully sent chat with id ${response.id}`);
    }

    return response;
  },
};
|
|
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.