diff --git a/components/apipie_ai/README.md b/components/apipie_ai/README.md new file mode 100644 index 0000000000000..cd96921619971 --- /dev/null +++ b/components/apipie_ai/README.md @@ -0,0 +1,46 @@ +# Overview + +[APIpie.ai](https://apipie.ai) connects developers with open-source and commercial AI models via a unified API. With zero infrastructure setup, you can send requests to popular models, switch providers instantly, and explore a growing catalog of AI models—all through one endpoint. Here's an overview of the services offered by [APIpie's API](https://apipie.ai): + +- **Model Discovery**: List and explore available LLM models, image models, voice models, and voices from various providers as seen on the [APIpie Dashboard](https://apipie.ai/dashboard) +- **Chat Completions**: Send messages to any supported model and receive AI-generated responses +- **Image Generation**: Create images using AI image generation models +- **Text-to-Speech**: Convert text to speech using various voice models and voices + +Use Python or Node.js code to make fully authenticated API requests with your APIpie account, enabling you to prototype, test, or integrate AI-generated content including text, images, and speech into apps, emails, alerts, dashboards, and more. + +# Example Use Cases + +The [APIpie API](https://apipie.ai) can be leveraged in a wide range of business contexts to drive efficiency, enhance customer experiences, and innovate product offerings through unified access to multiple AI models. Here are some specific business use cases for utilizing the APIpie API: + +## **Customer Support Automation** + +Significantly reduce response times and free up human agents to tackle more complex issues by automating customer support ticket responses. 
Use the List Models actions to dynamically select the most appropriate AI model based on ticket complexity or language requirements, then leverage Chat Completions to generate contextual, helpful responses that can be reviewed before sending to customers. + +## **Content Creation and Management** + +Utilize AI to generate high-quality content for blogs, articles, product descriptions, and marketing material. Create workflows that automatically test different models using the same prompt to compare writing styles, then select the best output for your brand voice. Generate accompanying images and convert text to speech for multimedia content creation. APIpie's unified interface lets you experiment with various open-source and commercial models without managing multiple API integrations. + +## **Creative Asset Generation** + +Generate visual content and audio assets for marketing campaigns, presentations, and social media. Use image generation models to create custom graphics, illustrations, and visual content that align with your brand. Convert written content to speech using different voice models to create podcasts, audiobooks, or accessibility features for your applications. + +## **Multi-Model AI Experimentation Framework** + +Build intelligent systems that automatically compare AI model performance across different use cases and modalities. Set up workflows that test text generation, image creation, and voice synthesis across multiple models simultaneously, collect responses in databases, and analyze quality, cost, and latency differences. This enables data-driven decisions about which AI models work best for specific business scenarios, while maintaining the flexibility to switch providers as new models become available. 
+
+# Getting Started
+
+First, sign up for an APIpie account, then in a new workflow step open the APIpie app and select one of the available actions:
+
+- **Retrieve Available Image Models**: Fetch the current catalog of available image generation models
+- **Retrieve Available LLM Models**: Fetch the current catalog of available language models
+- **Retrieve Available TTS Models**: Fetch the current catalog of available voice models
+- **Retrieve Available TTS Voices**: Fetch the available voices for text-to-speech
+- **Send Chat Completion Request**: Send messages to any supported language model and receive responses
+- **Create Image**: Generate images using AI image generation models
+- **Convert Text to Speech (TTS)**: Convert text to speech using various voice models and voices
+
+Then connect your APIpie account to Pipedream. Visit [APIpie.ai](https://apipie.ai) and navigate to your profile to generate your [API key.](https://apipie.ai/profile/api-keys)
+
+Copy your API key and paste it into Pipedream when prompted. Now you're all set to use pre-built actions like `Chat`, `Create Image`, `Create Text to Speech`, or any of the list actions, or use your APIpie API key directly in Node.js or Python code to access the unified AI model interface.
diff --git a/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs
new file mode 100644
index 0000000000000..7531471102261
--- /dev/null
+++ b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs
@@ -0,0 +1,122 @@
+import fs from "fs";
+import apipieAi from "../../apipie_ai.app.mjs";
+import { ConfigurationError } from "@pipedream/platform";
+
+export default {
+  key: "apipie_ai-convert-text-to-speech",
+  name: "Convert Text to Speech (TTS)",
+  description: "Generates audio from the input text. 
[See the documentation](https://apipie.ai/docs/Features/Voices)",
+  version: "0.0.1",
+  type: "action",
+  props: {
+    apipieAi,
+    model: {
+      propDefinition: [
+        apipieAi,
+        "ttsModelId",
+      ],
+      reloadProps: true,
+    },
+    input: {
+      propDefinition: [
+        apipieAi,
+        "input",
+      ],
+    },
+    responseFormat: {
+      propDefinition: [
+        apipieAi,
+        "audioResponseFormat",
+      ],
+    },
+    speed: {
+      propDefinition: [
+        apipieAi,
+        "speed",
+      ],
+    },
+    outputFile: {
+      type: "string",
+      label: "Output Filename",
+      description: "The filename of the output audio file that will be written to the `/tmp` folder, e.g. `/tmp/myFile.mp3`",
+    },
+  },
+  async additionalProps() {
+    try {
+      const props = {};
+      if (this.model) {
+        // Parse the model JSON to get id and route
+        const modelData = JSON.parse(this.model);
+        const { route } = modelData;
+
+        // Get all voices and filter by the model route
+        const { data } = await this.apipieAi.listVoices();
+        const filteredVoices = data.filter(voice => voice.model === route);
+
+        const uniqueVoices = new Map();
+        filteredVoices.forEach(({ voice_id, name }) => {
+          if (!uniqueVoices.has(voice_id)) {
+            uniqueVoices.set(voice_id, name);
+          }
+        });
+
+        props.voice = {
+          type: "string",
+          label: "Voice",
+          description: "The voice to use when generating the audio.",
+          options: Array.from(uniqueVoices.entries())
+            .map(([value, name]) => ({
+              label: name,
+              value,
+            }))
+            .sort((a, b) => a.label.localeCompare(b.label)),
+        };
+      }
+      return props;
+    } catch (e) {
+      console.log("Error fetching voices", e); // FIX: `$` is not in scope in additionalProps() (only in run({ $ })); `$.export` here threw a ReferenceError that masked the real error
+      throw new ConfigurationError(e.message || "Failed to fetch voices");
+    }
+  },
+  async run({ $ }) {
+    // Parse the model JSON to get the actual model id for the API call
+    try {
+      const modelData = JSON.parse(this.model);
+      const { id: modelId } = modelData;
+      const response = await this.apipieAi.createSpeech({
+        $,
+        data: {
+          model: modelId,
+          input: this.input,
+          voice: this.voice,
+          response_format: this.responseFormat,
+          speed: this.speed,
+        },
+        responseType: 
"arraybuffer", + }); + + if (response.error) { + $.export("Error creating audio", response.error); + throw new ConfigurationError(response.error.message || "Failed to create audio"); + } + const outputFilePath = this.outputFile.includes("tmp/") + ? this.outputFile + : `/tmp/${this.outputFile}`; + + try { + await fs.promises.writeFile(outputFilePath, Buffer.from(response)); + } catch (e) { + $.export("Error saving audio file", e); + throw new ConfigurationError(e.message || "Failed to save audio file"); + } + $.export("$summary", "Generated audio successfully"); + return { + outputFilePath, + response, + }; + } catch (e) { + $.export("Error creating audio", e); + throw new ConfigurationError(e.message || "Failed to create audio"); + } + }, +}; diff --git a/components/apipie_ai/actions/create-image/create-image.mjs b/components/apipie_ai/actions/create-image/create-image.mjs new file mode 100644 index 0000000000000..8bdd51ec63cf1 --- /dev/null +++ b/components/apipie_ai/actions/create-image/create-image.mjs @@ -0,0 +1,84 @@ +import apipieAi from "../../apipie_ai.app.mjs"; +import { ConfigurationError } from "@pipedream/platform"; + +export default { + name: "Create Image", + version: "0.0.1", + key: "apipie_ai-create-image", + description: "Creates an image given a prompt returning a URL to the image. 
[See the documentation](https://apipie.ai/docs/Features/Images)", + type: "action", + props: { + apipieAi, + model: { + propDefinition: [ + apipieAi, + "imageModelId", + ], + }, + prompt: { + propDefinition: [ + apipieAi, + "prompt", + ], + }, + responseFormat: { + propDefinition: [ + apipieAi, + "imageResponseFormat", + ], + }, + size: { + propDefinition: [ + apipieAi, + "size", + ], + }, + n: { + propDefinition: [ + apipieAi, + "n", + ], + }, + quality: { + propDefinition: [ + apipieAi, + "quality", + ], + }, + style: { + propDefinition: [ + apipieAi, + "style", + ], + }, + }, + async run({ $ }) { + try { + const response = await this.apipieAi.createImage({ + $, + data: { + prompt: this.prompt, + n: this.n, + size: this.size, + ...(this.responseFormat && { response_format: this.responseFormat }), + model: this.model, + quality: this.quality, + style: this.style, + }, + }); + if (response.error) { + $.export("Error creating Image", response.error); + throw new ConfigurationError(response.error.message || "Failed to create Image"); + } + if (response.data.length) { + $.export("$summary", `Successfully created ${response.data.length} image${response.data.length === 1 + ? 
"" + : "s"}`); + } + return response; + } catch (e) { + $.export("Error creating Image", e); + throw new ConfigurationError(e.message || "Failed to create Image"); + } + }, +}; diff --git a/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs b/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs new file mode 100644 index 0000000000000..386f4a0380856 --- /dev/null +++ b/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs @@ -0,0 +1,25 @@ +import apipieAi from "../../apipie_ai.app.mjs"; +import { ConfigurationError } from "@pipedream/platform"; + +export default { + key: "apipie_ai-retrieve-available-image-models", + name: "Retrieve Available Image Models", + version: "0.0.1", + description: "Returns a list of Image models available through the API. [See the dashboard](https://apipie.ai/dashboard)", + type: "action", + props: { + apipieAi, + }, + async run({ $ }) { + try { + const response = await this.apipieAi.listImageModels({ + $, + }); + $.export("$summary", `Successfully retrieved ${response.data.length} available Image model(s)!`); + return response; + } catch (e) { + $.export("Error fetching Image Models", e); + throw new ConfigurationError(e.message || "Failed to fetch Image Models"); + } + }, +}; diff --git a/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs b/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs new file mode 100644 index 0000000000000..830c67303427c --- /dev/null +++ b/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs @@ -0,0 +1,25 @@ +import apipieAi from "../../apipie_ai.app.mjs"; +import { ConfigurationError } from "@pipedream/platform"; + +export default { + key: "apipie_ai-retrieve-available-llm-models", + name: "Retrieve Available LLM Models", + version: "0.0.1", + description: 
"Returns a list of LLM models available through the API. [See the dashboard](https://apipie.ai/dashboard)", + type: "action", + props: { + apipieAi, + }, + async run({ $ }) { + try { + const response = await this.apipieAi.listLlmModels({ + $, + }); + $.export("$summary", `Successfully retrieved ${response.data.length} available LLM model(s)!`); + return response; + } catch (e) { + $.export("Error fetching LLM Models", e); + throw new ConfigurationError(e.message || "Failed to fetch LLM Models"); + } + }, +}; diff --git a/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs b/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs new file mode 100644 index 0000000000000..afbb8fc7d4aa1 --- /dev/null +++ b/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs @@ -0,0 +1,25 @@ +import apipieAi from "../../apipie_ai.app.mjs"; +import { ConfigurationError } from "@pipedream/platform"; + +export default { + key: "apipie_ai-retrieve-available-tts-models", + name: "Retrieve Available TTS Models", + version: "0.0.1", + description: "Returns a list of TTS models available through the API. 
[See the dashboard](https://apipie.ai/dashboard)", + type: "action", + props: { + apipieAi, + }, + async run({ $ }) { + try { + const response = await this.apipieAi.listTtsModels({ + $, + }); + $.export("$summary", `Successfully retrieved ${response.data.length} available TTS model(s)!`); + return response; + } catch (e) { + $.export("Error fetching TTS Models", e); + throw new ConfigurationError(e.message || "Failed to fetch TTS models"); + } + }, +}; diff --git a/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs b/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs new file mode 100644 index 0000000000000..b74378ca8ea7d --- /dev/null +++ b/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs @@ -0,0 +1,25 @@ +import apipieAi from "../../apipie_ai.app.mjs"; +import { ConfigurationError } from "@pipedream/platform"; + +export default { + key: "apipie_ai-retrieve-available-tts-voices", + name: "Retrieve Available TTS Voices", + version: "0.0.1", + description: "Returns a list of TTS Voices available through the API. 
[See the dashboard](https://apipie.ai/dashboard)", + type: "action", + props: { + apipieAi, + }, + async run({ $ }) { + try { + const response = await this.apipieAi.listVoices({ + $, + }); + $.export("$summary", `Successfully retrieved ${response.data.length} available TTS Voices!`); + return response; + } catch (e) { + $.export("Error fetching Voices", e); + throw new ConfigurationError(e.message || "Failed to fetch Voices"); + } + }, +}; diff --git a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs new file mode 100644 index 0000000000000..595b98e9d55c2 --- /dev/null +++ b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs @@ -0,0 +1,191 @@ +import { ConfigurationError } from "@pipedream/platform"; +import { parseObject } from "../../common/utils.mjs"; +import constants from "../../common/constants.mjs"; +import apipieAi from "../../apipie_ai.app.mjs"; + +export default { + key: "apipie_ai-send-chat-completion-request", + name: "Send Chat Completion Request", + version: "0.0.1", + description: "Send a chat completion request to a selected LLM model. [See the dashboard](https://apipie.ai/dashboard)", + type: "action", + props: { + apipieAi, + model: { + propDefinition: [ + apipieAi, + "chatCompletionModelId", + ], + }, + messages: { + type: "string[]", + label: "Messages", + description: "A list of objects containing role and content. E.g. **{\"role\":\"user\", \"content\":\"text\"}**. 
[See the documentation](https://apipie.ai/docs/Features/Completions) for further details.", + }, + maxTokens: { + propDefinition: [ + apipieAi, + "maxTokens", + ], + }, + temperature: { + propDefinition: [ + apipieAi, + "temperature", + ], + }, + seed: { + propDefinition: [ + apipieAi, + "seed", + ], + }, + topP: { + propDefinition: [ + apipieAi, + "topP", + ], + }, + topK: { + propDefinition: [ + apipieAi, + "topK", + ], + }, + frequencyPenalty: { + propDefinition: [ + apipieAi, + "frequencyPenalty", + ], + }, + presencePenalty: { + propDefinition: [ + apipieAi, + "presencePenalty", + ], + }, + repetitionPenalty: { + propDefinition: [ + apipieAi, + "repetitionPenalty", + ], + }, + reasoningEffort: { + propDefinition: [ + apipieAi, + "reasoningEffort", + ], + }, + toolTypes: { + type: "string[]", + label: "Tool Types", + description: "The types of tools to enable on the assistant", + options: constants.TOOL_TYPES?.filter((toolType) => toolType === "function") || ["function"], + optional: true, + reloadProps: true, + }, + }, + additionalProps() { + const { + toolTypes, + numberOfFunctions, + } = this; + const props = {}; + + if (toolTypes?.includes("function")) { + props.numberOfFunctions = { + type: "integer", + label: "Number of Functions", + description: "The number of functions to define", + optional: true, + reloadProps: true, + default: 1, + }; + + for (let i = 0; i < (numberOfFunctions || 1); i++) { + props[`functionName_${i}`] = { + type: "string", + label: `Function Name ${i + 1}`, + description: "The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.", + }; + props[`functionDescription_${i}`] = { + type: "string", + label: `Function Description ${i + 1}`, + description: "A description of what the function does, used by the model to choose when and how to call the function.", + optional: true, + }; + props[`functionParameters_${i}`] = { + type: "object", + label: `Function Parameters ${i + 1}`, + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.", + optional: true, + }; + } + } + + return props; + }, + methods: { + _buildTools() { + const tools = []; + + if (this.toolTypes?.includes("function")) { + const numberOfFunctions = this.numberOfFunctions || 1; + for (let i = 0; i < numberOfFunctions; i++) { + if (this[`functionName_${i}`]) { + tools.push({ + type: "function", + function: { + name: this[`functionName_${i}`], + description: this[`functionDescription_${i}`], + parameters: this[`functionParameters_${i}`], + }, + }); + } + } + } + + return tools.length ? 
tools : undefined; + }, + }, + async run({ $ }) { + try { + const data = { + model: this.model, + messages: parseObject(this.messages), + stream: false, + }; + + // Add optional parameters only if they exist + if (this.maxTokens) data.max_tokens = this.maxTokens; + if (this.temperature) data.temperature = this.temperature; + if (this.seed) data.seed = this.seed; + if (this.topP) data.top_p = this.topP; + if (this.topK) data.top_k = this.topK; + if (this.frequencyPenalty) data.frequency_penalty = this.frequencyPenalty; + if (this.presencePenalty) data.presence_penalty = this.presencePenalty; + if (this.repetitionPenalty) data.repetition_penalty = this.repetitionPenalty; + if (this.reasoningEffort) data.reasoning_effort = this.reasoningEffort; + + // Add tools if they exist + const tools = this._buildTools(); + if (tools) data.tools = tools; + + const response = await this.apipieAi.sendChatCompletionRequest({ + $, + data, + timeout: 1000 * 60 * 5, + }); + if (response.error) { + $.export("Error creating Chat Completion", response.error); + throw new ConfigurationError(response.error.message || "Failed to create Chat Completion"); + } + + $.export("$summary", `A new chat completion request with Id: ${response.id} was successfully created!`); + return response; + } catch (e) { + $.export("Error creating Chat Completion", e); + throw new ConfigurationError(e.message || "Failed to create Chat Completion"); + } + }, +}; diff --git a/components/apipie_ai/apipie_ai.app.mjs b/components/apipie_ai/apipie_ai.app.mjs index dc830e6a1a949..26f0fdeab9c8b 100644 --- a/components/apipie_ai/apipie_ai.app.mjs +++ b/components/apipie_ai/apipie_ai.app.mjs @@ -1,11 +1,284 @@ +import { axios } from "@pipedream/platform"; +import { ConfigurationError } from "@pipedream/platform"; +import constants from "./common/constants.mjs"; + export default { type: "app", app: "apipie_ai", - propDefinitions: {}, + propDefinitions: { + chatCompletionModelId: { + type: "string", + label: "Completions 
Model",
+      description: "The ID of the LLM model to use for completions.",
+      async options() {
+        try {
+          const { data } = await this.listLlmModels();
+          const uniqueModels = new Map();
+          data.forEach(({ id, name }) => {
+            if (!uniqueModels.has(id)) {
+              uniqueModels.set(id, name);
+            }
+          });
+          return Array.from(uniqueModels.entries())
+            .map(([value, label]) => ({
+              label,
+              value,
+            }))
+            .sort((a, b) => a.label.localeCompare(b.label));
+        } catch (e) {
+          console.log("Error fetching LLM Models", e); // FIX: `$` is not defined inside prop options(); `$.export` threw a ReferenceError that masked the real error
+          throw new ConfigurationError(e.message || "Failed to fetch LLM models");
+        }
+      },
+    },
+    imageModelId: {
+      type: "string",
+      label: "Model",
+      description: "The ID of the image model to use for completions.",
+      async options() {
+        try {
+          const { data } = await this.listImageModels();
+          const uniqueModels = new Map();
+          data.forEach(({ id, name }) => {
+            if (!uniqueModels.has(id)) {
+              uniqueModels.set(id, name);
+            }
+          });
+          return Array.from(uniqueModels.entries())
+            .map(([value, label]) => ({
+              label,
+              value,
+            }))
+            .sort((a, b) => a.label.localeCompare(b.label));
+        } catch (e) {
+          console.log("Error fetching Image Models", e); // FIX: `$` is not defined inside prop options(); `$.export` threw a ReferenceError that masked the real error
+          throw new ConfigurationError(e.message || "Failed to fetch Image models");
+        }
+      },
+    },
+    ttsModelId: {
+      type: "string",
+      label: "Model",
+      description: "The ID of the tts model to use for completions.",
+      async options() {
+        try {
+          const { data } = await this.listTtsModels();
+          const uniqueModels = new Map();
+          data.forEach(({ id, name }) => {
+            if (!uniqueModels.has(id)) {
+              uniqueModels.set(id, name);
+            }
+          });
+          return Array.from(uniqueModels.entries())
+            .map(([value, label]) => ({
+              label,
+              value,
+            }))
+            .sort((a, b) => a.label.localeCompare(b.label));
+        } catch (e) {
+          console.log("Error fetching TTS Models", e); // FIX: `$` is not defined inside prop options(); `$.export` threw a ReferenceError that masked the real error
+          throw new ConfigurationError(e.message || "Failed to fetch TTS models");
+        }
+      },
+    },
+    maxTokens: {
+      type: "integer",
+      label: "Max Tokens",
+      description: "Maximum number of tokens. 
**(range: [1, context_length))**.", + min: 1, + optional: true, + }, + temperature: { + type: "number", + label: "Temperature", + description: "Sampling temperature. **(range: [0, 2])**.", + min: 0, + max: 2, + optional: true, + }, + seed: { + type: "number", + label: "Seed", + description: "Seed for deterministic outputs.", + optional: true, + }, + topP: { + type: "number", + label: "Top P", + description: "Top-p sampling value. **(range: (0, 1])**.", + min: 0, + max: 1.0, + optional: true, + }, + topK: { + type: "number", + label: "Top K", + description: "Top-k sampling value. **(range: [1, Infinity))**.", + min: 1, + optional: true, + }, + frequencyPenalty: { + type: "number", + label: "Frequency Penalty", + description: "Frequency penalty. **(range: [-2, 2])**.", + min: -2.0, + max: 2.0, + optional: true, + }, + presencePenalty: { + type: "number", + label: "Presence Penalty", + description: "Presence penalty. **(range: [-2, 2])**.", + min: -2.0, + max: 2.0, + optional: true, + }, + repetitionPenalty: { + type: "number", + label: "Repetition Penalty", + description: "Repetition penalty. **(range: (0, 2])**.", + min: 0, + max: 2.0, + optional: true, + }, + reasoningEffort: { + type: "string", + label: "Reasoning Effort", + description: "OpenAI-style reasoning effort setting.", + options: constants.EFFORT_OPTIONS, + optional: true, + }, + input: { + type: "string", + label: "Input", + description: "The text to generate audio for. The maximum length is 4096 characters.", + }, + audioResponseFormat: { + type: "string", + label: "Response Format", + description: "The format to generate audio in. Supported formats are mp3, opus, aac, flac, wav, and pcm.", + options: constants.AUDIO_RESPONSE_FORMATS, + optional: true, + }, + speed: { + type: "number", + label: "Speed", + description: "The speed of the generated audio. 
Provide a value from 0.25 to 4.0.", + default: 1, + min: 0.25, + max: 4.0, + optional: true, + }, + prompt: { + label: "Prompt", + description: "A text description of the desired image(s).", + type: "string", + }, + imageResponseFormat: { + label: "Response Format", + description: "The format in which the generated images are returned.", + type: "string", + optional: true, + options: constants.IMAGE_RESPONSE_FORMATS, + default: "url", + reloadProps: true, + }, + size: { + label: "Size", + description: "The size of the generated images.", + type: "string", + optional: true, + options: constants.IMAGE_SIZES, + default: "1024x1024", + }, + n: { + type: "integer", + label: "N", + description: "The number of images to generate. Must be between 1 and 10. not supported for all models.", + optional: true, + default: 1, + }, + quality: { + type: "string", + label: "Quality", + description: "The quality of the image", + options: constants.IMAGE_QUALITIES, + optional: true, + default: "standard", + }, + style: { + type: "string", + label: "Style", + description: "The style of the image, not supported for all models.", + options: constants.IMAGE_STYLES, + optional: true, + default: "natural", + }, + }, methods: { - // this.$auth contains connected account data - authKeys() { - console.log(Object.keys(this.$auth)); + _apiKey() { + return this.$auth.api_key; + }, + _apiUrl() { + return "https://apipie.ai/v1"; + }, + _getHeaders() { + return { + "Authorization": `Bearer ${this._apiKey()}`, + "Accept": "application/json", + "User-Agent": "@PipedreamHQ/pipedream v1.0", + }; + }, + _makeRequest({ + $, path, ...opts + }) { + $ = $ || this.$; + return axios($, { + url: `${this._apiUrl()}/${path}`, + headers: this._getHeaders(), + ...opts, + }); + }, + listLlmModels() { + return this._makeRequest({ + path: "models?type=llm", + }); + }, + listImageModels() { + return this._makeRequest({ + path: "models?type=image", + }); + }, + listTtsModels() { + return this._makeRequest({ + path: 
"models?subtype=text-to-speech", + }); + }, + listVoices() { + return this._makeRequest({ + path: "models?voices", + }); + }, + sendChatCompletionRequest(opts = {}) { + return this._makeRequest({ + method: "POST", + path: "chat/completions", + ...opts, + }); + }, + createImage(args = {}) { + return this._makeRequest({ + path: "images/generations", + method: "POST", + ...args, + }); + }, + createSpeech(args = {}) { + return this._makeRequest({ + path: "audio/speech", + method: "POST", + ...args, + }); }, }, -}; \ No newline at end of file +}; diff --git a/components/apipie_ai/common/constants.mjs b/components/apipie_ai/common/constants.mjs new file mode 100644 index 0000000000000..85a04092b0d0b --- /dev/null +++ b/components/apipie_ai/common/constants.mjs @@ -0,0 +1,64 @@ +const EFFORT_OPTIONS = [ + "high", + "medium", + "low", +]; +const AUDIO_RESPONSE_FORMATS = [ + "mp3", + "opus", + "aac", + "flac", + "wav", + "pcm", +]; +const IMAGE_RESPONSE_FORMATS = [ + { + label: "URL", + value: "url", + }, + { + label: "Base64 JSON", + value: "b64_json", + }, +]; +const IMAGE_QUALITIES = [ + { + label: "Standard", + value: "standard", + }, + { + label: "HD", + value: "hd", + }, +]; +const IMAGE_STYLES = [ + { + label: "Natural", + value: "natural", + }, + { + label: "Vivid", + value: "vivid", + }, +]; +const IMAGE_SIZES = [ + "256x256", + "512x512", + "1024x1024", + "1792x1024", + "1024x1792", +]; +const TOOL_TYPES = [ + "code_interpreter", + "file_search", + "function", +]; +export default { + EFFORT_OPTIONS, + AUDIO_RESPONSE_FORMATS, + IMAGE_RESPONSE_FORMATS, + IMAGE_QUALITIES, + IMAGE_STYLES, + IMAGE_SIZES, + TOOL_TYPES, +}; diff --git a/components/apipie_ai/common/utils.mjs b/components/apipie_ai/common/utils.mjs new file mode 100644 index 0000000000000..6631ff58c328e --- /dev/null +++ b/components/apipie_ai/common/utils.mjs @@ -0,0 +1,29 @@ +/** +* Safely parses JSON strings or arrays of JSON strings into JavaScript objects +* @param {any} obj - Input that may be a JSON 
string, array of JSON strings, or any other value +* @returns {any} - Parsed object(s) or the original input if parsing fails +*/ +export const parseObject = (obj) => { + if (!obj) return undefined; + + if (Array.isArray(obj)) { + return obj.map((item) => { + if (typeof item === "string") { + try { + return JSON.parse(item); + } catch (e) { + return item; + } + } + return item; + }); + } + if (typeof obj === "string") { + try { + return JSON.parse(obj); + } catch (e) { + return obj; + } + } + return obj; +}; diff --git a/components/apipie_ai/package.json b/components/apipie_ai/package.json index 0fc068179e4ae..a1c88ca2379e3 100644 --- a/components/apipie_ai/package.json +++ b/components/apipie_ai/package.json @@ -5,11 +5,15 @@ "main": "apipie_ai.app.mjs", "keywords": [ "pipedream", - "apipie_ai" + "apipie ai" ], - "homepage": "https://pipedream.com/apps/apipie_ai", + "homepage": "https://pipedream.com/apps/apipie-ai", "author": "Pipedream (https://pipedream.com/)", "publishConfig": { "access": "public" + }, + "dependencies": { + "@pipedream/platform": "^3.0.3", + "axios": "^1.6.2" } }