import openai from "../../openai.app.mjs";
import common from "../common/common.mjs";
import constants from "../../common/constants.mjs";
import { ConfigurationError } from "@pipedream/platform";

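// Pipedream action: chat with an OpenAI model, with optional function-tool
// definitions, structured (JSON Schema) output, and image/audio inputs.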
export default {
  ...common,
  name: "Chat with Tools (Responses API)",
  version: "0.0.1",
  key: "openai-chat-with-tools",
  description: "Chat using the Responses API. [See the documentation](https://platform.openai.com/docs/api-reference/responses)",
  type: "action",
  props: {
    openai,
    modelId: {
      propDefinition: [
        openai,
        "chatCompletionModelId",
      ],
    },
    userMessage: {
      label: "User Message",
      type: "string",
      description: "The user messages provide instructions to the assistant. They can be generated by the end users of an application, or set by a developer as an instruction.",
    },
    ...common.props,
    systemInstructions: {
      label: "System Instructions",
      type: "string",
      description: "The system message helps set the behavior of the assistant. For example: \"You are a helpful assistant.\" [See these docs](https://platform.openai.com/docs/guides/chat/instructing-chat-models) for tips on writing good instructions.",
      optional: true,
    },
    messages: {
      label: "Prior Message History",
      type: "string[]",
      description: "_Advanced_. Because [the models have no memory of past chat requests](https://platform.openai.com/docs/guides/chat/introduction), all relevant information must be supplied via the conversation. You can provide [an array of messages](https://platform.openai.com/docs/guides/chat/introduction) from prior conversations here. If this param is set, the action ignores the values passed to **System Instructions** and **Assistant Response**, appends the new **User Message** to the end of this array, and sends it to the API.",
      optional: true,
    },
    images: {
      label: "Images",
      type: "string[]",
      description: "Provide one or more images to [OpenAI's vision model](https://platform.openai.com/docs/guides/vision). Accepts URLs or base64-encoded strings. Compatible with the `gpt-4-vision-preview` model.",
      optional: true,
    },
    audio: {
      type: "string",
      label: "Audio",
      description: "Provide the file path to an audio file in the `/tmp` directory. For use with the `gpt-4o-audio-preview` model. Currently supports `wav` and `mp3` files.",
      optional: true,
    },
    responseFormat: {
      type: "string",
      label: "Response Format",
      description: "Specify the format that the model must output. \n- **Text** (default): Returns unstructured text output.\n- **JSON Object**: Ensures the model's output is a valid JSON object.\n- **JSON Schema** (GPT-4o and later): Enables you to define a specific structure for the model's output using a JSON schema. Supported with models `gpt-4o-2024-08-06` and later, and `gpt-4o-mini-2024-07-18` and later.",
      options: Object.values(constants.CHAT_RESPONSE_FORMAT),
      default: constants.CHAT_RESPONSE_FORMAT.TEXT.value,
      optional: true,
      reloadProps: true,
    },
    toolTypes: {
      type: "string[]",
      label: "Tool Types",
      description: "The types of tools to enable on the assistant. Currently only `function` tools are supported by this action.",
      options: constants.TOOL_TYPES.filter((toolType) => toolType === "function"),
      optional: true,
      reloadProps: true,
    },
  },
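  // Generate additional props dynamically, based on the selected response
  // format and tool types (re-run whenever a prop with reloadProps changes).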
  additionalProps() {
    const {
      responseFormat,
      toolTypes,
      numberOfFunctions,
    } = this;
    const props = {};

    if (responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value) {
      props.jsonSchema = {
        type: "string",
        label: "JSON Schema",
        description: "Define the schema that the model's output must adhere to. [See the documentation here](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas).",
      };
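      // Illustrative example of a schema value (hypothetical fields; how it
      // gets wrapped into response_format is assumed to be handled by the
      // shared common helpers):
      // {
      //   "type": "object",
      //   "properties": { "answer": { "type": "string" } },
      //   "required": ["answer"],
      //   "additionalProperties": false
      // }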
    }

    if (toolTypes?.includes("function")) {
      props.numberOfFunctions = {
        type: "integer",
        label: "Number of Functions",
        description: "The number of functions to define",
        optional: true,
        reloadProps: true,
        default: 1,
      };
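      // Changing Number of Functions re-runs additionalProps() via
      // reloadProps, regenerating the per-function fields below.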

      for (let i = 0; i < (numberOfFunctions || 1); i++) {
        props[`functionName_${i}`] = {
          type: "string",
          label: `Function Name ${i + 1}`,
          description: "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.",
        };
        props[`functionDescription_${i}`] = {
          type: "string",
          label: `Function Description ${i + 1}`,
          description: "A description of what the function does, used by the model to choose when and how to call the function.",
          optional: true,
        };
        props[`functionParameters_${i}`] = {
          type: "object",
          label: `Function Parameters ${i + 1}`,
          description: "The parameters the function accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.",
          optional: true,
        };
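        // Illustrative Function Parameters value (hypothetical property
        // names), as a JSON Schema object:
        // {
        //   "type": "object",
        //   "properties": { "location": { "type": "string" } },
        //   "required": ["location"]
        // }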
      }
    }

    return props;
  },
  methods: {
    ...common.methods,
    _buildTools() {
      // Non-function tool types map directly to bare { type } entries
      const tools = this.toolTypes?.filter((toolType) => toolType !== "function")?.map((toolType) => ({
        type: toolType,
      })) || [];
      if (this.toolTypes?.includes("function")) {
        const numberOfFunctions = this.numberOfFunctions || 1;
        for (let i = 0; i < numberOfFunctions; i++) {
          // Collect the dynamically generated props for each function
          tools.push({
            type: "function",
            function: {
              name: this[`functionName_${i}`],
              description: this[`functionDescription_${i}`],
              parameters: this[`functionParameters_${i}`],
            },
          });
        }
      }
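      // The assembled array follows the Chat Completions tools format, e.g.
      // (illustrative names): [{ type: "function", function: { name:
      // "get_weather", description: "...", parameters: { /* JSON Schema */ } } }]
      // Returning undefined below omits the tools param when none are configured.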
      return tools.length
        ? tools
        : undefined;
    },
  },
  async run({ $ }) {
    if (this.audio && !this.modelId.includes("gpt-4o-audio-preview")) {
      throw new ConfigurationError("Use of audio files requires the `gpt-4o-audio-preview` model.");
    }

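    // _getChatArgs() comes from the shared common module; it is assumed to
    // assemble the model, message history (including System Instructions and
    // the new User Message), and response_format for the request.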
    const args = this._getChatArgs();

    const response = await this.openai.createChatCompletion({
      $,
      data: {
        ...args,
        tools: this._buildTools(),
      },
    });

    if (response) {
      $.export("$summary", `Successfully sent chat with id ${response.id}`);
    }

    const { messages } = args;
    return {
      original_messages: messages,
      // Guard against an empty choices array so we don't append undefined
      original_messages_with_assistant_response: messages.concat(response.choices[0]?.message ?? []),
      ...response,
    };
  },
};