Skip to content

Commit 10f33b7

Browse files
add chat functions action
1 parent f7f210a commit 10f33b7

File tree

1 file changed

+229
-0
lines changed

1 file changed

+229
-0
lines changed
Lines changed: 229 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,229 @@
1+
import openai from "../../openai.app.mjs";
2+
import common from "../common/common.mjs";
3+
import constants from "../../common/constants.mjs";
4+
5+
export default {
  ...common,
  name: "Chat using Functions",
  version: "0.0.1",
  key: "openai-chat-functions",
  description: "Chat with your models and allow them to invoke functions. Optionally, you can build and invoke workflows as functions. [See the documentation](https://platform.openai.com/docs/guides/function-calling)",
  type: "action",
  props: {
    openai,
    alert: {
      type: "alert",
      alertType: "info",
      content: "Provide function names and parameters, and the model will either answer the question directly or decide to invoke one of the functions, returning a function call that adheres to your specified schema. Add a custom code step that includes all available functions which can be invoked based on the model's response - [you can even build an entire workflow as a function](https://pipedream.com/docs/workflows/building-workflows/code/nodejs/#invoke-another-workflow)! Once the appropriate function or workflow is executed, continue the overall execution or pass the result back to the model for further analysis. For more details, [see this guide](https://platform.openai.com/docs/guides/function-calling?api-mode=responses#overview).",
    },
    modelId: {
      propDefinition: [
        openai,
        "chatCompletionModelId",
      ],
    },
    input: {
      type: "string",
      label: "Chat Input",
      description: "Text, image, or file inputs to the model, used to generate a response",
    },
    functions: {
      type: "string",
      label: "Functions",
      description: "A valid JSON array of functions using OpenAI's function schema definition. [See guide here](https://platform.openai.com/docs/guides/function-calling?api-mode=responses&example=search-knowledge-base#defining-functions).",
      default:
        `[
  {
    "type": "function",
    "name": "your_function_name",
    "description": "Details on when and how to use the function",
    "strict": true,
    "parameters": {
      "type": "object",
      "properties": {
        "property_name": {
          "type": "property_type",
          "description": "A description for this property"
        },
        "another_property_name": {
          "type": "property_type",
          "description": "A description for this property"
        }
      },
      "required": [
        "list",
        "of",
        "required",
        "properties",
        "for",
        "this",
        "object"
      ],
      "additionalProperties": false
    }
  }
]`,
    },
    instructions: {
      type: "string",
      label: "Instructions",
      description: "Inserts a system (or developer) message as the first item in the model's context",
      optional: true,
    },
    toolChoice: {
      type: "string",
      label: "Tool Choice",
      // TODO: fix markdown display
      description: `Determines how the model will use tools:
- **auto**: The model decides whether and how many tools to call.
- **required**: The model must call one or more tools.
- **function_name**: Write the function name as a custom expression to force the model call this function.`,
      optional: true,
      default: "auto",
      options: [
        "auto",
        "required",
      ],
    },
    parallelToolCalls: {
      type: "boolean",
      label: "Parallel Function Calling",
      description: "Allow or prevent the model to call multiple functions in a single turn",
      optional: true,
      default: true,
    },
    previousResponseId: {
      type: "string",
      label: "Previous Response ID",
      description: "The unique ID of the previous response to the model. Use this to create multi-turn conversations",
      optional: true,
    },
    truncation: {
      type: "string",
      label: "Truncation",
      description: "Specifies the truncation mode for the response if it's larger than the context window size",
      optional: true,
      default: "auto",
      options: [
        "auto",
        "disabled",
      ],
    },
    responseFormat: {
      type: "string",
      label: "Response Format",
      description: "Specify the format that the model must output. \n- **Text**: Returns unstructured text output.\n- **JSON Schema**: Enables you to define a [specific structure for the model's output using a JSON schema](https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses).",
      options: [
        "text",
        "json_schema",
      ],
      default: "text",
      optional: true,
      reloadProps: true,
    },
  },
  /**
   * Dynamically adds props based on the selected model and response format:
   * - `reasoningEffort` when the model is a reasoning model
   * - `jsonSchema` when `responseFormat` is `json_schema`
   */
  additionalProps() {
    const {
      modelId,
      responseFormat,
    } = this;
    const props = {};

    if (this.openai.isReasoningModel(modelId)) {
      props.reasoningEffort = {
        type: "string",
        label: "Reasoning Effort",
        description: "Constrains effort on reasoning for reasoning models",
        optional: true,
        options: [
          "low",
          "medium",
          "high",
        ],
      };

      // apparently not supported yet as of 12/march/2025
      // props.generateSummary = {
      //   type: "string",
      //   label: "Generate Reasoning Summary",
      //   description: "A summary of the reasoning performed by the model",
      //   optional: true,
      //   options: [
      //     "concise",
      //     "detailed",
      //   ],
      // };
    }

    if (responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value) {
      props.jsonSchema = {
        type: "string",
        label: "JSON Schema",
        description: "Define the schema that the model's output must adhere to. [See the documentation here](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas).",
      };
    }

    return props;
  },
  methods: {
    ...common.methods,
  },
  /**
   * Sends a single Responses API request with the user-defined function
   * tools attached, and exports a summary on success.
   *
   * @param {object} opts - Pipedream step context
   * @param {object} opts.$ - step execution object (used for `$.export`)
   * @returns {Promise<object>} the raw Responses API response
   * @throws {Error} if the `functions` or `jsonSchema` props contain invalid JSON
   */
  async run({ $ }) {
    // Parse the user's function definitions up front so malformed JSON
    // produces an actionable error instead of a bare SyntaxError.
    let functions = this.functions;
    if (typeof functions === "string") {
      try {
        functions = JSON.parse(functions);
      } catch (err) {
        throw new Error(`The "Functions" prop is not valid JSON: ${err.message}`, {
          cause: err,
        });
      }
    }

    const data = {
      model: this.modelId,
      input: this.input,
      instructions: this.instructions,
      previous_response_id: this.previousResponseId,
      truncation: this.truncation,
      parallel_tool_calls: this.parallelToolCalls,
      // Accept either a single function object or an array of them
      tools: Array.isArray(functions)
        ? [
          ...functions,
        ]
        : [
          functions,
        ],
    };

    if (this.toolChoice) {
      // "auto"/"required" pass through as-is; any other value is treated as
      // a function name to force the model to call that specific function
      data.tool_choice = this.toolChoice === "auto" || this.toolChoice === "required"
        ? this.toolChoice
        : {
          type: "function",
          name: this.toolChoice,
        };
    }

    if (this.openai.isReasoningModel(this.modelId)) {
      if (this.reasoningEffort) {
        data.reasoning = {
          ...data.reasoning,
          effort: this.reasoningEffort,
        };
      }
      // generateSummary prop is currently disabled in additionalProps(),
      // so this is a no-op today; kept for forward compatibility
      if (this.generateSummary) {
        data.reasoning = {
          ...data.reasoning,
          generate_summary: this.generateSummary,
        };
      }
    }

    // Fix: responseFormat/jsonSchema were collected as props but never sent
    // to the API, silently ignoring the user's structured-output setting.
    // NOTE(review): assumes the parsed schema supplies the fields the
    // Responses API expects alongside `type: "json_schema"` (name, schema,
    // strict) — confirm against the structured-outputs docs.
    if (this.responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value
      && this.jsonSchema) {
      let schema;
      try {
        schema = JSON.parse(this.jsonSchema);
      } catch (err) {
        throw new Error(`The "JSON Schema" prop is not valid JSON: ${err.message}`, {
          cause: err,
        });
      }
      data.text = {
        format: {
          type: "json_schema",
          ...schema,
        },
      };
    }

    const response = await this.openai.responses({
      $,
      data,
    });

    if (response) {
      $.export("$summary", `Successfully sent chat with id ${response.id}`);
    }

    return response;
  },
};

0 commit comments

Comments
 (0)