Skip to content

Commit f7f210a

Browse files
add chat file search action
1 parent 663bdde commit f7f210a

File tree

1 file changed

+203
-0
lines changed

1 file changed

+203
-0
lines changed
Lines changed: 203 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,203 @@
1+
import openai from "../../openai.app.mjs";
import common from "../common/common.mjs";
import constants from "../../common/constants.mjs";

/**
 * Pipedream action: chat against an OpenAI vector-store knowledge base
 * using the Responses API `file_search` tool.
 */
export default {
  ...common,
  name: "Chat using File Search",
  version: "0.0.1",
  key: "openai-chat-file-search",
  description: "Chat with your files knowledge base (vector stores). [See the documentation](https://platform.openai.com/docs/guides/tools-file-search)",
  type: "action",
  props: {
    openai,
    alert: {
      type: "alert",
      alertType: "info",
      content: "To use this action, you need to have set up a knowledge base in a vector store and uploaded files to it. [More information here](https://platform.openai.com/docs/guides/tools-file-search?lang=javascript#overview).",
    },
    modelId: {
      propDefinition: [
        openai,
        "chatCompletionModelId",
      ],
    },
    vectorStoreId: {
      propDefinition: [
        openai,
        "vectorStoreId",
      ],
      description: "The identifier of a vector store. Currently supports only one vector store at a time",
    },
    input: {
      type: "string",
      label: "Chat Input",
      description: "Text, image, or file inputs to the model, used to generate a response",
    },
    instructions: {
      type: "string",
      label: "Instructions",
      description: "Inserts a system (or developer) message as the first item in the model's context",
      optional: true,
    },
    includeSearchResults: {
      type: "boolean",
      label: "Include Search Results",
      description: "Include the search results in the response",
      default: false,
      optional: true,
    },
    maxNumResults: {
      type: "integer",
      label: "Max Number of Results",
      description: "Customize the number of results you want to retrieve from the vector store",
      optional: true,
    },
    metadataFiltering: {
      type: "boolean",
      label: "Metadata Filtering",
      description: "Configure how the search results are filtered based on file metadata",
      optional: true,
      reloadProps: true,
    },
    previousResponseId: {
      type: "string",
      label: "Previous Response ID",
      description: "The unique ID of the previous response to the model. Use this to create multi-turn conversations",
      optional: true,
    },
    truncation: {
      type: "string",
      label: "Truncation",
      description: "Specifies the truncation mode for the response if it's larger than the context window size",
      optional: true,
      default: "auto",
      options: [
        "auto",
        "disabled",
      ],
    },
    responseFormat: {
      type: "string",
      label: "Response Format",
      description: "Specify the format that the model must output. \n- **Text**: Returns unstructured text output.\n- **JSON Schema**: Enables you to define a [specific structure for the model's output using a JSON schema](https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses).",
      options: [
        "text",
        "json_schema",
      ],
      default: "text",
      optional: true,
      reloadProps: true,
    },
  },
  additionalProps() {
    const {
      modelId,
      metadataFiltering,
      responseFormat,
    } = this;
    const props = {};

    // Reasoning controls only apply to reasoning-capable models (o-series).
    if (this.openai.isReasoningModel(modelId)) {
      props.reasoningEffort = {
        type: "string",
        label: "Reasoning Effort",
        description: "Constrains effort on reasoning for reasoning models",
        optional: true,
        options: [
          "low",
          "medium",
          "high",
        ],
      };

      // apparently not supported yet as of 12/march/2025
      // props.generateSummary = {
      //   type: "string",
      //   label: "Generate Reasoning Summary",
      //   description: "A summary of the reasoning performed by the model",
      //   optional: true,
      //   options: [
      //     "concise",
      //     "detailed",
      //   ],
      // };
    }

    // TODO: make this configuration user-friendly
    // https://platform.openai.com/docs/guides/retrieval?attributes-filter-example=region#attribute-filtering
    if (metadataFiltering) {
      props.filters = {
        type: "object",
        label: "Filters",
        description: "Filter the search results based on file metadata. [See the documentation here](https://platform.openai.com/docs/guides/retrieval#attribute-filtering)",
      };
    }

    if (responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value) {
      props.jsonSchema = {
        type: "string",
        label: "JSON Schema",
        description: "Define the schema that the model's output must adhere to. [See the documentation here](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas).",
      };
    }

    return props;
  },
  methods: {
    ...common.methods,
  },
  async run({ $ }) {
    // Base payload for the Responses API with the file_search tool attached.
    const data = {
      model: this.modelId,
      input: this.input,
      instructions: this.instructions,
      previous_response_id: this.previousResponseId,
      truncation: this.truncation,
      tools: [
        {
          type: "file_search",
          vector_store_ids: [
            this.vectorStoreId,
          ],
          max_num_results: this.maxNumResults,
        },
      ],
    };

    if (this.includeSearchResults) {
      data.include = [
        "output[*].file_search_call.search_results",
      ];
    }

    if (this.filters) {
      data.tools[0].filters = this.filters;
    }

    // Fix: honor the user's selected response format. Previously the
    // `responseFormat`/`jsonSchema` props were collected but never sent,
    // so structured-output requests silently returned plain text.
    // The Responses API expects the schema under `text.format`.
    if (
      this.responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value
      && this.jsonSchema
    ) {
      try {
        data.text = {
          format: {
            type: "json_schema",
            ...JSON.parse(this.jsonSchema),
          },
        };
      } catch (err) {
        throw new Error(`Invalid JSON Schema: ${err.message}`, {
          cause: err,
        });
      }
    }

    // Attach reasoning options in a single block (previously two duplicated
    // `isReasoningModel` guards built `data.reasoning` piecemeal).
    if (this.openai.isReasoningModel(this.modelId)) {
      const reasoning = {};
      if (this.reasoningEffort) {
        reasoning.effort = this.reasoningEffort;
      }
      // `generateSummary` prop is currently commented out in additionalProps;
      // kept here so it takes effect if/when the prop is re-enabled.
      if (this.generateSummary) {
        reasoning.generate_summary = this.generateSummary;
      }
      if (Object.keys(reasoning).length > 0) {
        data.reasoning = reasoning;
      }
    }

    const response = await this.openai.responses({
      $,
      data,
    });

    if (response) {
      $.export("$summary", `Successfully sent chat with id ${response.id}`);
    }

    return response;
  },
};

0 commit comments

Comments
 (0)