Skip to content

Commit 1a67bfc

Browse files
authored
Merging pull request #16286
* [Components] openrouter #15025 Actions - Send Completion Request - Send Chat Completion Request - Retrieve Available Models * pnpm update * fix field name * remove unnecessary props
1 parent c0a7bb9 commit 1a67bfc

File tree

8 files changed

+593
-6
lines changed

8 files changed

+593
-6
lines changed
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
import openrouter from "../../openrouter.app.mjs";

/**
 * Action: Retrieve Available Models.
 * Lists every model exposed by the OpenRouter API and reports the count
 * in the step summary. Takes no user-facing props beyond the app itself.
 */
export default {
  key: "openrouter-retrieve-available-models",
  name: "Retrieve Available Models",
  version: "0.0.1",
  description: "Returns a list of models available through the API. [See the documentation](https://openrouter.ai/docs/api-reference/list-available-models)",
  type: "action",
  props: {
    openrouter,
  },
  async run({ $ }) {
    // Delegate to the app-level helper; `$` is forwarded so the platform
    // can attribute the HTTP request to this step.
    const response = await this.openrouter.listModels({ $ });

    // The API wraps the model list in a `data` array.
    const count = response.data.length;
    $.export("$summary", `Successfully retrieved ${count} available model(s)!`);
    return response;
  },
};
Lines changed: 188 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,188 @@
1+
import { ConfigurationError } from "@pipedream/platform";
import { parseObject } from "../../common/utils.mjs";
import openrouter from "../../openrouter.app.mjs";

/**
 * Action: Send Chat Completion Request.
 * Builds an OpenRouter /chat/completions request body from the configured
 * props and sends it (non-streaming). The OpenRouter API expects snake_case
 * body fields (max_tokens, top_p, frequency_penalty, ...), so camelCase prop
 * values are mapped to their snake_case API names before sending.
 */
export default {
  key: "openrouter-send-chat-completion-request",
  name: "Send Chat Completion Request",
  version: "0.0.1",
  description: "Send a chat completion request to a selected model. [See the documentation](https://openrouter.ai/docs/api-reference/chat-completion)",
  type: "action",
  props: {
    openrouter,
    model: {
      propDefinition: [
        openrouter,
        "model",
      ],
    },
    messages: {
      type: "string[]",
      label: "Messages",
      description: "A list of objects containing role and content. E.g. **{\"role\":\"user\", \"content\":\"text\"}**. [See the documentation](https://openrouter.ai/docs/api-reference/chat-completion#request.body.messages) for further details.",
    },
    maxTokens: {
      propDefinition: [
        openrouter,
        "maxTokens",
      ],
    },
    temperature: {
      propDefinition: [
        openrouter,
        "temperature",
      ],
    },
    seed: {
      propDefinition: [
        openrouter,
        "seed",
      ],
    },
    topP: {
      propDefinition: [
        openrouter,
        "topP",
      ],
    },
    topK: {
      propDefinition: [
        openrouter,
        "topK",
      ],
    },
    frequencyPenalty: {
      propDefinition: [
        openrouter,
        "frequencyPenalty",
      ],
    },
    presencePenalty: {
      propDefinition: [
        openrouter,
        "presencePenalty",
      ],
    },
    repetitionPenalty: {
      propDefinition: [
        openrouter,
        "repetitionPenalty",
      ],
    },
    logitBias: {
      propDefinition: [
        openrouter,
        "logitBias",
      ],
    },
    // NOTE(review): prop key "togLogprobs" looks like a typo of "topLogprobs",
    // but it must match the propDefinition declared in openrouter.app.mjs —
    // renaming requires a coordinated change in the app file.
    togLogprobs: {
      propDefinition: [
        openrouter,
        "togLogprobs",
      ],
    },
    minP: {
      propDefinition: [
        openrouter,
        "minP",
      ],
    },
    topA: {
      propDefinition: [
        openrouter,
        "topA",
      ],
    },
    transforms: {
      propDefinition: [
        openrouter,
        "transforms",
      ],
    },
    models: {
      propDefinition: [
        openrouter,
        "model",
      ],
      type: "string[]",
      label: "Models",
      description: "Alternate list of models for routing overrides.",
    },
    sort: {
      propDefinition: [
        openrouter,
        "sort",
      ],
    },
    effort: {
      propDefinition: [
        openrouter,
        "effort",
      ],
    },
    reasoningMaxTokens: {
      propDefinition: [
        openrouter,
        "reasoningMaxTokens",
      ],
    },
    exclude: {
      propDefinition: [
        openrouter,
        "exclude",
      ],
    },
  },
  async run({ $ }) {
    // The API allows only one reasoning budget control at a time.
    if (this.effort && this.reasoningMaxTokens) {
      throw new ConfigurationError("**Reasoning Effort** and **Reasoning Max Tokens** cannot be used simultaneously.");
    }
    // Map camelCase props to the snake_case field names the OpenRouter API
    // expects; numeric string props are coerced with parseFloat. Undefined
    // values are left as-is (JSON.stringify drops them from the payload).
    const data = {
      model: this.model,
      messages: parseObject(this.messages),
      stream: false,
      max_tokens: this.maxTokens,
      temperature: this.temperature && parseFloat(this.temperature),
      seed: this.seed,
      top_p: this.topP && parseFloat(this.topP),
      top_k: this.topK,
      frequency_penalty: this.frequencyPenalty && parseFloat(this.frequencyPenalty),
      presence_penalty: this.presencePenalty && parseFloat(this.presencePenalty),
      repetition_penalty: this.repetitionPenalty && parseFloat(this.repetitionPenalty),
      logit_bias: this.logitBias,
      top_logprobs: this.togLogprobs,
      min_p: this.minP && parseFloat(this.minP),
      top_a: this.topA && parseFloat(this.topA),
      transforms: this.transforms,
      models: this.models,
    };
    // Provider routing preferences are nested under `provider`.
    if (this.sort) {
      data.provider = {
        sort: this.sort,
      };
    }
    // Reasoning options are nested under `reasoning`; only attach the
    // object when at least one option was configured.
    const reasoning = {};
    if (this.effort) {
      reasoning.effort = this.effort;
    }
    if (this.reasoningMaxTokens) {
      reasoning.max_tokens = parseFloat(this.reasoningMaxTokens);
    }
    if (this.exclude) {
      reasoning.exclude = this.exclude;
    }
    if (Object.keys(reasoning).length) {
      data.reasoning = reasoning;
    }
    // NOTE(review): method name "sendChatCompetionRequest" (missing "l") must
    // match the helper declared in openrouter.app.mjs. Generous timeout:
    // large models can take minutes to respond.
    const response = await this.openrouter.sendChatCompetionRequest({
      $,
      data,
      timeout: 1000 * 60 * 5,
    });
    // OpenRouter can return 200 with an embedded error object.
    if (response.error) {
      throw new ConfigurationError(response.error.message);
    }
    $.export("$summary", `A new chat completion request with Id: ${response.id} was successfully created!`);
    return response;
  },
};
Lines changed: 187 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,187 @@
1+
import { ConfigurationError } from "@pipedream/platform";
import openrouter from "../../openrouter.app.mjs";

/**
 * Action: Send Completion Request (text-only prompt format).
 * Builds an OpenRouter /completions request body from the configured props
 * and sends it (non-streaming). The OpenRouter API expects snake_case body
 * fields (max_tokens, top_p, frequency_penalty, ...), so camelCase prop
 * values are mapped to their snake_case API names before sending.
 */
export default {
  key: "openrouter-send-completion-request",
  name: "Send Completion Request",
  version: "0.0.1",
  description: "Send a completion request to a selected model (text-only format) [See the documentation](https://openrouter.ai/docs/api-reference/completions)",
  type: "action",
  props: {
    openrouter,
    model: {
      propDefinition: [
        openrouter,
        "model",
      ],
    },
    prompt: {
      type: "string",
      label: "Prompt",
      description: "The text prompt to complete.",
    },
    maxTokens: {
      propDefinition: [
        openrouter,
        "maxTokens",
      ],
    },
    temperature: {
      propDefinition: [
        openrouter,
        "temperature",
      ],
    },
    seed: {
      propDefinition: [
        openrouter,
        "seed",
      ],
    },
    topP: {
      propDefinition: [
        openrouter,
        "topP",
      ],
    },
    topK: {
      propDefinition: [
        openrouter,
        "topK",
      ],
    },
    frequencyPenalty: {
      propDefinition: [
        openrouter,
        "frequencyPenalty",
      ],
    },
    presencePenalty: {
      propDefinition: [
        openrouter,
        "presencePenalty",
      ],
    },
    repetitionPenalty: {
      propDefinition: [
        openrouter,
        "repetitionPenalty",
      ],
    },
    logitBias: {
      propDefinition: [
        openrouter,
        "logitBias",
      ],
    },
    // NOTE(review): prop key "togLogprobs" looks like a typo of "topLogprobs",
    // but it must match the propDefinition declared in openrouter.app.mjs —
    // renaming requires a coordinated change in the app file.
    togLogprobs: {
      propDefinition: [
        openrouter,
        "togLogprobs",
      ],
    },
    minP: {
      propDefinition: [
        openrouter,
        "minP",
      ],
    },
    topA: {
      propDefinition: [
        openrouter,
        "topA",
      ],
    },
    transforms: {
      propDefinition: [
        openrouter,
        "transforms",
      ],
    },
    models: {
      propDefinition: [
        openrouter,
        "model",
      ],
      type: "string[]",
      label: "Models",
      description: "Alternate list of models for routing overrides.",
    },
    sort: {
      propDefinition: [
        openrouter,
        "sort",
      ],
    },
    effort: {
      propDefinition: [
        openrouter,
        "effort",
      ],
    },
    reasoningMaxTokens: {
      propDefinition: [
        openrouter,
        "reasoningMaxTokens",
      ],
    },
    exclude: {
      propDefinition: [
        openrouter,
        "exclude",
      ],
    },
  },
  async run({ $ }) {
    // The API allows only one reasoning budget control at a time.
    if (this.effort && this.reasoningMaxTokens) {
      throw new ConfigurationError("**Reasoning Effort** and **Reasoning Max Tokens** cannot be used simultaneously.");
    }
    // Map camelCase props to the snake_case field names the OpenRouter API
    // expects; numeric string props are coerced with parseFloat. Undefined
    // values are left as-is (JSON.stringify drops them from the payload).
    const data = {
      model: this.model,
      prompt: this.prompt,
      stream: false,
      max_tokens: this.maxTokens,
      temperature: this.temperature && parseFloat(this.temperature),
      seed: this.seed,
      top_p: this.topP && parseFloat(this.topP),
      top_k: this.topK,
      frequency_penalty: this.frequencyPenalty && parseFloat(this.frequencyPenalty),
      presence_penalty: this.presencePenalty && parseFloat(this.presencePenalty),
      repetition_penalty: this.repetitionPenalty && parseFloat(this.repetitionPenalty),
      logit_bias: this.logitBias,
      top_logprobs: this.togLogprobs,
      min_p: this.minP && parseFloat(this.minP),
      top_a: this.topA && parseFloat(this.topA),
      transforms: this.transforms,
      models: this.models,
    };
    // Provider routing preferences are nested under `provider`.
    if (this.sort) {
      data.provider = {
        sort: this.sort,
      };
    }
    // Reasoning options are nested under `reasoning`; only attach the
    // object when at least one option was configured.
    const reasoning = {};
    if (this.effort) {
      reasoning.effort = this.effort;
    }
    if (this.reasoningMaxTokens) {
      reasoning.max_tokens = parseFloat(this.reasoningMaxTokens);
    }
    if (this.exclude) {
      reasoning.exclude = this.exclude;
    }
    if (Object.keys(reasoning).length) {
      data.reasoning = reasoning;
    }
    // NOTE(review): method name "sendCompetionRequest" (missing "l") must
    // match the helper declared in openrouter.app.mjs. Generous timeout:
    // large models can take minutes to respond.
    const response = await this.openrouter.sendCompetionRequest({
      $,
      data,
      timeout: 1000 * 60 * 5,
    });
    // OpenRouter can return 200 with an embedded error object.
    if (response.error) {
      throw new ConfigurationError(response.error.message);
    }
    $.export("$summary", `A new completion request with Id: ${response.id} was successfully created!`);
    return response;
  },
};

0 commit comments

Comments
 (0)