-    ---Function to run when the request has completed. Useful to catch errors
-    ---@param self CodeCompanion.Adapter
-    ---@param data table
-    ---@return nil
     on_exit = function(self, data)
-      if data.status >= 400 then
-        log:error("Error: %s", data.body)
-      end
+      return openai.handlers.on_exit(self, data)
     end,
   },
   schema = {
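Side note on the hunk above: the adapter stops checking `data.status` itself and forwards `on_exit` to the OpenAI adapter's handler, so error handling lives in one place. A minimal sketch of that delegation pattern, assuming the OpenAI adapter is required as `openai` (the exact require path is an assumption, not shown in this diff):

```lua
-- Sketch only: the require path is an assumption; this diff only shows that an
-- `openai` module with `handlers.on_exit` is in scope.
local openai = require("codecompanion.adapters.openai")

local handlers = {
  ---Function to run when the request has completed. Useful to catch errors
  ---@param self CodeCompanion.Adapter
  ---@param data table
  ---@return nil
  on_exit = function(self, data)
    -- Delegate to the OpenAI adapter instead of repeating the old
    -- `data.status >= 400` check and log:error call locally.
    return openai.handlers.on_exit(self, data)
  end,
}

return handlers
```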
@@ -232,8 +149,7 @@ return {
       type = "number",
       optional = true,
       default = 0.95,
-      desc =
-        "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. Not used for R1.",
+      desc = "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. Not used for R1.",
       validate = function(n)
         return n >= 0 and n <= 1, "Must be between 0 and 1"
       end,
@@ -258,8 +174,7 @@ return {
       type = "integer",
       optional = true,
       default = 8192,
-      desc =
-        "The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.",
+      desc = "The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.",
       validate = function(n)
         return n > 0, "Must be greater than 0"
       end,
@@ -270,8 +185,7 @@ return {
       type = "number",
       optional = true,
       default = 0,
-      desc =
-        "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. Not used for R1",
+      desc = "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. Not used for R1",
       validate = function(n)
         return n >= -2 and n <= 2, "Must be between -2 and 2"
       end,
@@ -282,8 +196,7 @@ return {
       type = "number",
       optional = true,
       default = 0,
-      desc =
-        "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. Not used for R1, but may be specified.",
+      desc = "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. Not used for R1, but may be specified.",
       validate = function(n)
         return n >= -2 and n <= 2, "Must be between -2 and 2"
       end,
@@ -294,8 +207,7 @@ return {
       type = "boolean",
       optional = true,
       default = nil,
-      desc =
-        "Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message. Not supported for R1.",
+      desc = "Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message. Not supported for R1.",
       subtype_key = {
         type = "integer",
       },
@@ -306,8 +218,7 @@ return {
       type = "string",
       optional = true,
       default = nil,
-      desc =
-        "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.",
+      desc = "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.",
       validate = function(u)
         return u:len() < 100, "Cannot be longer than 100 characters"
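Usage note: the schema hunks above only merge each `desc` onto a single line; defaults and `validate` rules are unchanged. These defaults can still be overridden from a user's setup by extending the adapter. A hedged sketch, assuming this file is the DeepSeek adapter registered as `deepseek` (the name is inferred from the "Not used for R1" notes, not from the diff) and using CodeCompanion's documented `adapters.extend` pattern:

```lua
-- Sketch only: the adapter name "deepseek" is an assumption.
require("codecompanion").setup({
  adapters = {
    deepseek = function()
      return require("codecompanion.adapters").extend("deepseek", {
        schema = {
          top_p = { default = 0.9 },       -- validate: must be between 0 and 1
          max_tokens = { default = 4096 }, -- validate: must be greater than 0
        },
      })
    end,
  },
})
```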