
Commit 37aa39b

github-actions[bot] authored and actions-user committed
🚨 Commit Build Artifact from GitHub Actions
1 parent 09bac6a commit 37aa39b

File tree

dist/main_bun.mjs
dist/main_cloudflare-workers.mjs
dist/main_deno.mjs
dist/main_node.mjs

4 files changed (+68, -4 lines)

dist/main_bun.mjs

Lines changed: 17 additions & 1 deletion
@@ -105,14 +105,30 @@ function genModel(req) {
   const model = GeminiModel.modelMapping(req.model);
   let functions = req.tools?.filter((it) => it.type === "function")?.map((it) => it.function) ?? [];
   functions = functions.concat((req.functions ?? []).map((it) => ({ strict: null, ...it })));
-  const responseMimeType = req.response_format?.type === "json_object" ? "application/json" : "text/plain";
+  let responseMimeType;
+  let responseSchema;
+  switch (req.response_format?.type) {
+    case "json_object":
+      responseMimeType = "application/json";
+      break;
+    case "json_schema":
+      responseMimeType = "application/json";
+      responseSchema = req.response_format.json_schema.schema;
+      break;
+    case "text":
+      responseMimeType = "text/plain";
+      break;
+    default:
+      break;
+  }
   const generateContentRequest = {
     contents: openAiMessageToGeminiMessage(req.messages),
     generationConfig: {
       maxOutputTokens: req.max_completion_tokens ?? void 0,
       temperature: req.temperature ?? void 0,
       topP: req.top_p ?? void 0,
       responseMimeType,
+      responseSchema,
       thinkingConfig: !model.isThinkingModel() ? void 0 : {
         includeThoughts: true
       }
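
For context, the new branch maps the OpenAI-style response_format field onto Gemini's generationConfig instead of collapsing everything to a ternary. A minimal sketch of that mapping in isolation (the helper name toGeminiResponseConfig and the sample request are illustrative, not part of the build artifact; the logic mirrors the diff above):

// Illustrative reproduction of the new response_format mapping.
// toGeminiResponseConfig is a hypothetical name for this sketch.
function toGeminiResponseConfig(response_format) {
  let responseMimeType;
  let responseSchema;
  switch (response_format?.type) {
    case "json_object":
      responseMimeType = "application/json";
      break;
    case "json_schema":
      responseMimeType = "application/json";
      responseSchema = response_format.json_schema.schema;
      break;
    case "text":
      responseMimeType = "text/plain";
      break;
    default:
      break;
  }
  return { responseMimeType, responseSchema };
}

// Example: an OpenAI-style structured-output request now carries its schema through.
const example = toGeminiResponseConfig({
  type: "json_schema",
  json_schema: {
    name: "person",
    schema: { type: "object", properties: { name: { type: "string" } } }
  }
});
console.log(example);
// => { responseMimeType: "application/json", responseSchema: { type: "object", ... } }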

dist/main_cloudflare-workers.mjs

Lines changed: 17 additions & 1 deletion
@@ -105,14 +105,30 @@ function genModel(req) {
   const model = GeminiModel.modelMapping(req.model);
   let functions = req.tools?.filter((it) => it.type === "function")?.map((it) => it.function) ?? [];
   functions = functions.concat((req.functions ?? []).map((it) => ({ strict: null, ...it })));
-  const responseMimeType = req.response_format?.type === "json_object" ? "application/json" : "text/plain";
+  let responseMimeType;
+  let responseSchema;
+  switch (req.response_format?.type) {
+    case "json_object":
+      responseMimeType = "application/json";
+      break;
+    case "json_schema":
+      responseMimeType = "application/json";
+      responseSchema = req.response_format.json_schema.schema;
+      break;
+    case "text":
+      responseMimeType = "text/plain";
+      break;
+    default:
+      break;
+  }
   const generateContentRequest = {
     contents: openAiMessageToGeminiMessage(req.messages),
     generationConfig: {
       maxOutputTokens: req.max_completion_tokens ?? void 0,
       temperature: req.temperature ?? void 0,
       topP: req.top_p ?? void 0,
       responseMimeType,
+      responseSchema,
       thinkingConfig: !model.isThinkingModel() ? void 0 : {
         includeThoughts: true
       }

dist/main_deno.mjs

Lines changed: 17 additions & 1 deletion
@@ -105,14 +105,30 @@ function genModel(req) {
   const model = GeminiModel.modelMapping(req.model);
   let functions = req.tools?.filter((it) => it.type === "function")?.map((it) => it.function) ?? [];
   functions = functions.concat((req.functions ?? []).map((it) => ({ strict: null, ...it })));
-  const responseMimeType = req.response_format?.type === "json_object" ? "application/json" : "text/plain";
+  let responseMimeType;
+  let responseSchema;
+  switch (req.response_format?.type) {
+    case "json_object":
+      responseMimeType = "application/json";
+      break;
+    case "json_schema":
+      responseMimeType = "application/json";
+      responseSchema = req.response_format.json_schema.schema;
+      break;
+    case "text":
+      responseMimeType = "text/plain";
+      break;
+    default:
+      break;
+  }
   const generateContentRequest = {
     contents: openAiMessageToGeminiMessage(req.messages),
     generationConfig: {
       maxOutputTokens: req.max_completion_tokens ?? void 0,
       temperature: req.temperature ?? void 0,
       topP: req.top_p ?? void 0,
       responseMimeType,
+      responseSchema,
       thinkingConfig: !model.isThinkingModel() ? void 0 : {
         includeThoughts: true
       }

dist/main_node.mjs

Lines changed: 17 additions & 1 deletion
@@ -563,14 +563,30 @@ function genModel(req) {
   const model = GeminiModel.modelMapping(req.model);
   let functions = req.tools?.filter((it) => it.type === "function")?.map((it) => it.function) ?? [];
   functions = functions.concat((req.functions ?? []).map((it) => ({ strict: null, ...it })));
-  const responseMimeType = req.response_format?.type === "json_object" ? "application/json" : "text/plain";
+  let responseMimeType;
+  let responseSchema;
+  switch (req.response_format?.type) {
+    case "json_object":
+      responseMimeType = "application/json";
+      break;
+    case "json_schema":
+      responseMimeType = "application/json";
+      responseSchema = req.response_format.json_schema.schema;
+      break;
+    case "text":
+      responseMimeType = "text/plain";
+      break;
+    default:
+      break;
+  }
   const generateContentRequest = {
     contents: openAiMessageToGeminiMessage(req.messages),
     generationConfig: {
       maxOutputTokens: req.max_completion_tokens ?? void 0,
       temperature: req.temperature ?? void 0,
       topP: req.top_p ?? void 0,
       responseMimeType,
+      responseSchema,
       thinkingConfig: !model.isThinkingModel() ? void 0 : {
         includeThoughts: true
       }
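
Behaviorally, two cases change: under the old ternary, a json_schema request fell back to text/plain and its schema was silently dropped, and requests with no response_format were forced to text/plain; now json_schema yields application/json plus a responseSchema, and an absent response_format leaves responseMimeType undefined. A hedged client-side example of a structured-output request through this proxy (the endpoint URL, model name, and API key are placeholders; the body follows the standard OpenAI chat-completions shape):

// Hypothetical call against a deployed instance of this proxy.
const res = await fetch("https://example.com/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: "Bearer YOUR_GEMINI_API_KEY" // placeholder credential
  },
  body: JSON.stringify({
    model: "gemini-1.5-flash", // placeholder; req.model is mapped via GeminiModel.modelMapping
    messages: [{ role: "user", content: "Give me a person as JSON." }],
    response_format: {
      type: "json_schema",
      json_schema: {
        name: "person",
        schema: { type: "object", properties: { name: { type: "string" } } }
      }
    }
  })
});
console.log(await res.json()); // the schema now reaches Gemini as generationConfig.responseSchema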
