
Commit 1362e9d

fix ci
Signed-off-by: zuisong <com.me@foxmail.com>
1 parent 599ee7d commit 1362e9d

File tree

5 files changed (+64, -34 lines)

changelog.md

Lines changed: 3 additions & 6 deletions
@@ -1,18 +1,15 @@
 # changelog
-## 0.15.1 (2025-08-30)
+## 0.15.0 (2025-09-04)
 - Update model mapping
 - add openai tts api, implement openai.fm and edge tts
 - fix embedding for lobechat
-## 0.15.0 (Unreleased)
-
-- ...
 
 ## 0.14.2
 
 - Update model mapping
 
 | Request Model | Target Gemini Model |
-| -------------------- | -------------------------- |
+|----------------------|----------------------------|
 | gpt-3.5-turbo | gemini-1.5-flash-8b-latest |
 | gpt-4 | gemini-1.5-pro-latest |
 | gpt-4o | gemini-1.5-flash-latest |
@@ -106,7 +103,7 @@
 
 - Add cloudflare workers support
 - Reduce docker image size
-- Imporve document
+- Improve document
 
 ## 0.4.0(2024-01-17)
 
deno.jsonc

Lines changed: 6 additions & 1 deletion
@@ -6,14 +6,19 @@
   "tasks": {
     "start:deno": "deno run --allow-net --allow-env main_deno.ts",
     "build:deno": "deno run -A build.ts",
-    "test:deno": "deno test --allow-net --allow-env --coverage"
+    "test:deno": "deno test --allow-net --allow-env --coverage --no-check"
   },
   "imports": {
     "openapi-format": "https://esm.sh/openapi-format?bundle",
     "itty-router/": "npm:/itty-router/",
     "eventsource-parser/": "npm:/eventsource-parser/",
     "@hono/node-server": "npm:/@hono/node-server"
   },
+  "lint": {
+    "rules": {
+      "exclude": ["no-import-prefix"]
+    }
+  },
   "nodeModulesDir": "none",
   "fmt": {
     "exclude": ["*"]

src/openai/audio/speech/EdgeProxyHandler.ts

Lines changed: 37 additions & 15 deletions
@@ -4,30 +4,35 @@ const DEFAULT_AUDIO_FORMAT = "audio-24khz-48kbitrate-mono-mp3"
 const Edge_TTS_VOICE_URL =
   "https://speech.platform.bing.com/consumer/speech/synthesize/readaloud/voices/list?trustedclienttoken=6A5AA1D4EAFF4E9FB37E23D68491D6F4"
 
-let VOICE_LIST = []
+let VOICE_LIST: string[] = []
 const TOKEN_REFRESH_BEFORE_EXPIRY = 3 * 60
-let tokenInfo: { endpoint: EdgeEndpointParam; token: string; expiredAt: string } = {
+let tokenInfo: {
+  endpoint: EdgeEndpointParam | null
+  token: string | null
+  expiredAt: number | null
+} = {
   endpoint: null,
   token: null,
   expiredAt: null,
 }
 
 let Edge_TTS_ENDPOINT_URL = ""
-let EDGE_ENDPONT: EdgeEndpointParam = null
+let EDGE_ENDPONT2: EdgeEndpointParam | null = null
 interface EdgeEndpointParam {
   t: string
   r: string
 }
 
 interface EdgeEndpointJWTParam {
-  exp: string
+  exp: number
 }
 
 async function getVoice() {
+  const EDGE_ENDPONT = await getEndpoint()
   const response = await fetch(Edge_TTS_VOICE_URL, {
     method: "GET",
     headers: {
-      Authorization: EDGE_ENDPONT.t,
+      Authorization: EDGE_ENDPONT?.t ?? "",
       "Content-Type": "application/ssml+xml",
       "User-Agent":
         "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
@@ -36,18 +41,28 @@ async function getVoice() {
   })
 
   const data = await response.json()
-  VOICE_LIST = data.map((v) => v["ShortName"])
+  VOICE_LIST = data.map((v: any) => v["ShortName"])
   const VOICE_LIST_en = VOICE_LIST.filter((x) => x.startsWith("en"))
   const VOICE_LIST_zh = VOICE_LIST.filter((x) => x.startsWith("zh"))
   const VOICE_LIST_ja = VOICE_LIST.filter((x) => x.startsWith("ja"))
   VOICE_LIST = VOICE_LIST_en.concat(VOICE_LIST_zh.concat(VOICE_LIST_ja))
   console.error(VOICE_LIST)
 }
-async function getEndpoint() {
+
+async function getEndpoint(): Promise<EdgeEndpointParam> {
+  if (EDGE_ENDPONT2) {
+    return EDGE_ENDPONT2
+  }
+  EDGE_ENDPONT2 = await getEndpoint2()
+
+  return EDGE_ENDPONT2
+}
+
+async function getEndpoint2(): Promise<EdgeEndpointParam> {
   const now = Date.now() / 1000
 
   if (tokenInfo.token && tokenInfo.expiredAt && now < tokenInfo.expiredAt - TOKEN_REFRESH_BEFORE_EXPIRY) {
-    return tokenInfo.endpoint
+    return tokenInfo.endpoint!
   }
 
   // fetch a new token
@@ -92,13 +107,21 @@ async function getEndpoint() {
     // if a cached token exists, try to use it even if it has expired
     if (tokenInfo.token) {
       console.log("使用过期的缓存token")
-      return tokenInfo.endpoint
+      return tokenInfo.endpoint!
     }
     throw error
   }
 }
 
-function getSsml(text, voiceName, rate, pitch, volume, style, slien = 0) {
+function getSsml(
+  text: string,
+  voiceName: string,
+  rate?: string,
+  pitch?: string,
+  volume?: string,
+  style?: string,
+  slien = 0,
+) {
   let slien_str = ""
   if (slien > 0) {
     slien_str = `<break time="${slien}ms" />`
@@ -124,11 +147,12 @@ interface EdgeRequestParam {
 }
 
 async function EdgeProxyDownloader(formData: EdgeRequestParam): Promise<Response> {
+  const EDGE_ENDPONT = await getEndpoint()
   try {
     const response = await fetch(Edge_TTS_ENDPOINT_URL, {
       method: "POST",
       headers: {
-        Authorization: EDGE_ENDPONT.t,
+        Authorization: EDGE_ENDPONT?.t ?? "",
         "Content-Type": "application/ssml+xml",
         "User-Agent":
           "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
@@ -192,8 +216,8 @@ async function EdgeProxyDownloader(formData: EdgeRequestParam): Promise<Response> {
 }
 
 export async function EdgeProxyHandler(req: TTSParam): Promise<Response> {
-  EDGE_ENDPONT = await getEndpoint()
-  Edge_TTS_ENDPOINT_URL = `https://${EDGE_ENDPONT.r}.tts.speech.microsoft.com/cognitiveservices/v1`
+  const EDGE_ENDPONT = await getEndpoint()
+  Edge_TTS_ENDPOINT_URL = `https://${EDGE_ENDPONT?.r}.tts.speech.microsoft.com/cognitiveservices/v1`
 
   if (VOICE_LIST.length === 0) {
     await getVoice()
@@ -250,5 +274,3 @@ export async function EdgeProxyHandler(req: TTSParam): Promise<Response> {
 
   return response
 }
-
-await getEndpoint()
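
This file's change drops the module-level await getEndpoint() at the bottom and instead resolves the Edge endpoint lazily, caching it in EDGE_ENDPONT2 through the new getEndpoint() wrapper around getEndpoint2(). A minimal, self-contained TypeScript sketch of that lazy-cache pattern follows; fetchEndpoint is a hypothetical stand-in for the real token exchange, not code from this repository:

// Sketch only: lazy, memoized endpoint resolution, assuming a hypothetical fetchEndpoint().
interface EndpointParam {
  t: string // auth token
  r: string // region
}

let cachedEndpoint: EndpointParam | null = null

async function fetchEndpoint(): Promise<EndpointParam> {
  // Placeholder for the real request that returns { t, r }.
  return { t: "example-token", r: "example-region" }
}

async function getEndpointOnce(): Promise<EndpointParam> {
  if (cachedEndpoint) {
    return cachedEndpoint // reuse the first successful lookup
  }
  cachedEndpoint = await fetchEndpoint()
  return cachedEndpoint
}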

src/openai/models.ts

Lines changed: 16 additions & 3 deletions
@@ -10,15 +10,28 @@ export const modelData: OpenAI.Models.Model[] = Object.keys(ModelMapping).map((m
 
 export const models = async (req: Request) => {
   const apiParam = getToken(req.headers)
-  var gemini_models = await listModels(apiParam)
-  var model_list = gemini_models["data"] as Array<any>
+  const gemini_models = await listModels(apiParam)
+  const model_list = gemini_models["data"] as Array<any>
   const finala_model_list = model_list.concat(modelData)
+
   return {
     object: "list",
-    data: finala_model_list,
+    data: distinctArrayByKey(finala_model_list, (item) => item.id),
   }
 }
 
+function distinctArrayByKey<T>(arr: T[], keySelector: (item: T) => any): T[] {
+  const seen = new Set()
+  return arr.filter((item) => {
+    const value = keySelector(item)
+    if (seen.has(value)) {
+      return false
+    }
+    seen.add(value)
+    return true
+  })
+}
+
 export const modelDetail = (model: string) => {
   return modelData.find((it) => it.id === model)
 }
test/chat-completion_test.ts

Lines changed: 2 additions & 9 deletions
@@ -4,22 +4,15 @@ import { afterEach, beforeEach, describe, it } from "jsr:@std/testing/bdd"
 import { EventSourceParserStream } from "eventsource-parser/stream"
 import { app } from "../src/app.ts"
 import type { OpenAI } from "../src/types.ts"
+import { ModelMapping } from "../src/utils.ts"
 import { MockFetch } from "./mock-fetch.ts"
 import { gemini_ok_resp } from "./test-data.ts"
 
 describe("openai to gemini test", () => {
   describe("success test", () => {
     const fetchMocker = new MockFetch()
 
-    for (const [openaiModel, geminiModel] of [
-      ["gpt-3.5-turbo", "gemini-1.5-flash-8b-latest"],
-      ["gpt-4", "gemini-1.5-pro-latest"],
-      ["gpt-4o", "gemini-1.5-flash-latest"],
-      ["gpt-4o-mini", "gemini-1.5-flash-8b-latest"],
-      ["gpt-4-vision-preview", "gemini-1.5-flash-latest"],
-      ["gpt-4-turbo", "gemini-1.5-pro-latest"],
-      ["gpt-4-turbo-preview", "gemini-2.0-flash-exp"],
-    ]) {
+    for (const [openaiModel, geminiModel] of Object.entries(ModelMapping)) {
       beforeEach(() => {
         fetchMocker.mock(
           (req) => req.url.includes(`generativelanguage.googleapis.com/v1beta/models/${geminiModel}:generateContent`),
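
The rewritten test loop assumes ModelMapping is a plain record from OpenAI model name to Gemini model name, so Object.entries yields the same [openaiModel, geminiModel] pairs the hard-coded list used to provide. A sketch of that assumed shape; the example values mirror the table in changelog.md and are not the full mapping:

// Sketch only: the shape of ModelMapping assumed by the parameterized tests.
const ModelMapping: Record<string, string> = {
  "gpt-3.5-turbo": "gemini-1.5-flash-8b-latest",
  "gpt-4": "gemini-1.5-pro-latest",
  "gpt-4o": "gemini-1.5-flash-latest",
}

for (const [openaiModel, geminiModel] of Object.entries(ModelMapping)) {
  console.log(`${openaiModel} -> ${geminiModel}`)
}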
