Skip to content

Commit 73f5c05

Browse files
committed
Move Bytez chatComplete responseTransform into chatComplete.ts; conform to the OpenAI-compatible response spec.
1 parent a6070dc commit 73f5c05

File tree

2 files changed

+49
-46
lines changed

2 files changed

+49
-46
lines changed

src/providers/bytez/chatComplete.ts

Lines changed: 47 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,7 @@
1+
import { BYTEZ } from '../../globals';
12
import { ProviderConfig } from '../types';
3+
import { BytezResponse } from './types';
4+
import { generateErrorResponse } from '../utils';
25

36
const BytezInferenceChatCompleteConfig: ProviderConfig = {
47
messages: {
@@ -28,4 +31,47 @@ const BytezInferenceChatCompleteConfig: ProviderConfig = {
2831
},
2932
};
3033

31-
export { BytezInferenceChatCompleteConfig };
34+
function chatComplete(
35+
response: BytezResponse,
36+
responseStatus: number,
37+
responseHeaders: any,
38+
strictOpenAiCompliance: boolean,
39+
endpoint: string,
40+
requestBody: any
41+
) {
42+
const { error, output } = response;
43+
44+
if (error) {
45+
return generateErrorResponse(
46+
{
47+
message: error,
48+
type: String(responseStatus),
49+
param: null,
50+
code: null,
51+
},
52+
BYTEZ
53+
);
54+
}
55+
56+
return {
57+
id: crypto.randomUUID(),
58+
object: 'chat.completion',
59+
created: Date.now(),
60+
model: requestBody.model,
61+
choices: [
62+
{
63+
index: 0,
64+
message: output,
65+
logprobs: null,
66+
finish_reason: 'stop',
67+
},
68+
],
69+
usage: {
70+
completion_tokens: -1,
71+
prompt_tokens: -1,
72+
total_tokens: -1,
73+
},
74+
};
75+
}
76+
77+
export { BytezInferenceChatCompleteConfig, chatComplete };

src/providers/bytez/index.ts

Lines changed: 2 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -1,55 +1,12 @@
1-
import { BYTEZ } from '../../globals';
21
import { ProviderConfigs } from '../types';
3-
import { generateErrorResponse } from '../utils';
42
import BytezInferenceAPI from './api';
5-
import { BytezInferenceChatCompleteConfig } from './chatComplete';
6-
import { BytezResponse } from './types';
3+
import { BytezInferenceChatCompleteConfig, chatComplete } from './chatComplete';
74

85
const BytezInferenceAPIConfig: ProviderConfigs = {
96
api: BytezInferenceAPI,
107
chatComplete: BytezInferenceChatCompleteConfig,
118
responseTransforms: {
12-
chatComplete: (
13-
response: BytezResponse,
14-
responseStatus: number,
15-
responseHeaders: any,
16-
strictOpenAiCompliance: boolean,
17-
endpoint: string,
18-
requestBody: any
19-
) => {
20-
const { error, output } = response;
21-
22-
if (error) {
23-
return generateErrorResponse(
24-
{
25-
message: error,
26-
type: String(responseStatus),
27-
param: null,
28-
code: null,
29-
},
30-
BYTEZ
31-
);
32-
}
33-
34-
return {
35-
id: crypto.randomUUID(),
36-
object: 'chat.completion',
37-
created: Date.now(),
38-
model: requestBody.model,
39-
choices: [
40-
{
41-
index: 0,
42-
message: output,
43-
logprobs: null,
44-
finish_reason: 'stop',
45-
},
46-
],
47-
usage: {
48-
inferenceTime: responseHeaders.get('inference-time'),
49-
modelSize: responseHeaders.get('inference-meter'),
50-
},
51-
};
52-
},
9+
chatComplete,
5310
},
5411
};
5512

0 commit comments

Comments
 (0)