File tree — 2 files changed: +49 −46 lines changed
1
+ import { BYTEZ } from '../../globals' ;
1
2
import { ProviderConfig } from '../types' ;
3
+ import { BytezResponse } from './types' ;
4
+ import { generateErrorResponse } from '../utils' ;
2
5
3
6
const BytezInferenceChatCompleteConfig : ProviderConfig = {
4
7
messages : {
@@ -28,4 +31,47 @@ const BytezInferenceChatCompleteConfig: ProviderConfig = {
28
31
} ,
29
32
} ;
30
33
31
- export { BytezInferenceChatCompleteConfig } ;
34
+ function chatComplete (
35
+ response : BytezResponse ,
36
+ responseStatus : number ,
37
+ responseHeaders : any ,
38
+ strictOpenAiCompliance : boolean ,
39
+ endpoint : string ,
40
+ requestBody : any
41
+ ) {
42
+ const { error, output } = response ;
43
+
44
+ if ( error ) {
45
+ return generateErrorResponse (
46
+ {
47
+ message : error ,
48
+ type : String ( responseStatus ) ,
49
+ param : null ,
50
+ code : null ,
51
+ } ,
52
+ BYTEZ
53
+ ) ;
54
+ }
55
+
56
+ return {
57
+ id : crypto . randomUUID ( ) ,
58
+ object : 'chat.completion' ,
59
+ created : Date . now ( ) ,
60
+ model : requestBody . model ,
61
+ choices : [
62
+ {
63
+ index : 0 ,
64
+ message : output ,
65
+ logprobs : null ,
66
+ finish_reason : 'stop' ,
67
+ } ,
68
+ ] ,
69
+ usage : {
70
+ completion_tokens : - 1 ,
71
+ prompt_tokens : - 1 ,
72
+ total_tokens : - 1 ,
73
+ } ,
74
+ } ;
75
+ }
76
+
77
+ export { BytezInferenceChatCompleteConfig , chatComplete } ;
Original file line number Diff line number Diff line change 1
- import { BYTEZ } from '../../globals' ;
2
1
import { ProviderConfigs } from '../types' ;
3
- import { generateErrorResponse } from '../utils' ;
4
2
import BytezInferenceAPI from './api' ;
5
- import { BytezInferenceChatCompleteConfig } from './chatComplete' ;
6
- import { BytezResponse } from './types' ;
3
+ import { BytezInferenceChatCompleteConfig , chatComplete } from './chatComplete' ;
7
4
8
5
const BytezInferenceAPIConfig : ProviderConfigs = {
9
6
api : BytezInferenceAPI ,
10
7
chatComplete : BytezInferenceChatCompleteConfig ,
11
8
responseTransforms : {
12
- chatComplete : (
13
- response : BytezResponse ,
14
- responseStatus : number ,
15
- responseHeaders : any ,
16
- strictOpenAiCompliance : boolean ,
17
- endpoint : string ,
18
- requestBody : any
19
- ) => {
20
- const { error, output } = response ;
21
-
22
- if ( error ) {
23
- return generateErrorResponse (
24
- {
25
- message : error ,
26
- type : String ( responseStatus ) ,
27
- param : null ,
28
- code : null ,
29
- } ,
30
- BYTEZ
31
- ) ;
32
- }
33
-
34
- return {
35
- id : crypto . randomUUID ( ) ,
36
- object : 'chat.completion' ,
37
- created : Date . now ( ) ,
38
- model : requestBody . model ,
39
- choices : [
40
- {
41
- index : 0 ,
42
- message : output ,
43
- logprobs : null ,
44
- finish_reason : 'stop' ,
45
- } ,
46
- ] ,
47
- usage : {
48
- inferenceTime : responseHeaders . get ( 'inference-time' ) ,
49
- modelSize : responseHeaders . get ( 'inference-meter' ) ,
50
- } ,
51
- } ;
52
- } ,
9
+ chatComplete,
53
10
} ,
54
11
} ;
55
12
You can’t perform that action at this time.
0 commit comments