File tree Expand file tree Collapse file tree 8 files changed +132
-1
lines changed Expand file tree Collapse file tree 8 files changed +132
-1
lines changed Original file line number Diff line number Diff line change @@ -95,6 +95,7 @@ export const LEPTON: string = 'lepton';
95
95
export const KLUSTER_AI : string = 'kluster-ai' ;
96
96
export const NSCALE : string = 'nscale' ;
97
97
export const HYPERBOLIC : string = 'hyperbolic' ;
98
+ export const BYTEZ : string = 'bytez' ;
98
99
export const FEATHERLESS_AI : string = 'featherless-ai' ;
99
100
export const KRUTRIM : string = 'krutrim' ;
100
101
export const QDRANT : string = 'qdrant' ;
@@ -158,6 +159,7 @@ export const VALID_PROVIDERS = [
158
159
KLUSTER_AI ,
159
160
NSCALE ,
160
161
HYPERBOLIC ,
162
+ BYTEZ ,
161
163
FEATHERLESS_AI ,
162
164
KRUTRIM ,
163
165
QDRANT ,
Original file line number Diff line number Diff line change
1
+ import { ProviderAPIConfig } from '../types' ;
2
+ import { version } from '../../../package.json' ;
3
+
4
+ const BytezInferenceAPI : ProviderAPIConfig = {
5
+ getBaseURL : ( ) => 'https://api.bytez.com' ,
6
+ headers : async ( { providerOptions } ) => {
7
+ const { apiKey } = providerOptions ;
8
+
9
+ const headers : Record < string , string > = { } ;
10
+
11
+ headers [ 'Authorization' ] = `Key ${ apiKey } ` ;
12
+ headers [ 'user-agent' ] = `portkey/${ version } ` ;
13
+
14
+ return headers ;
15
+ } ,
16
+ getEndpoint : ( { gatewayRequestBodyJSON : { version = 2 , model } } ) =>
17
+ `/models/v${ version } /${ model } ` ,
18
+ } ;
19
+
20
+ export default BytezInferenceAPI ;
Original file line number Diff line number Diff line change
1
+ import { BYTEZ } from '../../globals' ;
2
+ import { ProviderConfig } from '../types' ;
3
+ import { BytezResponse } from './types' ;
4
+ import { generateErrorResponse } from '../utils' ;
5
+
6
+ const BytezInferenceChatCompleteConfig : ProviderConfig = {
7
+ messages : {
8
+ param : 'messages' ,
9
+ required : true ,
10
+ } ,
11
+ max_tokens : {
12
+ param : 'params.max_new_tokens' ,
13
+ default : 100 ,
14
+ min : 0 ,
15
+ } ,
16
+ temperature : {
17
+ param : 'params.temperature' ,
18
+ default : 1 ,
19
+ min : 0 ,
20
+ max : 2 ,
21
+ } ,
22
+ top_p : {
23
+ param : 'params.top_p' ,
24
+ default : 1 ,
25
+ min : 0 ,
26
+ max : 1 ,
27
+ } ,
28
+ stream : {
29
+ param : 'stream' ,
30
+ default : false ,
31
+ } ,
32
+ } ;
33
+
34
+ function chatComplete (
35
+ response : BytezResponse ,
36
+ responseStatus : number ,
37
+ responseHeaders : any ,
38
+ strictOpenAiCompliance : boolean ,
39
+ endpoint : string ,
40
+ requestBody : any
41
+ ) {
42
+ const { error, output } = response ;
43
+
44
+ if ( error ) {
45
+ return generateErrorResponse (
46
+ {
47
+ message : error ,
48
+ type : String ( responseStatus ) ,
49
+ param : null ,
50
+ code : null ,
51
+ } ,
52
+ BYTEZ
53
+ ) ;
54
+ }
55
+
56
+ return {
57
+ id : crypto . randomUUID ( ) ,
58
+ object : 'chat.completion' ,
59
+ created : Date . now ( ) ,
60
+ model : requestBody . model ,
61
+ choices : [
62
+ {
63
+ index : 0 ,
64
+ message : output ,
65
+ logprobs : null ,
66
+ finish_reason : 'stop' ,
67
+ } ,
68
+ ] ,
69
+ usage : {
70
+ completion_tokens : - 1 ,
71
+ prompt_tokens : - 1 ,
72
+ total_tokens : - 1 ,
73
+ } ,
74
+ } ;
75
+ }
76
+
77
+ export { BytezInferenceChatCompleteConfig , chatComplete } ;
Original file line number Diff line number Diff line change
1
+ import { ProviderConfigs } from '../types' ;
2
+ import BytezInferenceAPI from './api' ;
3
+ import { BytezInferenceChatCompleteConfig , chatComplete } from './chatComplete' ;
4
+
5
+ const BytezInferenceAPIConfig : ProviderConfigs = {
6
+ api : BytezInferenceAPI ,
7
+ chatComplete : BytezInferenceChatCompleteConfig ,
8
+ responseTransforms : {
9
+ chatComplete,
10
+ } ,
11
+ } ;
12
+
13
+ export default BytezInferenceAPIConfig ;
Original file line number Diff line number Diff line change
1
// Minimal shape of a Bytez model record; only the task label is declared here.
interface Model {
  task: string;
}

// Envelope returned by the Bytez inference API. Exactly one of `error`
// (a bare message string) or `output` is expected to be meaningful per
// response — the chatComplete transform branches on `error` first.
// NOTE(review): `output` is declared as Model[], but the chatComplete
// transform assigns `output` directly as an OpenAI chat message object —
// confirm the actual payload shape against the Bytez API and tighten
// this type accordingly.
interface BytezResponse {
  error: string;
  output: Model[];
}

export { Model, BytezResponse };
Original file line number Diff line number Diff line change
1
+ import BytezConfig from './bytez' ;
1
2
import AI21Config from './ai21' ;
2
3
import AnthropicConfig from './anthropic' ;
3
4
import AnyscaleConfig from './anyscale' ;
@@ -120,6 +121,7 @@ const Providers: { [key: string]: ProviderConfigs } = {
120
121
'kluster-ai' : KlusterAIConfig ,
121
122
nscale : NscaleConfig ,
122
123
hyperbolic : HyperbolicConfig ,
124
+ bytez : BytezConfig ,
123
125
'featherless-ai' : FeatherlessAIConfig ,
124
126
krutrim : KrutrimConfig ,
125
127
} ;
Original file line number Diff line number Diff line change @@ -1115,6 +1115,7 @@ <h3>Select Provider</h3>
1115
1115
< option value ="together-ai "> Together AI</ option >
1116
1116
< option value ="perplexity-ai "> Perplexity AI</ option >
1117
1117
< option value ="mistral-ai "> Mistral AI</ option >
1118
+ < option value ="bytez "> Bytez</ option >
1118
1119
< option value ="others "> Others</ option >
1119
1120
</ select >
1120
1121
</ div >
@@ -1465,6 +1466,7 @@ <h3>Enter API Key</h3>
1465
1466
"together-ai" : "llama-3.1-8b-instruct" ,
1466
1467
"perplexity-ai" : "pplx-7b-online" ,
1467
1468
"mistral-ai" : "mistral-small-latest" ,
1469
+ "bytez" : "google/gemma-3-1b-it" ,
1468
1470
"others" : "gpt-4o-mini"
1469
1471
}
1470
1472
Original file line number Diff line number Diff line change 7
7
DEEPINFRA ,
8
8
SAMBANOVA ,
9
9
BEDROCK ,
10
+ BYTEZ ,
10
11
} from './globals' ;
11
12
import { Params } from './types/requestBody' ;
12
13
@@ -48,9 +49,13 @@ export const getStreamModeSplitPattern = (
48
49
splitPattern = '\n' ;
49
50
}
50
51
52
+ if ( proxyProvider === BYTEZ ) {
53
+ splitPattern = ' ' ;
54
+ }
55
+
51
56
return splitPattern ;
52
57
} ;
53
- export type SplitPatternType = '\n\n' | '\r\n\r\n' | '\n' | '\r\n' ;
58
+ export type SplitPatternType = '\n\n' | '\r\n\r\n' | '\n' | '\r\n' | ' ' ;
54
59
55
60
export const getStreamingMode = (
56
61
reqBody : Params ,
You can’t perform that action at this time.
0 commit comments