@@ -1,9 +1,12 @@
 import { ANTHROPIC } from '../../globals';
 import { Params } from '../../types/requestBody';
 import { CompletionResponse, ErrorResponse, ProviderConfig } from '../types';
-import { generateInvalidProviderResponseError } from '../utils';
+import {
+  generateInvalidProviderResponseError,
+  transformFinishReason,
+} from '../utils';
+import { ANTHROPIC_STOP_REASON, AnthropicStreamState, AnthropicErrorResponse } from './types';
 import { AnthropicErrorResponseTransform } from './utils';
-import { AnthropicErrorResponse } from './types';
 
 // TODO: this configuration does not enforce the maximum token limit for the input parameter. If you want to enforce this, you might need to add a custom validation function or a max property to the ParameterConfig interface, and then use it in the input configuration. However, this might be complex because the token count is not a simple length check, but depends on the specific tokenization method used by the model.
 
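Note on the new import: `transformFinishReason` is the shared helper that normalizes provider stop reasons into OpenAI-style finish reasons. A minimal sketch of its assumed contract, for context only; the real implementation lives in `../utils` and its mapping may differ:

// Hypothetical sketch; see ../utils for the actual implementation.
const ANTHROPIC_TO_OPENAI_REASON: Record<string, string> = {
  stop_sequence: 'stop',
  max_tokens: 'length',
};

function transformFinishReason(
  reason: string,
  strictOpenAiCompliance: boolean
): string {
  // When strict OpenAI compliance is off, pass the provider reason through.
  if (!strictOpenAiCompliance) return reason;
  // Otherwise map to the closest OpenAI finish_reason, defaulting to 'stop'.
  return ANTHROPIC_TO_OPENAI_REASON[reason] ?? 'stop';
}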
@@ -57,7 +60,7 @@ export const AnthropicCompleteConfig: ProviderConfig = {
 
 interface AnthropicCompleteResponse {
   completion: string;
-  stop_reason: string;
+  stop_reason: ANTHROPIC_STOP_REASON;
   model: string;
   truncated: boolean;
   stop: null | string;
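`ANTHROPIC_STOP_REASON` comes from `./types`; its members are not shown in this diff, but a plausible definition, assuming it enumerates the stop reasons Anthropic's completion API documents, would be:

// Assumed shape; the actual enum in ./types may list additional members.
enum ANTHROPIC_STOP_REASON {
  stop_sequence = 'stop_sequence',
  max_tokens = 'max_tokens',
}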
@@ -68,10 +71,20 @@ interface AnthropicCompleteResponse {
 // TODO: The token calculation is wrong atm
 export const AnthropicCompleteResponseTransform: (
   response: AnthropicCompleteResponse | AnthropicErrorResponse,
-  responseStatus: number
-) => CompletionResponse | ErrorResponse = (response, responseStatus) => {
-  if (responseStatus !== 200 && 'error' in response) {
-    return AnthropicErrorResponseTransform(response);
+  responseStatus: number,
+  responseHeaders: Headers,
+  strictOpenAiCompliance: boolean
+) => CompletionResponse | ErrorResponse = (
+  response,
+  responseStatus,
+  _responseHeaders,
+  strictOpenAiCompliance
+) => {
+  if (responseStatus !== 200) {
+    const errorResponse = AnthropicErrorResponseTransform(
+      response as AnthropicErrorResponse
+    );
+    if (errorResponse) return errorResponse;
   }
 
   if ('completion' in response) {
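The error branch now runs for every non-200 status and returns early only when `AnthropicErrorResponseTransform` recognizes the body; otherwise control falls through to the generic invalid-provider-response handling. A sketch of that helper's assumed contract, not the actual implementation in `./utils`:

// Assumed contract only; the real helper in ./utils may differ.
function AnthropicErrorResponseTransform(
  response: AnthropicErrorResponse
): ErrorResponse | undefined {
  // Return a normalized error when the payload looks like an Anthropic
  // error envelope; return undefined so the caller can fall through.
  if ('error' in response && response.error) {
    return {
      error: {
        message: response.error.message,
        type: response.error.type,
        param: null,
        code: null,
      },
      provider: ANTHROPIC,
    } as ErrorResponse;
  }
  return undefined;
}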
@@ -86,7 +99,10 @@ export const AnthropicCompleteResponseTransform: (
           text: response.completion,
           index: 0,
           logprobs: null,
-          finish_reason: response.stop_reason,
+          finish_reason: transformFinishReason(
+            response.stop_reason,
+            strictOpenAiCompliance
+          ),
         },
       ],
     };
@@ -96,8 +112,16 @@ export const AnthropicCompleteResponseTransform: (
 };
 
 export const AnthropicCompleteStreamChunkTransform: (
-  response: string
-) => string | undefined = (responseChunk) => {
+  response: string,
+  fallbackId: string,
+  streamState: AnthropicStreamState,
+  strictOpenAiCompliance: boolean
+) => string | undefined = (
+  responseChunk,
+  fallbackId,
+  streamState,
+  strictOpenAiCompliance
+) => {
   let chunk = responseChunk.trim();
   if (chunk.startsWith('event: ping')) {
     return;
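With the widened signature, the caller threads a per-request fallback id and mutable stream state through every chunk. An illustrative, hypothetical driver loop, assuming chunks are already split on SSE boundaries (note: `fallbackId` is presumably used when a chunk lacks a `log_id`; its use is not visible in the hunks shown here):

// Illustrative only; sseChunks and emit are placeholder names.
declare const sseChunks: string[]; // raw events, split on '\n\n'
declare function emit(chunk: string): void;

const streamState = {} as AnthropicStreamState; // mutable per-request state
const fallbackId = crypto.randomUUID();

for (const raw of sseChunks) {
  const out = AnthropicCompleteStreamChunkTransform(
    raw,
    fallbackId,
    streamState,
    true // strictOpenAiCompliance
  );
  if (out !== undefined) emit(out);
}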
@@ -110,6 +134,9 @@ export const AnthropicCompleteStreamChunkTransform: (
     return chunk;
   }
   const parsedChunk: AnthropicCompleteResponse = JSON.parse(chunk);
+  const finishReason = parsedChunk.stop_reason
+    ? transformFinishReason(parsedChunk.stop_reason, strictOpenAiCompliance)
+    : null;
   return (
     `data: ${JSON.stringify({
       id: parsedChunk.log_id,
@@ -122,7 +149,7 @@ export const AnthropicCompleteStreamChunkTransform: (
           text: parsedChunk.completion,
           index: 0,
           logprobs: null,
-          finish_reason: parsedChunk.stop_reason,
+          finish_reason: finishReason,
         },
       ],
     })}` + '\n\n'
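For reference, a chunk serialized by the template above would come out roughly like this (values are illustrative, fields elided between the hunks shown here are marked with an ellipsis, and the trailing '\n\n' terminates the SSE event):

data: {"id":"<log_id>", ... ,"choices":[{"text":" Hello","index":0,"logprobs":null,"finish_reason":"stop"}]}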