@@ -1,9 +1,16 @@
 import { ANTHROPIC } from '../../globals';
 import { Params } from '../../types/requestBody';
 import { CompletionResponse, ErrorResponse, ProviderConfig } from '../types';
-import { generateInvalidProviderResponseError } from '../utils';
+import {
+  generateInvalidProviderResponseError,
+  transformFinishReason,
+} from '../utils';
+import {
+  ANTHROPIC_STOP_REASON,
+  AnthropicStreamState,
+  AnthropicErrorResponse,
+} from './types';
 import { AnthropicErrorResponseTransform } from './utils';
-import { AnthropicErrorResponse } from './types';
 
 // TODO: this configuration does not enforce the maximum token limit for the input parameter. If you want to enforce this, you might need to add a custom validation function or a max property to the ParameterConfig interface, and then use it in the input configuration. However, this might be complex because the token count is not a simple length check, but depends on the specific tokenization method used by the model.
 
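
A note on the TODO above: an input-token cap cannot be a plain length check, since it depends on the model's tokenizer. A minimal sketch of what a pre-flight guard could look like, assuming a rough characters-per-token heuristic; the helper and constant below are hypothetical, not part of ParameterConfig:

// Hypothetical sketch: ParameterConfig has no `max` hook today, and a real
// cap would need the model's actual tokenizer rather than this heuristic.
const APPROX_CHARS_PER_TOKEN = 4; // rough rule of thumb, not a tokenizer

function exceedsTokenBudget(prompt: string, maxInputTokens: number): boolean {
  const approxTokens = Math.ceil(prompt.length / APPROX_CHARS_PER_TOKEN);
  return approxTokens > maxInputTokens;
}
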
@@ -57,7 +64,7 @@ export const AnthropicCompleteConfig: ProviderConfig = {
 
 interface AnthropicCompleteResponse {
   completion: string;
-  stop_reason: string;
+  stop_reason: ANTHROPIC_STOP_REASON;
   model: string;
   truncated: boolean;
   stop: null | string;
@@ -68,10 +75,20 @@ interface AnthropicCompleteResponse {
 // TODO: The token calculation is wrong atm
 export const AnthropicCompleteResponseTransform: (
   response: AnthropicCompleteResponse | AnthropicErrorResponse,
-  responseStatus: number
-) => CompletionResponse | ErrorResponse = (response, responseStatus) => {
-  if (responseStatus !== 200 && 'error' in response) {
-    return AnthropicErrorResponseTransform(response);
+  responseStatus: number,
+  responseHeaders: Headers,
+  strictOpenAiCompliance: boolean
+) => CompletionResponse | ErrorResponse = (
+  response,
+  responseStatus,
+  _responseHeaders,
+  strictOpenAiCompliance
+) => {
+  if (responseStatus !== 200) {
+    const errorResponse = AnthropicErrorResponseTransform(
+      response as AnthropicErrorResponse
+    );
+    if (errorResponse) return errorResponse;
   }
 
   if ('completion' in response) {
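
For context, `transformFinishReason` (newly imported above) maps Anthropic stop reasons onto OpenAI-style finish reasons. Its real implementation lives in `../utils`; the sketch below is an assumed simplification, with both the mapping table and the strict-mode fallback guessed for illustration:

// Illustrative sketch only; the actual mapping lives in '../utils'.
type OpenAIFinishReason = 'stop' | 'length' | 'tool_calls';

const STOP_REASON_MAP: Record<string, OpenAIFinishReason> = {
  end_turn: 'stop',
  stop_sequence: 'stop',
  max_tokens: 'length',
  tool_use: 'tool_calls',
};

function transformFinishReasonSketch(
  stopReason: string,
  strictOpenAiCompliance: boolean
): string {
  const mapped = STOP_REASON_MAP[stopReason];
  if (mapped) return mapped;
  // When strict compliance is on, fall back to a valid OpenAI value;
  // otherwise pass the provider's reason through unchanged.
  return strictOpenAiCompliance ? 'stop' : stopReason;
}
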
@@ -86,7 +103,10 @@ export const AnthropicCompleteResponseTransform: (
           text: response.completion,
           index: 0,
           logprobs: null,
-          finish_reason: response.stop_reason,
+          finish_reason: transformFinishReason(
+            response.stop_reason,
+            strictOpenAiCompliance
+          ),
         },
       ],
     };
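
With the widened signature, call sites now thread the response headers and the compliance flag through the transform. A hypothetical invocation; the sample payload and casts are purely illustrative:

// Hypothetical call site; field values are made up and the cast skips
// the interface's remaining fields (e.g. log_id) for brevity.
const sample = {
  completion: 'Hello!',
  stop_reason: 'stop_sequence',
  model: 'claude-2',
  truncated: false,
  stop: null,
} as unknown as AnthropicCompleteResponse;

const result = AnthropicCompleteResponseTransform(
  sample,
  200,
  new Headers(),
  true // strictOpenAiCompliance
);
// On success, choices[0].finish_reason carries the mapped OpenAI-style value.
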
@@ -96,8 +116,16 @@ export const AnthropicCompleteResponseTransform: (
 };
 
 export const AnthropicCompleteStreamChunkTransform: (
-  response: string
-) => string | undefined = (responseChunk) => {
+  response: string,
+  fallbackId: string,
+  streamState: AnthropicStreamState,
+  strictOpenAiCompliance: boolean
+) => string | undefined = (
+  responseChunk,
+  fallbackId,
+  streamState,
+  strictOpenAiCompliance
+) => {
   let chunk = responseChunk.trim();
   if (chunk.startsWith('event: ping')) {
     return;
@@ -110,6 +138,9 @@ export const AnthropicCompleteStreamChunkTransform: (
     return chunk;
   }
   const parsedChunk: AnthropicCompleteResponse = JSON.parse(chunk);
+  const finishReason = parsedChunk.stop_reason
+    ? transformFinishReason(parsedChunk.stop_reason, strictOpenAiCompliance)
+    : null;
   return (
     `data: ${JSON.stringify({
       id: parsedChunk.log_id,
@@ -122,7 +153,7 @@ export const AnthropicCompleteStreamChunkTransform: (
           text: parsedChunk.completion,
           index: 0,
           logprobs: null,
-          finish_reason: parsedChunk.stop_reason,
+          finish_reason: finishReason,
         },
       ],
     })}` + '\n\n'
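
Putting the stream pieces together, a single serialized Anthropic event would round-trip roughly as below. The payload, the empty stream-state, and the assumption that the elided lines strip the `data: ` prefix before JSON.parse are all illustrative:

// Illustrative only; AnthropicStreamState's shape lives in './types'.
const state = {} as AnthropicStreamState;
const line = AnthropicCompleteStreamChunkTransform(
  'data: {"completion":" world","stop_reason":"stop_sequence","model":"claude-2","log_id":"abc"}',
  'portkey-req-1', // fallbackId, assumed to be used when log_id is absent
  state,
  true // strictOpenAiCompliance
);
// `line` should be an OpenAI-style SSE chunk ending in '\n\n', with
// finish_reason mapped from stop_reason (null while mid-stream).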