@@ -79,7 +79,9 @@ def transform_anthropic_to_bedrock_count_tokens(
         else:
             return self._transform_to_invoke_model_format(request_data)
 
-    def _transform_to_converse_format(self, messages: List[Dict[str, Any]]) -> Dict[str, Any]:
+    def _transform_to_converse_format(
+        self, messages: List[Dict[str, Any]]
+    ) -> Dict[str, Any]:
         """Transform to Converse input format."""
         # Extract system messages if present
         system_messages = []
@@ -90,10 +92,7 @@ def _transform_to_converse_format(self, messages: List[Dict[str, Any]]) -> Dict[
                 system_messages.append({"text": message.get("content", "")})
             else:
                 # Transform message content to Bedrock format
-                transformed_message = {
-                    "role": message.get("role"),
-                    "content": []
-                }
+                transformed_message = {"role": message.get("role"), "content": []}
 
                 # Handle content - ensure it's in the correct array format
                 content = message.get("content", "")
@@ -107,38 +106,30 @@ def _transform_to_converse_format(self, messages: List[Dict[str, Any]]) -> Dict[
                 user_messages.append(transformed_message)
 
         # Build the converse input format
-        converse_input = {
-            "messages": user_messages
-        }
+        converse_input = {"messages": user_messages}
 
         # Add system messages if present
         if system_messages:
             converse_input["system"] = system_messages
 
         # Build the complete request
-        return {
-            "input": {
-                "converse": converse_input
-            }
-        }
+        return {"input": {"converse": converse_input}}
 
-    def _transform_to_invoke_model_format(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
+    def _transform_to_invoke_model_format(
+        self, request_data: Dict[str, Any]
+    ) -> Dict[str, Any]:
         """Transform to InvokeModel input format."""
         import json
 
         # For InvokeModel, we need to provide the raw body that would be sent to the model
         # Remove the 'model' field from the body as it's not part of the model input
         body_data = {k: v for k, v in request_data.items() if k != "model"}
 
-        return {
-            "input": {
-                "invokeModel": {
-                    "body": json.dumps(body_data)
-                }
-            }
-        }
+        return {"input": {"invokeModel": {"body": json.dumps(body_data)}}}
 
-    def get_bedrock_count_tokens_endpoint(self, model: str, aws_region_name: str) -> str:
+    def get_bedrock_count_tokens_endpoint(
+        self, model: str, aws_region_name: str
+    ) -> str:
         """
         Construct the AWS Bedrock CountTokens API endpoint using existing LiteLLM functions.
 
@@ -161,8 +152,9 @@ def get_bedrock_count_tokens_endpoint(self, model: str, aws_region_name: str) ->
 
         return endpoint
 
-
-    def transform_bedrock_response_to_anthropic(self, bedrock_response: Dict[str, Any]) -> Dict[str, Any]:
+    def transform_bedrock_response_to_anthropic(
+        self, bedrock_response: Dict[str, Any]
+    ) -> Dict[str, Any]:
         """
         Transform Bedrock CountTokens response to Anthropic format.
 
@@ -178,9 +170,7 @@ def transform_bedrock_response_to_anthropic(self, bedrock_response: Dict[str, An
         """
         input_tokens = bedrock_response.get("inputTokens", 0)
 
-        return {
-            "input_tokens": input_tokens
-        }
+        return {"input_tokens": input_tokens}
 
     def validate_count_tokens_request(self, request_data: Dict[str, Any]) -> None:
         """
@@ -220,4 +210,4 @@ def validate_count_tokens_request(self, request_data: Dict[str, Any]) -> None:
             # For InvokeModel format, we need at least some content to count tokens
             # The content structure varies by model, so we do minimal validation
             if len(request_data) <= 1:  # Only has 'model' field
-                raise ValueError("Request must contain content to count tokens")
\ No newline at end of file
+                raise ValueError("Request must contain content to count tokens")