
Commit dba7920

fix anthropic
1 parent 825b536 commit dba7920

1 file changed

patchwork/common/client/llm/anthropic.py

Lines changed: 50 additions & 49 deletions
@@ -84,6 +84,7 @@ def __get_model_limit(self, model: str) -> int:
         return 200_000 - safety_margin
 
     def __adapt_input_messages(self, messages: Iterable[ChatCompletionMessageParam]) -> list[MessageParam]:
+        system: Union[str, Iterable[TextBlockParam]] | NotGiven = NOT_GIVEN
         new_messages = []
         for message in messages:
             if message.get("role") == "system":
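
Anthropic's Messages API takes the system prompt as a top-level "system" parameter rather than as a chat message with role "system", which is why the adapter declares system before walking the message list. Below is a minimal, self-contained sketch of that reshaping, assuming the method behaves the way the visible lines suggest; adapt_input_messages is a hypothetical free-function stand-in for the private method.

def adapt_input_messages(messages):
    # Hypothetical stand-in for __adapt_input_messages: lift the system
    # turn out of an OpenAI-style message list, because Anthropic takes
    # it as a separate top-level parameter.
    system = None
    new_messages = []
    for message in messages:
        if message.get("role") == "system":
            system = message.get("content")
        else:
            new_messages.append(message)
    return system, new_messages

system, msgs = adapt_input_messages(
    [
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "Hi"},
    ]
)
assert system == "You are terse."
assert msgs == [{"role": "user", "content": "Hi"}]
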
@@ -128,22 +129,22 @@ def __adapt_input_messages(self, messages: Iterable[ChatCompletionMessageParam])
         return new_messages
 
     def __adapt_chat_completion_request(
-            self,
-            messages: Iterable[ChatCompletionMessageParam],
-            model: str,
-            frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-            logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
-            logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
-            max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
-            n: Optional[int] | NotGiven = NOT_GIVEN,
-            presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-            response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
-            stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
-            temperature: Optional[float] | NotGiven = NOT_GIVEN,
-            tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
-            tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-            top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
-            top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        self,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: str,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
     ):
         system: Union[str, Iterable[TextBlockParam]] | NotGiven = NOT_GIVEN
         adapted_messages = self.__adapt_input_messages(messages)
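
For context, the adapted request ultimately has to fit Anthropic's client.messages.create signature, which shares some of these parameters (max_tokens, temperature, top_p, stop as stop_sequences) and has no equivalent for others (frequency_penalty, logit_bias, logprobs, n). The sketch below shows one way such a mapping can look; exactly which parameters the real __adapt_chat_completion_request forwards, renames, or drops is an assumption here.

from anthropic import NOT_GIVEN

def adapt_kwargs(model, max_tokens=NOT_GIVEN, temperature=NOT_GIVEN,
                 top_p=NOT_GIVEN, stop=NOT_GIVEN, **unsupported):
    # Keep what Anthropic understands, rename stop to stop_sequences
    # (always a list), and drop keys with no Anthropic equivalent.
    kwargs = {
        "model": model,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
    }
    if stop is not NOT_GIVEN and stop is not None:
        kwargs["stop_sequences"] = [stop] if isinstance(stop, str) else stop
    return {k: v for k, v in kwargs.items() if v is not NOT_GIVEN}

assert adapt_kwargs("claude-3-5-sonnet-latest", max_tokens=512, stop="END") == {
    "model": "claude-3-5-sonnet-latest",
    "max_tokens": 512,
    "stop_sequences": ["END"],
}
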
@@ -207,22 +208,22 @@ def is_model_supported(self, model: str) -> bool:
         return model in self.__definitely_allowed_models or model.startswith(self.__allowed_model_prefix)
 
     def is_prompt_supported(
-            self,
-            messages: Iterable[ChatCompletionMessageParam],
-            model: str,
-            frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-            logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
-            logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
-            max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
-            n: Optional[int] | NotGiven = NOT_GIVEN,
-            presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-            response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
-            stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
-            temperature: Optional[float] | NotGiven = NOT_GIVEN,
-            tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
-            tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-            top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
-            top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        self,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: str,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
     ) -> int:
         model_limit = self.__get_model_limit(model)
         input_kwargs = self.__adapt_chat_completion_request(
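
Judging by the return expression further down (model_limit - message_token_count.input_tokens), is_prompt_supported reports the remaining token budget: positive means the prompt fits, zero or negative means it overflows the context window. Here is a hedged sketch of that check against the SDK's token-counting endpoint; the client construction and the flat 200_000-token window are assumptions, not taken from this file.

import anthropic

client = anthropic.Anthropic()  # assumes ANTHROPIC_API_KEY is set

def remaining_budget(messages, model, model_limit=200_000):
    # count_tokens measures the prompt without generating anything;
    # the response carries the count as .input_tokens.
    count = client.messages.count_tokens(model=model, messages=messages)
    return model_limit - count.input_tokens

print(remaining_budget([{"role": "user", "content": "Hi"}],
                       "claude-3-5-sonnet-latest") > 0)  # True if it fits
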
@@ -251,27 +252,27 @@ def is_prompt_supported(
         return model_limit - message_token_count.input_tokens
 
     def truncate_messages(
-            self, messages: Iterable[ChatCompletionMessageParam], model: str
+        self, messages: Iterable[ChatCompletionMessageParam], model: str
     ) -> Iterable[ChatCompletionMessageParam]:
         return self._truncate_messages(self, messages, model)
 
     def chat_completion(
-            self,
-            messages: Iterable[ChatCompletionMessageParam],
-            model: str,
-            frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-            logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
-            logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
-            max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
-            n: Optional[int] | NotGiven = NOT_GIVEN,
-            presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-            response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
-            stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
-            temperature: Optional[float] | NotGiven = NOT_GIVEN,
-            tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
-            tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-            top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
-            top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        self,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: str,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
     ) -> ChatCompletion:
         input_kwargs = self.__adapt_chat_completion_request(
             messages=messages,
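
All of these signatures default to NOT_GIVEN (the SDK's sentinel) rather than None, so "caller passed None" and "caller passed nothing at all" stay distinguishable when the kwargs are forwarded. A self-contained sketch of the pattern, with the sentinel inlined so the snippet runs on its own:

class NotGiven:
    def __repr__(self):
        return "NOT_GIVEN"

NOT_GIVEN = NotGiven()

def build_kwargs(temperature=NOT_GIVEN):
    # NOT_GIVEN is omitted from the outgoing request entirely,
    # while an explicit None is forwarded as-is.
    if isinstance(temperature, NotGiven):
        return {}
    return {"temperature": temperature}

assert build_kwargs() == {}
assert build_kwargs(temperature=None) == {"temperature": None}
assert build_kwargs(temperature=0.2) == {"temperature": 0.2}
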
