
Commit 8f8c148

fix (#1066)
* fix
* bump version
1 parent b778445 · commit 8f8c148

File tree

2 files changed: +54 -51 lines


patchwork/common/client/llm/anthropic.py

53 additions, 50 deletions
@@ -128,22 +128,22 @@ def __adapt_input_messages(self, messages: Iterable[ChatCompletionMessageParam])
         return new_messages
 
     def __adapt_chat_completion_request(
-        self,
-        messages: Iterable[ChatCompletionMessageParam],
-        model: str,
-        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
-        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
-        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
-        n: Optional[int] | NotGiven = NOT_GIVEN,
-        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
-        temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
-        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
-        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        self,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: str,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
     ):
         system: Union[str, Iterable[TextBlockParam]] | NotGiven = NOT_GIVEN
         adapted_messages = self.__adapt_input_messages(messages)

@@ -171,14 +171,17 @@ def __adapt_chat_completion_request(
         elif tool_choice_type == "none":
             tool_choice = NOT_GIVEN
 
+        anthropic_tools = NOT_GIVEN
+        if tools is not None and tools is not NOT_GIVEN:
+            anthropic_tools = [tool.get("function") for tool in tools if tool.get("function") is not None]
         input_kwargs = dict(
             messages=adapted_messages,
             system=system,
             max_tokens=default_max_token if max_tokens is None or max_tokens is NOT_GIVEN else max_tokens,
             model=model,
             stop_sequences=[stop] if isinstance(stop, str) else stop,
             temperature=temperature,
-            tools=[tool.get("function") for tool in tools if tool.get("function") is not None],
+            tools=anthropic_tools,
             tool_choice=tool_choice,
             top_p=top_p,
         )
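
The functional fix in this file is the hunk above: the tools comprehension previously ran unconditionally, so a request without tools (where tools is the NOT_GIVEN sentinel) iterated over a non-iterable sentinel; it is now computed only when real tools are supplied. (The removed and re-added signature lines are textually identical in this view, consistent with a whitespace-only re-indent.) Below is a minimal, self-contained sketch of that failure mode, using a hypothetical stand-in for the SDK's NOT_GIVEN sentinel rather than the actual patchwork code:

# Sketch of the bug fixed above (hypothetical stand-ins, not patchwork code):
# the SDK's NOT_GIVEN sentinel is not iterable, so the old unconditional
# comprehension raised TypeError whenever no tools were passed.

class NotGiven:
    """Stand-in for the openai/anthropic SDK NOT_GIVEN sentinel."""

NOT_GIVEN = NotGiven()

def adapt_tools_old(tools):
    # Old behavior: always iterates, even when tools is the sentinel.
    return [tool.get("function") for tool in tools if tool.get("function") is not None]

def adapt_tools_new(tools):
    # Fixed behavior: pass the sentinel through untouched.
    if tools is None or tools is NOT_GIVEN:
        return NOT_GIVEN
    return [tool.get("function") for tool in tools if tool.get("function") is not None]

# adapt_tools_old(NOT_GIVEN)  # -> TypeError: 'NotGiven' object is not iterable
assert adapt_tools_new(NOT_GIVEN) is NOT_GIVEN
assert adapt_tools_new([{"function": {"name": "get_weather"}}]) == [{"name": "get_weather"}]

Keeping the sentinel intact also preserves the distinction the client libraries draw between "parameter omitted" (NOT_GIVEN) and "parameter set to an empty list".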
@@ -204,22 +207,22 @@ def is_model_supported(self, model: str) -> bool:
         return model in self.__definitely_allowed_models or model.startswith(self.__allowed_model_prefix)
 
     def is_prompt_supported(
-        self,
-        messages: Iterable[ChatCompletionMessageParam],
-        model: str,
-        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
-        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
-        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
-        n: Optional[int] | NotGiven = NOT_GIVEN,
-        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
-        temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
-        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
-        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        self,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: str,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
     ) -> int:
         model_limit = self.__get_model_limit(model)
         input_kwargs = self.__adapt_chat_completion_request(
@@ -248,27 +251,27 @@ def is_prompt_supported(
         return model_limit - message_token_count.input_tokens
 
     def truncate_messages(
-        self, messages: Iterable[ChatCompletionMessageParam], model: str
+        self, messages: Iterable[ChatCompletionMessageParam], model: str
     ) -> Iterable[ChatCompletionMessageParam]:
         return self._truncate_messages(self, messages, model)
 
     def chat_completion(
-        self,
-        messages: Iterable[ChatCompletionMessageParam],
-        model: str,
-        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
-        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
-        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
-        n: Optional[int] | NotGiven = NOT_GIVEN,
-        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
-        temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
-        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
-        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        self,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: str,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
     ) -> ChatCompletion:
         input_kwargs = self.__adapt_chat_completion_request(
             messages=messages,

pyproject.toml

1 addition, 1 deletion
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "patchwork-cli"
-version = "0.0.80"
+version = "0.0.81"
 description = ""
 authors = ["patched.codes"]
 license = "AGPL"
