
Commit b502765

lint
1 parent e187221

5 files changed: +33 / -37 lines changed


patchwork/common/client/llm/aio.py

Lines changed: 0 additions & 1 deletion
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import os
-from logging import getLogger
 
 from openai.types.chat import (
     ChatCompletion,
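This hunk removes an import that is no longer used, the kind of issue flake8/ruff reports as F401 ("imported but unused"); whether that specific tool produced the fix is not shown in the commit. For reference, a sketch of aio.py's resulting import header, using only the lines visible in the hunk:

from __future__ import annotations

import os

from openai.types.chat import (
    ChatCompletion,  # further imported names are truncated in the hunk
)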

patchwork/common/client/llm/google.py

Lines changed: 16 additions & 16 deletions
(In this hunk the 16 deleted lines and the 16 added lines carry identical text; the change re-indents the is_prompt_supported signature and touches nothing else, consistent with the "lint" commit message. Only the re-indented lines are shown below; the exact original whitespace is not recoverable from this rendering, so standard 8-space continuation indentation is assumed.)

@@ -58,22 +58,22 @@ def is_model_supported(self, model: str) -> bool:
         return model in self.get_models()
 
     def is_prompt_supported(
+        self,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: str,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        response_format: dict | completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
     ) -> int:
         system, chat = self.__openai_messages_to_google_messages(messages)
         gen_model = generativeai.GenerativeModel(model_name=model, system_instruction=system)
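The surrounding context lines suggest is_prompt_supported converts the OpenAI-style messages to Google's format and builds a generativeai.GenerativeModel, presumably to count the prompt's tokens against the model's input limit (the method returns an int). A minimal, self-contained sketch of that token-counting idea with the google-generativeai SDK, offered as an illustration under that assumption rather than the repository's implementation; the model name and prompt are examples:

# Illustration only: count tokens for a prompt with google-generativeai.
import google.generativeai as generativeai

generativeai.configure(api_key="...")  # placeholder credentials

gen_model = generativeai.GenerativeModel(
    model_name="gemini-1.5-flash",  # example model
    system_instruction="You are a helpful assistant.",
)
count = gen_model.count_tokens(["Summarise the open issues in this repository."])
print(count.total_tokens)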

patchwork/common/client/llm/openai_.py

Lines changed: 16 additions & 16 deletions
(As in google.py above, the deleted and added lines here are textually identical; the hunk only re-indents the is_prompt_supported signature. Only the re-indented lines are shown.)

@@ -64,22 +64,22 @@ def __get_model_limits(self, model: str) -> int:
         return self.__MODEL_LIMITS.get(model, 128_000)
 
     def is_prompt_supported(
+        self,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: str,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        response_format: dict | completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
     ) -> int:
         # might not implement model endpoint
         if self.__is_not_openai_url():
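The context lines show this client looking up a per-model limit with a 128_000-token fallback and skipping the check for non-OpenAI base URLs. A rough illustration of checking a prompt against such a limit, using tiktoken as the tokenizer (an assumption made for demonstration; the commit does not show which tokenizer the repository uses):

# Illustration only: compare a prompt's token count with a per-model limit.
import tiktoken

MODEL_LIMITS = {"gpt-4o": 128_000, "gpt-4o-mini": 128_000}  # example table, not the real __MODEL_LIMITS

def fits_context(prompt: str, model: str) -> bool:
    limit = MODEL_LIMITS.get(model, 128_000)  # mirrors the 128_000 fallback in the context line
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")  # generic fallback encoding
    return len(encoding.encode(prompt)) <= limit

print(fits_context("Summarise the open issues in this repository.", "gpt-4o"))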

patchwork/common/multiturn_strategy/multiturn_strategy.py

Lines changed: 0 additions & 2 deletions
@@ -12,8 +12,6 @@
 
 from patchwork.common.tools import Tool
 
-logger = logging.getLogger("patched")
-
 
 class MultiturnStrategy(ABC):
     def __init__(self, tool_set: dict[str, Tool], limit: int | None = None, *args, **kwargs):
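The module-level logger is dropped here, presumably because it is unused in this module (consistent with the "lint" commit message). A sketch of the module's top after the change, limited to what the hunk shows; the abc import is an assumption, since it sits above the hunk:

from abc import ABC  # assumed import, not visible in the hunk

from patchwork.common.tools import Tool


class MultiturnStrategy(ABC):
    def __init__(self, tool_set: dict[str, Tool], limit: int | None = None, *args, **kwargs):
        ...  # body not shown in this hunk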

patchwork/patchflows/ResolveIssue/ResolveIssue.py

Lines changed: 1 addition & 2 deletions
@@ -5,8 +5,7 @@
 from patchwork.common.utils.progress_bar import PatchflowProgressBar
 from patchwork.common.utils.step_typing import validate_steps_with_inputs
 from patchwork.step import Step
-from patchwork.steps import PR, ReadIssues
-from patchwork.steps import FixIssue
+from patchwork.steps import PR, FixIssue, ReadIssues
 
 _DEFAULT_INPUT_FILE = Path(__file__).parent / "defaults.yml"
 
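The two separate from patchwork.steps imports are collapsed into one alphabetically sorted statement, the form an import sorter such as isort (or Ruff's isort-compatible rules) produces; which tool the repository actually runs is not visible in this commit. The resulting import block, as the hunk shows it:

from patchwork.common.utils.progress_bar import PatchflowProgressBar
from patchwork.common.utils.step_typing import validate_steps_with_inputs
from patchwork.step import Step
from patchwork.steps import PR, FixIssue, ReadIssues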