
Commit a321f88: fix aio
1 parent 537978c

3 files changed: 42 additions, 4 deletions

The commit widens is_prompt_supported on the Google and OpenAI LLM clients to accept the full chat_completion keyword set, adds tools and tool_choice (plus a str option for response_format) to the Google client's chat_completion, and points the CI ResolveIssue run at issue 1039 instead of issue 20.

.github/workflows/test.yml (1 addition, 1 deletion)

@@ -148,7 +148,7 @@ jobs:
           poetry run patchwork ResolveIssue --log debug \
             --patched_api_key=${{ secrets.PATCHED_API_KEY }} \
             --github_api_key=${{ secrets.SCM_GITHUB_KEY }} \
-            --issue_url=https://github.com/patched-codes/patchwork/issues/20 \
+            --issue_url=https://github.com/patched-codes/patchwork/issues/1039 \
             --disable_telemetry

  main-test:

patchwork/common/client/llm/google.py (23 additions, 2 deletions)

@@ -16,6 +16,8 @@
 from openai.types.chat import (
     ChatCompletionMessage,
     ChatCompletionMessageParam,
+    ChatCompletionToolChoiceOptionParam,
+    ChatCompletionToolParam,
     completion_create_params,
 )
 from openai.types.chat.chat_completion import ChatCompletion, Choice

@@ -55,7 +57,24 @@ def get_models(self) -> set[str]:
     def is_model_supported(self, model: str) -> bool:
         return model in self.get_models()

-    def is_prompt_supported(self, messages: Iterable[ChatCompletionMessageParam], model: str) -> int:
+    def is_prompt_supported(
+        self,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: str,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        response_format: dict | completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+    ) -> int:
         system, chat = self.__openai_messages_to_google_messages(messages)
         gen_model = generativeai.GenerativeModel(model_name=model, system_instruction=system)
         try:

@@ -96,9 +115,11 @@ def chat_completion(
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        response_format: str | completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
     ) -> ChatCompletion:
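With this change, is_prompt_supported accepts the same keyword arguments as chat_completion, so a caller can build one argument set and pass it to both; the check simply ignores the generation parameters it does not need. The commit message ("fix aio") suggests the likely motivation: a dispatching client that forwards a uniform keyword set to every provider would otherwise crash on the narrower signatures. A minimal sketch of that calling pattern, assuming an already-constructed client instance and treating a positive return value as "prompt fits"; the model name, messages, and tool schema below are illustrative placeholders, not from the commit:

# Illustrative fan-out: one kwargs dict feeds both the support check and
# the completion call. All concrete values are made-up placeholders.
messages = [{"role": "user", "content": "What's the weather in Paris?"}]
kwargs = dict(
    model="gemini-1.5-flash",  # placeholder model name
    temperature=0.2,
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # hypothetical tool
                "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
            },
        }
    ],
    tool_choice="auto",
)

# Before this commit, is_prompt_supported() only took (messages, model), so
# forwarding the full kwargs dict raised TypeError: unexpected keyword argument.
if client.is_prompt_supported(messages=messages, **kwargs) > 0:
    completion = client.chat_completion(messages=messages, **kwargs)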

patchwork/common/client/llm/openai_.py (18 additions, 1 deletion)

@@ -63,7 +63,24 @@ def is_model_supported(self, model: str) -> bool:
     def __get_model_limits(self, model: str) -> int:
         return self.__MODEL_LIMITS.get(model, 128_000)

-    def is_prompt_supported(self, messages: Iterable[ChatCompletionMessageParam], model: str) -> int:
+    def is_prompt_supported(
+        self,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: str,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        response_format: dict | completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+    ) -> int:
         # might not implement model endpoint
         if self.__is_not_openai_url():
             return 1
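The return 1 above is a guard: against a non-OpenAI base URL the models endpoint, and hence the limit table, may not apply, so the client reports the prompt as supported rather than failing the check. For the OpenAI path, a plausible shape of the rest of the method is a token estimate compared against __get_model_limits and its 128_000-token default. The sketch below is an assumption about that shape, not the client's actual code, and estimate_tokens is a made-up stand-in for its real tokenizer:

from typing import Iterable

def estimate_tokens(text: str) -> int:
    # Crude stand-in for a real tokenizer: roughly 4 characters per token.
    return max(1, len(text) // 4)

def remaining_budget(messages: Iterable[dict], model: str, limits: dict[str, int]) -> int:
    """Sketch of the check's likely shape: a positive result means the prompt fits.

    The lookup with a 128_000 default mirrors __get_model_limits above; the
    character-based estimate is an assumption, not the client's tokenizer.
    """
    limit = limits.get(model, 128_000)
    used = sum(estimate_tokens(str(m.get("content") or "")) for m in messages)
    return limit - used

# Example: a short prompt leaves nearly the whole budget.
# remaining_budget([{"role": "user", "content": "hi"}], "gpt-4o", {"gpt-4o": 128_000})

A caller would treat a positive remaining_budget as "prompt fits", which matches the integer return type that is_prompt_supported declares.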
