Commit ef27fec

Fix pytype (#39)

* Squashed commit of the following:

    commit de62d09
    Author: Mark Daoust <[email protected]>
    Date:   Tue Jun 6 17:26:10 2023 -0700

        Revert discuss types (tests failing with 3.9).

    commit 7e2ccd8
    Author: Mark Daoust <[email protected]>
    Date:   Fri Jun 2 13:59:38 2023 -0700

        format

    commit 70cc6d5
    Author: Mark Daoust <[email protected]>
    Date:   Fri Jun 2 13:47:20 2023 -0700

        Add future annotations for py3.9

    commit d74f436
    Author: Mark Daoust <[email protected]>
    Date:   Fri Jun 2 13:30:54 2023 -0700

        Update Unions

    commit 4d2e710
    Author: Mark Daoust <[email protected]>
    Date:   Fri Jun 2 13:23:37 2023 -0700

        Update Unions

    commit 3d93a3e
    Author: Mark Daoust <[email protected]>
    Date:   Thu Jun 1 15:49:15 2023 -0700

        Pytype passes with python 3.10 + modernize some annotations

* Debug: revert to Union for non-lazy annotations

* format

1 parent 8cc54d1  commit ef27fec

17 files changed, +118 -98 lines changed
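The whole commit applies one pattern: add `from __future__ import annotations` (PEP 563) so that PEP 604 `X | None` annotations parse on Python 3.9 (lazy annotations are stored as strings, never evaluated at import time), while any union that *is* evaluated at runtime keeps `typing.Union`. A minimal, repo-independent sketch of the distinction:

```python
from __future__ import annotations

from typing import Union


def greet(name: str | None = None) -> str:
    # Fine on Python 3.9: with lazy annotations this hint is never evaluated.
    return f"Hello, {name or 'world'}!"


# A module-level alias is an ordinary runtime expression, so the future
# import does not defer it; it must keep `Union` to import on 3.9.
NameOptions = Union[str, None]

print(greet())          # Hello, world!
print(greet("pytype"))  # Hello, pytype!
```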

google/generativeai/__init__.py

Lines changed: 1 addition & 0 deletions

@@ -65,6 +65,7 @@
 ```
 
 """
+from __future__ import annotations
 
 from google.generativeai import types
 from google.generativeai import version

google/generativeai/client.py

Lines changed: 6 additions & 5 deletions

@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from __future__ import annotations
 
 import os
 from typing import cast, Optional, Union

@@ -35,17 +36,17 @@
 
 def configure(
     *,
-    api_key: Optional[str] = None,
-    credentials: Union[ga_credentials.Credentials, dict, None] = None,
+    api_key: str | None = None,
+    credentials: ga_credentials.Credentials | dict | None = None,
     # The user can pass a string to choose `rest` or `grpc` or 'grpc_asyncio'.
     # See `_transport_registry` in `DiscussServiceClientMeta`.
     # Since the transport classes align with the client classes it wouldn't make
     # sense to accept a `Transport` object here even though the client classes can.
     # We could accept a dict since all the `Transport` classes take the same args,
     # but that seems rare. Users that need it can just switch to the low level API.
-    transport: Union[str, None] = None,
-    client_options: Union[client_options_lib.ClientOptions, dict, None] = None,
-    client_info: Optional[gapic_v1.client_info.ClientInfo] = None,
+    transport: str | None = None,
+    client_options: client_options_lib.ClientOptions | dict | None = None,
+    client_info: gapic_v1.client_info.ClientInfo | None = None,
 ):
     """Captures default client configuration.
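For context, a hypothetical call site for the updated keyword-only signature (the key value is a placeholder):

```python
import google.generativeai as genai

# `transport` takes "rest", "grpc", or "grpc_asyncio", per the comment above.
genai.configure(api_key="YOUR_API_KEY", transport="grpc")
```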

google/generativeai/discuss.py

Lines changed: 45 additions & 44 deletions

@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from __future__ import annotations
 
 import dataclasses
 import sys

@@ -150,9 +151,9 @@ def _make_examples(examples: discuss_types.ExamplesOptions) -> List[glm.Example]
 def _make_message_prompt_dict(
     prompt: discuss_types.MessagePromptOptions = None,
     *,
-    context: Optional[str] = None,
-    examples: Optional[discuss_types.ExamplesOptions] = None,
-    messages: Optional[discuss_types.MessagesOptions] = None,
+    context: str | None = None,
+    examples: discuss_types.ExamplesOptions | None = None,
+    messages: discuss_types.MessagesOptions | None = None,
 ) -> glm.MessagePrompt:
     if prompt is None:
         prompt = dict(

@@ -196,9 +197,9 @@ def _make_message_prompt_dict(
 def _make_message_prompt(
     prompt: discuss_types.MessagePromptOptions = None,
     *,
-    context: Optional[str] = None,
-    examples: Optional[discuss_types.ExamplesOptions] = None,
-    messages: Optional[discuss_types.MessagesOptions] = None,
+    context: str | None = None,
+    examples: discuss_types.ExamplesOptions | None = None,
+    messages: discuss_types.MessagesOptions | None = None,
 ) -> glm.MessagePrompt:
     prompt = _make_message_prompt_dict(
         prompt=prompt, context=context, examples=examples, messages=messages

@@ -208,15 +209,15 @@ def _make_message_prompt(
 
 def _make_generate_message_request(
     *,
-    model: Optional[model_types.ModelNameOptions],
-    context: Optional[str] = None,
-    examples: Optional[discuss_types.ExamplesOptions] = None,
-    messages: Optional[discuss_types.MessagesOptions] = None,
-    temperature: Optional[float] = None,
-    candidate_count: Optional[int] = None,
-    top_p: Optional[float] = None,
-    top_k: Optional[float] = None,
-    prompt: Optional[discuss_types.MessagePromptOptions] = None,
+    model: model_types.ModelNameOptions | None,
+    context: str | None = None,
+    examples: discuss_types.ExamplesOptions | None = None,
+    messages: discuss_types.MessagesOptions | None = None,
+    temperature: float | None = None,
+    candidate_count: int | None = None,
+    top_p: float | None = None,
+    top_k: float | None = None,
+    prompt: discuss_types.MessagePromptOptions | None = None,
 ) -> glm.GenerateMessageRequest:
     model = model_types.make_model_name(model)
 

@@ -247,16 +248,16 @@ def inner(f):
 
 def chat(
     *,
-    model: Optional[model_types.ModelNameOptions] = "models/chat-bison-001",
-    context: Optional[str] = None,
-    examples: Optional[discuss_types.ExamplesOptions] = None,
-    messages: Optional[discuss_types.MessagesOptions] = None,
-    temperature: Optional[float] = None,
-    candidate_count: Optional[int] = None,
-    top_p: Optional[float] = None,
-    top_k: Optional[float] = None,
-    prompt: Optional[discuss_types.MessagePromptOptions] = None,
-    client: Optional[glm.DiscussServiceClient] = None,
+    model: model_types.ModelNameOptions | None = "models/chat-bison-001",
+    context: str | None = None,
+    examples: discuss_types.ExamplesOptions | None = None,
+    messages: discuss_types.MessagesOptions | None = None,
+    temperature: float | None = None,
+    candidate_count: int | None = None,
+    top_p: float | None = None,
+    top_k: float | None = None,
+    prompt: discuss_types.MessagePromptOptions | None = None,
+    client: glm.DiscussServiceClient | None = None,
 ) -> discuss_types.ChatResponse:
     """Calls the API and returns a `types.ChatResponse` containing the response.
 

@@ -345,16 +346,16 @@ def chat(
 @set_doc(chat.__doc__)
 async def chat_async(
     *,
-    model: Optional[model_types.ModelNameOptions] = None,
-    context: Optional[str] = None,
-    examples: Optional[discuss_types.ExamplesOptions] = None,
-    messages: Optional[discuss_types.MessagesOptions] = None,
-    temperature: Optional[float] = None,
-    candidate_count: Optional[int] = None,
-    top_p: Optional[float] = None,
-    top_k: Optional[float] = None,
-    prompt: Optional[discuss_types.MessagePromptOptions] = None,
-    client: Optional[glm.DiscussServiceAsyncClient] = None,
+    model: model_types.ModelNameOptions | None = None,
+    context: str | None = None,
+    examples: discuss_types.ExamplesOptions | None = None,
+    messages: discuss_types.MessagesOptions | None = None,
+    temperature: float | None = None,
+    candidate_count: int | None = None,
+    top_p: float | None = None,
+    top_k: float | None = None,
+    prompt: discuss_types.MessagePromptOptions | None = None,
+    client: glm.DiscussServiceAsyncClient | None = None,
 ) -> discuss_types.ChatResponse:
     request = _make_generate_message_request(
         model=model,

@@ -380,7 +381,7 @@ async def chat_async(
 @set_doc(discuss_types.ChatResponse.__doc__)
 @dataclasses.dataclass(**DATACLASS_KWARGS, init=False)
 class ChatResponse(discuss_types.ChatResponse):
-    _client: Optional[glm.DiscussServiceClient] = dataclasses.field(
+    _client: glm.DiscussServiceClient | None = dataclasses.field(
         default=lambda: None, repr=False
     )

@@ -390,7 +391,7 @@ def __init__(self, **kwargs):
 
     @property
     @set_doc(discuss_types.ChatResponse.last.__doc__)
-    def last(self) -> Optional[str]:
+    def last(self) -> str | None:
         if self.messages[-1]:
             return self.messages[-1]["content"]
         else:

@@ -445,7 +446,7 @@ async def reply_async(
 def _build_chat_response(
     request: glm.GenerateMessageRequest,
     response: glm.GenerateMessageResponse,
-    client: Union[glm.DiscussServiceClient, glm.DiscussServiceAsyncClient],
+    client: glm.DiscussServiceClient | glm.DiscussServiceAsyncClient,
 ) -> ChatResponse:
     request = type(request).to_dict(request)
     prompt = request.pop("prompt")

@@ -473,7 +474,7 @@ def _build_chat_response(
 
 def _generate_response(
     request: glm.GenerateMessageRequest,
-    client: Optional[glm.DiscussServiceClient] = None,
+    client: glm.DiscussServiceClient | None = None,
 ) -> ChatResponse:
     if client is None:
         client = get_default_discuss_client()

@@ -485,7 +486,7 @@ def _generate_response(
 
 async def _generate_response_async(
     request: glm.GenerateMessageRequest,
-    client: Optional[glm.DiscussServiceAsyncClient] = None,
+    client: glm.DiscussServiceAsyncClient | None = None,
 ) -> ChatResponse:
     if client is None:
         client = get_default_discuss_async_client()

@@ -498,11 +499,11 @@ async def _generate_response_async(
 def count_message_tokens(
     *,
     prompt: discuss_types.MessagePromptOptions = None,
-    context: Optional[str] = None,
-    examples: Optional[discuss_types.ExamplesOptions] = None,
-    messages: Optional[discuss_types.MessagesOptions] = None,
+    context: str | None = None,
+    examples: discuss_types.ExamplesOptions | None = None,
+    messages: discuss_types.MessagesOptions | None = None,
     model: model_types.ModelNameOptions = DEFAULT_DISCUSS_MODEL,
-    client: Optional[glm.DiscussServiceAsyncClient] = None,
+    client: glm.DiscussServiceAsyncClient | None = None,
 ):
     model = model_types.make_model_name(model)
     prompt = _make_message_prompt(
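As a usage sketch, assuming `configure` was already called with a valid key, the keyword-only `chat` signature above is exercised like this:

```python
import google.generativeai as genai

response = genai.chat(
    model="models/chat-bison-001",  # the default shown in the diff
    context="Answer in one sentence.",
    messages=["Why do lazy annotations help on Python 3.9?"],
)
print(response.last)  # `last` is annotated `str | None` after this change
```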

google/generativeai/docstring_utils.py

Lines changed: 1 addition & 0 deletions

@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from __future__ import annotations
 
 
 def strip_oneof(docstring):

google/generativeai/models.py

Lines changed: 5 additions & 3 deletions

@@ -12,6 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from __future__ import annotations
+
 import re
 from typing import Optional, List
 

@@ -37,9 +39,9 @@ def __init__(
         self,
         *,
         page_size: int,
-        page_token: Optional[str],
+        page_token: str | None,
         models: List[model_types.Model],
-        client: Optional[glm.ModelServiceClient],
+        client: glm.ModelServiceClient | None,
     ):
         self._page_size = page_size
         self._page_token = page_token

@@ -73,7 +75,7 @@ def _list_models(page_size, page_token, client):
 
 
 def list_models(
-    *, page_size: Optional[int] = None, client: Optional[glm.ModelServiceClient] = None
+    *, page_size: int | None = None, client: glm.ModelServiceClient | None = None
 ) -> model_types.ModelsIterable:
     """Lists available models.
 
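A short usage sketch for the updated signature; the `name` attribute is assumed from `model_types.Model`:

```python
import google.generativeai as genai

# Both arguments are optional; page_token handling stays internal.
for model in genai.list_models(page_size=10):
    print(model.name)
```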

google/generativeai/notebook/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -25,7 +25,7 @@ def load_ipython_extension(ipython):
     # Since we're in an interactive environment, make the tables prettier.
     try:
         # pylint: disable-next=g-import-not-at-top
-        from google import colab
+        from google import colab  # type: ignore
 
         colab.data_table.enable_dataframe_formatter()
     except ImportError:
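The `# type: ignore` silences pytype for an import it cannot resolve outside Colab. The general optional-import pattern, sketched independently of this repo:

```python
try:
    from google import colab  # type: ignore  # resolvable only inside Colab
except ImportError:
    colab = None  # degrade gracefully elsewhere
```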

google/generativeai/notebook/flag_def.py

Lines changed: 5 additions & 5 deletions

@@ -35,7 +35,7 @@
 import argparse
 import dataclasses
 import enum
-from typing import Any, Callable, Sequence, Union, Tuple
+from typing import Any, Callable, Sequence, Tuple, Union

@@ -49,10 +49,10 @@
 _DESTTYPES = Union[
     _PARSETYPES,
     enum.Enum,
-    Tuple[str, Callable[[str, str], Any]],  # For --compare_fn
-    Sequence[str],  # For --ground_truth
-    llmfn_inputs_source.LLMFnInputsSource,  # For --inputs
-    llmfn_outputs.LLMFnOutputsSink,  # For --outputs
+    Tuple[str, Callable[[str, str], Any]],  # For --compare_fn
+    Sequence[str],  # For --ground_truth
+    llmfn_inputs_source.LLMFnInputsSource,  # For --inputs
+    llmfn_outputs.LLMFnOutputsSink,  # For --outputs
 ]
 
 # The signature of a function that converts a command line argument from the
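`_DESTTYPES` stays on `typing.Union` for the reason flagged in the commit message: a module-level alias is evaluated at import time, so the future import cannot defer it, and `|` between typing forms only exists on Python 3.10+. A version-gated illustration with hypothetical names:

```python
import sys
from typing import Sequence, Union

DestTypes = Union[str, Sequence[str]]  # evaluated eagerly; valid on 3.9

if sys.version_info >= (3, 10):
    # The PEP 604 spelling of the same alias raises TypeError on 3.9,
    # which is why this file keeps `Union`.
    DestTypes604 = str | Sequence[str]
```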

google/generativeai/notebook/lib/llm_function.py

Lines changed: 11 additions & 2 deletions

@@ -17,7 +17,16 @@
 
 import abc
 import dataclasses
-from typing import AbstractSet, Any, Callable, Iterable, Mapping, Optional, Sequence
+from typing import (
+    AbstractSet,
+    Any,
+    Callable,
+    Iterable,
+    Mapping,
+    Optional,
+    Sequence,
+    Union,
+)
 
 from google.generativeai.notebook.lib import llmfn_input_utils
 from google.generativeai.notebook.lib import llmfn_output_row

@@ -117,7 +126,7 @@ def _generate_prompts(
 
 class LLMFunction(
     Callable[
-        [Optional[llmfn_input_utils.LLMFunctionInputs]],
+        [Union[llmfn_input_utils.LLMFunctionInputs, None]],
         llmfn_outputs.LLMFnOutputs,
     ],
     metaclass=abc.ABCMeta,
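This hunk is the one the commit message calls "revert to Union for non-lazy annotations": base-class arguments are evaluated eagerly when the `class` statement runs, so even with `from __future__ import annotations` a `|` here would raise `TypeError` on Python 3.9. A reduced sketch (hypothetical class, same structure):

```python
from __future__ import annotations

import abc
from typing import Callable, Union


class Fn(
    # A base class is evaluated at class-definition time, so the optional
    # input type must be spelled with `Union` to work on Python 3.9.
    Callable[[Union[str, None]], str],
    metaclass=abc.ABCMeta,
):
    @abc.abstractmethod
    def __call__(self, arg: str | None) -> str:  # annotation only: lazy, OK
        ...
```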

google/generativeai/notebook/lib/llm_function_test.py

Lines changed: 2 additions & 2 deletions

@@ -15,7 +15,7 @@
 """Unittest for llm_function."""
 from __future__ import annotations
 
-from typing import Any, Callable, Optional, Mapping, Sequence
+from typing import Any, Callable, Mapping, Sequence
 
 from absl.testing import absltest
 from google.generativeai.notebook.lib import llm_function

@@ -61,7 +61,7 @@ class LLMFunctionBasicTest(absltest.TestCase):
 
     def _test_is_callable(
         self,
-        llm_fn: Callable[[Optional[Sequence[tuple[str, str]]]], LLMFnOutputs],
+        llm_fn: Callable[[Sequence[tuple[str, str]] | None], LLMFnOutputs],
     ) -> LLMFnOutputs:
         return llm_fn(None)

google/generativeai/notebook/lib/llmfn_input_utils.py

Lines changed: 1 addition & 4 deletions

@@ -24,10 +24,7 @@
 
 _ColumnOrderValuesList = Mapping[str, Sequence[str]]
 
-LLMFunctionInputs = Union[
-    _ColumnOrderValuesList,
-    llmfn_inputs_source.LLMFnInputsSource,
-]
+LLMFunctionInputs = Union[_ColumnOrderValuesList, llmfn_inputs_source.LLMFnInputsSource]
 
 
 def _is_column_order_values_list(inputs: Any) -> bool:
