Skip to content

Commit 837d237

Browse files
Addition of docstrings to the files (#61)
* update model.py * more * Add back-ticks, remove some redundant types. * update model.py 1 * fomatting * update text.py * Formatting fixes --------- Co-authored-by: Mark Daoust <[email protected]>
1 parent 97dd747 commit 837d237

File tree

4 files changed

+154
-4
lines changed

4 files changed

+154
-4
lines changed

google/generativeai/discuss.py

Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@
3030

3131

3232
def _make_message(content: discuss_types.MessageOptions) -> glm.Message:
33+
"""Creates a `glm.Message` object from the provided content."""
3334
if isinstance(content, glm.Message):
3435
return content
3536
if isinstance(content, str):
@@ -39,6 +40,20 @@ def _make_message(content: discuss_types.MessageOptions) -> glm.Message:
3940

4041

4142
def _make_messages(messages: discuss_types.MessagesOptions) -> List[glm.Message]:
43+
"""
44+
Creates a list of `glm.Message` objects from the provided messages.
45+
46+
This function takes a variety of message content inputs, such as strings, dictionaries,
47+
or `glm.Message` objects, and creates a list of `glm.Message` objects. It ensures that
48+
the authors of the messages alternate appropriately. If authors are not provided,
49+
default authors are assigned based on their position in the list.
50+
51+
Args:
52+
messages: The messages to convert.
53+
54+
Returns:
55+
A list of `glm.Message` objects with alternating authors.
56+
"""
4257
if isinstance(messages, (str, dict, glm.Message)):
4358
messages = [_make_message(messages)]
4459
else:
@@ -71,6 +86,7 @@ def _make_messages(messages: discuss_types.MessagesOptions) -> List[glm.Message]
7186

7287

7388
def _make_example(item: discuss_types.ExampleOptions) -> glm.Example:
89+
"""Creates a `glm.Example` object from the provided item."""
7490
if isinstance(item, glm.Example):
7591
return item
7692

@@ -91,6 +107,21 @@ def _make_example(item: discuss_types.ExampleOptions) -> glm.Example:
91107
def _make_examples_from_flat(
92108
examples: List[discuss_types.MessageOptions],
93109
) -> List[glm.Example]:
110+
"""
111+
Creates a list of `glm.Example` objects from a list of message options.
112+
113+
This function takes a list of `discuss_types.MessageOptions` and pairs them into
114+
`glm.Example` objects. The input examples must be in pairs to create valid examples.
115+
116+
Args:
117+
examples: The list of `discuss_types.MessageOptions`.
118+
119+
Returns:
120+
A list of `glm.Example` objects created by pairing up the provided messages.
121+
122+
Raises:
123+
ValueError: If the provided list of examples is not of even length.
124+
"""
94125
if len(examples) % 2 != 0:
95126
raise ValueError(
96127
textwrap.dedent(
@@ -116,6 +147,19 @@ def _make_examples_from_flat(
116147

117148

118149
def _make_examples(examples: discuss_types.ExamplesOptions) -> List[glm.Example]:
150+
"""
151+
Creates a list of `glm.Example` objects from the provided examples.
152+
153+
This function takes various types of example content inputs and creates a list
154+
of `glm.Example` objects. It handles the conversion of different input types and ensures
155+
the appropriate structure for creating valid examples.
156+
157+
Args:
158+
examples: The examples to convert.
159+
160+
Returns:
161+
A list of `glm.Example` objects created from the provided examples.
162+
"""
119163
if isinstance(examples, glm.Example):
120164
return [examples]
121165

@@ -155,6 +199,23 @@ def _make_message_prompt_dict(
155199
examples: discuss_types.ExamplesOptions | None = None,
156200
messages: discuss_types.MessagesOptions | None = None,
157201
) -> glm.MessagePrompt:
202+
"""
203+
Creates a `glm.MessagePrompt` object from the provided prompt components.
204+
205+
This function constructs a `glm.MessagePrompt` object using the provided `context`, `examples`,
206+
or `messages`. It ensures the proper structure and handling of the input components.
207+
208+
Either pass a `prompt` or its components `context`, `examples`, and `messages`.
209+
210+
Args:
211+
prompt: The complete prompt components.
212+
context: The context for the prompt.
213+
examples: The examples for the prompt.
214+
messages: The messages for the prompt.
215+
216+
Returns:
217+
A `glm.MessagePrompt` object created from the provided prompt components.
218+
"""
158219
if prompt is None:
159220
prompt = dict(
160221
context=context,
@@ -201,6 +262,7 @@ def _make_message_prompt(
201262
examples: discuss_types.ExamplesOptions | None = None,
202263
messages: discuss_types.MessagesOptions | None = None,
203264
) -> glm.MessagePrompt:
265+
"""Creates a `glm.MessagePrompt` object from the provided prompt components."""
204266
prompt = _make_message_prompt_dict(
205267
prompt=prompt, context=context, examples=examples, messages=messages
206268
)
@@ -219,6 +281,7 @@ def _make_generate_message_request(
219281
top_k: float | None = None,
220282
prompt: discuss_types.MessagePromptOptions | None = None,
221283
) -> glm.GenerateMessageRequest:
284+
"""Creates a `glm.GenerateMessageRequest` object for generating messages."""
222285
model = model_types.make_model_name(model)
223286

224287
prompt = _make_message_prompt(
@@ -236,6 +299,8 @@ def _make_generate_message_request(
236299

237300

238301
def set_doc(doc):
302+
"""A decorator to set the docstring of a function."""
303+
239304
def inner(f):
240305
f.__doc__ = doc
241306
return f

google/generativeai/models.py

Lines changed: 43 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
from __future__ import annotations
1616

1717
import re
18-
from typing import Optional, List
18+
from typing import Optional, List, Iterator
1919

2020
import google.ai.generativelanguage as glm
2121
from google.generativeai.client import get_default_model_client
@@ -35,6 +35,22 @@ def get_model(name: str, *, client=None) -> model_types.Model:
3535

3636

3737
class ModelsIterable(model_types.ModelsIterable):
38+
"""
39+
An iterable class to traverse through a list of models.
40+
41+
This class allows you to iterate over a list of models, fetching them in pages
42+
if necessary based on the provided `page_size` and `page_token`.
43+
44+
Args:
45+
page_size: The number of `models` to fetch per page.
46+
page_token: Token representing the current page. Pass `None` for the first page.
47+
models: List of models to iterate through.
48+
client: An optional client for the model service.
49+
50+
Returns:
51+
A `ModelsIterable` iterable object that allows iterating through the models.
52+
"""
53+
3854
def __init__(
3955
self,
4056
*,
@@ -48,21 +64,44 @@ def __init__(
4864
self._models = models
4965
self._client = client
5066

51-
def __iter__(self):
67+
def __iter__(self) -> Iterator[model_types.Model]:
68+
"""
69+
Returns an iterator over the models.
70+
"""
5271
while self:
5372
page = self._models
5473
yield from page
5574
self = self._next_page()
5675

57-
def _next_page(self):
76+
def _next_page(self) -> ModelsIterable | None:
77+
"""
78+
Fetches the next page of models based on the page token.
79+
"""
5880
if not self._page_token:
5981
return None
6082
return _list_models(
6183
page_size=self._page_size, page_token=self._page_token, client=self._client
6284
)
6385

6486

65-
def _list_models(page_size, page_token, client):
87+
def _list_models(
88+
page_size: int, page_token: str | None, client: glm.ModelServiceClient
89+
) -> ModelsIterable:
90+
"""
91+
Fetches a page of models using the provided client and pagination tokens.
92+
93+
This function queries the `client` to retrieve a page of models based on the given
94+
`page_size` and `page_token`. It then processes the response and returns an iterable
95+
object to traverse through the models.
96+
97+
Args:
98+
page_size: How many `types.Models` to fetch per page (api call).
99+
page_token: Token representing the current page.
100+
client: The client to communicate with the model service.
101+
102+
Returns:
103+
An iterable `ModelsIterable` object containing the fetched models and pagination info.
104+
"""
66105
result = client.list_models(page_size=page_size, page_token=page_token)
67106
result = result._response
68107
result = type(result).to_dict(result)

google/generativeai/text.py

Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,18 @@
2929

3030

3131
def _make_text_prompt(prompt: str | dict[str, str]) -> glm.TextPrompt:
32+
"""
33+
Creates a `glm.TextPrompt` object based on the provided prompt input.
34+
35+
Args:
36+
prompt: The prompt input, either a string or a dictionary.
37+
38+
Returns:
39+
glm.TextPrompt: A TextPrompt object containing the prompt text.
40+
41+
Raises:
42+
TypeError: If the provided prompt is neither a string nor a dictionary.
43+
"""
3244
if isinstance(prompt, str):
3345
return glm.TextPrompt(text=prompt)
3446
elif isinstance(prompt, dict):
@@ -49,6 +61,28 @@ def _make_generate_text_request(
4961
safety_settings: safety_types.SafetySettingOptions | None = None,
5062
stop_sequences: str | Iterable[str] | None = None,
5163
) -> glm.GenerateTextRequest:
64+
"""
65+
Creates a `glm.GenerateTextRequest` object based on the provided parameters.
66+
67+
This function generates a `glm.GenerateTextRequest` object with the specified
68+
parameters. It prepares the input parameters and creates a request that can be
69+
used for generating text using the chosen model.
70+
71+
Args:
72+
model: The model to use for text generation.
73+
prompt: The prompt for text generation. Defaults to None.
74+
temperature: The temperature for randomness in generation. Defaults to None.
75+
candidate_count: The number of candidates to consider. Defaults to None.
76+
max_output_tokens: The maximum number of output tokens. Defaults to None.
77+
top_p: The nucleus sampling probability threshold. Defaults to None.
78+
top_k: The top-k sampling parameter. Defaults to None.
79+
safety_settings: Safety settings for generated text. Defaults to None.
80+
stop_sequences: Stop sequences to halt text generation. Can be a string
81+
or iterable of strings. Defaults to None.
82+
83+
Returns:
84+
`glm.GenerateTextRequest`: A `GenerateTextRequest` object configured with the specified parameters.
85+
"""
5286
model = model_types.make_model_name(model)
5387
prompt = _make_text_prompt(prompt=prompt)
5488
safety_settings = safety_types.normalize_safety_settings(safety_settings)
@@ -155,6 +189,17 @@ def __init__(self, **kwargs):
155189
def _generate_response(
156190
request: glm.GenerateTextRequest, client: glm.TextServiceClient = None
157191
) -> Completion:
192+
"""
193+
Generates a response using the provided `glm.GenerateTextRequest` and client.
194+
195+
Args:
196+
request: The text generation request.
197+
client: The client to use for text generation. Defaults to None, in which
198+
case the default text client is used.
199+
200+
Returns:
201+
`Completion`: A `Completion` object with the generated text and response information.
202+
"""
158203
if client is None:
159204
client = get_default_text_client()
160205

tests/test_models.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
from absl.testing import absltest
1818

1919
import google.ai.generativelanguage as glm
20+
2021
from google.ai.generativelanguage_v1beta2.types import model
2122

2223
from google.generativeai import models

0 commit comments

Comments
 (0)