Skip to content

Commit 07fb91c

Browse files
authored
Docstring (#110)
* Update docstring * update docstrings * 0.3.1
1 parent 0cd555b commit 07fb91c

File tree

4 files changed

+70
-41
lines changed

4 files changed

+70
-41
lines changed

docs/build_docs.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
import textwrap
2727
import typing
2828

29-
# For showing the conditional imports and types in `content_types.py`
29+
# For showing the conditional imports and types in `content_types.py`
3030
typing.TYPE_CHECKING = True
3131

3232
from absl import app

google/generativeai/__init__.py

Lines changed: 10 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -12,60 +12,31 @@
1212
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1313
# See the License for the specific language governing permissions and
1414
# limitations under the License.
15-
"""A high level client library for generative AI.
15+
"""Google AI Python SDK
1616
1717
## Setup
1818
1919
```posix-terminal
2020
pip install google-generativeai
2121
```
2222
23-
```
24-
import google.generativeai as palm
25-
import os
26-
27-
palm.configure(api_key=os.environ['API_KEY'])
28-
```
29-
30-
## Text
31-
32-
Use the `palm.generate_text` function to have the model complete some initial
33-
text.
34-
35-
```
36-
response = palm.generate_text(prompt="The opposite of hot is")
37-
print(response.result) # 'cold.'
38-
```
23+
## GenerativeModel
3924
40-
## Chat
25+
Use `genai.GenerativeModel` to access the API:
4126
42-
Use the `palm.chat` function to have a discussion with a model:
43-
44-
```
45-
chat = palm.chat(messages=["Hello."])
46-
print(chat.last) # 'Hello! What can I help you with?'
47-
chat = chat.reply("Can you tell me a joke?")
48-
print(chat.last) # 'Why did the chicken cross the road?'
4927
```
28+
import google.generativeai as genai
29+
import os
5030
51-
## Models
52-
53-
Use the model service discover models and find out more about them:
54-
55-
Use `palm.get_model` to get details if you know a model's name:
56-
57-
```
58-
model = palm.get_model('models/chat-bison-001') # 🦬
59-
```
31+
genai.configure(api_key=os.environ['API_KEY'])
6032
61-
Use `palm.list_models` to discover models:
33+
model = genai.GenerativeModel('gemini-pro')
34+
response = model.generate_content('Please summarise this document: ...')
6235
63-
```
64-
import pprint
65-
for model in palm.list_models():
66-
pprint.pprint(model) # 🦎🦦🦬🦄
36+
print(response.text)
6737
```
6838
39+
See the [python quickstart](https://ai.google.dev/tutorials/python_quickstart) for more details.
6940
"""
7041
from __future__ import annotations
7142

google/generativeai/types/generation_types.py

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,64 @@ class GenerationConfigDict(TypedDict):
5454

5555
@dataclasses.dataclass
5656
class GenerationConfig:
57+
"""A simple dataclass used to configure the generation parameters of `GenerativeModel.generate_content`.
58+
59+
Attributes:
60+
candidate_count:
61+
Number of generated responses to return.
62+
stop_sequences:
63+
The set of character sequences (up
64+
to 5) that will stop output generation. If
65+
specified, the API will stop at the first
66+
appearance of a stop sequence. The stop sequence
67+
will not be included as part of the response.
68+
max_output_tokens:
69+
The maximum number of tokens to include in a
70+
candidate.
71+
72+
If unset, this will default to output_token_limit specified
73+
in the model's specification.
74+
temperature:
75+
Controls the randomness of the output. Note: The
76+
77+
default value varies by model, see the `Model.temperature`
78+
attribute of the `Model` returned by the `genai.get_model`
79+
function.
80+
81+
Values can range from [0.0,1.0], inclusive. A value closer
82+
to 1.0 will produce responses that are more varied and
83+
creative, while a value closer to 0.0 will typically result
84+
in more straightforward responses from the model.
85+
top_p:
86+
Optional. The maximum cumulative probability of tokens to
87+
consider when sampling.
88+
89+
The model uses combined Top-k and nucleus sampling.
90+
91+
Tokens are sorted based on their assigned probabilities so
92+
that only the most likely tokens are considered. Top-k
93+
sampling directly limits the maximum number of tokens to
94+
consider, while Nucleus sampling limits number of tokens
95+
based on the cumulative probability.
96+
97+
Note: The default value varies by model, see the
98+
`Model.top_p` attribute of the `Model` returned by the
99+
`genai.get_model` function.
100+
101+
top_k (int):
102+
Optional. The maximum number of tokens to consider when
103+
sampling.
104+
105+
The model uses combined Top-k and nucleus sampling.
106+
107+
Top-k sampling considers the set of `top_k` most probable
108+
tokens. Defaults to 40.
109+
110+
Note: The default value varies by model, see the
111+
`Model.top_k` attribute of the `Model` returned by the
112+
`genai.get_model` function.
113+
"""
114+
57115
candidate_count: int | None = None
58116
stop_sequences: Iterable[str] | None = None
59117
max_output_tokens: int | None = None

google/generativeai/version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,4 +14,4 @@
1414
# limitations under the License.
1515
from __future__ import annotations
1616

17-
__version__ = "0.3.0"
17+
__version__ = "0.3.1"

0 commit comments

Comments
 (0)