Commit fb12ada

V1beta3 (#67)
* Working on v1beta3
  Add support for batched embeddings
  Update to v0.2
  Add get_tuned_model, get_base_model, get_model.
  Add list_models, list_tuned_models.
  Add create_tuned_model.
  Support batched embeddings.
  Change-Id: I917576a61719fcd90cca581506e15826b67cae88
* Implement {update,delete}_tuned_model.
  Change-Id: Idd276934666f7fa8da034e59a80e4e6047cea4d5
* Decode TunedModel to python objects.
  Change-Id: I4376012686b53065f9372bd8dd6a340d3e0ca43a
* Add more model service tests + bug fixes + pytype.
  Change-Id: Iaa1b4e75b5c075d1b0b6b383d37da8d254b0715b
* Upgrade the Operation returned by create_tuned_model.
  Updated into colab here: https://colab.sandbox.google.com/drive/1Eros5eLfAl4pNfsQ2sftyx5khq2TrK_V?resourcekey=0-ByOp_F6lxWCuKVJM_tUjeQ
  Change-Id: Ic3e40f25abc6eba69a8866fe855c7ecb83c4c4c3
* Implement the operations service.
  Change-Id: I29e92a4c8697f165deb9782599b21d98eb869737
* fix update_tuned_model
  Change-Id: Iaacaaa08c1807e13ad520804dd20eb1e54a9b34d
* Remove decoding error workaround + format + use the 0.3.2 v1beta3 package.
  Change-Id: I6e329c635aa51e67c50b2318a506455c21c8d988
* Fix tests & remove pytz
  Fix tuned_model/base_model/source_model handling so nested tuned models work
  Remove the last 'v1main'
  Change-Id: Ia07c169d0bfb0782edd0115b5d0418c9eb3eb040
* black .
* fix test
* add tqdm requirement
* Use union to fix py3.9
* Use union to fix py3.9
* black .
1 parent 02f3298 commit fb12ada

20 files changed: +1412 -180 lines
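
Taken together, these changes expose a tuned-model workflow and batched embeddings at the top level of `google.generativeai`. Below is a minimal sketch of that surface, assuming an API key is already configured; the `create_tuned_model` argument names (`source_model`, `training_data`) and the `operation.result()` call follow the Colab linked in the commit message but are not shown in this diff, so treat them as illustrative:

```python
import google.generativeai as palm

palm.configure(api_key="YOUR_API_KEY")

# Tuned models are listed by the new list_tuned_models().
for model in palm.list_tuned_models():
    print(model.name)

# create_tuned_model returns an operation that resolves to the TunedModel
# once tuning finishes (see "Upgrade the Operation" in the commit notes).
operation = palm.create_tuned_model(
    source_model="models/text-bison-001",
    training_data=[
        {"text_input": "1", "output": "2"},
        {"text_input": "3", "output": "4"},
    ],
)
tuned_model = operation.result()

# Batched embeddings: a list of strings in, one embedding per string out.
embeddings = palm.generate_embeddings(
    model="models/embedding-gecko-001",
    text=["hello", "world"],
)
```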

RELEASE.md

Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
+
+## v0.2
+
+- More flexible safety settings:
+  - accept strings, ints for enums.
+  - accept a `{category:threshold}` dict in addition to the
+    list of dicts format (`[{"category": category, "threshold": threshold}, ...]`).
+- Add support for batched embeddings.
+- Add support for tuning:
+  - Add `get_{base,tuned}_model`.
+  - Add `list_tuned_models`.
+  - Add `create_tuned_model`.
+
+## v0.1
+
+Initial version
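
The "more flexible safety settings" entry means both forms below should be accepted. A hedged sketch using `generate_text`; the category and threshold names follow the v1beta3 `HarmCategory`/`HarmBlockThreshold` enums, and the exact coercion rules are not shown in this diff:

```python
import google.generativeai as palm

# List-of-dicts form, with string names for the enums:
palm.generate_text(
    prompt="Hello",
    safety_settings=[
        {"category": "HARM_CATEGORY_TOXICITY", "threshold": "BLOCK_LOW_AND_ABOVE"},
    ],
)

# Equivalent {category: threshold} dict form, with an int for the threshold:
palm.generate_text(
    prompt="Hello",
    safety_settings={"HARM_CATEGORY_TOXICITY": 1},  # 1 == BLOCK_LOW_AND_ABOVE
)
```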

docs/build_docs.py

Lines changed: 6 additions & 6 deletions
@@ -29,15 +29,15 @@
 from absl import flags

 import google
-from google import generativeai
-from google.ai import generativelanguage
+from google import generativeai as palm
+from google.ai import generativelanguage_v1beta3 as glm

 from tensorflow_docs.api_generator import generate_lib
 from tensorflow_docs.api_generator import public_api

 import yaml

-google.ai.generativelanguage.__doc__ = """\
+glm.__doc__ = """\
 This package, `google.ai.generativelanguage`, is a low-level auto-generated client library for the PaLM API.

 ```posix-terminal
@@ -54,7 +54,7 @@
 when initializing a client:

 ```
-from google.ai import generativelanguage as glm
+from google.ai import generativelanguage_v1beta3 as glm

 client = glm.DiscussServiceClient(
     client_options={'api_key':'YOUR_API_KEY'})
@@ -186,8 +186,8 @@ def gen_api_docs():
         py_modules=[("google", google)],
         # Replace `tensorflow_docs.api_generator` with your module, here.
         base_dir=(
-            pathlib.Path(google.generativeai.__file__).parent,
-            pathlib.Path(google.ai.generativelanguage.__file__).parent.parent,
+            pathlib.Path(palm.__file__).parent,
+            pathlib.Path(glm.__file__).parent.parent,
         ),
         code_url_prefix=(
             _CODE_URL_PREFIX.value,
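
The `glm.__doc__ = ...` assignment above is an ordinary module-attribute write; tensorflow_docs then renders that docstring as the package overview page. A standalone illustration of the pattern (any module object works the same way):

```python
# Override a module docstring at runtime; doc generators that read
# module.__doc__ will pick up the replacement text.
from google.ai import generativelanguage_v1beta3 as glm

glm.__doc__ = "Low-level auto-generated client library for the PaLM API."
print(glm.__doc__)
```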

google/generativeai/__init__.py

Lines changed: 8 additions & 0 deletions
@@ -79,7 +79,15 @@
 from google.generativeai.text import generate_embeddings

 from google.generativeai.models import list_models
+from google.generativeai.models import list_tuned_models
+
 from google.generativeai.models import get_model
+from google.generativeai.models import get_base_model
+from google.generativeai.models import get_tuned_model
+
+from google.generativeai.models import create_tuned_model
+from google.generativeai.models import update_tuned_model
+from google.generativeai.models import delete_tuned_model

 from google.generativeai.client import configure

8593

google/generativeai/client.py

Lines changed: 17 additions & 3 deletions
@@ -17,11 +17,13 @@
 import os
 from typing import cast, Optional, Union

-import google.ai.generativelanguage as glm
+import google.ai.generativelanguage_v1beta3 as glm

 from google.auth import credentials as ga_credentials
 from google.api_core import client_options as client_options_lib
 from google.api_core import gapic_v1
+from google.api_core import operations_v1
+
 from google.generativeai import version


@@ -32,6 +34,7 @@
 default_discuss_async_client = None
 default_model_client = None
 default_text_client = None
+default_operations_client = None


 def configure(
@@ -64,6 +67,7 @@ def configure(
     global default_discuss_client
     global default_model_client
     global default_text_client
+    global default_operations_client

     if isinstance(client_options, dict):
         client_options = client_options_lib.from_dict(client_options)
@@ -112,6 +116,7 @@ def configure(
     default_discuss_client = None
     default_text_client = None
     default_model_client = None
+    default_operations_client = None


 def get_default_discuss_client() -> glm.DiscussServiceClient:
@@ -125,7 +130,7 @@ def get_default_discuss_client() -> glm.DiscussServiceClient:
     return default_discuss_client


-def get_default_text_client():
+def get_default_text_client() -> glm.TextServiceClient:
     global default_text_client
     if default_text_client is None:
         # Attempt to configure using defaults.
@@ -149,7 +154,7 @@ def get_default_discuss_async_client() -> glm.DiscussServiceAsyncClient:
     return default_discuss_async_client


-def get_default_model_client():
+def get_default_model_client() -> glm.ModelServiceClient:
     global default_model_client
     if default_model_client is None:
         # Attempt to configure using defaults.
@@ -158,3 +163,12 @@ def get_default_model_client():
         default_model_client = glm.ModelServiceClient(**default_client_config)

     return default_model_client
+
+
+def get_default_operations_client() -> operations_v1.OperationsClient:
+    global default_operations_client
+    if default_operations_client is None:
+        model_client = get_default_model_client()
+        default_operations_client = model_client._transport.operations_client
+
+    return default_operations_client
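
`get_default_operations_client` follows the same lazy-singleton pattern as the other getters, but borrows the operations client from the model client's transport rather than constructing a new one, so both share a channel and credentials. A minimal usage sketch; the operation name below is a hypothetical placeholder:

```python
from google.generativeai.client import get_default_operations_client

operations_client = get_default_operations_client()

# Poll a long-running tuning operation by name (placeholder name).
op = operations_client.get_operation(name="tunedModels/example/operations/op-123")
print(op.done)
```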

google/generativeai/discuss.py

Lines changed: 5 additions & 5 deletions
@@ -20,7 +20,7 @@

 from typing import Iterable, List, Optional, Union

-import google.ai.generativelanguage as glm
+import google.ai.generativelanguage_v1beta3 as glm

 from google.generativeai.client import get_default_discuss_client
 from google.generativeai.client import get_default_discuss_async_client
@@ -271,7 +271,7 @@ def _make_message_prompt(

 def _make_generate_message_request(
     *,
-    model: model_types.ModelNameOptions | None,
+    model: model_types.AnyModelNameOptions | None,
     context: str | None = None,
     examples: discuss_types.ExamplesOptions | None = None,
     messages: discuss_types.MessagesOptions | None = None,
@@ -313,7 +313,7 @@ def inner(f):

 def chat(
     *,
-    model: model_types.ModelNameOptions | None = "models/chat-bison-001",
+    model: model_types.AnyModelNameOptions | None = "models/chat-bison-001",
     context: str | None = None,
     examples: discuss_types.ExamplesOptions | None = None,
     messages: discuss_types.MessagesOptions | None = None,
@@ -411,7 +411,7 @@ def chat(
 @set_doc(chat.__doc__)
 async def chat_async(
     *,
-    model: model_types.ModelNameOptions | None = "models/chat-bison-001",
+    model: model_types.AnyModelNameOptions | None = "models/chat-bison-001",
     context: str | None = None,
     examples: discuss_types.ExamplesOptions | None = None,
     messages: discuss_types.MessagesOptions | None = None,
@@ -567,7 +567,7 @@ def count_message_tokens(
     context: str | None = None,
     examples: discuss_types.ExamplesOptions | None = None,
     messages: discuss_types.MessagesOptions | None = None,
-    model: model_types.ModelNameOptions = DEFAULT_DISCUSS_MODEL,
+    model: model_types.AnyModelNameOptions = DEFAULT_DISCUSS_MODEL,
     client: glm.DiscussServiceAsyncClient | None = None,
 ):
     model = model_types.make_model_name(model)
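
The `ModelNameOptions` to `AnyModelNameOptions` widenings above let the chat entry points accept tuned-model names as well as base-model names. A sketch, with a hypothetical tuned-model name (whether a given tuned model works with chat depends on its base model):

```python
import google.generativeai as palm

response = palm.chat(
    model="tunedModels/my-chat-tune",  # previously only "models/..." names
    messages=["Hello"],
)
print(response.last)
```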
