
Commit edc6fa0

Use flash more often.
Change-Id: If20e5d5e8462d160681d9dc2bfec965fd94fb633
1 parent 42d952a commit edc6fa0

9 files changed, 29 additions and 33 deletions

README.md

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ genai.configure(api_key=os.environ["GEMINI_API_KEY"])
 3. Create a model and run a prompt.
 
 ```python
-model = genai.GenerativeModel('gemini-1.0-pro-latest')
+model = genai.GenerativeModel('gemini-1.5-flash')
 response = model.generate_content("The opposite of hot is")
 print(response.text)
 ```
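
In context, the updated quickstart snippet reads roughly as follows (a minimal sketch assembled from the hunk above plus the standard import and configure lines; it is not part of this commit):

```python
import os
import google.generativeai as genai

# Configure the client with an API key from the environment.
genai.configure(api_key=os.environ["GEMINI_API_KEY"])

# Create a model and run a prompt.
model = genai.GenerativeModel('gemini-1.5-flash')
response = model.generate_content("The opposite of hot is")
print(response.text)
```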

samples/code_execution.py

Lines changed: 3 additions & 3 deletions
@@ -29,7 +29,7 @@ def test_code_execution_basic(self):
         )
 
         # Each `part` either contains `text`, `executable_code` or an `execution_result`
-        for part in result.candidates[0].content.parts:
+        for part in response.candidates[0].content.parts:
             print(part, "\n")
 
         print("-" * 80)
@@ -92,7 +92,7 @@ def test_code_execution_basic(self):
 
     def test_code_execution_request_override(self):
         # [START code_execution_request_override]
-        model = genai.GenerativeModel(model_name="gemini-1.5-pro")
+        model = genai.GenerativeModel(model_name="gemini-1.5-flash")
         response = model.generate_content(
             (
                 "What is the sum of the first 50 prime numbers? "
@@ -140,7 +140,7 @@ def test_code_execution_request_override(self):
 
     def test_code_execution_chat(self):
         # [START code_execution_chat]
-        model = genai.GenerativeModel(model_name="gemini-1.5-pro", tools="code_execution")
+        model = genai.GenerativeModel(model_name="gemini-1.5-flash", tools="code_execution")
         chat = model.start_chat()
         response = chat.send_message('Can you print "Hello world!"?')
         response = chat.send_message(
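
Put together, the code-execution flow these samples exercise looks roughly like this (a minimal sketch, assuming `genai.configure()` has already been called; the second half of the prompt is illustrative, since the hunk truncates it). It also reflects the `result` → `response` fix from the first hunk:

```python
import google.generativeai as genai

# Enable the code-execution tool on the Flash model.
model = genai.GenerativeModel(model_name="gemini-1.5-flash", tools="code_execution")
response = model.generate_content(
    "What is the sum of the first 50 prime numbers? "
    "Generate and run code for the calculation."  # illustrative continuation of the prompt
)

# Each `part` either contains `text`, `executable_code`, or an `execution_result`.
for part in response.candidates[0].content.parts:
    print(part, "\n")
```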

samples/count_tokens.py

Lines changed: 3 additions & 3 deletions
@@ -23,7 +23,7 @@
 class UnitTests(absltest.TestCase):
     def test_tokens_context_window(self):
         # [START tokens_context_window]
-        model_info = genai.get_model("models/gemini-1.0-pro-001")
+        model_info = genai.get_model("models/gemini-1.5-flash")
 
         # Returns the "context window" for the model,
         # which is the combined input and output token limits.
@@ -91,7 +91,7 @@ def test_tokens_multimodal_image_inline(self):
         model = genai.GenerativeModel("models/gemini-1.5-flash")
 
         prompt = "Tell me about this image"
-        your_image_file = PIL.Image.open("image.jpg")
+        your_image_file = PIL.Image.open(media/"organ.jpg")
 
         # Call `count_tokens` to get the input token count
         # of the combined text and file (`total_tokens`).
@@ -115,7 +115,7 @@ def test_tokens_multimodal_image_file_api(self):
         model = genai.GenerativeModel("models/gemini-1.5-flash")
 
         prompt = "Tell me about this image"
-        your_image_file = genai.upload_file(path="image.jpg")
+        your_image_file = genai.upload_file(path=media/"organ.jpg")
 
         # Call `count_tokens` to get the input token count
         # of the combined text and file (`total_tokens`).
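
The multimodal token-counting pattern those two hunks touch looks roughly like this (a sketch; `media` is assumed to be a `pathlib.Path` pointing at the repository's sample files and is defined elsewhere in the upstream sample, so the directory name below is a placeholder):

```python
import pathlib

import PIL.Image
import google.generativeai as genai

media = pathlib.Path("samples")  # placeholder for the samples' media directory

model = genai.GenerativeModel("models/gemini-1.5-flash")
prompt = "Tell me about this image"
your_image_file = PIL.Image.open(media / "organ.jpg")

# `count_tokens` reports the input token count of the combined
# text and image as `total_tokens`.
print(model.count_tokens([prompt, your_image_file]))
```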

samples/rest/code_execution.sh

Lines changed: 2 additions & 2 deletions
@@ -2,7 +2,7 @@ set -eu
 
 echo "[START code_execution_basic]"
 # [START code_execution_basic]
-curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
 -H 'Content-Type: application/json' \
 -d ' {"tools": [{'code_execution': {}}],
 "contents": {
@@ -16,7 +16,7 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-lat
 
 echo "[START code_execution_chat]"
 # [START code_execution_chat]
-curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
 -H 'Content-Type: application/json' \
 -d '{"tools": [{'code_execution': {}}],
 "contents": [

samples/rest/controlled_generation.sh

Lines changed: 2 additions & 2 deletions
@@ -2,7 +2,7 @@ set -eu
 
 echo "json_controlled_generation"
 # [START json_controlled_generation]
-curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
 -H 'Content-Type: application/json' \
 -d '{
 "contents": [{
@@ -27,7 +27,7 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-lat
 
 echo "json_no_schema"
 # [START json_no_schema]
-curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
 -H 'Content-Type: application/json' \
 -d '{
 "contents": [{

samples/rest/models.sh

Lines changed: 1 addition & 1 deletion
@@ -7,5 +7,5 @@ curl https://generativelanguage.googleapis.com/v1beta/models?key=$GOOGLE_API_KEY
 
 echo "[START models_get]"
 # [START models_get]
-curl https://generativelanguage.googleapis.com/v1beta/models/gemini-pro?key=$GOOGLE_API_KEY
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash?key=$GOOGLE_API_KEY
 # [END models_get]

samples/rest/safety_settings.sh

Lines changed: 14 additions & 18 deletions
@@ -2,37 +2,33 @@ set -eu
 
 echo "[START safety_settings]"
 # [START safety_settings]
-echo '{
+echo '{
     "safetySettings": [
-        {'category': HARM_CATEGORY_HARASSMENT, 'threshold': BLOCK_ONLY_HIGH}
+        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"}
     ],
     "contents": [{
         "parts":[{
            "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write a ironic phrase about them.'"}]}]}' > request.json
 
-curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY" \
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
     -H 'Content-Type: application/json' \
     -X POST \
-    -d @request.json 2> /dev/null > tee response.json
-
-jq .promptFeedback > response.json
+    -d @request.json 2> /dev/null
 # [END safety_settings]
 
 echo "[START safety_settings_multi]"
 # [START safety_settings_multi]
-echo '{
-    "safetySettings": [
-        {'category': HARM_CATEGORY_HARASSMENT, 'threshold': BLOCK_ONLY_HIGH},
-        {'category': HARM_CATEGORY_HATE_SPEECH, 'threshold': BLOCK_MEDIUM_AND_ABOVE}
-    ],
-    "contents": [{
-        "parts":[{
-           "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write a ironic phrase about them.'"}]}]}' > request.json
+echo '{
+    "safetySettings": [
+        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
+        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}
+    ],
+    "contents": [{
+        "parts":[{
+           "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write a ironic phrase about them.'"}]}]}' > request.json
 
-curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY" \
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
     -H 'Content-Type: application/json' \
     -X POST \
-    -d @request.json 2> /dev/null > response.json
-
-jq .promptFeedback > response.json
+    -d @request.json 2> /dev/null
 # [END safety_settings_multi]
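
The Python SDK equivalent of these REST calls is roughly the following (a sketch, not part of this commit; passing `safety_settings` as a dict of category/threshold strings assumes the SDK's flexible enum parsing):

```python
import google.generativeai as genai

model = genai.GenerativeModel("gemini-1.5-flash")
response = model.generate_content(
    "I support Martians Soccer Club and I think Jupiterians Football Club sucks! "
    "Write a ironic phrase about them.",
    safety_settings={
        "HARM_CATEGORY_HARASSMENT": "BLOCK_ONLY_HIGH",
        "HARM_CATEGORY_HATE_SPEECH": "BLOCK_MEDIUM_AND_ABOVE",
    },
)

# The REST samples previously piped the response through `jq .promptFeedback`;
# the SDK exposes the same information as `prompt_feedback`.
print(response.prompt_feedback)
```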

samples/rest/system_instruction.sh

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@ set -eu
 
 echo "[START system_instruction]"
 # [START system_instruction]
-curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
 -H 'Content-Type: application/json' \
 -d '{ "system_instruction": {
     "parts":

samples/tuned_models.py

Lines changed: 2 additions & 2 deletions
@@ -27,7 +27,7 @@ def test_tuned_models_create(self):
         # [START tuned_models_create]
         import time
 
-        base_model = "models/gemini-1.0-pro-001"
+        base_model = "models/gemini-1.5-flash-001-tuning"
         training_data = [
             {"text_input": "1", "output": "2"},
             # ... more examples ...
@@ -94,7 +94,7 @@ def test_tuned_models_list(self):
     def test_tuned_models_delete(self):
         import time
 
-        base_model = "models/gemini-1.0-pro-001"
+        base_model = "models/gemini-1.5-flash-001-tuning"
         training_data = samples / "increment_tuning_data.json"
         try:
             operation = genai.create_tuned_model(
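
With the new tuning-specific base model, the creation call these tests build toward looks roughly like this (a sketch; the tuned-model id and hyperparameter values are illustrative, and the keyword names follow the upstream sample rather than this diff):

```python
import google.generativeai as genai

base_model = "models/gemini-1.5-flash-001-tuning"
training_data = [
    {"text_input": "1", "output": "2"},
    # ... more examples ...
]

# Start the tuning job; `create_tuned_model` returns a long-running operation.
operation = genai.create_tuned_model(
    source_model=base_model,
    training_data=training_data,
    id="increment-demo",      # hypothetical tuned-model id
    epoch_count=20,           # illustrative hyperparameters
    batch_size=4,
    learning_rate=0.001,
)
```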
