Commit f49ad7b

Merge branch 'main' into tuned_models_rest
2 parents 009b1c2 + a8edb40 commit f49ad7b

8 files changed: +355 -12 lines changed


.github/workflows/samples.yaml

Lines changed: 85 additions & 0 deletions
@@ -0,0 +1,85 @@
+name: Validate samples
+
+on:
+  pull_request:
+    types: [opened, synchronize] # new, updates
+
+jobs:
+  update-python-list:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout Code
+        uses: actions/checkout@v3
+
+      - name: Get Changed Files
+        id: changed_files
+        uses: tj-actions/changed-files@v44
+        with:
+          files: |
+            samples/*.py
+
+      - name: Check Python samples
+        env:
+          NEW_FILES: ${{ steps.changed_files.outputs.all_modified_files }}
+          README: samples/README.md
+        run: |
+          #!/bin/bash
+
+          for file in ${NEW_FILES}; do
+            echo "Testing $file"
+            name=$(basename $file)
+            if [[ -f ${file} ]]; then
+              # File exists, so it needs to be listed in the README.
+              if ! grep -q $name ${README}; then
+                echo "Error: Sample not listed in README ($name)"
+                exit 1
+              fi
+            else
+              # File does not exist, so ensure it's not listed.
+              if grep -q $name ${README}; then
+                echo "Error: Sample should not be listed in README ($name)"
+                exit 1
+              fi
+            fi
+          done
+
+  update-rest-list:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout Code
+        uses: actions/checkout@v3
+
+      - name: Get Changed Files
+        id: changed_files
+        uses: tj-actions/changed-files@v44
+        with:
+          files: |
+            samples/rest/*.sh
+
+      - name: Check REST samples
+        env:
+          NEW_FILES: ${{ steps.changed_files.outputs.all_modified_files }}
+          README: samples/rest/README.md
+        run: |
+          #!/bin/bash
+
+          for file in ${NEW_FILES}; do
+            echo "Testing $file"
+            echo $(basename $file)
+            name=$(basename $file)
+            if [[ -f ${file} ]]; then
+              # File exists, so it needs to be listed in the README.
+              if ! grep -q $name ${README}; then
+                echo "Error: Sample not listed in README ($name)"
+                exit 1
+              fi
+            else
+              # File does not exist, so ensure it's not listed.
+              if grep -q $name ${README}; then
+                echo "Error: Sample should not be listed in README ($name)"
+                exit 1
+              fi
+            fi
+          done
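
The workflow above only checks that changed sample files are reflected in the matching README. To run the same listing check locally before opening a PR, a small shell loop is enough. This is an illustrative sketch rather than part of the commit; it assumes it is run from the repository root and covers the REST samples (swap the paths for samples/*.py and samples/README.md to check the Python samples).

#!/bin/bash
# Local sketch of the CI listing check (not part of this commit).
# Assumes the current directory is the repository root.
README=samples/rest/README.md

for file in samples/rest/*.sh; do
  name=$(basename "$file")
  # Every sample script on disk must have a row in the README's Contents table.
  if ! grep -q "$name" "$README"; then
    echo "Error: Sample not listed in README ($name)"
    exit 1
  fi
done
echo "All samples in samples/rest are listed in $README"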

samples/README.md

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
+# Gemini API Python SDK sample code
+
+This directory contains sample code for key features of the SDK, organised by high-level feature.
+
+These samples are embedded in parts of the [documentation](https://ai.google.dev), most notably in the [API reference](https://ai.google.dev/api).
+
+Each file is structured as a runnable test case, ensuring that samples are executable and functional. Each test demonstrates a single concept and contains region tags that demarcate the test scaffolding from the spotlight code. If you are contributing, code within region tags should follow sample code best practices: be clear, complete, and concise.
+
+## Contents
+
+| File | Description |
+| ---- | ----------- |
+| [cache.py](./cache.py) | Context caching |
+| [chat.py](./chat.py) | Multi-turn chat conversations |
+| [code_execution.py](./code_execution.py) | Executing code |
+| [configure_model_parameters.py](./configure_model_parameters.py) | Setting model parameters |
+| [controlled_generation.py](./controlled_generation.py) | Generating content with output constraints (e.g. JSON mode) |
+| [count_tokens.py](./count_tokens.py) | Counting input and output tokens |
+| [embed.py](./embed.py) | Generating embeddings |
+| [files.py](./files.py) | Managing files with the File API |
+| [function_calling.py](./function_calling.py) | Using function calling |
+| [models.py](./models.py) | Listing models and model metadata |
+| [safety_settings.py](./safety_settings.py) | Setting and using safety controls |
+| [system_instruction.py](./system_instruction.py) | Setting system instructions |
+| [text_generation.py](./text_generation.py) | Generating text |
+| [tuned_models.py](./tuned_models.py) | Creating and managing tuned models |

samples/code_execution.py

Lines changed: 1 addition & 0 deletions
@@ -142,6 +142,7 @@ def test_code_execution_chat(self):
         # [START code_execution_chat]
         model = genai.GenerativeModel(model_name="gemini-1.5-pro", tools="code_execution")
         chat = model.start_chat()
+        response = chat.send_message('Can you print "Hello world!"?')
         response = chat.send_message(
             (
                 "What is the sum of the first 50 prime numbers? "

samples/controlled_generation.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ class Recipe(typing.TypedDict):
         result = model.generate_content(
             "List a few popular cookie recipes.",
             generation_config=genai.GenerationConfig(
-                response_mime_type="application/json", response_schema=list([Recipe])
+                response_mime_type="application/json", response_schema=list[Recipe]
             ),
         )
         print(result)

samples/rest/README.md

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+# Gemini API REST sample code
+
+This directory contains sample code for key features of the API, organised by high-level feature.
+
+These samples are embedded in parts of the [documentation](https://ai.google.dev), most notably in the [API reference](https://ai.google.dev/api).
+
+Each file is structured as a runnable script, ensuring that samples are executable and functional. Each file contains region tags that demarcate the script scaffolding from the spotlight code. If you are contributing, code within region tags should follow sample code best practices: be clear, complete, and concise.
+
+## Contents
+
+| File | Description |
+| ---- | ----------- |
+| [cache.sh](./cache.sh) | Context caching |
+| [chat.sh](./chat.sh) | Multi-turn chat conversations |
+| [code_execution.sh](./code_execution.sh) | Executing code |
+| [configure_model_parameters.sh](./configure_model_parameters.sh) | Setting model parameters |
+| [controlled_generation.sh](./controlled_generation.sh) | Generating content with output constraints (e.g. JSON mode) |
+| [count_tokens.sh](./count_tokens.sh) | Counting input and output tokens |
+| [embed.sh](./embed.sh) | Generating embeddings |
+| [files.sh](./files.sh) | Managing files with the File API |
+| [function_calling.sh](./function_calling.sh) | Using function calling |
+| [models.sh](./models.sh) | Listing models and model metadata |
+| [safety_settings.sh](./safety_settings.sh) | Setting and using safety controls |
+| [system_instruction.sh](./system_instruction.sh) | Setting system instructions |
+| [text_generation.sh](./text_generation.sh) | Generating text |
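
As a concrete illustration of the structure this README describes, a new REST sample would be a standalone script whose spotlight code sits between matching region tags. The sketch below is not part of this commit; the sample_hello tag name is hypothetical, while the endpoint, model, and request shape are taken from the existing scripts in this directory.

#!/bin/bash
# Hypothetical new sample (sketch only): the [START]/[END] region tags demarcate
# the spotlight code that gets embedded in the documentation.

echo "[START sample_hello]"
# [START sample_hello]
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
  -H 'Content-Type: application/json' \
  -d '{"contents": {"parts": {"text": "Say hello in one short sentence."}}}'
# [END sample_hello]

A script like this would also need a row in the Contents table above; otherwise the Validate samples workflow added in this commit fails the pull request.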

samples/rest/code_execution.sh

Lines changed: 48 additions & 6 deletions
@@ -4,13 +4,55 @@ echo "[START code_execution_basic]"
 # [START code_execution_basic]
 curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
 -H 'Content-Type: application/json' \
--d ' {
-    "tools": [{"code_execution": {}}],
+-d '{"tools": [{"code_execution": {}}],
     "contents": {
-      "parts": {
-        "text": "What is the sum of the first 50 prime numbers? Generate
-        and run code for the calculation, and make sure you get all 50."
+      "parts":
+        {
+          "text": "What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."
         }
     }
 }'
 # [END code_execution_basic]
+
+echo "[START code_execution_chat]"
+# [START code_execution_chat]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{"tools": [{"code_execution": {}}],
+    "contents": [
+      {
+        "role": "user",
+        "parts": [{
+          "text": "Can you print \"Hello world!\"?"
+        }]
+      },{
+        "role": "model",
+        "parts": [
+          {
+            "text": ""
+          },
+          {
+            "executable_code": {
+              "language": "PYTHON",
+              "code": "\nprint(\"hello world!\")\n"
+            }
+          },
+          {
+            "code_execution_result": {
+              "outcome": "OUTCOME_OK",
+              "output": "hello world!\n"
+            }
+          },
+          {
+            "text": "I have printed \"hello world!\" using the provided python code block. \n"
+          }
+        ]
+      },{
+        "role": "user",
+        "parts": [{
+          "text": "What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."
+        }]
+      }
+    ]
+}'
+# [END code_execution_chat]
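
Both requests above print the raw JSON response to stdout. When iterating on a sample it can be convenient to capture the response and filter it with jq, as count_tokens.sh below does for usage metadata. The snippet here is a sketch, not part of the commit: the request.json file and the .candidates[0].content.parts path (the usual generateContent response layout) are assumptions, not something shown in this diff.

# Sketch (not in this commit): capture the response and inspect the returned parts.
# Assumes the chat request body above has been saved to request.json.
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
  -H 'Content-Type: application/json' \
  -d @request.json > response.json

# The jq path assumes the usual generateContent response shape (candidates -> content -> parts).
jq '.candidates[0].content.parts' response.json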

samples/rest/count_tokens.sh

Lines changed: 120 additions & 5 deletions
@@ -4,6 +4,7 @@ SCRIPT_DIR=$(dirname "$0")
 MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party)
 
 TEXT_PATH=${MEDIA_DIR}/poem.txt
+A11_PATH=${MEDIA_DIR}/a11.txt
 IMG_PATH=${MEDIA_DIR}/organ.jpg
 AUDIO_PATH=${MEDIA_DIR}/sample.mp3
 VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4
@@ -16,6 +17,13 @@ else
 B64FLAGS="-w0"
 fi
 
+echo "[START tokens_context_window]"
+# [START tokens_context_window]
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro?key=$GOOGLE_API_KEY > model.json
+jq .inputTokenLimit model.json
+jq .outputTokenLimit model.json
+# [END tokens_context_window]
+
 echo "[START tokens_text_only]"
 # [START tokens_text_only]
 curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY \
@@ -97,7 +105,6 @@ curl "${upload_url}" \
 --data-binary "@${IMG_PATH}" 2> /dev/null > file_info.json
 
 file_uri=$(jq ".file.uri" file_info.json)
-echo file_uri=$file_uri
 
 curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \
 -H 'Content-Type: application/json' \
@@ -143,13 +150,10 @@ curl "${upload_url}" \
 --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json
 
 file_uri=$(jq ".file.uri" file_info.json)
-echo file_uri=$file_uri
 
 state=$(jq ".file.state" file_info.json)
-echo state=$state
 
 name=$(jq ".file.name" file_info.json)
-echo name=$name
 
 while [[ "($state)" = *"PROCESSING"* ]];
 do
@@ -170,4 +174,115 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:c
 {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}]
 }]
 }'
-# [END tokens_multimodal_video_audio_file_api]
+# [END tokens_multimodal_video_audio_file_api]
+
+echo "[START tokens_cached_content]"
+# [START tokens_cached_content]
+echo '{
+  "model": "models/gemini-1.5-flash-001",
+  "contents": [
+    {
+      "parts": [
+        {
+          "inline_data": {
+            "mime_type": "text/plain",
+            "data": "'$(base64 $B64FLAGS $A11_PATH)'"
+          }
+        }
+      ],
+      "role": "user"
+    }
+  ],
+  "systemInstruction": {
+    "parts": [
+      {
+        "text": "You are an expert at analyzing transcripts."
+      }
+    ]
+  },
+  "ttl": "300s"
+}' > request.json
+
+curl -X POST "https://generativelanguage.googleapis.com/v1beta/cachedContents?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -d @request.json \
+ > cache.json
+
+jq .usageMetadata.totalTokenCount cache.json
+# [END tokens_cached_content]
+
+echo "[START tokens_system_instruction]"
+# [START tokens_system_instruction]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{ "system_instruction": {
+    "parts":
+      { "text": "You are a cat. Your name is Neko."}},
+    "contents": {
+      "parts": {
+        "text": "Hello there"}}}' > system_instructions.json
+
+jq .usageMetadata.totalTokenCount system_instructions.json
+# [END tokens_system_instruction]
+
+echo "[START tokens_tools]"
+# [START tokens_tools]
+cat > tools.json << EOF
+{
+  "function_declarations": [
+    {
+      "name": "enable_lights",
+      "description": "Turn on the lighting system.",
+      "parameters": { "type": "object" }
+    },
+    {
+      "name": "set_light_color",
+      "description": "Set the light color. Lights must be enabled for this to work.",
+      "parameters": {
+        "type": "object",
+        "properties": {
+          "rgb_hex": {
+            "type": "string",
+            "description": "The light color as a 6-digit hex string, e.g. ff0000 for red."
+          }
+        },
+        "required": [
+          "rgb_hex"
+        ]
+      }
+    },
+    {
+      "name": "stop_lights",
+      "description": "Turn off the lighting system.",
+      "parameters": { "type": "object" }
+    }
+  ]
+}
+EOF
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '
+{
+  "system_instruction": {
+    "parts": {
+      "text": "You are a helpful lighting system bot. You can turn lights on and off, and you can set the color. Do not perform any other tasks."
+    }
+  },
+  "tools": ['"$(cat tools.json)"'],
+
+  "tool_config": {
+    "function_calling_config": {"mode": "none"}
+  },
+
+  "contents": {
+    "role": "user",
+    "parts": {
+      "text": "What can you do?"
+    }
+  }
+}
+' > tools_output.json
+
+jq .usageMetadata.totalTokenCount tools_output.json
+# [END tokens_tools]
