@@ -45,7 +45,7 @@ vertex_credentials_json = json.dumps(vertex_credentials)

 ## COMPLETION CALL
 response = completion(
-    model="vertex_ai/gemini-pro",
+    model="vertex_ai/gemini-2.5-pro",
     messages=[{"content": "Hello, how are you?", "role": "user"}],
     vertex_credentials=vertex_credentials_json
 )
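For context, the `vertex_credentials_json` value shown in the hunk header is typically built from a service-account key; a minimal sketch, assuming a local key file at an illustrative path:

```python
import json

# Load a GCP service-account key (path is illustrative) and serialize it
# for the `vertex_credentials` parameter used above.
with open("path/to/vertex_key.json", "r") as f:
    vertex_credentials = json.load(f)

vertex_credentials_json = json.dumps(vertex_credentials)
```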
@@ -69,7 +69,7 @@ vertex_credentials_json = json.dumps(vertex_credentials)


 response = completion(
-    model="vertex_ai/gemini-pro",
+    model="vertex_ai/gemini-2.5-pro",
     messages=[{"content": "You are a good bot.", "role": "system"}, {"content": "Hello, how are you?", "role": "user"}],
     vertex_credentials=vertex_credentials_json
 )
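A typical follow-up to inspect the reply; the response object follows the OpenAI-compatible shape:

```python
# Inspect the assistant reply (OpenAI-compatible response object).
print(response.choices[0].message.content)
```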
@@ -189,7 +189,7 @@ print(json.loads(completion.choices[0].message.content))
 1. Add model to config.yaml
 ```yaml
 model_list:
-  - model_name: gemini-pro
+  - model_name: gemini-2.5-pro
     litellm_params:
       model: vertex_ai/gemini-1.5-pro
       vertex_project: "project-id"
@@ -210,7 +210,7 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
 -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer sk-1234' \
 -d '{
-  "model": "gemini-pro",
+  "model": "gemini-2.5-pro",
   "messages": [
     {"role": "user", "content": "List 5 popular cookie recipes."}
   ],
@@ -262,7 +262,7 @@ except JSONSchemaValidationError as e:
 1. Add model to config.yaml
 ```yaml
 model_list:
-  - model_name: gemini-pro
+  - model_name: gemini-2.5-pro
     litellm_params:
       model: vertex_ai/gemini-1.5-pro
       vertex_project: "project-id"
@@ -283,7 +283,7 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
 -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer sk-1234' \
 -d '{
-  "model": "gemini-pro",
+  "model": "gemini-2.5-pro",
   "messages": [
     {"role": "user", "content": "List 5 popular cookie recipes."}
   ],
@@ -391,7 +391,7 @@ client = OpenAI(
 )

 response = client.chat.completions.create(
-    model="gemini-pro",
+    model="gemini-2.5-pro",
     messages=[{"role": "user", "content": "Who won the world cup?"}],
     tools=[{"googleSearch": {}}],
 )
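The same Google Search grounding call can also go through the SDK directly rather than the proxy; a minimal sketch, assuming the `vertex_ai/` model prefix:

```python
from litellm import completion

# Direct SDK equivalent of the proxy call above (model prefix assumed).
response = completion(
    model="vertex_ai/gemini-2.5-pro",
    messages=[{"role": "user", "content": "Who won the world cup?"}],
    tools=[{"googleSearch": {}}],
)
```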
@@ -406,7 +406,7 @@ curl http://localhost:4000/v1/chat/completions \
 -H "Content-Type: application/json" \
 -H "Authorization: Bearer sk-1234" \
 -d '{
-  "model": "gemini-pro",
+  "model": "gemini-2.5-pro",
   "messages": [
     {"role": "user", "content": "Who won the world cup?"}
   ],
@@ -527,7 +527,7 @@ client = OpenAI(
 )

 response = client.chat.completions.create(
-    model="gemini-pro",
+    model="gemini-2.5-pro",
     messages=[{"role": "user", "content": "Who won the world cup?"}],
     tools=[{"enterpriseWebSearch": {}}],
 )
@@ -542,7 +542,7 @@ curl http://localhost:4000/v1/chat/completions \
 -H "Content-Type: application/json" \
 -H "Authorization: Bearer sk-1234" \
 -d '{
-  "model": "gemini-pro",
+  "model": "gemini-2.5-pro",
   "messages": [
     {"role": "user", "content": "Who won the world cup?"}
   ],
@@ -835,7 +835,7 @@ import litellm
 litellm.vertex_project = "hardy-device-38811"  # Your Project ID
 litellm.vertex_location = "us-central1"  # proj location

-response = litellm.completion(model="gemini-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
+response = litellm.completion(model="gemini-2.5-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
 ```

 ## Usage with LiteLLM Proxy Server
@@ -876,9 +876,9 @@ Here's how to use Vertex AI with the LiteLLM Proxy Server
       vertex_location: "us-central1" # proj location

 model_list:
-  - model_name: team1-gemini-pro
+  - model_name: team1-gemini-2.5-pro
     litellm_params:
-      model: gemini-pro
+      model: gemini-2.5-pro
 ```

 </TabItem>
@@ -905,7 +905,7 @@ Here's how to use Vertex AI with the LiteLLM Proxy Server
 )

 response = client.chat.completions.create(
-    model="team1-gemini-pro",
+    model="team1-gemini-2.5-pro",
     messages=[
         {
             "role": "user",
@@ -925,7 +925,7 @@ Here's how to use Vertex AI with the LiteLLM Proxy Server
 --header 'Authorization: Bearer sk-1234' \
 --header 'Content-Type: application/json' \
 --data '{
-  "model": "team1-gemini-pro",
+  "model": "team1-gemini-2.5-pro",
   "messages": [
     {
       "role": "user",
@@ -975,7 +975,7 @@ vertex_credentials_json = json.dumps(vertex_credentials)


 response = completion(
-    model="vertex_ai/gemini-pro",
+    model="vertex_ai/gemini-2.5-pro",
     messages=[{"content": "You are a good bot.", "role": "system"}, {"content": "Hello, how are you?", "role": "user"}],
     vertex_credentials=vertex_credentials_json,
     vertex_project="my-special-project",
@@ -1039,7 +1039,7 @@ In certain use-cases you may need to make calls to the models and pass [safety s

 ```python
 response = completion(
-    model="vertex_ai/gemini-pro",
+    model="vertex_ai/gemini-2.5-pro",
     messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}],
     safety_settings=[
         {
@@ -1153,7 +1153,7 @@ litellm.vertex_ai_safety_settings = [
     },
 ]
 response = completion(
-    model="vertex_ai/gemini-pro",
+    model="vertex_ai/gemini-2.5-pro",
     messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}]
 )
 ```
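For reference, a fully spelled-out entry in the `vertex_ai_safety_settings` list elided above pairs a harm category with a blocking threshold; a minimal sketch using standard Vertex AI values (pick thresholds to match your policy):

```python
import litellm

# One complete safety-setting entry; the category/threshold strings are
# standard Vertex AI values, shown here as an illustrative default.
litellm.vertex_ai_safety_settings = [
    {
        "category": "HARM_CATEGORY_HARASSMENT",
        "threshold": "BLOCK_ONLY_HIGH",
    },
]
```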
@@ -1212,7 +1212,7 @@ litellm.vertex_location = "us-central1" # Your Location
 ## Gemini Pro
 | Model Name     | Function Call |
 |----------------|---------------|
-| gemini-pro     | `completion('gemini-pro', messages)`, `completion('vertex_ai/gemini-pro', messages)` |
+| gemini-2.5-pro | `completion('gemini-2.5-pro', messages)`, `completion('vertex_ai/gemini-2.5-pro', messages)` |

 ## Fine-tuned Models
@@ -1307,7 +1307,7 @@ curl --location 'https://0.0.0.0:4000/v1/chat/completions' \
 ## Gemini Pro Vision
 | Model Name            | Function Call |
 |-----------------------|---------------|
-| gemini-pro-vision     | `completion('gemini-pro-vision', messages)`, `completion('vertex_ai/gemini-pro-vision', messages)` |
+| gemini-2.5-pro-vision | `completion('gemini-2.5-pro-vision', messages)`, `completion('vertex_ai/gemini-2.5-pro-vision', messages)` |

 ## Gemini 1.5 Pro (and Vision)
 | Model Name | Function Call |
@@ -1321,7 +1321,7 @@ curl --location 'https://0.0.0.0:4000/v1/chat/completions' \

 #### Using Gemini Pro Vision

-Call `gemini-pro-vision` in the same input/output format as OpenAI [`gpt-4-vision`](https://docs.litellm.ai/docs/providers/openai#openai-vision-models)
+Call `gemini-2.5-pro-vision` in the same input/output format as OpenAI [`gpt-4-vision`](https://docs.litellm.ai/docs/providers/openai#openai-vision-models)

 LiteLLM supports the following image types passed in `url`
 - Images with Cloud Storage URIs - gs://cloud-samples-data/generative-ai/image/boats.jpeg
@@ -1339,7 +1339,7 @@ LiteLLM supports the following image types passed in `url`
 import litellm

 response = litellm.completion(
-    model="vertex_ai/gemini-pro-vision",
+    model="vertex_ai/gemini-2.5-pro-vision",
     messages=[
         {
             "role": "user",
@@ -1377,7 +1377,7 @@ image_path = "cached_logo.jpg"
 # Getting the base64 string
 base64_image = encode_image(image_path)
 response = litellm.completion(
-    model="vertex_ai/gemini-pro-vision",
+    model="vertex_ai/gemini-2.5-pro-vision",
     messages=[
         {
             "role": "user",
@@ -1433,7 +1433,7 @@ tools = [
 messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]

 response = completion(
-    model="vertex_ai/gemini-pro-vision",
+    model="vertex_ai/gemini-2.5-pro-vision",
     messages=messages,
     tools=tools,
 )
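The `tools` list named in the hunk header follows the OpenAI function-calling shape; a minimal sketch of one definition (the function name and schema are illustrative):

```python
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",  # illustrative function name
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                },
                "required": ["location"],
            },
        },
    }
]
```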