@@ -45,7 +45,7 @@ vertex_credentials_json = json.dumps(vertex_credentials)
 
 ## COMPLETION CALL
 response = completion(
-  model="vertex_ai/gemini-pro",
+  model="vertex_ai/gemini-2.5-pro",
   messages=[{"content": "Hello, how are you?", "role": "user"}],
   vertex_credentials=vertex_credentials_json
 )
@@ -69,7 +69,7 @@ vertex_credentials_json = json.dumps(vertex_credentials)
 
 
 response = completion(
-  model="vertex_ai/gemini-pro",
+  model="vertex_ai/gemini-2.5-pro",
   messages=[{"content": "You are a good bot.", "role": "system"}, {"content": "Hello, how are you?", "role": "user"}],
   vertex_credentials=vertex_credentials_json
 )
@@ -189,7 +189,7 @@ print(json.loads(completion.choices[0].message.content))
 1. Add model to config.yaml
 ```yaml
 model_list:
-  - model_name: gemini-pro
+  - model_name: gemini-2.5-pro
     litellm_params:
       model: vertex_ai/gemini-1.5-pro
       vertex_project: "project-id"
@@ -222,7 +222,7 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
 -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer sk-1234' \
 -d '{
-  "model": "gemini-pro",
+  "model": "gemini-2.5-pro",
   "messages": [
     {"role": "user", "content": "List 5 popular cookie recipes."}
   ],
@@ -274,7 +274,7 @@ except JSONSchemaValidationError as e:
 1. Add model to config.yaml
 ```yaml
 model_list:
-  - model_name: gemini-pro
+  - model_name: gemini-2.5-pro
     litellm_params:
       model: vertex_ai/gemini-1.5-pro
       vertex_project: "project-id"
@@ -295,7 +295,7 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
 -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer sk-1234' \
 -d '{
-  "model": "gemini-pro",
+  "model": "gemini-2.5-pro",
   "messages": [
     {"role": "user", "content": "List 5 popular cookie recipes."}
   ],
@@ -403,7 +403,7 @@ client = OpenAI(
 )
 
 response = client.chat.completions.create(
-    model="gemini-pro",
+    model="gemini-2.5-pro",
     messages=[{"role": "user", "content": "Who won the world cup?"}],
     tools=[{"googleSearch": {}}],
 )
@@ -418,7 +418,7 @@ curl http://localhost:4000/v1/chat/completions \
 -H "Content-Type: application/json" \
 -H "Authorization: Bearer sk-1234" \
 -d '{
-  "model": "gemini-pro",
+  "model": "gemini-2.5-pro",
   "messages": [
     {"role": "user", "content": "Who won the world cup?"}
   ],
@@ -539,7 +539,7 @@ client = OpenAI(
 )
 
 response = client.chat.completions.create(
-    model="gemini-pro",
+    model="gemini-2.5-pro",
     messages=[{"role": "user", "content": "Who won the world cup?"}],
     tools=[{"enterpriseWebSearch": {}}],
 )
@@ -554,7 +554,7 @@ curl http://localhost:4000/v1/chat/completions \
 -H "Content-Type: application/json" \
 -H "Authorization: Bearer sk-1234" \
 -d '{
-  "model": "gemini-pro",
+  "model": "gemini-2.5-pro",
   "messages": [
     {"role": "user", "content": "Who won the world cup?"}
   ],
@@ -847,7 +847,7 @@ import litellm
 litellm.vertex_project = "hardy-device-38811" # Your Project ID
 litellm.vertex_location = "us-central1" # proj location
 
-response = litellm.completion(model="gemini-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
+response = litellm.completion(model="gemini-2.5-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
 ```
 
 ## Usage with LiteLLM Proxy Server
@@ -888,9 +888,9 @@ Here's how to use Vertex AI with the LiteLLM Proxy Server
   vertex_location: "us-central1" # proj location
 
 model_list:
-  - model_name: team1-gemini-pro
+  - model_name: team1-gemini-2.5-pro
     litellm_params:
-      model: gemini-pro
+      model: gemini-2.5-pro
 ```
 
 </TabItem>
@@ -917,7 +917,7 @@ Here's how to use Vertex AI with the LiteLLM Proxy Server
 )
 
 response = client.chat.completions.create(
-    model="team1-gemini-pro",
+    model="team1-gemini-2.5-pro",
     messages=[
         {
             "role": "user",
@@ -937,7 +937,7 @@ Here's how to use Vertex AI with the LiteLLM Proxy Server
 --header 'Authorization: Bearer sk-1234' \
 --header 'Content-Type: application/json' \
 --data '{
-    "model": "team1-gemini-pro",
+    "model": "team1-gemini-2.5-pro",
     "messages": [
         {
             "role": "user",
@@ -987,7 +987,7 @@ vertex_credentials_json = json.dumps(vertex_credentials)
 
 
 response = completion(
-  model="vertex_ai/gemini-pro",
+  model="vertex_ai/gemini-2.5-pro",
   messages=[{"content": "You are a good bot.", "role": "system"}, {"content": "Hello, how are you?", "role": "user"}],
   vertex_credentials=vertex_credentials_json,
   vertex_project="my-special-project",
@@ -1051,7 +1051,7 @@ In certain use-cases you may need to make calls to the models and pass [safety s
 
 ```python
 response = completion(
-  model="vertex_ai/gemini-pro",
+  model="vertex_ai/gemini-2.5-pro",
   messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}],
   safety_settings=[
     {
@@ -1165,7 +1165,7 @@ litellm.vertex_ai_safety_settings = [
     },
 ]
 response = completion(
-  model="vertex_ai/gemini-pro",
+  model="vertex_ai/gemini-2.5-pro",
   messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}]
 )
 ```
@@ -1224,7 +1224,7 @@ litellm.vertex_location = "us-central1" # Your Location
 ## Gemini Pro
 | Model Name     | Function Call |
 |----------------|---------------|
-| gemini-pro     | `completion('gemini-pro', messages)`, `completion('vertex_ai/gemini-pro', messages)` |
+| gemini-2.5-pro | `completion('gemini-2.5-pro', messages)`, `completion('vertex_ai/gemini-2.5-pro', messages)` |
 
 ## Fine-tuned Models
 
@@ -1319,7 +1319,7 @@ curl --location 'https://0.0.0.0:4000/v1/chat/completions' \
 ## Gemini Pro Vision
 | Model Name            | Function Call |
 |-----------------------|---------------|
-| gemini-pro-vision     | `completion('gemini-pro-vision', messages)`, `completion('vertex_ai/gemini-pro-vision', messages)` |
+| gemini-2.5-pro-vision | `completion('gemini-2.5-pro-vision', messages)`, `completion('vertex_ai/gemini-2.5-pro-vision', messages)` |
 
 ## Gemini 1.5 Pro (and Vision)
 | Model Name | Function Call |
@@ -1333,7 +1333,7 @@ curl --location 'https://0.0.0.0:4000/v1/chat/completions' \
 
 #### Using Gemini Pro Vision
 
-Call `gemini-pro-vision` in the same input/output format as OpenAI [`gpt-4-vision`](https://docs.litellm.ai/docs/providers/openai#openai-vision-models)
+Call `gemini-2.5-pro-vision` in the same input/output format as OpenAI [`gpt-4-vision`](https://docs.litellm.ai/docs/providers/openai#openai-vision-models)
 
 LiteLLM Supports the following image types passed in `url`
 - Images with Cloud Storage URIs - gs://cloud-samples-data/generative-ai/image/boats.jpeg
@@ -1351,7 +1351,7 @@ LiteLLM Supports the following image types passed in `url`
 import litellm
 
 response = litellm.completion(
-  model="vertex_ai/gemini-pro-vision",
+  model="vertex_ai/gemini-2.5-pro-vision",
   messages=[
     {
       "role": "user",
@@ -1389,7 +1389,7 @@ image_path = "cached_logo.jpg"
 # Getting the base64 string
 base64_image = encode_image(image_path)
 response = litellm.completion(
-  model="vertex_ai/gemini-pro-vision",
+  model="vertex_ai/gemini-2.5-pro-vision",
   messages=[
     {
       "role": "user",
@@ -1445,7 +1445,7 @@ tools = [
 messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]
 
 response = completion(
-  model="vertex_ai/gemini-pro-vision",
+  model="vertex_ai/gemini-2.5-pro-vision",
   messages=messages,
   tools=tools,
 )