Commit ed4c2b6

Update model references from gemini-pro to gemini-2.5-pro
Parent: 52a56bd

File tree: 1 file changed (+24 −24 lines)

docs/my-website/docs/providers/vertex.md

Lines changed: 24 additions & 24 deletions
@@ -45,7 +45,7 @@ vertex_credentials_json = json.dumps(vertex_credentials)
 
 ## COMPLETION CALL
 response = completion(
-  model="vertex_ai/gemini-pro",
+  model="vertex_ai/gemini-2.5-pro",
   messages=[{ "content": "Hello, how are you?","role": "user"}],
   vertex_credentials=vertex_credentials_json
 )
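
The hunk above shows only the call site; a minimal end-to-end sketch of the updated call, assuming a service-account key file on disk (the path is illustrative), would be:

```python
import json

from litellm import completion

# Hypothetical key-file path; substitute your own service-account JSON
with open("path/to/vertex_ai_service_account.json", "r") as f:
    vertex_credentials = json.load(f)
vertex_credentials_json = json.dumps(vertex_credentials)

response = completion(
    model="vertex_ai/gemini-2.5-pro",
    messages=[{"content": "Hello, how are you?", "role": "user"}],
    vertex_credentials=vertex_credentials_json,
)
print(response.choices[0].message.content)
```
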
@@ -69,7 +69,7 @@ vertex_credentials_json = json.dumps(vertex_credentials)
 
 
 response = completion(
-  model="vertex_ai/gemini-pro",
+  model="vertex_ai/gemini-2.5-pro",
   messages=[{"content": "You are a good bot.","role": "system"}, {"content": "Hello, how are you?","role": "user"}],
   vertex_credentials=vertex_credentials_json
 )
@@ -189,7 +189,7 @@ print(json.loads(completion.choices[0].message.content))
 1. Add model to config.yaml
 ```yaml
 model_list:
-  - model_name: gemini-pro
+  - model_name: gemini-2.5-pro
     litellm_params:
      model: vertex_ai/gemini-1.5-pro
      vertex_project: "project-id"
@@ -210,7 +210,7 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
 -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer sk-1234' \
 -D '{
-  "model": "gemini-pro",
+  "model": "gemini-2.5-pro",
   "messages": [
     {"role": "user", "content": "List 5 popular cookie recipes."}
   ],
@@ -262,7 +262,7 @@ except JSONSchemaValidationError as e:
 1. Add model to config.yaml
 ```yaml
 model_list:
-  - model_name: gemini-pro
+  - model_name: gemini-2.5-pro
     litellm_params:
      model: vertex_ai/gemini-1.5-pro
      vertex_project: "project-id"
@@ -283,7 +283,7 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
 -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer sk-1234' \
 -D '{
-  "model": "gemini-pro",
+  "model": "gemini-2.5-pro",
   "messages": [
     {"role": "user", "content": "List 5 popular cookie recipes."}
   ],
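
The `@@` context of the hunk above references `except JSONSchemaValidationError as e:`. For readers following along, a sketch of the surrounding client-side validation pattern, assuming the `response_schema` / `enforce_validation` usage documented elsewhere on this page (the cookie-recipe schema is illustrative):

```python
from litellm import JSONSchemaValidationError, completion

# Illustrative schema matching the "List 5 popular cookie recipes" request
response_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {"recipe_name": {"type": "string"}},
        "required": ["recipe_name"],
    },
}

try:
    response = completion(
        model="vertex_ai/gemini-2.5-pro",
        messages=[{"role": "user", "content": "List 5 popular cookie recipes."}],
        response_format={
            "type": "json_object",
            "response_schema": response_schema,
            "enforce_validation": True,  # raises if output does not match the schema
        },
    )
except JSONSchemaValidationError as e:
    # e.raw_response carries the unvalidated model output
    print("Raw Response: {}".format(e.raw_response))
    raise
```
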
@@ -391,7 +391,7 @@ client = OpenAI(
 )
 
 response = client.chat.completions.create(
-    model="gemini-pro",
+    model="gemini-2.5-pro",
     messages=[{"role": "user", "content": "Who won the world cup?"}],
     tools=[{"googleSearch": {}}],
 )
@@ -406,7 +406,7 @@ curl http://localhost:4000/v1/chat/completions \
   -H "Content-Type: application/json" \
   -H "Authorization: Bearer sk-1234" \
   -d '{
-    "model": "gemini-pro",
+    "model": "gemini-2.5-pro",
     "messages": [
       {"role": "user", "content": "Who won the world cup?"}
     ],
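
For the same grounding call without the proxy, the SDK accepts the identical `tools` entry directly. A sketch; the `googleSearch` tool spec is taken verbatim from the hunks above:

```python
from litellm import completion

response = completion(
    model="vertex_ai/gemini-2.5-pro",
    messages=[{"role": "user", "content": "Who won the world cup?"}],
    tools=[{"googleSearch": {}}],  # Vertex AI grounding via Google Search
)
print(response.choices[0].message.content)
```
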
@@ -527,7 +527,7 @@ client = OpenAI(
 )
 
 response = client.chat.completions.create(
-    model="gemini-pro",
+    model="gemini-2.5-pro",
     messages=[{"role": "user", "content": "Who won the world cup?"}],
     tools=[{"enterpriseWebSearch": {}}],
 )
@@ -542,7 +542,7 @@ curl http://localhost:4000/v1/chat/completions \
   -H "Content-Type: application/json" \
   -H "Authorization: Bearer sk-1234" \
   -d '{
-    "model": "gemini-pro",
+    "model": "gemini-2.5-pro",
     "messages": [
       {"role": "user", "content": "Who won the world cup?"}
     ],
@@ -835,7 +835,7 @@ import litellm
 litellm.vertex_project = "hardy-device-38811" # Your Project ID
 litellm.vertex_location = "us-central1" # proj location
 
-response = litellm.completion(model="gemini-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
+response = litellm.completion(model="gemini-2.5-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
 ```
 
 ## Usage with LiteLLM Proxy Server
@@ -876,9 +876,9 @@ Here's how to use Vertex AI with the LiteLLM Proxy Server
   vertex_location: "us-central1" # proj location
 
 model_list:
-  - model_name: team1-gemini-pro
+  - model_name: team1-gemini-2.5-pro
     litellm_params:
-      model: gemini-pro
+      model: gemini-2.5-pro
 ```
 
 </TabItem>
@@ -905,7 +905,7 @@ Here's how to use Vertex AI with the LiteLLM Proxy Server
 )
 
 response = client.chat.completions.create(
-    model="team1-gemini-pro",
+    model="team1-gemini-2.5-pro",
     messages = [
         {
             "role": "user",
@@ -925,7 +925,7 @@ Here's how to use Vertex AI with the LiteLLM Proxy Server
 --header 'Authorization: Bearer sk-1234' \
 --header 'Content-Type: application/json' \
 --data '{
-    "model": "team1-gemini-pro",
+    "model": "team1-gemini-2.5-pro",
     "messages": [
         {
             "role": "user",
@@ -975,7 +975,7 @@ vertex_credentials_json = json.dumps(vertex_credentials)
 
 
 response = completion(
-  model="vertex_ai/gemini-pro",
+  model="vertex_ai/gemini-2.5-pro",
   messages=[{"content": "You are a good bot.","role": "system"}, {"content": "Hello, how are you?","role": "user"}],
   vertex_credentials=vertex_credentials_json,
   vertex_project="my-special-project",
@@ -1039,7 +1039,7 @@ In certain use-cases you may need to make calls to the models and pass [safety s
 
 ```python
 response = completion(
-  model="vertex_ai/gemini-pro",
+  model="vertex_ai/gemini-2.5-pro",
   messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}]
   safety_settings=[
     {
@@ -1153,7 +1153,7 @@ litellm.vertex_ai_safety_settings = [
     },
 ]
 response = completion(
-    model="vertex_ai/gemini-pro",
+    model="vertex_ai/gemini-2.5-pro",
     messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}]
 )
 ```
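
Both safety-settings hunks are cut off by the diff context. For reference, a complete per-request sketch with the four standard Gemini harm categories; the `BLOCK_NONE` thresholds are illustrative only:

```python
from litellm import completion

response = completion(
    model="vertex_ai/gemini-2.5-pro",
    messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}],
    safety_settings=[
        # category/threshold pairs follow Google's Gen AI safety-setting enums
        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
    ],
)
```
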
@@ -1212,7 +1212,7 @@ litellm.vertex_location = "us-central1 # Your Location
 ## Gemini Pro
 | Model Name       | Function Call                        |
 |------------------|--------------------------------------|
-| gemini-pro       | `completion('gemini-pro', messages)`, `completion('vertex_ai/gemini-pro', messages)` |
+| gemini-2.5-pro   | `completion('gemini-2.5-pro', messages)`, `completion('vertex_ai/gemini-2.5-pro', messages)` |
 
 ## Fine-tuned Models
 
@@ -1307,7 +1307,7 @@ curl --location 'https://0.0.0.0:4000/v1/chat/completions' \
 ## Gemini Pro Vision
 | Model Name       | Function Call                        |
 |------------------|--------------------------------------|
-| gemini-pro-vision | `completion('gemini-pro-vision', messages)`, `completion('vertex_ai/gemini-pro-vision', messages)`|
+| gemini-2.5-pro-vision | `completion('gemini-2.5-pro-vision', messages)`, `completion('vertex_ai/gemini-2.5-pro-vision', messages)`|
 
 ## Gemini 1.5 Pro (and Vision)
 | Model Name | Function Call |
@@ -1321,7 +1321,7 @@ curl --location 'https://0.0.0.0:4000/v1/chat/completions' \
 
 #### Using Gemini Pro Vision
 
-Call `gemini-pro-vision` in the same input/output format as OpenAI [`gpt-4-vision`](https://docs.litellm.ai/docs/providers/openai#openai-vision-models)
+Call `gemini-2.5-pro-vision` in the same input/output format as OpenAI [`gpt-4-vision`](https://docs.litellm.ai/docs/providers/openai#openai-vision-models)
 
 LiteLLM Supports the following image types passed in `url`
 - Images with Cloud Storage URIs - gs://cloud-samples-data/generative-ai/image/boats.jpeg
@@ -1339,7 +1339,7 @@ LiteLLM Supports the following image types passed in `url`
 import litellm
 
 response = litellm.completion(
-  model = "vertex_ai/gemini-pro-vision",
+  model = "vertex_ai/gemini-2.5-pro-vision",
   messages=[
     {
       "role": "user",
@@ -1377,7 +1377,7 @@ image_path = "cached_logo.jpg"
 # Getting the base64 string
 base64_image = encode_image(image_path)
 response = litellm.completion(
-    model="vertex_ai/gemini-pro-vision",
+    model="vertex_ai/gemini-2.5-pro-vision",
     messages=[
         {
             "role": "user",
@@ -1433,7 +1433,7 @@ tools = [
 messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]
 
 response = completion(
-  model="vertex_ai/gemini-pro-vision",
+  model="vertex_ai/gemini-2.5-pro-vision",
   messages=messages,
   tools=tools,
 )
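
The function-calling hunk assumes a `tools` list defined above the shown context. A sketch of a conventional OpenAI-style definition for the `get_current_weather` example it references (the schema mirrors the standard tool-calling format; the model name is as renamed by this commit):

```python
from litellm import completion

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    }
]

messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]

response = completion(
    model="vertex_ai/gemini-2.5-pro-vision",
    messages=messages,
    tools=tools,
)
# Tool invocations, if any, arrive on the message's tool_calls field
print(response.choices[0].message.tool_calls)
```
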
