Skip to content

Commit e377e30

Browse files
Merge pull request #14775 from SmartManoj/patch-3
Docs: Update model references from gemini-pro to gemini-2.5-pro
2 parents: 666bcac + ed4c2b6 · commit e377e30

File tree

1 file changed

+24
-24
lines changed

1 file changed

+24
-24
lines changed

docs/my-website/docs/providers/vertex.md

Lines changed: 24 additions & 24 deletions
Original file line number | Diff line number | Diff line change
@@ -45,7 +45,7 @@ vertex_credentials_json = json.dumps(vertex_credentials)
4545

4646
## COMPLETION CALL
4747
response = completion(
48-
model="vertex_ai/gemini-pro",
48+
model="vertex_ai/gemini-2.5-pro",
4949
messages=[{ "content": "Hello, how are you?","role": "user"}],
5050
vertex_credentials=vertex_credentials_json
5151
)
@@ -69,7 +69,7 @@ vertex_credentials_json = json.dumps(vertex_credentials)
6969

7070

7171
response = completion(
72-
model="vertex_ai/gemini-pro",
72+
model="vertex_ai/gemini-2.5-pro",
7373
messages=[{"content": "You are a good bot.","role": "system"}, {"content": "Hello, how are you?","role": "user"}],
7474
vertex_credentials=vertex_credentials_json
7575
)
@@ -189,7 +189,7 @@ print(json.loads(completion.choices[0].message.content))
189189
1. Add model to config.yaml
190190
```yaml
191191
model_list:
192-
- model_name: gemini-pro
192+
- model_name: gemini-2.5-pro
193193
litellm_params:
194194
model: vertex_ai/gemini-1.5-pro
195195
vertex_project: "project-id"
@@ -222,7 +222,7 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
222222
-H 'Content-Type: application/json' \
223223
-H 'Authorization: Bearer sk-1234' \
224224
-D '{
225-
"model": "gemini-pro",
225+
"model": "gemini-2.5-pro",
226226
"messages": [
227227
{"role": "user", "content": "List 5 popular cookie recipes."}
228228
],
@@ -274,7 +274,7 @@ except JSONSchemaValidationError as e:
274274
1. Add model to config.yaml
275275
```yaml
276276
model_list:
277-
- model_name: gemini-pro
277+
- model_name: gemini-2.5-pro
278278
litellm_params:
279279
model: vertex_ai/gemini-1.5-pro
280280
vertex_project: "project-id"
@@ -295,7 +295,7 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
295295
-H 'Content-Type: application/json' \
296296
-H 'Authorization: Bearer sk-1234' \
297297
-D '{
298-
"model": "gemini-pro",
298+
"model": "gemini-2.5-pro",
299299
"messages": [
300300
{"role": "user", "content": "List 5 popular cookie recipes."}
301301
],
@@ -403,7 +403,7 @@ client = OpenAI(
403403
)
404404

405405
response = client.chat.completions.create(
406-
model="gemini-pro",
406+
model="gemini-2.5-pro",
407407
messages=[{"role": "user", "content": "Who won the world cup?"}],
408408
tools=[{"googleSearch": {}}],
409409
)
@@ -418,7 +418,7 @@ curl http://localhost:4000/v1/chat/completions \
418418
-H "Content-Type: application/json" \
419419
-H "Authorization: Bearer sk-1234" \
420420
-d '{
421-
"model": "gemini-pro",
421+
"model": "gemini-2.5-pro",
422422
"messages": [
423423
{"role": "user", "content": "Who won the world cup?"}
424424
],
@@ -539,7 +539,7 @@ client = OpenAI(
539539
)
540540

541541
response = client.chat.completions.create(
542-
model="gemini-pro",
542+
model="gemini-2.5-pro",
543543
messages=[{"role": "user", "content": "Who won the world cup?"}],
544544
tools=[{"enterpriseWebSearch": {}}],
545545
)
@@ -554,7 +554,7 @@ curl http://localhost:4000/v1/chat/completions \
554554
-H "Content-Type: application/json" \
555555
-H "Authorization: Bearer sk-1234" \
556556
-d '{
557-
"model": "gemini-pro",
557+
"model": "gemini-2.5-pro",
558558
"messages": [
559559
{"role": "user", "content": "Who won the world cup?"}
560560
],
@@ -847,7 +847,7 @@ import litellm
847847
litellm.vertex_project = "hardy-device-38811" # Your Project ID
848848
litellm.vertex_location = "us-central1" # proj location
849849

850-
response = litellm.completion(model="gemini-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
850+
response = litellm.completion(model="gemini-2.5-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
851851
```
852852

853853
## Usage with LiteLLM Proxy Server
@@ -888,9 +888,9 @@ Here's how to use Vertex AI with the LiteLLM Proxy Server
888888
vertex_location: "us-central1" # proj location
889889

890890
model_list:
891-
-model_name: team1-gemini-pro
891+
-model_name: team1-gemini-2.5-pro
892892
litellm_params:
893-
model: gemini-pro
893+
model: gemini-2.5-pro
894894
```
895895
896896
</TabItem>
@@ -917,7 +917,7 @@ Here's how to use Vertex AI with the LiteLLM Proxy Server
917917
)
918918

919919
response = client.chat.completions.create(
920-
model="team1-gemini-pro",
920+
model="team1-gemini-2.5-pro",
921921
messages = [
922922
{
923923
"role": "user",
@@ -937,7 +937,7 @@ Here's how to use Vertex AI with the LiteLLM Proxy Server
937937
--header 'Authorization: Bearer sk-1234' \
938938
--header 'Content-Type: application/json' \
939939
--data '{
940-
"model": "team1-gemini-pro",
940+
"model": "team1-gemini-2.5-pro",
941941
"messages": [
942942
{
943943
"role": "user",
@@ -987,7 +987,7 @@ vertex_credentials_json = json.dumps(vertex_credentials)
987987

988988

989989
response = completion(
990-
model="vertex_ai/gemini-pro",
990+
model="vertex_ai/gemini-2.5-pro",
991991
messages=[{"content": "You are a good bot.","role": "system"}, {"content": "Hello, how are you?","role": "user"}],
992992
vertex_credentials=vertex_credentials_json,
993993
vertex_project="my-special-project",
@@ -1051,7 +1051,7 @@ In certain use-cases you may need to make calls to the models and pass [safety s
10511051

10521052
```python
10531053
response = completion(
1054-
model="vertex_ai/gemini-pro",
1054+
model="vertex_ai/gemini-2.5-pro",
10551055
messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}]
10561056
safety_settings=[
10571057
{
@@ -1165,7 +1165,7 @@ litellm.vertex_ai_safety_settings = [
11651165
},
11661166
]
11671167
response = completion(
1168-
model="vertex_ai/gemini-pro",
1168+
model="vertex_ai/gemini-2.5-pro",
11691169
messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}]
11701170
)
11711171
```
@@ -1224,7 +1224,7 @@ litellm.vertex_location = "us-central1 # Your Location
12241224
## Gemini Pro
12251225
| Model Name | Function Call |
12261226
|------------------|--------------------------------------|
1227-
| gemini-pro | `completion('gemini-pro', messages)`, `completion('vertex_ai/gemini-pro', messages)` |
1227+
| gemini-2.5-pro | `completion('gemini-2.5-pro', messages)`, `completion('vertex_ai/gemini-2.5-pro', messages)` |
12281228

12291229
## Fine-tuned Models
12301230

@@ -1319,7 +1319,7 @@ curl --location 'https://0.0.0.0:4000/v1/chat/completions' \
13191319
## Gemini Pro Vision
13201320
| Model Name | Function Call |
13211321
|------------------|--------------------------------------|
1322-
| gemini-pro-vision | `completion('gemini-pro-vision', messages)`, `completion('vertex_ai/gemini-pro-vision', messages)`|
1322+
| gemini-2.5-pro-vision | `completion('gemini-2.5-pro-vision', messages)`, `completion('vertex_ai/gemini-2.5-pro-vision', messages)`|
13231323

13241324
## Gemini 1.5 Pro (and Vision)
13251325
| Model Name | Function Call |
@@ -1333,7 +1333,7 @@ curl --location 'https://0.0.0.0:4000/v1/chat/completions' \
13331333

13341334
#### Using Gemini Pro Vision
13351335

1336-
Call `gemini-pro-vision` in the same input/output format as OpenAI [`gpt-4-vision`](https://docs.litellm.ai/docs/providers/openai#openai-vision-models)
1336+
Call `gemini-2.5-pro-vision` in the same input/output format as OpenAI [`gpt-4-vision`](https://docs.litellm.ai/docs/providers/openai#openai-vision-models)
13371337

13381338
LiteLLM Supports the following image types passed in `url`
13391339
- Images with Cloud Storage URIs - gs://cloud-samples-data/generative-ai/image/boats.jpeg
@@ -1351,7 +1351,7 @@ LiteLLM Supports the following image types passed in `url`
13511351
import litellm
13521352

13531353
response = litellm.completion(
1354-
model = "vertex_ai/gemini-pro-vision",
1354+
model = "vertex_ai/gemini-2.5-pro-vision",
13551355
messages=[
13561356
{
13571357
"role": "user",
@@ -1389,7 +1389,7 @@ image_path = "cached_logo.jpg"
13891389
# Getting the base64 string
13901390
base64_image = encode_image(image_path)
13911391
response = litellm.completion(
1392-
model="vertex_ai/gemini-pro-vision",
1392+
model="vertex_ai/gemini-2.5-pro-vision",
13931393
messages=[
13941394
{
13951395
"role": "user",
@@ -1445,7 +1445,7 @@ tools = [
14451445
messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]
14461446

14471447
response = completion(
1448-
model="vertex_ai/gemini-pro-vision",
1448+
model="vertex_ai/gemini-2.5-pro-vision",
14491449
messages=messages,
14501450
tools=tools,
14511451
)

0 commit comments

Comments (0)