Commit 8e44ffc

[FIX] Vertex AI conditional field bug and update default max_tokens to 4096 (#1688)
* Modified vertex_ai.json
* Updated max_tokens to 4096 for all LLM adapters
1 parent dd530d7 · commit 8e44ffc

7 files changed: +7 −6 lines changed

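For reference, after this commit the maximum-output-tokens setting in each adapter schema converges on the shape below. This is an illustrative reconstruction from the diffs that follow, not a verbatim excerpt: the property key (max_tokens) is taken from the commit message, the indentation is assumed, and field order and description wording vary slightly between adapters.

"max_tokens": {
  "type": "number",
  "minimum": 0,
  "multipleOf": 1,
  "default": 4096,
  "title": "Maximum Output Tokens",
  "description": "Maximum number of output tokens to limit LLM replies, the maximum possible differs from model to model."
},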

unstract/sdk1/src/unstract/sdk1/adapters/llm1/static/anthropic.json

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@
   "type": "number",
   "minimum": 0,
   "multipleOf": 1,
-  "default": 512,
+  "default": 4096,
   "title": "Maximum Output Tokens",
   "description": "Maximum number of output tokens to limit LLM replies, the maximum possible differs from model to model."
 },

unstract/sdk1/src/unstract/sdk1/adapters/llm1/static/anyscale.json

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@
   "minimum": 0,
   "multipleOf": 1,
   "title": "Maximum Output Tokens",
-  "default": 256,
+  "default": 4096,
   "description": "Maximum number of output tokens to limit LLM replies, maximum possible varies from model to model."
 },
 "max_retries": {

unstract/sdk1/src/unstract/sdk1/adapters/llm1/static/azure.json

Lines changed: 1 addition & 0 deletions
@@ -50,6 +50,7 @@
   "minimum": 0,
   "multipleOf": 1,
   "title": "Maximum Output Tokens",
+  "default": 4096,
   "description": "Maximum number of output tokens to limit LLM replies, leave it empty to use the maximum possible for the selected model."
 },
 "max_retries": {

unstract/sdk1/src/unstract/sdk1/adapters/llm1/static/bedrock.json

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@
   "type": "number",
   "minimum": 0,
   "multipleOf": 1,
-  "default": 512,
+  "default": 4096,
   "title": "Maximum Output Tokens",
   "description": "Maximum number of output tokens to limit LLM replies, the maximum possible differs from model to model."
 },

unstract/sdk1/src/unstract/sdk1/adapters/llm1/static/mistral.json

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@
   "type": "number",
   "minimum": 0,
   "multipleOf": 1,
-  "default": 512,
+  "default": 4096,
   "title": "Maximum Output Tokens",
   "description": "Maximum number of output tokens to limit LLM replies, the maximum possible differs from model to model."
 },

unstract/sdk1/src/unstract/sdk1/adapters/llm1/static/openai.json

Lines changed: 1 addition & 0 deletions
@@ -43,6 +43,7 @@
   "minimum": 0,
   "multipleOf": 1,
   "title": "Maximum Output Tokens",
+  "default": 4096,
   "description": "Maximum number of output tokens to limit LLM replies, leave it empty to use the maximum possible for the selected model."
 },
 "max_retries": {

unstract/sdk1/src/unstract/sdk1/adapters/llm1/static/vertex_ai.json

Lines changed: 1 addition & 2 deletions
@@ -45,15 +45,14 @@
   "minimum": 0,
   "multipleOf": 1,
   "title": "Max output tokens",
-  "default": 2048,
+  "default": 4096,
   "description": "Maximum number of output tokens to generate. This is limited by the maximum supported by the model and will vary from model to model"
 },
 "safety_settings": {
   "type": "object",
   "title": "Safety Settings",
   "description": "Vertex AI's configurable safety filters",
   "properties": {
-    "type": "string",
     "dangerous_content": {
       "type": "string",
       "title": "Dangerous Content",
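The second change in this hunk is presumably the conditional-field fix named in the commit title: a stray "type": "string" pair sat directly inside the properties object of safety_settings. In JSON Schema, every key under properties must map to a schema object describing a field, so the bare string value made the safety-settings definition invalid. A minimal sketch of the corrected fragment, with assumed indentation and omitting the remaining safety-category fields that are truncated in the diff context above:

"safety_settings": {
  "type": "object",
  "title": "Safety Settings",
  "description": "Vertex AI's configurable safety filters",
  "properties": {
    "dangerous_content": {
      "type": "string",
      "title": "Dangerous Content"
    }
  }
}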
