Skip to content

Commit 3e94570

Browse files
Commit message: "updating max prompt configs, vision support (#109)"
Parent commit: 7a5b91e · This commit: 3e94570

File tree

3 files changed

+39
-35
lines changed

3 files changed

+39
-35
lines changed

src/agentlab/agents/generic_agent/agent_configs.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import bgym
2+
23
from agentlab.agents import dynamic_prompting as dp
34
from agentlab.experiments import args
45
from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT
@@ -41,7 +42,7 @@
4142
use_abstract_example=True,
4243
use_hints=True,
4344
enable_chat=False,
44-
max_prompt_tokens=None,
45+
max_prompt_tokens=40_000,
4546
be_cautious=True,
4647
extra_instructions=None,
4748
)
@@ -89,7 +90,7 @@
8990
use_abstract_example=True, # useful
9091
use_hints=True, # useful
9192
enable_chat=False,
92-
max_prompt_tokens=None,
93+
max_prompt_tokens=40_000,
9394
be_cautious=True,
9495
extra_instructions=None,
9596
)
@@ -136,7 +137,7 @@
136137
use_abstract_example=True,
137138
use_hints=True,
138139
enable_chat=False,
139-
max_prompt_tokens=None,
140+
max_prompt_tokens=40_000,
140141
be_cautious=True,
141142
extra_instructions=None,
142143
add_missparsed_messages=True,
@@ -186,7 +187,7 @@
186187
use_abstract_example=True,
187188
use_hints=True,
188189
enable_chat=False,
189-
max_prompt_tokens=None,
190+
max_prompt_tokens=40_000,
190191
be_cautious=True,
191192
extra_instructions=None,
192193
add_missparsed_messages=True,
@@ -234,7 +235,7 @@
234235
use_abstract_example=True,
235236
use_hints=True,
236237
enable_chat=False,
237-
max_prompt_tokens=None,
238+
max_prompt_tokens=40_000,
238239
be_cautious=True,
239240
extra_instructions=None,
240241
)
@@ -298,7 +299,7 @@
298299
use_hints=args.Choice([True, False], p=[0.7, 0.3]),
299300
be_cautious=args.Choice([True, False]),
300301
enable_chat=False,
301-
max_prompt_tokens=None,
302+
max_prompt_tokens=40_000,
302303
extra_instructions=None,
303304
)
304305

src/agentlab/agents/generic_agent/tmlr_config.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@
4040
use_abstract_example=True,
4141
use_hints=True,
4242
enable_chat=False,
43-
max_prompt_tokens=None,
43+
max_prompt_tokens=40_000,
4444
be_cautious=True,
4545
extra_instructions=None,
4646
)

src/agentlab/llm/llm_configs.py

Lines changed: 31 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -20,28 +20,28 @@
2020
"openai/gpt-4o-mini-2024-07-18": OpenAIModelArgs(
2121
model_name="gpt-4o-mini-2024-07-18",
2222
max_total_tokens=128_000,
23-
max_input_tokens=40_000,
24-
max_new_tokens=4000,
23+
max_input_tokens=100_000,
24+
max_new_tokens=28_000,
2525
vision_support=True,
2626
),
2727
"openai/gpt-4-1106-preview": OpenAIModelArgs(
2828
model_name="gpt-4-1106-preview",
2929
max_total_tokens=128_000,
30-
max_input_tokens=40_000, # make sure we don't bust budget
31-
max_new_tokens=4000,
30+
max_input_tokens=100_000,
31+
max_new_tokens=28_000,
3232
),
3333
"openai/gpt-4-vision-preview": OpenAIModelArgs(
3434
model_name="gpt-4-vision-preview",
3535
max_total_tokens=128_000,
36-
max_input_tokens=40_000, # make sure we don't bust budget
37-
max_new_tokens=4000, # I think this model has very small default value if we don't set max_new_tokens
36+
max_input_tokens=100_000,
37+
max_new_tokens=28_000, # I think this model has very small default value if we don't set max_new_tokens
3838
vision_support=True,
3939
),
4040
"openai/gpt-4o-2024-05-13": OpenAIModelArgs(
4141
model_name="gpt-4o-2024-05-13",
4242
max_total_tokens=128_000,
43-
max_input_tokens=40_000, # make sure we don't bust budget
44-
max_new_tokens=4000, # I think this model has very small default value if we don't set max_new_tokens
43+
max_input_tokens=100_000,
44+
max_new_tokens=28_000, # I think this model has very small default value if we don't set max_new_tokens
4545
vision_support=True,
4646
),
4747
"openai/gpt-3.5-turbo-0125": OpenAIModelArgs(
@@ -67,22 +67,25 @@
6767
model_name="gpt-4o",
6868
deployment_name="gpt-4o-2024-05-13",
6969
max_total_tokens=128_000,
70-
max_input_tokens=40_000,
71-
max_new_tokens=4_000,
70+
max_input_tokens=100_000,
71+
max_new_tokens=28_000,
72+
vision_support=True,
7273
),
7374
"azure/gpt-4o-2024-08-06": AzureModelArgs(
7475
model_name="gpt-4o",
7576
deployment_name="gpt-4o-2024-08-06",
7677
max_total_tokens=128_000,
77-
max_input_tokens=40_000,
78-
max_new_tokens=4_000,
78+
max_input_tokens=100_000,
79+
max_new_tokens=28_000,
80+
vision_support=True,
7981
),
8082
"azure/gpt-4o-mini-2024-07-18": AzureModelArgs(
8183
model_name="gpt-4o-mini",
8284
deployment_name="gpt-4o-mini-2024-07-18",
8385
max_total_tokens=128_000,
84-
max_input_tokens=40_000,
85-
max_new_tokens=4_000,
86+
max_input_tokens=100_000,
87+
max_new_tokens=28_000,
88+
vision_support=True,
8689
),
8790
# ---------------- OSS LLMs ----------------#
8891
"meta-llama/Meta-Llama-3-70B-Instruct": SelfHostedModelArgs(
@@ -113,43 +116,43 @@
113116
"openrouter/meta-llama/llama-3.1-405b-instruct": OpenRouterModelArgs(
114117
model_name="meta-llama/llama-3.1-405b-instruct",
115118
max_total_tokens=128_000,
116-
max_input_tokens=40_000,
117-
max_new_tokens=4000,
119+
max_input_tokens=100_000,
120+
max_new_tokens=28_000,
118121
temperature=1e-1,
119122
),
120123
"openrouter/meta-llama/llama-3.1-70b-instruct": OpenRouterModelArgs(
121124
model_name="meta-llama/llama-3.1-70b-instruct",
122125
max_total_tokens=128_000,
123-
max_input_tokens=40_000,
124-
max_new_tokens=4000,
126+
max_input_tokens=100_000,
127+
max_new_tokens=28_000,
125128
temperature=1e-1,
126129
),
127130
"openrouter/meta-llama/llama-3-70b-instruct": OpenRouterModelArgs(
128131
model_name="meta-llama/llama-3-70b-instruct",
129132
max_total_tokens=128_000,
130-
max_input_tokens=40_000,
131-
max_new_tokens=4000,
133+
max_input_tokens=100_000,
134+
max_new_tokens=28_000,
132135
temperature=1e-1,
133136
),
134137
"openrouter/meta-llama/llama-3.1-8b-instruct:free": OpenRouterModelArgs(
135138
model_name="meta-llama/llama-3.1-8b-instruct:free",
136139
max_total_tokens=128_000,
137-
max_input_tokens=40_000,
138-
max_new_tokens=4000,
140+
max_input_tokens=100_000,
141+
max_new_tokens=28_000,
139142
temperature=1e-1,
140143
),
141144
"openrouter/meta-llama/llama-3.1-8b-instruct": OpenRouterModelArgs(
142145
model_name="meta-llama/llama-3.1-8b-instruct",
143146
max_total_tokens=128_000,
144-
max_input_tokens=40_000,
145-
max_new_tokens=4000,
147+
max_input_tokens=100_000,
148+
max_new_tokens=28_000,
146149
temperature=1e-1,
147150
),
148151
"openrouter/anthropic/claude-3.5-sonnet:beta": OpenRouterModelArgs(
149152
model_name="anthropic/claude-3.5-sonnet:beta",
150153
max_total_tokens=200_000,
151-
max_input_tokens=40_000,
152-
max_new_tokens=4000,
154+
max_input_tokens=160_000,
155+
max_new_tokens=40_000,
153156
temperature=1e-1,
154157
vision_support=True,
155158
),
@@ -163,8 +166,8 @@
163166
"openrouter/openai/o1-mini-2024-09-12": OpenRouterModelArgs(
164167
model_name="openai/o1-mini-2024-09-12",
165168
max_total_tokens=128_000,
166-
max_input_tokens=40_000,
167-
max_new_tokens=4000,
169+
max_input_tokens=100_000,
170+
max_new_tokens=28_000,
168171
temperature=1e-1,
169172
),
170173
}

0 commit comments

Comments (0)