Skip to content

Commit f47fc8a

Browse files
Meta Prompting (#244)
1 parent 30da2f1 commit f47fc8a

25 files changed

+665
-39
lines changed

src/autogluon/assistant/agents/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
from .description_file_retriever_agent import DescriptionFileRetrieverAgent
44
from .error_analyzer_agent import ErrorAnalyzerAgent
55
from .executer_agent import ExecuterAgent
6+
from .meta_prompting_agent import MetaPromptingAgent
67
from .reranker_agent import RerankerAgent
78
from .retriever_agent import RetrieverAgent
89
from .task_descriptor_agent import TaskDescriptorAgent
Lines changed: 113 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,113 @@
1+
"""
2+
Meta-prompting agent for dynamically rewriting prompts.
3+
4+
This module provides the MetaPromptingAgent class that can analyze the current task
5+
and dynamically rewrite a specific prompt to better suit the requirements.
6+
"""
7+
8+
import logging
9+
10+
from ..prompts import MetaPromptingPrompt
11+
from .base_agent import BaseAgent
12+
from .utils import init_llm
13+
14+
logger = logging.getLogger(__name__)
15+
16+
17+
class MetaPromptingAgent(BaseAgent):
    """
    Meta-prompting agent that dynamically rewrites prompts based on the current task.

    This agent is designed to be instantiated once and used for all prompts that need
    rewriting, handling any prompt template provided during the call.

    Agent Input:
        - Target prompt template to rewrite
        - Target prompt class for meta-instructions
        - Current task description and user input
        - Available template variables

    Agent Output:
        - Rewritten prompt template customized for the current task
    """

    def __init__(self, config, manager, llm_config, meta_prompt_template=None):
        """
        Initialize the MetaPromptingAgent for all prompts.

        Args:
            config: Configuration object.
            manager: Manager that provides state and variable values.
            llm_config: Configuration for the language model.
            meta_prompt_template: Optional custom template for the meta-prompting prompt.
        """
        super().__init__(config=config, manager=manager)

        self.llm_config = llm_config

        # Prompt builder used to construct the meta-prompting request for the LLM.
        self.meta_prompt = MetaPromptingPrompt(
            llm_config=self.llm_config, manager=self.manager, template=meta_prompt_template
        )

        # Initialize the LLM lazily (on first __call__) so no model client is
        # created if meta-prompting is never actually used.
        self.llm = None
        # Cache of rewritten templates keyed by prompt class name, so each prompt
        # class is rewritten at most once unless force_rewrite is requested.
        self._rewritten_templates = {}

    def __call__(self, target_prompt_instance, force_rewrite=False):
        """
        Generate a rewritten prompt template for the specified prompt class.

        Args:
            target_prompt_instance: Instance of the prompt (to access its meta_instructions).
            force_rewrite: If True, rewrite the template even if previously rewritten.

        Returns:
            Rewritten prompt template.

        Raises:
            AssertionError: If another meta-prompting call is already in flight
                (i.e. manager.target_prompt_instance is still set).
        """
        # Invariant: only one meta-prompting rewrite may be in flight on the manager.
        assert self.manager.target_prompt_instance is None, f"{self.manager.target_prompt_instance.__class__}"
        self.manager.target_prompt_instance = target_prompt_instance
        # Key identifying this prompt in the rewrite cache.
        prompt_name = target_prompt_instance.__class__.__name__

        # FIX: the original only cleared manager.target_prompt_instance on the
        # success paths; any exception raised while building the prompt, calling
        # the LLM, parsing, or saving state left stale manager state behind and
        # made every later call fail the entry assertion. try/finally guarantees
        # the manager is restored on all exits.
        try:
            # If already rewritten and not forcing a rewrite, return the cached version.
            if not force_rewrite and prompt_name in self._rewritten_templates:
                return self._rewritten_templates[prompt_name]

            self.manager.log_agent_start(
                f"MetaPromptingAgent: starting to analyze task and rewrite {prompt_name} template."
            )

            # Build the meta-prompting prompt.
            prompt = self.meta_prompt.build()

            # Initialize the LLM if not already done.
            if self.llm is None:
                self.llm = init_llm(
                    llm_config=self.llm_config, agent_name="meta_prompting", multi_turn=self.llm_config.multi_turn
                )

            # Get the response from the LLM.
            response = self.llm.assistant_chat(prompt)

            # Parse the response to get the rewritten template.
            rewritten_template = self.meta_prompt.parse(response)

            # Cache the rewritten template.
            self._rewritten_templates[prompt_name] = rewritten_template

            # Save the rewritten template for debugging.
            self.manager.save_and_log_states(
                content=rewritten_template,
                save_name=f"rewritten_{prompt_name}_template.txt",
                per_iteration=False,
                add_uuid=False,
            )

            self.manager.log_agent_end(f"MetaPromptingAgent: finished rewriting {prompt_name} template.")

            return rewritten_template
        finally:
            # Always release the manager slot, even if any step above raised.
            self.manager.target_prompt_instance = None

src/autogluon/assistant/cli/app.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -54,11 +54,17 @@ def main(
5454
"--continuous_improvement",
5555
help="If enabled, the system will continue optimizing even after finding a valid solution. Instead of stopping at the first successful run, it will keep searching for better solutions until reaching the maximum number of iterations. This allows the system to potentially find higher quality solutions at the cost of additional computation time.",
5656
),
57-
need_user_input: bool = typer.Option(
57+
enable_per_iteration_instruction: bool = typer.Option(
5858
False,
5959
"--enable-per-iteration-instruction",
6060
help="If enabled, provide an instruction at the start of each iteration (except the first, which uses the initial instruction). The process suspends until you provide it.",
6161
),
62+
enable_meta_prompting: bool = typer.Option(
63+
False,
64+
"-m",
65+
"--enable-meta-prompting",
66+
help="If enabled, the system will refine the prompts itself based on user instruction and given data.",
67+
),
6268
initial_user_input: str | None = typer.Option(
6369
None, "-t", "--initial-instruction", help="You can provide the initial instruction here."
6470
),
@@ -100,7 +106,8 @@ def main(
100106
config_path=str(provider_config_path),
101107
max_iterations=max_iterations,
102108
continuous_improvement=continuous_improvement,
103-
need_user_input=need_user_input,
109+
enable_per_iteration_instruction=enable_per_iteration_instruction,
110+
enable_meta_prompting=enable_meta_prompting,
104111
initial_user_input=initial_user_input,
105112
extract_archives_to=extract_archives_to,
106113
verbosity=verbosity,

src/autogluon/assistant/coding_agent.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,8 @@ def run_agent(
1919
config_path=None,
2020
max_iterations=5,
2121
continuous_improvement=None,
22-
need_user_input=False,
22+
enable_meta_prompting=None,
23+
enable_per_iteration_instruction=False,
2324
initial_user_input=None,
2425
extract_archives_to=None,
2526
manager=None,
@@ -95,15 +96,21 @@ def run_agent(
9596

9697
if continuous_improvement is not None:
9798
config.continuous_improvement = continuous_improvement
99+
if enable_meta_prompting is not None:
100+
config.enable_meta_prompting = enable_meta_prompting
98101

99102
if manager is None:
100103
manager = Manager(
101104
input_data_folder=input_data_folder,
102105
output_folder=output_folder,
103106
config=config,
107+
enable_per_iteration_instruction=enable_per_iteration_instruction,
108+
initial_user_input=initial_user_input,
104109
)
105110

106-
manager.set_initial_user_input(need_user_input=need_user_input, initial_user_input=initial_user_input)
111+
manager.set_initial_user_input(
112+
enable_per_iteration_instruction=enable_per_iteration_instruction, initial_user_input=initial_user_input
113+
)
107114

108115
while manager.time_step + 1 < max_iterations:
109116
logger.brief(f"Starting iteration {manager.time_step + 1}!")

src/autogluon/assistant/configs/anthropic.yaml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ use_tutorial_summary: True
1818
continuous_improvement: False
1919
optimize_system_resources: False
2020
cleanup_unused_env: True
21+
enable_meta_prompting: False
2122

2223
llm: &default_llm
2324
provider: anthropic
@@ -30,11 +31,13 @@ llm: &default_llm
3031
multi_turn: False
3132
template: null
3233
add_coding_format_instruction: false
34+
apply_meta_prompting: False
3335

3436
# Ensure all agent types inherit the SageMaker LLM config
3537
python_coder:
3638
<<: *default_llm # Merge llm_config
3739
multi_turn: True
40+
apply_meta_prompting: True
3841

3942
bash_coder:
4043
<<: *default_llm # Merge llm_config
@@ -45,6 +48,10 @@ executer:
4548
max_stdout_length: 8192
4649
max_stderr_length: 2048
4750

51+
meta_prompting:
52+
<<: *default_llm # Merge llm_config
53+
multi_turn: False
54+
4855
reader:
4956
<<: *default_llm # Merge llm_config
5057
details: False
@@ -69,6 +76,7 @@ task_descriptor:
6976
<<: *default_llm # Merge llm_config
7077
max_description_files_length_to_show: 1024
7178
max_description_files_length_for_summarization: 16384
79+
apply_meta_prompting: True
7280

7381
tool_selector:
7482
<<: *default_llm # Merge llm_config

src/autogluon/assistant/configs/bedrock.yaml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ use_tutorial_summary: True
1818
continuous_improvement: False
1919
optimize_system_resources: False
2020
cleanup_unused_env: True
21+
enable_meta_prompting: False
2122

2223
llm: &default_llm
2324
provider: bedrock
@@ -30,10 +31,12 @@ llm: &default_llm
3031
multi_turn: False
3132
template: null
3233
add_coding_format_instruction: false
34+
apply_meta_prompting: False
3335

3436
python_coder:
3537
<<: *default_llm # Merge llm_config
3638
multi_turn: True
39+
apply_meta_prompting: True
3740

3841
bash_coder:
3942
<<: *default_llm # Merge llm_config
@@ -44,6 +47,10 @@ executer:
4447
max_stdout_length: 8192
4548
max_stderr_length: 2048
4649

50+
meta_prompting:
51+
<<: *default_llm # Merge llm_config
52+
multi_turn: False
53+
4754
reader:
4855
<<: *default_llm # Merge llm_config
4956
details: False
@@ -68,6 +75,7 @@ task_descriptor:
6875
<<: *default_llm # Merge llm_config
6976
max_description_files_length_to_show: 1024
7077
max_description_files_length_for_summarization: 16384
78+
apply_meta_prompting: True
7179

7280
tool_selector:
7381
<<: *default_llm # Merge llm_config

src/autogluon/assistant/configs/data_visualizer.yaml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ use_tutorial_summary: True
1717
continuous_improvement: False
1818
optimize_system_resources: False
1919
cleanup_unused_env: True
20+
enable_meta_prompting: False
2021

2122
# Default LLM Configuration
2223
# For each agent (coder, etc.) you can use a different one
@@ -31,6 +32,7 @@ llm: &default_llm
3132
multi_turn: False
3233
template: null
3334
add_coding_format_instruction: false
35+
apply_meta_prompting: False
3436

3537
bash_coder:
3638
<<: *default_llm # Merge llm_config
@@ -39,6 +41,7 @@ bash_coder:
3941
python_coder:
4042
<<: *default_llm # Merge llm_config
4143
multi_turn: True
44+
apply_meta_prompting: True
4245
template: |
4346
As a Data Visualization Agent, you will be given a folder containing data and description files. Please generate Python code using {selected_tool} to create insightful and informative visualizations. Follow these specifications:
4447
@@ -103,6 +106,10 @@ executer:
103106
max_stdout_length: 8192
104107
max_stderr_length: 2048
105108

109+
meta_prompting:
110+
<<: *default_llm # Merge llm_config
111+
multi_turn: False
112+
106113
reader:
107114
<<: *default_llm # Merge llm_config
108115
details: False
@@ -127,6 +134,7 @@ task_descriptor:
127134
<<: *default_llm # Merge llm_config
128135
max_description_files_length_to_show: 1024
129136
max_description_files_length_for_summarization: 16384
137+
apply_meta_prompting: True
130138
template: |
131139
Based ONLY on the information explicitly stated in the provided data structure and description files, provide a condensed and precise description of the data visualization task. Include only details that are directly mentioned in the source materials. Do not add assumptions or infer unstated information.
132140

src/autogluon/assistant/configs/default.yaml

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ use_tutorial_summary: True
1717
continuous_improvement: False
1818
optimize_system_resources: False
1919
cleanup_unused_env: True
20+
enable_meta_prompting: False
2021

2122
# Default LLM Configuration
2223
# For each agent (coder, etc.) you can use a different one
@@ -37,19 +38,23 @@ llm: &default_llm
3738
multi_turn: False
3839
template: null
3940
add_coding_format_instruction: false
41+
apply_meta_prompting: False
4042

4143
python_coder:
4244
<<: *default_llm # Merge llm_config
4345
multi_turn: True
46+
apply_meta_prompting: True
4447

4548
bash_coder:
4649
<<: *default_llm # Merge llm_config
4750
multi_turn: True
4851

4952
executer:
5053
<<: *default_llm # Merge llm_config
51-
max_stdout_length: 8192
52-
max_stderr_length: 2048
54+
55+
meta_prompting:
56+
<<: *default_llm # Merge llm_config
57+
multi_turn: False
5358

5459
reader:
5560
<<: *default_llm # Merge llm_config
@@ -75,6 +80,7 @@ task_descriptor:
7580
<<: *default_llm # Merge llm_config
7681
max_description_files_length_to_show: 1024
7782
max_description_files_length_for_summarization: 16384
83+
apply_meta_prompting: True
7884

7985
tool_selector:
8086
<<: *default_llm # Merge llm_config

src/autogluon/assistant/configs/openai.yaml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ use_tutorial_summary: True
1818
continuous_improvement: False
1919
optimize_system_resources: False
2020
cleanup_unused_env: True
21+
enable_meta_prompting: False
2122

2223
llm: &default_llm
2324
provider: openai
@@ -30,11 +31,13 @@ llm: &default_llm
3031
multi_turn: False
3132
template: null
3233
add_coding_format_instruction: false
34+
apply_meta_prompting: False
3335

3436
# Ensure all agent types inherit the SageMaker LLM config
3537
python_coder:
3638
<<: *default_llm # Merge llm_config
3739
multi_turn: True
40+
apply_meta_prompting: True
3841

3942
bash_coder:
4043
<<: *default_llm # Merge llm_config
@@ -45,6 +48,10 @@ executer:
4548
max_stdout_length: 8192
4649
max_stderr_length: 2048
4750

51+
meta_prompting:
52+
<<: *default_llm # Merge llm_config
53+
multi_turn: False
54+
4855
reader:
4956
<<: *default_llm # Merge llm_config
5057
details: False
@@ -69,6 +76,7 @@ task_descriptor:
6976
<<: *default_llm # Merge llm_config
7077
max_description_files_length_to_show: 1024
7178
max_description_files_length_for_summarization: 16384
79+
apply_meta_prompting: True
7280

7381
tool_selector:
7482
<<: *default_llm # Merge llm_config

0 commit comments

Comments
 (0)