@@ -1,5 +1,9 @@
 import os
+
+from langroid import ChatAgentConfig
+
 from lib.utils import CodeGenSandbox
+from lib.agents import FirstAttemptAgent
 import typer
 
 import langroid as lr
@@ -12,36 +16,10 @@
 setup_colored_logging()
 
 
-def generate_first_attempt(sandbox: CodeGenSandbox) -> str:
-    with open(sandbox.get_sandboxed_class_path(), "r") as f:
-        class_skeleton = f.read()
-
-    cfg = lr.ChatAgentConfig(
-        llm=lr.language_models.OpenAIGPTConfig(
-            chat_model="ollama/llama3:latest",
-        ),
-        vecdb=None
-    )
-    main_agent = lr.ChatAgent(cfg)
-    response = main_agent.llm_response(f"You are an expert at writing Python code."
-                                       f"Fill in the following class skeleton."
-                                       f"Do NOT add any other methods or commentary."
-                                       f"Your response should be ONLY the python code."
-                                       f"Do not say 'here is the python code'"
-                                       f"Do not surround your response with quotes or backticks."
-                                       f"DO NOT EVER USE ``` in your output."
-                                       f"Your output MUST be valid, runnable python code and NOTHING else."
-                                       f"{class_skeleton}")
-    with open(sandbox.get_sandboxed_class_path(), "w+") as _out:
-        _out.write(response.content)
-
-    return response.content
-
-
 def generate_next_attempt(sandbox: CodeGenSandbox, test_results: str, test_results_insights: str) -> str:
     cfg = lr.ChatAgentConfig(
         llm=lr.language_models.OpenAIGPTConfig(
-            chat_model="ollama/llama3:latest",
+            chat_model="ollama/llama3.1:latest",
         ),
         vecdb=None
     )
@@ -78,7 +56,7 @@ def generate_next_attempt(sandbox: CodeGenSandbox, test_results: str, test_resul
 def interpret_test_results(results: str, code: str) -> str:
     cfg = lr.ChatAgentConfig(
         llm=lr.language_models.OpenAIGPTConfig(
-            chat_model="ollama/llama3:latest",
+            chat_model="ollama/llama3.1:latest",
         ),
         vecdb=None
     )
@@ -108,8 +86,25 @@ def teardown() -> None:
         generated_file.truncate(0)
 
 
-def chat(sandbox: CodeGenSandbox, test_runner: GenericTestRunner, max_epochs: int=5) -> None:
-    code_attempt = generate_first_attempt(sandbox)
+def chat(
+    sandbox: CodeGenSandbox,
+    first_attempt_agent: FirstAttemptAgent,
+    test_runner: GenericTestRunner,
+    max_epochs: int = 5
+) -> None:
+    code_attempt = first_attempt_agent.respond(
+        prompt=f"""
+        You are an expert at writing Python code.
+        Fill in the following class skeleton.
+        Do NOT add any other methods or commentary.
+        Your response should be ONLY the python code.
+        Do not say 'here is the python code'
+        Do not surround your response with quotes or backticks.
+        DO NOT EVER USE ``` in your output.
+        Your output MUST be valid, runnable python code and NOTHING else.
+        {first_attempt_agent.class_skeleton}
+        """
+    )
     solved = False
     for _ in range(max_epochs):
         # test_exit_code, test_result(s = get_test_results()
@@ -168,10 +163,18 @@ def main(
         )
     )
 
+    llama3 = ChatAgentConfig(
+        llm=lr.language_models.OpenAIGPTConfig(
+            chat_model="ollama/llama3:latest",
+        ),
+        vecdb=None
+    )
+
     sandbox = CodeGenSandbox(project_dir, class_skeleton_path, test_path, sandbox_path)
     sandbox.init_sandbox()
+    fa: FirstAttemptAgent = FirstAttemptAgent(sandbox, llama3)
     tr: GenericTestRunner = SubProcessTestRunner(sandbox)
-    chat(sandbox, tr, max_epochs=max_epochs)
+    chat(sandbox, fa, tr, max_epochs=max_epochs)
 
 
 if __name__ == "__main__":
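Note: the new FirstAttemptAgent is imported from lib.agents, which is not part of this diff. A minimal sketch of what that class could look like, assuming it simply wraps the langroid ChatAgent call that the removed generate_first_attempt() helper used, exposes the skeleton as class_skeleton, and writes the model's reply back into the sandbox (all assumptions, not confirmed by this change):

# Hypothetical sketch of lib/agents.py -- not shown in this commit.
import langroid as lr
from langroid import ChatAgentConfig

from lib.utils import CodeGenSandbox


class FirstAttemptAgent:
    def __init__(self, sandbox: CodeGenSandbox, config: ChatAgentConfig) -> None:
        self.sandbox = sandbox
        self.agent = lr.ChatAgent(config)
        # Cache the skeleton so chat() can interpolate it into its prompt
        # (assumed behavior; the real module may load it differently).
        with open(sandbox.get_sandboxed_class_path(), "r") as f:
            self.class_skeleton = f.read()

    def respond(self, prompt: str) -> str:
        # Ask the model for a first implementation and persist it to the
        # sandbox, mirroring the removed generate_first_attempt() helper.
        response = self.agent.llm_response(prompt)
        with open(self.sandbox.get_sandboxed_class_path(), "w+") as out:
            out.write(response.content)
        return response.content

Under those assumptions, main() decides which Ollama model backs the first attempt (the llama3 ChatAgentConfig added above), while chat() only depends on the respond() call and the class_skeleton attribute.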