
Commit 93579dd
update update_history
1 parent bf196b4

3 files changed (+24, -157 lines)


amadeusgpt/analysis_objects/llm.py (15 additions, 84 deletions)
@@ -118,7 +118,7 @@ def connect_gpt_oai_1(self, messages, **kwargs):
 
         return response
 
-    def update_history(self, role, content, encoded_image=None, replace=False):
+    def update_history(self, role, content, encoded_image = None, replace=False):
         if role == "system":
             if len(self.history) > 0:
                 self.history[0]["content"] = content
@@ -132,35 +132,26 @@ def update_history(self, role, content, encoded_image=None, replace=False):
             self.history.append({"role": role, "content": content})
             num_AI_messages = (len(self.context_window) - 1) // 2
             if num_AI_messages == self.keep_last_n_messages:
-                print("doing active forgetting")
+                print ("doing active forgetting")
                 # we forget the oldest AI message and corresponding answer
                 self.context_window.pop(1)
                 self.context_window.pop(1)
             new_message = {"role": role, "content": content}
         else:
-            if encoded_image is None:
-                self.history.append({"role": role, "content": content})
-                num_AI_messages = (len(self.context_window) - 1) // 2
-                if num_AI_messages == self.keep_last_n_messages:
-                    print("doing active forgetting")
-                    # we forget the oldest AI message and corresponding answer
-                    self.context_window.pop(1)
-                    self.context_window.pop(1)
-                self.context_window.append({"role": role, "content": content})
-            else:
-                message = {
-                    "role": "user",
-                    "content": [
-                        {"type": "text", "text": content},
-                        {
-                            "type": "image_url",
-                            "image_url": {
-                                "url": f"data:image/png;base64,{encoded_image}"
-                            },
-                        },
-                    ],
+            new_message = {"role": "user", "content": [
+                {"type": "text", "text": ""},
+                {"type": "image_url", "image_url": {
+                    "url": f"data:image/jpeg;base64,{encoded_image}"}
                 }
-                self.context_window.append(message)
+            ]}
+
+        self.history.append(new_message)
+
+        if replace == True:
+            if len(self.context_window) == 2:
+                self.context_window[1] = new_message
+            else:
+                self.context_window.append(new_message)
 
     def clean_context_window(self):
         while len(self.context_window) > 1:
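
For context, this is the shape update_history takes after the commit, as a minimal standalone sketch. It is reconstructed from the hunk above rather than copied from the repository: the ChatBuffer scaffolding is invented for the example, and the replace/append branching at the end is normalized to one plausible reading of the added lines. Note the added multimodal branch sends an empty string as the text part and tags the image as JPEG.

    # A minimal sketch, assuming a stub class around the diffed method; the
    # ChatBuffer name and constructor are illustrative, not from the repo.
    class ChatBuffer:
        def __init__(self, system_prompt="", keep_last_n_messages=2):
            self.history = [{"role": "system", "content": system_prompt}]
            self.context_window = [{"role": "system", "content": system_prompt}]
            self.keep_last_n_messages = keep_last_n_messages

        def update_history(self, role, content, encoded_image=None, replace=False):
            if role == "system":
                # the system prompt always occupies slot 0 of both buffers
                self.history[0]["content"] = content
                self.context_window[0]["content"] = content
                return
            if encoded_image is None:
                # "active forgetting": once keep_last_n_messages AI turns have
                # accumulated, evict the oldest user/assistant pair (slot 0 is
                # reserved for the system prompt, hence the double pop(1))
                num_ai_messages = (len(self.context_window) - 1) // 2
                if num_ai_messages >= self.keep_last_n_messages:
                    self.context_window.pop(1)  # oldest user message
                    self.context_window.pop(1)  # its assistant reply
                new_message = {"role": role, "content": content}
            else:
                # OpenAI-style multimodal message: a text part plus a base64
                # data-URL image part (the commit hardcodes "" as the text part)
                new_message = {"role": "user", "content": [
                    {"type": "text", "text": ""},
                    {"type": "image_url", "image_url": {
                        "url": f"data:image/jpeg;base64,{encoded_image}"}},
                ]}
            self.history.append(new_message)
            if replace and len(self.context_window) == 2:
                # replace=True overwrites the lone non-system message so that
                # repeated calls do not grow the window
                self.context_window[1] = new_message
            else:
                self.context_window.append(new_message)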
@@ -288,66 +279,6 @@ def update_system_prompt(self, sandbox):
         # update both history and context window
         self.update_history("system", self.system_prompt)
 
-
-class MutationLLM(LLM):
-    def __init__(self, config):
-        super().__init__(config)
-
-    def update_system_prompt(self, sandbox):
-        from amadeusgpt.system_prompts.mutation import _get_system_prompt
-
-        self.system_prompt = _get_system_prompt(sandbox)
-        # update both history and context window
-        self.update_history("system", self.system_prompt)
-
-    def speak(self, sandbox):
-        # TODO maybe we don't need to keep the history
-        """
-        Speak to the chat channel
-        """
-        # query = "Please start. Make sure you provide one task program a time. Thanks a million!"
-        query = "Please start. Thanks a million!"
-        self.update_system_prompt(sandbox)
-        self.update_history("user", query, replace=True)
-        response = self.connect_gpt(self.context_window, max_tokens=4000)
-        text = response.choices[0].message.content.strip()
-        sandbox.chat_channel.chain_of_thought.append(response)
-        return text
-
-
-class BreedLLM(LLM):
-    def __init__(self, config):
-        super().__init__(config)
-
-    def update_system_prompt(self, sandbox):
-        from amadeusgpt.system_prompts.breed import _get_system_prompt
-
-        behavior1_docs, behavior2_docs, composition_type = sandbox.get_breed_info()
-
-        self.system_prompt = _get_system_prompt(
-            behavior1_docs, behavior2_docs, composition_type
-        )
-
-        # update both history and context window
-
-        self.update_history("system", self.system_prompt)
-
-    def speak(self, sandbox):
-        # TODO maybe we don't need to keep the history
-        """
-        Speak to the chat channel
-        """
-        query = "Now write the template function. Make sure your answer is concise and don't mention anything about filtering such as smooth_window or min_window\n"
-        self.update_system_prompt(sandbox)
-        self.update_history("user", query, replace=True)
-
-        response = self.connect_gpt(self.context_window, max_tokens=400)
-        text = response.choices[0].message.content.strip()
-        sandbox.chat_channel.chain_of_thought.append(response)
-
-        return text
-
-
 class DiagnosisLLM(LLM):
     """
     Resource management for testing and error handling

amadeusgpt/main.py (1 addition, 6 deletions)
@@ -8,7 +8,6 @@
 from amadeusgpt.utils import *
 
 warnings.filterwarnings("ignore")
-import os
 
 from amadeusgpt.analysis_objects.llm import (CodeGenerationLLM, DiagnosisLLM,
                                              SelfDebugLLM, VisualLLM)
@@ -73,11 +72,7 @@ def match_integration_module(self, user_query: str):
             modules.append(query_module)
 
         # parse the query result by loading active loading
-        return modules
-
-    def chat_iteration(self, user_query):
-        qa_message = self.sandbox.llm_step(user_query)
-        return qa_message
+        return modules
 
     def step(self, user_query):
         integration_module_names = self.match_integration_module(user_query)
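
With chat_iteration removed, callers reach the sandbox directly. A hypothetical call site, assuming an instance named amadeus of the class these methods belong to (step and self.sandbox.llm_step are both visible in the diffs; the variable name is not):

    # Hypothetical usage; `amadeus` is an illustrative instance name.
    qa_message = amadeus.step(user_query)              # full path: integration-module matching, then LLM step
    qa_message = amadeus.sandbox.llm_step(user_query)  # what chat_iteration used to forward to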

amadeusgpt/programs/sandbox.py (8 additions, 67 deletions)
@@ -399,7 +399,6 @@ def events_to_videos(self, events, function_name):
     def render_qa_message(self, qa_message):
         function_rets = qa_message["function_rets"]
         behavior_analysis = self.exec_namespace["behavior_analysis"]
-        n_animals = behavior_analysis.animal_manager.get_n_individuals()
         bodypart_names = behavior_analysis.animal_manager.get_keypoint_names()
         qa_message["pose_video"] = (
             behavior_analysis.animal_manager.superanimal_predicted_video
@@ -464,7 +463,6 @@ def llm_step(self, user_query):
             qa_message["meta_info"] = self.meta_info
 
         self.messages.append(qa_message)
-        post_process_llm = [] # ['self_debug', 'diagnosis']
         self.query = user_query
         self.llms["code_generator"].speak(self)
 
@@ -484,7 +482,14 @@ def run_task_program(self, task_program_name):
         return qa_message
 
     def step(self, user_query, number_of_debugs=1):
+        """
+        Currently not used. We tried to separate LLM inference and code execution
+        """
         qa_message = create_message(user_query, self)
+
+        if self.meta_info is not None:
+            qa_message["meta_info"] = self.meta_info
+
         self.messages.append(qa_message)
 
         self.query = user_query
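
The new docstring marks step as parked while LLM inference and code execution are separated. Read together with the llm_step hunk above, both entry points now do the same qa_message/meta_info bookkeeping; a sketch of the intended split, with the query string purely illustrative:

    # Sketch, based only on these hunks: llm_step is the inference-only path,
    # step the (currently unused) combined path with a debug budget.
    qa_message = sandbox.llm_step("how long does the animal stay in ROI0?")
    qa_message = sandbox.step("how long does the animal stay in ROI0?", number_of_debugs=1)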
@@ -595,68 +600,4 @@ def render_temp_message(query, sandbox):
     for name, roi_object in roi_objects.items():
         analysis.object_manager.add_roi_object(ROIObject(name, roi_object["Path"]))
 
-    render_temp_message("random query", sandbox)
-
-    # def get_head_dips_events(config: Config):
-    #     """
-    #     Identify and count the number of head_dips events.
-
-    #     Parameters:
-    #     ----------
-    #     config: Config
-
-    #     Returns:
-    #     -------
-    #     head_dips_events: List[BaseEvent]
-    #         List of events where head_dips behavior occurs.
-    #     num_bouts: int
-    #         Number of bouts for head_dips behavior.
-    #     """
-    #     # Create an instance of AnimalBehaviorAnalysis
-    #     analysis = create_analysis(config)
-
-    #     # Get events where mouse_center and neck are inside ROI0
-    #     mouse_center_neck_in_ROI0_events = analysis.get_animals_object_events(
-    #         object_name='ROI0',
-    #         query='overlap == True',
-    #         bodypart_names=['mouse_center', 'neck'],
-    #         min_window=1,
-    #         max_window=100000,
-    #         negate=False
-    #     )
-    #     # print ("mouse center neck in ROI0")
-    #     # print (len(mouse_center_neck_in_ROI0_events))
-    #     # for event in mouse_center_neck_in_ROI0_events:
-    #     #     print (event)
-
-    #     # Get events where head_midpoint is outside ROI1
-    #     head_midpoint_outside_ROI1_events = analysis.get_animals_object_events(
-    #         object_name='ROI1',
-    #         query='overlap == True',
-    #         bodypart_names=['head_midpoint'],
-    #         min_window=1,
-    #         max_window=100000,
-    #         negate=True
-    #     )
-    #     # print ('mouse head not in ROI1')
-    #     # print (len(head_midpoint_outside_ROI1_events))
-    #     # for event in head_midpoint_outside_ROI1_events:
-    #     #     print (event)
-
-    #     # Combine the events to define head_dips behavior
-    #     head_dips_events = analysis.get_composite_events(
-    #         events_A=mouse_center_neck_in_ROI0_events,
-    #         events_B=head_midpoint_outside_ROI1_events,
-    #         composition_type='logical_and',
-    #         max_interval_between_sequential_events=0,
-    #         min_window=1,
-    #         max_window=100000
-    #     )
-    #     print ('head dips events', len(head_dips_events))
-
-    #     # Count the number of bouts for head_dips behavior
-    #     num_bouts = len(head_dips_events)
-
-    #     return head_dips_events, num_bouts
-
-    # get_head_dips_events(config)
+    render_temp_message("random query", sandbox)
