2
2
from pydantic import BaseModel , Field
3
3
from langchain_openai import ChatOpenAI
4
4
from langchain .prompts import ChatPromptTemplate
5
import logging
import warnings
from transformers import logging as transformers_logging

# Configure logging
# Import-time, process-wide configuration: every agent in this module logs
# through the same timestamped format. NOTE(review): basicConfig mutates the
# root logger, which may clash with a host application's own logging setup —
# confirm this module is only used as an entry point.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s | %(name)s | %(levelname)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
# Module-level logger shared by all agent classes defined below.
logger = logging.getLogger(__name__)

# Suppress specific transformers warnings:
# - drop everything below ERROR emitted via the transformers logging facade;
# - silence the repetitive generation-time pad_token_id notice (matched by
#   message prefix, so minor wording after the prefix still matches).
transformers_logging.set_verbosity_error()
warnings.filterwarnings("ignore", message="Setting `pad_token_id` to `eos_token_id`")
5
20
6
21
class Agent (BaseModel ):
7
22
"""Base agent class with common properties"""
@@ -10,6 +25,14 @@ class Agent(BaseModel):
10
25
description : str
11
26
llm : Any = Field (description = "Language model for the agent" )
12
27
28
+ def log_prompt (self , prompt : str , prefix : str = "" ):
29
+ """Log a prompt being sent to the LLM"""
30
+ logger .info (f"\n { '=' * 80 } \n { prefix } Prompt:\n { '-' * 40 } \n { prompt } \n { '=' * 80 } " )
31
+
32
+ def log_response (self , response : str , prefix : str = "" ):
33
+ """Log a response received from the LLM"""
34
+ logger .info (f"\n { '=' * 80 } \n { prefix } Response:\n { '-' * 40 } \n { response } \n { '=' * 80 } " )
35
+
13
36
class PlannerAgent (Agent ):
14
37
"""Agent responsible for breaking down problems and planning steps"""
15
38
def __init__ (self , llm ):
@@ -21,6 +44,8 @@ def __init__(self, llm):
21
44
)
22
45
23
46
def plan (self , query : str , context : List [Dict [str , Any ]] = None ) -> str :
47
+ logger .info (f"\n 🎯 Planning step for query: { query } " )
48
+
24
49
if context :
25
50
template = """You are a strategic planning agent. Your role is to break down complex problems into clear, manageable steps.
26
51
@@ -34,6 +59,7 @@ def plan(self, query: str, context: List[Dict[str, Any]] = None) -> str:
34
59
35
60
Plan:"""
36
61
context_str = "\n \n " .join ([f"Context { i + 1 } :\n { item ['content' ]} " for i , item in enumerate (context )])
62
+ logger .info (f"Using context ({ len (context )} items)" )
37
63
else :
38
64
template = """You are a strategic planning agent. Your role is to break down complex problems into clear, manageable steps.
39
65
@@ -44,10 +70,15 @@ def plan(self, query: str, context: List[Dict[str, Any]] = None) -> str:
44
70
45
71
Plan:"""
46
72
context_str = ""
73
+ logger .info ("No context available" )
47
74
48
75
prompt = ChatPromptTemplate .from_template (template )
49
76
messages = prompt .format_messages (query = query , context = context_str )
77
+ prompt_text = "\n " .join ([msg .content for msg in messages ])
78
+ self .log_prompt (prompt_text , "Planner" )
79
+
50
80
response = self .llm .invoke (messages )
81
+ self .log_response (response .content , "Planner" )
51
82
return response .content
52
83
53
84
class ResearchAgent (Agent ):
@@ -64,14 +95,18 @@ def __init__(self, llm, vector_store):
64
95
)
65
96
66
97
def research (self , query : str , step : str ) -> List [Dict [str , Any ]]:
98
+ logger .info (f"\n 🔍 Researching for step: { step } " )
99
+
67
100
# Query all collections
68
101
pdf_results = self .vector_store .query_pdf_collection (query )
69
102
repo_results = self .vector_store .query_repo_collection (query )
70
103
71
104
# Combine results
72
105
all_results = pdf_results + repo_results
106
+ logger .info (f"Found { len (all_results )} relevant documents" )
73
107
74
108
if not all_results :
109
+ logger .warning ("No relevant documents found" )
75
110
return []
76
111
77
112
# Have LLM analyze and summarize findings
@@ -89,7 +124,11 @@ def research(self, query: str, step: str) -> List[Dict[str, Any]]:
89
124
context_str = "\n \n " .join ([f"Source { i + 1 } :\n { item ['content' ]} " for i , item in enumerate (all_results )])
90
125
prompt = ChatPromptTemplate .from_template (template )
91
126
messages = prompt .format_messages (step = step , context = context_str )
127
+ prompt_text = "\n " .join ([msg .content for msg in messages ])
128
+ self .log_prompt (prompt_text , "Researcher" )
129
+
92
130
response = self .llm .invoke (messages )
131
+ self .log_response (response .content , "Researcher" )
93
132
94
133
return [{"content" : response .content , "metadata" : {"source" : "Research Summary" }}]
95
134
@@ -104,6 +143,8 @@ def __init__(self, llm):
104
143
)
105
144
106
145
def reason (self , query : str , step : str , context : List [Dict [str , Any ]]) -> str :
146
+ logger .info (f"\n 🤔 Reasoning about step: { step } " )
147
+
107
148
template = """You are a reasoning agent. Your role is to apply logical analysis to information and draw conclusions.
108
149
109
150
Given the following step, context, and query, apply logical reasoning to reach a conclusion.
@@ -121,7 +162,11 @@ def reason(self, query: str, step: str, context: List[Dict[str, Any]]) -> str:
121
162
context_str = "\n \n " .join ([f"Context { i + 1 } :\n { item ['content' ]} " for i , item in enumerate (context )])
122
163
prompt = ChatPromptTemplate .from_template (template )
123
164
messages = prompt .format_messages (step = step , query = query , context = context_str )
165
+ prompt_text = "\n " .join ([msg .content for msg in messages ])
166
+ self .log_prompt (prompt_text , "Reasoner" )
167
+
124
168
response = self .llm .invoke (messages )
169
+ self .log_response (response .content , "Reasoner" )
125
170
return response .content
126
171
127
172
class SynthesisAgent (Agent ):
@@ -135,6 +180,8 @@ def __init__(self, llm):
135
180
)
136
181
137
182
def synthesize (self , query : str , reasoning_steps : List [str ]) -> str :
183
+ logger .info (f"\n 📝 Synthesizing final answer from { len (reasoning_steps )} reasoning steps" )
184
+
138
185
template = """You are a synthesis agent. Your role is to combine multiple pieces of information into a clear, coherent response.
139
186
140
187
Given the following query and reasoning steps, create a final comprehensive answer.
@@ -150,7 +197,11 @@ def synthesize(self, query: str, reasoning_steps: List[str]) -> str:
150
197
steps_str = "\n \n " .join ([f"Step { i + 1 } :\n { step } " for i , step in enumerate (reasoning_steps )])
151
198
prompt = ChatPromptTemplate .from_template (template )
152
199
messages = prompt .format_messages (query = query , steps = steps_str )
200
+ prompt_text = "\n " .join ([msg .content for msg in messages ])
201
+ self .log_prompt (prompt_text , "Synthesizer" )
202
+
153
203
response = self .llm .invoke (messages )
204
+ self .log_response (response .content , "Synthesizer" )
154
205
return response .content
155
206
156
207
def create_agents (llm , vector_store = None ):
0 commit comments