# Module-level logger named after this module (standard stdlib logging pattern).
logger = logging.getLogger(__name__)

def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str) -> str:
    """Run a three-stage "mixture of agents" flow against a chat-completions API.

    Stage 1 asks the model for several candidate answers to ``initial_query``,
    stage 2 asks the model to critique those candidates, and stage 3 asks for a
    final answer informed by the critiques. Completion tokens from all three
    calls are accumulated in ``moa_completion_tokens``.

    Args:
        system_prompt: System message sent with the API calls.
        initial_query: The user's original query.
        client: OpenAI-compatible client exposing ``chat.completions.create``.
        model: Model name forwarded unchanged to the client.

    Returns:
        A 2-tuple ``(final_response_text, moa_completion_tokens)``.
        NOTE(review): the ``-> str`` annotation disagrees with the tuple
        returned on the last line — the annotation looks stale; confirm
        against callers and correct upstream.

    NOTE(review): this block was recovered from a diff view; the ``@@`` lines
    below mark hunks (chat-message payloads and the middles of the prompt
    literals) that are elided from this view and cannot be reconstructed here.
    """
    logger.info(f"Starting mixture_of_agents function with model: {model}")
    moa_completion_tokens = 0  # running total of completion tokens across all 3 calls
    completions = []

    # --- Stage 1: generate candidate completions for the original query ---
    logger.debug(f"Generating initial completions for query: {initial_query}")
    response = client.chat.completions.create(
        model=model,
        messages=[
@@ -18,7 +20,9 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
    )
    # One candidate per returned choice; presumably the elided call requested n > 1 — TODO confirm.
    completions = [choice.message.content for choice in response.choices]
    moa_completion_tokens += response.usage.completion_tokens
    logger.info(f"Generated {len(completions)} initial completions. Tokens used: {response.usage.completion_tokens}")

    # --- Stage 2: ask the model to critique each candidate ---
    logger.debug("Preparing critique prompt")
    critique_prompt = f"""
    Original query: {initial_query}

@@ -36,6 +40,7 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
    Please provide your critique for each candidate:
    """

    logger.debug("Generating critiques")
    critique_response = client.chat.completions.create(
        model=model,
        messages=[
@@ -48,7 +53,9 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
    )
    # Single critique message covering all candidates.
    critiques = critique_response.choices[0].message.content
    moa_completion_tokens += critique_response.usage.completion_tokens
    logger.info(f"Generated critiques. Tokens used: {critique_response.usage.completion_tokens}")

    # --- Stage 3: synthesize a final answer informed by the critiques ---
    logger.debug("Preparing final prompt")
    final_prompt = f"""
    Original query: {initial_query}

@@ -69,6 +76,7 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
    Please provide a final, optimized response to the original query:
    """

    logger.debug("Generating final response")
    final_response = client.chat.completions.create(
        model=model,
        messages=[
@@ -80,4 +88,7 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
        # Low temperature for the synthesis step: favor a stable, focused answer.
        temperature=0.1
    )
    moa_completion_tokens += final_response.usage.completion_tokens
    logger.info(f"Generated final response. Tokens used: {final_response.usage.completion_tokens}")

    logger.info(f"Total completion tokens used: {moa_completion_tokens}")
    return final_response.choices[0].message.content, moa_completion_tokens
# NOTE(review): scraped page footer ("0 commit comments") — not part of the source file; remove when restoring the real module.