@@ -1,5 +1,6 @@
 import logging
 import optillm
+from optillm import conversation_logger
 
 logger = logging.getLogger(__name__)
 
@@ -29,8 +30,8 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
     response_dict = response.model_dump() if hasattr(response, 'model_dump') else response
 
     # Log provider call if conversation logging is enabled
-    if hasattr(optillm, 'conversation_logger') and optillm.conversation_logger and request_id:
-        optillm.conversation_logger.log_provider_call(request_id, provider_request, response_dict)
+    if request_id:
+        conversation_logger.log_provider_call(request_id, provider_request, response_dict)
 
     completions = [choice.message.content for choice in response.choices]
     moa_completion_tokens += response.usage.completion_tokens
@@ -60,8 +61,8 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
     response_dict = response.model_dump() if hasattr(response, 'model_dump') else response
 
     # Log provider call if conversation logging is enabled
-    if hasattr(optillm, 'conversation_logger') and optillm.conversation_logger and request_id:
-        optillm.conversation_logger.log_provider_call(request_id, provider_request, response_dict)
+    if request_id:
+        conversation_logger.log_provider_call(request_id, provider_request, response_dict)
 
     completions.append(response.choices[0].message.content)
     moa_completion_tokens += response.usage.completion_tokens
@@ -122,8 +123,8 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
     response_dict = critique_response.model_dump() if hasattr(critique_response, 'model_dump') else critique_response
 
     # Log provider call if conversation logging is enabled
-    if hasattr(optillm, 'conversation_logger') and optillm.conversation_logger and request_id:
-        optillm.conversation_logger.log_provider_call(request_id, provider_request, response_dict)
+    if request_id:
+        conversation_logger.log_provider_call(request_id, provider_request, response_dict)
 
     critiques = critique_response.choices[0].message.content
     moa_completion_tokens += critique_response.usage.completion_tokens
@@ -169,8 +170,8 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
     response_dict = final_response.model_dump() if hasattr(final_response, 'model_dump') else final_response
 
     # Log provider call if conversation logging is enabled
-    if hasattr(optillm, 'conversation_logger') and optillm.conversation_logger and request_id:
-        optillm.conversation_logger.log_provider_call(request_id, provider_request, response_dict)
+    if request_id:
+        conversation_logger.log_provider_call(request_id, provider_request, response_dict)
 
     moa_completion_tokens += final_response.usage.completion_tokens
     logger.info(f"Generated final response. Tokens used: {final_response.usage.completion_tokens}")
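
For the simplified guard to be safe, this change assumes `optillm.conversation_logger` is always importable and is a no-op when logging is disabled, so call sites no longer need the `hasattr(...)` and truthiness checks. The sketch below illustrates one way such a module-level logger could be structured; it is an assumption for illustration, not the actual optillm implementation. The class name, constructor parameters, and output format are hypothetical; only the `log_provider_call(request_id, provider_request, response_dict)` signature is taken from the diff.

```python
# Hypothetical sketch of a module-level conversation logger; not the real
# optillm implementation. The key property is that log_provider_call() is
# always safe to invoke and simply returns when logging is disabled.
import json
import logging
from typing import Any, Dict, Optional

logger = logging.getLogger(__name__)


class ConversationLogger:
    def __init__(self, enabled: bool = False, path: Optional[str] = None):
        # When enabled is False, every logging call is a cheap no-op.
        self.enabled = enabled
        self.path = path

    def log_provider_call(
        self,
        request_id: str,
        provider_request: Dict[str, Any],
        response_dict: Dict[str, Any],
    ) -> None:
        # Centralized enabled-check: callers only need to know the request_id.
        if not self.enabled:
            return
        record = {
            "request_id": request_id,
            "provider_request": provider_request,
            "response": response_dict,
        }
        if self.path:
            # Append one JSON record per provider call.
            with open(self.path, "a") as f:
                f.write(json.dumps(record) + "\n")
        else:
            logger.info("provider call %s: %s", request_id, json.dumps(record))


# Module-level singleton assumed by `from optillm import conversation_logger`.
conversation_logger = ConversationLogger()
```

Moving the enabled-check inside `log_provider_call` is what lets each of the four call sites in the diff shrink from the three-part `hasattr(optillm, 'conversation_logger') and optillm.conversation_logger and request_id` guard to a single `if request_id:`.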