 from unstract.sdk.helper import SdkHelper
 from unstract.sdk.tool.base import BaseTool
 from unstract.sdk.utils.callback_manager import CallbackManager
+from unstract.sdk.utils.common_utils import capture_metrics
 
 logger = logging.getLogger(__name__)
 
@@ -36,6 +37,7 @@ def __init__(
         tool: BaseTool,
         adapter_instance_id: Optional[str] = None,
         usage_kwargs: dict[Any, Any] = {},
+        capture_metrics: bool = False,
     ):
         """Creates an instance of this LLM class.
 
@@ -50,6 +52,10 @@ def __init__(
         self._adapter_instance_id = adapter_instance_id
         self._llm_instance: LlamaIndexLLM = None
         self._usage_kwargs = usage_kwargs
+        self._capture_metrics = capture_metrics
+        self._run_id = usage_kwargs.get("run_id")
+        self._usage_reason = usage_kwargs.get("llm_usage_reason")
+        self._metrics = {}
         self._initialise()
 
     def _initialise(self):
@@ -65,14 +71,16 @@ def _initialise(self):
             kwargs=self._usage_kwargs,
         )
 
+    @capture_metrics
     def complete(
         self,
         prompt: str,
         extract_json: bool = True,
         process_text: Optional[Callable[[str], str]] = None,
         **kwargs: Any,
     ) -> dict[str, Any]:
-        """Generates a completion response for the given prompt.
+        """Generates a completion response for the given prompt and captures
+        metrics if run_id is provided.
 
         Args:
             prompt (str): The input text prompt for generating the completion.
@@ -85,12 +93,8 @@ def complete(
             **kwargs (Any): Additional arguments passed to the completion function.
 
         Returns:
-            dict[str, Any]: A dictionary containing the result of the completion
-                and any processed output.
-
-        Raises:
-            LLMError: If an error occurs during the completion process, it will be
-                raised after being processed by `parse_llm_err`.
+            dict[str, Any]: A dictionary containing the result of the completion,
+                any processed output, and the captured metrics (if applicable).
         """
         try:
             response: CompletionResponse = self._llm_instance.complete(prompt, **kwargs)
@@ -105,12 +109,19 @@ def complete(
                     if not isinstance(process_text_output, dict):
                         process_text_output = {}
                 except Exception as e:
-                    logger.error(f"Error occured inside function 'process_text': {e}")
+                    logger.error(f"Error occurred inside function 'process_text': {e}")
                     process_text_output = {}
-            return {LLM.RESPONSE: response, **process_text_output}
+            response_data = {LLM.RESPONSE: response, **process_text_output}
+            return response_data
         except Exception as e:
             raise parse_llm_err(e, self._llm_adapter_class) from e
 
+    def get_metrics(self):
+        return self._metrics
+
+    def get_usage_reason(self):
+        return self._usage_reason
+
     def stream_complete(
         self,
         prompt: str,
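The `capture_metrics` decorator itself is imported from `unstract.sdk.utils.common_utils` but is not part of this diff. A minimal sketch of the pattern the diff implies, assuming the decorator only times the wrapped call and stores the result in the instance's `_metrics` dict when `_capture_metrics` and `_run_id` are set (the metric keys below are placeholders, not the SDK's actual schema):

```python
import time
from functools import wraps
from typing import Any, Callable


def capture_metrics(func: Callable[..., Any]) -> Callable[..., Any]:
    """Illustrative stand-in for unstract.sdk.utils.common_utils.capture_metrics.

    Assumes the decorated method lives on an object that sets _capture_metrics,
    _run_id, and _metrics in __init__, as the diff above does.
    """

    @wraps(func)
    def wrapper(self, *args: Any, **kwargs: Any) -> Any:
        # No-op unless the instance opted in and a run_id is available.
        if not getattr(self, "_capture_metrics", False) or not getattr(self, "_run_id", None):
            return func(self, *args, **kwargs)

        start = time.perf_counter()
        result = func(self, *args, **kwargs)
        elapsed = time.perf_counter() - start

        # Expose the capture through get_metrics() on the instance.
        self._metrics = {
            "run_id": self._run_id,
            "time_taken_seconds": round(elapsed, 3),
        }
        return result

    return wrapper
```

Because the opt-in check happens inside the wrapper, `complete()` runs unchanged when `capture_metrics` is left at its default of `False`.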
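From the caller's side, the new surface is the `capture_metrics` constructor flag, the `run_id` / `llm_usage_reason` entries in `usage_kwargs`, and the two getters. A hedged usage sketch; the tool, adapter instance ID, and run values are placeholders, not taken from this PR:

```python
from unstract.sdk.llm import LLM

# Placeholders: a real BaseTool instance and adapter instance ID come from the platform.
llm = LLM(
    tool=tool,
    adapter_instance_id="adapter-1234",
    usage_kwargs={"run_id": "run-5678", "llm_usage_reason": "extraction"},
    capture_metrics=True,
)

result = llm.complete(prompt="Summarize the attached contract.")
print(result[LLM.RESPONSE].text)  # LlamaIndex CompletionResponse text

print(llm.get_metrics())       # metrics captured by the decorator, {} if capture is disabled
print(llm.get_usage_reason())  # "extraction"
```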