@@ -371,13 +371,38 @@ def llm_output_value(params: LLMAttributesParams) -> ExtractorResponse:
371371 )
372372
373373
def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorResponse:
    """Extract ``gen_ai.request.functions.*`` attributes for the request's tools.

    Args:
        params: Attribute-extraction parameters; only
            ``params.llm_request.tools_dict`` is read here.

    Returns:
        ExtractorResponse whose ``content`` is a list with one dict per tool,
        keyed ``gen_ai.request.functions.{idx}.name`` / ``.description`` /
        ``.parameters`` (parameters serialized via ``str()`` of the
        declaration's ``model_dump(exclude_none=True)``, or ``{}`` when the
        tool has no declaration or no parameters).
    """
    functions = []

    # Iterate values only — the dict key is unused; index order follows
    # tools_dict insertion order.
    for idx, tool_instance in enumerate(params.llm_request.tools_dict.values()):
        # Call the private _get_declaration() once per tool instead of three
        # times: it may construct the declaration on every invocation.
        declaration = tool_instance._get_declaration()
        parameters = declaration.parameters if declaration else None  # type: ignore
        functions.append(
            {
                f"gen_ai.request.functions.{idx}.name": tool_instance.name,
                f"gen_ai.request.functions.{idx}.description": tool_instance.description,
                f"gen_ai.request.functions.{idx}.parameters": str(
                    parameters.model_dump(exclude_none=True) if parameters else {}
                ),
            }
        )

    return ExtractorResponse(content=functions)
396+
397+
374398LLM_ATTRIBUTES = {
375399 # ===== request attributes =====
376400 "gen_ai.request.model" : llm_gen_ai_request_model ,
377401 "gen_ai.request.type" : llm_gen_ai_request_type ,
378402 "gen_ai.request.max_tokens" : llm_gen_ai_request_max_tokens ,
379403 "gen_ai.request.temperature" : llm_gen_ai_request_temperature ,
380404 "gen_ai.request.top_p" : llm_gen_ai_request_top_p ,
405+ "gen_ai.request.functions" : llm_gen_ai_request_functions ,
381406 # ===== response attributes =====
382407 "gen_ai.response.model" : llm_gen_ai_response_model ,
383408 "gen_ai.response.stop_reason" : llm_gen_ai_response_stop_reason ,
0 commit comments