@@ -426,8 +426,8 @@ def validate(
         query: str,
         response: str,
         use_llm_matching: bool | NotGiven = NOT_GIVEN,
-        bad_response_thresholds: project_validate_params.BadResponseThresholds | NotGiven = NOT_GIVEN,
         constrain_outputs: Optional[List[str]] | NotGiven = NOT_GIVEN,
+        custom_eval_thresholds: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
         custom_metadata: Optional[object] | NotGiven = NOT_GIVEN,
         eval_scores: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
         options: Optional[project_validate_params.Options] | NotGiven = NOT_GIVEN,
@@ -451,10 +451,13 @@ def validate(
         query will be recorded in the project for SMEs to answer.

         Args:
+          custom_eval_thresholds: Optional custom thresholds for specific evals. Keys should match the keys
+              in the `eval_scores` dictionary.
+
           custom_metadata: Arbitrary metadata supplied by the user/system

-          eval_scores: Evaluation scores to use for flagging a response as bad. If not provided, TLM
-              will be used to generate scores.
+          eval_scores: Scores assessing different aspects of the RAG system. If not provided, TLM will
+              be used to generate scores.

           options: Typed dict of advanced configuration options for the Trustworthy Language Model.
               Many of these configurations are determined by the quality preset selected
@@ -575,8 +578,8 @@ def validate(
575578 "prompt" : prompt ,
576579 "query" : query ,
577580 "response" : response ,
578- "bad_response_thresholds" : bad_response_thresholds ,
579581 "constrain_outputs" : constrain_outputs ,
582+ "custom_eval_thresholds" : custom_eval_thresholds ,
580583 "custom_metadata" : custom_metadata ,
581584 "eval_scores" : eval_scores ,
582585 "options" : options ,
@@ -967,8 +970,8 @@ async def validate(
         query: str,
         response: str,
         use_llm_matching: bool | NotGiven = NOT_GIVEN,
-        bad_response_thresholds: project_validate_params.BadResponseThresholds | NotGiven = NOT_GIVEN,
         constrain_outputs: Optional[List[str]] | NotGiven = NOT_GIVEN,
+        custom_eval_thresholds: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
         custom_metadata: Optional[object] | NotGiven = NOT_GIVEN,
         eval_scores: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
         options: Optional[project_validate_params.Options] | NotGiven = NOT_GIVEN,
@@ -992,10 +995,13 @@ async def validate(
         query will be recorded in the project for SMEs to answer.

         Args:
+          custom_eval_thresholds: Optional custom thresholds for specific evals. Keys should match the keys
+              in the `eval_scores` dictionary.
+
           custom_metadata: Arbitrary metadata supplied by the user/system

-          eval_scores: Evaluation scores to use for flagging a response as bad. If not provided, TLM
-              will be used to generate scores.
+          eval_scores: Scores assessing different aspects of the RAG system. If not provided, TLM will
+              be used to generate scores.

           options: Typed dict of advanced configuration options for the Trustworthy Language Model.
               Many of these configurations are determined by the quality preset selected
@@ -1116,8 +1122,8 @@ async def validate(
11161122 "prompt" : prompt ,
11171123 "query" : query ,
11181124 "response" : response ,
1119- "bad_response_thresholds" : bad_response_thresholds ,
11201125 "constrain_outputs" : constrain_outputs ,
1126+ "custom_eval_thresholds" : custom_eval_thresholds ,
11211127 "custom_metadata" : custom_metadata ,
11221128 "eval_scores" : eval_scores ,
11231129 "options" : options ,
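For callers, the practical effect of this rename is that per-eval flagging thresholds now travel in a plain `Dict[str, float]` keyed like `eval_scores`, rather than the removed `BadResponseThresholds` typed parameter. A minimal usage sketch follows, assuming a `Codex` client entry point and a positional project identifier; both of those are illustrative, and only the keyword parameters shown are confirmed by this diff:

```python
from codex import Codex  # assumed entry point; not confirmed by this diff

client = Codex()

# Keys in `custom_eval_thresholds` must match keys in `eval_scores`;
# a response scoring below its threshold is flagged as bad.
result = client.projects.validate(
    "proj_123",  # hypothetical project identifier
    prompt="Context: Paris is the capital of France.\nQuestion: What is the capital of France?",
    query="What is the capital of France?",
    response="Paris.",
    eval_scores={"trustworthiness": 0.92, "response_helpfulness": 0.61},
    custom_eval_thresholds={"trustworthiness": 0.70, "response_helpfulness": 0.50},
)
```

Omitting `custom_eval_thresholds` (or `eval_scores`) leaves the server-side defaults in effect, since both parameters default to `NOT_GIVEN`.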