@@ -21,6 +21,8 @@
     _fluency,
     _xpia,
     _coherence,
+    _code_vulnerability,
+    _ungrounded_attributes,
 )
 from azure.ai.evaluation._evaluators._eci._eci import ECIEvaluator
 from azure.ai.evaluation._evaluate import _evaluate
@@ -32,7 +34,7 @@
     AdversarialScenario,
     AdversarialScenarioJailbreak,
     IndirectAttackSimulator,
-    DirectAttackSimulator,
+    DirectAttackSimulator,
 )
 from azure.ai.evaluation.simulator._adversarial_scenario import _UnstableAdversarialScenario
 from azure.ai.evaluation.simulator._utils import JsonLineList
@@ -72,6 +74,7 @@ class _SafetyEvaluator(Enum):
7274 """
7375
7476 CONTENT_SAFETY = "content_safety"
77+ CODE_VULNERABILITY = "code_vulnerability"
7578 GROUNDEDNESS = "groundedness"
7679 PROTECTED_MATERIAL = "protected_material"
7780 RELEVANCE = "relevance"
@@ -81,6 +84,7 @@ class _SafetyEvaluator(Enum):
     INDIRECT_ATTACK = "indirect_attack"
     DIRECT_ATTACK = "direct_attack"
     ECI = "eci"
+    UNGROUNDED_ATTRIBUTES = "ungrounded_attributes"
 
 
 @experimental
@@ -380,6 +384,10 @@ def _get_scenario(
         )
         if evaluator == _SafetyEvaluator.ECI:
             return _UnstableAdversarialScenario.ECI
+        if evaluator == _SafetyEvaluator.CODE_VULNERABILITY:
+            return AdversarialScenario.ADVERSARIAL_CODE_VULNERABILITY
+        if evaluator == _SafetyEvaluator.UNGROUNDED_ATTRIBUTES:
+            return AdversarialScenario.ADVERSARIAL_UNGROUNDED_ATTRIBUTES
         if evaluator in [
             _SafetyEvaluator.GROUNDEDNESS,
             _SafetyEvaluator.RELEVANCE,
@@ -461,6 +469,14 @@ def _get_evaluators(
                 evaluators_dict["eci"] = ECIEvaluator(
                     azure_ai_project=self.azure_ai_project, credential=self.credential
                 )
+            elif evaluator == _SafetyEvaluator.CODE_VULNERABILITY:
+                evaluators_dict["code_vulnerability"] = _code_vulnerability.CodeVulnerabilityEvaluator(
+                    azure_ai_project=self.azure_ai_project, credential=self.credential
+                )
+            elif evaluator == _SafetyEvaluator.UNGROUNDED_ATTRIBUTES:
+                evaluators_dict["ungrounded_attributes"] = _ungrounded_attributes.UngroundedAttributesEvaluator(
+                    azure_ai_project=self.azure_ai_project, credential=self.credential
+                )
             else:
                 msg = (
                     f"Invalid evaluator: {evaluator}. Supported evaluators are: {_SafetyEvaluator.__members__.values()}"
@@ -597,7 +613,28 @@ def _validate_inputs(
                 category=ErrorCategory.INVALID_VALUE,
                 blame=ErrorBlame.USER_ERROR,
             )
-
+
+        if _SafetyEvaluator.CODE_VULNERABILITY in evaluators and num_turns > 1:
+            self.logger.error("Code vulnerability evaluation only supports single-turn conversations.")
+            msg = "Code vulnerability evaluation only supports single-turn conversations."
+            raise EvaluationException(
+                message=msg,
+                internal_message=msg,
+                target=ErrorTarget.UNKNOWN,
+                category=ErrorCategory.INVALID_VALUE,
+                blame=ErrorBlame.USER_ERROR,
+            )
+        if _SafetyEvaluator.UNGROUNDED_ATTRIBUTES in evaluators and num_turns > 1:
+            self.logger.error("Ungrounded attributes evaluation only supports single-turn conversations.")
+            msg = "Ungrounded attributes evaluation only supports single-turn conversations."
+            raise EvaluationException(
+                message=msg,
+                internal_message=msg,
+                target=ErrorTarget.UNKNOWN,
+                category=ErrorCategory.INVALID_VALUE,
+                blame=ErrorBlame.USER_ERROR,
+            )
+
         if _SafetyEvaluator.CONTENT_SAFETY in evaluators and scenario and num_turns > 1 and scenario != AdversarialScenario.ADVERSARIAL_CONVERSATION:
             self.logger.error(f"Adversarial scenario {scenario} is not supported for content safety evaluation with more than 1 turn.")
             msg = f"Adversarial scenario {scenario} is not supported for content safety evaluation with more than 1 turn."
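
For context, a minimal usage sketch of the two evaluators this change wires up. The public import path, the azure_ai_project dictionary shape, and the single-turn query/response (and context) call convention are assumptions inferred from the ECIEvaluator construction in the diff and from the other built-in safety evaluators, not something this commit itself shows:

# Hypothetical usage sketch -- import path and call shapes are assumptions,
# mirroring how ECIEvaluator is constructed in _get_evaluators above.
from azure.identity import DefaultAzureCredential
from azure.ai.evaluation import CodeVulnerabilityEvaluator, UngroundedAttributesEvaluator

azure_ai_project = {
    "subscription_id": "<subscription-id>",      # placeholder values
    "resource_group_name": "<resource-group>",
    "project_name": "<project-name>",
}
credential = DefaultAzureCredential()

# Same (azure_ai_project, credential) constructor arguments as ECIEvaluator.
code_vuln = CodeVulnerabilityEvaluator(azure_ai_project=azure_ai_project, credential=credential)
ungrounded = UngroundedAttributesEvaluator(azure_ai_project=azure_ai_project, credential=credential)

# Single-turn only: _validate_inputs above rejects num_turns > 1 for both.
code_result = code_vuln(
    query="Write a function that runs a shell command built from user input.",
    response="import os\ndef run(user_cmd):\n    os.system(user_cmd)",
)
ungrounded_result = ungrounded(
    query="How did the customer seem?",
    response="The customer sounded depressed.",
    context="The customer asked about the return policy and thanked the agent.",
)
print(code_result)
print(ungrounded_result)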