
Commit 9936faa

Rename to ungrounded attributes (Azure#40078)
* rename to personal attributes
* uploading asset with renamed new tests
* rename to ungroundedness
* few changes
* fix
* fix
1 parent 9f6bf3b commit 9936faa

File tree

17 files changed: +91 −86 lines changed


sdk/evaluation/azure-ai-evaluation/CHANGELOG.md

Lines changed: 7 additions & 7 deletions
@@ -26,14 +26,14 @@
   - insecure-randomness
   - It also supports multiple coding languages such as (Python, Java, C++, C#, Go, Javascript, SQL)
 
-- New Built-in evaluator called ISAEvaluator is added.
-  - It evaluates ungrounded inference of sensitive attributes (ISA) for a given query, response, and context for a single-turn
-    evaluation only, where query represents the user query and response represents the AI system response given the provided context.
+- New Built-in evaluator called UngroundedAttributesEvaluator is added.
+  - It evaluates ungrounded inference of human attributes for a given query, response, and context for a single-turn evaluation only,
+    where query represents the user query and response represents the AI system response given the provided context.
 
-    Inference of Sensitive Attribute checks for whether a response is first, ungrounded, and checks if it contains information
-    about protected class or emotional state of someone.
-
-    The inference of sensitive attributes evaluation identifies the following vulnerabilities:
+    Ungrounded Attributes checks for whether a response is first, ungrounded, and checks if it contains information about protected class
+    or emotional state of a person.
+
+    It identifies the following attributes:
 
   - emotional_state
   - protected_class
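
For context, a minimal usage sketch of the renamed evaluator. The azure_ai_project fields, constructor keywords, and exact result keys below are assumptions based on the CHANGELOG description and the other built-in safety evaluators in this package; they are not shown in this commit.

# Hypothetical usage sketch; project fields and result keys are assumptions.
from azure.identity import DefaultAzureCredential
from azure.ai.evaluation import UngroundedAttributesEvaluator

azure_ai_project = {
    "subscription_id": "<subscription-id>",
    "resource_group_name": "<resource-group>",
    "project_name": "<project-name>",
}

ungrounded_attributes_eval = UngroundedAttributesEvaluator(
    credential=DefaultAzureCredential(),
    azure_ai_project=azure_ai_project,
)

# Single-turn evaluation over query, response, and context, per the CHANGELOG entry.
result = ungrounded_attributes_eval(
    query="How is the customer feeling today?",
    response="The customer is frustrated and belongs to a protected group.",
    context="Call transcript that never states the customer's emotions or background.",
)
print(result)  # expected to include a label plus details such as
               # emotional_state and protected_class (per the CHANGELOG)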

sdk/evaluation/azure-ai-evaluation/assets.json

Lines changed: 1 addition & 1 deletion
@@ -2,5 +2,5 @@
   "AssetsRepo": "Azure/azure-sdk-assets",
   "AssetsRepoPrefixPath": "python",
   "TagPrefix": "python/evaluation/azure-ai-evaluation",
-  "Tag": "python/evaluation/azure-ai-evaluation_2eb57a3d9a"
+  "Tag": "python/evaluation/azure-ai-evaluation_ceeaf3cbb7"
 }

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -26,7 +26,7 @@
 from ._evaluators._similarity import SimilarityEvaluator
 from ._evaluators._xpia import IndirectAttackEvaluator
 from ._evaluators._code_vulnerability import CodeVulnerabilityEvaluator
-from ._evaluators._isa import ISAEvaluator
+from ._evaluators._ungrounded_attributes import UngroundedAttributesEvaluator
 from ._model_configurations import (
     AzureAIProject,
     AzureOpenAIModelConfiguration,
@@ -68,5 +68,5 @@
     "Message",
     "EvaluationResult",
     "CodeVulnerabilityEvaluator",
-    "ISAEvaluator",
+    "UngroundedAttributesEvaluator",
 ]

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_common/constants.py

Lines changed: 2 additions & 2 deletions
@@ -40,7 +40,7 @@ class Tasks:
     XPIA = "xpia"
     GROUNDEDNESS = "groundedness"
     CODE_VULNERABILITY = "code vulnerability"
-    ISA = "inference sensitive attributes"
+    UNGROUNDED_ATTRIBUTES = "inference sensitive attributes"
 
 
 class _InternalAnnotationTasks:
@@ -64,7 +64,7 @@ class EvaluationMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     XPIA = "xpia"
     GROUNDEDNESS = "generic_groundedness"
     CODE_VULNERABILITY = "code_vulnerability"
-    ISA = "inference_sensitive_attributes"
+    UNGROUNDED_ATTRIBUTES = "ungrounded_attributes"
 
 
 class _InternalEvaluationMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
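
A quick check of the two renamed constants above, assuming the private azure.ai.evaluation._common.constants module stays importable as in this diff: the client-facing metric name changes to "ungrounded_attributes", while the annotation task keeps the service's legacy string value, which is why rai_service.py below has to remap the response key.

# Sketch only; imports a private module, so treat as illustrative.
from azure.ai.evaluation._common.constants import EvaluationMetrics, Tasks

print(EvaluationMetrics.UNGROUNDED_ATTRIBUTES.value)  # "ungrounded_attributes"
print(Tasks.UNGROUNDED_ATTRIBUTES)                     # "inference sensitive attributes"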

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_common/rai_service.py

Lines changed: 9 additions & 5 deletions
@@ -42,6 +42,7 @@
     "DEFAULT": Template("<Human>{$query}</><System>{$response}</>"),
 }
 
+INFERENCE_OF_SENSITIVE_ATTRIBUTES = "inference_sensitive_attributes"
 
 def get_formatted_template(data: dict, annotation_task: str) -> str:
     """Given the task and input data, produce a formatted string that will serve as the main
@@ -70,7 +71,7 @@ def get_formatted_template(data: dict, annotation_task: str) -> str:
             "completion": data.get("response", "")
         }
         return json.dumps(as_dict)
-    if annotation_task == Tasks.ISA:
+    if annotation_task == Tasks.UNGROUNDED_ATTRIBUTES:
         as_dict = {
             "query": data.get("query", ""),
             "response": data.get("response", ""),
@@ -173,7 +174,7 @@ def generate_payload(normalized_user_text: str, metric: str, annotation_task: st
     task = annotation_task
     if metric == EvaluationMetrics.PROTECTED_MATERIAL:
         include_metric = False
-    elif metric == EvaluationMetrics.ISA:
+    elif metric == EvaluationMetrics.UNGROUNDED_ATTRIBUTES:
         include_metric = False
     elif metric == _InternalEvaluationMetrics.ECI:
         include_metric = False
@@ -266,7 +267,6 @@ async def fetch_result(operation_id: str, rai_svc_url: str, credential: TokenCre
             sleep_time = RAIService.SLEEP_TIME**request_count
             await asyncio.sleep(sleep_time)
 
-
 def parse_response(  # pylint: disable=too-many-branches,too-many-statements
     batch_response: List[Dict], metric_name: str, metric_display_name: Optional[str] = None
 ) -> Dict[str, Union[str, float]]:
@@ -290,11 +290,15 @@ def parse_response(  # pylint: disable=too-many-branches,too-many-statements
         _InternalEvaluationMetrics.ECI,
         EvaluationMetrics.XPIA,
         EvaluationMetrics.CODE_VULNERABILITY,
-        EvaluationMetrics.ISA,
+        EvaluationMetrics.UNGROUNDED_ATTRIBUTES,
     }:
         result = {}
         if not batch_response or len(batch_response[0]) == 0:
             return {}
+        if metric_name == EvaluationMetrics.UNGROUNDED_ATTRIBUTES and INFERENCE_OF_SENSITIVE_ATTRIBUTES in batch_response[0]:
+            batch_response[0] = {
+                EvaluationMetrics.UNGROUNDED_ATTRIBUTES: batch_response[0][INFERENCE_OF_SENSITIVE_ATTRIBUTES]
+            }
         if metric_name == EvaluationMetrics.PROTECTED_MATERIAL and metric_name not in batch_response[0]:
             pm_metric_names = {"artwork", "fictional_characters", "logos_and_brands"}
             for pm_metric_name in pm_metric_names:
@@ -330,7 +334,7 @@ def parse_response(  # pylint: disable=too-many-branches,too-many-statements
             result[metric_display_name + "_information_gathering"] = (
                 parsed_response["information_gathering"] if "information_gathering" in parsed_response else math.nan
            )
-        if metric_name == EvaluationMetrics.CODE_VULNERABILITY or metric_name == EvaluationMetrics.ISA:
+        if metric_name == EvaluationMetrics.CODE_VULNERABILITY or metric_name == EvaluationMetrics.UNGROUNDED_ATTRIBUTES:
            # Add all attributes under the details.
            details = {}
            for key, value in parsed_response.items():
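
A standalone sketch of the key remap added above, not the SDK's own helper: the RAI service still replies under the legacy inference_sensitive_attributes key, and parse_response rewraps that payload under the new metric name before the rest of the parsing runs.

# Standalone illustration of the remap in parse_response; plain strings are
# used here instead of the SDK's constants.
INFERENCE_OF_SENSITIVE_ATTRIBUTES = "inference_sensitive_attributes"
UNGROUNDED_ATTRIBUTES = "ungrounded_attributes"

def remap_legacy_key(item: dict) -> dict:
    # Move the legacy service key to the renamed metric key, if present.
    if INFERENCE_OF_SENSITIVE_ATTRIBUTES in item:
        return {UNGROUNDED_ATTRIBUTES: item[INFERENCE_OF_SENSITIVE_ATTRIBUTES]}
    return item

print(remap_legacy_key({"inference_sensitive_attributes": {"label": True}}))
# -> {'ungrounded_attributes': {'label': True}}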

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_constants.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ class EvaluationMetrics:
     LOGOS_AND_BRANDS = "logos_and_brands"
     XPIA = "xpia"
     CODE_VULNERABILITY = "code_vulnerability"
-    ISA = "inference_sensitive_attributes"
+    UNGROUNDED_ATTRIBUTES = "ungrounded_attributes"
 
 
 class _InternalEvaluationMetrics:

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluate/_evaluate.py

Lines changed: 1 addition & 1 deletion
@@ -153,7 +153,7 @@ def _aggregate_label_defect_metrics(df: pd.DataFrame) -> Tuple[List[str], Dict[s
         _InternalEvaluationMetrics.ECI,
         EvaluationMetrics.XPIA,
         EvaluationMetrics.CODE_VULNERABILITY,
-        EvaluationMetrics.ISA,
+        EvaluationMetrics.UNGROUNDED_ATTRIBUTES,
     ]
     label_cols = []
     details_cols = []

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py

Lines changed: 2 additions & 2 deletions
@@ -170,6 +170,6 @@ def _get_task(self):
             return Tasks.PROTECTED_MATERIAL
         if self._eval_metric == EvaluationMetrics.CODE_VULNERABILITY:
             return Tasks.CODE_VULNERABILITY
-        if self._eval_metric == EvaluationMetrics.ISA:
-            return Tasks.ISA
+        if self._eval_metric == EvaluationMetrics.UNGROUNDED_ATTRIBUTES:
+            return Tasks.UNGROUNDED_ATTRIBUTES
         return Tasks.CONTENT_HARM

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_isa/__init__.py

Lines changed: 0 additions & 5 deletions
This file was deleted.

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_ungrounded_attributes/__init__.py

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+from ._ungrounded_attributes import UngroundedAttributesEvaluator
+
+__all__ = [
+    "UngroundedAttributesEvaluator",
+]
