Commit 3a7a934

[evaluation] style: Re-enable black (Azure#37532)
* ci: Re-enable black
* style: Run black
1 parent 75502e2 commit 3a7a934
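
For readers who don't use black: it is an opinionated formatter, and every hunk below is one of a handful of mechanical rewrites. A generic sketch (not code from this repo) of the main rules at play:

```python
# Quote normalization, operator spacing, keyword-argument spacing,
# and the "magic trailing comma" are the rules black applies below.
package = "azure.ai.evaluation.simulator._prompty"  # was: '...' single quotes
total = int(4 * (6 / 2))                            # was: 6/2, no spaces around /

def describe(name: str, turns: int) -> str:
    return f"{name}: {turns}"

label = describe(   # once a call is split across lines, black keeps it
    "simulator",    # exploded and appends a trailing comma after the
    6,              # last argument
)
print(label)  # simulator: 6
```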

File tree: 15 files changed, +120 −127 lines changed

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_common/utils.py

Lines changed: 3 additions & 4 deletions
@@ -60,6 +60,7 @@ def nltk_tokenize(text: str) -> List[str]:
     if not text.isascii():
         # Use NISTTokenizer for international tokenization
         from nltk.tokenize.nist import NISTTokenizer
+
         tokens = NISTTokenizer().international_tokenize(text)
     else:
         # By default, use NLTK word tokenizer
@@ -72,9 +73,7 @@ def check_and_add_api_version_for_aoai_model_config(
     model_config: Union[AzureOpenAIModelConfiguration, OpenAIModelConfiguration],
     default_api_version: str,
 ) -> None:
-    if (
-        "azure_endpoint" in model_config or "azure_deployment" in model_config
-    ):
+    if "azure_endpoint" in model_config or "azure_deployment" in model_config:
         model_config["api_version"] = model_config.get("api_version", default_api_version)


@@ -84,4 +83,4 @@ def check_and_add_user_agent_for_aoai_model_config(
     user_agent: Optional[str] = None,
 ) -> None:
     if user_agent and ("azure_endpoint" in model_config or "azure_deployment" in model_config):
-        prompty_model_config["parameters"]["extra_headers"].update({"x-ms-useragent": user_agent})
\ No newline at end of file
+        prompty_model_config["parameters"]["extra_headers"].update({"x-ms-useragent": user_agent})
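
For context, a minimal runnable sketch of the helper after this change; the plain `Dict[str, str]` and the example endpoint stand in for the real `AzureOpenAIModelConfiguration`/`OpenAIModelConfiguration` TypedDicts:

```python
from typing import Dict

def check_and_add_api_version_for_aoai_model_config(
    model_config: Dict[str, str],  # simplified stand-in for the real TypedDicts
    default_api_version: str,
) -> None:
    # Only Azure OpenAI-style configs carry these keys; plain OpenAI
    # configs fall through untouched.
    if "azure_endpoint" in model_config or "azure_deployment" in model_config:
        model_config["api_version"] = model_config.get("api_version", default_api_version)

config = {"azure_endpoint": "https://example.openai.azure.com"}  # example value
check_and_add_api_version_for_aoai_model_config(config, "2024-02-01")
print(config["api_version"])  # 2024-02-01
```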

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluate/_batch_run_client/code_client.py

Lines changed: 1 addition & 1 deletion
@@ -121,7 +121,7 @@ def run(self, flow, data, evaluator_name=None, column_mapping=None, **kwargs):
             json_data = load_jsonl(data)
         except json.JSONDecodeError as exc:
             raise EvaluationException(
-                message = f"Failed to parse data as JSON: {data}. Provide valid json lines data.",
+                message=f"Failed to parse data as JSON: {data}. Provide valid json lines data.",
                 internal_message="Failed to parse data as JSON",
                 target=ErrorTarget.CODE_CLIENT,
                 category=ErrorCategory.INVALID_VALUE,
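
The surrounding `try` block parses JSON-lines input. The real `load_jsonl` lives elsewhere in the package; a hypothetical equivalent, to show where the wrapped `json.JSONDecodeError` comes from:

```python
import json
from typing import Any, Dict, List

def load_jsonl(path: str) -> List[Dict[str, Any]]:
    # One JSON object per line; a malformed line raises
    # json.JSONDecodeError, which the caller above wraps
    # in EvaluationException.
    with open(path, "r", encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]
```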

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluate/_eval_run.py

Lines changed: 2 additions & 2 deletions
@@ -216,7 +216,7 @@ def _end_run(self, reason: str) -> None:
                 internal_message="Incorrect terminal status. Valid statuses are 'FINISHED', 'FAILED' and 'KILLED'",
                 target=ErrorTarget.EVAL_RUN,
                 category=ErrorCategory.FAILED_EXECUTION,
-                blame=ErrorBlame.UNKNOWN
+                blame=ErrorBlame.UNKNOWN,
             )
         url = f"https://{self._url_base}/mlflow/v2.0" f"{self._get_scope()}/api/2.0/mlflow/runs/update"
         body = {
@@ -354,7 +354,7 @@ def _check_state_and_log(self, action: str, bad_states: Set[RunStatus], should_r
                 internal_message=msg,
                 target=ErrorTarget.EVAL_RUN,
                 category=ErrorCategory.FAILED_EXECUTION,
-                blame=ErrorBlame.UNKNOWN
+                blame=ErrorBlame.UNKNOWN,
             )
         LOGGER.warning(msg)
         return False
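
Both hunks add only a trailing comma. A short sketch (hypothetical `log_error`, not the SDK's API) of why black insists on it:

```python
def log_error(message: str, category: str, blame: str = "unknown") -> None:
    print(f"[{category}/{blame}] {message}")

log_error(
    "Incorrect terminal status",
    "FAILED_EXECUTION",
    blame="UNKNOWN",  # with the trailing comma in place, adding another
)                     # keyword later is a one-line diff
```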

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluate/_evaluate.py

Lines changed: 6 additions & 6 deletions
@@ -260,12 +260,12 @@ def _validate_and_load_data(target, data, evaluators, output_path, azure_ai_proj
         initial_data_df = pd.read_json(data, lines=True)
     except Exception as e:
         raise EvaluationException(
-                message=f"Failed to load data from {data}. Confirm that it is valid jsonl data. Error: {str(e)}.",
-                internal_message="Failed to load data. Confirm that it is valid jsonl data.",
-                target=ErrorTarget.EVALUATE,
-                category=ErrorCategory.INVALID_VALUE,
-                blame=ErrorBlame.USER_ERROR,
-            ) from e
+            message=f"Failed to load data from {data}. Confirm that it is valid jsonl data. Error: {str(e)}.",
+            internal_message="Failed to load data. Confirm that it is valid jsonl data.",
+            target=ErrorTarget.EVALUATE,
+            category=ErrorCategory.INVALID_VALUE,
+            blame=ErrorBlame.USER_ERROR,
+        ) from e

     return initial_data_df

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluate/_utils.py

Lines changed: 10 additions & 10 deletions
@@ -33,16 +33,16 @@ def extract_workspace_triad_from_trace_provider(trace_provider: str):  # pylint:
     match = re.match(AZURE_WORKSPACE_REGEX_FORMAT, trace_provider)
     if not match or len(match.groups()) != 5:
         raise EvaluationException(
-                message="Malformed trace provider string, expected azureml://subscriptions/<subscription_id>/"
-                "resourceGroups/<resource_group>/providers/Microsoft.MachineLearningServices/"
-                f"workspaces/<workspace_name>, got {trace_provider}",
-                internal_message="Malformed trace provider string, expected azureml://subscriptions/<subscription_id>/"
-                "resourceGroups/<resource_group>/providers/Microsoft.MachineLearningServices/"
-                "workspaces/<workspace_name>,",
-                target=ErrorTarget.UNKNOWN,
-                category=ErrorCategory.INVALID_VALUE,
-                blame=ErrorBlame.UNKNOWN,
-            )
+            message="Malformed trace provider string, expected azureml://subscriptions/<subscription_id>/"
+            "resourceGroups/<resource_group>/providers/Microsoft.MachineLearningServices/"
+            f"workspaces/<workspace_name>, got {trace_provider}",
+            internal_message="Malformed trace provider string, expected azureml://subscriptions/<subscription_id>/"
+            "resourceGroups/<resource_group>/providers/Microsoft.MachineLearningServices/"
+            "workspaces/<workspace_name>,",
+            target=ErrorTarget.UNKNOWN,
+            category=ErrorCategory.INVALID_VALUE,
+            blame=ErrorBlame.UNKNOWN,
+        )
     subscription_id = match.group(1)
     resource_group_name = match.group(3)
     workspace_name = match.group(5)
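
The constant `AZURE_WORKSPACE_REGEX_FORMAT` is not shown in this diff; below is a hypothetical reconstruction, shaped only so that, as in the code above, a match yields five groups with the subscription, resource group, and workspace in groups 1, 3, and 5:

```python
import re

# Hypothetical stand-in for AZURE_WORKSPACE_REGEX_FORMAT; the real
# constant may differ, but the group layout matches the usage above.
AZURE_WORKSPACE_REGEX_FORMAT = (
    r"^azureml://subscriptions/([^/]+)"
    r"(/resourceGroups/([^/]+))"
    r"(/providers/Microsoft\.MachineLearningServices/workspaces/([^/]+))$"
)

m = re.match(
    AZURE_WORKSPACE_REGEX_FORMAT,
    "azureml://subscriptions/sub/resourceGroups/rg"
    "/providers/Microsoft.MachineLearningServices/workspaces/ws",
)
assert m is not None and len(m.groups()) == 5
print(m.group(1), m.group(3), m.group(5))  # sub rg ws
```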

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_chat/_chat.py

Lines changed: 4 additions & 2 deletions
@@ -272,7 +272,7 @@ def _validate_conversation(self, conversation: List[Dict]):
                     category=ErrorCategory.INVALID_VALUE,
                     blame=ErrorBlame.USER_ERROR,
                 )
-
+
             if turn["role"] != expected_role:
                 msg = f"Expected role {expected_role} but got {turn['role']}. Turn number: {one_based_turn_num}"
                 raise EvaluationException(
@@ -305,7 +305,9 @@ def _validate_conversation(self, conversation: List[Dict]):
                 )

             if "citations" not in turn["context"]:
-                msg = f"Context in each assistant's turn must have 'citations' key. Turn number: {one_based_turn_num}"
+                msg = (
+                    f"Context in each assistant's turn must have 'citations' key. Turn number: {one_based_turn_num}"
+                )
                 raise EvaluationException(
                     message=msg,
                     internal_message=msg,

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 from promptflow.tracing import ThreadPoolExecutorWithContext as ThreadPoolExecutor
 from azure.ai.evaluation._exceptions import EvaluationException, ErrorBlame, ErrorCategory, ErrorTarget

-from azure.ai.evaluation._model_configurations import AzureAIProject
+from azure.ai.evaluation._model_configurations import AzureAIProject

 try:
     from ._hate_unfairness import HateUnfairnessEvaluator

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_qa/_qa.py

Lines changed: 3 additions & 12 deletions
@@ -52,9 +52,7 @@ class QAEvaluator:
     }
     """

-    def __init__(
-        self, model_config: dict, parallel: bool = True
-    ):
+    def __init__(self, model_config: dict, parallel: bool = True):
         self._parallel = parallel

         self._evaluators = [
@@ -88,12 +86,7 @@ def __call__(self, *, query: str, response: str, context: str, ground_truth: str
         with ThreadPoolExecutor() as executor:
             futures = {
                 executor.submit(
-                    evaluator,
-                    query=query,
-                    response=response,
-                    context=context,
-                    ground_truth=ground_truth,
-                    **kwargs
+                    evaluator, query=query, response=response, context=context, ground_truth=ground_truth, **kwargs
                 ): evaluator
                 for evaluator in self._evaluators
             }
@@ -103,9 +96,7 @@ def __call__(self, *, query: str, response: str, context: str, ground_truth: str
                 results.update(future.result())
             else:
                 for evaluator in self._evaluators:
-                    result = evaluator(
-                        query=query, response=response, context=context, ground_truth=ground_truth, **kwargs
-                    )
+                    result = evaluator(query=query, response=response, context=context, ground_truth=ground_truth, **kwargs)
                     results.update(result)

         return results
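
The collapsed call sits inside a futures-to-evaluator mapping. A self-contained sketch of that fan-out pattern using the stdlib executor (the SDK imports promptflow's context-propagating subclass) and toy evaluators:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Dict

# Toy stand-ins for the composed evaluators; each returns a dict of scores.
def coherence(*, query: str, response: str, **kwargs) -> Dict[str, int]:
    return {"coherence": 5}

def fluency(*, query: str, response: str, **kwargs) -> Dict[str, int]:
    return {"fluency": 4}

results: Dict[str, int] = {}
with ThreadPoolExecutor() as executor:
    # Map each future back to its evaluator, then merge results as they finish.
    futures = {executor.submit(fn, query="hi", response="hello"): fn for fn in (coherence, fluency)}
    for future in as_completed(futures):
        results.update(future.result())
print(results)
```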

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/simulator/_helpers/_experimental.py

Lines changed: 1 addition & 1 deletion
@@ -154,4 +154,4 @@ def _is_warning_cached(warning_msg):
     if warning_msg in _warning_cache:
         return True
     _warning_cache.add(warning_msg)
-    return False
\ No newline at end of file
+    return False

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/simulator/simulator.py

Lines changed: 20 additions & 16 deletions
@@ -20,6 +20,7 @@
 from .._user_agent import USER_AGENT
 from ._conversation.constants import ConversationRole
 from ._helpers import ConversationHistory, Turn, experimental
+
 # from ._tracing import monitor_task_simulator
 from ._utils import JsonLineChatProtocol

@@ -198,7 +199,7 @@ async def _simulate_with_predefined_turns(
         """
         simulated_conversations = []
         progress_bar = tqdm(
-            total=int(len(conversation_turns) * (max_conversation_turns/2)),
+            total=int(len(conversation_turns) * (max_conversation_turns / 2)),
             desc="Simulating with predefined conversation turns: ",
             ncols=100,
             unit="messages",
@@ -290,8 +291,8 @@ def _load_user_simulation_flow(
         :return: The loaded flow for simulating user interactions.
         """
         if not user_simulator_prompty:
-            package = 'azure.ai.evaluation.simulator._prompty'
-            resource_name = 'task_simulate.prompty'
+            package = "azure.ai.evaluation.simulator._prompty"
+            resource_name = "task_simulate.prompty"
             try:
                 # Access the resource as a file path
                 with pkg_resources.path(package, resource_name) as prompty_path:
@@ -315,12 +316,12 @@ def _parse_prompty_response(self, *, response: str) -> Dict[str, Any]:
         """
         try:
             if type(response) == str:
-                response = response.replace('\u2019', "'").replace('\u2018', "'")
-                response = response.replace('\u201C', '"').replace('\u201D', '"')
-
+                response = response.replace("\u2019", "'").replace("\u2018", "'")
+                response = response.replace("\u201C", '"').replace("\u201D", '"')
+
                 # Replace None with null
-                response = response.replace('None', 'null')
-
+                response = response.replace("None", "null")
+
                 # Escape unescaped single quotes inside string values
                 def escape_single_quotes(match):
                     s = match.group(0)
@@ -391,8 +392,8 @@ def _load_query_generation_flow(
         :return: The loaded flow for generating query responses.
         """
         if not query_response_generating_prompty:
-            package = 'azure.ai.evaluation.simulator._prompty'
-            resource_name = 'task_query_response.prompty'
+            package = "azure.ai.evaluation.simulator._prompty"
+            resource_name = "task_query_response.prompty"
             try:
                 # Access the resource as a file path
                 with pkg_resources.path(package, resource_name) as prompty_path:
@@ -432,7 +433,7 @@ async def _create_conversations_from_query_responses(
         total_turns = len(query_responses) * max_conversation_turns

         progress_bar = tqdm(
-            total=int(total_turns/2),
+            total=int(total_turns / 2),
             desc="Generating: ",
             ncols=100,
             unit="message",
@@ -516,11 +517,14 @@ async def _complete_conversation(
                 user_simulator_prompty_kwargs=user_simulator_prompty_kwargs,
             )
             conversation_starter_from_simulated_user = user_flow(
-                task=task, conversation_history=[{
-                    "role": "assistant",
-                    "content": conversation_starter,
-                    "your_task": "Act as the user and translate the content into a user query."
-                }]
+                task=task,
+                conversation_history=[
+                    {
+                        "role": "assistant",
+                        "content": conversation_starter,
+                        "your_task": "Act as the user and translate the content into a user query.",
+                    }
+                ],
             )
             if type(conversation_starter_from_simulated_user) == dict:
                 conversation_starter_from_simulated_user = conversation_starter_from_simulated_user["content"]
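
The quote rewrites in `_parse_prompty_response` are part of a JSON-repair pass over model output; a simplified sketch of that approach (omitting the single-quote escaping step the file applies afterward):

```python
import json
from typing import Any, Dict

def sanitize_model_json(response: str) -> Dict[str, Any]:
    # Straighten curly quotes, then map Python-style None to JSON null,
    # mirroring the replace() calls in _parse_prompty_response above.
    response = response.replace("\u2019", "'").replace("\u2018", "'")
    response = response.replace("\u201C", '"').replace("\u201D", '"')
    response = response.replace("None", "null")
    return json.loads(response)

print(sanitize_model_json('{"content": "hi", "extra": None}'))  # {'content': 'hi', 'extra': None}
```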
