-
Notifications
You must be signed in to change notification settings - Fork 1
Open
Labels
Description
Leaving something I jotted down:
- May be cool to give people a way to pass image references via the context. I initially thought aind-data-schema allowed other asset types, but it seems to support only images. One idea could be to allow "side-effects" inside tests that return a context to be included in the
self.<action>_testmethods
# Fixed reference timestamp (UTC) stamped onto every generated QC record.
t = datetime(2022, 11, 22, tzinfo=timezone.utc)
# Pre-built statuses: an automated PASS and an unattributed PENDING.
s = QCStatus(evaluator="Automated", status=Status.PASS, timestamp=t)
sp = QCStatus(evaluator="", status=Status.PENDING, timestamp=t)
def result_to_qc_metric(result: qc.TestResult) -> typing.Optional[QCMetric]:
    """Map a test result onto an aind-data-schema QCMetric.

    PASSED and SKIPPED both become a PASS status, FAILED becomes FAIL, and
    any other status returns None so the caller can drop the result.
    """
    if result.status == qc.TestStatus.FAILED:
        ads_status = Status.FAIL
    elif result.status in (qc.TestStatus.PASSED, qc.TestStatus.SKIPPED):
        ads_status = Status.PASS
    else:
        # No QC mapping for this status (e.g. still running).
        return None
    # NOTE(review): reads the private _test_reference attribute — presumably
    # the test callable; verify against the qc package.
    ref = result._test_reference
    return QCMetric(
        name="Unknown" if ref is None else ref.__name__,
        description=result.description,
        value=result.result,
        status_history=[
            QCStatus(evaluator="Automated", status=ads_status, timestamp=t)
        ],
    )
def to_ads(results: list[qc.TestResult]) -> QualityControl:
    """Aggregate test results into an aind-data-schema QualityControl.

    Results are grouped by suite name, one QCEvaluation per suite; results
    whose status has no QC mapping (see result_to_qc_metric) are dropped.
    """
    # itertools.groupby only groups *consecutive* items, so the input must
    # be sorted by the same key first — otherwise a suite whose results are
    # interleaved with another suite's would yield several partial
    # evaluations instead of one.
    by_suite = lambda r: r.suite_name
    evals = []
    for suite_name, test_results in itertools.groupby(
        sorted(results, key=by_suite), key=by_suite
    ):
        # Convert each result exactly once (the original called the
        # converter twice per result: once to filter, once to collect).
        metrics = [
            metric
            for r in test_results
            if (metric := result_to_qc_metric(r)) is not None
        ]
        evals.append(
            QCEvaluation(
                modality=Modality.BEHAVIOR,
                stage="Raw data",
                description="todo",
                name=suite_name,
                created=t,
                notes="",
                metrics=metrics,
            )
        )
    return QualityControl(evaluations=evals)
# Convert the collected results into an aind-data-schema QC record.
# `results` is defined outside this chunk — presumably a list of
# qc.TestResult produced by running the suites; TODO confirm.
ads = to_ads(results)
# Debug aid, intentionally disabled: pretty-print the serialized model.
#print_json(ads.model_dump_json(indent=2))