from datetime import datetime

import pytest
from groundlight import ExperimentalApi


def test_metrics_and_evaluation(gl_experimental: ExperimentalApi):
    name = f"Test metrics and evaluation {datetime.utcnow()}"
    det = gl_experimental.create_detector(name, "test_query")
    for i in range(6):
        iq = gl_experimental.submit_image_query(
            det, "test/assets/cat.jpeg", wait=0, patience_time=10, human_review="NEVER"
        )
        gl_experimental.add_label(iq, "YES")
        iq = gl_experimental.submit_image_query(
            det, "test/assets/cat.jpeg", wait=0, patience_time=10, human_review="NEVER"
        )
        gl_experimental.add_label(iq, "NO")
    metrics = gl_experimental.get_detector_metrics(det.id)
    assert metrics["summary"] is not None
    # NOTE(review): this capture is a diff view and jumps here via
    # "@@ -27,4 +30,4 @@" — original file lines 20-26 are missing. They
    # presumably assign `evaluation` (used by the asserts below) and contain
    # further metric/evaluation asserts; recover them from the full file
    # before running this test. TODO: confirm against the repository.
    assert evaluation["evaluation_results"]["kfold_pooled__negative_accuracy"] is not None
    assert evaluation["evaluation_results"]["balanced_system_accuracies"] is not None
    assert evaluation["evaluation_results"]["positive_system_accuracies"] is not None
    assert evaluation["evaluation_results"]["negative_system_accuracies"] is not None