Skip to content

Commit 11af78b

Browse files
committed
Fixing Evaluation class and testing
1 parent 9d5786d commit 11af78b

File tree

4 files changed

+48
-6
lines changed

4 files changed

+48
-6
lines changed

bigml/evaluation.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
from bigml.basemodel import retrieve_resource, get_resource_dict
2727

2828
CLASSIFICATION_METRICS = [
29-
"accuracy", "precision", "recall", "phi" "phi_coefficient",
29+
"accuracy", "precision", "recall", "phi", "phi_coefficient",
3030
"f_measure", "confusion_matrix", "per_class_statistics"]
3131

3232
REGRESSION_METRICS = ["mean_absolute_error", "mean_squared_error", "r_squared"]
@@ -35,6 +35,7 @@
3535
class ClassificationEval():
3636
"""A class to store the classification metrics """
3737
def __init__(self, name, per_class_statistics):
38+
3839
self.name = name
3940
for statistics in per_class_statistics:
4041
if statistics["class_name"] == name:

bigml/tests/create_evaluation_steps.py

Lines changed: 18 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,11 +15,14 @@
1515
# License for the specific language governing permissions and limitations
1616
# under the License.
1717

18+
import json
19+
1820
from bigml.api import HTTP_CREATED
1921
from bigml.api import FINISHED, FAULTY
22+
from bigml.evaluation import Evaluation
2023

2124
from .read_resource_steps import wait_until_status_code_is
22-
from .world import world, eq_, ok_
25+
from .world import world, eq_, ok_, res_filename, approx_
2326

2427
def i_create_an_evaluation(step, shared=None):
2528
"""Step: I create an evaluation for the model with the dataset"""
@@ -105,5 +108,18 @@ def the_measured_measure_is_value(step, measure, value):
105108

106109

107110
def the_measured_measure_is_greater_value(step, measure, value):
108-
"""#@step(r'the measured <measure> is greater than <value>"""
111+
"""Step: the measured <measure> is greater than <value>"""
109112
ok_(float(world.evaluation['result']['model'][measure]) > float(value))
113+
114+
def i_create_a_local_evaluation(step, filename):
115+
"""Step: I create an Evaluation from the JSON file"""
116+
filename = res_filename(filename)
117+
with open(filename) as handler:
118+
evaluation = json.load(handler)
119+
local_evaluation = Evaluation(evaluation)
120+
step.bigml["local_evaluation"] = local_evaluation
121+
122+
def the_local_metric_is_value(step, metric, value):
123+
"""Step: The metric in the local evaluation is <value> """
124+
approx_(getattr(step.bigml["local_evaluation"], metric), value,
125+
precision=4)

bigml/tests/test_14_create_evaluations.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -214,3 +214,29 @@ def test_scenario4(self):
214214
self, example["evaluation_wait"])
215215
evaluation_create.the_measured_measure_is_value(
216216
self, example["metric"], example["value"])
217+
218+
def test_scenario5(self):
219+
"""
220+
Scenario5: Successfully instantiating Evaluation:
221+
Given a stored evaluation "<data>" file
222+
When I create an Evaluation for the JSON
223+
Then the measured "<metric>" is <value>
224+
"""
225+
show_doc(self.test_scenario5)
226+
headers = ["data", "metric", "value"]
227+
examples = [
228+
['data/classification_evaluation.json', 'phi',
229+
0.64837],
230+
['data/classification_evaluation.json', 'accuracy',
231+
0.91791],
232+
['data/classification_evaluation.json', 'precision',
233+
0.86639],
234+
['data/regression_evaluation.json', 'r_squared',
235+
0.9288]]
236+
for example in examples:
237+
example = dict(zip(headers, example))
238+
show_method(self, self.bigml["method"], example)
239+
evaluation_create.i_create_a_local_evaluation(
240+
self, example["data"])
241+
evaluation_create.the_local_metric_is_value(
242+
self, example["metric"], example["value"])

bigml/tests/world.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -179,9 +179,8 @@ def approx_(number_a, number_b, msg=None, precision=5):
179179
"""Wrapper for pytest approx function"""
180180
epsilon = math.pow(0.1, precision)
181181
if msg is None:
182-
assert number_a == pytest.approx(number_b, abs=epsilon)
183-
else:
184-
assert number_a == pytest.approx(number_b, abs=epsilon), msg
182+
msg = "%s != %s" % (repr(number_a), repr(number_b))
183+
assert number_a == pytest.approx(number_b, abs=epsilon), msg
185184

186185

187186
class World:

0 commit comments

Comments (0)