Commit 775b9d8

PR changes
1 parent 64c7845 commit 775b9d8

3 files changed, +48 -52 lines changed


dataikuapi/dss/modelcomparison.py

Lines changed: 31 additions & 0 deletions
@@ -1,4 +1,5 @@
 from dataikuapi.dss.discussion import DSSObjectDiscussions
+import re


 class DSSModelComparison(object):
@@ -36,6 +37,36 @@ def get_object_discussions(self):
         """
         return DSSObjectDiscussions(self.client, self.project_key, "MODEL_COMPARISON", self.mec_id)

+    def get_evaluation_like_from_full_id(self, full_id):
+        """
+        Retrieves a Saved Model from the flow, a Lab Model from an Analysis, or a Model Evaluation from a Model Evaluation Store, using its full id.
+
+        :param string full_id: the full id of the item to retrieve
+
+        :returns: A handle on the Saved Model, the Model Evaluation or the Lab Model
+        :rtype: :class:`dataikuapi.dss.savedmodel.DSSSavedModel`
+        :rtype: :class:`dataikuapi.dss.modelevaluationstore.DSSModelEvaluation`
+        :rtype: :class:`dataikuapi.dss.ml.DSSTrainedPredictionModelDetails`
+        """
+
+        saved_model_pattern = re.compile("^S-(\\w+)-(\\w+)-(\\w+)(?:-part-(\\w+)-(v?\\d+))?$\\Z")
+        analysis_model_pattern = re.compile("^A-(\\w+)-(\\w+)-(\\w+)-(s[0-9]+)-(pp[0-9]+(?:-part-(\\w+)|-base)?)-(m[0-9]+)$\\Z")
+        model_evaluation_pattern = re.compile("^ME-(\\w+)-(\\w+)-(\\w+)$\\Z")
+
+        if saved_model_pattern.match(full_id):
+            return self.project.get_saved_model(full_id)
+        elif model_evaluation_pattern.match(full_id):
+            mes_id = full_id.split('-')[2]
+            evaluation_id = full_id.split('-')[3]
+            mes = self.project.get_model_evaluation_store(mes_id)
+            return mes.get_model_evaluation(evaluation_id)
+        elif analysis_model_pattern.match(full_id):
+            analysis_id = full_id.split('-')[2]
+            task_id = full_id.split('-')[3]
+            return self.project.get_ml_task(analysis_id, task_id).get_trained_model_details(full_id)
+
+        raise ValueError("{} is not a valid full model id or full model evaluation id.".format(full_id))
+
     ########################################################
     # Deletion
     ########################################################
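The three regexes spell out the accepted full id shapes: S-<project>-<model id>-<version> (optionally suffixed -part-<partition>-<version>) for a Saved Model, A-<project>-<analysis>-<task>-s<N>-pp<N>[-part-<partition>|-base]-m<N> for a Lab Model, and ME-<project>-<store>-<run> for a Model Evaluation. A minimal usage sketch of the new method; the host, API key, and all ids below are hypothetical, and get_model_comparison is assumed to be the project-level getter paired with create_model_comparison:

    import dataikuapi

    # Hypothetical connection details
    client = dataikuapi.DSSClient("http://localhost:11200", "my_api_key")
    project = client.get_project("MYPROJECT")
    # Assumed getter; the comparison id is a placeholder
    comparison = project.get_model_comparison("my_comparison")

    # The method dispatches on the id prefix and returns the matching handle
    saved_model = comparison.get_evaluation_like_from_full_id("S-MYPROJECT-8xVz31-1")           # DSSSavedModel
    evaluation = comparison.get_evaluation_like_from_full_id("ME-MYPROJECT-ev4Xs9-r1")          # DSSModelEvaluation
    lab_model = comparison.get_evaluation_like_from_full_id("A-MYPROJECT-an1-task1-s1-pp1-m1")  # DSSTrainedPredictionModelDetails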

dataikuapi/dss/project.py

Lines changed: 0 additions & 31 deletions
@@ -1,5 +1,4 @@
 import time, warnings, sys, os.path as osp
-import re
 from .dataset import DSSDataset, DSSDatasetListItem, DSSManagedDatasetCreationHelper
 from .modelcomparison import DSSModelComparison
 from .jupyternotebook import DSSJupyterNotebook, DSSJupyterNotebookListItem
@@ -866,36 +865,6 @@ def create_model_comparison(self, name, prediction_type):
         mec_id = res['id']
         return DSSModelComparison(self.client, self.project_key, mec_id)

-    def get_from_full_id(self, full_id):
-        """
-        Retrieves a Saved Model from the flow, a Lab Model from an Analysis or a Model Evaluation from a Model Evaluation Store) using its full id.
-
-        :param string full_id: the full id of the item to retrieve
-
-        :returns: A handle on the Saved Model, the Model Evaluation or the Lab Model
-        :rtype: :class:`dataikuapi.dss.savedmodel.DSSSavedModel`
-        :rtype: :class:`dataikuapi.dss.modelevaluationstore.DSSModelEvaluation`
-        :rtype: :class:`dataikuapi.dss.ml.DSSTrainedPredictionModelDetails`
-        """
-
-        saved_model_pattern = re.compile("^S-(\\w+)-(\\w+)-(\\w+)(?:-part-(\\w+)-(v?\\d+))?$\\Z")
-        analysis_model_pattern = re.compile("^A-(\\w+)-(\\w+)-(\\w+)-(s[0-9]+)-(pp[0-9]+(?:-part-(\\w+)|-base)?)-(m[0-9]+)$\\Z")
-        model_evaluation_pattern = re.compile("^ME-(\\w+)-(\\w+)-(\\w+)$\\Z")
-
-        if saved_model_pattern.match(full_id):
-            return self.get_saved_model(full_id)
-        elif model_evaluation_pattern.match(full_id):
-            mes_id = full_id.split('-')[2]
-            evaluation_id = full_id.split('-')[3]
-            mes = self.get_model_evaluation_store(mes_id)
-            return mes.get_model_evaluation(evaluation_id)
-        elif analysis_model_pattern.match(full_id):
-            analysis_id = full_id.split('-')[2]
-            task_id = full_id.split('-')[3]
-            return self.get_ml_task(analysis_id, task_id).get_trained_model_details(full_id)
-
-        raise ValueError("{} is not a valid full model id or full model evaluation id.".format(full_id))
-
     ########################################################
     # Jobs
     ########################################################

dataikuapi/dss/recipe.py

Lines changed: 17 additions & 21 deletions
@@ -1331,7 +1331,7 @@ class EvaluationRecipeCreator(DSSRecipeCreator):
         # Create a new evaluation recipe outputing to a new dataset, to a metrics dataset and/or to a model evaluation store

         project = client.get_project("MYPROJECT")
-        builder = EvaluationRecipeCreator("my_evaluation_recipe", project)
+        builder = project.new_recipe("evaluation")
         builder.with_input_model(saved_model_id)
         builder.with_input("dataset_to_evaluate")

@@ -1344,21 +1344,20 @@
         # Access the settings

         er_settings = new_recipe.get_settings()
-        json_payload = er_settings.get_json_payload()
+        payload = er_settings.obj_payload

         # Change the settings

-        json_payload['dontComputePerformance'] = True
-        json_payload['outputProbabilities'] = False
-        json_payload['metrics'] = ["precision", "recall", "auc", "f1", "costMatrixGain"]
+        payload['dontComputePerformance'] = True
+        payload['outputProbabilities'] = False
+        payload['metrics'] = ["precision", "recall", "auc", "f1", "costMatrixGain"]

         # Manage evaluation labels

-        json_payload['labels'] = [dict(key="label_1", value="value_1"), dict(key="label_2", value="value_2")]
+        payload['labels'] = [dict(key="label_1", value="value_1"), dict(key="label_2", value="value_2")]

         # Save the settings and run the recipe

-        er_settings.set_json_payload(json_payload)
         er_settings.save()

         new_recipe.run()
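Two things change in this docstring example: the builder now comes from project.new_recipe("evaluation") instead of instantiating EvaluationRecipeCreator directly, and the payload is read through the obj_payload property rather than the get_json_payload()/set_json_payload() pair. Assuming obj_payload returns the parsed payload dict by reference, in-place edits are picked up by save() with no write-back step; a minimal sketch of the shortened round trip:

    settings = new_recipe.get_settings()
    payload = settings.obj_payload      # parsed payload, returned by reference
    payload['metrics'] = ["auc", "f1"]  # mutate in place
    settings.save()                     # no set_json_payload() needed before saving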
@@ -1408,7 +1407,7 @@ class StandaloneEvaluationRecipeCreator(DSSRecipeCreator):
         # Create a new standalone evaluation of a scored dataset

         project = client.get_project("MYPROJECT")
-        builder = StandaloneEvaluationRecipeCreator("my_standalone_evaluation_recipe", project)
+        builder = project.new_recipe("standalone_evaluation")
         builder.with_input("scored_dataset_to_evaluate")
         builder.with_output_evaluation_store(evaluation_store_id)

@@ -1417,20 +1416,20 @@
         # Modify the model parameters in the SER settings

         ser_settings = new_recipe.get_settings()
-        ser_json_payload = ser_settings.get_json_payload()
+        payload = ser_settings.obj_payload

-        ser_json_payload['predictionType'] = "BINARY_CLASSIFICATION"
-        ser_json_payload['targetVariable'] = "Survived"
-        ser_json_payload['predictionVariable'] = "prediction"
-        ser_json_payload['isProbaAware'] = True
-        ser_json_payload['dontComputePerformance'] = False
+        payload['predictionType'] = "BINARY_CLASSIFICATION"
+        payload['targetVariable'] = "Survived"
+        payload['predictionVariable'] = "prediction"
+        payload['isProbaAware'] = True
+        payload['dontComputePerformance'] = False

         # For a classification model with probabilities, the 'probas' section can be filled with the mapping of the class and the probability column
         # e.g. for a binary classification model with 2 columns: proba_0 and proba_1

         class_0 = dict(key=0, value="proba_0")
         class_1 = dict(key=1, value="proba_1")
-        ser_payload['probas'] = [class_0, class_1]
+        payload['probas'] = [class_0, class_1]

         # Change the 'features' settings for this standalone evaluation
         # e.g. reject the features that you do not want to use in the evaluation

@@ -1439,18 +1438,15 @@ class StandaloneEvaluationRecipeCreator(DSSRecipeCreator):
         feature_ticket = dict(name="Ticket", role="REJECT", type="TEXT")
         feature_cabin = dict(name="Cabin", role="REJECT", type="TEXT")

-        ser_payload['features'] = [feature_passengerid, feature_ticket, feature_cabin]
+        payload['features'] = [feature_passengerid, feature_ticket, feature_cabin]

         # To set the cost matrix properly, access the 'metricParams' section of the payload and set the cost matrix weights:

-        ser_payload['metricParams'] = dict(costMatrixWeights=dict(tpGain=0.4, fpGain=-1.0, tnGain=0.2, fnGain=-0.5))
+        payload['metricParams'] = dict(costMatrixWeights=dict(tpGain=0.4, fpGain=-1.0, tnGain=0.2, fnGain=-0.5))

-        # Add the modified json payload to the recipe settings and save the recipe
+        # Save the recipe and run the recipe
         # Note that with this method, all the settings that were not explicitly set are instead set to their default value.

-        ser_settings = new_recipe.get_settings()
-
-        ser_settings.set_json_payload(ser_payload)
         ser_settings.save()

         new_recipe.run()
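Beyond the same builder and obj_payload switch, this hunk fixes a latent bug in the old example: it populated ser_json_payload but then wrote to ser_payload, a name the snippet never defined, and re-fetched the settings before saving. With a single payload name bound to obj_payload the example is self-consistent; condensed, assuming the new_recipe handle from the docstring above:

    settings = new_recipe.get_settings()
    payload = settings.obj_payload   # one name throughout; edits apply in place
    payload['predictionType'] = "BINARY_CLASSIFICATION"
    payload['probas'] = [dict(key=0, value="proba_0"), dict(key=1, value="proba_1")]
    settings.save()                  # persists the in-place edits
    new_recipe.run()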
