Commit f112c33

Add tags for preceding mlflow run ids and modelgauge run ids (#46)

* Add former mlflow run_ids to tags.
* Better handling of non-numeric metric values, as mlflow 3 has a bug where it crashes the page in these cases.
* Add tag for modelgauge run id.
* Replace numpy NaN and infinity checks with math module equivalents.

1 parent: 97a62a6
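
The non-numeric metric handling described above amounts to guarding mlflow.log_metric against NaN and infinity. A minimal sketch of the pattern, using a hypothetical helper name (log_metric_safely is not part of this commit):

import math

import mlflow


def log_metric_safely(key: str, value: float) -> None:
    # mlflow 3's UI can break on NaN/inf metric values, so log a sentinel
    # metric instead of the raw value in those cases.
    if math.isnan(value):
        mlflow.log_metric(f"{key}_is_nan", 1.0)
    elif math.isinf(value):
        mlflow.log_metric(f"{key}_is_inf", 1.0)
    else:
        mlflow.log_metric(key, value)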

5 files changed: +15, -1 lines changed

src/modelplane/mlflow/loghelpers.py

Lines changed: 3 additions & 0 deletions
@@ -15,3 +15,6 @@ def log_tags(run_id: str) -> None:
             if not k.startswith("mlflow.") and k != RUN_TYPE_TAG_NAME
         }
     )
+    run_type = run.data.tags.get(RUN_TYPE_TAG_NAME, None)
+    if run_type is not None:
+        mlflow.set_tag(f"{run_type}_run_id", run_id)
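
The new {run_type}_run_id tag lets later runs be traced back to the run they were seeded from. An illustrative query, assuming the preceding run was an annotate run (RUN_TYPE_ANNOTATOR is "annotate", so the tag key becomes annotate_run_id); the run id below is a placeholder:

import mlflow

# Find runs that carry a given preceding annotate run id in their tags.
preceding_run_id = "0123456789abcdef"  # placeholder run id
runs = mlflow.search_runs(
    search_all_experiments=True,
    filter_string=f"tags.annotate_run_id = '{preceding_run_id}'",
)
print(runs)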

src/modelplane/runways/annotator.py

Lines changed: 2 additions & 0 deletions
@@ -23,6 +23,7 @@
 
 from modelplane.mlflow.loghelpers import log_tags
 from modelplane.runways.utils import (
+    MODELGAUGE_RUN_TAG_NAME,
     PROMPT_RESPONSE_ARTIFACT_NAME,
     RUN_TYPE_ANNOTATOR,
     RUN_TYPE_TAG_NAME,

@@ -113,6 +114,7 @@ def annotate(
         pipeline_runner.run(
             progress_callback=mlflow.log_metrics, debug=is_debug_mode()
         )
+        mlflow.set_tag(MODELGAUGE_RUN_TAG_NAME, pipeline_runner.run_id)
 
         # log the output to mlflow's artifact store
         mlflow.log_artifact(
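
After this change, the modelgauge pipeline run id can be read back from a finished annotator (or responder) run. An illustrative lookup, with a placeholder MLflow run id:

from mlflow.tracking import MlflowClient

client = MlflowClient()
run = client.get_run("0123456789abcdef")  # placeholder MLflow run id
print(run.data.tags.get("modelgauge_run_id"))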

src/modelplane/runways/responder.py

Lines changed: 2 additions & 0 deletions
@@ -9,6 +9,7 @@
 from modelgauge.sut_registry import SUTS
 
 from modelplane.runways.utils import (
+    MODELGAUGE_RUN_TAG_NAME,
     RUN_TYPE_RESPONDER,
     RUN_TYPE_TAG_NAME,
     get_experiment_id,

@@ -53,6 +54,7 @@ def respond(
         pipeline_runner.run(
             progress_callback=mlflow.log_metrics, debug=is_debug_mode()
         )
+        mlflow.set_tag(MODELGAUGE_RUN_TAG_NAME, pipeline_runner.run_id)
 
         # log the output to mlflow's artifact store
         mlflow.log_artifact(
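
The same tag also supports the reverse direction: given a modelgauge run id, locate the MLflow run that recorded it. An illustrative sketch with a placeholder id:

import mlflow

runs = mlflow.search_runs(
    search_all_experiments=True,
    filter_string="tags.modelgauge_run_id = 'placeholder-modelgauge-run-id'",
)
print(runs)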

src/modelplane/runways/scorer.py

Lines changed: 7 additions & 1 deletion
@@ -1,6 +1,7 @@
 """Runway for measuring annotations against ground truth."""
 
 import json
+import math
 import os
 import tempfile
 from pathlib import Path

@@ -63,7 +64,12 @@ def score(
         for annotator in annotators:
             score = score_annotator(annotator, annotations_df, ground_truth_df)
             for metric in score:
-                mlflow.log_metric(f"{annotator}_{metric}", score[metric])
+                if math.isnan(score[metric]):
+                    mlflow.log_metric(f"{annotator}_{metric}_is_nan", 1.0)
+                elif math.isinf(score[metric]):
+                    mlflow.log_metric(f"{annotator}_{metric}_is_inf", 1.0)
+                else:
+                    mlflow.log_metric(f"{annotator}_{metric}", score[metric])
 
     return run.info.run_id
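
For reference, the math module checks used above behave the same as the numpy checks they replace when applied to plain Python floats. A quick sanity check:

import math

assert math.isnan(float("nan"))
assert math.isinf(float("-inf"))
assert not math.isnan(0.5) and not math.isinf(0.5)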

src/modelplane/runways/utils.py

Lines changed: 1 addition & 0 deletions
@@ -21,6 +21,7 @@
 RUN_TYPE_RESPONDER = "get-sut-responses"
 RUN_TYPE_ANNOTATOR = "annotate"
 RUN_TYPE_SCORER = "score"
+MODELGAUGE_RUN_TAG_NAME = "modelgauge_run_id"
 
 
 def is_debug_mode() -> bool:
