
Commit 0681df5

fix: robust synthetic property handling for score calibrations
- Refactored the ScoreCalibration view models so synthetic and required fields are handled robustly in both ORM and dict contexts.
- Made the source fields (threshold, classification, and method sources) non-optional to enforce required data integrity.
- Added tests verifying that all key attributes and synthetic properties are handled correctly in both construction modes.
- Ensured models can be created from both dict and ORM contexts, mirroring the approach used for other models.
1 parent af7cddb commit 0681df5
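
The pattern described in the commit message — populate "synthetic" fields from ORM relationships in a before-validator, and otherwise require callers to pass them explicitly — can be illustrated with a toy Pydantic v2 model. This is a minimal sketch with made-up names (WidgetView, tag_associations, tag_name), not the actual MaveDB classes:

from types import SimpleNamespace
from typing import Any, Sequence

from pydantic import BaseModel, ConfigDict, model_validator


class WidgetView(BaseModel):
    # from_attributes lets the model be built from ORM-style objects as well as dicts.
    model_config = ConfigDict(from_attributes=True)

    name: str
    tag_names: Sequence[str]  # required: a plain dict must supply it (an empty list is fine)

    @model_validator(mode="before")
    @classmethod
    def generate_tag_names(cls, data: Any):
        # Synthesize the field only when the input exposes the ORM-only relationship;
        # plain dicts fall through to normal Pydantic validation untouched.
        if hasattr(data, "tag_associations"):
            data.tag_names = [assoc.tag_name for assoc in data.tag_associations]
        return data


# Dict context: required fields must be supplied explicitly.
WidgetView.model_validate({"name": "w1", "tag_names": []})

# ORM-like context: the field is derived from the association objects.
orm_obj = SimpleNamespace(
    name="w2",
    tag_associations=[SimpleNamespace(tag_name="red"), SimpleNamespace(tag_name="blue")],
)
print(WidgetView.model_validate(orm_obj).tag_names)  # ['red', 'blue']

The diff below applies the same shape to ScoreCalibration: required Sequence fields plus a mode="before" validator keyed on the presence of an ORM-only attribute.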

3 files changed: 65 additions & 39 deletions

src/mavedb/view_models/score_calibration.py

Lines changed: 24 additions & 33 deletions
@@ -184,9 +184,9 @@ class ScoreCalibrationBase(BaseModel):
     notes: Optional[str] = None

     functional_ranges: Optional[Sequence[FunctionalRangeBase]] = None
-    threshold_sources: Optional[Sequence[PublicationIdentifierBase]] = None
-    classification_sources: Optional[Sequence[PublicationIdentifierBase]] = None
-    method_sources: Optional[Sequence[PublicationIdentifierBase]] = None
+    threshold_sources: Sequence[PublicationIdentifierBase]
+    classification_sources: Sequence[PublicationIdentifierBase]
+    method_sources: Sequence[PublicationIdentifierBase]
     calibration_metadata: Optional[dict] = None

     @field_validator("functional_ranges")
@@ -278,18 +278,18 @@ class ScoreCalibrationModify(ScoreCalibrationBase):
     score_set_urn: Optional[str] = None

     functional_ranges: Optional[Sequence[FunctionalRangeModify]] = None
-    threshold_sources: Optional[Sequence[PublicationIdentifierCreate]] = None
-    classification_sources: Optional[Sequence[PublicationIdentifierCreate]] = None
-    method_sources: Optional[Sequence[PublicationIdentifierCreate]] = None
+    threshold_sources: Sequence[PublicationIdentifierCreate]
+    classification_sources: Sequence[PublicationIdentifierCreate]
+    method_sources: Sequence[PublicationIdentifierCreate]


 class ScoreCalibrationCreate(ScoreCalibrationModify):
     """Model used to create a new score calibration."""

     functional_ranges: Optional[Sequence[FunctionalRangeCreate]] = None
-    threshold_sources: Optional[Sequence[PublicationIdentifierCreate]] = None
-    classification_sources: Optional[Sequence[PublicationIdentifierCreate]] = None
-    method_sources: Optional[Sequence[PublicationIdentifierCreate]] = None
+    threshold_sources: Sequence[PublicationIdentifierCreate]
+    classification_sources: Sequence[PublicationIdentifierCreate]
+    method_sources: Sequence[PublicationIdentifierCreate]


 class SavedScoreCalibration(ScoreCalibrationBase):
@@ -307,9 +307,9 @@ class SavedScoreCalibration(ScoreCalibrationBase):
     private: bool = True

     functional_ranges: Optional[Sequence[SavedFunctionalRange]] = None
-    threshold_sources: Optional[Sequence[SavedPublicationIdentifier]] = None
-    classification_sources: Optional[Sequence[SavedPublicationIdentifier]] = None
-    method_sources: Optional[Sequence[SavedPublicationIdentifier]] = None
+    threshold_sources: Sequence[SavedPublicationIdentifier]
+    classification_sources: Sequence[SavedPublicationIdentifier]
+    method_sources: Sequence[SavedPublicationIdentifier]

     created_by: Optional[SavedUser] = None
     modified_by: Optional[SavedUser] = None
@@ -327,9 +327,6 @@ class Config:
     @field_validator("threshold_sources", "classification_sources", "method_sources", mode="before")
     def publication_identifiers_validator(cls, value: Any) -> Optional[list[PublicationIdentifier]]:
         """Coerce association proxy collections to plain lists."""
-        if value is None:
-            return None
-
         assert isinstance(value, Collection), "Publication identifier lists must be a collection"
         return list(value)

@@ -354,29 +351,23 @@ def primary_calibrations_may_not_be_private(self: "SavedScoreCalibration") -> "S

         return self

+    # These 'synthetic' fields are generated from other model properties. Transform data from other properties as needed, setting
+    # the appropriate field on the model itself. Then, proceed with Pydantic ingestion once fields are created. Only perform these
+    # transformations if the relevant attributes are present on the input data (i.e., when creating from an ORM object).
     @model_validator(mode="before")
     def generate_threshold_classification_and_method_sources(cls, data: Any):  # type: ignore[override]
         """Populate threshold/classification/method source fields from association objects if missing."""
-        association_keys = {
-            "threshold_sources",
-            "thresholdSources",
-            "classification_sources",
-            "classificationSources",
-            "method_sources",
-            "methodSources",
-        }
-
-        if not any(hasattr(data, key) for key in association_keys):
+        if hasattr(data, "publication_identifier_associations"):
            try:
                publication_identifiers = transform_score_calibration_publication_identifiers(
                    data.publication_identifier_associations
                )
                data.__setattr__("threshold_sources", publication_identifiers["threshold_sources"])
                data.__setattr__("classification_sources", publication_identifiers["classification_sources"])
                data.__setattr__("method_sources", publication_identifiers["method_sources"])
-            except AttributeError as exc:
+            except (AttributeError, KeyError) as exc:
                raise ValidationError(
-                    f"Unable to create {cls.__name__} without attribute: {exc}."  # type: ignore
+                    f"Unable to coerce publication associations for {cls.__name__}: {exc}."  # type: ignore
                )
        return data

@@ -385,9 +376,9 @@ class ScoreCalibration(SavedScoreCalibration):
     """Complete score calibration model returned by the API."""

     functional_ranges: Optional[Sequence[FunctionalRange]] = None
-    threshold_sources: Optional[Sequence[PublicationIdentifier]] = None
-    classification_sources: Optional[Sequence[PublicationIdentifier]] = None
-    method_sources: Optional[Sequence[PublicationIdentifier]] = None
+    threshold_sources: Sequence[PublicationIdentifier]
+    classification_sources: Sequence[PublicationIdentifier]
+    method_sources: Sequence[PublicationIdentifier]
     created_by: Optional[User] = None
     modified_by: Optional[User] = None

@@ -399,11 +390,11 @@ class ScoreCalibrationWithScoreSetUrn(SavedScoreCalibration):

     @model_validator(mode="before")
     def generate_score_set_urn(cls, data: Any):
-        if not hasattr(data, "score_set_urn"):
+        if hasattr(data, "score_set"):
            try:
                data.__setattr__("score_set_urn", transform_score_set_to_urn(data.score_set))
-            except AttributeError as exc:
+            except (AttributeError, KeyError) as exc:
                raise ValidationError(
-                    f"Unable to create {cls.__name__} without attribute: {exc}."  # type: ignore
+                    f"Unable to coerce score set urn for {cls.__name__}: {exc}."  # type: ignore
                )
        return data
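
The new before-validator keys off data.publication_identifier_associations and expects transform_score_calibration_publication_identifiers to return the three source buckets. The actual helper is defined elsewhere in mavedb; purely as a hedged sketch of that contract (the relation and publication attributes are assumptions, not the real ORM schema):

# Hypothetical sketch only: the real transform_score_calibration_publication_identifiers
# lives elsewhere in mavedb and the association objects may not look like this. The point
# is the output shape the validator consumes (a dict with exactly these three keys) and
# why KeyError is now caught alongside AttributeError.
from typing import Any, Iterable


def transform_publication_identifiers_sketch(associations: Iterable[Any]) -> dict[str, list[Any]]:
    buckets: dict[str, list[Any]] = {
        "threshold_sources": [],
        "classification_sources": [],
        "method_sources": [],
    }
    for assoc in associations:
        # `relation` is an assumed discriminator on the association row; an unexpected
        # value raises KeyError, which the validator converts into a ValidationError.
        buckets[f"{assoc.relation}_sources"].append(assoc.publication)
    return buckets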

tests/helpers/constants.py

Lines changed: 4 additions & 4 deletions
@@ -1561,8 +1561,8 @@
         TEST_FUNCTIONAL_RANGE_ABNORMAL,
     ],
     "threshold_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}],
-    "classification_sources": None,
-    "method_sources": None,
+    "classification_sources": [],
+    "method_sources": [],
     "calibration_metadata": {},
 }

@@ -1578,8 +1578,8 @@
         TEST_SAVED_FUNCTIONAL_RANGE_ABNORMAL,
     ],
     "thresholdSources": [SAVED_PUBMED_PUBLICATION],
-    "classificationSources": None,
-    "methodSources": None,
+    "classificationSources": [],
+    "methodSources": [],
     "id": 2,
     "investigatorProvided": True,
     "primary": False,

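Swapping None for [] in these constants follows from the fields losing their Optional defaults: in Pydantic v2, a Sequence field with no default rejects both a missing key and an explicit None, while an empty list validates cleanly. A minimal standalone check (toy model, not the MaveDB view models):

from typing import Sequence

from pydantic import BaseModel, ValidationError


class Sources(BaseModel):
    method_sources: Sequence[str]  # required: no Optional, no default


Sources(method_sources=[])  # accepted: an empty list satisfies the field

try:
    Sources(method_sources=None)  # rejected: None is no longer a valid value
except ValidationError as exc:
    print(exc.error_count())  # 1

try:
    Sources()  # rejected: the field is required, yielding a "Field required" error
except ValidationError as exc:
    print(exc.error_count())  # 1
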
tests/view_models/test_score_calibration.py

Lines changed: 37 additions & 2 deletions
@@ -392,13 +392,20 @@ def test_can_create_valid_score_calibration_from_attributed_object(valid_calibra

 def test_cannot_create_score_calibration_when_publication_information_is_missing():
     invalid_data = deepcopy(TEST_SAVED_BRNICH_SCORE_CALIBRATION)
+
     # Add publication identifiers with missing information
     invalid_data.pop("thresholdSources", None)
     invalid_data.pop("classificationSources", None)
     invalid_data.pop("methodSources", None)
-    with pytest.raises(ValidationError, match="Unable to create ScoreCalibration without attribute"):
+
+    with pytest.raises(ValidationError) as exc_info:
         ScoreCalibration.model_validate(dummy_attributed_object_from_dict(invalid_data))

+    assert "Field required" in str(exc_info.value)
+    assert "thresholdSources" in str(exc_info.value)
+    assert "classificationSources" in str(exc_info.value)
+    assert "methodSources" in str(exc_info.value)
+

 def test_can_create_score_calibration_from_association_style_publication_identifiers_against_attributed_object():
     orig_data = TEST_SAVED_BRNICH_SCORE_CALIBRATION
@@ -480,6 +487,24 @@ def test_primary_score_calibration_cannot_be_private():
         ScoreCalibration.model_validate(dummy_attributed_object_from_dict(invalid_data))


+def test_can_create_score_calibration_from_non_orm_context():
+    data = deepcopy(TEST_SAVED_BRNICH_SCORE_CALIBRATION)
+
+    sc = ScoreCalibration.model_validate(data)
+
+    assert sc.title == data["title"]
+    assert sc.research_use_only == data.get("researchUseOnly", False)
+    assert sc.primary == data.get("primary", False)
+    assert sc.investigator_provided == data.get("investigatorProvided", False)
+    assert sc.baseline_score == data.get("baselineScore")
+    assert sc.baseline_score_description == data.get("baselineScoreDescription")
+    assert len(sc.functional_ranges) == len(data["functionalRanges"])
+    assert len(sc.threshold_sources) == len(data["thresholdSources"])
+    assert len(sc.classification_sources) == len(data["classificationSources"])
+    assert len(sc.method_sources) == len(data["methodSources"])
+    assert sc.calibration_metadata == data.get("calibrationMetadata")
+
+
 def test_score_calibration_with_score_set_urn_can_be_created_from_attributed_object():
     data = deepcopy(TEST_SAVED_BRNICH_SCORE_CALIBRATION)
     data["score_set"] = dummy_attributed_object_from_dict({"urn": "urn:mavedb:00000000-0000-0000-0000-000000000001"})
@@ -493,5 +518,15 @@ def test_score_calibration_with_score_set_urn_can_be_created_from_attributed_obj
 def test_score_calibration_with_score_set_urn_cannot_be_created_without_score_set_urn():
     invalid_data = deepcopy(TEST_SAVED_BRNICH_SCORE_CALIBRATION)
     invalid_data["score_set"] = dummy_attributed_object_from_dict({})
-    with pytest.raises(ValidationError, match="Unable to create ScoreCalibrationWithScoreSetUrn without attribute"):
+    with pytest.raises(ValidationError, match="Unable to coerce score set urn for ScoreCalibrationWithScoreSetUrn"):
         ScoreCalibrationWithScoreSetUrn.model_validate(dummy_attributed_object_from_dict(invalid_data))
+
+
+def test_score_calibration_with_score_set_urn_can_be_created_from_non_orm_context():
+    data = deepcopy(TEST_SAVED_BRNICH_SCORE_CALIBRATION)
+    data["score_set_urn"] = "urn:mavedb:00000000-0000-0000-0000-000000000001"
+
+    sc = ScoreCalibrationWithScoreSetUrn.model_validate(data)
+
+    assert sc.title == data["title"]
+    assert sc.score_set_urn == data["score_set_urn"]
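
These tests exercise the two construction modes: a plain dict ("non-ORM context") and an attributed object built by dummy_attributed_object_from_dict. That helper's implementation is not part of this diff; a fixture with the behavior the tests rely on is typically just an attribute wrapper, roughly as sketched here (an assumption about the test helper, not its actual code):

from types import SimpleNamespace
from typing import Any


def dummy_attributed_object_from_dict_sketch(data: dict[str, Any]) -> SimpleNamespace:
    # Expose each key as an attribute so the hasattr checks in the before-validators
    # and Pydantic's from_attributes path see an ORM-like object instead of a dict.
    # The real helper may additionally wrap nested dicts; this shallow version is
    # only meant to show the dict-vs-attributed-object distinction.
    return SimpleNamespace(**data)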
