Skip to content

Commit 1556a6e

Browse files
committed
Add Threshold Calibrations to Score Set Models and Tests
1 parent 0a3b5f4 commit 1556a6e

File tree

7 files changed

+200
-9
lines changed

7 files changed

+200
-9
lines changed

alembic/versions/aa73d39b3705_score_set_level_score_thresholds.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,11 +19,11 @@
1919

2020
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the nullable JSONB column that stores per-score-set calibration
    # payloads (this migration uses `score_calibrations`, replacing the
    # earlier `score_thresholds` column name).
    op.add_column("scoresets", sa.Column("score_calibrations", postgresql.JSONB(astext_type=sa.Text()), nullable=True))
    # ### end Alembic commands ###
2424

2525

2626
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop the calibrations column. Any calibration
    # data stored in it is lost on downgrade.
    op.drop_column("scoresets", "score_calibrations")
    # ### end Alembic commands ###

src/mavedb/models/score_set.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ class ScoreSet(Base):
157157

158158
target_genes: Mapped[List["TargetGene"]] = relationship(back_populates="score_set", cascade="all, delete-orphan")
159159
score_ranges = Column(JSONB, nullable=True)
160-
score_thresholds = Column(JSONB, nullable=True)
160+
score_calibrations = Column(JSONB, nullable=True)
161161

162162
# Unfortunately, we can't use association_proxy here, because in spite of what the documentation seems to imply, it
163163
# doesn't check for a pre-existing keyword with the same text.

src/mavedb/routers/score_sets.py

Lines changed: 51 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -9,13 +9,18 @@
99
from fastapi.encoders import jsonable_encoder
1010
from fastapi.exceptions import HTTPException
1111
from fastapi.responses import StreamingResponse
12-
from sqlalchemy import or_
13-
from sqlalchemy.exc import MultipleResultsFound
12+
from sqlalchemy import or_, select
13+
from sqlalchemy.exc import MultipleResultsFound, NoResultFound
1414
from sqlalchemy.orm import Session
1515

1616
from mavedb import deps
1717
from mavedb.lib.authentication import UserData
18-
from mavedb.lib.authorization import get_current_user, require_current_user, require_current_user_with_email
18+
from mavedb.lib.authorization import (
19+
get_current_user,
20+
require_current_user,
21+
require_current_user_with_email,
22+
RoleRequirer,
23+
)
1924
from mavedb.lib.contributors import find_or_create_contributor
2025
from mavedb.lib.exceptions import MixedTargetError, NonexistentOrcidUserError, ValidationError
2126
from mavedb.lib.identifiers import (
@@ -49,6 +54,7 @@
4954
)
5055
from mavedb.models.contributor import Contributor
5156
from mavedb.models.enums.processing_state import ProcessingState
57+
from mavedb.models.enums.user_role import UserRole
5258
from mavedb.models.experiment import Experiment
5359
from mavedb.models.license import License
5460
from mavedb.models.mapped_variant import MappedVariant
@@ -57,7 +63,7 @@
5763
from mavedb.models.target_gene import TargetGene
5864
from mavedb.models.target_sequence import TargetSequence
5965
from mavedb.models.variant import Variant
60-
from mavedb.view_models import mapped_variant, score_set
66+
from mavedb.view_models import mapped_variant, score_set, calibration
6167
from mavedb.view_models.search import ScoreSetsSearch
6268

6369
logger = logging.getLogger(__name__)
@@ -336,8 +342,10 @@ async def create_score_set(
336342
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Unknown experiment")
337343
# Not allow add score set in meta-analysis experiments.
338344
if any(s.meta_analyzes_score_sets for s in experiment.score_sets):
339-
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
340-
detail="Score sets may not be added to a meta-analysis experiment.")
345+
raise HTTPException(
346+
status_code=status.HTTP_403_FORBIDDEN,
347+
detail="Score sets may not be added to a meta-analysis experiment.",
348+
)
341349

342350
save_to_logging_context({"experiment": experiment.urn})
343351
assert_permission(user_data, experiment, Action.ADD_SCORE_SET)
@@ -656,6 +664,43 @@ async def upload_score_set_variant_data(
656664
return item
657665

658666

667+
@router.post(
    "/score-sets/{urn}/calibration/data",
    response_model=score_set.ScoreSet,
    responses={422: {}},
    response_model_exclude_none=True,
)
async def update_score_set_calibration_data(
    *,
    urn: str,
    calibration_update: dict[str, calibration.Calibration],
    db: Session = Depends(deps.get_db),
    user_data: UserData = Depends(RoleRequirer([UserRole.admin])),
):
    """
    Replace the score calibrations attached to a score set.

    Admin-only: the ``RoleRequirer([UserRole.admin])`` dependency rejects any
    non-admin caller before this body runs. The entire mapping of named
    calibrations is overwritten, not merged.

    Raises an HTTPException with status 404 when no score set matches the
    given URN; authorization failures surface from the role / permission
    dependencies.
    """
    # Log the attribute actually being updated (`score_calibrations`, matching
    # the renamed model column) so log queries line up with the schema.
    save_to_logging_context({"requested_resource": urn, "resource_property": "score_calibrations"})

    try:
        item = db.scalars(select(ScoreSet).where(ScoreSet.urn == urn)).one()
    except NoResultFound:
        logger.info(
            msg="Failed to add score calibrations; The requested score set does not exist.",
            extra=logging_context(),
        )
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail=f"score set with URN '{urn}' not found"
        )

    assert_permission(user_data, item, Action.UPDATE)

    # Serialize each pydantic calibration object into a plain dict for the
    # JSONB column before persisting.
    item.score_calibrations = {k: v.dict() for k, v in calibration_update.items()}
    db.add(item)
    db.commit()
    db.refresh(item)

    save_to_logging_context({"updated_resource": item.urn})
    return item
702+
703+
659704
@router.put(
660705
"/score-sets/{urn}", response_model=score_set.ScoreSet, responses={422: {}}, response_model_exclude_none=True
661706
)
Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
from typing import Union
2+
3+
from pydantic import root_validator
4+
5+
from mavedb.lib.validation.exceptions import ValidationError
6+
from mavedb.view_models.base.base import BaseModel
7+
8+
9+
class PillarProjectParameters(BaseModel):
    """Distribution parameters for one component of a pillar project calibration."""

    # NOTE(review): presumably skew-normal shape / location / scale parameters —
    # confirm against the pillar project calibration pipeline.
    skew: float
    location: float
    scale: float
13+
14+
15+
class PillarProjectParameterSet(BaseModel):
    """One fitted pair of component distributions plus their mixture weight."""

    functionally_altering: PillarProjectParameters
    functionally_normal: PillarProjectParameters
    # Presumably the mixture weight of the functionally-altering component,
    # in [0, 1] — TODO confirm with the calibration source.
    fraction_functionally_altering: float
19+
20+
21+
class PillarProjectCalibration(BaseModel):
    """A pillar project calibration: fitted parameter sets plus aligned lists of
    evidence strengths, score thresholds, and positive likelihood ratios.

    The three list fields are pairwise aligned (index i of each refers to the
    same calibration point), which the root validator enforces.
    """

    parameter_sets: list[PillarProjectParameterSet]
    evidence_strengths: list[int]
    thresholds: list[float]
    positive_likelihood_ratios: list[float]
    prior_probability_pathogenicity: float

    @root_validator
    def validate_all_calibrations_have_a_pairwise_companion(cls, values):
        """Ensure the three aligned list fields all have the same length."""
        evidence_strengths = values.get("evidence_strengths")
        thresholds = values.get("thresholds")
        positive_likelihood_ratios = values.get("positive_likelihood_ratios")

        # If any of these fields failed its own validation it is absent from
        # `values` (so .get returns None). Skip the cross-field check in that
        # case so the underlying field error is reported instead of a
        # TypeError from len(None) here.
        if None in (evidence_strengths, thresholds, positive_likelihood_ratios):
            return values

        if len({len(evidence_strengths), len(thresholds), len(positive_likelihood_ratios)}) != 1:
            raise ValidationError(
                "Calibration object must provide the same number of evidence strengths, score thresholds, and positive likelihood ratios. "
                "One or more of these provided objects was not the same length as the others."
            )

        return values
41+
42+
43+
# Union of all supported calibration schemes. Currently only the pillar
# project calibration exists; add new members to this Union as further
# calibration schemes are supported.
Calibration = Union[PillarProjectCalibration]

src/mavedb/view_models/score_set.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
from mavedb.models.enums.processing_state import ProcessingState
1616
from mavedb.view_models import PublicationIdentifiersGetter, record_type_validator, set_record_type
1717
from mavedb.view_models.base.base import BaseModel, validator
18+
from mavedb.view_models.calibration import Calibration
1819
from mavedb.view_models.contributor import Contributor, ContributorCreate
1920
from mavedb.view_models.doi_identifier import (
2021
DoiIdentifier,
@@ -387,6 +388,7 @@ class SavedScoreSet(ScoreSetBase):
387388
external_links: Dict[str, ExternalLink]
388389
contributors: list[Contributor]
389390
score_ranges: Optional[ScoreRanges]
391+
score_calibrations: Optional[dict[str, Calibration]]
390392

391393
_record_type_factory = record_type_validator()(set_record_type)
392394

tests/helpers/constants.py

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -658,10 +658,41 @@
658658
],
659659
}
660660

661+
661662
# Saved (camelCase-serialized) form of a score set range fixture, as it
# appears in API responses.
TEST_SAVED_SCORESET_RANGE = {
    "wtScore": 1.0,
    "ranges": [
        {"label": "test1", "classification": "normal", "range": [0.0, 2.0]},
        {"label": "test2", "classification": "abnormal", "range": [-2.0, 0.0]},
    ],
}
669+
670+
671+
# Raw (snake_case) calibration payload, shaped as a client would POST it.
TEST_SCORE_CALIBRATION = {
    "parameter_sets": [
        {
            "functionally_altering": {
                "skew": 1.15,
                "location": -2.20,
                "scale": 1.20,
            },
            "functionally_normal": {
                "skew": -1.5,
                "location": 2.25,
                "scale": 0.8,
            },
            "fraction_functionally_altering": 0.20,
        },
    ],
    # These three lists must remain pairwise the same length — the
    # calibration view model's root validator enforces this.
    "evidence_strengths": [3, 2, 1, -1],
    "thresholds": [1.25, 2.5, 3, 5.5],
    "positive_likelihood_ratios": [100, 10, 1, 0.1],
    "prior_probability_pathogenicity": 0.20,
}
684+
685+
686+
# Saved (camelCase-serialized) form of TEST_SCORE_CALIBRATION, as it appears
# in API responses after pydantic serialization.
TEST_SAVED_SCORE_CALIBRATION = {
    "parameterSets": [
        {
            "functionallyAltering": {
                "skew": 1.15,
                "location": -2.20,
                "scale": 1.20,
            },
            "functionallyNormal": {
                "skew": -1.5,
                "location": 2.25,
                "scale": 0.8,
            },
            "fractionFunctionallyAltering": 0.20,
        },
    ],
    "evidenceStrengths": [3, 2, 1, -1],
    "thresholds": [1.25, 2.5, 3, 5.5],
    "positiveLikelihoodRatios": [100, 10, 1, 0.1],
    "priorProbabilityPathogenicity": 0.20,
}

tests/routers/test_score_set.py

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,8 @@
3434
SAVED_EXTRA_CONTRIBUTOR,
3535
SAVED_PUBMED_PUBLICATION,
3636
SAVED_SHORT_EXTRA_LICENSE,
37+
TEST_SCORE_CALIBRATION,
38+
TEST_SAVED_SCORE_CALIBRATION,
3739
)
3840
from tests.helpers.dependency_overrider import DependencyOverrider
3941
from tests.helpers.util import (
@@ -1557,3 +1559,71 @@ def test_can_modify_metadata_for_score_set_with_inactive_license(session, client
15571559
assert response.status_code == 200
15581560
response_data = response.json()
15591561
assert ("title", response_data["title"]) == ("title", "Update title")
1562+
1563+
1564+
def test_anonymous_user_cannot_add_score_calibrations_to_score_set(client, setup_router_db, anonymous_app_overrides):
    """Unauthenticated requests to the calibration endpoint are rejected with 401."""
    experiment = create_experiment(client)
    score_set = create_seq_score_set(client, experiment["urn"])
    payload = {"test_calibrations": deepcopy(TEST_SCORE_CALIBRATION)}
    url = f"/api/v1/score-sets/{score_set['urn']}/calibration/data"

    # Simulate an anonymous caller via the dependency override.
    with DependencyOverrider(anonymous_app_overrides):
        response = client.post(url, json=payload)
        response_data = response.json()

    assert response.status_code == 401
    assert "score_calibrations" not in response_data
1577+
1578+
1579+
def test_user_cannot_add_score_calibrations_to_own_score_set(client, setup_router_db):
    """A regular (non-admin) user may not set calibrations, even on a score set they own."""
    experiment = create_experiment(client)
    score_set = create_seq_score_set(client, experiment["urn"])
    calibration_payload = deepcopy(TEST_SCORE_CALIBRATION)

    # No dependency override here: the request runs as the default
    # authenticated test user, who lacks the admin role required by the
    # endpoint. (The previously-requested `anonymous_app_overrides` fixture
    # was never used and has been removed.)
    response = client.post(
        f"/api/v1/score-sets/{score_set['urn']}/calibration/data", json={"test_calibrations": calibration_payload}
    )
    response_data = response.json()

    assert response.status_code == 401
    assert "score_calibrations" not in response_data
1591+
1592+
1593+
def test_admin_can_add_score_calibrations_to_score_set(client, setup_router_db, admin_app_overrides):
    """An admin user can attach named score calibrations to a score set."""
    experiment = create_experiment(client)
    score_set = create_seq_score_set(client, experiment["urn"])
    payload = {"test_calibrations": deepcopy(TEST_SCORE_CALIBRATION)}

    # Run the request as an admin via the dependency override.
    with DependencyOverrider(admin_app_overrides):
        response = client.post(
            f"/api/v1/score-sets/{score_set['urn']}/calibration/data", json=payload
        )
        response_data = response.json()

    expected_response = update_expected_response_for_created_resources(
        deepcopy(TEST_MINIMAL_SEQ_SCORESET_RESPONSE), experiment, score_set
    )
    expected_response["scoreCalibrations"] = {"test_calibrations": deepcopy(TEST_SAVED_SCORE_CALIBRATION)}

    assert response.status_code == 200
    # Compare key-by-key so a failure pinpoints the mismatched field.
    for key in expected_response:
        assert (key, expected_response[key]) == (key, response_data[key])
1612+
1613+
1614+
def test_score_set_not_found_for_non_existent_score_set_when_adding_score_calibrations(
    client, setup_router_db, admin_app_overrides
):
    """A URN that resolves to no score set yields a 404, even for an admin caller."""
    experiment = create_experiment(client)
    score_set = create_seq_score_set(client, experiment["urn"])
    payload = {"test_calibrations": deepcopy(TEST_SCORE_CALIBRATION)}

    # Mangle the valid URN so it cannot match any stored score set.
    bad_urn = score_set["urn"] + "xxx"

    with DependencyOverrider(admin_app_overrides):
        response = client.post(
            f"/api/v1/score-sets/{bad_urn}/calibration/data",
            json=payload,
        )
        response_data = response.json()

    assert response.status_code == 404
    assert "score_calibrations" not in response_data

0 commit comments

Comments
 (0)