
Commit 00602eb

Adds various small patches to new score calibration model
- Removed the `investigator_provided` field from `ScoreCalibrationBase` and adjusted related validation logic.
- Updated `SavedScoreCalibration` to include `investigator_provided` as a boolean field.
- Modified validation error messages to include specific locations in the request body.
- Changed references from `InvestigatorProvidedScoreCalibrationCreate` to `ScoreCalibrationCreate` in `ScoreSetCreate`.
- Updated test constants and removed unnecessary test cases related to `investigator_provided`.
- Enhanced tests for creating score calibrations to ensure correct behavior for investigator-provided settings.
- Adjusted tests to reflect changes in the model structure and validation logic.
1 parent dff64d8 commit 00602eb

File tree

14 files changed: +516 −122 lines


src/mavedb/lib/permissions.py

Lines changed: 17 additions & 8 deletions
@@ -28,6 +28,7 @@ class Action(Enum):
     ADD_ROLE = "add_role"
     PUBLISH = "publish"
     ADD_BADGE = "add_badge"
+    CHANGE_RANK = "change_rank"


 class PermissionResponse:
@@ -409,11 +410,12 @@ def has_permission(user_data: Optional[UserData], item: Base, action: Action) ->
         else:
             return PermissionResponse(False, 403, f"insufficient permissions for URN '{item.urn}'")
     elif action == Action.UPDATE:
-        if user_may_edit:
-            return PermissionResponse(True)
-        # Roles which may perform this operation.
-        elif roles_permitted(active_roles, [UserRole.admin]):
+        if roles_permitted(active_roles, [UserRole.admin]):
             return PermissionResponse(True)
+        # TODO#549: Allow editing of certain fields even if published. For now,
+        # Owner may only edit if a calibration is not published.
+        elif user_may_edit:
+            return PermissionResponse(not published, 403, f"insufficient permissions for URN '{item.urn}'")
         elif private:
             # Do not acknowledge the existence of a private entity.
             return PermissionResponse(False, 404, f"score calibration with URN '{item.urn}' not found")
@@ -422,12 +424,12 @@ def has_permission(user_data: Optional[UserData], item: Base, action: Action) ->
         else:
             return PermissionResponse(False, 403, f"insufficient permissions for URN '{item.urn}'")
     elif action == Action.DELETE:
-        # Owner may only delete a calibration if it has not already been published.
-        if user_may_edit:
-            return PermissionResponse(not published, 403, f"insufficient permissions for URN '{item.urn}'")
         # Roles which may perform this operation.
-        elif roles_permitted(active_roles, [UserRole.admin]):
+        if roles_permitted(active_roles, [UserRole.admin]):
             return PermissionResponse(True)
+        # Owner may only delete a calibration if it has not already been published.
+        elif user_may_edit:
+            return PermissionResponse(not published, 403, f"insufficient permissions for URN '{item.urn}'")
         elif private:
             # Do not acknowledge the existence of a private entity.
             return PermissionResponse(False, 404, f"score calibration with URN '{item.urn}' not found")
@@ -444,6 +446,13 @@ def has_permission(user_data: Optional[UserData], item: Base, action: Action) ->
             return PermissionResponse(False, 404, f"score calibration with URN '{item.urn}' not found")
         else:
             return PermissionResponse(False)
+    elif action == Action.CHANGE_RANK:
+        if user_may_edit:
+            return PermissionResponse(True)
+        elif roles_permitted(active_roles, [UserRole.admin]):
+            return PermissionResponse(True)
+        else:
+            return PermissionResponse(False, 403, f"insufficient permissions for URN '{item.urn}'")

     else:
         raise NotImplementedError(f"has_permission(User, ScoreCalibration, {action}, Role)")
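After this commit, the UPDATE and DELETE branches both check the admin role before the owner, and owners may update or delete a calibration only while it remains unpublished. A condensed, illustrative restatement of that decision order (placeholder names; not the library function itself):

# Illustrative sketch only: condensed decision order for UPDATE/DELETE on a
# score calibration after this commit. Admins always pass; owners pass only
# while the calibration is unpublished; everyone else is denied.
def may_update_or_delete(is_admin: bool, is_owner: bool, published: bool) -> bool:
    if is_admin:
        return True
    if is_owner:
        return not published  # owner edits/deletes are blocked once published
    return False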

src/mavedb/lib/score_calibrations.py

Lines changed: 8 additions & 0 deletions
@@ -145,6 +145,14 @@ async def create_score_calibration_in_score_set(
     calibration = await _create_score_calibration(db, calibration_create, user)
     calibration.score_set = containing_score_set

+    if user.username in [contributor.orcid_id for contributor in containing_score_set.contributors] + [
+        containing_score_set.created_by.username,
+        containing_score_set.modified_by.username,
+    ]:
+        calibration.investigator_provided = True
+    else:
+        calibration.investigator_provided = False
+
     db.add(calibration)
     return calibration

src/mavedb/routers/score_calibrations.py

Lines changed: 3 additions & 3 deletions
@@ -264,7 +264,7 @@ async def promote_score_calibration_to_primary_route(
         logger.debug("The requested score calibration does not exist", extra=logging_context())
         raise HTTPException(status_code=404, detail="The requested score calibration does not exist")

-    assert_permission(user_data, item, Action.UPDATE)
+    assert_permission(user_data, item, Action.CHANGE_RANK)

     if item.primary:
         logger.debug("The requested score calibration is already primary", extra=logging_context())
@@ -293,7 +293,7 @@ async def promote_score_calibration_to_primary_route(
             detail="A primary score calibration already exists for this score set. Demote it first or pass demoteExistingPrimary=True.",
         )
     elif existing_primary_calibration and demote_existing_primary:
-        assert_permission(user_data, existing_primary_calibration, Action.UPDATE)
+        assert_permission(user_data, existing_primary_calibration, Action.CHANGE_RANK)

     promoted_calibration = promote_score_calibration_to_primary(db, item, user_data.user, demote_existing_primary)
     db.commit()
@@ -323,7 +323,7 @@ def demote_score_calibration_from_primary_route(
         logger.debug("The requested score calibration does not exist", extra=logging_context())
         raise HTTPException(status_code=404, detail="The requested score calibration does not exist")

-    assert_permission(user_data, item, Action.UPDATE)
+    assert_permission(user_data, item, Action.CHANGE_RANK)

     if not item.primary:
         logger.debug("The requested score calibration is not primary", extra=logging_context())

src/mavedb/routers/score_sets.py

Lines changed: 1 addition & 0 deletions
@@ -811,6 +811,7 @@ async def create_score_set(
     if item_create.score_calibrations:
         for calibration_create in item_create.score_calibrations:
             created_calibration_item = await create_score_calibration(db, calibration_create, user_data.user)
+            created_calibration_item.investigator_provided = True  # necessarily true on score set creation
             score_calibrations.append(created_calibration_item)

     targets: list[TargetGene] = []

src/mavedb/view_models/score_calibration.py

Lines changed: 16 additions & 15 deletions
@@ -178,7 +178,6 @@ class ScoreCalibrationBase(BaseModel):
     """

     title: str
-    investigator_provided: bool
     research_use_only: bool = False

     baseline_score: Optional[float] = None
@@ -225,7 +224,7 @@ def test_overlap(range_test: FunctionalRangeBase, range_check: FunctionalRangeBa
            if test_overlap(a, b):
                raise ValidationError(
                    f"Score ranges may not overlap; `{a.label}` ({a.range}) overlaps with `{b.label}` ({b.range}).",
-                    custom_loc=["body", "scoreCalibration", "functionalRanges", i, "range"],
+                    custom_loc=["body", i, "range"],
                )
        return field_value

@@ -236,16 +235,17 @@ def functional_range_labels_must_be_unique(self: "ScoreCalibrationBase") -> "Sco
            return self

        seen, dupes = set(), set()
-        for fr in self.functional_ranges:
+        for i, fr in enumerate(self.functional_ranges):
            fr.label = fr.label.strip()
            if fr.label in seen:
-                dupes.add(fr.label)
+                dupes.add((fr.label, i))
            else:
                seen.add(fr.label)

        if dupes:
            raise ValidationError(
-                f"Detected repeated label(s): {', '.join(dupes)}. Functional range labels must be unique."
+                f"Detected repeated label(s): {', '.join(label for label, _ in dupes)}. Functional range labels must be unique.",
+                custom_loc=["body", "functionalRanges", dupes.pop()[1], "label"],
            )

        return self
@@ -261,8 +261,9 @@ def validate_baseline_score(self: "ScoreCalibrationBase") -> "ScoreCalibrationBa

        for fr in self.functional_ranges:
            if fr.is_contained_by_range(self.baseline_score) and fr.classification != "normal":
-                raise ValueError(
-                    f"The provided baseline score of {self.baseline_score} falls within a non-normal range ({fr.label}). Baseline scores may not fall within non-normal ranges."
+                raise ValidationError(
+                    f"The provided baseline score of {self.baseline_score} falls within a non-normal range ({fr.label}). Baseline scores may not fall within non-normal ranges.",
+                    custom_loc=["body", "baselineScore"],
                )

        return self
@@ -288,12 +289,6 @@ class ScoreCalibrationCreate(ScoreCalibrationModify):
     method_sources: Optional[Sequence[PublicationIdentifierCreate]] = None


-class InvestigatorProvidedScoreCalibrationCreate(ScoreCalibrationCreate):
-    """Model used to create a new investigator-provided score calibration. Enforces fixed field values for certain properties."""
-
-    investigator_provided: Literal[True] = True
-
-
 class SavedScoreCalibration(ScoreCalibrationBase):
     """Persisted score calibration model (includes identifiers and source lists)."""

@@ -304,6 +299,7 @@ class SavedScoreCalibration(ScoreCalibrationBase):

     score_set_id: int

+    investigator_provided: bool
     primary: bool = False
     private: bool = True

@@ -338,15 +334,20 @@ def publication_identifiers_validator(cls, value: Any) -> Optional[list[Publicat
     def primary_calibrations_may_not_be_research_use_only(self: "SavedScoreCalibration") -> "SavedScoreCalibration":
         """Primary calibrations may not be marked as research use only."""
         if self.primary and self.research_use_only:
-            raise ValidationError("Primary score calibrations may not be marked as research use only.")
+            raise ValidationError(
+                "Primary score calibrations may not be marked as research use only.",
+                custom_loc=["body", "researchUseOnly"],
+            )

         return self

     @model_validator(mode="after")
     def primary_calibrations_may_not_be_private(self: "SavedScoreCalibration") -> "SavedScoreCalibration":
         """Primary calibrations may not be marked as private."""
         if self.primary and self.private:
-            raise ValidationError("Primary score calibrations may not be marked as private.")
+            raise ValidationError(
+                "Primary score calibrations may not be marked as private.", custom_loc=["body", "private"]
+            )

         return self

src/mavedb/view_models/score_set.py

Lines changed: 2 additions & 2 deletions
@@ -33,7 +33,7 @@
 )
 from mavedb.view_models.score_calibration import (
     SavedScoreCalibration,
-    InvestigatorProvidedScoreCalibrationCreate,
+    ScoreCalibrationCreate,
     ScoreCalibration,
 )
 from mavedb.view_models.target_gene import (
@@ -178,7 +178,7 @@ class ScoreSetCreate(ScoreSetModify):
     # If this propertie ever became available during calibration creation,
     # validation criteria which enforces constraints on there being a single primary
     # calibration per score set would need to be added at this model level.
-    score_calibrations: Optional[Sequence[InvestigatorProvidedScoreCalibrationCreate]] = None
+    score_calibrations: Optional[Sequence[ScoreCalibrationCreate]] = None

     @field_validator("superseded_score_set_urn")
     def validate_superseded_score_set_urn(cls, v: Optional[str]) -> Optional[str]:
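With ScoreSetCreate now typed against the plain ScoreCalibrationCreate, nested calibrations in a score set creation request no longer carry an investigator-provided flag; the server sets it (see the router change above). A rough, hedged example of the nested fragment — key names other than those visible in this diff are assumptions about the camelCase request schema:

# Hypothetical request fragment; only "title" and "researchUseOnly" correspond to
# fields shown in this diff, and the exact camelCase aliases are assumed.
score_set_create_fragment = {
    "scoreCalibrations": [
        {
            "title": "Investigator-provided calibration",
            "researchUseOnly": False,
        }
    ]
}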

tests/helpers/constants.py

Lines changed: 2 additions & 12 deletions
@@ -1502,7 +1502,6 @@
 TEST_BRNICH_SCORE_CALIBRATION = {
     "title": "Test BRNICH Score Calibration",
     "research_use_only": False,
-    "investigator_provided": False,
     "baseline_score": TEST_BASELINE_SCORE,
     "baseline_score_description": "Test baseline score description",
     "functional_ranges": [
@@ -1536,6 +1535,7 @@
     "methodSources": [SAVED_PUBMED_PUBLICATION],
     "id": 1,
     "urn": VALID_CALIBRATION_URN,
+    "investigatorProvided": True,
     "primary": True,
     "private": False,
     "scoreSetId": 1,
@@ -1555,20 +1555,9 @@
     "modificationDate": date.today().isoformat(),
 }

-TEST_INVESTIGATOR_PROVIDED_SCORE_CALIBRATION = {
-    **TEST_BRNICH_SCORE_CALIBRATION,
-    "investigator_provided": True,
-}
-
-TEST_SAVED_INVESTIGATOR_PROVIDED_SCORE_CALIBRATION = {
-    **TEST_SAVED_BRNICH_SCORE_CALIBRATION,
-    "investigatorProvided": True,
-}
-
 TEST_PATHOGENICITY_SCORE_CALIBRATION = {
     "title": "Test Pathogenicity Score Calibration",
     "research_use_only": False,
-    "investigator_provided": False,
     "baseline_score": TEST_BASELINE_SCORE,
     "baseline_score_description": "Test baseline score description",
     "functional_ranges": [
@@ -1596,6 +1585,7 @@
     "classificationSources": None,
     "methodSources": None,
     "id": 2,
+    "investigatorProvided": True,
     "primary": False,
     "private": False,
     "urn": VALID_CALIBRATION_URN,

tests/lib/conftest.py

Lines changed: 7 additions & 1 deletion
@@ -64,13 +64,18 @@ def setup_lib_db_with_score_set(session, setup_lib_db):
     """
     Sets up the lib test db with a user, reference, license, and a score set.
     """
+    user = session.query(User).filter(User.username == TEST_USER["username"]).first()

     experiment_set = ExperimentSet(**TEST_EXPERIMENT_SET, urn=VALID_EXPERIMENT_SET_URN)
+    experiment_set.created_by = user
+    experiment_set.modified_by = user
     session.add(experiment_set)
     session.commit()
     session.refresh(experiment_set)

     experiment = Experiment(**TEST_EXPERIMENT, urn=VALID_EXPERIMENT_URN, experiment_set_id=experiment_set.id)
+    experiment.created_by = user
+    experiment.modified_by = user
     session.add(experiment)
     session.commit()
     session.refresh(experiment)
@@ -80,7 +85,8 @@ def setup_lib_db_with_score_set(session, setup_lib_db):
     score_set = ScoreSet(
         **score_set_scaffold, urn=VALID_SCORE_SET_URN, experiment_id=experiment.id, licence_id=TEST_LICENSE["id"]
     )
-
+    score_set.created_by = user
+    score_set.modified_by = user
     session.add(score_set)
     session.commit()
     session.refresh(score_set)

tests/lib/test_score_calibrations.py

Lines changed: 80 additions & 1 deletion
@@ -34,7 +34,9 @@
     TEST_PUBMED_IDENTIFIER,
     TEST_SEQ_SCORESET,
     VALID_SCORE_SET_URN,
+    EXTRA_USER,
 )
+from tests.helpers.util.contributor import add_contributor
 from tests.helpers.util.score_calibration import create_test_score_calibration_in_score_set

 ################################################################################
@@ -88,6 +90,82 @@ async def test_create_score_calibration_in_score_set_creates_score_calibration_w
     assert calibration.score_set == setup_lib_db_with_score_set


+@pytest.mark.asyncio
+async def test_create_score_calibration_in_score_set_investigator_provided_set_when_creator_is_owner(
+    setup_lib_db_with_score_set, session, mock_user
+):
+    test_user = session.execute(select(User)).scalars().first()
+
+    MockCalibrationCreate = create_model(
+        "MockCalibrationCreate",
+        score_set_urn=(str | None, setup_lib_db_with_score_set.urn),
+        threshold_sources=(list, []),
+        classification_sources=(list, []),
+        method_sources=(list, []),
+    )
+
+    calibration = await create_score_calibration_in_score_set(session, MockCalibrationCreate(), test_user)
+    assert calibration is not None
+    assert calibration.score_set == setup_lib_db_with_score_set
+    assert calibration.created_by == test_user
+    assert calibration.modified_by == test_user
+    assert calibration.investigator_provided is True
+
+
+@pytest.mark.asyncio
+async def test_create_score_calibration_in_score_set_investigator_provided_set_when_creator_is_contributor(
+    setup_lib_db_with_score_set, session
+):
+    extra_user = session.execute(select(User).where(User.username == EXTRA_USER["username"])).scalars().first()
+
+    add_contributor(
+        session,
+        setup_lib_db_with_score_set.urn,
+        ScoreSet,
+        EXTRA_USER["username"],
+        EXTRA_USER["first_name"],
+        EXTRA_USER["last_name"],
+    )
+
+    MockCalibrationCreate = create_model(
+        "MockCalibrationCreate",
+        score_set_urn=(str | None, setup_lib_db_with_score_set.urn),
+        threshold_sources=(list, []),
+        classification_sources=(list, []),
+        method_sources=(list, []),
+    )
+
+    calibration = await create_score_calibration_in_score_set(session, MockCalibrationCreate(), extra_user)
+    assert calibration is not None
+    assert calibration.score_set == setup_lib_db_with_score_set
+    assert calibration.created_by == extra_user
+    assert calibration.modified_by == extra_user
+    assert calibration.investigator_provided is True
+
+
+@pytest.mark.asyncio
+async def test_create_score_calibration_in_score_set_investigator_provided_not_set_when_creator_not_owner(
+    setup_lib_db_with_score_set, session
+):
+    MockCalibrationCreate = create_model(
+        "MockCalibrationCreate",
+        score_set_urn=(str | None, setup_lib_db_with_score_set.urn),
+        threshold_sources=(list, []),
+        classification_sources=(list, []),
+        method_sources=(list, []),
+    )
+
+    # invoke from a different user context
+    extra_user = session.execute(select(User).where(User.username == EXTRA_USER["username"])).scalars().first()
+
+    calibration = await create_score_calibration_in_score_set(session, MockCalibrationCreate(), extra_user)
+    assert calibration is not None
+    assert calibration.score_set == setup_lib_db_with_score_set
+    assert calibration.created_by == extra_user
+    assert calibration.modified_by == extra_user
+    assert calibration.investigator_provided is False
+
+
 ### create_score_calibration


@@ -605,7 +683,8 @@ async def test_modify_score_calibration_new_score_set(setup_lib_db_with_score_se
         experiment_id=existing_experiment.id,
         licence_id=TEST_LICENSE["id"],
     )
-
+    new_containing_score_set.created_by = setup_lib_db_with_score_set.created_by
+    new_containing_score_set.modified_by = setup_lib_db_with_score_set.modified_by
     session.add(new_containing_score_set)
     session.commit()
     session.refresh(new_containing_score_set)
