Skip to content

Commit 073a6ac

Browse files
authored
Update versions for pre-commit hooks (#41)
1 parent 0893db7 commit 073a6ac

File tree

14 files changed

+49
-49
lines changed

14 files changed

+49
-49
lines changed

.pre-commit-config.yaml

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -16,13 +16,13 @@ repos:
1616
- id: check-toml
1717

1818
- repo: https://github.com/python-poetry/poetry
19-
rev: 1.8.4
19+
rev: 2.0.1
2020
hooks:
2121
- id: poetry-check
2222
args: [--lock]
2323

2424
- repo: https://github.com/astral-sh/ruff-pre-commit
25-
rev: v0.7.2
25+
rev: v0.9.2
2626
hooks:
2727
- id: ruff
2828
args: [--fix, --exit-non-zero-on-fix]
@@ -31,13 +31,13 @@ repos:
3131
types_or: [ python, pyi, jupyter ]
3232

3333
- repo: https://github.com/crate-ci/typos
34-
rev: v1.27.0
34+
rev: v1.29.4
3535
hooks:
3636
- id: typos
3737
args: []
3838

3939
- repo: https://github.com/pre-commit/mirrors-mypy
40-
rev: v1.13.0
40+
rev: v1.14.1
4141
hooks:
4242
- id: mypy
4343
entry: mypy
@@ -46,7 +46,7 @@ repos:
4646
exclude: tests|projects
4747

4848
- repo: https://github.com/nbQA-dev/nbQA
49-
rev: 1.9.0
49+
rev: 1.9.1
5050
hooks:
5151
- id: nbqa-ruff
5252
args: [--fix, --exit-non-zero-on-fix]

mmlearn/cli/_instantiators.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -103,9 +103,9 @@ def instantiate_sampler(
103103
kwargs.update(distributed_sampler_kwargs)
104104

105105
sampler = hydra.utils.instantiate(cfg, **kwargs)
106-
assert isinstance(
107-
sampler, Sampler
108-
), f"Expected a `torch.utils.data.Sampler` object but got {type(sampler)}."
106+
assert isinstance(sampler, Sampler), (
107+
f"Expected a `torch.utils.data.Sampler` object but got {type(sampler)}."
108+
)
109109

110110
if sampler is None and requires_distributed_sampler:
111111
sampler = DistributedSampler(dataset, **distributed_sampler_kwargs)

mmlearn/cli/run.py

Lines changed: 9 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -56,9 +56,9 @@ def main(cfg: MMLearnConf) -> None: # noqa: PLR0912
5656
trainer: Trainer = hydra.utils.instantiate(
5757
cfg.trainer, callbacks=callbacks, logger=loggers, _convert_="all"
5858
)
59-
assert isinstance(
60-
trainer, Trainer
61-
), "Trainer must be an instance of `lightning.pytorch.trainer.Trainer`"
59+
assert isinstance(trainer, Trainer), (
60+
"Trainer must be an instance of `lightning.pytorch.trainer.Trainer`"
61+
)
6262

6363
if rank_zero_only.rank == 0 and loggers is not None: # update wandb config
6464
for trainer_logger in loggers:
@@ -79,9 +79,9 @@ def main(cfg: MMLearnConf) -> None: # noqa: PLR0912
7979
# prepare dataloaders
8080
if cfg.job_type == JobType.train:
8181
train_dataset = instantiate_datasets(cfg.datasets.train)
82-
assert (
83-
train_dataset is not None
84-
), "Train dataset (`cfg.datasets.train`) is required for training."
82+
assert train_dataset is not None, (
83+
"Train dataset (`cfg.datasets.train`) is required for training."
84+
)
8585

8686
train_sampler = instantiate_sampler(
8787
cfg.dataloader.train.get("sampler"),
@@ -109,9 +109,9 @@ def main(cfg: MMLearnConf) -> None: # noqa: PLR0912
109109
)
110110
else:
111111
test_dataset = instantiate_datasets(cfg.datasets.test)
112-
assert (
113-
test_dataset is not None
114-
), "Test dataset (`cfg.datasets.test`) is required for evaluation."
112+
assert test_dataset is not None, (
113+
"Test dataset (`cfg.datasets.test`) is required for evaluation."
114+
)
115115

116116
test_sampler = instantiate_sampler(
117117
cfg.dataloader.test.get("sampler"),

mmlearn/datasets/chexpert.py

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -66,12 +66,12 @@ def __init__(
6666
transform: Optional[Callable[[Image.Image], torch.Tensor]] = None,
6767
) -> None:
6868
assert split in ["train", "valid"], f"split {split} is not available."
69-
assert (
70-
labeler in ["chexpert", "chexbert", "vchexbert"] or labeler is None
71-
), f"labeler {labeler} is not available."
72-
assert (
73-
callable(transform) or transform is None
74-
), "transform is not callable or None."
69+
assert labeler in ["chexpert", "chexbert", "vchexbert"] or labeler is None, (
70+
f"labeler {labeler} is not available."
71+
)
72+
assert callable(transform) or transform is None, (
73+
"transform is not callable or None."
74+
)
7575

7676
if split == "valid":
7777
data_file = f"{split}_data.json"

mmlearn/datasets/core/data_collator.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -103,7 +103,7 @@ def _merge_examples(examples: list[Example]) -> dict[str, Any]:
103103
else:
104104
merged_examples[key] = [example[key]]
105105

106-
for key in merged_examples:
106+
for key in merged_examples: # noqa: PLC0206
107107
if isinstance(merged_examples[key][0], Example):
108108
merged_examples[key] = _merge_examples(merged_examples[key])
109109

mmlearn/datasets/librispeech.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -107,9 +107,9 @@ def __len__(self) -> int:
107107
def __getitem__(self, idx: int) -> Example:
108108
"""Return an example from the dataset."""
109109
waveform, sample_rate, transcript, _, _, _ = self.dataset[idx]
110-
assert (
111-
sample_rate == SAMPLE_RATE
112-
), f"Expected sample rate to be `16000`, got {sample_rate}."
110+
assert sample_rate == SAMPLE_RATE, (
111+
f"Expected sample rate to be `16000`, got {sample_rate}."
112+
)
113113
waveform = pad_or_trim(waveform.flatten())
114114

115115
return Example(

mmlearn/datasets/nihcxr.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -62,9 +62,9 @@ def __init__(
6262
transform: Optional[Callable[[Image.Image], torch.Tensor]] = None,
6363
) -> None:
6464
assert split in ["train", "test", "bbox"], f"split {split} is not available."
65-
assert (
66-
callable(transform) or transform is None
67-
), "transform is not callable or None."
65+
assert callable(transform) or transform is None, (
66+
"transform is not callable or None."
67+
)
6868

6969
data_path = os.path.join(root_dir, split + "_data.json")
7070

mmlearn/modules/encoders/vision.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -512,9 +512,9 @@ def forward(
512512
masks: Union[torch.Tensor, list[torch.Tensor]],
513513
) -> torch.Tensor:
514514
"""Forward pass through the Vision Transformer Predictor."""
515-
assert (masks is not None) and (
516-
masks_x is not None
517-
), "Cannot run predictor without mask indices"
515+
assert (masks is not None) and (masks_x is not None), (
516+
"Cannot run predictor without mask indices"
517+
)
518518

519519
if not isinstance(masks_x, list):
520520
masks_x = [masks_x]

mmlearn/tasks/contrastive_pretraining.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -277,9 +277,9 @@ def __init__( # noqa: PLR0912, PLR0915
277277
Modalities.get_modality(modality_key)
278278
for modality_key in modality_encoder_mapping
279279
]
280-
assert (
281-
len(self._available_modalities) >= 2
282-
), "Expected at least two modalities to be available. "
280+
assert len(self._available_modalities) >= 2, (
281+
"Expected at least two modalities to be available. "
282+
)
283283

284284
#: A :py:class:`~torch.nn.ModuleDict`, where the keys are the names of the
285285
#: modalities and the values are the encoder modules.

projects/med_benchmarking/datasets/mimiciv_cxr.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -160,9 +160,9 @@ def __getitem__(self, idx: int) -> Example:
160160
)
161161
if tokens is not None:
162162
if isinstance(tokens, dict): # output of HFTokenizer
163-
assert (
164-
Modalities.TEXT.name in tokens
165-
), f"Missing key `{Modalities.TEXT.name}` in tokens."
163+
assert Modalities.TEXT.name in tokens, (
164+
f"Missing key `{Modalities.TEXT.name}` in tokens."
165+
)
166166
example.update(tokens)
167167
else:
168168
example[Modalities.TEXT.name] = tokens

0 commit comments

Comments (0)