
Commit 6421dd8

precommit: drop Black in favor of Ruff (#19380)
1 parent 01f8531 · commit 6421dd8

File tree — 4 files changed: +12 −20 lines

.pre-commit-config.yaml

Lines changed: 1 addition & 15 deletions
@@ -84,25 +84,11 @@ repos:
         - flake8-return

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.1.9"
+    rev: "v0.1.15"
     hooks:
       - id: ruff
         args: ["--fix", "--preview"]

-  - repo: https://github.com/psf/black
-    rev: 23.12.1
-    hooks:
-      - id: black
-        name: Format code
-        exclude: docs/source-app
-
-  - repo: https://github.com/asottile/blacken-docs
-    rev: 1.16.0
-    hooks:
-      - id: blacken-docs
-        args: ["--line-length=120"]
-        exclude: docs/source-app
-
   - repo: https://github.com/executablebooks/mdformat
     rev: 0.7.17
     hooks:
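
Note: with Black and blacken-docs gone, formatting duty falls to Ruff. This hunk only bumps the lint hook; if Black-compatible formatting is still wanted from the same toolchain and the config does not already provide it elsewhere, the ruff-format hook shipped by the same astral-sh/ruff-pre-commit repo is the usual stand-in. A minimal sketch, not part of this diff:

    - repo: https://github.com/astral-sh/ruff-pre-commit
      rev: "v0.1.15"
      hooks:
        - id: ruff
          args: ["--fix", "--preview"]
        - id: ruff-format  # in-place, Black-style formatting via Ruff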

docs/source-pytorch/extensions/strategy.rst

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ The below table lists all relevant strategies available in Lightning with their
      - Strategy for multi-process single-device training on one or multiple nodes. :ref:`Learn more. <accelerators/gpu_intermediate:Distributed Data Parallel>`
    * - ddp_spawn
      - :class:`~lightning.pytorch.strategies.DDPStrategy`
-     - Same as "ddp" but launches processes using :func:`torch.multiprocessing.spawn` method and joins processes after training finishes. :ref:`Learn more. <accelerators/gpu_intermediate:Distributed Data Parallel Spawn>`
+     - Same as "ddp" but launches processes using ``torch.multiprocessing.spawn`` method and joins processes after training finishes. :ref:`Learn more. <accelerators/gpu_intermediate:Distributed Data Parallel Spawn>`
    * - deepspeed
      - :class:`~lightning.pytorch.strategies.DeepSpeedStrategy`
      - Provides capabilities to run training using the DeepSpeed library, with training optimizations for large billion parameter models. :doc:`Learn more. <../advanced/model_parallel/deepspeed>`
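
For context, the first column of this table holds the registry names accepted by the Trainer's strategy argument. A minimal usage sketch; the accelerator and device settings here are illustrative:

    # Selecting the ddp_spawn strategy by its registry name; BoringModel
    # stands in for any LightningModule.
    from lightning.pytorch import Trainer
    from lightning.pytorch.demos.boring_classes import BoringModel

    if __name__ == "__main__":  # spawn-based launching needs a main guard
        trainer = Trainer(strategy="ddp_spawn", accelerator="cpu", devices=2, max_epochs=1)
        trainer.fit(BoringModel())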

pyproject.toml

Lines changed: 4 additions & 0 deletions
@@ -93,6 +93,8 @@ ignore-init-module-imports = true
     "S113", # todo: Probable use of requests call without timeout
     "S301", # todo: `pickle` and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue
     "S324", # todo: Probable use of insecure hash functions in `hashlib`
+    "S403", # todo: `pickle`, `cPickle`, `dill`, and `shelve` modules are possibly insecure
+    "S404", # todo: `subprocess` module is possibly insecure
     "S602", # todo: `subprocess` call with `shell=True` identified, security issue
     "S603", # todo: `subprocess` call: check for execution of untrusted input
     "S605", # todo: Starting a process with a shell: seems safe, but may be changed in the future; consider rewriting without `shell`
@@ -108,6 +110,8 @@ ignore-init-module-imports = true
     "S311", # todo: Standard pseudo-random generators are not suitable for cryptographic purposes
     "S108", # todo: Probable insecure usage of temporary file or directory: "/tmp/sys-customizations-sync"
     "S202", # Uses of `tarfile.extractall()`
+    "S403", # `pickle`, `cPickle`, `dill`, and `shelve` modules are possibly insecure
+    "S404", # `subprocess` module is possibly insecure
     "S602", # todo: `subprocess` call with `shell=True` identified, security issue
     "S603", # todo: `subprocess` call: check for execution of untrusted input
     "S605", # todo: Starting a process with a shell: seems safe, but may be changed in the future; consider rewriting without `shell`

tests/tests_pytorch/utilities/test_compile.py

Lines changed: 6 additions & 4 deletions
@@ -16,7 +16,6 @@

 import pytest
 import torch
-from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_1
 from lightning.pytorch import LightningModule, Trainer
 from lightning.pytorch.demos.boring_classes import BoringModel
 from lightning.pytorch.utilities.compile import from_compiled, to_uncompiled
@@ -26,7 +25,8 @@
 from tests_pytorch.helpers.runif import RunIf


-@pytest.mark.skipif(sys.platform == "darwin" and not _TORCH_GREATER_EQUAL_2_1, reason="Fix for MacOS in PyTorch 2.1")
+# https://github.com/pytorch/pytorch/issues/95708
+@pytest.mark.skipif(sys.platform == "darwin", reason="fatal error: 'omp.h' file not found")
 @RunIf(dynamo=True)
 @mock.patch("lightning.pytorch.trainer.call._call_and_handle_interrupt")
 def test_trainer_compiled_model(_, tmp_path, monkeypatch, mps_count_0):
@@ -112,7 +112,8 @@ def has_dynamo(fn):
     assert not has_dynamo(to_uncompiled_model.predict_step)


-@pytest.mark.skipif(sys.platform == "darwin" and not _TORCH_GREATER_EQUAL_2_1, reason="Fix for MacOS in PyTorch 2.1")
+# https://github.com/pytorch/pytorch/issues/95708
+@pytest.mark.skipif(sys.platform == "darwin", reason="fatal error: 'omp.h' file not found")
 @RunIf(dynamo=True)
 def test_trainer_compiled_model_that_logs(tmp_path):
     class MyModel(BoringModel):
@@ -137,7 +138,8 @@ def training_step(self, batch, batch_idx):
     assert set(trainer.callback_metrics) == {"loss"}


-@pytest.mark.skipif(sys.platform == "darwin" and not _TORCH_GREATER_EQUAL_2_1, reason="Fix for MacOS in PyTorch 2.1")
+# https://github.com/pytorch/pytorch/issues/95708
+@pytest.mark.skipif(sys.platform == "darwin", reason="fatal error: 'omp.h' file not found")
 @RunIf(dynamo=True)
 def test_trainer_compiled_model_test(tmp_path):
     model = BoringModel()
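
All three tests get the same treatment: the old markers skipped on macOS only for Torch < 2.1, on the assumption that PyTorch 2.1 would fix the failure, while the new ones skip on macOS unconditionally and cite the upstream issue. For context, a minimal usage sketch of the utilities these tests exercise, assuming a platform where dynamo works (exactly what the marker guards):

    import torch

    from lightning.pytorch.demos.boring_classes import BoringModel
    from lightning.pytorch.utilities.compile import from_compiled, to_uncompiled

    model = BoringModel()
    compiled = torch.compile(model)    # a dynamo-optimized module
    wrapped = from_compiled(compiled)  # LightningModule the Trainer can fit
    restored = to_uncompiled(wrapped)  # back to the plain, eager module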
