Skip to content

Commit 35d3f17

Browse files
authored
Merge branch 'master' into master
2 parents f6b3468 + d195d2b commit 35d3f17

File tree

25 files changed

+262
-31
lines changed

25 files changed

+262
-31
lines changed

.github/workflows/docker-build.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ jobs:
9797
# adding some more images as Thunder is mainly using python 3.10,
9898
# and we need to support integrations as for example LitGPT
9999
python_version: ["3.10"]
100-
pytorch_version: ["2.6.0", "2.7.0"]
100+
pytorch_version: ["2.6.0", "2.7.1"]
101101
cuda_version: ["12.6.3"]
102102
include:
103103
# These are the base images for PL release docker images.
@@ -108,7 +108,7 @@ jobs:
108108
- { python_version: "3.11", pytorch_version: "2.4.1", cuda_version: "12.1.1" }
109109
- { python_version: "3.12", pytorch_version: "2.5.1", cuda_version: "12.1.1" }
110110
- { python_version: "3.12", pytorch_version: "2.6.0", cuda_version: "12.4.1" }
111-
- { python_version: "3.12", pytorch_version: "2.7.0", cuda_version: "12.6.3" }
111+
- { python_version: "3.12", pytorch_version: "2.7.1", cuda_version: "12.6.3" }
112112
steps:
113113
- uses: actions/checkout@v4
114114
- uses: docker/setup-buildx-action@v3

docs/source-pytorch/conf.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -487,6 +487,7 @@ def _load_py_module(name: str, location: str) -> ModuleType:
487487
("py:meth", "setup"),
488488
("py:meth", "test_step"),
489489
("py:meth", "toggle_optimizer"),
490+
("py:meth", "toggled_optimizer"),
490491
("py:class", "torch.ScriptModule"),
491492
("py:class", "torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload"),
492493
("py:class", "torch.distributed.fsdp.fully_sharded_data_parallel.MixedPrecision"),

docs/source-pytorch/model/manual_optimization.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ To manually optimize, do the following:
1717
* ``optimizer.zero_grad()`` to clear the gradients from the previous training step
1818
* ``self.manual_backward(loss)`` instead of ``loss.backward()``
1919
* ``optimizer.step()`` to update your model parameters
20-
* ``self.toggle_optimizer()`` and ``self.untoggle_optimizer()`` if needed
20+
* ``self.toggle_optimizer()`` and ``self.untoggle_optimizer()``, or ``self.toggled_optimizer()`` if needed
2121

2222
Here is a minimal example of manual optimization.
2323

requirements/ci.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
setuptools <80.8.1
1+
setuptools <80.9.1
22
wheel <0.46.0
33
awscli >=1.30.0, <1.41.0
44
twine ==6.1.0

requirements/doctests.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
1-
pytest ==8.3.5
1+
pytest ==8.4.0
22
pytest-doctestplus ==1.4.0

requirements/fabric/test.txt

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
1-
coverage ==7.8.0
1+
coverage ==7.8.2
22
numpy >=1.17.2, <1.27.0
3-
pytest ==8.3.5
3+
pytest ==8.4.0
44
pytest-cov ==6.1.1
55
pytest-timeout ==2.4.0
66
pytest-rerunfailures ==15.1

requirements/pytorch/test.txt

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
coverage ==7.8.0
2-
pytest ==8.3.5
1+
coverage ==7.8.2
2+
pytest ==8.4.0
33
pytest-cov ==6.1.1
44
pytest-timeout ==2.4.0
55
pytest-rerunfailures ==15.1
@@ -12,7 +12,7 @@ numpy >=1.17.2, <1.27.0
1212
onnx >=1.12.0, <1.19.0
1313
onnxruntime >=1.12.0, <1.21.0
1414
psutil <7.0.1 # for `DeviceStatsMonitor`
15-
pandas >1.0, <2.3.0 # needed in benchmarks
15+
pandas >2.0, <2.4.0 # needed in benchmarks
1616
fastapi # for `ServableModuleValidator` # not setting version as re-defined in App
1717
uvicorn # for `ServableModuleValidator` # not setting version as re-defined in App
1818

requirements/typing.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
mypy==1.15.0
2-
torch==2.7.0
2+
torch==2.7.1
33

44
types-Markdown
55
types-PyYAML

setup.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,8 @@ def _set_manifest_path(manifest_dir: str, aggregate: bool = False, mapping: Mapp
110110
assert os.path.exists(manifest_path)
111111
# avoid error: setup script specifies an absolute path
112112
manifest_path = os.path.relpath(manifest_path, _PATH_ROOT)
113-
logging.info("Set manifest path to", manifest_path)
113+
# Use lazy logging formatting
114+
logging.info("Set manifest path to %s", manifest_path)
114115
setuptools.command.egg_info.manifest_maker.template = manifest_path
115116
yield
116117
# cleanup

src/lightning/fabric/connector.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -492,7 +492,7 @@ def _check_and_init_precision(self) -> Precision:
492492
if self._precision_input == "16-mixed"
493493
else "Using bfloat16 Automatic Mixed Precision (AMP)"
494494
)
495-
device = "cpu" if self._accelerator_flag == "cpu" else "cuda"
495+
device = self._accelerator_flag if self._accelerator_flag in ("cpu", "mps") else "cuda"
496496
return MixedPrecision(precision=self._precision_input, device=device) # type: ignore[arg-type]
497497

498498
raise RuntimeError("No precision set")

0 commit comments

Comments
 (0)