diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 51bd2056177bb..91cf94023786c 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -189,7 +189,7 @@ We welcome any useful contribution! For your convenience here's a recommended wo
 #### How can I help/contribute?
 
 All types of contributions are welcome - reporting bugs, fixing documentation, adding test cases, solving issues, and preparing bug fixes.
-To get started with code contributions, look for issues marked with the label [good first issue](https://github.com/Lightning-AI/lightning/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) or chose something close to your domain with the label [help wanted](https://github.com/Lightning-AI/lightning/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22). Before coding, make sure that the issue description is clear and comment on the issue so that we can assign it to you (or simply self-assign if you can).
+To get started with code contributions, look for issues marked with the label [good first issue](https://github.com/Lightning-AI/pytorch-lightning/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) or chose something close to your domain with the label [help wanted](https://github.com/Lightning-AI/pytorch-lightning/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22). Before coding, make sure that the issue description is clear and comment on the issue so that we can assign it to you (or simply self-assign if you can).
 
 #### Is there a recommendation for branch names?
 
diff --git a/docs/source-fabric/_templates/theme_variables.jinja b/docs/source-fabric/_templates/theme_variables.jinja
index cce7263609621..e2ebb66281716 100644
--- a/docs/source-fabric/_templates/theme_variables.jinja
+++ b/docs/source-fabric/_templates/theme_variables.jinja
@@ -1,6 +1,6 @@
 {%- set external_urls = {
     'github': 'https://github.com/Lightning-AI/lightning',
-    'github_issues': 'https://github.com/Lightning-AI/lightning/issues',
+    'github_issues': 'https://github.com/Lightning-AI/pytorch-lightning/issues',
     'contributing': 'https://github.com/Lightning-AI/lightning/blob/master/.github/CONTRIBUTING.md',
     'governance': 'https://lightning.ai/docs/pytorch/latest/community/governance.html',
     'docs': 'https://lightning.ai/docs/fabric/',
diff --git a/docs/source-fabric/links.rst b/docs/source-fabric/links.rst
index d8e1d12b1ac6d..bd7b431e76642 100644
--- a/docs/source-fabric/links.rst
+++ b/docs/source-fabric/links.rst
@@ -1,3 +1,3 @@
-.. _PyTorchJob: https://www.kubeflow.org/docs/components/training/pytorch/
+.. _PyTorchJob: https://www.kubeflow.org/docs/components/trainer/legacy-v1/user-guides/pytorch/
 .. _Kubeflow: https://www.kubeflow.org
 .. _Trainer: https://lightning.ai/docs/pytorch/stable/common/trainer.html
diff --git a/docs/source-pytorch/_templates/theme_variables.jinja b/docs/source-pytorch/_templates/theme_variables.jinja
index 912c1882b9138..3e6a4d19bdeed 100644
--- a/docs/source-pytorch/_templates/theme_variables.jinja
+++ b/docs/source-pytorch/_templates/theme_variables.jinja
@@ -1,6 +1,6 @@
 {%- set external_urls = {
     'github': 'https://github.com/Lightning-AI/lightning',
-    'github_issues': 'https://github.com/Lightning-AI/lightning/issues',
+    'github_issues': 'https://github.com/Lightning-AI/pytorch-lightning/issues',
     'contributing': 'https://github.com/Lightning-AI/lightning/blob/master/.github/CONTRIBUTING.md',
     'governance': 'https://lightning.ai/docs/pytorch/latest/community/governance.html',
     'docs': 'https://lightning.ai/docs/pytorch/latest/',
diff --git a/docs/source-pytorch/accelerators/accelerator_prepare.rst b/docs/source-pytorch/accelerators/accelerator_prepare.rst
index 4d1c539f23273..356c5d78dff1c 100644
--- a/docs/source-pytorch/accelerators/accelerator_prepare.rst
+++ b/docs/source-pytorch/accelerators/accelerator_prepare.rst
@@ -123,7 +123,7 @@ It is possible to perform some computation manually and log the reduced result o
 
         # When you call `self.log` only on rank 0, don't forget to add
         # `rank_zero_only=True` to avoid deadlocks on synchronization.
-        # Caveat: monitoring this is unimplemented, see https://github.com/Lightning-AI/lightning/issues/15852
+        # Caveat: monitoring this is unimplemented, see https://github.com/Lightning-AI/pytorch-lightning/issues/15852
         if self.trainer.is_global_zero:
             self.log("my_reduced_metric", mean, rank_zero_only=True)
 
diff --git a/docs/source-pytorch/accelerators/gpu_intermediate.rst b/docs/source-pytorch/accelerators/gpu_intermediate.rst
index 2774a4cf8fc6f..e5dcd151375b2 100644
--- a/docs/source-pytorch/accelerators/gpu_intermediate.rst
+++ b/docs/source-pytorch/accelerators/gpu_intermediate.rst
@@ -25,10 +25,6 @@ Lightning supports multiple ways of doing distributed training.
 
 .. note:: If you request multiple GPUs or nodes without setting a strategy, DDP will be automatically used.
 
-For a deeper understanding of what Lightning is doing, feel free to read this
-`guide `_.
-
-
 ----
 
 
diff --git a/docs/source-pytorch/advanced/ddp_optimizations.rst b/docs/source-pytorch/advanced/ddp_optimizations.rst
index d2d14375155a5..34ca5d743a8f5 100644
--- a/docs/source-pytorch/advanced/ddp_optimizations.rst
+++ b/docs/source-pytorch/advanced/ddp_optimizations.rst
@@ -58,7 +58,7 @@ On a Multi-Node Cluster, Set NCCL Parameters
 ********************************************
 
 `NCCL `__ is the NVIDIA Collective Communications Library that is used by PyTorch to handle communication across nodes and GPUs.
-There are reported benefits in terms of speedups when adjusting NCCL parameters as seen in this `issue `__.
+There are reported benefits in terms of speedups when adjusting NCCL parameters as seen in this `issue `__.
 In the issue, we see a 30% speed improvement when training the Transformer XLM-RoBERTa and a 15% improvement in training with Detectron2.
 NCCL parameters can be adjusted via environment variables.
diff --git a/docs/source-pytorch/advanced/model_parallel/deepspeed.rst b/docs/source-pytorch/advanced/model_parallel/deepspeed.rst
index 9689f8c217eaf..3a3846500ff35 100644
--- a/docs/source-pytorch/advanced/model_parallel/deepspeed.rst
+++ b/docs/source-pytorch/advanced/model_parallel/deepspeed.rst
@@ -319,7 +319,7 @@ Additionally, DeepSpeed supports offloading to NVMe drives for even larger model
     )
     trainer.fit(model)
 
-When offloading to NVMe you may notice that the speed is slow. There are parameters that need to be tuned based on the drives that you are using. Running the `aio_bench_perf_sweep.py `__ script can help you to find optimum parameters. See the `issue `__ for more information on how to parse the information.
+When offloading to NVMe you may notice that the speed is slow. There are parameters that need to be tuned based on the drives that you are using. Running the `aio_bench_perf_sweep.py `__ script can help you to find optimum parameters. See the `issue `__ for more information on how to parse the information.
 
 .. _deepspeed-activation-checkpointing:
 
diff --git a/docs/source-pytorch/data/alternatives.rst b/docs/source-pytorch/data/alternatives.rst
index 976f6f9de7297..02c751fe3134a 100644
--- a/docs/source-pytorch/data/alternatives.rst
+++ b/docs/source-pytorch/data/alternatives.rst
@@ -90,7 +90,7 @@ the desired GPU in your pipeline. When moving data to a specific device, you can
 WebDataset
 ^^^^^^^^^^
 
-The `WebDataset `__ makes it easy to write I/O pipelines for large datasets.
+The `WebDataset `__ makes it easy to write I/O pipelines for large datasets.
 Datasets can be stored locally or in the cloud.
 ``WebDataset`` is just an instance of a standard IterableDataset.
 The webdataset library contains a small wrapper (``WebLoader``) that adds a fluid interface to the DataLoader (and is otherwise identical).
diff --git a/docs/source-pytorch/data/iterables.rst b/docs/source-pytorch/data/iterables.rst
index 58b7ff42c26e1..759400714d3de 100644
--- a/docs/source-pytorch/data/iterables.rst
+++ b/docs/source-pytorch/data/iterables.rst
@@ -50,7 +50,7 @@ To choose a different mode, you can use the :class:`~lightning.pytorch.utilities
 
 Currently, the ``trainer.predict`` method only supports the ``"sequential"`` mode, while ``trainer.fit`` method does not support it.
-Support for this feature is tracked in this `issue `__.
+Support for this feature is tracked in this `issue `__.
 
 Note that when using the ``"sequential"`` mode, you need to add an additional argument ``dataloader_idx`` to some specific hooks.
 Lightning will `raise an error `__ informing you of this requirement.
 
diff --git a/docs/source-pytorch/links.rst b/docs/source-pytorch/links.rst
index 64ec918bf8e25..5291f9548d9e4 100644
--- a/docs/source-pytorch/links.rst
+++ b/docs/source-pytorch/links.rst
@@ -1,2 +1,2 @@
-.. _PyTorchJob: https://www.kubeflow.org/docs/components/training/pytorch/
+.. _PyTorchJob: https://www.kubeflow.org/docs/components/trainer/legacy-v1/user-guides/pytorch/
 .. _Kubeflow: https://www.kubeflow.org
diff --git a/docs/source-pytorch/versioning.rst b/docs/source-pytorch/versioning.rst
index d923b01c7edb3..10c6ec2fdf8e5 100644
--- a/docs/source-pytorch/versioning.rst
+++ b/docs/source-pytorch/versioning.rst
@@ -61,8 +61,8 @@ For API removal, renaming or other forms of backwards-incompatible changes, the
 #. From that version onward, the deprecation warning gets converted into a helpful error, which will remain until next major release.
 
 This policy is not strict. Shorter or longer deprecation cycles may apply to some cases.
-For example, in the past DDP2 was removed without a deprecation process because the feature was broken and unusable beyond fixing as discussed in `#12584 `_.
-Also, `#10410 `_ is an example that a longer deprecation applied to. We deprecated the accelerator arguments, such as ``Trainer(gpus=...)``, in 1.7, however, because the APIs were so core that they would impact almost all use cases, we decided not to introduce the breaking change until 2.0.
+For example, in the past DDP2 was removed without a deprecation process because the feature was broken and unusable beyond fixing as discussed in `#12584 `_.
+Also, `#10410 `_ is an example that a longer deprecation applied to. We deprecated the accelerator arguments, such as ``Trainer(gpus=...)``, in 1.7, however, because the APIs were so core that they would impact almost all use cases, we decided not to introduce the breaking change until 2.0.
 
 Compatibility matrix
 ********************
diff --git a/src/lightning/__setup__.py b/src/lightning/__setup__.py
index 71c24aefd39d8..b71410c4f18cc 100644
--- a/src/lightning/__setup__.py
+++ b/src/lightning/__setup__.py
@@ -104,7 +104,7 @@ def _setup_args() -> dict[str, Any]:
         "install_requires": install_requires,
         "extras_require": _prepare_extras(),
         "project_urls": {
-            "Bug Tracker": "https://github.com/Lightning-AI/lightning/issues",
+            "Bug Tracker": "https://github.com/Lightning-AI/pytorch-lightning/issues",
             "Documentation": "https://lightning.ai/lightning-docs",
             "Source Code": "https://github.com/Lightning-AI/lightning",
         },
diff --git a/src/lightning/fabric/CHANGELOG.md b/src/lightning/fabric/CHANGELOG.md
index c8e87416b1bc7..992dbbb4fbff9 100644
--- a/src/lightning/fabric/CHANGELOG.md
+++ b/src/lightning/fabric/CHANGELOG.md
@@ -337,7 +337,7 @@ Removed legacy supoport for `lightning run model`. Use `fabric run` instead. ([#
 ### Fixed
 
 - Fixed computing the next version folder in `CSVLogger` ([#17139](https://github.com/Lightning-AI/lightning/pull/17139))
-- Fixed inconsistent settings for FSDP Precision ([#17670](https://github.com/Lightning-AI/lightning/issues/17670))
+- Fixed inconsistent settings for FSDP Precision ([#17670](https://github.com/Lightning-AI/pytorch-lightning/issues/17670))
 
 
 ## [2.0.2] - 2023-04-24
diff --git a/src/lightning/fabric/plugins/environments/kubeflow.py b/src/lightning/fabric/plugins/environments/kubeflow.py
index ce2dd002e57bd..23a1c0d1753af 100644
--- a/src/lightning/fabric/plugins/environments/kubeflow.py
+++ b/src/lightning/fabric/plugins/environments/kubeflow.py
@@ -28,7 +28,7 @@ class KubeflowEnvironment(ClusterEnvironment):
     This environment, unlike others, does not get auto-detected and needs to be passed to the Fabric/Trainer
     constructor manually.
 
-    .. _PyTorchJob: https://www.kubeflow.org/docs/components/training/pytorch/
+    .. _PyTorchJob: https://www.kubeflow.org/docs/components/trainer/legacy-v1/user-guides/pytorch/
     .. _Kubeflow: https://www.kubeflow.org
 
     """
diff --git a/src/lightning/fabric/strategies/launchers/multiprocessing.py b/src/lightning/fabric/strategies/launchers/multiprocessing.py
index d9b96dca5471d..3b3e180e63f41 100644
--- a/src/lightning/fabric/strategies/launchers/multiprocessing.py
+++ b/src/lightning/fabric/strategies/launchers/multiprocessing.py
@@ -78,7 +78,7 @@ def __init__(
     def is_interactive_compatible(self) -> bool:
         # The start method 'spawn' is not supported in interactive environments
         # The start method 'fork' is the only one supported in Jupyter environments, with constraints around CUDA
-        # initialization. For more context, see https://github.com/Lightning-AI/lightning/issues/7550
+        # initialization. For more context, see https://github.com/Lightning-AI/pytorch-lightning/issues/7550
         return self._start_method == "fork"
 
     @override
diff --git a/src/lightning/fabric/strategies/launchers/subprocess_script.py b/src/lightning/fabric/strategies/launchers/subprocess_script.py
index a28fe971c7ac4..8a78eb3c7dfbf 100644
--- a/src/lightning/fabric/strategies/launchers/subprocess_script.py
+++ b/src/lightning/fabric/strategies/launchers/subprocess_script.py
@@ -156,7 +156,7 @@ def _check_can_spawn_children(self) -> None:
 
 
 def _basic_subprocess_cmd() -> Sequence[str]:
-    import __main__  # local import to avoid https://github.com/Lightning-AI/lightning/issues/15218
+    import __main__  # local import to avoid https://github.com/Lightning-AI/pytorch-lightning/issues/15218
 
     if __main__.__spec__ is None:  # pragma: no-cover
         return [sys.executable, os.path.abspath(sys.argv[0])] + sys.argv[1:]
@@ -167,7 +167,7 @@ def _hydra_subprocess_cmd(local_rank: int) -> tuple[Sequence[str], str]:
     from hydra.core.hydra_config import HydraConfig
     from hydra.utils import get_original_cwd, to_absolute_path
 
-    import __main__  # local import to avoid https://github.com/Lightning-AI/lightning/issues/15218
+    import __main__  # local import to avoid https://github.com/Lightning-AI/pytorch-lightning/issues/15218
 
     # when user is using hydra find the absolute path
     if __main__.__spec__ is None:  # pragma: no-cover
diff --git a/src/lightning/pytorch/CHANGELOG.md b/src/lightning/pytorch/CHANGELOG.md
index 9f7317c218c30..ef0f3dc73c9e0 100644
--- a/src/lightning/pytorch/CHANGELOG.md
+++ b/src/lightning/pytorch/CHANGELOG.md
@@ -199,16 +199,16 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Fixed handling checkpoint dirpath suffix in NeptuneLogger ([#18863](https://github.com/Lightning-AI/lightning/pull/18863))
 - Fixed an edge case where `ModelCheckpoint` would alternate between versioned and unversioned filename ([#19064](https://github.com/Lightning-AI/lightning/pull/19064))
 - Fixed broadcast at initialization in `MPIEnvironment` ([#19074](https://github.com/Lightning-AI/lightning/pull/19074))
-- Fixed the tensor conversion in `self.log` to respect the default dtype ([#19046](https://github.com/Lightning-AI/lightning/issues/19046))
+- Fixed the tensor conversion in `self.log` to respect the default dtype ([#19046](https://github.com/Lightning-AI/pytorch-lightning/issues/19046))
 
 
 ## [2.1.2] - 2023-11-15
 
 ### Fixed
 
-- Fixed an issue causing permission errors on Windows when attempting to create a symlink for the "last" checkpoint ([#18942](https://github.com/Lightning-AI/lightning/issues/18942))
-- Fixed an issue where Metric instances from `torchmetrics` wouldn't get moved to the device when using FSDP ([#18954](https://github.com/Lightning-AI/lightning/issues/18954))
-- Fixed an issue preventing the user to `Trainer.save_checkpoint()` an FSDP model when `Trainer.test/validate/predict()` ran after `Trainer.fit()` ([#18992](https://github.com/Lightning-AI/lightning/issues/18992))
+- Fixed an issue causing permission errors on Windows when attempting to create a symlink for the "last" checkpoint ([#18942](https://github.com/Lightning-AI/pytorch-lightning/issues/18942))
+- Fixed an issue where Metric instances from `torchmetrics` wouldn't get moved to the device when using FSDP ([#18954](https://github.com/Lightning-AI/pytorch-lightning/issues/18954))
+- Fixed an issue preventing the user to `Trainer.save_checkpoint()` an FSDP model when `Trainer.test/validate/predict()` ran after `Trainer.fit()` ([#18992](https://github.com/Lightning-AI/pytorch-lightning/issues/18992))
 
 
 ## [2.1.1] - 2023-11-06
@@ -216,10 +216,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 ### Fixed
 
 - Fixed an issue when replacing an existing `last.ckpt` file with a symlink ([#18793](https://github.com/Lightning-AI/lightning/pull/18793))
-- Fixed an issue when `BatchSizeFinder` `steps_per_trial` parameter ends up defining how many validation batches to run during the entire training ([#18394](https://github.com/Lightning-AI/lightning/issues/18394))
-- Fixed an issue saving the `last.ckpt` file when using `ModelCheckpoint` on a remote filesystem and no logger is used ([#18867](https://github.com/Lightning-AI/lightning/issues/18867))
+- Fixed an issue when `BatchSizeFinder` `steps_per_trial` parameter ends up defining how many validation batches to run during the entire training ([#18394](https://github.com/Lightning-AI/pytorch-lightning/issues/18394))
+- Fixed an issue saving the `last.ckpt` file when using `ModelCheckpoint` on a remote filesystem and no logger is used ([#18867](https://github.com/Lightning-AI/pytorch-lightning/issues/18867))
 - Refined the FSDP saving logic and error messaging when path exists ([#18884](https://github.com/Lightning-AI/lightning/pull/18884))
-- Fixed an issue parsing the version from folders that don't include a version number in `TensorBoardLogger` and `CSVLogger` ([#18897](https://github.com/Lightning-AI/lightning/issues/18897))
+- Fixed an issue parsing the version from folders that don't include a version number in `TensorBoardLogger` and `CSVLogger` ([#18897](https://github.com/Lightning-AI/pytorch-lightning/issues/18897))
 
 
 ## [2.1.0] - 2023-10-11
diff --git a/src/lightning/pytorch/callbacks/stochastic_weight_avg.py b/src/lightning/pytorch/callbacks/stochastic_weight_avg.py
index 5643a038e00c1..375bd15f29051 100644
--- a/src/lightning/pytorch/callbacks/stochastic_weight_avg.py
+++ b/src/lightning/pytorch/callbacks/stochastic_weight_avg.py
@@ -354,7 +354,7 @@ def _clear_schedulers(trainer: "pl.Trainer") -> None:
         # Note that this relies on the callback state being restored before the scheduler state is
         # restored, and doesn't work if restore_checkpoint_after_setup is True, but at the time of
         # writing that is only True for deepspeed which is already not supported by SWA.
-        # See https://github.com/Lightning-AI/lightning/issues/11665 for background.
+        # See https://github.com/Lightning-AI/pytorch-lightning/issues/11665 for background.
         if trainer.lr_scheduler_configs:
             assert len(trainer.lr_scheduler_configs) == 1
             trainer.lr_scheduler_configs.clear()
diff --git a/src/lightning/pytorch/plugins/precision/xla.py b/src/lightning/pytorch/plugins/precision/xla.py
index 7682cdc4502f9..6890cc4c1d825 100644
--- a/src/lightning/pytorch/plugins/precision/xla.py
+++ b/src/lightning/pytorch/plugins/precision/xla.py
@@ -79,7 +79,7 @@ def optimizer_step(  # type: ignore[override]
             # we lack coverage here so disable this - something to explore if there's demand
             raise MisconfigurationException(
                 "Skipping backward by returning `None` from your `training_step` is not implemented with XLA."
-                " Please, open an issue in `https://github.com/Lightning-AI/lightning/issues`"
+                " Please, open an issue in `https://github.com/Lightning-AI/pytorch-lightning/issues`"
                 " requesting this feature."
             )
         return closure_result
diff --git a/src/lightning/pytorch/strategies/launchers/multiprocessing.py b/src/lightning/pytorch/strategies/launchers/multiprocessing.py
index aa207a527814e..3589460574c39 100644
--- a/src/lightning/pytorch/strategies/launchers/multiprocessing.py
+++ b/src/lightning/pytorch/strategies/launchers/multiprocessing.py
@@ -88,7 +88,7 @@ def __init__(
     def is_interactive_compatible(self) -> bool:
         # The start method 'spawn' is not supported in interactive environments
         # The start method 'fork' is the only one supported in Jupyter environments, with constraints around CUDA
-        # initialization. For more context, see https://github.com/Lightning-AI/lightning/issues/7550
+        # initialization. For more context, see https://github.com/Lightning-AI/pytorch-lightning/issues/7550
         return self._start_method == "fork"
 
     @override
@@ -111,7 +111,7 @@ def launch(self, function: Callable, *args: Any, trainer: Optional["pl.Trainer"]
         if self._start_method == "spawn":
             _check_missing_main_guard()
         if self._already_fit and trainer is not None and trainer.state.fn == TrainerFn.FITTING:
-            # resolving https://github.com/Lightning-AI/lightning/issues/18775 will lift this restriction
+            # resolving https://github.com/Lightning-AI/pytorch-lightning/issues/18775 will lift this restriction
             raise NotImplementedError(
                 "Calling `trainer.fit()` twice on the same Trainer instance using a spawn-based strategy is not"
                 " supported. You can work around this limitation by creating a new Trainer instance and passing the"
diff --git a/src/lightning/pytorch/strategies/launchers/xla.py b/src/lightning/pytorch/strategies/launchers/xla.py
index 831faeb7bb993..066fecc79f208 100644
--- a/src/lightning/pytorch/strategies/launchers/xla.py
+++ b/src/lightning/pytorch/strategies/launchers/xla.py
@@ -76,7 +76,7 @@ def launch(self, function: Callable, *args: Any, trainer: Optional["pl.Trainer"]
 
         """
         if self._already_fit and trainer is not None and trainer.state.fn == TrainerFn.FITTING:
-            # resolving https://github.com/Lightning-AI/lightning/issues/18775 will lift this restriction
+            # resolving https://github.com/Lightning-AI/pytorch-lightning/issues/18775 will lift this restriction
             raise NotImplementedError(
                 "Calling `trainer.fit()` twice on the same Trainer instance using a spawn-based strategy is not"
                 " supported. You can work around this by creating a new Trainer instance and passing the"
diff --git a/src/lightning/pytorch/trainer/connectors/logger_connector/fx_validator.py b/src/lightning/pytorch/trainer/connectors/logger_connector/fx_validator.py
index 0dbdc4eaf76e1..c1ee0013bfa19 100644
--- a/src/lightning/pytorch/trainer/connectors/logger_connector/fx_validator.py
+++ b/src/lightning/pytorch/trainer/connectors/logger_connector/fx_validator.py
@@ -154,7 +154,7 @@ def check_logging(cls, fx_name: str) -> None:
         if fx_name not in cls.functions:
             raise RuntimeError(
                 f"Logging inside `{fx_name}` is not implemented."
-                " Please, open an issue in `https://github.com/Lightning-AI/lightning/issues`."
+                " Please, open an issue in `https://github.com/Lightning-AI/pytorch-lightning/issues`."
             )
 
         if cls.functions[fx_name] is None:
diff --git a/src/lightning_fabric/__setup__.py b/src/lightning_fabric/__setup__.py
index a55e1f2332f37..36dbae53ef171 100644
--- a/src/lightning_fabric/__setup__.py
+++ b/src/lightning_fabric/__setup__.py
@@ -85,7 +85,7 @@ def _setup_args() -> dict[str, Any]:
         },
         "extras_require": _prepare_extras(),
         "project_urls": {
-            "Bug Tracker": "https://github.com/Lightning-AI/lightning/issues",
+            "Bug Tracker": "https://github.com/Lightning-AI/pytorch-lightning/issues",
             "Documentation": "https://pytorch-lightning.rtfd.io/en/latest/",
             "Source Code": "https://github.com/Lightning-AI/lightning",
         },
diff --git a/src/pytorch_lightning/__setup__.py b/src/pytorch_lightning/__setup__.py
index 6677b469ba1de..97250404230b6 100644
--- a/src/pytorch_lightning/__setup__.py
+++ b/src/pytorch_lightning/__setup__.py
@@ -89,7 +89,7 @@ def _setup_args() -> dict[str, Any]:
         ),
         "extras_require": _prepare_extras(),
         "project_urls": {
-            "Bug Tracker": "https://github.com/Lightning-AI/lightning/issues",
+            "Bug Tracker": "https://github.com/Lightning-AI/pytorch-lightning/issues",
             "Documentation": "https://pytorch-lightning.rtfd.io/en/latest/",
             "Source Code": "https://github.com/Lightning-AI/lightning",
         },
diff --git a/tests/tests_pytorch/callbacks/test_early_stopping.py b/tests/tests_pytorch/callbacks/test_early_stopping.py
index c11aa37b456dc..9a87b3daaad6e 100644
--- a/tests/tests_pytorch/callbacks/test_early_stopping.py
+++ b/tests/tests_pytorch/callbacks/test_early_stopping.py
@@ -61,8 +61,8 @@ def on_train_epoch_end(self, trainer, pl_module):
 def test_resume_early_stopping_from_checkpoint(tmp_path):
     """Prevent regressions to bugs:
 
-    https://github.com/Lightning-AI/lightning/issues/1464
-    https://github.com/Lightning-AI/lightning/issues/1463
+    https://github.com/Lightning-AI/pytorch-lightning/issues/1464
+    https://github.com/Lightning-AI/pytorch-lightning/issues/1463
 
     """
     seed_everything(42)
diff --git a/tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py b/tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py
index e50eef7f258e1..e9e11b6dbb466 100644
--- a/tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py
+++ b/tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py
@@ -347,7 +347,7 @@ def test_swa_resume_training_from_checkpoint(tmp_path, crash_on_epoch):
 
 @pytest.mark.parametrize("crash_on_epoch", [1, 3])
 def test_swa_resume_training_from_checkpoint_custom_scheduler(tmp_path, crash_on_epoch):
-    # Reproduces the bug reported in https://github.com/Lightning-AI/lightning/issues/11665
+    # Reproduces the bug reported in https://github.com/Lightning-AI/pytorch-lightning/issues/11665
     model = CustomSchedulerModel(crash_on_epoch=crash_on_epoch)
     resume_model = CustomSchedulerModel()
     _swa_resume_training_from_checkpoint(tmp_path, model, resume_model)
diff --git a/tests/tests_pytorch/core/test_datamodules.py b/tests/tests_pytorch/core/test_datamodules.py
index 49e8bf0f5b36d..fcdc660a0fffc 100644
--- a/tests/tests_pytorch/core/test_datamodules.py
+++ b/tests/tests_pytorch/core/test_datamodules.py
@@ -112,7 +112,7 @@ def prepare_data(self):
 
 def test_hooks_no_recursion_error():
     # hooks were appended in cascade every tine a new data module was instantiated leading to a recursion error.
-    # See https://github.com/Lightning-AI/lightning/issues/3652
+    # See https://github.com/Lightning-AI/pytorch-lightning/issues/3652
     class DummyDM(LightningDataModule):
         def setup(self, *args, **kwargs):
             pass
diff --git a/tests/tests_pytorch/profilers/test_profiler.py b/tests/tests_pytorch/profilers/test_profiler.py
index df9bc16c284ad..d0221d12e317f 100644
--- a/tests/tests_pytorch/profilers/test_profiler.py
+++ b/tests/tests_pytorch/profilers/test_profiler.py
@@ -69,7 +69,7 @@ def test_simple_profiler_durations(simple_profiler, action: str, expected: list)
             time.sleep(duration)
 
     # different environments have different precision when it comes to time.sleep()
-    # see: https://github.com/Lightning-AI/lightning/issues/796
+    # see: https://github.com/Lightning-AI/pytorch-lightning/issues/796
     np.testing.assert_allclose(simple_profiler.recorded_durations[action], expected, rtol=0.2)
 
 
@@ -277,7 +277,7 @@ def test_advanced_profiler_durations(advanced_profiler, action: str, expected: l
            time.sleep(duration)
 
     # different environments have different precision when it comes to time.sleep()
-    # see: https://github.com/Lightning-AI/lightning/issues/796
+    # see: https://github.com/Lightning-AI/pytorch-lightning/issues/796
     recorded_total_duration = _get_python_cprofile_total_duration(advanced_profiler.profiled_actions[action])
     expected_total_duration = np.sum(expected)
     np.testing.assert_allclose(recorded_total_duration, expected_total_duration, rtol=0.2)
diff --git a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py
index 3981ddd64d773..be99489cfdf89 100644
--- a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py
+++ b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py
@@ -563,7 +563,7 @@ def training_step(self, *args):
 
 
 def test_log_tensor_and_clone_no_torch_warning(tmp_path):
-    """Regression test for issue https://github.com/Lightning-AI/lightning/issues/14594."""
+    """Regression test for issue https://github.com/Lightning-AI/pytorch-lightning/issues/14594."""
 
     class TestModel(BoringModel):
         def training_step(self, *args):