2 changes: 1 addition & 1 deletion requirements/pytorch/extra.txt
@@ -5,7 +5,7 @@
matplotlib>3.1, <3.9.0
omegaconf >=2.2.3, <2.4.0
hydra-core >=1.2.0, <1.4.0
-jsonargparse[signatures] >=4.27.7, <=4.35.0
+jsonargparse[signatures] >=4.28.0, <=4.40.0
rich >=12.3.0, <13.6.0
tensorboardX >=2.2, <2.7.0 # min version is set by torch.onnx missing attribute
bitsandbytes >=0.45.2,<0.45.3; platform_system != "Darwin"
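
For context on the pin above, a minimal sketch (not part of this PR) of how the new jsonargparse bound behaves, using the packaging library:

from packaging.specifiers import SpecifierSet
from packaging.version import Version

# The new bound from requirements/pytorch/extra.txt.
spec = SpecifierSet(">=4.28.0,<=4.40.0")

assert Version("4.28.0") in spec      # the new minimum is accepted
assert Version("4.40.0") in spec      # the upper bound is inclusive
assert Version("4.27.7") not in spec  # the old minimum no longer satisfies the pin

Note that the upper bound uses an inclusive <= rather than the exclusive < used by the surrounding pins, so 4.40.0 itself is allowed.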
2 changes: 0 additions & 2 deletions tests/tests_pytorch/loggers/test_wandb.py
@@ -26,7 +26,6 @@
from lightning.pytorch.demos.boring_classes import BoringModel
from lightning.pytorch.loggers import TensorBoardLogger, WandbLogger
from lightning.pytorch.utilities.exceptions import MisconfigurationException
-from tests_pytorch.test_cli import _xfail_python_ge_3_11_9


def test_wandb_project_name(wandb_mock):
@@ -645,7 +644,6 @@ def test_wandb_logger_download_artifact(wandb_mock, tmp_path):
wandb_mock.Api().artifact.assert_called_once_with("test_artifact", type="model")


-@_xfail_python_ge_3_11_9
@pytest.mark.parametrize(("log_model", "expected"), [("True", True), ("False", False), ("all", "all")])
def test_wandb_logger_cli_integration(log_model, expected, wandb_mock, monkeypatch, tmp_path):
"""Test that the WandbLogger can be used with the LightningCLI."""
26 changes: 0 additions & 26 deletions tests/tests_pytorch/test_cli.py
@@ -29,7 +29,6 @@
import yaml
from lightning_utilities import compare_version
from lightning_utilities.test.warning import no_warning_call
-from packaging.version import Version
from tensorboard.backend.event_processing import event_accumulator
from tensorboard.plugins.hparams.plugin_data_pb2 import HParamsPluginData
from torch.optim import SGD
@@ -65,14 +64,6 @@ def lazy_instance(*args, **kwargs):
return None


-_xfail_python_ge_3_11_9 = pytest.mark.xfail(
-    # https://github.com/omni-us/jsonargparse/issues/484
-    Version(f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}") >= Version("3.11.9"),
-    strict=False,
-    reason="jsonargparse + Python 3.11.9 compatibility issue",
-)
-
-
@contextmanager
def mock_subclasses(baseclass, *subclasses):
"""Mocks baseclass so that it only has the given child subclasses."""
@@ -356,7 +347,6 @@ def test_save_to_log_dir_false_error():
)


-@_xfail_python_ge_3_11_9
def test_lightning_cli_logger_save_config(cleandir):
class LoggerSaveConfigCallback(SaveConfigCallback):
def __init__(self, *args, **kwargs) -> None:
@@ -753,7 +743,6 @@ def add_arguments_to_parser(self, parser):
assert cli.trainer.lr_scheduler_configs[0].scheduler.step_size == 50


-@_xfail_python_ge_3_11_9
@RunIf(min_torch="2.2")
@pytest.mark.parametrize("use_generic_base_class", [False, True])
def test_lightning_cli_optimizers_and_lr_scheduler_with_link_to(use_generic_base_class):
@@ -801,7 +790,6 @@ def __init__(self, optim1: dict, optim2: dict, scheduler: dict):
assert isinstance(cli.model.scheduler, torch.optim.lr_scheduler.ExponentialLR)


-@_xfail_python_ge_3_11_9
@RunIf(min_torch="2.2")
def test_lightning_cli_optimizers_and_lr_scheduler_with_callable_type():
class TestModel(BoringModel):
@@ -1118,7 +1106,6 @@ def __init__(self, foo, bar=5):
self.bar = bar


-@_xfail_python_ge_3_11_9
def test_lightning_cli_model_short_arguments():
with (
mock.patch("sys.argv", ["any.py", "fit", "--model=BoringModel"]),
@@ -1146,7 +1133,6 @@ def __init__(self, foo, bar=5):
self.bar = bar


-@_xfail_python_ge_3_11_9
def test_lightning_cli_datamodule_short_arguments():
# with set model
with (
@@ -1200,7 +1186,6 @@ def test_lightning_cli_datamodule_short_arguments():
assert cli.parser.groups["data"].group_class is BoringDataModule


-@_xfail_python_ge_3_11_9
@pytest.mark.parametrize("use_class_path_callbacks", [False, True])
def test_callbacks_append(use_class_path_callbacks):
"""This test validates registries are used when simplified command line are being used."""
@@ -1244,7 +1229,6 @@ def test_callbacks_append(use_class_path_callbacks):
assert all(t in callback_types for t in expected)


-@_xfail_python_ge_3_11_9
def test_optimizers_and_lr_schedulers_reload(cleandir):
base = ["any.py", "--trainer.max_epochs=1"]
input = base + [
@@ -1276,7 +1260,6 @@ def test_optimizers_and_lr_schedulers_reload(cleandir):
LightningCLI(BoringModel, run=False)


-@_xfail_python_ge_3_11_9
def test_optimizers_and_lr_schedulers_add_arguments_to_parser_implemented_reload(cleandir):
class TestLightningCLI(LightningCLI):
def __init__(self, *args):
@@ -1540,7 +1523,6 @@ def test_cli_help_message():
assert "Implements Adam" in shorthand_help.getvalue()


-@_xfail_python_ge_3_11_9
def test_cli_reducelronplateau():
with mock.patch(
"sys.argv", ["any.py", "--optimizer=Adam", "--lr_scheduler=ReduceLROnPlateau", "--lr_scheduler.monitor=foo"]
@@ -1551,7 +1533,6 @@ def test_cli_reducelronplateau():
assert config["lr_scheduler"]["scheduler"].monitor == "foo"


-@_xfail_python_ge_3_11_9
def test_cli_configureoptimizers_can_be_overridden():
class MyCLI(LightningCLI):
def __init__(self):
@@ -1596,7 +1577,6 @@ def __init__(self, activation: torch.nn.Module = lazy_instance(torch.nn.LeakyReL
assert cli.model.activation is not model.activation


-@_xfail_python_ge_3_11_9
def test_ddpstrategy_instantiation_and_find_unused_parameters(mps_count_0):
strategy_default = lazy_instance(DDPStrategy, find_unused_parameters=True)
with mock.patch("sys.argv", ["any.py", "--trainer.strategy.process_group_backend=group"]):
@@ -1612,7 +1592,6 @@ def test_ddpstrategy_instantiation_and_find_unused_parameters(mps_count_0):
assert strategy_default is not cli.config_init.trainer.strategy


-@_xfail_python_ge_3_11_9
def test_cli_logger_shorthand():
with mock.patch("sys.argv", ["any.py"]):
cli = LightningCLI(TestModel, run=False, trainer_defaults={"logger": False})
@@ -1643,7 +1622,6 @@ def _test_logger_init_args(logger_name, init, unresolved=None):
assert data["dict_kwargs"] == unresolved


-@_xfail_python_ge_3_11_9
def test_comet_logger_init_args():
_test_logger_init_args(
"CometLogger",
@@ -1664,7 +1642,6 @@ def test_comet_logger_init_args():
strict=False,
reason="TypeError on Windows when parsing",
)
-@_xfail_python_ge_3_11_9
def test_neptune_logger_init_args():
_test_logger_init_args(
"NeptuneLogger",
@@ -1673,7 +1650,6 @@ def test_neptune_logger_init_args():
)


-@_xfail_python_ge_3_11_9
def test_tensorboard_logger_init_args():
_test_logger_init_args(
"TensorBoardLogger",
@@ -1685,7 +1661,6 @@ def test_tensorboard_logger_init_args():
)


-@_xfail_python_ge_3_11_9
def test_wandb_logger_init_args():
_test_logger_init_args(
"WandbLogger",
@@ -1770,7 +1745,6 @@ def __init__(self, a_func: Callable = torch.nn.Softmax):
assert "a_func: torch.nn.Softmax" in out.getvalue()


-@_xfail_python_ge_3_11_9
def test_pytorch_profiler_init_args():
from lightning.pytorch.profilers import Profiler, PyTorchProfiler

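
For readers skimming the diff: the marker deleted throughout test_cli.py was a reusable, version-gated xfail. A self-contained sketch of the pattern, reconstructed from the deleted lines above (the decorated test is illustrative only):

import sys

import pytest
from packaging.version import Version

# Expect failure only on interpreters at or above the affected version.
# strict=False means an unexpected pass (XPASS) is reported but does not
# fail the suite, so the marker was harmless on unaffected setups.
_xfail_python_ge_3_11_9 = pytest.mark.xfail(
    # https://github.com/omni-us/jsonargparse/issues/484
    Version(f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}") >= Version("3.11.9"),
    strict=False,
    reason="jsonargparse + Python 3.11.9 compatibility issue",
)


@_xfail_python_ge_3_11_9
def test_example():
    ...

With the jsonargparse floor raised to 4.28.0, the upstream incompatibility presumably no longer applies, so the marker and every use of it (here and in test_wandb.py) can be dropped, along with the now-unused packaging.version import.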