diff --git a/requirements/pytorch/extra.txt b/requirements/pytorch/extra.txt
index e14cb38297caa..139513fb22fd9 100644
--- a/requirements/pytorch/extra.txt
+++ b/requirements/pytorch/extra.txt
@@ -5,7 +5,7 @@
 matplotlib>3.1, <3.9.0
 omegaconf >=2.2.3, <2.4.0
 hydra-core >=1.2.0, <1.4.0
-jsonargparse[signatures] >=4.27.7, <=4.35.0
+jsonargparse[signatures] >=4.28.0, <=4.40.0
 rich >=12.3.0, <13.6.0
 tensorboardX >=2.2, <2.7.0  # min version is set by torch.onnx missing attribute
 bitsandbytes >=0.45.2,<0.45.3; platform_system != "Darwin"
diff --git a/tests/tests_pytorch/loggers/test_wandb.py b/tests/tests_pytorch/loggers/test_wandb.py
index 35c1917983dcf..e9b9e9a8090b0 100644
--- a/tests/tests_pytorch/loggers/test_wandb.py
+++ b/tests/tests_pytorch/loggers/test_wandb.py
@@ -26,7 +26,6 @@
 from lightning.pytorch.demos.boring_classes import BoringModel
 from lightning.pytorch.loggers import TensorBoardLogger, WandbLogger
 from lightning.pytorch.utilities.exceptions import MisconfigurationException
-from tests_pytorch.test_cli import _xfail_python_ge_3_11_9


 def test_wandb_project_name(wandb_mock):
@@ -645,7 +644,6 @@ def test_wandb_logger_download_artifact(wandb_mock, tmp_path):
     wandb_mock.Api().artifact.assert_called_once_with("test_artifact", type="model")


-@_xfail_python_ge_3_11_9
 @pytest.mark.parametrize(("log_model", "expected"), [("True", True), ("False", False), ("all", "all")])
 def test_wandb_logger_cli_integration(log_model, expected, wandb_mock, monkeypatch, tmp_path):
     """Test that the WandbLogger can be used with the LightningCLI."""
diff --git a/tests/tests_pytorch/test_cli.py b/tests/tests_pytorch/test_cli.py
index 5c33a8539b693..7658894b37414 100644
--- a/tests/tests_pytorch/test_cli.py
+++ b/tests/tests_pytorch/test_cli.py
@@ -29,7 +29,6 @@
 import yaml
 from lightning_utilities import compare_version
 from lightning_utilities.test.warning import no_warning_call
-from packaging.version import Version
 from tensorboard.backend.event_processing import event_accumulator
 from tensorboard.plugins.hparams.plugin_data_pb2 import HParamsPluginData
 from torch.optim import SGD
@@ -65,14 +64,6 @@ def lazy_instance(*args, **kwargs):
         return None


-_xfail_python_ge_3_11_9 = pytest.mark.xfail(
-    # https://github.com/omni-us/jsonargparse/issues/484
-    Version(f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}") >= Version("3.11.9"),
-    strict=False,
-    reason="jsonargparse + Python 3.11.9 compatibility issue",
-)
-
-
 @contextmanager
 def mock_subclasses(baseclass, *subclasses):
     """Mocks baseclass so that it only has the given child subclasses."""
@@ -356,7 +347,6 @@ def test_save_to_log_dir_false_error():
     )


-@_xfail_python_ge_3_11_9
 def test_lightning_cli_logger_save_config(cleandir):
     class LoggerSaveConfigCallback(SaveConfigCallback):
         def __init__(self, *args, **kwargs) -> None:
@@ -753,7 +743,6 @@ def add_arguments_to_parser(self, parser):
     assert cli.trainer.lr_scheduler_configs[0].scheduler.step_size == 50


-@_xfail_python_ge_3_11_9
 @RunIf(min_torch="2.2")
 @pytest.mark.parametrize("use_generic_base_class", [False, True])
 def test_lightning_cli_optimizers_and_lr_scheduler_with_link_to(use_generic_base_class):
@@ -801,7 +790,6 @@ def __init__(self, optim1: dict, optim2: dict, scheduler: dict):
     assert isinstance(cli.model.scheduler, torch.optim.lr_scheduler.ExponentialLR)


-@_xfail_python_ge_3_11_9
 @RunIf(min_torch="2.2")
 def test_lightning_cli_optimizers_and_lr_scheduler_with_callable_type():
     class TestModel(BoringModel):
@@ -1118,7 +1106,6 @@ def __init__(self, foo, bar=5):
         self.bar = bar


-@_xfail_python_ge_3_11_9
 def test_lightning_cli_model_short_arguments():
     with (
         mock.patch("sys.argv", ["any.py", "fit", "--model=BoringModel"]),
@@ -1146,7 +1133,6 @@ def __init__(self, foo, bar=5):
         self.bar = bar


-@_xfail_python_ge_3_11_9
 def test_lightning_cli_datamodule_short_arguments():
     # with set model
     with (
@@ -1200,7 +1186,6 @@ def test_lightning_cli_datamodule_short_arguments():
     assert cli.parser.groups["data"].group_class is BoringDataModule


-@_xfail_python_ge_3_11_9
 @pytest.mark.parametrize("use_class_path_callbacks", [False, True])
 def test_callbacks_append(use_class_path_callbacks):
     """This test validates registries are used when simplified command line are being used."""
@@ -1244,7 +1229,6 @@ def test_callbacks_append(use_class_path_callbacks):
     assert all(t in callback_types for t in expected)


-@_xfail_python_ge_3_11_9
 def test_optimizers_and_lr_schedulers_reload(cleandir):
     base = ["any.py", "--trainer.max_epochs=1"]
     input = base + [
@@ -1276,7 +1260,6 @@ def test_optimizers_and_lr_schedulers_reload(cleandir):
         LightningCLI(BoringModel, run=False)


-@_xfail_python_ge_3_11_9
 def test_optimizers_and_lr_schedulers_add_arguments_to_parser_implemented_reload(cleandir):
     class TestLightningCLI(LightningCLI):
         def __init__(self, *args):
@@ -1540,7 +1523,6 @@ def test_cli_help_message():
     assert "Implements Adam" in shorthand_help.getvalue()


-@_xfail_python_ge_3_11_9
 def test_cli_reducelronplateau():
     with mock.patch(
         "sys.argv", ["any.py", "--optimizer=Adam", "--lr_scheduler=ReduceLROnPlateau", "--lr_scheduler.monitor=foo"]
@@ -1551,7 +1533,6 @@ def test_cli_reducelronplateau():
     assert config["lr_scheduler"]["scheduler"].monitor == "foo"


-@_xfail_python_ge_3_11_9
 def test_cli_configureoptimizers_can_be_overridden():
     class MyCLI(LightningCLI):
         def __init__(self):
@@ -1596,7 +1577,6 @@ def __init__(self, activation: torch.nn.Module = lazy_instance(torch.nn.LeakyReL
     assert cli.model.activation is not model.activation


-@_xfail_python_ge_3_11_9
 def test_ddpstrategy_instantiation_and_find_unused_parameters(mps_count_0):
     strategy_default = lazy_instance(DDPStrategy, find_unused_parameters=True)
     with mock.patch("sys.argv", ["any.py", "--trainer.strategy.process_group_backend=group"]):
@@ -1612,7 +1592,6 @@ def test_ddpstrategy_instantiation_and_find_unused_parameters(mps_count_0):
     assert strategy_default is not cli.config_init.trainer.strategy


-@_xfail_python_ge_3_11_9
 def test_cli_logger_shorthand():
     with mock.patch("sys.argv", ["any.py"]):
         cli = LightningCLI(TestModel, run=False, trainer_defaults={"logger": False})
@@ -1643,7 +1622,6 @@ def _test_logger_init_args(logger_name, init, unresolved=None):
     assert data["dict_kwargs"] == unresolved


-@_xfail_python_ge_3_11_9
 def test_comet_logger_init_args():
     _test_logger_init_args(
         "CometLogger",
@@ -1664,7 +1642,6 @@ def test_comet_logger_init_args():
     strict=False,
     reason="TypeError on Windows when parsing",
 )
-@_xfail_python_ge_3_11_9
 def test_neptune_logger_init_args():
     _test_logger_init_args(
         "NeptuneLogger",
@@ -1673,7 +1650,6 @@ def test_neptune_logger_init_args():
     )


-@_xfail_python_ge_3_11_9
 def test_tensorboard_logger_init_args():
     _test_logger_init_args(
         "TensorBoardLogger",
@@ -1685,7 +1661,6 @@ def test_tensorboard_logger_init_args():
     )


-@_xfail_python_ge_3_11_9
 def test_wandb_logger_init_args():
     _test_logger_init_args(
         "WandbLogger",
@@ -1770,7 +1745,6 @@ def __init__(self, a_func: Callable = torch.nn.Softmax):
     assert "a_func: torch.nn.Softmax" in out.getvalue()


-@_xfail_python_ge_3_11_9
 def test_pytorch_profiler_init_args():
     from lightning.pytorch.profilers import Profiler, PyTorchProfiler