13 | 13 | from lightning.fabric.strategies import FSDPStrategy, SingleDeviceStrategy |
14 | 14 | from torch.utils.data import DataLoader |
15 | 15 |
| 16 | +from test_utils import test_init_out_dir |
16 | 17 | from litgpt import pretrain |
17 | 18 | from litgpt.args import EvalArgs, TrainArgs |
18 | 19 | from litgpt.config import Config |
19 | | -from litgpt.pretrain import init_out_dir, initialize_weights |
| 20 | +from litgpt.pretrain import initialize_weights |
20 | 21 |
21 | 22 |
22 | 23 | @RunIf(min_cuda_gpus=2, standalone=True) |
@@ -89,17 +90,6 @@ def test_pretrain_model_name_and_config(): |
89 | 90 | pretrain.setup(model_name="tiny-llama-1.1b", model_config=Config(name="tiny-llama-1.1b")) |
90 | 91 |
91 | 92 |
92 | | -def test_init_out_dir(tmp_path): |
93 | | - relative_path = Path("./out") |
94 | | - absolute_path = tmp_path / "out" |
95 | | - assert init_out_dir(relative_path) == relative_path |
96 | | - assert init_out_dir(absolute_path) == absolute_path |
97 | | - |
98 | | - with mock.patch.dict(os.environ, {"LIGHTNING_ARTIFACTS_DIR": "prefix"}): |
99 | | - assert init_out_dir(relative_path) == Path("prefix") / relative_path |
100 | | - assert init_out_dir(absolute_path) == absolute_path |
101 | | - |
102 | | - |
103 | 93 | @pytest.mark.parametrize(("strategy", "expected"), [(SingleDeviceStrategy, True), (FSDPStrategy, False)]) |
104 | 94 | def test_initialize_weights(strategy, expected): |
105 | 95 | fabric_mock = Mock() |
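
The removed test is re-exported from `test_utils` via the new import above. For reference, here is a sketch of how the relocated test presumably looks, reconstructed from the deleted lines; the exact contents and surrounding imports of `test_utils` are assumptions:

```python
# Sketch of the relocated test_init_out_dir, reconstructed from the lines
# removed in this diff. Its placement in test_utils follows the added import;
# the imports shown here are assumptions about what that module would need.
import os
from pathlib import Path
from unittest import mock

from litgpt.pretrain import init_out_dir


def test_init_out_dir(tmp_path):
    relative_path = Path("./out")
    absolute_path = tmp_path / "out"
    # Without LIGHTNING_ARTIFACTS_DIR set, both paths are returned unchanged.
    assert init_out_dir(relative_path) == relative_path
    assert init_out_dir(absolute_path) == absolute_path

    # With LIGHTNING_ARTIFACTS_DIR set, relative paths are prefixed with it,
    # while absolute paths are still returned as-is.
    with mock.patch.dict(os.environ, {"LIGHTNING_ARTIFACTS_DIR": "prefix"}):
        assert init_out_dir(relative_path) == Path("prefix") / relative_path
        assert init_out_dir(absolute_path) == absolute_path
```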