@@ -1,10 +1,12 @@
+import os
 import pickle

 import numpy as np
 import torch

 from pytorch_lightning import Trainer
 from pytorch_lightning.testing import LightningTestModel
+from pytorch_lightning.logging import LightningLoggerBase, rank_zero_only
 from . import testing_utils

 RANDOM_FILE_PATHS = list(np.random.randint(12000, 19000, 1000))
@@ -69,117 +71,118 @@ def test_testtube_pickle():
     testing_utils.clear_save_dir()


-# def test_mlflow_logger():
-#     """
-#     verify that basic functionality of mlflow logger works
-#     """
-#     reset_seed()
-#
-#     try:
-#         from pytorch_lightning.logging import MLFlowLogger
-#     except ModuleNotFoundError:
-#         return
-#
-#     hparams = get_hparams()
-#     model = LightningTestModel(hparams)
-#
-#     root_dir = os.path.dirname(os.path.realpath(__file__))
-#     mlflow_dir = os.path.join(root_dir, "mlruns")
-#     import pdb
-#     pdb.set_trace()
-#
-#     logger = MLFlowLogger("test", f"file://{mlflow_dir}")
-#     logger.log_hyperparams(hparams)
-#     logger.save()
-#
-#     trainer_options = dict(
-#         max_nb_epochs=1,
-#         train_percent_check=0.01,
-#         logger=logger
-#     )
-#
-#     trainer = Trainer(**trainer_options)
-#     result = trainer.fit(model)
-#
-#     print('result finished')
-#     assert result == 1, "Training failed"
-#
-#     shutil.move(mlflow_dir, mlflow_dir + f'_{n}')
-
-
-# def test_mlflow_pickle():
-#     """
-#     verify that pickling trainer with mlflow logger works
-#     """
-#     reset_seed()
-#
-#     try:
-#         from pytorch_lightning.logging import MLFlowLogger
-#     except ModuleNotFoundError:
-#         return
-#
-#     hparams = get_hparams()
-#     model = LightningTestModel(hparams)
-#
-#     root_dir = os.path.dirname(os.path.realpath(__file__))
-#     mlflow_dir = os.path.join(root_dir, "mlruns")
-#
-#     logger = MLFlowLogger("test", f"file://{mlflow_dir}")
-#     logger.log_hyperparams(hparams)
-#     logger.save()
-#
-#     trainer_options = dict(
-#         max_nb_epochs=1,
-#         logger=logger
-#     )
-#
-#     trainer = Trainer(**trainer_options)
-#     pkl_bytes = pickle.dumps(trainer)
-#     trainer2 = pickle.loads(pkl_bytes)
-#     trainer2.logger.log_metrics({"acc": 1.0})
-#
-#     n = RANDOM_FILE_PATHS.pop()
-#     shutil.move(mlflow_dir, mlflow_dir + f'_{n}')
-
-
-# def test_custom_logger():
-#
-#     class CustomLogger(LightningLoggerBase):
-#         def __init__(self):
-#             super().__init__()
-#             self.hparams_logged = None
-#             self.metrics_logged = None
-#             self.finalized = False
-#
-#         @rank_zero_only
-#         def log_hyperparams(self, params):
-#             self.hparams_logged = params
-#
-#         @rank_zero_only
-#         def log_metrics(self, metrics, step_num):
-#             self.metrics_logged = metrics
-#
-#         @rank_zero_only
-#         def finalize(self, status):
-#             self.finalized_status = status
-#
-#     hparams = get_hparams()
-#     model = LightningTestModel(hparams)
-#
-#     logger = CustomLogger()
-#
-#     trainer_options = dict(
-#         max_nb_epochs=1,
-#         train_percent_check=0.01,
-#         logger=logger
-#     )
-#
-#     trainer = Trainer(**trainer_options)
-#     result = trainer.fit(model)
-#     assert result == 1, "Training failed"
-#     assert logger.hparams_logged == hparams
-#     assert logger.metrics_logged != {}
-#     assert logger.finalized_status == "success"
+def test_mlflow_logger():
+    """
+    Verify that the basic functionality of the MLflow logger works.
+    """
+    reset_seed()
+
+    try:
+        from pytorch_lightning.logging import MLFlowLogger
+    except ModuleNotFoundError:
+        return
+
+    hparams = testing_utils.get_hparams()
+    model = LightningTestModel(hparams)
+
+    root_dir = os.path.dirname(os.path.realpath(__file__))
+    mlflow_dir = os.path.join(root_dir, "mlruns")
+
+    logger = MLFlowLogger("test", f"file://{mlflow_dir}")
+
+    trainer_options = dict(
+        max_nb_epochs=1,
+        train_percent_check=0.01,
+        logger=logger
+    )
+
+    trainer = Trainer(**trainer_options)
+    result = trainer.fit(model)
+
+    assert result == 1, "Training failed"
+
+    testing_utils.clear_save_dir()
+
+
+def test_mlflow_pickle():
+    """
+    Verify that pickling a Trainer with an MLflow logger works.
+    """
+    reset_seed()
+
+    try:
+        from pytorch_lightning.logging import MLFlowLogger
+    except ModuleNotFoundError:
+        return
+
+    hparams = testing_utils.get_hparams()
+    model = LightningTestModel(hparams)
+
+    root_dir = os.path.dirname(os.path.realpath(__file__))
+    mlflow_dir = os.path.join(root_dir, "mlruns")
+
+    logger = MLFlowLogger("test", f"file://{mlflow_dir}")
+
+    trainer_options = dict(
+        max_nb_epochs=1,
+        logger=logger
+    )
+
+    trainer = Trainer(**trainer_options)
+    pkl_bytes = pickle.dumps(trainer)
+    trainer2 = pickle.loads(pkl_bytes)
+    trainer2.logger.log_metrics({"acc": 1.0})
+
+    testing_utils.clear_save_dir()
+
+
+def test_custom_logger(tmpdir):
+
+    class CustomLogger(LightningLoggerBase):
+        def __init__(self):
+            super().__init__()
+            self.hparams_logged = None
+            self.metrics_logged = None
+            self.finalized = False
+
+        @rank_zero_only
+        def log_hyperparams(self, params):
+            self.hparams_logged = params
+
+        @rank_zero_only
+        def log_metrics(self, metrics, step_num):
+            self.metrics_logged = metrics
+
+        @rank_zero_only
+        def finalize(self, status):
+            self.finalized_status = status
+
+        @property
+        def name(self):
+            return "name"
+
+        @property
+        def version(self):
+            return "1"
+
+    hparams = testing_utils.get_hparams()
+    model = LightningTestModel(hparams)
+
+    logger = CustomLogger()
+
+    trainer_options = dict(
+        max_nb_epochs=1,
+        train_percent_check=0.01,
+        logger=logger,
+        default_save_path=tmpdir
+    )
+
+    trainer = Trainer(**trainer_options)
+    result = trainer.fit(model)
+    assert result == 1, "Training failed"
+    assert logger.hparams_logged == hparams
+    assert logger.metrics_logged != {}
+    assert logger.finalized_status == "success"


 def reset_seed():
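A side note on the optional-dependency guard kept in both MLflow tests above: the try/except ModuleNotFoundError around the import makes the test return early and count as a silent pass when mlflow is not installed. Since pytest drives this suite (test_custom_logger already takes the tmpdir fixture), a sketch of an alternative using pytest.importorskip, which reports the test as skipped instead. The function body here is illustrative, not part of the commit:

import pytest

def test_mlflow_logger_import():
    # Skip with a visible 'skipped' status when mlflow is missing,
    # rather than returning early and counting as a pass.
    pytest.importorskip("mlflow")
    from pytorch_lightning.logging import MLFlowLogger
    # Same constructor call as in the tests above, against a throwaway store.
    logger = MLFlowLogger("test", "file:///tmp/mlruns")
    assert logger is not None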
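The CustomLogger added in test_custom_logger also doubles as a minimal template for user-defined loggers. Below is a standalone sketch of the same pattern exercised directly, without a Trainer; the DictLogger name and the example values are illustrative, while the log_metrics(metrics, step_num) signature, the name/version properties, and the rank_zero_only decorator follow the LightningLoggerBase API exactly as the diff uses it:

from pytorch_lightning.logging import LightningLoggerBase, rank_zero_only


class DictLogger(LightningLoggerBase):
    # Collects everything it is asked to log into plain Python state.
    def __init__(self):
        super().__init__()
        self.hparams_logged = None
        self.metrics_logged = {}
        self.finalized_status = None

    @rank_zero_only
    def log_hyperparams(self, params):
        self.hparams_logged = params

    @rank_zero_only
    def log_metrics(self, metrics, step_num):
        # Keyed by step so repeated calls are all kept, unlike the
        # last-write-wins attribute in the test above.
        self.metrics_logged[step_num] = metrics

    @rank_zero_only
    def finalize(self, status):
        self.finalized_status = status

    @property
    def name(self):
        return "dict_logger"

    @property
    def version(self):
        return "1"


logger = DictLogger()
logger.log_hyperparams({"lr": 0.02})
logger.log_metrics({"loss": 1.23}, step_num=0)
logger.finalize("success")
assert logger.metrics_logged[0] == {"loss": 1.23}

Because such a logger holds only plain Python state, it should also survive the round trip that test_mlflow_pickle performs on the whole Trainer (pickle.loads(pickle.dumps(trainer))), which is presumably why these tests pickle the Trainer at all: distributed backends need to ship it to worker processes.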