Skip to content

Commit 2462bc7

Browse files
committed
update: pytest markers
1 parent 32d9629 commit 2462bc7

9 files changed

+78
-0
lines changed

tests/test_create_optimizer.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,10 @@
1+
import pytest
2+
13
from pytorch_optimizer import create_optimizer
24
from tests.utils import LogisticRegression
35

46

7+
@pytest.mark.utils
58
def test_create_optimizer():
69
model = LogisticRegression()
710

tests/test_gradients.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
from tests.utils import build_environment, simple_parameter, simple_sparse_parameter
88

99

10+
@pytest.mark.gradient
1011
@pytest.mark.parametrize('optimizer_name', [*VALID_OPTIMIZER_NAMES, 'lookahead'])
1112
def test_no_gradients(optimizer_name):
1213
p1 = simple_parameter(require_grad=True)
@@ -32,6 +33,7 @@ def test_no_gradients(optimizer_name):
3233
optimizer.step(lambda: 0.1) # for AliG optimizer
3334

3435

36+
@pytest.mark.gradient
3537
@pytest.mark.parametrize('no_sparse_optimizer', NO_SPARSE_OPTIMIZERS)
3638
def test_sparse_not_supported(no_sparse_optimizer):
3739
param = simple_sparse_parameter()
@@ -45,6 +47,7 @@ def test_sparse_not_supported(no_sparse_optimizer):
4547
optimizer.step(lambda: 0.1)
4648

4749

50+
@pytest.mark.gradient
4851
@pytest.mark.parametrize('sparse_optimizer', SPARSE_OPTIMIZERS)
4952
def test_sparse_supported(sparse_optimizer):
5053
param = simple_sparse_parameter()
@@ -78,6 +81,7 @@ def test_sparse_supported(sparse_optimizer):
7881
optimizer.step()
7982

8083

84+
@pytest.mark.gradient
8185
@pytest.mark.parametrize('optimizer_name', VALID_OPTIMIZER_NAMES)
8286
def test_bf16_gradient(optimizer_name):
8387
# torch.eye does not support bf16
@@ -92,6 +96,7 @@ def test_bf16_gradient(optimizer_name):
9296
optimizer.step(lambda: 0.1)
9397

9498

99+
@pytest.mark.gradient
95100
def test_sam_no_gradient():
96101
(x_data, y_data), model, loss_fn = build_environment()
97102
model.fc1.weight.requires_grad = False

tests/test_load_lr_schedulers.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,16 +4,19 @@
44
from tests.constants import INVALID_LR_SCHEDULER_NAMES, VALID_LR_SCHEDULER_NAMES
55

66

7+
@pytest.mark.utils
78
@pytest.mark.parametrize('valid_lr_scheduler_names', VALID_LR_SCHEDULER_NAMES)
89
def test_load_optimizers_valid(valid_lr_scheduler_names):
910
load_lr_scheduler(valid_lr_scheduler_names)
1011

1112

13+
@pytest.mark.utils
1214
@pytest.mark.parametrize('invalid_lr_scheduler_names', INVALID_LR_SCHEDULER_NAMES)
1315
def test_load_optimizers_invalid(invalid_lr_scheduler_names):
1416
with pytest.raises(NotImplementedError):
1517
load_lr_scheduler(invalid_lr_scheduler_names)
1618

1719

20+
@pytest.mark.utils
1821
def test_get_supported_lr_schedulers():
1922
assert len(get_supported_lr_schedulers()) == 10

tests/test_load_optimizers.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,16 +4,19 @@
44
from tests.constants import INVALID_OPTIMIZER_NAMES, VALID_OPTIMIZER_NAMES
55

66

7+
@pytest.mark.utils
78
@pytest.mark.parametrize('valid_optimizer_names', VALID_OPTIMIZER_NAMES)
89
def test_load_optimizers_valid(valid_optimizer_names):
910
load_optimizer(valid_optimizer_names)
1011

1112

13+
@pytest.mark.utils
1214
@pytest.mark.parametrize('invalid_optimizer_names', INVALID_OPTIMIZER_NAMES)
1315
def test_load_optimizers_invalid(invalid_optimizer_names):
1416
with pytest.raises(NotImplementedError):
1517
load_optimizer(invalid_optimizer_names)
1618

1719

20+
@pytest.mark.utils
1821
def test_get_supported_optimizers():
1922
assert len(get_supported_optimizers()) == 29

tests/test_lr_scheduler_parameters.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
from tests.utils import Example
99

1010

11+
@pytest.mark.lr_scheduler
1112
def test_cosine_annealing_warmup_restarts_params():
1213
optimizer = AdamP(Example().parameters())
1314

@@ -36,6 +37,7 @@ def test_cosine_annealing_warmup_restarts_params():
3637
lr_scheduler.step(epoch=None)
3738

3839

40+
@pytest.mark.lr_scheduler
3941
def test_linear_warmup_lr_scheduler_params():
4042
optimizer = AdamP(Example().parameters())
4143

@@ -60,6 +62,7 @@ def test_linear_warmup_lr_scheduler_params():
6062
PolyScheduler(optimizer=optimizer, t_max=1, max_lr=1, min_lr=1, init_lr=1, warmup_steps=-1)
6163

6264

65+
@pytest.mark.lr_scheduler
6366
def test_chebyshev_params():
6467
with pytest.raises(IndexError):
6568
get_chebyshev_schedule(2)

tests/test_lr_schedulers.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,7 @@
114114
PROPORTION_LEARNING_RATES = [(1e-1, 1e-1, 2.0), (1e-1, 1e-3, 1.090909)]
115115

116116

117+
@pytest.mark.lr_scheduler
117118
@pytest.mark.parametrize('cosine_annealing_warmup_restart_param', CAWR_RECIPES)
118119
def test_cosine_annealing_warmup_restarts(cosine_annealing_warmup_restart_param):
119120
model = Example()
@@ -150,11 +151,13 @@ def test_cosine_annealing_warmup_restarts(cosine_annealing_warmup_restart_param)
150151
np.testing.assert_almost_equal(expected_lrs[epoch], lr)
151152

152153

154+
@pytest.mark.lr_scheduler
153155
def test_get_chebyshev_scheduler():
154156
np.testing.assert_almost_equal(get_chebyshev_schedule(3), 1.81818182, decimal=6)
155157
np.testing.assert_array_equal(chebyshev_perm(5), np.asarray([0, 7, 3, 4, 1, 6, 2, 5]))
156158

157159

160+
@pytest.mark.lr_scheduler
158161
def test_linear_warmup_linear_scheduler():
159162
optimizer = AdamP(Example().parameters())
160163
lr_scheduler = LinearScheduler(optimizer, t_max=10, max_lr=1e-2, min_lr=1e-4, init_lr=1e-3, warmup_steps=5)
@@ -164,6 +167,7 @@ def test_linear_warmup_linear_scheduler():
164167
np.testing.assert_almost_equal(expected_lr, lr_scheduler.get_lr())
165168

166169

170+
@pytest.mark.lr_scheduler
167171
def test_linear_warmup_cosine_scheduler():
168172
optimizer = AdamP(Example().parameters())
169173
lr_scheduler = CosineScheduler(optimizer, t_max=10, max_lr=1e-2, min_lr=1e-4, init_lr=1e-3, warmup_steps=5)
@@ -173,6 +177,7 @@ def test_linear_warmup_cosine_scheduler():
173177
np.testing.assert_almost_equal(expected_lr, lr_scheduler.get_lr(), 5)
174178

175179

180+
@pytest.mark.lr_scheduler
176181
def test_linear_warmup_poly_scheduler():
177182
optimizer = AdamP(Example().parameters())
178183
lr_scheduler = PolyScheduler(optimizer=optimizer, t_max=10, max_lr=1e-2, min_lr=1e-4, init_lr=1e-3, warmup_steps=5)
@@ -182,6 +187,7 @@ def test_linear_warmup_poly_scheduler():
182187
np.testing.assert_almost_equal(expected_lr, lr_scheduler.get_lr(), 6)
183188

184189

190+
@pytest.mark.lr_scheduler
185191
@pytest.mark.parametrize('proportion_learning_rate', PROPORTION_LEARNING_RATES)
186192
def test_proportion_scheduler(proportion_learning_rate: Tuple[float, float, float]):
187193
base_optimizer = AdamP(Example().parameters())
@@ -201,6 +207,7 @@ def test_proportion_scheduler(proportion_learning_rate: Tuple[float, float, floa
201207
np.testing.assert_almost_equal(proportion_learning_rate[2], rho_scheduler.get_lr(), 6)
202208

203209

210+
@pytest.mark.lr_scheduler
204211
def test_proportion_no_last_lr_scheduler():
205212
base_optimizer = AdamP(Example().parameters())
206213
lr_scheduler = CosineAnnealingWarmupRestarts(
@@ -222,6 +229,7 @@ def test_proportion_no_last_lr_scheduler():
222229
np.testing.assert_almost_equal(2.0, rho_scheduler.get_lr(), 6)
223230

224231

232+
@pytest.mark.lr_scheduler
225233
def test_deberta_v3_large_lr_scheduler():
226234
try:
227235
from transformers import AutoConfig, AutoModel

tests/test_optimizer_parameters.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
from tests.utils import Example, simple_parameter
99

1010

11+
@pytest.mark.cfg
1112
@pytest.mark.parametrize('optimizer_name', VALID_OPTIMIZER_NAMES)
1213
def test_learning_rate(optimizer_name):
1314
if optimizer_name in ('alig',):
@@ -23,6 +24,7 @@ def test_learning_rate(optimizer_name):
2324
optimizer(None, **config)
2425

2526

27+
@pytest.mark.cfg
2628
@pytest.mark.parametrize('optimizer_name', VALID_OPTIMIZER_NAMES)
2729
def test_epsilon(optimizer_name):
2830
if optimizer_name in ('nero', 'shampoo', 'scalableshampoo', 'dadaptsgd', 'adafactor', 'lion'):
@@ -40,6 +42,7 @@ def test_epsilon(optimizer_name):
4042
assert str(error_info.value) == '[-] epsilon -1e-06 must be non-negative'
4143

4244

45+
@pytest.mark.cfg
4346
def test_shampoo_epsilon():
4447
shampoo = load_optimizer('Shampoo')
4548
scalable_shampoo = load_optimizer('ScalableShampoo')
@@ -54,6 +57,7 @@ def test_shampoo_epsilon():
5457
shampoo(None, matrix_eps=-1e-6)
5558

5659

60+
@pytest.mark.cfg
5761
def test_adafactor_epsilon():
5862
adafactor = load_optimizer('adafactor')
5963

@@ -64,6 +68,7 @@ def test_adafactor_epsilon():
6468
adafactor(None, eps2=-1e-6)
6569

6670

71+
@pytest.mark.cfg
6772
@pytest.mark.parametrize('optimizer_name', VALID_OPTIMIZER_NAMES)
6873
def test_weight_decay(optimizer_name):
6974
if optimizer_name in ('nero', 'alig'):
@@ -81,6 +86,7 @@ def test_weight_decay(optimizer_name):
8186
assert str(error_info.value) == '[-] weight_decay -0.001 must be non-negative'
8287

8388

89+
@pytest.mark.cfg
8490
@pytest.mark.parametrize('optimizer_name', ['apollo'])
8591
def test_weight_decay_type(optimizer_name):
8692
optimizer = load_optimizer(optimizer_name)
@@ -89,6 +95,7 @@ def test_weight_decay_type(optimizer_name):
8995
optimizer(None, weight_decay_type='dummy')
9096

9197

98+
@pytest.mark.cfg
9299
@pytest.mark.parametrize('optimizer_name', ['apollo'])
93100
def test_rebound(optimizer_name):
94101
optimizer = load_optimizer(optimizer_name)
@@ -97,48 +104,55 @@ def test_rebound(optimizer_name):
97104
optimizer(None, rebound='dummy')
98105

99106

107+
@pytest.mark.cfg
100108
@pytest.mark.parametrize('optimizer_name', ['adamp', 'sgdp'])
101109
def test_wd_ratio(optimizer_name):
102110
optimizer = load_optimizer(optimizer_name)
103111
with pytest.raises(ValueError):
104112
optimizer(None, wd_ratio=-1e-3)
105113

106114

115+
@pytest.mark.cfg
107116
@pytest.mark.parametrize('optimizer_name', ['lars'])
108117
def test_trust_coefficient(optimizer_name):
109118
optimizer = load_optimizer(optimizer_name)
110119
with pytest.raises(ValueError):
111120
optimizer(None, trust_coefficient=-1e-3)
112121

113122

123+
@pytest.mark.cfg
114124
@pytest.mark.parametrize('optimizer_name', ['madgrad', 'lars'])
115125
def test_momentum(optimizer_name):
116126
optimizer = load_optimizer(optimizer_name)
117127
with pytest.raises(ValueError):
118128
optimizer(None, momentum=-1e-3)
119129

120130

131+
@pytest.mark.cfg
121132
@pytest.mark.parametrize('optimizer_name', ['ranger'])
122133
def test_lookahead_k(optimizer_name):
123134
optimizer = load_optimizer(optimizer_name)
124135
with pytest.raises(ValueError):
125136
optimizer(None, k=-1)
126137

127138

139+
@pytest.mark.cfg
128140
@pytest.mark.parametrize('optimizer_name', ['ranger21'])
129141
def test_beta0(optimizer_name):
130142
optimizer = load_optimizer(optimizer_name)
131143
with pytest.raises(ValueError):
132144
optimizer(None, num_iterations=200, beta0=-0.1)
133145

134146

147+
@pytest.mark.cfg
135148
@pytest.mark.parametrize('optimizer_name', ['nero', 'apollo'])
136149
def test_beta(optimizer_name):
137150
optimizer = load_optimizer(optimizer_name)
138151
with pytest.raises(ValueError):
139152
optimizer(None, beta=-0.1)
140153

141154

155+
@pytest.mark.cfg
142156
@pytest.mark.parametrize('optimizer_name', BETA_OPTIMIZER_NAMES)
143157
def test_betas(optimizer_name):
144158
optimizer = load_optimizer(optimizer_name)
@@ -160,6 +174,7 @@ def test_betas(optimizer_name):
160174
optimizer(None, betas=(0.1, 0.1, -0.1))
161175

162176

177+
@pytest.mark.cfg
163178
def test_reduction():
164179
parameters = Example().parameters()
165180
optimizer = load_optimizer('adamp')(parameters)
@@ -168,6 +183,7 @@ def test_reduction():
168183
PCGrad(optimizer, reduction='wrong')
169184

170185

186+
@pytest.mark.cfg
171187
@pytest.mark.parametrize('optimizer_name', ['scalableshampoo', 'shampoo'])
172188
def test_update_frequency(optimizer_name):
173189
optimizer = load_optimizer(optimizer_name)
@@ -183,18 +199,21 @@ def test_update_frequency(optimizer_name):
183199
optimizer(None, preconditioning_compute_steps=-1)
184200

185201

202+
@pytest.mark.cfg
186203
@pytest.mark.parametrize('optimizer_name', ['adan', 'lamb'])
187204
def test_norm(optimizer_name):
188205
optimizer = load_optimizer(optimizer_name)
189206
with pytest.raises(ValueError):
190207
optimizer(None, max_grad_norm=-0.1)
191208

192209

210+
@pytest.mark.cfg
193211
def test_sam_parameters():
194212
with pytest.raises(ValueError, match=''):
195213
SAM(None, load_optimizer('adamp'), rho=-0.1)
196214

197215

216+
@pytest.mark.cfg
198217
def test_lookahead_parameters():
199218
param = simple_parameter()
200219
optimizer = load_optimizer('adamp')([param])
@@ -216,6 +235,7 @@ def test_lookahead_parameters():
216235
Lookahead(optimizer, pullback_momentum='invalid')
217236

218237

238+
@pytest.mark.cfg
219239
def test_sam_methods():
220240
param = simple_parameter()
221241

@@ -224,6 +244,7 @@ def test_sam_methods():
224244
optimizer.load_state_dict(optimizer.state_dict())
225245

226246

247+
@pytest.mark.cfg
227248
def test_safe_fp16_methods():
228249
param = simple_parameter()
229250

@@ -244,12 +265,14 @@ def test_safe_fp16_methods():
244265
assert optimizer.loss_scale == 2.0 ** (15 - 1)
245266

246267

268+
@pytest.mark.cfg
247269
def test_ranger21_warm_methods():
248270
assert Ranger21.build_warm_up_iterations(1000, 0.999) == 220
249271
assert Ranger21.build_warm_up_iterations(4500, 0.999) == 2000
250272
assert Ranger21.build_warm_down_iterations(1000) == 280
251273

252274

275+
@pytest.mark.cfg
253276
@pytest.mark.parametrize('optimizer', ['ranger21', 'adai'])
254277
def test_size_of_parameter(optimizer):
255278
param = simple_parameter(require_grad=False)
@@ -259,6 +282,7 @@ def test_size_of_parameter(optimizer):
259282
load_optimizer(optimizer)([param], 1).step()
260283

261284

285+
@pytest.mark.cfg
262286
def test_ranger21_closure():
263287
model: nn.Module = Example()
264288
optimizer = load_optimizer('ranger21')(model.parameters(), num_iterations=100, betas=(0.9, 1e-9))
@@ -273,6 +297,7 @@ def closure():
273297
optimizer.step(closure)
274298

275299

300+
@pytest.mark.cfg
276301
def test_adafactor_reset():
277302
param = torch.zeros(1).requires_grad_(True)
278303
param.grad = torch.zeros(1)
@@ -281,6 +306,7 @@ def test_adafactor_reset():
281306
optimizer.reset()
282307

283308

309+
@pytest.mark.cfg
284310
def test_adafactor_get_lr():
285311
model: nn.Module = Example()
286312

0 commit comments

Comments (0)