
Commit ce7508b

add testing
1 parent eed3b69

File tree

1 file changed: +76 -0 lines

tests/tests_pytorch/tuner/test_lr_finder.py

Lines changed: 76 additions & 0 deletions
@@ -538,3 +538,79 @@ def training_step(self, batch: Any, batch_idx: int) -> STEP_OUTPUT:
     suggested_lr = lr_finder.suggestion()
     assert math.isfinite(suggested_lr)
     assert math.isclose(model.lr, suggested_lr)
+
+
+def test_gradient_correctness():
+    """Test that torch.gradient uses the correct spacing parameter."""
+    lr_finder = _LRFinder(mode="exponential", lr_min=1e-6, lr_max=1e-1, num_training=20)
+
+    # Synthetic example
+    lrs = torch.linspace(0, 2 * math.pi, steps=1000)
+    losses = torch.sin(lrs)
+    lr_finder.results = {"lr": lrs.tolist(), "loss": losses.tolist()}
+
+    # Test the suggestion method
+    suggestion = lr_finder.suggestion(skip_begin=2, skip_end=2)
+    assert suggestion is not None
+    assert abs(suggestion - math.pi) < 1e-2, "Suggestion should be close to pi for this synthetic example"
+
+
+def test_exponential_vs_linear_mode_gradient_difference(tmp_path):
+    """Test that exponential and linear modes produce different but valid suggestions.
+
+    This verifies that the spacing fix works for both modes and that they behave differently as expected due to their
+    different lr progressions.
+
+    """
+
+    class TestModel(BoringModel):
+        def __init__(self):
+            super().__init__()
+            self.lr = 1e-3
+
+    seed_everything(42)
+
+    # Test both modes with identical parameters
+    model_linear = TestModel()
+    model_exp = TestModel()
+
+    trainer_linear = Trainer(default_root_dir=tmp_path, max_epochs=1)
+    trainer_exp = Trainer(default_root_dir=tmp_path, max_epochs=1)
+
+    tuner_linear = Tuner(trainer_linear)
+    tuner_exp = Tuner(trainer_exp)
+
+    lr_finder_linear = tuner_linear.lr_find(model_linear, min_lr=1e-6, max_lr=1e-1, num_training=50, mode="linear")
+    lr_finder_exp = tuner_exp.lr_find(model_exp, min_lr=1e-6, max_lr=1e-1, num_training=50, mode="exponential")
+
+    # Both should produce valid suggestions
+    suggestion_linear = lr_finder_linear.suggestion()
+    suggestion_exp = lr_finder_exp.suggestion()
+
+    assert suggestion_linear is not None
+    assert suggestion_exp is not None
+    assert suggestion_linear > 0
+    assert suggestion_exp > 0
+
+    # Verify that gradient computation uses correct spacing for both modes
+    for lr_finder, mode in [(lr_finder_linear, "linear"), (lr_finder_exp, "exponential")]:
+        losses = torch.tensor(lr_finder.results["loss"][10:-10])
+        lrs = torch.tensor(lr_finder.results["lr"][10:-10])
+        is_finite = torch.isfinite(losses)
+        losses_filtered = losses[is_finite]
+        lrs_filtered = lrs[is_finite]
+
+        if len(losses_filtered) >= 2:
+            # Test that gradient computation works and produces finite results
+            gradients = torch.gradient(losses_filtered, spacing=[lrs_filtered])[0]
+            assert torch.isfinite(gradients).all(), f"Non-finite gradients in {mode} mode"
+            assert len(gradients) == len(losses_filtered)
+
+            # Verify gradients with spacing differ from gradients without spacing
+            gradients_no_spacing = torch.gradient(losses_filtered)[0]
+
+            # For exponential mode these should clearly differ; for linear mode they may be similar
+            if mode == "exponential":
+                assert not torch.allclose(gradients, gradients_no_spacing, rtol=0.1), (
+                    "Gradients should differ significantly in exponential mode when using proper spacing"
+                )
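
The tests above hinge on torch.gradient accepting per-sample coordinates through its spacing argument: Lightning's suggestion() picks the point with the steepest negative gradient of the loss curve, so for the synthetic loss = sin(lr) example the pick lands near lr = pi, where cos(lr) is most negative. The standalone sketch below (not part of the commit; all variable names are illustrative) shows why passing the actual learning-rate coordinates matters when the points are exponentially spaced: with loss = log(lr) the true slope is 1 / lr, which the coordinate-aware gradient recovers while the default unit-spacing estimate does not.

# Standalone sketch (not from the commit): why the tests pass spacing=[lrs]
# to torch.gradient when learning rates are exponentially spaced.
import torch

# Exponentially spaced "learning rates" and a curve loss = log(lr),
# whose true derivative with respect to lr is 1 / lr.
lrs = torch.logspace(-6, -1, steps=50)
losses = torch.log(lrs)

grad_with_spacing = torch.gradient(losses, spacing=[lrs])[0]  # uses the real lr coordinates
grad_uniform = torch.gradient(losses)[0]                      # assumes unit spacing between samples

true_derivative = 1.0 / lrs

# Away from the edges, the coordinate-aware estimate tracks 1/lr to within a few
# percent; the unit-spacing estimate is off by orders of magnitude.
print(torch.allclose(grad_with_spacing[1:-1], true_derivative[1:-1], rtol=5e-2))  # True
print(torch.allclose(grad_uniform[1:-1], true_derivative[1:-1], rtol=5e-2))       # False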
