Commit baf6f35 (1 parent: 05f65ca)

fix

Signed-off-by: Hemil Desai <[email protected]>


tests/unit_tests/_peft/test_lora_moe.py (4 additions, 9 deletions)
@@ -16,6 +16,10 @@
 import torch
 import torch.nn as nn
 from unittest.mock import MagicMock, patch
+try:
+    import grouped_gemm
+except ImportError:
+    grouped_gemm = None
 
 from nemo_automodel.components.moe.layers import GroupedExperts, GroupedExpertsDeepEP, MoEConfig
 from nemo_automodel.components._peft.lora_moe import GroupedExpertsLoRA, GroupedExpertsDeepEPLoRA
@@ -156,15 +160,6 @@ def test_grouped_experts_deepep_lora_init(moe_config, device):
     assert lora_experts.lora_gate_and_up_B.requires_grad
 
 
-try:
-    import grouped_gemm
-except ImportError:
-    grouped_gemm = None
-
-
-
-
-
 @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA required")
 def test_patch_moe_module(moe_config, device):
     """Test that patch_moe_module correctly wraps the original experts with the appropriate LoRA class."""
