1 parent ffbee8f commit da137ab
tests/gpu/torch/export/test_fsdp2_export.py
@@ -27,8 +27,7 @@
     _export_quantized_weight,
     requantize_resmooth_fused_llm_layers,
 )
-from modelopt.torch.quantization.qtensor.base_qtensor import fsdp2_aware_weight_update
-from modelopt.torch.quantization.utils import patch_fsdp_mp_dtypes
+from modelopt.torch.quantization.utils import fsdp2_aware_weight_update, patch_fsdp_mp_dtypes
 
 
 orig_init_mp_dtypes = patch_fsdp_mp_dtypes()
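
For reference, the net effect of the change is that both helpers are now imported from the single modelopt.torch.quantization.utils module instead of two separate modules. A minimal sketch of the resulting module-level setup in the test, based only on the lines visible in the diff (the no-argument call to patch_fsdp_mp_dtypes mirrors the existing line; what it returns and how the test uses it later are not shown here and are assumptions):

from modelopt.torch.quantization.utils import fsdp2_aware_weight_update, patch_fsdp_mp_dtypes

# Patch FSDP mixed-precision dtypes once at import time; the variable name
# suggests the original initializer is kept so it can presumably be restored later.
orig_init_mp_dtypes = patch_fsdp_mp_dtypes()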