Skip to content

Commit a7963ae

Browse files
authored
Reenable flaky tests (#110)
1 parent 83176cd commit a7963ae

File tree

1 file changed

+0
-3
lines changed

1 file changed

+0
-3
lines changed

test/integration/test_integration.py

Lines changed: 0 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -902,7 +902,6 @@ def test_int8_dynamic_quant_subclass(self, device, dtype):
902902
)
903903

904904
@parameterized.expand(COMMON_DEVICE_DTYPE)
905-
@unittest.skip("flaky test, will fix in another PR")
906905
def test_int8_weight_only_quant_subclass(self, device, dtype):
907906
self._test_lin_weight_subclass_impl(
908907
Int8WeightOnlyQuantizedLinearWeight.from_float, device, 40, test_dtype=dtype
@@ -976,7 +975,6 @@ def test_int8_dynamic_quant_subclass_api(self, device, dtype):
976975
)
977976

978977
@parameterized.expand(COMMON_DEVICE_DTYPE)
979-
@unittest.skip("flaky test, will fix in another PR")
980978
def test_int8_weight_only_quant_subclass_api(self, device, dtype):
981979
self._test_lin_weight_subclass_api_impl(
982980
change_linear_weights_to_int8_woqtensors, device, 40, test_dtype=dtype
@@ -1157,7 +1155,6 @@ def test_save_load_dqtensors(self, device, dtype):
11571155

11581156
@parameterized.expand(COMMON_DEVICE_DTYPE)
11591157
@torch.no_grad()
1160-
@unittest.skip("flaky test, will fix in another PR")
11611158
def test_save_load_int8woqtensors(self, device, dtype):
11621159
self._test_handle_save_load_meta_impl(change_linear_weights_to_int8_woqtensors, device, test_dtype=dtype)
11631160

0 commit comments

Comments (0)