1 parent ead9779 commit a0a0f26
tests/quantization/bnb/test_mixed_int8.py
@@ -97,6 +97,10 @@ class Base8bitTests(unittest.TestCase):
    num_inference_steps = 10
    seed = 0

+    @classmethod
+    def setUpClass(cls):
+        torch.use_deterministic_algorithms(True)
+
    def get_dummy_inputs(self):
        prompt_embeds = load_pt(
            "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/prompt_embeds.pt",
@@ -485,7 +489,6 @@ def test_generate_quality_dequantize(self):
        r"""
        Test that loading the model and unquantize it produce correct results.
        """
-        torch.use_deterministic_algorithms(True)
        self.pipeline_8bit.transformer.dequantize()
        output = self.pipeline_8bit(
            prompt=self.prompt,
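The change moves torch.use_deterministic_algorithms(True) out of a single test body and into a class-level setUpClass hook, so determinism is enforced for every test in the class rather than only the dequantize test. Below is a minimal, self-contained sketch of that pattern; the class and test names are hypothetical and are not the actual diffusers test file. Note that on CUDA, full determinism for some ops also requires setting the CUBLAS_WORKSPACE_CONFIG environment variable (e.g. ":4096:8").

import unittest

import torch


class DeterministicTestBase(unittest.TestCase):
    # Hypothetical illustration of the pattern applied in this commit.
    @classmethod
    def setUpClass(cls):
        # Applies to every test in the class; ops without a deterministic
        # implementation will raise instead of silently running nondeterministically.
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        # Restore the default so other test classes are unaffected
        # (assumption: the real suite may or may not do this).
        torch.use_deterministic_algorithms(False)

    def test_matmul_is_reproducible(self):
        torch.manual_seed(0)
        a, b = torch.randn(4, 4), torch.randn(4, 4)
        self.assertTrue(torch.equal(a @ b, a @ b))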