
Commit b21a753

Update test_models_transformer_ltx.py

1 parent: fb29132

File tree: 1 file changed (+24, −1 lines)

tests/models/transformers/test_models_transformer_ltx.py

Lines changed: 24 additions & 1 deletion
@@ -18,7 +18,14 @@
 import torch

 from diffusers import LTXVideoTransformer3DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    is_torch_compile,
+    require_torch_2,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)

 from ..test_modeling_common import ModelTesterMixin

@@ -81,3 +88,19 @@ def prepare_init_args_and_inputs_for_common(self):
     def test_gradient_checkpointing_is_applied(self):
         expected_set = {"LTXVideoTransformer3DModel"}
         super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
+
+    @require_torch_gpu
+    @require_torch_2
+    @is_torch_compile
+    @slow
+    def test_torch_compile_recompilation_and_graph_break(self):
+        torch._dynamo.reset()
+        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+
+        model = self.model_class(**init_dict).to(torch_device)
+        model.eval()
+        model = torch.compile(model, fullgraph=True)
+
+        with torch._dynamo.config.patch(error_on_recompile=True), torch.no_grad():
+            _ = model(**inputs_dict)
+            _ = model(**inputs_dict)
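
For context, a minimal standalone sketch of the recompilation check this test performs, assuming PyTorch 2.x with torch.compile available; the TinyModel module below is a hypothetical stand-in for LTXVideoTransformer3DModel, used only to illustrate the pattern, not the actual diffusers test harness.

import torch
import torch.nn as nn


class TinyModel(nn.Module):
    # Hypothetical toy module; stands in for the real transformer.
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(16, 16)

    def forward(self, x):
        return self.proj(x)


def check_no_recompilation():
    # Clear any cached compilation state so the check starts fresh.
    torch._dynamo.reset()

    model = TinyModel().eval()
    # fullgraph=True makes torch.compile raise on graph breaks
    # instead of silently falling back to eager execution.
    model = torch.compile(model, fullgraph=True)

    x = torch.randn(2, 16)
    # error_on_recompile=True turns any recompilation on the second
    # call into an exception, so a passing run means the compiled
    # graph was reused.
    with torch._dynamo.config.patch(error_on_recompile=True), torch.no_grad():
        _ = model(x)  # first call compiles the graph
        _ = model(x)  # second call must reuse the compiled graph


if __name__ == "__main__":
    check_no_recompilation()
    print("no graph breaks or recompilations detected")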
