1 parent 178c22d commit a5e6c13
tests/lora/test_lora_layers_ltx_video.py
@@ -52,10 +52,19 @@ class LTXVideoLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
     }
     transformer_cls = LTXVideoTransformer3DModel
     vae_kwargs = {
+        "in_channels": 3,
+        "out_channels": 3,
         "latent_channels": 8,
         "block_out_channels": (8, 8, 8, 8),
-        "spatio_temporal_scaling": (True, True, False, False),
+        "decoder_block_out_channels": (8, 8, 8, 8),
         "layers_per_block": (1, 1, 1, 1, 1),
+        "decoder_layers_per_block": (1, 1, 1, 1, 1),
+        "spatio_temporal_scaling": (True, True, False, False),
+        "decoder_spatio_temporal_scaling": (True, True, False, False),
+        "decoder_inject_noise": (False, False, False, False, False),
+        "upsample_residual": (False, False, False, False),
+        "upsample_factor": (1, 1, 1, 1),
+        "timestep_conditioning": False,
         "patch_size": 1,
         "patch_size_t": 1,
         "encoder_causal": True,