@@ -31,26 +31,38 @@ import torch
3131from  diffusers import  AutoencoderKLLTXVideo, LTXImageToVideoPipeline, LTXVideoTransformer3DModel
3232
3333single_file_url = "https://huggingface.co/Lightricks/LTX-Video/ltx-video-2b-v0.9.safetensors"
34- transformer = LTXVideoTransformer3DModel.from_single_file(single_file_url, torch_dtype=torch.bfloat16)
34+ transformer = LTXVideoTransformer3DModel.from_single_file(
35+     single_file_url, torch_dtype=torch.bfloat16
36+ )
3537vae = AutoencoderKLLTXVideo.from_single_file(single_file_url, torch_dtype=torch.bfloat16)
36- pipe = LTXImageToVideoPipeline.from_pretrained("Lightricks/LTX-Video", transformer=transformer, vae=vae, torch_dtype=torch.bfloat16)
38+ pipe = LTXImageToVideoPipeline.from_pretrained(
39+     "Lightricks/LTX-Video", transformer=transformer, vae=vae, torch_dtype=torch.bfloat16
40+ )
3741
3842#  ... inference code ...
3943``` 
4044
41- Alternatively, the pipeline can be used to load the weights with [`~FromSingleFileMixin.from_single_file`].
45+ Alternatively, the pipeline can be used to load the weights with [`~FromSingleFileMixin.from_single_file`].
4246
4347``` python 
4448import  torch
4549from  diffusers import  LTXImageToVideoPipeline
4650from  transformers import  T5EncoderModel, T5Tokenizer
4751
4852single_file_url =  " https://huggingface.co/Lightricks/LTX-Video/ltx-video-2b-v0.9.safetensors" 
49- text_encoder = T5EncoderModel.from_pretrained("Lightricks/LTX-Video", subfolder="text_encoder", torch_dtype=torch.bfloat16)
50- tokenizer = T5Tokenizer.from_pretrained("Lightricks/LTX-Video", subfolder="tokenizer", torch_dtype=torch.bfloat16)
51- pipe = LTXImageToVideoPipeline.from_single_file(single_file_url, text_encoder=text_encoder, tokenizer=tokenizer, torch_dtype=torch.bfloat16)
53+ text_encoder = T5EncoderModel.from_pretrained(
54+     "Lightricks/LTX-Video", subfolder="text_encoder", torch_dtype=torch.bfloat16
55+ )
56+ tokenizer = T5Tokenizer.from_pretrained(
57+     "Lightricks/LTX-Video", subfolder="tokenizer", torch_dtype=torch.bfloat16
58+ )
59+ pipe = LTXImageToVideoPipeline.from_single_file(
60+     single_file_url, text_encoder=text_encoder, tokenizer=tokenizer, torch_dtype=torch.bfloat16
61+ )
5262``` 
5363
64+ Refer to [ this section] ( https://huggingface.co/docs/diffusers/main/en/api/pipelines/cogvideox#memory-optimization )  to learn more about optimizing memory consumption.
65+ 
5466## LTXPipeline  
5567
5668[[ autodoc]]  LTXPipeline
0 commit comments