         >>> import torch
         >>> from diffusers import AniMemoryPipeline

-        >>> pipe = AniMemoryPipeline.from_pretrained(
-        ...     "animEEEmpire/AniMemory-alpha", torch_dtype=torch.bfloat16
-        ... )
+        >>> pipe = AniMemoryPipeline.from_pretrained("animEEEmpire/AniMemory-alpha", torch_dtype=torch.bfloat16)
         >>> pipe = pipe.to("cuda")

-        >>> prompt = '一只凶恶的狼,猩红的眼神,在午夜咆哮,月光皎洁'
-        >>> negative_prompt = 'nsfw, worst quality, low quality, normal quality, low resolution, monochrome, blurry, wrong, Mutated hands and fingers, text, ugly faces, twisted, jpeg artifacts, watermark, low contrast, realistic'
+        >>> prompt = "一只凶恶的狼,猩红的眼神,在午夜咆哮,月光皎洁"
+        >>> negative_prompt = "nsfw, worst quality, low quality, normal quality, low resolution, monochrome, blurry, wrong, Mutated hands and fingers, text, ugly faces, twisted, jpeg artifacts, watermark, low contrast, realistic"
         >>> image = pipe(
         ...     prompt=prompt,
         ...     negative_prompt=negative_prompt,
         ...     num_inference_steps=40,
-        ...     height=1024, width=1024,
-        ...     guidance_scale=6.0
+        ...     height=1024,
+        ...     width=1024,
+        ...     guidance_scale=6.0,
         ... ).images[0]
         >>> image.save("output.png")
         ```
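For context, the docstring example above can be extended with seeded, memory-friendly inference. This is a minimal sketch, assuming `AniMemoryPipeline` inherits the standard `DiffusionPipeline` helpers (`enable_model_cpu_offload`) and accepts the usual `generator` argument in `__call__`; neither is confirmed by this diff:

```python
import torch
from diffusers import AniMemoryPipeline

# Load in bfloat16, as in the docstring example above.
pipe = AniMemoryPipeline.from_pretrained("animEEEmpire/AniMemory-alpha", torch_dtype=torch.bfloat16)

# Assumption: the pipeline inherits DiffusionPipeline's offloading helper,
# which moves submodules onto the GPU one at a time to reduce peak VRAM.
pipe.enable_model_cpu_offload()

# Assumption: __call__ accepts the standard diffusers `generator` argument
# for reproducible sampling.
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(
    prompt="一只凶恶的狼,猩红的眼神,在午夜咆哮,月光皎洁",  # "a ferocious wolf, scarlet eyes, howling at midnight, bright moonlight"
    num_inference_steps=40,
    height=1024,
    width=1024,
    guidance_scale=6.0,
    generator=generator,
).images[0]
image.save("output_seeded.png")
```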
@@ -359,11 +358,14 @@ class AniMemoryPipeline(

     Args:
         vae ([`MoVQ`]):
-            Variational Auto-Encoder (VAE) Model. AniMemory uses [MoVQ](https://github.com/ai-forever/Kandinsky-3/blob/main/kandinsky3/movq.py)
+            Variational Auto-Encoder (VAE) Model. AniMemory uses
+            [MoVQ](https://github.com/ai-forever/Kandinsky-3/blob/main/kandinsky3/movq.py)
         text_encoder ([`AniMemoryT5`]):
-            Frozen text-encoder. AniMemory builds based on [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel).
+            Frozen text-encoder. AniMemory builds based on
+            [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel).
         text_encoder_2 ([`AniMemoryAltCLip`]):
-            Second frozen text-encoder. AniMemory builds based on [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection).
+            Second frozen text-encoder. AniMemory builds based on
+            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection).
         tokenizer (`XLMRobertaTokenizerFast`):
             Tokenizer of class
             [XLMRobertaTokenizerFast](https://huggingface.co/docs/transformers/v4.46.3/en/model_doc/xlm-roberta#transformers.XLMRobertaTokenizerFast).
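The components documented in this `Args` block map onto the usual diffusers loading pattern, where a separately loaded component can be passed as a keyword argument to override the one declared in the repo's `model_index.json`. A minimal sketch, assuming the repo follows the standard layout (the `tokenizer` subfolder name is a guess, not confirmed by this diff):

```python
import torch
from transformers import XLMRobertaTokenizerFast
from diffusers import AniMemoryPipeline

# Assumption: the tokenizer lives in a `tokenizer/` subfolder of the repo,
# following the usual diffusers model-index layout.
tokenizer = XLMRobertaTokenizerFast.from_pretrained(
    "animEEEmpire/AniMemory-alpha", subfolder="tokenizer"
)

# Standard diffusers pattern: components passed as keyword arguments
# replace the ones that would otherwise be loaded from the repo.
pipe = AniMemoryPipeline.from_pretrained(
    "animEEEmpire/AniMemory-alpha",
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
)
```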