diff --git a/docs.json b/docs.json index c8e8e0fc..c1d08caa 100644 --- a/docs.json +++ b/docs.json @@ -55,7 +55,8 @@ "development/core-concepts/properties", "development/core-concepts/links", "development/core-concepts/models", - "development/core-concepts/dependencies" ] + "development/core-concepts/dependencies" + ] }, { "group": "Interface Guide", @@ -135,9 +136,7 @@ }, { "group": "OmniGen", - "pages": [ - "tutorials/image/omnigen/omnigen2" - ] + "pages": ["tutorials/image/omnigen/omnigen2"] } ] }, @@ -154,13 +153,18 @@ "group": "Wan Video", "pages": [ "tutorials/video/wan/wan2_2", - "tutorials/video/wan/wan-video", - "tutorials/video/wan/vace", - "tutorials/video/wan/wan-ati", - "tutorials/video/wan/fun-control", - "tutorials/video/wan/fun-camera", - "tutorials/video/wan/fun-inp", - "tutorials/video/wan/wan-flf" + { + "group": "Wan2.1", + "pages": [ + "tutorials/video/wan/wan-video", + "tutorials/video/wan/vace", + "tutorials/video/wan/wan-ati", + "tutorials/video/wan/fun-control", + "tutorials/video/wan/fun-camera", + "tutorials/video/wan/fun-inp", + "tutorials/video/wan/wan-flf" + ] + } ] }, { @@ -330,9 +334,7 @@ }, { "group": "3D", - "pages": [ - "built-in-nodes/Load3D" - ] + "pages": ["built-in-nodes/Load3D"] }, { "group": "API Node", @@ -602,7 +604,8 @@ "zh-CN/development/core-concepts/properties", "zh-CN/development/core-concepts/links", "zh-CN/development/core-concepts/models", - "zh-CN/development/core-concepts/dependencies" ] + "zh-CN/development/core-concepts/dependencies" + ] }, { "group": "界面指南", @@ -684,9 +687,7 @@ }, { "group": "OmniGen", - "pages": [ - "zh-CN/tutorials/image/omnigen/omnigen2" - ] + "pages": ["zh-CN/tutorials/image/omnigen/omnigen2"] } ] }, @@ -703,13 +704,18 @@ "group": "万相视频", "pages": [ "zh-CN/tutorials/video/wan/wan2_2", - "zh-CN/tutorials/video/wan/wan-video", - "zh-CN/tutorials/video/wan/vace", - "zh-CN/tutorials/video/wan/wan-ati", - "zh-CN/tutorials/video/wan/fun-control", - "zh-CN/tutorials/video/wan/fun-camera", - 
"zh-CN/tutorials/video/wan/fun-inp", - "zh-CN/tutorials/video/wan/wan-flf" + { + "group": "Wan2.1", + "pages": [ + "zh-CN/tutorials/video/wan/wan-video", + "zh-CN/tutorials/video/wan/vace", + "zh-CN/tutorials/video/wan/wan-ati", + "zh-CN/tutorials/video/wan/fun-control", + "zh-CN/tutorials/video/wan/fun-camera", + "zh-CN/tutorials/video/wan/fun-inp", + "zh-CN/tutorials/video/wan/wan-flf" + ] + } ] }, { @@ -885,9 +891,7 @@ }, { "group": "3D", - "pages": [ - "zh-CN/built-in-nodes/Load3D" - ] + "pages": ["zh-CN/built-in-nodes/Load3D"] }, { "group": "API 节点", diff --git a/images/tutorial/video/wan/wan2_2/wan_2.2_14b_flf2v.jpg b/images/tutorial/video/wan/wan2_2/wan_2.2_14b_flf2v.jpg new file mode 100644 index 00000000..3284f0cc Binary files /dev/null and b/images/tutorial/video/wan/wan2_2/wan_2.2_14b_flf2v.jpg differ diff --git a/tutorials/video/wan/vace.mdx b/tutorials/video/wan/vace.mdx index 37e03583..61961942 100644 --- a/tutorials/video/wan/vace.mdx +++ b/tutorials/video/wan/vace.mdx @@ -1,7 +1,7 @@ --- title: "ComfyUI Wan2.1 VACE Video Examples" description: "This article introduces how to complete Wan VACE video generation examples in ComfyUI" -sidebarTitle: "Wan VACE" +sidebarTitle: "Wan2.1 VACE" --- import CancelBypass from '/snippets/interface/cancel-bypass.mdx' diff --git a/tutorials/video/wan/wan-flf.mdx b/tutorials/video/wan/wan-flf.mdx index fd3ebc0f..0d368ffa 100644 --- a/tutorials/video/wan/wan-flf.mdx +++ b/tutorials/video/wan/wan-flf.mdx @@ -1,7 +1,7 @@ --- title: "ComfyUI Wan2.1 FLF2V Native Example" description: "This guide explains how to complete Wan2.1 FLF2V video generation examples in ComfyUI" -sidebarTitle: "First-Last Frame" +sidebarTitle: "Wan2.1 FLF2V" --- import UpdateReminder from "/snippets/tutorials/update-reminder.mdx"; diff --git a/tutorials/video/wan/wan2_2.mdx b/tutorials/video/wan/wan2_2.mdx index eb28058d..3fe1c7fb 100644 --- a/tutorials/video/wan/wan2_2.mdx +++ b/tutorials/video/wan/wan2_2.mdx @@ -19,6 +19,8 @@ Wan 2.2 is a new 
generation multimodal generative model launched by WAN AI. This Wan 2.2 has three core features: cinematic-level aesthetic control, deeply integrating professional film industry aesthetic standards, supporting multi-dimensional visual control such as lighting, color, and composition; large-scale complex motion, easily restoring various complex motions and enhancing the smoothness and controllability of motion; precise semantic compliance, excelling in complex scenes and multi-object generation, better restoring users' creative intentions. The model supports multiple generation modes such as text-to-video and image-to-video, suitable for content creation, artistic creation, education and training, and other application scenarios. +[Wan2.2 Prompt Guide](https://alidocs.dingtalk.com/i/nodes/EpGBa2Lm8aZxe5myC99MelA2WgN7R35y) + ## Model Highlights - **Cinematic-level Aesthetic Control**: Professional camera language, supports multi-dimensional visual control such as lighting, color, and composition @@ -56,6 +58,15 @@ For ComfyUI Wan2.2 usage, we have conducted live streams, which you can view to allowFullScreen > + + + This tutorial will use the [🤗 Comfy-Org/Wan_2.2_ComfyUI_Repackaged](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged) version. @@ -104,7 +115,7 @@ ComfyUI/ │ └── wan2.2_vae.safetensors ``` -### 3. Follow the Workflow Steps +### 3. Follow the Steps ![Step Diagram](/images/tutorial/video/wan/wan2_2/wan_2.2_5b_t2v.jpg) 1. Ensure the `Load Diffusion Model` node loads the `wan2.2_ti2v_5B_fp16.safetensors` model. @@ -158,7 +169,7 @@ ComfyUI/ │ └── wan_2.1_vae.safetensors ``` -### 3. Follow the Workflow Steps +### 3. Follow the Steps ![Step Diagram](/images/tutorial/video/wan/wan2_2/wan_2.2_14b_t2v.jpg) 1. Ensure the first `Load Diffusion Model` node loads the `wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors` model. @@ -211,7 +222,7 @@ ComfyUI/ │ └───📂 vae/ │ └── wan_2.1_vae.safetensors ``` -### 3. Follow the Workflow Steps +### 3. 
Follow the Steps ![Step Diagram](/images/tutorial/video/wan/wan2_2/wan_2.2_14b_i2v.jpg) 1. Make sure the first `Load Diffusion Model` node loads the `wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors` model. @@ -223,6 +234,40 @@ ComfyUI/ 7. (Optional) In `EmptyHunyuanLatentVideo`, you can adjust the size settings and the total number of video frames (`length`). 8. Click the `Run` button, or use the shortcut `Ctrl(cmd) + Enter` to execute video generation. +## Wan2.2 14B FLF2V Workflow Example + +The first and last frame workflow uses the same model locations as the I2V section. + +### 1. Workflow and Input Material Preparation + +Download the video or the JSON workflow below and open it in ComfyUI. + + + +

Download JSON Workflow

+
+ +Download the following images as input materials: + +![Input Material](https://raw.githubusercontent.com/Comfy-Org/example_workflows/refs/heads/main/video/wan/2.2/wan22_14B_flf2v_start_image.png) +![Input Material](https://raw.githubusercontent.com/Comfy-Org/example_workflows/refs/heads/main/video/wan/2.2/wan22_14B_flf2v_end_image.png) + +### 2. Follow the Steps + +![Step Diagram](/images/tutorial/video/wan/wan2_2/wan_2.2_14b_flf2v.jpg) + +1. Upload the image to be used as the starting frame in the first `Load Image` node. +2. Upload the image to be used as the ending frame in the second `Load Image` node. +3. Adjust the size settings in the `WanFirstLastFrameToVideo` node. + - By default, a relatively small size is set to prevent low VRAM users from consuming too many resources. + - If you have enough VRAM, you can try a resolution around 720P. +4. Write appropriate prompts according to your first and last frames. +5. Click the `Run` button, or use the shortcut `Ctrl(cmd) + Enter` to execute video generation. 
+ ## Community Resources ### GGUF Versions @@ -241,4 +286,6 @@ ComfyUI/ [Kijai/WanVideo_comfy_fp8_scaled](https://hf-mirror.com/Kijai/WanVideo_comfy_fp8_scaled) **Wan2.1 models** -[Kijai/WanVideo_comfy](https://huggingface.co/Kijai/WanVideo_comfy) \ No newline at end of file +[Kijai/WanVideo_comfy/Lightx2v](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Lightx2v) + +> Use the Lightx2v LoRA to achieve 8-step video generation acceleration \ No newline at end of file diff --git a/zh-CN/tutorials/video/wan/wan-ati.mdx b/zh-CN/tutorials/video/wan/wan-ati.mdx index 8518a10f..14c394a3 100644 --- a/zh-CN/tutorials/video/wan/wan-ati.mdx +++ b/zh-CN/tutorials/video/wan/wan-ati.mdx @@ -1,7 +1,7 @@ --- title: "Wan ATI ComfyUI 原生工作流教程" description: "使用轨迹控制视频生成。" -sidebarTitle: "WAN ATI" +sidebarTitle: "Wan2.1 ATI" --- import UpdateReminder from '/snippets/zh/tutorials/update-reminder.mdx' diff --git a/zh-CN/tutorials/video/wan/wan-video.mdx b/zh-CN/tutorials/video/wan/wan-video.mdx index 1fb79500..c5326951 100644 --- a/zh-CN/tutorials/video/wan/wan-video.mdx +++ b/zh-CN/tutorials/video/wan/wan-video.mdx @@ -1,7 +1,7 @@ --- title: ComfyUI Wan2.1 Video 示例 description: "本文介绍了如何在 ComfyUI 中完成 Wan2.1 Video 视频首尾帧视频生成示例" -sidebarTitle: Wan Video +sidebarTitle: Wan2.1 --- import UpdateReminder from '/snippets/zh/tutorials/update-reminder.mdx' diff --git a/zh-CN/tutorials/video/wan/wan2_2.mdx b/zh-CN/tutorials/video/wan/wan2_2.mdx index 0d47922c..208137a8 100644 --- a/zh-CN/tutorials/video/wan/wan2_2.mdx +++ b/zh-CN/tutorials/video/wan/wan2_2.mdx @@ -19,6 +19,9 @@ import UpdateReminder from '/snippets/zh/tutorials/update-reminder.mdx' Wan 2.2 具备三大核心特性:影视级美学控制,深度融合专业电影工业的美学标准,支持光影、色彩、构图等多维度视觉控制;大规模复杂运动,轻松还原各类复杂运动并强化运动的流畅度和可控性;精准语义遵循,在复杂场景和多对象生成方面表现卓越,更好还原用户的创意意图。 模型支持文生视频、图生视频等多种生成模式,适用于内容创作、艺术创作、教育培训等多种应用场景。 + +[Wan2.2 提示词指南](https://alidocs.dingtalk.com/i/nodes/jb9Y4gmKWrx9eo4dCql9LlbYJGXn6lpz) + ## 模型亮点 - **影视级美学控制**:专业镜头语言,支持光影、色彩、构图等多维度视觉控制 @@ -51,7 +54,15 @@ Wan2.2 系列模型基于 
Apache2.0 开源协议,支持商业使用。Apache2. + + @@ -224,6 +235,40 @@ ComfyUI/ 7. 可选)在`EmptyHunyuanLatentVideo` 你可以进行尺寸的设置调整,和视频总帧数 `length` 调整 8. 点击 `Run` 按钮,或者使用快捷键 `Ctrl(cmd) + Enter(回车)` 来执行视频生成 +## Wan2.2 14B FLF2V 首尾帧视频生成工作流示例 + +首尾帧工作流使用模型位置与 I2V 部分完全一致 + +### 1. 工作流及素材生成 + +下载下面的视频或者 JSON 格式工作流在 ComfyUI 中打开 + + + +

下载 JSON 格式工作流

+
+ +下载下面的素材作为输入 + +![输入素材](/images/tutorial/video/wan/wan2_2/wan22_14B_flf2v_start_image.png) +![输入素材](/images/tutorial/video/wan/wan2_2/wan22_14B_flf2v_end_image.png) + +### 2. 按步骤完成工作流 + +![步骤图](/images/tutorial/video/wan/wan2_2/wan_2.2_14b_flf2v.jpg) + +1. 在第一个 `Load Image` 节点上传作为起始帧的图像 +2. 在第二个 `Load Image` 节点上传作为结束帧的图像 +3. 在 `WanFirstLastFrameToVideo` 上修改尺寸设置 + - 我们默认设置了一个比较小的尺寸,防止低显存用户运行占用过多资源 + - 如果你有足够的显存,可以尝试 720P 左右尺寸 +4. 根据你的首尾帧撰写合适的提示词 +5. 点击 `Run` 按钮,或者使用快捷键 `Ctrl(cmd) + Enter(回车)` 来执行视频生成 + ## 社区资源 ### GGUF 版本 @@ -242,4 +287,6 @@ ComfyUI/ [Kijai/WanVideo_comfy_fp8_scaled](https://hf-mirror.com/Kijai/WanVideo_comfy_fp8_scaled) **Wan2.1 models** -[Kijai/WanVideo_comfy](https://huggingface.co/Kijai/WanVideo_comfy) \ No newline at end of file +[Kijai/WanVideo_comfy/Lightx2v](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Lightx2v) + +> 使用 Lightx2v LoRA 实现 8 步加速视频生成 \ No newline at end of file