|
3 | 3 | import math |
4 | 4 | from typing_extensions import override |
5 | 5 | from comfy_api.latest import ComfyExtension, io |
6 | | - |
| 6 | +import comfy.model_management |
| 7 | +import torch |
| 8 | +import nodes |
7 | 9 |
|
8 | 10 | class TextEncodeQwenImageEdit(io.ComfyNode): |
9 | 11 | @classmethod |
@@ -104,12 +106,37 @@ def execute(cls, clip, prompt, vae=None, image1=None, image2=None, image3=None) |
104 | 106 | return io.NodeOutput(conditioning) |
105 | 107 |
|
106 | 108 |
|
class EmptyQwenImageLayeredLatentImage(io.ComfyNode):
    """Node producing an all-zero layered latent for Qwen image models.

    The output latent has shape ``[batch_size, 16, layers + 1, height // 8,
    width // 8]`` and is allocated on the intermediate device.
    """

    @classmethod
    def define_schema(cls):
        """Declare the node's inputs (image size, layer count, batch) and its single latent output."""
        size_limit = nodes.MAX_RESOLUTION
        node_inputs = [
            io.Int.Input("width", default=640, min=16, max=size_limit, step=16),
            io.Int.Input("height", default=640, min=16, max=size_limit, step=16),
            io.Int.Input("layers", default=3, min=0, max=size_limit, step=1),
            io.Int.Input("batch_size", default=1, min=1, max=4096),
        ]
        return io.Schema(
            node_id="EmptyQwenImageLayeredLatentImage",
            display_name="Empty Qwen Image Layered Latent",
            category="latent/qwen",
            inputs=node_inputs,
            outputs=[io.Latent.Output()],
        )

    @classmethod
    def execute(cls, width, height, layers, batch_size=1) -> io.NodeOutput:
        """Return a zero-filled latent dict keyed by ``"samples"``.

        Spatial dims are divided by 8; the layer axis holds ``layers + 1``
        entries (one extra slot beyond the requested layer count).
        """
        shape = (batch_size, 16, layers + 1, height // 8, width // 8)
        device = comfy.model_management.intermediate_device()
        samples = torch.zeros(shape, device=device)
        return io.NodeOutput({"samples": samples})
| 132 | + |
class QwenExtension(ComfyExtension):
    """Extension that registers this module's Qwen image nodes."""

    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        """Return the node classes exposed by this extension."""
        node_classes = [
            TextEncodeQwenImageEdit,
            TextEncodeQwenImageEditPlus,
            EmptyQwenImageLayeredLatentImage,
        ]
        return node_classes
|
115 | 142 |
|
|
0 commit comments