import torch
from comfy.ldm.modules.attention import optimized_attention_for_device

# Multi-head self-attention with separate q/k/v projections. The attention kernel itself
# is chosen per device and passed in as `optimized_attention` by the encoder.
class CLIPAttention(torch.nn.Module):
    def __init__(self, embed_dim, heads, dtype, device, operations):
        super().__init__()

        self.heads = heads
        self.q_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
        self.k_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
        self.v_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)

        self.out_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)

    def forward(self, x, mask=None, optimized_attention=None):
        q = self.q_proj(x)
        k = self.k_proj(x)
        v = self.v_proj(x)

        out = optimized_attention(q, k, v, self.heads, mask)
        return self.out_proj(out)

# "quick_gelu" is the sigmoid-based GELU approximation used by the original CLIP weights.
ACTIVATIONS = {"quick_gelu": lambda a: a * torch.sigmoid(1.702 * a),
               "gelu": torch.nn.functional.gelu,
}

class CLIPMLP(torch.nn.Module):
    def __init__(self, embed_dim, intermediate_size, activation, dtype, device, operations):
        super().__init__()
        self.fc1 = operations.Linear(embed_dim, intermediate_size, bias=True, dtype=dtype, device=device)
        self.activation = ACTIVATIONS[activation]
        self.fc2 = operations.Linear(intermediate_size, embed_dim, bias=True, dtype=dtype, device=device)

    def forward(self, x):
        x = self.fc1(x)
        x = self.activation(x)
        x = self.fc2(x)
        return x

# Pre-LayerNorm transformer block: attention and MLP sub-layers, each with a residual connection.
class CLIPLayer(torch.nn.Module):
    def __init__(self, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations):
        super().__init__()
        self.layer_norm1 = operations.LayerNorm(embed_dim, dtype=dtype, device=device)
        self.self_attn = CLIPAttention(embed_dim, heads, dtype, device, operations)
        self.layer_norm2 = operations.LayerNorm(embed_dim, dtype=dtype, device=device)
        self.mlp = CLIPMLP(embed_dim, intermediate_size, intermediate_activation, dtype, device, operations)

    def forward(self, x, mask=None, optimized_attention=None):
        x += self.self_attn(self.layer_norm1(x), mask, optimized_attention)
        x += self.mlp(self.layer_norm2(x))
        return x


class CLIPEncoder(torch.nn.Module):
    def __init__(self, num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations):
        super().__init__()
        self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) for i in range(num_layers)])

    def forward(self, x, mask=None, intermediate_output=None):
        optimized_attention = optimized_attention_for_device(x.device, mask=True)
        # Additive causal mask (-inf strictly above the diagonal), folded into any padding mask.
        causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1)
        if mask is not None:
            mask += causal_mask
        else:
            mask = causal_mask

        # Negative indices select an intermediate layer counting back from the last one.
        if intermediate_output is not None:
            if intermediate_output < 0:
                intermediate_output = len(self.layers) + intermediate_output

        intermediate = None
        for i, l in enumerate(self.layers):
            x = l(x, mask, optimized_attention)
            if i == intermediate_output:
                intermediate = x.clone()
        return x, intermediate

class CLIPEmbeddings(torch.nn.Module):
    def __init__(self, embed_dim, vocab_size=49408, num_positions=77, dtype=None, device=None):
        super().__init__()
        self.token_embedding = torch.nn.Embedding(vocab_size, embed_dim, dtype=dtype, device=device)
        self.position_embedding = torch.nn.Embedding(num_positions, embed_dim, dtype=dtype, device=device)

    def forward(self, input_tokens):
        return self.token_embedding(input_tokens) + self.position_embedding.weight


class CLIPTextModel_(torch.nn.Module):
    def __init__(self, config_dict, dtype, device, operations):
        num_layers = config_dict["num_hidden_layers"]
        embed_dim = config_dict["hidden_size"]
        heads = config_dict["num_attention_heads"]
        intermediate_size = config_dict["intermediate_size"]
        intermediate_activation = config_dict["hidden_act"]

        super().__init__()
        self.embeddings = CLIPEmbeddings(embed_dim, dtype=torch.float32, device=device)
        self.encoder = CLIPEncoder(num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations)
        self.final_layer_norm = operations.LayerNorm(embed_dim, dtype=dtype, device=device)

    def forward(self, input_tokens, attention_mask=None, intermediate_output=None, final_layer_norm_intermediate=True):
        x = self.embeddings(input_tokens)
        mask = None
        if attention_mask is not None:
            # Convert the 0/1 padding mask into an additive mask: 0 where attended, -inf where padded.
            mask = 1.0 - attention_mask.to(x.dtype).unsqueeze(1).unsqueeze(1).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
            mask = mask.masked_fill(mask.to(torch.bool), float("-inf"))

        x, i = self.encoder(x, mask=mask, intermediate_output=intermediate_output)
        x = self.final_layer_norm(x)
        if i is not None and final_layer_norm_intermediate:
            i = self.final_layer_norm(i)

        # Pooled output: hidden state at each sequence's end-of-text token, located via argmax
        # of the token ids (the end-of-text token has the highest id in the CLIP vocabulary).
        pooled_output = x[torch.arange(x.shape[0], device=x.device), input_tokens.to(dtype=torch.int, device=x.device).argmax(dim=-1),]
        return x, i, pooled_output

class CLIPTextModel(torch.nn.Module):
    def __init__(self, config_dict, dtype, device, operations):
        super().__init__()
        self.num_layers = config_dict["num_hidden_layers"]
        self.text_model = CLIPTextModel_(config_dict, dtype, device, operations)
        self.dtype = dtype

    def get_input_embeddings(self):
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, embeddings):
        self.text_model.embeddings.token_embedding = embeddings

    def forward(self, *args, **kwargs):
        return self.text_model(*args, **kwargs)
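
# Usage sketch (not part of the original file): `operations` only needs Linear/LayerNorm
# constructors with torch.nn-style signatures (dtype/device kwargs), so torch.nn itself can
# stand in here; ComfyUI normally passes its own ops module instead. The config keys follow
# the Hugging Face CLIP text config; the values below are assumed ViT-L/14 text encoder settings.
if __name__ == "__main__":
    config = {
        "num_hidden_layers": 12,
        "hidden_size": 768,
        "num_attention_heads": 12,
        "intermediate_size": 3072,
        "hidden_act": "quick_gelu",
    }
    model = CLIPTextModel(config, dtype=torch.float32, device="cpu", operations=torch.nn)
    tokens = torch.zeros((1, 77), dtype=torch.long)  # one batch of 77 token ids
    hidden, intermediate, pooled = model(tokens, intermediate_output=-2)
    print(hidden.shape, intermediate.shape, pooled.shape)  # (1, 77, 768) (1, 77, 768) (1, 768)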