We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent dcc5cdd commit e06b850Copy full SHA for e06b850
llm2clip/eva_clip/model.py
@@ -1,4 +1,4 @@
1
-""" CLIP Model
+""" CLIP Model
2
3
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4
"""
@@ -13,7 +13,7 @@
13
from torch import nn
14
15
try:
16
- from .hf_model import HFTextEncoder
+ from .hf_model import HFTextEncoder
17
except:
18
HFTextEncoder = None
19
from .modified_resnet import ModifiedResNet
@@ -244,7 +244,7 @@ def set_grad_checkpointing(self, enable=True):
244
245
def forward(self, text, return_all_features: bool=False, l2_norm: bool=True):
    """Run text features through the text adaptor, optionally L2-normalizing first.

    Args:
        text: input feature tensor; normalization is applied over the last
            dimension (assumed to be the embedding dim — TODO confirm).
        return_all_features: accepted for interface compatibility but not used
            in this body — NOTE(review): confirm whether any caller relies on it.
        l2_norm: when True, L2-normalize the features before the adaptor.

    Returns:
        The output of ``self.text_adaptor`` applied to the (optionally
        normalized) input.
    """
    # Keep a single working tensor so the normalized result actually feeds
    # the adaptor; the original computed normalize(text) and then discarded
    # it by calling self.text_adaptor(text) on the raw input.
    x = text
    if l2_norm:
        x = torch.nn.functional.normalize(x, p=2, dim=-1)
    x = self.text_adaptor(x)
    return x
250
0 commit comments