Commit 1cdbb55

make style
1 parent: d4718fd

File tree: 2 files changed (+7, -6 lines)

examples/research_projects/anytext/frozen_clip_embedder_t3.py

Lines changed: 3 additions & 2 deletions
@@ -110,8 +110,9 @@ def text_encoder_forward(
         )
         # CLIP's text model uses causal mask, prepare it here.
         # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
-        causal_attention_mask = _create_4d_causal_attention_mask(input_shape, hidden_states.dtype,
-                                                                 device=hidden_states.device)
+        causal_attention_mask = _create_4d_causal_attention_mask(
+            input_shape, hidden_states.dtype, device=hidden_states.device
+        )
         # expand attention_mask
         if attention_mask is not None:
             # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
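
For context on the reflowed call: _create_4d_causal_attention_mask returns an additive causal mask of shape [bsz, 1, seq_len, seq_len], with 0 at positions a token may attend to and the dtype's most negative value everywhere else. The sketch below is a minimal stand-in showing that structure; make_causal_mask is a hypothetical name, not the transformers helper itself:

import torch

def make_causal_mask(input_shape, dtype, device):
    # Additive mask: 0.0 where attention is allowed, dtype-min where it is blocked.
    bsz, seq_len = input_shape
    mask = torch.full((seq_len, seq_len), torch.finfo(dtype).min, dtype=dtype, device=device)
    mask = torch.triu(mask, diagonal=1)  # keep blocking values strictly above the diagonal
    return mask[None, None, :, :].expand(bsz, 1, seq_len, seq_len)

mask = make_causal_mask((2, 77), torch.float32, torch.device("cpu"))
assert mask[0, 0, 1, 0] == 0.0                             # token 1 may attend to token 0
assert mask[0, 0, 0, 1] == torch.finfo(torch.float32).min  # token 0 may not attend to token 1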

examples/research_projects/anytext/recognizer.py

Lines changed: 4 additions & 4 deletions
@@ -134,13 +134,13 @@ def get_image_file_list(img_file):
 
 class TextRecognizer(object):
     def __init__(self, args, predictor):
-        self.rec_image_shape = [int(v) for v in args['rec_image_shape'].split(",")]
-        self.rec_batch_num = args['rec_batch_num']
+        self.rec_image_shape = [int(v) for v in args["rec_image_shape"].split(",")]
+        self.rec_batch_num = args["rec_batch_num"]
         self.predictor = predictor
-        self.chars = self.get_char_dict(args['rec_char_dict_path'])
+        self.chars = self.get_char_dict(args["rec_char_dict_path"])
         self.char2id = {x: i for i, x in enumerate(self.chars)}
         self.is_onnx = not isinstance(self.predictor, torch.nn.Module)
-        self.use_fp16 = args['use_fp16']
+        self.use_fp16 = args["use_fp16"]
 
     # img: CHW
     def resize_norm_img(self, img, max_wh_ratio):
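
These hunks only swap single quotes for double quotes, the quote style typically enforced by the project's formatter in a "make style" commit; behavior is unchanged. A hypothetical illustration of the parsing done in __init__ above, where the key names come from the diff but the sample values are assumptions:

args = {
    "rec_image_shape": "3,48,320",      # C,H,W packed into a comma-separated string (example value)
    "rec_batch_num": 6,                 # example value
    "rec_char_dict_path": "chars.txt",  # example path
    "use_fp16": False,
}
rec_image_shape = [int(v) for v in args["rec_image_shape"].split(",")]
print(rec_image_shape)  # [3, 48, 320]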
