
Commit 3ea49c1

[UPDATE] Remove commented-out code and unnecessary docstring in anytext.py and anytext_controlnet.py for improved clarity
1 parent 3b2435f commit 3ea49c1

2 files changed: 1 addition & 17 deletions

2 files changed

+1
-17
lines changed

examples/research_projects/anytext/anytext.py

Lines changed: 1 addition & 8 deletions
@@ -25,6 +25,7 @@
 import os
 import re
 import sys
+import unicodedata
 from functools import partial
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 
@@ -324,12 +325,6 @@ def adjust_image(box, img):
     return result
 
 
-"""
-mask: numpy.ndarray, mask of textual, HWC
-src_img: torch.Tensor, source image, CHW
-"""
-
-
 def crop_image(src_img, mask):
     box = min_bounding_rect(mask)
     result = adjust_image(box, src_img)
@@ -526,10 +521,8 @@ def get_ctcloss(self, preds, gt_text, weight):
 
 
 class TextEmbeddingModule(nn.Module):
-    # @register_to_config
     def __init__(self, font_path, use_fp16=False, device="cpu"):
         super().__init__()
-        # TODO: Learn if the recommended font file is free to use
         self.font = ImageFont.truetype(font_path, 60)
         self.use_fp16 = use_fp16
         self.device = device

examples/research_projects/anytext/anytext_controlnet.py

Lines changed: 0 additions & 9 deletions
@@ -85,15 +85,6 @@ def __init__(
 
         self.fuse_block = nn.Conv2d(256 + 64 + 4, conditioning_embedding_channels, 3, padding=1)
 
-        # self.glyph_block.load_state_dict(load_file("glyph_block.safetensors", device=str(self.device)))
-        # self.position_block.load_state_dict(load_file("position_block.safetensors", device=str(self.device)))
-        # self.fuse_block.load_state_dict(load_file("fuse_block.safetensors", device=str(self.device)))
-
-        # if use_fp16:
-        #     self.glyph_block = self.glyph_block.to(dtype=torch.float16)
-        #     self.position_block = self.position_block.to(dtype=torch.float16)
-        #     self.fuse_block = self.fuse_block.to(dtype=torch.float16)
-
     def forward(self, glyphs, positions, text_info):
         glyph_embedding = self.glyph_block(glyphs.to(self.glyph_block[0].weight.device))
         position_embedding = self.position_block(positions.to(self.position_block[0].weight.device))
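
If the removed weight-loading behavior is still wanted, it can live outside __init__. The sketch below is a hedged reconstruction rather than part of the commit: the checkpoint file names come from the deleted comments, while the helper name load_anytext_blocks, the ckpt_dir parameter, and the assumption that one module owns glyph_block, position_block, and fuse_block are hypothetical.

import torch
from torch import nn
from safetensors.torch import load_file


def load_anytext_blocks(embedding: nn.Module, ckpt_dir: str = ".", use_fp16: bool = False) -> None:
    # Reload the three conditioning blocks from safetensors checkpoints, mirroring
    # the commented-out lines this commit deletes. File names are assumptions taken
    # from those comments; point ckpt_dir at wherever the checkpoints actually live.
    device = next(embedding.parameters()).device
    for name in ("glyph_block", "position_block", "fuse_block"):
        block = getattr(embedding, name)
        block.load_state_dict(load_file(f"{ckpt_dir}/{name}.safetensors", device=str(device)))
        if use_fp16:
            # Cast in place to match a half-precision pipeline, as the removed code did.
            block.to(dtype=torch.float16)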
