Commit 7759d37

Formatting
1 parent 084515f commit 7759d37

3 files changed, 12 insertions(+), 12 deletions(-)

install.py

Lines changed: 2 additions & 3 deletions
@@ -8,14 +8,13 @@
 
 def pip_install(package: str) -> None:
     subprocess.check_call(
-        [sys.executable, "-m", "pip", "install", package],
-        cwd=PARENT_DIR
+        [sys.executable, "-m", "pip", "install", package], cwd=PARENT_DIR
     )
 
 
 def main() -> None:
     pip_install("-e .")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
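
For context, a minimal sketch of how install.py might read after this commit. Only lines 8-21 appear in the diff, so the imports and the exact PARENT_DIR definition are assumptions.

# Sketch only: the imports and PARENT_DIR are assumed; just the two
# functions and the __main__ guard are taken from the diff above.
import subprocess
import sys
from pathlib import Path

PARENT_DIR = Path(__file__).resolve().parent  # assumed definition


def pip_install(package: str) -> None:
    # Install a package with the current interpreter's pip, run from PARENT_DIR.
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", package], cwd=PARENT_DIR
    )


def main() -> None:
    # Editable install of the current project.
    pip_install("-e .")


if __name__ == "__main__":
    main()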

src/inference_core_nodes/prompt_expansion/prompt_expansion.py

Lines changed: 9 additions & 8 deletions
@@ -16,10 +16,7 @@
 
 CONFIGS_DIR = Path(__file__).parent.joinpath("configs")
 
-fooocus_magic_split = [
-    ", extremely",
-    ", intricate,"
-]
+fooocus_magic_split = [", extremely", ", intricate,"]
 
 disallowed_chars_table = str.maketrans("", "", "[]【】()()|::")
 
@@ -46,7 +43,9 @@ def __init__(self, model_directory: str):
         self.tokenizer = AutoTokenizer.from_pretrained(model_directory)
         self.model = AutoModelForCausalLM.from_pretrained(model_directory)
 
-        positive_tokens = model_directory.joinpath("positive.txt").read_text().splitlines()
+        positive_tokens = (
+            model_directory.joinpath("positive.txt").read_text().splitlines()
+        )
 
         positive_tokens = []
 
@@ -86,7 +85,7 @@ def __call__(self, prompt: str, seed: int) -> str:
         )
 
         response = self.tokenizer.batch_decode(features, skip_special_tokens=True)
-        result = response[0][len(origin):]
+        result = response[0][len(origin) :]
         result = safe_str(result)
         result = result.translate(disallowed_chars_table)
         return result
@@ -110,7 +109,7 @@ def INPUT_TYPES(s):
             "required": {
                 "model_name": (folder_paths.get_filename_list("prompt_expansion"),),
                 "text": ("STRING", {"multiline": True}),
-                "seed": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFF})
+                "seed": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFF}),
             },
         }
 
@@ -151,4 +150,6 @@ def expand_prompt(model_name: str, text: str, seed: int, log_prompt: str):
 NODE_CLASS_MAPPINGS = {"Inference_Core_PromptExpansion": PromptExpansion}
 
 # A dictionary that contains human-readable titles for the nodes
-NODE_DISPLAY_NAME_MAPPINGS = {"Inference_Core_PromptExpansion": "[Inference.Core] Prompt Expansion"}
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "Inference_Core_PromptExpansion": "[Inference.Core] Prompt Expansion"
+}
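
Two details in these hunks are worth a note. The new space in response[0][len(origin) :] is Black's slice-spacing rule (derived from PEP 8's treatment of the slice colon as a binary operator when a bound is a non-trivial expression). And the unchanged character stripping works through a deletion table: str.maketrans("", "", chars) maps every character of chars to None, so str.translate removes them. A small, hedged illustration, with a made-up sample prompt:

# Standalone illustration of the deletion table from the diff above;
# the sample prompt below is invented for demonstration.
disallowed_chars_table = str.maketrans("", "", "[]【】()()|::")

sample = "a castle [masterpiece], (detailed:1.2) | 【test】"
print(sample.translate(disallowed_chars_table))
# -> a castle masterpiece, detailed1.2  test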

src/inference_core_nodes/prompt_expansion/util.py

Lines changed: 1 addition & 1 deletion
@@ -11,4 +11,4 @@ def join_prompts(*args, **kwargs):
         return ""
     if len(prompts) == 1:
         return prompts[0]
-    return ', '.join(prompts)
+    return ", ".join(prompts)
