Merged · Changes from 5 commits
@@ -67,6 +67,7 @@
     convert_state_dict_to_kohya,
     is_wandb_available,
 )
+from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
 from diffusers.utils.import_utils import is_xformers_available


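For orientation, here is a minimal, self-contained sketch of how the two helpers imported above are combined further down in this diff; the repo id, base model, prompt, and file names below are placeholders, not values from the training script:

```python
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card

# Build (or fetch) a model card skeleton from the training template.
card = load_or_create_model_card(
    repo_id_or_path="user/sd15-lora-example",  # placeholder repo id
    from_training=True,
    license="openrail++",
    base_model="runwayml/stable-diffusion-v1-5",  # placeholder base model
    prompt="a photo of sks dog",  # placeholder instance prompt
    model_description="# Example SD1.5 LoRA DreamBooth card",
    inference=True,
    widget=[{"text": "a photo of sks dog", "output": {"url": "image_0.png"}}],
)

# Attach tags, then write README.md to disk.
card = populate_model_card(card, tags=["text-to-image", "diffusers", "diffusers-training", "lora"])
card.save("README.md")
```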
@@ -79,30 +80,27 @@
 def save_model_card(
     repo_id: str,
     use_dora: bool,
-    images=None,
-    base_model=str,
+    images: list = None,
+    base_model: str = None,
     train_text_encoder=False,
     train_text_encoder_ti=False,
     token_abstraction_dict=None,
-    instance_prompt=str,
-    validation_prompt=str,
+    instance_prompt=None,
+    validation_prompt=None,
     repo_folder=None,
     vae_path=None,
 ):
-    img_str = "widget:\n"
-    lora = "lora" if not use_dora else "dora"
-    for i, image in enumerate(images):
-        image.save(os.path.join(repo_folder, f"image_{i}.png"))
-        img_str += f"""
-        - text: '{validation_prompt if validation_prompt else ' ' }'
-          output:
-            url:
-                "image_{i}.png"
-        """
-    if not images:
-        img_str += f"""
-        - text: '{instance_prompt}'
-        """
+    widget_dict = []
+    if images is not None:
+        for i, image in enumerate(images):
+            image.save(os.path.join(repo_folder, f"image_{i}.png"))
+            widget_dict.append(
+                {"text": validation_prompt if validation_prompt else " ", "output": {"url": f"image_{i}.png"}}
+            )
+    else:
+        widget_dict.append(
+            {"text": instance_prompt}
+        )
     embeddings_filename = f"{repo_folder}_emb"
     instance_prompt_webui = re.sub(r"<s\d+>", "", re.sub(r"<s\d+>", embeddings_filename, instance_prompt, count=1))
     ti_keys = ", ".join(f'"{match}"' for match in re.findall(r"<s\d+>", instance_prompt))
@@ -137,24 +135,7 @@ def save_model_card(
                 trigger_str += f"""
 to trigger concept `{key}` → use `{tokens}` in your prompt \n
 """
-
-    yaml = f"""---
-tags:
-- stable-diffusion
-- stable-diffusion-diffusers
-- diffusers-training
-- text-to-image
-- diffusers
-- {lora}
-- template:sd-lora
-{img_str}
-base_model: {base_model}
-instance_prompt: {instance_prompt}
-license: openrail++
----
-"""
-
-    model_card = f"""
+    model_description = f"""
 # SD1.5 LoRA DreamBooth - {repo_id}
 
 <Gallery />
@@ -202,9 +183,27 @@ def save_model_card(
 Special VAE used for training: {vae_path}.
 
 """
-    with open(os.path.join(repo_folder, "README.md"), "w") as f:
-        f.write(yaml + model_card)
-
+    model_card = load_or_create_model_card(
+        repo_id_or_path=repo_id,
+        from_training=True,
+        license="openrail++",
+        base_model=base_model,
+        prompt=instance_prompt,
+        model_description=model_description,
+        inference=True,
+        widget=widget_dict,
+    )
+
+    tags = ["text-to-image",
+        "diffusers",
+        "diffusers-training",
+        "lora",
+        "template:sd-lora",
+        "stable-diffusion",
+        "stable-diffusion-diffusers"]
+    model_card = populate_model_card(model_card, tags=tags)
+
+    model_card.save(os.path.join(repo_folder, "README.md"))
 
 def import_model_class_from_model_name_or_path(
     pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"

Collaborator:
Since it can be either a LoRA or a DoRA, we need to check whether the `use_dora` arg is True and adapt the tag accordingly (we check that in the line `lora = "lora" if not use_dora else "dora"` :) ).

Contributor (author):
Got it, my bad for removing the check! Thanks for pointing it out, pushing it now.

Collaborator:
Let's just switch `"lora"` in the tags here to `lora` and we're good to go 😊
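A minimal sketch of the change the reviewer is asking for, assuming the `lora = "lora" if not use_dora else "dora"` check from the previous version is restored just above the tags list inside `save_model_card` (its exact placement is an assumption, not part of this commit):

```python
    # Restored check: pick the tag based on whether DoRA training was used.
    lora = "lora" if not use_dora else "dora"

    tags = [
        "text-to-image",
        "diffusers",
        "diffusers-training",
        lora,  # "lora" or "dora", instead of the hardcoded "lora" string
        "template:sd-lora",
        "stable-diffusion",
        "stable-diffusion-diffusers",
    ]
    model_card = populate_model_card(model_card, tags=tags)
```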