Commit 17a2076

Merge pull request #3928 from R-N/validate-before-load
Optimize training a little
2 parents: 3dc9a43 + 3d58510

3 files changed: +107 −51 lines


modules/hypernetworks/hypernetwork.py

Lines changed: 43 additions & 26 deletions
@@ -335,7 +335,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
     from modules import images

-    assert hypernetwork_name, 'hypernetwork not selected'
+    save_hypernetwork_every = save_hypernetwork_every or 0
+    create_image_every = create_image_every or 0
+    textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")

     path = shared.hypernetworks.get(hypernetwork_name, None)
     shared.loaded_hypernetwork = Hypernetwork()
@@ -361,39 +363,44 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     else:
         images_dir = None

+    hypernetwork = shared.loaded_hypernetwork
+    checkpoint = sd_models.select_checkpoint()
+
+    ititial_step = hypernetwork.step or 0
+    if ititial_step >= steps:
+        shared.state.textinfo = f"Model has already been trained beyond specified max steps"
+        return hypernetwork, filename
+
+    scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
+
+    # dataset loading may take a while, so input validations and early returns should be done before this
     shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
     with torch.autocast("cuda"):
         ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size)
+
     if unload:
         shared.sd_model.cond_stage_model.to(devices.cpu)
         shared.sd_model.first_stage_model.to(devices.cpu)

-    hypernetwork = shared.loaded_hypernetwork
-    weights = hypernetwork.weights()
-    for weight in weights:
-        weight.requires_grad = True
-
     size = len(ds.indexes)
     loss_dict = defaultdict(lambda : deque(maxlen = 1024))
     losses = torch.zeros((size,))
     previous_mean_losses = [0]
     previous_mean_loss = 0
     print("Mean loss of {} elements".format(size))
-
-    last_saved_file = "<none>"
-    last_saved_image = "<none>"
-    forced_filename = "<none>"
-
-    ititial_step = hypernetwork.step or 0
-    if ititial_step > steps:
-        return hypernetwork, filename
-
-    scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
+
+    weights = hypernetwork.weights()
+    for weight in weights:
+        weight.requires_grad = True
     # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
     optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)

     steps_without_grad = 0

+    last_saved_file = "<none>"
+    last_saved_image = "<none>"
+    forced_filename = "<none>"
+
     pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step)
     for i, entries in pbar:
         hypernetwork.step = i + ititial_step
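
The optimization in this hunk is a reorder: the step check and LearnRateScheduler construction now run before PersonalizedBase is built, so a hypernetwork already trained past `steps` returns immediately instead of first paying for dataset loading. A minimal sketch of that control flow, outside the diff, with hypothetical names (`train`, `load_dataset`):

def train(model, steps, load_dataset):
    # Cheap checks and the early return come first...
    initial_step = model.step or 0
    if initial_step >= steps:
        return model
    # ...expensive dataset construction happens only when training will actually run.
    ds = load_dataset()
    for i, entries in enumerate(ds):
        model.step = i + initial_step  # resume counting from the loaded step
    return model
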
@@ -446,9 +453,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log

         if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
             # Before saving, change name to match current checkpoint.
-            hypernetwork.name = f'{hypernetwork_name}-{steps_done}'
-            last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt')
-            hypernetwork.save(last_saved_file)
+            hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
+            last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
+            save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)

         textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
             "loss": f"{previous_mean_loss:.7f}",
@@ -509,13 +516,23 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
 """

     report_statistics(loss_dict)
-    checkpoint = sd_models.select_checkpoint()

-    hypernetwork.sd_checkpoint = checkpoint.hash
-    hypernetwork.sd_checkpoint_name = checkpoint.model_name
-    # Before saving for the last time, change name back to the base name (as opposed to the save_hypernetwork_every step-suffixed naming convention).
-    hypernetwork.name = hypernetwork_name
-    filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork.name}.pt')
-    hypernetwork.save(filename)
+    filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
+    save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)

     return hypernetwork, filename
+
+def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
+    old_hypernetwork_name = hypernetwork.name
+    old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
+    old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None
+    try:
+        hypernetwork.sd_checkpoint = checkpoint.hash
+        hypernetwork.sd_checkpoint_name = checkpoint.model_name
+        hypernetwork.name = hypernetwork_name
+        hypernetwork.save(filename)
+    except:
+        hypernetwork.sd_checkpoint = old_sd_checkpoint
+        hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name
+        hypernetwork.name = old_hypernetwork_name
+        raise
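
The new save_hypernetwork helper is a mutate-try-restore pattern: metadata is updated for the file being written, and if `save` raises, the in-memory object is rolled back before the exception propagates. A self-contained sketch of the same pattern, using a hypothetical Artifact class rather than the webui's Hypernetwork:

import json

class Artifact:
    # Hypothetical stand-in for an object with savable metadata.
    def __init__(self, name):
        self.name = name
        self.sd_checkpoint = None
        self.sd_checkpoint_name = None

    def save(self, filename):
        with open(filename, "w") as f:
            json.dump(vars(self), f)

def save_with_rollback(artifact, new_name, checkpoint_hash, checkpoint_name, filename):
    old = (artifact.name, artifact.sd_checkpoint, artifact.sd_checkpoint_name)
    try:
        artifact.name = new_name
        artifact.sd_checkpoint = checkpoint_hash
        artifact.sd_checkpoint_name = checkpoint_name
        artifact.save(filename)
    except:
        # Mirror the diff's bare except: restore in-memory state on any failure,
        # then re-raise so the caller still sees the original error.
        artifact.name, artifact.sd_checkpoint, artifact.sd_checkpoint_name = old
        raise

Because the bare except re-raises, the rollback also runs on KeyboardInterrupt, which is presumably why it is not narrowed to Exception.
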

modules/textual_inversion/dataset.py

Lines changed: 2 additions & 0 deletions
@@ -42,6 +42,8 @@ def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_to
         self.lines = lines

         assert data_root, 'dataset directory not specified'
+        assert os.path.isdir(data_root), "Dataset directory doesn't exist"
+        assert os.listdir(data_root), "Dataset directory is empty"

         cond_model = shared.sd_model.cond_stage_model
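
These two assertions mean a bad path now fails at dataset construction with a readable message instead of an opaque error deeper in image loading. A quick illustration of which check fires for which mistake, with hypothetical paths:

import os

def check_dataset_dir(data_root):
    # Same three checks as PersonalizedBase.__init__ above.
    assert data_root, 'dataset directory not specified'
    assert os.path.isdir(data_root), "Dataset directory doesn't exist"
    assert os.listdir(data_root), "Dataset directory is empty"

for bad_path in ("", "/tmp/typo-in-path"):  # hypothetical bad inputs
    try:
        check_dataset_dir(bad_path)
    except AssertionError as err:
        print(f"rejected {bad_path!r}: {err}")
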

modules/textual_inversion/textual_inversion.py

Lines changed: 62 additions & 25 deletions
@@ -119,7 +119,7 @@ def process_file(path, filename):
             vec = emb.detach().to(devices.device, dtype=torch.float32)
             embedding = Embedding(vec, name)
             embedding.step = data.get('step', None)
-            embedding.sd_checkpoint = data.get('hash', None)
+            embedding.sd_checkpoint = data.get('sd_checkpoint', None)
             embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
             self.register_embedding(embedding, shared.sd_model)
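
This one-line fix is a key mismatch: the embedding file stores the checkpoint hash under the key `sd_checkpoint` (the save path in this file writes `embedding.sd_checkpoint` under that name), so reading `data.get('hash', None)` on load would always come back None. A simplified sketch of the round-trip, assuming a pared-down payload rather than the webui's full format:

import torch

# Pared-down payload: the real file also carries the embedding tensor and token maps.
payload = {
    "name": "my-style",            # hypothetical embedding
    "step": 1000,
    "sd_checkpoint": "abc1234",    # the hash is stored under this key...
    "sd_checkpoint_name": "model-v1",
}
torch.save(payload, "my-style.pt")

data = torch.load("my-style.pt")
assert data.get('hash', None) is None                  # old read: always None
assert data.get('sd_checkpoint', None) == "abc1234"    # ...so the load must use it too
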

@@ -204,9 +204,30 @@ def write_loss(log_directory, filename, step, epoch_len, values):
         **values,
     })

+def validate_train_inputs(model_name, learn_rate, batch_size, data_root, template_file, steps, save_model_every, create_image_every, log_directory, name="embedding"):
+    assert model_name, f"{name} not selected"
+    assert learn_rate, "Learning rate is empty or 0"
+    assert isinstance(batch_size, int), "Batch size must be integer"
+    assert batch_size > 0, "Batch size must be positive"
+    assert data_root, "Dataset directory is empty"
+    assert os.path.isdir(data_root), "Dataset directory doesn't exist"
+    assert os.listdir(data_root), "Dataset directory is empty"
+    assert template_file, "Prompt template file is empty"
+    assert os.path.isfile(template_file), "Prompt template file doesn't exist"
+    assert steps, "Max steps is empty or 0"
+    assert isinstance(steps, int), "Max steps must be integer"
+    assert steps > 0, "Max steps must be positive"
+    assert isinstance(save_model_every, int), "Save {name} must be integer"
+    assert save_model_every >= 0, "Save {name} must be positive or 0"
+    assert isinstance(create_image_every, int), "Create image must be integer"
+    assert create_image_every >= 0, "Create image must be positive or 0"
+    if save_model_every or create_image_every:
+        assert log_directory, "Log directory is empty"

 def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
-    assert embedding_name, 'embedding not selected'
+    save_embedding_every = save_embedding_every or 0
+    create_image_every = create_image_every or 0
+    validate_train_inputs(embedding_name, learn_rate, batch_size, data_root, template_file, steps, save_embedding_every, create_image_every, log_directory, name="embedding")

     shared.state.textinfo = "Initializing textual inversion training..."
     shared.state.job_count = steps
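
validate_train_inputs is deliberately assertion-based: both trainers call it first, and the AssertionError message is what reaches the user instead of a run that fails much later. A hypothetical call site (argument values invented) showing how a caller might catch it:

try:
    validate_train_inputs(
        "my-embedding", learn_rate="0.005", batch_size=1,
        data_root="/data/train", template_file="style_filewords.txt",
        steps=10000, save_model_every=500, create_image_every=500,
        log_directory="textual_inversion", name="embedding",
    )
except AssertionError as err:
    print(f"training aborted: {err}")  # e.g. "Dataset directory doesn't exist"
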
@@ -232,17 +253,28 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
         os.makedirs(images_embeds_dir, exist_ok=True)
     else:
         images_embeds_dir = None
-
+
     cond_model = shared.sd_model.cond_stage_model

+    hijack = sd_hijack.model_hijack
+
+    embedding = hijack.embedding_db.word_embeddings[embedding_name]
+    checkpoint = sd_models.select_checkpoint()
+
+    ititial_step = embedding.step or 0
+    if ititial_step >= steps:
+        shared.state.textinfo = f"Model has already been trained beyond specified max steps"
+        return embedding, filename
+
+    scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
+
+    # dataset loading may take a while, so input validations and early returns should be done before this
     shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
     with torch.autocast("cuda"):
         ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size)

-    hijack = sd_hijack.model_hijack
-
-    embedding = hijack.embedding_db.word_embeddings[embedding_name]
     embedding.vec.requires_grad = True
+    optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)

     losses = torch.zeros((32,))

@@ -251,13 +283,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
     forced_filename = "<none>"
     embedding_yet_to_be_embedded = False

-    ititial_step = embedding.step or 0
-    if ititial_step > steps:
-        return embedding, filename
-
-    scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
-    optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
-
     pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step)
     for i, entries in pbar:
         embedding.step = i + ititial_step
@@ -290,9 +315,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc

         if embedding_dir is not None and steps_done % save_embedding_every == 0:
             # Before saving, change name to match current checkpoint.
-            embedding.name = f'{embedding_name}-{steps_done}'
-            last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt')
-            embedding.save(last_saved_file)
+            embedding_name_every = f'{embedding_name}-{steps_done}'
+            last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt')
+            save_embedding(embedding, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True)
             embedding_yet_to_be_embedded = True

         write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), {
@@ -373,14 +398,26 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
 </p>
 """

-    checkpoint = sd_models.select_checkpoint()
-
-    embedding.sd_checkpoint = checkpoint.hash
-    embedding.sd_checkpoint_name = checkpoint.model_name
-    embedding.cached_checksum = None
-    # Before saving for the last time, change name back to base name (as opposed to the save_embedding_every step-suffixed naming convention).
-    embedding.name = embedding_name
-    filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding.name}.pt')
-    embedding.save(filename)
+    filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
+    save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True)

     return embedding, filename
+
+def save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True):
+    old_embedding_name = embedding.name
+    old_sd_checkpoint = embedding.sd_checkpoint if hasattr(embedding, "sd_checkpoint") else None
+    old_sd_checkpoint_name = embedding.sd_checkpoint_name if hasattr(embedding, "sd_checkpoint_name") else None
+    old_cached_checksum = embedding.cached_checksum if hasattr(embedding, "cached_checksum") else None
+    try:
+        embedding.sd_checkpoint = checkpoint.hash
+        embedding.sd_checkpoint_name = checkpoint.model_name
+        if remove_cached_checksum:
+            embedding.cached_checksum = None
+        embedding.name = embedding_name
+        embedding.save(filename)
+    except:
+        embedding.sd_checkpoint = old_sd_checkpoint
+        embedding.sd_checkpoint_name = old_sd_checkpoint_name
+        embedding.name = old_embedding_name
+        embedding.cached_checksum = old_cached_checksum
+        raise
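
Net effect of the two save paths: periodic snapshots get step-suffixed filenames while the final save keeps the base name, and both now go through save_embedding so metadata stays consistent and is restored on failure. A small illustration of the filenames produced (directory name hypothetical):

import os

embedding_name = "my-style"
embeddings_dir = "embeddings"  # stands in for shared.cmd_opts.embeddings_dir

# Periodic snapshot, every save_embedding_every steps:
print(os.path.join(embeddings_dir, f"{embedding_name}-500.pt"))  # embeddings/my-style-500.pt

# Final save at the end of training:
print(os.path.join(embeddings_dir, f"{embedding_name}.pt"))      # embeddings/my-style.pt
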
