From 388d666fd6c8cf2c4a7c21aad2ecf2ae404bfd5a Mon Sep 17 00:00:00 2001 From: xander Date: Mon, 13 Feb 2023 11:48:53 -0800 Subject: [PATCH 01/24] improvements to face cropping, person template and h_flipping --- lora_diffusion/preprocess_files.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lora_diffusion/preprocess_files.py b/lora_diffusion/preprocess_files.py index bedb89f..de4e6fe 100644 --- a/lora_diffusion/preprocess_files.py +++ b/lora_diffusion/preprocess_files.py @@ -150,7 +150,6 @@ def blip_captioning_dataset( return captions - def face_mask_google_mediapipe( images: List[Image.Image], blur_amount: float = 80.0, bias: float = 0.05 ) -> List[Image.Image]: From a87954cca9ed9849d6d7ae3f2f3c1a8b912b61f7 Mon Sep 17 00:00:00 2001 From: xander Date: Mon, 13 Feb 2023 11:49:37 -0800 Subject: [PATCH 02/24] improvements to face cropping, person template and h_flipping --- lora_diffusion/cli_lora_add.py | 30 ++++++++---- lora_diffusion/cli_lora_pti.py | 85 +++++++++++++++++++++++++++++++-- lora_diffusion/dataset.py | 87 ++++++++++++++++++++++++++++++---- lora_diffusion/lora.py | 2 +- 4 files changed, 182 insertions(+), 22 deletions(-) diff --git a/lora_diffusion/cli_lora_add.py b/lora_diffusion/cli_lora_add.py index fc7f7e4..b6ef5c5 100644 --- a/lora_diffusion/cli_lora_add.py +++ b/lora_diffusion/cli_lora_add.py @@ -6,14 +6,24 @@ from safetensors.torch import safe_open, save_file import torch -from .lora import ( - tune_lora_scale, - patch_pipe, - collapse_lora, - monkeypatch_remove_lora, -) -from .lora_manager import lora_join -from .to_ckpt_v2 import convert_to_ckpt + +try: + from .lora import ( + tune_lora_scale, + patch_pipe, + collapse_lora, + monkeypatch_remove_lora, + ) +except: + from lora_diffusion import ( + tune_lora_scale, + patch_pipe, + collapse_lora, + monkeypatch_remove_lora, + ) + +from lora_diffusion.lora_manager import lora_join +from lora_diffusion.to_ckpt_v2 import convert_to_ckpt def _text_lora_path(path: str) -> str: @@ -185,3 +195,7 @@ def add( def main(): fire.Fire(add) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/lora_diffusion/cli_lora_pti.py b/lora_diffusion/cli_lora_pti.py index 83703d0..d88046f 100644 --- a/lora_diffusion/cli_lora_pti.py +++ b/lora_diffusion/cli_lora_pti.py @@ -7,6 +7,8 @@ import itertools import math import os +import json +import time import random import re from pathlib import Path @@ -162,6 +164,7 @@ def collate_fn(examples): train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=train_batch_size, + num_workers=4, shuffle=True, collate_fn=collate_fn, ) @@ -337,6 +340,7 @@ def train_inversion( clip_ti_decay: bool = True, ): + print("Performing Inversion....") progress_bar = tqdm(range(num_steps)) progress_bar.set_description("Steps") global_step = 0 @@ -349,6 +353,7 @@ def train_inversion( index_updates = ~index_no_updates loss_sum = 0.0 + losses = [] for epoch in range(math.ceil(num_steps / len(dataloader))): unet.eval() @@ -371,6 +376,7 @@ def train_inversion( / accum_iter ) + losses.append(loss.detach().mean().item()) loss.backward() loss_sum += loss.detach().item() @@ -423,6 +429,7 @@ def train_inversion( progress_bar.set_postfix(**logs) if global_step % save_steps == 0: + plot_loss_curve(losses, "textual_inversion") save_all( unet=unet, text_encoder=text_encoder, @@ -470,6 +477,16 @@ def train_inversion( if global_step >= num_steps: return +import matplotlib.pyplot as plt +import numpy as np +def plot_loss_curve(losses, name, moving_avg=20): + losses = np.array(losses) + losses = 
np.convolve(losses, np.ones(moving_avg)/moving_avg, mode='valid') + plt.plot(losses) + plt.xlabel("Step") + plt.ylabel("Loss") + plt.savefig(f"{name}.png") + plt.clf() def perform_tuning( unet, @@ -495,7 +512,7 @@ def perform_tuning( class_token: str = "person", train_inpainting: bool = False, ): - + print("Performing Tuning....") progress_bar = tqdm(range(num_steps)) progress_bar.set_description("Steps") global_step = 0 @@ -508,9 +525,13 @@ def perform_tuning( if log_wandb: preped_clip = prepare_clip_model_sets() + print(f"Performing {math.ceil(num_steps / len(dataloader))} epochs of training!") loss_sum = 0.0 + losses = [] for epoch in range(math.ceil(num_steps / len(dataloader))): + dataloader.dataset.tune_h_flip_prob(epoch / math.ceil(num_steps / len(dataloader))) + for batch in dataloader: lr_scheduler_lora.step() @@ -540,10 +561,15 @@ def perform_tuning( "lr": lr_scheduler_lora.get_last_lr()[0], } progress_bar.set_postfix(**logs) + losses.append(loss.detach().item()) + global_step += 1 if global_step % save_steps == 0: + # plot the loss curve: + plot_loss_curve(losses, "tuning") + save_all( unet, text_encoder, @@ -619,6 +645,24 @@ def perform_tuning( target_replace_module_unet=lora_unet_target_modules, ) +def preview_training_batch(train_dataloader, mode, n_imgs = 40): + outdir = f"training_batch_preview/{mode}" + os.makedirs(outdir, exist_ok=True) + imgs_saved = 0 + + while True: + for batch_i, batch in enumerate(train_dataloader): + imgs = batch["pixel_values"] + for i, img_torch in enumerate(imgs): + img_torch = (img_torch+1) /2 + # convert to pil and save to disk: + img = Image.fromarray((255.*img_torch).permute(1, 2, 0).detach().cpu().numpy().astype(np.uint8)) + img.save(f"{outdir}/preview_{imgs_saved}.jpg") + imgs_saved += 1 + + if imgs_saved > n_imgs: + print(f"\nSaved {imgs_saved} preview training imgs to {outdir}") + return def train( instance_data_dir: str, @@ -683,8 +727,12 @@ def train( enable_xformers_memory_efficient_attention: bool = False, out_name: str = "final_lora", ): + script_start_time = time.time() torch.manual_seed(seed) + # Get a dict with all the arguments: + args_dict = locals() + if log_wandb: wandb.init( project=wandb_project_name, @@ -704,7 +752,6 @@ def train( print("PTI : Placeholder Tokens not given, using null token") else: placeholder_tokens = placeholder_tokens.split("|") - assert ( sorted(placeholder_tokens) == placeholder_tokens ), f"Placeholder tokens should be sorted. 
Use something like {'|'.join(sorted(placeholder_tokens))}'" @@ -815,6 +862,9 @@ def train( # STEP 1 : Perform Inversion if perform_inversion: + preview_training_batch(train_dataloader, "inversion") + + print("PTI : Performing Inversion") ti_optimizer = optim.AdamW( text_encoder.get_input_embeddings().parameters(), lr=ti_lr, @@ -823,6 +873,9 @@ def train( weight_decay=weight_decay_ti, ) + token_ids_positions_to_update = np.where(index_no_updates.cpu().numpy() == 0) + print("Training embedding of size", text_encoder.get_input_embeddings().weight[token_ids_positions_to_update].shape) + lr_scheduler = get_scheduler( lr_scheduler, optimizer=ti_optimizer, @@ -856,6 +909,7 @@ def train( ) del ti_optimizer + print("############### Inversion Done ###############") # Next perform Tuning with LoRA: if not use_extended_lora: @@ -866,18 +920,23 @@ def train( dropout_p=lora_dropout_p, scale=lora_scale, ) + print("PTI : not use_extended_lora...") else: print("PTI : USING EXTENDED UNET!!!") lora_unet_target_modules = ( lora_unet_target_modules | UNET_EXTENDED_TARGET_REPLACE ) print("PTI : Will replace modules: ", lora_unet_target_modules) - unet_lora_params, _ = inject_trainable_lora_extended( unet, r=lora_rank, target_replace_module=lora_unet_target_modules ) - print(f"PTI : has {len(unet_lora_params)} lora") + n_optimizable_unet_params = sum( + [el.numel() for el in itertools.chain(*unet_lora_params)] + ) + print("PTI : n_optimizable_unet_params: ", n_optimizable_unet_params) + + print(f"PTI : has {len(unet_lora_params)} lora") print("PTI : Before training:") inspect_lora(unet) @@ -924,6 +983,7 @@ def train( unet.train() if train_text_encoder: + print("Training text encoder!") text_encoder.train() train_dataset.blur_amount = 70 @@ -935,6 +995,8 @@ def train( num_training_steps=max_train_steps_tuning, ) + preview_training_batch(train_dataloader, "tuning") + perform_tuning( unet, vae, @@ -960,6 +1022,21 @@ def train( train_inpainting=train_inpainting, ) + print("############### Tuning Done ###############") + training_time = time.time() - script_start_time + print(f"Training time: {training_time/60:.1f} minutes") + args_dict["training_time_s"] = int(training_time) + + # get the templates: + if use_template is not None: + args_dict["train_template"] = train_dataloader.templates + + # Save the args_dict to the output directory as a json file: + with open(os.path.join(output_dir, "lora_training_args.json"), "w") as f: + json.dump(args_dict, f, default=lambda o: '', indent=2) def main(): fire.Fire(train) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/lora_diffusion/dataset.py b/lora_diffusion/dataset.py index 2a46313..39910f7 100644 --- a/lora_diffusion/dataset.py +++ b/lora_diffusion/dataset.py @@ -42,6 +42,32 @@ "a photo of a small {}", ] +PERSON_TEMPLATE = [ + "{}", + "a picture of {}", + "a closeup of {}", + "a closeup photo of {}", + "a close-up picture of {}", + "a photo of {}", + "a photo of {}", + "the photo of {}", + "a cropped photo of {}", + "a funny photo of {}", + "a selfie of {}", + "a photo of the handsome {}", + "a photo of the beautiful {}", + "a selfie taken by the handsome {}", + "a selfie taken by {}", + "a photo of the cool {}", + "a close-up photo of {}", + "a bright photo of {}", + "a cropped photo of {}", + "a good photo of {}", + "a beautiful picture of {}", + "a photo showing {}", + "a good photo of {}", +] + STYLE_TEMPLATE = [ "a painting in the style of {}", "a rendering in the style of {}", @@ -68,6 +94,7 @@ TEMPLATE_MAP = { "object": 
OBJECT_TEMPLATE,
+    "person": PERSON_TEMPLATE,
     "style": STYLE_TEMPLATE,
     "null": NULL_TEMPLATE,
 }
@@ -119,6 +146,28 @@ def _generate_random_mask(image):
     return mask, masked_image
 
 
+def expand_rectangle(mask, f):
+    rows, cols = np.where(mask == 255)
+    top_row, bottom_row = np.min(rows), np.max(rows)
+    left_col, right_col = np.min(cols), np.max(cols)
+
+    rect_height, rect_width = bottom_row - top_row + 1, right_col - left_col + 1
+    new_height, new_width = np.round(rect_height * f), np.round(rect_width * f)
+
+    center_row, center_col = top_row + rect_height // 2, left_col + rect_width // 2
+    top_row, bottom_row = np.round(center_row - new_height / 2), np.round(center_row + new_height / 2)
+    left_col, right_col = np.round(center_col - new_width / 2), np.round(center_col + new_width / 2)
+
+    top_row, bottom_row = int(np.clip(top_row, 0, mask.shape[0] - 1)), int(np.clip(bottom_row, 0, mask.shape[0] - 1))
+    left_col, right_col = int(np.clip(left_col, 0, mask.shape[1] - 1)), int(np.clip(right_col, 0, mask.shape[1] - 1))
+
+    expanded_mask = np.ones_like(mask)
+    expanded_mask[top_row:bottom_row + 1, left_col:right_col + 1] = 255
+
+    return expanded_mask
+
+
+
 class PivotalTuningDatasetCapation(Dataset):
     """
     A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
@@ -147,6 +196,7 @@ def __init__(
         self.tokenizer = tokenizer
         self.resize = resize
         self.train_inpainting = train_inpainting
+        self.h_flip_prob = 0.5
 
         instance_data_root = Path(instance_data_root)
         if not instance_data_root.exists():
@@ -162,6 +212,8 @@ def __init__(
         # Prepare the instance images
         if use_mask_captioned_data:
             src_imgs = glob.glob(str(instance_data_root) + "/*src.jpg")
+            src_imgs = sorted(src_imgs, key=lambda x: int(str(Path(x).stem).split(".")[0]))
+
             for f in src_imgs:
                 idx = int(str(Path(f).stem).split(".")[0])
                 mask_path = f"{instance_data_root}/{idx}.mask.png"
@@ -224,6 +276,20 @@ def __init__(
                     ]
                 )
                 for idx, mask in enumerate(masks):
+                    # if the entire mask is black, make it white instead:
+                    avg_pixel_value = np.array(mask.getdata()).mean()
+                    if avg_pixel_value == 1.0:
+                        #mask = Image.new("L", mask.size, 255)
+                        print(f"No mask detected for {idx}..")
+                    else:
+                        if 1:
+                            # convert to numpy array:
+                            mask = np.array(mask)
+                            # Make the rectangular mask region bigger:
+                            mask = expand_rectangle(mask, 1.25)
+                            # convert back to PIL image:
+                            mask = Image.fromarray(mask)
+
                     mask.save(f"{instance_data_root}/{idx}.mask.png")
 
                 break
@@ -255,12 +321,13 @@ def __init__(
         self.h_flip = h_flip
         self.image_transforms = transforms.Compose(
             [
+                transforms.RandomAffine(degrees=0, translate=(0, 0), scale=(1.0, 1.2)),
                 transforms.Resize(
                     size, interpolation=transforms.InterpolationMode.BILINEAR
                 )
                 if resize
                 else transforms.Lambda(lambda x: x),
-                transforms.ColorJitter(0.1, 0.1)
+                transforms.ColorJitter(0.1, 0.1, 0.02, 0.02)
                 if color_jitter
                 else transforms.Lambda(lambda x: x),
                 transforms.CenterCrop(size),
@@ -271,6 +338,12 @@ def __init__(
 
         self.blur_amount = blur_amount
 
+    def tune_h_flip_prob(self, training_progress, end_prob = 0.25):
+        if self.h_flip:
+            # Tune the h_flip probability to be 0.5 when training_progress is 0 and end_prob when training_progress is 1
+            self.h_flip_prob = 0.5 + (end_prob - 0.5) * training_progress
+            print(f"h_flip_prob: {self.h_flip_prob:.3f}")
+
     def __len__(self):
         return self._length
 
@@ -301,18 +374,14 @@ def __getitem__(self, index):
             for token, value in self.token_map.items():
                 text = text.replace(token, value)
 
+        mask_str = "using mask! " if self.use_mask else ""
         print(text)
 
         if self.use_mask:
-            example["mask"] = (
-                self.image_transforms(
-                    Image.open(self.mask_path[index % self.num_instance_images])
-                )
-                * 0.5
-                + 1.0
-            )
+            img_mask = Image.open(self.mask_path[index % self.num_instance_images])
+            example["mask"] = (self.image_transforms(img_mask)* 0.5 + 1.0)
 
-        if self.h_flip and random.random() > 0.5:
+        if self.h_flip and random.random() < self.h_flip_prob:
             hflip = transforms.RandomHorizontalFlip(p=1)
             example["instance_images"] = hflip(example["instance_images"])
 
diff --git a/lora_diffusion/lora.py b/lora_diffusion/lora.py
index 8753f15..bc3c5d1 100644
--- a/lora_diffusion/lora.py
+++ b/lora_diffusion/lora.py
@@ -914,7 +914,7 @@ def apply_learned_embed_in_clip(
         trained_tokens = list(learned_embeds.keys())
 
     for token in trained_tokens:
-        print(token)
+        print("Adding new token: ", token)
         embeds = learned_embeds[token]
 
         # cast to dtype of text_encoder

From f476828fa2ccfee07e88c3866a1d65bc44e7d2d2 Mon Sep 17 00:00:00 2001
From: xander
Date: Mon, 13 Feb 2023 12:01:11 -0800
Subject: [PATCH 03/24] xander updates

---
 textual_inversion.png | Bin 0 -> 44633 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 textual_inversion.png

diff --git a/textual_inversion.png b/textual_inversion.png
new file mode 100644
index 0000000000000000000000000000000000000000..8c39982554f66632a69deb2ff1b06f24f81206fd
GIT binary patch
literal 44633
[... 44633 bytes of base85-encoded PNG data omitted ...]
zJ=r8BB?(~qKY8(jF(4pd#0v!}-xvg40YMThY0hXj7R+WXNPC~)z7&d5K%uGP>ea5i z5il?s&R5Uv>G}zjAa20ZG6aprMgcN#w2mG}!%q8~AtU*ruMbvn#UFgP!n6(;0T_6w zH2!0=Y~bTFYOK;AypGqkGU%X5=EN2a%;5I#85met@aC3JN}wqXK$hPDHe!Cjnirb# zMeQ3zR3b>pr9Tp+^<9u?S=rfjm%InUIs$%q@Ep5&1Z5;30hI@wf`-8=KT3$;x~Bgo z%4hPczf5_ik~#}XPyv+ADlc%usmMwO?D@0XlZ`vT$u#UoUK2p&$mr195Wr~%tDh4c z3z`ZNmz1mnr;8g+e+dSzAA&TOP;t--FsX#2tXQba@2@U+z%Vr?51`ab0oWfSdRr4r z#}K#y!(a#3!3mtr5Zu6S&B<=rjYuBA1~+X-qataHvjyN^`Ly?Fe>+|O?AK&rfeZGD urg#X%pDTWc_5M%q Date: Mon, 13 Feb 2023 12:05:35 -0800 Subject: [PATCH 04/24] make import more flexible --- lora_diffusion/cli_lora_add.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lora_diffusion/cli_lora_add.py b/lora_diffusion/cli_lora_add.py index b6ef5c5..69058aa 100644 --- a/lora_diffusion/cli_lora_add.py +++ b/lora_diffusion/cli_lora_add.py @@ -14,7 +14,11 @@ collapse_lora, monkeypatch_remove_lora, ) -except: + + from .lora_manager import lora_join + from .to_ckpt_v2 import convert_to_ckpt + +except: # allows running the repo without installing it (can mess up existing dependencies) from lora_diffusion import ( tune_lora_scale, patch_pipe, @@ -22,8 +26,8 @@ monkeypatch_remove_lora, ) -from lora_diffusion.lora_manager import lora_join -from lora_diffusion.to_ckpt_v2 import convert_to_ckpt + from lora_diffusion.lora_manager import lora_join + from lora_diffusion.to_ckpt_v2 import convert_to_ckpt def _text_lora_path(path: str) -> str: From ef14040d9356168cd95e27f614090c576e892ef5 Mon Sep 17 00:00:00 2001 From: xander Date: Mon, 13 Feb 2023 12:07:15 -0800 Subject: [PATCH 05/24] cleanup --- lora_diffusion/dataset.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lora_diffusion/dataset.py b/lora_diffusion/dataset.py index c204e66..0439217 100644 --- a/lora_diffusion/dataset.py +++ b/lora_diffusion/dataset.py @@ -270,10 +270,8 @@ def __init__( ] ) for idx, mask in enumerate(masks): - # if the entire mask is black, make it white instead: avg_pixel_value = np.array(mask.getdata()).mean() if avg_pixel_value == 1.0: - #mask = Image.new("L", mask.size, 255) print(f"No mask detected for {idx}..") else: if 1: From cfde3e7ae08593607d9e548d1dd091848a68607a Mon Sep 17 00:00:00 2001 From: xander Date: Mon, 13 Feb 2023 12:08:05 -0800 Subject: [PATCH 06/24] cleanup --- lora_diffusion/dataset.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lora_diffusion/dataset.py b/lora_diffusion/dataset.py index 0439217..89cc1ca 100644 --- a/lora_diffusion/dataset.py +++ b/lora_diffusion/dataset.py @@ -353,8 +353,7 @@ def __getitem__(self, index): if self.token_map is not None: for token, value in self.token_map.items(): text = text.replace(token, value) - - mask_str = "using mask! 
" if self.use_mask else "" + print(text) if self.use_mask: From 9a77b6569d4248fcc09bf2b6523a427717de2a26 Mon Sep 17 00:00:00 2001 From: xander Date: Mon, 13 Feb 2023 12:34:34 -0800 Subject: [PATCH 07/24] fix bug --- lora_diffusion/cli_lora_pti.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lora_diffusion/cli_lora_pti.py b/lora_diffusion/cli_lora_pti.py index 8feb0eb..f065ba4 100644 --- a/lora_diffusion/cli_lora_pti.py +++ b/lora_diffusion/cli_lora_pti.py @@ -939,6 +939,7 @@ def train( if cached_latents: vae = None + # STEP 1 : Perform Inversion if perform_inversion: preview_training_batch(train_dataloader, "inversion") @@ -1108,10 +1109,6 @@ def train( print(f"Training time: {training_time/60:.1f} minutes") args_dict["training_time_s"] = int(training_time) - # get the templates: - if use_template is not None: - args_dict["train_template"] = train_dataloader.templates - # Save the args_dict to the output directory as a json file: with open(os.path.join(output_dir, "lora_training_args.json"), "w") as f: json.dump(args_dict, f, default=lambda o: '', indent=2) From 61f1a19c76af5e1171328692018cbc27a83c3bbb Mon Sep 17 00:00:00 2001 From: xander Date: Mon, 13 Feb 2023 13:39:52 -0800 Subject: [PATCH 08/24] fix bugs --- lora_diffusion/dataset.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lora_diffusion/dataset.py b/lora_diffusion/dataset.py index 89cc1ca..53c63ad 100644 --- a/lora_diffusion/dataset.py +++ b/lora_diffusion/dataset.py @@ -1,7 +1,7 @@ import random from pathlib import Path from typing import Dict, List, Optional, Tuple, Union - +import numpy as np from PIL import Image from torch import zeros_like from torch.utils.data import Dataset @@ -353,7 +353,7 @@ def __getitem__(self, index): if self.token_map is not None: for token, value in self.token_map.items(): text = text.replace(token, value) - + print(text) if self.use_mask: From cf69309bc189e8b7a61790c67f25efeef97cb66b Mon Sep 17 00:00:00 2001 From: xander Date: Mon, 13 Feb 2023 13:46:35 -0800 Subject: [PATCH 09/24] bugfixes --- lora_diffusion/cli_lora_pti.py | 40 ++++++++++++++++++---------------- lora_diffusion/utils.py | 1 - 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/lora_diffusion/cli_lora_pti.py b/lora_diffusion/cli_lora_pti.py index f065ba4..8e906d9 100644 --- a/lora_diffusion/cli_lora_pti.py +++ b/lora_diffusion/cli_lora_pti.py @@ -44,10 +44,31 @@ save_all, prepare_clip_model_sets, evaluate_pipe, + preview_training_batch, UNET_EXTENDED_TARGET_REPLACE, + ) +def preview_training_batch(train_dataloader, mode, n_imgs = 40): + outdir = f"training_batch_preview/{mode}" + os.makedirs(outdir, exist_ok=True) + imgs_saved = 0 + + while True: + for batch_i, batch in enumerate(train_dataloader): + imgs = batch["pixel_values"] + for i, img_torch in enumerate(imgs): + img_torch = (img_torch+1) /2 + # convert to pil and save to disk: + img = Image.fromarray((255.*img_torch).permute(1, 2, 0).detach().cpu().numpy().astype(np.uint8)).convert("RGB") + img.save(f"{outdir}/preview_{imgs_saved}.jpg") + imgs_saved += 1 + + if imgs_saved > n_imgs: + print(f"\nSaved {imgs_saved} preview training imgs to {outdir}") + return + def get_models( pretrained_model_name_or_path, pretrained_vae_name_or_path, @@ -724,25 +745,6 @@ def perform_tuning( target_replace_module_unet=lora_unet_target_modules, ) -def preview_training_batch(train_dataloader, mode, n_imgs = 40): - outdir = f"training_batch_preview/{mode}" - os.makedirs(outdir, exist_ok=True) - imgs_saved = 0 - - while True: - for 
batch_i, batch in enumerate(train_dataloader): - imgs = batch["pixel_values"] - for i, img_torch in enumerate(imgs): - img_torch = (img_torch+1) /2 - # convert to pil and save to disk: - img = Image.fromarray((255.*img_torch).permute(1, 2, 0).detach().cpu().numpy().astype(np.uint8)) - img.save(f"{outdir}/preview_{imgs_saved}.jpg") - imgs_saved += 1 - - if imgs_saved > n_imgs: - print(f"\nSaved {imgs_saved} preview training imgs to {outdir}") - return - def train( instance_data_dir: str, pretrained_model_name_or_path: str, diff --git a/lora_diffusion/utils.py b/lora_diffusion/utils.py index d8a3410..b8b4483 100644 --- a/lora_diffusion/utils.py +++ b/lora_diffusion/utils.py @@ -50,7 +50,6 @@ "A watercolor painting of on a beach", ] - def image_grid(_imgs, rows=None, cols=None): if rows is None and cols is None: From 7399d821b1d87683b5fb41f91344c5c9beda6e52 Mon Sep 17 00:00:00 2001 From: xander Date: Mon, 13 Feb 2023 13:47:16 -0800 Subject: [PATCH 10/24] bugfixes --- lora_diffusion/cli_lora_pti.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lora_diffusion/cli_lora_pti.py b/lora_diffusion/cli_lora_pti.py index 8e906d9..641a2e3 100644 --- a/lora_diffusion/cli_lora_pti.py +++ b/lora_diffusion/cli_lora_pti.py @@ -44,9 +44,7 @@ save_all, prepare_clip_model_sets, evaluate_pipe, - preview_training_batch, UNET_EXTENDED_TARGET_REPLACE, - ) def preview_training_batch(train_dataloader, mode, n_imgs = 40): @@ -68,7 +66,7 @@ def preview_training_batch(train_dataloader, mode, n_imgs = 40): print(f"\nSaved {imgs_saved} preview training imgs to {outdir}") return - + def get_models( pretrained_model_name_or_path, pretrained_vae_name_or_path, From 49610b540c3b38922bae2d2471ad84ad528f6512 Mon Sep 17 00:00:00 2001 From: xander Date: Mon, 13 Feb 2023 14:30:21 -0800 Subject: [PATCH 11/24] more bugfixes --- lora_diffusion/cli_lora_pti.py | 12 ++++-------- lora_diffusion/preprocess_files.py | 4 ++++ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/lora_diffusion/cli_lora_pti.py b/lora_diffusion/cli_lora_pti.py index 641a2e3..160dfdc 100644 --- a/lora_diffusion/cli_lora_pti.py +++ b/lora_diffusion/cli_lora_pti.py @@ -194,18 +194,12 @@ def collate_fn(examples): return batch - train_dataloader = torch.utils.data.DataLoader( - train_dataset, - batch_size=train_batch_size, - num_workers=4, - shuffle=True, - collate_fn=collate_fn, - ) if cached_latents: train_dataloader = torch.utils.data.DataLoader( cached_latents_dataset, batch_size=train_batch_size, + num_workers=4, shuffle=True, collate_fn=collate_fn, ) @@ -216,6 +210,7 @@ def collate_fn(examples): train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=train_batch_size, + num_workers=4, shuffle=True, collate_fn=collate_fn, ) @@ -277,6 +272,7 @@ def collate_fn(examples): train_dataloader = torch.utils.data.DataLoader( train_dataset, + num_workers = 4, batch_size=train_batch_size, shuffle=True, collate_fn=collate_fn, @@ -941,7 +937,7 @@ def train( vae = None # STEP 1 : Perform Inversion - if perform_inversion: + if perform_inversion and not cached_latents: preview_training_batch(train_dataloader, "inversion") print("PTI : Performing Inversion") diff --git a/lora_diffusion/preprocess_files.py b/lora_diffusion/preprocess_files.py index de4e6fe..eb66e28 100644 --- a/lora_diffusion/preprocess_files.py +++ b/lora_diffusion/preprocess_files.py @@ -324,3 +324,7 @@ def load_and_save_masks_and_captions( def main(): fire.Fire(load_and_save_masks_and_captions) + + +if __name__ == "__main__": + main() \ No newline at end of 
file From 7445169668e2923d1a694eb8efd0565902fe6834 Mon Sep 17 00:00:00 2001 From: xander Date: Mon, 13 Feb 2023 14:33:54 -0800 Subject: [PATCH 12/24] disable training previews when using cached_latents --- lora_diffusion/cli_lora_pti.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lora_diffusion/cli_lora_pti.py b/lora_diffusion/cli_lora_pti.py index 160dfdc..2980a07 100644 --- a/lora_diffusion/cli_lora_pti.py +++ b/lora_diffusion/cli_lora_pti.py @@ -1071,8 +1071,8 @@ def train( num_warmup_steps=lr_warmup_steps_lora, num_training_steps=max_train_steps_tuning, ) - - preview_training_batch(train_dataloader, "tuning") + if not cached_latents: + preview_training_batch(train_dataloader, "tuning") perform_tuning( unet, From fd154d1b1fbdf280fb68fcf627289aba886c4777 Mon Sep 17 00:00:00 2001 From: xander Date: Mon, 13 Feb 2023 14:42:56 -0800 Subject: [PATCH 13/24] disable h_flip_prob tuning when cached latents is active --- lora_diffusion/cli_lora_pti.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lora_diffusion/cli_lora_pti.py b/lora_diffusion/cli_lora_pti.py index 2980a07..1946ad7 100644 --- a/lora_diffusion/cli_lora_pti.py +++ b/lora_diffusion/cli_lora_pti.py @@ -12,6 +12,7 @@ import random import re from pathlib import Path +import numpy as np from typing import Optional, List, Literal import torch @@ -570,13 +571,13 @@ def train_inversion( return import matplotlib.pyplot as plt -import numpy as np def plot_loss_curve(losses, name, moving_avg=20): losses = np.array(losses) losses = np.convolve(losses, np.ones(moving_avg)/moving_avg, mode='valid') plt.plot(losses) plt.xlabel("Step") plt.ylabel("Loss") + plt.title(f"Losses during {name} phase:") plt.savefig(f"{name}.png") plt.clf() @@ -623,7 +624,8 @@ def perform_tuning( losses = [] for epoch in range(math.ceil(num_steps / len(dataloader))): - dataloader.dataset.tune_h_flip_prob(epoch / math.ceil(num_steps / len(dataloader))) + if not cached_latents: + dataloader.dataset.tune_h_flip_prob(epoch / math.ceil(num_steps / len(dataloader))) for batch in dataloader: lr_scheduler_lora.step() From 3f10e64e3b2a0051984594c89cd76f40acc5223d Mon Sep 17 00:00:00 2001 From: xander Date: Mon, 13 Feb 2023 15:31:34 -0800 Subject: [PATCH 14/24] minor updates --- lora_diffusion/dataset.py | 3 +++ lora_diffusion/preprocess_files.py | 4 +--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/lora_diffusion/dataset.py b/lora_diffusion/dataset.py index 53c63ad..9b65dbe 100644 --- a/lora_diffusion/dataset.py +++ b/lora_diffusion/dataset.py @@ -318,6 +318,9 @@ def __init__( self.blur_amount = blur_amount + print("Captions:") + print(self.captions) + def tune_h_flip_prob(self, training_progress, end_prob = 0.25): if self.h_flip: # Tune the h_flip probability to be 0.5 training_progress is 0 and end_prob when training_progress is 1 diff --git a/lora_diffusion/preprocess_files.py b/lora_diffusion/preprocess_files.py index eb66e28..893a692 100644 --- a/lora_diffusion/preprocess_files.py +++ b/lora_diffusion/preprocess_files.py @@ -262,9 +262,7 @@ def load_and_save_masks_and_captions( # check if it is a directory if os.path.isdir(files): # get all the .png .jpg in the directory - files = glob.glob(os.path.join(files, "*.png")) + glob.glob( - os.path.join(files, "*.jpg") - ) + files = glob.glob(os.path.join(files, "*.png")) + glob.glob(os.path.join(files, "*.jpg")) + glob.glob(os.path.join(files, "*.jpeg")) if len(files) == 0: raise Exception( From 2b214d0440cb1b94be9e2a9870e4cca1bbb38c96 Mon Sep 17 00:00:00 2001 
From: xander Date: Tue, 14 Feb 2023 00:45:29 -0800 Subject: [PATCH 15/24] fix ti continuation --- lora_diffusion/cli_lora_pti.py | 44 +++++++++++++++++++++++++++------- lora_diffusion/dataset.py | 36 +++++++++++++++++++++++----- 2 files changed, 65 insertions(+), 15 deletions(-) diff --git a/lora_diffusion/cli_lora_pti.py b/lora_diffusion/cli_lora_pti.py index 1946ad7..20d30dd 100644 --- a/lora_diffusion/cli_lora_pti.py +++ b/lora_diffusion/cli_lora_pti.py @@ -130,6 +130,11 @@ def get_models( initializer_token_id = token_ids[0] token_embeds[placeholder_token_id] = token_embeds[initializer_token_id] + # print some stats about the token embedding: + t = token_embeds[placeholder_token_id] + print(f"init_token {init_tok} --> mean: {t.mean().item():.3f}, std: {t.std().item():.3f}, norm: {t.norm():.4f}") + + vae = AutoencoderKL.from_pretrained( pretrained_vae_name_or_path or pretrained_model_name_or_path, subfolder=None if pretrained_vae_name_or_path else "vae", @@ -288,6 +293,7 @@ def loss_step( vae, text_encoder, scheduler, + optimized_embeddings = None, train_inpainting=False, t_mutliplier=1.0, mixed_precision=False, @@ -392,6 +398,12 @@ def loss_step( .mean() ) + if optimized_embeddings is not None: + embedding_norm = optimized_embeddings.norm(dim=1).mean() + target_norm = 0.39 + embedding_norm_loss = (embedding_norm - target_norm)**2 + loss += 0.005*embedding_norm_loss + return loss @@ -451,6 +463,7 @@ def train_inversion( vae, text_encoder, scheduler, + optimized_embeddings = text_encoder.get_input_embeddings().weight[index_updates, :], train_inpainting=train_inpainting, mixed_precision=mixed_precision, cached_latents=cached_latents, @@ -494,19 +507,18 @@ def train_inversion( ) * ( pre_norm + lambda_ * (0.4 - pre_norm) ) - print(pre_norm) + #print(pre_norm) - current_norm = ( - text_encoder.get_input_embeddings() - .weight[index_updates, :] - .norm(dim=-1) - ) + optimizing_embeds = text_encoder.get_input_embeddings().weight[index_updates, :] + current_norm = (optimizing_embeds.norm(dim=-1)) + # reset original embeddings (we're only optimizing the new token ones) text_encoder.get_input_embeddings().weight[ index_no_updates ] = orig_embeds_params[index_no_updates] - - print(f"Current Norm : {current_norm}") + + for i, t in enumerate(optimizing_embeds): + print(f"token {i} --> mean: {t.mean().item():.3f}, std: {t.std().item():.3f}, norm: {t.norm():.4f}") global_step += 1 progress_bar.update(1) @@ -601,6 +613,7 @@ def perform_tuning( tokenizer, test_image_path: str, cached_latents: bool, + index_no_updates = None, log_wandb: bool = False, wandb_log_prompt_cnt: int = 10, class_token: str = "person", @@ -616,6 +629,9 @@ def perform_tuning( unet.train() text_encoder.train() + # Save the current token embeddings: + orig_embeds_params = text_encoder.get_input_embeddings().weight.data.clone() + if log_wandb: preped_clip = prepare_clip_model_sets() @@ -638,6 +654,7 @@ def perform_tuning( vae, text_encoder, scheduler, + optimized_embeddings = text_encoder.get_input_embeddings().weight[:, :], train_inpainting=train_inpainting, t_mutliplier=0.8, mixed_precision=True, @@ -659,6 +676,13 @@ def perform_tuning( progress_bar.set_postfix(**logs) losses.append(loss.detach().item()) + if index_no_updates is not None: + with torch.no_grad(): + # reset original embeddings (we're only optimizing the new tokens) + text_encoder.get_input_embeddings().weight[ + index_no_updates + ] = orig_embeds_params[index_no_updates] + global_step += 1 @@ -749,7 +773,7 @@ def train( pretrained_vae_name_or_path: str = None, 
revision: Optional[str] = None, perform_inversion: bool = True, - use_template: Literal[None, "object", "style"] = None, + use_template: Literal[None, "object", "style", "person"] = None, train_inpainting: bool = False, placeholder_tokens: str = "", placeholder_token_at_data: Optional[str] = None, @@ -1044,6 +1068,7 @@ def train( param.requires_grad = False else: text_encoder.requires_grad_(False) + if train_text_encoder: text_encoder_lora_params, _ = inject_trainable_lora( text_encoder, @@ -1082,6 +1107,7 @@ def train( text_encoder, train_dataloader, max_train_steps_tuning, + index_no_updates = index_no_updates, cached_latents=cached_latents, scheduler=noise_scheduler, optimizer=lora_optimizers, diff --git a/lora_diffusion/dataset.py b/lora_diffusion/dataset.py index 9b65dbe..e51c301 100644 --- a/lora_diffusion/dataset.py +++ b/lora_diffusion/dataset.py @@ -40,6 +40,7 @@ ] PERSON_TEMPLATE = [ + "{}", "{}", "a picture of {}", "a closeup of {}", @@ -55,17 +56,21 @@ "a photo of the beautiful {}", "a selfie taken by the handsome {}", "a selfie taken by {}", + "{} taking a selfie", + "{} is having fun, 4k photograph", + "{} wearing a plaid shirt standing next to another person", + "smiling {} in a hoodie and sweater", "a photo of the cool {}", "a close-up photo of {}", "a bright photo of {}", "a cropped photo of {}", - "a good photo of {}", + "a brilliant HD photo of {}", "a beautiful picture of {}", "a photo showing {}", - "a good photo of {}", + "a great photo of {}", ] -STYLE_TEMPLATE = [ +STYLE_TEMPLATE_ORIG = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", @@ -87,6 +92,23 @@ "a large painting in the style of {}", ] +STYLE_TEMPLATE = [ + "a painting in the style of {}", + "a rendering in the style of {}", + "an artwork in the style of {}", + "a magnificent painting in the style of {}", + "a picture in the style of {}", + "a photograph, {} style", + "{} style painting", + "a {}-styled artwork", + "a nice painting in the style of {}", + "a gorgeous example of {} style", + "image in the style of {}", + "{}, painting", + "{} artwork" +] + + NULL_TEMPLATE = ["{}"] TEMPLATE_MAP = { @@ -191,6 +213,7 @@ def __init__( self.resize = resize self.train_inpainting = train_inpainting self.h_flip_prob = 0.5 + self.final_flip_prob = 0.33 if use_template == 'person' else 0.5 instance_data_root = Path(instance_data_root) if not instance_data_root.exists(): @@ -321,10 +344,10 @@ def __init__( print("Captions:") print(self.captions) - def tune_h_flip_prob(self, training_progress, end_prob = 0.25): + def tune_h_flip_prob(self, training_progress): if self.h_flip: # Tune the h_flip probability to be 0.5 when training_progress is 0 and final_flip_prob when training_progress is 1 - self.h_flip_prob = 0.5 + (end_prob - 0.5) * training_progress + self.h_flip_prob = 0.5 + (self.final_flip_prob - 0.5) * training_progress print(f"h_flip_prob: {self.h_flip_prob:.3f}") def __len__(self): @@ -357,7 +380,8 @@ def __getitem__(self, index): for token, value in self.token_map.items(): text = text.replace(token, value) - print(text) + if random.random() < 0.1: + print(text) if self.use_mask: img_mask = Image.open(self.mask_path[index % self.num_instance_images]) From 3026c5831d8874c76093201062a64540d7726e16 Mon Sep 17 00:00:00 2001 From: SimoRyu Date: Wed, 15 Feb 2023 17:40:31 +0000 Subject: [PATCH 16/24] format and bugfix --- lora_diffusion/cli_lora_pti.py | 90 ++++++++++++++++++++++------------ 1 file changed, 60 insertions(+), 30 deletions(-) diff --git 
a/lora_diffusion/cli_lora_pti.py b/lora_diffusion/cli_lora_pti.py index 20d30dd..2ae1d16 100644 --- a/lora_diffusion/cli_lora_pti.py +++ b/lora_diffusion/cli_lora_pti.py @@ -48,7 +48,10 @@ UNET_EXTENDED_TARGET_REPLACE, ) -def preview_training_batch(train_dataloader, mode, n_imgs = 40): +os.environ["CUDA_LAUNCH_BLOCKING"] = "1" + + +def preview_training_batch(train_dataloader, mode, n_imgs=40): outdir = f"training_batch_preview/{mode}" os.makedirs(outdir, exist_ok=True) imgs_saved = 0 @@ -57,9 +60,16 @@ def preview_training_batch(train_dataloader, mode, n_imgs = 40): for batch_i, batch in enumerate(train_dataloader): imgs = batch["pixel_values"] for i, img_torch in enumerate(imgs): - img_torch = (img_torch+1) /2 + img_torch = (img_torch + 1) / 2 # convert to pil and save to disk: - img = Image.fromarray((255.*img_torch).permute(1, 2, 0).detach().cpu().numpy().astype(np.uint8)).convert("RGB") + img = Image.fromarray( + (255.0 * img_torch) + .permute(1, 2, 0) + .detach() + .cpu() + .numpy() + .astype(np.uint8) + ).convert("RGB") img.save(f"{outdir}/preview_{imgs_saved}.jpg") imgs_saved += 1 @@ -132,8 +142,9 @@ def get_models( # print some stats about the token embedding: t = token_embeds[placeholder_token_id] - print(f"init_token {init_tok} --> mean: {t.mean().item():.3f}, std: {t.std().item():.3f}, norm: {t.norm():.4f}") - + print( + f"init_token {init_tok} --> mean: {t.mean().item():.3f}, std: {t.std().item():.3f}, norm: {t.norm():.4f}" + ) vae = AutoencoderKL.from_pretrained( pretrained_vae_name_or_path or pretrained_model_name_or_path, @@ -174,7 +185,7 @@ def text2img_dataloader( batch["instance_images"].unsqueeze(0).to(dtype=vae.dtype).to(vae.device) ).latent_dist.sample() latents = latents * 0.18215 - batch["instance_images"] = latents.squeeze(0) + batch["instance_images"] = latents.squeeze(0).cpu() cached_latents_dataset.append(batch) def collate_fn(examples): @@ -208,6 +219,7 @@ def collate_fn(examples): num_workers=4, shuffle=True, collate_fn=collate_fn, + pin_memory=True, ) print("PTI : Using cached latent.") @@ -278,7 +290,7 @@ def collate_fn(examples): train_dataloader = torch.utils.data.DataLoader( train_dataset, - num_workers = 4, + num_workers=4, batch_size=train_batch_size, shuffle=True, collate_fn=collate_fn, @@ -293,7 +305,7 @@ def loss_step( vae, text_encoder, scheduler, - optimized_embeddings = None, + optimized_embeddings=None, train_inpainting=False, t_mutliplier=1.0, mixed_precision=False, @@ -317,11 +329,11 @@ def loss_step( scale_factor=1 / 8, ) else: - latents = batch["pixel_values"] + latents = batch["pixel_values"].to(dtype=weight_dtype).to(unet.device) if train_inpainting: masked_image_latents = batch["masked_image_latents"] - mask = batch["mask_values"] + mask = batch["mask_values"].to(dtype=weight_dtype).to(unet.device) noise = torch.randn_like(latents) bsz = latents.shape[0] @@ -401,8 +413,8 @@ def loss_step( if optimized_embeddings is not None: embedding_norm = optimized_embeddings.norm(dim=1).mean() target_norm = 0.39 - embedding_norm_loss = (embedding_norm - target_norm)**2 - loss += 0.005*embedding_norm_loss + embedding_norm_loss = (embedding_norm - target_norm) ** 2 + loss += 0.005 * embedding_norm_loss return loss @@ -463,7 +475,7 @@ def train_inversion( vae, text_encoder, scheduler, - optimized_embeddings = text_encoder.get_input_embeddings().weight[index_updates, :], + optimized_embeddings=None, train_inpainting=train_inpainting, mixed_precision=mixed_precision, cached_latents=cached_latents, @@ -507,18 +519,22 @@ def train_inversion( ) * ( pre_norm + 
lambda_ * (0.4 - pre_norm) ) - #print(pre_norm) + # print(pre_norm) - optimizing_embeds = text_encoder.get_input_embeddings().weight[index_updates, :] - current_norm = (optimizing_embeds.norm(dim=-1)) + optimizing_embeds = text_encoder.get_input_embeddings().weight[ + index_updates, : + ] + current_norm = optimizing_embeds.norm(dim=-1) # reset original embeddings (we're only optimizing the new token ones) text_encoder.get_input_embeddings().weight[ index_no_updates ] = orig_embeds_params[index_no_updates] - + for i, t in enumerate(optimizing_embeds): - print(f"token {i} --> mean: {t.mean().item():.3f}, std: {t.std().item():.3f}, norm: {t.norm():.4f}") + print( + f"token {i} --> mean: {t.mean().item():.3f}, std: {t.std().item():.3f}, norm: {t.norm():.4f}" + ) global_step += 1 progress_bar.update(1) @@ -582,10 +598,13 @@ def train_inversion( if global_step >= num_steps: return + import matplotlib.pyplot as plt + + def plot_loss_curve(losses, name, moving_avg=20): losses = np.array(losses) - losses = np.convolve(losses, np.ones(moving_avg)/moving_avg, mode='valid') + losses = np.convolve(losses, np.ones(moving_avg) / moving_avg, mode="valid") plt.plot(losses) plt.xlabel("Step") plt.ylabel("Loss") @@ -593,6 +612,7 @@ def plot_loss_curve(losses, name, moving_avg=20): plt.savefig(f"{name}.png") plt.clf() + def perform_tuning( unet, vae, @@ -613,7 +633,7 @@ def perform_tuning( tokenizer, test_image_path: str, cached_latents: bool, - index_no_updates = None, + index_no_updates=None, log_wandb: bool = False, wandb_log_prompt_cnt: int = 10, class_token: str = "person", @@ -641,7 +661,9 @@ def perform_tuning( for epoch in range(math.ceil(num_steps / len(dataloader))): if not cached_latents: - dataloader.dataset.tune_h_flip_prob(epoch / math.ceil(num_steps / len(dataloader))) + dataloader.dataset.tune_h_flip_prob( + epoch / math.ceil(num_steps / len(dataloader)) + ) for batch in dataloader: lr_scheduler_lora.step() @@ -654,7 +676,7 @@ def perform_tuning( vae, text_encoder, scheduler, - optimized_embeddings = text_encoder.get_input_embeddings().weight[:, :], + optimized_embeddings=text_encoder.get_input_embeddings().weight[:, :], train_inpainting=train_inpainting, t_mutliplier=0.8, mixed_precision=True, @@ -683,7 +705,6 @@ def perform_tuning( index_no_updates ] = orig_embeds_params[index_no_updates] - global_step += 1 if global_step % save_steps == 0: @@ -765,6 +786,7 @@ def perform_tuning( target_replace_module_unet=lora_unet_target_modules, ) + def train( instance_data_dir: str, pretrained_model_name_or_path: str, @@ -963,8 +985,9 @@ def train( vae = None # STEP 1 : Perform Inversion - if perform_inversion and not cached_latents: - preview_training_batch(train_dataloader, "inversion") + if perform_inversion: + if not cached_latents: + preview_training_batch(train_dataloader, "inversion") print("PTI : Performing Inversion") ti_optimizer = optim.AdamW( @@ -976,7 +999,12 @@ def train( ) token_ids_positions_to_update = np.where(index_no_updates.cpu().numpy() == 0) - print("Training embedding of size", text_encoder.get_input_embeddings().weight[token_ids_positions_to_update].shape) + print( + "Training embedding of size", + text_encoder.get_input_embeddings() + .weight[token_ids_positions_to_update] + .shape, + ) lr_scheduler = get_scheduler( lr_scheduler, @@ -1098,7 +1126,7 @@ def train( num_warmup_steps=lr_warmup_steps_lora, num_training_steps=max_train_steps_tuning, ) - if not cached_latents: + if not cached_latents: preview_training_batch(train_dataloader, "tuning") perform_tuning( @@ -1107,7 +1135,7 
@@ def train( text_encoder, train_dataloader, max_train_steps_tuning, - index_no_updates = index_no_updates, + index_no_updates=index_no_updates, cached_latents=cached_latents, scheduler=noise_scheduler, optimizer=lora_optimizers, @@ -1135,10 +1163,12 @@ def train( # Save the args_dict to the output directory as a json file: with open(os.path.join(output_dir, "lora_training_args.json"), "w") as f: - json.dump(args_dict, f, default=lambda o: '', indent=2) + json.dump(args_dict, f, default=lambda o: "", indent=2) + def main(): fire.Fire(train) + if __name__ == "__main__": - main() \ No newline at end of file + main() From c52dfaefdf8d0b2d9e43551389e0478c69cdd250 Mon Sep 17 00:00:00 2001 From: SimoRyu Date: Wed, 15 Feb 2023 18:46:02 +0000 Subject: [PATCH 17/24] now unet trains --- .gitignore | 3 ++- lora_diffusion/cli_lora_pti.py | 20 +++++++++----------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index aa25096..17d80df 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,5 @@ wandb exps* .vscode build -lora_diffusion.egg-info \ No newline at end of file +lora_diffusion.egg-info +training_batch_preview \ No newline at end of file diff --git a/lora_diffusion/cli_lora_pti.py b/lora_diffusion/cli_lora_pti.py index 2ae1d16..f615ac0 100644 --- a/lora_diffusion/cli_lora_pti.py +++ b/lora_diffusion/cli_lora_pti.py @@ -48,8 +48,6 @@ UNET_EXTENDED_TARGET_REPLACE, ) -os.environ["CUDA_LAUNCH_BLOCKING"] = "1" - def preview_training_batch(train_dataloader, mode, n_imgs=40): outdir = f"training_batch_preview/{mode}" os.makedirs(outdir, exist_ok=True) imgs_saved = 0 @@ -185,7 +183,7 @@ def text2img_dataloader( batch["instance_images"].unsqueeze(0).to(dtype=vae.dtype).to(vae.device) ).latent_dist.sample() latents = latents * 0.18215 - batch["instance_images"] = latents.squeeze(0).cpu() + batch["instance_images"] = latents.squeeze(0) cached_latents_dataset.append(batch) def collate_fn(examples): @@ -216,10 +214,8 @@ def collate_fn(examples): train_dataloader = torch.utils.data.DataLoader( cached_latents_dataset, batch_size=train_batch_size, - num_workers=4, shuffle=True, collate_fn=collate_fn, - pin_memory=True, ) print("PTI : Using cached latent.") @@ -1062,12 +1058,6 @@ def train( unet, r=lora_rank, target_replace_module=lora_unet_target_modules ) - n_optimizable_unet_params = sum( - [el.numel() for el in itertools.chain(*unet_lora_params)] - ) - print("PTI : n_optimizable_unet_params: ", n_optimizable_unet_params) - - print(f"PTI : has {len(unet_lora_params)} lora") print("PTI : Before training:") inspect_lora(unet) @@ -1112,7 +1102,15 @@ def train( inspect_lora(text_encoder) lora_optimizers = optim.AdamW(params_to_optimize, weight_decay=weight_decay_lora) + with torch.no_grad(): + n_optimizable_unet_params = sum( + p.numel() for p in unet.parameters() if p.requires_grad + ) + sum(p.numel() for p in text_encoder.parameters() if p.requires_grad) + print("PTI : n_optimizable_unet_params: ", n_optimizable_unet_params) + + print(f"PTI : has {len(unet_lora_params)} lora") unet.train() if train_text_encoder: print("Training text encoder!") From b1853d145cf9b40ac854daed33866a73d269a0d5 Mon Sep 17 00:00:00 2001 From: SimoRyu Date: Wed, 15 Feb 2023 19:07:09 +0000 Subject: [PATCH 18/24] format : black --- lora_diffusion/cli_lora_add.py | 2 +- lora_diffusion/cli_pt_to_safetensors.py | 8 +++-- lora_diffusion/dataset.py | 39 +++++++++++++++---------- lora_diffusion/preprocess_files.py | 9 ++++-- lora_diffusion/utils.py | 1 + 5 files changed, 38 insertions(+), 21 deletions(-) diff --git a/lora_diffusion/cli_lora_add.py 
b/lora_diffusion/cli_lora_add.py index 69058aa..e9612cd 100644 --- a/lora_diffusion/cli_lora_add.py +++ b/lora_diffusion/cli_lora_add.py @@ -202,4 +202,4 @@ def main(): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/lora_diffusion/cli_pt_to_safetensors.py b/lora_diffusion/cli_pt_to_safetensors.py index 9a4be40..aefac92 100644 --- a/lora_diffusion/cli_pt_to_safetensors.py +++ b/lora_diffusion/cli_pt_to_safetensors.py @@ -62,9 +62,11 @@ def convert(*paths, outpath, overwrite=False, **settings): } prefix = f"{name}." - - arg_settings = { k[len(prefix) :]: v for k, v in settings.items() if k.startswith(prefix) } - model_settings = { **model_settings, **arg_settings } + + arg_settings = { + k[len(prefix) :]: v for k, v in settings.items() if k.startswith(prefix) + } + model_settings = {**model_settings, **arg_settings} print(f"Loading Lora for {name} from {path} with settings {model_settings}") diff --git a/lora_diffusion/dataset.py b/lora_diffusion/dataset.py index e51c301..2a42a9b 100644 --- a/lora_diffusion/dataset.py +++ b/lora_diffusion/dataset.py @@ -105,7 +105,7 @@ "a gorgeous example of {} style", "image in the style of {}", "{}, painting", - "{} artwork" + "{} artwork", ] @@ -169,22 +169,29 @@ def expand_rectangle(mask, f): rows, cols = np.where(mask == 255) top_row, bottom_row = np.min(rows), np.max(rows) left_col, right_col = np.min(cols), np.max(cols) - + rect_height, rect_width = bottom_row - top_row + 1, right_col - left_col + 1 new_height, new_width = np.round(rect_height * f), np.round(rect_width * f) - + center_row, center_col = top_row + rect_height // 2, left_col + rect_width // 2 - top_row, bottom_row = np.round(center_row - new_height / 2), np.round(center_row + new_height / 2) - left_col, right_col = np.round(center_col - new_width / 2), np.round(center_col + new_width / 2) - - top_row, bottom_row = int(np.clip(top_row, 0, mask.shape[0] - 1)), int(np.clip(bottom_row, 0, mask.shape[0] - 1)) - left_col, right_col = int(np.clip(left_col, 0, mask.shape[1] - 1)), int(np.clip(right_col, 0, mask.shape[1] - 1)) - + top_row, bottom_row = np.round(center_row - new_height / 2), np.round( center_row + new_height / 2 ) + left_col, right_col = np.round(center_col - new_width / 2), np.round( center_col + new_width / 2 ) + + top_row, bottom_row = int(np.clip(top_row, 0, mask.shape[0] - 1)), int( np.clip(bottom_row, 0, mask.shape[0] - 1) ) + left_col, right_col = int(np.clip(left_col, 0, mask.shape[1] - 1)), int( np.clip(right_col, 0, mask.shape[1] - 1) ) + expanded_mask = np.ones_like(mask) - expanded_mask[top_row:bottom_row + 1, left_col:right_col + 1] = 255 - - return expanded_mask + expanded_mask[top_row : bottom_row + 1, left_col : right_col + 1] = 255 + return expanded_mask class PivotalTuningDatasetCapation(Dataset): @@ -213,7 +220,7 @@ def __init__( self.resize = resize self.train_inpainting = train_inpainting self.h_flip_prob = 0.5 - self.final_flip_prob = 0.33 if use_template == 'person' else 0.5 + self.final_flip_prob = 0.33 if use_template == "person" else 0.5 instance_data_root = Path(instance_data_root) if not instance_data_root.exists(): @@ -229,7 +236,9 @@ # Prepare the instance images if use_mask_captioned_data: src_imgs = glob.glob(str(instance_data_root) + "/*src.jpg") - src_imgs = sorted(src_imgs, key=lambda x: int(str(Path(x).stem).split(".")[0])) + src_imgs = sorted( src_imgs, key=lambda x: int(str(Path(x).stem).split(".")[0]) ) for f in src_imgs: idx = int(str(Path(f).stem).split(".")[0]) @@ 
-385,7 +394,7 @@ def __getitem__(self, index): if self.use_mask: img_mask = Image.open(self.mask_path[index % self.num_instance_images]) - example["mask"] = (self.image_transforms(img_mask)* 0.5 + 1.0) + example["mask"] = self.image_transforms(img_mask) * 0.5 + 1.0 if self.h_flip and random.random() < self.h_flip_prob: hflip = transforms.RandomHorizontalFlip(p=1) diff --git a/lora_diffusion/preprocess_files.py b/lora_diffusion/preprocess_files.py index 893a692..ce192e4 100644 --- a/lora_diffusion/preprocess_files.py +++ b/lora_diffusion/preprocess_files.py @@ -150,6 +150,7 @@ def blip_captioning_dataset( return captions + def face_mask_google_mediapipe( images: List[Image.Image], blur_amount: float = 80.0, bias: float = 0.05 ) -> List[Image.Image]: @@ -262,7 +263,11 @@ def load_and_save_masks_and_captions( # check if it is a directory if os.path.isdir(files): # get all the .png .jpg in the directory - files = glob.glob(os.path.join(files, "*.png")) + glob.glob(os.path.join(files, "*.jpg")) + glob.glob(os.path.join(files, "*.jpeg")) + files = ( + glob.glob(os.path.join(files, "*.png")) + + glob.glob(os.path.join(files, "*.jpg")) + + glob.glob(os.path.join(files, "*.jpeg")) + ) if len(files) == 0: raise Exception( @@ -325,4 +330,4 @@ def main(): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/lora_diffusion/utils.py b/lora_diffusion/utils.py index b8b4483..d8a3410 100644 --- a/lora_diffusion/utils.py +++ b/lora_diffusion/utils.py @@ -50,6 +50,7 @@ "A watercolor painting of on a beach", ] + def image_grid(_imgs, rows=None, cols=None): if rows is None and cols is None: From 9a6b552384311f04b5ab330a270e62e5c66678d9 Mon Sep 17 00:00:00 2001 From: SimoRyu Date: Wed, 15 Feb 2023 19:48:15 +0000 Subject: [PATCH 19/24] bugfix : to L --- lora_diffusion/dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lora_diffusion/dataset.py b/lora_diffusion/dataset.py index 2a42a9b..01a22fb 100644 --- a/lora_diffusion/dataset.py +++ b/lora_diffusion/dataset.py @@ -312,7 +312,7 @@ def __init__( # Make the rectangular mask region bigger: mask = expand_rectangle(mask, 1.25) # convert back to PIL image: - mask = Image.fromarray(mask) + mask = Image.fromarray(mask).convert("L") mask.save(f"{instance_data_root}/{idx}.mask.png") From 71c8c1dba595d77d0eabdf9c278630168e5a8ce1 Mon Sep 17 00:00:00 2001 From: SimoRyu Date: Wed, 15 Feb 2023 19:57:40 +0000 Subject: [PATCH 20/24] version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 6d286b3..2b5e609 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ setup( name="lora_diffusion", py_modules=["lora_diffusion"], - version="0.1.7", + version="0.1.8", description="Low Rank Adaptation for Diffusion Models. 
Works with Stable Diffusion out-of-the-box.", author="Simo Ryu", packages=find_packages(), From cdbaf7aba713594649a393903cb65b97b1690ef8 Mon Sep 17 00:00:00 2001 From: A2va <49582555+A2va@users.noreply.github.com> Date: Fri, 17 Feb 2023 13:50:12 +0100 Subject: [PATCH 21/24] Support captions list in preprocess files --- lora_diffusion/preprocess_files.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lora_diffusion/preprocess_files.py b/lora_diffusion/preprocess_files.py index bedb89f..d3f0358 100644 --- a/lora_diffusion/preprocess_files.py +++ b/lora_diffusion/preprocess_files.py @@ -244,7 +244,7 @@ def _center_of_mass(mask: Image.Image): def load_and_save_masks_and_captions( files: Union[str, List[str]], output_dir: str, - caption_text: Optional[str] = None, + caption_text: Optional[Union[List[str], str]] = None, target_prompts: Optional[Union[List[str], str]] = None, target_size: int = 512, crop_based_on_salience: bool = True, @@ -278,8 +278,10 @@ images = [Image.open(file) for file in files] # captions - print(f"Generating {len(images)} captions...") - captions = blip_captioning_dataset(images, text=caption_text) + captions = caption_text + if not isinstance(caption_text, List): + print(f"Generating {len(images)} captions...") + captions = blip_captioning_dataset(images, text=caption_text) if target_prompts is None: target_prompts = captions From c45d59b910c3d60b25b80405bf2fabfbda2d3564 Mon Sep 17 00:00:00 2001 From: A2va <49582555+A2va@users.noreply.github.com> Date: Fri, 17 Feb 2023 15:55:34 +0100 Subject: [PATCH 22/24] Change typing import to support python 3.7 * Remove unused import --- lora_diffusion/cli_lora_pti.py | 14 ++++++++------ requirements.txt | 1 + 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/lora_diffusion/cli_lora_pti.py b/lora_diffusion/cli_lora_pti.py index 7de4bae..b1b11f8 100644 --- a/lora_diffusion/cli_lora_pti.py +++ b/lora_diffusion/cli_lora_pti.py @@ -1,16 +1,10 @@ # Bootstrapped from: # https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py -import argparse -import hashlib -import inspect import itertools import math import os -import random import re -from pathlib import Path -from typing import Optional, List, Literal import torch import torch.nn.functional as F @@ -32,6 +26,14 @@ import wandb import fire +import sys +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +from typing import Optional, List + from lora_diffusion import ( PivotalTuningDatasetCapation, extract_lora_ups_down, diff --git a/requirements.txt b/requirements.txt index 89eebcd..0e4db2c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,3 +8,4 @@ safetensors opencv-python torchvision mediapipe +typing_extensions; python_version < '3.8' \ No newline at end of file From 246c06808524bfc2746f1473bf9e4ca06c6b870e Mon Sep 17 00:00:00 2001 From: A2va <49582555+A2va@users.noreply.github.com> Date: Fri, 17 Feb 2023 16:11:14 +0100 Subject: [PATCH 23/24] Other typing fixes --- lora_diffusion/cli_lora_add.py | 7 ++++++- lora_diffusion/lora.py | 7 ++++++- lora_diffusion/preprocess_files.py | 7 ++++++- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/lora_diffusion/cli_lora_add.py b/lora_diffusion/cli_lora_add.py index fc7f7e4..538f8eb 100644 --- a/lora_diffusion/cli_lora_add.py +++ b/lora_diffusion/cli_lora_add.py @@ -1,4 +1,9 @@ -from typing import Literal, Union, Dict +import sys +if sys.version_info >= 
(3, 8): + from typing import Literal +else: + from typing_extensions import Literal +from typing import Union, Dict import os import shutil import fire diff --git a/lora_diffusion/lora.py b/lora_diffusion/lora.py index 8753f15..93d4607 100644 --- a/lora_diffusion/lora.py +++ b/lora_diffusion/lora.py @@ -1,7 +1,12 @@ import json import math from itertools import groupby -from typing import Callable, Dict, List, Optional, Set, Tuple, Type, Union +import sys +if sys.version_info >= (3, 9): + from typing import Type +else: + from typing_extensions import Type +from typing import Callable, Dict, List, Optional, Set, Tuple, Union import numpy as np import PIL diff --git a/lora_diffusion/preprocess_files.py b/lora_diffusion/preprocess_files.py index bedb89f..6756f0b 100644 --- a/lora_diffusion/preprocess_files.py +++ b/lora_diffusion/preprocess_files.py @@ -2,7 +2,12 @@ # Have BLIP auto caption # Have CLIPSeg auto mask concept -from typing import List, Literal, Union, Optional, Tuple +import sys +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal +from typing import List, Union, Optional, Tuple import os from PIL import Image, ImageFilter import torch From 46116b8bcbb602324a98791ad47bb364998aee7f Mon Sep 17 00:00:00 2001 From: A2va <49582555+A2va@users.noreply.github.com> Date: Fri, 17 Feb 2023 16:15:31 +0100 Subject: [PATCH 24/24] Update requirements.txt --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0e4db2c..f05192c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,4 +8,4 @@ safetensors opencv-python torchvision mediapipe -typing_extensions; python_version < '3.8' \ No newline at end of file +typing_extensions; python_version < '3.9' \ No newline at end of file
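
The three typing patches above (22-24) all repeat one version-gated import pattern. Below is a minimal, self-contained sketch of that pattern, assuming only the standard library plus the typing_extensions backport pinned in requirements.txt; the file name compat_typing.py and the describe() helper are illustrative assumptions, not part of the series.

    # compat_typing.py -- a sketch of the gate used above in cli_lora_pti.py,
    # cli_lora_add.py, lora.py and preprocess_files.py.
    import sys

    # Literal joined the stdlib typing module in Python 3.8; older
    # interpreters fall back to the typing_extensions backport.
    if sys.version_info >= (3, 8):
        from typing import Literal
    else:
        from typing_extensions import Literal

    # The series gates Type on 3.9, matching the final requirements.txt
    # marker: typing_extensions; python_version < '3.9'.
    if sys.version_info >= (3, 9):
        from typing import Type
    else:
        from typing_extensions import Type

    # The same Literal alias style the CLI uses for its template choices.
    UseTemplate = Literal[None, "object", "style", "person"]

    # A placeholder alias showing Type in use.
    DatasetClass = Type[object]

    def describe(template: UseTemplate) -> str:
        # Plain runtime check mirroring the static Literal annotation.
        assert template in (None, "object", "style", "person")
        return f"use_template={template}"

Gating on sys.version_info rather than try/except ImportError keeps the fallback explicit and keeps the code in step with the environment marker in requirements.txt.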