From 225ea36285d2f6e9439b0b399e69f05a565bf7b0 Mon Sep 17 00:00:00 2001 From: araleza <70412719+araleza@users.noreply.github.com> Date: Sun, 24 Aug 2025 16:00:38 +0100 Subject: [PATCH 1/8] Support for fused (N)AdamW + Kahan + momentum offloading FFT on a 5090. --- flux_train.py | 37 ++++++++++- library/adafactor_fused.py | 66 +++++++++++++++++++- library/adamw_fused.py | 122 +++++++++++++++++++++++++++++++++++++ library/train_util.py | 15 ++++- 4 files changed, 232 insertions(+), 8 deletions(-) create mode 100644 library/adamw_fused.py diff --git a/flux_train.py b/flux_train.py index 4aa67220f..ad32ebc62 100644 --- a/flux_train.py +++ b/flux_train.py @@ -330,6 +330,8 @@ def train(args): # 学習に必要なクラスを準備する accelerator.print("prepare optimizer, data loader etc.") + fused_optimizers_supported = ['adafactor', 'adamoffload', 'nadamoffload', 'adamwoffload', 'nadamwoffload'] + if args.blockwise_fused_optimizers: # fused backward pass: https://pytorch.org/tutorials/intermediate/optimizer_step_in_backward_tutorial.html # Instead of creating an optimizer for all parameters as in the tutorial, we create an optimizer for each block of parameters. @@ -381,10 +383,25 @@ def train(args): raise ValueError("Schedule-free optimizer is not supported with blockwise fused optimizers") optimizer_train_fn = lambda: None # dummy function optimizer_eval_fn = lambda: None # dummy function + + if (args.optimizer_type not in fused_optimizers_supported) and args.full_bf16: + logger.warning("Use of --blockwise_fused_optimizers with Adafactor optimizer prevents stochastic/Kahan weight updates.") else: _, _, optimizer = train_util.get_optimizer(args, trainable_params=params_to_optimize) optimizer_train_fn, optimizer_eval_fn = train_util.get_optimizer_train_eval_fn(optimizer, args) + # Pass any Kahan summation arg to the optimizer + if args.kahan_summation: + # Self check parameter compatibility + if args.optimizer_type.lower() not in fused_optimizers_supported: + logger.warning("Kahan summation has been requested, but this is not supported by the selected optimizer.") + if not args.full_bf16: + logger.warning("Kahan summation requires --full_bf16") + if args.blockwise_fused_optimizers: + logger.warning("Kahan summation has been requested, but these are not compatible with --blockwise_fused_optimizer. "\ + "Perhaps try --fused_backward_pass instead.") + optimizer.use_kahan_summation = args.kahan_summation + # prepare dataloader # strategies are set here because they cannot be referenced in another process. Copy them with the dataset # some strategies can be None @@ -474,10 +491,18 @@ def train(args): train_util.resume_from_local_or_hf_if_specified(accelerator, args) if args.fused_backward_pass: - # use fused optimizer for backward pass: other optimizers will be supported in the future + # use fused optimizer for backward pass. Only some specific optimizers are supported. 
import library.adafactor_fused - - library.adafactor_fused.patch_adafactor_fused(optimizer) + import library.adamw_fused + + if args.optimizer_type.lower() == "adafactor": + library.adafactor_fused.patch_adafactor_fused(optimizer) + elif args.optimizer_type.lower() == "adamoffload" or args.optimizer_type.lower() == "adamwoffload": + library.adamw_fused.patch_adamw_offload_fused(optimizer, False) + elif args.optimizer_type.lower() == "nadamoffload" or args.optimizer_type.lower() == "nadamwoffload": + library.adamw_fused.patch_adamw_offload_fused(optimizer, True) # Nesterov + else: + logger.error(f"Optimizer '{args.optimizer}' does not have a --fused_backward_pass implementation available") for param_group, param_name_group in zip(optimizer.param_groups, param_names): for parameter, param_name in zip(param_group["params"], param_name_group): @@ -816,6 +841,12 @@ def setup_parser() -> argparse.ArgumentParser: action="store_true", help="enable blockwise optimizers for fused backward pass and optimizer step / fused backward passとoptimizer step のためブロック単位のoptimizerを有効にする", ) + parser.add_argument( + "--kahan_summation", + action="store_true", + help="Offloads to CPU the float part lost during bf16 quantization, and re-adds it on the next step / "\ + "bf16 量子化中に失われた浮動小数点部分を CPU にオフロードし、次のステップに再度追加します", + ) parser.add_argument( "--skip_latents_validity_check", action="store_true", diff --git a/library/adafactor_fused.py b/library/adafactor_fused.py index b5afa236b..6dfd01743 100644 --- a/library/adafactor_fused.py +++ b/library/adafactor_fused.py @@ -28,6 +28,62 @@ def copy_stochastic_(target: torch.Tensor, source: torch.Tensor): del result +# Kahan summation for bfloat16 +# The implementation was provided by araleza. +# Based on paper "Revisiting BFloat16 Training": https://arxiv.org/pdf/2010.06192 + +def copy_kahan_(target: torch.Tensor, source: torch.Tensor, state, update): + """ + Copies source into target using Kahan summation. + + The lower bits of the float32 weight that are lost on conversion to bfloat16 + are sent to the CPU until the next step, where they are re-added onto the weights + before adding the gradient update. This produces near float32-like weight behavior, + although the copies back and forth to main memory result in slower training steps. + + Args: + target: the target tensor with dtype=bfloat16 + source: the source tensor with dtype=float32 + state: the optimizer state, used to store kahan residuals + update: the change in weights due to the gradient + """ + + # Initialize residuals to 0 for first step + if state.get('kahan_residuals') is None: + state['kahan_residuals'] = torch.zeros_like(source, dtype=torch.int16) + + # Need this in 32 bit as PyTorch doesn't support mixed 32-bit and 16-bit math operations + state['kahan_residuals'] = state['kahan_residuals'].to(source.device).to(dtype=torch.int32) + + # Bring the previous step's lower bits of the weights back from the + # cpu device, and add them back to the weights of the current step. + source_i32 = source.view(dtype=torch.int32) # Can't do math on uint32 + source_i32.add_(state['kahan_residuals']) + + # Reverse any rounding up during the cast to bf16 on the previous step + rounded_up = state['kahan_residuals'] >= 32768 + source_i32[rounded_up] -= 65536 + + # Must add the gradient update after the bottom bits are restored in case + # the exponent is changed by the update, or the -65536 on the line above + # would drop the uint32 value below zero, which is invalid.
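# Editor's illustration (not part of the patch): suppose a weight's float32 bit pattern is 0x3F801234.
# Casting to bfloat16 keeps only the top 16 bits; the low half 0x1234 is below 0x8000, so the cast
# rounds down to 0x3F80 and the residual 0x1234 is parked on the CPU. Had the low half been 0xC000,
# round-to-nearest would have bumped the stored bfloat16 up to 0x3F81; that is the case handled above,
# where a residual >= 32768 triggers the -65536 correction after the residual is re-added, so the
# exact float32 weight is recovered before the new gradient update is applied.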
+ source.add_(-update) + + # Get the lower bits into the residual + torch.bitwise_and(source_i32, 0x0000FFFF, out=state['kahan_residuals']) + + # Ensure rounding to bfloat16 matches expectations. These lines may not be + # necessary as target.copy_ should do this rounding anyway. + source_i32.add_(32768) # Add offset so clipping bits performs round-to-nearest + source_i32.bitwise_and_(-65536) # -65536 = FFFF0000 as a signed int32. Leaves only upper bits in source + + # Move the 16-bit Kahan bits from VRAM to main memory + state['kahan_residuals'] = state['kahan_residuals'].to(dtype=torch.uint16).to("cpu") + + # Copy the quantized floats into the target tensor + target.copy_(source) + + @torch.no_grad() def adafactor_step_param(self, p, group): if p.grad is None: @@ -102,13 +158,19 @@ def adafactor_step_param(self, p, group): if group["weight_decay"] != 0: p_data_fp32.add_(p_data_fp32, alpha=(-group["weight_decay"] * lr)) - p_data_fp32.add_(-update) + # Add on gradient update, but not if using kahan summation as the bottom + # bits must be restored first. (This update occurs in copy_kahan_() instead) + if not self.optimizer.use_kahan_summation: + p_data_fp32.add_(-update) # if p.dtype in {torch.float16, torch.bfloat16}: # p.copy_(p_data_fp32) if p.dtype == torch.bfloat16: - copy_stochastic_(p, p_data_fp32) + if self.optimizer.use_kahan_summation: + copy_kahan_(p, p_data_fp32, state, update) + else: + copy_stochastic_(p, p_data_fp32) elif p.dtype == torch.float16: p.copy_(p_data_fp32) diff --git a/library/adamw_fused.py b/library/adamw_fused.py new file mode 100644 index 000000000..2fb99deb6 --- /dev/null +++ b/library/adamw_fused.py @@ -0,0 +1,122 @@ +import math +import torch + +from library.adafactor_fused import copy_stochastic_ +from library.adafactor_fused import copy_kahan_ + + +@torch.no_grad() +def adamw_offload_step_param(self, p, group): + + if p.grad is None: + return + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError("This (N)AdamW implementation does not support sparse gradients.") + + state = self.state[p] + grad_shape = grad.shape + + p_data_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_data_fp32 = p_data_fp32.float() + + # State Initialization + if len(state) == 0: + state["step"] = 0 + state['exp_avg'] = torch.zeros_like(p, dtype=torch.bfloat16) + state['exp_avg_sq'] = torch.zeros_like(p, dtype=torch.bfloat16) + + state["step"] += 1 + + # NAdam + + beta1, beta2 = group['betas'] + eps = group['eps'] # 1e-8 + weight_decay = group.get('weight_decay', 0.0) + + # Bias correction terms + bias_correction1 = 1.0 - math.pow(beta1, state['step']) + bias_correction2 = 1.0 - math.pow(beta2, state['step']) + + eps_p2: float = math.pow(eps, 2) + + # Bring state back from CPU + state['exp_avg'] = state['exp_avg'] .to('cuda').to(dtype=torch.float32) + state['exp_avg_sq'] = state['exp_avg_sq'].to('cuda').to(dtype=torch.float32) + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + # Update biased first and second moment estimates + exp_avg .mul_(beta1).add_ (grad, alpha=1.0 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2) + + # Compute bias-corrected second moment for denominator + exp_avg_sq_corrected = exp_avg_sq / bias_correction2 + + # Compute update based on whether Nesterov momentum (NAdam) is being used + if self.use_nesterov: + # The next step's bias correction for momentum is needed + bias_correction1_next = 1.0 - math.pow(beta1, state['step'] + 1) + 
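# Editor's note (illustration, not part of the patch): with a constant beta1 this matches Dozat's
# NAdam formulation, where the momentum term is debiased by the next step's factor 1 - beta1^(t+1)
# because the Nesterov look-ahead applies beta1 one extra time, while the current-gradient term
# keeps the usual 1 - beta1^t factor.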
+ # NAdam update: combines current gradient with momentum look-ahead + momentum_cache = exp_avg / bias_correction1_next + update = (beta1 * momentum_cache + (1.0 - beta1) * grad / bias_correction1) / (exp_avg_sq_corrected.sqrt() + eps) + else: + # Standard Adam update: use bias-corrected first moment directly + exp_avg_corrected = exp_avg / bias_correction1 + update = exp_avg_corrected / (exp_avg_sq_corrected.sqrt() + eps) + + lr: float = group['lr'] + + # Apply learning rate + update.mul_(lr) + + # Apply weight decay + if weight_decay != 0: + p_data_fp32.mul_(1 - lr * weight_decay) + + # Keep state on CPU + state['exp_avg'] = state['exp_avg'] .to(dtype=torch.bfloat16).to('cpu') + state['exp_avg_sq'] = state['exp_avg_sq'].to(dtype=torch.bfloat16).to('cpu') + + # Add on gradient update, but not if using kahan summation as the bottom + # bits must be restored first. (This update occurs in copy_kahan_() instead) + if not self.optimizer.use_kahan_summation: + p_data_fp32.add_(-update) + + if p.dtype == torch.bfloat16: + if self.optimizer.use_kahan_summation: + copy_kahan_(p, p_data_fp32, state, update) + else: + copy_stochastic_(p, p_data_fp32) + elif p.dtype == torch.float16: + p.copy_(p_data_fp32) + + +@torch.no_grad() +def adamw_offload_step(self, closure=None): + """ + Performs a single optimization step + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + adamw_offload_step_param(self, p, group) + + return loss + + +def patch_adamw_offload_fused(optimizer, use_nesterov): + optimizer.use_nesterov = use_nesterov + + optimizer.step_param = adamw_offload_step_param.__get__(optimizer) + optimizer.step = adamw_offload_step.__get__(optimizer) diff --git a/library/train_util.py b/library/train_util.py index 395183957..b29b2cb09 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -4813,9 +4813,6 @@ def get_optimizer(args, trainable_params) -> tuple[str, str, object]: optimizer_type = optimizer_type.lower() if args.fused_backward_pass: - assert ( - optimizer_type == "Adafactor".lower() - ), "fused_backward_pass currently only works with optimizer_type Adafactor / fused_backward_passは現在optimizer_type Adafactorでのみ機能します" assert ( args.gradient_accumulation_steps == 1 ), "fused_backward_pass does not work with gradient_accumulation_steps > 1 / fused_backward_passはgradient_accumulation_steps>1では機能しません" @@ -5059,6 +5056,18 @@ def get_optimizer(args, trainable_params) -> tuple[str, str, object]: optimizer_class = transformers.optimization.Adafactor optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) + elif optimizer_type.lower() == "adamoffload" or optimizer_type.lower() == "nadamoffload": + logger.info(f"use [N]AdamOffload optimizer | {optimizer_kwargs}") + + optimizer_class = torch.optim.Adam + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) + + elif optimizer_type.lower() == "adamwoffload" or optimizer_type.lower() == "nadamwoffload": + logger.info(f"use [N]AdamWOffload optimizer | {optimizer_kwargs}") + + optimizer_class = torch.optim.AdamW # default weight_decay seems to be 0.01 + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) + elif optimizer_type == "AdamW".lower(): logger.info(f"use AdamW optimizer | {optimizer_kwargs}") optimizer_class = torch.optim.AdamW From f583e35673e8aefd6409ba0195123389e71e900b Mon Sep 17 00:00:00 2001 
From: araleza <70412719+araleza@users.noreply.github.com> Date: Sun, 24 Aug 2025 16:25:25 +0100 Subject: [PATCH 2/8] Removed mention of Adafactor from warning message as this situation can apply to other optimizers now too. --- flux_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flux_train.py b/flux_train.py index ad32ebc62..99ca46419 100644 --- a/flux_train.py +++ b/flux_train.py @@ -385,7 +385,7 @@ def train(args): optimizer_eval_fn = lambda: None # dummy function if (args.optimizer_type not in fused_optimizers_supported) and args.full_bf16: - logger.warning("Use of --blockwise_fused_optimizers with Adafactor optimizer prevents stochastic/Kahan weight updates.") + logger.warning("Use of --blockwise_fused_optimizers is preventing stochastic/Kahan weight updates.") else: _, _, optimizer = train_util.get_optimizer(args, trainable_params=params_to_optimize) optimizer_train_fn, optimizer_eval_fn = train_util.get_optimizer_train_eval_fn(optimizer, args) From c7b62f7474c198cb34ec046dab9f1bc12384159f Mon Sep 17 00:00:00 2001 From: araleza <70412719+araleza@users.noreply.github.com> Date: Sun, 24 Aug 2025 16:27:15 +0100 Subject: [PATCH 3/8] Fixed fused_backward_pass error message as it was not accessing the correct member variable --- flux_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flux_train.py b/flux_train.py index 99ca46419..9bb58c0f6 100644 --- a/flux_train.py +++ b/flux_train.py @@ -502,7 +502,7 @@ def train(args): elif args.optimizer_type.lower() == "nadamoffload" or args.optimizer_type.lower() == "nadamwoffload": library.adamw_fused.patch_adamw_offload_fused(optimizer, True) # Nesterov else: - logger.error(f"Optimizer '{args.optimizer}' does not have a --fused_backward_pass implementation available") + logger.error(f"Optimizer '{args.optimizer_type}' does not have a --fused_backward_pass implementation available") for param_group, param_name_group in zip(optimizer.param_groups, param_names): for parameter, param_name in zip(param_group["params"], param_name_group): From 1cf1f2b94bfa082d2801d6ede5004c45bfd95fff Mon Sep 17 00:00:00 2001 From: araleza <70412719+araleza@users.noreply.github.com> Date: Sun, 24 Aug 2025 16:49:34 +0100 Subject: [PATCH 4/8] The optimizer type check in a warning message was incorrect --- flux_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flux_train.py b/flux_train.py index 9bb58c0f6..5228ec134 100644 --- a/flux_train.py +++ b/flux_train.py @@ -384,7 +384,7 @@ def train(args): optimizer_train_fn = lambda: None # dummy function optimizer_eval_fn = lambda: None # dummy function - if (args.optimizer_type not in fused_optimizers_supported) and args.full_bf16: + if (args.optimizer_type in fused_optimizers_supported) and args.full_bf16: logger.warning("Use of --blockwise_fused_optimizers is preventing stochastic/Kahan weight updates.") else: _, _, optimizer = train_util.get_optimizer(args, trainable_params=params_to_optimize) From 5ea1ada2e294d5032567a9ff2b98c630ea847f47 Mon Sep 17 00:00:00 2001 From: araleza <70412719+araleza@users.noreply.github.com> Date: Sun, 31 Aug 2025 12:46:12 +0100 Subject: [PATCH 5/8] Changed cpu storage of exp_avg[_sq] from bf16 to powed/scaled u16 --- library/adamw_fused.py | 98 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 91 insertions(+), 7 deletions(-) diff --git a/library/adamw_fused.py b/library/adamw_fused.py index 2fb99deb6..d94df6542 100644 --- a/library/adamw_fused.py +++ b/library/adamw_fused.py @@ -23,11 +23,27 @@ def 
adamw_offload_step_param(self, p, group): if p.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() + # Tensors with few elements may be more sensitive to quantization + # errors, so keep them in float32 + high_quality = torch.numel(p) <= 4096 + # State Initialization if len(state) == 0: state["step"] = 0 - state['exp_avg'] = torch.zeros_like(p, dtype=torch.bfloat16) - state['exp_avg_sq'] = torch.zeros_like(p, dtype=torch.bfloat16) + + if high_quality: + # Exponential averages stored in f32 format + state['exp_avg'] = torch.zeros_like(p, dtype=torch.float32) + state['exp_avg_sq'] = torch.zeros_like(p, dtype=torch.float32) + else: + # Exponential averages stored in u16 format + state['exp_avg'] = torch.zeros_like(p, dtype=torch.uint16) + state['exp_avg_min'] = 0.0 + state['exp_avg_max'] = 1.0 + + state['exp_avg_sq'] = torch.zeros_like(p, dtype=torch.uint16) + state['exp_avg_sq_min'] = 0.0 + state['exp_avg_sq_max'] = 1.0 state["step"] += 1 @@ -44,8 +60,34 @@ def adamw_offload_step_param(self, p, group): eps_p2: float = math.pow(eps, 2) # Bring state back from CPU - state['exp_avg'] = state['exp_avg'] .to('cuda').to(dtype=torch.float32) - state['exp_avg_sq'] = state['exp_avg_sq'].to('cuda').to(dtype=torch.float32) + + if high_quality: + # These exponential averages are already in float32 format + state['exp_avg'] = state['exp_avg'] .to(p.device) + state['exp_avg_sq'] = state['exp_avg_sq'].to(p.device) + else: + # Unpack these exponential averages from uint16 format + + # A power function was applied to the tensor values, as they are usually + # distributed in an exponential fashion. After the power function was applied, + # the min and max of the results were noted, and then the values were scaled + # to the 0-65535 range for storage. This process is reversed here. + + u16power = 16.0 # This value worked acceptably in testing to spread the values more evenly + + exp_avg_min = state['exp_avg_min'] + exp_avg_max = state['exp_avg_max'] + exp_avg_sq_min = state['exp_avg_sq_min'] + exp_avg_sq_max = state['exp_avg_sq_max'] + + uint16_recreate_a = state['exp_avg'].to(p.device).to(dtype=torch.float32) / 65535.0 * (exp_avg_max - exp_avg_min) + exp_avg_min + state['exp_avg'] = torch.pow(torch.abs(uint16_recreate_a), u16power) * torch.sgn(uint16_recreate_a) + del uint16_recreate_a + + uint16_recreate_a_sq = state['exp_avg_sq'].to(p.device).to(dtype=torch.float32) / 65535.0 * (exp_avg_sq_max - exp_avg_sq_min) + exp_avg_sq_min + state['exp_avg_sq'] = torch.pow(torch.abs(uint16_recreate_a_sq), u16power) * torch.sgn(uint16_recreate_a_sq) + del uint16_recreate_a_sq + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] # Update biased first and second moment estimates @@ -77,9 +119,51 @@ def adamw_offload_step_param(self, p, group): if weight_decay != 0: p_data_fp32.mul_(1 - lr * weight_decay) - # Keep state on CPU - state['exp_avg'] = state['exp_avg'] .to(dtype=torch.bfloat16).to('cpu') - state['exp_avg_sq'] = state['exp_avg_sq'].to(dtype=torch.bfloat16).to('cpu') + if high_quality: + + # These are kept in f32 format between steps + state['exp_avg'] = state['exp_avg'].to('cpu') + state['exp_avg_sq'] = state['exp_avg_sq'].to('cpu') + + else: + + # Compress the exp_avg and exp_avg_sq tensors to cut their size down + # from 32 bit to 16 bit. + # + # A power function is applied to try to linearize the tensor values, as + # they are usually distributed in an exponential fashion. 
It would have + # been preferable to use a log() function, but the input values can be + # negative, so a pow() function is used instead. The 1/16th power was + # chosen fairly arbitrarily, but seemed to distribute the values fairly + # reasonably in some simple tests. + # + # After the power function is applied, the min and max of the resulting + # values are stored, and the values are then scaled to the 0-65535 range + # for storage. + # + # Doing this instead of storing these values as bf16 reduced the L1 + # error between the stored values and the true f32 values by around 90%, + # with a notable increase in output image quality. + + log_exp_avg = torch.pow(torch.abs(state['exp_avg']), 1.0 / u16power) * torch.sgn(state['exp_avg']) + exp_avg_min = torch.min(log_exp_avg) + exp_avg_max = torch.max(log_exp_avg) + state['exp_avg_min'] = exp_avg_min + state['exp_avg_max'] = exp_avg_max + normalized = (log_exp_avg - exp_avg_min) / (exp_avg_max - exp_avg_min) + del log_exp_avg + + state['exp_avg'] = (normalized * 65535.0).clamp(0, 65535).to(dtype=torch.uint16).to('cpu') + + log_exp_avg_sq = torch.pow(torch.abs(state['exp_avg_sq']), 1.0 / u16power) * torch.sgn(state['exp_avg_sq']) + exp_avg_sq_min = torch.min(log_exp_avg_sq) + exp_avg_sq_max = torch.max(log_exp_avg_sq) + state['exp_avg_sq_min'] = exp_avg_sq_min + state['exp_avg_sq_max'] = exp_avg_sq_max + normalized_sq = (log_exp_avg_sq - exp_avg_sq_min) / (exp_avg_sq_max - exp_avg_sq_min) + del log_exp_avg_sq + + state['exp_avg_sq'] = (normalized_sq * 65535.0).clamp(0, 65535).to(dtype=torch.uint16).to('cpu') # Add on gradient update, but not if using kahan summation as the bottom # bits must be restored first. (This update occurs in copy_kahan_() instead) From 657813346b1709303b61fee4d3cec6b93f48cfc5 Mon Sep 17 00:00:00 2001 From: araleza <70412719+araleza@users.noreply.github.com> Date: Sun, 31 Aug 2025 18:51:50 +0100 Subject: [PATCH 6/8] Changed u16 pow() factor from 16.0 to 8.0, seems to better distribute the exp_avg[_sq] values --- library/adamw_fused.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/adamw_fused.py b/library/adamw_fused.py index d94df6542..b439c5d95 100644 --- a/library/adamw_fused.py +++ b/library/adamw_fused.py @@ -73,7 +73,7 @@ def adamw_offload_step_param(self, p, group): # the min and max of the results were noted, and then the values were scaled # to the 0-65535 range for storage. This process is reversed here. - u16power = 16.0 # This value worked acceptably in testing to spread the values more evenly + u16power = 8.0 # This value worked acceptably in testing to spread the values more evenly exp_avg_min = state['exp_avg_min'] exp_avg_max = state['exp_avg_max'] From f6f3d6e34e795efb66253ab86a297def040ae612 Mon Sep 17 00:00:00 2001 From: araleza <70412719+araleza@users.noreply.github.com> Date: Sun, 28 Sep 2025 15:30:02 +0100 Subject: [PATCH 7/8] Now exp_avg[_sq] are stored on cpu in 24 bit format. Also changed some final Flux stages to f32. 
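# Editor's sketch (not part of the patch series): a minimal standalone version of the uint16
# compression described in patches 5 and 6, which this patch replaces with 24-bit storage.
# Values are linearized with a signed 1/8th power, min-max scaled into 0..65535 and stored as
# uint16; unpacking reverses both steps. Assumes a PyTorch build with torch.uint16 casting
# support and ignores the all-equal (max == min) edge case. Names here are illustrative only.

import torch

U16_POWER = 8.0  # patch 6 settled on 8.0 after starting at 16.0

def pack_u16(x: torch.Tensor):
    # The signed power transform spreads exponentially distributed values more evenly
    y = torch.pow(torch.abs(x), 1.0 / U16_POWER) * torch.sgn(x)
    lo, hi = torch.min(y), torch.max(y)
    q = ((y - lo) / (hi - lo) * 65535.0).clamp(0, 65535).to(torch.uint16)
    return q, lo, hi

def unpack_u16(q: torch.Tensor, lo: torch.Tensor, hi: torch.Tensor) -> torch.Tensor:
    y = q.to(torch.float32) / 65535.0 * (hi - lo) + lo
    return torch.pow(torch.abs(y), U16_POWER) * torch.sgn(y)

# Round-trip check on synthetic optimizer-like state
x = torch.randn(1024) * 1e-3
q, lo, hi = pack_u16(x)
print((unpack_u16(q, lo, hi) - x).abs().max())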
--- flux_train.py | 7 ++ library/adamw_fused.py | 148 +++++++++++++++++++---------------- 2 files changed, 74 insertions(+), 81 deletions(-) diff --git a/flux_train.py b/flux_train.py index 5228ec134..14fb5cc81 100644 --- a/flux_train.py +++ b/flux_train.py @@ -454,6 +454,13 @@ def train(args): ), "full_bf16 requires mixed precision='bf16' / full_bf16を使う場合はmixed_precision='bf16'を指定してください。" accelerator.print("enable full bf16 training.") flux.to(weight_dtype) + + # Experimental: some layers have very few weights, and training quality seems + # to increase significantly if these are left in f32 format while training. + if args.fused_backward_pass: + flux.final_layer.linear.to(dtype=torch.float32) # Loses lower bits from some saved files, + flux.img_in .to(dtype=torch.float32) # but most saved models aren't f32/f16 anyway. + if clip_l is not None: clip_l.to(weight_dtype) t5xxl.to(weight_dtype) diff --git a/library/adamw_fused.py b/library/adamw_fused.py index b439c5d95..8419d422f 100644 --- a/library/adamw_fused.py +++ b/library/adamw_fused.py @@ -5,6 +5,53 @@ from library.adafactor_fused import copy_kahan_ +def to_float24_bytes(tensor_f32: torch.Tensor) -> torch.Tensor: + """ + Converts a float32 tensor to a 'float24' representation for storage. + + This is done by taking the 3 most significant bytes of each float32 element. + On a little-endian system, these are the last 3 bytes. + # TODO - Verify this on big-endian systems; note that modern Macs (Intel and Apple Silicon) are little-endian + + Args: + tensor_f32: The input tensor with dtype torch.float32. + + Returns: + A 1D tensor of dtype torch.uint8 containing the packed 'float24' data. + """ + if tensor_f32.dtype != torch.float32: + raise TypeError("Input tensor must be of dtype torch.float32") + + tensor_u8 = tensor_f32.view(torch.uint8) + tensor_u8_reshaped = tensor_u8.view(-1, 4) + tensor_f24_bytes = tensor_u8_reshaped[:, 1:] + return tensor_f24_bytes.flatten() + + +def from_float24_bytes(tensor_f24_u8: torch.Tensor, original_shape: torch.Size) -> torch.Tensor: + """ + Restores a 'float24' byte tensor back to a float32 tensor. + + Args: + tensor_f24_u8: A 1D tensor of dtype torch.uint8 from to_float24_bytes. + original_shape: The shape of the original float32 tensor. + + Returns: + The restored tensor with dtype torch.float32 and the original shape.
+ """ + if tensor_f24_u8.dtype != torch.uint8: + raise TypeError("Input byte tensor must be of dtype torch.uint8") + if tensor_f24_u8.numel() % 3 != 0: + raise ValueError("Input byte tensor size must be a multiple of 3") + + tensor_u8_3bytes = tensor_f24_u8.view(-1, 3) + padding = torch.zeros(tensor_u8_3bytes.shape[0], 1, dtype=torch.uint8, device=tensor_u8_3bytes.device) + tensor_u8_4bytes = torch.cat([padding, tensor_u8_3bytes], dim=1) + tensor_f32_flat = tensor_u8_4bytes.flatten().view(torch.float32) + return tensor_f32_flat.view(original_shape) + + @torch.no_grad() def adamw_offload_step_param(self, p, group): @@ -31,19 +78,10 @@ def adamw_offload_step_param(self, p, group): if len(state) == 0: state["step"] = 0 - if high_quality: - # Exponential averages stored in f32 format - state['exp_avg'] = torch.zeros_like(p, dtype=torch.float32) - state['exp_avg_sq'] = torch.zeros_like(p, dtype=torch.float32) - else: - # Exponential averages stored in u16 format - state['exp_avg'] = torch.zeros_like(p, dtype=torch.uint16) - state['exp_avg_min'] = 0.0 - state['exp_avg_max'] = 1.0 + data_type = torch.float32 if high_quality else torch.uint16 - state['exp_avg_sq'] = torch.zeros_like(p, dtype=torch.uint16) - state['exp_avg_sq_min'] = 0.0 - state['exp_avg_sq_max'] = 1.0 + state['exp_avg'] = torch.zeros_like(p, dtype=data_type) + state['exp_avg_sq'] = torch.zeros_like(p, dtype=data_type) state["step"] += 1 @@ -59,35 +97,20 @@ def adamw_offload_step_param(self, p, group): eps_p2: float = math.pow(eps, 2) - # Bring state back from CPU - - if high_quality: - # These exponential averages are already in float32 format - state['exp_avg'] = state['exp_avg'] .to(p.device) - state['exp_avg_sq'] = state['exp_avg_sq'].to(p.device) - else: - # Unpack these exponential averages from uint16 format + # Bring state back (from CPU, if necessary) - # A power function was applied to the tensor values, as they are usually - # distributed in an exponential fashion. After the power function was applied, - # the min and max of the results were noted, and then the values were scaled - # to the 0-65535 range for storage. This process is reversed here. + # Recover the exp avg states from however they're stored + def unpack_tensor(state, key, target_device): - u16power = 8.0 # This value worked acceptably in testing to spread the values more evenly + # Stored as f24 format? 
+ if state[f'{key}'].dtype == torch.uint8: + return from_float24_bytes(state[f'{key}'].to(target_device), state[f'{key}_shape']) - exp_avg_min = state['exp_avg_min'] - exp_avg_max = state['exp_avg_max'] - exp_avg_sq_min = state['exp_avg_sq_min'] - exp_avg_sq_max = state['exp_avg_sq_max'] - - uint16_recreate_a = state['exp_avg'].to(p.device).to(dtype=torch.float32) / 65535.0 * (exp_avg_max - exp_avg_min) + exp_avg_min - state['exp_avg'] = torch.pow(torch.abs(uint16_recreate_a), u16power) * torch.sgn(uint16_recreate_a) - del uint16_recreate_a - - uint16_recreate_a_sq = state['exp_avg_sq'].to(p.device).to(dtype=torch.float32) / 65535.0 * (exp_avg_sq_max - exp_avg_sq_min) + exp_avg_sq_min - state['exp_avg_sq'] = torch.pow(torch.abs(uint16_recreate_a_sq), u16power) * torch.sgn(uint16_recreate_a_sq) - del uint16_recreate_a_sq + # bf16 / u16 / f32 + return state[f'{key}'].to(target_device).to(dtype=torch.float32) + state['exp_avg'] = unpack_tensor(state, 'exp_avg', p.device) + state['exp_avg_sq'] = unpack_tensor(state, 'exp_avg_sq', p.device) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] # Update biased first and second moment estimates @@ -119,51 +142,14 @@ def adamw_offload_step_param(self, p, group): if weight_decay != 0: p_data_fp32.mul_(1 - lr * weight_decay) - if high_quality: - - # These are kept in f32 format between steps - state['exp_avg'] = state['exp_avg'].to('cpu') - state['exp_avg_sq'] = state['exp_avg_sq'].to('cpu') - - else: + # Reduce the size of large exp_avg and exp_avg_sq tensors to 24-bit, + # and then move them to cpu memory + if not high_quality: + state[f'exp_avg_shape'] = state[f'exp_avg'].shape + state[f'exp_avg'] = to_float24_bytes(state[f'exp_avg']).to('cpu') - # Compress the exp_avg and exp_avg_sq tensors to cut their size down - # from 32 bit to 16 bit. - # - # A power function is applied to try to linearize the tensor values, as - # they are usually distributed in an exponential fashion. It would have - # been preferable to use a log() function, but the input values can be - # negative, so a pow() function is used instead. The 1/16th power was - # chosen fairly arbitrarily, but seemed to distribute the values fairly - # reasonably in some simple tests. - # - # After the power function is applied, the min and max of the resulting - # values are stored, and the values are then scaled to the 0-65535 range - # for storage. - # - # Doing this instead of storing these values as bf16 reduced the L1 - # error between the stored values and the true f32 values by around 90%, - # with a notable increase in output image quality. 
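# Editor's illustration (not part of the patch): the 'float24' path that replaces the block above
# keeps the sign bit, all 8 exponent bits and the top 15 mantissa bits of each little-endian
# float32. A round trip with this patch's own helpers looks like:
#   packed = to_float24_bytes(exp_avg)                    # uint8 tensor, 3 bytes per element
#   restored = from_float24_bytes(packed, exp_avg.shape)  # float32 with the low mantissa byte zeroed
# giving a worst-case relative error of roughly 2**-15, versus roughly 2**-8 for bfloat16 storage.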
- - log_exp_avg = torch.pow(torch.abs(state['exp_avg']), 1.0 / u16power) * torch.sgn(state['exp_avg']) - exp_avg_min = torch.min(log_exp_avg) - exp_avg_max = torch.max(log_exp_avg) - state['exp_avg_min'] = exp_avg_min - state['exp_avg_max'] = exp_avg_max - normalized = (log_exp_avg - exp_avg_min) / (exp_avg_max - exp_avg_min) - del log_exp_avg - - state['exp_avg'] = (normalized * 65535.0).clamp(0, 65535).to(dtype=torch.uint16).to('cpu') - - log_exp_avg_sq = torch.pow(torch.abs(state['exp_avg_sq']), 1.0 / u16power) * torch.sgn(state['exp_avg_sq']) - exp_avg_sq_min = torch.min(log_exp_avg_sq) - exp_avg_sq_max = torch.max(log_exp_avg_sq) - state['exp_avg_sq_min'] = exp_avg_sq_min - state['exp_avg_sq_max'] = exp_avg_sq_max - normalized_sq = (log_exp_avg_sq - exp_avg_sq_min) / (exp_avg_sq_max - exp_avg_sq_min) - del log_exp_avg_sq - - state['exp_avg_sq'] = (normalized_sq * 65535.0).clamp(0, 65535).to(dtype=torch.uint16).to('cpu') + state[f'exp_avg_sq_shape'] = state[f'exp_avg_sq'].shape + state[f'exp_avg_sq'] = to_float24_bytes(state[f'exp_avg_sq']).to('cpu') # Add on gradient update, but not if using kahan summation as the bottom # bits must be restored first. (This update occurs in copy_kahan_() instead) From da17be080ec53b863c6ee2045907e608f5c4ebb3 Mon Sep 17 00:00:00 2001 From: araleza <70412719+araleza@users.noreply.github.com> Date: Sat, 11 Oct 2025 21:52:51 +0100 Subject: [PATCH 8/8] Added Adan offloading optimizer, fp32 params, and 'cautious' updates --- flux_train.py | 24 ++++- library/adamw_fused.py | 6 ++ library/adan_fused.py | 218 +++++++++++++++++++++++++++++++++++++++++ library/flux_models.py | 71 ++++++++++---- library/train_util.py | 6 ++ 5 files changed, 302 insertions(+), 23 deletions(-) create mode 100644 library/adan_fused.py diff --git a/flux_train.py b/flux_train.py index 14fb5cc81..647ffc0c3 100644 --- a/flux_train.py +++ b/flux_train.py @@ -330,7 +330,7 @@ def train(args): # 学習に必要なクラスを準備する accelerator.print("prepare optimizer, data loader etc.") - fused_optimizers_supported = ['adafactor', 'adamoffload', 'nadamoffload', 'adamwoffload', 'nadamwoffload'] + fused_optimizers_supported = ['adafactor', 'adamoffload', 'nadamoffload', 'adamwoffload', 'nadamwoffload', 'adanoffload'] if args.blockwise_fused_optimizers: # fused backward pass: https://pytorch.org/tutorials/intermediate/optimizer_step_in_backward_tutorial.html @@ -458,8 +458,23 @@ def train(args): # Experimental: some layers have very few weights, and training quality seems # to increase significantly if these are left in f32 format while training. if args.fused_backward_pass: - flux.final_layer.linear.to(dtype=torch.float32) # Loses lower bits from some saved files, - flux.img_in .to(dtype=torch.float32) # but most saved models aren't f32/f16 anyway. + + from library.flux_models import MixedLinear + from library.flux_models import RMSNorm + + flux.final_layer.linear.to(dtype=torch.float32) + flux.img_in .to(dtype=torch.float32) + + for m in flux.modules(): + num_params = sum(p.numel() for p in m.parameters()) + + if isinstance(m, MixedLinear) and m.bias is not None: + m.bias.data = m.bias.data.to(torch.float32) + if m.weight.data.numel() < 20000000: # Includes first Linear stage with 18m weights + m.weight.data = m.weight.data.to(torch.float32) + + if isinstance(m, RMSNorm): + m.scale.data = m.scale.data.to(torch.float32) if clip_l is not None: clip_l.to(weight_dtype) @@ -501,6 +516,7 @@ def train(args): # use fused optimizer for backward pass. Only some specific optimizers are supported. 
import library.adafactor_fused import library.adamw_fused + import library.adan_fused if args.optimizer_type.lower() == "adafactor": library.adafactor_fused.patch_adafactor_fused(optimizer) @@ -508,6 +524,8 @@ def train(args): library.adamw_fused.patch_adamw_offload_fused(optimizer, False) elif args.optimizer_type.lower() == "nadamoffload" or args.optimizer_type.lower() == "nadamwoffload": library.adamw_fused.patch_adamw_offload_fused(optimizer, True) # Nesterov + elif args.optimizer_type.lower() == "adanoffload": + library.adan_fused.patch_adan_offload_fused(optimizer, False) # Adan else: logger.error(f"Optimizer '{args.optimizer_type}' does not have a --fused_backward_pass implementation available") diff --git a/library/adamw_fused.py b/library/adamw_fused.py index 8419d422f..cc4cde534 100644 --- a/library/adamw_fused.py +++ b/library/adamw_fused.py @@ -135,6 +135,12 @@ def unpack_tensor(state, key, target_device): lr: float = group['lr'] + # Implement 'cautious optimizer' from https://arxiv.org/pdf/2411.16085 + # The scaling factor - dividing by m.mean() - does not seem to work with parameter + # groups, but it also appears to be an optional step, so it has been removed. + m = (update * grad >= 0).to(grad.dtype) + update = update * m #/ (m.mean() + eps) + # Apply learning rate update.mul_(lr) diff --git a/library/adan_fused.py b/library/adan_fused.py new file mode 100644 index 000000000..3d21ff7db --- /dev/null +++ b/library/adan_fused.py @@ -0,0 +1,218 @@ +import math +import torch + +from library.adafactor_fused import copy_stochastic_ +from library.adafactor_fused import copy_kahan_ + + +# Pack floating point tensors into uint16. Their float32 bytes are interpreted as uint32 +# bytes (not cast to uint32). Since positive floats are in sequential order when interpreted +# as uint32s, the groups of positive and negative floats appear as small ranges in uint32 +# format. The three clumps (negative floats, zeros, postive floats) then have their min/max +# positions noted, and stretched to cover a uint16 range. +def pack_tensor(state, key, support_neg): + + k = state[f'{key}'] + k_uint32_f = torch.abs(k).view(torch.uint32).to(torch.float32) + + min_val, max_val = torch.aminmax(k_uint32_f[k_uint32_f != 0.0]) + + # No support_neg (i.e. input floats are only zero or positive). Outputs values in these uint16 ranges: + # 0 <-- 0.0s + # 1..65535 <-- positive floats + + # support_neg (i.e. input floats can be zero or +/-). Outputs values in these uint16 ranges: + # 0 <-- 0.0s + # 1..32767 <-- positive floats + # 32768 <-- -0.0 ? Not used. 
+ # 32769..65535 <-- negative floats + + range = 32768 if support_neg else 65536 + + k_int32_scale = (k_uint32_f - min_val) * (range - 2) / (max_val - min_val) + 1 # Scale into [1..range] + + packed = torch.where(k > 0, k_int32_scale, 0) # Positive floats and zero + if support_neg: + packed = torch.where(k < 0, k_int32_scale + 32768, packed) # Negative floats + del k_int32_scale + + k_uint16_scale = packed.to(torch.uint16) + + state[f'{key}'] = k_uint16_scale + state[f'{key}_min'] = min_val + state[f'{key}_max'] = max_val + + pass + + +# Recover adan state tensors packed wtih pack_tensor() +def unpack_tensor(state, key, support_neg): + + # uint16 format = packed floats + if state[f'{key}'].dtype == torch.uint16: + packed = state[f'{key}'].to('cuda').to(dtype=torch.float32) + min_val = state[f'{key}_min'] + max_val = state[f'{key}_max'] + + range = 32768.0 if support_neg else 65536.0 + + if support_neg: + pack_merge_signs = torch.where(packed >= 32768, packed - 32768, packed) + else: + pack_merge_signs = packed + upck = (pack_merge_signs - 1) / (range - 2) * (max_val - min_val) + min_val + upck = torch.where(pack_merge_signs == 0, 0, upck) # 0's are special cased + upck = upck.to(torch.uint32) + upck_final_but_no_negs = upck.view(torch.float32) + if support_neg: + upck_final = torch.where(packed >= 32768, -upck_final_but_no_negs, upck_final_but_no_negs) + else: + upck_final = upck_final_but_no_negs + + return upck_final + + # bf16 / f32 + return state[f'{key}'].to('cuda').to(dtype=torch.float32) + + +@torch.no_grad() +def adan_offload_step_param(self, p, group): + + if p.grad is None: + return + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError("This Adan implementation does not support sparse gradients.") + + state = self.state[p] + grad_shape = grad.shape + + p_data_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_data_fp32 = p_data_fp32.float() + + # Tensors with few elements may be more sensitive to quantization + # errors, so keep them in float32 + #global tot_4096, tot_all + high_quality = torch.numel(p) <= 2000000 + + # State Initialization + if len(state) == 0: + state["step"] = 0 + + state['exp_avg'] = torch.zeros_like(p, dtype=torch.float32 if high_quality else torch.bfloat16) + state['exp_avg_sq'] = torch.zeros_like(p, dtype=torch.float32 if high_quality else torch.bfloat16) + state['exp_avg_diff'] = torch.zeros_like(p, dtype=torch.float32 if high_quality else torch.bfloat16) + state['neg_grad_or_diff'] = torch.zeros_like(p, dtype=torch.float32 if high_quality else torch.bfloat16) + else: + pass + + state["step"] += 1 + + #beta1, beta2, beta3 = group['betas'] # Don't have custom class, so beta3 not available + beta1, beta2, beta3 = (0.98, 0.92, 0.99) # Hard coded betas for now + eps = group['eps'] # 1e-8 + weight_decay = group.get('weight_decay', 0.0) # Not currently implemented + + # Bias correction terms + bias_correction1 = 1.0 - math.pow(beta1, state['step']) + bias_correction2 = 1.0 - math.pow(beta2, state['step']) + bias_correction3 = 1.0 - math.pow(beta3, state['step']) + bias_correction3_sqrt = math.sqrt(bias_correction3) + + eps_p2: float = math.pow(eps, 2) + + # Recover the exp avg states from however they're stored + state['exp_avg'] = unpack_tensor(state, 'exp_avg', True) + state['exp_avg_sq'] = unpack_tensor(state, 'exp_avg_sq', False) + state['exp_avg_diff'] = unpack_tensor(state, 'exp_avg_diff', True) + state['neg_grad_or_diff'] = unpack_tensor(state, 'neg_grad_or_diff', 
True) + + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + exp_avg_diff = state['exp_avg_diff'] + neg_grad_or_diff = state['neg_grad_or_diff'] + + # for memory saving, we use `neg_grad_or_diff` + # to get some temp variable in a inplace way + neg_grad_or_diff.add_(grad) + + exp_avg .mul_(beta1).add_(grad, alpha= 1 - beta1) # m_t + exp_avg_diff.mul_(beta2).add_(neg_grad_or_diff, alpha= 1 - beta2) # diff_t + + neg_grad_or_diff.mul_(beta2).add_(grad) + exp_avg_sq .mul_(beta3).addcmul_(neg_grad_or_diff, neg_grad_or_diff, value= 1 - beta3) # n_t + + lr: float = group['lr'] + + denom = (exp_avg_sq.sqrt() / bias_correction3_sqrt).add_(eps) + step_size = lr / bias_correction1 + step_size_diff = lr * beta2 / bias_correction2 + + # todo: weight decay not supported + update = (exp_avg * step_size ) / denom + update += (exp_avg_diff * step_size_diff) / denom + + neg_grad_or_diff.zero_().add_(grad, alpha=-1.0) + + # Just build momentum for first few steps + if state['step'] <= 3: + update.mul_(0.0) + + # Move the optimizer state tensors to main memory + if not high_quality: + + # float32 to uint16 compression, hopefully provides more precision + pack_tensor(state, 'exp_avg', True) + pack_tensor(state, 'exp_avg_sq', False) # Only positive floats + pack_tensor(state, 'exp_avg_diff', True) + + state[f'exp_avg'] = state[f'exp_avg'] .to('cpu') + state[f'exp_avg_sq'] = state[f'exp_avg_sq'] .to('cpu') + state[f'exp_avg_diff'] = state[f'exp_avg_diff'].to('cpu') + + # Neg_grad is always a bfloat16 (stored in a float32) already apparently! So + # can be stored as a bfloat16 exactly. + state[f'neg_grad_or_diff'] = state[f'neg_grad_or_diff'].to(torch.bfloat16).to('cpu') + + # Add on gradient update, but not if using kahan summation as the bottom + # bits must be restored first. (This update occurs in copy_kahan_() instead) + if not self.optimizer.use_kahan_summation: + p_data_fp32.add_(-update) + + if p.dtype == torch.bfloat16: + if self.optimizer.use_kahan_summation: + copy_kahan_(p, p_data_fp32, state, update) + else: + copy_stochastic_(p, p_data_fp32) + elif p.dtype == torch.float16: + p.copy_(p_data_fp32) + + +@torch.no_grad() +def adan_offload_step(self, closure=None): + """ + Performs a single optimization step + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + adan_offload_step_param(self, p, group) + + return loss + + +def patch_adan_offload_fused(optimizer, use_nesterov): + optimizer.use_nesterov = use_nesterov + + optimizer.step_param = adan_offload_step_param.__get__(optimizer) + optimizer.step = adan_offload_step.__get__(optimizer) diff --git a/library/flux_models.py b/library/flux_models.py index d2d7e06c7..1e47d489c 100644 --- a/library/flux_models.py +++ b/library/flux_models.py @@ -543,12 +543,43 @@ def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 10 return embedding +import torch.nn.functional as F + +# A class that supports having the biases have a dtype of float32 +# while the more numerous weights are still in bfloat16 format. 
+ +class MixedLinear(nn.Module): + def __init__(self, in_features, out_features, bias=True): + super().__init__() + # Initialize weights in float32 first, then cast to bfloat16 + weight = torch.empty(out_features, in_features, dtype=torch.float32) + nn.init.kaiming_uniform_(weight, a=5**0.5) + self.weight = nn.Parameter(weight.to(torch.bfloat16)) + + if bias: + bias_param = torch.empty(out_features, dtype=torch.float32) # High precision + fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weight) + bound = 1 / fan_in**0.5 + nn.init.uniform_(bias_param, -bound, bound) + self.bias = nn.Parameter(bias_param) + else: + self.bias = None + + def forward(self, input: torch.Tensor) -> torch.Tensor: + if self.weight.dtype == torch.bfloat16: + weight_fp32 = self.weight.to(torch.float32) + else: + weight_fp32 = self.weight + + return F.linear(input, weight_fp32, self.bias) + + class MLPEmbedder(nn.Module): def __init__(self, in_dim: int, hidden_dim: int): super().__init__() - self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True) + self.in_layer = MixedLinear(in_dim, hidden_dim, bias=True) self.silu = nn.SiLU() - self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True) + self.out_layer = MixedLinear(hidden_dim, hidden_dim, bias=True) self.gradient_checkpointing = False @@ -609,9 +640,9 @@ def __init__(self, dim: int, num_heads: int = 8, qkv_bias: bool = False): self.num_heads = num_heads head_dim = dim // num_heads - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.qkv = MixedLinear(dim, dim * 3, bias=qkv_bias) self.norm = QKNorm(head_dim) - self.proj = nn.Linear(dim, dim) + self.proj = MixedLinear(dim, dim) # this is not called from DoubleStreamBlock/SingleStreamBlock because they uses attention function directly def forward(self, x: Tensor, pe: Tensor) -> Tensor: @@ -635,7 +666,7 @@ def __init__(self, dim: int, double: bool): super().__init__() self.is_double = double self.multiplier = 6 if double else 3 - self.lin = nn.Linear(dim, self.multiplier * dim, bias=True) + self.lin = MixedLinear(dim, self.multiplier * dim, bias=True) def forward(self, vec: Tensor) -> tuple[ModulationOut, ModulationOut | None]: out = self.lin(nn.functional.silu(vec))[:, None, :].chunk(self.multiplier, dim=-1) @@ -659,9 +690,9 @@ def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.img_mlp = nn.Sequential( - nn.Linear(hidden_size, mlp_hidden_dim, bias=True), + MixedLinear(hidden_size, mlp_hidden_dim, bias=True), nn.GELU(approximate="tanh"), - nn.Linear(mlp_hidden_dim, hidden_size, bias=True), + MixedLinear(mlp_hidden_dim, hidden_size, bias=True), ) self.txt_mod = Modulation(hidden_size, double=True) @@ -670,9 +701,9 @@ def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.txt_mlp = nn.Sequential( - nn.Linear(hidden_size, mlp_hidden_dim, bias=True), + MixedLinear(hidden_size, mlp_hidden_dim, bias=True), nn.GELU(approximate="tanh"), - nn.Linear(mlp_hidden_dim, hidden_size, bias=True), + MixedLinear(mlp_hidden_dim, hidden_size, bias=True), ) self.gradient_checkpointing = False @@ -780,9 +811,9 @@ def __init__( self.mlp_hidden_dim = int(hidden_size * mlp_ratio) # qkv and mlp_in - self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim) + self.linear1 = MixedLinear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim) # proj and mlp_out - self.linear2 = nn.Linear(hidden_size + 
self.mlp_hidden_dim, hidden_size) + self.linear2 = MixedLinear(hidden_size + self.mlp_hidden_dim, hidden_size) self.norm = QKNorm(head_dim) @@ -862,8 +893,8 @@ class LastLayer(nn.Module): def __init__(self, hidden_size: int, patch_size: int, out_channels: int): super().__init__() self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) - self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True) - self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True)) + self.linear = MixedLinear(hidden_size, patch_size * patch_size * out_channels, bias=True) + self.adaLN_modulation = nn.Sequential(nn.SiLU(), MixedLinear(hidden_size, 2 * hidden_size, bias=True)) def forward(self, x: Tensor, vec: Tensor) -> Tensor: shift, scale = self.adaLN_modulation(vec).chunk(2, dim=1) @@ -894,11 +925,11 @@ def __init__(self, params: FluxParams): self.hidden_size = params.hidden_size self.num_heads = params.num_heads self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim) - self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=True) + self.img_in = MixedLinear(self.in_channels, self.hidden_size, bias=True) self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size) self.guidance_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) if params.guidance_embed else nn.Identity() - self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size) + self.txt_in = MixedLinear(params.context_in_dim, self.hidden_size) self.double_blocks = nn.ModuleList( [ @@ -1114,11 +1145,11 @@ def __init__(self, params: FluxParams, controlnet_depth=2, controlnet_single_dep self.hidden_size = params.hidden_size self.num_heads = params.num_heads self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim) - self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=True) + self.img_in = MixedLinear(self.in_channels, self.hidden_size, bias=True) self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size) self.guidance_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) if params.guidance_embed else nn.Identity() - self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size) + self.txt_in = MixedLinear(params.context_in_dim, self.hidden_size) self.double_blocks = nn.ModuleList( [ @@ -1151,15 +1182,15 @@ def __init__(self, params: FluxParams, controlnet_depth=2, controlnet_single_dep # add ControlNet blocks self.controlnet_blocks = nn.ModuleList([]) for _ in range(controlnet_depth): - controlnet_block = nn.Linear(self.hidden_size, self.hidden_size) + controlnet_block = MixedLinear(self.hidden_size, self.hidden_size) controlnet_block = zero_module(controlnet_block) self.controlnet_blocks.append(controlnet_block) self.controlnet_blocks_for_single = nn.ModuleList([]) for _ in range(controlnet_single_depth): - controlnet_block = nn.Linear(self.hidden_size, self.hidden_size) + controlnet_block = MixedLinear(self.hidden_size, self.hidden_size) controlnet_block = zero_module(controlnet_block) self.controlnet_blocks_for_single.append(controlnet_block) - self.pos_embed_input = nn.Linear(self.in_channels, self.hidden_size, bias=True) + self.pos_embed_input = MixedLinear(self.in_channels, self.hidden_size, bias=True) self.gradient_checkpointing = False self.input_hint_block = nn.Sequential( nn.Conv2d(3, 16, 3, padding=1), diff --git 
a/library/train_util.py b/library/train_util.py index b29b2cb09..da8e9f819 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -5068,6 +5068,12 @@ def get_optimizer(args, trainable_params) -> tuple[str, str, object]: optimizer_class = torch.optim.AdamW # default weight_decay seems to be 0.01 optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) + elif optimizer_type.lower() == "adanoffload": + logger.info(f"use AdanOffload optimizer | {optimizer_kwargs}") + + optimizer_class = torch.optim.AdamW # todo: can't set beta3 here yet, need a custom Adan class + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) + elif optimizer_type == "AdamW".lower(): logger.info(f"use AdamW optimizer | {optimizer_kwargs}") optimizer_class = torch.optim.AdamW
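# Editor's sketch (not part of the patch series): the 'cautious' masking added to adamw_fused.py
# above, following https://arxiv.org/pdf/2411.16085, zeroes any update component whose sign
# disagrees with the current gradient. A minimal standalone version, with the optional
# 1 / m.mean() rescaling that the patch drops left as a comment:

import torch

def cautious_mask(update: torch.Tensor, grad: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    # Keep only components where the proposed step and the gradient point the same way
    m = (update * grad >= 0).to(grad.dtype)
    return update * m  # optionally: update * m / (m.mean() + eps)

# Component 1's update points against its gradient and is zeroed; components 0 and 2 pass through.
u = torch.tensor([0.1, 0.2, 0.3])
g = torch.tensor([1.0, -1.0, 2.0])
print(cautious_mask(u, g))  # tensor([0.1000, 0.0000, 0.3000])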