From 22e4ae99e88c6e4e57d9bf3e1671e689e206c55b Mon Sep 17 00:00:00 2001
From: Zhongjie Duan <35051019+Artiprocher@users.noreply.github.com>
Date: Fri, 11 Oct 2024 18:41:24 +0800
Subject: [PATCH] Flux lora update (#237)

* update flux lora

---------

Co-authored-by: tc2000731
---
 diffsynth/schedulers/continuous_ode.py        |  2 +-
 diffsynth/schedulers/ddim.py                  |  6 +++++-
 diffsynth/schedulers/flow_match.py            | 20 +++++++++++++++++--
 diffsynth/trainers/text_to_image.py           | 17 +++++++++++++---
 examples/train/README.md                      | 10 ++++++----
 examples/train/flux/train_flux_lora.py        |  7 ++++---
 .../hunyuan_dit/train_hunyuan_dit_lora.py     |  5 +++--
 examples/train/kolors/train_kolors_lora.py    |  5 +++--
 .../train/stable_diffusion/train_sd_lora.py   |  5 +++--
 .../stable_diffusion_3/train_sd3_lora.py      |  5 +++--
 .../stable_diffusion_xl/train_sdxl_lora.py    |  5 +++--
 11 files changed, 63 insertions(+), 24 deletions(-)

diff --git a/diffsynth/schedulers/continuous_ode.py b/diffsynth/schedulers/continuous_ode.py
index e6cd837..c73b9e2 100644
--- a/diffsynth/schedulers/continuous_ode.py
+++ b/diffsynth/schedulers/continuous_ode.py
@@ -10,7 +10,7 @@ class ContinuousODEScheduler():
         self.set_timesteps(num_inference_steps)
 
 
-    def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0):
+    def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0, **kwargs):
         ramp = torch.linspace(1-denoising_strength, 1, num_inference_steps)
         min_inv_rho = torch.pow(torch.tensor((self.sigma_min,)), (1 / self.rho))
         max_inv_rho = torch.pow(torch.tensor((self.sigma_max,)), (1 / self.rho))
diff --git a/diffsynth/schedulers/ddim.py b/diffsynth/schedulers/ddim.py
index d42c9c3..da52496 100644
--- a/diffsynth/schedulers/ddim.py
+++ b/diffsynth/schedulers/ddim.py
@@ -38,7 +38,7 @@ class EnhancedDDIMScheduler():
         return alphas_bar
 
 
-    def set_timesteps(self, num_inference_steps, denoising_strength=1.0):
+    def set_timesteps(self, num_inference_steps, denoising_strength=1.0, **kwargs):
         # The timesteps are aligned to 999...0, which is different from other implementations,
         # but I think this implementation is more reasonable in theory.
         max_timestep = max(round(self.num_train_timesteps * denoising_strength) - 1, 0)
@@ -99,3 +99,7 @@ class EnhancedDDIMScheduler():
         sqrt_one_minus_alpha_prod = math.sqrt(1 - self.alphas_cumprod[int(timestep.flatten().tolist()[0])])
         target = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
         return target
+
+
+    def training_weight(self, timestep):
+        return 1.0
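Taken together, the scheduler diffs above and below give every scheduler a common training interface: `set_timesteps` tolerates extra keyword arguments (so callers can always pass `training=True`), and `training_weight` returns a per-timestep loss weight, with DDIM weighting all timesteps equally. A minimal self-contained sketch of that contract (the class is a hypothetical stand-in, not code from this patch):

```
import torch

class UniformWeightScheduler:
    # Hypothetical stand-in mirroring EnhancedDDIMScheduler's new interface.
    def set_timesteps(self, num_inference_steps, denoising_strength=1.0, **kwargs):
        # **kwargs silently absorbs training=True, so training code can call
        # every scheduler the same way.
        self.timesteps = torch.linspace(999, 0, num_inference_steps)

    def training_weight(self, timestep):
        # DDIM-style schedulers weight every timestep equally.
        return 1.0

scheduler = UniformWeightScheduler()
scheduler.set_timesteps(1000, training=True)  # no TypeError thanks to **kwargs
print(scheduler.training_weight(torch.tensor(500)))  # 1.0
```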
diff --git a/diffsynth/schedulers/flow_match.py b/diffsynth/schedulers/flow_match.py
index 009177d..6678dc5 100644
--- a/diffsynth/schedulers/flow_match.py
+++ b/diffsynth/schedulers/flow_match.py
@@ -12,11 +12,21 @@ class FlowMatchScheduler():
         self.set_timesteps(num_inference_steps)
 
 
-    def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0):
+    def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0, training=False):
         sigma_start = self.sigma_min + (self.sigma_max - self.sigma_min) * denoising_strength
         self.sigmas = torch.linspace(sigma_start, self.sigma_min, num_inference_steps)
         self.sigmas = self.shift * self.sigmas / (1 + (self.shift - 1) * self.sigmas)
-        self.timesteps = self.sigmas * self.num_train_timesteps
+        if training:
+            self.timesteps = torch.linspace(1000, 0, num_inference_steps)
+
+            # prepare timestep weights
+            x = torch.arange(num_inference_steps, dtype=torch.float32)
+            y = torch.exp(-2 * ((x - num_inference_steps / 2) / num_inference_steps) ** 2)
+            y_shifted = y - y.min()
+            bsmntw_weighing = y_shifted * (num_inference_steps / y_shifted.sum())
+            self.linear_timesteps_weights = bsmntw_weighing
+        else:
+            self.timesteps = self.sigmas * self.num_train_timesteps
 
 
     def step(self, model_output, timestep, sample, to_final=False):
@@ -49,3 +59,9 @@ class FlowMatchScheduler():
     def training_target(self, sample, noise, timestep):
         target = noise - sample
         return target
+
+
+    def training_weight(self, timestep):
+        timestep_id = torch.argmin((self.timesteps - timestep.to(self.timesteps.device)).abs())
+        weights = self.linear_timesteps_weights[timestep_id]
+        return weights
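For intuition, the weighting that `set_timesteps(training=True)` precomputes above can be pulled out as a standalone sketch (assuming `num_inference_steps=1000`, matching the trainers below). The weights follow a bell curve over the schedule: mid-schedule timesteps are up-weighted, the extremes are down-weighted (exactly zero at the noisiest timestep), and the normalization keeps the mean weight at 1, so the overall loss scale is unchanged. `training_weight` then returns the weight of the precomputed timestep nearest the one sampled for the batch:

```
import torch

num_inference_steps = 1000
timesteps = torch.linspace(1000, 0, num_inference_steps)

# Bell curve peaking at the middle of the schedule.
x = torch.arange(num_inference_steps, dtype=torch.float32)
y = torch.exp(-2 * ((x - num_inference_steps / 2) / num_inference_steps) ** 2)
y_shifted = y - y.min()                                        # minimum weight becomes 0
weights = y_shifted * (num_inference_steps / y_shifted.sum())  # mean weight becomes 1

# training_weight() looks up the weight of the precomputed timestep closest
# to the timestep sampled for the current batch.
timestep = torch.tensor(437.0)
timestep_id = torch.argmin((timesteps - timestep).abs())
print(float(weights[timestep_id]))  # above 1 mid-schedule, near 0 at the ends
```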
choices=["gaussian", "kaiming"], + help="The initializing method of LoRA weight.", + ) parser.add_argument( "--use_gradient_checkpointing", default=False, diff --git a/examples/train/README.md b/examples/train/README.md index 7848fdc..5c99813 100644 --- a/examples/train/README.md +++ b/examples/train/README.md @@ -123,7 +123,7 @@ models/FLUX/ └── model.safetensors.index.json ``` -Launch the training task using the following command: +Launch the training task using the following command (39G VRAM required): ``` CUDA_VISIBLE_DEVICES="0" python examples/train/flux/train_flux_lora.py \ @@ -134,18 +134,20 @@ CUDA_VISIBLE_DEVICES="0" python examples/train/flux/train_flux_lora.py \ --dataset_path data/dog \ --output_path ./models \ --max_epochs 1 \ - --steps_per_epoch 500 \ + --steps_per_epoch 100 \ --height 1024 \ --width 1024 \ --center_crop \ --precision "bf16" \ --learning_rate 1e-4 \ - --lora_rank 4 \ - --lora_alpha 4 \ + --lora_rank 16 \ + --lora_alpha 16 \ --use_gradient_checkpointing \ --align_to_opensource_format ``` +By adding parameter `--quantize "float8_e4m3fn"`, you can save approximate 10G VRAM. + **`--align_to_opensource_format` means that this script will export the LoRA weights in the opensource format. This format can be loaded in both DiffSynth-Studio and other codebases.** For more information about the parameters, please use `python examples/train/flux/train_flux_lora.py -h` to see the details. diff --git a/examples/train/flux/train_flux_lora.py b/examples/train/flux/train_flux_lora.py index cf77c85..ead67d5 100644 --- a/examples/train/flux/train_flux_lora.py +++ b/examples/train/flux/train_flux_lora.py @@ -10,7 +10,7 @@ class LightningModel(LightningModelForT2ILoRA): self, torch_dtype=torch.float16, pretrained_weights=[], learning_rate=1e-4, use_gradient_checkpointing=True, - lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", + lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="kaiming", state_dict_converter=None, quantize = None ): super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing, state_dict_converter=state_dict_converter) @@ -27,10 +27,10 @@ class LightningModel(LightningModelForT2ILoRA): if quantize is not None: self.pipe.dit.quantize() - self.pipe.scheduler.set_timesteps(1000) + self.pipe.scheduler.set_timesteps(1000, training=True) self.freeze_parameters() - self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules) + self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights) def parse_args(): @@ -97,6 +97,7 @@ if __name__ == '__main__': lora_rank=args.lora_rank, lora_alpha=args.lora_alpha, lora_target_modules=args.lora_target_modules, + init_lora_weights=args.init_lora_weights, state_dict_converter=FluxLoRAConverter.align_to_opensource_format if args.align_to_opensource_format else None, quantize={"float8_e4m3fn": torch.float8_e4m3fn}.get(args.quantize, None), ) diff --git a/examples/train/hunyuan_dit/train_hunyuan_dit_lora.py b/examples/train/hunyuan_dit/train_hunyuan_dit_lora.py index 5823cf4..6ceba42 100644 --- a/examples/train/hunyuan_dit/train_hunyuan_dit_lora.py +++ b/examples/train/hunyuan_dit/train_hunyuan_dit_lora.py @@ -9,7 +9,7 @@ class LightningModel(LightningModelForT2ILoRA): self, torch_dtype=torch.float16, pretrained_weights=[], learning_rate=1e-4, 
diff --git a/examples/train/README.md b/examples/train/README.md
index 7848fdc..5c99813 100644
--- a/examples/train/README.md
+++ b/examples/train/README.md
@@ -123,7 +123,7 @@ models/FLUX/
 └── model.safetensors.index.json
 ```
 
-Launch the training task using the following command:
+Launch the training task using the following command (39 GB of VRAM required):
 
 ```
 CUDA_VISIBLE_DEVICES="0" python examples/train/flux/train_flux_lora.py \
@@ -134,18 +134,20 @@ CUDA_VISIBLE_DEVICES="0" python examples/train/flux/train_flux_lora.py \
   --dataset_path data/dog \
   --output_path ./models \
   --max_epochs 1 \
-  --steps_per_epoch 500 \
+  --steps_per_epoch 100 \
   --height 1024 \
   --width 1024 \
   --center_crop \
   --precision "bf16" \
   --learning_rate 1e-4 \
-  --lora_rank 4 \
-  --lora_alpha 4 \
+  --lora_rank 16 \
+  --lora_alpha 16 \
   --use_gradient_checkpointing \
   --align_to_opensource_format
 ```
 
+By adding the parameter `--quantize "float8_e4m3fn"`, you can save approximately 10 GB of VRAM.
+
 **`--align_to_opensource_format` means that this script will export the LoRA weights in the open-source format. This format can be loaded in both DiffSynth-Studio and other codebases.**
 
 For more information about the parameters, please use `python examples/train/flux/train_flux_lora.py -h` to see the details.
diff --git a/examples/train/flux/train_flux_lora.py b/examples/train/flux/train_flux_lora.py
index cf77c85..ead67d5 100644
--- a/examples/train/flux/train_flux_lora.py
+++ b/examples/train/flux/train_flux_lora.py
@@ -10,7 +10,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self,
         torch_dtype=torch.float16, pretrained_weights=[], learning_rate=1e-4,
         use_gradient_checkpointing=True,
-        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out",
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="kaiming",
         state_dict_converter=None, quantize = None
     ):
         super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing, state_dict_converter=state_dict_converter)
@@ -27,10 +27,10 @@ class LightningModel(LightningModelForT2ILoRA):
         if quantize is not None:
             self.pipe.dit.quantize()
 
-        self.pipe.scheduler.set_timesteps(1000)
+        self.pipe.scheduler.set_timesteps(1000, training=True)
 
         self.freeze_parameters()
-        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)
+        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights)
 
 
 def parse_args():
@@ -97,6 +97,7 @@ if __name__ == '__main__':
         lora_rank=args.lora_rank,
         lora_alpha=args.lora_alpha,
         lora_target_modules=args.lora_target_modules,
+        init_lora_weights=args.init_lora_weights,
         state_dict_converter=FluxLoRAConverter.align_to_opensource_format if args.align_to_opensource_format else None,
         quantize={"float8_e4m3fn": torch.float8_e4m3fn}.get(args.quantize, None),
     )
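The README's VRAM saving above comes from weight storage: `--quantize "float8_e4m3fn"` is mapped to `torch.float8_e4m3fn` in `train_flux_lora.py`, halving per-weight storage relative to bf16. A rough standalone illustration (requires a PyTorch build with float8 dtypes; the exact 10 GB figure depends on the model size):

```
import torch

w = torch.randn(4096, 4096, dtype=torch.bfloat16)
w8 = w.to(torch.float8_e4m3fn)  # cast weight storage to 8-bit float
print(w.element_size(), w8.element_size())  # 2 bytes vs. 1 byte per element
```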
diff --git a/examples/train/hunyuan_dit/train_hunyuan_dit_lora.py b/examples/train/hunyuan_dit/train_hunyuan_dit_lora.py
index 5823cf4..6ceba42 100644
--- a/examples/train/hunyuan_dit/train_hunyuan_dit_lora.py
+++ b/examples/train/hunyuan_dit/train_hunyuan_dit_lora.py
@@ -9,7 +9,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self,
         torch_dtype=torch.float16, pretrained_weights=[], learning_rate=1e-4,
         use_gradient_checkpointing=True,
-        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out"
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian",
     ):
         super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
         # Load models
@@ -19,7 +19,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self.pipe.scheduler.set_timesteps(1000)
 
         self.freeze_parameters()
-        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)
+        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights)
 
 
 def parse_args():
@@ -56,6 +56,7 @@ if __name__ == '__main__':
         use_gradient_checkpointing=args.use_gradient_checkpointing,
         lora_rank=args.lora_rank,
         lora_alpha=args.lora_alpha,
+        init_lora_weights=args.init_lora_weights,
         lora_target_modules=args.lora_target_modules
     )
     launch_training_task(model, args)
diff --git a/examples/train/kolors/train_kolors_lora.py b/examples/train/kolors/train_kolors_lora.py
index 48d1296..120e41d 100644
--- a/examples/train/kolors/train_kolors_lora.py
+++ b/examples/train/kolors/train_kolors_lora.py
@@ -9,7 +9,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self,
         torch_dtype=torch.float16, pretrained_weights=[], learning_rate=1e-4,
         use_gradient_checkpointing=True,
-        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out"
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian",
     ):
         super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
         # Load models
@@ -22,7 +22,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self.pipe.vae_encoder.to(torch_dtype)
 
         self.freeze_parameters()
-        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)
+        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights)
 
 
 def parse_args():
@@ -72,6 +72,7 @@ if __name__ == '__main__':
         use_gradient_checkpointing=args.use_gradient_checkpointing,
         lora_rank=args.lora_rank,
         lora_alpha=args.lora_alpha,
+        init_lora_weights=args.init_lora_weights,
         lora_target_modules=args.lora_target_modules
     )
     launch_training_task(model, args)
diff --git a/examples/train/stable_diffusion/train_sd_lora.py b/examples/train/stable_diffusion/train_sd_lora.py
index 6227534..8dcaf7a 100644
--- a/examples/train/stable_diffusion/train_sd_lora.py
+++ b/examples/train/stable_diffusion/train_sd_lora.py
@@ -9,7 +9,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self,
         torch_dtype=torch.float16, pretrained_weights=[], learning_rate=1e-4,
         use_gradient_checkpointing=True,
-        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out"
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian",
     ):
         super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
         # Load models
@@ -19,7 +19,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self.pipe.scheduler.set_timesteps(1000)
 
         self.freeze_parameters()
-        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)
+        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights)
 
 
 def parse_args():
@@ -51,6 +51,7 @@ if __name__ == '__main__':
         use_gradient_checkpointing=args.use_gradient_checkpointing,
         lora_rank=args.lora_rank,
         lora_alpha=args.lora_alpha,
+        init_lora_weights=args.init_lora_weights,
         lora_target_modules=args.lora_target_modules
     )
     launch_training_task(model, args)
diff --git a/examples/train/stable_diffusion_3/train_sd3_lora.py b/examples/train/stable_diffusion_3/train_sd3_lora.py
index 71d6e56..b4ce017 100644
--- a/examples/train/stable_diffusion_3/train_sd3_lora.py
+++ b/examples/train/stable_diffusion_3/train_sd3_lora.py
@@ -9,7 +9,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self,
         torch_dtype=torch.float16, pretrained_weights=[], learning_rate=1e-4,
         use_gradient_checkpointing=True,
-        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out"
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian",
     ):
         super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
         # Load models
@@ -19,7 +19,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self.pipe.scheduler.set_timesteps(1000)
 
         self.freeze_parameters()
-        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)
+        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights)
 
 
 def parse_args():
@@ -51,6 +51,7 @@ if __name__ == '__main__':
         use_gradient_checkpointing=args.use_gradient_checkpointing,
         lora_rank=args.lora_rank,
         lora_alpha=args.lora_alpha,
+        init_lora_weights=args.init_lora_weights,
         lora_target_modules=args.lora_target_modules
     )
     launch_training_task(model, args)
diff --git a/examples/train/stable_diffusion_xl/train_sdxl_lora.py b/examples/train/stable_diffusion_xl/train_sdxl_lora.py
index 40abef4..69ca71d 100644
--- a/examples/train/stable_diffusion_xl/train_sdxl_lora.py
+++ b/examples/train/stable_diffusion_xl/train_sdxl_lora.py
@@ -9,7 +9,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self,
         torch_dtype=torch.float16, pretrained_weights=[], learning_rate=1e-4,
         use_gradient_checkpointing=True,
-        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out"
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian",
     ):
         super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
         # Load models
@@ -19,7 +19,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self.pipe.scheduler.set_timesteps(1000)
 
         self.freeze_parameters()
-        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)
+        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights)
 
 
 def parse_args():
@@ -51,6 +51,7 @@ if __name__ == '__main__':
         use_gradient_checkpointing=args.use_gradient_checkpointing,
         lora_rank=args.lora_rank,
         lora_alpha=args.lora_alpha,
+        init_lora_weights=args.init_lora_weights,
         lora_target_modules=args.lora_target_modules
     )
     launch_training_task(model, args)