Flux lora update (#237)

* update flux lora

---------

Co-authored-by: tc2000731 <tc2000731@163.com>
Author: Zhongjie Duan
Date: 2024-10-11 18:41:24 +08:00 (committed by GitHub)
Commit: 22e4ae99e8 (parent: 75ab786afc)
11 changed files with 63 additions and 24 deletions

View File

@@ -10,7 +10,7 @@ class ContinuousODEScheduler():
         self.set_timesteps(num_inference_steps)

-    def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0):
+    def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0, **kwargs):
         ramp = torch.linspace(1-denoising_strength, 1, num_inference_steps)
         min_inv_rho = torch.pow(torch.tensor((self.sigma_min,)), (1 / self.rho))
         max_inv_rho = torch.pow(torch.tensor((self.sigma_max,)), (1 / self.rho))

View File

@@ -38,7 +38,7 @@ class EnhancedDDIMScheduler():
         return alphas_bar

-    def set_timesteps(self, num_inference_steps, denoising_strength=1.0):
+    def set_timesteps(self, num_inference_steps, denoising_strength=1.0, **kwargs):
         # The timesteps are aligned to 999...0, which is different from other implementations,
         # but I think this implementation is more reasonable in theory.
         max_timestep = max(round(self.num_train_timesteps * denoising_strength) - 1, 0)
@@ -99,3 +99,7 @@ class EnhancedDDIMScheduler():
         sqrt_one_minus_alpha_prod = math.sqrt(1 - self.alphas_cumprod[int(timestep.flatten().tolist()[0])])
         target = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
         return target
+
+    def training_weight(self, timestep):
+        return 1.0

View File

@@ -12,11 +12,21 @@ class FlowMatchScheduler():
         self.set_timesteps(num_inference_steps)

-    def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0):
+    def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0, training=False):
         sigma_start = self.sigma_min + (self.sigma_max - self.sigma_min) * denoising_strength
         self.sigmas = torch.linspace(sigma_start, self.sigma_min, num_inference_steps)
         self.sigmas = self.shift * self.sigmas / (1 + (self.shift - 1) * self.sigmas)
-        self.timesteps = self.sigmas * self.num_train_timesteps
+        if training:
+            self.timesteps = torch.linspace(1000, 0, num_inference_steps)
+            # prepare timestep weights
+            x = torch.arange(num_inference_steps, dtype=torch.float32)
+            y = torch.exp(-2 * ((x - num_inference_steps / 2) / num_inference_steps) ** 2)
+            y_shifted = y - y.min()
+            bsmntw_weighing = y_shifted * (num_inference_steps / y_shifted.sum())
+            self.linear_timesteps_weights = bsmntw_weighing
+        else:
+            self.timesteps = self.sigmas * self.num_train_timesteps

     def step(self, model_output, timestep, sample, to_final=False):
@@ -49,3 +59,9 @@ class FlowMatchScheduler():
     def training_target(self, sample, noise, timestep):
         target = noise - sample
         return target
+
+    def training_weight(self, timestep):
+        timestep_id = torch.argmin((self.timesteps - timestep.to(self.timesteps.device)).abs())
+        weights = self.linear_timesteps_weights[timestep_id]
+        return weights
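
For reference, a minimal standalone sketch of this bell-shaped weighting (the 1000-step grid and the sampled timestep below are illustrative; in the trainer this path is reached via `set_timesteps(1000, training=True)`, and the `**kwargs` added to the other schedulers lets that same call be made uniformly):

```
import torch

# Illustrative setup: the training timestep grid and its Gaussian weights.
num_inference_steps = 1000
timesteps = torch.linspace(1000, 0, num_inference_steps)
x = torch.arange(num_inference_steps, dtype=torch.float32)
y = torch.exp(-2 * ((x - num_inference_steps / 2) / num_inference_steps) ** 2)
y_shifted = y - y.min()  # zero weight at the two ends of the schedule
weights = y_shifted * (num_inference_steps / y_shifted.sum())  # mean weight == 1

# Nearest-neighbour lookup, as in training_weight() above.
timestep = torch.tensor(500.0)  # a sampled training timestep
timestep_id = torch.argmin((timesteps - timestep).abs())
print(weights[timestep_id])  # ~1.6: mid-schedule timesteps are weighted up
```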

View File

@@ -32,12 +32,15 @@ class LightningModelForT2ILoRA(pl.LightningModule):
         self.pipe.denoising_model().train()

-    def add_lora_to_model(self, model, lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out"):
+    def add_lora_to_model(self, model, lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian"):
         # Add LoRA to UNet
+        if init_lora_weights == "kaiming":
+            init_lora_weights = True
         lora_config = LoraConfig(
             r=lora_rank,
             lora_alpha=lora_alpha,
-            init_lora_weights="gaussian",
+            init_lora_weights=init_lora_weights,
             target_modules=lora_target_modules.split(","),
         )
         model = inject_adapter_in_model(lora_config, model)
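
A minimal sketch of what this mapping does, using peft (the `ToyAttention` module below is an illustrative stand-in, not part of this repository): `"kaiming"` selects peft's default initialization (`init_lora_weights=True`, Kaiming-uniform on `lora_A`, zeros on `lora_B`), while `"gaussian"` selects peft's normal-distribution initialization.

```
import torch
from peft import LoraConfig, inject_adapter_in_model

class ToyAttention(torch.nn.Module):  # illustrative stand-in for a DiT/UNet block
    def __init__(self):
        super().__init__()
        self.to_q = torch.nn.Linear(8, 8)
        self.to_k = torch.nn.Linear(8, 8)
        self.to_v = torch.nn.Linear(8, 8)
        self.to_out = torch.nn.Linear(8, 8)

init_lora_weights = "kaiming"   # the new CLI choice: "gaussian" or "kaiming"
if init_lora_weights == "kaiming":
    init_lora_weights = True    # peft's default (Kaiming-uniform) init

lora_config = LoraConfig(
    r=16, lora_alpha=16,
    init_lora_weights=init_lora_weights,
    target_modules="to_q,to_k,to_v,to_out".split(","),
)
model = inject_adapter_in_model(lora_config, ToyAttention())
```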
@@ -67,7 +70,8 @@ class LightningModelForT2ILoRA(pl.LightningModule):
             noisy_latents, timestep=timestep, **prompt_emb, **extra_input,
             use_gradient_checkpointing=self.use_gradient_checkpointing
         )
-        loss = torch.nn.functional.mse_loss(noise_pred, training_target)
+        loss = torch.nn.functional.mse_loss(noise_pred.float(), training_target.float())
+        loss = loss * self.pipe.scheduler.training_weight(timestep)

         # Record log
         self.log("train_loss", loss, prog_bar=True)
@@ -179,6 +183,13 @@ def add_general_parsers(parser):
         default=4.0,
         help="The weight of the LoRA update matrices.",
     )
+    parser.add_argument(
+        "--init_lora_weights",
+        type=str,
+        default="kaiming",
+        choices=["gaussian", "kaiming"],
+        help="The initialization method for the LoRA weights.",
+    )
     parser.add_argument(
         "--use_gradient_checkpointing",
         default=False,

View File

@@ -123,7 +123,7 @@ models/FLUX/
 └── model.safetensors.index.json
 ```

-Launch the training task using the following command:
+Launch the training task using the following command (39 GB of VRAM required):

 ```
 CUDA_VISIBLE_DEVICES="0" python examples/train/flux/train_flux_lora.py \
@@ -134,18 +134,20 @@ CUDA_VISIBLE_DEVICES="0" python examples/train/flux/train_flux_lora.py \
   --dataset_path data/dog \
   --output_path ./models \
   --max_epochs 1 \
-  --steps_per_epoch 500 \
+  --steps_per_epoch 100 \
   --height 1024 \
   --width 1024 \
   --center_crop \
   --precision "bf16" \
   --learning_rate 1e-4 \
-  --lora_rank 4 \
-  --lora_alpha 4 \
+  --lora_rank 16 \
+  --lora_alpha 16 \
   --use_gradient_checkpointing \
   --align_to_opensource_format
 ```

+By adding the parameter `--quantize "float8_e4m3fn"`, you can save approximately 10 GB of VRAM.
+
+**`--align_to_opensource_format` means that this script will export the LoRA weights in the open-source format. This format can be loaded in both DiffSynth-Studio and other codebases.**

 For more information about the parameters, please use `python examples/train/flux/train_flux_lora.py -h` to see the details.
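
A hedged sketch of loading the exported file elsewhere; the diffusers usage below is an assumption for illustration (the exact output filename depends on your run), not part of this repository:

```
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.load_lora_weights("path/to/exported_lora.safetensors")  # hypothetical path to the exported LoRA
image = pipe("a photo of a dog", num_inference_steps=30).images[0]
```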

View File

@@ -10,7 +10,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self,
         torch_dtype=torch.float16, pretrained_weights=[],
         learning_rate=1e-4, use_gradient_checkpointing=True,
-        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out",
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="kaiming",
         state_dict_converter=None, quantize = None
     ):
         super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing, state_dict_converter=state_dict_converter)
@@ -27,10 +27,10 @@ class LightningModel(LightningModelForT2ILoRA):
         if quantize is not None:
             self.pipe.dit.quantize()
-        self.pipe.scheduler.set_timesteps(1000)
+        self.pipe.scheduler.set_timesteps(1000, training=True)
         self.freeze_parameters()
-        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)
+        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights)

 def parse_args():
@@ -97,6 +97,7 @@ if __name__ == '__main__':
         lora_rank=args.lora_rank,
         lora_alpha=args.lora_alpha,
         lora_target_modules=args.lora_target_modules,
+        init_lora_weights=args.init_lora_weights,
         state_dict_converter=FluxLoRAConverter.align_to_opensource_format if args.align_to_opensource_format else None,
         quantize={"float8_e4m3fn": torch.float8_e4m3fn}.get(args.quantize, None),
     )
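
Putting the new flags together, the README command can, for example, request Gaussian initialization alongside fp8 quantization (the model-path arguments shown in the README above are omitted here for brevity):

```
CUDA_VISIBLE_DEVICES="0" python examples/train/flux/train_flux_lora.py \
  --dataset_path data/dog \
  --output_path ./models \
  --init_lora_weights "gaussian" \
  --quantize "float8_e4m3fn" \
  --lora_rank 16 \
  --lora_alpha 16 \
  --use_gradient_checkpointing \
  --align_to_opensource_format
```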

View File

@@ -9,7 +9,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self,
         torch_dtype=torch.float16, pretrained_weights=[],
         learning_rate=1e-4, use_gradient_checkpointing=True,
-        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out"
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian",
     ):
         super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
         # Load models
@@ -19,7 +19,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self.pipe.scheduler.set_timesteps(1000)
         self.freeze_parameters()
-        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)
+        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights)

 def parse_args():
@@ -56,6 +56,7 @@ if __name__ == '__main__':
         use_gradient_checkpointing=args.use_gradient_checkpointing,
         lora_rank=args.lora_rank,
         lora_alpha=args.lora_alpha,
+        init_lora_weights=args.init_lora_weights,
         lora_target_modules=args.lora_target_modules
     )
     launch_training_task(model, args)

View File

@@ -9,7 +9,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self,
         torch_dtype=torch.float16, pretrained_weights=[],
         learning_rate=1e-4, use_gradient_checkpointing=True,
-        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out"
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian",
     ):
         super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
         # Load models
@@ -22,7 +22,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self.pipe.vae_encoder.to(torch_dtype)
         self.freeze_parameters()
-        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)
+        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights)

 def parse_args():
@@ -72,6 +72,7 @@ if __name__ == '__main__':
         use_gradient_checkpointing=args.use_gradient_checkpointing,
         lora_rank=args.lora_rank,
         lora_alpha=args.lora_alpha,
+        init_lora_weights=args.init_lora_weights,
         lora_target_modules=args.lora_target_modules
     )
     launch_training_task(model, args)

View File

@@ -9,7 +9,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self,
         torch_dtype=torch.float16, pretrained_weights=[],
         learning_rate=1e-4, use_gradient_checkpointing=True,
-        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out"
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian",
     ):
         super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
         # Load models
@@ -19,7 +19,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self.pipe.scheduler.set_timesteps(1000)
         self.freeze_parameters()
-        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)
+        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights)

 def parse_args():
@@ -51,6 +51,7 @@ if __name__ == '__main__':
         use_gradient_checkpointing=args.use_gradient_checkpointing,
         lora_rank=args.lora_rank,
         lora_alpha=args.lora_alpha,
+        init_lora_weights=args.init_lora_weights,
         lora_target_modules=args.lora_target_modules
     )
     launch_training_task(model, args)

View File

@@ -9,7 +9,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self,
         torch_dtype=torch.float16, pretrained_weights=[],
         learning_rate=1e-4, use_gradient_checkpointing=True,
-        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out"
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian",
     ):
         super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
         # Load models
@@ -19,7 +19,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self.pipe.scheduler.set_timesteps(1000)
         self.freeze_parameters()
-        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)
+        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights)

 def parse_args():
@@ -51,6 +51,7 @@ if __name__ == '__main__':
         use_gradient_checkpointing=args.use_gradient_checkpointing,
         lora_rank=args.lora_rank,
         lora_alpha=args.lora_alpha,
+        init_lora_weights=args.init_lora_weights,
         lora_target_modules=args.lora_target_modules
     )
     launch_training_task(model, args)

View File

@@ -9,7 +9,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self,
         torch_dtype=torch.float16, pretrained_weights=[],
         learning_rate=1e-4, use_gradient_checkpointing=True,
-        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out"
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian",
     ):
         super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
         # Load models
@@ -19,7 +19,7 @@ class LightningModel(LightningModelForT2ILoRA):
         self.pipe.scheduler.set_timesteps(1000)
         self.freeze_parameters()
-        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)
+        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules, init_lora_weights=init_lora_weights)

 def parse_args():
@@ -51,6 +51,7 @@ if __name__ == '__main__':
         use_gradient_checkpointing=args.use_gradient_checkpointing,
         lora_rank=args.lora_rank,
         lora_alpha=args.lora_alpha,
+        init_lora_weights=args.init_lora_weights,
         lora_target_modules=args.lora_target_modules
     )
     launch_training_task(model, args)