Mirror of https://github.com/modelscope/DiffSynth-Studio.git (synced 2026-03-24 01:48:13 +00:00)
support flux training
examples/train/flux/train_flux_lora.py (new file, 77 lines)
@@ -0,0 +1,77 @@
from diffsynth import ModelManager, FluxImagePipeline
from diffsynth.trainers.text_to_image import LightningModelForT2ILoRA, add_general_parsers, launch_training_task
import torch, os, argparse
os.environ["TOKENIZERS_PARALLELISM"] = "True"


class LightningModel(LightningModelForT2ILoRA):
    def __init__(
        self,
        torch_dtype=torch.float16, pretrained_weights=[],
        learning_rate=1e-4, use_gradient_checkpointing=True,
        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out"
    ):
        super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
        # Load the pretrained base models (text encoders, DiT, VAE) and assemble the FLUX pipeline.
        model_manager = ModelManager(torch_dtype=torch_dtype, device=self.device)
        model_manager.load_models(pretrained_weights)
        self.pipe = FluxImagePipeline.from_model_manager(model_manager)
        self.pipe.scheduler.set_timesteps(1000)

        # Freeze every base parameter, then attach trainable LoRA layers to the denoising model.
        self.freeze_parameters()
        self.add_lora_to_model(self.pipe.denoising_model(), lora_rank=lora_rank, lora_alpha=lora_alpha, lora_target_modules=lora_target_modules)


def parse_args():
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_text_encoder_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained text encoder model. For example, `models/FLUX/FLUX.1-dev/text_encoder/model.safetensors`.",
    )
    parser.add_argument(
        "--pretrained_text_encoder_2_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained T5 text encoder model. For example, `models/FLUX/FLUX.1-dev/text_encoder_2`.",
    )
    parser.add_argument(
        "--pretrained_dit_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained DiT model. For example, `models/FLUX/FLUX.1-dev/flux1-dev.safetensors`.",
    )
    parser.add_argument(
        "--pretrained_vae_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained VAE model. For example, `models/FLUX/FLUX.1-dev/ae.safetensors`.",
    )
    parser.add_argument(
        "--lora_target_modules",
        type=str,
        default="a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp",
        help="Layers with LoRA modules.",
    )
    parser = add_general_parsers(parser)
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    model = LightningModel(
        torch_dtype={"32": torch.float32, "bf16": torch.bfloat16}.get(args.precision, torch.float16),
        pretrained_weights=[args.pretrained_text_encoder_path, args.pretrained_text_encoder_2_path, args.pretrained_dit_path, args.pretrained_vae_path],
        learning_rate=args.learning_rate,
        use_gradient_checkpointing=args.use_gradient_checkpointing,
        lora_rank=args.lora_rank,
        lora_alpha=args.lora_alpha,
        lora_target_modules=args.lora_target_modules
    )
    launch_training_task(model, args)
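
The script is driven entirely by command-line flags. A launch might look like the following; the four model-path flags and --lora_target_modules are defined above, while --lora_rank, --lora_alpha, and --precision are read from arguments supplied by add_general_parsers, so treat the spelling of those general options (and any dataset or output flags it adds) as assumptions rather than confirmed names:

python examples/train/flux/train_flux_lora.py \
  --pretrained_text_encoder_path models/FLUX/FLUX.1-dev/text_encoder/model.safetensors \
  --pretrained_text_encoder_2_path models/FLUX/FLUX.1-dev/text_encoder_2 \
  --pretrained_dit_path models/FLUX/FLUX.1-dev/flux1-dev.safetensors \
  --pretrained_vae_path models/FLUX/FLUX.1-dev/ae.safetensors \
  --lora_rank 4 --lora_alpha 4 --precision bf16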
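
freeze_parameters and add_lora_to_model are inherited from LightningModelForT2ILoRA, whose implementation is not part of this diff. As a rough mental model only, name-based LoRA injection in the style of --lora_target_modules typically works like the minimal sketch below; LinearWithLoRA and inject_lora are illustrative names, not DiffSynth APIs, and device/dtype handling is omitted:

import torch
from torch import nn

class LinearWithLoRA(nn.Module):
    # Wraps a frozen nn.Linear with a trainable low-rank residual branch.
    def __init__(self, base: nn.Linear, rank: int = 4, alpha: int = 4):
        super().__init__()
        self.base = base
        self.base.requires_grad_(False)     # keep the pretrained weight frozen
        self.lora_a = nn.Linear(base.in_features, rank, bias=False)
        self.lora_b = nn.Linear(rank, base.out_features, bias=False)
        nn.init.zeros_(self.lora_b.weight)  # the update starts at zero
        self.scale = alpha / rank

    def forward(self, x):
        return self.base(x) + self.scale * self.lora_b(self.lora_a(x))

def inject_lora(model: nn.Module, target_modules: str, rank: int = 4, alpha: int = 4):
    # Replace every nn.Linear whose qualified name ends with one of the
    # comma-separated suffixes (the format --lora_target_modules uses).
    suffixes = target_modules.split(",")
    for parent_name, parent in list(model.named_modules()):
        for child_name, child in list(parent.named_children()):
            full_name = f"{parent_name}.{child_name}" if parent_name else child_name
            if isinstance(child, nn.Linear) and any(full_name.endswith(s) for s in suffixes):
                setattr(parent, child_name, LinearWithLoRA(child, rank, alpha))

Because lora_b is initialized to zero, a wrapped layer initially reproduces the frozen base output exactly, so training starts from the pretrained behavior and only the low-rank branches receive gradients.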
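
Once training produces a LoRA checkpoint, it can be applied at inference time. A minimal sketch, assuming ModelManager.load_lora works for FLUX the way DiffSynth-Studio's other LoRA examples use it, and with a hypothetical checkpoint path:

import torch
from diffsynth import ModelManager, FluxImagePipeline

model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cuda")
model_manager.load_models([
    "models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
    "models/FLUX/FLUX.1-dev/text_encoder_2",
    "models/FLUX/FLUX.1-dev/flux1-dev.safetensors",
    "models/FLUX/FLUX.1-dev/ae.safetensors",
])
model_manager.load_lora("models/lora/flux_lora.safetensors", lora_alpha=1.0)  # hypothetical path
pipe = FluxImagePipeline.from_model_manager(model_manager)
torch.manual_seed(0)
image = pipe(prompt="a photo of a cat")
image.save("image.png")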