From ba421a9ab9ba407ebf95f3f45860bf952a3c08e2 Mon Sep 17 00:00:00 2001
From: Artiprocher
Date: Fri, 4 Jul 2025 14:26:33 +0800
Subject: [PATCH] value control

---
 .../model_training/train_value_controller.py | 120 ++++++++++++++++++
 1 file changed, 120 insertions(+)
 create mode 100644 examples/flux/model_training/train_value_controller.py

diff --git a/examples/flux/model_training/train_value_controller.py b/examples/flux/model_training/train_value_controller.py
new file mode 100644
index 0000000..da1ff97
--- /dev/null
+++ b/examples/flux/model_training/train_value_controller.py
@@ -0,0 +1,120 @@
+import torch, os, json
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from diffsynth.trainers.utils import DiffusionTrainingModule, ImageDataset, ModelLogger, launch_training_task, flux_parser
+from diffsynth.models.lora import FluxLoRAConverter
+from diffsynth.models.flux_value_control import SingleValueEncoder, MultiValueEncoder
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+
+
+class FluxTrainingModule(DiffusionTrainingModule):
+    def __init__(
+        self,
+        model_paths=None, model_id_with_origin_paths=None,
+        trainable_models=None,
+        lora_base_model=None, lora_target_modules="a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp", lora_rank=32,
+        use_gradient_checkpointing=True,
+        use_gradient_checkpointing_offload=False,
+        extra_inputs=None,
+    ):
+        super().__init__()
+        # Load models
+        model_configs = []
+        if model_paths is not None:
+            model_paths = json.loads(model_paths)
+            model_configs += [ModelConfig(path=path) for path in model_paths]
+        if model_id_with_origin_paths is not None:
+            model_id_with_origin_paths = model_id_with_origin_paths.split(",")
+            model_configs += [ModelConfig(model_id=i.split(":")[0], origin_file_pattern=i.split(":")[1]) for i in model_id_with_origin_paths]
+        self.pipe = FluxImagePipeline.from_pretrained(torch_dtype=torch.bfloat16, device="cpu", model_configs=model_configs)
+
+        self.pipe.value_controller = MultiValueEncoder(encoders=[SingleValueEncoder(), SingleValueEncoder(), SingleValueEncoder(), SingleValueEncoder()]).to(dtype=torch.bfloat16)
+
+        # Reset training scheduler
+        self.pipe.scheduler.set_timesteps(1000, training=True)
+
+        # Freeze untrainable models
+        self.pipe.freeze_except([] if trainable_models is None else trainable_models.split(","))
+
+        # Add LoRA to the base models
+        if lora_base_model is not None:
+            model = self.add_lora_to_model(
+                getattr(self.pipe, lora_base_model),
+                target_modules=lora_target_modules.split(","),
+                lora_rank=lora_rank
+            )
+            setattr(self.pipe, lora_base_model, model)
+
+        # Store other configs
+        self.use_gradient_checkpointing = use_gradient_checkpointing
+        self.use_gradient_checkpointing_offload = use_gradient_checkpointing_offload
+        self.extra_inputs = extra_inputs.split(",") if extra_inputs is not None else []
+
+
+    def forward_preprocess(self, data):
+        # CFG-sensitive parameters
+        inputs_posi = {"prompt": data["prompt"]}
+        inputs_nega = {}
+
+        # CFG-insensitive parameters
+        inputs_shared = {
+            # Fill in these input parameters as you would
+            # when running this pipeline for inference.
+            "input_image": data["image"],
+            "height": data["image"].size[1],
+            "width": data["image"].size[0],
+            # Do not modify the following parameters
+            # unless you clearly understand the consequences.
+            "cfg_scale": 1,
+            "embedded_guidance": 1,
+            "t5_sequence_length": 512,
+            "tiled": False,
+            "rand_device": self.pipe.device,
+            "use_gradient_checkpointing": self.use_gradient_checkpointing,
+            "use_gradient_checkpointing_offload": self.use_gradient_checkpointing_offload,
+        }
+
+        # Extra inputs
+        for extra_input in self.extra_inputs:
+            inputs_shared[extra_input] = data[extra_input]
+
+        # Pipeline units will automatically process the input parameters.
+        for unit in self.pipe.units:
+            inputs_shared, inputs_posi, inputs_nega = self.pipe.unit_runner(unit, self.pipe, inputs_shared, inputs_posi, inputs_nega)
+        return {**inputs_shared, **inputs_posi}
+
+
+    def forward(self, data, inputs=None):
+        if inputs is None: inputs = self.forward_preprocess(data)
+        models = {name: getattr(self.pipe, name) for name in self.pipe.in_iteration_models}
+        loss = self.pipe.training_loss(**models, **inputs)
+        return loss
+
+
+
+if __name__ == "__main__":
+    parser = flux_parser()
+    args = parser.parse_args()
+    dataset = ImageDataset(args=args)
+    model = FluxTrainingModule(
+        model_paths=args.model_paths,
+        model_id_with_origin_paths=args.model_id_with_origin_paths,
+        trainable_models=args.trainable_models,
+        lora_base_model=args.lora_base_model,
+        lora_target_modules=args.lora_target_modules,
+        lora_rank=args.lora_rank,
+        use_gradient_checkpointing_offload=args.use_gradient_checkpointing_offload,
+        extra_inputs=args.extra_inputs,
+    )
+    model_logger = ModelLogger(
+        args.output_path,
+        remove_prefix_in_ckpt=args.remove_prefix_in_ckpt,
+        state_dict_converter=FluxLoRAConverter.align_to_opensource_format if args.align_to_opensource_format else lambda x:x,
+    )
+    optimizer = torch.optim.AdamW(model.trainable_modules(), lr=args.learning_rate)
+    scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
+    launch_training_task(
+        dataset, model, model_logger, optimizer, scheduler,
+        num_epochs=args.num_epochs,
+        gradient_accumulation_steps=args.gradient_accumulation_steps,
+    )
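Usage note: below is a minimal sketch of how a single training sample is expected to flow through FluxTrainingModule, based on forward_preprocess above. A sample must provide "prompt", a PIL "image", and one entry per name listed in extra_inputs. The model paths and the "value_control" key are illustrative assumptions, not names defined by this patch; the real extra-input keys must match whatever the dataset and the command-line flags actually provide.

# Hedged sketch: placeholder model paths and a hypothetical "value_control" extra input.
from PIL import Image

module = FluxTrainingModule(
    model_paths='["models/flux1-dev.safetensors", "models/text_encoder.safetensors", "models/ae.safetensors"]',  # placeholder paths, parsed as a JSON list
    trainable_models="value_controller",   # train only the value controller added by this patch
    extra_inputs="value_control",          # hypothetical key copied from the sample into inputs_shared
)
data = {
    "prompt": "a photo of a cat",
    "image": Image.open("cat.jpg").convert("RGB"),   # PIL image; .size supplies width and height
    "value_control": [0.5, 0.5, 0.5, 0.5],           # hypothetical: one value per SingleValueEncoder
}
loss = module(data)   # runs forward_preprocess, then pipe.training_loss on the in-iteration models
loss.backward()

In the actual script these constructor arguments come from flux_parser() flags, and launch_training_task drives the epoch loop, optimizer stepping, and checkpoint saving through ModelLogger.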