qwen_image layercontrol v2

mi804
2026-02-24 15:19:16 +08:00
parent 288bbc7128
commit ee73a29885
10 changed files with 171 additions and 3 deletions


@@ -0,0 +1,43 @@
from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
from modelscope import dataset_snapshot_download
from PIL import Image
import torch

pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Qwen/Qwen-Image-Layered", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image-Layered", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
)
pipe.load_lora(pipe.dit, ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Layered-Control-V2", origin_file_pattern="model.safetensors"))

# Download the example images used below.
dataset_snapshot_download(
    dataset_id="DiffSynth-Studio/example_image_dataset",
    local_dir="./data/example_image_dataset",
    allow_file_pattern="layer_v2/*.png",
)

prompt = "Text 'APRIL'"

# Prompt-guided layer control: generate a layer conditioned on the input image.
input_image = Image.open("data/example_image_dataset/layer_v2/image_1.png").convert("RGBA").resize((1024, 1024))
image = pipe(
    prompt, seed=0,
    height=1024, width=1024,
    layer_input_image=input_image, layer_num=0,
    num_inference_steps=10, cfg_scale=4,
)
image[0].save("image_prompt.png")

# Mask-guided layer control: additionally condition on a mask via context_image.
mask_image = Image.open("data/example_image_dataset/layer_v2/mask_2.png").convert("RGBA").resize((1024, 1024))
input_image = Image.open("data/example_image_dataset/layer_v2/image_2.png").convert("RGBA").resize((1024, 1024))
image = pipe(
    prompt, seed=0,
    height=1024, width=1024,
    layer_input_image=input_image, layer_num=0,
    context_image=mask_image,
    num_inference_steps=10, cfg_scale=1.0,
)
image[0].save("image_mask.png")
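
The pipeline returns a list of images, and the RGBA inputs suggest the saved layers carry an alpha channel, so they can be recomposed with plain PIL. A minimal sketch of compositing the output above onto a background (the white canvas and output filename are assumptions, not part of this commit):

from PIL import Image

# Hypothetical post-processing: paste the generated RGBA layer onto a canvas.
layer = Image.open("image_prompt.png").convert("RGBA")
canvas = Image.new("RGBA", layer.size, (255, 255, 255, 255))  # assumed white background
Image.alpha_composite(canvas, layer).convert("RGB").save("image_composited.jpg")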


@@ -0,0 +1,54 @@
from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
from modelscope import dataset_snapshot_download
from PIL import Image
import torch

# Layered offload config for low-VRAM machines: weights rest on disk,
# are staged on CPU in FP8, moved to GPU in FP8, and computed in bfloat16.
vram_config = {
    "offload_dtype": "disk",
    "offload_device": "disk",
    "onload_dtype": torch.float8_e4m3fn,
    "onload_device": "cpu",
    "preparing_dtype": torch.float8_e4m3fn,
    "preparing_device": "cuda",
    "computation_dtype": torch.bfloat16,
    "computation_device": "cuda",
}
pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Qwen/Qwen-Image-Layered", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", **vram_config),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", **vram_config),
        ModelConfig(model_id="Qwen/Qwen-Image-Layered", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", **vram_config),
    ],
    tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
)
pipe.load_lora(pipe.dit, ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Layered-Control-V2", origin_file_pattern="model.safetensors", **vram_config))

dataset_snapshot_download(
    dataset_id="DiffSynth-Studio/example_image_dataset",
    local_dir="./data/example_image_dataset",
    allow_file_pattern="layer_v2/*.png",
)

prompt = "Text 'APRIL'"

# Prompt-guided layer control.
input_image = Image.open("data/example_image_dataset/layer_v2/image_1.png").convert("RGBA").resize((1024, 1024))
image = pipe(
    prompt, seed=0,
    height=1024, width=1024,
    layer_input_image=input_image, layer_num=0,
    num_inference_steps=10, cfg_scale=4,
)
image[0].save("image_prompt.png")

# Mask-guided layer control via context_image.
mask_image = Image.open("data/example_image_dataset/layer_v2/mask_2.png").convert("RGBA").resize((1024, 1024))
input_image = Image.open("data/example_image_dataset/layer_v2/image_2.png").convert("RGBA").resize((1024, 1024))
image = pipe(
    prompt, seed=0,
    height=1024, width=1024,
    layer_input_image=input_image, layer_num=0,
    context_image=mask_image,
    num_inference_steps=10, cfg_scale=1.0,
)
image[0].save("image_mask.png")


@@ -0,0 +1,20 @@
# Example Dataset: https://modelscope.cn/datasets/DiffSynth-Studio/example_image_dataset/tree/master/layer_v2
accelerate launch examples/qwen_image/model_training/train.py \
  --dataset_base_path data/example_image_dataset/layer_v2 \
  --dataset_metadata_path data/example_image_dataset/layer_v2/metadata_layered_control_v2.json \
  --data_file_keys "image,layer_input_image,context_image" \
  --max_pixels 1048576 \
  --dataset_repeat 50 \
  --model_id_with_origin_paths "Qwen/Qwen-Image-Layered:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image-Layered:vae/diffusion_pytorch_model.safetensors" \
  --learning_rate 1e-4 \
  --num_epochs 5 \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/Qwen-Image-Layered-Control-V2_lora" \
  --lora_base_model "dit" \
  --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \
  --lora_rank 64 \
  --extra_inputs "layer_num,layer_input_image,context_image" \
  --use_gradient_checkpointing \
  --dataset_num_workers 8 \
  --find_unused_parameters
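
The command above expects metadata_layered_control_v2.json to map each sample's fields to files under the dataset root. The exact schema is not shown in this commit; below is a hypothetical record, with field names inferred from --data_file_keys and --extra_inputs:

import json

# Hypothetical metadata record: keys follow --data_file_keys
# ("image,layer_input_image,context_image") and --extra_inputs
# ("layer_num,layer_input_image,context_image"); the real schema may differ.
record = {
    "prompt": "Text 'APRIL'",
    "image": "image_1.png",              # training target
    "layer_input_image": "image_1.png",  # conditioning layer image
    "context_image": "mask_1.png",       # optional mask; may be absent
    "layer_num": 0,
}
with open("metadata_demo.json", "w") as f:
    json.dump([record], f, indent=2)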


@@ -131,6 +131,10 @@ if __name__ == "__main__":
             "image": RouteByType(operator_map=[
                 (str, ToAbsolutePath(args.dataset_base_path) >> LoadImage() >> ImageCropAndResize(args.height, args.width, args.max_pixels, 16, 16)),
                 (list, SequencialProcess(ToAbsolutePath(args.dataset_base_path) >> LoadImage(convert_RGB=False, convert_RGBA=True) >> ImageCropAndResize(args.height, args.width, args.max_pixels, 16, 16))),
             ]),
+            "context_image": RouteByType(operator_map=[
+                (str, ToAbsolutePath(args.dataset_base_path) >> LoadImage() >> ImageCropAndResize(args.height, args.width, args.max_pixels, 16, 16)),
+                (None, lambda x: None),
+            ])
         }
     )
@@ -152,7 +156,7 @@
         fp8_models=args.fp8_models,
         offload_models=args.offload_models,
         task=args.task,
-        device="cpu" if args.initialize_model_on_cpu else accelerator.device,
+        device=accelerator.device,
         zero_cond_t=args.zero_cond_t,
     )
     model_logger = ModelLogger(
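
The new context_image entry mirrors the image route but also accepts records that carry no mask. A minimal sketch of the dispatch this operator_map encodes (RouteByType internals are inferred from the pairs above, so treat this as an assumption):

# Assumed behavior of the RouteByType entry for "context_image":
# a string is resolved to an absolute path, loaded, and crop-resized;
# a missing value (None) passes through so the sample trains without a mask.
def route_context_image(value, load_and_resize):
    if isinstance(value, str):
        return load_and_resize(value)  # ToAbsolutePath >> LoadImage >> ImageCropAndResize
    if value is None:
        return None                    # matches (None, lambda x: None)
    raise TypeError(f"unsupported context_image type: {type(value)!r}")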


@@ -0,0 +1,37 @@
from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
from modelscope import dataset_snapshot_download
from PIL import Image
import torch

pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Qwen/Qwen-Image-Layered", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image-Layered", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
)

# Load the LoRA checkpoint produced by the training command above.
pipe.load_lora(pipe.dit, "models/train/Qwen-Image-Layered-Control-V2_lora/epoch-4.safetensors")

prompt = "Text 'APRIL'"

# Prompt-guided layer control.
input_image = Image.open("data/example_image_dataset/layer_v2/image_1.png").convert("RGBA").resize((1024, 1024))
image = pipe(
    prompt, seed=0,
    height=1024, width=1024,
    layer_input_image=input_image, layer_num=0,
    num_inference_steps=10, cfg_scale=4,
)
image[0].save("image_prompt.png")

# Mask-guided layer control via context_image.
mask_image = Image.open("data/example_image_dataset/layer_v2/mask_2.png").convert("RGBA").resize((1024, 1024))
input_image = Image.open("data/example_image_dataset/layer_v2/image_2.png").convert("RGBA").resize((1024, 1024))
image = pipe(
    prompt, seed=0,
    height=1024, width=1024,
    layer_input_image=input_image, layer_num=0,
    context_image=mask_image,
    num_inference_steps=10, cfg_scale=1.0,
)
image[0].save("image_mask.png")