mirror of https://github.com/modelscope/DiffSynth-Studio.git (synced 2026-03-19 06:48:12 +00:00)
support qwen-image-edit-2511
examples/qwen_image/model_inference/Qwen-Image-Edit-2511.py (new file, 44 lines)
@@ -0,0 +1,44 @@
from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
from modelscope import dataset_snapshot_download
from PIL import Image
import torch

pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Qwen/Qwen-Image-Edit-2511", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
)

dataset_snapshot_download(
    "DiffSynth-Studio/example_image_dataset",
    allow_file_pattern="qwen_image_edit/*",
    local_dir="data/example_image_dataset",
)

prompt = "生成这两个人的合影"  # "Generate a group photo of these two people."
edit_image = [
    Image.open("data/example_image_dataset/qwen_image_edit/image1.jpg"),
    Image.open("data/example_image_dataset/qwen_image_edit/image2.jpg"),
]
image = pipe(
    prompt,
    edit_image=edit_image,
    seed=1,
    num_inference_steps=40,
    height=1152,
    width=896,
    edit_image_auto_resize=True,
    zero_cond_t=True,  # A special parameter introduced by Qwen-Image-Edit-2511.
)
image.save("image.jpg")

# Qwen-Image-Edit-2511 is a multi-image editing model.
# Pass `edit_image` as a list, even if the input contains only one image:
# edit_image = [Image.open("image.jpg")]
# Do not pass the image directly:
# edit_image = Image.open("image.jpg")
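As the closing comments stress, `edit_image` always takes a list, even for a single image. A minimal single-image sketch follows; the prompt and the "input.jpg" path are hypothetical placeholders, not part of the example above:

# Single-image editing: the image is still wrapped in a list.
# "input.jpg" and the prompt below are placeholders for illustration.
single = [Image.open("input.jpg")]
image = pipe(
    "Make the sky sunset-colored.",
    edit_image=single,
    seed=1,
    num_inference_steps=40,
    height=1024,
    width=1024,
    edit_image_auto_resize=True,
    zero_cond_t=True,  # keep enabled for Qwen-Image-Edit-2511
)
image.save("single_edit.jpg")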
@@ -0,0 +1,54 @@
from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
from modelscope import dataset_snapshot_download
from PIL import Image
import torch

# Layered offload: weights rest on disk, are staged on the CPU in FP8,
# and are upcast to bfloat16 on the GPU only for computation.
vram_config = {
    "offload_dtype": "disk",
    "offload_device": "disk",
    "onload_dtype": torch.float8_e4m3fn,
    "onload_device": "cpu",
    "preparing_dtype": torch.float8_e4m3fn,
    "preparing_device": "cuda",
    "computation_dtype": torch.bfloat16,
    "computation_device": "cuda",
}
pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Qwen/Qwen-Image-Edit-2511", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", **vram_config),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", **vram_config),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", **vram_config),
    ],
    processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
)

dataset_snapshot_download(
    "DiffSynth-Studio/example_image_dataset",
    allow_file_pattern="qwen_image_edit/*",
    local_dir="data/example_image_dataset",
)

prompt = "生成这两个人的合影"  # "Generate a group photo of these two people."
edit_image = [
    Image.open("data/example_image_dataset/qwen_image_edit/image1.jpg"),
    Image.open("data/example_image_dataset/qwen_image_edit/image2.jpg"),
]
image = pipe(
    prompt,
    edit_image=edit_image,
    seed=1,
    num_inference_steps=40,
    height=1152,
    width=896,
    edit_image_auto_resize=True,
    zero_cond_t=True,  # A special parameter introduced by Qwen-Image-Edit-2511.
)
image.save("image.jpg")

# Qwen-Image-Edit-2511 is a multi-image editing model.
# Pass `edit_image` as a list, even if the input contains only one image:
# edit_image = [Image.open("image.jpg")]
# Do not pass the image directly:
# edit_image = Image.open("image.jpg")
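To see what the offload configuration buys, a minimal check with the standard PyTorch memory API can be appended after image.save("image.jpg") in the script above:

# Peak GPU memory observed during the run (standard PyTorch API).
peak_gib = torch.cuda.max_memory_allocated() / 1024**3
print(f"peak VRAM during inference: {peak_gib:.2f} GiB")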
@@ -0,0 +1,16 @@
accelerate launch --config_file examples/qwen_image/model_training/full/accelerate_config_zero2offload.yaml examples/qwen_image/model_training/train.py \
  --dataset_base_path data/example_image_dataset \
  --dataset_metadata_path data/example_image_dataset/metadata_qwen_imgae_edit_multi.json \
  --data_file_keys "image,edit_image" \
  --extra_inputs "edit_image" \
  --max_pixels 1048576 \
  --dataset_repeat 50 \
  --model_id_with_origin_paths "Qwen/Qwen-Image-Edit-2511:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
  --learning_rate 1e-5 \
  --num_epochs 2 \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/Qwen-Image-Edit-2511_full" \
  --trainable_models "dit" \
  --use_gradient_checkpointing \
  --find_unused_parameters \
  --zero_cond_t  # A special parameter introduced by Qwen-Image-Edit-2511. Please enable it for this model.
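This command (like the LoRA variant below) reads prompts and image paths from metadata_qwen_imgae_edit_multi.json in the example dataset, which ships the authoritative file. Purely as a hypothetical illustration of the shape implied by --data_file_keys "image,edit_image", a record might look like:

# Hypothetical sketch only: the key names follow --data_file_keys, but the
# real schema is whatever the example dataset's metadata file defines.
import json

records = [
    {
        "image": "qwen_image_edit/target.jpg",  # assumed: training target image
        "prompt": "生成这两个人的合影",  # "Generate a group photo of these two people."
        "edit_image": "qwen_image_edit/image1.jpg,qwen_image_edit/image2.jpg",  # assumed multi-image encoding
    },
]
with open("metadata_example.json", "w", encoding="utf-8") as f:
    json.dump(records, f, ensure_ascii=False, indent=2)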
@@ -0,0 +1,19 @@
accelerate launch examples/qwen_image/model_training/train.py \
  --dataset_base_path data/example_image_dataset \
  --dataset_metadata_path data/example_image_dataset/metadata_qwen_imgae_edit_multi.json \
  --data_file_keys "image,edit_image" \
  --extra_inputs "edit_image" \
  --max_pixels 1048576 \
  --dataset_repeat 50 \
  --model_id_with_origin_paths "Qwen/Qwen-Image-Edit-2511:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
  --learning_rate 1e-4 \
  --num_epochs 5 \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/Qwen-Image-Edit-2511_lora" \
  --lora_base_model "dit" \
  --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \
  --lora_rank 32 \
  --use_gradient_checkpointing \
  --dataset_num_workers 8 \
  --find_unused_parameters \
  --zero_cond_t  # A special parameter introduced by Qwen-Image-Edit-2511. Please enable it for this model.
@@ -20,6 +20,7 @@ class QwenImageTrainingModule(DiffusionTrainingModule):
         offload_models=None,
         device="cpu",
         task="sft",
+        zero_cond_t=False,
     ):
         super().__init__()
         # Load models
@@ -43,6 +44,7 @@ class QwenImageTrainingModule(DiffusionTrainingModule):
         self.extra_inputs = extra_inputs.split(",") if extra_inputs is not None else []
         self.fp8_models = fp8_models
         self.task = task
+        self.zero_cond_t = zero_cond_t
         self.task_to_loss = {
             "sft:data_process": lambda pipe, *args: args,
             "direct_distill:data_process": lambda pipe, *args: args,
@@ -68,6 +70,7 @@ class QwenImageTrainingModule(DiffusionTrainingModule):
             "use_gradient_checkpointing": self.use_gradient_checkpointing,
             "use_gradient_checkpointing_offload": self.use_gradient_checkpointing_offload,
             "edit_image_auto_resize": True,
+            "zero_cond_t": self.zero_cond_t,
         }
         inputs_shared = self.parse_extra_inputs(data, self.extra_inputs, inputs_shared)
         return inputs_shared, inputs_posi, inputs_nega
@@ -87,6 +90,7 @@ def qwen_image_parser():
     parser = add_image_size_config(parser)
     parser.add_argument("--tokenizer_path", type=str, default=None, help="Path to tokenizer.")
     parser.add_argument("--processor_path", type=str, default=None, help="Path to the processor. If provided, the processor will be used for image editing.")
+    parser.add_argument("--zero_cond_t", default=False, action="store_true", help="A special parameter introduced by Qwen-Image-Edit-2511. Please enable it for this model.")
     return parser
@@ -130,6 +134,7 @@ if __name__ == "__main__":
         offload_models=args.offload_models,
         task=args.task,
         device=accelerator.device,
+        zero_cond_t=args.zero_cond_t,
     )
     model_logger = ModelLogger(
         args.output_path,
@@ -0,0 +1,26 @@
import torch
from PIL import Image
from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
from diffsynth import load_state_dict

pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Qwen/Qwen-Image-Edit-2511", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    tokenizer_config=None,
    processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
)
# The checkpoint keys match `pipe.dit` directly because training stripped the
# "pipe.dit." prefix (--remove_prefix_in_ckpt "pipe.dit.").
state_dict = load_state_dict("models/train/Qwen-Image-Edit-2511_full/epoch-1.safetensors")
pipe.dit.load_state_dict(state_dict)

prompt = "Change the color of the dress in Figure 1 to the color shown in Figure 2."
images = [
    Image.open("data/example_image_dataset/edit/image1.jpg").resize((1024, 1024)),
    Image.open("data/example_image_dataset/edit/image_color.jpg").resize((1024, 1024)),
]
image = pipe(prompt, edit_image=images, seed=123, num_inference_steps=40, height=1024, width=1024, zero_cond_t=True)
image.save("image.jpg")
@@ -0,0 +1,24 @@
import torch
from PIL import Image
from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig

pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Qwen/Qwen-Image-Edit-2511", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    tokenizer_config=None,
    processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
)
pipe.load_lora(pipe.dit, "models/train/Qwen-Image-Edit-2511_lora/epoch-4.safetensors")

prompt = "Change the color of the dress in Figure 1 to the color shown in Figure 2."
images = [
    Image.open("data/example_image_dataset/edit/image1.jpg").resize((1024, 1024)),
    Image.open("data/example_image_dataset/edit/image_color.jpg").resize((1024, 1024)),
]
image = pipe(prompt, edit_image=images, seed=123, num_inference_steps=40, height=1024, width=1024, zero_cond_t=True)
image.save("image.jpg")