diff --git a/examples/qwen_image/model_training/full/Qwen-Image-Edit-2509.sh b/examples/qwen_image/model_training/full/Qwen-Image-Edit-2509.sh index a41f2da..7fda7b7 100644 --- a/examples/qwen_image/model_training/full/Qwen-Image-Edit-2509.sh +++ b/examples/qwen_image/model_training/full/Qwen-Image-Edit-2509.sh @@ -1,6 +1,6 @@ accelerate launch --config_file examples/qwen_image/model_training/full/accelerate_config_zero2offload.yaml examples/qwen_image/model_training/train.py \ --dataset_base_path data/example_image_dataset \ - --dataset_metadata_path data/example_image_dataset/metadata_edit.csv \ + --dataset_metadata_path data/example_image_dataset/metadata_qwen_image_edit_multi.json \ --data_file_keys "image,edit_image" \ --extra_inputs "edit_image" \ --max_pixels 1048576 \ diff --git a/examples/qwen_image/model_training/lora/Qwen-Image-Edit-2509.sh b/examples/qwen_image/model_training/lora/Qwen-Image-Edit-2509.sh index 3bbf742..7fc0cf9 100644 --- a/examples/qwen_image/model_training/lora/Qwen-Image-Edit-2509.sh +++ b/examples/qwen_image/model_training/lora/Qwen-Image-Edit-2509.sh @@ -1,6 +1,6 @@ accelerate launch examples/qwen_image/model_training/train.py \ --dataset_base_path data/example_image_dataset \ - --dataset_metadata_path data/example_image_dataset/metadata_edit.csv \ + --dataset_metadata_path data/example_image_dataset/metadata_qwen_image_edit_multi.json \ --data_file_keys "image,edit_image" \ --extra_inputs "edit_image" \ --max_pixels 1048576 \ diff --git a/examples/qwen_image/model_training/validate_full/Qwen-Image-Edit-2509.py b/examples/qwen_image/model_training/validate_full/Qwen-Image-Edit-2509.py index ff1fdbf..9295904 100644 --- a/examples/qwen_image/model_training/validate_full/Qwen-Image-Edit-2509.py +++ b/examples/qwen_image/model_training/validate_full/Qwen-Image-Edit-2509.py @@ -17,7 +17,10 @@ pipe = QwenImagePipeline.from_pretrained( state_dict = load_state_dict("models/train/Qwen-Image-Edit-2509_full/epoch-1.safetensors") 
pipe.dit.load_state_dict(state_dict) -prompt = "将裙子改为粉色" -image = Image.open("data/example_image_dataset/edit/image1.jpg").resize((1024, 1024)) -image = pipe(prompt, edit_image=image, seed=0, num_inference_steps=40, height=1024, width=1024) -image.save(f"image.jpg") +prompt = "Change the color of the dress in Figure 1 to the color shown in Figure 2." +images = [ + Image.open("data/example_image_dataset/edit/image1.jpg").resize((1024, 1024)), + Image.open("data/example_image_dataset/edit/image_color.jpg").resize((1024, 1024)), +] +image = pipe(prompt, edit_image=images, seed=123, num_inference_steps=40, height=1024, width=1024) +image.save("image.jpg") diff --git a/examples/qwen_image/model_training/validate_lora/Qwen-Image-Edit-2509.py b/examples/qwen_image/model_training/validate_lora/Qwen-Image-Edit-2509.py index 4eaf428..e701b07 100644 --- a/examples/qwen_image/model_training/validate_lora/Qwen-Image-Edit-2509.py +++ b/examples/qwen_image/model_training/validate_lora/Qwen-Image-Edit-2509.py @@ -15,7 +15,10 @@ pipe = QwenImagePipeline.from_pretrained( ) pipe.load_lora(pipe.dit, "models/train/Qwen-Image-Edit-2509_lora/epoch-4.safetensors") -prompt = "将裙子改为粉色" -image = Image.open("data/example_image_dataset/edit/image1.jpg").resize((1024, 1024)) -image = pipe(prompt, edit_image=image, seed=0, num_inference_steps=40, height=1024, width=1024) -image.save(f"image.jpg") +prompt = "Change the color of the dress in Figure 1 to the color shown in Figure 2." +images = [ + Image.open("data/example_image_dataset/edit/image1.jpg").resize((1024, 1024)), + Image.open("data/example_image_dataset/edit/image_color.jpg").resize((1024, 1024)), +] +image = pipe(prompt, edit_image=images, seed=123, num_inference_steps=40, height=1024, width=1024) +image.save("image.jpg")