Update Wan-VACE training scripts

This commit is contained in:
Artiprocher
2026-01-19 17:35:08 +08:00
parent 55e8346da3
commit a18e6233b5
3 changed files with 9 additions and 6 deletions

View File

@@ -6,7 +6,7 @@ accelerate launch examples/wanvideo/model_training/train.py \
   --width 832 \
   --dataset_repeat 100 \
   --model_id_with_origin_paths "iic/VACE-Wan2.1-1.3B-Preview:diffusion_pytorch_model*.safetensors,iic/VACE-Wan2.1-1.3B-Preview:models_t5_umt5-xxl-enc-bf16.pth,iic/VACE-Wan2.1-1.3B-Preview:Wan2.1_VAE.pth" \
-  --learning_rate 1e-4 \
+  --learning_rate 5e-5 \
   --num_epochs 5 \
   --remove_prefix_in_ckpt "pipe.vace." \
   --output_path "./models/train/Wan2.1-VACE-1.3B-Preview_lora" \
@@ -14,4 +14,5 @@ accelerate launch examples/wanvideo/model_training/train.py \
   --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
   --lora_rank 32 \
   --extra_inputs "vace_video,vace_reference_image" \
-  --use_gradient_checkpointing_offload
+  --use_gradient_checkpointing_offload
+# The learning rate is kept consistent with the settings in the original paper

View File

@@ -6,7 +6,7 @@ accelerate launch examples/wanvideo/model_training/train.py \
   --width 832 \
   --dataset_repeat 100 \
   --model_id_with_origin_paths "Wan-AI/Wan2.1-VACE-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-VACE-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-VACE-1.3B:Wan2.1_VAE.pth" \
-  --learning_rate 1e-4 \
+  --learning_rate 5e-5 \
   --num_epochs 5 \
   --remove_prefix_in_ckpt "pipe.vace." \
   --output_path "./models/train/Wan2.1-VACE-1.3B_lora" \
@@ -14,4 +14,5 @@ accelerate launch examples/wanvideo/model_training/train.py \
   --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
   --lora_rank 32 \
   --extra_inputs "vace_video,vace_reference_image" \
-  --use_gradient_checkpointing_offload
+  --use_gradient_checkpointing_offload
+# The learning rate is kept consistent with the settings in the original paper

View File

@@ -7,7 +7,7 @@ accelerate launch examples/wanvideo/model_training/train.py \
   --num_frames 17 \
   --dataset_repeat 100 \
   --model_id_with_origin_paths "Wan-AI/Wan2.1-VACE-14B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-VACE-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-VACE-14B:Wan2.1_VAE.pth" \
-  --learning_rate 1e-4 \
+  --learning_rate 5e-5 \
   --num_epochs 5 \
   --remove_prefix_in_ckpt "pipe.vace." \
   --output_path "./models/train/Wan2.1-VACE-14B_lora" \
@@ -15,4 +15,5 @@ accelerate launch examples/wanvideo/model_training/train.py \
   --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
   --lora_rank 32 \
   --extra_inputs "vace_video,vace_reference_image" \
-  --use_gradient_checkpointing_offload
+  --use_gradient_checkpointing_offload
+# The learning rate is kept consistent with the settings in the original paper