Mirror of https://github.com/modelscope/DiffSynth-Studio.git — synced 2026-03-18 22:08:13 +00:00
17 lines · 1015 B · Bash
# Full fine-tuning of Wan2.2-S2V-14B (speech-to-video) with HuggingFace Accelerate.
# Uses the 14B multi-GPU accelerate config; trains only the DiT ("dit") while the
# text encoder, VAE, and wav2vec2 audio encoder stay frozen.
#
# NOTE: the scraped original had a stray "|" line after every "\" continuation
# (and a trailing "|"), which made each continuation a broken pipe and the whole
# command a bash syntax error. This is the reconstructed, runnable command.
accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
  --dataset_base_path data/example_video_dataset/wans2v \
  --dataset_metadata_path data/example_video_dataset/wans2v/metadata.csv \
  --data_file_keys "video,input_audio,s2v_pose_video" \
  --height 448 \
  --width 832 \
  --num_frames 81 \
  --dataset_repeat 100 \
  --model_id_with_origin_paths "Wan-AI/Wan2.2-S2V-14B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-S2V-14B:wav2vec2-large-xlsr-53-english/model.safetensors,Wan-AI/Wan2.2-S2V-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-S2V-14B:Wan2.1_VAE.pth" \
  --audio_processor_config "Wan-AI/Wan2.2-S2V-14B:wav2vec2-large-xlsr-53-english/" \
  --learning_rate 1e-5 \
  --num_epochs 1 \
  --trainable_models "dit" \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/Wan2.2-S2V-14B_full" \
  --extra_inputs "input_image,input_audio,s2v_pose_video" \
  --use_gradient_checkpointing_offload