Mirror of https://github.com/modelscope/DiffSynth-Studio.git (synced 2026-03-25 02:38:10 +00:00)
# Download the example dataset for direct distillation from ModelScope.
modelscope download --dataset DiffSynth-Studio/diffsynth_example_dataset --include "wanvideo/Wan2.1-T2V-1.3B_direct_distill/*" --local_dir ./data/diffsynth_example_dataset
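
# Optional sanity check (a minimal sketch, using only paths from the commands in this
# script): confirm the metadata file the training command expects actually landed on disk.
ls -lh data/diffsynth_example_dataset/wanvideo/Wan2.1-T2V-1.3B_direct_distill/metadata.csv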

# Full-parameter direct distillation training of the Wan2.1-T2V-1.3B DiT.
accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
  --dataset_base_path data/diffsynth_example_dataset/wanvideo/Wan2.1-T2V-1.3B_direct_distill \
  --dataset_metadata_path data/diffsynth_example_dataset/wanvideo/Wan2.1-T2V-1.3B_direct_distill/metadata.csv \
  --height 480 \
  --width 832 \
  --dataset_repeat 160 \
  --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-1.3B:Wan2.1_VAE.pth" \
  --learning_rate 1e-5 \
  --num_epochs 2 \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/Wan2.1-T2V-1.3B_full_distill" \
  --trainable_models "dit" \
  --task "direct_distill" \
  --extra_inputs "seed,rand_device,num_inference_steps,cfg_scale"
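
# After training, the distilled DiT weights are written under the --output_path directory
# given above. The exact checkpoint file names are an assumption (they depend on the
# trainer's saving logic), so simply list the directory to locate them:
ls ./models/train/Wan2.1-T2V-1.3B_full_distill/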