examples/wanvideo/model_inference/WanToDance-14B-global.py (new file, 48 lines)
@@ -0,0 +1,48 @@
import torch
from PIL import Image
from diffsynth.utils.data import save_video, VideoData
from diffsynth.pipelines.wan_video import WanVideoPipeline, ModelConfig
from modelscope import dataset_snapshot_download


pipe = WanVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="global_model.safetensors"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="Wan2.1_VAE.pth"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"),
    ],
    tokenizer_config=ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/umt5-xxl/"),
)
dataset_snapshot_download(
    "DiffSynth-Studio/diffsynth_example_dataset",
    local_dir="data/diffsynth_example_dataset",
    allow_file_pattern="wanvideo/WanToDance-14B-global/*"
)
# This is a specialized model with the following constraints on its input parameters:
# * The model outputs a sequence of keyframes rather than a video; therefore, `framewise_decoding=True` must be set.
# * When the number of keyframes is n, `num_frames` = 4 * (n - 1) + 1.
# * Reducing `height`, `width`, `num_frames`, or `num_inference_steps` may lead to severe artifacts or generation failure.
# * The audio file specified by `wantodance_music_path` must match the video duration, calculated as (`num_frames` / 7.5) seconds.
# * The width and height of `wantodance_reference_image` must be multiples of 16.
# * `wantodance_fps` is configurable, but since the model appears to have been trained exclusively at 7.5 FPS, setting it to other values is not recommended.
# * The first frame of `wantodance_keyframes` is the `wantodance_reference_image`, while all subsequent frames are solid black.
# * `wantodance_keyframes_mask` indicates the positions of valid frames within `wantodance_keyframes`.
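# Helper sketch (an addition for illustration, not part of the original
# example): derive `num_frames` and the required music length from the
# keyframe count, following the two constraints above.
def wantodance_global_shapes(num_keyframes, fps=7.5):
    num_frames = 4 * (num_keyframes - 1) + 1   # n keyframes -> 4 * (n - 1) + 1 frames
    music_seconds = num_frames / fps           # the music clip must last this long
    return num_frames, music_seconds

# For this script, wantodance_global_shapes(38) gives (149, ~19.87 s),
# matching num_frames=149 below.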
wantodance_keyframes = VideoData("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/keyframes.mp4")
wantodance_keyframes = [wantodance_keyframes[i] for i in range(149)]
video = pipe(
    prompt="一个人正在跳舞,舞蹈种类是韩舞。帧率是7.5000",  # "A person is dancing; the dance style is Korean (K-pop) dance. The frame rate is 7.5000."
    negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",  # standard Wan negative prompt: garish colors, overexposure, static shots, blurry detail, subtitles, painting-like artifacts, gray cast, worst/low quality, JPEG artifacts, deformed or extra fingers and limbs, cluttered background, walking backwards, etc.
    seed=0, tiled=False,
    height=1280, width=720, num_frames=149,
    num_inference_steps=48,
    wantodance_music_path="data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/music.WAV",
    wantodance_reference_image=Image.open("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/refimage.jpg"),
    wantodance_fps=7.5,
    wantodance_keyframes=wantodance_keyframes,
    wantodance_keyframes_mask=[1] + [0] * 148,
    framewise_decoding=True,
)
save_video(video, "video_WanToDance-14B-global.mp4", fps=7.5, quality=5)
examples/wanvideo/model_inference/WanToDance-14B-local.py (new file, 52 lines)
@@ -0,0 +1,52 @@
import torch, os
from PIL import Image
from diffsynth.utils.data import save_video, VideoData
from diffsynth.pipelines.wan_video import WanVideoPipeline, ModelConfig
from modelscope import dataset_snapshot_download


pipe = WanVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="local_model.safetensors"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="Wan2.1_VAE.pth"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"),
    ],
    tokenizer_config=ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/umt5-xxl/"),
)
dataset_snapshot_download(
    "DiffSynth-Studio/diffsynth_example_dataset",
    local_dir="data/diffsynth_example_dataset",
    allow_file_pattern="wanvideo/WanToDance-14B-local/*"
)
# This is a specialized model with the following constraints on its input parameters:
# * The model renders and outputs video based on a sequence of keyframes; therefore, `wantodance_keyframes` must be provided correctly.
# * If you need to generate a long video, please generate it in segments, and ensure that `wantodance_music_path`, `wantodance_keyframes`, and `wantodance_keyframes_mask` are properly split accordingly.
# * The audio file specified by `wantodance_music_path` must match the video duration, calculated as (`num_frames` / 30) seconds.
# * The width and height of `wantodance_reference_image` must be multiples of 16.
# * `wantodance_fps` is configurable, but since the model appears to have been trained exclusively at 30 FPS, setting it to other values is not recommended.
# * In `wantodance_keyframes`, frames that are not keyframes should be solid black.
# * `wantodance_keyframes_mask` indicates the positions of valid frames within `wantodance_keyframes`.
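# Helper sketch (an addition for illustration, not part of the original
# example): the long literal mask passed below marks keyframes at irregular
# positions; it is easier to audit when built from the keyframe indices.
def make_keyframes_mask(num_frames, keyframe_indices):
    mask = [0] * num_frames
    for i in keyframe_indices:
        mask[i] = 1
    return mask

# Read off the literal below, the equivalent call would be:
# make_keyframes_mask(149, [0, 24, 47, 71, 94, 117, 148])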
wantodance_keyframes = VideoData("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/keyframes.mp4")
wantodance_keyframes = [wantodance_keyframes[i] for i in range(149)]
video = pipe(
    prompt="一个人正在跳舞,舞蹈种类是古典舞,图像清晰程度高,人物动作平均幅度中等,人物动作最大幅度中等。, 帧率是30fps。",  # "A person is dancing; the style is classical Chinese dance; high image clarity; medium average and medium maximum motion amplitude. The frame rate is 30fps."
    negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",  # standard Wan negative prompt (same gloss as in the global example above)
    seed=0, tiled=True,
    height=1280, width=720, num_frames=149,
    num_inference_steps=24,
    wantodance_music_path="data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/music.wav",
    wantodance_reference_image=Image.open("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/refimage.jpg"),
    wantodance_fps=30,
    wantodance_keyframes=wantodance_keyframes,
    wantodance_keyframes_mask=[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1],
)
save_video(video, "video_WanToDance-14B-local.mp4", fps=30, quality=5)
(new file, 59 lines: global inference with VRAM management; path not shown in the source)
@@ -0,0 +1,59 @@
import torch
from PIL import Image
from diffsynth.utils.data import save_video, VideoData
from diffsynth.pipelines.wan_video import WanVideoPipeline, ModelConfig
from modelscope import dataset_snapshot_download


vram_config = {
    "offload_dtype": "disk",
    "offload_device": "disk",
    "onload_dtype": torch.bfloat16,
    "onload_device": "cpu",
    "preparing_dtype": torch.bfloat16,
    "preparing_device": "cuda",
    "computation_dtype": torch.bfloat16,
    "computation_device": "cuda",
}
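# The config above streams weights from disk: parameters rest on disk, are
# staged on the CPU in bfloat16, and reach the GPU only for preparation and
# computation. Quick check of the offload budget (an addition for
# illustration, not in the original example); `vram_limit` below is total
# GPU memory in GiB minus 2 GiB of headroom:
free_bytes, total_bytes = torch.cuda.mem_get_info("cuda")
print(f"total VRAM: {total_bytes / (1024 ** 3):.1f} GiB, vram_limit: {total_bytes / (1024 ** 3) - 2:.1f} GiB")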
pipe = WanVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="global_model.safetensors", **vram_config),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", **vram_config),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="Wan2.1_VAE.pth", **vram_config),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", **vram_config),
    ],
    tokenizer_config=ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/umt5-xxl/"),
    vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 2,
)
dataset_snapshot_download(
    "DiffSynth-Studio/diffsynth_example_dataset",
    local_dir="data/diffsynth_example_dataset",
    allow_file_pattern="wanvideo/WanToDance-14B-global/*"
)
# This is a specialized model with the following constraints on its input parameters:
# * The model outputs a sequence of keyframes rather than a video; therefore, `framewise_decoding=True` must be set.
# * When the number of keyframes is n, `num_frames` = 4 * (n - 1) + 1.
# * Reducing `height`, `width`, `num_frames`, or `num_inference_steps` may lead to severe artifacts or generation failure.
# * The audio file specified by `wantodance_music_path` must match the video duration, calculated as (`num_frames` / 7.5) seconds.
# * The width and height of `wantodance_reference_image` must be multiples of 16.
# * `wantodance_fps` is configurable, but since the model appears to have been trained exclusively at 7.5 FPS, setting it to other values is not recommended.
# * The first frame of `wantodance_keyframes` is the `wantodance_reference_image`, while all subsequent frames are solid black.
# * `wantodance_keyframes_mask` indicates the positions of valid frames within `wantodance_keyframes`.
wantodance_keyframes = VideoData("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/keyframes.mp4")
wantodance_keyframes = [wantodance_keyframes[i] for i in range(149)]
video = pipe(
    prompt="一个人正在跳舞,舞蹈种类是韩舞。帧率是7.5000",
    negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
    seed=0, tiled=False,
    height=1280, width=720, num_frames=149,
    num_inference_steps=48,
    wantodance_music_path="data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/music.WAV",
    wantodance_reference_image=Image.open("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/refimage.jpg"),
    wantodance_fps=7.5,
    wantodance_keyframes=wantodance_keyframes,
    wantodance_keyframes_mask=[1] + [0] * 148,
    framewise_decoding=True,
)
save_video(video, "video_WanToDance-14B-global.mp4", fps=7.5, quality=5)
(new file, 63 lines: local inference with VRAM management; path not shown in the source)
@@ -0,0 +1,63 @@
import torch, os
from PIL import Image
from diffsynth.utils.data import save_video, VideoData
from diffsynth.pipelines.wan_video import WanVideoPipeline, ModelConfig
from modelscope import dataset_snapshot_download


vram_config = {
    "offload_dtype": "disk",
    "offload_device": "disk",
    "onload_dtype": torch.bfloat16,
    "onload_device": "cpu",
    "preparing_dtype": torch.bfloat16,
    "preparing_device": "cuda",
    "computation_dtype": torch.bfloat16,
    "computation_device": "cuda",
}
pipe = WanVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="local_model.safetensors", **vram_config),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", **vram_config),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="Wan2.1_VAE.pth", **vram_config),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", **vram_config),
    ],
    tokenizer_config=ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/umt5-xxl/"),
    vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 2,
)
dataset_snapshot_download(
    "DiffSynth-Studio/diffsynth_example_dataset",
    local_dir="data/diffsynth_example_dataset",
    allow_file_pattern="wanvideo/WanToDance-14B-local/*"
)
# This is a specialized model with the following constraints on its input parameters:
# * The model renders and outputs video based on a sequence of keyframes; therefore, `wantodance_keyframes` must be provided correctly.
# * If you need to generate a long video, please generate it in segments, and ensure that `wantodance_music_path`, `wantodance_keyframes`, and `wantodance_keyframes_mask` are properly split accordingly.
# * The audio file specified by `wantodance_music_path` must match the video duration, calculated as (`num_frames` / 30) seconds.
# * The width and height of `wantodance_reference_image` must be multiples of 16.
# * `wantodance_fps` is configurable, but since the model appears to have been trained exclusively at 30 FPS, setting it to other values is not recommended.
# * In `wantodance_keyframes`, frames that are not keyframes should be solid black.
# * `wantodance_keyframes_mask` indicates the positions of valid frames within `wantodance_keyframes`.
wantodance_keyframes = VideoData("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/keyframes.mp4")
wantodance_keyframes = [wantodance_keyframes[i] for i in range(149)]
video = pipe(
    prompt="一个人正在跳舞,舞蹈种类是古典舞,图像清晰程度高,人物动作平均幅度中等,人物动作最大幅度中等。, 帧率是30fps。",
    negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
    seed=0, tiled=True,
    height=1280, width=720, num_frames=149,
    num_inference_steps=24,
    wantodance_music_path="data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/music.wav",
    wantodance_reference_image=Image.open("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/refimage.jpg"),
    wantodance_fps=30,
    wantodance_keyframes=wantodance_keyframes,
    wantodance_keyframes_mask=[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1],
)
save_video(video, "video_WanToDance-14B-local.mp4", fps=30, quality=5)
(new file, 20 lines: full-parameter training script for the global model; path not shown in the source)
@@ -0,0 +1,20 @@
# 8*H200 required
modelscope download --dataset DiffSynth-Studio/diffsynth_example_dataset --include "wanvideo/WanToDance-14B-global/*" --local_dir ./data/diffsynth_example_dataset

accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
  --dataset_base_path data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global \
  --dataset_metadata_path data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/metadata.json \
  --data_file_keys "video,wantodance_reference_image,wantodance_keyframes,wantodance_music_path" \
  --height 1280 \
  --width 720 \
  --num_frames 149 \
  --dataset_repeat 100 \
  --model_id_with_origin_paths "Wan-AI/WanToDance-14B:global_model.safetensors,Wan-AI/WanToDance-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/WanToDance-14B:Wan2.1_VAE.pth,Wan-AI/WanToDance-14B:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
  --learning_rate 1e-5 \
  --num_epochs 2 \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/WanToDance-14B-global_full" \
  --trainable_models "dit" \
  --extra_inputs "wantodance_music_path,wantodance_reference_image,wantodance_fps,wantodance_keyframes,wantodance_keyframes_mask,framewise_decoding" \
  --use_gradient_checkpointing_offload \
  --framewise_decoding
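# Note: the global model asks for framewise decoding twice, on purpose: inside
# --extra_inputs, where it is forwarded to the pipeline, and as the standalone
# --framewise_decoding flag, which relaxes the frame-count check in train.py.
# The local-model script below omits both.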
(new file, 19 lines: full-parameter training script for the local model; path not shown in the source)
@@ -0,0 +1,19 @@
# 8*H200 required
modelscope download --dataset DiffSynth-Studio/diffsynth_example_dataset --include "wanvideo/WanToDance-14B-local/*" --local_dir ./data/diffsynth_example_dataset

accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
  --dataset_base_path data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local \
  --dataset_metadata_path data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/metadata.json \
  --data_file_keys "video,wantodance_reference_image,wantodance_keyframes,wantodance_music_path" \
  --height 1280 \
  --width 720 \
  --num_frames 149 \
  --dataset_repeat 100 \
  --model_id_with_origin_paths "Wan-AI/WanToDance-14B:local_model.safetensors,Wan-AI/WanToDance-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/WanToDance-14B:Wan2.1_VAE.pth,Wan-AI/WanToDance-14B:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
  --learning_rate 1e-5 \
  --num_epochs 2 \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/WanToDance-14B-local_full" \
  --trainable_models "dit" \
  --extra_inputs "wantodance_music_path,wantodance_reference_image,wantodance_fps,wantodance_keyframes,wantodance_keyframes_mask" \
  --use_gradient_checkpointing_offload
(new file, 22 lines: LoRA training script for the global model; path not shown in the source)
@@ -0,0 +1,22 @@
# 8*H200 required
modelscope download --dataset DiffSynth-Studio/diffsynth_example_dataset --include "wanvideo/WanToDance-14B-global/*" --local_dir ./data/diffsynth_example_dataset

accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
  --dataset_base_path data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global \
  --dataset_metadata_path data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/metadata.json \
  --data_file_keys "video,wantodance_reference_image,wantodance_keyframes,wantodance_music_path" \
  --height 1280 \
  --width 720 \
  --num_frames 149 \
  --dataset_repeat 100 \
  --model_id_with_origin_paths "Wan-AI/WanToDance-14B:global_model.safetensors,Wan-AI/WanToDance-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/WanToDance-14B:Wan2.1_VAE.pth,Wan-AI/WanToDance-14B:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
  --learning_rate 1e-4 \
  --num_epochs 5 \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/WanToDance-14B-global_lora" \
  --lora_base_model "dit" \
  --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
  --lora_rank 32 \
  --extra_inputs "wantodance_music_path,wantodance_reference_image,wantodance_fps,wantodance_keyframes,wantodance_keyframes_mask,framewise_decoding" \
  --use_gradient_checkpointing_offload \
  --framewise_decoding
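# Note: "q,k,v,o" are the attention projections of each DiT block and
# "ffn.0,ffn.2" the two linear layers of its feed-forward network.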
(new file, 21 lines: LoRA training script for the local model; path not shown in the source)
@@ -0,0 +1,21 @@
# 8*H200 required
modelscope download --dataset DiffSynth-Studio/diffsynth_example_dataset --include "wanvideo/WanToDance-14B-local/*" --local_dir ./data/diffsynth_example_dataset

accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
  --dataset_base_path data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local \
  --dataset_metadata_path data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/metadata.json \
  --data_file_keys "video,wantodance_reference_image,wantodance_keyframes,wantodance_music_path" \
  --height 1280 \
  --width 720 \
  --num_frames 149 \
  --dataset_repeat 100 \
  --model_id_with_origin_paths "Wan-AI/WanToDance-14B:local_model.safetensors,Wan-AI/WanToDance-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/WanToDance-14B:Wan2.1_VAE.pth,Wan-AI/WanToDance-14B:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
  --learning_rate 1e-4 \
  --num_epochs 5 \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/WanToDance-14B-local_lora" \
  --lora_base_model "dit" \
  --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
  --lora_rank 32 \
  --extra_inputs "wantodance_music_path,wantodance_reference_image,wantodance_fps,wantodance_keyframes,wantodance_keyframes_mask" \
  --use_gradient_checkpointing_offload
examples/wanvideo/model_training/train.py (modified)
@@ -72,6 +72,9 @@ class WanTrainingModule(DiffusionTrainingModule):
                 inputs_shared[extra_input] = data[extra_input][0]
             else:
                 inputs_shared[extra_input] = data[extra_input]
+        if inputs_shared.get("framewise_decoding", False):
+            # WanToDance global model
+            inputs_shared["num_frames"] = 4 * (len(data["video"]) - 1) + 1
         return inputs_shared

     def get_pipeline_inputs(self, data):
@@ -117,6 +120,7 @@ def wan_parser():
     parser.add_argument("--max_timestep_boundary", type=float, default=1.0, help="Max timestep boundary (for mixed models, e.g., Wan-AI/Wan2.2-I2V-A14B).")
     parser.add_argument("--min_timestep_boundary", type=float, default=0.0, help="Min timestep boundary (for mixed models, e.g., Wan-AI/Wan2.2-I2V-A14B).")
     parser.add_argument("--initialize_model_on_cpu", default=False, action="store_true", help="Whether to initialize models on CPU.")
+    parser.add_argument("--framewise_decoding", default=False, action="store_true", help="Enable it if this model is a WanToDance global model.")
     return parser

@@ -140,12 +144,13 @@ if __name__ == "__main__":
             height_division_factor=16,
             width_division_factor=16,
             num_frames=args.num_frames,
-            time_division_factor=4,
-            time_division_remainder=1,
+            time_division_factor=4 if not args.framewise_decoding else 1,
+            time_division_remainder=1 if not args.framewise_decoding else 0,
         ),
         special_operator_map={
             "animate_face_video": ToAbsolutePath(args.dataset_base_path) >> LoadVideo(args.num_frames, 4, 1, frame_processor=ImageCropAndResize(512, 512, None, 16, 16)),
             "input_audio": ToAbsolutePath(args.dataset_base_path) >> LoadAudio(sr=16000),
+            "wantodance_music_path": ToAbsolutePath(args.dataset_base_path),
         }
     )
     model = WanTrainingModule(
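A sketch of the rule the hunks above implement (a reading of the diff, not code from the repository): with --framewise_decoding, the dataset's frame-count check relaxes from "a multiple of 4 plus 1" to "any count".

    def check_num_frames(num_frames, framewise_decoding):
        # Standard Wan training: num_frames % 4 == 1 (factor 4, remainder 1).
        # Framewise decoding (WanToDance global): any count (factor 1, remainder 0).
        factor, remainder = (1, 0) if framewise_decoding else (4, 1)
        return num_frames % factor == remainder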
(new file, 51 lines: validation script for the fully trained global model; path not shown in the source)
@@ -0,0 +1,51 @@
import torch
from PIL import Image
from diffsynth.utils.data import save_video, VideoData
from diffsynth.pipelines.wan_video import WanVideoPipeline, ModelConfig
from modelscope import dataset_snapshot_download
from diffsynth.core import load_state_dict


pipe = WanVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="global_model.safetensors"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="Wan2.1_VAE.pth"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"),
    ],
    tokenizer_config=ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/umt5-xxl/"),
)
state_dict = load_state_dict("models/train/WanToDance-14B-global_full/epoch-1.safetensors")
pipe.dit.load_state_dict(state_dict)
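# The checkpoint keys load straight into pipe.dit because training saved them
# with --remove_prefix_in_ckpt "pipe.dit."; "epoch-1" is presumably the last
# checkpoint of the 2-epoch run (epochs are 0-indexed).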
dataset_snapshot_download(
    "DiffSynth-Studio/diffsynth_example_dataset",
    local_dir="data/diffsynth_example_dataset",
    allow_file_pattern="wanvideo/WanToDance-14B-global/*"
)
# This is a specialized model with the following constraints on its input parameters:
# * The model outputs a sequence of keyframes rather than a video; therefore, `framewise_decoding=True` must be set.
# * When the number of keyframes is n, `num_frames` = 4 * (n - 1) + 1.
# * Reducing `height`, `width`, `num_frames`, or `num_inference_steps` may lead to severe artifacts or generation failure.
# * The audio file specified by `wantodance_music_path` must match the video duration, calculated as (`num_frames` / 7.5) seconds.
# * The width and height of `wantodance_reference_image` must be multiples of 16.
# * `wantodance_fps` is configurable, but since the model appears to have been trained exclusively at 7.5 FPS, setting it to other values is not recommended.
# * The first frame of `wantodance_keyframes` is the `wantodance_reference_image`, while all subsequent frames are solid black.
# * `wantodance_keyframes_mask` indicates the positions of valid frames within `wantodance_keyframes`.
wantodance_keyframes = VideoData("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/keyframes.mp4")
wantodance_keyframes = [wantodance_keyframes[i] for i in range(149)]
video = pipe(
    prompt="一个人正在跳舞,舞蹈种类是韩舞。帧率是7.5000",
    negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
    seed=0, tiled=False,
    height=1280, width=720, num_frames=149,
    num_inference_steps=48,
    wantodance_music_path="data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/music.WAV",
    wantodance_reference_image=Image.open("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/refimage.jpg"),
    wantodance_fps=7.5,
    wantodance_keyframes=wantodance_keyframes,
    wantodance_keyframes_mask=[1] + [0] * 148,
    framewise_decoding=True,
)
save_video(video, "video_WanToDance-14B-global.mp4", fps=7.5, quality=5)
(new file, 55 lines: validation script for the fully trained local model; path not shown in the source)
@@ -0,0 +1,55 @@
import torch, os
from PIL import Image
from diffsynth.utils.data import save_video, VideoData
from diffsynth.pipelines.wan_video import WanVideoPipeline, ModelConfig
from modelscope import dataset_snapshot_download
from diffsynth.core import load_state_dict


pipe = WanVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="local_model.safetensors"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="Wan2.1_VAE.pth"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"),
    ],
    tokenizer_config=ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/umt5-xxl/"),
)
state_dict = load_state_dict("models/train/WanToDance-14B-local_full/epoch-1.safetensors")
pipe.dit.load_state_dict(state_dict)
dataset_snapshot_download(
    "DiffSynth-Studio/diffsynth_example_dataset",
    local_dir="data/diffsynth_example_dataset",
    allow_file_pattern="wanvideo/WanToDance-14B-local/*"
)
# This is a specialized model with the following constraints on its input parameters:
# * The model renders and outputs video based on a sequence of keyframes; therefore, `wantodance_keyframes` must be provided correctly.
# * If you need to generate a long video, please generate it in segments, and ensure that `wantodance_music_path`, `wantodance_keyframes`, and `wantodance_keyframes_mask` are properly split accordingly.
# * The audio file specified by `wantodance_music_path` must match the video duration, calculated as (`num_frames` / 30) seconds.
# * The width and height of `wantodance_reference_image` must be multiples of 16.
# * `wantodance_fps` is configurable, but since the model appears to have been trained exclusively at 30 FPS, setting it to other values is not recommended.
# * In `wantodance_keyframes`, frames that are not keyframes should be solid black.
# * `wantodance_keyframes_mask` indicates the positions of valid frames within `wantodance_keyframes`.
wantodance_keyframes = VideoData("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/keyframes.mp4")
wantodance_keyframes = [wantodance_keyframes[i] for i in range(149)]
video = pipe(
    prompt="一个人正在跳舞,舞蹈种类是古典舞,图像清晰程度高,人物动作平均幅度中等,人物动作最大幅度中等。, 帧率是30fps。",
    negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
    seed=0, tiled=True,
    height=1280, width=720, num_frames=149,
    num_inference_steps=24,
    wantodance_music_path="data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/music.wav",
    wantodance_reference_image=Image.open("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/refimage.jpg"),
    wantodance_fps=30,
    wantodance_keyframes=wantodance_keyframes,
    wantodance_keyframes_mask=[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1],
)
save_video(video, "video_WanToDance-14B-local.mp4", fps=30, quality=5)
(new file, 49 lines: validation script for the global-model LoRA; path not shown in the source)
@@ -0,0 +1,49 @@
import torch
from PIL import Image
from diffsynth.utils.data import save_video, VideoData
from diffsynth.pipelines.wan_video import WanVideoPipeline, ModelConfig
from modelscope import dataset_snapshot_download


pipe = WanVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="global_model.safetensors"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="Wan2.1_VAE.pth"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"),
    ],
    tokenizer_config=ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/umt5-xxl/"),
)
pipe.load_lora(pipe.dit, "models/train/WanToDance-14B-global_lora/epoch-4.safetensors", alpha=1)
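# alpha=1 applies the LoRA at full strength; "epoch-4" is presumably the final
# checkpoint of the 5-epoch LoRA run (epochs are 0-indexed).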
dataset_snapshot_download(
    "DiffSynth-Studio/diffsynth_example_dataset",
    local_dir="data/diffsynth_example_dataset",
    allow_file_pattern="wanvideo/WanToDance-14B-global/*"
)
# This is a specialized model with the following constraints on its input parameters:
# * The model outputs a sequence of keyframes rather than a video; therefore, `framewise_decoding=True` must be set.
# * When the number of keyframes is n, `num_frames` = 4 * (n - 1) + 1.
# * Reducing `height`, `width`, `num_frames`, or `num_inference_steps` may lead to severe artifacts or generation failure.
# * The audio file specified by `wantodance_music_path` must match the video duration, calculated as (`num_frames` / 7.5) seconds.
# * The width and height of `wantodance_reference_image` must be multiples of 16.
# * `wantodance_fps` is configurable, but since the model appears to have been trained exclusively at 7.5 FPS, setting it to other values is not recommended.
# * The first frame of `wantodance_keyframes` is the `wantodance_reference_image`, while all subsequent frames are solid black.
# * `wantodance_keyframes_mask` indicates the positions of valid frames within `wantodance_keyframes`.
wantodance_keyframes = VideoData("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/keyframes.mp4")
wantodance_keyframes = [wantodance_keyframes[i] for i in range(149)]
video = pipe(
    prompt="一个人正在跳舞,舞蹈种类是韩舞。帧率是7.5000",
    negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
    seed=0, tiled=False,
    height=1280, width=720, num_frames=149,
    num_inference_steps=48,
    wantodance_music_path="data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/music.WAV",
    wantodance_reference_image=Image.open("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-global/refimage.jpg"),
    wantodance_fps=7.5,
    wantodance_keyframes=wantodance_keyframes,
    wantodance_keyframes_mask=[1] + [0] * 148,
    framewise_decoding=True,
)
save_video(video, "video_WanToDance-14B-global.mp4", fps=7.5, quality=5)
(new file, 53 lines: validation script for the local-model LoRA; path not shown in the source)
@@ -0,0 +1,53 @@
import torch, os
from PIL import Image
from diffsynth.utils.data import save_video, VideoData
from diffsynth.pipelines.wan_video import WanVideoPipeline, ModelConfig
from modelscope import dataset_snapshot_download


pipe = WanVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="local_model.safetensors"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="Wan2.1_VAE.pth"),
        ModelConfig(model_id="Wan-AI/WanToDance-14B", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"),
    ],
    tokenizer_config=ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/umt5-xxl/"),
)
pipe.load_lora(pipe.dit, "models/train/WanToDance-14B-local_lora/epoch-4.safetensors", alpha=1)
dataset_snapshot_download(
    "DiffSynth-Studio/diffsynth_example_dataset",
    local_dir="data/diffsynth_example_dataset",
    allow_file_pattern="wanvideo/WanToDance-14B-local/*"
)
# This is a specialized model with the following constraints on its input parameters:
# * The model renders and outputs video based on a sequence of keyframes; therefore, `wantodance_keyframes` must be provided correctly.
# * If you need to generate a long video, please generate it in segments, and ensure that `wantodance_music_path`, `wantodance_keyframes`, and `wantodance_keyframes_mask` are properly split accordingly.
# * The audio file specified by `wantodance_music_path` must match the video duration, calculated as (`num_frames` / 30) seconds.
# * The width and height of `wantodance_reference_image` must be multiples of 16.
# * `wantodance_fps` is configurable, but since the model appears to have been trained exclusively at 30 FPS, setting it to other values is not recommended.
# * In `wantodance_keyframes`, frames that are not keyframes should be solid black.
# * `wantodance_keyframes_mask` indicates the positions of valid frames within `wantodance_keyframes`.
wantodance_keyframes = VideoData("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/keyframes.mp4")
wantodance_keyframes = [wantodance_keyframes[i] for i in range(149)]
video = pipe(
    prompt="一个人正在跳舞,舞蹈种类是古典舞,图像清晰程度高,人物动作平均幅度中等,人物动作最大幅度中等。, 帧率是30fps。",
    negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
    seed=0, tiled=True,
    height=1280, width=720, num_frames=149,
    num_inference_steps=24,
    wantodance_music_path="data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/music.wav",
    wantodance_reference_image=Image.open("data/diffsynth_example_dataset/wanvideo/WanToDance-14B-local/refimage.jpg"),
    wantodance_fps=30,
    wantodance_keyframes=wantodance_keyframes,
    wantodance_keyframes_mask=[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               1],
)
save_video(video, "video_WanToDance-14B-local.mp4", fps=30, quality=5)