This commit is contained in:
Artiprocher
2023-12-30 21:01:24 +08:00
parent b9771db163
commit d24ddaacaa
19 changed files with 2252 additions and 34 deletions

View File

@@ -0,0 +1,47 @@
from diffsynth import ModelManager, SDImagePipeline, SDVideoPipeline, ControlNetConfigUnit, VideoData, save_video, save_frames
from diffsynth.extensions.RIFE import RIFEInterpolater
import torch

# Checkpoints to download beforehand:
# `models/stable_diffusion/dreamshaper_8.safetensors`: [link](https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16)
# `models/AnimateDiff/mm_sd_v15_v2.ckpt`: [link](https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15_v2.ckpt)
# `models/RIFE/flownet.pkl`: [link](https://drive.google.com/file/d/1APIzVeI-4ZZCEuIRE1m6WYfSCaOsi_7_/view?usp=sharing)

# Load every checkpoint once; both pipelines below share this manager.
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_models([
    "models/stable_diffusion/dreamshaper_8.safetensors",
    "models/AnimateDiff/mm_sd_v15_v2.ckpt",
    "models/RIFE/flownet.pkl",
])

# Stage 1: text -> keyframe image.
image_pipeline = SDImagePipeline.from_model_manager(model_manager)
torch.manual_seed(0)
keyframe = image_pipeline(
    prompt="lightning storm, sea",
    negative_prompt="",
    cfg_scale=7.5,
    num_inference_steps=30,
    height=512, width=768,
)

# Stage 2: text + keyframe -> video (6GB VRAM is enough!).
# The single keyframe is repeated for every frame and partially re-noised
# (denoising_strength=0.9), so the animation stays anchored to it.
video_pipeline = SDVideoPipeline.from_model_manager(model_manager)
output_video = video_pipeline(
    prompt="lightning storm, sea",
    negative_prompt="",
    cfg_scale=7.5,
    num_frames=64,
    num_inference_steps=10,
    height=512, width=768,
    animatediff_batch_size=16,
    animatediff_stride=1,
    input_frames=[keyframe] * 64,
    denoising_strength=0.9,
    vram_limit_level=0,
)

# Stage 3: RIFE frame interpolation for a high-fps result
# (num_iter=3 presumably doubles the frame count three times — TODO confirm).
interpolater = RIFEInterpolater.from_model_manager(model_manager)
output_video = interpolater.interpolate(output_video, num_iter=3)

# Write the final high-fps video to disk.
save_video(output_video, "output_video.mp4", fps=120)

View File

@@ -1,4 +1,5 @@
# NOTE(review): the span below is a unified-diff rendering of an example script,
# not runnable Python. Pre-change and post-change lines are interleaved (e.g.
# `scale=1.0` immediately followed by its replacement `scale=0.5`, two VideoData
# loads, two save blocks), and `@@` hunk headers remain embedded in the text.
from diffsynth import ModelManager, SDVideoPipeline, ControlNetConfigUnit, VideoData, save_video, save_frames
from diffsynth.extensions.RIFE import RIFESmoother
import torch
@@ -9,6 +10,8 @@ import torch
# `models/ControlNet/control_v11f1e_sd15_tile.pth`: [link](https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile.pth)
# `models/Annotators/sk_model.pth`: [link](https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model.pth)
# `models/Annotators/sk_model2.pth`: [link](https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model2.pth)
# `models/textual_inversion/verybadimagenegative_v1.3.pt`: [link](https://civitai.com/api/download/models/25820?type=Model&format=PickleTensor&size=full&fp=fp16)
# `models/RIFE/flownet.pkl`: [link](https://drive.google.com/file/d/1APIzVeI-4ZZCEuIRE1m6WYfSCaOsi_7_/view?usp=sharing)
# Load models
@@ -19,6 +22,7 @@ model_manager.load_models([
"models/AnimateDiff/mm_sd_v15_v2.ckpt",
"models/ControlNet/control_v11p_sd15_lineart.pth",
"models/ControlNet/control_v11f1e_sd15_tile.pth",
"models/RIFE/flownet.pkl"
])
pipe = SDVideoPipeline.from_model_manager(
model_manager,
@@ -26,31 +30,36 @@ pipe = SDVideoPipeline.from_model_manager(
ControlNetConfigUnit(
processor_id="lineart",
model_path="models/ControlNet/control_v11p_sd15_lineart.pth",
scale=1.0
scale=0.5
),
ControlNetConfigUnit(
processor_id="tile",
model_path="models/ControlNet/control_v11f1e_sd15_tile.pth",
scale=0.5
),
)
]
)
smoother = RIFESmoother.from_model_manager(model_manager)
# Load video (we only use 16 frames in this example for testing)
video = VideoData(video_file="input_video.mp4", height=1536, width=1536)
input_video = [video[i] for i in range(16)]
# Load video (we only use 60 frames for quick testing)
# The original video is here: https://www.bilibili.com/video/BV19w411A7YJ/
video = VideoData(
video_file="data/bilibili_videos/៸៸᳐_⩊_៸៸᳐ 66 微笑调查队🌻/៸៸᳐_⩊_៸៸᳐ 66 微笑调查队🌻 - 1.66 微笑调查队🌻(Av278681824,P1).mp4",
height=1024, width=1024)
input_video = [video[i] for i in range(40*60, 41*60)]
# Toon shading
# Toon shading (20G VRAM)
torch.manual_seed(0)
output_video = pipe(
prompt="best quality, perfect anime illustration, light, a girl is dancing, smile, solo",
negative_prompt="verybadimagenegative_v1.3",
cfg_scale=5, clip_skip=2,
cfg_scale=3, clip_skip=2,
controlnet_frames=input_video, num_frames=len(input_video),
num_inference_steps=10, height=1536, width=1536,
num_inference_steps=10, height=1024, width=1024,
animatediff_batch_size=32, animatediff_stride=16,
vram_limit_level=0,
)
output_video = smoother(output_video)
# Save images and video
save_frames(output_video, "output_frames")
save_video(output_video, "output_video.mp4", fps=16)
# Save video
save_video(output_video, "output_video.mp4", fps=60)

View File

@@ -0,0 +1,58 @@
from diffsynth import ModelManager, SDVideoPipeline, ControlNetConfigUnit, VideoData, save_video
from diffsynth.extensions.FastBlend import FastBlendSmoother
import torch

# Checkpoints to download beforehand:
# `models/stable_diffusion/dreamshaper_8.safetensors`: [link](https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16)
# `models/ControlNet/control_v11f1p_sd15_depth.pth`: [link](https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1p_sd15_depth.pth)
# `models/ControlNet/control_v11p_sd15_softedge.pth`: [link](https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_softedge.pth)
# `models/Annotators/dpt_hybrid-midas-501f0c75.pt`: [link](https://huggingface.co/lllyasviel/Annotators/resolve/main/dpt_hybrid-midas-501f0c75.pt)
# `models/Annotators/ControlNetHED.pth`: [link](https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetHED.pth)
# `models/RIFE/flownet.pkl`: [link](https://drive.google.com/file/d/1APIzVeI-4ZZCEuIRE1m6WYfSCaOsi_7_/view?usp=sharing)

# Load models
# NOTE(review): flownet.pkl (RIFE) is loaded although this example only uses
# FastBlendSmoother — presumably kept for parity with the other examples; confirm.
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_models([
    "models/stable_diffusion/dreamshaper_8.safetensors",
    "models/ControlNet/control_v11f1p_sd15_depth.pth",
    "models/ControlNet/control_v11p_sd15_softedge.pth",
    "models/RIFE/flownet.pkl"
])
pipe = SDVideoPipeline.from_model_manager(
    model_manager,
    [
        ControlNetConfigUnit(
            processor_id="depth",
            # Fixed: the original used `rf"..."` — an f-string with no
            # placeholders and a raw string with no escapes; the prefixes were
            # pure noise and the literal value is unchanged.
            model_path="models/ControlNet/control_v11f1p_sd15_depth.pth",
            scale=0.5
        ),
        ControlNetConfigUnit(
            processor_id="softedge",
            model_path="models/ControlNet/control_v11p_sd15_softedge.pth",
            scale=0.5
        )
    ]
)
smoother = FastBlendSmoother.from_model_manager(model_manager)

# Load video
# Original video: https://pixabay.com/videos/flow-rocks-water-fluent-stones-159627/
video = VideoData(video_file="data/pixabay100/159627 (1080p).mp4", height=512, width=768)
input_video = [video[i] for i in range(128)]

# Rerender the video with depth + softedge ControlNet guidance; FastBlend
# smooths intermediate results at denoising steps 4 and 9.
torch.manual_seed(0)
output_video = pipe(
    prompt="winter, ice, snow, water, river",
    negative_prompt="", cfg_scale=7,
    input_frames=input_video, controlnet_frames=input_video, num_frames=len(input_video),
    num_inference_steps=10, height=512, width=768,
    animatediff_batch_size=32, animatediff_stride=16, unet_batch_size=4,
    cross_frame_attention=True,
    smoother=smoother, smoother_progress_ids=[4, 9]
)

# Save images and video
save_video(output_video, "output_video.mp4", fps=30)