Mirror of https://github.com/modelscope/DiffSynth-Studio.git (synced 2026-03-18)
import torch
from diffsynth import save_video
from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig


# Load the Krea real-time video DiT together with the Wan2.1 text encoder and VAE.
# Each component is offloaded to CPU when idle to reduce GPU memory pressure.
pipe = WanVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="krea/krea-realtime-video", origin_file_pattern="krea-realtime-video-14b.safetensors", offload_device="cpu"),
        ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
        ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
    ],
)
pipe.enable_vram_management()

# Text-to-video: 6-step sampling; cfg_scale=1 effectively disables classifier-free guidance.
video = pipe(
    prompt="a cat sitting on a boat",
    num_inference_steps=6, num_frames=81,
    seed=0, tiled=True,
    cfg_scale=1,
    sigma_shift=20,
)
save_video(video, "video1.mp4", fps=15, quality=5)
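For reference, a minimal follow-on sketch (not part of the original file): once the pipeline above has been loaded, it can be called again with a different prompt without reloading the weights. The prompt text, seed, and output filename below are placeholders chosen for illustration.

# Sketch: reuse the already-loaded pipeline for a second clip.
# Assumes the script above has run; prompt/seed/filename are placeholders.
video2 = pipe(
    prompt="a dog running on the beach",
    num_inference_steps=6, num_frames=81,
    seed=1, tiled=True,
    cfg_scale=1,
    sigma_shift=20,
)
save_video(video2, "video2.mp4", fps=15, quality=5)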