mirror of
https://github.com/modelscope/DiffSynth-Studio.git
synced 2026-03-25 10:48:11 +00:00
update examples
This commit is contained in:
21
examples/ExVideo/ExVideo_cogvideox_test.py
Normal file
21
examples/ExVideo/ExVideo_cogvideox_test.py
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
from diffsynth import ModelManager, CogVideoPipeline, save_video, download_models
import torch


# Example: text-to-video with CogVideoX-5B extended to 129 frames via the
# ExVideo LoRA. Fetch the base model weights and the LoRA extension module
# first (no-op if they are already present locally).
download_models(["CogVideoX-5B", "ExVideo-CogVideoX-LoRA-129f-v1"])

# Load the three CogVideoX components in bfloat16 to halve memory use,
# then attach the ExVideo LoRA and build the generation pipeline.
mgr = ModelManager(torch_dtype=torch.bfloat16)
mgr.load_models([
    "models/CogVideo/CogVideoX-5b/text_encoder",
    "models/CogVideo/CogVideoX-5b/transformer",
    "models/CogVideo/CogVideoX-5b/vae/diffusion_pytorch_model.safetensors",
])
mgr.load_lora("models/lora/ExVideo-CogVideoX-LoRA-129f-v1.safetensors")
pipe = CogVideoPipeline.from_model_manager(mgr)

# Fixed seed so the sample is reproducible.
torch.manual_seed(6)

# 129 frames is the extended length the ExVideo LoRA was trained for.
frames = pipe(
    prompt="an astronaut riding a horse on Mars.",
    height=480, width=720, num_frames=129,
    cfg_scale=7.0, num_inference_steps=100,
)
save_video(frames, "video_with_lora.mp4", fps=8, quality=5)
|
||||||
@@ -4,11 +4,19 @@ ExVideo is a post-tuning technique aimed at enhancing the capability of video ge
|
|||||||
|
|
||||||
* [Project Page](https://ecnu-cilab.github.io/ExVideoProjectPage/)
|
* [Project Page](https://ecnu-cilab.github.io/ExVideoProjectPage/)
|
||||||
* [Technical report](https://arxiv.org/abs/2406.14130)
|
* [Technical report](https://arxiv.org/abs/2406.14130)
|
||||||
* [Demo](https://huggingface.co/spaces/modelscope/ExVideo-SVD-128f-v1)
|
* **[New]** Extended models (ExVideo-CogVideoX)
|
||||||
* Extended models
|
* [HuggingFace](https://huggingface.co/ECNU-CILab/ExVideo-CogVideoX-LoRA-129f-v1)
|
||||||
|
* [ModelScope](https://modelscope.cn/models/ECNU-CILab/ExVideo-CogVideoX-LoRA-129f-v1)
|
||||||
|
* Extended models (ExVideo-SVD)
|
||||||
* [HuggingFace](https://huggingface.co/ECNU-CILab/ExVideo-SVD-128f-v1)
|
* [HuggingFace](https://huggingface.co/ECNU-CILab/ExVideo-SVD-128f-v1)
|
||||||
* [ModelScope](https://modelscope.cn/models/ECNU-CILab/ExVideo-SVD-128f-v1)
|
* [ModelScope](https://modelscope.cn/models/ECNU-CILab/ExVideo-SVD-128f-v1)
|
||||||
|
|
||||||
|
## Example: Text-to-video via extended CogVideoX-5B
|
||||||
|
|
||||||
|
Generate a video using CogVideoX-5B and our extension module. See [ExVideo_cogvideox_test.py](./ExVideo_cogvideox_test.py).
|
||||||
|
|
||||||
|
https://github.com/user-attachments/assets/321ee04b-8c17-479e-8a95-8cbcf21f8d7e
|
||||||
|
|
||||||
## Example: Text-to-video via extended Stable Video Diffusion
|
## Example: Text-to-video via extended Stable Video Diffusion
|
||||||
|
|
||||||
Generate a video using a text-to-image model and our image-to-video model. See [ExVideo_svd_test.py](./ExVideo_svd_test.py).
|
Generate a video using a text-to-image model and our image-to-video model. See [ExVideo_svd_test.py](./ExVideo_svd_test.py).
|
||||||
|
|||||||
Reference in New Issue
Block a user