diff --git a/examples/wanvideo/wan_1.3b_text_to_video.py b/examples/wanvideo/wan_1.3b_text_to_video.py
index 0f4f1d9..e444cd2 100644
--- a/examples/wanvideo/wan_1.3b_text_to_video.py
+++ b/examples/wanvideo/wan_1.3b_text_to_video.py
@@ -4,7 +4,7 @@
 from modelscope import snapshot_download
 
 # Download models
-snapshot_download("Wan-AI/Wan2.1-T2V-1.3B", cache_dir="models")
+snapshot_download("Wan-AI/Wan2.1-T2V-1.3B", local_dir="models/Wan-AI/Wan2.1-T2V-1.3B")
 
 # Load models
 model_manager = ModelManager(device="cpu")
diff --git a/examples/wanvideo/wan_14b_image_to_video.py b/examples/wanvideo/wan_14b_image_to_video.py
index d3153fd..4fe6be7 100644
--- a/examples/wanvideo/wan_14b_image_to_video.py
+++ b/examples/wanvideo/wan_14b_image_to_video.py
@@ -5,7 +5,7 @@
 from PIL import Image
 
 # Download models
-snapshot_download("Wan-AI/Wan2.1-I2V-14B-480P", cache_dir="models")
+snapshot_download("Wan-AI/Wan2.1-I2V-14B-480P", local_dir="models/Wan-AI/Wan2.1-I2V-14B-480P")
 
 # Load models
 model_manager = ModelManager(device="cpu")
diff --git a/examples/wanvideo/wan_14b_text_to_video.py b/examples/wanvideo/wan_14b_text_to_video.py
index 4c5a281..654565d 100644
--- a/examples/wanvideo/wan_14b_text_to_video.py
+++ b/examples/wanvideo/wan_14b_text_to_video.py
@@ -4,22 +4,20 @@
 from modelscope import snapshot_download
 
 # Download models
-snapshot_download("Wan-AI/Wan2.1-T2V-14B", cache_dir="models")
+snapshot_download("Wan-AI/Wan2.1-T2V-14B", local_dir="models/Wan-AI/Wan2.1-T2V-14B")
 
 # Load models
 model_manager = ModelManager(device="cpu")
 model_manager.load_models(
     [
         [
-            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00007.safetensors",
-            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00007.safetensors",
-            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00007.safetensors",
-            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00007.safetensors",
-            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00007.safetensors",
-            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00007.safetensors",
-            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00007-of-00007.safetensors",
+            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors",
+            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors",
+            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors",
+            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors",
+            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors",
+            "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors",
         ],
-        "models/Wan-AI/Wan2.1-T2V-14B/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth",
         "models/Wan-AI/Wan2.1-T2V-14B/models_t5_umt5-xxl-enc-bf16.pth",
         "models/Wan-AI/Wan2.1-T2V-14B/Wan2.1_VAE.pth",
     ],
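
Note on the changes: passing local_dir instead of cache_dir pins each snapshot to the exact directory that the hard-coded load_models paths expect, rather than depending on modelscope's cache layout; the T2V-14B DiT weights are now split into six shards rather than seven; and the open-clip checkpoint is dropped from the text-to-video example, since CLIP image conditioning is only used by the image-to-video pipeline. Below is a minimal sketch of the resulting download-and-load flow for the 14B text-to-video model; the ModelManager import path is assumed from DiffSynth-Studio's other examples, and the pipeline setup that follows in the script is omitted.

from modelscope import snapshot_download
from diffsynth import ModelManager  # assumed import, as in the repo's other examples

# local_dir guarantees the files land exactly where load_models will look.
snapshot_download("Wan-AI/Wan2.1-T2V-14B", local_dir="models/Wan-AI/Wan2.1-T2V-14B")

model_manager = ModelManager(device="cpu")
model_manager.load_models(
    [
        # The six DiT shards are grouped in a nested list so that
        # ModelManager loads them as a single sharded checkpoint.
        [
            f"models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-{i:05d}-of-00006.safetensors"
            for i in range(1, 7)
        ],
        "models/Wan-AI/Wan2.1-T2V-14B/models_t5_umt5-xxl-enc-bf16.pth",
        "models/Wan-AI/Wan2.1-T2V-14B/Wan2.1_VAE.pth",
    ],
)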