diff --git a/diffsynth/models/wan_video_vae.py b/diffsynth/models/wan_video_vae.py
index ebbee9d..01b5484 100644
--- a/diffsynth/models/wan_video_vae.py
+++ b/diffsynth/models/wan_video_vae.py
@@ -785,6 +785,7 @@ class WanVideoVAE(nn.Module):
             video = self.single_decode(hidden_state, device)
             video = video.squeeze(0)
             videos.append(video)
+        videos = torch.stack(videos)
         return videos
diff --git a/diffsynth/prompters/wan_prompter.py b/diffsynth/prompters/wan_prompter.py
index d2c578d..f8c924a 100644
--- a/diffsynth/prompters/wan_prompter.py
+++ b/diffsynth/prompters/wan_prompter.py
@@ -2,20 +2,24 @@ from .base_prompter import BasePrompter
 from ..models.wan_video_text_encoder import WanTextEncoder
 from transformers import AutoTokenizer
 import os, torch
+import ftfy
 import html
 import string
 import regex as re


 def basic_clean(text):
+    text = ftfy.fix_text(text)
     text = html.unescape(html.unescape(text))
     return text.strip()

+
 def whitespace_clean(text):
     text = re.sub(r'\s+', ' ', text)
     text = text.strip()
     return text

+
 def canonicalize(text, keep_punctuation_exact_string=None):
     text = text.replace('_', ' ')
     if keep_punctuation_exact_string:
@@ -28,6 +32,7 @@ def canonicalize(text, keep_punctuation_exact_string=None):
     text = re.sub(r'\s+', ' ', text)
     return text.strip()

+
 class HuggingfaceTokenizer:

     def __init__(self, name, seq_len=None, clean=None, **kwargs):
diff --git a/diffsynth/trainers/text_to_image.py b/diffsynth/trainers/text_to_image.py
index 317883c..00a352e 100644
--- a/diffsynth/trainers/text_to_image.py
+++ b/diffsynth/trainers/text_to_image.py
@@ -269,7 +269,6 @@ def launch_training_task(model, args):
         batch_size=args.batch_size,
         num_workers=args.dataloader_num_workers
     )
-
     # train
     trainer = pl.Trainer(
         max_epochs=args.max_epochs,
diff --git a/examples/wanvideo/README.md b/examples/wanvideo/README.md
index 16e4f5d..ecfc536 100644
--- a/examples/wanvideo/README.md
+++ b/examples/wanvideo/README.md
@@ -44,6 +44,8 @@ https://github.com/user-attachments/assets/3908bc64-d451-485a-8b61-28f6d32dd92f

 Wan-Video-14B-I2V adds the functionality of image-to-video based on Wan-Video-14B-T2V. The model size remains the same, therefore the speed and VRAM requirements are also consistent. See [`./wan_14b_image_to_video.py`](./wan_14b_image_to_video.py).

+**In the sample code, we use the same settings as the T2V 14B model, with FP8 quantization enabled by default. However, we found that this model is more sensitive to precision, so if the generated video shows issues such as artifacts, please switch to bfloat16 precision and use the `num_persistent_param_in_dit` parameter to control VRAM usage.**
+
 ![Image](https://github.com/user-attachments/assets/adf8047f-7943-4aaa-a555-2b32dc415f39)

 https://github.com/user-attachments/assets/c0bdd5ca-292f-45ed-b9bc-afe193156e75
diff --git a/requirements.txt b/requirements.txt
index 2e958ea..63a871b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,3 +10,4 @@ einops
 sentencepiece
 protobuf
 modelscope
+ftfy
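
The new `ftfy` dependency feeds the `ftfy.fix_text(...)` call added to `basic_clean`, which repairs mojibake and other Unicode damage before the existing double HTML unescape. A small illustration (the sample string is made up):

```python
import html

import ftfy


def basic_clean(text):
    text = ftfy.fix_text(text)                 # repairs mojibake, e.g. "â€™" -> "’"
    text = html.unescape(html.unescape(text))  # "&amp;amp;" -> "&amp;" -> "&"
    return text.strip()


print(basic_clean("The Masterâ€™s cat &amp;amp; dog"))
# The Master’s cat & dog
```

And a minimal sketch of the bfloat16 fallback described in the new README note, assuming the `ModelManager` / `WanVideoPipeline` loading pattern from DiffSynth-Studio's Wan examples; the model file paths and the `7 * 10**9` persistence budget are illustrative, not values taken from this PR:

```python
import torch
from diffsynth import ModelManager, WanVideoPipeline

# Load the I2V 14B weights in bfloat16 instead of the default FP8 quantization
# (torch.float8_e4m3fn) used in the sample script.
model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cpu")
model_manager.load_models([
    "models/Wan-AI/Wan2.1-I2V-14B-480P/diffusion_pytorch_model.safetensors",  # illustrative paths
    "models/Wan-AI/Wan2.1-I2V-14B-480P/models_t5_umt5-xxl-enc-bf16.pth",
    "models/Wan-AI/Wan2.1-I2V-14B-480P/Wan2.1_VAE.pth",
    "models/Wan-AI/Wan2.1-I2V-14B-480P/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth",
])
pipe = WanVideoPipeline.from_model_manager(model_manager, device="cuda")

# Without FP8 the DiT no longer fits as comfortably in VRAM, so cap how many of
# its parameters stay resident on the GPU; the rest are offloaded on demand.
pipe.enable_vram_management(num_persistent_param_in_dit=7 * 10**9)
```

Smaller `num_persistent_param_in_dit` values keep fewer DiT parameters resident in VRAM, trading speed for lower peak memory; the right budget depends on the GPU.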