From 3edf3583b1f08944cee837b94d9f84d669c2729c Mon Sep 17 00:00:00 2001 From: Artiprocher Date: Wed, 30 Apr 2025 11:38:17 +0800 Subject: [PATCH 1/9] wan-fun-v1.1 reference control --- diffsynth/configs/model_config.py | 2 + diffsynth/models/wan_video_dit.py | 38 +++++++++++++++++++ diffsynth/pipelines/wan_video.py | 32 +++++++++++++++- .../wanvideo/wan_fun_reference_control.py | 35 +++++++++++++++++ 4 files changed, 105 insertions(+), 2 deletions(-) create mode 100644 examples/wanvideo/wan_fun_reference_control.py diff --git a/diffsynth/configs/model_config.py b/diffsynth/configs/model_config.py index 6bb9350..dc4a1ff 100644 --- a/diffsynth/configs/model_config.py +++ b/diffsynth/configs/model_config.py @@ -131,6 +131,8 @@ model_loader_configs = [ (None, "349723183fc063b2bfc10bb2835cf677", ["wan_video_dit"], [WanModel], "civitai"), (None, "efa44cddf936c70abd0ea28b6cbe946c", ["wan_video_dit"], [WanModel], "civitai"), (None, "3ef3b1f8e1dab83d5b71fd7b617f859f", ["wan_video_dit"], [WanModel], "civitai"), + (None, "70ddad9d3a133785da5ea371aae09504", ["wan_video_dit"], [WanModel], "civitai"), + (None, "26bde73488a92e64cc20b0a7485b9e5b", ["wan_video_dit"], [WanModel], "civitai"), (None, "a61453409b67cd3246cf0c3bebad47ba", ["wan_video_dit", "wan_video_vace"], [WanModel, VaceWanModel], "civitai"), (None, "cb104773c6c2cb6df4f9529ad5c60d0b", ["wan_video_dit"], [WanModel], "diffusers"), (None, "9c8818c2cbea55eca56c7b447df170da", ["wan_video_text_encoder"], [WanTextEncoder], "civitai"), diff --git a/diffsynth/models/wan_video_dit.py b/diffsynth/models/wan_video_dit.py index d9be8ab..1ca3239 100644 --- a/diffsynth/models/wan_video_dit.py +++ b/diffsynth/models/wan_video_dit.py @@ -272,6 +272,7 @@ class WanModel(torch.nn.Module): num_layers: int, has_image_input: bool, has_image_pos_emb: bool = False, + has_ref_conv: bool = False, ): super().__init__() self.dim = dim @@ -303,7 +304,10 @@ class WanModel(torch.nn.Module): if has_image_input: self.img_emb = MLP(1280, dim, 
has_pos_emb=has_image_pos_emb) # clip_feature_dim = 1280 + if has_ref_conv: + self.ref_conv = nn.Conv2d(16, dim, kernel_size=(2, 2), stride=(2, 2)) self.has_image_pos_emb = has_image_pos_emb + self.has_ref_conv = has_ref_conv def patchify(self, x: torch.Tensor): x = self.patch_embedding(x) @@ -532,6 +536,7 @@ class WanModelStateDictConverter: "eps": 1e-6 } elif hash_state_dict_keys(state_dict) == "349723183fc063b2bfc10bb2835cf677": + # 1.3B PAI control config = { "has_image_input": True, "patch_size": [1, 2, 2], @@ -546,6 +551,7 @@ class WanModelStateDictConverter: "eps": 1e-6 } elif hash_state_dict_keys(state_dict) == "efa44cddf936c70abd0ea28b6cbe946c": + # 14B PAI control config = { "has_image_input": True, "patch_size": [1, 2, 2], @@ -574,6 +580,38 @@ class WanModelStateDictConverter: "eps": 1e-6, "has_image_pos_emb": True } + elif hash_state_dict_keys(state_dict) == "70ddad9d3a133785da5ea371aae09504": + # 1.3B PAI control v1.1 + config = { + "has_image_input": True, + "patch_size": [1, 2, 2], + "in_dim": 48, + "dim": 1536, + "ffn_dim": 8960, + "freq_dim": 256, + "text_dim": 4096, + "out_dim": 16, + "num_heads": 12, + "num_layers": 30, + "eps": 1e-6, + "has_ref_conv": True + } + elif hash_state_dict_keys(state_dict) == "26bde73488a92e64cc20b0a7485b9e5b": + # 14B PAI control v1.1 + config = { + "has_image_input": True, + "patch_size": [1, 2, 2], + "in_dim": 48, + "dim": 5120, + "ffn_dim": 13824, + "freq_dim": 256, + "text_dim": 4096, + "out_dim": 16, + "num_heads": 40, + "num_layers": 40, + "eps": 1e-6, + "has_ref_conv": True + } else: config = {} return state_dict, config diff --git a/diffsynth/pipelines/wan_video.py b/diffsynth/pipelines/wan_video.py index 77835a4..b84b1b9 100644 --- a/diffsynth/pipelines/wan_video.py +++ b/diffsynth/pipelines/wan_video.py @@ -68,6 +68,7 @@ class WanVideoPipeline(BasePipeline): torch.nn.Conv3d: AutoWrappedModule, torch.nn.LayerNorm: AutoWrappedModule, RMSNorm: AutoWrappedModule, + torch.nn.Conv2d: AutoWrappedModule, }, 
module_config = dict( offload_dtype=dtype, @@ -237,6 +238,18 @@ class WanVideoPipeline(BasePipeline): return latents + def prepare_reference_image(self, reference_image, height, width): + if reference_image is not None: + self.load_models_to_device(["vae"]) + reference_image = reference_image.resize((width, height)) + reference_image = self.preprocess_images([reference_image]) + reference_image = torch.stack(reference_image, dim=2).to(dtype=self.torch_dtype, device=self.device) + reference_latents = self.vae.encode(reference_image, device=self.device) + return {"reference_latents": reference_latents} + else: + return {} + + def prepare_controlnet_kwargs(self, control_video, num_frames, height, width, clip_feature=None, y=None, tiled=True, tile_size=(34, 34), tile_stride=(18, 16)): if control_video is not None: control_latents = self.encode_control_video(control_video, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride) @@ -339,6 +352,7 @@ class WanVideoPipeline(BasePipeline): end_image=None, input_video=None, control_video=None, + reference_image=None, vace_video=None, vace_video_mask=None, vace_reference_image=None, @@ -398,6 +412,9 @@ class WanVideoPipeline(BasePipeline): else: image_emb = {} + # Reference image + reference_image_kwargs = self.prepare_reference_image(reference_image, height, width) + # ControlNet if control_video is not None: self.load_models_to_device(["image_encoder", "vae"]) @@ -435,14 +452,14 @@ class WanVideoPipeline(BasePipeline): self.dit, motion_controller=self.motion_controller, vace=self.vace, x=latents, timestep=timestep, **prompt_emb_posi, **image_emb, **extra_input, - **tea_cache_posi, **usp_kwargs, **motion_kwargs, **vace_kwargs, + **tea_cache_posi, **usp_kwargs, **motion_kwargs, **vace_kwargs, **reference_image_kwargs, ) if cfg_scale != 1.0: noise_pred_nega = model_fn_wan_video( self.dit, motion_controller=self.motion_controller, vace=self.vace, x=latents, timestep=timestep, **prompt_emb_nega, **image_emb, **extra_input, - 
**tea_cache_nega, **usp_kwargs, **motion_kwargs, **vace_kwargs, + **tea_cache_nega, **usp_kwargs, **motion_kwargs, **vace_kwargs, **reference_image_kwargs, ) noise_pred = noise_pred_nega + cfg_scale * (noise_pred_posi - noise_pred_nega) else: @@ -526,6 +543,7 @@ def model_fn_wan_video( context: torch.Tensor = None, clip_feature: Optional[torch.Tensor] = None, y: Optional[torch.Tensor] = None, + reference_latents = None, vace_context = None, vace_scale = 1.0, tea_cache: TeaCache = None, @@ -552,6 +570,12 @@ def model_fn_wan_video( x, (f, h, w) = dit.patchify(x) + # Reference image + if reference_latents is not None: + reference_latents = dit.ref_conv(reference_latents[:, :, 0]).flatten(2).transpose(1, 2) + x = torch.concat([reference_latents, x], dim=1) + f += 1 + freqs = torch.cat([ dit.freqs[0][:f].view(f, 1, 1, -1).expand(f, h, w, -1), dit.freqs[1][:h].view(1, h, 1, -1).expand(f, h, w, -1), @@ -580,6 +604,10 @@ def model_fn_wan_video( x = x + vace_hints[vace.vace_layers_mapping[block_id]] * vace_scale if tea_cache is not None: tea_cache.store(x) + + if reference_latents is not None: + x = x[:, reference_latents.shape[1]:] + f -= 1 x = dit.head(x, t) if use_unified_sequence_parallel: diff --git a/examples/wanvideo/wan_fun_reference_control.py b/examples/wanvideo/wan_fun_reference_control.py new file mode 100644 index 0000000..bc82157 --- /dev/null +++ b/examples/wanvideo/wan_fun_reference_control.py @@ -0,0 +1,35 @@ +import torch +from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData +from modelscope import snapshot_download, dataset_snapshot_download +from PIL import Image + + +# Download models +# snapshot_download("PAI/Wan2.1-Fun-1.3B-Control", local_dir="models/PAI/Wan2.1-Fun-V1.1-1.3B-Control") + +# Load models +model_manager = ModelManager(device="cpu") +model_manager.load_models( + [ + "models/PAI/Wan2.1-Fun-V1.1-14B-Control/diffusion_pytorch_model.safetensors", + 
"models/PAI/Wan2.1-Fun-V1.1-14B-Control/models_t5_umt5-xxl-enc-bf16.pth", + "models/PAI/Wan2.1-Fun-V1.1-14B-Control/Wan2.1_VAE.pth", + "models/PAI/Wan2.1-Fun-V1.1-14B-Control/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", + ], + torch_dtype=torch.bfloat16, # You can set `torch_dtype=torch.float8_e4m3fn` to enable FP8 quantization. +) +pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda") +pipe.enable_vram_management(num_persistent_param_in_dit=None) + +# Control-to-video +control_video = VideoData("xxx/pose.mp4", height=832, width=480) +control_video = [control_video[i] for i in range(49)] +video = pipe( + prompt="一位年轻女性穿着一件粉色的连衣裙,裙子上有白色的装饰和粉色的纽扣。她的头发是紫色的,头上戴着一个红色的大蝴蝶结,显得非常可爱和精致。她还戴着一个红色的领结,整体造型充满了少女感和活力。她的表情温柔,双手轻轻交叉放在身前,姿态优雅。背景是简单的灰色,没有任何多余的装饰,使得人物更加突出。她的妆容清淡自然,突显了她的清新气质。整体画面给人一种甜美、梦幻的感觉,仿佛置身于童话世界中。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + num_inference_steps=50, + reference_image=Image.open("xxx/6.png").convert("RGB").resize((480, 832)), + control_video=control_video, height=832, width=480, num_frames=49, + seed=1, tiled=True +) +save_video(video, "video1.mp4", fps=15, quality=5) From 451aab01161496fd68510e7682306eaf54ff97f2 Mon Sep 17 00:00:00 2001 From: Artiprocher Date: Sun, 4 May 2025 15:42:11 +0800 Subject: [PATCH 2/9] refactor --- diffsynth/models/wan_video_vae.py | 17 +- diffsynth/pipelines/wan_video_new.py | 981 +++++++++++++++++++++++++++ test.py | 28 + 3 files changed, 1014 insertions(+), 12 deletions(-) create mode 100644 diffsynth/pipelines/wan_video_new.py create mode 100644 test.py diff --git a/diffsynth/models/wan_video_vae.py b/diffsynth/models/wan_video_vae.py index df23076..137fd28 100644 --- a/diffsynth/models/wan_video_vae.py +++ b/diffsynth/models/wan_video_vae.py @@ -774,18 +774,11 @@ class WanVideoVAE(nn.Module): def decode(self, hidden_states, device, 
tiled=False, tile_size=(34, 34), tile_stride=(18, 16)): - hidden_states = [hidden_state.to("cpu") for hidden_state in hidden_states] - videos = [] - for hidden_state in hidden_states: - hidden_state = hidden_state.unsqueeze(0) - if tiled: - video = self.tiled_decode(hidden_state, device, tile_size, tile_stride) - else: - video = self.single_decode(hidden_state, device) - video = video.squeeze(0) - videos.append(video) - videos = torch.stack(videos) - return videos + if tiled: + video = self.tiled_decode(hidden_states, device, tile_size, tile_stride) + else: + video = self.single_decode(hidden_states, device) + return video @staticmethod diff --git a/diffsynth/pipelines/wan_video_new.py b/diffsynth/pipelines/wan_video_new.py new file mode 100644 index 0000000..dcc4485 --- /dev/null +++ b/diffsynth/pipelines/wan_video_new.py @@ -0,0 +1,981 @@ +import torch, warnings, glob +import numpy as np +from PIL import Image +from einops import repeat, reduce +from typing import Optional, Union +from dataclasses import dataclass +from modelscope import snapshot_download + + +import types +from ..models import ModelManager +from ..models.wan_video_dit import WanModel +from ..models.wan_video_text_encoder import WanTextEncoder +from ..models.wan_video_vae import WanVideoVAE +from ..models.wan_video_image_encoder import WanImageEncoder +from ..models.wan_video_vace import VaceWanModel +from ..schedulers.flow_match import FlowMatchScheduler +from .base import BasePipeline +from ..prompters import WanPrompter +import torch, os +from einops import rearrange +import numpy as np +from PIL import Image +from tqdm import tqdm +from typing import Optional + +from ..vram_management import enable_vram_management, AutoWrappedModule, AutoWrappedLinear +from ..models.wan_video_text_encoder import T5RelativeEmbedding, T5LayerNorm +from ..models.wan_video_dit import RMSNorm, sinusoidal_embedding_1d +from ..models.wan_video_vae import RMS_norm, CausalConv3d, Upsample +from 
..models.wan_video_motion_controller import WanMotionControllerModel + + + +class BasePipeline(torch.nn.Module): + + def __init__( + self, + device="cuda", torch_dtype=torch.float16, + height_division_factor=64, width_division_factor=64, + time_division_factor=None, time_division_remainder=None, + ): + super().__init__() + # The device and torch_dtype is used for the storage of intermediate variables, not models. + self.device = device + self.torch_dtype = torch_dtype + # The following parameters are used for shape check. + self.height_division_factor = height_division_factor + self.width_division_factor = width_division_factor + self.time_division_factor = time_division_factor + self.time_division_remainder = time_division_remainder + self.vram_management_enabled = False + + + def check_resize_height_width(self, height, width, num_frames=None): + # Shape check + if height % self.height_division_factor != 0: + height = (height + self.height_division_factor - 1) // self.height_division_factor * self.height_division_factor + print(f"height % {self.height_division_factor} != 0. We round it up to {height}.") + if width % self.width_division_factor != 0: + width = (width + self.width_division_factor - 1) // self.width_division_factor * self.width_division_factor + print(f"width % {self.width_division_factor} != 0. We round it up to {width}.") + if num_frames is None: + return height, width + else: + if num_frames % self.time_division_factor != self.time_division_remainder: + num_frames = (num_frames + self.time_division_factor - 1) // self.time_division_factor * self.time_division_factor + self.time_division_remainder + print(f"num_frames % {self.time_division_factor} != {self.time_division_remainder}. 
We round it up to {num_frames}.") + return height, width, num_frames + + + def preprocess_image(self, image, torch_dtype=None, device=None, pattern="B C H W", min_value=-1, max_value=1): + # Transform a PIL.Image to torch.Tensor + image = torch.Tensor(np.array(image, dtype=np.float32)) + image = image.to(dtype=torch_dtype or self.torch_dtype, device=device or self.device) + image = image * ((max_value - min_value) / 255) + min_value + image = repeat(image, f"H W C -> {pattern}", **({"B": 1} if "B" in pattern else {})) + return image + + + def preprocess_video(self, video, torch_dtype=None, device=None, pattern="B C T H W", min_value=-1, max_value=1): + # Transform a list of PIL.Image to torch.Tensor + video = [self.preprocess_image(image, torch_dtype=torch_dtype, device=device, min_value=min_value, max_value=max_value) for image in video] + video = torch.stack(video, dim=pattern.index("T") // 2) + return video + + + def vae_output_to_image(self, vae_output, pattern="B C H W", min_value=-1, max_value=1): + # Transform a torch.Tensor to PIL.Image + if pattern != "H W C": + vae_output = reduce(vae_output, f"{pattern} -> H W C", reduction="mean") + image = ((vae_output - min_value) * (255 / (max_value - min_value))).clip(0, 255) + image = image.to(device="cpu", dtype=torch.uint8) + image = Image.fromarray(image.numpy()) + return image + + + def vae_output_to_video(self, vae_output, pattern="B C T H W", min_value=-1, max_value=1): + # Transform a torch.Tensor to list of PIL.Image + if pattern != "T H W C": + vae_output = reduce(vae_output, f"{pattern} -> T H W C", reduction="mean") + video = [self.vae_output_to_image(image, pattern="H W C", min_value=min_value, max_value=max_value) for image in vae_output] + return video + + + def load_models_to_device(self, model_names=[]): + if self.vram_management_enabled: + # offload models + for name, model in self.named_children(): + if name not in model_names: + if hasattr(model, "vram_management_enabled") and 
model.vram_management_enabled: + for module in model.modules(): + if hasattr(module, "offload"): + module.offload() + else: + model.cpu() + torch.cuda.empty_cache() + # onload models + for name, model in self.named_children(): + if name in model_names: + if hasattr(model, "vram_management_enabled") and model.vram_management_enabled: + for module in model.modules(): + if hasattr(module, "onload"): + module.onload() + else: + model.to(self.device) + + + def generate_noise(self, shape, seed=None, rand_device="cpu", rand_torch_dtype=torch.float32, device=None, torch_dtype=None): + # Initialize Gaussian noise + generator = None if seed is None else torch.Generator(rand_device).manual_seed(seed) + noise = torch.randn(shape, generator=generator, device=rand_device, dtype=rand_torch_dtype) + noise = noise.to(dtype=torch_dtype or self.torch_dtype, device=device or self.device) + return noise + + + def enable_cpu_offload(self): + warnings.warn("enable_cpu_offload is deprecated. This feature is automatically enabled if offload_device != device") + + + +@dataclass +class ModelConfig: + path: Union[str, list[str]] = None + model_id: str = None + origin_file_pattern: Union[str, list[str]] = None + download_resource: str = "ModelScope" + offload_device: Optional[Union[str, torch.device]] = None + quantization_dtype: Optional[torch.dtype] = None + + def download_if_necessary(self, local_model_path="./models", skip_download=False): + if self.path is None: + if self.model_id is None or self.origin_file_pattern is None: + raise ValueError(f"""No valid model files. 
Please use `ModelConfig(path="xxx")` or `ModelConfig(model_id="xxx/yyy", origin_file_pattern="zzz")`.""") + if not skip_download: + snapshot_download( + self.model_id, + local_dir=os.path.join(local_model_path, self.model_id), + allow_file_pattern=self.origin_file_pattern, + local_files_only=False + ) + self.path = glob.glob(os.path.join(local_model_path, self.model_id, self.origin_file_pattern)) + if isinstance(self.path, list) and len(self.path) == 1: + self.path = self.path[0] + + + +class WanVideoPipeline(BasePipeline): + + def __init__(self, device="cuda", torch_dtype=torch.bfloat16, tokenizer_path=None): + super().__init__( + device=device, torch_dtype=torch_dtype, + height_division_factor=16, width_division_factor=16, time_division_factor=4, time_division_remainder=1 + ) + self.scheduler = FlowMatchScheduler(shift=5, sigma_min=0.0, extra_one_step=True) + self.prompter = WanPrompter(tokenizer_path=tokenizer_path) + self.text_encoder: WanTextEncoder = None + self.image_encoder: WanImageEncoder = None + self.dit: WanModel = None + self.vae: WanVideoVAE = None + self.motion_controller: WanMotionControllerModel = None + self.vace: VaceWanModel = None + self.in_iteration_models = ("dit", "motion_controller", "vace") + self.unit_runner = PipelineUnitRunner() + self.units = [ + WanVideoUnit_ShapeChecker(), + WanVideoUnit_NoiseInitializer(), + WanVideoUnit_InputVideoEmbedder(), + WanVideoUnit_PromptEmbedder(), + WanVideoUnit_ImageEmbedder(), + WanVideoUnit_FunReference(), + WanVideoUnit_FunControl(), + WanVideoUnit_SpeedControl(), + WanVideoUnit_VACE(), + WanVideoUnit_TeaCache(), + WanVideoUnit_CfgMerger(), + ] + + + def enable_vram_management(self, num_persistent_param_in_dit=None): + self.vram_management_enabled = True + if self.text_encoder is not None: + dtype = next(iter(self.text_encoder.parameters())).dtype + enable_vram_management( + self.text_encoder, + module_map = { + torch.nn.Linear: AutoWrappedLinear, + torch.nn.Embedding: AutoWrappedModule, + 
T5RelativeEmbedding: AutoWrappedModule, + T5LayerNorm: AutoWrappedModule, + }, + module_config = dict( + offload_dtype=dtype, + offload_device="cpu", + onload_dtype=dtype, + onload_device="cpu", + computation_dtype=self.torch_dtype, + computation_device=self.device, + ), + ) + if self.dit is not None: + dtype = next(iter(self.dit.parameters())).dtype + enable_vram_management( + self.dit, + module_map = { + torch.nn.Linear: AutoWrappedLinear, + torch.nn.Conv3d: AutoWrappedModule, + torch.nn.LayerNorm: AutoWrappedModule, + RMSNorm: AutoWrappedModule, + torch.nn.Conv2d: AutoWrappedModule, + }, + module_config = dict( + offload_dtype=dtype, + offload_device="cpu", + onload_dtype=dtype, + onload_device=self.device, + computation_dtype=self.torch_dtype, + computation_device=self.device, + ), + max_num_param=num_persistent_param_in_dit, + overflow_module_config = dict( + offload_dtype=dtype, + offload_device="cpu", + onload_dtype=dtype, + onload_device="cpu", + computation_dtype=self.torch_dtype, + computation_device=self.device, + ), + ) + if self.vae is not None: + dtype = next(iter(self.vae.parameters())).dtype + enable_vram_management( + self.vae, + module_map = { + torch.nn.Linear: AutoWrappedLinear, + torch.nn.Conv2d: AutoWrappedModule, + RMS_norm: AutoWrappedModule, + CausalConv3d: AutoWrappedModule, + Upsample: AutoWrappedModule, + torch.nn.SiLU: AutoWrappedModule, + torch.nn.Dropout: AutoWrappedModule, + }, + module_config = dict( + offload_dtype=dtype, + offload_device="cpu", + onload_dtype=dtype, + onload_device=self.device, + computation_dtype=self.torch_dtype, + computation_device=self.device, + ), + ) + if self.image_encoder is not None: + dtype = next(iter(self.image_encoder.parameters())).dtype + enable_vram_management( + self.image_encoder, + module_map = { + torch.nn.Linear: AutoWrappedLinear, + torch.nn.Conv2d: AutoWrappedModule, + torch.nn.LayerNorm: AutoWrappedModule, + }, + module_config = dict( + offload_dtype=dtype, + offload_device="cpu", + 
onload_dtype=dtype, + onload_device="cpu", + computation_dtype=dtype, + computation_device=self.device, + ), + ) + if self.motion_controller is not None: + dtype = next(iter(self.motion_controller.parameters())).dtype + enable_vram_management( + self.motion_controller, + module_map = { + torch.nn.Linear: AutoWrappedLinear, + }, + module_config = dict( + offload_dtype=dtype, + offload_device="cpu", + onload_dtype=dtype, + onload_device="cpu", + computation_dtype=dtype, + computation_device=self.device, + ), + ) + if self.vace is not None: + enable_vram_management( + self.vace, + module_map = { + torch.nn.Linear: AutoWrappedLinear, + torch.nn.Conv3d: AutoWrappedModule, + torch.nn.LayerNorm: AutoWrappedModule, + RMSNorm: AutoWrappedModule, + }, + module_config = dict( + offload_dtype=dtype, + offload_device="cpu", + onload_dtype=dtype, + onload_device=self.device, + computation_dtype=self.torch_dtype, + computation_device=self.device, + ), + ) + + + @staticmethod + def from_pretrained( + torch_dtype: torch.dtype = torch.bfloat16, + device: Union[str, torch.device] = "cuda", + model_configs: list[ModelConfig] = [], + tokenizer_config: ModelConfig = ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/*"), + local_model_path: str = "./models", + skip_download: bool = False + ): + # Download and load models + model_manager = ModelManager() + for model_config in model_configs: + model_config.download_if_necessary(local_model_path, skip_download=skip_download) + model_manager.load_model( + model_config.path, + device=model_config.offload_device or device, + torch_dtype=model_config.quantization_dtype or torch_dtype + ) + + # Initialize pipeline + pipe = WanVideoPipeline(device=device, torch_dtype=torch_dtype) + pipe.text_encoder = model_manager.fetch_model("wan_video_text_encoder") + pipe.dit = model_manager.fetch_model("wan_video_dit") + pipe.vae = model_manager.fetch_model("wan_video_vae") + pipe.image_encoder = 
model_manager.fetch_model("wan_video_image_encoder") + pipe.motion_controller = model_manager.fetch_model("wan_video_motion_controller") + pipe.vace = model_manager.fetch_model("wan_video_vace") + + # Initialize tokenizer + tokenizer_config.download_if_necessary(local_model_path, skip_download=skip_download) + pipe.prompter.fetch_models(pipe.text_encoder) + pipe.prompter.fetch_tokenizer(tokenizer_config.path) + return pipe + + + def denoising_model(self): + return self.dit + + + def encode_video(self, input_video, tiled=True, tile_size=(34, 34), tile_stride=(18, 16)): + latents = self.vae.encode(input_video, device=self.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride) + return latents + + + @torch.no_grad() + def __call__( + self, + # Prompt + prompt, + negative_prompt="", + # Image-to-video + input_image=None, + # First-last-frame-to-video + end_image=None, + # Video-to-video + input_video=None, + denoising_strength=1.0, + # ControlNet + control_video=None, + reference_image=None, + # VACE + vace_video=None, + vace_video_mask=None, + vace_reference_image=None, + vace_scale=1.0, + # Randomness + seed=None, + rand_device="cpu", + # Shape + height=480, + width=832, + num_frames=81, + # Classifier-free guidance + cfg_scale=5.0, + cfg_merge=False, + # Scheduler + num_inference_steps=50, + sigma_shift=5.0, + # Speed control + motion_bucket_id=None, + # VAE tiling + tiled=True, + tile_size=(30, 52), + tile_stride=(15, 26), + # Sliding window + sliding_window_size: Optional[int] = None, + sliding_window_stride: Optional[int] = None, + # Teacache + tea_cache_l1_thresh=None, + tea_cache_model_id="", + # progress_bar + progress_bar_cmd=tqdm, + ): + # Scheduler + self.scheduler.set_timesteps(num_inference_steps, denoising_strength=denoising_strength, shift=sigma_shift) + + # Inputs + inputs_posi = { + "prompt": prompt, + "tea_cache_l1_thresh": tea_cache_l1_thresh, "tea_cache_model_id": tea_cache_model_id, + } + inputs_nega = { + "negative_prompt": 
negative_prompt, + "tea_cache_l1_thresh": tea_cache_l1_thresh, "tea_cache_model_id": tea_cache_model_id, + } + inputs_shared = { + "input_image": input_image, + "end_image": end_image, + "input_video": input_video, "denoising_strength": denoising_strength, + "control_video": control_video, "reference_image": reference_image, + "vace_video": vace_video, "vace_video_mask": vace_video_mask, "vace_reference_image": vace_reference_image, "vace_scale": vace_scale, + "seed": seed, "rand_device": rand_device, + "height": height, "width": width, "num_frames": num_frames, + "cfg_scale": cfg_scale, "cfg_merge": cfg_merge, + "num_inference_steps": num_inference_steps, "sigma_shift": sigma_shift, + "motion_bucket_id": motion_bucket_id, + "tiled": tiled, "tile_size": tile_size, "tile_stride": tile_stride, + "sliding_window_size": sliding_window_size, "sliding_window_stride": sliding_window_stride, + } + for unit in self.units: + inputs_shared, inputs_posi, inputs_nega = self.unit_runner(unit, self, inputs_shared, inputs_posi, inputs_nega) + + # Denoise + self.load_models_to_device(self.in_iteration_models) + models = {name: getattr(self, name) for name in self.in_iteration_models} + for progress_id, timestep in enumerate(progress_bar_cmd(self.scheduler.timesteps)): + timestep = timestep.unsqueeze(0).to(dtype=self.torch_dtype, device=self.device) + + # Inference + noise_pred_posi = model_fn_wan_video(**models, **inputs_shared, **inputs_posi, timestep=timestep) + if cfg_scale != 1.0: + if cfg_merge: + noise_pred_posi, noise_pred_nega = noise_pred_posi.chunk(2, dim=0) + else: + noise_pred_nega = model_fn_wan_video(**models, **inputs_shared, **inputs_nega, timestep=timestep) + noise_pred = noise_pred_nega + cfg_scale * (noise_pred_posi - noise_pred_nega) + else: + noise_pred = noise_pred_posi + + # Scheduler + inputs_shared["latents"] = self.scheduler.step(noise_pred, self.scheduler.timesteps[progress_id], inputs_shared["latents"]) + + # VACE (TODO: remove it) + if 
vace_reference_image is not None: + latents = latents[:, :, 1:] + + # Decode + self.load_models_to_device(['vae']) + video = self.vae.decode(inputs_shared["latents"], device=self.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride) + video = self.vae_output_to_video(video) + self.load_models_to_device([]) + + return video + + + +class PipelineUnit: + def __init__( + self, + seperate_cfg: bool = False, + take_over: bool = False, + input_params: tuple[str] = None, + input_params_posi: dict[str, str] = None, + input_params_nega: dict[str, str] = None, + onload_model_names: tuple[str] = None + ): + self.seperate_cfg = seperate_cfg + self.take_over = take_over + self.input_params = input_params + self.input_params_posi = input_params_posi + self.input_params_nega = input_params_nega + self.onload_model_names = onload_model_names + + + def process(self, pipe: WanVideoPipeline, inputs: dict, positive=True, **kwargs) -> dict: + raise NotImplementedError("`process` is not implemented.") + + + +class PipelineUnitRunner: + def __init__(self): + pass + + def __call__(self, unit: PipelineUnit, pipe: WanVideoPipeline, inputs_shared: dict, inputs_posi: dict, inputs_nega: dict) -> tuple[dict, dict]: + if unit.take_over: + # Let the pipeline unit take over this function. 
+ inputs_shared, inputs_posi, inputs_nega = unit.process(pipe, inputs_shared=inputs_shared, inputs_posi=inputs_posi, inputs_nega=inputs_nega) + elif unit.seperate_cfg: + # Positive side + processor_inputs = {name: inputs_posi.get(name_) for name, name_ in unit.input_params_posi.items()} + processor_outputs = unit.process(pipe, **processor_inputs) + inputs_posi.update(processor_outputs) + # Negative side + if inputs_shared["cfg_scale"] != 1: + processor_inputs = {name: inputs_nega.get(name_) for name, name_ in unit.input_params_nega.items()} + processor_outputs = unit.process(pipe, **processor_inputs) + inputs_nega.update(processor_outputs) + else: + inputs_nega.update(processor_outputs) + else: + processor_inputs = {name: inputs_shared.get(name) for name in unit.input_params} + processor_outputs = unit.process(pipe, **processor_inputs) + inputs_shared.update(processor_outputs) + return inputs_shared, inputs_posi, inputs_nega + + + +class WanVideoUnit_ShapeChecker(PipelineUnit): + def __init__(self): + super().__init__(input_params=("height", "width", "num_frames")) + + def process(self, pipe: WanVideoPipeline, height, width, num_frames): + height, width, num_frames = pipe.check_resize_height_width(height, width, num_frames) + return {"height": height, "width": width, "num_frames": num_frames} + + + +class WanVideoUnit_NoiseInitializer(PipelineUnit): + def __init__(self): + super().__init__(input_params=("height", "width", "num_frames", "seed", "rand_device", "vace_reference_image")) + + def process(self, pipe: WanVideoPipeline, height, width, num_frames, seed, rand_device, vace_reference_image): + length = (num_frames - 1) // 4 + 1 + if vace_reference_image is not None: + length += 1 + noise = pipe.generate_noise((1, 16, length, height//8, width//8), seed=seed, rand_device=rand_device) + if vace_reference_image is not None: + noise = torch.concat((noise[:, :, -1:], noise[:, :, :-1]), dim=2) + return {"noise": noise} + + + +class 
WanVideoUnit_InputVideoEmbedder(PipelineUnit): + def __init__(self): + super().__init__( + input_params=("input_video", "noise", "tiled", "tile_size", "tile_stride"), + onload_model_names=("vae",) + ) + + def process(self, pipe: WanVideoPipeline, input_video, noise, tiled, tile_size, tile_stride): + if input_video is None: + return {"latents": noise} + pipe.load_models_to_device(["vae"]) + input_video = pipe.preprocess_video(input_video) + latents = pipe.encode_video(input_video, tiled, tile_size, tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) + latents = pipe.scheduler.add_noise(latents, noise, timestep=pipe.scheduler.timesteps[0]) + return {"latents": latents} + + + +class WanVideoUnit_PromptEmbedder(PipelineUnit): + def __init__(self): + super().__init__( + seperate_cfg=True, + input_params_posi={"prompt": "prompt", "positive": "positive"}, + input_params_nega={"prompt": "negative_prompt", "positive": "positive"}, + onload_model_names=("text_encoder",) + ) + + def process(self, pipe: WanVideoPipeline, prompt, positive) -> dict: + pipe.load_models_to_device(self.onload_model_names) + prompt_emb = pipe.prompter.encode_prompt(prompt, positive=positive, device=pipe.device) + return {"context": prompt_emb} + + + +class WanVideoUnit_ImageEmbedder(PipelineUnit): + def __init__(self): + super().__init__( + input_params=("input_image", "end_image", "num_frames", "height", "width", "tiled", "tile_size", "tile_stride"), + onload_model_names=("image_encoder", "vae") + ) + + def process(self, pipe: WanVideoPipeline, input_image, end_image, num_frames, height, width, tiled, tile_size, tile_stride): + if input_image is None: + return {} + pipe.load_models_to_device(self.onload_model_names) + image = pipe.preprocess_image(input_image.resize((width, height))).to(pipe.device) + clip_context = pipe.image_encoder.encode_image([image]) + msk = torch.ones(1, num_frames, height//8, width//8, device=pipe.device) + msk[:, 1:] = 0 + if end_image is not None: + end_image = 
pipe.preprocess_image(end_image.resize((width, height))).to(pipe.device)
+            vae_input = torch.concat([image.transpose(0,1), torch.zeros(3, num_frames-2, height, width).to(image.device), end_image.transpose(0,1)],dim=1)
+            if pipe.dit.has_image_pos_emb:
+                clip_context = torch.concat([clip_context, pipe.image_encoder.encode_image([end_image])], dim=1)
+            msk[:, -1:] = 1
+        else:
+            vae_input = torch.concat([image.transpose(0, 1), torch.zeros(3, num_frames-1, height, width).to(image.device)], dim=1)
+
+        msk = torch.concat([torch.repeat_interleave(msk[:, 0:1], repeats=4, dim=1), msk[:, 1:]], dim=1)
+        msk = msk.view(1, msk.shape[1] // 4, 4, height//8, width//8)
+        msk = msk.transpose(1, 2)[0]
+
+        y = pipe.vae.encode([vae_input.to(dtype=pipe.torch_dtype, device=pipe.device)], device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride)[0]
+        y = y.to(dtype=pipe.torch_dtype, device=pipe.device)
+        y = torch.concat([msk, y])
+        y = y.unsqueeze(0)
+        clip_context = clip_context.to(dtype=pipe.torch_dtype, device=pipe.device)
+        y = y.to(dtype=pipe.torch_dtype, device=pipe.device)
+        return {"clip_feature": clip_context, "y": y}
+
+
+
+class WanVideoUnit_FunControl(PipelineUnit):
+    def __init__(self):
+        super().__init__(
+            input_params=("control_video", "num_frames", "height", "width", "tiled", "tile_size", "tile_stride", "clip_feature", "y"),
+            onload_model_names=("vae",)
+        )
+
+    def process(self, pipe: WanVideoPipeline, control_video, num_frames, height, width, tiled, tile_size, tile_stride, clip_feature, y):
+        if control_video is None:
+            return {}
+        pipe.load_models_to_device(self.onload_model_names)
+        control_video = pipe.preprocess_video(control_video)
+        control_latents = pipe.encode_video(control_video, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride)
+        control_latents = control_latents.to(dtype=pipe.torch_dtype, device=pipe.device)
+        if clip_feature is None or y is None:
+            clip_feature = torch.zeros((1, 257, 1280), dtype=pipe.torch_dtype,
device=pipe.device)
+            y = torch.zeros((1, 16, (num_frames - 1) // 4 + 1, height//8, width//8), dtype=pipe.torch_dtype, device=pipe.device)
+        else:
+            y = y[:, -16:]
+        y = torch.concat([control_latents, y], dim=1)
+        return {"clip_feature": clip_feature, "y": y}
+
+
+
+class WanVideoUnit_FunReference(PipelineUnit):
+    def __init__(self):
+        super().__init__(
+            input_params=("reference_image", "height", "width"),
+            onload_model_names=("vae",)
+        )
+
+    def process(self, pipe: WanVideoPipeline, reference_image, height, width):
+        if reference_image is None:
+            return {}
+        pipe.load_models_to_device(["vae"])
+        reference_image = reference_image.resize((width, height))
+        reference_image = pipe.preprocess_video([reference_image])
+        reference_latents = pipe.vae.encode(reference_image, device=pipe.device)
+        return {"reference_latents": reference_latents}
+
+
+
+class WanVideoUnit_SpeedControl(PipelineUnit):
+    def __init__(self):
+        super().__init__(input_params=("motion_bucket_id",))
+
+    def process(self, pipe: WanVideoPipeline, motion_bucket_id):
+        if motion_bucket_id is None:
+            return {}
+        motion_bucket_id = torch.Tensor((motion_bucket_id,)).to(dtype=pipe.torch_dtype, device=pipe.device)
+        return {"motion_bucket_id": motion_bucket_id}
+
+
+
+class WanVideoUnit_VACE(PipelineUnit):
+    def __init__(self):
+        super().__init__(
+            input_params=("vace_video", "vace_mask", "vace_reference_image", "vace_scale", "height", "width", "num_frames", "tiled", "tile_size", "tile_stride"),
+            onload_model_names=("vae",)
+        )
+
+    def process(
+        self,
+        pipe: WanVideoPipeline,
+        vace_video, vace_mask, vace_reference_image, vace_scale,
+        height, width, num_frames,
+        tiled, tile_size, tile_stride
+    ):
+        if vace_video is not None or vace_mask is not None or vace_reference_image is not None:
+            pipe.load_models_to_device(["vae"])
+            if vace_video is None:
+                vace_video = torch.zeros((1, 3, num_frames, height, width), dtype=pipe.torch_dtype, device=pipe.device)
+            else:
+                vace_video = 
pipe.preprocess_video(vace_video) + vace_video = torch.stack(vace_video, dim=2).to(dtype=pipe.torch_dtype, device=pipe.device) + + if vace_mask is None: + vace_mask = torch.ones_like(vace_video) + else: + vace_mask = pipe.preprocess_video(vace_mask) + vace_mask = torch.stack(vace_mask, dim=2).to(dtype=pipe.torch_dtype, device=pipe.device) + + inactive = vace_video * (1 - vace_mask) + 0 * vace_mask + reactive = vace_video * vace_mask + 0 * (1 - vace_mask) + inactive = pipe.encode_video(inactive, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) + reactive = pipe.encode_video(reactive, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) + vace_video_latents = torch.concat((inactive, reactive), dim=1) + + vace_mask_latents = rearrange(vace_mask[0,0], "T (H P) (W Q) -> 1 (P Q) T H W", P=8, Q=8) + vace_mask_latents = torch.nn.functional.interpolate(vace_mask_latents, size=((vace_mask_latents.shape[2] + 3) // 4, vace_mask_latents.shape[3], vace_mask_latents.shape[4]), mode='nearest-exact') + + if vace_reference_image is None: + pass + else: + vace_reference_image = pipe.preprocess_video([vace_reference_image]) + vace_reference_image = torch.stack(vace_reference_image, dim=2).to(dtype=pipe.torch_dtype, device=pipe.device) + vace_reference_latents = pipe.encode_video(vace_reference_image, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) + vace_reference_latents = torch.concat((vace_reference_latents, torch.zeros_like(vace_reference_latents)), dim=1) + vace_video_latents = torch.concat((vace_reference_latents, vace_video_latents), dim=2) + vace_mask_latents = torch.concat((torch.zeros_like(vace_mask_latents[:, :, :1]), vace_mask_latents), dim=2) + + vace_context = torch.concat((vace_video_latents, vace_mask_latents), dim=1) + return {"vace_context": vace_context, "vace_scale": vace_scale} + else: + return 
{"vace_context": None, "vace_scale": vace_scale} + + + +class WanVideoUnit_TeaCache(PipelineUnit): + def __init__(self): + super().__init__( + seperate_cfg=True, + input_params_posi={"num_inference_steps": "num_inference_steps", "tea_cache_l1_thresh": "tea_cache_l1_thresh", "tea_cache_model_id": "tea_cache_model_id"}, + input_params_nega={"num_inference_steps": "num_inference_steps", "tea_cache_l1_thresh": "tea_cache_l1_thresh", "tea_cache_model_id": "tea_cache_model_id"}, + ) + + def process(self, pipe: WanVideoPipeline, num_inference_steps, tea_cache_l1_thresh, tea_cache_model_id): + if tea_cache_l1_thresh is None: + return {} + return {"tea_cache": TeaCache(num_inference_steps, rel_l1_thresh=tea_cache_l1_thresh, model_id=tea_cache_model_id)} + + + +class WanVideoUnit_CfgMerger(PipelineUnit): + def __init__(self): + super().__init__(take_over=True) + + def process(self, pipe: WanVideoPipeline, inputs_shared, inputs_posi, inputs_nega): + if not inputs_shared["cfg_merge"]: + return inputs_shared, inputs_posi, inputs_nega + inputs_shared["context"] = torch.concat((inputs_posi["context"], inputs_nega["context"]), dim=0) + inputs_posi.clear() + inputs_nega.clear() + return inputs_shared, inputs_posi, inputs_nega + + + +class TeaCache: + def __init__(self, num_inference_steps, rel_l1_thresh, model_id): + self.num_inference_steps = num_inference_steps + self.step = 0 + self.accumulated_rel_l1_distance = 0 + self.previous_modulated_input = None + self.rel_l1_thresh = rel_l1_thresh + self.previous_residual = None + self.previous_hidden_states = None + + self.coefficients_dict = { + "Wan2.1-T2V-1.3B": [-5.21862437e+04, 9.23041404e+03, -5.28275948e+02, 1.36987616e+01, -4.99875664e-02], + "Wan2.1-T2V-14B": [-3.03318725e+05, 4.90537029e+04, -2.65530556e+03, 5.87365115e+01, -3.15583525e-01], + "Wan2.1-I2V-14B-480P": [2.57151496e+05, -3.54229917e+04, 1.40286849e+03, -1.35890334e+01, 1.32517977e-01], + "Wan2.1-I2V-14B-720P": [ 8.10705460e+03, 2.13393892e+03, -3.72934672e+02, 
1.66203073e+01, -4.17769401e-02], + } + if model_id not in self.coefficients_dict: + supported_model_ids = ", ".join([i for i in self.coefficients_dict]) + raise ValueError(f"{model_id} is not a supported TeaCache model id. Please choose a valid model id in ({supported_model_ids}).") + self.coefficients = self.coefficients_dict[model_id] + + def check(self, dit: WanModel, x, t_mod): + modulated_inp = t_mod.clone() + if self.step == 0 or self.step == self.num_inference_steps - 1: + should_calc = True + self.accumulated_rel_l1_distance = 0 + else: + coefficients = self.coefficients + rescale_func = np.poly1d(coefficients) + self.accumulated_rel_l1_distance += rescale_func(((modulated_inp-self.previous_modulated_input).abs().mean() / self.previous_modulated_input.abs().mean()).cpu().item()) + if self.accumulated_rel_l1_distance < self.rel_l1_thresh: + should_calc = False + else: + should_calc = True + self.accumulated_rel_l1_distance = 0 + self.previous_modulated_input = modulated_inp + self.step += 1 + if self.step == self.num_inference_steps: + self.step = 0 + if should_calc: + self.previous_hidden_states = x.clone() + return not should_calc + + def store(self, hidden_states): + self.previous_residual = hidden_states - self.previous_hidden_states + self.previous_hidden_states = None + + def update(self, hidden_states): + hidden_states = hidden_states + self.previous_residual + return hidden_states + + + +class TemporalTiler_BCTHW: + def __init__(self): + pass + + def build_1d_mask(self, length, left_bound, right_bound, border_width): + x = torch.ones((length,)) + if not left_bound: + x[:border_width] = (torch.arange(border_width) + 1) / border_width + if not right_bound: + x[-border_width:] = torch.flip((torch.arange(border_width) + 1) / border_width, dims=(0,)) + return x + + def build_mask(self, data, is_bound, border_width): + _, _, T, _, _ = data.shape + t = self.build_1d_mask(T, is_bound[0], is_bound[1], border_width[0]) + mask = repeat(t, "T -> 1 1 T 1 1") + 
return mask + + def run(self, model_fn, sliding_window_size, sliding_window_stride, computation_device, computation_dtype, model_kwargs, tensor_names): + tensor_names = [tensor_name for tensor_name in tensor_names if model_kwargs.get(tensor_name) is not None] + tensor_dict = {tensor_name: model_kwargs[tensor_name] for tensor_name in tensor_names} + B, C, T, H, W = tensor_dict[tensor_names[0]].shape + data_device, data_dtype = tensor_dict[tensor_names[0]].device, tensor_dict[tensor_names[0]].dtype + value = torch.zeros((B, C, T, H, W), device=data_device, dtype=data_dtype) + weight = torch.zeros((1, 1, T, 1, 1), device=data_device, dtype=data_dtype) + for t in range(0, T, sliding_window_stride): + if t - sliding_window_stride >= 0 and t - sliding_window_stride + sliding_window_size >= T: + continue + t_ = min(t + sliding_window_size, T) + model_kwargs.update({ + tensor_name: tensor_dict[tensor_name][:, :, t: t_:, :].to(device=computation_device, dtype=computation_dtype) \ + for tensor_name in tensor_names + }) + model_output = model_fn(**model_kwargs).to(device=data_device, dtype=data_dtype) + mask = self.build_mask( + model_output, + is_bound=(t == 0, t_ == T), + border_width=(sliding_window_size - sliding_window_stride,) + ).to(device=data_device, dtype=data_dtype) + value[:, :, t: t_, :, :] += model_output * mask + weight[:, :, t: t_, :, :] += mask + value /= weight + model_kwargs.update(tensor_dict) + return value + + + +def model_fn_wan_video( + dit: WanModel, + motion_controller: WanMotionControllerModel = None, + vace: VaceWanModel = None, + latents: torch.Tensor = None, + timestep: torch.Tensor = None, + context: torch.Tensor = None, + clip_feature: Optional[torch.Tensor] = None, + y: Optional[torch.Tensor] = None, + reference_latents = None, + vace_context = None, + vace_scale = 1.0, + tea_cache: TeaCache = None, + use_unified_sequence_parallel: bool = False, + motion_bucket_id: Optional[torch.Tensor] = None, + sliding_window_size: Optional[int] = None, + 
sliding_window_stride: Optional[int] = None, + **kwargs, +): + if sliding_window_size is not None and sliding_window_stride is not None: + model_kwargs = dict( + dit=dit, + motion_controller=motion_controller, + vace=vace, + latents=latents, + timestep=timestep, + context=context, + clip_feature=clip_feature, + y=y, + reference_latents=reference_latents, + vace_context=vace_context, + vace_scale=vace_scale, + tea_cache=tea_cache, + use_unified_sequence_parallel=use_unified_sequence_parallel, + motion_bucket_id=motion_bucket_id, + ) + return TemporalTiler_BCTHW().run( + model_fn_wan_video, + sliding_window_size, sliding_window_stride, + latents.device, latents.dtype, + model_kwargs=model_kwargs, + tensor_names=["latents", "y"] + ) + + if use_unified_sequence_parallel: + import torch.distributed as dist + from xfuser.core.distributed import (get_sequence_parallel_rank, + get_sequence_parallel_world_size, + get_sp_group) + + t = dit.time_embedding(sinusoidal_embedding_1d(dit.freq_dim, timestep)) + t_mod = dit.time_projection(t).unflatten(1, (6, dit.dim)) + if motion_bucket_id is not None and motion_controller is not None: + t_mod = t_mod + motion_controller(motion_bucket_id).unflatten(1, (6, dit.dim)) + context = dit.text_embedding(context) + + x = latents + # Merged cfg + if x.shape[0] != context.shape[0]: + x = torch.concat([x] * context.shape[0], dim=0) + if timestep.shape[0] != context.shape[0]: + timestep = torch.concat([timestep] * context.shape[0], dim=0) + + if dit.has_image_input: + x = torch.cat([x, y], dim=1) # (b, c_x + c_y, f, h, w) + clip_embdding = dit.img_emb(clip_feature) + context = torch.cat([clip_embdding, context], dim=1) + + x, (f, h, w) = dit.patchify(x) + + # Reference image + if reference_latents is not None: + reference_latents = dit.ref_conv(reference_latents[:, :, 0]).flatten(2).transpose(1, 2) + x = torch.concat([reference_latents, x], dim=1) + f += 1 + + freqs = torch.cat([ + dit.freqs[0][:f].view(f, 1, 1, -1).expand(f, h, w, -1), + 
dit.freqs[1][:h].view(1, h, 1, -1).expand(f, h, w, -1), + dit.freqs[2][:w].view(1, 1, w, -1).expand(f, h, w, -1) + ], dim=-1).reshape(f * h * w, 1, -1).to(x.device) + + # TeaCache + if tea_cache is not None: + tea_cache_update = tea_cache.check(dit, x, t_mod) + else: + tea_cache_update = False + + if vace_context is not None: + vace_hints = vace(x, vace_context, context, t_mod, freqs) + + # blocks + if use_unified_sequence_parallel: + if dist.is_initialized() and dist.get_world_size() > 1: + x = torch.chunk(x, get_sequence_parallel_world_size(), dim=1)[get_sequence_parallel_rank()] + if tea_cache_update: + x = tea_cache.update(x) + else: + for block_id, block in enumerate(dit.blocks): + x = block(x, context, t_mod, freqs) + if vace_context is not None and block_id in vace.vace_layers_mapping: + x = x + vace_hints[vace.vace_layers_mapping[block_id]] * vace_scale + if tea_cache is not None: + tea_cache.store(x) + + if reference_latents is not None: + x = x[:, reference_latents.shape[1]:] + f -= 1 + + x = dit.head(x, t) + if use_unified_sequence_parallel: + if dist.is_initialized() and dist.get_world_size() > 1: + x = get_sp_group().all_gather(x, dim=1) + x = dit.unpatchify(x, (f, h, w)) + return x diff --git a/test.py b/test.py new file mode 100644 index 0000000..2c6cfea --- /dev/null +++ b/test.py @@ -0,0 +1,28 @@ +import torch +from diffsynth import ModelManager, save_video, VideoData, save_frames, save_video, download_models +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from diffsynth.controlnets.processors import Annotator +from modelscope import snapshot_download +from tqdm import tqdm + + +# Load models +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", 
origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + # ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management(num_persistent_param_in_dit=0) + +# Text-to-video +video = pipe( + prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + seed=0, tiled=True, +) +save_video(video, "video1.mp4", fps=15, quality=5) From d150bcf622aeed11698b2ec6068208f12495a03a Mon Sep 17 00:00:00 2001 From: Artiprocher Date: Mon, 5 May 2025 13:01:45 +0800 Subject: [PATCH 3/9] ... --- test.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/test.py b/test.py index 2c6cfea..f16ae0e 100644 --- a/test.py +++ b/test.py @@ -4,6 +4,7 @@ from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig from diffsynth.controlnets.processors import Annotator from modelscope import snapshot_download from tqdm import tqdm +from PIL import Image # Load models @@ -11,18 +12,29 @@ pipe = WanVideoPipeline.from_pretrained( torch_dtype=torch.bfloat16, device="cuda", model_configs=[ - ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", 
origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), - # ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), ], ) -pipe.enable_vram_management(num_persistent_param_in_dit=0) +pipe.enable_vram_management(num_persistent_param_in_dit=10*10**9) + + +video = VideoData(rf"D:\pr_projects\20250503_dance\data\双马尾竖屏暴击!你的微笑就是彩虹的微笑♥ - 1.双马尾竖屏暴击!你的微笑就是彩虹的微笑♥(Av114086629088385,P1).mp4", height=832, width=480) +annotator = Annotator("openpose") +video = [video[i] for i in tqdm(range(450, 450+1*17, 1))] +save_video(video, "video_input.mp4", fps=60, quality=5) +control_video = [annotator(f) for f in tqdm(video)] +save_video(control_video, "video_control.mp4", fps=60, quality=5) +reference_image = Image.open(rf"D:\pr_projects\20250503_dance\data\marmot.png").resize((480, 832)) -# Text-to-video video = pipe( - prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。", + prompt="微距摄影风格特写画面,一只憨态可掬的土拨鼠正用后腿站立在碎石堆上,它在挥舞着双臂。金棕色的绒毛在阳光下泛着丝绸般的光泽,腹部毛发呈现浅杏色渐变,每根毛尖都闪烁着细密的光晕。两只黑曜石般的眼睛透出机警而温顺的光芒,鼻梁两侧的白色触须微微颤动,捕捉着空气中的气息。背景是虚化的灰绿色渐变,几簇嫩绿苔藓从画面右下角探出头来,与前景散落的鹅卵石形成微妙的景深对比。土拨鼠圆润的身形在逆光中勾勒出柔和的轮廓,耳朵紧贴头部的姿态流露出戒备中的天真,整个画面洋溢着自然界生灵特有的灵动与纯真。", negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", seed=0, tiled=True, + height=832, width=480, num_frames=len(control_video), + control_video=control_video, reference_image=reference_image, + # num_inference_steps=30, cfg_scale=1, ) -save_video(video, "video1.mp4", fps=15, quality=5) +save_video(video, "video1.mp4", fps=60, quality=5) From dbef6122e9738a09a747556d10f07df9bb52d398 Mon Sep 17 00:00:00 2001 From: 
Artiprocher
Date: Mon, 5 May 2025 23:23:06 +0800
Subject: [PATCH 4/9] ...

---
 diffsynth/pipelines/wan_video_new.py | 38 ++++++++++++++++++++--------
 diffsynth/vram_management/layers.py  | 35 +++++++++++++++++++++++++
 test.py                              | 34 +++++++++++++++----------
 3 files changed, 82 insertions(+), 25 deletions(-)

diff --git a/diffsynth/pipelines/wan_video_new.py b/diffsynth/pipelines/wan_video_new.py
index dcc4485..de05e50 100644
--- a/diffsynth/pipelines/wan_video_new.py
+++ b/diffsynth/pipelines/wan_video_new.py
@@ -24,7 +24,7 @@ from PIL import Image
 from tqdm import tqdm
 from typing import Optional
 
-from ..vram_management import enable_vram_management, AutoWrappedModule, AutoWrappedLinear
+from ..vram_management import enable_vram_management, AutoWrappedModule, AutoWrappedLinear, WanAutoCastLayerNorm
 from ..models.wan_video_text_encoder import T5RelativeEmbedding, T5LayerNorm
 from ..models.wan_video_dit import RMSNorm, sinusoidal_embedding_1d
 from ..models.wan_video_vae import RMS_norm, CausalConv3d, Upsample
@@ -188,8 +188,8 @@ class WanVideoPipeline(BasePipeline):
             WanVideoUnit_InputVideoEmbedder(),
             WanVideoUnit_PromptEmbedder(),
             WanVideoUnit_ImageEmbedder(),
-            WanVideoUnit_FunReference(),
             WanVideoUnit_FunControl(),
+            WanVideoUnit_FunReference(),
             WanVideoUnit_SpeedControl(),
             WanVideoUnit_VACE(),
             WanVideoUnit_TeaCache(),
@@ -225,7 +225,7 @@ class WanVideoPipeline(BasePipeline):
                 module_map = {
                     torch.nn.Linear: AutoWrappedLinear,
                     torch.nn.Conv3d: AutoWrappedModule,
-                    torch.nn.LayerNorm: AutoWrappedModule,
+                    torch.nn.LayerNorm: WanAutoCastLayerNorm,
                     RMSNorm: AutoWrappedModule,
                     torch.nn.Conv2d: AutoWrappedModule,
                 },
@@ -654,7 +654,7 @@ class WanVideoUnit_FunControl(PipelineUnit):
 class WanVideoUnit_FunReference(PipelineUnit):
     def __init__(self):
         super().__init__(
-            input_params=("reference_image", "height", "width"),
+            input_params=("reference_image", "height", "width"),
             onload_model_names=("vae")
         )
 
@@ -663,9 +663,11 @@ class 
WanVideoUnit_FunReference(PipelineUnit): return {} pipe.load_models_to_device(["vae"]) reference_image = reference_image.resize((width, height)) - reference_image = pipe.preprocess_video([reference_image]) - reference_latents = pipe.vae.encode(reference_image, device=pipe.device) - return {"reference_latents": reference_latents} + reference_latents = pipe.preprocess_video([reference_image]) + reference_latents = pipe.vae.encode(reference_latents, device=pipe.device) + clip_feature = pipe.preprocess_image(reference_image) + clip_feature = pipe.image_encoder.encode_image([clip_feature]) + return {"reference_latents": reference_latents, "clip_feature": clip_feature} @@ -753,11 +755,19 @@ class WanVideoUnit_TeaCache(PipelineUnit): class WanVideoUnit_CfgMerger(PipelineUnit): def __init__(self): super().__init__(take_over=True) + self.concat_tensor_names = ["context", "clip_feature", "y", "reference_latents"] def process(self, pipe: WanVideoPipeline, inputs_shared, inputs_posi, inputs_nega): if not inputs_shared["cfg_merge"]: return inputs_shared, inputs_posi, inputs_nega - inputs_shared["context"] = torch.concat((inputs_posi["context"], inputs_nega["context"]), dim=0) + for name in self.concat_tensor_names: + tensor_posi = inputs_posi.get(name) + tensor_nega = inputs_nega.get(name) + tensor_shared = inputs_shared.get(name) + if tensor_posi is not None and tensor_nega is not None: + inputs_shared[name] = torch.concat((tensor_posi, tensor_nega), dim=0) + elif tensor_shared is not None: + inputs_shared[name] = torch.concat((tensor_shared, tensor_shared), dim=0) inputs_posi.clear() inputs_nega.clear() return inputs_shared, inputs_posi, inputs_nega @@ -835,10 +845,12 @@ class TemporalTiler_BCTHW: mask = repeat(t, "T -> 1 1 T 1 1") return mask - def run(self, model_fn, sliding_window_size, sliding_window_stride, computation_device, computation_dtype, model_kwargs, tensor_names): + def run(self, model_fn, sliding_window_size, sliding_window_stride, computation_device, 
computation_dtype, model_kwargs, tensor_names, batch_size=None): tensor_names = [tensor_name for tensor_name in tensor_names if model_kwargs.get(tensor_name) is not None] tensor_dict = {tensor_name: model_kwargs[tensor_name] for tensor_name in tensor_names} B, C, T, H, W = tensor_dict[tensor_names[0]].shape + if batch_size is not None: + B *= batch_size data_device, data_dtype = tensor_dict[tensor_names[0]].device, tensor_dict[tensor_names[0]].dtype value = torch.zeros((B, C, T, H, W), device=data_device, dtype=data_dtype) weight = torch.zeros((1, 1, T, 1, 1), device=data_device, dtype=data_dtype) @@ -881,6 +893,7 @@ def model_fn_wan_video( motion_bucket_id: Optional[torch.Tensor] = None, sliding_window_size: Optional[int] = None, sliding_window_stride: Optional[int] = None, + cfg_merge: bool = False, **kwargs, ): if sliding_window_size is not None and sliding_window_stride is not None: @@ -905,7 +918,8 @@ def model_fn_wan_video( sliding_window_size, sliding_window_stride, latents.device, latents.dtype, model_kwargs=model_kwargs, - tensor_names=["latents", "y"] + tensor_names=["latents", "y"], + batch_size=2 if cfg_merge else 1 ) if use_unified_sequence_parallel: @@ -936,7 +950,9 @@ def model_fn_wan_video( # Reference image if reference_latents is not None: - reference_latents = dit.ref_conv(reference_latents[:, :, 0]).flatten(2).transpose(1, 2) + if len(reference_latents.shape) == 5: + reference_latents = reference_latents[:, :, 0] + reference_latents = dit.ref_conv(reference_latents).flatten(2).transpose(1, 2) x = torch.concat([reference_latents, x], dim=1) f += 1 diff --git a/diffsynth/vram_management/layers.py b/diffsynth/vram_management/layers.py index a9df39e..aa2bda2 100644 --- a/diffsynth/vram_management/layers.py +++ b/diffsynth/vram_management/layers.py @@ -38,6 +38,41 @@ class AutoWrappedModule(torch.nn.Module): return module(*args, **kwargs) +class WanAutoCastLayerNorm(torch.nn.LayerNorm): + def __init__(self, module: torch.nn.LayerNorm, offload_dtype, 
offload_device, onload_dtype, onload_device, computation_dtype, computation_device): + with init_weights_on_device(device=torch.device("meta")): + super().__init__(module.normalized_shape, eps=module.eps, elementwise_affine=module.elementwise_affine, bias=module.bias is not None, dtype=offload_dtype, device=offload_device) + self.weight = module.weight + self.bias = module.bias + self.offload_dtype = offload_dtype + self.offload_device = offload_device + self.onload_dtype = onload_dtype + self.onload_device = onload_device + self.computation_dtype = computation_dtype + self.computation_device = computation_device + self.state = 0 + + def offload(self): + if self.state == 1 and (self.offload_dtype != self.onload_dtype or self.offload_device != self.onload_device): + self.to(dtype=self.offload_dtype, device=self.offload_device) + self.state = 0 + + def onload(self): + if self.state == 0 and (self.offload_dtype != self.onload_dtype or self.offload_device != self.onload_device): + self.to(dtype=self.onload_dtype, device=self.onload_device) + self.state = 1 + + def forward(self, x, *args, **kwargs): + if self.onload_dtype == self.computation_dtype and self.onload_device == self.computation_device: + weight, bias = self.weight, self.bias + else: + weight = None if self.weight is None else cast_to(self.weight, self.computation_dtype, self.computation_device) + bias = None if self.bias is None else cast_to(self.bias, self.computation_dtype, self.computation_device) + with torch.amp.autocast(device_type=x.device.type): + x = torch.nn.functional.layer_norm(x.float(), self.normalized_shape, weight, bias, self.eps).type_as(x) + return x + + class AutoWrappedLinear(torch.nn.Linear): def __init__(self, module: torch.nn.Linear, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device): with init_weights_on_device(device=torch.device("meta")): diff --git a/test.py b/test.py index f16ae0e..f7959ee 100644 --- a/test.py +++ b/test.py @@ -1,7 
+1,9 @@ import torch +torch.cuda.set_per_process_memory_fraction(0.999, 0) from diffsynth import ModelManager, save_video, VideoData, save_frames, save_video, download_models -from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig, model_fn_wan_video from diffsynth.controlnets.processors import Annotator +from diffsynth.data.video import crop_and_resize from modelscope import snapshot_download from tqdm import tqdm from PIL import Image @@ -13,28 +15,32 @@ pipe = WanVideoPipeline.from_pretrained( device="cuda", model_configs=[ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + # ModelConfig("D:\projects\VideoX-Fun\models\Wan2.1-Fun-V1.1-1.3B-Control\diffusion_pytorch_model.safetensors", offload_device="cpu"), ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), ], ) -pipe.enable_vram_management(num_persistent_param_in_dit=10*10**9) - +pipe.enable_vram_management(num_persistent_param_in_dit=6*10**9) video = VideoData(rf"D:\pr_projects\20250503_dance\data\双马尾竖屏暴击!你的微笑就是彩虹的微笑♥ - 1.双马尾竖屏暴击!你的微笑就是彩虹的微笑♥(Av114086629088385,P1).mp4", height=832, width=480) annotator = Annotator("openpose") -video = [video[i] for i in tqdm(range(450, 450+1*17, 1))] +video = [video[i] for i in tqdm(range(450, 450+1*81, 1))] save_video(video, "video_input.mp4", fps=60, quality=5) control_video = [annotator(f) for f in tqdm(video)] save_video(control_video, "video_control.mp4", fps=60, quality=5) -reference_image = 
Image.open(rf"D:\pr_projects\20250503_dance\data\marmot.png").resize((480, 832)) +reference_image = crop_and_resize(Image.open(rf"D:\pr_projects\20250503_dance\data\marmot4.png"), 832, 480) -video = pipe( - prompt="微距摄影风格特写画面,一只憨态可掬的土拨鼠正用后腿站立在碎石堆上,它在挥舞着双臂。金棕色的绒毛在阳光下泛着丝绸般的光泽,腹部毛发呈现浅杏色渐变,每根毛尖都闪烁着细密的光晕。两只黑曜石般的眼睛透出机警而温顺的光芒,鼻梁两侧的白色触须微微颤动,捕捉着空气中的气息。背景是虚化的灰绿色渐变,几簇嫩绿苔藓从画面右下角探出头来,与前景散落的鹅卵石形成微妙的景深对比。土拨鼠圆润的身形在逆光中勾勒出柔和的轮廓,耳朵紧贴头部的姿态流露出戒备中的天真,整个画面洋溢着自然界生灵特有的灵动与纯真。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - seed=0, tiled=True, - height=832, width=480, num_frames=len(control_video), - control_video=control_video, reference_image=reference_image, - # num_inference_steps=30, cfg_scale=1, -) -save_video(video, "video1.mp4", fps=60, quality=5) +with torch.amp.autocast("cuda", torch.bfloat16): + video = pipe( + prompt="微距摄影风格特写画面,一只憨态可掬的土拨鼠正用后腿站立在碎石堆上,它在挥舞着双臂。金棕色的绒毛在阳光下泛着丝绸般的光泽,腹部毛发呈现浅杏色渐变,每根毛尖都闪烁着细密的光晕。两只黑曜石般的眼睛透出机警而温顺的光芒,鼻梁两侧的白色触须微微颤动,捕捉着空气中的气息。背景是虚化的灰绿色渐变,几簇嫩绿苔藓从画面右下角探出头来,与前景散落的鹅卵石形成微妙的景深对比。土拨鼠圆润的身形在逆光中勾勒出柔和的轮廓,耳朵紧贴头部的姿态流露出戒备中的天真,整个画面洋溢着自然界生灵特有的灵动与纯真。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + seed=43, tiled=True, + height=832, width=480, num_frames=len(control_video), + control_video=control_video, reference_image=reference_image, + # sliding_window_size=5, sliding_window_stride=2, + # num_inference_steps=100, + # cfg_merge=True, + sigma_shift=16, + ) + save_video(video, "video1.mp4", fps=60, quality=5) From 675eefa07e28f3ab16121d59b2dcead1c67b879e Mon Sep 17 00:00:00 2001 From: Artiprocher Date: Mon, 12 May 2025 17:48:28 +0800 Subject: [PATCH 5/9] training framework --- diffsynth/pipelines/wan_video_new.py | 222 ++++++++++++------ diffsynth/schedulers/flow_match.py | 3 + diffsynth/trainers/utils.py | 190 +++++++++++++++ 
diffsynth/vram_management/layers.py | 113 +++++---- .../model_inference/wan_1.3b_speed_control.py | 34 +++ .../model_inference/wan_1.3b_text_to_video.py | 34 +++ .../wanvideo/model_inference/wan_1.3b_vace.py | 52 ++++ .../wanvideo/model_inference/wan_14b_flf2v.py | 36 +++ .../wan_14b_image_to_video_480p.py | 34 +++ .../wan_14b_image_to_video_720p.py | 35 +++ .../model_inference/wan_14b_text_to_video.py | 24 ++ .../model_inference/wan_fun_1.3b_InP.py | 36 +++ .../model_inference/wan_fun_1.3b_control.py | 34 +++ .../model_inference/wan_fun_14b_InP.py | 36 +++ .../model_inference/wan_fun_14b_control.py | 34 +++ .../wan_fun_v1.1_1.3b_reference_control.py | 36 +++ .../wan_fun_v1.1_14b_reference_control.py | 36 +++ requirements.txt | 3 +- test.py | 46 ---- train.py | 75 ++++++ 20 files changed, 939 insertions(+), 174 deletions(-) create mode 100644 diffsynth/trainers/utils.py create mode 100644 examples/wanvideo/model_inference/wan_1.3b_speed_control.py create mode 100644 examples/wanvideo/model_inference/wan_1.3b_text_to_video.py create mode 100644 examples/wanvideo/model_inference/wan_1.3b_vace.py create mode 100644 examples/wanvideo/model_inference/wan_14b_flf2v.py create mode 100644 examples/wanvideo/model_inference/wan_14b_image_to_video_480p.py create mode 100644 examples/wanvideo/model_inference/wan_14b_image_to_video_720p.py create mode 100644 examples/wanvideo/model_inference/wan_14b_text_to_video.py create mode 100644 examples/wanvideo/model_inference/wan_fun_1.3b_InP.py create mode 100644 examples/wanvideo/model_inference/wan_fun_1.3b_control.py create mode 100644 examples/wanvideo/model_inference/wan_fun_14b_InP.py create mode 100644 examples/wanvideo/model_inference/wan_fun_14b_control.py create mode 100644 examples/wanvideo/model_inference/wan_fun_v1.1_1.3b_reference_control.py create mode 100644 examples/wanvideo/model_inference/wan_fun_v1.1_14b_reference_control.py delete mode 100644 test.py create mode 100644 train.py diff --git 
a/diffsynth/pipelines/wan_video_new.py b/diffsynth/pipelines/wan_video_new.py index de05e50..6110cc2 100644 --- a/diffsynth/pipelines/wan_video_new.py +++ b/diffsynth/pipelines/wan_video_new.py @@ -1,34 +1,26 @@ -import torch, warnings, glob +import torch, warnings, glob, os import numpy as np from PIL import Image from einops import repeat, reduce from typing import Optional, Union from dataclasses import dataclass from modelscope import snapshot_download - - -import types -from ..models import ModelManager -from ..models.wan_video_dit import WanModel -from ..models.wan_video_text_encoder import WanTextEncoder -from ..models.wan_video_vae import WanVideoVAE -from ..models.wan_video_image_encoder import WanImageEncoder -from ..models.wan_video_vace import VaceWanModel -from ..schedulers.flow_match import FlowMatchScheduler -from .base import BasePipeline -from ..prompters import WanPrompter -import torch, os from einops import rearrange import numpy as np from PIL import Image from tqdm import tqdm from typing import Optional -from ..vram_management import enable_vram_management, AutoWrappedModule, AutoWrappedLinear, WanAutoCastLayerNorm -from ..models.wan_video_text_encoder import T5RelativeEmbedding, T5LayerNorm -from ..models.wan_video_dit import RMSNorm, sinusoidal_embedding_1d -from ..models.wan_video_vae import RMS_norm, CausalConv3d, Upsample +from ..models import ModelManager +from ..models.wan_video_dit import WanModel, RMSNorm, sinusoidal_embedding_1d +from ..models.wan_video_text_encoder import WanTextEncoder, T5RelativeEmbedding, T5LayerNorm +from ..models.wan_video_vae import WanVideoVAE, RMS_norm, CausalConv3d, Upsample +from ..models.wan_video_image_encoder import WanImageEncoder +from ..models.wan_video_vace import VaceWanModel from ..models.wan_video_motion_controller import WanMotionControllerModel +from ..schedulers.flow_match import FlowMatchScheduler +from ..prompters import WanPrompter +from ..vram_management import enable_vram_management, 
AutoWrappedModule, AutoWrappedLinear, WanAutoCastLayerNorm @@ -50,6 +42,16 @@ class BasePipeline(torch.nn.Module): self.time_division_factor = time_division_factor self.time_division_remainder = time_division_remainder self.vram_management_enabled = False + + + def to(self, *args, **kwargs): + device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs) + if device is not None: + self.device = device + if dtype is not None: + self.torch_dtype = dtype + super().to(*args, **kwargs) + return self def check_resize_height_width(self, height, width, num_frames=None): @@ -135,8 +137,20 @@ class BasePipeline(torch.nn.Module): def enable_cpu_offload(self): - warnings.warn("enable_cpu_offload is deprecated. This feature is automatically enabled if offload_device != device") - + warnings.warn("`enable_cpu_offload` is deprecated. Please use `enable_vram_management`.") + + + def get_free_vram(self): + total_memory = torch.cuda.get_device_properties(self.device).total_memory + allocated_memory = torch.cuda.device_memory_used(self.device) + return (total_memory - allocated_memory) / (1024 ** 3) + + + def freeze_except(self, model_names): + for name, model in self.named_children(): + if name not in model_names: + model.eval() + model.requires_grad_(False) @dataclass @@ -146,17 +160,19 @@ class ModelConfig: origin_file_pattern: Union[str, list[str]] = None download_resource: str = "ModelScope" offload_device: Optional[Union[str, torch.device]] = None - quantization_dtype: Optional[torch.dtype] = None + offload_dtype: Optional[torch.dtype] = None def download_if_necessary(self, local_model_path="./models", skip_download=False): if self.path is None: if self.model_id is None or self.origin_file_pattern is None: raise ValueError(f"""No valid model files. 
Please use `ModelConfig(path="xxx")` or `ModelConfig(model_id="xxx/yyy", origin_file_pattern="zzz")`.""") if not skip_download: + downloaded_files = glob.glob(self.origin_file_pattern, root_dir=os.path.join(local_model_path, self.model_id)) snapshot_download( self.model_id, local_dir=os.path.join(local_model_path, self.model_id), allow_file_pattern=self.origin_file_pattern, + ignore_file_pattern=downloaded_files, local_files_only=False ) self.path = glob.glob(os.path.join(local_model_path, self.model_id, self.origin_file_pattern)) @@ -195,10 +211,36 @@ class WanVideoPipeline(BasePipeline): WanVideoUnit_TeaCache(), WanVideoUnit_CfgMerger(), ] + self.model_fn = model_fn_wan_video + + + def train(self): + super().train() + self.scheduler.set_timesteps(1000, training=True) + + + def training_loss(self, **inputs): + timestep_id = torch.randint(0, self.scheduler.num_train_timesteps, (1,)) + timestep = self.scheduler.timesteps[timestep_id].to(dtype=self.torch_dtype, device=self.device) + + inputs["latents"] = self.scheduler.add_noise(inputs["input_latents"], inputs["noise"], timestep) + training_target = self.scheduler.training_target(inputs["input_latents"], inputs["noise"], timestep) + + noise_pred = self.model_fn(**inputs, timestep=timestep) + + loss = torch.nn.functional.mse_loss(noise_pred.float(), training_target.float()) + loss = loss * self.scheduler.training_weight(timestep) + return loss - def enable_vram_management(self, num_persistent_param_in_dit=None): + def enable_vram_management(self, num_persistent_param_in_dit=None, vram_limit=None, vram_buffer=0.5): self.vram_management_enabled = True + if num_persistent_param_in_dit is not None: + vram_limit = None + else: + if vram_limit is None: + vram_limit = self.get_free_vram() + vram_limit = vram_limit - vram_buffer if self.text_encoder is not None: dtype = next(iter(self.text_encoder.parameters())).dtype enable_vram_management( @@ -217,9 +259,11 @@ class WanVideoPipeline(BasePipeline): 
computation_dtype=self.torch_dtype, computation_device=self.device, ), + vram_limit=vram_limit, ) if self.dit is not None: dtype = next(iter(self.dit.parameters())).dtype + device = "cpu" if vram_limit is not None else self.device enable_vram_management( self.dit, module_map = { @@ -233,7 +277,7 @@ class WanVideoPipeline(BasePipeline): offload_dtype=dtype, offload_device="cpu", onload_dtype=dtype, - onload_device=self.device, + onload_device=device, computation_dtype=self.torch_dtype, computation_device=self.device, ), @@ -246,6 +290,7 @@ class WanVideoPipeline(BasePipeline): computation_dtype=self.torch_dtype, computation_device=self.device, ), + vram_limit=vram_limit, ) if self.vae is not None: dtype = next(iter(self.vae.parameters())).dtype @@ -304,6 +349,7 @@ class WanVideoPipeline(BasePipeline): ), ) if self.vace is not None: + device = "cpu" if vram_limit is not None else self.device enable_vram_management( self.vace, module_map = { @@ -316,10 +362,11 @@ class WanVideoPipeline(BasePipeline): offload_dtype=dtype, offload_device="cpu", onload_dtype=dtype, - onload_device=self.device, + onload_device=device, computation_dtype=self.torch_dtype, computation_device=self.device, ), + vram_limit=vram_limit, ) @@ -330,8 +377,23 @@ class WanVideoPipeline(BasePipeline): model_configs: list[ModelConfig] = [], tokenizer_config: ModelConfig = ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/*"), local_model_path: str = "./models", - skip_download: bool = False + skip_download: bool = False, + redirect_common_files: bool = True, ): + # Redirect model path + if redirect_common_files: + redirect_dict = { + "models_t5_umt5-xxl-enc-bf16.pth": "Wan-AI/Wan2.1-T2V-1.3B", + "Wan2.1_VAE.pth": "Wan-AI/Wan2.1-T2V-1.3B", + "models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth": "Wan-AI/Wan2.1-I2V-14B-480P", + } + for model_config in model_configs: + if model_config.origin_file_pattern is None or model_config.model_id is None: + continue + if 
model_config.origin_file_pattern in redirect_dict and model_config.model_id != redirect_dict[model_config.origin_file_pattern]: + print(f"To avoid repeatedly downloading model files, ({model_config.model_id}, {model_config.origin_file_pattern}) is redirected to ({redirect_dict[model_config.origin_file_pattern]}, {model_config.origin_file_pattern}). You can use `redirect_common_files=False` to disable file redirection.") + model_config.model_id = redirect_dict[model_config.origin_file_pattern] + # Download and load models model_manager = ModelManager() for model_config in model_configs: @@ -339,7 +401,7 @@ class WanVideoPipeline(BasePipeline): model_manager.load_model( model_config.path, device=model_config.offload_device or device, - torch_dtype=model_config.quantization_dtype or torch_dtype + torch_dtype=model_config.offload_dtype or torch_dtype ) # Initialize pipeline @@ -356,63 +418,54 @@ class WanVideoPipeline(BasePipeline): pipe.prompter.fetch_models(pipe.text_encoder) pipe.prompter.fetch_tokenizer(tokenizer_config.path) return pipe - - - def denoising_model(self): - return self.dit - - - def encode_video(self, input_video, tiled=True, tile_size=(34, 34), tile_stride=(18, 16)): - latents = self.vae.encode(input_video, device=self.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride) - return latents @torch.no_grad() def __call__( self, # Prompt - prompt, - negative_prompt="", + prompt: str, + negative_prompt: Optional[str] = "", # Image-to-video - input_image=None, + input_image: Optional[Image.Image] = None, # First-last-frame-to-video - end_image=None, + end_image: Optional[Image.Image] = None, # Video-to-video - input_video=None, - denoising_strength=1.0, + input_video: Optional[list[Image.Image]] = None, + denoising_strength: Optional[float] = 1.0, # ControlNet - control_video=None, - reference_image=None, + control_video: Optional[list[Image.Image]] = None, + reference_image: Optional[Image.Image] = None, # VACE - vace_video=None, - 
vace_video_mask=None, - vace_reference_image=None, - vace_scale=1.0, + vace_video: Optional[list[Image.Image]] = None, + vace_video_mask: Optional[Image.Image] = None, + vace_reference_image: Optional[Image.Image] = None, + vace_scale: Optional[float] = 1.0, # Randomness - seed=None, - rand_device="cpu", + seed: Optional[int] = None, + rand_device: Optional[str] = "cpu", # Shape - height=480, - width=832, + height: Optional[int] = 480, + width: Optional[int] = 832, num_frames=81, # Classifier-free guidance - cfg_scale=5.0, - cfg_merge=False, + cfg_scale: Optional[float] = 5.0, + cfg_merge: Optional[bool] = False, # Scheduler - num_inference_steps=50, - sigma_shift=5.0, + num_inference_steps: Optional[int] = 50, + sigma_shift: Optional[float] = 5.0, # Speed control - motion_bucket_id=None, + motion_bucket_id: Optional[int] = None, # VAE tiling - tiled=True, - tile_size=(30, 52), - tile_stride=(15, 26), + tiled: Optional[bool] = True, + tile_size: Optional[tuple[int, int]] = (30, 52), + tile_stride: Optional[tuple[int, int]] = (15, 26), # Sliding window sliding_window_size: Optional[int] = None, sliding_window_stride: Optional[int] = None, # Teacache - tea_cache_l1_thresh=None, - tea_cache_model_id="", + tea_cache_l1_thresh: Optional[float] = None, + tea_cache_model_id: Optional[str] = "", # progress_bar progress_bar_cmd=tqdm, ): @@ -452,12 +505,12 @@ class WanVideoPipeline(BasePipeline): timestep = timestep.unsqueeze(0).to(dtype=self.torch_dtype, device=self.device) # Inference - noise_pred_posi = model_fn_wan_video(**models, **inputs_shared, **inputs_posi, timestep=timestep) + noise_pred_posi = self.model_fn(**models, **inputs_shared, **inputs_posi, timestep=timestep) if cfg_scale != 1.0: if cfg_merge: noise_pred_posi, noise_pred_nega = noise_pred_posi.chunk(2, dim=0) else: - noise_pred_nega = model_fn_wan_video(**models, **inputs_shared, **inputs_nega, timestep=timestep) + noise_pred_nega = self.model_fn(**models, **inputs_shared, **inputs_nega, timestep=timestep) 
noise_pred = noise_pred_nega + cfg_scale * (noise_pred_posi - noise_pred_nega) else: noise_pred = noise_pred_posi @@ -467,7 +520,7 @@ class WanVideoPipeline(BasePipeline): # VACE (TODO: remove it) if vace_reference_image is not None: - latents = latents[:, :, 1:] + inputs_shared["latents"] = inputs_shared["latents"][:, :, 1:] # Decode self.load_models_to_device(['vae']) @@ -558,18 +611,21 @@ class WanVideoUnit_NoiseInitializer(PipelineUnit): class WanVideoUnit_InputVideoEmbedder(PipelineUnit): def __init__(self): super().__init__( - input_params=("input_video", "noise", "tiled", "tile_size", "tile_stride"), + input_params=("input_video", "noise", "tiled", "tile_size", "tile_stride", "denoising_strength"), onload_model_names=("vae",) ) - def process(self, pipe: WanVideoPipeline, input_video, noise, tiled, tile_size, tile_stride): + def process(self, pipe: WanVideoPipeline, input_video, noise, tiled, tile_size, tile_stride, denoising_strength): if input_video is None: return {"latents": noise} pipe.load_models_to_device(["vae"]) input_video = pipe.preprocess_video(input_video) - latents = pipe.encode_video(input_video, tiled, tile_size, tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) - latents = pipe.scheduler.add_noise(latents, noise, timestep=pipe.scheduler.timesteps[0]) - return {"latents": latents} + input_latents = pipe.vae.encode(input_video, device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) + if pipe.scheduler.training: + return {"latents": noise, "input_latents": input_latents} + else: + latents = pipe.scheduler.add_noise(input_latents, noise, timestep=pipe.scheduler.timesteps[0]) + return {"latents": latents} @@ -639,7 +695,7 @@ class WanVideoUnit_FunControl(PipelineUnit): return {} pipe.load_models_to_device(self.onload_model_names) control_video = pipe.preprocess_video(control_video) - control_latents = pipe.encode_video(control_video, tiled=tiled, tile_size=tile_size, 
tile_stride=tile_stride) + control_latents = pipe.vae.encode(control_video, device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) control_latents = control_latents.to(dtype=pipe.torch_dtype, device=pipe.device) if clip_feature is None or y is None: clip_feature = torch.zeros((1, 257, 1280), dtype=pipe.torch_dtype, device=pipe.device) @@ -678,7 +734,7 @@ class WanVideoUnit_SpeedControl(PipelineUnit): def process(self, pipe: WanVideoPipeline, motion_bucket_id): if motion_bucket_id is None: return {} - motion_bucket_id = torch.Tensor((motion_bucket_id,)).to(dtype=self.torch_dtype, device=self.device) + motion_bucket_id = torch.Tensor((motion_bucket_id,)).to(dtype=pipe.torch_dtype, device=pipe.device) return {"motion_bucket_id": motion_bucket_id} @@ -703,18 +759,16 @@ class WanVideoUnit_VACE(PipelineUnit): vace_video = torch.zeros((1, 3, num_frames, height, width), dtype=pipe.torch_dtype, device=pipe.device) else: vace_video = pipe.preprocess_video(vace_video) - vace_video = torch.stack(vace_video, dim=2).to(dtype=pipe.torch_dtype, device=pipe.device) if vace_mask is None: vace_mask = torch.ones_like(vace_video) else: vace_mask = pipe.preprocess_video(vace_mask) - vace_mask = torch.stack(vace_mask, dim=2).to(dtype=pipe.torch_dtype, device=pipe.device) inactive = vace_video * (1 - vace_mask) + 0 * vace_mask reactive = vace_video * vace_mask + 0 * (1 - vace_mask) - inactive = pipe.encode_video(inactive, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) - reactive = pipe.encode_video(reactive, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) + inactive = pipe.vae.encode(inactive, device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) + reactive = pipe.vae.encode(reactive, device=pipe.device, tiled=tiled, 
tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) vace_video_latents = torch.concat((inactive, reactive), dim=1) vace_mask_latents = rearrange(vace_mask[0,0], "T (H P) (W Q) -> 1 (P Q) T H W", P=8, Q=8) @@ -724,8 +778,7 @@ class WanVideoUnit_VACE(PipelineUnit): pass else: vace_reference_image = pipe.preprocess_video([vace_reference_image]) - vace_reference_image = torch.stack(vace_reference_image, dim=2).to(dtype=pipe.torch_dtype, device=pipe.device) - vace_reference_latents = pipe.encode_video(vace_reference_image, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) + vace_reference_latents = pipe.vae.encode(vace_reference_image, device=pipe.device, tiled=tiled, tile_size=tile_size, tile_stride=tile_stride).to(dtype=pipe.torch_dtype, device=pipe.device) vace_reference_latents = torch.concat((vace_reference_latents, torch.zeros_like(vace_reference_latents)), dim=1) vace_video_latents = torch.concat((vace_reference_latents, vace_video_latents), dim=2) vace_mask_latents = torch.concat((torch.zeros_like(vace_mask_latents[:, :, :1]), vace_mask_latents), dim=2) @@ -894,6 +947,7 @@ def model_fn_wan_video( sliding_window_size: Optional[int] = None, sliding_window_stride: Optional[int] = None, cfg_merge: bool = False, + use_gradient_checkpointing: bool = False, **kwargs, ): if sliding_window_size is not None and sliding_window_stride is not None: @@ -978,8 +1032,20 @@ def model_fn_wan_video( if tea_cache_update: x = tea_cache.update(x) else: + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + return custom_forward + for block_id, block in enumerate(dit.blocks): - x = block(x, context, t_mod, freqs) + if use_gradient_checkpointing: + x = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), + x, context, t_mod, freqs, + use_reentrant=False, + ) + else: + x = block(x, context, t_mod, freqs) if vace_context is not None and 
block_id in vace.vace_layers_mapping: x = x + vace_hints[vace.vace_layers_mapping[block_id]] * vace_scale if tea_cache is not None: diff --git a/diffsynth/schedulers/flow_match.py b/diffsynth/schedulers/flow_match.py index d6d0219..9754b98 100644 --- a/diffsynth/schedulers/flow_match.py +++ b/diffsynth/schedulers/flow_match.py @@ -35,6 +35,9 @@ class FlowMatchScheduler(): y_shifted = y - y.min() bsmntw_weighing = y_shifted * (num_inference_steps / y_shifted.sum()) self.linear_timesteps_weights = bsmntw_weighing + self.training = True + else: + self.training = False def step(self, model_output, timestep, sample, to_final=False, **kwargs): diff --git a/diffsynth/trainers/utils.py b/diffsynth/trainers/utils.py new file mode 100644 index 0000000..7bc0f15 --- /dev/null +++ b/diffsynth/trainers/utils.py @@ -0,0 +1,190 @@ +import imageio, os, torch, warnings, torchvision +from peft import LoraConfig, inject_adapter_in_model +from PIL import Image +import pandas as pd +from tqdm import tqdm +from accelerate import Accelerator + + + +class VideoDataset(torch.utils.data.Dataset): + def __init__( + self, + base_path, metadata_path, + frame_interval=1, num_frames=81, + dynamic_resolution=True, max_pixels=1920*1080, height=None, width=None, + height_division_factor=16, width_division_factor=16, + data_file_keys=("video",), + image_file_extension=("jpg", "jpeg", "png", "webp"), + video_file_extension=("mp4", "avi", "mov", "wmv", "mkv", "flv", "webm"), + repeat=1, + ): + metadata = pd.read_csv(metadata_path) + self.data = [metadata.iloc[i].to_dict() for i in range(len(metadata))] + + self.base_path = base_path + self.frame_interval = frame_interval + self.num_frames = num_frames + self.dynamic_resolution = dynamic_resolution + self.max_pixels = max_pixels + self.height = height + self.width = width + self.height_division_factor = height_division_factor + self.width_division_factor = width_division_factor + self.data_file_keys = data_file_keys + self.image_file_extension = 
image_file_extension + self.video_file_extension = video_file_extension + self.repeat = repeat + + if height is not None and width is not None and dynamic_resolution == True: + print("Height and width are fixed. Setting `dynamic_resolution` to False.") + self.dynamic_resolution = False + + + def crop_and_resize(self, image, target_height, target_width): + width, height = image.size + scale = max(target_width / width, target_height / height) + image = torchvision.transforms.functional.resize( + image, + (round(height*scale), round(width*scale)), + interpolation=torchvision.transforms.InterpolationMode.BILINEAR + ) + image = torchvision.transforms.functional.center_crop(image, (target_height, target_width)) + return image + + + def get_height_width(self, image): + if self.dynamic_resolution: + width, height = image.size + if width * height > self.max_pixels: + scale = (width * height / self.max_pixels) ** 0.5 + height, width = int(height / scale), int(width / scale) + height = height // self.height_division_factor * self.height_division_factor + width = width // self.width_division_factor * self.width_division_factor + else: + height, width = self.height, self.width + return height, width + + + def load_frames_using_imageio(self, file_path, start_frame_id, interval, num_frames): + reader = imageio.get_reader(file_path) + if reader.count_frames() - 1 < start_frame_id + (num_frames - 1) * interval: + reader.close() + return None + frames = [] + for frame_id in range(num_frames): + frame = reader.get_data(start_frame_id + frame_id * interval) + frame = Image.fromarray(frame) + frame = self.crop_and_resize(frame, *self.get_height_width(frame)) + frames.append(frame) + reader.close() + return frames + + + def load_image(self, file_path): + image = Image.open(file_path).convert("RGB") + image = self.crop_and_resize(image, *self.get_height_width(image)) + return image + + + def load_video(self, file_path): + frames = self.load_frames_using_imageio(file_path, 0, 
self.frame_interval, self.num_frames) + return frames + + + def is_image(self, file_path): + file_ext_name = file_path.split(".")[-1] + return file_ext_name.lower() in self.image_file_extension + + + def is_video(self, file_path): + file_ext_name = file_path.split(".")[-1] + return file_ext_name.lower() in self.video_file_extension + + + def load_data(self, file_path): + if self.is_image(file_path): + return self.load_image(file_path) + elif self.is_video(file_path): + return self.load_video(file_path) + else: + return None + + + def __getitem__(self, data_id): + data = self.data[data_id % len(self.data)].copy() + for key in self.data_file_keys: + if key in data: + path = os.path.join(self.base_path, data[key]) + data[key] = self.load_data(path) + if data[key] is None: + warnings.warn(f"cannot load file {data[key]}.") + return None + return data + + + def __len__(self): + return len(self.data) * self.repeat + + + +class DiffusionTrainingModule(torch.nn.Module): + def __init__(self): + super().__init__() + + + def to(self, *args, **kwargs): + for name, model in self.named_children(): + model.to(*args, **kwargs) + return self + + + def trainable_modules(self): + trainable_modules = filter(lambda p: p.requires_grad, self.parameters()) + return trainable_modules + + + def trainable_param_names(self): + trainable_param_names = list(filter(lambda named_param: named_param[1].requires_grad, self.named_parameters())) + trainable_param_names = set([named_param[0] for named_param in trainable_param_names]) + return trainable_param_names + + + def add_lora_to_model(self, model, target_modules, lora_rank, lora_alpha=None): + if lora_alpha is None: + lora_alpha = lora_rank + lora_config = LoraConfig(r=lora_rank, lora_alpha=lora_alpha, target_modules=target_modules) + model = inject_adapter_in_model(lora_config, model) + return model + + + +def launch_training_task(model: DiffusionTrainingModule, dataset, learning_rate, num_epochs, output_path, remove_prefix=None): + dataloader = 
torch.utils.data.DataLoader(dataset, shuffle=True, collate_fn=lambda x: x[0]) + optimizer = torch.optim.AdamW(model.trainable_modules(), lr=learning_rate) + scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer) + + accelerator = Accelerator(gradient_accumulation_steps=1) + model, optimizer, dataloader, scheduler = accelerator.prepare(model, optimizer, dataloader, scheduler) + + for epoch in range(num_epochs): + for data in tqdm(dataloader): + with accelerator.accumulate(model): + optimizer.zero_grad() + loss = model(data) + accelerator.backward(loss) + optimizer.step() + scheduler.step() + accelerator.wait_for_everyone() + if accelerator.is_main_process: + state_dict = accelerator.get_state_dict(model) + trainable_param_names = model.trainable_param_names() + state_dict = {name: param for name, param in state_dict.items() if name in trainable_param_names} + state_dict_ = {} + for name, param in state_dict.items(): + if remove_prefix is not None and name.startswith(remove_prefix): + name = name[len(remove_prefix):] + state_dict_[name] = param + # NOTE: state_dict_ must be defined even when remove_prefix is None + path = os.path.join(output_path, f"epoch-{epoch}") + accelerator.save(state_dict_, path, safe_serialization=True) diff --git a/diffsynth/vram_management/layers.py b/diffsynth/vram_management/layers.py index aa2bda2..45e7433 100644 --- a/diffsynth/vram_management/layers.py +++ b/diffsynth/vram_management/layers.py @@ -8,8 +8,32 @@ def cast_to(weight, dtype, device): return r -class AutoWrappedModule(torch.nn.Module): - def __init__(self, module: torch.nn.Module, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device): +class AutoTorchModule(torch.nn.Module): + def __init__(self): + super().__init__() + + def check_free_vram(self): + used_memory = torch.cuda.device_memory_used(self.computation_device) / (1024 ** 3) + return used_memory < self.vram_limit + + def offload(self): + if self.state != 0: + self.to(dtype=self.offload_dtype, device=self.offload_device) + self.state = 0 + 
+ def onload(self): + if self.state != 1: + self.to(dtype=self.onload_dtype, device=self.onload_device) + self.state = 1 + + def keep(self): + if self.state != 2: + self.to(dtype=self.computation_dtype, device=self.computation_device) + self.state = 2 + + +class AutoWrappedModule(AutoTorchModule): + def __init__(self, module: torch.nn.Module, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device, vram_limit): super().__init__() self.module = module.to(dtype=offload_dtype, device=offload_device) self.offload_dtype = offload_dtype @@ -18,28 +42,25 @@ class AutoWrappedModule(torch.nn.Module): self.onload_device = onload_device self.computation_dtype = computation_dtype self.computation_device = computation_device + self.vram_limit = vram_limit self.state = 0 - def offload(self): - if self.state == 1 and (self.offload_dtype != self.onload_dtype or self.offload_device != self.onload_device): - self.module.to(dtype=self.offload_dtype, device=self.offload_device) - self.state = 0 - - def onload(self): - if self.state == 0 and (self.offload_dtype != self.onload_dtype or self.offload_device != self.onload_device): - self.module.to(dtype=self.onload_dtype, device=self.onload_device) - self.state = 1 - def forward(self, *args, **kwargs): - if self.onload_dtype == self.computation_dtype and self.onload_device == self.computation_device: + if self.state == 2: module = self.module else: - module = copy.deepcopy(self.module).to(dtype=self.computation_dtype, device=self.computation_device) + if self.onload_dtype == self.computation_dtype and self.onload_device == self.computation_device: + module = self.module + elif self.vram_limit is not None and self.check_free_vram(): + self.keep() + module = self.module + else: + module = copy.deepcopy(self.module).to(dtype=self.computation_dtype, device=self.computation_device) return module(*args, **kwargs) -class WanAutoCastLayerNorm(torch.nn.LayerNorm): - def __init__(self, module: 
torch.nn.LayerNorm, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device): +class WanAutoCastLayerNorm(torch.nn.LayerNorm, AutoTorchModule): + def __init__(self, module: torch.nn.LayerNorm, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device, vram_limit): with init_weights_on_device(device=torch.device("meta")): super().__init__(module.normalized_shape, eps=module.eps, elementwise_affine=module.elementwise_affine, bias=module.bias is not None, dtype=offload_dtype, device=offload_device) self.weight = module.weight @@ -50,31 +71,28 @@ class WanAutoCastLayerNorm(torch.nn.LayerNorm): self.onload_device = onload_device self.computation_dtype = computation_dtype self.computation_device = computation_device + self.vram_limit = vram_limit self.state = 0 - def offload(self): - if self.state == 1 and (self.offload_dtype != self.onload_dtype or self.offload_device != self.onload_device): - self.to(dtype=self.offload_dtype, device=self.offload_device) - self.state = 0 - - def onload(self): - if self.state == 0 and (self.offload_dtype != self.onload_dtype or self.offload_device != self.onload_device): - self.to(dtype=self.onload_dtype, device=self.onload_device) - self.state = 1 - def forward(self, x, *args, **kwargs): - if self.onload_dtype == self.computation_dtype and self.onload_device == self.computation_device: + if self.state == 2: weight, bias = self.weight, self.bias else: - weight = None if self.weight is None else cast_to(self.weight, self.computation_dtype, self.computation_device) - bias = None if self.bias is None else cast_to(self.bias, self.computation_dtype, self.computation_device) + if self.onload_dtype == self.computation_dtype and self.onload_device == self.computation_device: + weight, bias = self.weight, self.bias + elif self.vram_limit is not None and self.check_free_vram(): + self.keep() + weight, bias = self.weight, self.bias + else: + weight = None if 
self.weight is None else cast_to(self.weight, self.computation_dtype, self.computation_device) + bias = None if self.bias is None else cast_to(self.bias, self.computation_dtype, self.computation_device) with torch.amp.autocast(device_type=x.device.type): x = torch.nn.functional.layer_norm(x.float(), self.normalized_shape, weight, bias, self.eps).type_as(x) return x -class AutoWrappedLinear(torch.nn.Linear): - def __init__(self, module: torch.nn.Linear, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device): +class AutoWrappedLinear(torch.nn.Linear, AutoTorchModule): + def __init__(self, module: torch.nn.Linear, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device, vram_limit): with init_weights_on_device(device=torch.device("meta")): super().__init__(in_features=module.in_features, out_features=module.out_features, bias=module.bias is not None, dtype=offload_dtype, device=offload_device) self.weight = module.weight @@ -85,28 +103,25 @@ class AutoWrappedLinear(torch.nn.Linear): self.onload_device = onload_device self.computation_dtype = computation_dtype self.computation_device = computation_device + self.vram_limit = vram_limit self.state = 0 - def offload(self): - if self.state == 1 and (self.offload_dtype != self.onload_dtype or self.offload_device != self.onload_device): - self.to(dtype=self.offload_dtype, device=self.offload_device) - self.state = 0 - - def onload(self): - if self.state == 0 and (self.offload_dtype != self.onload_dtype or self.offload_device != self.onload_device): - self.to(dtype=self.onload_dtype, device=self.onload_device) - self.state = 1 - def forward(self, x, *args, **kwargs): - if self.onload_dtype == self.computation_dtype and self.onload_device == self.computation_device: + if self.state == 2: weight, bias = self.weight, self.bias else: - weight = cast_to(self.weight, self.computation_dtype, self.computation_device) - bias = None if self.bias is 
None else cast_to(self.bias, self.computation_dtype, self.computation_device) + if self.onload_dtype == self.computation_dtype and self.onload_device == self.computation_device: + weight, bias = self.weight, self.bias + elif self.vram_limit is not None and self.check_free_vram(): + self.keep() + weight, bias = self.weight, self.bias + else: + weight = cast_to(self.weight, self.computation_dtype, self.computation_device) + bias = None if self.bias is None else cast_to(self.bias, self.computation_dtype, self.computation_device) return torch.nn.functional.linear(x, weight, bias) -def enable_vram_management_recursively(model: torch.nn.Module, module_map: dict, module_config: dict, max_num_param=None, overflow_module_config: dict = None, total_num_param=0): +def enable_vram_management_recursively(model: torch.nn.Module, module_map: dict, module_config: dict, max_num_param=None, overflow_module_config: dict = None, total_num_param=0, vram_limit=None): for name, module in model.named_children(): for source_module, target_module in module_map.items(): if isinstance(module, source_module): @@ -115,16 +130,16 @@ def enable_vram_management_recursively(model: torch.nn.Module, module_map: dict, module_config_ = overflow_module_config else: module_config_ = module_config - module_ = target_module(module, **module_config_) + module_ = target_module(module, **module_config_, vram_limit=vram_limit) setattr(model, name, module_) total_num_param += num_param break else: - total_num_param = enable_vram_management_recursively(module, module_map, module_config, max_num_param, overflow_module_config, total_num_param) + total_num_param = enable_vram_management_recursively(module, module_map, module_config, max_num_param, overflow_module_config, total_num_param, vram_limit=vram_limit) return total_num_param -def enable_vram_management(model: torch.nn.Module, module_map: dict, module_config: dict, max_num_param=None, overflow_module_config: dict = None): - 
enable_vram_management_recursively(model, module_map, module_config, max_num_param, overflow_module_config, total_num_param=0) +def enable_vram_management(model: torch.nn.Module, module_map: dict, module_config: dict, max_num_param=None, overflow_module_config: dict = None, vram_limit=None): + enable_vram_management_recursively(model, module_map, module_config, max_num_param, overflow_module_config, total_num_param=0, vram_limit=vram_limit) model.vram_management_enabled = True diff --git a/examples/wanvideo/model_inference/wan_1.3b_speed_control.py b/examples/wanvideo/model_inference/wan_1.3b_speed_control.py new file mode 100644 index 0000000..6efdc65 --- /dev/null +++ b/examples/wanvideo/model_inference/wan_1.3b_speed_control.py @@ -0,0 +1,34 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1", origin_file_pattern="model.safetensors", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + +# Text-to-video +video = pipe( + prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + seed=1, tiled=True, + motion_bucket_id=0 +) +save_video(video, 
"video_slow.mp4", fps=15, quality=5) + +video = pipe( + prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + seed=1, tiled=True, + motion_bucket_id=100 +) +save_video(video, "video_fast.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_inference/wan_1.3b_text_to_video.py b/examples/wanvideo/model_inference/wan_1.3b_text_to_video.py new file mode 100644 index 0000000..83e300b --- /dev/null +++ b/examples/wanvideo/model_inference/wan_1.3b_text_to_video.py @@ -0,0 +1,34 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + +# Text-to-video +video = pipe( + prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + seed=0, tiled=True, +) +save_video(video, "video1.mp4", fps=15, quality=5) + +# Video-to-video +video = VideoData("video1.mp4", height=480, width=832) +video = pipe( + 
prompt="纪实摄影风格画面,一只活泼的小狗戴着黑色墨镜在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,戴着黑色墨镜,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_video=video, denoising_strength=0.7, + seed=1, tiled=True +) +save_video(video, "video2.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_inference/wan_1.3b_vace.py b/examples/wanvideo/model_inference/wan_1.3b_vace.py new file mode 100644 index 0000000..99c0242 --- /dev/null +++ b/examples/wanvideo/model_inference/wan_1.3b_vace.py @@ -0,0 +1,52 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + +dataset_snapshot_download( + dataset_id="DiffSynth-Studio/examples_in_diffsynth", + local_dir="./", + allow_file_pattern=["data/examples/wan/depth_video.mp4", "data/examples/wan/cat_fightning.jpg"] +) + +# Depth video -> Video +control_video = VideoData("data/examples/wan/depth_video.mp4", height=480, width=832) +video = pipe( + prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + vace_video=control_video, 
+ seed=1, tiled=True +) +save_video(video, "video1.mp4", fps=15, quality=5) + +# Reference image -> Video +video = pipe( + prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + vace_reference_image=Image.open("data/examples/wan/cat_fightning.jpg").resize((832, 480)), + seed=1, tiled=True +) +save_video(video, "video2.mp4", fps=15, quality=5) + +# Depth video + Reference image -> Video +video = pipe( + prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + vace_video=control_video, + vace_reference_image=Image.open("data/examples/wan/cat_fightning.jpg").resize((832, 480)), + seed=1, tiled=True +) +save_video(video, "video3.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_inference/wan_14b_flf2v.py b/examples/wanvideo/model_inference/wan_14b_flf2v.py new file mode 100644 index 0000000..3061398 --- /dev/null +++ b/examples/wanvideo/model_inference/wan_14b_flf2v.py @@ -0,0 +1,36 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", 
origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + +dataset_snapshot_download( + dataset_id="DiffSynth-Studio/examples_in_diffsynth", + local_dir="./", + allow_file_pattern=["data/examples/wan/first_frame.jpeg", "data/examples/wan/last_frame.jpeg"] +) + +# First and last frame to video +video = pipe( + prompt="写实风格,一个女生手持枯萎的花站在花园中,镜头逐渐拉远,记录下花园的全貌。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=Image.open("data/examples/wan/first_frame.jpeg").resize((960, 960)), + end_image=Image.open("data/examples/wan/last_frame.jpeg").resize((960, 960)), + seed=0, tiled=True, + height=960, width=960, num_frames=33, + sigma_shift=16, +) +save_video(video, "video.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_inference/wan_14b_image_to_video_480p.py b/examples/wanvideo/model_inference/wan_14b_image_to_video_480p.py new file mode 100644 index 0000000..eb2e5b0 --- /dev/null +++ b/examples/wanvideo/model_inference/wan_14b_image_to_video_480p.py @@ -0,0 +1,34 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", 
origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + +dataset_snapshot_download( + dataset_id="DiffSynth-Studio/examples_in_diffsynth", + local_dir="./", + allow_file_pattern=f"data/examples/wan/input_image.jpg" +) +image = Image.open("data/examples/wan/input_image.jpg") + +# Image-to-video +video = pipe( + prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=image, + seed=0, tiled=True +) +save_video(video, "video.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_inference/wan_14b_image_to_video_720p.py b/examples/wanvideo/model_inference/wan_14b_image_to_video_720p.py new file mode 100644 index 0000000..fb14d24 --- /dev/null +++ b/examples/wanvideo/model_inference/wan_14b_image_to_video_720p.py @@ -0,0 +1,35 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + 
+dataset_snapshot_download( + dataset_id="DiffSynth-Studio/examples_in_diffsynth", + local_dir="./", + allow_file_pattern=f"data/examples/wan/input_image.jpg" +) +image = Image.open("data/examples/wan/input_image.jpg") + +# Image-to-video +video = pipe( + prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=image, + seed=0, tiled=True, + height=720, width=1280, +) +save_video(video, "video.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_inference/wan_14b_text_to_video.py b/examples/wanvideo/model_inference/wan_14b_text_to_video.py new file mode 100644 index 0000000..40cb02d --- /dev/null +++ b/examples/wanvideo/model_inference/wan_14b_text_to_video.py @@ -0,0 +1,24 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + +# Text-to-video +video = pipe( + prompt="一名宇航员身穿太空服,面朝镜头骑着一匹机械马在火星表面驰骋。红色的荒凉地表延伸至远方,点缀着巨大的陨石坑和奇特的岩石结构。机械马的步伐稳健,扬起微弱的尘埃,展现出未来科技与原始探索的完美结合。宇航员手持操控装置,目光坚定,仿佛正在开辟人类的新疆域。背景是深邃的宇宙和蔚蓝的地球,画面既科幻又充满希望,让人不禁畅想未来的星际生活。", + 
negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + seed=0, tiled=True, +) +save_video(video, "video1.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_inference/wan_fun_1.3b_InP.py b/examples/wanvideo/model_inference/wan_fun_1.3b_InP.py new file mode 100644 index 0000000..d921c0c --- /dev/null +++ b/examples/wanvideo/model_inference/wan_fun_1.3b_InP.py @@ -0,0 +1,36 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + +dataset_snapshot_download( + dataset_id="DiffSynth-Studio/examples_in_diffsynth", + local_dir="./", + allow_file_pattern=f"data/examples/wan/input_image.jpg" +) +image = Image.open("data/examples/wan/input_image.jpg") + +# First and last frame to video +video = pipe( + prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=image, + seed=0, 
tiled=True + # You can input `end_image=xxx` to control the last frame of the video. + # The model will automatically generate the dynamic content between `input_image` and `end_image`. +) +save_video(video, "video.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_inference/wan_fun_1.3b_control.py b/examples/wanvideo/model_inference/wan_fun_1.3b_control.py new file mode 100644 index 0000000..43374d2 --- /dev/null +++ b/examples/wanvideo/model_inference/wan_fun_1.3b_control.py @@ -0,0 +1,34 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + +dataset_snapshot_download( + dataset_id="DiffSynth-Studio/examples_in_diffsynth", + local_dir="./", + allow_file_pattern=f"data/examples/wan/control_video.mp4" +) + +# Control video +control_video = VideoData("data/examples/wan/control_video.mp4", height=832, width=576) +video = pipe( + prompt="扁平风格动漫,一位长发少女优雅起舞。她五官精致,大眼睛明亮有神,黑色长发柔顺光泽。身穿淡蓝色T恤和深蓝色牛仔短裤。背景是粉色。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + control_video=control_video, height=832, width=576, 
num_frames=49, + seed=1, tiled=True +) +save_video(video, "video.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_inference/wan_fun_14b_InP.py b/examples/wanvideo/model_inference/wan_fun_14b_InP.py new file mode 100644 index 0000000..af227cb --- /dev/null +++ b/examples/wanvideo/model_inference/wan_fun_14b_InP.py @@ -0,0 +1,36 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + +dataset_snapshot_download( + dataset_id="DiffSynth-Studio/examples_in_diffsynth", + local_dir="./", + allow_file_pattern=f"data/examples/wan/input_image.jpg" +) +image = Image.open("data/examples/wan/input_image.jpg") + +# First and last frame to video +video = pipe( + prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=image, + seed=0, tiled=True + # You can input `end_image=xxx` to control the last frame of the video. 
+ # The model will automatically generate the dynamic content between `input_image` and `end_image`. +) +save_video(video, "video.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_inference/wan_fun_14b_control.py b/examples/wanvideo/model_inference/wan_fun_14b_control.py new file mode 100644 index 0000000..db9e5c8 --- /dev/null +++ b/examples/wanvideo/model_inference/wan_fun_14b_control.py @@ -0,0 +1,34 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + +dataset_snapshot_download( + dataset_id="DiffSynth-Studio/examples_in_diffsynth", + local_dir="./", + allow_file_pattern=f"data/examples/wan/control_video.mp4" +) + +# Control video +control_video = VideoData("data/examples/wan/control_video.mp4", height=832, width=576) +video = pipe( + prompt="扁平风格动漫,一位长发少女优雅起舞。她五官精致,大眼睛明亮有神,黑色长发柔顺光泽。身穿淡蓝色T恤和深蓝色牛仔短裤。背景是粉色。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + control_video=control_video, height=832, width=576, num_frames=49, + seed=1, tiled=True +) +save_video(video, "video.mp4", fps=15, quality=5) diff --git 
a/examples/wanvideo/model_inference/wan_fun_v1.1_1.3b_reference_control.py b/examples/wanvideo/model_inference/wan_fun_v1.1_1.3b_reference_control.py new file mode 100644 index 0000000..0f7e4c8 --- /dev/null +++ b/examples/wanvideo/model_inference/wan_fun_v1.1_1.3b_reference_control.py @@ -0,0 +1,36 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + +dataset_snapshot_download( + dataset_id="DiffSynth-Studio/examples_in_diffsynth", + local_dir="./", + allow_file_pattern=["data/examples/wan/control_video.mp4", "data/examples/wan/reference_image_girl.png"] +) + +# Control video +control_video = VideoData("data/examples/wan/control_video.mp4", height=832, width=576) +reference_image = Image.open("data/examples/wan/reference_image_girl.png").resize((576, 832)) +video = pipe( + prompt="扁平风格动漫,一位长发少女优雅起舞。她五官精致,大眼睛明亮有神,黑色长发柔顺光泽。身穿淡蓝色T恤和深蓝色牛仔短裤。背景是粉色。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + control_video=control_video, reference_image=reference_image, + height=832, width=576, num_frames=49, + 
seed=1, tiled=True +) +save_video(video, "video.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_inference/wan_fun_v1.1_14b_reference_control.py b/examples/wanvideo/model_inference/wan_fun_v1.1_14b_reference_control.py new file mode 100644 index 0000000..78635ff --- /dev/null +++ b/examples/wanvideo/model_inference/wan_fun_v1.1_14b_reference_control.py @@ -0,0 +1,36 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() + +dataset_snapshot_download( + dataset_id="DiffSynth-Studio/examples_in_diffsynth", + local_dir="./", + allow_file_pattern=["data/examples/wan/control_video.mp4", "data/examples/wan/reference_image_girl.png"] +) + +# Control video +control_video = VideoData("data/examples/wan/control_video.mp4", height=832, width=576) +reference_image = Image.open("data/examples/wan/reference_image_girl.png").resize((576, 832)) +video = pipe( + prompt="扁平风格动漫,一位长发少女优雅起舞。她五官精致,大眼睛明亮有神,黑色长发柔顺光泽。身穿淡蓝色T恤和深蓝色牛仔短裤。背景是粉色。", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + control_video=control_video, 
reference_image=reference_image, + height=832, width=576, num_frames=49, + seed=1, tiled=True +) +save_video(video, "video1.mp4", fps=15, quality=5) diff --git a/requirements.txt b/requirements.txt index 63a871b..92d8b48 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ torch>=2.0.0 torchvision cupy-cuda12x -transformers==4.46.2 +transformers controlnet-aux==0.0.7 imageio imageio[ffmpeg] @@ -11,3 +11,4 @@ sentencepiece protobuf modelscope ftfy +pynvml diff --git a/test.py b/test.py deleted file mode 100644 index f7959ee..0000000 --- a/test.py +++ /dev/null @@ -1,46 +0,0 @@ -import torch -torch.cuda.set_per_process_memory_fraction(0.999, 0) -from diffsynth import ModelManager, save_video, VideoData, save_frames, save_video, download_models -from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig, model_fn_wan_video -from diffsynth.controlnets.processors import Annotator -from diffsynth.data.video import crop_and_resize -from modelscope import snapshot_download -from tqdm import tqdm -from PIL import Image - - -# Load models -pipe = WanVideoPipeline.from_pretrained( - torch_dtype=torch.bfloat16, - device="cuda", - model_configs=[ - ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), - # ModelConfig("D:\projects\VideoX-Fun\models\Wan2.1-Fun-V1.1-1.3B-Control\diffusion_pytorch_model.safetensors", offload_device="cpu"), - ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), - ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), - ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), - ], -) -pipe.enable_vram_management(num_persistent_param_in_dit=6*10**9) - -video = 
VideoData(rf"D:\pr_projects\20250503_dance\data\双马尾竖屏暴击!你的微笑就是彩虹的微笑♥ - 1.双马尾竖屏暴击!你的微笑就是彩虹的微笑♥(Av114086629088385,P1).mp4", height=832, width=480) -annotator = Annotator("openpose") -video = [video[i] for i in tqdm(range(450, 450+1*81, 1))] -save_video(video, "video_input.mp4", fps=60, quality=5) -control_video = [annotator(f) for f in tqdm(video)] -save_video(control_video, "video_control.mp4", fps=60, quality=5) -reference_image = crop_and_resize(Image.open(rf"D:\pr_projects\20250503_dance\data\marmot4.png"), 832, 480) - -with torch.amp.autocast("cuda", torch.bfloat16): - video = pipe( - prompt="微距摄影风格特写画面,一只憨态可掬的土拨鼠正用后腿站立在碎石堆上,它在挥舞着双臂。金棕色的绒毛在阳光下泛着丝绸般的光泽,腹部毛发呈现浅杏色渐变,每根毛尖都闪烁着细密的光晕。两只黑曜石般的眼睛透出机警而温顺的光芒,鼻梁两侧的白色触须微微颤动,捕捉着空气中的气息。背景是虚化的灰绿色渐变,几簇嫩绿苔藓从画面右下角探出头来,与前景散落的鹅卵石形成微妙的景深对比。土拨鼠圆润的身形在逆光中勾勒出柔和的轮廓,耳朵紧贴头部的姿态流露出戒备中的天真,整个画面洋溢着自然界生灵特有的灵动与纯真。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - seed=43, tiled=True, - height=832, width=480, num_frames=len(control_video), - control_video=control_video, reference_image=reference_image, - # sliding_window_size=5, sliding_window_stride=2, - # num_inference_steps=100, - # cfg_merge=True, - sigma_shift=16, - ) - save_video(video, "video1.mp4", fps=60, quality=5) diff --git a/train.py b/train.py new file mode 100644 index 0000000..de8afd1 --- /dev/null +++ b/train.py @@ -0,0 +1,75 @@ +import torch, os +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from diffsynth.trainers.utils import DiffusionTrainingModule, VideoDataset, launch_training_task +os.environ["TOKENIZERS_PARALLELISM"] = "false" + + + +class WanTrainingModule(DiffusionTrainingModule): + def __init__(self, model_paths): + super().__init__() + self.pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cpu", + model_configs=[ModelConfig(path=path) for path in model_paths], + ) + self.pipe.freeze_except([]) + 
self.pipe.dit = self.add_lora_to_model(self.pipe.dit, target_modules="q,k,v,o,ffn.0,ffn.2".split(","), lora_alpha=16) + + + def forward_preprocess(self, data): + inputs_posi = {"prompt": data["prompt"]} + inputs_nega = {} + inputs_shared = { + "input_video": data["video"], + "height": data["video"][0].size[1], + "width": data["video"][0].size[0], + "num_frames": len(data["video"]), + # Please do not modify the following parameters. + "cfg_scale": 1, + "tiled": False, + "rand_device": self.pipe.device, + "use_gradient_checkpointing": True, + "cfg_merge": False, + } + for unit in self.pipe.units: + inputs_shared, inputs_posi, inputs_nega = self.pipe.unit_runner(unit, self.pipe, inputs_shared, inputs_posi, inputs_nega) + return {**inputs_shared, **inputs_posi} + + + def forward(self, data): + inputs = self.forward_preprocess(data) + models = {name: getattr(self.pipe, name) for name in self.pipe.in_iteration_models} + loss = self.pipe.training_loss(**models, **inputs) + return loss + + + +def add_general_parsers(parser): + parser.add_argument("--dataset_base_path", type=str, default="", help="Base path of the Dataset.") + parser.add_argument("--dataset_metadata_path", type=str, default="", required=True, help="Metadata path of the Dataset.") + parser.add_argument("--height", type=int, default=None, help="Image or video height. Leave `height` and `width` None to enable dynamic resolution.") + parser.add_argument("--width", type=int, default=None, help="Image or video width. Leave `height` and `width` None to enable dynamic resolution.") + parser.add_argument("--data_file_keys", type=str, default="image,video", help="Data file keys in metadata. Separated by commas.") + parser.add_argument("--dataset_repeat", type=int, default=1, help="Number of times the dataset is repeated in each epoch.") + parser.add_argument("--model_paths", type=str, default="", help="Model paths to be loaded. 
Separated by commas.") + parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs.") + parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs.") + return parser + + +if __name__ == "__main__": + dataset = VideoDataset( + base_path="data/pixabay100/train", + metadata_path="data/pixabay100/metadata_example.csv", + height=480, width=832, + data_file_keys=["video"], + repeat=400, + ) + model = WanTrainingModule([ + "models/Wan-AI/Wan2.1-T2V-1.3B/diffusion_pytorch_model.safetensors", + "models/Wan-AI/Wan2.1-T2V-1.3B/models_t5_umt5-xxl-enc-bf16.pth", + "models/Wan-AI/Wan2.1-T2V-1.3B/Wan2.1_VAE.pth", + ]) + launch_training_task(model, dataset) + From 8f10a9c353c7bcf581f8e945427ef1c68b2d952e Mon Sep 17 00:00:00 2001 From: Artiprocher Date: Mon, 19 May 2025 19:02:52 +0800 Subject: [PATCH 6/9] training script --- diffsynth/pipelines/wan_video_new.py | 10 +-- diffsynth/trainers/utils.py | 67 +++++++++++++---- examples/wanvideo/model_training/train_i2v.py | 54 +++++++++++++ examples/wanvideo/model_training/train_t2v.py | 53 +++++++++++++ train.py | 75 ------------------- 5 files changed, 165 insertions(+), 94 deletions(-) create mode 100644 examples/wanvideo/model_training/train_i2v.py create mode 100644 examples/wanvideo/model_training/train_t2v.py delete mode 100644 train.py diff --git a/diffsynth/pipelines/wan_video_new.py b/diffsynth/pipelines/wan_video_new.py index 6110cc2..02c0fec 100644 --- a/diffsynth/pipelines/wan_video_new.py +++ b/diffsynth/pipelines/wan_video_new.py @@ -148,7 +148,10 @@ class BasePipeline(torch.nn.Module): def freeze_except(self, model_names): for name, model in self.named_children(): - if name not in model_names: + if name in model_names: + model.train() + model.requires_grad_(True) + else: model.eval() model.requires_grad_(False) @@ -214,11 +217,6 @@ class WanVideoPipeline(BasePipeline): self.model_fn = model_fn_wan_video - def train(self): - super().train() - self.scheduler.set_timesteps(1000, 
training=True) - - def training_loss(self, **inputs): timestep_id = torch.randint(0, self.scheduler.num_train_timesteps, (1,)) timestep = self.scheduler.timesteps[timestep_id].to(dtype=self.torch_dtype, device=self.device) diff --git a/diffsynth/trainers/utils.py b/diffsynth/trainers/utils.py index 7bc0f15..d306049 100644 --- a/diffsynth/trainers/utils.py +++ b/diffsynth/trainers/utils.py @@ -1,4 +1,4 @@ -import imageio, os, torch, warnings, torchvision +import imageio, os, torch, warnings, torchvision, argparse from peft import LoraConfig, inject_adapter_in_model from PIL import Image import pandas as pd @@ -10,7 +10,7 @@ from accelerate import Accelerator class VideoDataset(torch.utils.data.Dataset): def __init__( self, - base_path, metadata_path, + base_path=None, metadata_path=None, frame_interval=1, num_frames=81, dynamic_resolution=True, max_pixels=1920*1080, height=None, width=None, height_division_factor=16, width_division_factor=16, @@ -18,7 +18,16 @@ class VideoDataset(torch.utils.data.Dataset): image_file_extension=("jpg", "jpeg", "png", "webp"), video_file_extension=("mp4", "avi", "mov", "wmv", "mkv", "flv", "webm"), repeat=1, + args=None, ): + if args is not None: + base_path = args.dataset_base_path + metadata_path = args.dataset_metadata_path + height = args.height + width = args.width + data_file_keys = args.data_file_keys.split(",") + repeat = args.dataset_repeat + metadata = pd.read_csv(metadata_path) self.data = [metadata.iloc[i].to_dict() for i in range(len(metadata))] @@ -156,10 +165,28 @@ class DiffusionTrainingModule(torch.nn.Module): lora_config = LoraConfig(r=lora_rank, lora_alpha=lora_alpha, target_modules=target_modules) model = inject_adapter_in_model(lora_config, model) return model + + + def export_trainable_state_dict(self, state_dict, remove_prefix=None): + trainable_param_names = self.trainable_param_names() + state_dict = {name: param for name, param in state_dict.items() if name in trainable_param_names} + if remove_prefix is not 
None: + state_dict_ = {} + for name, param in state_dict.items(): + if name.startswith(remove_prefix): + name = name[len(remove_prefix):] + state_dict_[name] = param + state_dict = state_dict_ + return state_dict -def launch_training_task(model: DiffusionTrainingModule, dataset, learning_rate, num_epochs, output_path, remove_prefix=None): +def launch_training_task(model: DiffusionTrainingModule, dataset, learning_rate=1e-4, num_epochs=1, output_path="./models", remove_prefix_in_ckpt=None, args=None): + if args is not None: + learning_rate = args.learning_rate + num_epochs = args.num_epochs + output_path = args.output_path + remove_prefix_in_ckpt = args.remove_prefix_in_ckpt dataloader = torch.utils.data.DataLoader(dataset, shuffle=True, collate_fn=lambda x: x[0]) optimizer = torch.optim.AdamW(model.trainable_modules(), lr=learning_rate) scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer) @@ -178,13 +205,27 @@ def launch_training_task(model: DiffusionTrainingModule, dataset, learning_rate, accelerator.wait_for_everyone() if accelerator.is_main_process: state_dict = accelerator.get_state_dict(model) - trainable_param_names = model.trainable_param_names() - state_dict = {name: param for name, param in state_dict.items() if name in trainable_param_names} - if remove_prefix is not None: - state_dict_ = {} - for name, param in state_dict.items(): - if name.startswith(remove_prefix): - name = name[len(remove_prefix):] - state_dict_[name] = param - path = os.path.join(output_path, f"epoch-{epoch}") - accelerator.save(state_dict_, path, safe_serialization=True) + state_dict = model.export_trainable_state_dict(state_dict, remove_prefix=remove_prefix_in_ckpt) + path = os.path.join(output_path, f"epoch-{epoch}.safetensors") + accelerator.save(state_dict, path, safe_serialization=True) + + + +def wan_parser(): + parser = argparse.ArgumentParser(description="Simple example of a training script.") + parser.add_argument("--dataset_base_path", type=str, default="", 
help="Base path of the Dataset.") + parser.add_argument("--dataset_metadata_path", type=str, default="", required=True, help="Metadata path of the Dataset.") + parser.add_argument("--height", type=int, default=None, help="Image or video height. Leave `height` and `width` None to enable dynamic resolution.") + parser.add_argument("--width", type=int, default=None, help="Image or video width. Leave `height` and `width` None to enable dynamic resolution.") + parser.add_argument("--data_file_keys", type=str, default="image,video", help="Data file keys in metadata. Separated by commas.") + parser.add_argument("--dataset_repeat", type=int, default=1, help="Number of times the dataset is repeated in each epoch.") + parser.add_argument("--model_paths", type=str, default="", help="Model paths to be loaded. JSON format.") + parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate.") + parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs.") + parser.add_argument("--output_path", type=str, default="./models", help="Save path.") + parser.add_argument("--remove_prefix_in_ckpt", type=str, default="pipe.dit.", help="Remove prefix in ckpt.") + parser.add_argument("--task", type=str, default="train_lora", choices=["train_lora", "train_full"], help="Task.") + parser.add_argument("--lora_target_modules", type=str, default="q,k,v,o,ffn.0,ffn.2", help="Layers with LoRA modules.") + parser.add_argument("--lora_rank", type=int, default=32, help="LoRA rank.") + return parser + diff --git a/examples/wanvideo/model_training/train_i2v.py b/examples/wanvideo/model_training/train_i2v.py new file mode 100644 index 0000000..1c5c757 --- /dev/null +++ b/examples/wanvideo/model_training/train_i2v.py @@ -0,0 +1,54 @@ +import torch, os, json +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from diffsynth.trainers.utils import DiffusionTrainingModule, VideoDataset, launch_training_task, wan_parser 
+os.environ["TOKENIZERS_PARALLELISM"] = "false" + + +class WanTrainingModule(DiffusionTrainingModule): + def __init__(self, model_paths, task="train_lora", lora_target_modules="q,k,v,o,ffn.0,ffn.2", lora_rank=32): + super().__init__() + self.pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cpu", + model_configs=[ModelConfig(path=path) for path in model_paths], + ) + self.pipe.scheduler.set_timesteps(1000, training=True) + if task == "train_lora": + self.pipe.freeze_except([]) + self.pipe.dit = self.add_lora_to_model(self.pipe.dit, target_modules=lora_target_modules.split(","), lora_rank=lora_rank) + else: + self.pipe.freeze_except(["dit"]) + + def forward_preprocess(self, data): + inputs_posi = {"prompt": data["prompt"]} + inputs_nega = {} + inputs_shared = { + "input_image": data["video"][0], + "input_video": data["video"], + "height": data["video"][0].size[1], + "width": data["video"][0].size[0], + "num_frames": len(data["video"]), + # Please do not modify the following parameters. 
+ "cfg_scale": 1, + "tiled": False, + "rand_device": self.pipe.device, + "use_gradient_checkpointing": True, + "cfg_merge": False, + } + for unit in self.pipe.units: + inputs_shared, inputs_posi, inputs_nega = self.pipe.unit_runner(unit, self.pipe, inputs_shared, inputs_posi, inputs_nega) + return {**inputs_shared, **inputs_posi} + + def forward(self, data): + inputs = self.forward_preprocess(data) + models = {name: getattr(self.pipe, name) for name in self.pipe.in_iteration_models} + loss = self.pipe.training_loss(**models, **inputs) + return loss + + +if __name__ == "__main__": + parser = wan_parser() + args = parser.parse_args() + dataset = VideoDataset(args=args) + model = WanTrainingModule(json.loads(args.model_paths), task=args.task, lora_target_modules=args.lora_target_modules, lora_rank=args.lora_rank) + launch_training_task(model, dataset, args=args) diff --git a/examples/wanvideo/model_training/train_t2v.py b/examples/wanvideo/model_training/train_t2v.py new file mode 100644 index 0000000..50b49ef --- /dev/null +++ b/examples/wanvideo/model_training/train_t2v.py @@ -0,0 +1,53 @@ +import torch, os, json +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from diffsynth.trainers.utils import DiffusionTrainingModule, VideoDataset, launch_training_task, wan_parser +os.environ["TOKENIZERS_PARALLELISM"] = "false" + + +class WanTrainingModule(DiffusionTrainingModule): + def __init__(self, model_paths, task="train_lora", lora_target_modules="q,k,v,o,ffn.0,ffn.2", lora_rank=32): + super().__init__() + self.pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cpu", + model_configs=[ModelConfig(path=path) for path in model_paths], + ) + self.pipe.scheduler.set_timesteps(1000, training=True) + if task == "train_lora": + self.pipe.freeze_except([]) + self.pipe.dit = self.add_lora_to_model(self.pipe.dit, target_modules=lora_target_modules.split(","), lora_rank=lora_rank) + else: + self.pipe.freeze_except(["dit"]) + 
+ def forward_preprocess(self, data): + inputs_posi = {"prompt": data["prompt"]} + inputs_nega = {} + inputs_shared = { + "input_video": data["video"], + "height": data["video"][0].size[1], + "width": data["video"][0].size[0], + "num_frames": len(data["video"]), + # Please do not modify the following parameters. + "cfg_scale": 1, + "tiled": False, + "rand_device": self.pipe.device, + "use_gradient_checkpointing": True, + "cfg_merge": False, + } + for unit in self.pipe.units: + inputs_shared, inputs_posi, inputs_nega = self.pipe.unit_runner(unit, self.pipe, inputs_shared, inputs_posi, inputs_nega) + return {**inputs_shared, **inputs_posi} + + def forward(self, data): + inputs = self.forward_preprocess(data) + models = {name: getattr(self.pipe, name) for name in self.pipe.in_iteration_models} + loss = self.pipe.training_loss(**models, **inputs) + return loss + + +if __name__ == "__main__": + parser = wan_parser() + args = parser.parse_args() + dataset = VideoDataset(args=args) + model = WanTrainingModule(json.loads(args.model_paths), task=args.task, lora_target_modules=args.lora_target_modules, lora_rank=args.lora_rank) + launch_training_task(model, dataset, args=args) diff --git a/train.py b/train.py deleted file mode 100644 index de8afd1..0000000 --- a/train.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch, os -from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig -from diffsynth.trainers.utils import DiffusionTrainingModule, VideoDataset, launch_training_task -os.environ["TOKENIZERS_PARALLELISM"] = "false" - - - -class WanTrainingModule(DiffusionTrainingModule): - def __init__(self, model_paths): - super().__init__() - self.pipe = WanVideoPipeline.from_pretrained( - torch_dtype=torch.bfloat16, - device="cpu", - model_configs=[ModelConfig(path=path) for path in model_paths], - ) - self.pipe.freeze_except([]) - self.pipe.dit = self.add_lora_to_model(self.pipe.dit, target_modules="q,k,v,o,ffn.0,ffn.2".split(","), lora_alpha=16) - - - def 
forward_preprocess(self, data): - inputs_posi = {"prompt": data["prompt"]} - inputs_nega = {} - inputs_shared = { - "input_video": data["video"], - "height": data["video"][0].size[1], - "width": data["video"][0].size[0], - "num_frames": len(data["video"]), - # Please do not modify the following parameters. - "cfg_scale": 1, - "tiled": False, - "rand_device": self.pipe.device, - "use_gradient_checkpointing": True, - "cfg_merge": False, - } - for unit in self.pipe.units: - inputs_shared, inputs_posi, inputs_nega = self.pipe.unit_runner(unit, self.pipe, inputs_shared, inputs_posi, inputs_nega) - return {**inputs_shared, **inputs_posi} - - - def forward(self, data): - inputs = self.forward_preprocess(data) - models = {name: getattr(self.pipe, name) for name in self.pipe.in_iteration_models} - loss = self.pipe.training_loss(**models, **inputs) - return loss - - - -def add_general_parsers(parser): - parser.add_argument("--dataset_base_path", type=str, default="", help="Base path of the Dataset.") - parser.add_argument("--dataset_metadata_path", type=str, default="", required=True, help="Metadata path of the Dataset.") - parser.add_argument("--height", type=int, default=None, help="Image or video height. Leave `height` and `width` None to enable dynamic resolution.") - parser.add_argument("--width", type=int, default=None, help="Image or video width. Leave `height` and `width` None to enable dynamic resolution.") - parser.add_argument("--data_file_keys", type=str, default="image,video", help="Data file keys in metadata. Separated by commas.") - parser.add_argument("--dataset_repeat", type=int, default=1, help="Number of times the dataset is repeated in each epoch.") - parser.add_argument("--model_paths", type=str, default="", help="Model paths to be loaded. 
Separated by commas.") - parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs.") - parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs.") - return parser - - -if __name__ == "__main__": - dataset = VideoDataset( - base_path="data/pixabay100/train", - metadata_path="data/pixabay100/metadata_example.csv", - height=480, width=832, - data_file_keys=["video"], - repeat=400, - ) - model = WanTrainingModule([ - "models/Wan-AI/Wan2.1-T2V-1.3B/diffusion_pytorch_model.safetensors", - "models/Wan-AI/Wan2.1-T2V-1.3B/models_t5_umt5-xxl-enc-bf16.pth", - "models/Wan-AI/Wan2.1-T2V-1.3B/Wan2.1_VAE.pth", - ]) - launch_training_task(model, dataset) - From 62f6ca2b8a820040d79b4512924904fec59e132b Mon Sep 17 00:00:00 2001 From: Artiprocher Date: Fri, 6 Jun 2025 14:58:41 +0800 Subject: [PATCH 7/9] new wan trainer --- diffsynth/lora/__init__.py | 45 ++ diffsynth/models/utils.py | 14 +- diffsynth/pipelines/wan_video_new.py | 23 +- diffsynth/trainers/utils.py | 34 +- diffsynth/vram_management/layers.py | 14 +- examples/wanvideo/README.md | 302 +-------- examples/wanvideo/README_zh.md | 313 +++++++++ ...trol.py => Wan2.1-1.3b-speedcontrol-v1.py} | 0 ..._14b_flf2v.py => Wan2.1-FLF2V-14B-720P.py} | 0 ..._control.py => Wan2.1-Fun-1.3B-Control.py} | 0 ...fun_1.3b_InP.py => Wan2.1-Fun-1.3B-InP.py} | 0 ...b_control.py => Wan2.1-Fun-14B-Control.py} | 0 ...n_fun_14b_InP.py => Wan2.1-Fun-14B-InP.py} | 0 ...rol.py => Wan2.1-Fun-V1.1-1.3B-Control.py} | 0 ...trol.py => Wan2.1-Fun-V1.1-14B-Control.py} | 0 ...o_video_480p.py => Wan2.1-I2V-14B-480P.py} | 0 ...o_video_720p.py => Wan2.1-I2V-14B-720P.py} | 0 ...3b_text_to_video.py => Wan2.1-T2V-1.3B.py} | 0 ...14b_text_to_video.py => Wan2.1-T2V-14B.py} | 0 ...3b_vace.py => Wan2.1-VACE-1.3B-Preview.py} | 0 .../full/Wan2.1-1.3b-speedcontrol-v1.sh | 13 + .../full/Wan2.1-FLF2V-14B-720P.sh | 14 + .../full/Wan2.1-Fun-1.3B-Control.sh | 14 + .../full/Wan2.1-Fun-1.3B-InP.sh | 14 + 
.../full/Wan2.1-Fun-14B-Control.sh | 14 + .../model_training/full/Wan2.1-Fun-14B-InP.sh | 14 + .../full/Wan2.1-Fun-V1.1-1.3B-Control.sh | 15 + .../full/Wan2.1-Fun-V1.1-14B-Control.sh | 15 + .../full/Wan2.1-I2V-14B-480P.sh | 13 + .../full/Wan2.1-I2V-14B-720P.sh | 13 + .../model_training/full/Wan2.1-T2V-1.3B.sh | 12 + .../model_training/full/Wan2.1-T2V-14B.sh | 12 + .../full/accelerate_config_14B.yaml | 22 + .../wanvideo/model_training/full/run_test.py | 38 ++ .../lora/Wan2.1-1.3b-speedcontrol-v1.sh | 15 + .../lora/Wan2.1-FLF2V-14B-720P.sh | 16 + .../lora/Wan2.1-Fun-1.3B-Control.sh | 16 + .../lora/Wan2.1-Fun-1.3B-InP.sh | 16 + .../lora/Wan2.1-Fun-14B-Control.sh | 16 + .../model_training/lora/Wan2.1-Fun-14B-InP.sh | 16 + .../lora/Wan2.1-Fun-V1.1-1.3B-Control.sh | 17 + .../lora/Wan2.1-Fun-V1.1-14B-Control.sh | 17 + .../lora/Wan2.1-I2V-14B-480P.sh | 15 + .../lora/Wan2.1-I2V-14B-720P.sh | 15 + .../model_training/lora/Wan2.1-T2V-1.3B.sh | 14 + .../model_training/lora/Wan2.1-T2V-14B.sh | 14 + .../wanvideo/model_training/lora/run_test.py | 25 + examples/wanvideo/model_training/train.py | 129 ++++ examples/wanvideo/model_training/train_i2v.py | 54 -- examples/wanvideo/model_training/train_t2v.py | 53 -- .../Wan2.1-1.3b-speedcontrol-v1.py | 28 + .../validate_full/Wan2.1-FLF2V-14B-720P.py | 33 + .../validate_full/Wan2.1-Fun-1.3B-Control.py | 32 + .../validate_full/Wan2.1-Fun-1.3B-InP.py | 31 + .../validate_full/Wan2.1-Fun-14B-Control.py | 32 + .../validate_full/Wan2.1-Fun-14B-InP.py | 31 + .../Wan2.1-Fun-V1.1-1.3B-Control.py | 33 + .../Wan2.1-Fun-V1.1-14B-Control.py | 33 + .../validate_full/Wan2.1-I2V-14B-480P.py | 30 + .../validate_full/Wan2.1-I2V-14B-720P.py | 30 + .../validate_full/Wan2.1-T2V-1.3B.py | 25 + .../validate_full/Wan2.1-T2V-14B.py | 25 + .../model_training/validate_full/run_test.py | 25 + .../Wan2.1-1.3b-speedcontrol-v1.py | 27 + .../validate_lora/Wan2.1-FLF2V-14B-720P.py | 32 + .../validate_lora/Wan2.1-Fun-1.3B-Control.py | 31 + 
.../validate_lora/Wan2.1-Fun-1.3B-InP.py | 30 + .../validate_lora/Wan2.1-Fun-14B-Control.py | 31 + .../validate_lora/Wan2.1-Fun-14B-InP.py | 30 + .../Wan2.1-Fun-V1.1-1.3B-Control.py | 32 + .../Wan2.1-Fun-V1.1-14B-Control.py | 32 + .../validate_lora/Wan2.1-I2V-14B-480P.py | 29 + .../validate_lora/Wan2.1-I2V-14B-720P.py | 29 + .../validate_lora/Wan2.1-T2V-1.3B.py | 24 + .../validate_lora/Wan2.1-T2V-14B.py | 24 + .../model_training/validate_lora/run_test.py | 25 + examples/wanvideo/train_wan_t2v.py | 593 ------------------ .../wanvideo/wan_1.3b_motion_controller.py | 41 -- examples/wanvideo/wan_1.3b_text_to_video.py | 40 -- examples/wanvideo/wan_1.3b_vace.py | 63 -- examples/wanvideo/wan_14B_flf2v.py | 52 -- examples/wanvideo/wan_14b_image_to_video.py | 51 -- examples/wanvideo/wan_14b_text_to_video.py | 36 -- .../wan_14b_text_to_video_tensor_parallel.py | 149 ----- examples/wanvideo/wan_fun_InP.py | 42 -- examples/wanvideo/wan_fun_control.py | 40 -- .../wanvideo/wan_fun_reference_control.py | 35 -- 87 files changed, 1779 insertions(+), 1543 deletions(-) create mode 100644 diffsynth/lora/__init__.py create mode 100644 examples/wanvideo/README_zh.md rename examples/wanvideo/model_inference/{wan_1.3b_speed_control.py => Wan2.1-1.3b-speedcontrol-v1.py} (100%) rename examples/wanvideo/model_inference/{wan_14b_flf2v.py => Wan2.1-FLF2V-14B-720P.py} (100%) rename examples/wanvideo/model_inference/{wan_fun_1.3b_control.py => Wan2.1-Fun-1.3B-Control.py} (100%) rename examples/wanvideo/model_inference/{wan_fun_1.3b_InP.py => Wan2.1-Fun-1.3B-InP.py} (100%) rename examples/wanvideo/model_inference/{wan_fun_14b_control.py => Wan2.1-Fun-14B-Control.py} (100%) rename examples/wanvideo/model_inference/{wan_fun_14b_InP.py => Wan2.1-Fun-14B-InP.py} (100%) rename examples/wanvideo/model_inference/{wan_fun_v1.1_1.3b_reference_control.py => Wan2.1-Fun-V1.1-1.3B-Control.py} (100%) rename examples/wanvideo/model_inference/{wan_fun_v1.1_14b_reference_control.py => 
Wan2.1-Fun-V1.1-14B-Control.py} (100%) rename examples/wanvideo/model_inference/{wan_14b_image_to_video_480p.py => Wan2.1-I2V-14B-480P.py} (100%) rename examples/wanvideo/model_inference/{wan_14b_image_to_video_720p.py => Wan2.1-I2V-14B-720P.py} (100%) rename examples/wanvideo/model_inference/{wan_1.3b_text_to_video.py => Wan2.1-T2V-1.3B.py} (100%) rename examples/wanvideo/model_inference/{wan_14b_text_to_video.py => Wan2.1-T2V-14B.py} (100%) rename examples/wanvideo/model_inference/{wan_1.3b_vace.py => Wan2.1-VACE-1.3B-Preview.py} (100%) create mode 100644 examples/wanvideo/model_training/full/Wan2.1-1.3b-speedcontrol-v1.sh create mode 100644 examples/wanvideo/model_training/full/Wan2.1-FLF2V-14B-720P.sh create mode 100644 examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-Control.sh create mode 100644 examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-InP.sh create mode 100644 examples/wanvideo/model_training/full/Wan2.1-Fun-14B-Control.sh create mode 100644 examples/wanvideo/model_training/full/Wan2.1-Fun-14B-InP.sh create mode 100644 examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-Control.sh create mode 100644 examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-Control.sh create mode 100644 examples/wanvideo/model_training/full/Wan2.1-I2V-14B-480P.sh create mode 100644 examples/wanvideo/model_training/full/Wan2.1-I2V-14B-720P.sh create mode 100644 examples/wanvideo/model_training/full/Wan2.1-T2V-1.3B.sh create mode 100644 examples/wanvideo/model_training/full/Wan2.1-T2V-14B.sh create mode 100644 examples/wanvideo/model_training/full/accelerate_config_14B.yaml create mode 100644 examples/wanvideo/model_training/full/run_test.py create mode 100644 examples/wanvideo/model_training/lora/Wan2.1-1.3b-speedcontrol-v1.sh create mode 100644 examples/wanvideo/model_training/lora/Wan2.1-FLF2V-14B-720P.sh create mode 100644 examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-Control.sh create mode 100644 
examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-InP.sh create mode 100644 examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-Control.sh create mode 100644 examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-InP.sh create mode 100644 examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control.sh create mode 100644 examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-Control.sh create mode 100644 examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-480P.sh create mode 100644 examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-720P.sh create mode 100644 examples/wanvideo/model_training/lora/Wan2.1-T2V-1.3B.sh create mode 100644 examples/wanvideo/model_training/lora/Wan2.1-T2V-14B.sh create mode 100644 examples/wanvideo/model_training/lora/run_test.py create mode 100644 examples/wanvideo/model_training/train.py delete mode 100644 examples/wanvideo/model_training/train_i2v.py delete mode 100644 examples/wanvideo/model_training/train_t2v.py create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-1.3b-speedcontrol-v1.py create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-FLF2V-14B-720P.py create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-Control.py create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-InP.py create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-Control.py create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-InP.py create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control.py create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control.py create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-480P.py create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-720P.py create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-T2V-1.3B.py create mode 100644 
examples/wanvideo/model_training/validate_full/Wan2.1-T2V-14B.py create mode 100644 examples/wanvideo/model_training/validate_full/run_test.py create mode 100644 examples/wanvideo/model_training/validate_lora/Wan2.1-1.3b-speedcontrol-v1.py create mode 100644 examples/wanvideo/model_training/validate_lora/Wan2.1-FLF2V-14B-720P.py create mode 100644 examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-Control.py create mode 100644 examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-InP.py create mode 100644 examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-Control.py create mode 100644 examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-InP.py create mode 100644 examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control.py create mode 100644 examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control.py create mode 100644 examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-480P.py create mode 100644 examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-720P.py create mode 100644 examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-1.3B.py create mode 100644 examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-14B.py create mode 100644 examples/wanvideo/model_training/validate_lora/run_test.py delete mode 100644 examples/wanvideo/train_wan_t2v.py delete mode 100644 examples/wanvideo/wan_1.3b_motion_controller.py delete mode 100644 examples/wanvideo/wan_1.3b_text_to_video.py delete mode 100644 examples/wanvideo/wan_1.3b_vace.py delete mode 100644 examples/wanvideo/wan_14B_flf2v.py delete mode 100644 examples/wanvideo/wan_14b_image_to_video.py delete mode 100644 examples/wanvideo/wan_14b_text_to_video.py delete mode 100644 examples/wanvideo/wan_14b_text_to_video_tensor_parallel.py delete mode 100644 examples/wanvideo/wan_fun_InP.py delete mode 100644 examples/wanvideo/wan_fun_control.py delete mode 100644 examples/wanvideo/wan_fun_reference_control.py diff --git 
a/diffsynth/lora/__init__.py b/diffsynth/lora/__init__.py new file mode 100644 index 0000000..33bd89c --- /dev/null +++ b/diffsynth/lora/__init__.py @@ -0,0 +1,45 @@ +import torch + + + +class GeneralLoRALoader: + def __init__(self, device="cpu", torch_dtype=torch.float32): + self.device = device + self.torch_dtype = torch_dtype + + + def get_name_dict(self, lora_state_dict): + lora_name_dict = {} + for key in lora_state_dict: + if ".lora_B." not in key: + continue + keys = key.split(".") + if len(keys) > keys.index("lora_B") + 2: + keys.pop(keys.index("lora_B") + 1) + keys.pop(keys.index("lora_B")) + if keys[0] == "diffusion_model": + keys.pop(0) + keys.pop(-1) + target_name = ".".join(keys) + lora_name_dict[target_name] = (key, key.replace(".lora_B.", ".lora_A.")) + return lora_name_dict + + + def load(self, model: torch.nn.Module, state_dict_lora, alpha=1.0): + updated_num = 0 + lora_name_dict = self.get_name_dict(state_dict_lora) + for name, module in model.named_modules(): + if name in lora_name_dict: + weight_up = state_dict_lora[lora_name_dict[name][0]].to(device=self.device, dtype=self.torch_dtype) + weight_down = state_dict_lora[lora_name_dict[name][1]].to(device=self.device, dtype=self.torch_dtype) + if len(weight_up.shape) == 4: + weight_up = weight_up.squeeze(3).squeeze(2) + weight_down = weight_down.squeeze(3).squeeze(2) + weight_lora = alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3) + else: + weight_lora = alpha * torch.mm(weight_up, weight_down) + state_dict = module.state_dict() + state_dict["weight"] = state_dict["weight"].to(device=self.device, dtype=self.torch_dtype) + weight_lora + module.load_state_dict(state_dict) + updated_num += 1 + print(f"{updated_num} tensors are updated by LoRA.") diff --git a/diffsynth/models/utils.py b/diffsynth/models/utils.py index 99f5dee..0d58e4e 100644 --- a/diffsynth/models/utils.py +++ b/diffsynth/models/utils.py @@ -62,16 +62,16 @@ def load_state_dict_from_folder(file_path, torch_dtype=None): 
return state_dict -def load_state_dict(file_path, torch_dtype=None): +def load_state_dict(file_path, torch_dtype=None, device="cpu"): if file_path.endswith(".safetensors"): - return load_state_dict_from_safetensors(file_path, torch_dtype=torch_dtype) + return load_state_dict_from_safetensors(file_path, torch_dtype=torch_dtype, device=device) else: - return load_state_dict_from_bin(file_path, torch_dtype=torch_dtype) + return load_state_dict_from_bin(file_path, torch_dtype=torch_dtype, device=device) -def load_state_dict_from_safetensors(file_path, torch_dtype=None): +def load_state_dict_from_safetensors(file_path, torch_dtype=None, device="cpu"): state_dict = {} - with safe_open(file_path, framework="pt", device="cpu") as f: + with safe_open(file_path, framework="pt", device=device) as f: for k in f.keys(): state_dict[k] = f.get_tensor(k) if torch_dtype is not None: @@ -79,8 +79,8 @@ def load_state_dict_from_safetensors(file_path, torch_dtype=None): return state_dict -def load_state_dict_from_bin(file_path, torch_dtype=None): - state_dict = torch.load(file_path, map_location="cpu", weights_only=True) +def load_state_dict_from_bin(file_path, torch_dtype=None, device="cpu"): + state_dict = torch.load(file_path, map_location=device, weights_only=True) if torch_dtype is not None: for i in state_dict: if isinstance(state_dict[i], torch.Tensor): diff --git a/diffsynth/pipelines/wan_video_new.py b/diffsynth/pipelines/wan_video_new.py index 02c0fec..eb9dfba 100644 --- a/diffsynth/pipelines/wan_video_new.py +++ b/diffsynth/pipelines/wan_video_new.py @@ -11,7 +11,7 @@ from PIL import Image from tqdm import tqdm from typing import Optional -from ..models import ModelManager +from ..models import ModelManager, load_state_dict from ..models.wan_video_dit import WanModel, RMSNorm, sinusoidal_embedding_1d from ..models.wan_video_text_encoder import WanTextEncoder, T5RelativeEmbedding, T5LayerNorm from ..models.wan_video_vae import WanVideoVAE, RMS_norm, CausalConv3d, Upsample @@ 
-21,6 +21,7 @@ from ..models.wan_video_motion_controller import WanMotionControllerModel from ..schedulers.flow_match import FlowMatchScheduler from ..prompters import WanPrompter from ..vram_management import enable_vram_management, AutoWrappedModule, AutoWrappedLinear, WanAutoCastLayerNorm +from ..lora import GeneralLoRALoader @@ -137,7 +138,8 @@ class BasePipeline(torch.nn.Module): def enable_cpu_offload(self): - warnings.warn("`enable_cpu_offload` is deprecated. Please use `enable_vram_management`.") + warnings.warn("`enable_cpu_offload` will be deprecated. Please use `enable_vram_management`.") + self.vram_management_enabled = True def get_free_vram(self): @@ -183,7 +185,6 @@ class ModelConfig: self.path = self.path[0] - class WanVideoPipeline(BasePipeline): def __init__(self, device="cuda", torch_dtype=torch.bfloat16, tokenizer_path=None): @@ -216,6 +217,12 @@ class WanVideoPipeline(BasePipeline): ] self.model_fn = model_fn_wan_video + + def load_lora(self, module, path, alpha=1): + loader = GeneralLoRALoader(torch_dtype=self.torch_dtype, device=self.device) + lora = load_state_dict(path, torch_dtype=self.torch_dtype, device=self.device) + loader.load(module, lora, alpha=alpha) + def training_loss(self, **inputs): timestep_id = torch.randint(0, self.scheduler.num_train_timesteps, (1,)) @@ -946,6 +953,7 @@ def model_fn_wan_video( sliding_window_stride: Optional[int] = None, cfg_merge: bool = False, use_gradient_checkpointing: bool = False, + use_gradient_checkpointing_offload: bool = False, **kwargs, ): if sliding_window_size is not None and sliding_window_stride is not None: @@ -1036,7 +1044,14 @@ def model_fn_wan_video( return custom_forward for block_id, block in enumerate(dit.blocks): - if use_gradient_checkpointing: + if use_gradient_checkpointing_offload: + with torch.autograd.graph.save_on_cpu(): + x = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), + x, context, t_mod, freqs, + use_reentrant=False, + ) + elif 
use_gradient_checkpointing: x = torch.utils.checkpoint.checkpoint( create_custom_forward(block), x, context, t_mod, freqs, diff --git a/diffsynth/trainers/utils.py b/diffsynth/trainers/utils.py index d306049..0a056a6 100644 --- a/diffsynth/trainers/utils.py +++ b/diffsynth/trainers/utils.py @@ -25,6 +25,7 @@ class VideoDataset(torch.utils.data.Dataset): metadata_path = args.dataset_metadata_path height = args.height width = args.width + num_frames = args.num_frames data_file_keys = args.data_file_keys.split(",") repeat = args.dataset_repeat @@ -205,27 +206,52 @@ def launch_training_task(model: DiffusionTrainingModule, dataset, learning_rate= accelerator.wait_for_everyone() if accelerator.is_main_process: state_dict = accelerator.get_state_dict(model) - state_dict = model.export_trainable_state_dict(state_dict, remove_prefix=remove_prefix_in_ckpt) + state_dict = accelerator.unwrap_model(model).export_trainable_state_dict(state_dict, remove_prefix=remove_prefix_in_ckpt) + os.makedirs(output_path, exist_ok=True) path = os.path.join(output_path, f"epoch-{epoch}.safetensors") accelerator.save(state_dict, path, safe_serialization=True) +def launch_data_process_task(model: DiffusionTrainingModule, dataset, output_path="./models"): + dataloader = torch.utils.data.DataLoader(dataset, shuffle=False, collate_fn=lambda x: x[0]) + accelerator = Accelerator() + model, dataloader = accelerator.prepare(model, dataloader) + os.makedirs(os.path.join(output_path, "data_cache"), exist_ok=True) + for data_id, data in enumerate(tqdm(dataloader)): + with torch.no_grad(): + inputs = model.forward_preprocess(data) + inputs = {key: inputs[key] for key in model.model_input_keys if key in inputs} + torch.save(inputs, os.path.join(output_path, "data_cache", f"{data_id}.pth")) + + + def wan_parser(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument("--dataset_base_path", type=str, default="", help="Base path of the Dataset.") 
parser.add_argument("--dataset_metadata_path", type=str, default="", required=True, help="Metadata path of the Dataset.") parser.add_argument("--height", type=int, default=None, help="Image or video height. Leave `height` and `width` None to enable dynamic resolution.") parser.add_argument("--width", type=int, default=None, help="Image or video width. Leave `height` and `width` None to enable dynamic resolution.") + parser.add_argument("--num_frames", type=int, default=81, help="Number of frames in each video. The frames are sampled from the prefix.") parser.add_argument("--data_file_keys", type=str, default="image,video", help="Data file keys in metadata. Separated by commas.") parser.add_argument("--dataset_repeat", type=int, default=1, help="Number of times the dataset is repeated in each epoch.") - parser.add_argument("--model_paths", type=str, default="", help="Model paths to be loaded. JSON format.") + parser.add_argument("--model_paths", type=str, default=None, help="Model paths to be loaded. JSON format.") + parser.add_argument("--model_id_with_origin_paths", type=str, default=None, help="Model ID with origin path, e.g., Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors. 
Separated by commas.") parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate.") parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs.") parser.add_argument("--output_path", type=str, default="./models", help="Save path.") parser.add_argument("--remove_prefix_in_ckpt", type=str, default="pipe.dit.", help="Remove prefix in ckpt.") - parser.add_argument("--task", type=str, default="train_lora", choices=["train_lora", "train_full"], help="Task.") - parser.add_argument("--lora_target_modules", type=str, default="q,k,v,o,ffn.0,ffn.2", help="Layers with LoRA modules.") + parser.add_argument("--trainable_models", type=str, default=None, help="Trainable models, e.g., dit, vae, text_encoder.") + parser.add_argument("--lora_base_model", type=str, default=None, help="Add LoRA on which model.") + parser.add_argument("--lora_target_modules", type=str, default="q,k,v,o,ffn.0,ffn.2", help="Add LoRA on which layer.") parser.add_argument("--lora_rank", type=int, default=32, help="LoRA rank.") + parser.add_argument("--input_contains_input_image", default=False, action="store_true", help="Model input contains 'input_image'.") + parser.add_argument("--input_contains_end_image", default=False, action="store_true", help="Model input contains 'end_image'.") + parser.add_argument("--input_contains_control_video", default=False, action="store_true", help="Model input contains 'control_video'.") + parser.add_argument("--input_contains_reference_image", default=False, action="store_true", help="Model input contains 'reference_image'.") + parser.add_argument("--input_contains_vace_video", default=False, action="store_true", help="Model input contains 'vace_video'.") + parser.add_argument("--input_contains_vace_reference_image", default=False, action="store_true", help="Model input contains 'vace_reference_image'.") + parser.add_argument("--input_contains_motion_bucket_id", default=False, action="store_true", help="Model input contains 
'motion_bucket_id'.") + parser.add_argument("--use_gradient_checkpointing_offload", default=False, action="store_true", help="Offload gradient checkpointing to RAM.") return parser diff --git a/diffsynth/vram_management/layers.py b/diffsynth/vram_management/layers.py index 45e7433..dd4a245 100644 --- a/diffsynth/vram_management/layers.py +++ b/diffsynth/vram_management/layers.py @@ -33,7 +33,7 @@ class AutoTorchModule(torch.nn.Module): class AutoWrappedModule(AutoTorchModule): - def __init__(self, module: torch.nn.Module, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device, vram_limit): + def __init__(self, module: torch.nn.Module, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device, vram_limit, **kwargs): super().__init__() self.module = module.to(dtype=offload_dtype, device=offload_device) self.offload_dtype = offload_dtype @@ -60,7 +60,7 @@ class AutoWrappedModule(AutoTorchModule): class WanAutoCastLayerNorm(torch.nn.LayerNorm, AutoTorchModule): - def __init__(self, module: torch.nn.LayerNorm, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device, vram_limit): + def __init__(self, module: torch.nn.LayerNorm, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device, vram_limit, **kwargs): with init_weights_on_device(device=torch.device("meta")): super().__init__(module.normalized_shape, eps=module.eps, elementwise_affine=module.elementwise_affine, bias=module.bias is not None, dtype=offload_dtype, device=offload_device) self.weight = module.weight @@ -92,7 +92,7 @@ class WanAutoCastLayerNorm(torch.nn.LayerNorm, AutoTorchModule): class AutoWrappedLinear(torch.nn.Linear, AutoTorchModule): - def __init__(self, module: torch.nn.Linear, offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device, vram_limit): + def __init__(self, module: torch.nn.Linear, 
offload_dtype, offload_device, onload_dtype, onload_device, computation_dtype, computation_device, vram_limit, name="", **kwargs): with init_weights_on_device(device=torch.device("meta")): super().__init__(in_features=module.in_features, out_features=module.out_features, bias=module.bias is not None, dtype=offload_dtype, device=offload_device) self.weight = module.weight @@ -105,6 +105,7 @@ class AutoWrappedLinear(torch.nn.Linear, AutoTorchModule): self.computation_device = computation_device self.vram_limit = vram_limit self.state = 0 + self.name = name def forward(self, x, *args, **kwargs): if self.state == 2: @@ -121,8 +122,9 @@ class AutoWrappedLinear(torch.nn.Linear, AutoTorchModule): return torch.nn.functional.linear(x, weight, bias) -def enable_vram_management_recursively(model: torch.nn.Module, module_map: dict, module_config: dict, max_num_param=None, overflow_module_config: dict = None, total_num_param=0, vram_limit=None): +def enable_vram_management_recursively(model: torch.nn.Module, module_map: dict, module_config: dict, max_num_param=None, overflow_module_config: dict = None, total_num_param=0, vram_limit=None, name_prefix=""): for name, module in model.named_children(): + layer_name = name if name_prefix == "" else name_prefix + "." 
+ name for source_module, target_module in module_map.items(): if isinstance(module, source_module): num_param = sum(p.numel() for p in module.parameters()) @@ -130,12 +132,12 @@ def enable_vram_management_recursively(model: torch.nn.Module, module_map: dict, module_config_ = overflow_module_config else: module_config_ = module_config - module_ = target_module(module, **module_config_, vram_limit=vram_limit) + module_ = target_module(module, **module_config_, vram_limit=vram_limit, name=layer_name) setattr(model, name, module_) total_num_param += num_param break else: - total_num_param = enable_vram_management_recursively(module, module_map, module_config, max_num_param, overflow_module_config, total_num_param, vram_limit=vram_limit) + total_num_param = enable_vram_management_recursively(module, module_map, module_config, max_num_param, overflow_module_config, total_num_param, vram_limit=vram_limit, name_prefix=layer_name) return total_num_param diff --git a/examples/wanvideo/README.md b/examples/wanvideo/README.md index 92c3c59..46c9670 100644 --- a/examples/wanvideo/README.md +++ b/examples/wanvideo/README.md @@ -1,276 +1,32 @@ -# Wan-Video -Wan-Video is a collection of video synthesis models open-sourced by Alibaba. -Before using this model, please install DiffSynth-Studio from **source code**. +* dataset + * `--dataset_base_path`: Base path of the Dataset. + * `--dataset_metadata_path`: Metadata path of the Dataset. + * `--height`: Image or video height. Leave `height` and `width` None to enable dynamic resolution. + * `--width`: Image or video width. Leave `height` and `width` None to enable dynamic resolution. + * `--num_frames`: Number of frames in each video. The frames are sampled from the prefix. + * `--data_file_keys`: Data file keys in metadata. Separated by commas. + * `--dataset_repeat`: Number of times the dataset is repeated in each epoch. +* Model + * `--model_paths`: Model paths to be loaded. JSON format. 
+ * `--model_id_with_origin_paths`: Model ID with original path, e.g., Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors. Separated by commas. +* Training + * `--learning_rate`: Learning rate. + * `--num_epochs`: Number of epochs. + * `--output_path`: Save path. + * `--remove_prefix_in_ckpt`: Remove prefix in ckpt. +* Trainable module + * `--trainable_models`: Trainable models, e.g., dit, vae, text_encoder. + * `--lora_base_model`: Add LoRA on which model. + * `--lora_target_modules`: Add LoRA on which layer. + * `--lora_rank`: LoRA rank. +* Extra model input + * `--input_contains_input_image`: Model input contains `input_image` + * `--input_contains_end_image`: Model input contains `end_image`. + * `--input_contains_control_video`: Model input contains `control_video`. + * `--input_contains_reference_image`: Model input contains `reference_image`. + * `--input_contains_vace_video`: Model input contains `vace_video`. + * `--input_contains_vace_reference_image`: Model input contains `vace_reference_image`. + * `--input_contains_motion_bucket_id`: Model input contains `motion_bucket_id`. -```shell -git clone https://github.com/modelscope/DiffSynth-Studio.git -cd DiffSynth-Studio -pip install -e . 
-``` - -## Model Zoo - -|Developer|Name|Link|Scripts| -|-|-|-|-| -|Wan Team|1.3B text-to-video|[Link](https://modelscope.cn/models/Wan-AI/Wan2.1-T2V-1.3B)|[wan_1.3b_text_to_video.py](./wan_1.3b_text_to_video.py)| -|Wan Team|14B text-to-video|[Link](https://modelscope.cn/models/Wan-AI/Wan2.1-T2V-14B)|[wan_14b_text_to_video.py](./wan_14b_text_to_video.py)| -|Wan Team|14B image-to-video 480P|[Link](https://modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-480P)|[wan_14b_image_to_video.py](./wan_14b_image_to_video.py)| -|Wan Team|14B image-to-video 720P|[Link](https://modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-720P)|[wan_14b_image_to_video.py](./wan_14b_image_to_video.py)| -|Wan Team|14B first-last-frame-to-video 720P|[Link](https://modelscope.cn/models/Wan-AI/Wan2.1-FLF2V-14B-720P)|[wan_14B_flf2v.py](./wan_14B_flf2v.py)| -|DiffSynth-Studio Team|1.3B aesthetics LoRA|[Link](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-lora-aesthetics-v1)|Please see the [model card](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-lora-aesthetics-v1).| -|DiffSynth-Studio Team|1.3B Highres-fix LoRA|[Link](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-lora-highresfix-v1)|Please see the [model card](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-lora-highresfix-v1).| -|DiffSynth-Studio Team|1.3B ExVideo LoRA|[Link](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-lora-exvideo-v1)|Please see the [model card](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-lora-exvideo-v1).| -|DiffSynth-Studio Team|1.3B Speed Control adapter|[Link](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1)|[wan_1.3b_motion_controller.py](./wan_1.3b_motion_controller.py)| -|PAI Team|1.3B InP|[Link](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-InP)|[wan_fun_InP.py](./wan_fun_InP.py)| -|PAI Team|14B InP|[Link](https://modelscope.cn/models/PAI/Wan2.1-Fun-14B-InP)|[wan_fun_InP.py](./wan_fun_InP.py)| -|PAI Team|1.3B 
Control|[Link](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-Control)|[wan_fun_control.py](./wan_fun_control.py)| -|PAI Team|14B Control|[Link](https://modelscope.cn/models/PAI/Wan2.1-Fun-14B-Control)|[wan_fun_control.py](./wan_fun_control.py)| -|IIC Team|1.3B VACE|[Link](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview)|[wan_1.3b_vace.py](./wan_1.3b_vace.py)| - -Base model features - -||Text-to-video|Image-to-video|End frame|Control|Reference image| -|-|-|-|-|-|-| -|1.3B text-to-video|✅||||| -|14B text-to-video|✅||||| -|14B image-to-video 480P||✅|||| -|14B image-to-video 720P||✅|||| -|14B first-last-frame-to-video 720P||✅|✅||| -|1.3B InP||✅|✅||| -|14B InP||✅|✅||| -|1.3B Control||||✅|| -|14B Control||||✅|| -|1.3B VACE||||✅|✅| - -Adapter model compatibility - -||1.3B text-to-video|1.3B InP|1.3B VACE| -|-|-|-|-| -|1.3B aesthetics LoRA|✅||✅| -|1.3B Highres-fix LoRA|✅||✅| -|1.3B ExVideo LoRA|✅||✅| -|1.3B Speed Control adapter|✅|✅|✅| - -## VRAM Usage - -* Fine-grained offload: We recommend that users adjust the `num_persistent_param_in_dit` settings to find an optimal balance between speed and VRAM requirements. See [`./wan_14b_text_to_video.py`](./wan_14b_text_to_video.py). - -* FP8 Quantization: You only need to adjust the `torch_dtype` in the `ModelManager` (not the pipeline!). - -We present a detailed table here. The model (14B text-to-video) is tested on a single A100. 
- -|`torch_dtype`|`num_persistent_param_in_dit`|Speed|Required VRAM|Default Setting| -|-|-|-|-|-| -|torch.bfloat16|None (unlimited)|18.5s/it|48G|| -|torch.bfloat16|7*10**9 (7B)|20.8s/it|24G|| -|torch.bfloat16|0|23.4s/it|10G|| -|torch.float8_e4m3fn|None (unlimited)|18.3s/it|24G|yes| -|torch.float8_e4m3fn|0|24.0s/it|10G|| - -**We found that 14B image-to-video model is more sensitive to precision, so when the generated video content experiences issues such as artifacts, please switch to bfloat16 precision and use the `num_persistent_param_in_dit` parameter to control VRAM usage.** - -## Efficient Attention Implementation - -DiffSynth-Studio supports multiple Attention implementations. If you have installed any of the following Attention implementations, they will be enabled based on priority. However, we recommend to use the default torch SDPA. - -* [Flash Attention 3](https://github.com/Dao-AILab/flash-attention) -* [Flash Attention 2](https://github.com/Dao-AILab/flash-attention) -* [Sage Attention](https://github.com/thu-ml/SageAttention) -* [torch SDPA](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) (default. `torch>=2.5.0` is recommended.) - -## Acceleration - -We support multiple acceleration solutions: -* [TeaCache](https://github.com/ali-vilab/TeaCache): See [wan_1.3b_text_to_video_accelerate.py](./wan_1.3b_text_to_video_accelerate.py). - -* [Unified Sequence Parallel](https://github.com/xdit-project/xDiT): See [wan_14b_text_to_video_usp.py](./wan_14b_text_to_video_usp.py) - -```bash -pip install xfuser>=0.4.3 -torchrun --standalone --nproc_per_node=8 examples/wanvideo/wan_14b_text_to_video_usp.py -``` - -* Tensor Parallel: See [wan_14b_text_to_video_tensor_parallel.py](./wan_14b_text_to_video_tensor_parallel.py). - -## Gallery - -1.3B text-to-video. - -https://github.com/user-attachments/assets/124397be-cd6a-4f29-a87c-e4c695aaabb8 - -Put sunglasses on the dog. 
- -https://github.com/user-attachments/assets/272808d7-fbeb-4747-a6df-14a0860c75fb - -14B text-to-video. - -https://github.com/user-attachments/assets/3908bc64-d451-485a-8b61-28f6d32dd92f - -14B image-to-video. - -https://github.com/user-attachments/assets/c0bdd5ca-292f-45ed-b9bc-afe193156e75 - -14B first-last-frame-to-video - -|First frame|Last frame|Video| -|-|-|-| -|![Image](https://github.com/user-attachments/assets/b0d8225b-aee0-4129-b8e5-58c8523221a6)|![Image](https://github.com/user-attachments/assets/2f0c9bc5-07e2-45fa-8320-53d63a4fd203)|https://github.com/user-attachments/assets/2a6a2681-622c-4512-b852-5f22e73830b1| - -## Train - -We support Wan-Video LoRA training and full training. Here is a tutorial. This is an experimental feature. Below is a video sample generated from the character Keqing LoRA: - -https://github.com/user-attachments/assets/9bd8e30b-97e8-44f9-bb6f-da004ba376a9 - -Step 1: Install additional packages - -``` -pip install peft lightning pandas -``` - -Step 2: Prepare your dataset - -You need to manage the training videos as follows: - -``` -data/example_dataset/ -├── metadata.csv -└── train - ├── video_00001.mp4 - └── image_00002.jpg -``` - -`metadata.csv`: - -``` -file_name,text -video_00001.mp4,"video description" -image_00002.jpg,"video description" -``` - -We support both images and videos. An image is treated as a single frame of video. - -Step 3: Data process - -```shell -CUDA_VISIBLE_DEVICES="0" python examples/wanvideo/train_wan_t2v.py \ - --task data_process \ - --dataset_path data/example_dataset \ - --output_path ./models \ - --text_encoder_path "models/Wan-AI/Wan2.1-T2V-1.3B/models_t5_umt5-xxl-enc-bf16.pth" \ - --vae_path "models/Wan-AI/Wan2.1-T2V-1.3B/Wan2.1_VAE.pth" \ - --tiled \ - --num_frames 81 \ - --height 480 \ - --width 832 -``` - -After that, some cached files will be stored in the dataset folder. 
- -``` -data/example_dataset/ -├── metadata.csv -└── train - ├── video_00001.mp4 - ├── video_00001.mp4.tensors.pth - ├── video_00002.mp4 - └── video_00002.mp4.tensors.pth -``` - -Step 4: Train - -LoRA training: - -```shell -CUDA_VISIBLE_DEVICES="0" python examples/wanvideo/train_wan_t2v.py \ - --task train \ - --train_architecture lora \ - --dataset_path data/example_dataset \ - --output_path ./models \ - --dit_path "models/Wan-AI/Wan2.1-T2V-1.3B/diffusion_pytorch_model.safetensors" \ - --steps_per_epoch 500 \ - --max_epochs 10 \ - --learning_rate 1e-4 \ - --lora_rank 16 \ - --lora_alpha 16 \ - --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ - --accumulate_grad_batches 1 \ - --use_gradient_checkpointing -``` - -Full training: - -```shell -CUDA_VISIBLE_DEVICES="0" python examples/wanvideo/train_wan_t2v.py \ - --task train \ - --train_architecture full \ - --dataset_path data/example_dataset \ - --output_path ./models \ - --dit_path "models/Wan-AI/Wan2.1-T2V-1.3B/diffusion_pytorch_model.safetensors" \ - --steps_per_epoch 500 \ - --max_epochs 10 \ - --learning_rate 1e-4 \ - --accumulate_grad_batches 1 \ - --use_gradient_checkpointing -``` - -If you wish to train the 14B model, please separate the safetensor files with a comma. For example: `models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors,models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors,models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors,models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors,models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors,models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors`. - -If you wish to train the image-to-video model, please add an extra parameter `--image_encoder_path "models/Wan-AI/Wan2.1-I2V-14B-480P/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"`. 
- -For LoRA training, the Wan-1.3B-T2V model requires 16G of VRAM for processing 81 frames at 480P, while the Wan-14B-T2V model requires 60G of VRAM for the same configuration. To further reduce VRAM requirements by 20%-30%, you can include the parameter `--use_gradient_checkpointing_offload`. - -Step 5: Test - -Test LoRA: - -```python -import torch -from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData - - -model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cpu") -model_manager.load_models([ - "models/Wan-AI/Wan2.1-T2V-1.3B/diffusion_pytorch_model.safetensors", - "models/Wan-AI/Wan2.1-T2V-1.3B/models_t5_umt5-xxl-enc-bf16.pth", - "models/Wan-AI/Wan2.1-T2V-1.3B/Wan2.1_VAE.pth", -]) -model_manager.load_lora("models/lightning_logs/version_1/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0) -pipe = WanVideoPipeline.from_model_manager(model_manager, device="cuda") -pipe.enable_vram_management(num_persistent_param_in_dit=None) - -video = pipe( - prompt="...", - negative_prompt="...", - num_inference_steps=50, - seed=0, tiled=True -) -save_video(video, "video.mp4", fps=30, quality=5) -``` - -Test fine-tuned base model: - -```python -import torch -from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData - - -model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cpu") -model_manager.load_models([ - "models/lightning_logs/version_1/checkpoints/epoch=0-step=500.ckpt", - "models/Wan-AI/Wan2.1-T2V-1.3B/models_t5_umt5-xxl-enc-bf16.pth", - "models/Wan-AI/Wan2.1-T2V-1.3B/Wan2.1_VAE.pth", -]) -pipe = WanVideoPipeline.from_model_manager(model_manager, device="cuda") -pipe.enable_vram_management(num_persistent_param_in_dit=None) - -video = pipe( - prompt="...", - negative_prompt="...", - num_inference_steps=50, - seed=0, tiled=True -) -save_video(video, "video.mp4", fps=30, quality=5) -``` diff --git a/examples/wanvideo/README_zh.md b/examples/wanvideo/README_zh.md new file mode 100644 index 0000000..4504f8f --- 
/dev/null +++ b/examples/wanvideo/README_zh.md @@ -0,0 +1,313 @@ +# 通义万相 2.1(Wan 2.1) + +|模型 ID|类型|额外参数|推理|全量训练|全量训练后验证|LoRA 训练|LoRA 训练后验证| +|-|-|-|-|-|-|-|-| +|[Wan-AI/Wan2.1-T2V-1.3B](https://modelscope.cn/models/Wan-AI/Wan2.1-T2V-1.3B)|基础模型||[code](./model_inference/Wan2.1-T2V-1.3B.py)|[code](./model_training/full/Wan2.1-T2V-1.3B.sh)|[code](./model_training/validate_full/Wan2.1-T2V-1.3B.py)|[code](./model_training/lora/Wan2.1-T2V-1.3B.sh)|[code](./model_training/validate_lora/Wan2.1-T2V-1.3B.py)| +|[Wan-AI/Wan2.1-T2V-14B](https://modelscope.cn/models/Wan-AI/Wan2.1-T2V-14B)|基础模型||[code](./model_inference/Wan2.1-T2V-14B.py)|[code](./model_training/full/Wan2.1-T2V-14B.sh)|[code](./model_training/validate_full/Wan2.1-T2V-14B.py)|[code](./model_training/lora/Wan2.1-T2V-14B.sh)|[code](./model_training/validate_lora/Wan2.1-T2V-14B.py)| +|[Wan-AI/Wan2.1-I2V-14B-480P](https://modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-480P)|基础模型|`input_image`|[code](./model_inference/Wan2.1-I2V-14B-480P.py)|[code](./model_training/full/Wan2.1-I2V-14B-480P.sh)|[code](./model_training/validate_full/Wan2.1-I2V-14B-480P.py)|[code](./model_training/lora/Wan2.1-I2V-14B-480P.sh)|[code](./model_training/validate_lora/Wan2.1-I2V-14B-480P.py)| +|[Wan-AI/Wan2.1-I2V-14B-720P](https://modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-720P)|基础模型|`input_image`|[code](./model_inference/Wan2.1-I2V-14B-720P.py)|[code](./model_training/full/Wan2.1-I2V-14B-720P.sh)|[code](./model_training/validate_full/Wan2.1-I2V-14B-720P.py)|[code](./model_training/lora/Wan2.1-I2V-14B-720P.sh)|[code](./model_training/validate_lora/Wan2.1-I2V-14B-720P.py)| +|[Wan-AI/Wan2.1-FLF2V-14B-720P](https://modelscope.cn/models/Wan-AI/Wan2.1-FLF2V-14B-720P)|基础模型|`input_image`, 
`end_image`|[code](./model_inference/Wan2.1-FLF2V-14B-720P.py)|[code](./model_training/full/Wan2.1-FLF2V-14B-720P.sh)|[code](./model_training/validate_full/Wan2.1-FLF2V-14B-720P.py)|[code](./model_training/lora/Wan2.1-FLF2V-14B-720P.sh)|[code](./model_training/validate_lora/Wan2.1-FLF2V-14B-720P.py)| +|[PAI/Wan2.1-Fun-1.3B-InP](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-InP)|基础模型|`input_image`, `end_image`|[code](./model_inference/Wan2.1-Fun-1.3B-InP.py)|[code](./model_training/full/Wan2.1-Fun-1.3B-InP.sh)|[code](./model_training/validate_full/Wan2.1-Fun-1.3B-InP.py)|[code](./model_training/lora/Wan2.1-Fun-1.3B-InP.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-1.3B-InP.py)| +|[PAI/Wan2.1-Fun-1.3B-Control](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-Control)|基础模型|`control_video`|[code](./model_inference/Wan2.1-Fun-1.3B-Control.py)|[code](./model_training/full/Wan2.1-Fun-1.3B-Control.sh)|[code](./model_training/validate_full/Wan2.1-Fun-1.3B-Control.py)|[code](./model_training/lora/Wan2.1-Fun-1.3B-Control.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-1.3B-Control.py)| +|[PAI/Wan2.1-Fun-14B-InP](https://modelscope.cn/models/PAI/Wan2.1-Fun-14B-InP)|基础模型|`input_image`, `end_image`|[code](./model_inference/Wan2.1-Fun-14B-InP.py)|[code](./model_training/full/Wan2.1-Fun-14B-InP.sh)|[code](./model_training/validate_full/Wan2.1-Fun-14B-InP.py)|[code](./model_training/lora/Wan2.1-Fun-14B-InP.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-14B-InP.py)| +|[PAI/Wan2.1-Fun-14B-Control](https://modelscope.cn/models/PAI/Wan2.1-Fun-14B-Control)|基础模型|`control_video`|[code](./model_inference/Wan2.1-Fun-14B-Control.py)|[code](./model_training/full/Wan2.1-Fun-14B-Control.sh)|[code](./model_training/validate_full/Wan2.1-Fun-14B-Control.py)|[code](./model_training/lora/Wan2.1-Fun-14B-Control.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-14B-Control.py)| 
+|[PAI/Wan2.1-Fun-V1.1-1.3B-Control](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-1.3B-Control)|基础模型|`control_video`, `reference_image`|[code](./model_inference/Wan2.1-Fun-V1.1-1.3B-Control.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-1.3B-Control.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control.py)| +|[PAI/Wan2.1-Fun-V1.1-14B-Control](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-Control)|基础模型|`control_video`, `reference_image`|[code](./model_inference/Wan2.1-Fun-V1.1-14B-Control.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-14B-Control.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-14B-Control.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control.py)| +|[PAI/Wan2.1-Fun-V1.1-1.3B-InP](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-1.3B-InP)|基础模型|`input_image`, `end_image`|||||| +|[PAI/Wan2.1-Fun-V1.1-14B-InP](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-InP)|基础模型|`input_image`, `end_image`|||||| +|[PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera)|基础模型||||||| +|[PAI/Wan2.1-Fun-V1.1-14B-Control-Camera](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-Control-Camera)|基础模型||||||| +|[iic/VACE-Wan2.1-1.3B-Preview](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview)|适配器|`vace_control_video`, `vace_reference_image`|[code](./model_inference/Wan2.1-VACE-1.3B-Preview.py)|[code](./model_training/full/VACE-Wan2.1-1.3B-Preview.sh)|[code](./model_training/validate_full/VACE-Wan2.1-1.3B-Preview.py)|[code](./model_training/lora/VACE-Wan2.1-1.3B-Preview.sh)|[code](./model_training/validate_lora/VACE-Wan2.1-1.3B-Preview.py)| +|[Wan-AI/Wan2.1-VACE-1.3B](https://modelscope.cn/models/Wan-AI/Wan2.1-VACE-1.3B)|适配器|`vace_control_video`, 
`vace_reference_image`|||||| +|[Wan-AI/Wan2.1-VACE-14B](https://modelscope.cn/models/Wan-AI/Wan2.1-VACE-14B)|适配器|`vace_control_video`, `vace_reference_image`|||||| +|[DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1)|适配器|`motion_bucket_id`|[code](./model_inference/Wan2.1-1.3b-speedcontrol-v1.py)|[code](./model_training/full/Wan2.1-1.3b-speedcontrol-v1.sh)|[code](./model_training/validate_full/Wan2.1-1.3b-speedcontrol-v1.py)|[code](./model_training/lora/Wan2.1-1.3b-speedcontrol-v1.sh)|[code](./model_training/validate_lora/Wan2.1-1.3b-speedcontrol-v1.py)| + +## 模型推理 + +### 加载模型 + +模型通过 `from_pretrained` 加载: + +```python +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth"), + ], +) +``` + +其中 `torch_dtype` 和 `device` 是计算精度和计算设备。`model_configs` 可通过多种方式配置模型路径: + +* 从[魔搭社区](https://modelscope.cn/)下载模型并加载。此时需要填写 `model_id` 和 `origin_file_pattern`,例如 + +```python +ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors") +``` + +* 从本地文件路径加载模型。此时需要填写 `path`,例如 + +```python +ModelConfig(path="models/Wan-AI/Wan2.1-T2V-1.3B/diffusion_pytorch_model.safetensors") +``` + +对于从多个文件加载的单一模型,使用列表即可,例如 + +```python +ModelConfig(path=[ + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors", + 
"models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors", +]) +``` + +`from_pretrained` 还提供了额外的参数用于控制模型加载时的行为: + +* `tokenizer_config`: Wan 模型的 tokenizer 路径,默认值为 `ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/*")`。 +* `local_model_path`: 用于保存下载模型的路径,默认值为 `"./models"`。 +* `skip_download`: 是否跳过下载,默认值为 `False`。当您的网络无法访问[魔搭社区](https://modelscope.cn/)时,请手动下载必要的文件,并将其设置为 `True`。 +* `redirect_common_files`: 是否重定向重复模型文件,默认值为 `True`。由于 Wan 系列模型包括多个基础模型,每个基础模型的 text encoder 等模块都是相同的,为避免重复下载,我们会对模型路径进行重定向。 + +### 显存管理 + +DiffSynth-Studio 为 Wan 模型提供了细粒度的显存管理,让模型能够在低显存设备上进行推理,可通过以下代码开启 offload 功能,在显存有限的设备上将部分模块 offload 到内存中。 + +```python +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ], +) +pipe.enable_vram_management() +``` + +FP8 量化功能也是支持的: + +```python +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_dtype=torch.float8_e4m3fn), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_dtype=torch.float8_e4m3fn), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_dtype=torch.float8_e4m3fn), + ], +) +pipe.enable_vram_management() +``` + +FP8 量化和 offload 可同时开启: + +```python +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + 
device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu", offload_dtype=torch.float8_e4m3fn), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu", offload_dtype=torch.float8_e4m3fn), + ], +) +pipe.enable_vram_management() +``` + +FP8 量化能够大幅度减少显存占用,但不会加速,部分模型在 FP8 量化下会出现精度不足导致的画面模糊、撕裂、失真问题,请谨慎使用 FP8 量化。 + +`enable_vram_management` 函数提供了以下参数,用于控制显存使用情况: + +* `vram_limit`: 显存占用量(GB),默认占用设备上的剩余显存。注意这不是一个绝对限制,当设置的显存不足以支持模型进行推理,但实际可用显存足够时,将会以最小化显存占用的形式进行推理。 +* `vram_buffer`: 显存缓冲区大小(GB),默认为 0.5GB。由于部分较大的神经网络层在 onload 阶段会不可控地占用更多显存,因此一个显存缓冲区是必要的,理论上的最优值为模型中最大的层所占的显存。 +* `num_persistent_param_in_dit`: DiT 模型中常驻显存的参数数量(个),默认为无限制。我们将会在未来删除这个参数,请不要依赖这个参数。 + +### 输入参数 + +Pipeline 在推理阶段能够接收以下输入参数: + +* `prompt`: 提示词,描述画面中出现的内容。 +* `negative_prompt`: 负向提示词,描述画面中不应该出现的内容,默认值为 `""`。 +* `input_image`: 输入图片,适用于图生视频模型,例如 [`Wan-AI/Wan2.1-I2V-14B-480P`](https://modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-480P)、[`PAI/Wan2.1-Fun-1.3B-InP`](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-InP),以及首尾帧模型,例如 [`Wan-AI/Wan2.1-FLF2V-14B-720P`](Wan-AI/Wan2.1-FLF2V-14B-720P)。 +* `end_image`: 结尾帧,适用于首尾帧模型,例如 [`Wan-AI/Wan2.1-FLF2V-14B-720P`](Wan-AI/Wan2.1-FLF2V-14B-720P)。 +* `input_video`: 输入视频,用于视频生视频,适用于任意 Wan 系列模型,需与参数 `denoising_strength` 配合使用。 +* `denoising_strength`: 去噪强度,范围为 [0, 1]。数值越小,生成的视频越接近 `input_video`。 +* `control_video`: 控制视频,适用于带控制能力的 Wan 系列模型,例如 [`PAI/Wan2.1-Fun-1.3B-Control`](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-Control)。 +* `reference_image`: 参考图片,适用于带参考图能力的 Wan 系列模型,例如 [`PAI/Wan2.1-Fun-V1.1-1.3B-Control`](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-1.3B-Control)。 +* `vace_video`: VACE 模型的输入视频,适用于 VACE 系列模型,例如 
[`iic/VACE-Wan2.1-1.3B-Preview`](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview)。 +* `vace_video_mask`: VACE 模型的 mask 视频,适用于 VACE 系列模型,例如 [`iic/VACE-Wan2.1-1.3B-Preview`](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview)。 +* `vace_reference_image`: VACE 模型的参考图片,适用于 VACE 系列模型,例如 [`iic/VACE-Wan2.1-1.3B-Preview`](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview)。 +* `vace_scale`: VACE 模型对基础模型的影响程度,默认为1。数值越大,控制强度越高,但画面崩坏概率越大。 +* `seed`: 随机种子。默认为 `None`,即完全随机。 +* `rand_device`: 生成随机高斯噪声矩阵的计算设备,默认为 `"cpu"`。当设置为 `cuda` 时,在不同 GPU 上会导致不同的生成结果。 +* `height`: 帧高度,默认为 480。需设置为 16 的倍数,不满足时向上取整。 +* `width`: 帧宽度,默认为 832。需设置为 16 的倍数,不满足时向上取整。 +* `num_frames`: 帧数,默认为 81。需设置为 4 的倍数 + 1,不满足时向上取整,最小值为 1。 +* `cfg_scale`: Classifier-free guidance 机制的数值,默认为 5。数值越大,提示词的控制效果越强,但画面崩坏的概率越大。 +* `cfg_merge`: 是否合并 Classifier-free guidance 的两侧进行统一推理,默认为 `False`。该参数目前仅在基础的文生视频和图生视频模型上生效。 +* `num_inference_steps`: 推理次数,默认值为 50。 +* `sigma_shift`: Rectified Flow 理论中的参数,默认为 5。数值越大,模型在去噪的开始阶段停留的步骤数越多,可适当调大这个参数来提高画面质量,但会因生成过程与训练过程不一致导致生成的视频内容与训练数据存在差异。 +* `motion_bucket_id`: 运动幅度,范围为 [0, 100]。适用于速度控制模块,例如 [`DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1`](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1),数值越大,运动幅度越大。 +* `tiled`: 是否启用 VAE 分块推理,默认为 `False`。设置为 `True` 时可显著减少 VAE 编解码阶段的显存占用,会产生少许误差,以及少量推理时间延长。 +* `tile_size`: VAE 编解码阶段的分块大小,默认为 (30, 52),仅在 `tiled=True` 时生效。 +* `tile_stride`: VAE 编解码阶段的分块步长,默认为 (15, 26),仅在 `tiled=True` 时生效,需保证其数值小于或等于 `tile_size`。 +* `sliding_window_size`: DiT 部分的滑动窗口大小。实验性功能,效果不稳定。 +* `sliding_window_stride`: DiT 部分的滑动窗口步长。实验性功能,效果不稳定。 +* `tea_cache_l1_thresh`: TeaCache 的阈值,数值越大,速度越快,画面质量越差。请注意,开启 TeaCache 后推理速度并非均匀,因此进度条上显示的剩余时间将会变得不准确。 +* `tea_cache_model_id`: TeaCache 的参数模板,可选 `"Wan2.1-T2V-1.3B"`、`Wan2.1-T2V-14B`、`Wan2.1-I2V-14B-480P`、`Wan2.1-I2V-14B-720P` 之一。 +* `progress_bar_cmd`: 进度条,默认为 `tqdm.tqdm`。可通过设置为 `lambda x:x` 来屏蔽进度条。 + +## 模型训练 + +Wan 系列模型训练通过统一的 [`./model_training/train.py`](./model_training/train.py) 
脚本进行。 + +脚本包含以下参数: + +* 数据集 + * `--dataset_base_path`: 数据集的根路径。 + * `--dataset_metadata_path`: 数据集的元数据文件路径。 + * `--height`: 图像或视频的高度。将 `height` 和 `width` 留空以启用动态分辨率。 + * `--width`: 图像或视频的宽度。将 `height` 和 `width` 留空以启用动态分辨率。 + * `--num_frames`: 每个视频中的帧数。帧从视频前缀中采样。 + * `--data_file_keys`: 元数据中的数据文件键。用逗号分隔。 + * `--dataset_repeat`: 每个 epoch 中数据集重复的次数。 +* 模型 + * `--model_paths`: 要加载的模型路径。JSON 格式。 + * `--model_id_with_origin_paths`: 带原始路径的模型 ID,例如 Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors。用逗号分隔。 +* 训练 + * `--learning_rate`: 学习率。 + * `--num_epochs`: 轮数(Epoch)数量。 + * `--output_path`: 保存路径。 + * `--remove_prefix_in_ckpt`: 在 ckpt 中移除前缀。 +* 可训练模块 + * `--trainable_models`: 可训练的模型,例如 dit、vae、text_encoder。 + * `--lora_base_model`: LoRA 添加到哪个模型上。 + * `--lora_target_modules`: LoRA 添加到哪一层上。 + * `--lora_rank`: LoRA 的秩(Rank)。 +* 额外模型输入 + * `--input_contains_input_image`: 模型输入包含 `input_image` + * `--input_contains_end_image`: 模型输入包含 `end_image`。 + * `--input_contains_control_video`: 模型输入包含 `control_video`。 + * `--input_contains_reference_image`: 模型输入包含 `reference_image`。 + * `--input_contains_vace_video`: 模型输入包含 `vace_video`。 + * `--input_contains_vace_reference_image`: 模型输入包含 `vace_reference_image`。 + * `--input_contains_motion_bucket_id`: 模型输入包含 `motion_bucket_id`。 +* 显存管理 + * `--use_gradient_checkpointing_offload`: 是否将 gradient checkpointing 卸载到内存中。 + +### Step 1: 准备数据集 + +数据集包含一系列文件,我们建议您这样组织数据集文件: + +``` +data/example_video_dataset/ +├── metadata.csv +├── video1.mp4 +└── video2.mp4 +``` + +其中 `video1.mp4`、`video2.mp4` 为训练用视频数据,`metadata.csv` 为元数据列表,例如 + +``` +video,prompt +video1.mp4,"from sunset to night, a small town, light, house, river" +video2.mp4,"a dog is running" +``` + +数据集支持视频和图片混合训练,支持的视频文件格式包括 `"mp4", "avi", "mov", "wmv", "mkv", "flv", "webm"`,支持的图片格式包括 `"jpg", "jpeg", "png", "webp"`。 + +视频的尺寸可通过脚本参数 `--height`、`--width`、`--num_frames` 控制。在每个视频中,前 `num_frames` 帧会被用于训练,因此当视频长度不足 `num_frames` 帧时会报错,图片文件会被视为单帧视频。当 `--height` 和 `--width` 
为空时将会开启动态分辨率,按照数据集中每个视频或图片的实际宽高训练。 + +**我们强烈建议使用固定分辨率训练,并避免图像和视频混合训练,因为在多卡训练中存在负载均衡问题。** + +当模型需要额外输入时,例如具备控制能力的模型 [`PAI/Wan2.1-Fun-1.3B-Control`](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-Control) 所需的 `control_video`,请在数据集中补充相应的列,例如: + +``` +video,prompt,control_video +video1.mp4,"from sunset to night, a small town, light, house, river",video1_softedge.mp4 +``` + +额外输入若包含视频和图像文件,则需要在 `--data_file_keys` 参数中指定要解析的列名。该参数的默认值为 `"image,video"`,即解析列名为 `image` 和 `video` 的列。可根据额外输入增加相应的列名,例如 `--data_file_keys "image,video,control_video"`,同时启用 `--input_contains_control_video`。 + +### Step 2: 加载模型 + +类似于推理时的模型加载逻辑,可直接通过模型 ID 配置要加载的模型。例如,推理时我们通过以下设置加载模型 + +```python +model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="Wan2.1_VAE.pth"), +] +``` + +那么在训练时,填入以下参数即可加载对应的模型。 + +```shell +--model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-1.3B:Wan2.1_VAE.pth" +``` + +如果您希望从本地文件加载模型,例如推理时 + +```python +model_configs=[ + ModelConfig(path=[ + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors", + ]), + ModelConfig(path="models/Wan-AI/Wan2.1-T2V-14B/models_t5_umt5-xxl-enc-bf16.pth"), + ModelConfig(path="models/Wan-AI/Wan2.1-T2V-14B/Wan2.1_VAE.pth"), +] +``` + +那么训练时需设置为 + 
+```shell +--model_paths '[ + [ + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors", + "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors" + ], + "models/Wan-AI/Wan2.1-T2V-14B/models_t5_umt5-xxl-enc-bf16.pth", + "models/Wan-AI/Wan2.1-T2V-14B/Wan2.1_VAE.pth" +]' \ +``` + +### 设置可训练模块 + +训练框架支持训练基础模型,或 LoRA 模型。以下是几个例子: + +* 全量训练 DiT 部分:`--trainable_models dit` +* 训练 DiT 部分的 LoRA 模型:`--lora_base_model dit --lora_target_modules "q,k,v,o,ffn.0,ffn.2" --lora_rank 32` +* 训练 DiT 部分的 LoRA 和 Motion Controller 部分(是的,可以训练这种花里胡哨的结构):`--trainable_models motion_controller --lora_base_model dit --lora_target_modules "q,k,v,o,ffn.0,ffn.2" --lora_rank 32` + +此外,由于训练脚本中加载了多个模块(text encoder、dit、vae),保存模型文件时需要移除前缀,例如在全量训练 DiT 部分或者训练 DiT 部分的 LoRA 模型时,请设置 `--remove_prefix_in_ckpt pipe.dit.` + +### 启动训练程序 + +我们构建了一个样例视频数据集,以方便您进行测试,通过以下命令可以下载这个数据集: + +```shell +modelscope download --dataset DiffSynth-Studio/example_video_dataset README.md --local_dir ./data/example_video_dataset +``` + +我们为每一个模型编写了训练命令,请参考本文档开头的表格。 + +请注意,14B 模型全量训练需要8个GPU,每个GPU的显存至少为80G。全量训练这些14B模型时需要安装 `deepspeed`(`pip install deepspeed`),我们编写了建议的[配置文件](./model_training/full/accelerate_config_14B.yaml),这个配置文件会在对应的训练脚本中被加载,这些脚本已在 8*A100 上测试过。 + +训练脚本的默认视频尺寸为 `480*832*81`,提升分辨率将可能导致显存不足,请添加参数 `--use_gradient_checkpointing_offload` 降低显存占用。 diff --git a/examples/wanvideo/model_inference/wan_1.3b_speed_control.py b/examples/wanvideo/model_inference/Wan2.1-1.3b-speedcontrol-v1.py similarity index 100% rename from examples/wanvideo/model_inference/wan_1.3b_speed_control.py rename to 
examples/wanvideo/model_inference/Wan2.1-1.3b-speedcontrol-v1.py diff --git a/examples/wanvideo/model_inference/wan_14b_flf2v.py b/examples/wanvideo/model_inference/Wan2.1-FLF2V-14B-720P.py similarity index 100% rename from examples/wanvideo/model_inference/wan_14b_flf2v.py rename to examples/wanvideo/model_inference/Wan2.1-FLF2V-14B-720P.py diff --git a/examples/wanvideo/model_inference/wan_fun_1.3b_control.py b/examples/wanvideo/model_inference/Wan2.1-Fun-1.3B-Control.py similarity index 100% rename from examples/wanvideo/model_inference/wan_fun_1.3b_control.py rename to examples/wanvideo/model_inference/Wan2.1-Fun-1.3B-Control.py diff --git a/examples/wanvideo/model_inference/wan_fun_1.3b_InP.py b/examples/wanvideo/model_inference/Wan2.1-Fun-1.3B-InP.py similarity index 100% rename from examples/wanvideo/model_inference/wan_fun_1.3b_InP.py rename to examples/wanvideo/model_inference/Wan2.1-Fun-1.3B-InP.py diff --git a/examples/wanvideo/model_inference/wan_fun_14b_control.py b/examples/wanvideo/model_inference/Wan2.1-Fun-14B-Control.py similarity index 100% rename from examples/wanvideo/model_inference/wan_fun_14b_control.py rename to examples/wanvideo/model_inference/Wan2.1-Fun-14B-Control.py diff --git a/examples/wanvideo/model_inference/wan_fun_14b_InP.py b/examples/wanvideo/model_inference/Wan2.1-Fun-14B-InP.py similarity index 100% rename from examples/wanvideo/model_inference/wan_fun_14b_InP.py rename to examples/wanvideo/model_inference/Wan2.1-Fun-14B-InP.py diff --git a/examples/wanvideo/model_inference/wan_fun_v1.1_1.3b_reference_control.py b/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-Control.py similarity index 100% rename from examples/wanvideo/model_inference/wan_fun_v1.1_1.3b_reference_control.py rename to examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-Control.py diff --git a/examples/wanvideo/model_inference/wan_fun_v1.1_14b_reference_control.py b/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-Control.py similarity index 
100% rename from examples/wanvideo/model_inference/wan_fun_v1.1_14b_reference_control.py rename to examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-Control.py diff --git a/examples/wanvideo/model_inference/wan_14b_image_to_video_480p.py b/examples/wanvideo/model_inference/Wan2.1-I2V-14B-480P.py similarity index 100% rename from examples/wanvideo/model_inference/wan_14b_image_to_video_480p.py rename to examples/wanvideo/model_inference/Wan2.1-I2V-14B-480P.py diff --git a/examples/wanvideo/model_inference/wan_14b_image_to_video_720p.py b/examples/wanvideo/model_inference/Wan2.1-I2V-14B-720P.py similarity index 100% rename from examples/wanvideo/model_inference/wan_14b_image_to_video_720p.py rename to examples/wanvideo/model_inference/Wan2.1-I2V-14B-720P.py diff --git a/examples/wanvideo/model_inference/wan_1.3b_text_to_video.py b/examples/wanvideo/model_inference/Wan2.1-T2V-1.3B.py similarity index 100% rename from examples/wanvideo/model_inference/wan_1.3b_text_to_video.py rename to examples/wanvideo/model_inference/Wan2.1-T2V-1.3B.py diff --git a/examples/wanvideo/model_inference/wan_14b_text_to_video.py b/examples/wanvideo/model_inference/Wan2.1-T2V-14B.py similarity index 100% rename from examples/wanvideo/model_inference/wan_14b_text_to_video.py rename to examples/wanvideo/model_inference/Wan2.1-T2V-14B.py diff --git a/examples/wanvideo/model_inference/wan_1.3b_vace.py b/examples/wanvideo/model_inference/Wan2.1-VACE-1.3B-Preview.py similarity index 100% rename from examples/wanvideo/model_inference/wan_1.3b_vace.py rename to examples/wanvideo/model_inference/Wan2.1-VACE-1.3B-Preview.py diff --git a/examples/wanvideo/model_training/full/Wan2.1-1.3b-speedcontrol-v1.sh b/examples/wanvideo/model_training/full/Wan2.1-1.3b-speedcontrol-v1.sh new file mode 100644 index 0000000..e70fd13 --- /dev/null +++ b/examples/wanvideo/model_training/full/Wan2.1-1.3b-speedcontrol-v1.sh @@ -0,0 +1,13 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + 
--dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata_motion_bucket_id.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-1.3B:Wan2.1_VAE.pth,DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1:model.safetensors" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.motion_controller." \ + --output_path "./models/train/Wan2.1-1.3b-speedcontrol-v1_full" \ + --trainable_models "motion_controller" \ + --input_contains_motion_bucket_id \ No newline at end of file diff --git a/examples/wanvideo/model_training/full/Wan2.1-FLF2V-14B-720P.sh b/examples/wanvideo/model_training/full/Wan2.1-FLF2V-14B-720P.sh new file mode 100644 index 0000000..c0591ca --- /dev/null +++ b/examples/wanvideo/model_training/full/Wan2.1-FLF2V-14B-720P.sh @@ -0,0 +1,14 @@ +accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-FLF2V-14B-720P:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-FLF2V-14B-720P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-FLF2V-14B-720P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-FLF2V-14B-720P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-FLF2V-14B-720P_full" \ + --trainable_models "dit" \ + --input_contains_input_image \ + --input_contains_end_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-Control.sh b/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-Control.sh new file mode 100644 index 0000000..499c787 --- /dev/null +++ b/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-Control.sh @@ -0,0 +1,14 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata_control.csv \ + --data_file_keys "video,control_video" \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.1-Fun-1.3B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-1.3B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-1.3B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-1.3B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-Fun-1.3B-Control_full" \ + --trainable_models "dit" \ + --input_contains_control_video \ No newline at end of file diff --git a/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-InP.sh b/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-InP.sh new file mode 100644 index 0000000..1fec876 --- /dev/null +++ b/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-InP.sh @@ -0,0 +1,14 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.1-Fun-1.3B-InP:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-1.3B-InP:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-1.3B-InP:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-1.3B-InP:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-Fun-1.3B-InP_full" \ + --trainable_models "dit" \ + --input_contains_input_image \ + --input_contains_end_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-Control.sh b/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-Control.sh new file mode 100644 index 0000000..2d7272d --- /dev/null +++ b/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-Control.sh @@ -0,0 +1,14 @@ +accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata_control.csv \ + --data_file_keys "video,control_video" \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.1-Fun-14B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-14B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-14B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-14B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-Fun-14B-Control_full" \ + --trainable_models "dit" \ + --input_contains_control_video \ No newline at end of file diff --git a/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-InP.sh b/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-InP.sh new file mode 100644 index 0000000..3463670 --- /dev/null +++ b/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-InP.sh @@ -0,0 +1,14 @@ +accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.1-Fun-14B-InP:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-14B-InP:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-14B-InP:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-14B-InP:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-Fun-14B-InP_full" \ + --trainable_models "dit" \ + --input_contains_input_image \ + --input_contains_end_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-Control.sh b/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-Control.sh new file mode 100644 index 0000000..5acda18 --- /dev/null +++ b/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-Control.sh @@ -0,0 +1,15 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata_reference_control.csv \ + --data_file_keys "video,control_video,reference_image" \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-1.3B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-1.3B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-1.3B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-1.3B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-Fun-V1.1-1.3B-Control_full" \ + --trainable_models "dit" \ + --input_contains_control_video \ + --input_contains_reference_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-Control.sh b/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-Control.sh new file mode 100644 index 0000000..2a63311 --- /dev/null +++ b/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-Control.sh @@ -0,0 +1,15 @@ +accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata_reference_control.csv \ + --data_file_keys "video,control_video,reference_image" \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-14B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-14B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-14B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-14B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-Fun-V1.1-14B-Control_full" \ + --trainable_models "dit" \ + --input_contains_control_video \ + --input_contains_reference_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-480P.sh b/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-480P.sh new file mode 100644 index 0000000..5cea09b --- /dev/null +++ b/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-480P.sh @@ -0,0 +1,13 @@ +accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-I2V-14B-480P:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-I2V-14B-480P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-I2V-14B-480P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-I2V-14B-480P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-I2V-14B-480P_full" \ + --trainable_models "dit" \ + --input_contains_input_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-720P.sh b/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-720P.sh new file mode 100644 index 0000000..4b0ed11 --- /dev/null +++ b/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-720P.sh @@ -0,0 +1,13 @@ +accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-I2V-14B-720P:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-I2V-14B-720P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-I2V-14B-720P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-I2V-14B-720P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-I2V-14B-720P_full" \ + --trainable_models "dit" \ + --input_contains_input_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/full/Wan2.1-T2V-1.3B.sh b/examples/wanvideo/model_training/full/Wan2.1-T2V-1.3B.sh new file mode 100644 index 0000000..e0d6e84 --- /dev/null +++ b/examples/wanvideo/model_training/full/Wan2.1-T2V-1.3B.sh @@ -0,0 +1,12 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-1.3B:Wan2.1_VAE.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." \ + --output_path "./models/train/Wan2.1-T2V-1.3B_full" \ + --trainable_models "dit" \ No newline at end of file diff --git a/examples/wanvideo/model_training/full/Wan2.1-T2V-14B.sh b/examples/wanvideo/model_training/full/Wan2.1-T2V-14B.sh new file mode 100644 index 0000000..ae804b0 --- /dev/null +++ b/examples/wanvideo/model_training/full/Wan2.1-T2V-14B.sh @@ -0,0 +1,12 @@ +accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-14B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-14B:Wan2.1_VAE.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-T2V-14B_full" \ + --trainable_models "dit" \ No newline at end of file diff --git a/examples/wanvideo/model_training/full/accelerate_config_14B.yaml b/examples/wanvideo/model_training/full/accelerate_config_14B.yaml new file mode 100644 index 0000000..3875a9d --- /dev/null +++ b/examples/wanvideo/model_training/full/accelerate_config_14B.yaml @@ -0,0 +1,22 @@ +compute_environment: LOCAL_MACHINE +debug: false +deepspeed_config: + gradient_accumulation_steps: 1 + offload_optimizer_device: cpu + offload_param_device: cpu + zero3_init_flag: false + zero_stage: 2 +distributed_type: DEEPSPEED +downcast_bf16: 'no' +enable_cpu_affinity: false +machine_rank: 0 +main_training_function: main +mixed_precision: bf16 +num_machines: 1 +num_processes: 8 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false diff --git a/examples/wanvideo/model_training/full/run_test.py b/examples/wanvideo/model_training/full/run_test.py new file mode 100644 index 0000000..093becd --- /dev/null +++ b/examples/wanvideo/model_training/full/run_test.py @@ -0,0 +1,38 @@ +import multiprocessing, os + + +def run_task(scripts, thread_id, thread_num): + for script_id, script in enumerate(scripts): + if script_id % thread_num == thread_id: + log_file_name = script.replace("/", "_") + ".txt" + cmd = f"CUDA_VISIBLE_DEVICES={thread_id} bash {script} > data/log/{log_file_name} 2>&1" + os.makedirs("data/log", exist_ok=True) + print(cmd, flush=True) + os.system(cmd) + + +if __name__ == "__main__": + # 1.3B + scripts = [] + for file_name in os.listdir("examples/wanvideo/model_training/full"): + if file_name != "run_test.py" and "14B" not in file_name: + scripts.append(os.path.join("examples/wanvideo/model_training/full", file_name)) + + processes = [multiprocessing.Process(target=run_task, args=(scripts, i, 8)) for i in range(8)] + for p in processes: + p.start() + for p in processes: + p.join() + + # 14B + 
scripts = [] + for file_name in os.listdir("examples/wanvideo/model_training/full"): + if file_name != "run_test.py" and "14B" in file_name: + scripts.append(os.path.join("examples/wanvideo/model_training/full", file_name)) + for script in scripts: + log_file_name = script.replace("/", "_") + ".txt" + cmd = f"bash {script} > data/log/{log_file_name} 2>&1" + print(cmd, flush=True) + os.system(cmd) + + print("Done!") \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/Wan2.1-1.3b-speedcontrol-v1.sh b/examples/wanvideo/model_training/lora/Wan2.1-1.3b-speedcontrol-v1.sh new file mode 100644 index 0000000..4fb08bd --- /dev/null +++ b/examples/wanvideo/model_training/lora/Wan2.1-1.3b-speedcontrol-v1.sh @@ -0,0 +1,15 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata_motion_bucket_id.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-1.3B:Wan2.1_VAE.pth,DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1:model.safetensors" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-1.3b-speedcontrol-v1_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ + --lora_rank 32 \ + --input_contains_motion_bucket_id \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/Wan2.1-FLF2V-14B-720P.sh b/examples/wanvideo/model_training/lora/Wan2.1-FLF2V-14B-720P.sh new file mode 100644 index 0000000..8b98631 --- /dev/null +++ b/examples/wanvideo/model_training/lora/Wan2.1-FLF2V-14B-720P.sh @@ -0,0 +1,16 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-FLF2V-14B-720P:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-FLF2V-14B-720P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-FLF2V-14B-720P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-FLF2V-14B-720P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-FLF2V-14B-720P_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ + --lora_rank 32 \ + --input_contains_input_image \ + --input_contains_end_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-Control.sh b/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-Control.sh new file mode 100644 index 0000000..72522f2 --- /dev/null +++ b/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-Control.sh @@ -0,0 +1,16 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata_control.csv \ + --data_file_keys "video,control_video" \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.1-Fun-1.3B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-1.3B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-1.3B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-1.3B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-Fun-1.3B-Control_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ + --lora_rank 32 \ + --input_contains_control_video \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-InP.sh b/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-InP.sh new file mode 100644 index 0000000..182fccc --- /dev/null +++ b/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-InP.sh @@ -0,0 +1,16 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.1-Fun-1.3B-InP:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-1.3B-InP:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-1.3B-InP:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-1.3B-InP:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-Fun-1.3B-InP_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ + --lora_rank 32 \ + --input_contains_input_image \ + --input_contains_end_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-Control.sh b/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-Control.sh new file mode 100644 index 0000000..a45203c --- /dev/null +++ b/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-Control.sh @@ -0,0 +1,16 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata_control.csv \ + --data_file_keys "video,control_video" \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.1-Fun-14B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-14B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-14B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-14B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-Fun-14B-Control_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ + --lora_rank 32 \ + --input_contains_control_video \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-InP.sh b/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-InP.sh new file mode 100644 index 0000000..5392658 --- /dev/null +++ b/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-InP.sh @@ -0,0 +1,16 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.1-Fun-14B-InP:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-14B-InP:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-14B-InP:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-14B-InP:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-Fun-14B-InP_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ + --lora_rank 32 \ + --input_contains_input_image \ + --input_contains_end_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control.sh b/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control.sh new file mode 100644 index 0000000..a342981 --- /dev/null +++ b/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control.sh @@ -0,0 +1,17 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata_reference_control.csv \ + --data_file_keys "video,control_video,reference_image" \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-1.3B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-1.3B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-1.3B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-1.3B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-Fun-V1.1-1.3B-Control_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ + --lora_rank 32 \ + --input_contains_control_video \ + --input_contains_reference_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-Control.sh b/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-Control.sh new file mode 100644 index 0000000..a902522 --- /dev/null +++ b/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-Control.sh @@ -0,0 +1,17 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata_reference_control.csv \ + --data_file_keys "video,control_video,reference_image" \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-14B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-14B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-14B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-14B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-Fun-V1.1-14B-Control_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ + --lora_rank 32 \ + --input_contains_control_video \ + --input_contains_reference_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-480P.sh b/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-480P.sh new file mode 100644 index 0000000..3c085fa --- /dev/null +++ b/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-480P.sh @@ -0,0 +1,15 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-I2V-14B-480P:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-I2V-14B-480P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-I2V-14B-480P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-I2V-14B-480P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-I2V-14B-480P_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ + --lora_rank 32 \ + --input_contains_input_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-720P.sh b/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-720P.sh new file mode 100644 index 0000000..6193df7 --- /dev/null +++ b/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-720P.sh @@ -0,0 +1,15 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-I2V-14B-720P:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-I2V-14B-720P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-I2V-14B-720P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-I2V-14B-720P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-I2V-14B-720P_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ + --lora_rank 32 \ + --input_contains_input_image \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/Wan2.1-T2V-1.3B.sh b/examples/wanvideo/model_training/lora/Wan2.1-T2V-1.3B.sh new file mode 100644 index 0000000..d16a287 --- /dev/null +++ b/examples/wanvideo/model_training/lora/Wan2.1-T2V-1.3B.sh @@ -0,0 +1,14 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-1.3B:Wan2.1_VAE.pth" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." \ + --output_path "./models/train/Wan2.1-T2V-1.3B_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ + --lora_rank 32 \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/Wan2.1-T2V-14B.sh b/examples/wanvideo/model_training/lora/Wan2.1-T2V-14B.sh new file mode 100644 index 0000000..1fb55ac --- /dev/null +++ b/examples/wanvideo/model_training/lora/Wan2.1-T2V-14B.sh @@ -0,0 +1,14 @@ +accelerate launch examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-14B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-14B:Wan2.1_VAE.pth" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.1-T2V-14B_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \ + --lora_rank 32 \ No newline at end of file diff --git a/examples/wanvideo/model_training/lora/run_test.py b/examples/wanvideo/model_training/lora/run_test.py new file mode 100644 index 0000000..ec0f9e2 --- /dev/null +++ b/examples/wanvideo/model_training/lora/run_test.py @@ -0,0 +1,25 @@ +import multiprocessing, os + + +def run_task(scripts, thread_id, thread_num): + for script_id, script in enumerate(scripts): + if script_id % thread_num == thread_id: + log_file_name = script.replace("/", "_") + ".txt" + cmd = f"CUDA_VISIBLE_DEVICES={thread_id} bash {script} > data/log/{log_file_name} 2>&1" + os.makedirs("data/log", exist_ok=True) + print(cmd, flush=True) + os.system(cmd) + + +if __name__ == "__main__": + scripts = [] + for file_name in os.listdir("examples/wanvideo/model_training/lora"): + if file_name != "run_test.py": + scripts.append(os.path.join("examples/wanvideo/model_training/lora", file_name)) + + processes = [multiprocessing.Process(target=run_task, args=(scripts, i, 8)) for i in range(8)] + for p in processes: + p.start() + for p in processes: + p.join() + print("Done!") \ No newline at end of file diff --git a/examples/wanvideo/model_training/train.py b/examples/wanvideo/model_training/train.py new file mode 100644 index 0000000..cbace5a --- /dev/null +++ b/examples/wanvideo/model_training/train.py @@ -0,0 +1,129 @@ +import torch, os, json +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from diffsynth.trainers.utils import DiffusionTrainingModule, VideoDataset, launch_training_task, wan_parser +os.environ["TOKENIZERS_PARALLELISM"] = "false" + + + +class WanTrainingModule(DiffusionTrainingModule): + def __init__( + self, + model_paths=None, model_id_with_origin_paths=None, + trainable_models=None, + lora_base_model=None, lora_target_modules="q,k,v,o,ffn.0,ffn.2", lora_rank=32, + 
use_gradient_checkpointing=True, + use_gradient_checkpointing_offload=False, + # Extra inputs + input_contains_input_image=False, + input_contains_end_image=False, + input_contains_control_video=False, + input_contains_reference_image=False, + input_contains_vace_video=False, + input_contains_vace_reference_image=False, + input_contains_motion_bucket_id=False, + ): + super().__init__() + # Load models + model_configs = [] + if model_paths is not None: + model_paths = json.loads(model_paths) + model_configs += [ModelConfig(path=path) for path in model_paths] + if model_id_with_origin_paths is not None: + model_id_with_origin_paths = model_id_with_origin_paths.split(",") + model_configs += [ModelConfig(model_id=i.split(":")[0], origin_file_pattern=i.split(":")[1]) for i in model_id_with_origin_paths] + self.pipe = WanVideoPipeline.from_pretrained(torch_dtype=torch.bfloat16, device="cpu", model_configs=model_configs) + + # Reset training scheduler + self.pipe.scheduler.set_timesteps(1000, training=True) + + # Freeze untrainable models + self.pipe.freeze_except([] if trainable_models is None else trainable_models.split(",")) + + # Add LoRA to the base models + if lora_base_model is not None: + model = self.add_lora_to_model( + getattr(self.pipe, lora_base_model), + target_modules=lora_target_modules.split(","), + lora_rank=lora_rank + ) + setattr(self.pipe, lora_base_model, model) + + # Store other configs + self.use_gradient_checkpointing = use_gradient_checkpointing + self.use_gradient_checkpointing_offload = use_gradient_checkpointing_offload + self.input_contains_input_image = input_contains_input_image + self.input_contains_end_image = input_contains_end_image + self.input_contains_control_video = input_contains_control_video + self.input_contains_reference_image = input_contains_reference_image + self.input_contains_vace_video = input_contains_vace_video + self.input_contains_vace_reference_image = input_contains_vace_reference_image + 
self.input_contains_motion_bucket_id = input_contains_motion_bucket_id + + + def forward_preprocess(self, data): + # CFG-sensitive parameters + inputs_posi = {"prompt": data["prompt"]} + inputs_nega = {} + + # CFG-unsensitive parameters + inputs_shared = { + # Assume you are using this pipeline for inference, + # please fill in the input parameters. + "input_video": data["video"], + "height": data["video"][0].size[1], + "width": data["video"][0].size[0], + "num_frames": len(data["video"]), + # Please do not modify the following parameters + # unless you clearly know what this will cause. + "cfg_scale": 1, + "tiled": False, + "rand_device": self.pipe.device, + "use_gradient_checkpointing": self.use_gradient_checkpointing, + "use_gradient_checkpointing_offload": self.use_gradient_checkpointing_offload, + "cfg_merge": False, + "vace_scale": 1, + } + + # Extra inputs + if self.input_contains_input_image: inputs_shared["input_image"] = data["video"][0] + if self.input_contains_end_image: inputs_shared["end_image"] = data["video"][-1] + if self.input_contains_control_video: inputs_shared["control_video"] = data["control_video"] + if self.input_contains_reference_image: inputs_shared["reference_image"] = data["reference_image"] + if self.input_contains_vace_video: inputs_shared["vace_video"] = data["vace_video"] + if self.input_contains_vace_reference_image: inputs_shared["vace_reference_image"] = data["vace_reference_image"] + if self.input_contains_motion_bucket_id: inputs_shared["motion_bucket_id"] = data["motion_bucket_id"] + + # Pipeline units will automatically process the input parameters. 
+ for unit in self.pipe.units: + inputs_shared, inputs_posi, inputs_nega = self.pipe.unit_runner(unit, self.pipe, inputs_shared, inputs_posi, inputs_nega) + return {**inputs_shared, **inputs_posi} + + + def forward(self, data, inputs=None): + if inputs is None: inputs = self.forward_preprocess(data) + models = {name: getattr(self.pipe, name) for name in self.pipe.in_iteration_models} + loss = self.pipe.training_loss(**models, **inputs) + return loss + + +if __name__ == "__main__": + parser = wan_parser() + args = parser.parse_args() + dataset = VideoDataset(args=args) + model = WanTrainingModule( + model_paths=args.model_paths, + model_id_with_origin_paths=args.model_id_with_origin_paths, + trainable_models=args.trainable_models, + lora_base_model=args.lora_base_model, + lora_target_modules=args.lora_target_modules, + lora_rank=args.lora_rank, + use_gradient_checkpointing_offload=args.use_gradient_checkpointing_offload, + input_contains_input_image=args.input_contains_input_image, + input_contains_end_image=args.input_contains_end_image, + input_contains_control_video=args.input_contains_control_video, + input_contains_reference_image=args.input_contains_reference_image, + input_contains_vace_video=args.input_contains_vace_video, + input_contains_vace_reference_image=args.input_contains_vace_reference_image, + input_contains_motion_bucket_id=args.input_contains_motion_bucket_id, + ) + launch_training_task(model, dataset, args=args) diff --git a/examples/wanvideo/model_training/train_i2v.py b/examples/wanvideo/model_training/train_i2v.py deleted file mode 100644 index 1c5c757..0000000 --- a/examples/wanvideo/model_training/train_i2v.py +++ /dev/null @@ -1,54 +0,0 @@ -import torch, os, json -from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig -from diffsynth.trainers.utils import DiffusionTrainingModule, VideoDataset, launch_training_task, wan_parser -os.environ["TOKENIZERS_PARALLELISM"] = "false" - - -class 
WanTrainingModule(DiffusionTrainingModule): - def __init__(self, model_paths, task="train_lora", lora_target_modules="q,k,v,o,ffn.0,ffn.2", lora_rank=32): - super().__init__() - self.pipe = WanVideoPipeline.from_pretrained( - torch_dtype=torch.bfloat16, - device="cpu", - model_configs=[ModelConfig(path=path) for path in model_paths], - ) - self.pipe.scheduler.set_timesteps(1000, training=True) - if task == "train_lora": - self.pipe.freeze_except([]) - self.pipe.dit = self.add_lora_to_model(self.pipe.dit, target_modules=lora_target_modules.split(","), lora_rank=lora_rank) - else: - self.pipe.freeze_except(["dit"]) - - def forward_preprocess(self, data): - inputs_posi = {"prompt": data["prompt"]} - inputs_nega = {} - inputs_shared = { - "input_image": data["video"][0], - "input_video": data["video"], - "height": data["video"][0].size[1], - "width": data["video"][0].size[0], - "num_frames": len(data["video"]), - # Please do not modify the following parameters. - "cfg_scale": 1, - "tiled": False, - "rand_device": self.pipe.device, - "use_gradient_checkpointing": True, - "cfg_merge": False, - } - for unit in self.pipe.units: - inputs_shared, inputs_posi, inputs_nega = self.pipe.unit_runner(unit, self.pipe, inputs_shared, inputs_posi, inputs_nega) - return {**inputs_shared, **inputs_posi} - - def forward(self, data): - inputs = self.forward_preprocess(data) - models = {name: getattr(self.pipe, name) for name in self.pipe.in_iteration_models} - loss = self.pipe.training_loss(**models, **inputs) - return loss - - -if __name__ == "__main__": - parser = wan_parser() - args = parser.parse_args() - dataset = VideoDataset(args=args) - model = WanTrainingModule(json.loads(args.model_paths), task=args.task, lora_target_modules=args.lora_target_modules, lora_rank=args.lora_rank) - launch_training_task(model, dataset, args=args) diff --git a/examples/wanvideo/model_training/train_t2v.py b/examples/wanvideo/model_training/train_t2v.py deleted file mode 100644 index 50b49ef..0000000 
--- a/examples/wanvideo/model_training/train_t2v.py +++ /dev/null @@ -1,53 +0,0 @@ -import torch, os, json -from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig -from diffsynth.trainers.utils import DiffusionTrainingModule, VideoDataset, launch_training_task, wan_parser -os.environ["TOKENIZERS_PARALLELISM"] = "false" - - -class WanTrainingModule(DiffusionTrainingModule): - def __init__(self, model_paths, task="train_lora", lora_target_modules="q,k,v,o,ffn.0,ffn.2", lora_rank=32): - super().__init__() - self.pipe = WanVideoPipeline.from_pretrained( - torch_dtype=torch.bfloat16, - device="cpu", - model_configs=[ModelConfig(path=path) for path in model_paths], - ) - self.pipe.scheduler.set_timesteps(1000, training=True) - if task == "train_lora": - self.pipe.freeze_except([]) - self.pipe.dit = self.add_lora_to_model(self.pipe.dit, target_modules=lora_target_modules.split(","), lora_rank=lora_rank) - else: - self.pipe.freeze_except(["dit"]) - - def forward_preprocess(self, data): - inputs_posi = {"prompt": data["prompt"]} - inputs_nega = {} - inputs_shared = { - "input_video": data["video"], - "height": data["video"][0].size[1], - "width": data["video"][0].size[0], - "num_frames": len(data["video"]), - # Please do not modify the following parameters. 
- "cfg_scale": 1, - "tiled": False, - "rand_device": self.pipe.device, - "use_gradient_checkpointing": True, - "cfg_merge": False, - } - for unit in self.pipe.units: - inputs_shared, inputs_posi, inputs_nega = self.pipe.unit_runner(unit, self.pipe, inputs_shared, inputs_posi, inputs_nega) - return {**inputs_shared, **inputs_posi} - - def forward(self, data): - inputs = self.forward_preprocess(data) - models = {name: getattr(self.pipe, name) for name in self.pipe.in_iteration_models} - loss = self.pipe.training_loss(**models, **inputs) - return loss - - -if __name__ == "__main__": - parser = wan_parser() - args = parser.parse_args() - dataset = VideoDataset(args=args) - model = WanTrainingModule(json.loads(args.model_paths), task=args.task, lora_target_modules=args.lora_target_modules, lora_rank=args.lora_rank) - launch_training_task(model, dataset, args=args) diff --git a/examples/wanvideo/model_training/validate_full/Wan2.1-1.3b-speedcontrol-v1.py b/examples/wanvideo/model_training/validate_full/Wan2.1-1.3b-speedcontrol-v1.py new file mode 100644 index 0000000..124749a --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/Wan2.1-1.3b-speedcontrol-v1.py @@ -0,0 +1,28 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1", origin_file_pattern="model.safetensors", offload_device="cpu"), + ], 
+) +state_dict = load_state_dict("models/train/Wan2.1-1.3b-speedcontrol-v1_full/epoch-1.safetensors") +pipe.motion_controller.load_state_dict(state_dict) +pipe.enable_vram_management() + +# Text-to-video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + seed=1, tiled=True, + motion_bucket_id=50 +) +save_video(video, "video_Wan2.1-1.3b-speedcontrol-v1.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_full/Wan2.1-FLF2V-14B-720P.py b/examples/wanvideo/model_training/validate_full/Wan2.1-FLF2V-14B-720P.py new file mode 100644 index 0000000..41a67ed --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/Wan2.1-FLF2V-14B-720P.py @@ -0,0 +1,33 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +state_dict = load_state_dict("models/train/Wan2.1-FLF2V-14B-720P_full/epoch-1.safetensors") +pipe.dit.load_state_dict(state_dict) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1.mp4", 
height=480, width=832) + +# First and last frame to video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=video[0], + end_image=video[80], + seed=0, tiled=True, + sigma_shift=16, +) +save_video(video, "video_Wan2.1-FLF2V-14B-720P.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-Control.py b/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-Control.py new file mode 100644 index 0000000..6726e9c --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-Control.py @@ -0,0 +1,32 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +state_dict = load_state_dict("models/train/Wan2.1-Fun-1.3B-Control_full/epoch-1.safetensors") +pipe.dit.load_state_dict(state_dict) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832) +video = [video[i] for i in range(81)] + +# Control video +video = pipe( + 
prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + control_video=video, + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-Fun-1.3B-Control.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-InP.py b/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-InP.py new file mode 100644 index 0000000..3e1e6f3 --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-InP.py @@ -0,0 +1,31 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +state_dict = load_state_dict("models/train/Wan2.1-Fun-1.3B-InP_full/epoch-1.safetensors") +pipe.dit.load_state_dict(state_dict) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832) + +# First and last frame to video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + 
negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=video[0], end_image=video[80], + seed=0, tiled=True +) +save_video(video, "video_Wan2.1-Fun-1.3B-InP.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-Control.py b/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-Control.py new file mode 100644 index 0000000..08b0acb --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-Control.py @@ -0,0 +1,32 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +state_dict = load_state_dict("models/train/Wan2.1-Fun-14B-Control_full/epoch-1.safetensors") +pipe.dit.load_state_dict(state_dict) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832) +video = [video[i] for i in range(81)] + +# Control video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + 
negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + control_video=video, + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-Fun-14B-Control.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-InP.py b/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-InP.py new file mode 100644 index 0000000..d7e39d7 --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-InP.py @@ -0,0 +1,31 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +state_dict = load_state_dict("models/train/Wan2.1-Fun-14B-InP_full/epoch-1.safetensors") +pipe.dit.load_state_dict(state_dict) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832) + +# First and last frame to video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=video[0], 
end_image=video[80], + seed=0, tiled=True +) +save_video(video, "video_Wan2.1-Fun-14B-InP.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control.py b/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control.py new file mode 100644 index 0000000..6497e1b --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control.py @@ -0,0 +1,33 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +state_dict = load_state_dict("models/train/Wan2.1-Fun-V1.1-1.3B-Control_full/epoch-1.safetensors") +pipe.dit.load_state_dict(state_dict) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832) +video = [video[i] for i in range(81)] +reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0] + +# Control video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + 
negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + control_video=video, reference_image=reference_image, + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-Fun-V1.1-1.3B-Control.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control.py b/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control.py new file mode 100644 index 0000000..0dd2516 --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control.py @@ -0,0 +1,33 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +state_dict = load_state_dict("models/train/Wan2.1-Fun-V1.1-14B-Control_full/epoch-1.safetensors") +pipe.dit.load_state_dict(state_dict) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832) +video = [video[i] for i in range(81)] +reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0] + +# Control video +video = pipe( + prompt="from 
sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + control_video=video, reference_image=reference_image, + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-Fun-V1.1-14B-Control.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-480P.py b/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-480P.py new file mode 100644 index 0000000..c1c8615 --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-480P.py @@ -0,0 +1,30 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +state_dict = load_state_dict("models/train/Wan2.1-I2V-14B-480P_full/epoch-1.safetensors") +pipe.dit.load_state_dict(state_dict) +pipe.enable_vram_management() + +input_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0] + +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + 
negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=input_image, + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-I2V-14B-480P.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-720P.py b/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-720P.py new file mode 100644 index 0000000..a8610f3 --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-720P.py @@ -0,0 +1,30 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +state_dict = load_state_dict("models/train/Wan2.1-I2V-14B-720P_full/epoch-1.safetensors") +pipe.dit.load_state_dict(state_dict) +pipe.enable_vram_management() + +input_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0] + +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=input_image, + 
seed=1, tiled=True +) +save_video(video, "video_Wan2.1-I2V-14B-720P.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-1.3B.py b/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-1.3B.py new file mode 100644 index 0000000..1420514 --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-1.3B.py @@ -0,0 +1,25 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ], +) +state_dict = load_state_dict("models/train/Wan2.1-T2V-1.3B_full/epoch-1.safetensors") +pipe.dit.load_state_dict(state_dict) +pipe.enable_vram_management() + +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-T2V-1.3B.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-14B.py b/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-14B.py new file mode 100644 index 0000000..a0107ae --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-14B.py @@ -0,0 +1,25 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, 
ModelConfig + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ], +) +state_dict = load_state_dict("models/train/Wan2.1-T2V-14B_full/epoch-1.safetensors") +pipe.dit.load_state_dict(state_dict) +pipe.enable_vram_management() + +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-T2V-14B.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_full/run_test.py b/examples/wanvideo/model_training/validate_full/run_test.py new file mode 100644 index 0000000..a4e3203 --- /dev/null +++ b/examples/wanvideo/model_training/validate_full/run_test.py @@ -0,0 +1,25 @@ +import multiprocessing, os + + +def run_task(scripts, thread_id, thread_num): + for script_id, script in enumerate(scripts): + if script_id % thread_num == thread_id: + log_file_name = script.replace("/", "_") + ".txt" + cmd = f"CUDA_VISIBLE_DEVICES={thread_id} python -u {script} > data/log/{log_file_name} 2>&1" + os.makedirs("data/log", exist_ok=True) + print(cmd, flush=True) + os.system(cmd) + + +if __name__ == "__main__": + scripts = [] + for file_name in os.listdir("examples/wanvideo/model_training/validate_full"): + if file_name != "run_test.py": + scripts.append(os.path.join("examples/wanvideo/model_training/validate_full", file_name)) + + processes = [multiprocessing.Process(target=run_task, args=(scripts, i, 8)) for i in 
range(8)] + for p in processes: + p.start() + for p in processes: + p.join() + print("Done!") \ No newline at end of file diff --git a/examples/wanvideo/model_training/validate_lora/Wan2.1-1.3b-speedcontrol-v1.py b/examples/wanvideo/model_training/validate_lora/Wan2.1-1.3b-speedcontrol-v1.py new file mode 100644 index 0000000..167b871 --- /dev/null +++ b/examples/wanvideo/model_training/validate_lora/Wan2.1-1.3b-speedcontrol-v1.py @@ -0,0 +1,27 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData, load_state_dict +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1", origin_file_pattern="model.safetensors", offload_device="cpu"), + ], +) +pipe.load_lora(pipe.dit, "models/train/Wan2.1-1.3b-speedcontrol-v1_lora/epoch-4.safetensors", alpha=1) +pipe.enable_vram_management() + +# Text-to-video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + seed=1, tiled=True, + motion_bucket_id=50 +) +save_video(video, "video_Wan2.1-1.3b-speedcontrol-v1.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_lora/Wan2.1-FLF2V-14B-720P.py b/examples/wanvideo/model_training/validate_lora/Wan2.1-FLF2V-14B-720P.py new file mode 100644 index 0000000..cd68f0e --- /dev/null +++ 
b/examples/wanvideo/model_training/validate_lora/Wan2.1-FLF2V-14B-720P.py @@ -0,0 +1,32 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.load_lora(pipe.dit, "models/train/Wan2.1-FLF2V-14B-720P_lora/epoch-4.safetensors", alpha=1) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832) + +# First and last frame to video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=video[0], + end_image=video[80], + seed=0, tiled=True, + sigma_shift=16, +) +save_video(video, "video_Wan2.1-FLF2V-14B-720P.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-Control.py b/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-Control.py new file mode 100644 index 0000000..7270c38 --- /dev/null +++ b/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-Control.py @@ -0,0 +1,31 @@ +import torch +from PIL import Image +from diffsynth import 
save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-1.3B-Control_lora/epoch-4.safetensors", alpha=1) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832) +video = [video[i] for i in range(81)] + +# Control video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + control_video=video, + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-Fun-1.3B-Control.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-InP.py b/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-InP.py new file mode 100644 index 0000000..c904dfa --- /dev/null +++ b/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-InP.py @@ -0,0 +1,30 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = 
WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-1.3B-InP_lora/epoch-4.safetensors", alpha=1) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832) + +# First and last frame to video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=video[0], end_image=video[80], + seed=0, tiled=True +) +save_video(video, "video_Wan2.1-Fun-1.3B-InP.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-Control.py b/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-Control.py new file mode 100644 index 0000000..8631d05 --- /dev/null +++ b/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-Control.py @@ -0,0 +1,31 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", 
origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-14B-Control_lora/epoch-4.safetensors", alpha=1) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832) +video = [video[i] for i in range(81)] + +# Control video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + control_video=video, + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-Fun-14B-Control.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-InP.py b/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-InP.py new file mode 100644 index 0000000..e020aac --- /dev/null +++ b/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-InP.py @@ -0,0 +1,30 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", 
offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-14B-InP_lora/epoch-4.safetensors", alpha=1) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832) + +# First and last frame to video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=video[0], end_image=video[80], + seed=0, tiled=True +) +save_video(video, "video_Wan2.1-Fun-14B-InP.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control.py b/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control.py new file mode 100644 index 0000000..ebcfd2f --- /dev/null +++ b/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control.py @@ -0,0 +1,32 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + 
ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-V1.1-1.3B-Control_lora/epoch-4.safetensors", alpha=1) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832) +video = [video[i] for i in range(81)] +reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0] + +# Control video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + control_video=video, reference_image=reference_image, + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-Fun-V1.1-1.3B-Control.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control.py b/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control.py new file mode 100644 index 0000000..6b11098 --- /dev/null +++ b/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control.py @@ -0,0 +1,32 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), 
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-V1.1-14B-Control_lora/epoch-4.safetensors", alpha=1) +pipe.enable_vram_management() + +video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832) +video = [video[i] for i in range(81)] +reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0] + +# Control video +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + control_video=video, reference_image=reference_image, + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-Fun-V1.1-14B-Control.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-480P.py b/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-480P.py new file mode 100644 index 0000000..1687e36 --- /dev/null +++ b/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-480P.py @@ -0,0 +1,29 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + 
ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.load_lora(pipe.dit, "models/train/Wan2.1-I2V-14B-480P_lora/epoch-4.safetensors", alpha=1) +pipe.enable_vram_management() + +input_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0] + +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=input_image, + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-I2V-14B-480P.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-720P.py b/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-720P.py new file mode 100644 index 0000000..9893e26 --- /dev/null +++ b/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-720P.py @@ -0,0 +1,29 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig +from modelscope import dataset_snapshot_download + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"), + ], +) +pipe.load_lora(pipe.dit, 
"models/train/Wan2.1-I2V-14B-720P_lora/epoch-4.safetensors", alpha=1) +pipe.enable_vram_management() + +input_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0] + +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + input_image=input_image, + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-I2V-14B-720P.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-1.3B.py b/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-1.3B.py new file mode 100644 index 0000000..7cb6c02 --- /dev/null +++ b/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-1.3B.py @@ -0,0 +1,24 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ], +) +pipe.load_lora(pipe.dit, "models/train/Wan2.1-T2V-1.3B_lora/epoch-4.safetensors", alpha=1) +pipe.enable_vram_management() + +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-T2V-1.3B.mp4", fps=15, quality=5) diff --git 
a/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-14B.py b/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-14B.py new file mode 100644 index 0000000..3b66a49 --- /dev/null +++ b/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-14B.py @@ -0,0 +1,24 @@ +import torch +from PIL import Image +from diffsynth import save_video, VideoData +from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig + + +pipe = WanVideoPipeline.from_pretrained( + torch_dtype=torch.bfloat16, + device="cuda", + model_configs=[ + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"), + ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"), + ], +) +pipe.load_lora(pipe.dit, "models/train/Wan2.1-T2V-14B_lora/epoch-4.safetensors", alpha=1) +pipe.enable_vram_management() + +video = pipe( + prompt="from sunset to night, a small town, light, house, river", + negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + seed=1, tiled=True +) +save_video(video, "video_Wan2.1-T2V-14B.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/model_training/validate_lora/run_test.py b/examples/wanvideo/model_training/validate_lora/run_test.py new file mode 100644 index 0000000..367ee9d --- /dev/null +++ b/examples/wanvideo/model_training/validate_lora/run_test.py @@ -0,0 +1,25 @@ +import multiprocessing, os + + +def run_task(scripts, thread_id, thread_num): + for script_id, script in enumerate(scripts): + if script_id % thread_num == thread_id: + log_file_name = script.replace("/", "_") + ".txt" + cmd = f"CUDA_VISIBLE_DEVICES={thread_id} python -u {script} > data/log/{log_file_name} 2>&1" + os.makedirs("data/log", 
exist_ok=True) + print(cmd, flush=True) + os.system(cmd) + + +if __name__ == "__main__": + scripts = [] + for file_name in os.listdir("examples/wanvideo/model_training/validate_lora"): + if file_name != "run_test.py": + scripts.append(os.path.join("examples/wanvideo/model_training/validate_lora", file_name)) + + processes = [multiprocessing.Process(target=run_task, args=(scripts, i, 8)) for i in range(8)] + for p in processes: + p.start() + for p in processes: + p.join() + print("Done!") \ No newline at end of file diff --git a/examples/wanvideo/train_wan_t2v.py b/examples/wanvideo/train_wan_t2v.py deleted file mode 100644 index cd10096..0000000 --- a/examples/wanvideo/train_wan_t2v.py +++ /dev/null @@ -1,593 +0,0 @@ -import torch, os, imageio, argparse -from torchvision.transforms import v2 -from einops import rearrange -import lightning as pl -import pandas as pd -from diffsynth import WanVideoPipeline, ModelManager, load_state_dict -from peft import LoraConfig, inject_adapter_in_model -import torchvision -from PIL import Image -import numpy as np - - - -class TextVideoDataset(torch.utils.data.Dataset): - def __init__(self, base_path, metadata_path, max_num_frames=81, frame_interval=1, num_frames=81, height=480, width=832, is_i2v=False): - metadata = pd.read_csv(metadata_path) - self.path = [os.path.join(base_path, "train", file_name) for file_name in metadata["file_name"]] - self.text = metadata["text"].to_list() - - self.max_num_frames = max_num_frames - self.frame_interval = frame_interval - self.num_frames = num_frames - self.height = height - self.width = width - self.is_i2v = is_i2v - - self.frame_process = v2.Compose([ - v2.CenterCrop(size=(height, width)), - v2.Resize(size=(height, width), antialias=True), - v2.ToTensor(), - v2.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), - ]) - - - def crop_and_resize(self, image): - width, height = image.size - scale = max(self.width / width, self.height / height) - image = 
torchvision.transforms.functional.resize( - image, - (round(height*scale), round(width*scale)), - interpolation=torchvision.transforms.InterpolationMode.BILINEAR - ) - return image - - - def load_frames_using_imageio(self, file_path, max_num_frames, start_frame_id, interval, num_frames, frame_process): - reader = imageio.get_reader(file_path) - if reader.count_frames() < max_num_frames or reader.count_frames() - 1 < start_frame_id + (num_frames - 1) * interval: - reader.close() - return None - - frames = [] - first_frame = None - for frame_id in range(num_frames): - frame = reader.get_data(start_frame_id + frame_id * interval) - frame = Image.fromarray(frame) - frame = self.crop_and_resize(frame) - if first_frame is None: - first_frame = frame - frame = frame_process(frame) - frames.append(frame) - reader.close() - - frames = torch.stack(frames, dim=0) - frames = rearrange(frames, "T C H W -> C T H W") - - first_frame = v2.functional.center_crop(first_frame, output_size=(self.height, self.width)) - first_frame = np.array(first_frame) - - if self.is_i2v: - return frames, first_frame - else: - return frames - - - def load_video(self, file_path): - start_frame_id = torch.randint(0, self.max_num_frames - (self.num_frames - 1) * self.frame_interval, (1,))[0] - frames = self.load_frames_using_imageio(file_path, self.max_num_frames, start_frame_id, self.frame_interval, self.num_frames, self.frame_process) - return frames - - - def is_image(self, file_path): - file_ext_name = file_path.split(".")[-1] - if file_ext_name.lower() in ["jpg", "jpeg", "png", "webp"]: - return True - return False - - - def load_image(self, file_path): - frame = Image.open(file_path).convert("RGB") - frame = self.crop_and_resize(frame) - first_frame = frame - frame = self.frame_process(frame) - frame = rearrange(frame, "C H W -> C 1 H W") - return frame - - - def __getitem__(self, data_id): - text = self.text[data_id] - path = self.path[data_id] - if self.is_image(path): - if self.is_i2v: - raise 
ValueError(f"{path} is not a video. I2V model doesn't support image-to-image training.") - video = self.load_image(path) - else: - video = self.load_video(path) - if self.is_i2v: - video, first_frame = video - data = {"text": text, "video": video, "path": path, "first_frame": first_frame} - else: - data = {"text": text, "video": video, "path": path} - return data - - - def __len__(self): - return len(self.path) - - - -class LightningModelForDataProcess(pl.LightningModule): - def __init__(self, text_encoder_path, vae_path, image_encoder_path=None, tiled=False, tile_size=(34, 34), tile_stride=(18, 16)): - super().__init__() - model_path = [text_encoder_path, vae_path] - if image_encoder_path is not None: - model_path.append(image_encoder_path) - model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cpu") - model_manager.load_models(model_path) - self.pipe = WanVideoPipeline.from_model_manager(model_manager) - - self.tiler_kwargs = {"tiled": tiled, "tile_size": tile_size, "tile_stride": tile_stride} - - def test_step(self, batch, batch_idx): - text, video, path = batch["text"][0], batch["video"], batch["path"][0] - - self.pipe.device = self.device - if video is not None: - # prompt - prompt_emb = self.pipe.encode_prompt(text) - # video - video = video.to(dtype=self.pipe.torch_dtype, device=self.pipe.device) - latents = self.pipe.encode_video(video, **self.tiler_kwargs)[0] - # image - if "first_frame" in batch: - first_frame = Image.fromarray(batch["first_frame"][0].cpu().numpy()) - _, _, num_frames, height, width = video.shape - image_emb = self.pipe.encode_image(first_frame, None, num_frames, height, width) - else: - image_emb = {} - data = {"latents": latents, "prompt_emb": prompt_emb, "image_emb": image_emb} - torch.save(data, path + ".tensors.pth") - - - -class TensorDataset(torch.utils.data.Dataset): - def __init__(self, base_path, metadata_path, steps_per_epoch): - metadata = pd.read_csv(metadata_path) - self.path = [os.path.join(base_path, "train", 
file_name) for file_name in metadata["file_name"]] - print(len(self.path), "videos in metadata.") - self.path = [i + ".tensors.pth" for i in self.path if os.path.exists(i + ".tensors.pth")] - print(len(self.path), "tensors cached in metadata.") - assert len(self.path) > 0 - - self.steps_per_epoch = steps_per_epoch - - - def __getitem__(self, index): - data_id = torch.randint(0, len(self.path), (1,))[0] - data_id = (data_id + index) % len(self.path) # For fixed seed. - path = self.path[data_id] - data = torch.load(path, weights_only=True, map_location="cpu") - return data - - - def __len__(self): - return self.steps_per_epoch - - - -class LightningModelForTrain(pl.LightningModule): - def __init__( - self, - dit_path, - learning_rate=1e-5, - lora_rank=4, lora_alpha=4, train_architecture="lora", lora_target_modules="q,k,v,o,ffn.0,ffn.2", init_lora_weights="kaiming", - use_gradient_checkpointing=True, use_gradient_checkpointing_offload=False, - pretrained_lora_path=None - ): - super().__init__() - model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cpu") - if os.path.isfile(dit_path): - model_manager.load_models([dit_path]) - else: - dit_path = dit_path.split(",") - model_manager.load_models([dit_path]) - - self.pipe = WanVideoPipeline.from_model_manager(model_manager) - self.pipe.scheduler.set_timesteps(1000, training=True) - self.freeze_parameters() - if train_architecture == "lora": - self.add_lora_to_model( - self.pipe.denoising_model(), - lora_rank=lora_rank, - lora_alpha=lora_alpha, - lora_target_modules=lora_target_modules, - init_lora_weights=init_lora_weights, - pretrained_lora_path=pretrained_lora_path, - ) - else: - self.pipe.denoising_model().requires_grad_(True) - - self.learning_rate = learning_rate - self.use_gradient_checkpointing = use_gradient_checkpointing - self.use_gradient_checkpointing_offload = use_gradient_checkpointing_offload - - - def freeze_parameters(self): - # Freeze parameters - self.pipe.requires_grad_(False) - 
self.pipe.eval() - self.pipe.denoising_model().train() - - - def add_lora_to_model(self, model, lora_rank=4, lora_alpha=4, lora_target_modules="q,k,v,o,ffn.0,ffn.2", init_lora_weights="kaiming", pretrained_lora_path=None, state_dict_converter=None): - # Add LoRA to UNet - self.lora_alpha = lora_alpha - if init_lora_weights == "kaiming": - init_lora_weights = True - - lora_config = LoraConfig( - r=lora_rank, - lora_alpha=lora_alpha, - init_lora_weights=init_lora_weights, - target_modules=lora_target_modules.split(","), - ) - model = inject_adapter_in_model(lora_config, model) - for param in model.parameters(): - # Upcast LoRA parameters into fp32 - if param.requires_grad: - param.data = param.to(torch.float32) - - # Lora pretrained lora weights - if pretrained_lora_path is not None: - state_dict = load_state_dict(pretrained_lora_path) - if state_dict_converter is not None: - state_dict = state_dict_converter(state_dict) - missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) - all_keys = [i for i, _ in model.named_parameters()] - num_updated_keys = len(all_keys) - len(missing_keys) - num_unexpected_keys = len(unexpected_keys) - print(f"{num_updated_keys} parameters are loaded from {pretrained_lora_path}. 
{num_unexpected_keys} parameters are unexpected.") - - - def training_step(self, batch, batch_idx): - # Data - latents = batch["latents"].to(self.device) - prompt_emb = batch["prompt_emb"] - prompt_emb["context"] = prompt_emb["context"][0].to(self.device) - image_emb = batch["image_emb"] - if "clip_feature" in image_emb: - image_emb["clip_feature"] = image_emb["clip_feature"][0].to(self.device) - if "y" in image_emb: - image_emb["y"] = image_emb["y"][0].to(self.device) - - # Loss - self.pipe.device = self.device - noise = torch.randn_like(latents) - timestep_id = torch.randint(0, self.pipe.scheduler.num_train_timesteps, (1,)) - timestep = self.pipe.scheduler.timesteps[timestep_id].to(dtype=self.pipe.torch_dtype, device=self.pipe.device) - extra_input = self.pipe.prepare_extra_input(latents) - noisy_latents = self.pipe.scheduler.add_noise(latents, noise, timestep) - training_target = self.pipe.scheduler.training_target(latents, noise, timestep) - - # Compute loss - noise_pred = self.pipe.denoising_model()( - noisy_latents, timestep=timestep, **prompt_emb, **extra_input, **image_emb, - use_gradient_checkpointing=self.use_gradient_checkpointing, - use_gradient_checkpointing_offload=self.use_gradient_checkpointing_offload - ) - loss = torch.nn.functional.mse_loss(noise_pred.float(), training_target.float()) - loss = loss * self.pipe.scheduler.training_weight(timestep) - - # Record log - self.log("train_loss", loss, prog_bar=True) - return loss - - - def configure_optimizers(self): - trainable_modules = filter(lambda p: p.requires_grad, self.pipe.denoising_model().parameters()) - optimizer = torch.optim.AdamW(trainable_modules, lr=self.learning_rate) - return optimizer - - - def on_save_checkpoint(self, checkpoint): - checkpoint.clear() - trainable_param_names = list(filter(lambda named_param: named_param[1].requires_grad, self.pipe.denoising_model().named_parameters())) - trainable_param_names = set([named_param[0] for named_param in trainable_param_names]) - 
state_dict = self.pipe.denoising_model().state_dict() - lora_state_dict = {} - for name, param in state_dict.items(): - if name in trainable_param_names: - lora_state_dict[name] = param - checkpoint.update(lora_state_dict) - - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--task", - type=str, - default="data_process", - required=True, - choices=["data_process", "train"], - help="Task. `data_process` or `train`.", - ) - parser.add_argument( - "--dataset_path", - type=str, - default=None, - required=True, - help="The path of the Dataset.", - ) - parser.add_argument( - "--output_path", - type=str, - default="./", - help="Path to save the model.", - ) - parser.add_argument( - "--text_encoder_path", - type=str, - default=None, - help="Path of text encoder.", - ) - parser.add_argument( - "--image_encoder_path", - type=str, - default=None, - help="Path of image encoder.", - ) - parser.add_argument( - "--vae_path", - type=str, - default=None, - help="Path of VAE.", - ) - parser.add_argument( - "--dit_path", - type=str, - default=None, - help="Path of DiT.", - ) - parser.add_argument( - "--tiled", - default=False, - action="store_true", - help="Whether enable tile encode in VAE. 
This option can reduce VRAM required.", - ) - parser.add_argument( - "--tile_size_height", - type=int, - default=34, - help="Tile size (height) in VAE.", - ) - parser.add_argument( - "--tile_size_width", - type=int, - default=34, - help="Tile size (width) in VAE.", - ) - parser.add_argument( - "--tile_stride_height", - type=int, - default=18, - help="Tile stride (height) in VAE.", - ) - parser.add_argument( - "--tile_stride_width", - type=int, - default=16, - help="Tile stride (width) in VAE.", - ) - parser.add_argument( - "--steps_per_epoch", - type=int, - default=500, - help="Number of steps per epoch.", - ) - parser.add_argument( - "--num_frames", - type=int, - default=81, - help="Number of frames.", - ) - parser.add_argument( - "--height", - type=int, - default=480, - help="Image height.", - ) - parser.add_argument( - "--width", - type=int, - default=832, - help="Image width.", - ) - parser.add_argument( - "--dataloader_num_workers", - type=int, - default=1, - help="Number of subprocesses to use for data loading. 
0 means that the data will be loaded in the main process.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=1e-5, - help="Learning rate.", - ) - parser.add_argument( - "--accumulate_grad_batches", - type=int, - default=1, - help="The number of batches in gradient accumulation.", - ) - parser.add_argument( - "--max_epochs", - type=int, - default=1, - help="Number of epochs.", - ) - parser.add_argument( - "--lora_target_modules", - type=str, - default="q,k,v,o,ffn.0,ffn.2", - help="Layers with LoRA modules.", - ) - parser.add_argument( - "--init_lora_weights", - type=str, - default="kaiming", - choices=["gaussian", "kaiming"], - help="The initializing method of LoRA weight.", - ) - parser.add_argument( - "--training_strategy", - type=str, - default="auto", - choices=["auto", "deepspeed_stage_1", "deepspeed_stage_2", "deepspeed_stage_3"], - help="Training strategy", - ) - parser.add_argument( - "--lora_rank", - type=int, - default=4, - help="The dimension of the LoRA update matrices.", - ) - parser.add_argument( - "--lora_alpha", - type=float, - default=4.0, - help="The weight of the LoRA update matrices.", - ) - parser.add_argument( - "--use_gradient_checkpointing", - default=False, - action="store_true", - help="Whether to use gradient checkpointing.", - ) - parser.add_argument( - "--use_gradient_checkpointing_offload", - default=False, - action="store_true", - help="Whether to use gradient checkpointing offload.", - ) - parser.add_argument( - "--train_architecture", - type=str, - default="lora", - choices=["lora", "full"], - help="Model structure to train. LoRA training or full training.", - ) - parser.add_argument( - "--pretrained_lora_path", - type=str, - default=None, - help="Pretrained LoRA path. 
Required if the training is resumed.", - ) - parser.add_argument( - "--use_swanlab", - default=False, - action="store_true", - help="Whether to use SwanLab logger.", - ) - parser.add_argument( - "--swanlab_mode", - default=None, - help="SwanLab mode (cloud or local).", - ) - args = parser.parse_args() - return args - - -def data_process(args): - dataset = TextVideoDataset( - args.dataset_path, - os.path.join(args.dataset_path, "metadata.csv"), - max_num_frames=args.num_frames, - frame_interval=1, - num_frames=args.num_frames, - height=args.height, - width=args.width, - is_i2v=args.image_encoder_path is not None - ) - dataloader = torch.utils.data.DataLoader( - dataset, - shuffle=False, - batch_size=1, - num_workers=args.dataloader_num_workers - ) - model = LightningModelForDataProcess( - text_encoder_path=args.text_encoder_path, - image_encoder_path=args.image_encoder_path, - vae_path=args.vae_path, - tiled=args.tiled, - tile_size=(args.tile_size_height, args.tile_size_width), - tile_stride=(args.tile_stride_height, args.tile_stride_width), - ) - trainer = pl.Trainer( - accelerator="gpu", - devices="auto", - default_root_dir=args.output_path, - ) - trainer.test(model, dataloader) - - -def train(args): - dataset = TensorDataset( - args.dataset_path, - os.path.join(args.dataset_path, "metadata.csv"), - steps_per_epoch=args.steps_per_epoch, - ) - dataloader = torch.utils.data.DataLoader( - dataset, - shuffle=True, - batch_size=1, - num_workers=args.dataloader_num_workers - ) - model = LightningModelForTrain( - dit_path=args.dit_path, - learning_rate=args.learning_rate, - train_architecture=args.train_architecture, - lora_rank=args.lora_rank, - lora_alpha=args.lora_alpha, - lora_target_modules=args.lora_target_modules, - init_lora_weights=args.init_lora_weights, - use_gradient_checkpointing=args.use_gradient_checkpointing, - use_gradient_checkpointing_offload=args.use_gradient_checkpointing_offload, - pretrained_lora_path=args.pretrained_lora_path, - ) - if 
args.use_swanlab: - from swanlab.integration.pytorch_lightning import SwanLabLogger - swanlab_config = {"UPPERFRAMEWORK": "DiffSynth-Studio"} - swanlab_config.update(vars(args)) - swanlab_logger = SwanLabLogger( - project="wan", - name="wan", - config=swanlab_config, - mode=args.swanlab_mode, - logdir=os.path.join(args.output_path, "swanlog"), - ) - logger = [swanlab_logger] - else: - logger = None - trainer = pl.Trainer( - max_epochs=args.max_epochs, - accelerator="gpu", - devices="auto", - precision="bf16", - strategy=args.training_strategy, - default_root_dir=args.output_path, - accumulate_grad_batches=args.accumulate_grad_batches, - callbacks=[pl.pytorch.callbacks.ModelCheckpoint(save_top_k=-1)], - logger=logger, - ) - trainer.fit(model, dataloader) - - -if __name__ == '__main__': - args = parse_args() - if args.task == "data_process": - data_process(args) - elif args.task == "train": - train(args) diff --git a/examples/wanvideo/wan_1.3b_motion_controller.py b/examples/wanvideo/wan_1.3b_motion_controller.py deleted file mode 100644 index 8036819..0000000 --- a/examples/wanvideo/wan_1.3b_motion_controller.py +++ /dev/null @@ -1,41 +0,0 @@ -import torch -from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData -from modelscope import snapshot_download - - -# Download models -snapshot_download("Wan-AI/Wan2.1-T2V-1.3B", local_dir="models/Wan-AI/Wan2.1-T2V-1.3B") -snapshot_download("DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1", local_dir="models/DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1") - -# Load models -model_manager = ModelManager(device="cpu") -model_manager.load_models( - [ - "models/Wan-AI/Wan2.1-T2V-1.3B/diffusion_pytorch_model.safetensors", - "models/Wan-AI/Wan2.1-T2V-1.3B/models_t5_umt5-xxl-enc-bf16.pth", - "models/Wan-AI/Wan2.1-T2V-1.3B/Wan2.1_VAE.pth", - "models/DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1/model.safetensors", - ], - torch_dtype=torch.bfloat16, # You can set `torch_dtype=torch.float8_e4m3fn` to enable FP8 
quantization. -) -pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda") -pipe.enable_vram_management(num_persistent_param_in_dit=None) - -# Text-to-video -video = pipe( - prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - num_inference_steps=50, - seed=1, tiled=True, - motion_bucket_id=0 -) -save_video(video, "video_slow.mp4", fps=15, quality=5) - -video = pipe( - prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - num_inference_steps=50, - seed=1, tiled=True, - motion_bucket_id=100 -) -save_video(video, "video_fast.mp4", fps=15, quality=5) \ No newline at end of file diff --git a/examples/wanvideo/wan_1.3b_text_to_video.py b/examples/wanvideo/wan_1.3b_text_to_video.py deleted file mode 100644 index e444cd2..0000000 --- a/examples/wanvideo/wan_1.3b_text_to_video.py +++ /dev/null @@ -1,40 +0,0 @@ -import torch -from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData -from modelscope import snapshot_download - - -# Download models -snapshot_download("Wan-AI/Wan2.1-T2V-1.3B", local_dir="models/Wan-AI/Wan2.1-T2V-1.3B") - -# Load models -model_manager = ModelManager(device="cpu") -model_manager.load_models( - [ - "models/Wan-AI/Wan2.1-T2V-1.3B/diffusion_pytorch_model.safetensors", - "models/Wan-AI/Wan2.1-T2V-1.3B/models_t5_umt5-xxl-enc-bf16.pth", - "models/Wan-AI/Wan2.1-T2V-1.3B/Wan2.1_VAE.pth", - ], - torch_dtype=torch.bfloat16, # You can set `torch_dtype=torch.float8_e4m3fn` to enable 
FP8 quantization. -) -pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda") -pipe.enable_vram_management(num_persistent_param_in_dit=None) - -# Text-to-video -video = pipe( - prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - num_inference_steps=50, - seed=0, tiled=True -) -save_video(video, "video1.mp4", fps=15, quality=5) - -# Video-to-video -video = VideoData("video1.mp4", height=480, width=832) -video = pipe( - prompt="纪实摄影风格画面,一只活泼的小狗戴着黑色墨镜在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,戴着黑色墨镜,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - input_video=video, denoising_strength=0.7, - num_inference_steps=50, - seed=1, tiled=True -) -save_video(video, "video2.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/wan_1.3b_vace.py b/examples/wanvideo/wan_1.3b_vace.py deleted file mode 100644 index fb987a7..0000000 --- a/examples/wanvideo/wan_1.3b_vace.py +++ /dev/null @@ -1,63 +0,0 @@ -import torch -from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData -from modelscope import snapshot_download, dataset_snapshot_download -from PIL import Image - - -# Download models -snapshot_download("iic/VACE-Wan2.1-1.3B-Preview", local_dir="models/iic/VACE-Wan2.1-1.3B-Preview") - -# Load models -model_manager = ModelManager(device="cpu") -model_manager.load_models( - [ - "models/iic/VACE-Wan2.1-1.3B-Preview/diffusion_pytorch_model.safetensors", - "models/iic/VACE-Wan2.1-1.3B-Preview/models_t5_umt5-xxl-enc-bf16.pth", - 
"models/iic/VACE-Wan2.1-1.3B-Preview/Wan2.1_VAE.pth", - ], - torch_dtype=torch.bfloat16, -) -pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda") -pipe.enable_vram_management(num_persistent_param_in_dit=None) - -# Download example video -dataset_snapshot_download( - dataset_id="DiffSynth-Studio/examples_in_diffsynth", - local_dir="./", - allow_file_pattern=["data/examples/wan/depth_video.mp4", "data/examples/wan/cat_fightning.jpg"] -) - -# Depth video -> Video -control_video = VideoData("data/examples/wan/depth_video.mp4", height=480, width=832) -video = pipe( - prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - num_inference_steps=50, - height=480, width=832, num_frames=81, - vace_video=control_video, - seed=1, tiled=True -) -save_video(video, "video1.mp4", fps=15, quality=5) - -# Reference image -> Video -video = pipe( - prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - num_inference_steps=50, - height=480, width=832, num_frames=81, - vace_reference_image=Image.open("data/examples/wan/cat_fightning.jpg").resize((832, 480)), - seed=1, tiled=True -) -save_video(video, "video2.mp4", fps=15, quality=5) - -# Depth video + Reference image -> Video -video = pipe( - prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - num_inference_steps=50, - height=480, width=832, num_frames=81, - vace_video=control_video, - vace_reference_image=Image.open("data/examples/wan/cat_fightning.jpg").resize((832, 480)), - seed=1, tiled=True -) -save_video(video, "video3.mp4", fps=15, quality=5) diff --git 
a/examples/wanvideo/wan_14B_flf2v.py b/examples/wanvideo/wan_14B_flf2v.py deleted file mode 100644 index 23109df..0000000 --- a/examples/wanvideo/wan_14B_flf2v.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch -from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData -from modelscope import snapshot_download, dataset_snapshot_download -from PIL import Image - - -# Download models -snapshot_download("Wan-AI/Wan2.1-FLF2V-14B-720P", local_dir="models/Wan-AI/Wan2.1-FLF2V-14B-720P") - -# Load models -model_manager = ModelManager(device="cpu") -model_manager.load_models( - ["models/Wan-AI/Wan2.1-FLF2V-14B-720P/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"], - torch_dtype=torch.float32, # Image Encoder is loaded with float32 -) -model_manager.load_models( - [ - [ - "models/Wan-AI/Wan2.1-FLF2V-14B-720P/diffusion_pytorch_model-00001-of-00007.safetensors", - "models/Wan-AI/Wan2.1-FLF2V-14B-720P/diffusion_pytorch_model-00002-of-00007.safetensors", - "models/Wan-AI/Wan2.1-FLF2V-14B-720P/diffusion_pytorch_model-00003-of-00007.safetensors", - "models/Wan-AI/Wan2.1-FLF2V-14B-720P/diffusion_pytorch_model-00004-of-00007.safetensors", - "models/Wan-AI/Wan2.1-FLF2V-14B-720P/diffusion_pytorch_model-00005-of-00007.safetensors", - "models/Wan-AI/Wan2.1-FLF2V-14B-720P/diffusion_pytorch_model-00006-of-00007.safetensors", - "models/Wan-AI/Wan2.1-FLF2V-14B-720P/diffusion_pytorch_model-00007-of-00007.safetensors", - ], - "models/Wan-AI/Wan2.1-FLF2V-14B-720P/models_t5_umt5-xxl-enc-bf16.pth", - "models/Wan-AI/Wan2.1-FLF2V-14B-720P/Wan2.1_VAE.pth", - ], - torch_dtype=torch.bfloat16, # You can set `torch_dtype=torch.float8_e4m3fn` to enable FP8 quantization. 
-) -pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda") -pipe.enable_vram_management(num_persistent_param_in_dit=None) - -# Download example image -dataset_snapshot_download( - dataset_id="DiffSynth-Studio/examples_in_diffsynth", - local_dir="./", - allow_file_pattern=["data/examples/wan/first_frame.jpeg", "data/examples/wan/last_frame.jpeg"] -) - -# First and last frame to video -video = pipe( - prompt="写实风格,一个女生手持枯萎的花站在花园中,镜头逐渐拉远,记录下花园的全貌。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - num_inference_steps=30, - input_image=Image.open("data/examples/wan/first_frame.jpeg").resize((960, 960)), - end_image=Image.open("data/examples/wan/last_frame.jpeg").resize((960, 960)), - height=960, width=960, - seed=1, tiled=True -) -save_video(video, "video.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/wan_14b_image_to_video.py b/examples/wanvideo/wan_14b_image_to_video.py deleted file mode 100644 index 91894ae..0000000 --- a/examples/wanvideo/wan_14b_image_to_video.py +++ /dev/null @@ -1,51 +0,0 @@ -import torch -from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData -from modelscope import snapshot_download, dataset_snapshot_download -from PIL import Image - - -# Download models -snapshot_download("Wan-AI/Wan2.1-I2V-14B-480P", local_dir="models/Wan-AI/Wan2.1-I2V-14B-480P") - -# Load models -model_manager = ModelManager(device="cpu") -model_manager.load_models( - ["models/Wan-AI/Wan2.1-I2V-14B-480P/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"], - torch_dtype=torch.float32, # Image Encoder is loaded with float32 -) -model_manager.load_models( - [ - [ - "models/Wan-AI/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00001-of-00007.safetensors", - "models/Wan-AI/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00002-of-00007.safetensors", - 
"models/Wan-AI/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00003-of-00007.safetensors", - "models/Wan-AI/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00004-of-00007.safetensors", - "models/Wan-AI/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00005-of-00007.safetensors", - "models/Wan-AI/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00006-of-00007.safetensors", - "models/Wan-AI/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00007-of-00007.safetensors", - ], - "models/Wan-AI/Wan2.1-I2V-14B-480P/models_t5_umt5-xxl-enc-bf16.pth", - "models/Wan-AI/Wan2.1-I2V-14B-480P/Wan2.1_VAE.pth", - ], - torch_dtype=torch.bfloat16, # You can set `torch_dtype=torch.float8_e4m3fn` to enable FP8 quantization. -) -pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda") -pipe.enable_vram_management(num_persistent_param_in_dit=6*10**9) # You can set `num_persistent_param_in_dit` to a small number to reduce VRAM required. - -# Download example image -dataset_snapshot_download( - dataset_id="DiffSynth-Studio/examples_in_diffsynth", - local_dir="./", - allow_file_pattern=f"data/examples/wan/input_image.jpg" -) -image = Image.open("data/examples/wan/input_image.jpg") - -# Image-to-video -video = pipe( - prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - input_image=image, - num_inference_steps=50, - seed=0, tiled=True -) -save_video(video, "video.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/wan_14b_text_to_video.py b/examples/wanvideo/wan_14b_text_to_video.py deleted file mode 100644 index 654565d..0000000 --- a/examples/wanvideo/wan_14b_text_to_video.py +++ /dev/null @@ -1,36 +0,0 @@ -import torch -from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData -from 
modelscope import snapshot_download - - -# Download models -snapshot_download("Wan-AI/Wan2.1-T2V-14B", local_dir="models/Wan-AI/Wan2.1-T2V-14B") - -# Load models -model_manager = ModelManager(device="cpu") -model_manager.load_models( - [ - [ - "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors", - "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors", - "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors", - "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors", - "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors", - "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors", - ], - "models/Wan-AI/Wan2.1-T2V-14B/models_t5_umt5-xxl-enc-bf16.pth", - "models/Wan-AI/Wan2.1-T2V-14B/Wan2.1_VAE.pth", - ], - torch_dtype=torch.float8_e4m3fn, # You can set `torch_dtype=torch.bfloat16` to disable FP8 quantization. -) -pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda") -pipe.enable_vram_management(num_persistent_param_in_dit=None) # You can set `num_persistent_param_in_dit` to a small number to reduce VRAM required. 
- -# Text-to-video -video = pipe( - prompt="一名宇航员身穿太空服,面朝镜头骑着一匹机械马在火星表面驰骋。红色的荒凉地表延伸至远方,点缀着巨大的陨石坑和奇特的岩石结构。机械马的步伐稳健,扬起微弱的尘埃,展现出未来科技与原始探索的完美结合。宇航员手持操控装置,目光坚定,仿佛正在开辟人类的新疆域。背景是深邃的宇宙和蔚蓝的地球,画面既科幻又充满希望,让人不禁畅想未来的星际生活。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - num_inference_steps=50, - seed=0, tiled=True -) -save_video(video, "video1.mp4", fps=25, quality=5) diff --git a/examples/wanvideo/wan_14b_text_to_video_tensor_parallel.py b/examples/wanvideo/wan_14b_text_to_video_tensor_parallel.py deleted file mode 100644 index 77c230c..0000000 --- a/examples/wanvideo/wan_14b_text_to_video_tensor_parallel.py +++ /dev/null @@ -1,149 +0,0 @@ -import torch -import lightning as pl -from torch.distributed.tensor.parallel import ColwiseParallel, RowwiseParallel, SequenceParallel, PrepareModuleInput, PrepareModuleOutput -from torch.distributed._tensor import Replicate, Shard -from torch.distributed.tensor.parallel import parallelize_module -from lightning.pytorch.strategies import ModelParallelStrategy -from diffsynth import ModelManager, WanVideoPipeline, save_video -from tqdm import tqdm -from modelscope import snapshot_download - - - -class ToyDataset(torch.utils.data.Dataset): - def __init__(self, tasks=[]): - self.tasks = tasks - - def __getitem__(self, data_id): - return self.tasks[data_id] - - def __len__(self): - return len(self.tasks) - - -class LitModel(pl.LightningModule): - def __init__(self): - super().__init__() - model_manager = ModelManager(device="cpu") - model_manager.load_models( - [ - [ - "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors", - "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors", - "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors", - "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors", - 
"models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors", - "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors", - ], - "models/Wan-AI/Wan2.1-T2V-14B/models_t5_umt5-xxl-enc-bf16.pth", - "models/Wan-AI/Wan2.1-T2V-14B/Wan2.1_VAE.pth", - ], - torch_dtype=torch.bfloat16, - ) - self.pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda") - - def configure_model(self): - tp_mesh = self.device_mesh["tensor_parallel"] - plan = { - "text_embedding.0": ColwiseParallel(), - "text_embedding.2": RowwiseParallel(), - "time_projection.1": ColwiseParallel(output_layouts=Replicate()), - "text_embedding.0": ColwiseParallel(), - "text_embedding.2": RowwiseParallel(), - "blocks.0": PrepareModuleInput( - input_layouts=(Replicate(), None, None, None), - desired_input_layouts=(Replicate(), None, None, None), - ), - "head": PrepareModuleInput( - input_layouts=(Replicate(), None), - desired_input_layouts=(Replicate(), None), - use_local_output=True, - ) - } - self.pipe.dit = parallelize_module(self.pipe.dit, tp_mesh, plan) - for block_id, block in enumerate(self.pipe.dit.blocks): - layer_tp_plan = { - "self_attn": PrepareModuleInput( - input_layouts=(Shard(1), Replicate()), - desired_input_layouts=(Shard(1), Shard(0)), - ), - "self_attn.q": SequenceParallel(), - "self_attn.k": SequenceParallel(), - "self_attn.v": SequenceParallel(), - "self_attn.norm_q": SequenceParallel(), - "self_attn.norm_k": SequenceParallel(), - "self_attn.attn": PrepareModuleInput( - input_layouts=(Shard(1), Shard(1), Shard(1)), - desired_input_layouts=(Shard(2), Shard(2), Shard(2)), - ), - "self_attn.o": RowwiseParallel(input_layouts=Shard(2), output_layouts=Replicate()), - - "cross_attn": PrepareModuleInput( - input_layouts=(Shard(1), Replicate()), - desired_input_layouts=(Shard(1), Replicate()), - ), - "cross_attn.q": SequenceParallel(), - "cross_attn.k": SequenceParallel(), - "cross_attn.v": SequenceParallel(), - 
"cross_attn.norm_q": SequenceParallel(), - "cross_attn.norm_k": SequenceParallel(), - "cross_attn.attn": PrepareModuleInput( - input_layouts=(Shard(1), Shard(1), Shard(1)), - desired_input_layouts=(Shard(2), Shard(2), Shard(2)), - ), - "cross_attn.o": RowwiseParallel(input_layouts=Shard(2), output_layouts=Replicate(), use_local_output=False), - - "ffn.0": ColwiseParallel(input_layouts=Shard(1)), - "ffn.2": RowwiseParallel(output_layouts=Replicate()), - - "norm1": SequenceParallel(use_local_output=True), - "norm2": SequenceParallel(use_local_output=True), - "norm3": SequenceParallel(use_local_output=True), - "gate": PrepareModuleInput( - input_layouts=(Shard(1), Replicate(), Replicate()), - desired_input_layouts=(Replicate(), Replicate(), Replicate()), - ) - } - parallelize_module( - module=block, - device_mesh=tp_mesh, - parallelize_plan=layer_tp_plan, - ) - - - def test_step(self, batch): - data = batch[0] - data["progress_bar_cmd"] = tqdm if self.local_rank == 0 else lambda x: x - output_path = data.pop("output_path") - with torch.no_grad(), torch.inference_mode(False): - video = self.pipe(**data) - if self.local_rank == 0: - save_video(video, output_path, fps=15, quality=5) - - -if __name__ == "__main__": - snapshot_download("Wan-AI/Wan2.1-T2V-14B", local_dir="models/Wan-AI/Wan2.1-T2V-14B") - dataloader = torch.utils.data.DataLoader( - ToyDataset([ - { - "prompt": "一名宇航员身穿太空服,面朝镜头骑着一匹机械马在火星表面驰骋。红色的荒凉地表延伸至远方,点缀着巨大的陨石坑和奇特的岩石结构。机械马的步伐稳健,扬起微弱的尘埃,展现出未来科技与原始探索的完美结合。宇航员手持操控装置,目光坚定,仿佛正在开辟人类的新疆域。背景是深邃的宇宙和蔚蓝的地球,画面既科幻又充满希望,让人不禁畅想未来的星际生活。", - "negative_prompt": "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - "num_inference_steps": 50, - "seed": 0, - "tiled": False, - "output_path": "video1.mp4", - }, - { - "prompt": "一名宇航员身穿太空服,面朝镜头骑着一匹机械马在火星表面驰骋。红色的荒凉地表延伸至远方,点缀着巨大的陨石坑和奇特的岩石结构。机械马的步伐稳健,扬起微弱的尘埃,展现出未来科技与原始探索的完美结合。宇航员手持操控装置,目光坚定,仿佛正在开辟人类的新疆域。背景是深邃的宇宙和蔚蓝的地球,画面既科幻又充满希望,让人不禁畅想未来的星际生活。", 
- "negative_prompt": "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - "num_inference_steps": 50, - "seed": 1, - "tiled": False, - "output_path": "video2.mp4", - }, - ]), - collate_fn=lambda x: x - ) - model = LitModel() - trainer = pl.Trainer(accelerator="gpu", devices=torch.cuda.device_count(), strategy=ModelParallelStrategy()) - trainer.test(model, dataloader) \ No newline at end of file diff --git a/examples/wanvideo/wan_fun_InP.py b/examples/wanvideo/wan_fun_InP.py deleted file mode 100644 index ae23ee0..0000000 --- a/examples/wanvideo/wan_fun_InP.py +++ /dev/null @@ -1,42 +0,0 @@ -import torch -from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData -from modelscope import snapshot_download, dataset_snapshot_download -from PIL import Image - - -# Download models -snapshot_download("PAI/Wan2.1-Fun-1.3B-InP", local_dir="models/PAI/Wan2.1-Fun-1.3B-InP") - -# Load models -model_manager = ModelManager(device="cpu") -model_manager.load_models( - [ - "models/PAI/Wan2.1-Fun-1.3B-InP/diffusion_pytorch_model.safetensors", - "models/PAI/Wan2.1-Fun-1.3B-InP/models_t5_umt5-xxl-enc-bf16.pth", - "models/PAI/Wan2.1-Fun-1.3B-InP/Wan2.1_VAE.pth", - "models/PAI/Wan2.1-Fun-1.3B-InP/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", - ], - torch_dtype=torch.bfloat16, # You can set `torch_dtype=torch.float8_e4m3fn` to enable FP8 quantization. 
-) -pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda") -pipe.enable_vram_management(num_persistent_param_in_dit=None) - -# Download example image -dataset_snapshot_download( - dataset_id="DiffSynth-Studio/examples_in_diffsynth", - local_dir="./", - allow_file_pattern=f"data/examples/wan/input_image.jpg" -) -image = Image.open("data/examples/wan/input_image.jpg") - -# Image-to-video -video = pipe( - prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - num_inference_steps=50, - input_image=image, - # You can input `end_image=xxx` to control the last frame of the video. - # The model will automatically generate the dynamic content between `input_image` and `end_image`. - seed=1, tiled=True -) -save_video(video, "video1.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/wan_fun_control.py b/examples/wanvideo/wan_fun_control.py deleted file mode 100644 index e2c4d0c..0000000 --- a/examples/wanvideo/wan_fun_control.py +++ /dev/null @@ -1,40 +0,0 @@ -import torch -from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData -from modelscope import snapshot_download, dataset_snapshot_download -from PIL import Image - - -# Download models -snapshot_download("PAI/Wan2.1-Fun-1.3B-Control", local_dir="models/PAI/Wan2.1-Fun-1.3B-Control") - -# Load models -model_manager = ModelManager(device="cpu") -model_manager.load_models( - [ - "models/PAI/Wan2.1-Fun-1.3B-Control/diffusion_pytorch_model.safetensors", - "models/PAI/Wan2.1-Fun-1.3B-Control/models_t5_umt5-xxl-enc-bf16.pth", - "models/PAI/Wan2.1-Fun-1.3B-Control/Wan2.1_VAE.pth", - "models/PAI/Wan2.1-Fun-1.3B-Control/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", - ], - 
torch_dtype=torch.bfloat16, # You can set `torch_dtype=torch.float8_e4m3fn` to enable FP8 quantization. -) -pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda") -pipe.enable_vram_management(num_persistent_param_in_dit=None) - -# Download example video -dataset_snapshot_download( - dataset_id="DiffSynth-Studio/examples_in_diffsynth", - local_dir="./", - allow_file_pattern=f"data/examples/wan/control_video.mp4" -) - -# Control-to-video -control_video = VideoData("data/examples/wan/control_video.mp4", height=832, width=576) -video = pipe( - prompt="扁平风格动漫,一位长发少女优雅起舞。她五官精致,大眼睛明亮有神,黑色长发柔顺光泽。身穿淡蓝色T恤和深蓝色牛仔短裤。背景是粉色。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - num_inference_steps=50, - control_video=control_video, height=832, width=576, num_frames=49, - seed=1, tiled=True -) -save_video(video, "video1.mp4", fps=15, quality=5) diff --git a/examples/wanvideo/wan_fun_reference_control.py b/examples/wanvideo/wan_fun_reference_control.py deleted file mode 100644 index bc82157..0000000 --- a/examples/wanvideo/wan_fun_reference_control.py +++ /dev/null @@ -1,35 +0,0 @@ -import torch -from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData -from modelscope import snapshot_download, dataset_snapshot_download -from PIL import Image - - -# Download models -# snapshot_download("PAI/Wan2.1-Fun-1.3B-Control", local_dir="models/PAI/Wan2.1-Fun-V1.1-1.3B-Control") - -# Load models -model_manager = ModelManager(device="cpu") -model_manager.load_models( - [ - "models/PAI/Wan2.1-Fun-V1.1-14B-Control/diffusion_pytorch_model.safetensors", - "models/PAI/Wan2.1-Fun-V1.1-14B-Control/models_t5_umt5-xxl-enc-bf16.pth", - "models/PAI/Wan2.1-Fun-V1.1-14B-Control/Wan2.1_VAE.pth", - "models/PAI/Wan2.1-Fun-V1.1-14B-Control/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", - ], - torch_dtype=torch.bfloat16, # You 
can set `torch_dtype=torch.float8_e4m3fn` to enable FP8 quantization. -) -pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda") -pipe.enable_vram_management(num_persistent_param_in_dit=None) - -# Control-to-video -control_video = VideoData("xxx/pose.mp4", height=832, width=480) -control_video = [control_video[i] for i in range(49)] -video = pipe( - prompt="一位年轻女性穿着一件粉色的连衣裙,裙子上有白色的装饰和粉色的纽扣。她的头发是紫色的,头上戴着一个红色的大蝴蝶结,显得非常可爱和精致。她还戴着一个红色的领结,整体造型充满了少女感和活力。她的表情温柔,双手轻轻交叉放在身前,姿态优雅。背景是简单的灰色,没有任何多余的装饰,使得人物更加突出。她的妆容清淡自然,突显了她的清新气质。整体画面给人一种甜美、梦幻的感觉,仿佛置身于童话世界中。", - negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", - num_inference_steps=50, - reference_image=Image.open("xxx/6.png").convert("RGB").resize((480, 832)), - control_video=control_video, height=832, width=480, num_frames=49, - seed=1, tiled=True -) -save_video(video, "video1.mp4", fps=15, quality=5) From 6e977e118122f46c8c2d481d231f71f689694811 Mon Sep 17 00:00:00 2001 From: Artiprocher Date: Fri, 6 Jun 2025 15:19:09 +0800 Subject: [PATCH 8/9] refine wan doc --- examples/wanvideo/README_zh.md | 66 ++++++++++++++++++++++++++++------ 1 file changed, 55 insertions(+), 11 deletions(-) diff --git a/examples/wanvideo/README_zh.md b/examples/wanvideo/README_zh.md index 4504f8f..ee53785 100644 --- a/examples/wanvideo/README_zh.md +++ b/examples/wanvideo/README_zh.md @@ -24,7 +24,12 @@ ## 模型推理 -### 加载模型 +以下部分将会帮助您理解我们的功能并编写推理代码。 + + +
+ +加载模型 模型通过 `from_pretrained` 加载: @@ -74,7 +79,12 @@ ModelConfig(path=[ * `skip_download`: 是否跳过下载,默认值为 `False`。当您的网络无法访问[魔搭社区](https://modelscope.cn/)时,请手动下载必要的文件,并将其设置为 `True`。 * `redirect_common_files`: 是否重定向重复模型文件,默认值为 `True`。由于 Wan 系列模型包括多个基础模型,每个基础模型的 text encoder 等模块都是相同的,为避免重复下载,我们会对模型路径进行重定向。 -### 显存管理 +
+ + +
+ +显存管理 DiffSynth-Studio 为 Wan 模型提供了细粒度的显存管理,让模型能够在低显存设备上进行推理,可通过以下代码开启 offload 功能,在显存有限的设备上将部分模块 offload 到内存中。 @@ -129,7 +139,12 @@ FP8 量化能够大幅度减少显存占用,但不会加速,部分模型在 * `vram_buffer`: 显存缓冲区大小(GB),默认为 0.5GB。由于部分较大的神经网络层在 onload 阶段会不可控地占用更多显存,因此一个显存缓冲区是必要的,理论上的最优值为模型中最大的层所占的显存。 * `num_persistent_param_in_dit`: DiT 模型中常驻显存的参数数量(个),默认为无限制。我们将会在未来删除这个参数,请不要依赖这个参数。 -### 输入参数 +
+ + +
+ +输入参数 Pipeline 在推理阶段能够接收以下输入参数: @@ -164,10 +179,17 @@ Pipeline 在推理阶段能够接收以下输入参数: * `tea_cache_model_id`: TeaCache 的参数模板,可选 `"Wan2.1-T2V-1.3B"`、`Wan2.1-T2V-14B`、`Wan2.1-I2V-14B-480P`、`Wan2.1-I2V-14B-720P` 之一。 * `progress_bar_cmd`: 进度条,默认为 `tqdm.tqdm`。可通过设置为 `lambda x:x` 来屏蔽进度条。 +
+ + ## 模型训练 Wan 系列模型训练通过统一的 [`./model_training/train.py`](./model_training/train.py) 脚本进行。 +
+ +脚本参数 + 脚本包含以下参数: * 数据集 @@ -202,7 +224,12 @@ Wan 系列模型训练通过统一的 [`./model_training/train.py`](./model_trai * 显存管理 * `--use_gradient_checkpointing_offload`: 是否将 gradient checkpointing 卸载到内存中。 -### Step 1: 准备数据集 +
+ + +
+ +Step 1: 准备数据集 数据集包含一系列文件,我们建议您这样组织数据集文件: @@ -221,6 +248,12 @@ video1.mp4,"from sunset to night, a small town, light, house, river" video2.mp4,"a dog is running" ``` +我们构建了一个样例视频数据集,以方便您进行测试,通过以下命令可以下载这个数据集: + +```shell +modelscope download --dataset DiffSynth-Studio/example_video_dataset README.md --local_dir ./data/example_video_dataset +``` + 数据集支持视频和图片混合训练,支持的视频文件格式包括 `"mp4", "avi", "mov", "wmv", "mkv", "flv", "webm"`,支持的图片格式包括 `"jpg", "jpeg", "png", "webp"`。 视频的尺寸可通过脚本参数 `--height`、`--width`、`--num_frames` 控制。在每个视频中,前 `num_frames` 帧会被用于训练,因此当视频长度不足 `num_frames` 帧时会报错,图片文件会被视为单帧视频。当 `--height` 和 `--width` 为空时将会开启动态分辨率,按照数据集中每个视频或图片的实际宽高训练。 @@ -236,7 +269,12 @@ video1.mp4,"from sunset to night, a small town, light, house, river",video1_soft 额外输入若包含视频和图像文件,则需要在 `--data_file_keys` 参数中指定要解析的列名。该参数的默认值为 `"image,video"`,即解析列名为 `image` 和 `video` 的列。可根据额外输入增加相应的列名,例如 `--data_file_keys "image,video,control_video"`,同时启用 `--input_contains_control_video`。 -### Step 2: 加载模型 +
+ + +
+ +Step 2: 加载模型 类似于推理时的模型加载逻辑,可直接通过模型 ID 配置要加载的模型。例如,推理时我们通过以下设置加载模型 @@ -288,7 +326,12 @@ model_configs=[ ]' \ ``` -### 设置可训练模块 +
+ + +
+ +Step 3: 设置可训练模块 训练框架支持训练基础模型,或 LoRA 模型。以下是几个例子: @@ -298,16 +341,17 @@ model_configs=[ 此外,由于训练脚本中加载了多个模块(text encoder、dit、vae),保存模型文件时需要移除前缀,例如在全量训练 DiT 部分或者训练 DiT 部分的 LoRA 模型时,请设置 `--remove_prefix_in_ckpt pipe.dit.` -### 启动训练程序 +
-我们构建了一个样例视频数据集,以方便您进行测试,通过以下命令可以下载这个数据集: -```shell -modelscope download --dataset DiffSynth-Studio/example_video_dataset README.md --local_dir ./data/example_video_dataset -``` +
+ +Step 4: 启动训练程序 我们为每一个模型编写了训练命令,请参考本文档开头的表格。 请注意,14B 模型全量训练需要8个GPU,每个GPU的显存至少为80G。全量训练这些14B模型时需要安装 `deepspeed`(`pip install deepspeed`),我们编写了建议的[配置文件](./model_training/full/accelerate_config_14B.yaml),这个配置文件会在对应的训练脚本中被加载,这些脚本已在 8*A100 上测试过。 训练脚本的默认视频尺寸为 `480*832*81`,提升分辨率将可能导致显存不足,请添加参数 `--use_gradient_checkpointing_offload` 降低显存占用。 + +
From b1afff17285fe1526fd20e1cbed53a8f7aeedb96 Mon Sep 17 00:00:00 2001 From: CD22104 <1242884655@qq.com> Date: Wed, 11 Jun 2025 17:24:09 +0800 Subject: [PATCH 9/9] camera --- .msc | Bin 0 -> 194 bytes .mv | 1 + dchen/7.png | Bin 0 -> 488375 bytes .../camera_adapter.cpython-310.pyc | Bin 0 -> 2011 bytes .../camera_compute.cpython-310.pyc | Bin 0 -> 5707 bytes dchen/camera_adapter.py | 62 +++++ dchen/camera_compute.py | 174 ++++++++++++++ dchen/camera_information.txt | 82 +++++++ diffsynth.egg-info/PKG-INFO | 33 +++ diffsynth.egg-info/SOURCES.txt | 226 ++++++++++++++++++ diffsynth.egg-info/dependency_links.txt | 1 + diffsynth.egg-info/requires.txt | 14 ++ diffsynth.egg-info/top_level.txt | 1 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 287 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 169 bytes .../__pycache__/model_config.cpython-310.pyc | Bin 0 -> 25864 bytes diffsynth/configs/model_config.py | 2 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 372 bytes .../controlnet_unit.cpython-310.pyc | Bin 0 -> 4997 bytes .../__pycache__/processors.cpython-310.pyc | Bin 0 -> 2163 bytes .../data/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 248 bytes .../data/__pycache__/video.cpython-310.pyc | Bin 0 -> 6100 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 6088 bytes .../RIFE/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 10413 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 172 bytes .../lora/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 1774 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 198 bytes .../__pycache__/attention.cpython-310.pyc | Bin 0 -> 2960 bytes .../__pycache__/cog_dit.cpython-310.pyc | Bin 0 -> 15241 bytes .../__pycache__/cog_vae.cpython-310.pyc | Bin 0 -> 17295 bytes .../__pycache__/downloader.cpython-310.pyc | Bin 0 -> 2857 bytes .../flux_controlnet.cpython-310.pyc | Bin 0 -> 12845 bytes .../__pycache__/flux_dit.cpython-310.pyc | Bin 0 -> 25750 bytes .../flux_infiniteyou.cpython-310.pyc | Bin 0 -> 
3990 bytes .../flux_ipadapter.cpython-310.pyc | Bin 0 -> 4397 bytes .../flux_text_encoder.cpython-310.pyc | Bin 0 -> 1743 bytes .../__pycache__/flux_vae.cpython-310.pyc | Bin 0 -> 17914 bytes .../__pycache__/hunyuan_dit.cpython-310.pyc | Bin 0 -> 16683 bytes .../hunyuan_dit_text_encoder.cpython-310.pyc | Bin 0 -> 6207 bytes .../hunyuan_video_dit.cpython-310.pyc | Bin 0 -> 33426 bytes ...hunyuan_video_text_encoder.cpython-310.pyc | Bin 0 -> 2535 bytes .../hunyuan_video_vae_decoder.cpython-310.pyc | Bin 0 -> 13511 bytes .../hunyuan_video_vae_encoder.cpython-310.pyc | Bin 0 -> 8697 bytes .../kolors_text_encoder.cpython-310.pyc | Bin 0 -> 47269 bytes .../models/__pycache__/lora.cpython-310.pyc | Bin 0 -> 15717 bytes .../__pycache__/model_manager.cpython-310.pyc | Bin 0 -> 15421 bytes .../__pycache__/omnigen.cpython-310.pyc | Bin 0 -> 20428 bytes .../__pycache__/sd3_dit.cpython-310.pyc | Bin 0 -> 20492 bytes .../sd3_text_encoder.cpython-310.pyc | Bin 0 -> 104176 bytes .../sd3_vae_decoder.cpython-310.pyc | Bin 0 -> 2419 bytes .../sd3_vae_encoder.cpython-310.pyc | Bin 0 -> 2959 bytes .../__pycache__/sd_controlnet.cpython-310.pyc | Bin 0 -> 44122 bytes .../__pycache__/sd_ipadapter.cpython-310.pyc | Bin 0 -> 3415 bytes .../__pycache__/sd_motion.cpython-310.pyc | Bin 0 -> 7191 bytes .../sd_text_encoder.cpython-310.pyc | Bin 0 -> 27730 bytes .../__pycache__/sd_unet.cpython-310.pyc | Bin 0 -> 97995 bytes .../sd_vae_decoder.cpython-310.pyc | Bin 0 -> 18481 bytes .../sd_vae_encoder.cpython-310.pyc | Bin 0 -> 15137 bytes .../sdxl_controlnet.cpython-310.pyc | Bin 0 -> 10428 bytes .../sdxl_ipadapter.cpython-310.pyc | Bin 0 -> 7271 bytes .../__pycache__/sdxl_motion.cpython-310.pyc | Bin 0 -> 4274 bytes .../sdxl_text_encoder.cpython-310.pyc | Bin 0 -> 73164 bytes .../__pycache__/sdxl_unet.cpython-310.pyc | Bin 0 -> 268327 bytes .../sdxl_vae_decoder.cpython-310.pyc | Bin 0 -> 1526 bytes .../sdxl_vae_encoder.cpython-310.pyc | Bin 0 -> 1526 bytes .../step1x_connector.cpython-310.pyc | 
Bin 0 -> 17534 bytes .../__pycache__/stepvideo_dit.cpython-310.pyc | Bin 0 -> 28389 bytes .../stepvideo_text_encoder.cpython-310.pyc | Bin 0 -> 15770 bytes .../__pycache__/stepvideo_vae.cpython-310.pyc | Bin 0 -> 31503 bytes .../svd_image_encoder.cpython-310.pyc | Bin 0 -> 60737 bytes .../__pycache__/svd_unet.cpython-310.pyc | Bin 0 -> 216148 bytes .../svd_vae_decoder.cpython-310.pyc | Bin 0 -> 36844 bytes .../svd_vae_encoder.cpython-310.pyc | Bin 0 -> 12430 bytes .../models/__pycache__/tiler.cpython-310.pyc | Bin 0 -> 7111 bytes .../models/__pycache__/utils.cpython-310.pyc | Bin 0 -> 6358 bytes .../__pycache__/wan_video_dit.cpython-310.pyc | Bin 0 -> 19838 bytes .../wan_video_image_encoder.cpython-310.pyc | Bin 0 -> 21783 bytes ...an_video_motion_controller.cpython-310.pyc | Bin 0 -> 2222 bytes .../wan_video_text_encoder.cpython-310.pyc | Bin 0 -> 8739 bytes .../wan_video_vace.cpython-310.pyc | Bin 0 -> 3980 bytes .../__pycache__/wan_video_vae.cpython-310.pyc | Bin 0 -> 21276 bytes diffsynth/models/wan_video_dit.py | 53 +++- .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 940 bytes .../__pycache__/base.cpython-310.pyc | Bin 0 -> 5426 bytes .../__pycache__/cog_video.cpython-310.pyc | Bin 0 -> 4473 bytes .../__pycache__/dancer.cpython-310.pyc | Bin 0 -> 4914 bytes .../__pycache__/flux_image.cpython-310.pyc | Bin 0 -> 23329 bytes .../__pycache__/hunyuan_image.cpython-310.pyc | Bin 0 -> 9185 bytes .../__pycache__/hunyuan_video.cpython-310.pyc | Bin 0 -> 12911 bytes .../__pycache__/omnigen_image.cpython-310.pyc | Bin 0 -> 10689 bytes .../pipeline_runner.cpython-310.pyc | Bin 0 -> 4869 bytes .../__pycache__/sd3_image.cpython-310.pyc | Bin 0 -> 5042 bytes .../__pycache__/sd_image.cpython-310.pyc | Bin 0 -> 6117 bytes .../__pycache__/sd_video.cpython-310.pyc | Bin 0 -> 7681 bytes .../__pycache__/sdxl_image.cpython-310.pyc | Bin 0 -> 7127 bytes .../__pycache__/sdxl_video.cpython-310.pyc | Bin 0 -> 6783 bytes .../__pycache__/step_video.cpython-310.pyc | Bin 0 -> 5863 
bytes .../__pycache__/svd_video.cpython-310.pyc | Bin 0 -> 8843 bytes .../__pycache__/wan_video.cpython-310.pyc | Bin 0 -> 16678 bytes .../__pycache__/wan_video_new.cpython-310.pyc | Bin 0 -> 34011 bytes diffsynth/pipelines/wan_video_new.py | 54 ++++- .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 172 bytes .../__pycache__/base.cpython-310.pyc | Bin 0 -> 591 bytes .../sequencial_processor.cpython-310.pyc | Bin 0 -> 2030 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 844 bytes .../__pycache__/base_prompter.cpython-310.pyc | Bin 0 -> 2222 bytes .../__pycache__/cog_prompter.cpython-310.pyc | Bin 0 -> 1736 bytes .../__pycache__/flux_prompter.cpython-310.pyc | Bin 0 -> 2455 bytes .../hunyuan_dit_prompter.cpython-310.pyc | Bin 0 -> 2461 bytes .../hunyuan_video_prompter.cpython-310.pyc | Bin 0 -> 8143 bytes .../kolors_prompter.cpython-310.pyc | Bin 0 -> 13221 bytes .../omnigen_prompter.cpython-310.pyc | Bin 0 -> 12099 bytes .../__pycache__/omost.cpython-310.pyc | Bin 0 -> 16109 bytes .../prompt_refiners.cpython-310.pyc | Bin 0 -> 6088 bytes .../__pycache__/sd3_prompter.cpython-310.pyc | Bin 0 -> 3098 bytes .../__pycache__/sd_prompter.cpython-310.pyc | Bin 0 -> 3295 bytes .../__pycache__/sdxl_prompter.cpython-310.pyc | Bin 0 -> 2101 bytes .../stepvideo_prompter.cpython-310.pyc | Bin 0 -> 2306 bytes .../__pycache__/wan_prompter.cpython-310.pyc | Bin 0 -> 3967 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 336 bytes .../continuous_ode.cpython-310.pyc | Bin 0 -> 2565 bytes .../__pycache__/ddim.cpython-310.pyc | Bin 0 -> 3999 bytes .../__pycache__/flow_match.cpython-310.pyc | Bin 0 -> 2904 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 200 bytes .../__pycache__/layers.cpython-310.pyc | Bin 0 -> 5886 bytes .../Wan2.1-Fun-V1.1-1.3B-Control-Camera.py | 48 ++++ .../Wan2.1-Fun-V1.1-1.3B-InP.py | 36 +++ .../Wan2.1-Fun-V1.1-14B-Control-Camera.py | 50 ++++ .../Wan2.1-Fun-V1.1-14B-InP.py | 36 +++ .../full/Wan2.1-Fun-V1.1-1.3B-InP.sh | 14 ++ 
.../full/Wan2.1-Fun-V1.1-14B-InP.sh | 14 ++ .../validate_full/Wan2.1-Fun-V1.1-1.3B-InP.py | 31 +++ .../validate_full/Wan2.1-Fun-V1.1-14B-InP.py | 31 +++ 133 files changed, 954 insertions(+), 9 deletions(-) create mode 100644 .msc create mode 100644 .mv create mode 100644 dchen/7.png create mode 100644 dchen/__pycache__/camera_adapter.cpython-310.pyc create mode 100644 dchen/__pycache__/camera_compute.cpython-310.pyc create mode 100644 dchen/camera_adapter.py create mode 100644 dchen/camera_compute.py create mode 100644 dchen/camera_information.txt create mode 100644 diffsynth.egg-info/PKG-INFO create mode 100644 diffsynth.egg-info/SOURCES.txt create mode 100644 diffsynth.egg-info/dependency_links.txt create mode 100644 diffsynth.egg-info/requires.txt create mode 100644 diffsynth.egg-info/top_level.txt create mode 100644 diffsynth/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/configs/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/configs/__pycache__/model_config.cpython-310.pyc create mode 100644 diffsynth/controlnets/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/controlnets/__pycache__/controlnet_unit.cpython-310.pyc create mode 100644 diffsynth/controlnets/__pycache__/processors.cpython-310.pyc create mode 100644 diffsynth/data/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/data/__pycache__/video.cpython-310.pyc create mode 100644 diffsynth/extensions/ESRGAN/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/extensions/RIFE/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/extensions/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/lora/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/attention.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/cog_dit.cpython-310.pyc create mode 100644 
diffsynth/models/__pycache__/cog_vae.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/downloader.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/flux_controlnet.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/flux_dit.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/flux_infiniteyou.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/flux_ipadapter.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/flux_text_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/flux_vae.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/hunyuan_dit.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/hunyuan_dit_text_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/hunyuan_video_dit.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/hunyuan_video_text_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/hunyuan_video_vae_decoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/hunyuan_video_vae_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/kolors_text_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/lora.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/model_manager.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/omnigen.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sd3_dit.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sd3_text_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sd3_vae_decoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sd3_vae_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sd_controlnet.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sd_ipadapter.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sd_motion.cpython-310.pyc create mode 100644 
diffsynth/models/__pycache__/sd_text_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sd_unet.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sd_vae_decoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sd_vae_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sdxl_controlnet.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sdxl_ipadapter.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sdxl_motion.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sdxl_text_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sdxl_unet.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sdxl_vae_decoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/sdxl_vae_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/step1x_connector.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/stepvideo_dit.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/stepvideo_text_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/stepvideo_vae.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/svd_image_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/svd_unet.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/svd_vae_decoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/svd_vae_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/tiler.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/utils.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/wan_video_dit.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/wan_video_image_encoder.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/wan_video_motion_controller.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/wan_video_text_encoder.cpython-310.pyc create mode 100644 
diffsynth/models/__pycache__/wan_video_vace.cpython-310.pyc create mode 100644 diffsynth/models/__pycache__/wan_video_vae.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/base.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/cog_video.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/dancer.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/flux_image.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/hunyuan_image.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/hunyuan_video.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/omnigen_image.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/pipeline_runner.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/sd3_image.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/sd_image.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/sd_video.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/sdxl_image.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/sdxl_video.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/step_video.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/svd_video.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/wan_video.cpython-310.pyc create mode 100644 diffsynth/pipelines/__pycache__/wan_video_new.cpython-310.pyc create mode 100644 diffsynth/processors/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/processors/__pycache__/base.cpython-310.pyc create mode 100644 diffsynth/processors/__pycache__/sequencial_processor.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/base_prompter.cpython-310.pyc create mode 100644 
diffsynth/prompters/__pycache__/cog_prompter.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/flux_prompter.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/hunyuan_dit_prompter.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/hunyuan_video_prompter.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/kolors_prompter.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/omnigen_prompter.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/omost.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/prompt_refiners.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/sd3_prompter.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/sd_prompter.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/sdxl_prompter.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/stepvideo_prompter.cpython-310.pyc create mode 100644 diffsynth/prompters/__pycache__/wan_prompter.cpython-310.pyc create mode 100644 diffsynth/schedulers/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/schedulers/__pycache__/continuous_ode.cpython-310.pyc create mode 100644 diffsynth/schedulers/__pycache__/ddim.cpython-310.pyc create mode 100644 diffsynth/schedulers/__pycache__/flow_match.cpython-310.pyc create mode 100644 diffsynth/vram_management/__pycache__/__init__.cpython-310.pyc create mode 100644 diffsynth/vram_management/__pycache__/layers.cpython-310.pyc create mode 100644 examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py create mode 100644 examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-InP.py create mode 100644 examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-Control-Camera.py create mode 100644 examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-InP.py create mode 100644 examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-InP.sh create mode 100644 
examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-InP.sh create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-InP.py create mode 100644 examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-InP.py diff --git a/.msc b/.msc new file mode 100644 index 0000000000000000000000000000000000000000..eb82657139c9794dbc0b67c1ef44b0bdf554dc83 GIT binary patch literal 194 zcmZo*nYx_;0%E6V)K1apVF^eq$(Yikm6BMJsGnMqm|KvOTC885n5SQqnwDCWnwOj! zpP8GOo*JK?S(Kw!ke5ECha)JpEVDQ>KW|D8!<15>DH%*X%9sWu=jW9a<>$ngWu~O& l>*W@hOvzx$U;5fKp+6BHB_ z7#J5992^}YA}RmG0H^=}0s{d70RaF200000000041p)&G2LS*G0RRI50RaI300000 z0000000RL600adF5C8%J0|NpC1Oo;H1O)&D4*>!L1OO2d1tCE(VIolzaRxFJBSL`{ z!BUZ-v0`#F(cwezB!ck=7BrGmb2Z{rM8X%cbYmrxHbsM?CJ_Jw0RRI500IL600IL6 z0RaF30ud4+12GdpQ6dCkG6iuIfssNZ|G)qd2n09>fCB*{00I#J0}%lK+5ij#0RRFK z0}%i}0OB%57C5*gg#cVgkdLLmk-AXlH_rYX0}&hNUX!Gr__{+6aOaM1plyvCQNju< zVR-~v?b?TJ($0~x$a@7H&GD~QHH1-}rSg~4=FU4@YBt)(mso}Ik<#7l5VQ{7>R^-Q z$mwm`zNl^j7hn<38OJ{h*Y6`=43#Au$+*M_`mN<(%JuK=NELDQcjp+kTa`Q%PxnFd zBB?5#H(2?KN|@u2FjLC9d246=Ta%EWEbvO@U#Zx3oRZn|?XLFKm6c!i@7GsleIllqCV(3RV z<7XnGT8Rh+VL=@*Y1^{-vA{Th+bX~3Vi$*W$`dEj({0SHC4a@pN`gl9r6zyw> z#n_USwo(ZE!}YnFj)K@>euIwpEQU`*GK>zk9E!^!1QFZ4>Z!K+s)}E^rcFy8a(xQ- zv=x<8Rm-kzD!^lb-yaLa$T03cH%R)2#>}RYIO-#57g__BUr~EOv2E2!?!bJqjZvTB zZ{Rmi;9K}-DgK4{{`-`R_{uUea(mm|pJ$R;^h}JRGm;2CVc~TA;T;t`kt7R-$2?$< z18IFZr|A!Bh8P{BsnYMxoDK=;c@P4~z#NWneMQV_(h0%%^ZoDajM)%l!5{+SPEEmA zCgk2zaK}WX`fjEATlD_-CnSMnQPcbE%6%c4KS7K0*#b>Imnj^glpSuQEd3^gR!tfcpnHqSy;M6i6iHO=IAH6> zG5lAZq1);!Y9o%FbsUDt;Bnt3@2aSHDkP1MDU~`-RGjm3WKilF*qpLJ+AyvBg5ub& z+gH9Lmy#qz4qHL$MxziiRGBYuAsDS!C{a%_;tQ| z0&4c8*(jWBP!iTILUxhG zywpcSBl}=FNHU=Fr#&uFgh$HGNI1u(l^d*!mF>X(mgc8)G|_oshZ1)3f_!aXz3yii z!8`ylWRd=g6@tG$x2&XeXN`lePQ2bv+kIUzati2j8y^dFO8Rvw1{F0EMlt1#bMO}D zZTnVPV%4I3I6>>r7telK+}4h?H9;gEKTjL!&-*f|#PphBpB!N4SMI)R42TV$rE#q= zH3OcQ&VRLn@k<=+poYN!>F1ko4*J{)43b8fNdshZIN;iu>uG6qy(!cjVz zA&n!QIsX6w{?~OiQn+jzuDtb?@=9xq5S7(^X9j(?_CL 
zik>#f2RS_WSc?-=Ymb6Eh8>9Y7PcyC=3x}mMv{fdRy_gln_#a{BX1|V$PXU4>H6KT zhW!g^Ol$5`7658)Y~8(S_ZsG>dWh3fa9EE}abOCn31OLPVW_5JTS!F6e@ytFTOOHc zVFF5;RuPOGgNYm9JZ_W#e@$gZ(e`yPOHV9vX;Nhh!=4X~sj6zKhWFfiBT;NQ9P^$m zHryy`D(4FX^ocAOjX+{o)PwzRHCIU9(9t}Sq2OhkSH${RYySWfP;7I!sci~eNfRi? zu7BZitD$|uk(11x+oqya<0m5@*4@WXEgdZ^k@;In=Quh2_HyqLJV7T5Po=XW_t*O0 z3$?$qg8qX;O&EfmqehG`oZd;(+>mg}1HhioHavI~i z*0F^XQFy+jQ=F-8_&3oK^Wo7b*zQ&FpKf5l$AlWXao)^I`W?=N;YS6Iyj}{JEmPCb zGRzW11e|s`UdOY(Td{ilaDQp}bJBw{ePD^`>rdonD>nb?Y*aCW3Ta6rhgw(;rsPXI6K*F;hTy*rX zdi_ApXJ}Juch(8Z91n@KsTEB|**55pAF`w=1o`~D918|ydU}~4FiPTap$Y81Iq|!; z&9a|uP$~m3eoRJ2)13FyoMzfIfetd^H5+A)9>=N$T*UYksDQYx(I7%+dR^Vc2hxvI?^ zC8xuw`bZjxKipWT6DqIW>Ss`<5~_pi?ma(Cb0u2S#Rie6#;Cae0KwAG(boIfRl`S7 zjagh2$ARr^+|IJBivTL8N^0R*afWY?fV2UNX*XZToKCo6U2)^4>!fLS3dIc>K^$z0 zfO5kG18pgHS}mZ+YNd!RXXH& zy`YolW`&L+lzl@q)$51?Ox{62%q<_oqXCs?$H4ozIsUixg zV=4q(>P&qvjxmqc)khc$6*Mlnjr5{6d-_{OMeR~+W|{y|V;*R{!=5p8jju4vA{QZ9 z5sn5mW1M5XtEt(lW@gtvwEEdqV>lXf&U#zgXnmQXru(z@u*PPn%XykX*j}KdBZO-) zF28anS1Lw09NXQ_scTR;1bz`GJYy!%S8r8W`-n8gWNj!)DPtHI3)`{v?_^QWRaokk zNeU6k831VYBe}GJf-|X|W61MGt0#;D@v-ZwL?I+X9BT6s(CfDKv^26TraesZ%N00r z$XNdXHatBnRHfSzslj5N(f0ArnUwuRXBKtvA~N-_Ji5#1Qo%D$LVF<=#XLR~@|B#f?!sGW!O3K_e#%hE^8KZ>)j?D+tua_|xSF z82vMCjeS0(q@JcBAvyu)oOLG3Sj=w39Y!&ndI;m3plQ$QE>Kk%DZb%j z_ZgT)r<1@JD6{CJdBkk0c`yB~VKor5BaJ0>9&yy#^_H-1EAB-+DvZZk>IyjMds`ZY zr`!~>7I_tQYb0djxF@ymU;IE{(Oi_tyt*iv6vj?^<36`4Hf^#xr7W{8IVxK?$?zS> z@w&}?RId}LNcFFr@t!#SO|RYRV5+0`qNkOnc3ex@aq#L%zJD?h>YirZX+VumNpAi* zxzp6tMkAtOhXq0O?`w9-I_fh_@=5JRJAixf`># zZq@^TOA~IV6%wSZNh2H&4~?Rl0|HLZKraax~C!O9{ zQQcoBw`}IsZP9xa;Uw}f9D~6ZIx5}Hq)H5}AG84>dvkY5Mcv(9EgNI z5kjjHx&HtbdRe5fZMI_ce%k^(?ZLK`l|a?U8VyAd=lb1Dv#To0gMrBdAJ4|@sCa38 znTDlpBr#le_+K@BB=WkU%|z001C1x1#M=5vqob@2$197%F^?s9^JBf*wcONbup)MbOj)AeJ#yIjqXhv zq82N`Ab)#T7N;fv-sGCWArD?$j%6Yj`&sMI` zvi_&7*-J|$M9z$-Imm3_3${pmq{5}NV;mcmT}4QxFb;4q53g&YS{k}=+FUpv;|CX2 zKi#jk@(hkoJ#U}(BDkuW0x8)}rVEePo4M)!;#ZDrMi3Uye2b2`MN>>b^Bm)^9+slJ 
zcGon4AeFh&c;Rq6XB^t1L1d90qBm2O1^MvlZxUK@N+jo&;_{Qbn0>T?mykgNu=KZ4 z?=*153{oN1m@GKw`P-8rnUJ(ZfO{Y9ZcSw|hY1#<03XO&y1mYd2n3Ji60eYBp4UYM zMzN$J#*JC&gJoX}MB=OxAH-IV;mzJiXsQU4f(9;`6vvmwte)PN2?_SyRArwB1Jc;R z>f@+zffZN{JaxZ5#%U5((}@TeUY62ISCxi1;e3aJ*d7*a?JPdummGPnft>b)Zr^&y z$4KXsj=8-8#K`!3Y6F}cj^4IyMI_sVf=IBD#@=I|ZZItjFNQJ~UI{tI2c?Oa78nv| zW^Y?{Pe<;`WhWX59~@mV)KI8R2+Yj=xg2`g=4ECvB4|<9uZ_zvrfPWKbX_f^G6x5y zEb6&ntc{}#%jX@(jmEXdy#g$L;>MT^20Qlz-PILw$f!M_Ji|O%B*@gcr`!@gic_D3 z=xaat?c*QrwVUu~nLU)x0OSv+<8{MPA&X4M%y?C~V?3y@nlcmuKtve>(0naK)b(@g zoFT!%?dfghH4wnSs6bCq-q_RCy3#_SHR>Of4yNi#MtN0|k@XA^e|7knSVHPvKse2~ zt%#S?AkwlB4}it1s@q_RV=!uvJb@X3`h0CssA0B2@1&vq1(vDyv7GV9;=tL&`!w|r z%<3DjKyJ#@EmN0OVW^HhE+^?S4J?SXjtK)YK0l>{x>Uj%F{ppb{{Z!Dir<%>qf0!+ zbOJ&H>v(GF8q}-R6jHi&0O?0U+Ur(Yg@N#~Gcor+L1$IU5~0$Z>s<|k zI^F_;3ip{~lUgeV0A%++8_^V-Y-m87w2l7&FY401r1r&u{RKg~{lcdNX#gCIb+*t^ znTtuEDT_{gPI$X1{?piGsgVGX7RGSL`CrmUJXDA&K7vB2@9^(t{TCkvuqwwC!bs%o zG3E5K=&Iro{jGLYNYC?Z_WuBlmqjf90B0|_rzkMTrgQ6lr7Shj&nAJaB9&wLNa>E; z-J+SFoWg47YG`#X7=zCsTOYbsr;W9s;PIY&-?S+4N{8DR2vq+7h?Cyu_Iqko4|i8A zOD0>(;d5gc)58@_FsM8aanv7&79+*(R<4Y&jG$wWOV?4&u+L6uQPCDYO1o$vkmn?r7tRp%~;aIl%4DHZ@2ci05)@>Q*e^>OaozioIE-i>Op2`dtEddFom8>d6>h zo*$))QAO=U@~(3{c>OFj>DVP`E~>IX6b8s*Kc;W!9-g@X&oQ_E0ImN3m5h|^@ldG* z5~KQD=Y-RJI3%9KlXnHLgNP~<-)Q?{Bgy#QCF+_w`SQN+ow4I?tLf?|RrZ$^9BIkZ zseLUiU))CVtgwW5&z|S<8jS=De$X8}0nNflnsoBSIvXCoQnX{+;2z`mUzAb|sP_TX zIUbhPBSj*rN-3$)%gzR;;ACC;DrwgoaUZSKQ5!u(Lj;6xr#)>oe&GcI2oVms^pJDtEIiazMq*zxDedvQlMX6; z(^JYjDi7&w;hAcizi;QYB11GQq>=Ej&Psa+R*+YRLdvfsF3|! 
z6OK3?ZA@}i`;Rh2?Uu*^Ngezy=wYecD^uLm>@oO!ENT8_w!RRJcreSg%crVw`kPxG z$nsH*C{I7_WwjpQK!w>t>KH}f0d{HKz0DBJh>`Sc3}WN<8bPVHiKx4&)Nn}0Z#Hd> zR@G3ji38*pQ`3NXwlHo&hL%^Wo+%~Mz#qeEX})6nx@jVX4F3R%r+|A8fV-jD$CKDt zTS5(8yMM<0qxpJjsAZH%8nObyGH}g-o&~9=s&sY25;NO2;e}hwlgF+w2W;El_;70( z#UsT9I}G_(68`|3(AYby^1g!;Fu>;nkLPvMNl=nX&l4;x#513#S4>`pkrtb^I8u~4@05rt4kjIy5Dv=UDsmDQV8hAYQx zUaEpUp{RY?UCmHFSjVTw`rfQkZPqZ{rjDMHMTL0B80Uf_SAhW-RET6>8XV9Ru_C=#=v** zx76@0Eh<9_;or?8bj6~m+bSz2kU+lTkL&AwReVBC@iWvLY}L{Q0|Ti(mdktE;oIS- z(Ajp*q5JK=-(42pAGa!KW&U}ww@uXUDj458 z`c3O`p{ACOC&X+?#(3wy^R){MwJr*)Fx7@&G6l*n@m`vaqzFt@!Gw%3aJd$qp<@zP z?Q+pPTAnu&45>q({*dW!_BmvsXqodQ4E(6>rqb0?nmjzR677?N(&(kEluQVa=~JB@ z4chGu>qly-+^7|>HOAU{4_8EuZp)I2&vl|Twp25@xE5-S)P>)L03BFI3<%E zRY~IeWnyf5Y$R7n6S3|!YGe9dI=|<{jeVW6ogqP$*2)VovrnUFsit#)2x6s3U~z-r z`CY99r~X@?(BIWHa@AEmOXJ*k)<_r`_*=++VyTZvH1NJV@r?ffwUu^sKE;w#v=J*Z z70&}UT?A2=4zmAdgE~xG@SZQo{4;zmFjh|!DksaDZvSJ;(IBC6rzY2L1DX#K1OgF4?$8-?ifh z2FEuhoyp)`S<*XSp~fv|V4ShO-%cwhNZFPxXHK^IS3OUKx8D9*ZKN#gRZUq=!~rZ# zo_d0(k$G2~guGNGe8(ZQUuNSg6Y$dMb=&9XJ+J* zdivN{V|EDgxw&(q#Cq$7)(-^c^jmK1Qi@7RQC>GE4f)jGel*mFKDT0x9jsLZIX2Yt zjmC-^Mb+*j^PFI=Z?kNPN3-q8q9|i!1)0hM#k>i=1G3ipE)@=$y}_mb0K?U9bGmK| z4KLy~RF9`RW6qrT_*k{c%}MCf<|@tWf2WaY<_e(k$W8}KZDld0FhYzI@ivL%c@9~bkBKMy-$XXfh)jk3e%ylD1D@j|>$|kD>3(SH zZJ4h8#mfuO+^-ddc)u}rk#f;^Wk6kuE@5tAF?d9D-HR4(EOhwYkx0j(7mGeHFzwCN zi#t=OCnCkcCfLY>i;;}Wz#QFzJX!F9Cid4FwDIF`P{(WAY0^2p_CWH)#y$7VwZlVC3C5J~23k!F1_GbVeQuSE zkMzbZQa09({P#`S8y zmPP|8ozIE9_T{w8wo9T#Jn|2%*Cfeoswo4=p!GPtWj%6JvXp%+ymNVaXctI#3zgV|3By|>i9)TYBTL9d?Vw#mAD;qRS z*iqjHrfzXm&r=brn@Cc+BaHWF{r95TcKUW%VW~xvMoJc50qEYBuMav*g zG5-J?=b4oIIh)pGD))`r2;z!Zf|0;}dDGtX8!qQk;bxj3=$J3?xNZ@>QXItEP#p{h z_qH2G%<ZjB6y$094M@XMxB{XK4G8Ad&K9`hD zl6|@XyonxHsFW~L+0~y>o}F#%0Un?{%0OidanY{oF=%G+3bS|irEPT4q&X!nEkrT~gdI8uC*zKnZ8a$?4KPy~KGrWsU}LNTeP&Y zFcTmMNR8Z_Nsr+dKZI@;4)@5S+=Ba{K!6eKBVgqJ02hs{q}!>WKq+GY#&sOAEW^{2 z*8NS#Y^j*FLpQz#SpGqu_p|o;557q%AgKw0a0u)_eb)wzksZCMWr|5=Z()NFiro*5 
zu&0Jslt<;$^`kFQ>ta;HMLaRZFPMz9nmBQ%*4Pso)5kqNRvj8cla2+AY6-+9DwyRj z!KKRD+<2{> zRuhO8ExL|6a(($e;km|m!Su0ccJzXwR$|g4x19P9OH(fUJ4hT;s|F=kM+fX10+9D;eS)X?`Uc$w?nxa`cMpHl?deVfzCkn>1I<=R2b%Z*vdgG4|K z=*SOo-i$NE8YU2uG`CJarLfZm;U=hkzJJsMs68yVjy*9&GAz+{hfo^8{{S8RO+i5n zjIqoTL){ei{npCHTNMR05Z4mMayeEyfcOi$?&5`XMH2bGqtp3X>hT`l6tT{x1HT-e z_DvaG zGP66zMu%kpu78EN$h{Ddl${ML_P0m-V)CSH8w_$Z^&FhovnkxmO-v0uajpk$eQ|1O zK3SJ+ib5pS?hFy0J6oDbRthCtfr35*C|Z5n48+wn5qyNq+sA4dQ-0U(uI#@J2byStlt+HD>#(2%`DYsRnhwXi$tlymdJo?xV z3T^YNmj$IfX*nF*(i6dDEaD}SC2wa(1oz~6X3eJDBKMdiXwB{N$GJc4YU=17r*vIF z#|n%-lhoYNDd}gCBS(1$3Uk}W!M&KD+}A?Wx+H3@2xH;lc5T)E%vwM$#N_*(f$*?u zsp_JDyEl=WFXqYQ{{Tz&lu@k3MB(s(f}WuN0JWLvhjqKvWj!#$LPVT>10&;k__q~Y zd&*gJmKtoT{{UumTujj_aC+m$^7OS2HB&1}qMkP4m~$tEiMl44rTfN}o}qq+10EoB zvf1e*MUkmlq$3K%a-#C}HQv1CAv0AX-C zYSZ+~r%>SF^|iZcNdwcP7{`|WtKQO?2@x!$u5hEV^dqhM^)&j9w#ydtdR^4e)4cxSrjYT&{#zu- zH#*F{R{BhF>DyvQcGd1O;DsKReJrwWEGlGd7|sX#Tl)C+>EcmQ8y8%K_V^h1Uz90C z6h@)+j{203`Zu_&_!TTbl2W|RNbbWIEx&7*ZIj$m7_sLZ_qGSy_kRr>j+hsFR4i_R zk-5#|GEz(+iRAYARVO&-i$kF=ue1u0gOSClnmXrAX;2p(*ZuFthPFmj5@$F9+0*TH0!5AAkE!xF!0CCB ztXoS>x-%(d5-0xvt?;?-TQHMLv`mtIW58d4J#SAr1KW-vr5FCQbk##8Vg+eVmFx%m z*(0LU5!CEIa}0lsT&Ir~`kE6>3MpR77e5P?9^dSb^|&9D^LNcGs-apk$?Br(LM97Q z?wkb!Pg8!X3Rop}Ihlr4ah?Z3a{!cPT$Rsb++7UQldA_Ia5674iNjK6hgl9E_>Oq^ zSrAIPPuv$N{AT47d-UGfAUHWZV*)Z6a^?a3H;axA{j(MD%r0lNNqNrFTZF*R^#*%Pse|IzLO)3zE=bRr)9-@v$ zGDwU7%^gg0A83dVisUx0$OU+PLG#csI zHDU9Da7KMIdf8)FrU?@1ml)NazBkV*c0JXr%ue0#On}*2)Kwc{Q}xG=B*uI(ae`O( zNm(=Pk>$W6Kdp~RL}+PrODY%qr#*+M@xON6tsZ%*M7o@I=g2%rvC8a7KCPHcl4==l z71~#oeW%C(s14Y6H=Sxa)|IoR^)PPzvD8`SrmhB)-HhpQtE=+y*2u_0ChK6f@Sow)KW^zGHd6`!=UMPEj)&%O3!vP8T_rm2lw4n+tk~PaTwxN z2bRd>3k9~S+L`41eG#1LMeq9Dv_k@`ta_Q9nkCSg8kBeOwtGJ16+IlJG|i0p3E+N~ zwpu;YX+GK{bsGH5lgO}s;8B+XHjvH91G1mcSYMDjL4$CWXL&fpnJPUq>umOo%kc_T zlHmOVpuDv0_RS4Lj=OS=r1vzH&$tld3O|Q8aDpd; zdWz^D+G$@@fw;$f*b~-My-b1m)-XDp`dT-krw}yBG^EGXj!8bE>y06i1CMo_^T0XJ z^Re8vCYwWIldAh}ji<)^WcxW$ytp{ehHQos(Z>=?uT!p|A3Cx6*aB6rsd$JwW5%=7 
zHa;|&GpNwkt5s2>N0b1Z2iP6X608u-u4u7MLNhN%w@RC zj!(nF>RrF>o=qrwePi={FRRQcng0OWG_-QYB0@xXV&wg7KXtgTmS&O&BjtD{I*;FR z_Ucj9j-!K|6W_w~gf4QKC1JL#|WbpdTAc(*~%&mei<{=d8Tr`Pk6y#;uut;q!y3fyu|E*dDeO z`*_`DQ{NswH+>SCMrI5Qo!Nhcc-VQOc|8Danx36rMrM2i!32^$I{yGGp35J!lgm#7 z!h~bY06!dbwhdLK3o3hz&H4=bTF0uMHpiSVP(3XClL+Z&)05It(rVAV$HU8STB@1^ z8RUN}!-rwmoA`&5yC2luc2DCqJD&8CQLQW#`c%H6{v+XkZ9Ac+*31E5xH&u@{I4{U z5V{XTo07?;i65<}Iw$5lmbO}%8QGL-JP$#8B%_dR;Uklm8nf}fb1BkMz&dy)`XvQ? zbuh*gAcY(rn7=>Qpf%@)E$QT=l0}yp)!#YKjp?@7D59tPsg4h+w0qhA0J=PLo^7Y> zzrUo6yv8I^lhJdJ>u=6^B2?hRaHliEjM@w?KuJRbf`7wq=ssQ8sVUZay0&-z1YXz8 zX(VrbV|klGH)7siJqK&hx6CxOuE-R#DE!>1zUZC>VBW5(IchzsR5798GYoO+ZuJI= z0Fe+J^2BvMxx2Pcn0859HKH)s@~{|RgI>Z4uazn}N{GWB$a`khr$v4+9saL@IA1TGN5;2i} zyiFc6*6%#x=yL7e_85qAEU|_hn+y&w)0>ti6v2ILeEG0@TnRb5nRUh(cOC5v^V?pB zH}OlUP7U7A#j_VA9^^h>uRIT1V-C_D10x+QqmX?^rLU&*hUh>fSxZwiiKWklj0Ow! z=YTrm-cZ%gLzaj}PhDUfd@V{s6}$M^its7Ja(pbf90snYYPyDbJj}2cVym|V*$obunD}3^y)k6=i+JQ*s;2>T4x-_dv5P|yX4BWTJ7g#z zj1X)K^j#vjpucpX>`w_>varXF{WQi76vo zf;q7~&=2W@W2ooW{whVYj*>KI_lZG+r`JF2b^BU|IV5@uG~!20wSlQc7~4$E^X}pMa~6fuoE%+{#$)Swl=8cTB%2T zc2j5Mj;UObGi+6r@2Mm9V~sFfxI%3-(7X)Mu~sN?mF;fpK4Gt{Pi3Xi82pLAKO9;g zY@GGCroyZFn~U+0fxm*2cw9*<5q4%n(%D0{J0Nyp9(e%u7ca1xjuT6IbKd3B3ZaU* z>vpNdp1Va`=Tv%rC%yXHHsUd+27LC@&FvrKWl+k2<17II!65cHJ#D7+VOqH6rcZBD zFONNOWYYYvxU(seneZ@5gli&#lFJ5AuuZR*lgnhA^&B;fEL}t&1HZ%MNPcSza)7yLq((>332_ zl~J`eL{cK)2jEx`R!tP98BV2$>1=bKhlTUMS|$4umuRF#F|L#k7(9S<{{Y6urBAVe zppQ(Tp1fO`CXQgtU?FjZZ1nXu40F>6%4t~&9F_->`dm7ye@U-Um?fc!9XO6qn#kx+ zhsNnm&>2=KaIna@4cM!Cp9`$f!MtmcAK73q6nEn48+tpUJL~rQf#K_Ao{$yl!s}Zx z#E&SIGC_}~2Vy=pOe~OK#t~4i8(7XfMaHSp7!pAcAe?8Sk6TG4a#ATRnN`UI9{&IV za_YCBkJ*l-i4nE5X;i$k0CA_pST!4}K+y^sK*7c(A#tZZJ!~3n{+5_KRDK`1hAJcHXqs6QHzcn-EDZ5=gLRMl`)I*Ak!lYrhjI$u=!9bCE@?j>Z77$M3c5A{@y zY<+X-d0L9S6Q0lQ91?Mpk6W@zT7;%D{m9pmp-*BisdgD8f=a(}S+EHTPd=j_wmS6F zpV5ZaZy#|*QYqmPlNw<_H{JP)C7TsSbw=2%N zLmYiMIOs*!3+@|;_M>0Jj1mq9>FH~#!!0sX`@YpzyjntdA$kCPG2b>ttf`<~x=E*w zSm>&mBr=s{ay6A7nKN&ok9nl?Q`gjL9J}zzQR#1qUBD%bRHP`8`HG) 
zdyIrhGe(#MVL;Es^}60A^2AqHQC+w_D_2BaV|6N5Dy+k<1+A|6LY&VvRcDdqC8Jd* zoc10#x_7y!+oFsmOU2k)OvVXk{D@R%A@L*KD|yn)}E$G>T01`VN)4xAg&J{t?6Y{Q_-=NR;eJ!+L0^tPeXg3T?&KcO5$PIPT;*8jzAre`b^EY_XzEM*MZAi9 z!yE_k9d5dWtf!JVCUDv1WF1KL@w0i$E*Yx?5#L9p!D$1I1quy-xWs-P?}{uozMh*W}}Ja^6YxzrV{HPKd3 z)6~sE5yNXEJ^39xZY>o`G4`fUGjdr*bpHUo=PEu~+$okS#HbSc(T5rjAfD^%n~f|L zP}MB5yr)RX2iMaUrZANzukPfgmEcxc5@F7JjyV4Sjjm6Zsir_$R2gCjY;byG;%pi` zR#i0h2`Y3NJO2RX>w1@`p>{~tPzzv^eQcN-P?`;nmX=9{ODiscrG^M2qTO}PBp+^+ z5lE=YK>%_Ot^gc*TMEdkAv$5xJ1-=6`hGVW`KOwif~px3I)rj0Ps`t}jjbz)6jRhy zd+PB+Ng{x(>;_#AUV7Q_QmoW5MegZ|Gx&YObL-=GZZa(4Wtxs7BQLj@+sXrXVgc?i zFHv6mJ9`lS0NzA0!NzqDQR*>auQAq|St%&0$|QiocvMv3TOE9E2@I5ztScr_n@cZG zjo?#!tK2H$(wRl$kM@NN&7pnO8P<3NkKwh)uqj}_aiF8wWmZaP{K^P? za7pirPf@WZ=OUQ~ktQ%RfS~ojwkLm+bD!-bjsflYS(pxvsf@lY5?vNI!D0x zTCRpt{{Z@b8boopNbXM^w4(ZV=N&AKvuv!{y~=nPR4F>L{W0r)K(*0Fh~kjz$dy^R zIQ}0)>2*}rR7D#}OEHARsj4^gG>luG{NmDLY;K#iM2(2!F}yrV8A^gjRrMA{RJB#R zXmcS%%Q~K=m-5{owW+HmiV0Uhbo@tDInFr)#N8+Gq<9p_V*db@tnwGB#niN1fvtQM z@T`-{X??G#4*9TmSR#g$Hs33}(z4;T`SkV8=xVmgDJkTHN|Cid3e3YDy))MHHCshK z((!u|8D%4fFO=2SBc?3QSz^7yTIz?TiJ_I;CJs4V5PW#F(0}p_ zG``q)e-lwS;B?~C(frK=QV5}^bpAEfkcYzVsg?sz8&act%5^*TFa8sEC|RJAZ<~@< zRLCiPz%sc5BP+&!2IUn)?;BjnG|{BbtA^u`)6&{SyhB$>BqNX*l{+di-yz(XA4whq>uSS7W!%;HDHD|RN z1Z-uSpkIZHE9R+4Q7DUm-M%&&))kySzd7TMx#)Z>xGH0hDIioiCs5}({?<>*X`s1d zZNxh<5yc)qpT_bi(GAZ5gRM>3Xo{@A=#2g{Wda=foO5^BnT zd;D(G(1w<>6$ewq>NB6GJ+4HMVwFiy$FOO z?jSlm(qwXR*c)lLygQvpnxW*1Gr-frkJ9=R!TbuVV)SmhPO`k?oSzHEZFOW+v?7Gb zBa*)Bu}weP`g0^vN<>3a^d`ic+JqN+WFT{^1B)u8$`RGT@=!D)oU60_VB`-U8%`*N zMNBkl<y$ zgbw6=M$M;`!y3fv$4eSIig>0NjhHV?cQ$ZYs~g?+^hQ3M^t2=B!BEQ@p*il|EPA$< z92s697~m?NN%&aul#R$&E_2*{KMSqG$>wwoqm?GtBs~j_+>As*6YFqwsa78D9Q4!8 z*$W_t-qYsK7YD5N@<(%GcdXl@s`|%S5jD6#x&3WC+q2TDfb0Xd7@x{LZ&uO6Fx2WB zHY}7>HF8Fc2_a5A560diSU$+!^-Snw%9z9Zj(YUAPfbcO?g-{~Jh(Xknm}d02Oq;? 
zNlKLUPfZ8_5rNNPY4;tzYDj(0eU6`{0qlG+Xou0Fj-n^m_VB29`;an2bnkBU{{Y!* zVfdwd-y7#U40Y7n=a9M~JS%>4<0yxg zZATU#y;g@3%@U#DA6sWouLV4e%c96;#Y0AnB~miX zy@!R7Dn`z}+Y;z<1&Oo3kx1cAR0Ev%w+s?!oQ3w!M;+{Gwod3RVfUk5JUIIKv?))n zLLNs%Jr>(7ENhbU5Kr~CQPWT&gYR?W-phu438^XRk@FdHGstY&6>!N`LPB)l=E|mj zZK?x>(TuOIJ#0ZYvT}@k$BV|$dU_U=GE_i{dOsPuq=h!u)vE)r_}f~VrZEufr%xff z6M4F%p0EN7VDpQXw4Gtm?iDoY<%&NxeA>C^t&cwJ;)+P~s!d}cWSk#QOULdTe&HOr zhDkkn=N=vG-KZg$C9PjUYM3Nmu3oZ~)% z(r$GN6r~B}2p%?lTy>Dt(}tB)#zA#v$kIJLZ#DB`INVlv!6_gO`uqj#ujGnmj-I+W zrGb_*G2Hr_)KpT9&YALbG>eag;P$EKokN6QAmvY}y@t`bNb16=M0h-WF6OXvW^-@( zYOijoc^KgrI6V2kb?QtMuS$O7P6#|6-`3|o@&!>%r$y2F{?qL(HW5w~LBQ2j35 ziLrG*?Al5;#H$;ZAvqcSJT6fr)bgyF^%uqo^t?rFB#9^Ck8wnq$mz}ODeDqQL&^`~ z=>QJk*qxH7XHiQ_Sy0tUCS;9tNDE+R;o8$IlwNZWm6wJM`LXGnlzQWO(VS{3apCa0 z1XPsh9i!IGfIKsI4X}%`C0z|H-e-`XaQ4pw!sr`++e)va+=zWx8T~Iu7TB}O%cnvS zsBxSVWiH(<94uAU-H#!?J{Ianh*K0)>O_i?M-v{ai_q3nMk!2Q#oi{krGRZJ=!juD%-d~F`(xW~3k#aKMJ z<4y<%^tSTH9Zb=|k1MetgV=m+4)OBvrK(UIIet^0F#iBGvXV#x_U%=5N=Z~p3}G@c zrGJ%`Pf!I!vO)N+-IpKr#hXvJ#qE(*O{sw5KybhGEd9QXDPP>DKv(mV&IiWUCPgGG zqnmPur&i+}D+G$+RdKD4M&p&aQZ%wLbg1@|KOVh}jl9*0n$VHtqH@R6Jq4_zmXee1 z5#5y}?bPb<^s!hYPgY}v1ZWR&`uH+uQusv>Sr3jiOcU*T^9nf;UTF3a3>Qa8J^gq_=lWO+^RlJR%NFbgvN59qnG6e&lC65g& zvnsx01Dt#;gHufEz#e1IOm?ue^p<#i;~M+T6ZZVSC7*Ar@y6zZFQev?x;}XANBFO>ndidK_jk}>4AZC%3+GL z@Wx`mQU`}~X4XweV~tVxV}cv9i&T+?l;zd^#brbvQ9rxJ{`Q;Pc7!yKFJqo@Wyopb z#2OYw-M&B;0JF4Hb#;5t9VKdt-~e~f=4erxc#htK8B_whPOpKN|?->WQR z*8NyL&VG?`WR5PnuW&{hQ&ZG#Bt-xWd*JbHDPXPJ>cn%eu*1Hi zZ=KWb6%iAI$M!zA>+Y2@hE}MRf#_P)#}E!X9DLER#j%*QREgRCpo|D#i5Hwwj5Twc z80vG&GY=9?%mSkAAkiqlUXgW)8BSy1`irTJ#a~U8N(qRZl1~2sTf2NoH%PM;l=r`k zYI)QJl_Wd|0_S)81oj~Qx97Ev_VAVr!t2G9>5HSi&K5-u4aA(1Z{vPiFkECCvTZ26 zxi%897C=7iY&x$Z*@`=2=k*JRadyC(&Mi{tMJ?Rju^tGxO1d16ON#JsA)XHRDL$*U z6cd}RRJ&ScFQ;#9fi_#pp@Ha4f_%4PVfZ#TrVE-9PFMP0hyrp)J~laKPR8J5E!Ot5 zV7k~GK^)j2SmVOg0c<3!mFWdL9ICdVVD-DxA27woB-uql7vjbRqFV;V`;KnuMv-&_ z&C#?v6QCP}&#d#vxD$+c++-sciD>y917wedurR?;G9(!msZpD-vQbQH&la%DqSxEz 
z3zP-@abYOun`1#&G!l{+;|I!n8>*^Bnr%Ts=eQoXhNv4iGr+m~S;)EwVa4LKi6-G? zh4OQLDs{9b$Y?rlK^P0Sl1S-)S{9U^ZXt4R++V7vH$`bU;%99rrH`PyW(t~d`uH0+ zYnanaWKaq1f;b@ev1-&3&FaMbE-8cL3Jz_QD^98zv#46s3_0m%!z#<=D1Sk4Z40ZB zC6y{;LK8?v9wV)^qo#(b;teXs7=XpUDD^j;Nn}FN5rxkH+iGNu)q^k|neAbJK~Vny z=5LjgZ#2f1p=*SyPFI~L^yzsj6D>VXwp}itk-R-^tTj7@6-E>|WEqS;O0xQ>x|P~` zxhARwN#v31{?_>oQL>d^cv@<+?V(YEs4<*)-ja`OtleEDm`M~=-eE2eVa79Qc1iS6 zm=q(1Ja*KC`d+F^{q~1yYC#lf5$D}yCA6M##e&w5^`C9D6sD@KK_f`abme#ec>1;I zB8yjYX>wRx2F+4lsT<;o9X)=#O3x z<`=QkWSxSr^|BU{j*R9>42{^PI8%KGW}$hhBmik;BO18yBlPvV3|oAT zAetD<42E&fiOL_Pk6+U2UANn9B&=+_^|tdR8Bca;V@6*qvhp+An-xq97)2;}nRD)S)#+m9 z+0W~eQ3VAIG*P8nBP67q3chv6t&Z=r)5-WK$_6y&``n?Ss;h;XG*YN}cO-u+E|Y5a z8x2Za;PaD)@fOl-;&-CtiE3xgnBErUi}-&Jjn}zuNfQ_vAP@i(2ao(V?!F;S5=kUq zZ33x6KtESQ@xFL!B9n53Y8}0m&Rebt;M;O@)M=<5;!~z$nlRr#)i6-}RR#Hn!BbqO5 zlc*Yj_%Oa_7zWTBLJ3|JTCc;KI)NZ*9)`oIqM)FZ%C5z7ROtb@^%gODCHGh?stH&dMEMBKz|UJUGD1sI@8(Ql z4*>iJudSC3=LX=S84!ZLVbhz*4JIg{6~b1ho}5mgx0g68tFvRS4UazESuFC;R>+aj z#J~Z-a@~*lhikiYtEZ-QmN_3&VReJYe%9Fk0Q_Q&;f`G;N*T3xVccN$=f?UHU$D52 zT~oHU;ZN^|X;d6AZ>cQWAD3m46*~U_2pon#R~;{<{gpmaAqYnwQ;(&_1~NTw)P_2F zbUG@=c*q=ng691qdMyf#H?bTNw1Jm}bz{L%cP#aSt0<8exEMdd=mpieZDVo<7%E+3 zgy$o+F1^2PtD3h|a~&|oK@a(~j@a?_x%2+vQ_-~gBaPayDn?mEIAA#^xfX0@Dh*^v z3~T{yL=m6uY0QaPm70rN0NO;1bTJCU0P%uX+1lB`&YQY#6tx@lAZ#k#*X^Oq4Dvu#_sWL=?w`5ROO2cJvU!m8@VAw4QtmGx zXtH7%R~%|T!{d1AX#!KisZTs_5zJ`W^N!@%TVKqgYN1Ue5=2;qUn%_wH+zFjHkzH{ zkPU1TLl%twQ_{<=+^XuMq>hf5@C z)A?L-)^1u+Hr-tmFijV-Wrfb9lh2>_7Clu|%-W}lF`vque=BqLNbyb#%(@gf#z`&w zCe}=lq`_p3hyx*kBm#YJHb}OTo_QqqDa?liH@4>^^tW}JZ7$^t6j2noI`tlY81=gA z73gR~sR&($mtOi$hr<1ejtREu-`pZnKMnPAN%(cR!*MExNGK^|h8Sd7g6L`U#*@=e z@9Aez4W6kad9^pSg?7s`hCM>(kB#77i*P>NA9CPHoa)X;$H4Tp6zx~DRXB*e%b&y? 
zoS*QY_r8xZ)U&MALqk%iV_gC-C}smKr19hNvQFG5b(wVP$1KB-him7%eQxJ3k~28g z{{Tyj<5A7*tIy1hEM}W41SqOdjB9LnzLfARps3=^-o!rcW6K?1Lmxy2*-IrmQ8URN zsg&S=I{IFCZZ*!n_);oL$ zji-hdO$+Vv1&%D4vK6w$d;#Yd>g`fZ6qNGA?Z|nOWntt)>-((A#d$O!kyNjoF+Z5} z{Vuv^o)~8LTU$BvFgePL6&P1VuZ67%%iyWn*a;JR#(!+ zDtV;UCuC2jk6#;Uwr)~x6qPB=e#qqyC-fK2*(|;du8B0auqAfMl2YU!^JBH2Ag0?X zIndo=Vw)bQJRlv&#K=>PKT^e7=;`brn9u@+-=u>LUsjI)Hm08`0(krxN|r zR7)DNDbI4&yQO; z$rKxW;7sW>%v8tep7`|5s)ucrY1v;m#x2j%{`%UI%;_y8dVLPnI(TV52D;YfK_3Yn`)9xZ3K+EhHyQ7ENMPb_b6nk4CZtr$WL-@ z^i5L^u#qvwPDiJ%FUWU#q#Iuiz$9u5Kj|M?WI&ar@6;+6U11R z%>=ukXw{Y;J*|IH#~C)-vvhgb803%zLNG`4YaJFZZZ~kOXjuM+e(NnIVZj9mnfw=ssL+PNT>P<&Ja3xR+v* zLZ{!21WVgLUOjm3Za)Y$7-$>j2r72ymA=gpvr2G(v(3?Ro{sn`SUj=&#Na@)1; z7}V0K)2r(vIP2cXr=g{x{{Xg=N4y6t+=loYt;mUhiy^a&fq|#3-OeF2759Q?iXA`m za@vk~2iDBo=b?p7Mts>+9CM#fh2?5$06PXBhn%0~Vp>9D zc`7O7VH%@P?x4|)Dshad7M^IL6Dz{3qepUa`r`7^OJ5w!$Yyj# zIP3r)%I}Vqnso`JPkn@P2E=?3{{UU>Je4(1Njkbdae=wd2YMg`U-g${2uP#|Qi(^0Cx3Pok{2 zPX1wokM_6pRkTq?#sjH{;2-$hOmP&f9vF~<+JkBD*1#H(iHIDxa!()1{h`dm+c{5j z>Th35vU&Z7_LWXoyIJHiWSrBkLXrsXPpyL$MDDMaeJ7`fh1&77s7M+0zZJwujDEhC zLTHzsFg>&fS9U!vkG5rhb?1h7fp)sx>og6JEXRYLkdFFbeR#M>~@ zmO8Ml8VI!TLBJ>cR~m#!2(^2MDe79Kylw4tVt{7Ps1ZFdkYlS)WazEU=Do<=Jde3G&51pBw?sl zNwM6Iq*`^|+J<~O6DI+b^z}Bvo@(hq5!AIPJm^*9`Q0=-VTz&UoCz6$^B-SZGhG&u zDv=#qeb>FCju$sm!vLX?wU&1C>SNG!9u^H}+G>~?4xq;hy@~X<^e;nON|4D8ohzYO zAFKRLW8!BbXg4priAw>cI3A?`0JYrvni!fP2v$Ca`q>HcX6H&CNSZ0sgNTE6^dq-E z7mcfqzLA^Ujg+f-vyR?^>G>uKyv;Q%)Y1ouwSFkkp2Iglpc>A+28$sWdRhu9=_i}o zL?oPP>7@GJvZ4uTsf>ymQ;#w6xN(3u5Rfb|$0V7N`)59#v*Bk_`_fMg@erBzfelPr`9 z=dfZ){d@GiK2NZOj1mS%rJ-7y$jht4<@|(q$Bmo!3o4{cSs3;oyEgdX)ZutV)JBl) zVD8WgMuC|~56MtDTf6P&!S)*u{6yy$T%UI;kGPmR$C2Cno1|v62vx^9m)jtW1wOaq z1Qh(YDl^}k8TQnu_limF#WdIVxwWyPAijk61&J^;=-@WPSjAE8aK+vQDh>W{JwKhV zN@w=VOXR`pj^3o)rdfA5)7pP*AONVm-s4TVsbyA`;!-&sv0(ZOi)_{Z0Q@jI>Ke-O zMl!5!*(zhK%6-0+A4$`WJPqgPfy7Y0e4GN_hsMfTYF1@}7Gl4ft}mb`92^IigjkR& z_F}m|&g_^plv4Y=VDZpf7K`oaVs<>XGpp3t8;wW(xQ<;egl7nOJ{(*yRagVI%KqUA 
z*eT7MTLoAGU=->D8oe!b9a^gmKt-#{qxW0A)~Xn2v}Qvg>~ZBEsk!7Svpu*@)Y>0H zg2PMsb74}emG>fI7(P#cH|0eYbx>V7D0-8d_MhIR4H`PXJdyoH?$}SGM}|3CSsmG> z(}i61{wrS8%QL{PKsoN`7CGFe6-reC;Ed|yKMQeH9Ho{=Rp4q|_0NUZXo?bUG!sE1 zRY3YtNCih@@UmjAjeVfRyFb(iWw9$_sM0;@zP?dF;{&DNT0+s&x~hWR$Q+wUl*bU% z)6>OJ(m9>NJ^nRa{S}g)YVi=M=4MWka^YHHe7GHRZ1PFRNyL~3u#ZXysGV5$u3Wn zhUXWgwIcdj#->v>M*wsZ4Ok z3aT){91;hGo^UcoiCv>t^C%3Ofjv0l^0a${31|1_EshW$?`{P&_4MkjS}75Hz#pr^ z^Rd)DB|}9tA}oGw8jJYd;KOODTBlrzA;CUvJ#C#m1vDhdP)?j+1Ho_B{iUytPi#aq zbNIYtBR-!CZ&$igpa9Ytm;AgB=j&?>XJ>kNV`-L0Qk+MpC9&gas<#6(`=(5?AUG$_ z$A!FkAgNba(?C9>oLcIgwuVWfnH9a&TrkI!{N$-(HtiVi<~hZ#g9)*BiK#Ce5VYq3uirtOB?8l?03D=?Q}HY zQ>w|Voh&=&t>o&dsbhwh-Hmc&4cDKEx4Wf$OsPyF&{K^^kLB^TRd7q}0pBC9B!0FU z2ZSmsB%Pt7jYLp~)Jl%$rcb5cQ6#DebEqCYO`)NdYRQtBDpT;!;tY9*KEAi4l9o1Y zFwWjj^-HG<;oqBbVN(~F`%waT$pg?_m)cDw7fY`M_}-Q(ih3QoSm~7|sC?#KK4G5V zdJjv=&0nbmMlCPMFN`149d4MKMZb9>i6UkMk(29-wvMzQMhZO?cmDvRXt<}>l3AP( z3FPx+?ec_;VsV0;t@A#a6NBpdS>juK~Rg6c&a>^~pR`E!%c^1Dfz1?Nskz;)^@QAMUvIwHC-{wy4S zTRvHgv8rj(szJD9rg;q;RVj)+5b>#*kC>keTQw=AC*E&x^uf;-)}dOYt40`MK=Sxn zspF9jdX9$Sc50nHqLF2oMf59oC#93^qE$R`78*S=bK{YJZJTTrA*Pyjh;yEK_9^LvjNkJQV0;EHkkZlltSqD$7BT7vIJJzl)j^Sq3~~tSZk?j0x#N1N zhF*5ajE(^K9@co8Idz6mu002D8`RB7qEU}#HAag_=;Y*|8!A?)nFMjRRQhs1TdG*a zM4$%Bg&E1mOQIBxDHyd>kw!a!r2c~Wb4MRbz0*bIlzX}SxySRnXT;EI5iDI!la4G` ziaDg$46QQyvXIslMM#{iLo4P0i|7sT!*1I&6tS5-=o=jsxxBBv5)UHt@UfXNJKcBa zgyazOvv<04n~nW=+Du~Sz`AeT6L`!aSl`-nekluga91Ms6NNxx{C2tB4sBpbE5CbR z*X5B6xk{e*ZWO7(u`Wu*Z)ndsUM}@>i=~@Y`ljhIU_m(-U$jpm{T$z{bP!P26sEIT)AjVS@m!CTyqN$xvcagwchnEH3uBOf2_ zbzGQoZ{RN&l9PUyBel$6U!K`>#iJGEw|g;gs%}Ct7|F$lBV_nqF#<~)jr)IK+A;2; zab#nQq%JP;$hU$$kDHlOV&ulwSG>0*(Md;{!l=9c&_+h47DREe8B2(0Vpmx*&C|m1 zAF>iGSQtI5M^JlRK%Cv6k6WOLA0(dNOC^|0GP(Fz*aq!@s<&U#@tFk7Y%&i%7C}YM zZpyh}AOVnlZV4nr4NZ%TV45pgL404X!pbzX)d`p-%!8c`!NsmEhSJ;-dk2iWBN)F$ z_%VLy80Ck;=v|)VTn)%*9d0cmlgBoVr+{;F?S3dN+T^hu@^2Z?RV2{U$*w@nfN%#> zbgaS8oN!6V@UtV^<*C6>qFPXfR!)2^%_>$9KHQ)Of=)a5+%#PFJA-yu^OW~YUxo{P 
zVSNpaSN{NY__3$9r<0yBYZ8*CI>8-A6{O@9?0;K7@fF+MQbc{xdouo)0Tc$(($7Ot z6rr*h2Glth)VDvIVB5ad6o_sSb`HcmcmNUXDG!t8Ah#Rh>iW$5CXLhfH{55Yp_5iK?OGT!H05$Lot_ z7SX0qV4c}Jejwlv0Bq_Q=9z*ljK)u(_W=4`Q^{3HQzUc9K~siY06$Bv6k67?0C;3|U-DrgV(m+xDT=AQW z^eG_cnpT%VR&c%aWDb06*r9mNw5N|h9GlL)eM?9}lw%`~ zo(B0A<3TRrD3RoYF1-&2YH?N#|Zit}kUxv&%l%X{$_1p5c{h0n&APXY#H%0@BWQU5KL6!5v)EWnOmYPx!Ia z4!6SAzK(m10*+d|D@de9*z@V*9yqruAzF@S0i6f853YE~>0`ZhS_s+?W@wQKm01V3 zgN*25+qYw6ZS&CXhDR~S9KZ!#2bEt{vad2{$>_4B>UR32GoN%1`D3rg7REi;z`|$S zz=s$vr;(rH9+q$0(nBcIs%P()^B;|rVv0bo?R$z3;zgVTi*8a4#ULk_YN@Bpkpi0d zTyPt|aoX6u1FqF1`+v8G#;~U>2kF}DrKH;Bd6t~X15_{KMC+*)ho`3w2SDBd=Ja(~TT~%AAf3+qMx=>{KgDP|}w^!!}kuKS(xATa*;C%^K9h6kr`H;AP45 zv{Kf{l|q<2jWZ1B##94f%eIx6ead;{9#fD&IqrQib;Z!%V>zj|@gU1HR8;m;=z5O} zEn|w1h`pg9PvhW!^sh5m)%8^)?NG(+CXaNlP&pvkdwAX^j4ZW@GQL4Vpf|u9--`h7 zZJG+&lN^6_QSIu$Sd|7pL6OfFO*ZXGAPkgGJirZXs0X%Bsm5)5RC}dS2)@gR$r>I0 zl0H737o(0!kGELqmXT^X_W%mH3&HK*<8b6B96%_n^~)ni<|RmvI-4FJzV6%=VykM3 zvWS3c5w94 zT8(nV`ojAbt&FboC;IE@~jg#FfJi`YKj<=kWcp#!^mJe=s7)8c)d@;!J zuy*<#-jTiH2pGw|gNcRKBqMfb7d0&)fq z>TjbB>b6M|lSGP=A2Du=VQ-Mp)IegLKMWruoEGD`0?jd}T&)+sjiYzzf4WrKOc>>6VJ5>!^swhVlvWpNX)ea}@QnMj)0&nilf5kOSl4<91V~NGaycHN)`+gAdg^E-id0E@3Ay0@ zmZpw<({Yt)B4BiVL~+NB?5Js}>vQiLP_D#bJ}3R}Jr>+qkrm`JN!+7kjC$Ux zN|tJ7r>)Aikb$wKum^vMyyXp`1$r1~mD|bfExFB zt;nm8bzk1vg8~w&yatIK9>qXv^AjJBA0QUa~v@J zte(;K^agfb!1c7WRWQcT!s%ww-g8>CYdQb`cLCH zl zBwN`H6p>3Yi6fssOM}jP9~(p>nr2zOsM%p}qzq@n;bp_2c51e`=2obyqeb$lNF#0m zKbe0{mt6ZSH5D;SQ6Y{}y5@xrK=m0Nu86l)mMEk4WrjtY+-)Nf=lzd+*21Y*6!mKv z_g{kQJghuzGgrY?3VRI88@or6S0^Vn&KfaEMXj{Fvq*cMBU9_+YG~G;1ZgLEG$Ujx za_7SqRUYR>TQt(*RKl3jDF!p1#OA>C1D#+Sj8jwUFqfU#H0kWW&i$mJBAPXq4E#2{ z}h`dY(9jqDk`l^|ma*ESr5-+}6ke(7H;V zhdss0jMO%w&?$;MY6m*G>ThlwyoAf5G?U81jtUMB9=BB}T6J?QI%1paamfQb5F^WIp7ogIK90>&_>ZpzIE@P4~3$r6jN0+ zrdCBGvZl9R-(^-@&=pk`Q~*WpI!69s*ZSI{6QiN`nba`JKbOYeQ&h=SP+<4H$jH+k zSCBkm5S+}^L=e8gAE&h3NxgL~QPrQ6{x3n5^n zfKMmF>!vVN(xqKYsHRr)B9r|^#}qCiD*B(aQBLxgl#!PMjCJR=mjx76sqRN2`%8fO 
zjydW8>vmLA&@;R_B%C&`UD$SEt6~QM)?C{??5k9OUz3uS^8!ZB17N zBoX_tvmS+S%9@Y7b1K83NDI$?I`+J6F8o$KGDOo1@cx3?y??xcl)84&>H7Zwy^Hhq zX|dX4=|<$7Y3qJ_3+4^bkz%S&BWZg`SIWvd`rSgMAOTAP#AUUfbJqH%wFTW6r54)E zJ=noG|V@zSQ>wYUPeAyoYZE1pCe40e_M1L~_#yl-ecGE^; zZE{N^N!^Jg+BeB30C}uI4Oq-^$O)6U1ZV!eZmDCNZ{^oX>cHmOWoUG09d&{L z9miX!$pZdUl1C?jbU|K3p4deya?K~{ag=`0NaNR|@iOPpZMqqCO*&LyAFH@tjbl$$ zO%iC3NR9yj1@iv@dt|ii)8q1>`m@kn>OxH6TiYps4j0UST;5%k^vUh}i~Hvbk?HC2 zy=^S3R}p6#LYx)#uqx@OU_#JG?Y2Il*8SV+qgrhZATQ7l-)|7GW~&iW9uUJh0FK@^ zC47|76_x@KmjJK(UzJ;g@kNYscO}t+XQuwI^>BnmB()zz9>p@njNmXj38pnM`51Xv>T(tO}HRFQ6UY$=uPQp zDk&!0h>{rN_T1&WSn|P1xcw|2PhS16VhTpLa@pVtH7v}({aDZc07}%$Ur)Hs&+fV> zv_RFs1az^g5(-M3OCB731%J~c$Hv*E9VBqeQ>R<7Cr9Mx&|O|(6f36OAX;#;DVq*e zMl--X5N|_COw+<$8nL(g;7%h8YPT?daC~bwbMya14r&1Pm|nZ0G^gxYmL68(nfg}*@O~D zHs2wYA0*1_>nmP1#9}@$K|ETm+i90@iak{VBBLi!Bez?G)4}%X5h7Pb z(pUq|JZyJZL!^%;jP6d-O2R!O3yuLl8?v5`bxlmpLV`KT&!#!X`uuM8+S!E&DCj@>{dE{H#<3}u6aiE^L z=D?&Bw8zMf!MHlPUybMMq(6bHS4%|*FA7VZOE3c&J+o>}ELAf#QpfG;`VewHH!8s# zgWC7w0Je0n{+wNs&mAlt+eo3v0381Sy~6Ao-3OYIvPol!qa~Y^03P1A9nK$R7A5+) z4UYM<;oY|oLo-Hjx>bfU7W133&r?#9B!x&LP|pXOG7f^NU@DG|3y`GgCm+|P{kKql z(kQR^F%~`f14-@9Ernu!0?8W$l&%Hc5VUu+*irq8SYP5)jS0t`+ zp1!uOfoddnG52Ck>lfuBF`w=Um z&AxULh^1V5AKi3IPb5;v<*|}afB0P+j@?m5CSbXIR5qt`U`sv zhI*DyVbx<|IB~`-c_~1via31o{Z4utO}o`e0|Tv?X$_IU=F+D2=^=P!D&%0ZbAjP! 
z!rN@;h4XN`y_4!LK_gG7pVIvUrwRbg+9EeCagU@Q*5YMj+VJqYI8^tz%@l}xap8AS z!1XeB;dyU`&Z=mHpMpsMbDvvd7wBVm#tp~r8_-;Y?0YD^gqG>Lj2gQUcMV6s7cee} z&?bnW$-1oRDcJUIEH*;7gQw98`u@MIV9&4X`u?_JT8;Uf-WEpn1Fzu{jSpS zLZlA%Jhf$k0>LDkMl6z2=IXz$Kn}Nzd4B4@O586AmnhY9OsV7>tzQ>EcG$4CSiu<< zAkJ>(W@B@tP0{zjGi}yn* z!30|`QQ&Pia=G-n#o49%3ue?nsbhAnKC|g;quOeXKiqd;O9)AUd+E9=W>_ThWD(RJ zZmA^_62*Bp?5mTTr01x-))=}u5O)`Muvq8!*h21ESX_e$_Fb4Ff=TQJkdX+EmKb5s z*zna$45=!fP~?7=;;vC%%T%}y!w!IT^|Vd=JfNa#$@B~&_bht=KYg*8HvN9gah#~Z zlReL^*GCljc=Q$b2i52;zwC7O)m7Fp{{S*s4gm47a_r=E^ac&NDt7IEdF{5czQ^%% z>*IOt(vh~w)-u}o3_sZ08rdj)=;5K2q`~^zBPY}0=x&;gqOxfMw231To*OtGm%fBZ zQ=rr?K(~KwG}L)92*eAWU0%jC&qWa|r_(nfSy3!S1D-}SP zM~_Y{)uX7OWO}-37AEZ8x$SGKGVDqz$4i_gJY&tHaac>OL}GPiz}LwKq0fFSN15sB z38y+&pw2QIKjyTu?XGl7jIU#!*RR+po{3s>k*#ox(>@~GNbU2MTSZ#SSN8gJs3$+> z@0;8uQK+U-G`ZGvroP$gFE#e@2?b1`iF2JQG7d$$riH6$UX_4THeMI%jJ)J}8|JFR zu4FImB&Qmwonv9k3~EOI0Bc!UO#6Xmsx0yz(ywoU@U`&N?i+9VvLoQ(l#J_9#&1Tx z}Az@16-x!nl*G3FYYIdG(qIq#51t%D(>ub14*iI+q9(eO*KeaiOB~Vc<64DL@PO^e$^Bd@J$Sj z_YMA}UW$5w-sMZNRZ7vjY7w8Tw;29+vRW!ys=ZX2s%H9!9r64l!pfCukwB7})mebg zPaOJPbQxZm$RlXvb`=t;C(;yff8lRwSk$waz3u=}5b z4whVLEW<70ENvOu8?6N931XQZMQ+EopXbHE?G$@~NFP?HM+c5^(t*(~~>|$5o9a$e7^sp)@C#1ti zBmj@fJk4>B-K>@1aN3oy82(e}ZK}Z=MJlnf0yT0osEt3)^HqCAB?OI-G!x})=ZkY$K~ObE88XIsRz2`X zUmu07VIst0pGzum8IKP<^~=Mp0v&U;&83QGVIk7%1MH8Vst`H`5B zlkhfFw3|I0O2!Yc%rclHohPq*X-Bqu^sv?hl1@ov?sfE1JuPV1Bx>)wn5rFl_TTcZ zdw;^?LQ5DG*ceAnj>36Vs>zN6dbg#GQv{UAiJ*J>aAfGq(2Jd08Jc8YZG=Zs=}pyE_GSr@)Kc-{Ma5l)g1afwf)w|!oM-oXtyt7MQi zhD}2pUhI)1pm-#jc+M?*Z=BBxFX4HFIsyQ=nQbE+b#fxuFG-keJi z90h+I3l6qe5(yYMR?>9*Zmpr|j*2l$C`n~g(e$=-j}kiBkVQ`<`XYT&1F2kR1wVxN z+FBZzy@?TunI!W&9A}Qg{i3dFiBV!20dl_R<3au_4@)y)-erbHQ&bibNb{WIJ%RAN z&^trAO6slScM6Bw5xLI;gwG&m^c$UheHBiYS9)X^g|G&E2^^o+^YluBe8U9s;B_M zKs~?y(yI{r(!`wbdy{19nny*FIL5D}by5B=OB_}~Or#{OGrtF%cllmPCY7YDV1zf*iIm9Zyl$7S<=E+$q$SDq_Tnt=t}#hwZSf zB!QR_&OC1P+6*-7R-WXKXxE(nzP2nj3b!?>f{H`-^N%)50p%W^ zZnrI#wq44U{p{kMBjIzyIPlL~vUz8Dw8uQsQ?Gbf{aN(zuxF(^opR}?k^vHoM#IX) 
zb9-^c2yKuPlIqmVd?pvijT#-jv6GFhQw^OekITtOw$TbP#0Cv{L z00GkKtZF5Z2n(UOcISe7TvIdX_MVoSrmAJ9s`n|>>ItU>xb?p!+ka^`R#X8zrFwzZ z#G~75(q=@dfYZK9X(QZ>bjCTl>gydOSn~1Q&v<+h=uYMe7dywc;7Ir-{!1uYj3BTCEzoY$vznAWX-rTB1ckQX)rRR z^K)9n=S`;Fxb4%@Fha>A0t+8g;cKX4o;R7!x`l`74lratuR(e3uA(h%9O%(Rr&L(w znnTk#^|EE$cWOF@SxTJYfa`klcWLL z+>?>$M}X^eO}s8w3oSJA@XgpaO#CgBazzW%)XwcuF0d88po6X6V0mY)T}X`tHATIi zKH-#d5Ae63^}T!_Fa(LH>BMhNxDhsaF_E9w7LvKPIas>9lQi7P08C}E>yFv9B8%G@ zguHn?D<0#}dR-L3_gS|_oX_0j5 zv}-h*gz%wORU}{^$~-Kp`URn^j#kc<0IMFPTyQrLcIopqWKA>%1=6pAKQQOLyNh$H zuZ>*DRaP;G&j8?h_WdtD%?$NVB#fj=eB5UqHyW60BalRg5w<@qyl*ors z$wE&9^zCFR5v7hY_#q1s=415_(Ah~{Ei|Pm1AL^)Sp&vP|Bq>6VyA;BOGUr$i50jGjK9Z8Y2(%$?~6Dx+-O!1zEkr z8iU!eThO)1Ackc~<7q;~bBtY>yif@v_h)jM?4EI7=~bZ)1D2y%`Uj40I4UDo(6q=Zyg2RNS z4K#GiAC-QSj&c1j>fOR>P%!~sx|n4^^|2<~tLhpm$esrzY8f~H8=uA5N->(BzN_}t zb!lHg-t%>mJhQT_4*rZm;^ZppbO9`C#aotB$9ru$*0|1?;{XGkj(!)@3lL<0k{@;e zJ+E!Ye;ci7rEu`*!;j28J~wRD&m2mT$L9RZdW-f7*w&ouS$*bzucn{rcf}IMkikhq zOa#?LOh>L%1O07$j2#4UOs;{0zB`lacgC?q46{b^EQ5@5ro&B+Rx(ddDk8rg-{u15 zr3;RZq?(nGXHu|H)Zm{Bx|V4j1Ld^#(!R=M+(Z0MBh+w{74} z3MFbL%w)EnIVTzP=Qe60rgitnBhcE)T+IIfgfy;l-|)L?-Y8;si0fc*&7MK=x!pFG zdp_KN$XKg%2GehnNfiu+GVGm#biwIuA*eFLOf#r~$+ZN{KF zC)tIl+$4oyR*8dqcq%eDJ(}9k?g_U??nsPUPx)oI0QK$O;C6bw#=Y8OjA20^PaS~p zx1X@$qm(sHsj$CD<#BKEAWl09+V{#fa=yrnG0PuQ`ir)$h?)Jk+4I-~$0Em~+o5KZ z`$jZ>pq}gcUxVv0G?>zGTd~E%k?|4cR7ZwDP_}z?&9kDOSt*h^b$f8?3Rw3)uS*Je z-DH0-k#pre&M)6}>SRb5dw3%Nb+NPpq-HC^)95k)p!raJSM+RssTjTSF4}PpASn{^9?lV{+yy^9BCdOyxBOQpcKyOPs!3Th9~{)wA8{Rht^Ka zPZpj@WfZaNj1Qy~$4h64>#6lR>s0zc;~p0o*=TR9tQrmh35`JKBjbMRjVP7YoI5WW z$^Le}C7mKv5QSWRW82|i)XP0Lm)_IHbOFO%k4Ev;d+1T=fCs)$Z-v~n)T{ZG53Z4U zT0r$iNgbULMs;(H`de7T2z3|M2VYVC_Rz!SXo=Ef3_26m(rz1ivP#NzYf=?@4~4Xb zsv-dt+WbwI3M9{xx22zhZiV{UtZsxEQJXZ?D9mb=-gl8IU; zI-)w2y7AKd_bc}$KXjV&#-YKskWMIKpw?2TjwOO9+cBK;H;K*FTLZKM+l&kkK1Orn zW7Ndsp_X)=5<4j0C7_c$m~DV`XlZzdJ8&Q&B|YLHPRD3@!sy*spOtkYPOFkr`zWm zeggBI3@)gaVJR=7HXLK8^0pMacIh-PGU1g5LXdpkBE+hJ;gy<0#&V2#C(zy0+os?< 
zNiLp&0Kpcg%-sPR&AvtnH{jI<EW_RZ-KO%g%i%+^f1Wpon)}V zJ$Udp?sq9;RSE3L+s4oM(g?iMHPn<8anhM8l+&rtvU`)@N2iUdi6VE9H~|3RzkV)h zUi8$;m1o{%8U%0g>19Pjn1qpnLyVO_lpcfD)0`rg5iLARTPeig~R z`;tQ%2%|3{KTdRmZK;egIMmEsuh0*c>7EvqlA|8bR?-)?qhW^0OR&gjs`M>H&>)14 zRa?xb#_5`RT6rTm!OlXA_CCDa#(Q5}Aku>BWO;!r zAWjPb*BHD@qX+zF1qtBww-HbFt0Kr5&XT4>`BuG8bHG=AjIsR%lW zwwrZFw6fwoP1K%1_#38Li3@A?N)PXQFdiQ6JBPbX?6hjyMv!sO>vwItZ5&an$cn|e z!RE=^g*{MVBH@Qt9sU>O(j*xXoaykkCUI-}<&P15V*LZC^5iV4Y6_~h3lwbR`U884 z50gzaUvs#!#JI>pKt36~JTp{j?9#~D($B_v;>4%gs%mmkNnWjvKT~}pYT9Am42+^T z5jK?6`WFMY#_IW|mvf<`REc6rb>N7xl6_ge7l(&3sHgTArl7=k#nQPK@qGUKj;o|> z3-^amP5tkuOx+4|aVc%0aJi}uIJ$L{-r#t@trmTYmzMm&7aPVtzpv6Ue*v4rJio5P z6R&Z9BuxFTMX0X+zeph9-!{LU8q#>-_Gj4uNWzm$Io@YV$%AB!Z~_3Z|QP)*$%Yr zc*SoPp4V9Kcj{;DezN1ep&lv6IJrcH`8QX%0L9xAjQ2N;PM$M0NRL)BahoS`+vKN^ z0DZ;AaynerH4?0YrL+~&Q%a`_>VGQ?-10{~-xB?|YoOfdj8RP-Rj4w$zvh32!IqWj zq4pOjQ%3J3F-Rs{YQu0F z9XYVo50e{hBi^73+!b#3klX26uDxm1bD1!H8Rq&@q)4$eNu)@pQu-1ue=O~A(?>01 zs1iu-J_-*{OKaB&{ET-W%Krc-h|}(U)Q}(zq@Gv~_sYI&r`hQ$mOZtZlxN%xo)1iB z_PgKAbhM--ErBHU%mc7EK2-GHMEc zF0RCjswYw2?UE$`IAMe1&E`v5YE5ljHx&a;7b!A%mB&oja_(;E{{V1(=-iMHa2LnM z(4}QcueK#-kN98AKTk_JCkskoc=c-JH>LBm^yYDlUjG1Yjrc%}9u7_24Qs}7WM%Yh zl12C;Ks?-hZix|d(2F7rk*nGEM){H27;Du|HHhjMCP?TBBb$3g@;2S$0{YAH;p1MVNe)>qGAd(B8F+U$$wq}lzPq|4`OIJrc6!nrs zFc`4P^wp0+;e7I<8L4H^%Y)&_Bja?^(>!^R3o$$#F$CFl)l*ambwsWa#~PO zmK}i{-DrO+!POmo11rZYY`>Tic*yB*_Su@Yt~HAb0- z6`W(sB6T?aD;s;*l8&h)qzoc)@1Z@1_ zrKVprFou09c=rM0i@m>U)T=}6FaVq!cO5ffs%RYgwqdD^@YH*O{?&j!-hhAlIq|Y* zP1|%55nKLbCmA;4o_Oh8$0SP9=KxB$1fGL79NR4P)dr+8#^=mXK2-jlt`#tg*@^b* zD)p%L>J|gR`kT1NOz~_t8V$ZrWof1nRF3)^=|82rcw(-IR3Zjw#+f4QN$|1%0Asxp z)733H>UM0Y{G&(l3sui4^JEB&0sw#;R;HK>Z`r(V$zzX4eB`lX6mS!;ayN7 zr`$=Lp2T{aWj!2JbVgD&8J$5sfb=%IcqG$6`a&5001Z%eUWem$;No=jTLpNms3Nb< zv78kgTMF80s`x`is-6mF8CCxPoW8#SdF_Ews}RYnQ?bgCe~XP)cR;nDge3 zJu%Y8+bYoMsZ|JbfsRHG@pxS0K_>9u!MIaH3Z!TpmCKeNNyp{9QE6k6Ou_dtO6SXr z@_j!`J9VpiQk*G5gt1=4ka!wiEL& 
zsQ&=U<@#tE>_-urhfKMboBV$G3hSV2uk4jGtcuj`V+TK%@rJPA^D#G@E7^B%9^Hl%41+T=3FEnm2gAO#gk=U1S*+^g$)!adlAsFeP5>6=MW zwbZM}ByyO5 z>)~b{)np9QL|s_$Nx?1ZJ#RYOGW$`_6-j1C9`GlCGt<*H+Py&wJ?d3uHEB`MS5A_B**RYI`_dJt%*xtQWWc^GV)7#wCCaB zb<*s#@>NZU3h3nv_Gan*Zn~NZsj5U;ib~zWoF)bI%jmGWjp$ND{{Z&WqcWX2CrXps zu(cHP)>q366SkbLP>kcBT;9T$Ot4E&QxeMYC;|LH>FZ-qMFL7gm3HpDU?B9*t?H+w z)58tDQ5qd_MJB28r&8y&x7ukjw7HZRS=jm5RT^O%`>3W4s#v<-}cwcHwdJh9x<2~>% zS5XyI(bLW5X(mDLpnoclQax@pbONHjFhPnnRndXuk2V4pwHUlSZt#)t*OX=Zqiea;Rpljah_odwd-IXZ*KR9@2vlke5`C_ZZ#f%rLt_( zpMO|j4rBxY$H%>t!7(bd{{U`d3>g5=k6Wc_XQY-w^J-MjwHM?c9~))&dv~+qbPGRFh~8j5JaGRWNffc+p2ac*h% zaInU~;+8@61Ht}wJx<#;=6LJZk za}5hLtM3*(1U#tp$6t+vHyM48bjA@+3bLs9WY|p|WlW+-)+NUds&S8v-1}=jec9jq znev0-VyV_A*;CCOH8aaBU2e0vFz^sA(ot3f4 z{d(WE8>K|v{3PmP0IK_hj!u!+9c}*6y2QGE%S**( zxnMta^r+>hjg}2!Bq~*lU^!b3n`@!>9k%(F7E;Gi{Qm$0dzv3JH$e&6XZH+D(o4## z9#i?*6Q<)UyuWLqNi2I7Hr{w6mO=Lp5^u}qAJ6r=8fS{9N8A}oubeL{k3w#x=t->= z!}iq;9F-F?NsT_-W4Fh~+6a=V!eq-5a&;d!9u_4g+nMAiNqI8So(`W21#4?*>Lp0x zX&rN^x_>JoFGIbZmREmi*g49gu)qvGF6vj82p*p@4 zvBlXx0cupz%kHUU>QlqVH|+VTqGv(%Dah@MUk>91V*dc#2_kpetleBloVB|Iy|kY zE?K=|#funuXdXOxmyYVr+UgFWpq++l`7sZ6M>-fb_L0rc>@B$B0M z_hrf?V~FvLS(|J-Z9{vfL_Fo0yRknWwlyNaCIYa5?iGwT`fPgo*{MksLrIlNj&uXx z!u+nKr;CUpc>rwc&zZbF7hKUaVI09EI{RzucI08gq8fxq(gKKwwoX|802X`Pr1qu- zG)k-weME~@C0VCYnGha|exvZQ4)Hu=%TCxmPv-bon!+%977^c9nKkL}3MbLns6tPfA-URT!I&}?Lr=!M`ueM5#l*T7hD!!Hh#RJ?(8g zODtwea*oPyP@m^ztWPu!u2D$`I(uUC=*(ZbMJC}^WoP#_j_dTUI$WTEU{yytxxXnS zG0@1LO9#@q^z|1R<`J@-1Asm`vC{^|;MBr$nh`pm`ZgBlOdk?Ot1JdW=LGdOwzjfr zxUmPEz5q|*BdxrmsgOjdMv{w$C)U*c7U>FYFw>|E#&f_{9k0btxhw;aRN?XcMU7O_ zHAY|YhlhOJsASS6&kW2>n4#$~M;HpEU|^o4`q~P*ii%Wul1#1$8iu2ug6|S0bM7;r zPBCN7$j-j{5S;+(dR*y`Ag!86pm*axR~WIUcw#Q?oHe*-rYaYx?5Ci*W{x#oMFS3l zy|g_P2%}-GOKBJ+cvw?ZM^jKrojN?IeQ!-0!XkXAQI2^2mzh|h6$75_kUVZ}1Ed=q z^(gF+A%$T5LjV(M{ zM+>Nv<8d;@E`*C9f(vHGdc|)sI8$*cGrDOih(>ju`1oH!(1CNNnA%cx4&AxMi%Gd9 zF?7~3Cm8+{>we8tApkJf7BEk*jpC=N(=#$FV_ruGwX}?$8yeE}6#$uuR09NX7!Of) 
ztk5fgAl6$LJ@IK`s+L%p+C4cK$6ISh14f|9ENr;WKQQq4+_Md3XykRLjv?)ET#wD+ zVmv}NH2(lkxx$YNF&ZGDqbJMD0xf-gMjo3-I-fY}j=cB0SaegtN~;N7IPTrLbQinK9+Oc2^@TVE;Plmt9uf%kP32n!2{u8ealhPqsF?& zbLu`eV5f@*?&c6{AC%-Djjr51$lzLKk`_n*08N1!o8xs%8>EUPucVM`c-+W2;C8qz zO*E%hu_tc$uvMw(;+15O8ksNxk1*5z9D3N|u2;lxlOB=IulBG%q&~vN=}dJ(43Qp< z+gHNwqk>7qtt8CKs!9etWLgE=KKi6$tr|@IBOUKx+f{uuAW5ZAa!Be2jkJ+347F#y z1E;ji@;Z~C52y9DsRcffC0$!2q~t_786LdfOOjezh*4sRV<7^WFf|`i4dzqi_~flx zh?rI2>LB|1V)Z;Bw1IZ`&0{pePC(KR)8!V{98>BtDU_Z#^BZJkriipgH4WQ9z`^}3 zh^3|W(8dbLpywW7PmPXjpUZ%>+8=C~CL?egPOd!Orqv}aY)}arr5VrUPS%@tNhZ|7 zRv2t)&I=y^J#N~0{?0W~CaD>CR{XykzF1oH(ak*TQsOxy%X;*-@z+OA8aAaYxW*K5 zX{lLbhu>@#J-|HN)9)H!$0-x@lj-SYRiP6TRwObz0t|UM0Ce!bO*;y@Rvdx?gT2` zN@hu2BGZ7!jYGY$o`R4@l8i{+2m9Snkdlc&!5>C@1AP0~I%KBWqJS1?bT4ArBR6#x z-!$&uZyAm?95+@zgVxBZ-72Qh@5X{OInp}WzkD}RqMPuZF@V2M`(2trbTu{v+F~u( z6Wn^*y~>x`{6t~vsyELcrM8o0t(2^@Xn4B@EuBaBM~%~6EIWKUVx~nTfLxt_5CJ@0 zsHgf1hJEVZsx2O8D}j?{(LpgN72LS`P$?>YJYwS0$01*B$jH>i6M%g?V%ZJmt&w4h zKWHzPSV5EiB5!vjG{|l;ZWR+DjxTCx$kdr3WFCZHu|;1T>r+sP50?szPu1&_`>uZF zN(TP`x>*Acsd-`GJuFm~qDqC0^694v(smvr{q1uIy)g>60j2%ZBFQVa(wRC>!rkrF z@=FOxq>vRFa8p42G1BQ&sG-)zBrBiw2nUnt>1-rPWr0}FA3W&`&%m3V-`N#SnaNbW zLB?4F9Ymhr4A~PsBX*UOAHoRF>v#J~t*)emj2@j$!g^_B7zhL{`H*F?(A#TGB6>QB zDXPn0w0!9&JZ9O;GB$^1)rSqBn-)}MO$I}ydu4EcT-fyRQpudo%uhjqkB2t6Bs?}d zb$nCJAwtqKFwQ!E!tCB5b%~}~N}k$meS!$6s+kka8YADaH_q|eBZ5xk|Bgav{Jly zBT2?{n^4rtMKLU_+#UyC*4QSu!1uHOGoMSR?+`&YWLF~~YVLj)B(B71Do3eMkV|Jz z``yxhB(cZl%M9Z;bqw+9X7<#v1n1J~qL*CeX2zgMUrsGP%5%^}DzR4}ei24}I(7BG zV^o#qIqpski(T$bIL{d!Ea+*%#&U7Ruq_y>ACosa?OxnKh5a%7??p}Wp53tzdv~dg zUl~Z3ocJ3_wr(4R0C60WC~JX_OgK^|;fp>c8u#!hb5TUrg#LApnKoS6p~Uu$|a z!W?C9>U9OVvMJzpO<2gq*%dp;`cZi|hS)=zaHA}7=q_y)D$|)G46Tve=TNiY*=m&Q zcvex2<2|fcY2LDDK&4el1auZBc+NLfqM4#;oCuJy$4ee)CZ{@OMnZdP7|rf>nh#{V zd(otWI!oj9x@$Ii-Nthy(}`n10vW!-K6GKP>ub82bdg|za3S)Nd$IL4s^~HG z{c_TAoE%@;8~9Hi2KJ*L7h}OD<9}b*w8z)zBzC_q>t-zs8;1UOGX{Q!2FJfaBJq!k zk=$bLibB3#hWz)AFhD;W_!MK1G@t2s#l{jFIlwnIjA 
z#ClogvlA0KNRTUIIHHl=aLmPBmxX=$g+0}stK(njef-xCrry}Y^aXmz?e z%o9QuBxyg;+Pyl7&v3%=R@{WSb$52%U^KPk0sn%JY6$UvU3nIQ&k`;yu zh@huAI@BL=^V|=O^NLNNdzGAONfsd4HXY@vu+N zvp??8?UI)c1xP0gGe(AGI9JJC!P|1ZvMf$yU zV)$7+oOa2_!pkv%CNPo7?Qtqsz4{o7lQDZH21kMMc1P!IT{ywAso6su`dTxR>}egd zZ)mHwiFAtUX%k2TR7CT6{{VCoiQe zSLI&A;dj&1)6|j)b~w)$bQIp+U2==jWE|Y-9|%(IQ&bY6!nTq|dmMDIUI|g~!x+5) z{{Xm*Vg0hbZ5*<=au1sa8TGfOPeW+sXs3*1;f_29#@xra{j9Q6)}vdK>b(Na{#J(cpae+z#V=O`+gmZ@cSItrg`XMn_A(e4E$49o5o zY_>c1>0xZXSR234Rc+g&#~X>Mq|-D+^@+bNl@!suF~=e>D=O&Y=>+ukwl#?L%kRqs zsG(F2{3z@!7$$+)PN9_`sBckk3!g#VU1+DSf|VVZ6&lI{21xwC*^yJy(MpoT0F>nA zU!G6LHzlivX=D3?PZ2l+1Dp=I?`Kk0z|AI>Nk*?L0f0ZBTcEq43FcP&sA+-_dudi& zp2S(Tn_v_w(8&2;G)vg&Bj_HZ(!-{pr`~0hvC#1lbE^t|J{LXKp1v1~Q(YUX4{bt@ z!{9EyjrpoF8)H`qDkO{;%uRy5$S3}}y(Y`PZVC4k^%@JQ%s`MeraP#%TV)JXZ1Xj2 zWK|3mJ=Br)AJ>b{Gthq1_Ny=q4W>C`YzAK7=fcM4Sqc&y?i-Ch`Ao4>Qb=l5zY&>y z@v!gX&#j`Lc3K25RSZen1bKiRb9*S_Z@P}63DP*(IFpS|JsSPq_UJ2PVO0uB7yz@! z;(0D1oa%Gi&XBZd!VM$B!(Ir@q8zZRMesx_sUG>az+ z=m(e6QLv<=+G*pGo?z1g!bs#lk-a__JVp~kx%;l_fql4+tiA#D>td}$yOy_{gkO6? 
z701#Kh1_W9B#YdfN&o&_zy z)c}G|rUj(lDb}0q6_J|*$~u#4cJ1k@>PVQX#@Ws}AcNNY-M7=Z%K(M6{kxnTV<7bg z={}BTw5E|NrP}3K;2Nn}g0SF$>tbws^+w%Lxs9r%n<27ugdK8yEzaEHgw`yQ;UFyEx?<-lh)kf+$g(O8j>JD&EBE!2zz#Zr1$hRHNqP8aY)_k)^sOy37P6+hrWl<$< zJtSgeNT5uNkIj$Dd~KvPv=kAvK%BDadW>*SQ*JieRowyrH9-i;&u1sG@w2GZ6NtQ3 zM_C9hqe{lw6S3fM4XmT2sj8NqGb6?s{E+e&AH+|E>NjY%=o%P)-x5X%lR)6}!SOwA z>LWEsT*B2Tyhj{k4ui$4OUXJaMBB5~#VDpS#^X9vqjSftg-J@(b87p7W$qM{k)MsY ztNCeGXysU{A&@q#7SsVgw?DE+k^byQ?~f|X8w-w#Ztu&X(Pmcq!eoCn17(YR%{=V5xvSk(HCm zJcqbp>vmFZwStzVD;6-Xo*jE*pgx|4`lqm~vl+WrN4GhprDvXJeKO!21oa-dxAgmE z7U~uk6)Q5vIMJt3_2%J3W1nzVDw)xra{%+{{Vly-?Nv!KR8KTM<}iw@q-kIJ=JLc^ zLITx4FRH;LGJ7p4V0!1%HtTPzMH0xx)XZ{<%aVL9=iX`eh*mns5#fs>f%(02*3wPy z5(PbEY{2jwV@NleO@gPy+$0YwBaA6-M~*w%y6k=QM1B!ps$66b<$3Bh-%_Mgl``o% zl2Ql^o$$UE@;UzIS$)RRev$z1>ttOHZ$_^Xl@?ldj-aM>vhWD>#im$lYfQQkBUA*o zeLM}&wQWF>2gpsF0}KMR}{FHz(^LymKCdIf?cj$KJAx0lg;Z^$X-nPhH4CnjFmwaKt= zu*oKZnlmi&G=UV36X++$7LRR6>Zw>p;+8`x{8QdV@Xt5pV3vA$DOPa4?^fh-(EM!_ z6qOLtA(~B1Pd1JYagIUa>Ki&d(V$JXcx5j=assC-frI}5M#-z1D56o5A!Dep8OA%D z_})Y2nz`y)AGSw2bmhlmo|iqgqj9K%+NhK5!eA&d{a>%3zN~Epp_c1e9Iz1#k~T)2 z1qjoBQ_!t7Jv*6;R6FpzfsTUZS1n0H1H1|zGon;nF#vV+KDMfldaI{{9@S@JW39v0OSLnJ~n(?I-pk2 zvdN5L`+P<32F{M4#a0e_f6+HvZ9xH7loOof4xX2MOz6WCrePYDP#4Uj_`NIYGC@Ootb0B2H$ zm)w)t85#Y2ZIv$EXxkD@Bu?>@$-f#?HY30v690li&0y-EgYz_uB4D?MaOJh zK8jQ)qN0`R>SkG++APbT%2z+q&JP}mVgCS9D9FH5%RT4KUgqKBj}a!bgddxmDxO)P zlq`{HbM)!wpHqt-=s>oil}l8(NVN?jH~RFBqr%oE<5tSh#TK-WBrwLWi1oHVb?#Id zDNQNGdZ?R~rdCB8~<;Z|a|z+Om!cT4x)<*|;wJcJ#Nj6oA90ScS+w;cpL5 z6K2-Z7$iUuR1wCUWBOQC^c9s#fpmE}^03sy#}{farO?WqBY_@pc?+d{d~ABCE2W7Z zlM#Zyh~)I~_}EKQ)h$l5rikMj2tj}`)x2z#NnPd;F?5fzYB?j}o1%cFsXJt45($e+ zm@@&!uD_+J71GB6h#chbNb`I%ZeZN%p1lMrOKDZ>jA}o_VAcjngNXE@Jda#;7YSyks8JAHo`BlH zVOs?CS2Tu&$W}Z4{EEMIj zsYhP}c8q{c60?TZ!AIrCm-*S1RI^uOGzS?a|-Ej{$l?C^eoBf{{Rj_AwJrG zeRH1N-5;`{LmZ2jkl<-!>(s5zlGr7EYFE?`WqrPI7Z}mu!0C1xI*P2LwxFYzC!gtN zS3yy>$L>h6xhxbA21b9auL)R_qZpWn+rdBB8Vn^vQ~ozVEx#iqIBtFxu2v|EB9Gs7hGEfd@k0~U%p-OO%B*=~ 
z(*nr?M}*av5WGYfU{xIaPjg|4x@eR<>oBpQGplzQ74rd7g460-F;^Iz3Y^zbav5V#&?ZoU@HtXh8d zWB3oHAUd+ZY4Z4ylkmA#{J~pVB3Zqi5fmIEuA!d*EMK{u(Sr;xb_2E#OO$XdtP3c> z8nm9c>3C>1S&dYVi?;_nX_1c@^&DIp8hG@_Q9gn>3)}-_w9+~Sh!qR{1*v$Xchu3D zKYihr7|?do-2GVO-O(~BJO%&~Y*9Ng0K)0uMUq)cvLr=W`kvp;)(|n&wKRl~2|AB^8VX|!r9^?WgNEswrJAXMMpn{JNljX@~p(HKb6XrIWDP};qfO)tLWKXxqs;`6up4b`w*3_gVhSarhQdyUT zRmnbv+KHoRRg+2nFHvdIXcbjRD)_?yFrV?ep{ZEif~uU3=IaUz6){O0XDK2#3K!}g zxx1mN1m;Ij{g51f`w=I|JT6CLkNVB|6PX+F5;h~T#yP!$3$2wP8i>nydfZ~K3s0$+ zC!p?ZD$>%bf&jtkaWRDhp7)GMRlL;!N@-T)`du505kyRi!CV8R3$zqCk6sv_o;Pi6 zAFv9!)cCa4n8!i5iDXokGIGNwt}j(3B`b`<7>uaN1Jd*7YLa&^84AE~xa)3qZ+e~z zhp6)%B|$xj^dAcwf^dwJus|czsCNA=g)?qY(upV0ByyZ1FBs2ESP)a8U2+^L?d#t3 z?^{hYFw!aoNRF>?p9>4VCeb>Es7V8lDh&Rj%%+lwaIBo^=PPSsGdj(rU(F<|4yhC_ zfztQoVr*k0(=QM%rYE;cwpkLMP&7u|0(0YPA)H1;zT|7(E@*0EmBEoP+z#VC1)EPJ371o7LD#j$u1IHF=K<~$sZnU*#24Dw)nNxOiZ7~%yG}4>1g%9m1@W^a7!}-jb1k) z^IOzBiB@U-xXzN#=F~g|%S`9=mS~`+qNGI;tW(b2rfzZk?%4K?x``S}sf3LD@G*nv zF?sLW`(0e(HL66@z|wKr@2NC(1tX4Dl$P^0cpnRfiIQI36m$L*LJeO`s505{AHM#t ztg3}Qnd*tpDuf+Ud@j8rDVzTAEKy^Q@*jZ5PmRlQllEi{RFRs4IP#YB702dN&Mzb? zJf^5>>SD0Q)aErS=lY2q+n@Rt7S$DhzCsyF@?J&AQTq0@+jitD<9OwfqnWi~v!oA= z&Y$MVBmV&SVor*HoSwryeEn`J0Xfi`y02!gQJj?=lheobwv?OQL^QxD1soE8i}~IY zcYIVYnV3E@!#sBX2Gm$Lh_!SAwy$(Z$sU13a87aHqLrdJ2VtRF5?p2>_V!^&4BJ zs$#0TuXd#+^9a|%?pdRjHY+$?f0X|FzODlCB5DOo41h3k{`YL#%{JE%jds{d@Dn2} z&FaS%@ET=O$dXM8qYzl(>*VWa?tnf!3c=BV9()IB? 
z)e0JEQWZ$TsyNP|2X*nYPs5@F3+~G)dT7P*<9fp{{Z!{x$Ng_v6CTfN<7Eb&e`ZAmD}8_5tTob_V75d zT@14r>BvIsh9@}9iZ=Iw@Fp6^)sDX#DxQK&q0II2P*x(wOwlW3@J@VedYVPA-5`ym z@=UtQ=NLT!7O-woMxMqwQheZYe_MqkOC@jIJ7B61IlENRdT42>mX5pa(SybZm}%oz zrgLOfwM{)ikwTbphl~Jq4^IoTXsU*xq{;<+oaedmvZto3Giy(1E5IZayC3w=Icrplt+e`iwSSaq*)j!w-BWE}dBh_!STb1bA8 z524MhCopni4_MUl$g(J1S;rapT%nqGlkH#tgTT*BTKc&jDTJhbON{#2TMy$^9FU-S zyYu>86pCv0>ayinqhphiYM^LiVhB*##|!;$dAig_Rf>*6kbG~OMOF6B<(E2-e@kq$ zOEp47BQQBHgR4h>g^VewCPtD>r&7f8{I1FdZ#<}gkU8U}uG}dqE943Rjz6kW4;b{e zf+ID!y*!d>4W==XmN_rt2TLX@%1RiOqfpETex}BHEwQSZWND;jI&?01_z`}`v(rlM zQdbJRlwrsonYtKdEbwj`*&{&xbKf>#h{QPW{Vu2&?Jx}#;#6k(us@Z~N$06x%AVeO zdRW*lIu9(BplL+N^|f-%FVe}R^}DLxt~bW5Ao3eH82wF=1ssn0jHxdi-~jGG5x@Tc zd)cGndq_RNs=3MF;@iPSs#Ig?^M1CnHTI+oIK}Ab-Q=fs)2xHgWZm&ZXgrYWb!?9> zPlenHGP~&~PX_Duz30|WoGx_s{+HmZXq2eNmimSVIq~mriv9$1noZkJ*R6p)^!EPt z`*GOkmYHXTedyd|9QE`T$}O@aI#vbe_&^5h-)gG1R*E>})lDb-w!zSP`hPo)Y~|J| z*(mo4s7gVV_6`aXLoM#U)@{}=w5?GJrU#g6&rFW?qL#5}qcw2pKHRLZ5-C zWm&xmxP0Ta?$o+kuBc7h@WDD)9GndIyspLgm1+DS<&UN>p+oQC{-GIV;mn_=Q2yPRy%_e zokX|2)RWYUQf5o=zSa06&M3HrKSx4!{Zfw)uLT{0#{ zq0-(Gd^hMxk&B0~c#nF0jbW7p^~bhet~# zQ!+=(V}a}NyXbc2t9cch+V<3z)!+KvXiU-lJyjdP=34o9?H3`KpApj6((YetAwa>S zW5ziCeQhVThsSa2>ue+2<*EnRMsmIIJuDQPVA?^FR!0p$mGw%Xwz6s=&xg zWUe!tgmm)Cj_QocpHTyWdp+0QsDNsO6zIWuJurWe zSMth3UofVbOJRdXMlw7x*81%R*-;F22x#d_g)NyB9b);i(!sXx@jNURHX7Flh4sD7 zHupJsWSUac<2uJi81?kOIr`JDr2bycY4c3*F<6o~(biTcKm!Ypr{R-$`+M^hWi58& zG_=u}(6JvJb+lLMc(&)Wq8_8``u@MH#h+id<^6a^w=F(53;OWO1dO}`auc}px^L@d z2n-}XZZF1xhhh!Uas-RU367PU1o3)U8kEzl6;h9E4=D6M8_!2J7n>eQOB;$rzox?* z6bdsaAoK$#^|5MqJ)SsODXDeHMm@*l9w1&yeX88jIOIrUSjGT}H2(lTZ9Z-{^|KQ!zL(~!js0wBo<;qUaKEh;g@B9u zRy$l)<7j7~kZykCcwKk(rnEy^N}pt_D}-$Go|f}*o(c5mn7ISY0PA@BE*5k-*NdJA zm*=B3mqO_yj=r~{*uVDtdfKtI@yI?um*aUd<-BeL@r^6|uWtJaC zq+JipJCvqan5Krv(0UHS$rHuyi3B*rU=hYOkIpR$sQ) zgMvpm9dmgt2RfP4DYq4@NWJg3c2T5ZMt|{JSayI*RigWvNBU8p>x<{qJB0*_0VX!; zG@kdc+p46aWt4l#7BV!0h8`m3lR76w#)&n|lf3bB&@lf1>0sW2Yz+u6&t1Gt(bSd@`Mv2QhcgL=8B*N=G-ZnkDDOsYO 
z-b3CsJRtoud0onmmZm5gJ|#y8&H0W`1JdcQ-K%2NH9Vlo{35dPk6S*93ZV=pwc+;5 z39!5|Z$sCMxAct&NUL_wVkN0|3>U~obdm=i9qq>JzL*g0s><4eE6O{Jj>>&UTk+~i zm77AcQ~`!=<4EafYH6wDSy^0{VtGA3pY*!cDxu7fh~W05o;St<62n>X^|JP@ww>Ed z;yG0J5`N>}(a~&)xG0)J5I{8tQcDuQ#1Bi5l3Jozu|ln+E|N7U>OMCt8}u|ak?jyv z`wG*9lgdE<06|IgK9+;srJ~OsDh@HSi#D&^4|{|!BkXF zER!QAofP@8+z!ISRee zPhr=KTe4J16GmBFB;n(K@o~UE0xg2=-*F^V)S9D_C1Efh9ESc!`d(Mj&&a-MqndL_ zz>PRMv)Ov)^)geg-=Z@yo;HV4s{lYZjo#q>lUGZI)Y=G;{{R=JZ&OV-y-ft#ea2P5 zQ`-i?zu0_*Vwx?=IUvHQ$#vr+1JGWcUB-`T*pu2!OCk`U4C%)i>BpeDAW<9>%N;Nh zah!Bq_7_S^NibT9pF9xf5q@IbMn8u8Ch-@~qb*Cj$4Ju6RQ}_(G5{`q2h!c`+vmTQ zil^;V@#Oyc`!H<~T3w?tcbni{ps;X|-r44m;~q?>h3JIMqd(~ihSKl)xb3cD!} zr>lmONUpAxzTr-iPBZAj@EeU@nP7=#dZO}nO!-uv3m%62o;Td4h8ov+BStzH4$J9! zy|$WyhAG-q!bgP?=*u&7Ilh$j4g53vrsX!*1yv}hO=2+09QlI!t>->l-l|$Dr;rCK zNaT0%x%|PiRBryoL;^{q4DhoreI@dw_+aAi+V(b~rD*D8SY40O51Ebg`gyOQ@G5HO zj#2j2ZI&8}V}a@qh3lfPquh3bEgFTIFZCBuDhTh6+#AN%L0AGqP7yif%K4A}FHc+h zVu7}Yo}OueX)~!KIT${kx6VZ%x{cgKVLszR`uyH)L-~4M)9oK|ff;6sc#4dw_s5~} zv`I;}NXhsq?ja!yjIs5|HbivLQPdeyX$QR8j9~s6_yN-8HjJAm1ZzVh$M(${43&}n z0O@KfEB5`sx|Y;eAxfuX;@vNKQBe%kMmJF@VS~s758?;b@w72eP{~>COo(O3e{;=@ z`dOp<1eU~fv@>oMF&MBTg9ljbLGOBp*pilWP|`DBd#vmME#FV_%g>V za&Sv>z;y$QuCA*PNu{C!RBW91cE?LTVD5PuRNWc1OhjTd3M0{j2)y3Myi>r56w(zspOP}Ij@HtQQOzv!>-UVkp*?~19+t_Ozzx}`JSzB&b^5t5Ch$lXwn%f}Y{(Nz(GqXQ-|PBCjF3ZiM2HFZIWP6Kx>C zR25B8K_e=L3n2I(EJcEQ)(hC#ayf0U{ zY=st)WTh100DpBn$4)z&B9zRLnxA{D-6hvF-qRk@V~{USWUT$3!a|O-6^&I+G^qU9 z@v!FFVXUT+sSIA=;iMzX#B>7dqob}wj-A`ge9fM~dVfoo=8&2_#3<;COzJ@j`p!ci zP;c$uXsi;qw24lSrHe7jOU1Zqvdy8XR3uUJFy~Mm&&SfsHS4)UOcq6`NOYfU$X*XN=Ayn? zQaASS9k6^2wx*@?dyd0P05tyqd$s5#V)!=NC}K$9LmHFMJp4{<>9*nyVgRG4$oO%q z<7~py#KJJR)r=B2{&#$gDI_8@$zDqH+;iU|$noOi{pX2vgxT}v5? 
zzH^KY)`EH{H)-A7J+*-FNgSV1*W-Fw22k|8)lT0dWQhsqIz~tScNN)@pQ|X1>?~|Y zEYgpqNBi2Xy(Zhe$yQHvFnlzFQ*+RG-p^}>aPy-^j(}tp*bj~9Y4+EGO2}Z9B-Kn9&5`6I@~!2e zTIdJ5uB=$;(0DxAP(4Y3kRP^rB+;)?C%-w*VPG3%w&~`?fSCydk2ZVf^89U!kzADv zIQ9bRSjaeho~F!?ZK;k``cW`2`3EQ09zE<`?g&@f61JW-YEzAP=x-1!0vZaPA8=&G zp1DuZ{{VZla|N9}vJxYQkf_TQ{{RiqOGyoUjAMyJQGu7$oPU^GS$1it5W3G#qEY3` zYB)Uxds^gR3f#*aW}0cS&_93BT`@&HDUM%xnesKRt`r~dcEJTqS!N2x5v$t=B!kxD z4J^!Fs$!uINL^f?L5pPn>UswdO9k}>dl`_P!)GaG6nLe*oKjHMTl5%Px z#EdnZKTD=*7Al5~#sO{^4oLC5HXlj0poia{MyZ#&#t)P)h_LB)@M|7#r7{YS-Ep6H zsN3sfNTE+|NQPXGGbVAs?Pf#mq*6*9f-_`i^7sp+i5OMPsfYp%ZGap1+9~8(S%PyR zU#*8AS#MdP9hCz(AO<}FvCT}g@L)g(as}gPT=n%e^acndj393^oQDh4*%T{B_F@%J zb-FP*(obCWwJj4-NgYbWG`bM+>OA=Vx9;CjRQiI$raboSYgzVNedO;d0!ncX~%dB9=(kO1ga`Jvg?sf|Y6Hl3g)Npz;R|<6%Kj zRw62)W=8%u^2eU1Hbvli>7I! z_eTgLJ&Em%biH24W`>qVlA<860uD$gp5GfHDH%y1*3Pl7I*vi~7A0*;R7o4GB0p_J zY=g1KUyYwjFh`PcCajjn>(>@M%=6PXw-D4{J-hz5S~e|eNUBIwrZS)7Bn!S6W}6;=dcJ>wI${h4KyD6X(vu9S3+d1pr}MPH$S;q@KzR0JU62oY3a8}np}_YvC>cQfpt8??zA8!QFNF5wp_+k zPiefN##h$qXhKu_Y)IG3`r9oP!vx^Gp@2?78SXqT$V;?kfiWl~1^nM1h0wFoK>lf?W_4O}p}E0uz+?DG>@0ek_`QGvdCxzkx0;ZZZ3!Q?SNO8P z{{VYcAK^sFBq&{u10G0U3z}q=qcDakM?P&>{ntdbwX!oZZ9BsBRvyaRBZmAyxb8pmx*CAL=5^C)*h60<$xoGIf6JTN<1PSdU@(x>+2?)ZN|b!_&%*vBB*cT9@|=3!(P>A#Bl0_EPI7L_SSf0pnP+InLC^gQ z@>m;K{698xl6(G_YnNs7L>C>{UgV9ZswimmNedc@J>C@YjCk{Nj%KKqSCn~+9z88# zbXCGHIq#0PWi$q0br092){OYGM^HV!$B$gyHB)<%tvfQV4zJ-B)Anpl1iB@OiBfoM zY17Hcyp2pNx;W74#y=^+;M#qdb5u;Kv494QV;6cr$)sS*BaDd0EN?pLHrj*EOCMf9 z{&rs3Q@F)awQ#@U06=0g06p*x^52jqhTEks<+nhO983(oKZicPHn-TTr-fvxkjas$ z$A_ir8-HrbcS+8tj2z^BM%GhQnPO=qP{%maj^g>LaR%$NFIgDeqt_Bh7qC&=^|t$M zJyjpu#UWe_=@{gEZJliC8^;ojIm2Lae@#+`MQs^nTq|eyUh}u1rHZTKuc)2W0fEQ> z;CNq7kn}}wTXZJ%Ylle*SXRHxW$c;HJr;xqx(O(J-qiG~unIS-)We3*7 zHBC!5yh5?nk*k#_$HMKXT1vC-M9i2RFDeP~;?>urg<(^LL6N9)lj&_2qkYS9+&3z7 z9CY-GuiUN!d^30j+G^pd#L2@RNIHfs%uv*XGDhT}`ul>vr&}s2fEQ7YNiaNaUK!&9 zi)5p%j-Zw?G(eHiF~>u9ZM4-i(Htqx2~9_a{cVi|&ACHYM+_PtNy7CfuZ_;WDsi_{ 
zL|Q>S9$6PS$HN`$G|~lFis{DSs-{>Kl2-E^W=wkc0cXz|1u#cfpq@l-*~h}yjn02& zIB8J>C_3eKB)6ag``ZcjDC;)=0Kc3&ON^gk@u&2$enp`9AqMFZP61gWli44Mr@0;C}$CkTeBHiB(K+hU|HeG49!)twzUB}z)jXko2WBTCuQ7bna8h1sZ2r%OB) zL~`em`h#W0H`+=fh-N8_;9*blxTKL3i`J9wp(Vf8 zQ;$)fU)JjoRYzA0^F@`2U<25J>vwH(oyw>sJCQFh?VdlM!t^zhm}6RgBzFWhM`Lq1 z13NM``_y}m;Hn$h2)G#7tN&HQ;hQY}ENmDD~9ODk;HWy}9pgK3S=T z8hPee{-${_7$C0BK1vb*73a^OklX z06Mkj>Nf;QB#Yf@rE*I$4=Q+n#^E*+MwD>ZtWBb!Ks zl!2oDdMM;vN}R9Np!N8U__dP6AeTxzwmWL+@%UQ_g7{#mjf|3`Xd{hEdC!fR4%}+g zpL^MDKmd5n=o@g72;oUl9YXz0*5EygsR0T&IpFX<7CO;!98&DmbTsKJR)Hcer~d#K zh3D$4X_{8iG{l}%F#etGjQ-q_NvL1}$<3x(IOGd5woGTt4`29NOj&@Hf_8YyHb>?k zzS+SqxaLV%dvUHb@!!B$vP~Rv236F5GW>lmy~=>Gjp9=ne=%J3y`-N`(aR~4B*w1! z>*H{iSmK#7yb*(rmNvg<%6pEN@K&FlVNZ>LuuL;`nOYPoa;gCqWgS8+RP_-mtBk2X z-%zy`Kr})oY?3f>(&tKP3?(Xk)JVn@q3+(M&7&As?%#C90Z`fV5y1T{C@CY6gXz?u z^PFPiH4H)zc0EnkC(^-(-HMu5%VD@x9rJS7(E2UPnwm*RVfla=&xO>J)K{~8wj+!V zL-C*I>v`HZYNUN2d5@_70DC%0YPuZzn*7~*e@m%lXdG}L6h62l}q-+vwskY58_fs&^fRML(ZcIl}P_It0gXWA9+Xk^>GA~lLwWMS%~zw2!Mh($P-9XM{| zP}DvnJ?%!1xPlm*1C2d8-_uepHDaQSs^cJ>bnk;_;E8ltBv}~Z*wMSdrN1f9)GP>; z3CieT3K9=a*X2jJ^nTbN<%sdct^!!XzU}HIIJ=LHiA$z2j=fFaSHxNMy5eUhvXNm> z?VyYf<-g&4m*$PY)$R=#%SRWNj>Oq-^9JJTL}gSTMQhAIEK7Gi``L8Fzuk@T?t08kGT-t*54lCTk@HhL({*+nf)G^gA0mF1O(AE-B6 z5!vRYqjj@yl{L%u)l}xIj$~|>Pl-6S_0>CNB~+#r_Z@~9gTkLrh_w~l^;JmrkyZ`> zCO!%HkOzCu`G-0EZn$VnL5@Kshf|(;zapK{^P3bFJ#2{MarsI8FFz%p0a7)4T@u&G ztZ-L3VcR12ut!HhB0O}6+75Hq`raqy+Sn;P&Oi<2Wj)7;y|Xf~)TA<>q>j12FWTHb zj(g~r%{zzfI#VVBl|ZFNKXs|w`P#XRKcb&o_Ii1Z49tMA&n@ZkwtH17r6hRHNb_g5 zd@rNV-=GbcxG8F(suD3=YhqNYm*P;@a2#!l+rA!+H{=!FHTV_y}GnHE;PTyUUIUQYAL0H zQVxe@&+%a6Bjb1NwZ?*`FW@H*r;&qwf1|6HMwD$d9?MkAuL;m_3H12eeZI09I(<+@ zp(FG8xnuG5wwvW7ZL~!sIzq73$8LJwG)h-(c%xY)mCliY#xIrM_BpA&Gn=h@ZdLD6 zRbw&`aJ{}i8$N0(g3*1xO13fq=RJQ*K_qfSgtL`cDJS^T~KQ2!oi_+BSn{z1RP?2-R zzuLCFrfN!rb0kyqfsZ49#`$lsY>sx=>0_E$g94z91CjB(p6Op5eragpbPs?sFbBuN z^v@b>kP%vjQ~;Rq&mKJ5{{XhA-Ox!AQ_@ow$bLP-JP$w1rSmBM#Qx7{{{Zynk|2$_ zRw_p8-53I=)kkm8-zwbc5{9;T5&hJS$XEU)=y<-yK2wTob|pl 
zUqv?ES2Z-_5<={a@dv-_-u*u{{Z79o2Y+AJ^^iIB{eNHAjD3G!*Y)EcU)S~hY-hLi z{eNCDD~sZg0wmH_v?w%JN|32Mr~6vT`Wdi2X$xjshR zCRR!?Bslj8CmO%PJVn%2uv|MOuvG0SPnyLhS8fh6qr~4x+3F;to#EVNRDr^^IWcMl z8=*Mki^1(bE7Z@o$D&AKo;gmmnSzx)JM=fu=wRGUD^u3ZMJpP6@^SFI|+BxzR1C6^w4In&}SkJ^+_#HgBDNXDHydmgwy zzWk|%vYIxjr;<4&WI6_~r23yxVbVD!V5X*%c&%eBu~RHOeY&XfIa3P{{Y+jqt7)=(`DF}E}USV{#&7F(K(JrmSxH0`M*JT(+5un6;oAD zC}t1#@v+f?j<;O2b+vP>(Ec2HKrxVf!`I_tQ&sM5PpPAkmNq0g82t$J7Wud6-kk@! znz7D|cl0+T4EB<=6&i$vL#~-3VW>C2Tehi|ZB0!bJmG$=LrjbQQP<;m%6-mQ`B51i zjEps4c;Csr$n_3}NVps_V07$$J?<)wJ4}6DUnr+(g6Q`6k$erNhMtD0q?6n@DX93I zcC#U?1}#$%=_~1exd)3AcBYSO;uGY42kT(2$#lfjuu1GGQ=JaY@ENvr1yW{zaicP} zOR2~I080MaY2c`X25lvS4I{rEww7ZpIQCQ*C5Slm^}c>DM>2c!40B6Rq#toZX%c5A zCiFB^yR=3jOuuldtE@L1^tIL0!VR*B_7X{kC$R6{_Pc#bQ`3FF-$u^BV8`5ycD}gt z=)MAK+S-~)42>H^`Q{cshTgUuv3}8o>S-6-Y#nhi{5JK|bE}hYs&{|ANm@LE87GGS z02StB{{Z-PL{rCG+s*BU!V%)8&oYsc!sE7A$YDC80I3Yw)$RBX;pc*E+Sn>-=+tRg zVe3)`tzN26PH)=!kx?Zq&|oZqT3sEp#&{Qgp(fER0EQm>c$|K>7Un`~NEU`@D_{#O zn*D~zY;sSoZQy$9iYX$BSe6EL$Y2TY$sVT{mRMGi41KxC9k{o&wFn>!q%xDp&N=Wl zr_~Y71<|Ls6VuViuO}soeqSGr=J$PqvRPr4M+hQqXP+~VOkSdnmKBP5+c1^MRwn@X zc-*RzmZRI2Iy^91CfQp7GB|@D_3BW($ zHhb39(=n%L5^*5JW-Nd7oBZ7c-1~8giHky-n0Zb{K{gdVn}{_8f3)X$z0{5W0EKag z&yTJ1jpDYlmbOZGr*|mLlh6UwbiLATu^9aR@?Z~erbfR`Mb2H08DJt63#op&aHJ1k z8|j}k#-PFNyWZg zIK4Bq8=YYH>EqQnF_u|~!CZQH-ecvh-lcx#Hf$MkIUECbIp;o*DUoD2%b;-7 zbL4#?zr+-EzIQ>o?^RVZomzNc5rE)!<*}UKMWd^xg0VHk(~JP}FC_d&h1F3(Nd!}s zh{UsMA%OEXPh(`x$Mnrn)NXJhEYX=lIBzE?*g4Y2s5VdSQq^VNkL{6<9?&@N>HgNs zV5gbc;{-U?7ClF=kLhgw)u-HNBw^5C!^V}zP(QV*#B3P}>E}-{StA1{{{SmScN&u` z$4x|$q2Ni5>;Ct6Z?-H~{$ar$KMS2?^3zbr&TvLFfCfPQEP9wbT3zIZoFvE;lc)p6 zNf&z5k8gp3I%hVfj=%PT#wEjmJCo+mrYzFvhf1aa>vMvcB`)UjvT9#!FVfuz>TJZ9 zS;!==JKFEVV5-A9bEhQWX32@%E}RT>z{l@5deC|E(*FQ&@iM-)-kBsPx5CoqU&sz^ z*53rh1hY$|e+ln*%s1o1MM)b4BLHW=7Qtf!ptZ?Rk%5N?$2aJ!EO4%;;d9SSGsXL3 z!sWV?WL+x4a28kBZ`RJ&1wp|bu8Am@I-?|k+;#r|jgo8| zO&RLB<)@=zQc&T`E}Wd?_}Xec(wba5C{`qZrv!0rcj`KtkRyv8T_l6`9P_}wdr`Ah 
z#U@f*5OrugFh8ZfO^q5l+KB2VQ%r<7`juB#>Hx9h`)aA=_g+G)*+Sv9`2M$6#wr%F zVKjmk5%Sjj#v_e=y$>d=mi{5}yjGP*P|5zKDyd(kwSRGFDrA@M7D+OIjsY5#x#`ee zo_c8Fh|N(r^Le=B4*_Wu{H3 zVHs%MZ918}(He=DKhWcLa(={>!1bAqCI06$yCd5@*AXr84E&l<)P z=@{hvZggKtCOLuwmcd-NiMt7}Svnl$wtzBllA7C&stFnc*( zLA)}v6+nGPQTF8Udy*`KkVi7fAJqOuU(18xbSJT);`iP{ppXKawr_4oXr-qaD8nT2 zjCc6h=^{GN>W#)kjiH*BH&q%k8wWnUxV2N4tb#f?VOUh2MsuwAo1<`;YRP8Q`p|D5rw<+GX33ORA|Ik6+U0L7{_d{{T?wBRvhBCi}P4Odyf{ z&t*amO7r^nH(ZNI?w((0_fk(E6XA5nAwWY%1mq8M+h1O$?V_AXNWNS%4Cke&`)_ro zQ0&0xQB&m4>2;(MeX&)Qvk~&IufowosN^+a;l`DWjcv&%KAdrPOSwn4#*jscBqzCC zW8-N(`jmy`i5uzAz;$n5jia7z+zo$jPL92Qm8UvGlVe=}0C((2tEIE}r?Xh7Mq7ZS5(jbtvV;q0p`d0iQJ0MImLH{?Ekp(tbVZGLU|WK1 zT|~6vw^>H^-9h!i9CLqEQ#9HgztUdZ`EluJO!^Awsw?9Oh6BDk6K$)emY#hwW7{|( zdgAge3dc}hBp`iRUc$(VtM1)Pq^z04oboMbkn=RtzNl2JgkWU(McUMNpf}N?e|j2d-x?oL1bBMit-k%BSY#?= zkZM20s=%7AzOodZ!mMfrP!4}zON|{i+dPd?TU3w;$S8~n{{W+W(zJ=}zMpe_4(ZDR zNY5kTYH2=Y(LGbtAbnkd{G@*kwUof~x7lfQe#8$ir*4*yartp6-5=Z}xyyyg3*qT~ z6$OtWooY}{WMx+5C^*19teSeH)P1>u$J7|}dFqXlM5Pi5UzPHvIU^pPmqyXHkfwPg z(ib|Ek1_aIn!z(#j;e*iYIcwTj~`oP`()CXU9b$jhoJGi($&2qhE@d+V0brlyQN&x zE5=hGV;cFoO^cHr$NMxq)bj?)u62%ZeJm8Ir%99{`Z>!E{uZi*{il@3bx9*<*4m{t zIIJ^N`4o>XIbPk{_gL!KwUt#!^#}0IT%MO`mFZ&?%^$cy)K7`%ZkehgQK}Wd&I9`R z+&$Jn7{09YsNC~r>ii9AS{0z8>T(U)iMw=_8jw{Uxn3C zxoV3;f-~upeI9h!L>N*EWzfgW{9#Y=cg?J*+oPa#G^h^0tIfrqOsG8ME8ubuetQ#R z{{Y-G<ej61<5w07jRgGv0Al#JSn623{#L*O-R)F{wnp~+?dUEv8!Rt0 zp3;dUQH-AX@gBBqC0tu>L~3DmVs(1_Y%>Z;Z;54Y*lDyhoypRuJ$!6E!k(m+_RtxOn}M9+`>GuGT7tKrGhHv#!-x zYN_QD9)Q>Mu1Nd;4D9L3hCgK;#Tt8Y;+Ys?<#!pdJG< z6$|KdW8&loVUmWNX0(KdkP5bSpHB-ARc%S42%?!031ZO<>+#PVi?ebhP3;zqf~#o$ z6aN4kMzyuIfA*1tgLpm*kVEhadVFjO-L)y8_UTzs znEhNFpF`<;8YQQzXdYJA5~x1f_2cs!Sy5fF?i-h}PaC6hIL@6zzw)v|LE$7Qpm{05 zjgnb-1B1pzvXUx!pplv;52WeU+#8xl1XUVVQ;xa**KKt=FXHMdx-;?Py|$2UnQF`S zdo*#iL8y1h&`y1!{603Onpo$Y`=rJ((~sq9sOi|$M)4NTbAysUT;82xc5)B5lj1*> zqhUavtz==tA~1aUKplL1 zE~gA+9%`D91cM4Y4^M^LNmEM%>XORn#NZ7%QcdV8wuDzn421)oAa}vAprB9M4I8|8 zh-$-QpAmIQ2={S6Zx&_2 
zcMj3}ZmPJ`oN2a8Kr+)lvcP12YYAy8J>~bqA~E1EW*)ZM3iqchrX+XC7<0yN=&R~v zpHs##yz%t@mx;sKLsLkI58)Bm_qw7+nxQhtk*Uem*o%(h!lBwkY&YjWg=X`11%{5gT)l3ug1Qu^CF2LZgq%es41+ZVYtcK^$|F(&(dz zEE;8!P^Y>N?R^NbeMH&8Bo;W*6Q{!LDOU|3&Kb$bI2p&GxmTo6BQ+}GQaIJg9wR+$ z_>y{!z6WgQ^JmocChlGXtHDhQn!4I}^r{l;pC}z{eg2+7UnV@DFxNB5!nt^%AZh3 z{?=dKRvz^d{{Zyb=K%gQwn%{e5hBT3k$=aZ5a!V@7s80Or^>+{sAp*;hl#kac`DKMj8|Z|*K?mKLi~&^KM1Vyur;D6vR+ zR}2&rc?x;w+Uma2s{vSk6N8Y}5>&%1Q+w?uMt!_z^t2PnF=Q{OuV~zppN4PG^p0#5 ztvWOGb;4@B%TQI=6za81E)93C_Zhgqd$libtjPE z8`FR88TZ(|idf^OZmv$1A0BR@QesHxRr@!~6YWwpR7i#;Z8Nj^*}Z&k^Jm*dJzFA6 zh>V3%+m9xAV{UipHzGt;HL=L4JUr>1}nCEoyyk$v;HbVNe_>KcO+mNCPb$l}7HMQ2RAN4%>zj>#%{A>fh_AI2ZOlu+ARBvCz3tWU zGSyRw*@ivDP_2R64d08*)oqPcOYXxPWe;)=S%2Z{eR162rD)-+#G92Y@%=0mA3!mE zLt>_baI4Z}(2-%=ADbNMzC*V?4K)K9WKlMBqLa$_-iC*Ahir)-_JnI8Pc{yIu9(5J z4hYB^0sunjgnn3v4Ffztk?!Zya&QqpFQPScg zE)C(4$mnx_8!{STBWCM#gtk%!Jl~pZzii{_xEwFlk@{X9l!nr5){N)}*2UKu{{W4f z0cTzmkS!ZZ0%Ghc7X`Z>*DVh$fkzzT-AlAc$rzE6FupvSmhjpR(zJ;X7{ZJTceO0D zJAeDN&MXgV2&7Ffki8EUd^PV&P?A6vAP3fb$szHuwnLGs@36X>S>lbk!f68|<8F4i zDIuAC*qEtrsd@qHW9`*(?=mT$Z>N!z>3NgNah`+L^)DSgFsoW%T@-*2_s3D63+48_ zNKUv7Q*+z52lk+k+#`Ujas6(}$fTu%+JM5&PD+LzUkgt_zS(71lobJ$y+Qr=t7;ll zfoAr@%Oao6$S19(T8f+GCflL=VpuP>8~s-3bKdl{Tb(>g$xVuZp7_ZJ^t=q4M(F_j zT93mgBx*UwrL)^)j)1dAD#s!0aom5#)GRs`4=U+4+W!D_R8uO=^{j!4mj3_==bO&0 zBs5x#5HOX7JzZmf|g=f((k z683C#1IMM;T9NKiY9Z+5AbueK08N;5FyH@30G zD^s@&F_!l|ZMo5UN+(Y(QfUcYF4vt^Q`OEOdy9%I%#j>>NvB?7`ULm3R@5h2c4 zj+T8t%N3GQ$u}zO`IVpZcndCITpB!djVJ95y>tqO1I^}K_UAUgbopTa8Frn+0g^~v5n_*` zu^8?AU2Mq;(=vxu37u>`Pf=u2?35MKQ&s)c#K)h*k+}>yf$7ilwVS%l_ny(@KbT-~ zet)l{b$~BVr;qOCH7JKeojrLz9u{A-6&waFUh9_00OLm;g~FDv?C9Ba>Ljuv^v|ur zHAhrQqGCbLq&%PV_*o(-9Hs{<%OvoM*=2Z`fgEx0paax(y#-`cQ$QnFdv?LWP(5sT zKJ#&?WsWG<*yT)kw_8g-o74H?`bpj4%^S#7Cn5q0SCHFC#^`ri3o>yy1!{<5@jFQ*(bz z>#+IbpUE6dsEpWboLOs*I3#xw$WE#unnO1kNH86GH<7*hdA&PP%+*62y(u6E1|9vJH-ag*Wk zvP#h|#MQJL6%A=>8JahVmrJth)cym|S#{L()zuJvyzbMw=A^0=8R&6(DYng)Ll~tk zG_nz_FyRmJ3#wFDDq&`D-q+zVzz4t_UGW|nI-RN~qzvL&bh!iCK^d?=0gSmk 
zdwqw6%|1kY4sD`PvDHM+5o3<{AE!2#bCzll)5};AOzOZSxyM6;a+b4jk8b>CVeNe& z^vSZ0+eNxkywd3WpNQo0M}90^bikWl>-WmRu+HoOa(RurVSPubwu^!!%%1B9(WH`8 z5nw^`ywU5Dn8sI}4nqcS-EJwVaRC_fF>Fi|0HhM+ z@JI02i%iPdDtPJ%82(qHph?|TRY4vi&UqO4-P0$u4mx^Xjc72wmIYlS zeJC@Y=H;O1NR=}FO*!$h73v@TS-@VZ4h5}xS%LJqibMIE`PlnKKF5&F1Ov#XO!)C< z!Bso33lwpK*RAI%^hSQM$4k)DWvdDCh?sMm zBFLDT^@CFyzo+x&&avgCieBRf)Lo$&sZNuRbg#trwbd1Z6Net?{x!DMdrX{^ZGO4n z-b%hHso0D<5&W$((hK{j*|kJ@^dlC0Ra(af3_JMV9$JY|>dr|0Ked$wc$8orGISol zt;(zwPMY@$&8%@K#uy$AsYudlpK75}7k*3d+40Hk@VHUehaS_DtPiO5vK@nGDjU$A z{{YU-GzrcN_kFi(ruQ8sGCRlHkV*8k)Xc35M66>nfEW?!>7Li1NZq9XD!X%l*vCJo zPL?fvCOt1CIc)Y$mwHK_nAfR}k)u{Ze2m=c=hwo>F~*ZAmO&dvyoFwWOO-;UMN}A- zkq~$2Gt>04T9`#M%^Js0qy+P@&%_I97G~={Uy8D&B{O+u%OLDqJe#~hOSX-1lOv!i zLH^d_zEJSR?IAA_)rOlB?@D7|mrxq@P)4jp{F_3^xB5cOuL#}li?+=nBj(eFbP`HoL;jt+fA zu$rnNBuOggLgBdQ2Gr<=CXt~E12=QcqXp8#_iZb_6P%9$WYpy(jcM@w9YBx+W8-uz z5z?HzoJavaml+Id#(0!Sp>TOP8MN5RYWA@sdvRp{01)r##%|`_C5kx%Mx?h1=IQH; zSy4+-GAesZ$t7}0?{NK^=vUox?7fC~9v0A%eH@UQW->8nW+O-(W8-t3poFAq?4uba z2628QFjGe2Sv0nCh&leY3eKzu5f*1kn-R$q3h-3Fk_6lwKSVAR$}?&sMl z8uF+QIq|f$o2_*PFOy1h-x+Kkm!6Y#ub7|0B(U+v#`)R?Zk{u^sGSlpK8-o^4~Oew zOG_&L9Qw5m0~3-5_-{2qSyd2B2Xc+6l}+oHbdI&W!%y*M&WlJ!pD$BOIE`NQ>>VsFT`N0P{c$zsF(zr-kDl8svIeVnL_z zb;SM$j&6mHr6pz8tBjoT2(T$(WsXGDPDZYOFMzXRkMKq%B2Q9zykfeQc$png1L=%p z{46wzpvd#QYzud6@n$iVm^7zQPo(Fi{F60BOKQ}yY5IC(ThTHfdIIVE=gPmW&8oiL z7B*5&KB3SXt8Szcvr8yG%9EGJnS3!}%@p!6)%a>plbn)0O|gbFf#GdAA%+RZYpxx_ zWKU)z_XGa`t7Rh}t&vX|NObytTb31h0|(*F&3rQpql4~5jj1AFzG2)U$7>orzP)Ik zNQ^;4<0qDHhdpkUl|1pOD9_yb+}ukBgC404JjxFl^}arDk<9P1$f1S76;Vl^hp_%v zVy52=iXly2sDCjQ9Q8F`&ONfMcr19}`gquml1w3iIpCQk>_>pJ@pOu7kliNq$lj?6TQ9@vKYGy!q)Jo@8Ph;t3Q~S|JF=hpWX9_sa zg~FUP#tx&WSczCbRX&y+&oLScmea`p0F9Nj%w+Sj<0qf|tUbaU$Xc31E~0f~jC$GM zV`wsxS(6ewWzGOE+eqon)l(`}xOf$Ptwm(VQ`Z)%w!hqsY05H$P#A&f-<$HDdUk`^ zg^Xk#yc6SViYEp2v{eSA=tAf@MI9H$&!?tgENv_%B^cx$_Tq{Zh5rB=r5L{p&VQYo zR3U|ZmSD`?vCeaCHUGwnd;LC(EA zani`5+xI!?J^2+3M}|Y(bk$>+(@N1bJwxmF2Br(b$DT257J7*Qcc%e!p8o*wziOeT 
zqhPU~Lh?Ng{RE9vf;hD?&m#uN?kSx|eK0i+o3{SDbm9GXi@Hnq-FFHZw@3IQBp zo-opKJl%k{2=EFoZ%A1ezmLMmQBzLLzS(vu{5>z@B89-vIL9_R#*TQCpHCYmDsdPs zHVEhi;Sr%(54jq*m4VJr7&bLF-A<-QhMA|9Ho)b({{TE2XSYKfleBT?%r2CYN_ib3 zc1-D!2qV4pE$#O$+C#3ZN{|Ru z!R_IBf7q%YFDAY^;AGoUH#~^#n~ghbj3>63&U)vqyAQX+Byqcv@(h8K$Hwyyv{hu3 zbzepB^t0b@l4__4{{T)T0ho8rIp*_(4Ne5tS4|r@ri`|Zz5f7}t)Q!_m^8Aa&RfdE z7##E#+`-NvVn<&KYqU>6MJr8S8bwd^qfyH@sTUj(Ttq1n#|)1NmMrpPY=NKF@Rf5+ zlM|AJp1}9M!SfdT4(CiErlnYsPF29eoci8sNFt@##bne`%zeWcZ_R7*p*co2JtvjSAg~FTz>HM!XR+&ht>5LS;*2B^my|XL9mTC*mx@G8 zr`vv+yWo)205|Ot%OpB~5NFBz#p)@lLpSe`m z?eyxB#T&5$&JX-AB!Vu7RPw2$c#Dj-NVIv}qM2B*&`25eTbvbCQ&U4Eo=FrPYo7MQ zZJgG-K%-UA#_N-5sAU$)N~839Lo23W+=n4pD}kFWITabu`VPEg`K^J(&|&m%TC zK8^A8wh-Lxkpk=+3ZiBZr+KQfFXDpCMlGwl>&gnNnQ5hNFKGboC#~B=7tR;#t%}iOCj1b)OY4=#_9%NgM|!r zk&jGw{cK89M@Km2M>kYU?uN)^QJ&pyh+&Y#^(WlsGt>TQ6{bb%93l z!A|KKm&xpVjM*`5u?T(DVy})>yL@a*=^p}`s*e~Hh0kKy!5%of1Z_?hP#aH8cA2K0 z0yG6-pEhqpiT4OVX#yx*fz#n-Otkc0H9y>$OQ4U-l2X#sgP9^8^I$YZ12Qc8Ah4m)e$X2`oHk|>%uW)K_}QhV`{{VzW+y6-(YDJhfF zoL=6GYpI1jmkQxdQGo|OmX2M&a#<$MLa{nMul&~!2I4L!pi#z#E_;Gg(Z#L zuTh>2t*NE`qUh@x!nhg6JAZqXZruB8CMq!>e>O5R&pk10p)E5;0Fh);o+R_-#63hd`E}dL42LOC;S4T@(OoYi)G0ahC_M?h6wyme860_rIR`)1`5m|X zi98Hvk_u;x2+Fs9Jl~ zquo-2awbGR(~T^^aNZ{NE4XaiK>N*Lw7JB7hu7hL7r*)|pG4EsomJNSyRztldKQ{Q zU&=?A{{V{fPxCU>OYN|FN@JfaI4lQFpU%tQrrFxDs-$C7@*nh&;a4|~t(KN)0jGq( zxa1Zsll!mJ{{SZ{U4`>OHI&qhS?J|=UgcyMasKwI{_THXe-P)_AT#=0Z_#vyd4FHm z_2D01*Y*8)%=-Snuj|Ic>-zq`uNx1q>-zogY{!@N$Ri%ZH}T_)UNIZ`{=csn`u@Lg zZfhGg;g#75;dXlgMg0-PVx^ylc9 zEtYDhr~EnMo-lmFAh7WlOc|~|#4(p-Acq4UQ_%QZ`faA4Ziry(>H+M04>q|sX`-o_ zOh0d|XhMm|Hf4yt1FnH6#wJ|9pl(Kf7MRh)Mk1^C6G?o@5)x$6dj9~z-qm|kIgVBV zhb52Ij~fz_I(3+k7D&1_LGupwKA0;p5KL)nY8gz%GcUOlWwE6GM+d0sd)R8r#*8&! 
zdBN-O&52zMlg%WGBlcA}Pyi=L?|Jw)CU}gV=iw`3f!Ke-<;aw_YsUWoQ559T(jzZrdN) z%Qm(t?na)L*PB5z0%D21$rsw!0q(duNU`QC6fE$nV+B=o@-5Xb&9K$&`WgpU83IH@ zq~nZ#y@<6bRUAO%tLtUh*&!I9|E$@v$4(p|4`z&QjmD~3eddj$ zY@J7*032P_TUBS=c~89B6st+d&!!I-ajlY^NP&FPa%Jg{3uLlKxxG2eku*z&Vf4T9 zHo1S%E{x9QJdsq=K~ttMA1^A8AUN%j*xrJ-by`iisp+axqC(3fGLUpe-3O_(A28Iv z{lqY&a?@o=)CnQGWE)%Z?Ho{3w16uKnSfs9dSjk`7ES1Gjg(C!)X<37g1|SYJ#%o9 zy$vN~G4nh{3Xb{7=GD2DDWX*@0N~@Mdf(6X+igj6Hf3>*eKF%lQWz@%r>9tIfyna$a&wih!t^KYm=k}woKf{aw2ZnttCHA7+7MmeL<;;wD zY;?Xy<+Y&SW}xjA2^pw@Sp2yw44`Ra>N@1wZ)1`?_p1T4W z22h3vR|=!9EE>wQRLi$XnI&Z*CLDspzCIjZZ=CcIA?M3=-u!aTl=8&{BQmp(G>88H z-S}GBcVoNFBhu1Jq&ZBnAm_JU*Q=tYo{wpTT*RKjlOb#X$_HcWZ#^|k8&2OQ=OKyK zry&(^GlPOudXD1D{{Rk&iGRSY>$k16gqc4MR|;1;vZvF~j@BOE^0b3;6=^o1thkFk zoBogVwiKT*ZIe@gj$N0}nLGReHyxLATFEP5KAX7JV74+#`(^Q>pS~O$Q)&9=BrcuSq)8)=Cx? zI%A9T%E!0D>SU-uzbvBd9sKGc`vZPwaScovvUT4?^T{fhcp{nCwn?GXsZpQuOQS4^JvMOZl()iO{{LL`aq z07EA@9=X3>6qiFJQ^*e`nBy<&ZRrh7I7c+=Hw9H(_WuBdzKeSOp+PK7qK$#l3}ZTR z@T0$T}MK@X+3nGA4`_! zN-E+;a*plCl|uIT3$co#9k0nhrF<-Kg)UX7Co zg)L~;?a1a2U+rb4l_x`FXF0HRM_e-0_>gg{Kks+b!1Ut;6W}bZ=y^j}AT>uFfH`a0 zQc;9arE%N(T3vz}9zsaOWNJD5-j}ii7QPe;VBq{L3W(Auj&NB206TMeoz%a$z<65R z2m;8bB!O#Zk5@}6bssBnW(1{-ekQR+%i8hT|O|`VJGQ@pGO51p* zqN+>>&`&~eZE++jUBD;iF8DpK*(4 z9H02y_EI+0`&uy74joTbApZb6$W+BOBkGN@r@tT4%-a575KE^&hH=;r>v|fo4&tXy zhB)Bl41Si0GSs!c`3$E_k^Oh6qo`TH=O?Y(=ttuVe75@P3!fPWq48Zost&de2M;a=$Nv9fsK4MR; zvuHqqmPKtbv~HL^^1sT)s{$8ac(BTXzS;iv&Ny>SNGhyA?rneD!qO^KxyCqU+$3G9Cjf&H9Ot+u?)GC=5hS|fJ%8bG_FZKzk*!WLqBa1Gf4$=r z#VAMjSy9$wgpiz&y+%4&kM@V~!y=M;4D z%d_*y{?{#!P-JvMX>@8?8XO%Sxb(OYscIowb`2>2amdGon*RWCJN?`wDUEa%00EQh zWWhA1R9Wz@ROQt7^v}lQNz*$76$!ysI!3JcSmLu>q)2#U!M9JjknErk;FT(Q&-^V- zVx*@C0?B|#9SzcDF3Q>yE1WKH3D1Srv@=TydVDMAlbiY}W~3#mK~eObJrs`r02`G% zQ`5>GH+82y*~#vE-3*{@)f4IaxuOsP7_m@JMbi6s+)E=;g6sYq=N>lS?hK6U2w9=- z0OXV7;eJw7R~eaNj6xK7@Hk<3Vwfmq5X`yOO6oC_f!5-(PgN)weP@rS z88_gHNn=)tq)5~qOhyAAMfJP(-PF>^8ZBP%I+d7YWA*;lpJLK;xP~V8<5I}O$i7z7 z>VI1%nt0?xENV`#C-5;Lj0p#AMtJ?GP$eK}l>+0b` 
z${|+4lyS#WZ+rG|?UbK`Bz3)xc;I?n(A0}4_ZWWcOB_t77X$@4{+C^O0UAWcnKCsI zg1PYTbDw^A6G~AC(0xvWl22P+vqK}9grKW(%G`fLaV*VXy%`{4br;CMEn^7LC&Z^# zeB62)`bnwhs|r{Y&Oq&)bh4&lRZy;Z6@m!OQk)6va!^|N!5=Ix3o-%lAbOiVEN;Z z-(ySom^wkn^}nh1_$LI!WsHH&zCY+)w3U?bH?>t8_rPQtvVX!TZJKepVHkUyaE=Hg3814;D2k+8NT6|r?}ln$8Y#ql=T`25=PIT zry!j_3#CRG%N(%DB!$YXocN1dEkrP~11O(Ud5dyZ*@hnC`<~+|>E6{=ZWQs3KU*xlC$WNURFl%q$x9h{kEc&t_h8%p?W?|p_M;4u56pZ3=Qgsc zt8v@mk{UDaGmks5$9|WZqpscdNxt|`7@Lvpo<~0(E!V?*HM7yuOcqHK+xE|!xMSgl z>ts^s)~ihbjt}{zxh?R|Z-w*y<9FOQ23pCaLnAkwMZixG2U~kny~z&HuMiHdx^*7l z@sFpYumDst(+7)1mm=aY64bG53sP+SJEA$?0~#vDL+3mcE^*tB>2# zeXu!DI}VNGZillHwyJI&F`n3O($hvN=+Ajub+T{vOEHas;H@kk{nTJwfIze@{xN702w z?GcoMGpB$VKefbH$5Bq;EMb$CIsha77A&=pg=UI0c0b3!{x!%r+?35?GAu-Z`Jhlj@fsH%D2PqyjCeEt6bowp?E;iq@O zh+t3S+fLy`9p&z^wtEhz^35lI)9j&f|d zrS@WtBSvsW7=I0~-Cn0}pG!vG+2vRcIrX}lyWFQ!6pN|5j@RV@_fO=Q(n?x=-2nJ3%ezpmQI*kk(NK+>m?tBj; z7G%0N!%MktQd1RZ;)(TrM;f0$0BmZC*G0`zM;Lx|oQ~dt)CT(Ek`pxl*Q$P@v{VsgXK0&bV2sjopCb9-u|3F(31+T#fo zvWPy_a)bdX>z@t)7Ra{S<&W`TWjHJtu)FIEY~o@bev^8-Ri1{N$Z2Dk<30L#@p#X> z@ITJm4BdMBxIV{Hmz;k=;hvrkZ?CnhP zWtg#2Fvp(5AKvnF?6m&?wH}(DO-P+dvSg_H2MbRdO3`<11%jK4klL_QTRgQBv#}uR z&m3v+{VrmnR#U8(1Dn%VZFLp&an?l%)<$FiWA(-5>D7{P8?SN!9~%xMowDf^O$>4; zW;$`#J~z z@wG(h?(anbiXAgY5R8v6j(sdm$K}Xt8VMknCSw9lFbM;=?|P4ywmrLaEaGpxWgxU} zar%q5=AEjyYNvVOXp$7j=Y|0Kd8{|d8#Lzor^@vdl>Y#69Kn?bo34HqKH0rI3aML2 zm5>g_N8$tM4WgE=zHG#@O5x5lDL6l+yQJBxX(@G1tqGI;Fngc&zK=irOG7xf3SH7! 
zqeu(`P7F+PS3U50*sxGdwn$UFa-7C`*c9OH)nAURjtKAQPxTkmY3XU@K{WEF6b4X8 z$m@$2rnU*OZqv0?)YQlCDQKl0O?`p38{f%r$s3s~J)Jy*kQF>W7E~WKP*WLUG9olS zRSk}K?hW&u)8`tQYAPd;3F2T>4P0~j9N6Q=DeFPwvk_GqLs2Yf{^F|;ANaA>$GI`o z#P-Q1<9>`viRkQ=(e?d*U)N0c^x?>z@MtBNoO)i}L=tX+D>Ygm3Hj7w#_@ zjs0`Q`HbE%8~W@Co0x|2kD@wHdz_kGy)FV1k$0Fwb9g}h%O9j_xh!ON7`v8D9j@Oj zz0NNW2&yhFQND)aJ%0Nid~;-0b-{q9+H0pxaYF3A0_;t^7ttvZH(f&od`P~pwo=Ve z12Z^+IA2&;_|B1>HRfJ{{$Q!K^8ZpUGdxK>BheAS{S35{0Yhs)pK+-Y=&E{?6 z=%i+l9wkI1e@lB8%hN#|vHi4Cg6AIR5@V(hjk?=6S-xHi3qp_%h!it_JKLdGL9~A+ zP=3mermQrQt|TN3KlyR!EuBXH06|Ac5;b*kg;Rs5{BFvMo$9GpSlQhq84-J9$^QTg zsx7`<$OZO8)W_y-Ffr+i=C(zXrQ&N7N55n`*5ms*5WRT0PaxmXd#9y28RDR8iZ%Uy0oO{s~7)<_P zZ%>Y!gJHXO&2ULQLeqm%z~yw0Y)SA3wZ4?iT?nm?o;8*(eXT%0{BM#$^6uqNHUc(+ z+I6l#BiADKbsw{~$~8~8m{eox_c$%-@Ut&09}G3LA2i1ycOYBpT-sXN zw%lfS)h#OO@{pyNj~sTmY%&E#tdy-=mC^=qz<&+>SGd$W+ ztvZ^@kNdU@P;g4GnF{{BZ%HYp%=`O>2G6`zh5?KoP5Vc(ZP37iu2V`$$&B-?9;e6F z_LUJk%%+9Wk~GsKa$!`EFi7;l?RjstdYR*m)U`*u`Hz=Qf3_^09WtZ>w`8G2*GyxK zGhj?V>%GdG#21u$D^5_UD$CH!>k) zR?i8JGmgF%X+bbLJ-#oudR^E^?GqOgXBl4$=G(v9L0ZyMNVBLIO!8O!C*gZo>S}Ai zlDc-0+q__C3Oz2jq^FVIDt);$mC2a?R`}iZfap!~%}sL$fu)8qC(^DnjQSpLrP5Q= z#D(ghQ!FTd6ITz8*vD(`k zrPD~mVDk!COXk}jP@aB*#kv0YP2 zjOy|hgd3ZY8I)}j6KeVfARwQhD`STBNJ*|aBZEXZ$mLWXE zk0zZuLHxve4z?D-@-3{}=;&koDsZUhCGqCZr-kEbw%PYx;8S^`Es^c8Ye ztH3Ilm8dygKS(E__*+_iw{J?&BbALzKr)j2bLsxpMTI60QMXp?GAt1Wk>p%(a;=Vp z+g+xSw<*?DMuJ#R&_+i+`MJ>4RZF;3!!{U3qe(r1&urd-+8Q|Ztil#NFpH9N*C)p7 zmKF{3H4O4midB$>frF4Z_W|yw*N*nMe6a@TVS1^v3ZjI7^0AC#$}E=MMPJ-3-hX8X zhC{()j=1leSwpx=ptR^rQKquP!}nOfCuaPPOQrb-Yl$RiOER3cM+JD`V05*#)G*QQ zHAxI|B&WNsbxDqk>2KeBhIp6Qlh}>;B%V)BFUbW+r3Y_xs(6WMBLEgb*CVZ;n`M4R z#oKL0;k2SlQ&Ai-xWo{+^W=J-`MkdAB|h!BB|xZ=WmP}^fP##r#2vjR?ksODxDC_U)&>; zp1>dLcT}*tv>fYCEliR|79u>#Lw#q|-L%tFhAT3Al5pCGhp&ZzzhaO*JbHnRvR(2> z8SDx5uwUGz>fUIjj4&8j+kwXxzQ1>tD9n`N94l#c zjASc%S}8VoV=Ww_RMJL?P#-_7vZbV_ri!8!)M1@4bqo;S#1E~MCo&LkUout1&q+=0 zDgsWG1moexacdym_h}zg$)y>vbqp2e^?PMCO$9tpO)F&q9Pk((Jo; 
zJ-qV9>P&j?_bCjJo=1H>vO1GtOZ!$yW-(!v#t6=Tr>(Nu)9sR4VxbVRE6#EI?D&5VKqCc^fs=#Pv)06~cmDvlc$?gi6O)mX@x5l{43$tf zzalPisDOAM!+H98IbRXNgWE&(U=n{`_BxecN%~Pr`;0o8Qh+mOqZPpEdI;)K)M*FD zH#lh#MkM}MYotiQVqHYsuD%*p6};dM7Y5qw`*fAk30(ddJ-UGwmWDdg@b%xV>kjO_sZn`?JdY@6orQm_n7efiCo7qC$Y!`@C zN}%>o8IQzpEGo(M6~W^tHbgaANItmF>1*kWrrVnbr`ad}#) z>PygHBmk06JPW+!{szun+2cAC1*5My4N?{Xz1ZMOLxv z)rVCM*xzr7{jp zvSX&u!rs)2)0@*Odw?6c16>MD7+`(Fk>BBQF(^gDbpt;REz{D6PpcL=+kXq{H_(U| zP990pFjGd4sCHB1>tjYDh2(i+_tnk-JU1M1Z59QHfq~=Wbk7>bWS9=aBaXfn$j~%u zX?&|2Ak;phbB~3Tu`I7D1^Ph7dsz`sJkleG;+_4&lHBzsh^vo2J~0CTaA=?{Sqm*4~~THakJLL8I>`kzXD_pGY=x zZO>d)qm;&CD(cc4CO#gwG|p#O4-tcFqiJ_C`gLL`qZ)xD^BjKr$kzPLLoD#k4K$I) zKA^yjUYF4KDWEAdd5%<_LV{ONkBkz&CeYMDwr&){js=aSi8LKd0}?(xZV7duQ&ZK| z(mixhA&ro*C_#}&!=<36hupzBjE&s&Hpgk(cRj920vW_o5|S*LC0KNqcOcfC3JAYFa$@)fBL{L-FO<{ z5qrMVF$yCX4BP|oH|!frJmkkaAZOOgo<1L~(U{IjrTcgRDplJIsyxc0^B;|eDkHgC zWF&X<9(+$X^nv233H0-VPXg?#r)^({G6Vh+e_!W#oB)i|nRS+OFjS}qNF%Qo#(25SOFb-6s!bSIZ0X}sZvOzv{*5J=2PJ%mk~sY_ax&QjRI=$j5_6mOl9nyX zq{c~19^M7tE`EnasluBuCOOA2?aca}XYLB>WfKT9{|o!Q`yT6vev5Y(;TR~>DY1yq}&R7jF6Y=b&k zcyHGE-lB9$B6NTR8b@M4=Rfaa8cLdp6eF=6NoE-Sw<^uzc!~h3pthV2t!GbDB-HUn zsv-e_cVDRIzc;3@_%@SD2a6GzlO1{M>2UWR8On?}90eUsZ$4{B!owk zoM2s|P1PleS9RwRFKlD3~8b{E;9d3s#-j_u5($p1L;d74XJbZ1Q7Ir@*Z(<{i##KB!)E{XwC=fE_78pjWn*2K?Isd zC5CkizNd7olknuDqfGj)KXuELNkFRw{pOWpJ#qg43pSOhcG+pGM2h6TWI%l08NB*Z zBEG2t{aFeQKpvLSWQJ_3K!gG0VVtSB!jm=?8yTPTx40 zUMW$s2v!7v>ydbvX%b?~GnG~Ej=mli=Vq2ozM)bbERMhc*(Xn@B=cz@={m~I)~clY zar=y{Rdsg)>xPQGnUZwDK5r@dzr$~Lt>8mjsw!qI7e*9zIPOU6Yi6sGV+%tZ%ObX- zdw(HjF$dY@Ff^+il=^ZCG0uE`7agvuqAE#CObAv!L)`o z1uQZW*yjfWH?2FA8*NJa(_&3Jo;2i~+@dMywz7w5r;b_Ax{@OWRnx|OMUG}cO0dk+ z12<_&%IWmSLvDBJV%+GXnvIgADb+M!;zd#52RF{-s;ZK4t~uT%1R}EIj)v)ybRD8- z_OMzx{@`h1Bkl`$liZ8)39C1jmZ1A^=pl6+YaXYbmOkH0ST=ussdPuEg(@<~j^y+f zKF(mPj!4(7Wb zU0&NpHAu|xL((l-XZVjBQZ{&JfYn9ij1!ax&VYK4OQf_#Fv;!ecu@zqe8e#g@$Y7u zw2T;p{_5NY8loqh_+EGB2okBIlAcXka9bRaj~mj@^8Is_F|0Ex zj#>V{=YB;(unlu4RpVAU{&%>hUKyAplBG2+QCEV`^k?I3Xz3&mh`qU_13p2MkBze2 
zYN%Ewf}M3Mft-CN^Yj(fJ9oOUX^-6FQRRU27fNt#4`hycX%D@C=-{bSjQWosOAC9Y zpr3B{-Y548=?Xre!>=7IK=aiC3$~QcJ-IyQ#wIps*(N1{!z+`}`gXQKhmJ!=%(Ft; zS$lY&&CT0SQ&lZ9(?p8VF()Tc)8agDq;3$?sw&7LsmGXrI85}-jRjnj%^VQYMpW_S z8gY-*UgB;j#%(ih6cT$>s9JeAz+B{iE7V0@ULuwn_#<6I14tPy{v&$Ibf%}Nki`&+ zXxH;#ocdwI3Gk3m&-KBM_~?;hn_x3?T?f}nrb9+t1| zw^*?Zbf#58LN}`Cj9!U%xDq(2A}A~-0(@953rkFd; z0dMtWCs04HrIlX;##yvGC|@Iv0Q5IFE?6Qo03(-n)%i#J-BM7^IZ09UlpbO0Wc#mA z598{6Ur8gM(&_uDeo^n;B+2Q`q}$kB*n@~T0Y=< zWOX)W4&Wei5B^9q9uild_D>B@WuVP)iS`1By2VIR5%>^;^I&g4Dio1tRuz{pXq;UtBe(P zz&;lkswJkt!i_^CQL|v&aNms*={5jK;Iw5nZK~c!hYad|haAi@qjm9r_Gog=58VW{>@FW_UN7ZvRQ<(rrz*PLe z@JEfdrQ8}-X&NBuAJwPpHffzU8No28SIEm`0rq;6wFnIj1kEG z_R1j%_TAWd6oOAYp1&K+)yo#)Gb(YXI*IB$E;wN;>7$BOu8dPFNb20>l#bq+?_;*w zv{DTS&5(QXdM%HCpxbGEs;W0~jfH(WxbQf$cWv`#uAprYOYWca!l?{CC(_F}f*BQy zTE0B89>DkhcCNaPrBnsRqv~U(#+eN_`IN8gJYRC{cB)yJ*T zNW)R>GR$>Ib&=Dhw$f9NOD2te(n1N~_Oh8lC$-9AXkP?S;hp7?h(1t%b}fdOW<_Hw zDi0k8`3%@n(llf_ZEMj$^M5covi9sW@9!yCwBRYo{&#vbgQMC~sx}Z#P6-5cJa*NB zf0fNC=AlPcrV;>O&&wPfbAmZGBwp{n{>x|)rWf+$-&<<)q|)Ue>hC z;cyL~Ry4_z*tUQLi@0t70B-9sk{KW@I7S7r=tZ)R+HbO^EmbC`BPUin zTWS7an{NAE$HDgy z{a&K>-#UEHPrXr7Q&TprMNF7%{{TMSyl;(!$;P9n(3_Vr9IFwJOU@M_oCcFS?NvZi zN?m~Z%K2A6m-M$0?zJ`4&G#-##72%V2=Mg0#8u3W2~A$Pw;)nJm-kFz$0N8~xtv&6 z0@j*Jn$@U(#3N2dGBCat^LM5{WGiXvvI>oGDIlrzBRB8LyS~k|ZVqdQy8|J^_vfb; zqvk!{hj8AdmIpD?oCFD+W2w?Q{B6`3$?n^=t$jb-y(whu(rVI7y##Pr+$fU%qr00TiEldhvM!*Ijax-zidf(U3cB9Ms z{lBqz$Jh2QKwRyNtPXB&I~$l=E;>h=Ho?X(4+Y}y&leAA(433+7`!CNo=y9pTzea^ z25#2ORLgQz;_T7uatn*bJWvJMo2!ndVqK2|u)tTfS(@%6A^z&t;Fhev^Qo`R@^RV-Gy>%Q? 
zLg=O#r66O5AI~=TO*wS-;fWvf9_Kz6%WPB7qjuWoiRq?@Or^*UQzwk`k!b2E7LRh0 zCH@7gMvnr&Ti@uD=XoS zol2bQ&Is-4ZT@Vari{Yz&Xp3jHKL4ZpeTdY(PQpqJ6HvtLA1$ASQ<7ZG>=#=zS zb>zt~dR~gMs+~z?+x@neh~xk_ zfj9%x`C59tuk2N)U@9kc%xXyBcn=FE34v5^^z?Nr6)cm;n8D}hBht+IZnmPbx}^K2 z71N)0NG!+D_}UMb_R4xHx$0}DGPL2O5&r-HCy|5k7{9-5((SWOkwYT0x8eHye^X$o zC6Bj&@>MS@IXr4%gOBBW*L~S*XMG4W&T-j)oxhK8lA@tST(X8@0Dj6n1(hV~le}?C zYP4fe?Sg!KEDbcydPX~HTHT(Krgm3|$K>zd$KiCPMO9l86&N|mm75Rzv&g-+=eM21 zZ87`)-mI2DJkC8lFDRP{Uum8viZwXY%zJzSBtK;&TIvG%gx3Ph$^1L^^_1K&RjJk-&o$uzB@ zzy)*1Vcz-Ge6>){^z^|Xjw68W%8~pl^ujvR(Cxk&+Pv#gq&OOfC;DEVSm@~^_MPebm(o4}UD6`S)9pVmBN5{8NkMt~yq@(BQEPI>f zw$aA-NbbsJhMH*~LnDbGvR4qrkul4kWR8gH~OUDoO+Li`&m~==`$>fS4de1 zY=vh0*Ig*7+v8mdDxzfkrw5+jrSVgG3fZRHHdH%2a+Mebyv^K&z{Y<|I-ww_Q0ba? zE1(rWSAiZkjH2IbL{cMDPLVEDs~^KS>OC&XdeAE2`-tX^f7KG_C;;hXnqpLK-U`+b z#E+=sOpueoQ`Fij*OG+E?TBf#aqYhz=Za^gy=vXRajb^6imxU#(BVchPZ)0-7k7q| zz8`Y5tG2aRz~HJ5xw7bArcY{p!lH5^_S!;A4=6l(X5Il?RqlI<8nSoQg(t(CeLXc2 zz^8ARK+%O#P6uwJkL`Gk*KDhjV<4-2WD;@oj}ebs+%(^hM{`dcl&b`k(rS$58%|U; zs9H*P+!m>%BS0BY*z*q$L)OIDfA<2_G-aNnCrXA>?TlpSC#})EC$TJba>!!S0#UkthBm z{{TYTM;E&oLqFT1<5r?ce18e`wB#)Lh0#!@V^>nXjBa=m{8;OE zm^~od>7kNZs#hM|fMG%psAnABk7WC5YG;;lH7!|>0prNk@fpRgq2A<{q$Ciolijh; z9r?xW>FX#Y!d5g9H({&0S)mrZnlrGyMO=AOzUB#I>*~aM;{JqF2;?E#jwEm}z>h;` zk7kN`nPY*Y!jq`Ck%vtJ(YEzSw1P zY->|hw#emEf=^<)LGZc8kt9WlSYpn5+TXNu4!NUBiNMN6hf5ERdfsPw{kQBzBdWn0 z5IwZ>7XJVXc>3KY%af!t(g+el(awjms)Bwbp0>KXaf&!_lz1k6hVMNk)aI`#Q-MY&=>5 zvkplhcgVGkx(Fwdcnj%L2bgCW&tQM8!kQ_4=!tPIpmQH^G1m3!v~@8ED&Sa#f7OQ! 
zPlEyJd7)A>bldCHOgpmjTO;b?+AOL{Ndt@#mQmBf&#Buel*Y8aRgHV&miXKfC@Gin zjt&5^Jth}mC8U_)dy+BnHs@^GuiG)ecRk0h8mx(E`Pf#w2GSf`au~!^=i>OHhgNYa)iM<@ukxicB(ADj2A1Vp7 zA!OAR2ZfP{_}wj4DNQCu{Z08S1$*kXZxaqzHjInR)2C?_hoN3YofhjNAxP9jPj??t zv@J6Z`M;5lb8^ZZ6cWPAtCPt1-J5+xl*GnPRNz{Losow&eKfJV9FcIwYN3-@&U1ss z>h`L^GML-A>+5)GN7CRgQ+kR>%Cg8fAl#<(RA|{7Rt21ts$S;9gIrXvHG3{fmolj3Jiu!VK{{Ua5 zwxZhg0A>Tf)1LP{keX~=OSdc2tg*&*XFA!2Px*1hquk-sBL~E+e8WdP{{X%8IccVJ z3SDD~doDA;{`Z-wr`psP_jS2qxek1!bISA=N|2F)tJ(x@1IHGe5KAwp@A`af)`n`5 z#xlq_K0)R!Zs_BKa*kNdO4G;-X;O2*?Vfne=69GkEw-prtfoQV+)fndp!GJJCaVVf zd(M(7RKU&@{cJl6&~8sdFoRrjNhwq8!k&ZkHZ3mK9YqyCzg4bXTqH%gJ|{he(YVys zyopl=SxYKeWK{t)CJ%l%x>Xe=(vgs)^XZRU9$E^C z(n_=>#)n8z&OSFrY$W@$#LFc5#&Sxi%Bb|Wv|Bt?alq7ai6cz=h4_5`0El{CW(Adk zg~VX_SdLAxruV3(VH0Xe0R=e)zBhrkG?V3B#UggXOAun(LG7NR($Z|t_Y#9EDFD8n zppKWV+V@x5;W4wxB&6)j{94~>K6UZN})j>gB3(6({BQa&iCmxr57Vq{3c4~p`=_EzdBc-Kp znEv&{=1O&A&mF8Eu{JksW|w1}%CzX?8bD$IV0-?jrL*0)J-Ve!O$n!& z#~}u`AFmgl`=}?U66usQgf~*Z_u2noSs)AYCTB=V!q&%r)rB=Buh>7KPx=)Wh|lo@+nKMfLjMX7YdESzipzHDp7{c9n}@P_Zd0GvY?WJB2z}Q9z-RW_wn`c zw#!K!baRpJMq8JSdpG+nQ-5 zW%32?t-;TwxQlFhX(%bx@}#rrJq|lvaltN%YBuUwG%<|(oN{{O*2w*xBw$OZ6WA*9 zK>aR(K~pZE&gFxgo{BSXl9HfOsHbo{2a?d+Gw%`iwMfKcoSuU1sM^Yes?4h!{RdIf zPxx%z!kT0eMV=LETRiVsli~)di>TbzksgZHQj`}qV%*5U^3+fRo$DDFI-KDA=uark0=JJdv zx>B2PnwYn{OWj^cNzlS2Lhp)0Y>P=Wd8uQ zrpwJUl(el_tn)N<9*=+t_#3K9DqWq6ql(ES9J3todgq&IUAE53KH$W#uL*()bJG|- ztprpL6o=j@tJ9O5UPy+pKW(V?oK!&)!zvuMo>or`e(P;6{M8cal9^GTfOG-g)I(Gt ze_{Yx4?Vdp;dfJpt1fCJRdmjdJC9F|)(9*1w)eG1KHl0ShEP=>P6SH*bpm+@h955<$iSEZdCwP+X@R~Z-lN z^(1US4f@Ol2%po(hR%Hy9Ukz2Rz^ixG8MGN6T)D-7g^<^DNyVAAjYNMJID-@$hTy|5_7F5Xu zsHd)_RlqtXIML&CiOT4PShRXEP&4}DH>unvmKc*tsaGMpWxq?z?rl{h!Q{zN+x#~4 z!EpOAG_VjISwRGL8MF4->8K=FH^V(Zb&IIUO>?7|pxe(c_TDWQ5bF)vduG^kJU9 zwqEtLB}F!~EN_wuk?GU)xuvFT#E9OerD4)!jJC6mIQ1sVs+N|7y9t4OVN*U}NBdre zqiT8`x;ls^Q!rdfr}}ewjkbvHHQ20xu1*8vzdgnCKR(W8yBO?Yw}_^Z)X32Dtmpo- zdKyygPCe+!$v8d&^ioYh^2Iozr-&bO1&AJQMW);;ATI)zWsD5zUY6xxg5fdENc76B 
zBL>u|`BZwgdRkw#sivRZm;w(0WBOEom!-WF6-?Tx4wcdy8RgXa@nTf&5pB{e6GT|I z`iGp417|^G&DP4RK5>-CL4ljpRBsWTnPNE&qXE5qI^0E8)hjA{i4=X?+)>BtepN-c zgZDyY`!F;BysI~;7R^Y=)NWy@)z3b|0fYI8>@748xK!>zC93;=H6o}Ssp;c-`iW?! zj!2-R_mT1E?a!#ViVAdiCy{hKX8@jX4~>r2gW_cBqIPEce|A`-B)pg^3G4B@Cfv-S zVTJ~gSwPDSbK#rPSHT zlgJ(M>1NW;Jw;&=;gl~5IXV9T7p0#T-|j$UVdhTfuj^|>b8XEfT{Ma_^)5fX&(S*= z9Z^Umoml?>3b$|2Si6N|!NFZ@MpatMnpvwMS8}@eIUS9=flXv8{{U=~lo)2?D~`9P z6B4r(6+G1rlFnFyKxOTP@V9j|+hB_@O)Nxq87H5Cv-Hu-+E#rmJE

2yy-h(TE+ zbDqbK?`7$r%NVvI@G?szWsy0_?B5*Q#g>{}G-LrF)Cb1MNE)t{qmg~jf$3`F+GAx! zq?CzQl?;5p3lm6+FOp3S?GyNHJjVk+&g}POrYrWIXt~zMw>|i^>-Q2fK_bWQ4i7$` ztb(gVR@A zEIc%^%}P@zAjE#I{{W{KIuTKuW)Rhg*|0LXJ%9({;b+S&RdA94FS?NGWnj3%{Ce2) z(W4*7K5m%r{O-6aKWEP#58)S3Mlm+EW?-g&Uik8N}1>z{>`uch>mx%_CQ&V!$BOcU@K;{B6$ zrJ6t{r<$3FAU1Hn%OAS#rlpOP%9-!mKiX3go#QdrT5;LaW_>|ZfbZ32zu zl1LG|=@=|?=m$LBGL1GTfptqFk$SctrRLe# zYK&CdN>oIuQ%;!B_rU5!;HmeDgD1Np30L?rfIKdU>g22oPf;^N9socO2hdoxkk;)Z z&8AqG@clj)xUh@NKC^IG>V;J-Qb(_rix~mER`#0>YHu?Ho+BZ_AV}H6bmP+0?Y}o| zaP5KyqYqL!*292*eXfhYyW=p;+eTSn+tQ2;dF{-9r;S zRA3D1I&tfpqA0_TScB5crG{9f5`PbvdgNX%WM>}RKDiN?N2K+$)-SixuTh2Yy+c6^ zhMuR}6D;wAA|3#2cg?M>*iBcSl2t9AEC;w3&1@x4N6^=#4y*4ouWa?j>6X4KSiShL z?WB+x_r88MeawQmb*r=)?B2y-SxC9PevD@K$if1~W!M2U!0YcR^ zGHEi%RE%RDJ~pOF;i;)Htfj<^bVv?DcoB2m_IO=kql?~%E3?SVJfqV|wRAM}^imk5 zSrs+sS5_JC`rPnAu+dgUPMdaqCdIUpxJhr zYDB9qw;^93W0hl!_Ud|D2q=;L!8Gs(iHQoMPC)IPTT+Ud67N*r)R4MN)P_PO$Ua^? zE>H+v54lpaF>L#*;p=5@@=))TvC_oL9ZW|jQXFXO-`4Wf&{1u%Nlva@;N?I+($~gV zZtBuaTqLHa`=o^w$YcDx4o*Ad+F7ah9l^x3qgIyx06_jz<6}{86YY|@rIDB3$Z;p| z^c>!+Z26mSf-=<@rjVa8L6F`j9yUanm~}OG%r8oGz^WI^z|_M%zz5gX(Merdx6B%_ zHmvGU`4|JBy{sQJZAnh@MJBe-m;KGFY3ROS%$tgq&uMgd zmH{3UOKLt@2Y~2oeYy&r!cim6qCw}<>!c{~W5zFD$qbQ{f;Cv7Yzz+L)sKzcJuEwo zI_4!Rykb15Zw@<)ap<+`C9^iQTY66{O6FM)48d}URO18n?RU||1tE4W%w#!c$2@vk z+PXswtr|eEfyNXN+zffVZG{CpnD9;xa07L*uIb}Iw z$D#1G#*CXru9~P+?UTnG%&iy;jDeiqb!aE0%f~0UPh>qUooy{9#-Tl_C1wG&qj%4! 
zu`4F1r=~Ryvcn3fV(KzQuk2EwWAd$bi0gz)Qdzv{7oNO$0d4lEYE4XuB^0s8E&*2P zM?h@a4bG@VOwl~WWL7ATfFxo%{{Sn_)cng^C1dJVUu@ygGm)Dv7heqq^RUAmD>tza zLCMy`QXf(3ZyMWn+RA;kZG3s$_4U9O!FLDTrNL<(6zbCc9=6E5vw5^a0`cuC{v&vU z;bZSkf7p&HaYpdOwJ#I7$kIp|^|$mCRIy7dO5XF!oEZ+M&N)2ZcX6WKYo`-5Uer+b zaCDOx`2K8hZg#qNV3WBaSIZH0^ArB}+2;B>CX0}JA);1n1|(M)Eg?V_JKQg}YMYgIKV0UgD6sB5LV;Ue*bEi1|R%IM8rE@ukRx;%2RpXx8zrBUHQ0^5K5_@%c zVcICqVd0PKe2FJVJDyC@(`<88K_{_Oq!2y$q$Pm|xgLh)QMcFZlwq=!RQwgd^MHRZ z!}nVM0NDQkG;A@awJDySt~GfZ86fx4PjPar-SAG%PgzFnYB|J)Sta1x@=GoWn=`^xQ_mZJgk`VFVptf|i zlyTD-swRI1l>r=j8>QwRd~h0@YTjeh9Xl~_b0P-RG@i_J>wQb*-MxelCgiHJc0t%I zW8>EO{{YMT#8q{laFuCI?T@Xw))$Om-pYUdLfBR&S)W+|gQ+y=}8_A5X^3krH}HWdb#j1LbZ; zu8P;VVN4ZLa6p_8t;P-W+2q`la(Z-t1=JN#j_aSOd_~g*yje=^+l-3FSt4|mPvI-! z6#D5M$AyKq{I9n>j`fmZ)aNLCxc+_bUn~^jxBmc;v9Mon+5__*rsX|8$G0!FS5#J3 zC5ausTWJQCnVw3mw`qz|9 za3wuWGq#U$@Y(Pp+?nX3tXL$cfno#8DuMN5x5GSJNT!jFj*E7ncaABIOfoqHvwaKO z%byD|;rV}Ro*5RRX&9fxSC1qs?qZy>yznvr%rEK(al(<+P@v6Y0_|i6+hEDy8wG$bt;T4u2c5Woaoc$$Q>{` z+by$xq}+D%w$;LEW0(<7AURx~;d5tvyYi@^e>CQ%MJ!QY&mNZP&tqf!U?b1715NWI zR&H^tA1b>fUG0M=>%T2?3PB~xG7M^XH_wXgAMWh)Tv%+`h z9rK&kZ*>*0(kzcfs}1E&{^9=s7ofGfbn{08Pf+0uovD+AkJFx+y_gG=Ml*ByBtpWV z)esktY5}R6p5n)%*<)GDEY%W6(X?qQe}#cP?^yK z4j}hAWjN{lBi7u|MPT$4e$iCOsONjLnJjb?2YliT-U5#jK39TG0I}qxqX87--u&%nN+?pbPhXeW93lme4{{Y0Ke=&LbxlDpH+fGOGIlWHKkk`ZPO8O27R_l@5 z7RQEVBdOcuGJav6*QYdcZZz@B5CTxXU{62oc72UKFSskm37;-;oc1i)4ju+n?NzVyLKb+DQgKt^Hcl(vzt@#puh9M}{vsT~|FMgWZiL zQ_8XGJbZ249{&LMFneMZ1YqLpNPP=N-~$o^sq|rSeeu-!qSU>3vT7>~itkTOji(`l z<12YN_er%Ck{}15#}>?9=jvM;l#iM_v*-hCgSrmD)K=12l zp;2h0`j-akFptZ2NfVMt>DK5z{{W}*v|?V{oMd*nc~i^CCgg^1OM~myGwE=~G@+EM zfDcRQv8T699!@fu&%kE+eF*da04wRW$JnN#XZi|ZzB$MCxt^GxW8I5)G)R5N1~R~7 zjQ*Xuy_;98>6&+q@&}anIq8exbGjKJXvhi}9VfPYLAOxubIVv3CJ}pT0V6td>&NoG zm0bbj*|DtMnwL`hKF*`&^7Zhydpw9@_ar>@IqmvhNw+lIqev&qFl=RUgM<3pI5!G9 z7+AS7C_ErMk=x;XmAr$>Ne}l5m32PUormat7mlo`Suv(0@0RlLemJ%JkInQ4$xlKF z{wXp5J-NZV>E2qJVJome!~HSbcldGX&s()v3)c{WdI`%mi6MtycVmVe{+&H9U$*_c 
za4S{R!B?OVioT<^bIs8Ebu_TMD|_ZYsg61L+G3SiEqx@l2!vk1TxX%Wq}wYdi2bEt3ZouZ%AxeYw8wDWHi}S{d6G3GnKf$X)SvBlEIjs(fToek ztH&4tRdRiC$tT3x>YtZ=omD7SRBUJilE2HntlKvA-YE@S^1M+IsA@RWJS~>nS3yNU z?=^Txk@T)kTls$bCnGv5+@gi3;;3khPZI@Vz>E)v@31RrMF!sW`u)Q&;gjfZi0#Gc z>guVg=|q0?GsQCbINeVlU=~jx3#foPT&BjTK@DDsBLPDzpgfMI+)>tu z8cJ4FiaAFFay1{~?}K_8EvIgtnZjsVi0~9S(sTOSb)kpuB!IF!;By$k8jnp~vt{Lj zPruNYB%yv){0_)BMo} z>=qddQ#d+;cRXHJOJgneC|`Eb2b6$;k3szUUHhs@I22Uzv4sHDCOFSql}7Kh%@b71 zsU&nrvH;cP7|%X7nv;9nA&jc%_L-O}u*uR-)pw?rXR5aJ*NsQYaQW;-_~xBJNaM<& z=Zj}LOCRFMz@(p;azCYu1Is3)ibxYYCcI_78MpK@MrG;grv*$-ynDt7POpM`-*xb6@`?M*`Rfx@xq zZkdKjT~8WKsy%WMjz=7J=N3gQ+k5R&txZC#q!OusIrJ8yRH&)MqB9gka)-Wds!B?E z3K@WQEJmLTX^#OWQglZPh7aGv!qrLg%1HkJb3IGP7{F&$)IjtDH>H_H9Ylp87*oc3 z5&Mna)kr93nNBo=g~mp&h3rCTp3NW2?2*M1)K#`f4C|vK)RHaQ)a^9V0FlWClja%8 z{#TQutD~McU)^YCPF*zN81&LdZ-w0=&lOOV$sCd&GV1M&b~(7xF^)HJCg6rmtyV1X zOpT{F{Kq2I#kB4vQKofQPJQiILGZPK_Zo@(fUqr+SAlBPJ<+s?U{ zj^EPLEy*mFfX53YwF}EP9&wIw>uF}(Wu%SH8Zm-&gQ?!I3JNm~aqxIrO)N0)Mxt zmr;pX;_4Tux+hn1r-W006W8?XWeQ5Tyz1!SoU)7t$E#gWRU}etN?&qC`U~=xEXB23 zMO8~hHewnO00scS@B_Vv?47k*sfx)m=Yi;a1<(Hg;#KpcuErL|G$+u5#9O_zQADz( zU!GCUF_6RK>3r_glHLhp{{Xp#nDOp+8TGl+Lr;k#aOelp>c^jmvu2NORx+%-6aJc< z>HRLqWTaJQgcT>dw?ERx{nnDbSx4Gah#TzROr6=LZa^Ir8}g7=Ivp<@x_IXqziMHI zNszKTjX549_2S1707#E9bJM@;eGyuMr=c`7&m}`Uj0m#%9gp~K*&=NFgGy@iY3qMe z)*3o_U}qVS{atys71avs2&RT9#+BCEuumM@(FvoXtEl&8c_JKnV;If{e0bbssgEAu zkJ<_Xk&YaWzpq>O*hw6;p6w|NG^&C?_@5he^mLUFyU!*7zyNjex<(fLz26d>vSEZKsjs3L7h2b9~2x*GZ|wQ42ye;9!n7%nsGbRlxoH6+F0 zE6BkIk_V-YE3B`rBg~)T>}+ZsO3o)&0kh`EIrSFhDrut{8yd&bdYjC^#+nkUWN;=p z^8Q8mn=(4;SYm2;sFm9}d36Jy5yuywl8&ZXUO?Vb#DQ#SDcmD#Qkfi_cNVb9fcIJo zNi;f>l5-;RSNVUnn1X736I0YeB2W~jaNzpsxWzn&&akTlKbx`ex>+KdT%OTMU9+W6 z;R}XSqDxO-T~g9C;f^wjrzGRByHzHPhp5BL*<$FvBzoPR%Tbu7iJCcBcg9qD3r}MxMNp((NQ*&BJd>3d8tt z{vVWTZ^M7nsj;KpCaIt8BZfTo)Cb}GF8V6*SOBYdy{Occ@`B;dS=I7)Grw(N>)ir!DOqYX1Na7UnJEX{9J(p)9z<=bzW(@w}e#L-ylYn3a)$ z)=$&w*zvsNP)jXJ#}i6p4a|V>ZW=cec`ofsSy=x7g&`zzWcp+A&Dj-I5zJDM$N0E7 
zyah#E)HSk2=8`8~K|fW!+N1l*cAh$C3r>94IbYSf{4P|J#M!L%iefZR?nb8!1~aG7 zcCspBtgnLtmo5*~#dZF-i;j3|M2shqa&w_LIUVtI$-8~LA!)@!$fr88Zt%@u;(e+e zLMoIiF+5|p!2B(40<0=qwtJ5C0eGcg7@0^s zxhI4J_(h#pQ%piEbmf;ENd1q97AvmFD4L3isrAHxWdt(n~59TsE>u(gE}q zWn8c`P60mRj40{H@36%omLR_DXCoyS>Obc6HcmzEiY2O6sc8f`CGnLWy=|810zzR( z&%3TL&yGT8$F-QTduuF>8mdQ)IKgJ)tK0+7Vf0O7fI z2P=-F;tl&Tk6*P3029-XH|cDJVy!$=dts0tqQEx&;9&9NY@c$9Bkl;b5q$r35b*2KdF8&NvkR) zFi3#1YvVo`@w}dglykTDk8)4KdE{~~KG?=?*qh41k8#D+uZ|7n7)?pw$g^P@NIBJxhQ&PDk?@xB z66Xr54!M2BQn~Mr*|7_=4s4lN8yUWwUcj^PuH+5I{klbyQ;YXrZ|q;4t&FfPuDH6% z!M}6Id;22si0bgWmOVo^R-tneUNPpI`xjxs8Mx$LG4?JxIk{&2&7tY&j?Si++VAH0 z-E@r-NdE6C$;W}Y6;gb?taQNAEtstRun(nx@k~1_ABC2ba>~JD;G1j~9lAYFGf3SK z4m0Cfk9jZ~l*a8GdZ8r;MdMaK)Yrh{mjR z=G9AGQ&xS+swI^F0F2}hfVQ0}3r+O%ewsF3_`GDr$~m$E8!b>_urfzC z2*#z!;ODjA(p0eLt=u9AGDx{4B+?wI7_+H%I+~K$A&QG-;3O&UHjv>>of|m>HZZ=H z4AiejIY|Z?dyh+Fw%eC}VCSzI+cckhO^vMi8FVunc~e$uccXB=~LNf(-S_uT0BRnVj>ft6E? z{4E7`<+wue)3#ttj1}PN?VIOxHAW&49htMtgCvX_+0)d?xx!(dHe(cIiirlF(!;Mw za%-X4J8N{*9}bhLLN03B#btFZ#S|0(3>?iL^C7QNbmS)4srnQ zN5I-`h@>RxjOrskqu18wM@F&F z34zpW4(HT(8{KJKhufVl)41+%Mrq7ZvgDAs$?cC!T3HU=9JM=nq{}N6LaVP(&j;4@ zNPgoVe>y~}e2kz6#nHLG)wV_|X{ProUAU9Gf8rKZ@SQMCu-8!*d18_ZcR2*9$NVhS z+#=eb4kQwZe37noWxIC1nX+!bx3K>=JJui1py;FUZYXrW`spdKQN%6d#-!9WLqpea-Wnu0h z1ZdQGKc&0eep;uenv$A1i^n4UW4f^Dd_66GFikSqH!65fzfr2B%zqT4Ll1Ln2V;RGETOXIIYBsrL zdX+G?3Ngk;e10~{HmIlfYU3@gppzVBO7MLTtC*g#Dy0y#_0vNWAH|PXm~+pK?RLMHcB+(lXzDjA+AdY@F0_j4Z zZzW39Hm^JA)1UZRm1t+Aj*TIVL{7GpV_tjlVRC>cjKOa8jYU$8EY62cAZXiGjDem8 zF1j7+mS|;ysU2!u7W>UVTDM|Xwr^7}%G+H`!Q0<;RlyQ5Dm+D}-061n0;ChhC#J~L z`HX+R!t|{=CbDn8VX4s zDP8r(r!CQcTOxS2%9$1((Py?V$-P^LyTbc&6aBphK4xAZ6^qIVC40-n?Umd8Yz|V za!U6$k9n;7T8dymm05;*6vta?Hp-M>4QSPXcwj(2hduFSie>hiI)hDBf!ruoTy?wA zrYNY}DJm;gx^h?mqz-T}0O!K(lW=5n6+9@B{{R;RH;z4QjmEx~DnU*oZ^g)Yv>hV@ zfu5JoHcH7&bwoy7qU!*Y>FIf`8(M8*mPDDPB?Ia5`9R3@0{);BLQj8AnIwD`WnY$EB&0c>CJ9 zpHd-Xt6Ls9>BkmSwT~JKnwkCBVp7Fp7|uH=$5Up0zwkFDg%WL2Qqz6KR8eV)LVpQ7 z#Akw6_g6h4_KU+S7d77>n=b4ukASy)Ms9wPH*6538k}A_A 
z?m$%r7ws9(Kzn>{K{_2j?0QL(JVhfp()y#oUj*jJprNFADkW;E6;a6$t9>}_j=bHQ zx7+SJa4YS{QwhW9$DPMH9~(mj1x*z#JHELZSIpW$<$Va}99*_fF#1iyYN#xWRU~Y! zr`whKgVQ&yhI)49X*De(dxYa9RCedRppo;TxZ9%>}Q6Gm@RB*QRY&qsNh7yy8w9aYbfJ-v5NYWP(i{4 z&J}sb(|4y#*_~-#DD}lXQnXkg>LqoKgmoFS==T@g=}jF{EKXZiw&NqvdfQ5hItWr# z{8emu8NPyleXkT!MG%?e6%!B!2V`GS;x5wBx1yOD7Wo@2bjF@Bhc0oZOnf=KE79&X zwJgSo;gyDwpP&KW+3efe?K3qj#lDg>WRtJAW75RX3X~Xj3Z2SddRb72v!Mag!0dC} z-Sl+3bt(5}_a>18f8hOYREBvs3>l0u7`?_*+<1X{AyGjzar;ss&kY=jg3HwBrhPA9 zECqU)swl+pp&(}f_u$@Je2^l=EHDWSpHN%ZN0}j<#}<$q5y#UX@w5*zYx+I1572sg z+*M-*E%GyT^3*RfQu||*o^zg`(%Dc>XPD(kP;t$i^0ebfAbdO6(Z}o%0KwJIjmvs> z`TRE9cH)r|r;ac@ZLamWtu+`3a%S>_=s(`^hE#=A=aw0@wUv)CMJLLfj%{-2I_xg4 zwqOw%$p^CL#*=Y+Sjvy7xE>acvaeNKJn@UFYOQi)-j)-Iuh`j9u_J1C86KWFwHxtR z10sw9#enoC>!y{VrFxOhll?s|JGnJ9kxkT$-EtNP94?Wb`MplceW?L%e42&iW#&jn z@m|7~SmKa3NaG9fHu)K&N4V`zxNS=`yh@HpVcnCaeQ|O5b8#}NQdH?=v-48@!Wh{6%j2N;h>tx0wbIuK@kQE+L zZ8qJ$uC2U_{P?tCwJj>Mg1zsd%p;R!_Zd~%0mer;K9`@{_E}*%-?Hbg?#1aTG&p)( z{{X1GczD?V0Ao3Ybgk{hsOTqCDbLmDF3L^zb*B``7}RmxWSpM%(|FnEqmg~=yeN3a zGmk-P_VSA`m?se&;E&~I78||NN7^>ZStAN1kp{3wQZFM!XRVH1O^E>|Motg=UzNL* zwcqNJ!&>8>00S3wGl4IVX#iyZ0KJL*EwAcPMYlavN8E+nwH_BGpq%9I;^HMf!$590D#H)_K15O^h5xx9h~Q#LO@Q>WJ-;?YN~BgIh)9`oU)v926`LIdwZygc9Nj0uPYEB z4tfi|EC&W}Dcf?OrqLAe2-iP^_+^lur1ZR|=Au07 zMX7O;ugJTj-z2E=OH5-Yzc^f8er!DXv8LSeEWd{<#7H24+59{$pfkfuJgb8aaOmD( z=k>J+@Kv-+Pwp8(z>Xo1Pxy`OwoSKnigsFHBLFx5^6)?$6KEL^8FyHTBua2aCx5*md2fWg!#(sD`g7dTm`8y?v`IIVz|Rb0BaDo?=LZ!GF% z{{TrxY@7p+-C&x>r`t^vR2a#1Ve#?sv~{9T0?F^nfDbw!An?W3GbC!Ho`loKN{*$R zd8Fi`c%MUgJ<<=^sO4R1bHYDPdW>0nX3tYGDyV(AeAzw1+0}b5v+b-?Ek+L!^#<(* zUjl2G;?2glaeox)h5 z6ydUm>ln#5X^bupnW>_MW0zj608W$r>^i;9DwmAK!B<+|Optb^LA z0Irw8EPun{dD%YJpF~Cm4jghv8akVLVZ|Qhu4eL&Hcom1F5lXP3D&te+Dql}ZM_gE&e8{MipR6FTdM{}RtZpJj*pxt->0C(CR=tah&LX7^s?Aq!#ql4^a zL9JI@htyiiPrQrUik`AcMyYV_h&Th&HtFZ~+E|!2=QM*$N?S=q^v&kXC2FLGIK(ic z{{V@x$Br+?nmHvyD*}HuJD-KqR+JR!p!=~%Na0SsJ_hAe6%_CyQY7Yc&$!ElJ_6Q= 
zJ*ANc;C>W+GsaDg6cuyK=`5=pZl^7S&4IOj;i!=(^CU=cr1Dr^aH}ho}yDw^9W;E8w!qaLLlv=T8eh7ML{$x8Ykt(OAi}lHyEm% z1*Xn8Icxw2@3FKlC?8FCyd7qd5dQ$E{D3?$ZK*5dgcyML%!dME{1fSpmMhiDb*52Z z(S}kp+oo+w!#trx5VA&31xfyQN<`%vWq7GpSn8Ea7Rxf7S^?-hMVSRfEhIno`IlE$ z!0&jP%6f`9L{OKNh}9t{wthF}-iAh%sx|pYQ^~;iSrCmpuv18k(`ror0AGcX4%JN+ zF_xK`-v=T41Jcw0(bYzgNcC{54=Lv((#Wbat!aUEMI3`HtImLYd@h7o^iUWn+{oW^ zEB-NFBUSzd$A683ya%C=6wjys0JO$`Ky1gLT45Y(Gq^9Ux|R51>#b@ksTKfPnL#Dk zaxF6%Rovc@7Es09pE7m=)m41QTlXwPXaHFuJ-D#cCO0N7n{u8R$-(m;K^+gJx1!x@jI#S!ymay)VIg8Ubv-{zabC*^I#n0$QA&j1 zWc1D2Tt)B+>Eo+}I{CT%Y^GSKs9H3QSihTAeCT~Ka;Iom$4JB|Ba#~oE;TYzu=b&e zi9_}0QyhAWvkNII>l4~i+L4c(xC8;88*@5PN#uo#J9zL9g^s~1;#j?-?heH~qyGRE zfh9bYnwJC>8OKs?(8#4U($3Hz)gqps!|VE6Pj0HK6w=gV*yl@gpG;cX8po!0k-rXd zFuhOfZp=|l0x_F1H=MGLLH-+Lumy~|IB9hq9#pB8MDD(i0#GYzijHQNc5Lc z%K&kM>wZ=4l@nFRsI*Za#z%G@hTOX(btD+Xk~7ICY>%aFRcv)pAyCA|-3KP$P}L-~ zGdVg}91j}?G%B9*%n$f@Iq=6f+aC^qN_b;v5z;fNBg}m2bL;D5(rz_dbxE1x){=Ql zKxYf%W=A2YPjWNu$h;Gb5n*bONbb321Cwr%hLsV=x-Mi!NSygcBTt2-uaIn%;)0St zb6e|H#$8W?cm8)a+$->*_)PWizmDr4+>aqLa50V#>tn+MOSGowsU0f2PN*5r_`Nyl zZli;K*yzm+>SBg$wvN&wY>>;GAC50F`=nGApZ6Y6{{YXM=W-_@&~A~^$vr%X3Y>sGHte`jLAPq^5h|6`-1*x8 zX5lqNKWY}3#;+jZ)Onl0_r=k@ZM5KFCI<|81RX-_+4lLQ%0nb8gTa`9Gx51*+!)e8 z=}dj&j;*P)ocwN)+22|Uz9a;5U3ETR%6SY zVEi*`QGAQ2=|wS;DUaZ~V5<(kqte%!tm2jh$WB3GbN$7gRaG1R0MADC80Y!e2`WOu zMU75AqtuVm%DO9(4vIPnV$-;gAEup0t)l=kdol8$`C>f(04}zC^~(%qR4yOaNI%>W zY8jeHKID>I{$4r#)^3fYt14rT21r}ln?16`Fdq_e>u}rOD6QP7Cl;o6CpM zxCC|0krv-w9*N#WP{UT0ZsXS$Dw5LC$!dzZ9X*2=84~H>4~VlPf(T({k~W?CNJ-=4 zVbJcCH5!Amswb6L3=EDrxqjAjFSjD2Mxf=B83WXd9O@noqo!G42~5n7okyG>=VjGy zKW`X@RLQ^$#1_Z{#?!&MZL20@ZDjZ4jx6$MUPWMDWpVUAcw^O1rLB5oi0&;=D3*FH z1QEyuLu8JFziY+QO}Iwt(o?$va#*M!4EH2@UZ$owVX1kdLmXv)#0E!jIUw4f+Z8dv zGs>P$Q|UduK(l6vf$o(_y7^aF_PBDqYVa0}ow1t@9LXwYocT!}fN`7EZPfJj(a6xg zLjYZIjC?F<{#~9h^8%RYz1aSPwU!j3c^uj!4&n)xNhD?MtdnUf<9b=nGP&eqj`vg( zv<;Qw?yH>C6gjYq2*F)FJc2}FsCFb~*;jnax^6Ig6HEAK9#t3|ezus$i;jyVu7b+w zjxWn31OuLKx6(UZ1VwneV~bjop4VAyi}RbqF3`ufOM?PA#m*+cH*_$XjNp$b&o_k8 
zO17>p^4MHm44MtcRmbdHOBD+PaFKCk#z0${g=85mWEvNZHxu6HB7|&rxMJ}~*X7F> z<*}Y(=An9*5He+vFmMCU)ky8!#TNVmWjP=bj$3=kofbyJ9sUXMFe%nOEAlJ^|ZB9 zO(y5v8pK=1r5eLEM`ZNOohJ>DlGU^2#c|>DXE12086MohTl!K%Cv|PvfrEj z7FNk%G;Sb}$jlp1E2jJ?CvPrrPfMi3JRC^`gT#w%v{Fk{LLxS@{{YEtZUmQy(NTyb z!EEGl;b&5Avh9^Fi~B?8d=B6f}z`y12j`gNwdORdz>;k~y^Sa=HC1PthF` z^vlIcpD>xGMAazIcE+GTht%71PeWTr5fniug@kU|;{)>>Z&?wlr2-lONKoZwQ-}Ci zbROg~1=OwEP+rV_7tLPS%2M>HrJ~zE4F|f+0cS!;L+H08+Ndg`S#=kAGD2s~pHp`3 zJ31W663G^O;A&i-?PqPDD$FALr>!6k!yNvXYtv4NMG?|Pq8fT?B9fzOd5!|&*l$$R z*FzON2kpvvq-=m69R@CCB9FaWw}Acj);+$DaEbk=k=+v@GZ2U1Ejl9F5p8=t2&tx% zQwdPzx`z1hcsIH+Qkr_dvWAvfoOz3YbmP+U(A6a&R80!ff`pz}dW$9+s+wkFPaJv> zVVfR*9Qat@r)LMF?5czCD`8rdMT1dS3Yt}T=xpzqA%eBiDtVP*QRLDxGl8CdHkWg+ zsh%ZvRm-;u2LVUsAHLY_)g_u0IzqMs>H_IoB{nJ>Dcj?cZBM69Vsfl(I%Wnsty2oR%Y@>1LYHlR~|aL%vf^ z)p4@0Z>&B7raLJA0IqF3TND+N&Fs`Sw-;dj+s7WZ>K&Sj5CNV+j^P2p_zTffR{Xt4 zp4@Rt7z~zW<(tRW%&QS%jkcC$rZ7{xMIiG6IBU+3n;b^!VA8={$`1 z`I~)eiC&sxPXXWwf%2d5^I}p#yj8iG>q{1YIRgNX)6(p%l_sbZ$d2w#03!oA>x9E4xu9ydBQLcOPLqudgBJe zv6;6t_mJhru0|A#)zisI8$x4ej<4Lpgv!MtXBcjV$KQT!+K!fjm@Kf30go6M>+61P zQzrZ=ngt4AEc&`<<8@5u^)|bNFiA%EKx#M{9rKUvV^h%`wiu4HG_vDY93VcPBJm$G z?p0fLWGhut5hPwTFw~>?jy*eK&)U9ghNhZGwKUpQl_iY%z9zuVTJ#u|*wwO*yfXP?7*G%)p1|X9P zi=&0{&*^u?_wTP;;B9U4OmV;BS;el7miRXWs4p<-CfPQU8N zGu(emUqMlWhh$pf)$dm5H4@(1kc-D$_05@1$qWqY`sB_vcgXl$K4GlfsH142(L4~g zm(aLX&mg`!bkBv4^8I{J~BS|wbEZw^UliI>fn`fz4AL--FbKm3o-OdJ- zb7a{ng(U2(*>%dsGv@toL9u+Z3>0uw?h5)D`3=~QO#0s1idFlRKv2b?qNWE2)SIJn zSy4q3$ryl3?qH0bPJE-U7H*3ZKg*jKo_h9;zSHsqA4>uGye%HswCz;*U;BA7zPTPA zH>lhuo}xFWjwhyFIF4*$DM0@K>lfFqY>l60p`#J~)affR9$3DSo}U|ri4eoK(^7l> z+J{$S1W(H!_O{eA$0M~d&_Il{CP5K_)BM+?u1ZROxJnvz_e$a>m&~o6{rx&$K7(f5 zsVk}8pnG2IgkT;UC#maU&cvGvsP-E^+&E0P)Wb<2^zii;I_>sq;Y~==Mvz3OP+w7w zPmR;M?OxX5Sr}xFG-YsD;a6IMbaUT1xuq2qWb|#PL;xt2)N=X9C->bKhL>fMgLI{q zDB-|FX4jHgi1E7invyG)7BVaZ;mPxGI_AsSrQ7H$DuWWRxX^_M9~+k2RRl6kLgjtQ zN%rNy`a9s~={W0RvMNG%J?es*W}~ELt!KvsVPiPXbByzT?MX{HA9Rv2Lx){nMbBZ) z+ex=m!BD|cIOcJfzk?T#XUp8Gpvlq8lr^hWBy@jBc71vs*h^>k6^@z(gpU) 
zE<CWe^ck-#nBM@zDce%tnlQ&kKwt1ppJ!#e@#o=Nq!J6$}K z6q7*&a!c>Z1{~lNKbc2Nb+Br7owi!cRucn8qY|?n1N?JgiRFA zp;sS>nLKv>wzGAPoYX0Xs8h(O2ey8h7ajdAOjPtWV3-t_h>aoJz7N&~%C43QikOSO zSPws#0&)38^sj-BCWfXm5woR+u<_>O7S$iQ6t40&z2#usvfaMM@$ZQsAz%l|6Jfl+ z6K2mxRTG&3boX#r5uF(R3~)1VMyZt6ZFDq|yL-n<;|hC|>0&KJj-xc}vNsVp;9z)q z-@f8hPcOe9Y^I3g$C;dU$F0U{^GcLLsu^UGGFCJ}+b4oR>2TU9v6B7sDuS}KgBo11 z;~g>9?2?9$bF7X@qhzR0%sAA1Z|AD5sc8L}^e}AWJdtc`o=M)EqbwBxg1C?Yk<|KM zGq<2Ahrvc8l+0elQ7>60hYei7p8)S}j zja}_t;6Dk7;>2|MiAgd|sElA>3zUZ2R8hqYaXOrqKu^Ziz6iQZF`~dYoIff3-y!EFk7ix*(P{RP(J93xRk>wn> z`&h-p3$3PM8I5=czp1*>VScn@KOh7lkw^Exl23o?upEU=E9;YUr(!Q?ht#PSp{aEY zj+edLYUv^hG_XFG&uD6vDL{}gLO>*Q{qIb&#FFMSsCxR_W;g3POqx+i)zcS!Ee?s* z@o4C`A7OPX0Dc_*06RT*xh8cP!5_o_01FO`_vTvRPwm9?X;Z8p^M5>WFPiSVoJ)j` zkW*x1gVf(v-e{-X=#ofV62?FJN_u>6nr=TV?phXF)e$P3gQ#cWcgVs>Tkr;Z=Qh`E zB2}G_Kv|EFyX}80x_fqUW|PWV=0K zQxZ)g!p9{Qh$Q-!`mas5QoB$qs2)EI-Y}~SlIqY)H-1Pt z?gjeL%<*a!D%yuC*iw7}vV`J?dv4z>5Nf7|I&&&yY30}8eQxLZj;(0cg>=g^wvBFl z-v;gc?>z?VTN0Q;B9NsNtDR@z>3BWLmvD}+SS;?%s{y><2DoHly)_ll%JE3i>WJ!f zCnbmQuS?PG4J{i;YAN1Topr<&3o7~!dmH3bRdd5QX4NZ~$<>lmocXV&(QiL8ZuJXK zK{PSx0AQf}G5lSxaR)}0j)I1GBx;yhC6U>)$YsIlYiklMzIt-A`-`iTlpF!^J?uT( zc7tJ3YHR-TsPU#O71jE7yxg1j?LiTqor4_d1DyI;-$45{uk!0v)qc^Qc;$tch}0aX zrZd1F3lB)6GW$Mw7-ff`9mUg8F6#v?I|ebv6IZqV6Wdpau<9%5BcwoPk)(W=3cL>s z$+LE7DrhL5ZH}&$R%DY32w+100GrT@!pxKHo3FAWY5c1sYNspa>&3U&D&tA&>0y)p z(sVAN`6&1q;Nuo2ze`UhDdlFVQ~tB^pnnfGO({^;r{%YvoNg7M?Z!*9>eZ(b~l=&0g}8kDStLKpec0Lw9-z@WqQCcyND(A5DH$0w_DbZV91Wpx#Dul-)N3EAtwo_75#Vj>U z^Zx+ML!T{|_mx{ql&g(ONWW0V2yUYp{{W$Ih8jkVpDw_hg4tYnSrh2214vr3IR4<3 zMt=Q)$HL_-6tK&tl{M+^$1DerHZ+ydPxx@dIS6`khsVOnX(Z8@bbE|C@z0IXi8Tzq z=eh5>EV;+G*m|kXY>*Y>Fr=oQNeBi^XCv2~V$pr~%@jwG6yO|!2;d*fZE2^Pww7a3 zsRNPA6ZD^jp%zWD`PC(uF@=K?2l}JqlVon2%#_Kd3S|+g1B2%N++VaH?y8W5D%9!b zkgyu@)MvlKz>O(r>W8?qR7P7%4tO_)7Ol3U`-VqJ9YN4J^B-RegI3hSGISLD#4nkD zTQ#X^o>Bw_B$WOF_;du_D^FEM(xj}aM-3l-dVedz(0QYZsYbeTBR!lQE>EEY*4j}@ z^s^w2j7ZA7B-!L2&H7l1G~2xittClY|YL!kK1_bkGcRFG-cI&SluQt3W(|| 
zWO*jl6Ek21844|Q^wjZ|f+lA;)N|w{{CZxWZ;}Bh%}JY2np2JxczD{4%MC2DMHnkK zLXtZVQFy>q>S`lslQDUG$GZOj;U~S)k=FgliYaGkQFRt$fsxP)IvQ%UiAYe&J1FWu zecLTibtI4^2S|+vdwL;s^}sq18c7uXAOPF;TFUBgdX@!9Bp3%s&PRT1NUPvjfRZTV zk;qUu%MVj?WU4ZIAG8vlos_#ojJyvYiMe%%hEYdLBpObn1Rh3mY@fQVjiie}k>nXk zNEkuaE#NMS8d#)}X4GU+VmhU%jhyESqn{9d7Cf|Y#*u5NbMJyc z10heL7MXfmRTKgtB$I^-G8Pe%?0+e>P{d?Lk~NV!fZS^RJ6u$iN2W$bah60Q&Ej)% zqZObS$oH48O)cSQT0E0-Y2iqMg)8-HJ^TQ))!U4St4Zyac5(QOzF7Pe_}-2!zJY1P zwIWg1jOX;Q_X@SCg-tyisIjp8M;QlCZ~U#iC+S|gS4}lhGEBTKIR1A_%Pl0H>}%|H z8k>;7bg~P?zx<~YNaI#lx{n`2bjJj8KA}=xB{}xw>F~KmXl=GtEVD?!X(coIj-$fD zim9q5WtYswkD0jAJw6xf%>^A~6U12P07)N0JU%x}kv{EO^FslqS2~Dh=T7KH>NTE-(r4@UY~p+(e$(Sg@n%2a)l*B!Q=?T6kTdDtyH|u(F<+sO0j2=IF;R zeRW;2GWuH5@_{UP@dDb(JnX_Wh&az2b82Dtqm+3Q3`cX;-pyFvT}UBy?CbQK$Vbn% z*0hM_N}w740L)ku)4)WJBe2huf!pJYX+O*~>r5jeSD1ZH2awvr=B;(Ad4KDr(Tv4x zyLR5B>M}9{yA9ZxAXY&m6@UW_3b$@8eS&sog z#f{9r(Tt=!bZt&)!?|fbprZ;g{8mGJy0L`zrPBOoJdEC!hAClpL_w3^BQ{b-sjFEA zT<1@qx4{b$%zo0gD&nUa$vv~s_xKxeJVl}WR>Z^d1D-w|??EhbiCRQO*Pkn6kUcC) zsN`t%iewUJ%`0}v{=(l0HV17J!(BuvWSv$)#+K~3^|GLfo`$kHB08EQ*O3AK<_ZX|Rg=pi~ zrz5c*w(^bAl!;vlZsg~s)wtBG^20H3BL_M?P2(QPBqD*S#8NdtIWo3zGwEPUPL!U; zuzk#GCpP9C(WiW%jg)>|bNuXT+||sas0X&6OITvMSxA%JEV>VT-;z|RmD(07kNV>M zY9@2-#*f=2dK(>Sk_Jdui4T6YPRV=_~uk$*-kh=2VkDYKnetQWY~%j`F=PJNrI4mkqH)8l zeDm=?4s7a~sTu)3B*D)a&vHBQ(-%+8qcj%{RV_GX1K;CDlar4T>2xfHKzpVRM}}=x zM&ze%XlWzK8D^13n;wV4$k-v8T3K2`LDT@%#~88Km_9mcTX0v5M*szi0OXE!0pC7` z*3VFBgmJu1J;9YKH7}2Ap{dyivl5mq$tmmczo`A_ORR?^V*|bi>wNzJ*)m+T7LlB4 zZZGAV4W#c8I1No>+mNIkhiXb1}sIRu{e8KKdd-<=0AifFS<>etKSpU9uTA)}5luvVu+k=lFeZ znAFo$(blp;G{#N>6*%w5jp~(!a~TLDplx1fZ1ALlpm)F>Z(T-{yeTn83_QRN+}avC zB8c);6lYEqROctd*4olc^=z!uNlshx;FbLeBKiLSV$g|dCV3m))Z`B;FBtfN$NSoi z!fKgnV=o&lD505HVF~p!&HGj)=`+X0)B*e%7Ja964UE5MXvubd~mzBu` ze!N_MWS&V8a>t&XFfViq@)*EwImn7^-2OD?)^xyC+~ z?Q6=L7MY-gt(AHy@Wt*neamgAD3x#siNGv-Up+9kWw=3?>y zav6S{bkB|JcF&nMNh3bbwAlpda6gweAIP*6x2GSeg17A>GQ4Rl%mOJ~4L+wmv3jkt zf|qBf5>HnP$sidKjF3-=viEJi38phMdxhIE!knlcmSLt1Lx!h{ik(Lgv43W6 
zGQwU}w%agz!xb?LVn4@yc?72Qrsh{h1d#$-dGnIJ8 zo_NjsS3lbtC=Q)i=dkNzTG=hJ)f7gmIhe@a(v0fI2f$~ogw0(D3mQsS^6La42F@F; zMIAKC(FKWFOnzQC>7P?_r1_?8&k5~BKsX9WzM1I-UY!Ng{JNFuKH^O*NBqAi2gc8b zgk}KH-TH)QXHxE>MROPvMu(y zs;o%JbbzC=&p7bwYCC5uvtRP|-9cLbmZ{L?$c{E35HX%J($wy|dPQ3-boDSI6O@l5 zBL}d>g)ZM)x6;w;Ie4Uyc}{$!ah!D*KeLK@xMnDWqYRxy{{R<1*2vmM<}usFRSieF zSKb;(l0(Q;^%?GMiq=rqRO?;AP_8ozN3JuER`K+-+nmo+Ni-3*l}#W7;IaJKH}1tm zcAacs6Tvd{8pvWh_+rwtE9@t7ks5#dLI*f*JekjN{u`XJss)0fVp$61Sq5@XslQ&H zIHHL?)rqBTM_1R+?zeP%P^HAu15ya`k=cArmy>8ZqZ%7~p{UUWak?<;$Uh&COFkcY zBr75!#W(u#pQTT#{B7K=6cR{QTu9$cXZl@BLeX}a4S2XA+-vD&mYPEx(2!Xp;Uz)buZ`a=8uc`k)YW^9Op((|8WcJLoFHO3KM&IE z+v&_P?o@R-6}ns7Dsl)PNX_A5hNekkk|}Cd8DtSlASGHE&qh9+cfCi;wC@h%QTJw9 zo;^yk2FsE9_qRPTi_}Q5ZL10U*o2iFsEx-Ok7mfmaeq|DCc#LLLpx?+%u(^7kFS0& zRVL2~ohk^H77dnJhNX2p=fK&OI}uqv^eVM50kpR`IPdVYx>!T>IB)Fj&wj5|scB0$ z=948<6UiB}skX^Bjm@EsNhDTMbw%YF9CLkNaA;jbs)yYxh{7+I807JJs=fAuXr&q` z8hVxoRE`KA0DS?mZjNz}{$JepJ+`0Tc28s}QSWlg&#kbd`Q9Tt#1298c?MTKap`z{ zljaqvqmr7goXWCiF&+p%;Xl^J+qTg1Rx*dD_Cq{sM<8@gd)VnTH|EaKBc0w#NfJoJ zOirzQNC@YT4lOlO?aNms3$~YYuTVYMbHTh_1T~e96z=s3boq3gDe%DSZMOYWjf?JYTYWJ~DwvE#eCscpNI%2R^02AR zJyEIqk8NUrYG1_VvFYpZwGuqG9m0=OZuZ^kIOmEOf*9v!)KeHyi$PvxBCeejK@h7l zYslnhj1HZxQ!ZI%@gmczR7&C5uw#G@6-eh-?y%t0*_q-99eZZa+^eV|rP7f#RO~=9 z0hK~J57zN1L%r9?`I1!%3#cAYqsHH#XGEISiWZs=VEb%=noVc99Y0Zb*2(*6;CUld z5bD)0Vo&e9g%d8L6q2*lF~Z}q@wYP{+4IK~v$zCm(txo(9~W z2g!xP{{V`?{hK0|c;b>Ws(!A;yZL==s*jlkd`nFNJO~>qBp4(Q&5oSr^fbDvB8}## zMo9Qm*k{oC-ej{j@=sF)2-2Wv$0eIX=Q-qfbhq^MQEo8PN$-I4tQe^y&VDwQqJb-2 zn7ts@e5IJ7Kg#Z-*rui-BDLMccQfPC;z>viqlFH~;vO)RZ6GCw0xAu>B;UqksH zY-p%z;HXkp+|%a#-mE-5EjiJ)C*3zVA*rLIW1=A*T3@JY=rdtbZgN$m@xaKkN1PVG zjIXY&cfO5N^7hlAz0{3k#tyPD2gH$!=R23mO&_@1MPyRKNoRw(UV0JI=b%()pxkSz zw}|4Ao}t^+Ytlc64@=o?UvE=gBk5;o52j)#z8j~0_eRl1c8H^>s)=N+nQ`2-S49^*3!8bX-^{XB&oJ{JD~ZKT}GsT+@w<4|G=1NF3< zPUBfw3#-hCicUj-H39rKLmCRmlt5V>U%tF*_0NT&4tJThy}G8Ie$^(cE2=rwunFsc zI^8f&9^)d;)XI{tm)!R;+tePy*40o&1nXAvt2{w;<-rP1f9rY8=B#_lVUXpAo7)FQ 
zej|&9w5}&XlD3;^qLQuCRPnYILubE&-cB!@VW|==O$AiXBk<@-=so&fTXeLJr|vUN zDIB!N$uFh_pHE#)Ng+p$8P-fFF^&N}em2*UmzaZdsjaW6j-G~!MfWg3Cn}AeK)tLI zRcw1qP}GR#X<3B9GwvWhq#pKVR8dgD43zOPs~C|{M|E!N@y8dBn{}zDY2Vt?W-PLM zqd3Xnj(BdDoST9}_$q0rV}G*-pWWcI9D$xQ;cKSb7K(p*qM})uz-|Ec$UV91X-wv5 zw6?8046isg<89qO=7`bDmwIL`9~`m|%bv#NtVC`x^=OG>rFvaOWKpeEKsog`x#*_* zgCS1erRpQ9qiQ&qBS$8ZFcfle*FC>WTT||aB;`YP2fg#tX45}eDUV3)pXYLtF)b_g zY;Z1`n4U)X9N97bII*2RWiA$Kc*jsUv{DT%j~jeKdrUKiQ~fPd0NKxbkm!vzV|_s4 zFI!#nM9|7{Seuxy{m~0&ImNHbRl=2GyVCbCXkjT%MNFiU=np zJ88hr#>JU*xZ|-u%H9&iQ)W!$SaDO9Bf#4!MhV5$j4J;Ci#D4U0aR4zc@;=JXz;Xe zoMPF{7z}ls0f zK;u>B4WO7RB-e)RdU|Q0gAP}mULkiVpi)A&k6+()!@NmRARPhzmL`OEoszCPnpcou z;A0=!`FGAU$m~X}vqVHB9xk)&Q?in3>gB0vm7Zry)N+fwa;Xyhb`7F8>ef;{IW=lWkK+)ULI%_}jDBVxD&;l^>1j>i|&K3txP zEx+yJl19|HWSi8lbUF09nP}&)tj1K+?KB2b^0a4B%H*)*{$6dC&mPXjW)eK)wlLao zr@&r+fB9vEjUlLjBeOCSCp?^m>(cPgw(m5I8adlb951%t*S+6lYdRfW4AiYOx@qFU zY&?=-hrw-KG}~P&iP}+C<+AKa^5?%EmYOZ2cCA70N(m#KZK=POZ6!?&Z3My@qIF07 z;1kEKl_BZTsw#G<)9z^5URk+y(l`gw(6h};E7S_rq4$>=@2GfR+djipATpG7g$5T` z!6(<7yK>uLr$vbhRn$4|e9H=YW43OvX=tKvv*5s1u13$fBa?;y02emehLh~$66z8= z4=K*2T>k(Ki4{E?OEhsqG7y-TC<2BZz&5*Ssgionwu&l-c6|apK3wOIg{K9%M%E3+ zrIG38ky#`kpBVskPlu(lNPbqQSXQE6H(aY~C)C=l){N|a%T%D2M_pduaTmn;+Hc#U zcwRF{0}$Gk`^m>ko+LDGY6r{h8%at7<#YoH(#Jll;d#1^ritKUWKtP}XEhI zTh09Xo1QG(81QY)USHUcNe2wFaiDx{q+cyjQfPBhOpU<`a;y56;D-DrAan zTqy$xKBoA1TKVFupWAi+08U>ZkKxnfbXhtNntvH-RuP9feAv^E*4$4OTr;|lW)UJ} z9YaYT7nzr3YIy-L`%e}d8RXkxD5;(&k~hYj6$e-H^tHtpcQk)-QAsN?dU#b9Oq}6| z$EE#T;<9-nrt-!I{bI(|q6s~)!8>!;<9EiZEGaxbNmO8gWay+W+iPRnQc%&E2|7nO z=RNxyXG>K^H#%`y8yaT>lOGMQ9dX=vSXEJbIDHE8=PaGSTQ)~$q>P16ahsK1;cp3- z?uG(OQymG}MtK9rrfe;zZF-3%S+zE`)S#Ohw0*OpG3CZ^Ip_4c_WH-Bsn;yBrk|wg zY>TKtyJYw*HkGhfSl0lh&)AN6)`t-5t>M9^Q41WB&a;%N3{{Rzr z&C@*zHCh&?GKh71XO5k&@R2Htnipu~DL*@+b#4POCh({OPbgGhBU=Oh825opDfbdd>( zNi^}w4t3|&+0^cd9Vw;|4=*0)RPpV3xR$WIjZ6$hzO3>w>CO8+7u+b*G>Y5_xzAor zvj$U8*H$x#=AF_d4JMKmQCs|7eirq1=G&kwu>pk)9r=EGlVa`kLXxBIe&^>b~d@XX+`w#9TI`DNV;B(jGchS^sk`HLk 
zW0B8x=gr`5v>O_E>8m3-8n91qr#2L6HEm0UogK1CBy)jnN+6b=Eqb2^REN{2^tx%N zy^4w2AIwSAf3UU4ohlUZt7b`=@!3&MGx4?s%)>4wWfpW8NcFfF|_4eaQ*c&n?(`4-00J(9IUJAYC2TskAi`q>Ic)Ry$oZ?#~+` zkJ}5>^}J%Il9Hl!GPKDXH`1W-{{Ra%3T?CA5j(V~-%dwi@@*uv3-Ly}w*@i(*9)&_OL?(#RS;L(QaPoc6nF z>9*aDTxc%sz#YiXTQ}}y+$zOX$3b2}BN@Q$YAcAU-Tb3bCxXY10ds^jQ(R_*qkuq= z1|I`zm))7(RYQe5o|zw|o4iL@vVh}-0|Xv1VAIyrN0b~ZcNp)RZg=)mE|O0nEXCW? zwtq{ioeg3U{+j@FhQRIdvnkbn>OXM_)^IrD^!tScJ|k*)hq?K*gL-EOeH{9F8TB|E z%b(Qai)%|3=}kxej-bHmW*HgBI0p5!n>ne|Hwf{A--1V_-6)MUMI_0=J(Y(ej>X%) zZBiFTGL2H2GpYj9JDd_q>B+}#K^OMm^|Ya4oCMSZ0U6`K@oZn&GM9{K8gcPI#rpbP zUt=9S$dVUeJ5gV11cYpFKqsL!~vB-3Yy>c7u{bXBZ^Z+fQ^k&jM&4hOBxB!JQ`pWUe{ z7C=yQf^Zl4Sd%?n9;HDWqhnJcImhy|v{J`OEb)`BM&&rj$HS*QTDnSVsTf9x?W?H= zPxHBkX%3>Sgy|9%=eXyCZEAMrU&oA$5Hh(LI{i|3|16DZy z02h8fHb%-a=6cm{Z`}xr4y8Qj!v1HPL;xr`?!*p$7o-0G+Y-dO%$mm;!van_=dU)V zzLupY_QY)*NW3r|d`<1>mM*8HncL`DK>GCa$A^v1BF03B(vuoDBr(tH*6vxFln-t; zU|Y;aLH?J0MOytpm;-+a#q+n>3Yr+!QV10#rwrhXFk()V>7NT&lyy2>FbcAA-2VV! ze#N*(vHLG9lY6cJJOHQs!v3q3>12%-Mq)=(oj18)RK@iy@j3{=3Wp~n8oUj(HLa#P zl}$cfI!7QMTega#9muqGP^40-@W72P>U}Qy?Xp@*e{+K#I90(v$XOL=^yT}5(!FRj zk?P}+g8=+-YU;MftVSiuFzlpe-);32vB~77EW~!kEtH#zz{LH8fck-}`B=HIJWfqM z-B(Jbnl{i5HDiq0{F~Z*%kvcVz*2}!JY(t%npucF?-gfumR=cJ~l!~5U4AUFc&(Q zB54RD6T$dgHXSS#`>Cgu7vkx2!2Jcbk8D{8)WKBuH>uioh+>_NqF1`%V}h9H;yyK=av!f1@>fbI0re^@%=BZ)NP|~3aaYljPQ8H=H~f(eb#2C zi`-sYJ_q4^?%M@%C=P~|T23X8PCUyX?mu3(#O%}&t4gvmE_u*+@fTd1Wc5W-pbAOC zg20SqdXLufZ4FFyGSj5Y`cIij@|+&FDP5e(R-I33+;-i$edpCXkMyHGv;HezzHfEA zjY_|?B3uka1_mx$WmJSj%SeTkC^^CNUW!QFrCO@AhMb_vv*QH(1@(T8?>Xqt(r(hh z0Z}B-j`~N@c0s$#U8*&g++_smAav>4%d3u)a7L06S%(C-mNv)mucxJ;f`C!X>}BQn z(wv`#^HieTpBkBQtqur(Fz82H1A5zc8ks5TT})BVG^C%GKc{QaZWNIj5j~>_3&uM5 z-$ti~8m*cHk<=u8gJZv^Hc5#{1ZWz z$nzh=QGx*eFNZdkdYLMm#;JmbF68oa+a4CPdajmw>UwCO+7YIMo^k5N=#C1iS&WI9 zIKk9-ZyNgtJEiFoeoWg(m|aA6q@36)`-nt*kHv zE=a&UDXM&;pf}z*g;n)Eldg|(Gm$`tx3=CwB~te>ZEx9 z*4Y^V>F4-ts*Sn;%RZ-5BB_u@Pm!_7AamZzifUSDG)Vsdahw&@gtFtIBwMS|>0KeG 
zSbfr1`Ho}3%V!pT-&|;!;&C8JPE(u}C&SX~l6aLNsKDowr?-4sz2T5GJxx2A3pe5p ztMK#~u_xJFk{HEGvsbs?RA?HBR&Jp4dnzi*XsJq>os~(_K*q0!h4Z@I#+H>#(tbab zC=57X{H)!cx$CH;kzx^uK9QaqKAFC_u8LlUj%p^Yl+PK2k=2d}@VUs(9Xm$yk8yz! zfWBY;mExD?7Nrt@+v;L*<|j|b<90vI0udQnN1k8d=Ronz*G$f|j6W}mNglUxpWH;t zdu!bD!VgX@+x@L{Q2CmHSCv!2Wga-}Z45irU2N(D-}}qpGU;du%%2 za?pPuP+eQV==hp3A)}pSCcpf#1G;Jfy2|pWKRl31FH1KTmM(pY}LIDg3 z;NaStJ*8pW%#%(-Co(CM$`7Z0_9~_zx%cFxmN=4zNn}OQ?j9n?rh4YpZg86To}#5- zrvQSK>On33Q{L#Hl3mtvt&tLaI)5_`$2TeBTH2Rc(U8P6raLx0Y>`>?=<|3XY^1SD zswo{gKzUc=fr~1i<|%gh+G*p4VNnxA$@KHl$4)xi3f-!9jw*>E4GaL6eO_;YAKLL% zTb!EKYHBjkLO(SJN^NCt{p`DXThiF7&5-T3Vo`Ug~xNmRO| zAdhhm{J=N>j{NqrcBp3GDh*V~5e63#t_S{iyr84oXr)P0+>TW5D;+3D_<`W~T`5AO zs6$Z&PVGY?3ix0*_*?&)4)(5Dos%NL3a676Zk7