From 19ce3048c1dd7e7a64db3d0d6908f08f2cf9c70a Mon Sep 17 00:00:00 2001 From: feng0w0 Date: Fri, 9 Jan 2026 18:06:41 +0800 Subject: [PATCH] [model][NPU]: Wan model rope uses torch.complex64 on NPU --- diffsynth/models/wan_video_dit.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/diffsynth/models/wan_video_dit.py b/diffsynth/models/wan_video_dit.py index daafa7a..43cd601 100644 --- a/diffsynth/models/wan_video_dit.py +++ b/diffsynth/models/wan_video_dit.py @@ -5,6 +5,8 @@ import math from typing import Tuple, Optional from einops import rearrange from .wan_video_camera_controller import SimpleAdapter +from ..core.device.npu_compatible_device import IS_NPU_AVAILABLE + try: import flash_attn_interface FLASH_ATTN_3_AVAILABLE = True @@ -92,6 +94,7 @@ def rope_apply(x, freqs, num_heads): x = rearrange(x, "b s (n d) -> b s n d", n=num_heads) x_out = torch.view_as_complex(x.to(torch.float64).reshape( x.shape[0], x.shape[1], x.shape[2], -1, 2)) + freqs = freqs.to(torch.complex64) if IS_NPU_AVAILABLE else freqs x_out = torch.view_as_real(x_out * freqs).flatten(2) return x_out.to(x.dtype)