acestep t2m

Author: mi804
Date: 2026-04-21 13:16:15 +08:00
Commit: 9d09e0431c
Parent: a604d76339

9 changed files with 300 additions and 377 deletions

View File

@@ -540,17 +540,9 @@ class AceStepTimbreEncoder(nn.Module):
     ) -> BaseModelOutput:
         inputs_embeds = refer_audio_acoustic_hidden_states_packed
         inputs_embeds = self.embed_tokens(inputs_embeds)
-        # Handle 2D (packed) or 3D (batched) input
-        is_packed = inputs_embeds.dim() == 2
-        if is_packed:
-            seq_len = inputs_embeds.shape[0]
-            cache_position = torch.arange(0, seq_len, device=inputs_embeds.device)
-            position_ids = cache_position.unsqueeze(0)
-            inputs_embeds = inputs_embeds.unsqueeze(0)
-        else:
-            seq_len = inputs_embeds.shape[1]
-            cache_position = torch.arange(0, seq_len, device=inputs_embeds.device)
-            position_ids = cache_position.unsqueeze(0)
+        seq_len = inputs_embeds.shape[1]
+        cache_position = torch.arange(0, seq_len, device=inputs_embeds.device)
+        position_ids = cache_position.unsqueeze(0)
         dtype = inputs_embeds.dtype
         device = inputs_embeds.device
@@ -586,9 +578,8 @@ class AceStepTimbreEncoder(nn.Module):
             hidden_states = layer_outputs[0]
         hidden_states = self.norm(hidden_states)
         hidden_states = hidden_states[:, 0, :]
-        # For packed input: reshape [1, T, D] -> [T, D] for unpacking
-        if is_packed:
-            hidden_states = hidden_states.squeeze(0)
         timbre_embs_unpack, timbre_embs_mask = self.unpack_timbre_embeddings(hidden_states, refer_audio_order_mask)
         return timbre_embs_unpack, timbre_embs_mask
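
The timbre encoder now accepts only batched 3D input; the 2D packed path and its is_packed bookkeeping are gone. A minimal sketch of the surviving position logic (the shapes are illustrative assumptions, not values from the repo):

import torch

# Illustrative shapes; the real values come from the reference-audio latents.
B, T, D = 2, 8, 64
inputs_embeds = torch.randn(B, T, D)  # callers must now always pass [B, T, D]

# Positions are derived from dim 1 unconditionally; a 2D packed [T, D] input
# would previously have been unsqueezed here, but that branch was removed.
seq_len = inputs_embeds.shape[1]
cache_position = torch.arange(0, seq_len, device=inputs_embeds.device)
position_ids = cache_position.unsqueeze(0)  # [1, T], broadcast over the batch

assert position_ids.shape == (1, T)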
@@ -686,7 +677,7 @@ class AceStepConditionEncoder(nn.Module):
         text_attention_mask: Optional[torch.Tensor] = None,
         lyric_hidden_states: Optional[torch.LongTensor] = None,
         lyric_attention_mask: Optional[torch.Tensor] = None,
-        refer_audio_acoustic_hidden_states_packed: Optional[torch.Tensor] = None,
+        reference_latents: Optional[torch.Tensor] = None,
         refer_audio_order_mask: Optional[torch.LongTensor] = None,
     ):
         text_hidden_states = self.text_projector(text_hidden_states)
@@ -695,11 +686,7 @@ class AceStepConditionEncoder(nn.Module):
             attention_mask=lyric_attention_mask,
         )
         lyric_hidden_states = lyric_encoder_outputs.last_hidden_state
-        timbre_embs_unpack, timbre_embs_mask = self.timbre_encoder(
-            refer_audio_acoustic_hidden_states_packed,
-            refer_audio_order_mask
-        )
+        timbre_embs_unpack, timbre_embs_mask = self.timbre_encoder(reference_latents, refer_audio_order_mask)
         encoder_hidden_states, encoder_attention_mask = pack_sequences(
             lyric_hidden_states, timbre_embs_unpack, lyric_attention_mask, timbre_embs_mask
         )
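
Downstream, the lyric and timbre streams are packed into one conditioning sequence. A self-contained sketch of that step, assuming pack_sequences simply concatenates hidden states along the time axis and merges the masks (the repo's helper may differ):

import torch

def pack_sequences_sketch(a, b, mask_a, mask_b):
    # Assumed behavior: concatenate along time and merge the attention masks.
    hidden = torch.cat([a, b], dim=1)          # [B, Ta+Tb, D]
    mask = torch.cat([mask_a, mask_b], dim=1)  # [B, Ta+Tb]
    return hidden, mask

lyric = torch.randn(2, 10, 64)                   # lyric_hidden_states
timbre = torch.randn(2, 3, 64)                   # timbre_embs_unpack
lyric_mask = torch.ones(2, 10, dtype=torch.long)
timbre_mask = torch.ones(2, 3, dtype=torch.long)

enc_hidden, enc_mask = pack_sequences_sketch(lyric, timbre, lyric_mask, timbre_mask)
assert enc_hidden.shape == (2, 13, 64) and enc_mask.shape == (2, 13)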

View File

@@ -165,7 +165,7 @@ class TimestepEmbedding(nn.Module):
         self,
         in_channels: int,
         time_embed_dim: int,
-        scale: float = 1000,
+        scale: float = 1,
     ):
         super().__init__()
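
With flow-matching timesteps in [0, 1], a scale of 1000 stretches them across the sinusoids' useful frequency range; lowering the default to 1 suggests callers now supply timesteps already in the range the embedding expects. A sketch of the common pattern, assuming scale multiplies the timestep before the sin/cos projection (this repo's exact formula may differ):

import math
import torch

def sinusoidal_timestep_embedding(t, dim, scale=1.0, max_period=10000.0):
    # Standard sinusoidal embedding; `scale` rescales t before projection.
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
    args = scale * t[:, None].float() * freqs[None, :]
    return torch.cat([torch.sin(args), torch.cos(args)], dim=-1)

t = torch.tensor([0.25, 0.5])                                # t in [0, 1]
emb_old = sinusoidal_timestep_embedding(t, 256, scale=1000)  # old default
emb_new = sinusoidal_timestep_embedding(t, 256, scale=1)     # new default
print(emb_old.shape, emb_new.shape)  # torch.Size([2, 256]) twice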
@@ -711,7 +711,7 @@ class AceStepDiTModel(nn.Module):
         encoder_hidden_states: torch.Tensor,
         encoder_attention_mask: torch.Tensor,
         context_latents: torch.Tensor,
-        use_cache: Optional[bool] = None,
+        use_cache: Optional[bool] = False,
         past_key_values: Optional[EncoderDecoderCache] = None,
         cache_position: Optional[torch.LongTensor] = None,
         position_ids: Optional[torch.LongTensor] = None,
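
A use_cache of None conventionally means "defer to a config flag", which can silently enable KV caching; an explicit False default makes a single denoising forward pass cache-free unless the caller opts in. A hedged sketch of the opt-in path (only the transformers cache classes are real; the commented call shape is illustrative):

from transformers import DynamicCache, EncoderDecoderCache

use_cache = False  # new default: no KV cache unless explicitly requested
past_key_values = None
if use_cache:
    # Iterative callers opt in and carry the cache across steps, e.g.:
    # out = dit(..., use_cache=True, past_key_values=past_key_values)
    past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache())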

View File

@@ -2,17 +2,6 @@ import torch
 class AceStepTextEncoder(torch.nn.Module):
-    """
-    Text encoder for ACE-Step using Qwen3-Embedding-0.6B.
-
-    Converts text/lyric tokens to hidden state embeddings that are
-    further processed by the ACE-Step ConditionEncoder.
-
-    Wraps a Qwen3Model transformers model. Config is manually
-    constructed, and model weights are loaded via DiffSynth's
-    standard mechanism from safetensors files.
-    """
     def __init__(
         self,
     ):
@@ -49,8 +38,6 @@ class AceStepTextEncoder(torch.nn.Module):
         )
         self.model = Qwen3Model(config)
         self.config = config
-        self.hidden_size = config.hidden_size

     @torch.no_grad()
     def forward(
@@ -58,23 +45,9 @@ class AceStepTextEncoder(torch.nn.Module):
         input_ids: torch.LongTensor,
         attention_mask: torch.Tensor,
     ):
-        """
-        Encode text/lyric tokens to hidden states.
-
-        Args:
-            input_ids: [B, T] token IDs
-            attention_mask: [B, T] attention mask
-
-        Returns:
-            last_hidden_state: [B, T, hidden_size]
-        """
         outputs = self.model(
             input_ids=input_ids,
             attention_mask=attention_mask,
             return_dict=True,
         )
         return outputs.last_hidden_state
-
-    def to(self, *args, **kwargs):
-        self.model.to(*args, **kwargs)
-        return self
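
A usage sketch for the encoder (the tokenizer id matches the Qwen3-Embedding-0.6B model the class wraps; weight loading through DiffSynth is omitted). With the custom to() override dropped, the standard nn.Module.to now suffices for device moves:

import torch
from transformers import AutoTokenizer

# Assumes network access for the Qwen3-Embedding-0.6B tokenizer; encoder
# weights would normally be loaded via DiffSynth before use.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-Embedding-0.6B")
encoder = AceStepTextEncoder().eval()

batch = tokenizer(["a dreamy synth-pop track"], padding=True, return_tensors="pt")
hidden = encoder(batch["input_ids"], batch["attention_mask"])
print(hidden.shape)  # [B, T, hidden_size]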

View File

@@ -226,6 +226,7 @@ class AceStepVAE(nn.Module):
             upsampling_ratios=upsampling_ratios,
             channel_multiples=channel_multiples,
         )
+        self.sampling_rate = sampling_rate

     def encode(self, x: torch.Tensor) -> torch.Tensor:
         """Audio waveform [B, audio_channels, T] → latent [B, encoder_hidden_size, T']."""