Fix mixed-precision dtype issues on older PyTorch versions

This commit is contained in:
Artiprocher
2026-02-10 11:12:50 +08:00
parent ff10fde47f
commit fddc98ff16

View File

@@ -407,6 +407,7 @@ class Flux2AttnProcessor:
query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1)
key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1)
query, key, value = query.to(hidden_states.dtype), key.to(hidden_states.dtype), value.to(hidden_states.dtype)
hidden_states = attention_forward(
query,
key,
@@ -536,6 +537,7 @@ class Flux2ParallelSelfAttnProcessor:
query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1)
key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1)
query, key, value = query.to(hidden_states.dtype), key.to(hidden_states.dtype), value.to(hidden_states.dtype)
hidden_states = attention_forward(
query,
key,