fix qwen-image-edit-lowvram

This commit is contained in:
mi804
2025-08-20 10:31:43 +08:00
parent 9d0683df25
commit c9fea729d8

View File

@@ -103,7 +103,7 @@ class QwenImagePipeline(BasePipeline):
  vram_limit = vram_limit - vram_buffer
  if self.text_encoder is not None:
-     from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLRotaryEmbedding, Qwen2RMSNorm
+     from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLRotaryEmbedding, Qwen2RMSNorm, Qwen2_5_VisionPatchEmbed, Qwen2_5_VisionRotaryEmbedding
      dtype = next(iter(self.text_encoder.parameters())).dtype
      enable_vram_management(
          self.text_encoder,
@@ -112,6 +112,8 @@ class QwenImagePipeline(BasePipeline):
      torch.nn.Embedding: AutoWrappedModule,
      Qwen2_5_VLRotaryEmbedding: AutoWrappedModule,
      Qwen2RMSNorm: AutoWrappedModule,
+     Qwen2_5_VisionPatchEmbed: AutoWrappedModule,
+     Qwen2_5_VisionRotaryEmbedding: AutoWrappedModule,
  },
  module_config = dict(
      offload_dtype=dtype,