diff --git a/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit-2511-Lightning.py b/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit-2511-Lightning.py
index d59def8..cbe43a2 100644
--- a/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit-2511-Lightning.py
+++ b/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit-2511-Lightning.py
@@ -17,9 +17,9 @@ pipe = QwenImagePipeline.from_pretrained(
     torch_dtype=torch.bfloat16,
     device="cuda",
     model_configs=[
-        ModelConfig(model_id="Qwen/Qwen-Image-Edit-2511", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
-        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
-        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+        ModelConfig(model_id="Qwen/Qwen-Image-Edit-2511", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", **vram_config),
+        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", **vram_config),
+        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", **vram_config),
     ],
     processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
 )