Merge pull request #921 from modelscope/qwen-image-distill-dmd2-lora

support qwen-image-distill-dmd2-lora
This commit is contained in:
Zhongjie Duan
2025-09-16 19:43:59 +08:00
committed by GitHub
3 changed files with 33 additions and 3 deletions

View File

@@ -371,6 +371,7 @@ class QwenImagePipeline(BasePipeline):
rand_device: str = "cpu",
# Steps
num_inference_steps: int = 30,
exponential_shift_mu: float = None,
# Blockwise ControlNet
blockwise_controlnet_inputs: list[ControlNetInput] = None,
# EliGen
@@ -393,7 +394,7 @@ class QwenImagePipeline(BasePipeline):
progress_bar_cmd = tqdm,
):
# Scheduler
self.scheduler.set_timesteps(num_inference_steps, denoising_strength=denoising_strength, dynamic_shift_len=(height // 16) * (width // 16))
self.scheduler.set_timesteps(num_inference_steps, denoising_strength=denoising_strength, dynamic_shift_len=(height // 16) * (width // 16), exponential_shift_mu=exponential_shift_mu)
# Parameters
inputs_posi = {

View File

@@ -31,7 +31,7 @@ class FlowMatchScheduler():
self.set_timesteps(num_inference_steps)
def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0, training=False, shift=None, dynamic_shift_len=None):
def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0, training=False, shift=None, dynamic_shift_len=None, exponential_shift_mu=None):
if shift is not None:
self.shift = shift
sigma_start = self.sigma_min + (self.sigma_max - self.sigma_min) * denoising_strength
@@ -42,7 +42,12 @@ class FlowMatchScheduler():
if self.inverse_timesteps:
self.sigmas = torch.flip(self.sigmas, dims=[0])
if self.exponential_shift:
mu = self.calculate_shift(dynamic_shift_len) if dynamic_shift_len is not None else self.exponential_shift_mu
if exponential_shift_mu is not None:
mu = exponential_shift_mu
elif dynamic_shift_len is not None:
mu = self.calculate_shift(dynamic_shift_len)
else:
mu = self.exponential_shift_mu
self.sigmas = math.exp(mu) / (math.exp(mu) + (1 / self.sigmas - 1))
else:
self.sigmas = self.shift * self.sigmas / (1 + (self.shift - 1) * self.sigmas)

View File

@@ -0,0 +1,24 @@
# Example: 3-step distilled Qwen-Image inference using the DMD2 distillation LoRA.
from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, load_state_dict
from modelscope import snapshot_download
import torch, math
# Assemble the base Qwen-Image pipeline (DiT transformer, text encoder, VAE)
# in bfloat16 on the GPU, pulling weights from the Qwen/Qwen-Image repo.
pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
)
# Fetch only the 3-step distillation LoRA checkpoint from ModelScope into ./models.
snapshot_download("MusePublic/Qwen-Image-Distill", allow_file_pattern="qwen_image_distill_3step.safetensors", cache_dir="models")
lora_state_dict = load_state_dict("models/MusePublic/Qwen-Image-Distill/qwen_image_distill_3step.safetensors")
# Strip the PEFT-style "base_model.model." key prefix so the LoRA keys line up
# with the DiT module names expected by pipe.load_lora.
lora_state_dict = {i.replace("base_model.model.", ""): j for i, j in lora_state_dict.items()}
pipe.load_lora(pipe.dit, state_dict=lora_state_dict)
prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
# cfg_scale=1 effectively disables classifier-free guidance for the distilled model.
# exponential_shift_mu overrides the scheduler's dynamically computed shift mu
# (see FlowMatchScheduler.set_timesteps change in this commit); mu = log(2.5) is
# presumably the value the 3-step distillation was trained with — confirm upstream.
image = pipe(prompt, seed=0, num_inference_steps=3, cfg_scale=1, exponential_shift_mu=math.log(2.5))
image.save("image.jpg")