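# Qwen-Image text-to-image generation with a blockwise depth ControlNet,
# using disk/FP8 weight offloading to fit within limited GPU memory
# (example script from DiffSynth-Studio).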
from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
from PIL import Image
import torch
from modelscope import dataset_snapshot_download

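# VRAM management: park weights on disk, stage onloaded weights in FP8
# (float8_e4m3fn), and run computation on the GPU in bfloat16.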
vram_config = {
    "offload_dtype": "disk",
    "offload_device": "disk",
    "onload_dtype": torch.float8_e4m3fn,
    "onload_device": "cpu",
    "preparing_dtype": torch.float8_e4m3fn,
    "preparing_device": "cuda",
    "computation_dtype": torch.bfloat16,
    "computation_device": "cuda",
}

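# Load the diffusion transformer, text encoder, VAE, and the blockwise depth
# ControlNet, capping VRAM usage about 0.5 GiB below total GPU memory.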
pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", **vram_config),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", **vram_config),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", **vram_config),
        ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Depth", origin_file_pattern="model.safetensors", **vram_config),
    ],
    tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
    vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5,
)

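# Fetch the example depth map that will condition the generation.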
dataset_snapshot_download(
    dataset_id="DiffSynth-Studio/example_image_dataset",
    local_dir="./data/example_image_dataset",
    allow_file_pattern="depth/image_1.jpg"
)

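# Resize the depth map to the 1328x1328 resolution at which the image is generated.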
controlnet_image = Image.open("data/example_image_dataset/depth/image_1.jpg").resize((1328, 1328))

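# Generate with a fixed seed, conditioning on the depth map through the blockwise ControlNet.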
prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
|
|
image = pipe(
|
|
prompt, seed=0,
|
|
blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image)]
|
|
)
|
|
image.save("image.jpg")
|