From 53890bafa48b3d89089c8c92c1f953da47f92b63 Mon Sep 17 00:00:00 2001 From: Artiprocher Date: Thu, 5 Feb 2026 11:10:55 +0800 Subject: [PATCH] update examples --- examples/flux2/model_inference_low_vram/FLUX.2-klein-4B.py | 1 + examples/flux2/model_inference_low_vram/FLUX.2-klein-9B.py | 1 + examples/flux2/model_inference_low_vram/FLUX.2-klein-base-4B.py | 1 + examples/flux2/model_inference_low_vram/FLUX.2-klein-base-9B.py | 1 + .../z_image/model_inference_low_vram/Z-Image-Omni-Base-i2L.py | 1 + examples/z_image/model_inference_low_vram/Z-Image-Omni-Base.py | 1 + .../Z-Image-Turbo-Fun-Controlnet-Tile-2.1-8steps.py | 1 + .../Z-Image-Turbo-Fun-Controlnet-Union-2.1-8steps.py | 1 + .../Z-Image-Turbo-Fun-Controlnet-Union-2.1.py | 1 + examples/z_image/model_inference_low_vram/Z-Image-i2L.py | 1 + examples/z_image/model_inference_low_vram/Z-Image.py | 1 + 11 files changed, 11 insertions(+) diff --git a/examples/flux2/model_inference_low_vram/FLUX.2-klein-4B.py b/examples/flux2/model_inference_low_vram/FLUX.2-klein-4B.py index dbdc8e4..b1f6f40 100644 --- a/examples/flux2/model_inference_low_vram/FLUX.2-klein-4B.py +++ b/examples/flux2/model_inference_low_vram/FLUX.2-klein-4B.py @@ -21,6 +21,7 @@ pipe = Flux2ImagePipeline.from_pretrained( ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"), ], tokenizer_config=ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="tokenizer/"), + vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5, ) prompt = "Masterpiece, best quality. Anime-style portrait of a woman in a blue dress, underwater, surrounded by colorful bubbles." 
image = pipe(prompt, seed=0, rand_device="cuda", num_inference_steps=4) diff --git a/examples/flux2/model_inference_low_vram/FLUX.2-klein-9B.py b/examples/flux2/model_inference_low_vram/FLUX.2-klein-9B.py index dc7b9a7..f79d8b3 100644 --- a/examples/flux2/model_inference_low_vram/FLUX.2-klein-9B.py +++ b/examples/flux2/model_inference_low_vram/FLUX.2-klein-9B.py @@ -21,6 +21,7 @@ pipe = Flux2ImagePipeline.from_pretrained( ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"), ], tokenizer_config=ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="tokenizer/"), + vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5, ) prompt = "Masterpiece, best quality. Anime-style portrait of a woman in a blue dress, underwater, surrounded by colorful bubbles." image = pipe(prompt, seed=0, rand_device="cuda", num_inference_steps=4) diff --git a/examples/flux2/model_inference_low_vram/FLUX.2-klein-base-4B.py b/examples/flux2/model_inference_low_vram/FLUX.2-klein-base-4B.py index 5a1517f..4538fdb 100644 --- a/examples/flux2/model_inference_low_vram/FLUX.2-klein-base-4B.py +++ b/examples/flux2/model_inference_low_vram/FLUX.2-klein-base-4B.py @@ -21,6 +21,7 @@ pipe = Flux2ImagePipeline.from_pretrained( ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"), ], tokenizer_config=ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="tokenizer/"), + vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5, ) prompt = "Masterpiece, best quality. Anime-style portrait of a woman in a blue dress, underwater, surrounded by colorful bubbles." 
image = pipe(prompt, seed=0, rand_device="cuda", num_inference_steps=50, cfg_scale=4) diff --git a/examples/flux2/model_inference_low_vram/FLUX.2-klein-base-9B.py b/examples/flux2/model_inference_low_vram/FLUX.2-klein-base-9B.py index e0df8a6..65a59f6 100644 --- a/examples/flux2/model_inference_low_vram/FLUX.2-klein-base-9B.py +++ b/examples/flux2/model_inference_low_vram/FLUX.2-klein-base-9B.py @@ -21,6 +21,7 @@ pipe = Flux2ImagePipeline.from_pretrained( ModelConfig(model_id="black-forest-labs/FLUX.2-klein-9B", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"), ], tokenizer_config=ModelConfig(model_id="black-forest-labs/FLUX.2-klein-9B", origin_file_pattern="tokenizer/"), + vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5, ) prompt = "Masterpiece, best quality. Anime-style portrait of a woman in a blue dress, underwater, surrounded by colorful bubbles." image = pipe(prompt, seed=0, rand_device="cuda", num_inference_steps=50, cfg_scale=4) diff --git a/examples/z_image/model_inference_low_vram/Z-Image-Omni-Base-i2L.py b/examples/z_image/model_inference_low_vram/Z-Image-Omni-Base-i2L.py index 7378ada..6da6960 100644 --- a/examples/z_image/model_inference_low_vram/Z-Image-Omni-Base-i2L.py +++ b/examples/z_image/model_inference_low_vram/Z-Image-Omni-Base-i2L.py @@ -33,6 +33,7 @@ pipe = ZImagePipeline.from_pretrained( ModelConfig(model_id="DiffSynth-Studio/Z-Image-Omni-Base-i2L", origin_file_pattern="model.safetensors", **vram_config), ], tokenizer_config=ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="tokenizer/"), + vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5, ) # Load images diff --git a/examples/z_image/model_inference_low_vram/Z-Image-Omni-Base.py b/examples/z_image/model_inference_low_vram/Z-Image-Omni-Base.py index 0af1e53..b9fa293 100644 --- a/examples/z_image/model_inference_low_vram/Z-Image-Omni-Base.py +++ b/examples/z_image/model_inference_low_vram/Z-Image-Omni-Base.py @@ -22,6 +22,7 
@@ pipe = ZImagePipeline.from_pretrained( ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", **vram_config), ], tokenizer_config=ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="tokenizer/"), + vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5, ) prompt = "Young Chinese woman in red Hanfu, intricate embroidery. Impeccable makeup, red floral forehead pattern. Elaborate high bun, golden phoenix headdress, red flowers, beads. Holds round folding fan with lady, trees, bird. Neon lightning-bolt lamp (⚡️), bright yellow glow, above extended left palm. Soft-lit outdoor night background, silhouetted tiered pagoda (西安大雁塔), blurred colorful distant lights." image = pipe(prompt=prompt, seed=0, num_inference_steps=40, cfg_scale=4) diff --git a/examples/z_image/model_inference_low_vram/Z-Image-Turbo-Fun-Controlnet-Tile-2.1-8steps.py b/examples/z_image/model_inference_low_vram/Z-Image-Turbo-Fun-Controlnet-Tile-2.1-8steps.py index cd4276f..61ea96f 100644 --- a/examples/z_image/model_inference_low_vram/Z-Image-Turbo-Fun-Controlnet-Tile-2.1-8steps.py +++ b/examples/z_image/model_inference_low_vram/Z-Image-Turbo-Fun-Controlnet-Tile-2.1-8steps.py @@ -24,6 +24,7 @@ pipe = ZImagePipeline.from_pretrained( ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", **vram_config), ], tokenizer_config=ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="tokenizer/"), + vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5, ) dataset_snapshot_download( diff --git a/examples/z_image/model_inference_low_vram/Z-Image-Turbo-Fun-Controlnet-Union-2.1-8steps.py b/examples/z_image/model_inference_low_vram/Z-Image-Turbo-Fun-Controlnet-Union-2.1-8steps.py index f325508..54811c0 100644 --- a/examples/z_image/model_inference_low_vram/Z-Image-Turbo-Fun-Controlnet-Union-2.1-8steps.py +++ 
b/examples/z_image/model_inference_low_vram/Z-Image-Turbo-Fun-Controlnet-Union-2.1-8steps.py @@ -24,6 +24,7 @@ pipe = ZImagePipeline.from_pretrained( ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", **vram_config), ], tokenizer_config=ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="tokenizer/"), + vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5, ) # Control diff --git a/examples/z_image/model_inference_low_vram/Z-Image-Turbo-Fun-Controlnet-Union-2.1.py b/examples/z_image/model_inference_low_vram/Z-Image-Turbo-Fun-Controlnet-Union-2.1.py index 6fe170f..0c81bd6 100644 --- a/examples/z_image/model_inference_low_vram/Z-Image-Turbo-Fun-Controlnet-Union-2.1.py +++ b/examples/z_image/model_inference_low_vram/Z-Image-Turbo-Fun-Controlnet-Union-2.1.py @@ -24,6 +24,7 @@ pipe = ZImagePipeline.from_pretrained( ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", **vram_config), ], tokenizer_config=ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="tokenizer/"), + vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5, ) # Control diff --git a/examples/z_image/model_inference_low_vram/Z-Image-i2L.py b/examples/z_image/model_inference_low_vram/Z-Image-i2L.py index 98b3ba3..a98537c 100644 --- a/examples/z_image/model_inference_low_vram/Z-Image-i2L.py +++ b/examples/z_image/model_inference_low_vram/Z-Image-i2L.py @@ -32,6 +32,7 @@ pipe = ZImagePipeline.from_pretrained( ModelConfig(model_id="DiffSynth-Studio/Z-Image-i2L", origin_file_pattern="model.safetensors", **vram_config), ], tokenizer_config=ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="tokenizer/"), + vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5, ) # Load images diff --git a/examples/z_image/model_inference_low_vram/Z-Image.py b/examples/z_image/model_inference_low_vram/Z-Image.py index 344ae50..5eee761 100644 ---
a/examples/z_image/model_inference_low_vram/Z-Image.py +++ b/examples/z_image/model_inference_low_vram/Z-Image.py @@ -20,6 +20,7 @@ pipe = ZImagePipeline.from_pretrained( ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", **vram_config), ], tokenizer_config=ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="tokenizer/"), + vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5, ) prompt = "Young Chinese woman in red Hanfu, intricate embroidery. Impeccable makeup, red floral forehead pattern. Elaborate high bun, golden phoenix headdress, red flowers, beads. Holds round folding fan with lady, trees, bird. Neon lightning-bolt lamp (⚡️), bright yellow glow, above extended left palm. Soft-lit outdoor night background, silhouetted tiered pagoda (西安大雁塔), blurred colorful distant lights." image = pipe(prompt=prompt, seed=42, rand_device="cuda", num_inference_steps=50, cfg_scale=4)