# Mirror of https://github.com/modelscope/DiffSynth-Studio.git
# Synced 2026-03-18 13:58:15 +00:00
# Third-party imports
import numpy as np
import torch
from PIL import Image

# Project imports
from diffsynth.pipelines.flux_image import FluxImagePipeline, ModelConfig
from diffsynth.utils.controlnet import Annotator
vram_config = {
|
|
"offload_dtype": torch.float8_e4m3fn,
|
|
"offload_device": "cpu",
|
|
"onload_dtype": torch.float8_e4m3fn,
|
|
"onload_device": "cpu",
|
|
"preparing_dtype": torch.float8_e4m3fn,
|
|
"preparing_device": "cuda",
|
|
"computation_dtype": torch.bfloat16,
|
|
"computation_device": "cuda",
|
|
}
|
|
# Load the Flex.2 pipeline. The Flex.2 transformer is combined with
# FLUX.1-dev's text encoders and VAE; every component uses the FP8
# offloading config defined above.
pipe = FluxImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="ostris/Flex.2-preview", origin_file_pattern="Flex.2-preview.safetensors", **vram_config),
        ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors", **vram_config),
        ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/*.safetensors", **vram_config),
        ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors", **vram_config),
    ],
    # Budget: total device memory in GiB minus a 0.5 GiB safety margin.
    # NOTE(review): mem_get_info()[1] is *total* GPU memory, not free memory
    # ([0]); this assumes the process has the whole GPU to itself — confirm.
    vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5,
)
# 1) Plain text-to-image generation (no conditioning inputs).
image = pipe(
    prompt="portrait of a beautiful Asian girl, long hair, red t-shirt, sunshine, beach",
    num_inference_steps=50, embedded_guidance=3.5,
    seed=0,
)
image.save("image_1.jpg")
# 2) Build a rectangular inpainting mask: white (255) marks the region to be
# repainted, black is preserved. Rows 200-400, columns 400-700 of a 1024x1024
# RGB canvas.
mask = np.zeros((1024, 1024, 3), dtype=np.uint8)
mask[200:400, 400:700] = 255
mask = Image.fromarray(mask)
mask.save("image_mask.jpg")

# The first generation's result is the image we inpaint over.
inpaint_image = image
# Inpainting pass: regenerate only the masked region (adding sunglasses),
# keeping the rest of inpaint_image intact.
image = pipe(
    prompt="portrait of a beautiful Asian girl with sunglasses, long hair, red t-shirt, sunshine, beach",
    num_inference_steps=50, embedded_guidance=3.5,
    flex_inpaint_image=inpaint_image, flex_inpaint_mask=mask,
    seed=4,
)
image.save("image_2.jpg")
# 3) Structure-guided generation: extract Canny edges from the previous result
# and use them as a Flex control image, changing only the prompt (t-shirt
# color) while preserving composition.
control_image = Annotator("canny")(image)
control_image.save("image_control.jpg")

image = pipe(
    prompt="portrait of a beautiful Asian girl with sunglasses, long hair, yellow t-shirt, sunshine, beach",
    num_inference_steps=50, embedded_guidance=3.5,
    flex_control_image=control_image,
    seed=4,
)
image.save("image_3.jpg")