mirror of
https://github.com/modelscope/DiffSynth-Studio.git
synced 2026-03-23 17:38:10 +00:00
update examples
This commit is contained in:
@@ -85,19 +85,3 @@ We highly recommend you to use this model in the WebUI.
|
||||
|"black car"|"red car"|
|
||||
|-|-|
|
||||
|||
|
||||
|
||||
### Example: Prompt Processing
|
||||
|
||||
If you are not a native English speaker, we provide a translation service for you. Our prompter can translate other languages to English and refine the result using "BeautifulPrompt" models. Please see [`sd_prompt_refining.py`](./sd_prompt_refining.py) for more details.
|
||||
|
||||
Prompt: "一个漂亮的女孩". The [translation model](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en) will translate it to English.
|
||||
|
||||
|seed=0|seed=1|seed=2|seed=3|
|
||||
|-|-|-|-|
|
||||
|||||
|
||||
|
||||
Prompt: "一个漂亮的女孩". The [translation model](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en) will translate it to English. Then the [refining model](https://huggingface.co/alibaba-pai/pai-bloom-1b1-text2prompt-sd) will refine the translated prompt for better visual quality.
|
||||
|
||||
|seed=0|seed=1|seed=2|seed=3|
|
||||
|-|-|-|-|
|
||||
|||||
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
import torch
from diffsynth import download_models, ModelManager, OmostPromter, FluxImagePipeline


# Example: expand a short prompt with the Omost prompter before FLUX.1-dev
# generation, then render the same raw prompt without the prompter so the
# two outputs can be compared side by side.

# Download the Omost prompter and FLUX.1-dev weights (no-op if cached).
download_models(["OmostPrompt"])
download_models(["FLUX.1-dev"])

# Load every model through one ModelManager so both pipelines share weights.
model_manager = ModelManager(torch_dtype=torch.bfloat16)
model_manager.load_models([
    "models/OmostPrompt/omost-llama-3-8b-4bits",
    "models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
    "models/FLUX/FLUX.1-dev/text_encoder_2",
    "models/FLUX/FLUX.1-dev/ae.safetensors",
    "models/FLUX/FLUX.1-dev/flux1-dev.safetensors"
])

# One pipeline with the Omost prompt extender, one without.
pipe_omost = FluxImagePipeline.from_model_manager(model_manager, prompt_extender_classes=[OmostPromter])
pipe = FluxImagePipeline.from_model_manager(model_manager)

prompt = "A witch uses ice magic to fight against wild beasts"
seed = 7

# Generate with the Omost-extended prompt.
torch.manual_seed(seed)
image = pipe_omost(
    prompt=prompt,
    num_inference_steps=30, embedded_guidance=3.5
)
image.save("image_omost.jpg")  # fixed: was an f-string with no placeholders

# Re-seed so the raw-prompt run starts from identical noise for a fair comparison.
torch.manual_seed(seed)
image2 = pipe(
    prompt=prompt,
    num_inference_steps=30, embedded_guidance=3.5
)
image2.save("image.jpg")  # fixed: was an f-string with no placeholders
|
||||
@@ -1,25 +0,0 @@
|
||||
import torch
from diffsynth import ModelManager, SDXLImagePipeline, download_models, QwenPrompt


# Example: refine a Chinese prompt with the Qwen prompter before SDXL generation.

# Fetch the SDXL base weights and the Qwen prompt-refining model (no-op if cached).
download_models(["StableDiffusionXL_v1", "QwenPrompt"])

# Load both models and build a pipeline that refines prompts via QwenPrompt.
manager = ModelManager(torch_dtype=torch.float16, device="cuda")
manager.load_models([
    "models/stable_diffusion_xl/sd_xl_base_1.0.safetensors",
    "models/QwenPrompt/qwen2-1.5b-instruct",
])
pipe = SDXLImagePipeline.from_model_manager(manager, prompt_refiner_classes=[QwenPrompt])

prompt = "一个漂亮的女孩"
negative_prompt = ""

# Render four seeds so the refined prompt can be judged across several samples.
for seed in range(4):
    torch.manual_seed(seed)
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=1024,
        width=1024,
        num_inference_steps=30,
    )
    result.save(f"{seed}.jpg")
|
||||
@@ -1,30 +0,0 @@
|
||||
from diffsynth import ModelManager, SDXLImagePipeline, download_models, Translator, BeautifulPrompt
import torch


# Example: translate a non-English prompt to English (Translator), then refine
# the English text with BeautifulPrompt, before SDXL generation.

# Download models (automatically)
# `models/stable_diffusion_xl/sd_xl_base_1.0.safetensors`: [link](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors)
# `models/BeautifulPrompt/pai-bloom-1b1-text2prompt-sd/`: [link](https://huggingface.co/alibaba-pai/pai-bloom-1b1-text2prompt-sd)
# `models/translator/opus-mt-zh-en/`: [link](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en)
# (fixed: the link previously pointed at opus-mt-en-zh, the English->Chinese
# model; this script loads opus-mt-zh-en to translate Chinese->English)
download_models(["StableDiffusionXL_v1", "BeautifulPrompt", "opus-mt-zh-en"])

# Load models
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_models([
    "models/stable_diffusion_xl/sd_xl_base_1.0.safetensors",
    "models/BeautifulPrompt/pai-bloom-1b1-text2prompt-sd",
    "models/translator/opus-mt-zh-en"
])
# Refiners run in order: translate to English first, then beautify the result.
pipe = SDXLImagePipeline.from_model_manager(model_manager, prompt_refiner_classes=[Translator, BeautifulPrompt])

prompt = "一个漂亮的女孩"
negative_prompt = ""

# Generate four images with different seeds.
for seed in range(4):
    torch.manual_seed(seed)
    image = pipe(
        prompt=prompt, negative_prompt=negative_prompt,
        height=1024, width=1024,
        num_inference_steps=30
    )
    image.save(f"{seed}.jpg")
|
||||
Reference in New Issue
Block a user