`docs/source_en/conf.py`:

```python
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import sys
sys.path.insert(0, os.path.abspath('../../diffsynth'))
project = 'DiffSynth-Studio'
copyright = '2024, ModelScope'
author = 'ModelScope'
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
'recommonmark',
'sphinx_markdown_tables'
]
templates_path = ['_templates']
exclude_patterns = []
source_suffix = ['.rst', '.md']
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
# multi-language docs
language = 'en'
locale_dirs = ['../locales/'] # path is example but recommended.
gettext_compact = False # optional.
gettext_uuid = True # optional.

```
# ControlNet, LoRA, and IP-Adapter: Precision Control Techniques
Building on a text-to-image model, various adapter-based models can be used to control the generation process.
Let's download the models we'll be using in the upcoming examples:
* A highly praised anime-style model with the Stable Diffusion XL architecture
* A ControlNet model that supports multiple control modes
* A LoRA model for the Stable Diffusion XL model
* An IP-Adapter model and its corresponding image encoder
```python
from diffsynth import download_models

download_models([
    "BluePencilXL_v200",
    "ControlNet_union_sdxl_promax",
    "SDXL_lora_zyd232_ChineseInkStyle_SDXL_v1_0",
    "IP-Adapter-SDXL"
])
```
Use the basic text-to-image functionality to generate an image.
```python
from diffsynth import ModelManager, SDXLImagePipeline
import torch
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_models(["models/stanager(model_manager)
torch.manual_seed(1)
image = pipe(
prompt="masterpiece, best quality, solo, long hair, wavy hair, silver hair, blue eyes, blue dress, medium breasts, dress, underwater, air bubble, floating hair, refraction, portrait,",
negative_prompt="worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw,",
cfg_scale=6, num_inference_steps=60,
)
image.save("image.jpg")
```
![image](https://github.com/user-attachments/assets/cc094e8f-ff6a-4f9e-ba05-7a5c2e0e609f)
Next, let's transform this graceful underwater dancer into a fire mage! We'll activate the ControlNet to maintain the structure of the image while modifying the prompt.
```python
from diffsynth import ModelManager, SDXLImagePipeline, ControlNetConfigUnit
import torch
from PIL import Image
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_models([
"models/stable_diffusion_xl/bluePencilXL_v200.safetensors",
"models/ControlNet/controlnet_union/diffusion_pytorch_model_promax.safetensors"
])
pipe = SDXLImagePipeline.from_model_manager(model_manager, controlnet_config_units=[
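    # "depth" extracts a depth map from controlnet_image as the control signal; scale sets how strongly it constrains generation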
ControlNetConfigUnit("depth", "models/ControlNet/controlnet_union/diffusion_pytorch_model_promax.safetensors", scale=1)
])
torch.manual_seed(2)
image = pipe(
prompt="masterpiece, best quality, solo, long hair, wavy hair, pink hair, red eyes, red dress, medium breasts, dress, fire ball, fire background, floating hair, refraction, portrait,",
negative_prompt="worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw, white background",
cfg_scale=6, num_inference_steps=60,
controlnet_image=Image.open("image.jpg")
)
image.save("image_controlnet.jpg")
```
![image_controlnet](https://github.com/user-attachments/assets/d50d173e-e81a-4d7e-93e3-b2787d69953e)
Isn't that cool? There's more! Add a LoRA to bring the image closer to the flat style of hand-drawn comics. This LoRA requires certain trigger words to take effect, as noted on the original author's model page; remember to add the trigger words at the beginning of the prompt.
```python
from diffsynth import ModelManager, SDXLImagePipeline, ControlNetConfigUnit
import torch
from PIL import Image
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_models([
"models/stable_diffusion_xl/bluePencilXL_v200.safetensors",
"models/ControlNet/controlnet_union/diffusion_pytorch_model_promax.safetensors"
])
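# lora_alpha scales how strongly the LoRA weights modify the base model; 1.0 applies the LoRA at full strength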
model_manager.load_lora("models/lora/zyd232_ChineseInkStyle_SDXL_v1_0.safetensors", lora_alpha=1.0)
pipe = SDXLImagePipeline.from_model_manager(model_manager, controlnet_config_units=[
ControlNetConfigUnit("depth", "models/ControlNet/controlnet_union/diffusion_pytorch_model_promax.safetensors", scale=1.0)
])
torch.manual_seed(3)
image = pipe(
prompt="zydink, ink sketch, flat anime, masterpiece, best quality, solo, long hair, wavy hair, pink hair, red eyes, red dress, medium breasts, dress, fire ball, fire background, floating hair, refraction, portrait,",
negative_prompt="worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw, white background",
cfg_scale=6, num_inference_steps=60,
controlnet_image=Image.open("image.jpg")
)
image.save("image_lora.jpg")
```
![image_lora](https://github.com/user-attachments/assets/c599b2f8-8351-4be5-a6ae-8380889cb9d8)
Not done yet! Find a Chinese ink-wash painting as a style guide, activate the IP-Adapter, and let classical art collide with modern aesthetics!
| Let's use this image as a style guide. |![ink_style](https://github.com/user-attachments/assets/e47c5a03-9c7b-402b-b260-d8bfd56abbc5)|
|-|-|
```python
from diffsynth import ModelManager, SDXLImagePipeline, ControlNetConfigUnit
import torch
from PIL import Image
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_models([
"models/stable_diffusion_xl/bluePencilXL_v200.safetensors",
"models/ControlNet/controlnet_union/diffusion_pytorch_model_promax.safetensors",
"models/IpAdapter/stable_diffusion_xl/ip-adapter_sdxl.bin",
"models/IpAdapter/stable_diffusion_xl/image_encoder/model.safetensors",
])
model_manager.load_lora("models/lora/zyd232_ChineseInkStyle_SDXL_v1_0.safetensors", lora_alpha=1.0)
pipe = SDXLImagePipeline.from_model_manager(model_manager, controlnet_config_units=[
ControlNetConfigUnit("depth", "models/ControlNet/controlnet_union/diffusion_pytorch_model_promax.safetensors", scale=1.0)
])
torch.manual_seed(2)
image = pipe(
prompt="zydink, ink sketch, flat anime, masterpiece, best quality, solo, long hair, wavy hair, pink hair, red eyes, red dress, medium breasts, dress, fire ball, fire background, floating hair, refraction, portrait,",
negative_prompt="worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw, white background",
cfg_scale=6, num_inference_steps=60,
controlnet_image=Image.open("image.jpg"),
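    # The IP-Adapter conditions generation on reference images; ipadapter_use_instant_style injects only their style, and ipadapter_scale controls its strength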
ipadapter_images=[Image.open("ink_style.jpg")],
ipadapter_use_instant_style=True, ipadapter_scale=0.5
)
image.save("image_ipadapter.jpg")
```
![image_ipadapter](https://github.com/user-attachments/assets/e5924aef-03b0-4462-811f-a60e2523fd7f)
The joy of generating images with diffusion models lies in combining the many ecosystem models to realize all kinds of creative ideas.

# Text-to-Image, Image-to-Image, and High-Resolution Restoration: A First Encounter with the Dazzling World of Diffusion
Load the text-to-image model; here we use an anime-style model from CivitAI as an example.
```python
import torch
from diffsynth import ModelManager, SDImagePipeline, download_models
download_models(["AingDiffusion_v12"])
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_models(["models/stable_diffusion/aingdiffusion_v12.safetensors"])
pipe = SDImagePipeline.from_model_manager(model_manager)
```
Generate a picture to give it a try.
```python
torch.manual_seed(0)
image = pipe(
prompt="masterpiece, best quality, a girl with long silver hair",
negative_prompt="worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw,",
height=512, width=512, num_inference_steps=80,
)
image.save("image.jpg")
```
Ah, a lovely young lady.
![image](https://github.com/user-attachments/assets/999100d2-1c39-4f18-b37e-aa9d5b4e519c)
Use the image-to-image feature to turn her hair red, simply by adding the `input_image` and `denoising_strength` parameters. `denoising_strength` controls the intensity of the added noise: at 0, the generated image is identical to the input image; at 1, it is generated completely from random noise.
```python
torch.manual_seed(1)
image_edited = pipe(
prompt="masterpiece, best quality, a girl with long red hair",
negative_prompt="worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw,",
height=512, width=512, num_inference_steps=80,
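    # denoising_strength=0.6 keeps the original composition while giving the model room to change the hair color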
input_image=image, denoising_strength=0.6,
)
image_edited.save("image_edited.jpg")
```
Ah, a cute girl with red hair.
![image_edited](https://github.com/user-attachments/assets/e3de8bc1-037f-4d4d-aacf-8919143c2375)
Since the model itself was trained at a resolution of 512×512, the image appears a bit blurry. However, we can use the model's own capabilities to refine the image and add detail: increase the resolution, then run image-to-image generation again.
```python
torch.manual_seed(2)
image_highres = pipe(
prompt="masterpiece, best quality, a girl with long red hair",
negative_prompt="worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw,",
height=1024, width=1024, num_inference_steps=80,
input_image=image_edited.resize((1024, 1024)), denoising_strength=0.6,
)
image_highres.save("image_highres.jpg")
```
Ah, a clear and lovely girl with red hair.
![image_highres](https://github.com/user-attachments/assets/4466353e-662c-49f5-9211-b11bb0bb7fb7)
It's worth noting that image-to-image generation and high-resolution restoration are supported globally; currently, all of our image generation pipelines can be used in this way, as the sketch below illustrates.
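As a minimal sketch of this pattern (the helper name `refine_highres` and its defaults are ours, not part of the library), any pipeline that accepts `input_image` and `denoising_strength` can be wrapped like this:
```python
import torch

def refine_highres(pipe, image, prompt, negative_prompt="", scale=2,
                   denoising_strength=0.6, seed=0, **kwargs):
    # Upscale first, then run image-to-image so the model redraws details at the new resolution.
    width, height = image.size
    torch.manual_seed(seed)
    return pipe(
        prompt=prompt, negative_prompt=negative_prompt,
        height=height * scale, width=width * scale,
        input_image=image.resize((width * scale, height * scale)),
        denoising_strength=denoising_strength,
        **kwargs,
    )

# Equivalent to the high-resolution example above:
image_highres = refine_highres(
    pipe, image_edited,
    prompt="masterpiece, best quality, a girl with long red hair",
    seed=2, num_inference_steps=80,
)
```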

# Translation and Polishing: The Magic of Prompts
When generating images, we need to write prompts to describe the content of the image. Prompts directly affect the outcome of the generation, and crafting them is an art in itself. Good prompts can produce images with a high degree of aesthetic appeal. We offer a range of models to help users handle prompts effectively.
## Translation
Most text-to-image models currently only support English prompts, which can be challenging for users who are not native English speakers. To address this, we can use open-source translation models to translate prompts into English. In the following example, we take "一个女孩" (a girl) as the prompt and use the model opus-mt-zh-en (which can be downloaded from [HuggingFace](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en) or [ModelScope](https://modelscope.cn/models/moxying/opus-mt-zh-en)) for translation.
```python
from diffsynth import ModelManager, SDXLImagePipeline, Translator
import torch
model_manager = ModelManager(
torch_dtype=torch.float16, device="cuda",
model_id_list=["BluePencilXL_v200", "opus-mt-zh-en"]
)
pipe = SDXLImagePipeline.from_model_manager(model_manager, prompt_refiner_classes=[Translator])
torch.manual_seed(0)
prompt = "一个女孩"
image = pipe(
prompt=prompt, negative_prompt="",
height=1024, width=1024, num_inference_steps=30
)
image.save("image_1.jpg")
```
![image_1](https://github.com/user-attachments/assets/c8070a6b-3d2f-4faf-a806-c403b91f1a94)
## Polishing
Detailed prompts generate images with richer detail. We can use a prompt-polishing model such as BeautifulPrompt (which can be downloaded from [ModelScope](https://modelscope.cn/models/AI-ModelScope/pai-bloom-1b1-text2prompt-sd)) to embellish simple prompts; this model tends to make the overall picture style more gorgeous.
This module can be activated together with the translation module, but note the order: translate first, then polish.
```python
from diffsynth import ModelManager, SDXLImagePipeline, Translator, BeautifulPrompt
import torch
model_manager = ModelManager(
torch_dtype=torch.float16, device="cuda",
model_id_list=["BluePencilXL_v200", "opus-mt-zh-en", "BeautifulPrompt"]
)
pipe = SDXLImagePipeline.from_model_manager(model_manager, prompt_refiner_classes=[Translator, BeautifulPrompt])
torch.manual_seed(0)
prompt = "一个女孩"
image = pipe(
prompt=prompt, negative_prompt="",
height=1024, width=1024, num_inference_steps=30
)
image.save("image_2.jpg")
```
![image_2](https://github.com/user-attachments/assets/94f64a7d-b14a-41e2-a013-c9a74635a84d)
We have also integrated a Tongyi Qwen model that can seamlessly complete the translation and polishing of prompts in one step.
```python
from diffsynth import ModelManager, SDXLImagePipeline, QwenPrompt
import torch
model_manager = ModelManager(
torch_dtype=torch.float16, device="cuda",
model_id_list=["BluePencilXL_v200", "QwenPrompt"]
)
pipe = SDXLImagePipeline.from_model_manager(model_manager, prompt_refiner_classes=[QwenPrompt])
torch.manual_seed(0)
prompt = "一个女孩"
image = pipe(
prompt=prompt, negative_prompt="",
height=1024, width=1024, num_inference_steps=30
)
image.save("image_3.jpg")
```
![image_3](https://github.com/user-attachments/assets/fc1a201d-aef1-4e6a-81d6-2e2249ffa230)

# When Image Models Meet AnimateDiff: Model Combination Techniques
We have already witnessed the powerful image generation capabilities of the Stable Diffusion model and its ecosystem models. Now we introduce a new module, AnimateDiff, which lets us transfer the capabilities of image models to video. In this article, we showcase Diffutoon, an anime-style video rendering solution built on DiffSynth-Studio.
## Download Models
The following examples will use many models, so let's download them first.
* An anime-style Stable Diffusion architecture model
* Two ControlNet models
* A Textual Inversion model
* An AnimateDiff model
```python
from diffsynth import download_models
download_models([
"AingDiffusion_v12",
"AnimateDiff_v2",
"ControlNet_v11p_sd15_lineart",
"ControlNet_v11f1e_sd15_tile",
"TextualInversion_VeryBadImageNegative_v1.3"
])
```
## Download Video
You can choose any video you like. We use [this video](https://www.bilibili.com/video/BV1iG411a7sQ) as a demonstration. You can download this video file with the following command, but please note, do not use it for commercial purposes without obtaining the commercial copyright from the original video creator.
```
modelscope download --dataset Artiprocher/examples_in_diffsynth data/examples/diffutoon/input_video.mp4 --local_dir ./
```
## Generate Anime
```python
from diffsynth import ModelManager, SDVideoPipeline, ControlNetConfigUnit, VideoData, save_video
import torch
# Load models
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_models([
"models/stable_diffusion/aingdiffusion_v12.safetensors",
"models/AnimateDiff/mm_sd_v15_v2.ckpt",
"models/ControlNet/control_v11p_sd15_lineart.pth",
"models/ControlNet/control_v11f1e_sd15_tile.pth",
])
# Build pipeline
pipe = SDVideoPipeline.from_model_manager(
model_manager,
[
ControlNetConfigUnit(
processor_id="tile",
model_path="models/ControlNet/control_v11f1e_sd15_tile.pth",
scale=0.5
),
ControlNetConfigUnit(
processor_id="lineart",
model_path="models/ControlNet/control_v11p_sd15_lineart.pth",
scale=0.5
)
]
)
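# The Textual Inversion embedding below provides the trigger word "verybadimagenegative_v1.3" used in the negative prompt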
pipe.prompter.load_textual_inversions(["models/textual_inversion/verybadimagenegative_v1.3.pt"])
# Load video
video = VideoData(
video_file="data/examples/diffutoon/input_video.mp4",
height=1536, width=1536
)
input_video = [video[i] for i in range(30)]
# Generate
torch.manual_seed(0)
output_video = pipe(
prompt="best quality, perfect anime illustration, light, a girl is dancing, smile, solo",
negative_prompt="verybadimagenegative_v1.3",
cfg_scale=7, clip_skip=2,
input_frames=input_video, denoising_strength=1.0,
controlnet_frames=input_video, num_frames=len(input_video),
num_inference_steps=10, height=1536, width=1536,
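    # AnimateDiff renders long videos in overlapping sliding windows: 16 frames per batch, advancing 8 frames each time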
animatediff_batch_size=16, animatediff_stride=8,
)
# Save video
save_video(output_video, "output_video.mp4", fps=30)
```
## Effect Display
<video width="512" height="256" controls>
<source src="https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/b54c05c5-d747-4709-be5e-b39af82404dd" type="video/mp4">
Your browser does not support the Video tag.
</video>

# Training Framework
We have implemented a training framework for text-to-image diffusion models, allowing users to effortlessly train LoRA models with our framework. Our provided scripts come with the following features:
* **Comprehensive Functionality**: Our training framework supports multi-GPU and multi-node configurations, is optimized for acceleration with DeepSpeed, and includes gradient checkpointing to accommodate models with higher memory requirements.
* **Succinct Code**: We have avoided large, complex code blocks. The general module is implemented in `diffsynth/trainers/text_to_image.py`, while model-specific training scripts contain only the minimal code necessary for the model architecture, facilitating ease of use for academic researchers.
* **Modular Design**: Built on the versatile PyTorch-Lightning framework, our training framework is decoupled in functionality, enabling developers to easily incorporate additional training techniques by modifying our scripts to suit their specific needs.
Examples of images fine-tuned with LoRA. The prompts are "一只小狗蹦蹦跳跳,周围是姹紫嫣红的鲜花,远处是山脉" ("a puppy bounding around, surrounded by brilliantly colorful flowers, with mountains in the distance") for Chinese models, or "a dog is jumping, flowers around the dog, the background is mountains and clouds" for English models.
||FLUX.1-dev|Kolors|Stable Diffusion 3|Hunyuan-DiT|
|-|-|-|-|-|
|Without LoRA|![image_without_lora](https://github.com/user-attachments/assets/df62cef6-d54f-4e3d-a602-5dd290079d49)|![image_without_lora](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/9d79ed7a-e8cf-4d98-800a-f182809db318)|![image_without_lora](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/ddb834a5-6366-412b-93dc-6d957230d66e)|![image_without_lora](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/1aa21de5-a992-4b66-b14f-caa44e08876e)|
|With LoRA|![image_with_lora](https://github.com/user-attachments/assets/4fd39890-0291-4d19-8a88-d70d0ae18533)|![image_with_lora](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/02f62323-6ee5-4788-97a1-549732dbe4f0)|![image_with_lora](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/8e7b2888-d874-4da4-a75b-11b6b214b9bf)|![image_with_lora](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/83a0a41a-691f-4610-8e7b-d8e17c50a282)|
## Install Additional Packages
```bash
pip install peft lightning
```
## Prepare the Dataset
We provide an [example dataset](https://modelscope.cn/datasets/buptwq/lora-stable-diffusion-finetune/files). You need to organize your training dataset in the following structure:
```
data/dog/
└── train
├── 00.jpg
├── 01.jpg
├── 02.jpg
├── 03.jpg
├── 04.jpg
└── metadata.csv
```
`metadata.csv`:
```
file_name,text
00.jpg,a dog
01.jpg,a dog
02.jpg,a dog
03.jpg,a dog
04.jpg,a dog
```
Please note that if the model is a Chinese model (e.g., Hunyuan-DiT and Kolors), we recommend using Chinese text in the dataset. For example:
```
file_name,text
00.jpg,一只小狗
01.jpg,一只小狗
02.jpg,一只小狗
03.jpg,一只小狗
04.jpg,一只小狗
```
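If you assemble the captions programmatically, a small script along these lines (a sketch; the path and caption match the English example dataset) writes the required `metadata.csv`:
```python
import csv
import os

image_dir = "data/dog/train"
rows = [(name, "a dog") for name in sorted(os.listdir(image_dir))
        if name.endswith(".jpg")]

with open(os.path.join(image_dir, "metadata.csv"), "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["file_name", "text"])  # required header
    writer.writerows(rows)
```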
## Train LoRA Model
General parameter options:
```
--lora_target_modules LORA_TARGET_MODULES
Layers where the LoRA modules are located.
--dataset_path DATASET_PATH
Path to the dataset.
--output_path OUTPUT_PATH
Path where the model will be saved.
--steps_per_epoch STEPS_PER_EPOCH
Number of steps per epoch.
--height HEIGHT The height of the image.
--width WIDTH The width of the image.
--center_crop Whether to center crop the input image to the specified resolution. If not set, the image will be randomly cropped. The image will be resized to the specified resolution before cropping.
--random_flip Whether to randomly horizontally flip the image.
--batch_size BATCH_SIZE
Batch size for the training data loader (per device).
--dataloader_num_workers DATALOADER_NUM_WORKERS
The number of subprocesses used for data loading. A value of 0 means the data will be loaded in the main process.
--precision {32,16,16-mixed}
The precision for training.
--learning_rate LEARNING_RATE
The learning rate.
--lora_rank LORA_RANK
The dimension of the LoRA update matrix.
--lora_alpha LORA_ALPHA
The weight of the LoRA update matrix.
--use_gradient_checkpointing
Whether to use gradient checkpointing.
--accumulate_grad_batches ACCUMULATE_GRAD_BATCHES
The number of batches for gradient accumulation.
--training_strategy {auto,deepspeed_stage_1,deepspeed_stage_2,deepspeed_stage_3}
The training strategy.
--max_epochs MAX_EPOCHS
The number of training epochs.
--modelscope_model_id MODELSCOPE_MODEL_ID
The model ID on ModelScope (https://www.modelscope.cn/). If the model ID is provided, the model will be automatically uploaded to ModelScope.
```

# Training FLUX LoRA
The following files will be used to build the FLUX model. You can download them from [HuggingFace](https://huggingface.co/black-forest-labs/FLUX.1-dev) or [ModelScope](https://www.modelscope.cn/models/ai-modelscope/flux.1-dev), or use the following code to download these files:
```python
from diffsynth import download_models
download_models(["FLUX.1-dev"])
```
```
models/FLUX/
└── FLUX.1-dev
├── ae.safetensors
├── flux1-dev.safetensors
├── text_encoder
│ └── model.safetensors
└── text_encoder_2
├── config.json
├── model-00001-of-00002.safetensors
├── model-00002-of-00002.safetensors
└── model.safetensors.index.json
```
Start the training task with the following command:
```
CUDA_VISIBLE_DEVICES="0" python examples/train/flux/train_flux_lora.py \
--pretrained_text_encoder_path models/FLUX/FLUX.1-dev/text_encoder/model.safetensors \
--pretrained_text_encoder_2_path models/FLUX/FLUX.1-dev/text_encoder_2 \
--pretrained_dit_path models/FLUX/FLUX.1-dev/flux1-dev.safetensors \
--pretrained_vae_path models/FLUX/FLUX.1-dev/ae.safetensors \
--dataset_path data/dog \
--output_path ./models \
--max_epochs 1 \
--steps_per_epoch 500 \
--height 1024 \
--width 1024 \
--center_crop \
--precision "bf16" \
--learning_rate 1e-4 \
--lora_rank 4 \
--lora_alpha 4 \
--use_gradient_checkpointing
```
For more information on the parameters, please use `python examples/train/flux/train_flux_lora.py -h` to view detailed information.
After the training is complete, use `model_manager.load_lora` to load the LoRA for inference.
```python
from diffsynth import ModelManager, FluxImagePipeline
import torch

model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cuda",
                             file_path_list=[
                                 "models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
                                 "models/FLUX/FLUX.1-dev/text_encoder_2",
                                 "models/FLUX/FLUX.1-dev/ae.safetensors",
                                 "models/FLUX/FLUX.1-dev/flux1-dev.safetensors"
                             ])
model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
pipe = FluxImagePipeline.from_model_manager(model_manager)

torch.manual_seed(0)
image = pipe(
    prompt="a dog is jumping, flowers around the dog, the background is mountains and clouds",
    num_inference_steps=30, embedded_guidance=3.5
)
image.save("image_with_lora.jpg")
```

# Training Hunyuan-DiT LoRA
Building the Hunyuan-DiT model requires four files. You can download them from [HuggingFace](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT) or [ModelScope](https://www.modelscope.cn/models/modelscope/HunyuanDiT/summary), or use the following code to download these files:
```python
from diffsynth import download_models
download_models(["HunyuanDiT"])
```
```
models/HunyuanDiT/
├── Put Hunyuan DiT checkpoints here.txt
└── t2i
├── clip_text_encoder
│ └── pytorch_model.bin
├── model
│ └── pytorch_model_ema.pt
├── mt5
│ └── pytorch_model.bin
└── sdxl-vae-fp16-fix
└── diffusion_pytorch_model.bin
```
Use the following command to start the training task:
```
CUDA_VISIBLE_DEVICES="0" python examples/train/hunyuan_dit/train_hunyuan_dit_lora.py \
--pretrained_path models/HunyuanDiT/t2i \
--dataset_path data/dog \
--output_path ./models \
--max_epochs 1 \
--steps_per_epoch 500 \
--height 1024 \
--width 1024 \
--center_crop \
--precision "16-mixed" \
--learning_rate 1e-4 \
--lora_rank 4 \
--lora_alpha 4 \
--use_gradient_checkpointing
```
For more information about the parameters, please use `python examples/train/hunyuan_dit/train_hunyuan_dit_lora.py -h` to view detailed information.
After the training is complete, use `model_manager.load_lora` to load the LoRA for inference.
```python
from diffsynth import ModelManager, HunyuanDiTImagePipeline
import torch
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
file_path_list=[
"models/HunyuanDiT/t2i/clip_text_encoder/pytorch_model.bin",
"models/HunyuanDiT/t2i/model/pytorch_model_ema.pt",
"models/HunyuanDiT/t2i/mt5/pytorch_model.bin",
"models/HunyuanDiT/t2i/sdxl-vae-fp16-fix/diffusion_pytorch_model.bin"
])
model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
pipe = HunyuanDiTImagePipeline.from_model_manager(model_manager)
torch.manual_seed(0)
image = pipe(
prompt="A little puppy hops and jumps playfully, surrounded by a profusion of colorful flowers, with a mountain range visible in the distance.
",
negative_prompt="",
cfg_scale=7.5,
num_inference_steps=100, width=1024, height=1024,
)
image.save("image_with_lora.jpg")
```

# Training Kolors LoRA
The following files will be used to build Kolors. You can download Kolors from [HuggingFace](https://huggingface.co/Kwai-Kolors/Kolors) or [ModelScope](https://modelscope.cn/models/Kwai-Kolors/Kolors). Due to precision overflow issues, we need to download an additional VAE model from [HuggingFace](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix) or [ModelScope](https://modelscope.cn/models/AI-ModelScope/sdxl-vae-fp16-fix). You can use the following code to download these files:
```python
from diffsynth import download_models
download_models(["Kolors", "SDXL-vae-fp16-fix"])
```
```
models
├── kolors
│ └── Kolors
│ ├── text_encoder
│ │ ├── config.json
│ │ ├── pytorch_model-00001-of-00007.bin
│ │ ├── pytorch_model-00002-of-00007.bin
│ │ ├── pytorch_model-00003-of-00007.bin
│ │ ├── pytorch_model-00004-of-00007.bin
│ │ ├── pytorch_model-00005-of-00007.bin
│ │ ├── pytorch_model-00006-of-00007.bin
│ │ ├── pytorch_model-00007-of-00007.bin
│ │ └── pytorch_model.bin.index.json
│ ├── unet
│ │ └── diffusion_pytorch_model.safetensors
│ └── vae
│ └── diffusion_pytorch_model.safetensors
└── sdxl-vae-fp16-fix
└── diffusion_pytorch_model.safetensors
```
Use the following command to start the training task:
```
CUDA_VISIBLE_DEVICES="0" python examples/train/kolors/train_kolors_lora.py \
--pretrained_unet_path models/kolors/Kolors/unet/diffusion_pytorch_model.safetensors \
--pretrained_text_encoder_path models/kolors/Kolors/text_encoder \
--pretrained_fp16_vae_path models/sdxl-vae-fp16-fix/diffusion_pytorch_model.safetensors \
--dataset_path data/dog \
--output_path ./models \
--max_epochs 1 \
--steps_per_epoch 500 \
--height 1024 \
--width 1024 \
--center_crop \
--precision "16-mixed" \
--learning_rate 1e-4 \
--lora_rank 4 \
--lora_alpha 4 \
--use_gradient_checkpointing
```
For more information on the parameters, please use `python examples/train/kolors/train_kolors_lora.py -h` to view detailed information.
After the training is complete, use `model_manager.load_lora` to load the LoRA for inference.
```python
from diffsynth import ModelManager, SDXLImagePipeline
import torch

# Kolors is built on the SDXL architecture; the fp16-fix VAE stands in for the stock VAE to avoid precision overflow.
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
                             file_path_list=[
                                 "models/kolors/Kolors/text_encoder",
                                 "models/kolors/Kolors/unet/diffusion_pytorch_model.safetensors",
                                 "models/sdxl-vae-fp16-fix/diffusion_pytorch_model.safetensors"
                             ])
model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
pipe = SDXLImagePipeline.from_model_manager(model_manager)
torch.manual_seed(0)
image = pipe(
prompt="a dog is jumping, flowers around the dog, the background is mountains and clouds",
negative_prompt="bad quality, poor quality, doll, disfigured, jpg, toy, bad anatomy, missing limbs, missing fingers, 3d, cgi, extra tails",
cfg_scale=7.5,
num_inference_steps=100, width=1024, height=1024,
)
image.save("image_with_lora.jpg")
```

# Training Stable Diffusion 3 LoRA
The training script only requires one file. You can use [`sd3_medium_incl_clips.safetensors`](https://huggingface.co/stabilityai/stable-diffusion-3-medium/resolve/main/sd3_medium_incl_clips.safetensors) (without the T5 encoder) or [`sd3_medium_incl_clips_t5xxlfp16.safetensors`](https://huggingface.co/stabilityai/stable-diffusion-3-medium/resolve/main/sd3_medium_incl_clips_t5xxlfp16.safetensors) (with the T5 encoder). You can use the following code to download these files:
```python
from diffsynth import download_models
download_models(["StableDiffusion3", "StableDiffusion3_without_T5"])
```
```
models/stable_diffusion_3/
├── Put Stable Diffusion 3 checkpoints here.txt
├── sd3_medium_incl_clips.safetensors
└── sd3_medium_incl_clips_t5xxlfp16.safetensors
```
Use the following command to start the training task:
```
CUDA_VISIBLE_DEVICES="0" python examples/train/stable_diffusion_3/train_sd3_lora.py \
--pretrained_path models/stable_diffusion_3/sd3_medium_incl_clips.safetensors \
--dataset_path data/dog \
--output_path ./models \
--max_epochs 1 \
--steps_per_epoch 500 \
--height 1024 \
--width 1024 \
--center_crop \
--precision "16-mixed" \
--learning_rate 1e-4 \
--lora_rank 4 \
--lora_alpha 4 \
--use_gradient_checkpointing
```
For more information on the parameters, please use `python examples/train/stable_diffusion_3/train_sd3_lora.py -h` to view detailed information.
After training is completed, use `model_manager.load_lora` to load LoRA for inference.
```python
from diffsynth import ModelManager, SD3ImagePipeline
import torch
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
file_path_list=["models/stable_diffusion_3/sd3_medium_incl_clips.safetensors"])
model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
pipe = SD3ImagePipeline.from_model_manager(model_manager)
torch.manual_seed(0)
image = pipe(
prompt="a dog is jumping, flowers around the dog, the background is mountains and clouds",
negative_prompt="bad quality, poor quality, doll, disfigured, jpg, toy, bad anatomy, missing limbs, missing fingers, 3d, cgi, extra tails",
cfg_scale=7.5,
num_inference_steps=100, width=1024, height=1024,
)
image.save("image_with_lora.jpg")
```

# Training Stable Diffusion LoRA
The training script only requires one file. We support mainstream checkpoints on [CivitAI](https://civitai.com/). By default, we use the basic Stable Diffusion v1.5. You can download it from [HuggingFace](https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors) or [ModelScope](https://www.modelscope.cn/models/AI-ModelScope/stable-diffusion-v1-5/resolve/master/v1-5-pruned-emaonly.safetensors). You can use the following code to download this file:
```python
from diffsynth import download_models
download_models(["StableDiffusion_v15"])
```
```
models/stable_diffusion
├── Put Stable Diffusion checkpoints here.txt
└── v1-5-pruned-emaonly.safetensors
```
Start the training task with the following command:
```
CUDA_VISIBLE_DEVICES="0" python examples/train/stable_diffusion/train_sd_lora.py \
--pretrained_path models/stable_diffusion/v1-5-pruned-emaonly.safetensors \
--dataset_path data/dog \
--output_path ./models \
--max_epochs 1 \
--steps_per_epoch 500 \
--height 512 \
--width 512 \
--center_crop \
--precision "16-mixed" \
--learning_rate 1e-4 \
--lora_rank 4 \
--lora_alpha 4 \
--use_gradient_checkpointing
```
For more information about the parameters, please use `python examples/train/stable_diffusion/train_sd_lora.py -h` to view detailed information.
After training is complete, use `model_manager.load_lora` to load LoRA for inference.
```python
from diffsynth import ModelManager, SDImagePipeline
import torch
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
file_path_list=["models/stable_diffusion/v1-5-pruned-emaonly.safetensors"])
model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
pipe = SDImagePipeline.from_model_manager(model_manager)
torch.manual_seed(0)
image = pipe(
prompt="a dog is jumping, flowers around the dog, the background is mountains and clouds",
negative_prompt="bad quality, poor quality, doll, disfigured, jpg, toy, bad anatomy, missing limbs, missing fingers, 3d, cgi, extra tails",
cfg_scale=7.5,
num_inference_steps=100, width=512, height=512,
)
image.save("image_with_lora.jpg")
```

# Training Stable Diffusion XL LoRA
The training script only requires one file. We support mainstream checkpoints on [CivitAI](https://civitai.com/). By default, we use the base Stable Diffusion XL. You can download it from [HuggingFace](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors) or [ModelScope](https://www.modelscope.cn/models/AI-ModelScope/stable-diffusion-xl-base-1.0/resolve/master/sd_xl_base_1.0.safetensors). You can also use the following code to download this file:
```python
from diffsynth import download_models
download_models(["StableDiffusionXL_v1"])
```
```
models/stable_diffusion_xl
├── Put Stable Diffusion XL checkpoints here.txt
└── sd_xl_base_1.0.safetensors
```
We have observed that Stable Diffusion XL may experience numerical precision overflows when using float16 precision, so we recommend that users train with float32 precision. To start the training task, use the following command:
```
CUDA_VISIBLE_DEVICES="0" python examples/train/stable_diffusion_xl/train_sdxl_lora.py \
--pretrained_path models/stable_diffusion_xl/sd_xl_base_1.0.safetensors \
--dataset_path data/dog \
--output_path ./models \
--max_epochs 1 \
--steps_per_epoch 500 \
--height 1024 \
--width 1024 \
--center_crop \
--precision "32" \
--learning_rate 1e-4 \
--lora_rank 4 \
--lora_alpha 4 \
--use_gradient_checkpointing
```
For more information about the parameters, please use `python examples/train/stable_diffusion_xl/train_sdxl_lora.py -h` to view detailed information.
After training is complete, use `model_manager.load_lora` to load LoRA for inference.
```python
from diffsynth import ModelManager, SDXLImagePipeline
import torch
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
file_path_list=["models/stable_diffusion_xl/sd_xl_base_1.0.safetensors"])
model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
pipe = SDXLImagePipeline.from_model_manager(model_manager)
torch.manual_seed(0)
image = pipe(
prompt="a dog is jumping, flowers around the dog, the background is mountains and clouds",
negative_prompt="bad quality, poor quality, doll, disfigured, jpg, toy, bad anatomy, missing limbs, missing fingers, 3d, cgi, extra tails",
cfg_scale=7.5,
num_inference_steps=100, width=1024, height=1024,
)
image.save("image_with_lora.jpg")
```

`docs/source_en/index.rst`:

```rst
.. DiffSynth-Studio documentation master file, created by
   sphinx-quickstart on Thu Sep 5 16:39:24 2024.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

DiffSynth-Studio documentation
==============================

Add your content using ``reStructuredText`` syntax. See the
`reStructuredText <https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html>`_
documentation for details.

.. toctree::
   :maxdepth: 1
   :caption: Contents:

   GetStarted/A_simple_example.md
   GetStarted/Download_models.md
   GetStarted/ModelManager.md
   GetStarted/Models.md
   GetStarted/Pipelines.md
   GetStarted/PromptProcessing.md
   GetStarted/Schedulers.md
   GetStarted/Fine-tuning.md
   GetStarted/Extensions.md
   GetStarted/WebUI.md

.. toctree::
   :maxdepth: 1
   :caption: API Docs

```

Documentation build requirements (the docs' `requirements.txt`):

```
recommonmark
sphinx_rtd_theme
myst-parser
sphinx-markdown-tables

```
# Quick Start
In this document, we introduce how to quickly get started creating with DiffSynth-Studio through a short piece of code.
## Installation
Use the following command to clone and install DiffSynth-Studio from GitHub. For more information, please refer to [Installation](./Installation.md).
```shell
git clone https://github.com/modelscope/DiffSynth-Studio.git
cd DiffSynth-Studio
pip install -e .
```
## One-click Run!
Running the following code will download the model, load it, and generate an image.
```python
import torch
from diffsynth import ModelManager, FluxImagePipeline
model_manager = ModelManager(
torch_dtype=torch.bfloat16,
device="cuda",
model_id_list=["FLUX.1-dev"]
)
pipe = FluxImagePipeline.from_model_manager(model_manager)
torch.manual_seed(0)
image = pipe(
prompt="In a forest, a wooden plank sign reading DiffSynth",
height=576, width=1024,
)
image.save("image.jpg")
```
![image](https://github.com/user-attachments/assets/15a52a2b-2f18-46fe-810c-cb3ad2853919)
From this example, we can see that there are two key modules in DiffSynth: `ModelManager` and `Pipeline`. We will introduce them in detail next.
## Downloading and Loading Models
`ModelManager` is responsible for downloading and loading models, which can be done in one step with the following code.
```python
import torch
from diffsynth import ModelManager
model_manager = ModelManager(
torch_dtype=torch.bfloat16,
device="cuda",
model_id_list=["FLUX.1-dev"]
)
```
Of course, you can also complete this step by step; the following code is equivalent to the above.
```python
import torch
from diffsynth import download_models, ModelManager
download_models(["FLUX.1-dev"])
model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cuda")
model_manager.load_models([
"models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
"models/FLUX/FLUX.1-dev/text_encoder_2",
"models/FLUX/FLUX.1-dev/ae.safetensors",
"models/FLUX/FLUX.1-dev/flux1-dev.safetensors"
])
```
When downloading models, we support both [ModelScope](https://www.modelscope.cn/) and [HuggingFace](https://huggingface.co/) as download sources, and non-preset models can be downloaded as well. For more information about model downloading, please refer to [Model Download](./DownloadModels.md).
When loading models, you can pass in all the model paths you want to load. For model weight files in formats such as `.safetensors`, `ModelManager` automatically determines the model type after loading; for folder-format models, `ModelManager` parses the `config.json` file inside and calls the corresponding module from third-party libraries such as `transformers`. For the models supported by DiffSynth-Studio, please refer to [Supported Models](./Models.md).
## Building Pipeline
DiffSynth-Studio provides multiple inference `Pipeline`s, which can obtain the required models directly from a `ModelManager` and initialize themselves. For example, the text-to-image `Pipeline` for the FLUX.1-dev model can be constructed as follows:
```python
pipe = FluxImagePipeline.from_model_manager(model_manager)
```
For more `Pipeline`s used for image generation and video generation, see [Inference Pipelines](./Pipelines.md).

# Download Models
We have preset download links for some mainstream Diffusion models in DiffSynth-Studio, which you can download and use directly.
## Download Preset Models
You can directly use the `download_models` function to download the preset model files, where the model ID can refer to the [config file](/diffsynth/configs/model_config.py).
```python
from diffsynth import download_models
download_models(["FLUX.1-dev"])
```
For VSCode users, once Pylance or another Python language service is active, typing `""` in the code will display all supported model IDs.
![image](https://github.com/user-attachments/assets/2bbfec32-e015-45a7-98d9-57af13200b7c)
## Download Non-Preset Models
You can select models from two download sources: [ModelScope](https://modelscope.cn/models) and [HuggingFace](https://huggingface.co/models). Of course, you can also manually download the models you need through browsers or other tools.
```python
from diffsynth import download_customized_models
download_customized_models(
model_id="Kwai-Kolors/Kolors",
origin_file_path="vae/diffusion_pytorch_model.fp16.bin",
local_dir="models/kolors/Kolors/vae",
downloading_priority=["ModelScope", "HuggingFace"]
)
```
In this code snippet, we download according to the priority in `downloading_priority` (ModelScope first), fetching the file `vae/diffusion_pytorch_model.fp16.bin` from the repository with ID `Kwai-Kolors/Kolors` in the [model library](https://modelscope.cn/models/Kwai-Kolors/Kolors) to the local path `models/kolors/Kolors/vae`.

# Extension Features
This document introduces some technologies related to the Diffusion models implemented in DiffSynth, which have significant application potential in image and video processing.
- **[RIFE](https://github.com/hzwer/ECCV2022-RIFE)**: RIFE is a frame interpolation method based on real-time intermediate flow estimation. It uses a model with an IFNet structure that can quickly estimate intermediate flows end-to-end. RIFE does not rely on pre-trained optical flow models and supports frame interpolation at arbitrary time steps, processing through time-encoded inputs.
In this code snippet, we use the RIFE model to double the frame rate of a video.
```python
from diffsynth import VideoData, ModelManager, save_video
from diffsynth.extensions.RIFE import RIFEInterpolater
model_manager = ModelManager(model_id_list=["RIFE"])
rife = RIFEInterpolater.from_model_manager(model_manager)
video = VideoData("input_video.mp4", height=512, width=768).raw_data()
video = rife.interpolate(video)
save_video(video, "output_video.mp4", fps=60)
```
- **[ESRGAN](https://github.com/xinntao/ESRGAN)**: ESRGAN is an image super-resolution model that can achieve a fourfold increase in resolution. This method significantly enhances the realism of generated images by optimizing network architecture, adversarial loss, and perceptual loss.
In this code snippet, we use the ESRGAN model to quadruple the resolution of an image.
```python
from PIL import Image
from diffsynth import ModelManager
from diffsynth.extensions.ESRGAN import ESRGAN
model_manager = ModelManager(model_id_list=["ESRGAN_x4"])
esrgan = ESRGAN.from_model_manager(model_manager)
image = Image.open("input_image.jpg")
image = esrgan.upscale(image)
image.save("output_image.jpg")
```
- **[FastBlend](https://arxiv.org/abs/2311.09265)**: FastBlend is a model-free video de-flickering algorithm. Flicker often occurs in style videos processed frame by frame using image generation models. FastBlend can eliminate flicker in style videos based on the motion features in the original video (guide video).
In this code snippet, we use FastBlend to remove the flicker effect from a style video.
```python
from diffsynth import VideoData, save_video
from diffsynth.extensions.FastBlend import FastBlendSmoother
fastblend = FastBlendSmoother()
guide_video = VideoData("guide_video.mp4", height=512, width=768).raw_data()
style_video = VideoData("style_video.mp4", height=512, width=768).raw_data()
output_video = fastblend(style_video, original_frames=guide_video)
save_video(output_video, "output_video.mp4", fps=30)
```

# Installation
Currently, DiffSynth-Studio supports installation by cloning from GitHub or via pip. We recommend cloning from GitHub to experience the latest features.
## From Source
1. Clone the source repository:
```bash
git clone https://github.com/modelscope/DiffSynth-Studio.git
```
2. Navigate to the project directory and install:
```bash
cd DiffSynth-Studio
pip install -e .
```
## From PyPI
Install directly via PyPI:
```bash
pip install diffsynth
```

# Models
So far, the models supported by DiffSynth-Studio are as follows:
* [CogVideoX](https://huggingface.co/THUDM/CogVideoX-5b)
* [FLUX](https://huggingface.co/black-forest-labs/FLUX.1-dev)
* [ExVideo](https://huggingface.co/ECNU-CILab/ExVideo-SVD-128f-v1)
* [Kolors](https://huggingface.co/Kwai-Kolors/Kolors)
* [Stable Diffusion 3](https://huggingface.co/stabilityai/stable-diffusion-3-medium)
* [Stable Video Diffusion](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt)
* [Hunyuan-DiT](https://github.com/Tencent/HunyuanDiT)
* [RIFE](https://github.com/hzwer/ECCV2022-RIFE)
* [ESRGAN](https://github.com/xinntao/ESRGAN)
* [IP-Adapter](https://github.com/tencent-ailab/IP-Adapter)
* [AnimateDiff](https://github.com/guoyww/animatediff/)
* [ControlNet](https://github.com/lllyasviel/ControlNet)
* [Stable Diffusion XL](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
* [Stable Diffusion](https://huggingface.co/runwayml/stable-diffusion-v1-5)

# Pipelines
DiffSynth-Studio includes multiple pipelines, categorized into two types: image generation and video generation.
## Image Pipelines
| Pipeline | Models |
|----------------------------|----------------------------------------------------------------|
| SDImagePipeline | text_encoder: SDTextEncoder<br>unet: SDUNet<br>vae_decoder: SDVAEDecoder<br>vae_encoder: SDVAEEncoder<br>controlnet: MultiControlNetManager<br>ipadapter_image_encoder: IpAdapterCLIPImageEmbedder<br>ipadapter: SDIpAdapter |
| SDXLImagePipeline | text_encoder: SDXLTextEncoder<br>text_encoder_2: SDXLTextEncoder2<br>text_encoder_kolors: ChatGLMModel<br>unet: SDXLUNet<br>vae_decoder: SDXLVAEDecoder<br>vae_encoder: SDXLVAEEncoder<br>controlnet: MultiControlNetManager<br>ipadapter_image_encoder: IpAdapterXLCLIPImageEmbedder<br>ipadapter: SDXLIpAdapter |
| SD3ImagePipeline | text_encoder_1: SD3TextEncoder1<br>text_encoder_2: SD3TextEncoder2<br>text_encoder_3: SD3TextEncoder3<br>dit: SD3DiT<br>vae_decoder: SD3VAEDecoder<br>vae_encoder: SD3VAEEncoder |
| HunyuanDiTImagePipeline | text_encoder: HunyuanDiTCLIPTextEncoder<br>text_encoder_t5: HunyuanDiTT5TextEncoder<br>dit: HunyuanDiT<br>vae_decoder: SDVAEDecoder<br>vae_encoder: SDVAEEncoder |
| FluxImagePipeline | text_encoder_1: FluxTextEncoder1<br>text_encoder_2: FluxTextEncoder2<br>dit: FluxDiT<br>vae_decoder: FluxVAEDecoder<br>vae_encoder: FluxVAEEncoder |
## Video Pipelines
| Pipeline | Models |
|----------------------------|----------------------------------------------------------------|
| SDVideoPipeline | text_encoder: SDTextEncoder<br>unet: SDUNet<br>vae_decoder: SDVAEDecoder<br>vae_encoder: SDVAEEncoder<br>controlnet: MultiControlNetManager<br>ipadapter_image_encoder: IpAdapterCLIPImageEmbedder<br>ipadapter: SDIpAdapter<br>motion_modules: SDMotionModel |
| SDXLVideoPipeline | text_encoder: SDXLTextEncoder<br>text_encoder_2: SDXLTextEncoder2<br>text_encoder_kolors: ChatGLMModel<br>unet: SDXLUNet<br>vae_decoder: SDXLVAEDecoder<br>vae_encoder: SDXLVAEEncoder<br>ipadapter_image_encoder: IpAdapterXLCLIPImageEmbedder<br>ipadapter: SDXLIpAdapter<br>motion_modules: SDXLMotionModel |
| SVDVideoPipeline | image_encoder: SVDImageEncoder<br>unet: SVDUNet<br>vae_encoder: SVDVAEEncoder<br>vae_decoder: SVDVAEDecoder |
| CogVideoPipeline | text_encoder: FluxTextEncoder2<br>dit: CogDiT<br>vae_encoder: CogVAEEncoder<br>vae_decoder: CogVAEDecoder |

# Prompt Processing
DiffSynth includes prompt processing functionality, which is divided into:
- **Prompt Refiners (`prompt_refiner_classes`)**: Includes prompt refinement, prompt translation from Chinese to English, and both refinement and translation of prompts. Available parameters are as follows:
- **English Prompt Refinement**: 'BeautifulPrompt', using the model [pai-bloom-1b1-text2prompt-sd](https://modelscope.cn/models/AI-ModelScope/pai-bloom-1b1-text2prompt-sd).
- **Prompt Translation from Chinese to English**: 'Translator', using the model [opus-mt-zh-en](https://modelscope.cn/models/moxying/opus-mt-zh-en).
- **Prompt Translation and Refinement**: 'QwenPrompt', using the model [Qwen2-1.5B-Instruct](https://modelscope.cn/models/qwen/Qwen2-1.5B-Instruct).
- **Prompt Extenders (`prompt_extender_classes`)**: Prompt expansion with partition control, based on Omost. The available parameter is:
- **Prompt Partition Expansion**: 'OmostPromter'.
## Usage Instructions
### Prompt Refiners
When loading the model pipeline, you can specify the desired prompt refiner functionality using the `prompt_refiner_classes` parameter. For example code, refer to [sd_prompt_refining.py](examples/image_synthesis/sd_prompt_refining.py).
Available `prompt_refiner_classes` parameters include: Translator, BeautifulPrompt, QwenPrompt.
```python
pipe = SDXLImagePipeline.from_model_manager(model_manager, prompt_refiner_classes=[Translator, BeautifulPrompt])
```
### Prompt Extenders
When loading the model pipeline, you can specify the desired prompt extender using the `prompt_extender_classes` parameter. For example code, refer to [omost_flux_text_to_image.py](examples/image_synthesis/omost_flux_text_to_image.py).
```python
pipe = FluxImagePipeline.from_model_manager(model_manager, prompt_extender_classes=[OmostPromter])
```

# Schedulers
Schedulers control the entire denoising (or sampling) process of the model. When loading a Pipeline, DiffSynth automatically selects the most suitable scheduler for it, **requiring no additional configuration**.
The supported schedulers are:
- **EnhancedDDIMScheduler**: Extends the denoising process introduced in the Denoising Diffusion Probabilistic Models (DDPM) with non-Markovian guidance.
- **FlowMatchScheduler**: Implements the flow matching sampling method introduced in [Stable Diffusion 3](https://arxiv.org/abs/2403.03206).
- **ContinuousODEScheduler**: A scheduler based on Ordinary Differential Equations (ODE).
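To make the scheduler's role concrete, here is a self-contained toy sketch (illustrative only, not the DiffSynth-Studio API) of how a DDIM-style scheduler chooses timesteps and updates the sample; `toy_model` stands in for a real noise-prediction network:
```python
import torch

def toy_model(x, t):
    # Stand-in for a real noise-prediction network (e.g., a UNet or DiT).
    return 0.1 * x

# Toy noise schedule: cumulative products of (1 - beta), as in DDPM/DDIM.
num_train_steps = 1000
betas = torch.linspace(1e-4, 0.02, num_train_steps)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def ddim_step(x, eps, t, t_prev):
    # Deterministic DDIM update: estimate x0 from the noise prediction, then re-noise to t_prev.
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[t_prev] if t_prev >= 0 else torch.tensor(1.0)
    x0 = (x - (1 - a_t).sqrt() * eps) / a_t.sqrt()
    return a_prev.sqrt() * x0 + (1 - a_prev).sqrt() * eps

# The scheduler's job: pick a subset of timesteps and apply the update rule at each one.
timesteps = torch.linspace(num_train_steps - 1, 0, 20).long()
x = torch.randn(1, 4, 8, 8)  # start from pure noise in latent space
for i, t in enumerate(timesteps):
    t_prev = timesteps[i + 1] if i + 1 < len(timesteps) else torch.tensor(-1)
    eps = toy_model(x, t)
    x = ddim_step(x, eps, t, t_prev)
```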