Merge pull request #101 from modelscope/Artiprocher-sd3-lora

Support SD3 LoRA
This commit is contained in:
Zhongjie Duan
2024-07-10 13:42:54 +08:00
committed by GitHub
13 changed files with 554 additions and 127 deletions

View File

@@ -80,15 +80,15 @@ https://github.com/modelscope/DiffSynth-Studio/assets/35051019/d97f6aa9-8064-4b5
### Image Synthesis
Generate high-resolution images, by breaking the limitation of diffusion models! [`examples/image_synthesis`](./examples/image_synthesis/)
Generate high-resolution images, by breaking the limitation of diffusion models! [`examples/image_synthesis`](./examples/image_synthesis/).
|512*512|1024*1024|2048*2048|4096*4096|
|-|-|-|-|
|![512](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/55f679e9-7445-4605-9315-302e93d11370)|![1024](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/6fc84611-8da6-4a1f-8fee-9a34eba3b4a5)|![2048](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/9087a73c-9164-4c58-b2a0-effc694143fb)|![4096](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/edee9e71-fc39-4d1c-9ca9-fa52002c67ac)|
LoRA fine-tuning is supported in [`examples/train`](./examples/train/).
|1024*1024|2048*2048|
|Stable Diffusion|Stable Diffusion XL|
|-|-|
|![1024](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/67687748-e738-438c-aee5-96096f09ac90)|![2048](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/584186bc-9855-4140-878e-99541f9a757f)|
|![1024](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/6fc84611-8da6-4a1f-8fee-9a34eba3b4a5)|![1024](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/67687748-e738-438c-aee5-96096f09ac90)|
|Stable Diffusion 3|Hunyuan-DiT|
|![image_1024](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/4df346db-6f91-420a-b4c1-26e205376098)|![image_1024](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/60b022c8-df3f-4541-95ab-bf39f2fa8bb5)|
### Toon Shading
@@ -104,22 +104,6 @@ Video stylization without video models. [`examples/diffsynth`](./examples/diffsy
https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/59fb2f7b-8de0-4481-b79f-0c3a7361a1ea
### Chinese Models
Use Hunyuan-DiT to generate images with Chinese prompts. We also support LoRA fine-tuning of this model. [`examples/hunyuan_dit`](./examples/hunyuan_dit/)
Prompt: 少女手捧鲜花,坐在公园的长椅上,夕阳的余晖洒在少女的脸庞,整个画面充满诗意的美感 (a girl holding flowers, sitting on a park bench, the sunset's afterglow falling on her face, the whole scene full of poetic beauty)
|1024x1024|2048x2048 (highres-fix)|
|-|-|
|![image_1024](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/2b6528cf-a229-46e9-b7dd-4a9475b07308)|![image_2048](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/11d264ec-966b-45c9-9804-74b60428b866)|
Prompt: 一只小狗蹦蹦跳跳,周围是姹紫嫣红的鲜花,远处是山脉 (a puppy bouncing around, surrounded by colorful flowers, with mountains in the distance)
|Without LoRA|With LoRA|
|-|-|
|![image_without_lora](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/1aa21de5-a992-4b66-b14f-caa44e08876e)|![image_with_lora](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/83a0a41a-691f-4610-8e7b-d8e17c50a282)|
## Usage (in WebUI)
```

View File

@@ -567,7 +567,7 @@ class ModelManager:
if component == "sd3_text_encoder_3":
if "text_encoders.t5xxl.transformer.encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight" not in state_dict:
continue
elif component == "sd3_text_encoder_1":
if component == "sd3_text_encoder_1":
# Add additional token embeddings to text encoder
token_embeddings = [state_dict["text_encoders.clip_l.transformer.text_model.embeddings.token_embedding.weight"]]
for keyword in self.textual_inversion_dict:

View File

@@ -199,7 +199,7 @@ class SD3DiT(torch.nn.Module):
        )
        return hidden_states

    def forward(self, hidden_states, timestep, prompt_emb, pooled_prompt_emb, tiled=False, tile_size=128, tile_stride=64):
    def forward(self, hidden_states, timestep, prompt_emb, pooled_prompt_emb, tiled=False, tile_size=128, tile_stride=64, use_gradient_checkpointing=False):
        if tiled:
            return self.tiled_forward(hidden_states, timestep, prompt_emb, pooled_prompt_emb, tile_size, tile_stride)
        conditioning = self.time_embedder(timestep, hidden_states.dtype) + self.pooled_text_embedder(pooled_prompt_emb)
@@ -207,8 +207,22 @@ class SD3DiT(torch.nn.Module):
        height, width = hidden_states.shape[-2:]
        hidden_states = self.pos_embedder(hidden_states)

        def create_custom_forward(module):
            def custom_forward(*inputs):
                return module(*inputs)
            return custom_forward

        for block in self.blocks:
            hidden_states, prompt_emb = block(hidden_states, prompt_emb, conditioning)
            if self.training and use_gradient_checkpointing:
                hidden_states, prompt_emb = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states, prompt_emb, conditioning,
                    use_reentrant=False,
                )
            else:
                hidden_states, prompt_emb = block(hidden_states, prompt_emb, conditioning)

        hidden_states = self.norm_out(hidden_states, conditioning)
        hidden_states = self.proj_out(hidden_states)
        hidden_states = rearrange(hidden_states, "B (H W) (P Q C) -> B C (H P) (W Q)", P=2, Q=2, H=height//2, W=width//2)
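Gradient checkpointing trades compute for memory: each block's activations are recomputed during the backward pass instead of being stored. A minimal standalone sketch of the same `torch.utils.checkpoint` pattern, with a toy module in place of a DiT block (illustrative only):
```python
import torch

# Toy stand-in for a transformer block: any module whose activations dominate memory.
block = torch.nn.Linear(64, 64)
x = torch.randn(8, 64, requires_grad=True)

def create_custom_forward(module):
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

# Activations inside `block` are recomputed in backward instead of stored.
y = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False)
y.sum().backward()
```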

View File

@@ -69,7 +69,7 @@ class SD3Prompter(Prompter):
        # T5
        if text_encoder_3 is None:
            prompt_emb_3 = torch.zeros((1, 256, 4096), dtype=prompt_emb_1.dtype, device=device)
            prompt_emb_3 = torch.zeros((prompt_emb_1.shape[0], 256, 4096), dtype=prompt_emb_1.dtype, device=device)
        else:
            prompt_emb_3 = self.encode_prompt_using_t5(pure_prompt, text_encoder_3, self.tokenizer_3, 256, device)
            prompt_emb_3 = prompt_emb_3.to(prompt_emb_1.dtype) # float32 -> float16

View File

@@ -124,6 +124,13 @@ but make sure there is a correlation between the input and output.\n\
        return prompt

    def process_prompt(self, prompt, positive=True, require_pure_prompt=False):
        if isinstance(prompt, list):
            prompt = [self.process_prompt(prompt_, positive=positive, require_pure_prompt=require_pure_prompt) for prompt_ in prompt]
            if require_pure_prompt:
                prompt, pure_prompt = [i[0] for i in prompt], [i[1] for i in prompt]
                return prompt, pure_prompt
            else:
                return prompt
        prompt, pure_prompt = self.add_textual_inversion_tokens(prompt), self.del_textual_inversion_tokens(prompt)
        if positive and self.translator is not None:
            prompt = self.translator(prompt)

View File

@@ -40,3 +40,8 @@ class FlowMatchScheduler():
        sigma = self.sigmas[timestep_id]
        sample = (1 - sigma) * original_samples + sigma * noise
        return sample

    def training_target(self, sample, noise, timestep):
        target = noise - sample
        return target
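For context, this target follows from the interpolation in `add_noise` above: differentiating x_t = (1 - sigma) * x_0 + sigma * noise with respect to sigma gives the constant velocity noise - x_0, which is exactly what `training_target` returns. A quick numerical sanity check (illustrative only):
```python
import torch

x0, noise = torch.randn(4), torch.randn(4)

def x_t(sigma):  # the interpolation used by add_noise
    return (1 - sigma) * x0 + sigma * noise

eps = 1e-4
velocity = (x_t(0.3 + eps) - x_t(0.3)) / eps  # finite-difference d(x_t)/d(sigma)
assert torch.allclose(velocity, noise - x0, atol=1e-2)
```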

View File

@@ -1,10 +1,10 @@
# Image Synthesis
Image synthesis is the base feature of DiffSynth Studio.
Image synthesis is the base feature of DiffSynth Studio. We can generate images with very high resolution.
### Example: Stable Diffusion
We can generate images with very high resolution. Please see [`sd_text_to_image.py`](./sd_text_to_image.py) for more details.
Example script: [`sd_text_to_image.py`](./sd_text_to_image.py)
|512*512|1024*1024|2048*2048|4096*4096|
|-|-|-|-|
@@ -12,7 +12,7 @@ We can generate images with very high resolution. Please see [`sd_text_to_image.
### Example: Stable Diffusion XL
Generate images with Stable Diffusion XL. Please see [`sdxl_text_to_image.py`](./sdxl_text_to_image.py) for more details.
Example script: [`sdxl_text_to_image.py`](./sdxl_text_to_image.py)
|1024*1024|2048*2048|
|-|-|
@@ -20,15 +20,29 @@ Generate images with Stable Diffusion XL. Please see [`sdxl_text_to_image.py`](.
### Example: Stable Diffusion 3
Generate images with Stable Diffusion 3. High resolution is also supported in this model. See [`sd3_text_to_image.py`](./sd3_text_to_image.py).
Example script: [`sd3_text_to_image.py`](./sd3_text_to_image.py)
LoRA Training: [`../train/stable_diffusion_3/`](../train/stable_diffusion_3/)
|1024*1024|2048*2048|
|-|-|
|![image_1024](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/4df346db-6f91-420a-b4c1-26e205376098)|![image_2048](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/1386c802-e580-4101-939d-f1596802df9d)|
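For a quick start, a minimal sketch in the style of `sd3_text_to_image.py` (1024*1024, without the T5 encoder; the prompt is illustrative):
```python
from diffsynth import ModelManager, SD3ImagePipeline, download_models
import torch

# Download the checkpoint without the T5 encoder, then build the pipeline.
download_models(["StableDiffusion3_without_T5"])
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
                             file_path_list=["models/stable_diffusion_3/sd3_medium_incl_clips.safetensors"])
pipe = SD3ImagePipeline.from_model_manager(model_manager)

torch.manual_seed(0)
image = pipe(
    prompt="a dog is jumping, flowers around the dog, the background is mountains and clouds",
    cfg_scale=7.5, num_inference_steps=50, height=1024, width=1024,
)
image.save("image_1024.png")
```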
### Example: Hunyuan-DiT
Example script: [`hunyuan_dit_text_to_image.py`](./hunyuan_dit_text_to_image.py)
LoRA Training: [`../train/hunyuan_dit/`](../train/hunyuan_dit/)
|1024*1024|2048*2048|
|-|-|
|![image_1024](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/60b022c8-df3f-4541-95ab-bf39f2fa8bb5)|![image_2048](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/87919ea8-d428-4963-8257-da05f3901bbb)|
### Example: Stable Diffusion XL Turbo
Generate images with Stable Diffusion XL Turbo. You can see [`sdxl_turbo.py`](./sdxl_turbo.py) for more details, but we highly recommend you to use it in the WebUI.
Example script: [`sdxl_turbo.py`](./sdxl_turbo.py)
We highly recommend using this model in the WebUI.
|"black car"|"red car"|
|-|-|

View File

@@ -0,0 +1,42 @@
from diffsynth import ModelManager, HunyuanDiTImagePipeline, download_models
import torch

# Download models (automatically)
# `models/HunyuanDiT/t2i/clip_text_encoder/pytorch_model.bin`: [link](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT/resolve/main/t2i/clip_text_encoder/pytorch_model.bin)
# `models/HunyuanDiT/t2i/mt5/pytorch_model.bin`: [link](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT/resolve/main/t2i/mt5/pytorch_model.bin)
# `models/HunyuanDiT/t2i/model/pytorch_model_ema.pt`: [link](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT/resolve/main/t2i/model/pytorch_model_ema.pt)
# `models/HunyuanDiT/t2i/sdxl-vae-fp16-fix/diffusion_pytorch_model.bin`: [link](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT/resolve/main/t2i/sdxl-vae-fp16-fix/diffusion_pytorch_model.bin)
download_models(["HunyuanDiT"])

# Load models
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_models([
    "models/HunyuanDiT/t2i/clip_text_encoder/pytorch_model.bin",
    "models/HunyuanDiT/t2i/mt5/pytorch_model.bin",
    "models/HunyuanDiT/t2i/model/pytorch_model_ema.pt",
    "models/HunyuanDiT/t2i/sdxl-vae-fp16-fix/diffusion_pytorch_model.bin"
])
pipe = HunyuanDiTImagePipeline.from_model_manager(model_manager)

# Prompt: a poetic full-body portrait of a silver-haired, blue-eyed girl in a blue dress, floating underwater among shimmering bubbles, warm sunlight refracting through the water's surface
prompt = "一幅充满诗意美感的全身肖像画,画中一位银发、蓝色眼睛、身穿蓝色连衣裙的少女漂浮在水下,周围是光彩的气泡,和煦的阳光透过水面折射进水下"
# Negative prompt: wrong eyes, bad faces, disfigured, bad art, deformed, extra limbs, blurry colors, blur, repetition, morbid, mutilated
negative_prompt = "错误的眼睛,糟糕的人脸,毁容,糟糕的艺术,变形,多余的肢体,模糊的颜色,模糊,重复,病态,残缺,"

# Enjoy!
torch.manual_seed(0)
image = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    num_inference_steps=50, height=1024, width=1024,
)
image.save("image_1024.png")

# Highres fix
image = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    input_image=image.resize((2048, 2048)),
    num_inference_steps=50, height=2048, width=2048,
    denoising_strength=0.4, tiled=True,
)
image.save("image_2048.png")

View File

@@ -4,9 +4,9 @@ import torch
# Download models (automatically)
# `models/stable_diffusion_3/sd3_medium_incl_clips.safetensors`: [link](https://huggingface.co/stabilityai/stable-diffusion-3-medium/resolve/main/sd3_medium_incl_clips.safetensors)
download_models(["StableDiffusion3"])
download_models(["StableDiffusion3_without_T5"])
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
                             file_path_list=["models/stable_diffusion_3/sd3_medium_incl_clips_t5xxlfp16.safetensors"])
                             file_path_list=["models/stable_diffusion_3/sd3_medium_incl_clips.safetensors"])
pipe = SD3ImagePipeline.from_model_manager(model_manager)

View File

@@ -28,99 +28,6 @@ from diffsynth import download_models
download_models(["HunyuanDiT"])
```
## Inference
### Text-to-image with highres-fix
The original resolution of Hunyuan DiT is 1024x1024. If you want to use larger resolutions, please use highres-fix.
Hunyuan DiT is also supported in our UI.
```python
from diffsynth import ModelManager, HunyuanDiTImagePipeline
import torch

# Load models
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_models([
    "models/HunyuanDiT/t2i/clip_text_encoder/pytorch_model.bin",
    "models/HunyuanDiT/t2i/mt5/pytorch_model.bin",
    "models/HunyuanDiT/t2i/model/pytorch_model_ema.pt",
    "models/HunyuanDiT/t2i/sdxl-vae-fp16-fix/diffusion_pytorch_model.bin"
])
pipe = HunyuanDiTImagePipeline.from_model_manager(model_manager)

# Enjoy!
torch.manual_seed(0)
image = pipe(
    prompt="少女手捧鲜花,坐在公园的长椅上,夕阳的余晖洒在少女的脸庞,整个画面充满诗意的美感",
    negative_prompt="错误的眼睛,糟糕的人脸,毁容,糟糕的艺术,变形,多余的肢体,模糊的颜色,模糊,重复,病态,残缺,",
    num_inference_steps=50, height=1024, width=1024,
)
image.save("image_1024.png")

# Highres fix
image = pipe(
    prompt="少女手捧鲜花,坐在公园的长椅上,夕阳的余晖洒在少女的脸庞,整个画面充满诗意的美感",
    negative_prompt="错误的眼睛,糟糕的人脸,毁容,糟糕的艺术,变形,多余的肢体,模糊的颜色,模糊,重复,病态,残缺,",
    input_image=image.resize((2048, 2048)),
    num_inference_steps=50, height=2048, width=2048,
    cfg_scale=3.0, denoising_strength=0.5, tiled=True,
)
image.save("image_2048.png")
```
Prompt: 少女手捧鲜花,坐在公园的长椅上,夕阳的余晖洒在少女的脸庞,整个画面充满诗意的美感 (a girl holding flowers, sitting on a park bench, the sunset's afterglow falling on her face, the whole scene full of poetic beauty)
|1024x1024|2048x2048 (highres-fix)|
|-|-|
|![image_1024](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/2b6528cf-a229-46e9-b7dd-4a9475b07308)|![image_2048](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/11d264ec-966b-45c9-9804-74b60428b866)|
### In-context reference (experimental)
This feature is similar to the "reference-only" mode in ControlNets. By extending the self-attention layer, the content of the reference image can be retained in the new image. Any number of reference images are supported, and the influence of each one can be controlled through the independent `reference_strengths` parameters.
```python
from diffsynth import ModelManager, HunyuanDiTImagePipeline
import torch

# Load models
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_models([
    "models/HunyuanDiT/t2i/clip_text_encoder/pytorch_model.bin",
    "models/HunyuanDiT/t2i/mt5/pytorch_model.bin",
    "models/HunyuanDiT/t2i/model/pytorch_model_ema.pt",
    "models/HunyuanDiT/t2i/sdxl-vae-fp16-fix/diffusion_pytorch_model.bin"
])
pipe = HunyuanDiTImagePipeline.from_model_manager(model_manager)

# Generate an image as reference
torch.manual_seed(0)
reference_image = pipe(
    prompt="梵高,星空,油画,明亮",
    negative_prompt="",
    num_inference_steps=50, height=1024, width=1024,
)
reference_image.save("image_reference.png")

# Generate a new image with reference
image = pipe(
    prompt="层峦叠嶂的山脉,郁郁葱葱的森林,皎洁明亮的月光,夜色下的自然美景",
    negative_prompt="",
    reference_images=[reference_image], reference_strengths=[0.4],
    num_inference_steps=50, height=1024, width=1024,
)
image.save("image_with_reference.png")
```
Prompt: 层峦叠嶂的山脉,郁郁葱葱的森林,皎洁明亮的月光,夜色下的自然美景 (range upon range of mountains, lush forests, bright clear moonlight, natural beauty at night)
|Reference image|Generated new image|
|-|-|
|![image_reference](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/99b0189d-6175-4842-b480-3c0d2f9f7e17)|![image_with_reference](https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/8e41dddb-f302-4a2d-9e52-5487d1f47ae6)|
## Train
### Install training dependency
@@ -254,7 +161,8 @@ pipe = HunyuanDiTImagePipeline.from_model_manager(model_manager)
# Generate an image with lora
pipe.dit = load_lora(
    pipe.dit, lora_rank=4, lora_alpha=4.0,
    pipe.dit,
    lora_rank=4, lora_alpha=4.0, # The two parameters should be consistent with those in your training script.
    lora_path="path/to/your/lora/model/lightning_logs/version_x/checkpoints/epoch=x-step=xxx.ckpt"
)
torch.manual_seed(0)

View File

@@ -0,0 +1,160 @@
# Stable Diffusion 3
Stable Diffusion 3 is a powerful text-to-image model. We provide training scripts here.
## Download models
Only one checkpoint file is required by the training script. You can use [`sd3_medium_incl_clips.safetensors`](https://huggingface.co/stabilityai/stable-diffusion-3-medium/resolve/main/sd3_medium_incl_clips.safetensors) (without the T5 encoder) or [`sd3_medium_incl_clips_t5xxlfp16.safetensors`](https://huggingface.co/stabilityai/stable-diffusion-3-medium/resolve/main/sd3_medium_incl_clips_t5xxlfp16.safetensors) (with the T5 encoder).
```
models/stable_diffusion_3/
├── Put Stable Diffusion 3 checkpoints here.txt
├── sd3_medium_incl_clips.safetensors
└── sd3_medium_incl_clips_t5xxlfp16.safetensors
```
You can use the following code to download these files:
```python
from diffsynth import download_models
download_models(["StableDiffusion3", "StableDiffusion3_without_T5"])
```
## Train
### Install training dependency
```
pip install peft lightning pandas torchvision
```
### Prepare your dataset
We provide an example dataset [here](https://modelscope.cn/datasets/buptwq/lora-stable-diffusion-finetune/files). You need to manage the training images as follows:
```
data/dog/
└── train
├── 00.jpg
├── 01.jpg
├── 02.jpg
├── 03.jpg
├── 04.jpg
└── metadata.csv
```
`metadata.csv`:
```
file_name,text
00.jpg,a dog
01.jpg,a dog
02.jpg,a dog
03.jpg,a dog
04.jpg,a dog
```
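If you prefer to build this file programmatically, a minimal sketch with pandas (already a training dependency); the `file_name` and `text` columns must match the layout above:
```python
import pandas as pd

# Build the caption table; file names are relative to data/dog/train/.
metadata = pd.DataFrame({
    "file_name": [f"{i:02d}.jpg" for i in range(5)],
    "text": ["a dog"] * 5,
})
metadata.to_csv("data/dog/train/metadata.csv", index=False)
```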
### Train a LoRA model
We provide a training script, `train_sd3_lora.py`. Before running it, please copy it to the root directory of this project.
We recommend enabling gradient checkpointing. 10GB of VRAM is enough to train the LoRA without the T5 encoder (use `sd3_medium_incl_clips.safetensors`), while 19GB of VRAM is required if you enable the T5 encoder (use `sd3_medium_incl_clips_t5xxlfp16.safetensors`).
```
CUDA_VISIBLE_DEVICES="0" python train_sd3_lora.py \
--pretrained_path models/stable_diffusion_3/sd3_medium_incl_clips.safetensors \
--dataset_path data/dog \
--output_path ./models \
--max_epochs 1 \
--center_crop \
--use_gradient_checkpointing
```
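After training, Lightning writes checkpoints under the output path. Since `on_save_checkpoint` in `train_sd3_lora.py` strips everything except the LoRA parameters, the `.ckpt` file is a plain state dict that you can inspect directly (the path below is a placeholder; substitute your own run):
```python
import torch

# The checkpoint produced by train_sd3_lora.py contains only LoRA tensors.
state_dict = torch.load("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", map_location="cpu")
print(len(state_dict), "tensors")
print(list(state_dict)[:4])  # parameter names of the LoRA update matrices
```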
Optional arguments:
```
  -h, --help            show this help message and exit
  --pretrained_path PRETRAINED_PATH
                        Path to pretrained model. For example, `models/stable_diffusion_3/sd3_medium_incl_clips.safetensors` or `models/stable_diffusion_3/sd3_medium_incl_clips_t5xxlfp16.safetensors`.
  --dataset_path DATASET_PATH
                        The path of the Dataset.
  --output_path OUTPUT_PATH
                        Path to save the model.
  --steps_per_epoch STEPS_PER_EPOCH
                        Number of steps per epoch.
  --height HEIGHT       Image height.
  --width WIDTH         Image width.
  --center_crop         Whether to center crop the input images to the resolution. If not set, the images will be randomly cropped. The images will be resized to the resolution first before cropping.
  --random_flip         Whether to randomly flip images horizontally
  --batch_size BATCH_SIZE
                        Batch size (per device) for the training dataloader.
  --dataloader_num_workers DATALOADER_NUM_WORKERS
                        Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process.
  --precision {32,16,16-mixed}
                        Training precision
  --learning_rate LEARNING_RATE
                        Learning rate.
  --lora_rank LORA_RANK
                        The dimension of the LoRA update matrices.
  --lora_alpha LORA_ALPHA
                        The weight of the LoRA update matrices.
  --use_gradient_checkpointing
                        Whether to use gradient checkpointing.
  --accumulate_grad_batches ACCUMULATE_GRAD_BATCHES
                        The number of batches in gradient accumulation.
  --training_strategy {auto,deepspeed_stage_1,deepspeed_stage_2,deepspeed_stage_3}
                        Training strategy
  --max_epochs MAX_EPOCHS
                        Number of epochs.
```
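For intuition about `--lora_rank` and `--lora_alpha`: in peft, the low-rank update is scaled by `lora_alpha / lora_rank`, so the defaults above (rank 4, alpha 4.0) apply the update at strength 1.0. A small sketch with a toy module (assuming peft's LoRA layers expose their `scaling` dict, as in current releases):
```python
import torch
from peft import LoraConfig, inject_adapter_in_model

class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(8, 8)

    def forward(self, x):
        return self.linear(x)

# Target the toy submodule by name, as the training script targets a_to_qkv/b_to_qkv.
config = LoraConfig(r=4, lora_alpha=8.0, init_lora_weights="gaussian", target_modules=["linear"])
model = inject_adapter_in_model(config, Toy())
print(model.linear.scaling)  # {'default': 2.0}: the update is scaled by alpha / r
```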
### Inference with your own LoRA model
After training, you can use your own LoRA model to generate new images. Here are some examples.
```python
from diffsynth import ModelManager, SD3ImagePipeline
import torch
from peft import LoraConfig, inject_adapter_in_model

def load_lora(dit, lora_rank, lora_alpha, lora_path):
    lora_config = LoraConfig(
        r=lora_rank,
        lora_alpha=lora_alpha,
        init_lora_weights="gaussian",
        target_modules=["a_to_qkv", "b_to_qkv"],
    )
    dit = inject_adapter_in_model(lora_config, dit)
    state_dict = torch.load(lora_path, map_location="cpu")
    dit.load_state_dict(state_dict, strict=False)
    return dit

# Load models
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
                             file_path_list=["models/stable_diffusion_3/sd3_medium_incl_clips.safetensors"])
pipe = SD3ImagePipeline.from_model_manager(model_manager)

# Generate an image with lora
pipe.dit = load_lora(
    pipe.dit,
    lora_rank=4, lora_alpha=4.0, # The two parameters should be consistent with those in your training script.
    lora_path="path/to/your/lora/model/lightning_logs/version_x/checkpoints/epoch=x-step=xxx.ckpt"
)
torch.manual_seed(0)
image = pipe(
    prompt="a dog is jumping, flowers around the dog, the background is mountains and clouds",
    negative_prompt="bad quality, poor quality, doll, disfigured, jpg, toy, bad anatomy, missing limbs, missing fingers, 3d, cgi, extra tails",
    cfg_scale=7.5,
    num_inference_steps=100, width=1024, height=1024,
)
image.save("image_with_lora.jpg")
```
Prompt: a dog is jumping, flowers around the dog, the background is mountains and clouds
|Without LoRA|With LoRA|
|-|-|
|![image_without_lora](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/ddb834a5-6366-412b-93dc-6d957230d66e)|![image_with_lora](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/8e7b2888-d874-4da4-a75b-11b6b214b9bf)|

View File

@@ -0,0 +1,293 @@
from diffsynth import ModelManager, SD3ImagePipeline
from peft import LoraConfig, inject_adapter_in_model
from torchvision import transforms
from PIL import Image
import lightning as pl
import pandas as pd
import torch, os, argparse
os.environ["TOKENIZERS_PARALLELISM"] = "True"
class TextImageDataset(torch.utils.data.Dataset):
    def __init__(self, dataset_path, steps_per_epoch=10000, height=1024, width=1024, center_crop=True, random_flip=False):
        self.steps_per_epoch = steps_per_epoch
        metadata = pd.read_csv(os.path.join(dataset_path, "train/metadata.csv"))
        self.path = [os.path.join(dataset_path, "train", file_name) for file_name in metadata["file_name"]]
        self.text = metadata["text"].to_list()
        self.image_processor = transforms.Compose(
            [
                transforms.Resize(max(height, width), interpolation=transforms.InterpolationMode.BILINEAR),
                transforms.CenterCrop((height, width)) if center_crop else transforms.RandomCrop((height, width)),
                transforms.RandomHorizontalFlip() if random_flip else transforms.Lambda(lambda x: x),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),
            ]
        )

    def __getitem__(self, index):
        data_id = torch.randint(0, len(self.path), (1,))[0]
        data_id = (data_id + index) % len(self.path) # For fixed seed.
        text = self.text[data_id]
        image = Image.open(self.path[data_id]).convert("RGB")
        image = self.image_processor(image)
        return {"text": text, "image": image}

    def __len__(self):
        return self.steps_per_epoch
class LightningModel(pl.LightningModule):
    def __init__(self, torch_dtype=torch.float16, learning_rate=1e-4, pretrained_weights=[], lora_rank=4, lora_alpha=4, use_gradient_checkpointing=True):
        super().__init__()
        # Load models
        model_manager = ModelManager(torch_dtype=torch_dtype, device=self.device)
        model_manager.load_models(pretrained_weights)
        self.pipe = SD3ImagePipeline.from_model_manager(model_manager)

        # Freeze parameters
        self.pipe.text_encoder_1.requires_grad_(False)
        self.pipe.text_encoder_2.requires_grad_(False)
        if self.pipe.text_encoder_3 is not None:
            self.pipe.text_encoder_3.requires_grad_(False)
        self.pipe.dit.requires_grad_(False)
        self.pipe.vae_decoder.requires_grad_(False)
        self.pipe.vae_encoder.requires_grad_(False)
        self.pipe.text_encoder_1.eval()
        self.pipe.text_encoder_2.eval()
        if self.pipe.text_encoder_3 is not None:
            self.pipe.text_encoder_3.eval()
        self.pipe.dit.train()
        self.pipe.vae_decoder.eval()
        self.pipe.vae_encoder.eval()

        # Add LoRA to DiT
        lora_config = LoraConfig(
            r=lora_rank,
            lora_alpha=lora_alpha,
            init_lora_weights="gaussian",
            target_modules=["a_to_qkv", "b_to_qkv"],
        )
        self.pipe.dit = inject_adapter_in_model(lora_config, self.pipe.dit)
        for param in self.pipe.dit.parameters():
            # Upcast LoRA parameters into fp32
            if param.requires_grad:
                param.data = param.to(torch.float32)

        # Set other parameters
        self.learning_rate = learning_rate
        self.use_gradient_checkpointing = use_gradient_checkpointing
        self.pipe.scheduler.set_timesteps(1000)

    def training_step(self, batch, batch_idx):
        # Data
        text, image = batch["text"], batch["image"]

        # Prepare input parameters
        self.pipe.device = self.device
        prompt_emb, pooled_prompt_emb = self.pipe.prompter.encode_prompt(
            self.pipe.text_encoder_1, self.pipe.text_encoder_2, self.pipe.text_encoder_3,
            text, device=self.device
        )
        latents = self.pipe.vae_encoder(image.to(dtype=self.pipe.torch_dtype, device=self.device))
        noise = torch.randn_like(latents)
        timestep_id = torch.randint(0, 1000, (1,))
        timestep = self.pipe.scheduler.timesteps[timestep_id].to(self.device)
        noisy_latents = self.pipe.scheduler.add_noise(latents, noise, self.pipe.scheduler.timesteps[timestep_id])
        training_target = self.pipe.scheduler.training_target(latents, noise, timestep)

        # Compute loss
        noise_pred = self.pipe.dit(noisy_latents, timestep, prompt_emb, pooled_prompt_emb, use_gradient_checkpointing=self.use_gradient_checkpointing)
        loss = torch.nn.functional.mse_loss(noise_pred, training_target)

        # Record log
        self.log("train_loss", loss, prog_bar=True)
        return loss

    def configure_optimizers(self):
        trainable_modules = filter(lambda p: p.requires_grad, self.pipe.dit.parameters())
        optimizer = torch.optim.AdamW(trainable_modules, lr=self.learning_rate)
        return optimizer

    def on_save_checkpoint(self, checkpoint):
        checkpoint.clear()
        trainable_param_names = list(filter(lambda named_param: named_param[1].requires_grad, self.pipe.dit.named_parameters()))
        trainable_param_names = set([named_param[0] for named_param in trainable_param_names])
        state_dict = self.pipe.dit.state_dict()
        for name, param in state_dict.items():
            if name in trainable_param_names:
                checkpoint[name] = param
def parse_args():
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model. For example, `models/stable_diffusion_3/sd3_medium_incl_clips.safetensors` or `models/stable_diffusion_3/sd3_medium_incl_clips_t5xxlfp16.safetensors`.",
    )
    parser.add_argument(
        "--dataset_path",
        type=str,
        default=None,
        required=True,
        help="The path of the Dataset.",
    )
    parser.add_argument(
        "--output_path",
        type=str,
        default="./",
        help="Path to save the model.",
    )
    parser.add_argument(
        "--steps_per_epoch",
        type=int,
        default=500,
        help="Number of steps per epoch.",
    )
    parser.add_argument(
        "--height",
        type=int,
        default=1024,
        help="Image height.",
    )
    parser.add_argument(
        "--width",
        type=int,
        default=1024,
        help="Image width.",
    )
    parser.add_argument(
        "--center_crop",
        default=False,
        action="store_true",
        help=(
            "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
            " cropped. The images will be resized to the resolution first before cropping."
        ),
    )
    parser.add_argument(
        "--random_flip",
        default=False,
        action="store_true",
        help="Whether to randomly flip images horizontally",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help="Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process.",
    )
    parser.add_argument(
        "--precision",
        type=str,
        default="16-mixed",
        choices=["32", "16", "16-mixed"],
        help="Training precision",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate.",
    )
    parser.add_argument(
        "--lora_rank",
        type=int,
        default=4,
        help="The dimension of the LoRA update matrices.",
    )
    parser.add_argument(
        "--lora_alpha",
        type=float,
        default=4.0,
        help="The weight of the LoRA update matrices.",
    )
    parser.add_argument(
        "--use_gradient_checkpointing",
        default=False,
        action="store_true",
        help="Whether to use gradient checkpointing.",
    )
    parser.add_argument(
        "--accumulate_grad_batches",
        type=int,
        default=1,
        help="The number of batches in gradient accumulation.",
    )
    parser.add_argument(
        "--training_strategy",
        type=str,
        default="auto",
        choices=["auto", "deepspeed_stage_1", "deepspeed_stage_2", "deepspeed_stage_3"],
        help="Training strategy",
    )
    parser.add_argument(
        "--max_epochs",
        type=int,
        default=1,
        help="Number of epochs.",
    )
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    # args
    args = parse_args()

    # dataset and data loader
    dataset = TextImageDataset(
        args.dataset_path,
        steps_per_epoch=args.steps_per_epoch * args.batch_size,
        height=args.height,
        width=args.width,
        center_crop=args.center_crop,
        random_flip=args.random_flip
    )
    train_loader = torch.utils.data.DataLoader(
        dataset,
        shuffle=True,
        batch_size=args.batch_size,
        num_workers=args.dataloader_num_workers
    )

    # model
    model = LightningModel(
        pretrained_weights=[args.pretrained_path],
        torch_dtype=torch.float32 if args.precision == "32" else torch.float16,
        learning_rate=args.learning_rate,
        lora_rank=args.lora_rank,
        lora_alpha=args.lora_alpha,
        use_gradient_checkpointing=args.use_gradient_checkpointing
    )

    # train
    trainer = pl.Trainer(
        max_epochs=args.max_epochs,
        accelerator="gpu",
        devices="auto",
        precision=args.precision,
        strategy=args.training_strategy,
        default_root_dir=args.output_path,
        accumulate_grad_batches=args.accumulate_grad_batches,
        callbacks=[pl.pytorch.callbacks.ModelCheckpoint(save_top_k=-1)]
    )
    trainer.fit(model=model, train_dataloaders=train_loader)