support kolors! (#106)

This commit is contained in:
Zhongjie Duan
2024-07-11 21:43:45 +08:00
committed by GitHub
parent 2a4709e572
commit 9c6607f78d
20 changed files with 2510 additions and 281 deletions


@@ -28,6 +28,16 @@ LoRA Training: [`../train/stable_diffusion_3/`](../train/stable_diffusion_3/)
|-|-|
|![image_1024](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/4df346db-6f91-420a-b4c1-26e205376098)|![image_2048](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/1386c802-e580-4101-939d-f1596802df9d)|
### Example: Kolors
Example script: [`kolors_text_to_image.py`](./kolors_text_to_image.py)
LoRA Training: [`../train/kolors/`](../train/kolors/)
|1024*1024|2048*2048|
|-|-|
|![image_1024](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/53ef6f41-da11-4701-8665-9f64392607bf)|![image_2048](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/66bb7a75-fe31-44e5-90eb-d3140ee4686d)|
### Example: Hunyuan-DiT
Example script: [`hunyuan_dit_text_to_image.py`](./hunyuan_dit_text_to_image.py)


@@ -0,0 +1,34 @@
from diffsynth import ModelManager, KolorsImagePipeline, download_models
import torch
# Download models
# https://huggingface.co/Kwai-Kolors/Kolors
download_models(["Kolors"])
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
file_path_list=[
"models/kolors/Kolors/text_encoder",
"models/kolors/Kolors/unet/diffusion_pytorch_model.safetensors",
"models/kolors/Kolors/vae/diffusion_pytorch_model.safetensors"
])
pipe = KolorsImagePipeline.from_model_manager(model_manager)
prompt = "一幅充满诗意美感的全身画,泛红的肤色,画中一位银色长发、蓝色眼睛、肤色红润、身穿蓝色吊带连衣裙的少女漂浮在水下,面向镜头,周围是光彩的气泡,和煦的阳光透过水面折射进水下"
negative_prompt = "半身,苍白的肤色,蜡黄的肤色,尸体,错误的眼睛,糟糕的人脸,毁容,糟糕的艺术,变形,多余的肢体,模糊的颜色,模糊,重复,病态,残缺,错误的手指,口红,腮红"
torch.manual_seed(7)
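# Text-to-image generation (1024x1024)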
image = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
num_inference_steps=50,
cfg_scale=4,
)
image.save(f"image_1024.jpg")
image = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
input_image=image.resize((2048, 2048)), denoising_strength=0.4, height=2048, width=2048,
num_inference_steps=50,
cfg_scale=4,
)
image.save("image_2048.jpg")


@@ -0,0 +1,175 @@
# Kolors
Kolors is a Chinese diffusion model built on ChatGLM and Stable Diffusion XL. We provide a training script here.
## Download models
The following files are used to construct Kolors. You can download them from [Hugging Face](https://huggingface.co/Kwai-Kolors/Kolors) or [ModelScope](https://modelscope.cn/models/Kwai-Kolors/Kolors).
```
models/kolors/Kolors
├── text_encoder
│ ├── config.json
│ ├── pytorch_model-00001-of-00007.bin
│ ├── pytorch_model-00002-of-00007.bin
│ ├── pytorch_model-00003-of-00007.bin
│ ├── pytorch_model-00004-of-00007.bin
│ ├── pytorch_model-00005-of-00007.bin
│ ├── pytorch_model-00006-of-00007.bin
│ ├── pytorch_model-00007-of-00007.bin
│ └── pytorch_model.bin.index.json
├── unet
│ └── diffusion_pytorch_model.safetensors
└── vae
└── diffusion_pytorch_model.safetensors
```
You can use the following code to download these files:
```python
from diffsynth import download_models
download_models(["Kolors"])
```
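If you prefer to fetch the weights yourself, here is a minimal sketch using `huggingface_hub` (an alternative suggested here, not part of DiffSynth-Studio; requires `pip install huggingface_hub`):
```python
from huggingface_hub import snapshot_download

# Download only the subfolders listed above into the expected directory layout.
snapshot_download(
    repo_id="Kwai-Kolors/Kolors",
    local_dir="models/kolors/Kolors",
    allow_patterns=["text_encoder/*", "unet/*", "vae/*"],
)
```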
## Train
### Install training dependencies
```
pip install peft lightning pandas torchvision
```
### Prepare your dataset
We provide an example dataset [here](https://modelscope.cn/datasets/buptwq/lora-stable-diffusion-finetune/files). You need to organize the training images as follows:
```
data/dog/
└── train
├── 00.jpg
├── 01.jpg
├── 02.jpg
├── 03.jpg
├── 04.jpg
└── metadata.csv
```
`metadata.csv` (the caption 一只小狗 means "a puppy"):
```
file_name,text
00.jpg,一只小狗
01.jpg,一只小狗
02.jpg,一只小狗
03.jpg,一只小狗
04.jpg,一只小狗
```
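If you are building your own dataset, `metadata.csv` can be generated programmatically. A minimal sketch with pandas, assuming your images are already in `data/dog/train/`:
```python
import os
import pandas as pd

train_dir = "data/dog/train"
file_names = sorted(f for f in os.listdir(train_dir) if f.lower().endswith(".jpg"))
# Every image gets the same caption here; replace with per-image captions as needed.
metadata = pd.DataFrame({"file_name": file_names, "text": ["一只小狗"] * len(file_names)})
metadata.to_csv(os.path.join(train_dir, "metadata.csv"), index=False)
```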
### Train a LoRA model
We provide a training script, `train_kolors_lora.py`. Before running it, please copy it to the root directory of this project.
The following settings are recommended. **We found that the UNet suffers from precision overflow, so the training script does not support float16 and requires 40GB of VRAM. We are working on fixing this limitation.**
```
CUDA_VISIBLE_DEVICES="0" python examples/train/kolors/train_kolors_lora.py \
--pretrained_path models/kolors/Kolors \
--dataset_path data/dog \
--output_path ./models \
--max_epochs 10 \
--center_crop \
--use_gradient_checkpointing \
--precision 32
```
Optional arguments:
```
-h, --help show this help message and exit
--pretrained_path PRETRAINED_PATH
Path to pretrained model. For example, `models/kolors/Kolors`.
--dataset_path DATASET_PATH
The path of the dataset.
--output_path OUTPUT_PATH
Path to save the model.
--steps_per_epoch STEPS_PER_EPOCH
Number of steps per epoch.
--height HEIGHT Image height.
--width WIDTH Image width.
--center_crop Whether to center crop the input images to the resolution. If not set, the images will be randomly cropped. The images will be resized to the resolution first before cropping.
--random_flip Whether to randomly flip images horizontally
--batch_size BATCH_SIZE
Batch size (per device) for the training dataloader.
--dataloader_num_workers DATALOADER_NUM_WORKERS
Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process.
--precision {32,16,16-mixed}
Training precision
--learning_rate LEARNING_RATE
Learning rate.
--lora_rank LORA_RANK
The dimension of the LoRA update matrices.
--lora_alpha LORA_ALPHA
The weight of the LoRA update matrices.
--use_gradient_checkpointing
Whether to use gradient checkpointing.
--accumulate_grad_batches ACCUMULATE_GRAD_BATCHES
The number of batches in gradient accumulation.
--training_strategy {auto,deepspeed_stage_1,deepspeed_stage_2,deepspeed_stage_3}
Training strategy
--max_epochs MAX_EPOCHS
Number of epochs.
```
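These flags can be combined freely. For example, an untested sketch of a two-GPU run with DeepSpeed ZeRO stage 2 and gradient accumulation:
```
CUDA_VISIBLE_DEVICES="0,1" python examples/train/kolors/train_kolors_lora.py \
  --pretrained_path models/kolors/Kolors \
  --dataset_path data/dog \
  --output_path ./models \
  --max_epochs 10 \
  --center_crop \
  --use_gradient_checkpointing \
  --precision 32 \
  --training_strategy deepspeed_stage_2 \
  --accumulate_grad_batches 4
```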
### Inference with your own LoRA model
After training, you can use your own LoRA model to generate new images. Here is an example.
```python
from diffsynth import ModelManager, KolorsImagePipeline
from peft import LoraConfig, inject_adapter_in_model
import torch
def load_lora(model, lora_rank, lora_alpha, lora_path):
lora_config = LoraConfig(
r=lora_rank,
lora_alpha=lora_alpha,
init_lora_weights="gaussian",
target_modules=["to_q", "to_k", "to_v", "to_out"],
)
model = inject_adapter_in_model(lora_config, model)
state_dict = torch.load(lora_path, map_location="cpu")
model.load_state_dict(state_dict, strict=False)
return model
# Load models
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
file_path_list=[
"models/kolors/Kolors/text_encoder",
"models/kolors/Kolors/unet/diffusion_pytorch_model.safetensors",
"models/kolors/Kolors/vae/diffusion_pytorch_model.safetensors"
])
pipe = KolorsImagePipeline.from_model_manager(model_manager)
# Generate an image with lora
pipe.unet = load_lora(
pipe.unet,
lora_rank=4, lora_alpha=4.0, # The two parameters should be consistent with those in your training script.
lora_path="path/to/your/lora/model/lightning_logs/version_x/checkpoints/epoch=x-step=xxx.ckpt"
)
torch.manual_seed(0)
image = pipe(
prompt="一只小狗蹦蹦跳跳,周围是姹紫嫣红的鲜花,远处是山脉",
negative_prompt="",
cfg_scale=4,
num_inference_steps=50, height=1024, width=1024,
)
image.save("image_with_lora.jpg")
```
Prompt: 一只小狗蹦蹦跳跳,周围是姹紫嫣红的鲜花,远处是山脉 ("a puppy bounding around, surrounded by vibrant flowers, with mountains in the distance")
|Without LoRA|With LoRA|
|-|-|
|![image_without_lora](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/9d79ed7a-e8cf-4d98-800a-f182809db318)|![image_with_lora](https://github.com/modelscope/DiffSynth-Studio/assets/35051019/02f62323-6ee5-4788-97a1-549732dbe4f0)|


@@ -0,0 +1,293 @@
from diffsynth import ModelManager, KolorsImagePipeline
from peft import LoraConfig, inject_adapter_in_model
from torchvision import transforms
from PIL import Image
import lightning as pl
import pandas as pd
import torch, os, argparse
os.environ["TOKENIZERS_PARALLELISM"] = "True"
class TextImageDataset(torch.utils.data.Dataset):
def __init__(self, dataset_path, steps_per_epoch=10000, height=1024, width=1024, center_crop=True, random_flip=False):
self.steps_per_epoch = steps_per_epoch
metadata = pd.read_csv(os.path.join(dataset_path, "train/metadata.csv"))
self.path = [os.path.join(dataset_path, "train", file_name) for file_name in metadata["file_name"]]
self.text = metadata["text"].to_list()
self.image_processor = transforms.Compose(
[
transforms.Resize(max(height, width), interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop((height, width)) if center_crop else transforms.RandomCrop((height, width)),
transforms.RandomHorizontalFlip() if random_flip else transforms.Lambda(lambda x: x),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __getitem__(self, index):
data_id = torch.randint(0, len(self.path), (1,))[0]
data_id = (data_id + index) % len(self.path) # For fixed seed.
text = self.text[data_id]
image = Image.open(self.path[data_id]).convert("RGB")
image = self.image_processor(image)
return {"text": text, "image": image}
def __len__(self):
return self.steps_per_epoch
class LightningModel(pl.LightningModule):
def __init__(self, torch_dtype=torch.float16, learning_rate=1e-4, pretrained_weights=[], lora_rank=4, lora_alpha=4, use_gradient_checkpointing=True):
super().__init__()
# Load models
model_manager = ModelManager(torch_dtype=torch_dtype, device=self.device)
model_manager.load_models(pretrained_weights)
self.pipe = KolorsImagePipeline.from_model_manager(model_manager)
# Freeze parameters
self.pipe.text_encoder.requires_grad_(False)
self.pipe.unet.requires_grad_(False)
self.pipe.vae_decoder.requires_grad_(False)
self.pipe.vae_encoder.requires_grad_(False)
self.pipe.text_encoder.eval()
self.pipe.unet.train()
self.pipe.vae_decoder.eval()
self.pipe.vae_encoder.eval()
# Add LoRA to UNet
lora_config = LoraConfig(
r=lora_rank,
lora_alpha=lora_alpha,
init_lora_weights="gaussian",
target_modules=["to_q", "to_k", "to_v", "to_out"],
)
self.pipe.unet = inject_adapter_in_model(lora_config, self.pipe.unet)
for param in self.pipe.unet.parameters():
# Upcast LoRA parameters into fp32
if param.requires_grad:
param.data = param.to(torch.float32)
# Set other parameters
self.learning_rate = learning_rate
self.use_gradient_checkpointing = use_gradient_checkpointing
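        # Configure an 1100-step noise schedule; training_step samples timesteps from [0, 1100).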
self.pipe.scheduler.set_timesteps(1100)
def training_step(self, batch, batch_idx):
# Data
text, image = batch["text"], batch["image"]
# Prepare input parameters
self.pipe.device = self.device
add_prompt_emb, prompt_emb = self.pipe.prompter.encode_prompt(
self.pipe.text_encoder, text, clip_skip=2, device=self.device, positive=True,
)
height, width = image.shape[-2:]
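        # Encode images into latents in fp32, then cast back to the pipeline dtype.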
latents = self.pipe.vae_encoder(image.to(dtype=torch.float32, device=self.device)).to(self.pipe.torch_dtype)
noise = torch.randn_like(latents)
timestep = torch.randint(0, 1100, (1,), device=self.device)[0]
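        # SDXL-style micro-conditioning: (original size, top-left crop offset, target size).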
add_time_id = torch.tensor([height, width, 0, 0, height, width], device=self.device)
noisy_latents = self.pipe.scheduler.add_noise(latents, noise, timestep)
# Compute loss
noise_pred = self.pipe.unet(
noisy_latents, timestep, prompt_emb, add_time_id, add_prompt_emb,
use_gradient_checkpointing=self.use_gradient_checkpointing
)
loss = torch.nn.functional.mse_loss(noise_pred, noise)
# Record log
self.log("train_loss", loss, prog_bar=True)
return loss
def configure_optimizers(self):
trainable_modules = filter(lambda p: p.requires_grad, self.pipe.unet.parameters())
optimizer = torch.optim.AdamW(trainable_modules, lr=self.learning_rate)
return optimizer
def on_save_checkpoint(self, checkpoint):
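        # Keep only the trainable LoRA parameters instead of the full pipeline state.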
checkpoint.clear()
trainable_param_names = list(filter(lambda named_param: named_param[1].requires_grad, self.pipe.unet.named_parameters()))
trainable_param_names = set([named_param[0] for named_param in trainable_param_names])
state_dict = self.pipe.unet.state_dict()
for name, param in state_dict.items():
if name in trainable_param_names:
checkpoint[name] = param
def parse_args():
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--pretrained_path",
type=str,
default=None,
required=True,
help="Path to pretrained model. For example, `models/kolors/Kolors`.",
)
parser.add_argument(
"--dataset_path",
type=str,
default=None,
required=True,
help="The path of the Dataset.",
)
parser.add_argument(
"--output_path",
type=str,
default="./",
help="Path to save the model.",
)
parser.add_argument(
"--steps_per_epoch",
type=int,
default=500,
help="Number of steps per epoch.",
)
parser.add_argument(
"--height",
type=int,
default=1024,
help="Image height.",
)
parser.add_argument(
"--width",
type=int,
default=1024,
help="Image width.",
)
parser.add_argument(
"--center_crop",
default=False,
action="store_true",
help=(
"Whether to center crop the input images to the resolution. If not set, the images will be randomly"
" cropped. The images will be resized to the resolution first before cropping."
),
)
parser.add_argument(
"--random_flip",
default=False,
action="store_true",
help="Whether to randomly flip images horizontally",
)
parser.add_argument(
"--batch_size",
type=int,
default=1,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help="Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process.",
)
parser.add_argument(
"--precision",
type=str,
default="16-mixed",
choices=["32", "16", "16-mixed"],
help="Training precision",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-4,
help="Learning rate.",
)
parser.add_argument(
"--lora_rank",
type=int,
default=4,
help="The dimension of the LoRA update matrices.",
)
parser.add_argument(
"--lora_alpha",
type=float,
default=4.0,
help="The weight of the LoRA update matrices.",
)
parser.add_argument(
"--use_gradient_checkpointing",
default=False,
action="store_true",
help="Whether to use gradient checkpointing.",
)
parser.add_argument(
"--accumulate_grad_batches",
type=int,
default=1,
help="The number of batches in gradient accumulation.",
)
parser.add_argument(
"--training_strategy",
type=str,
default="auto",
choices=["auto", "deepspeed_stage_1", "deepspeed_stage_2", "deepspeed_stage_3"],
help="Training strategy",
)
parser.add_argument(
"--max_epochs",
type=int,
default=1,
help="Number of epochs.",
)
args = parser.parse_args()
return args
if __name__ == '__main__':
# args
args = parse_args()
# dataset and data loader
dataset = TextImageDataset(
args.dataset_path,
steps_per_epoch=args.steps_per_epoch * args.batch_size,
height=args.height,
width=args.width,
center_crop=args.center_crop,
random_flip=args.random_flip
)
train_loader = torch.utils.data.DataLoader(
dataset,
shuffle=True,
batch_size=args.batch_size,
num_workers=args.dataloader_num_workers
)
# model
model = LightningModel(
pretrained_weights=[
os.path.join(args.pretrained_path, "text_encoder"),
os.path.join(args.pretrained_path, "unet/diffusion_pytorch_model.safetensors"),
os.path.join(args.pretrained_path, "vae/diffusion_pytorch_model.safetensors"),
],
torch_dtype=torch.float32 if args.precision == "32" else torch.float16,
learning_rate=args.learning_rate,
lora_rank=args.lora_rank,
lora_alpha=args.lora_alpha,
use_gradient_checkpointing=args.use_gradient_checkpointing
)
# train
trainer = pl.Trainer(
max_epochs=args.max_epochs,
accelerator="gpu",
devices="auto",
precision=args.precision,
strategy=args.training_strategy,
default_root_dir=args.output_path,
accumulate_grad_batches=args.accumulate_grad_batches,
callbacks=[pl.pytorch.callbacks.ModelCheckpoint(save_top_k=-1)]
)
trainer.fit(model=model, train_dataloaders=train_loader)


@@ -153,7 +153,7 @@ image = pipe(
image.save("image_with_lora.jpg")
```
Prompt: a dog is jumping, flowers around the dog, the background is mountains and clouds
|Without LoRA|With LoRA|
|-|-|