import json
import logging
import os
import re
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union

import torch

from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict, \
    resize_pos_embed, get_cast_dtype
from .coca_model import CoCa
from .loss import ClipLoss, DistillClipLoss, CoCaLoss
from .openai import load_openai_model
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, \
    list_pretrained_tags_by_model, download_pretrained_from_hf
from .transform import image_transform, AugmentationCfg
from .tokenizer import HFTokenizer, SimpleTokenizer


HF_HUB_PREFIX = 'hf-hub:'
_MODEL_CONFIG_PATHS = [Path(__file__).parent / "model_configs/"]
_MODEL_CONFIGS = {}  # dictionary (model_name: config) of model architecture configs


def _natural_key(string_):
    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]


def _rescan_model_configs():
    global _MODEL_CONFIGS

    config_ext = ('.json',)
    config_files = []
    for config_path in _MODEL_CONFIG_PATHS:
        if config_path.is_file() and config_path.suffix in config_ext:
            config_files.append(config_path)
        elif config_path.is_dir():
            for ext in config_ext:
                config_files.extend(config_path.glob(f'*{ext}'))

    for cf in config_files:
        with open(cf, 'r') as f:
            model_cfg = json.load(f)
            if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
                _MODEL_CONFIGS[cf.stem] = model_cfg

    _MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}


_rescan_model_configs()  # initial populate of model config registry
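
# A config's key is its file stem; a hypothetical model_configs/ViT-B-32.json
# showing the three required top-level keys:
#   {
#     "embed_dim": 512,
#     "vision_cfg": {"image_size": 224, "layers": 12, "width": 768, "patch_size": 32},
#     "text_cfg": {"context_length": 77, "vocab_size": 49408, "width": 512, "heads": 8, "layers": 12}
#   }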


def list_models():
    """ enumerate available model architectures based on config files """
    return list(_MODEL_CONFIGS.keys())


def add_model_config(path):
    """ add model config path or file and update registry """
    if not isinstance(path, Path):
        path = Path(path)
    _MODEL_CONFIG_PATHS.append(path)
    _rescan_model_configs()


def get_model_config(model_name):
    if model_name in _MODEL_CONFIGS:
        return deepcopy(_MODEL_CONFIGS[model_name])
    else:
        return None


def get_tokenizer(model_name, open_clip_bpe_path=None):
    if model_name.startswith(HF_HUB_PREFIX):
        tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])
    else:
        config = get_model_config(model_name)
        if 'hf_tokenizer_name' in config['text_cfg']:
            tokenizer = HFTokenizer(config['text_cfg']['hf_tokenizer_name'])
        else:
            tokenizer = SimpleTokenizer(open_clip_bpe_path)
    return tokenizer
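
# Usage sketch (names are illustrative; any registered config or HF Hub id works):
#   get_tokenizer('ViT-B-32')             # SimpleTokenizer over the bundled BPE vocab
#   get_tokenizer('hf-hub:org/repo')      # HFTokenizer resolved from the Hugging Face Hub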


def load_state_dict(checkpoint_path: str, map_location='cpu'):
    checkpoint = torch.load(checkpoint_path, map_location=map_location)
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    if next(iter(state_dict.items()))[0].startswith('module'):
        # strip the 'module.' prefix added by DistributedDataParallel wrappers
        state_dict = {k[7:]: v for k, v in state_dict.items()}
    return state_dict


def load_checkpoint(model, checkpoint_path, strict=True):
    state_dict = load_state_dict(checkpoint_path)
    # detect old format and make compatible with new format
    if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
        state_dict = convert_to_custom_text_state_dict(state_dict)
    resize_pos_embed(state_dict, model)
    incompatible_keys = model.load_state_dict(state_dict, strict=strict)
    return incompatible_keys
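
# Sketch (the checkpoint path is hypothetical); strict=False tolerates extra or
# missing keys, e.g. checkpoints that carry heads this model does not define:
#   incompatible = load_checkpoint(model, 'logs/epoch_32.pt', strict=False)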


def create_model(
        model_name: str,
        pretrained: Optional[str] = None,
        precision: str = 'fp32',
        device: Union[str, torch.device] = 'cpu',
        jit: bool = False,
        force_quick_gelu: bool = False,
        force_custom_text: bool = False,
        force_patch_dropout: Optional[float] = None,
        force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
        pretrained_image: bool = False,
        pretrained_hf: bool = True,
        cache_dir: Optional[str] = None,
        output_dict: Optional[bool] = None,
        require_pretrained: bool = False,
):
    has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX)
    if has_hf_hub_prefix:
        model_id = model_name[len(HF_HUB_PREFIX):]
        checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
        config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir)

        with open(config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)
        pretrained_cfg = config['preprocess_cfg']
        model_cfg = config['model_cfg']
    else:
        model_name = model_name.replace('/', '-')  # for callers using old naming with / in ViT names
        checkpoint_path = None
        pretrained_cfg = {}
        model_cfg = None

    if isinstance(device, str):
        device = torch.device(device)

    if pretrained and pretrained.lower() == 'openai':
        logging.info(f'Loading pretrained {model_name} from OpenAI.')
        model = load_openai_model(
            model_name,
            precision=precision,
            device=device,
            jit=jit,
            cache_dir=cache_dir,
        )

        # to always output dict even if it is clip
        if output_dict and hasattr(model, "output_dict"):
            model.output_dict = True
    else:
        model_cfg = model_cfg or get_model_config(model_name)
        if model_cfg is not None:
            logging.info(f'Loaded {model_name} model config.')
        else:
            logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
            raise RuntimeError(f'Model config for {model_name} not found.')

        if force_quick_gelu:
            # override for use of QuickGELU on non-OpenAI transformer models
            model_cfg["quick_gelu"] = True

        if force_patch_dropout is not None:
            # override the default patch dropout value
            model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout

        if force_image_size is not None:
            # override model config's image size
            model_cfg["vision_cfg"]["image_size"] = force_image_size

        if pretrained_image:
            if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
                # pretrained weight loading for timm models set via vision_cfg
                model_cfg['vision_cfg']['timm_model_pretrained'] = True
            else:
                assert False, 'pretrained image towers currently only supported for timm models'

        cast_dtype = get_cast_dtype(precision)
        is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {})
        custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model

        if custom_text:
            if is_hf_model:
                model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
            if "coca" in model_name:
                model = CoCa(**model_cfg, cast_dtype=cast_dtype)
            else:
                model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype)
        else:
            model = CLIP(**model_cfg, cast_dtype=cast_dtype)

        pretrained_loaded = False
        if pretrained:
            checkpoint_path = ''
            pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
            if pretrained_cfg:
                checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
            elif os.path.exists(pretrained):
                checkpoint_path = pretrained

            if checkpoint_path:
                logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
                load_checkpoint(model, checkpoint_path)
            else:
                error_str = (
                    f'Pretrained weights ({pretrained}) not found for model {model_name}. '
                    f'Available pretrained tags: {list_pretrained_tags_by_model(model_name)}.')
                logging.warning(error_str)
                raise RuntimeError(error_str)
            pretrained_loaded = True
        elif has_hf_hub_prefix:
            logging.info(f'Loading pretrained {model_name} weights ({checkpoint_path}).')
            load_checkpoint(model, checkpoint_path)
            pretrained_loaded = True

        if require_pretrained and not pretrained_loaded:
            # callers of create_model_from_pretrained always expect pretrained weights
            raise RuntimeError(
                f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.')

        model.to(device=device)
        if precision in ("fp16", "bf16"):
            convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16)

        # set image mean / std metadata from pretrained_cfg if available, or use default
        model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
        model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD

        # to always output dict even if it is clip
        if output_dict and hasattr(model, "output_dict"):
            model.output_dict = True

        if jit:
            model = torch.jit.script(model)

    return model
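
# Usage sketch (the model/tag pair is an assumption; any pair known to the
# .pretrained registry, or a local checkpoint path, works):
#   model = create_model('ViT-B-32', pretrained='openai', precision='fp16', device='cuda')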


def create_loss(args):
    if args.distill:
        return DistillClipLoss(
            local_loss=args.local_loss,
            gather_with_grad=args.gather_with_grad,
            cache_labels=True,
            rank=args.rank,
            world_size=args.world_size,
            use_horovod=args.horovod,
        )
    elif "coca" in args.model.lower():
        return CoCaLoss(
            caption_loss_weight=args.coca_caption_loss_weight,
            clip_loss_weight=args.coca_contrastive_loss_weight,
            local_loss=args.local_loss,
            gather_with_grad=args.gather_with_grad,
            cache_labels=True,
            rank=args.rank,
            world_size=args.world_size,
            use_horovod=args.horovod,
        )
    return ClipLoss(
        local_loss=args.local_loss,
        gather_with_grad=args.gather_with_grad,
        cache_labels=True,
        rank=args.rank,
        world_size=args.world_size,
        use_horovod=args.horovod,
    )
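
# `args` is the training script's argparse namespace; the fields read above are
# distill, model, local_loss, gather_with_grad, rank, world_size, horovod, and
# the two CoCa loss weights.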


class MLP(torch.nn.Module):
    # small regression head (a plain linear stack with dropout, no activations);
    # attached as `model.score_predictor` in create_model_and_transforms below
    def __init__(self, input_size):
        super().__init__()
        self.input_size = input_size
        self.layers = torch.nn.Sequential(
            torch.nn.Linear(self.input_size, 1024),
            torch.nn.Dropout(0.2),
            torch.nn.Linear(1024, 128),
            torch.nn.Dropout(0.2),
            torch.nn.Linear(128, 64),
            torch.nn.Dropout(0.1),
            torch.nn.Linear(64, 16),
            torch.nn.Linear(16, 1)
        )

    def forward(self, x):
        return self.layers(x)


# class semantic_head(torch.nn.Module):
#     def __init__(self, input_size):
#         super().__init__()
#         self.input_size = input_size  # for ViT-L-14 is 1024
#         self.seg_head = torch.nn.Sequential(
#             torch.nn.Linear(input_size, 128),
#             torch.nn.Dropout(0.2),
#             torch.nn.Linear(128, 64),
#             torch.nn.Dropout(0.1),
#             torch.nn.Linear(64, 16),
#             torch.nn.Linear(16, 1),
#         )
#         self.sigmoid = torch.nn.Sigmoid()
#
#     def forward(self, x):
#         return self.sigmoid(self.seg_head(x))


def create_model_and_transforms(
        model_name: str,
        pretrained: Optional[str] = None,
        precision: str = 'fp32',
        device: Union[str, torch.device] = 'cpu',
        jit: bool = False,
        force_quick_gelu: bool = False,
        force_custom_text: bool = False,
        force_patch_dropout: Optional[float] = None,
        force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
        pretrained_image: bool = False,
        pretrained_hf: bool = True,
        image_mean: Optional[Tuple[float, ...]] = None,
        image_std: Optional[Tuple[float, ...]] = None,
        aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
        cache_dir: Optional[str] = None,
        light_augmentation: bool = False,
        output_dict: Optional[bool] = None,
        with_score_predictor: bool = False,
        with_region_predictor: bool = False,
):
    model = create_model(
        model_name,
        pretrained,
        precision=precision,
        device=device,
        jit=jit,
        force_quick_gelu=force_quick_gelu,
        force_custom_text=force_custom_text,
        force_patch_dropout=force_patch_dropout,
        force_image_size=force_image_size,
        pretrained_image=pretrained_image,
        pretrained_hf=pretrained_hf,
        cache_dir=cache_dir,
        output_dict=output_dict,
    )

    image_mean = image_mean or getattr(model.visual, 'image_mean', None)
    image_std = image_std or getattr(model.visual, 'image_std', None)

    if with_score_predictor:
        model.score_predictor = MLP(model.visual.proj.size(1)).to(device=device, dtype=model.visual.proj.dtype)

    if with_region_predictor:
        # model.region_predictor = semantic_head(model.visual.proj.size(1)).to(device=device, dtype=model.visual.proj.dtype)
        model.region_predictor = torch.nn.Linear(model.visual.proj.size(0), 1).to(device=device, dtype=model.visual.proj.dtype)
        # preprocess_train = image_transform_region(
        #     model.visual.image_size,
        #     is_train=True,
        #     mean=image_mean,
        #     std=image_std
        # )
        # preprocess_val = image_transform_region(
        #     model.visual.image_size,
        #     is_train=False,
        #     mean=image_mean,
        #     std=image_std
        # )

    if light_augmentation:
        preprocess_val = image_transform(
            model.visual.image_size,
            is_train=False,
            mean=image_mean,
            std=image_std,
            resize_longest_max=True,
        )
        preprocess_train = preprocess_val
    else:
        preprocess_train = image_transform(
            model.visual.image_size,
            is_train=True,
            mean=image_mean,
            std=image_std
        )
        preprocess_val = image_transform(
            model.visual.image_size,
            is_train=False,
            mean=image_mean,
            std=image_std
        )

    return model, preprocess_train, preprocess_val
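
# Usage sketch (model/tag are assumptions): the score head's input size is read
# from model.visual.proj, so it matches whatever tower the config defines:
#   model, preprocess_train, preprocess_val = create_model_and_transforms(
#       'ViT-L-14', pretrained='openai', with_score_predictor=True)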


def create_model_from_pretrained(
        model_name: str,
        pretrained: Optional[str] = None,
        precision: str = 'fp32',
        device: Union[str, torch.device] = 'cpu',
        jit: bool = False,
        force_quick_gelu: bool = False,
        force_custom_text: bool = False,
        force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
        return_transform: bool = True,
        image_mean: Optional[Tuple[float, ...]] = None,
        image_std: Optional[Tuple[float, ...]] = None,
        cache_dir: Optional[str] = None,
):
    model = create_model(
        model_name,
        pretrained,
        precision=precision,
        device=device,
        jit=jit,
        force_quick_gelu=force_quick_gelu,
        force_custom_text=force_custom_text,
        force_image_size=force_image_size,
        cache_dir=cache_dir,
        require_pretrained=True,
    )

    if not return_transform:
        return model

    image_mean = image_mean or getattr(model.visual, 'image_mean', None)
    image_std = image_std or getattr(model.visual, 'image_std', None)
    preprocess = image_transform(
        model.visual.image_size,
        is_train=False,
        mean=image_mean,
        std=image_std,
    )

    return model, preprocess