commit 0965477750 (parent cc37860438)
Author: Artiprocher
Date: 2024-05-05 22:48:38 +08:00

15 changed files with 2991 additions and 79 deletions


@@ -2,7 +2,7 @@ import torch
 from .attention import Attention
 from .sd_unet import ResnetBlock, UpSampler
 from .tiler import TileWorker
-from einops import rearrange
+from einops import rearrange, repeat


 class VAEAttentionBlock(torch.nn.Module):
@@ -119,14 +119,13 @@ class SVDVAEDecoder(torch.nn.Module):
         self.conv_out = torch.nn.Conv2d(128, 3, kernel_size=3, padding=1)
         self.time_conv_out = torch.nn.Conv3d(3, 3, kernel_size=(3, 1, 1), padding=(1, 0, 0))

     def forward(self, sample):
         # 1. pre-process
-        hidden_states = sample.flatten(0, 1)
+        hidden_states = rearrange(sample, "C T H W -> T C H W")
         hidden_states = hidden_states / self.scaling_factor
         hidden_states = self.conv_in(hidden_states)
-        time_emb = None
-        text_emb = None
-        res_stack = None
+        time_emb, text_emb, res_stack = None, None, None

         # 2. blocks
         for i, block in enumerate(self.blocks):
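The pre-processing change drops the batch flatten: forward() now assumes a single video latent of shape (C, T, H, W) and moves the time axis into the batch position, so the 2D layers process each frame independently. A minimal sketch of what the new layout implies (tensor sizes here are hypothetical):

import torch
from einops import rearrange

sample = torch.randn(4, 8, 16, 16)                      # (C, T, H, W) latent
frames = rearrange(sample, "C T H W -> T C H W")        # frames batched along dim 0
assert frames.shape == (8, 4, 16, 16)
assert torch.equal(frames, sample.permute(1, 0, 2, 3))  # this rearrange is just a permute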
@@ -136,11 +135,70 @@ class SVDVAEDecoder(torch.nn.Module):
         hidden_states = self.conv_norm_out(hidden_states)
         hidden_states = self.conv_act(hidden_states)
         hidden_states = self.conv_out(hidden_states)
-        hidden_states = rearrange(hidden_states, "T C H W -> 1 C T H W")
+        hidden_states = rearrange(hidden_states, "T C H W -> C T H W")
         hidden_states = self.time_conv_out(hidden_states)
         return hidden_states
+
+    def build_mask(self, data, is_bound):
+        _, T, H, W = data.shape
+        t = repeat(torch.arange(T), "T -> T H W", T=T, H=H, W=W)
+        h = repeat(torch.arange(H), "H -> T H W", T=T, H=H, W=W)
+        w = repeat(torch.arange(W), "W -> T H W", T=T, H=H, W=W)
+        border_width = (T + H + W) // 6
+        pad = torch.ones_like(t) * border_width
+        mask = torch.stack([
+            pad if is_bound[0] else t + 1,
+            pad if is_bound[1] else T - t,
+            pad if is_bound[2] else h + 1,
+            pad if is_bound[3] else H - h,
+            pad if is_bound[4] else w + 1,
+            pad if is_bound[5] else W - w
+        ]).min(dim=0).values
+        mask = mask.clip(1, border_width)
+        mask = (mask / border_width).to(dtype=data.dtype, device=data.device)
+        mask = rearrange(mask, "T H W -> 1 T H W")
+        return mask
+
+    def decode_video(
+        self, sample,
+        batch_time=8, batch_height=128, batch_width=128,
+        stride_time=4, stride_height=32, stride_width=32,
+        progress_bar=lambda x: x
+    ):
+        sample = sample.permute(1, 0, 2, 3)
+        data_device = sample.device
+        computation_device = self.conv_in.weight.device
+        torch_dtype = sample.dtype
+        _, T, H, W = sample.shape
+
+        weight = torch.zeros((1, T, H*8, W*8), dtype=torch_dtype, device=data_device)
+        values = torch.zeros((3, T, H*8, W*8), dtype=torch_dtype, device=data_device)
+
+        # Split tasks
+        tasks = []
+        for t in range(0, T, stride_time):
+            for h in range(0, H, stride_height):
+                for w in range(0, W, stride_width):
+                    if (t-stride_time >= 0 and t-stride_time+batch_time >= T)\
+                        or (h-stride_height >= 0 and h-stride_height+batch_height >= H)\
+                        or (w-stride_width >= 0 and w-stride_width+batch_width >= W):
+                        continue
+                    tasks.append((t, t+batch_time, h, h+batch_height, w, w+batch_width))
+
+        # Run
+        for tl, tr, hl, hr, wl, wr in progress_bar(tasks):
+            sample_batch = sample[:, tl:tr, hl:hr, wl:wr].to(computation_device)
+            sample_batch = self.forward(sample_batch).to(data_device)
+            mask = self.build_mask(sample_batch, is_bound=(tl==0, tr>=T, hl==0, hr>=H, wl==0, wr>=W))
+            values[:, tl:tr, hl*8:hr*8, wl*8:wr*8] += sample_batch * mask
+            weight[:, tl:tr, hl*8:hr*8, wl*8:wr*8] += mask
+        values /= weight
+        return values

     def state_dict_converter(self):
         return SVDVAEDecoderStateDictConverter()
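build_mask() produces the per-tile blending weights used by the tiled decoder: along each axis whose edge is not at the video boundary (is_bound False), weights ramp linearly from the tile edge inward over border_width = (T + H + W) // 6 steps, so overlapping tiles cross-fade instead of leaving seams. A small worked example (the argless instantiation is hypothetical; build_mask itself uses no module state):

import torch

decoder = SVDVAEDecoder()                     # hypothetical instantiation
tile = torch.zeros(3, 4, 6, 6)                # (C, T, H, W) decoded tile
# Only the two W edges are interior here, so only the W axis ramps.
mask = decoder.build_mask(tile, is_bound=(True, True, True, True, False, False))
# border_width = (4 + 6 + 6) // 6 = 2
print(mask.shape)      # torch.Size([1, 4, 6, 6]) -- broadcasts over the C axis
print(mask[0, 0, 0])   # tensor([0.5000, 1.0000, 1.0000, 1.0000, 1.0000, 0.5000])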
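decode_video() then slides overlapping windows (batch_* sizes, stride_* steps) over time and space, decodes each tile on the device holding the model weights, and accumulates mask-weighted results into values / weight before normalizing; the skip condition discards windows whose predecessor already reached the end of an axis. A sketch of the assumed calling convention, with hypothetical sizes and setup:

import torch
from tqdm import tqdm

latents = torch.randn(25, 4, 64, 64).half()   # (T, C, H, W); permuted to (C, T, H, W) inside
decoder = SVDVAEDecoder().half().cuda()       # hypothetical setup
video = decoder.decode_video(
    latents,                                  # stays on its own device; tiles move to the GPU
    batch_time=8, batch_height=32, batch_width=32,
    stride_time=4, stride_height=16, stride_width=16,
    progress_bar=tqdm,
)
print(video.shape)                            # torch.Size([3, 25, 512, 512]): 8x spatial upsampling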
@@ -238,3 +296,282 @@ class SVDVAEDecoderStateDictConverter:
             state_dict_[name_] = state_dict[name]
         return state_dict_
+
+    def from_civitai(self, state_dict):
+        rename_dict = {
+            "first_stage_model.decoder.conv_in.bias": "conv_in.bias",
+            "first_stage_model.decoder.conv_in.weight": "conv_in.weight",
+            "first_stage_model.decoder.conv_out.bias": "conv_out.bias",
+            "first_stage_model.decoder.conv_out.time_mix_conv.bias": "time_conv_out.bias",
+            "first_stage_model.decoder.conv_out.time_mix_conv.weight": "time_conv_out.weight",
+            "first_stage_model.decoder.conv_out.weight": "conv_out.weight",
+            "first_stage_model.decoder.mid.attn_1.k.bias": "blocks.2.transformer_blocks.0.to_k.bias",
+            "first_stage_model.decoder.mid.attn_1.k.weight": "blocks.2.transformer_blocks.0.to_k.weight",
+            "first_stage_model.decoder.mid.attn_1.norm.bias": "blocks.2.norm.bias",
+            "first_stage_model.decoder.mid.attn_1.norm.weight": "blocks.2.norm.weight",
+            "first_stage_model.decoder.mid.attn_1.proj_out.bias": "blocks.2.transformer_blocks.0.to_out.bias",
+            "first_stage_model.decoder.mid.attn_1.proj_out.weight": "blocks.2.transformer_blocks.0.to_out.weight",
+            "first_stage_model.decoder.mid.attn_1.q.bias": "blocks.2.transformer_blocks.0.to_q.bias",
+            "first_stage_model.decoder.mid.attn_1.q.weight": "blocks.2.transformer_blocks.0.to_q.weight",
+            "first_stage_model.decoder.mid.attn_1.v.bias": "blocks.2.transformer_blocks.0.to_v.bias",
+            "first_stage_model.decoder.mid.attn_1.v.weight": "blocks.2.transformer_blocks.0.to_v.weight",
+            "first_stage_model.decoder.mid.block_1.conv1.bias": "blocks.0.conv1.bias",
+            "first_stage_model.decoder.mid.block_1.conv1.weight": "blocks.0.conv1.weight",
+            "first_stage_model.decoder.mid.block_1.conv2.bias": "blocks.0.conv2.bias",
+            "first_stage_model.decoder.mid.block_1.conv2.weight": "blocks.0.conv2.weight",
+            "first_stage_model.decoder.mid.block_1.mix_factor": "blocks.1.mix_factor",
+            "first_stage_model.decoder.mid.block_1.norm1.bias": "blocks.0.norm1.bias",
+            "first_stage_model.decoder.mid.block_1.norm1.weight": "blocks.0.norm1.weight",
+            "first_stage_model.decoder.mid.block_1.norm2.bias": "blocks.0.norm2.bias",
+            "first_stage_model.decoder.mid.block_1.norm2.weight": "blocks.0.norm2.weight",
+            "first_stage_model.decoder.mid.block_1.time_stack.in_layers.0.bias": "blocks.1.norm1.bias",
+            "first_stage_model.decoder.mid.block_1.time_stack.in_layers.0.weight": "blocks.1.norm1.weight",
+            "first_stage_model.decoder.mid.block_1.time_stack.in_layers.2.bias": "blocks.1.conv1.bias",
+            "first_stage_model.decoder.mid.block_1.time_stack.in_layers.2.weight": "blocks.1.conv1.weight",
+            "first_stage_model.decoder.mid.block_1.time_stack.out_layers.0.bias": "blocks.1.norm2.bias",
+            "first_stage_model.decoder.mid.block_1.time_stack.out_layers.0.weight": "blocks.1.norm2.weight",
+            "first_stage_model.decoder.mid.block_1.time_stack.out_layers.3.bias": "blocks.1.conv2.bias",
+            "first_stage_model.decoder.mid.block_1.time_stack.out_layers.3.weight": "blocks.1.conv2.weight",
+            "first_stage_model.decoder.mid.block_2.conv1.bias": "blocks.3.conv1.bias",
+            "first_stage_model.decoder.mid.block_2.conv1.weight": "blocks.3.conv1.weight",
+            "first_stage_model.decoder.mid.block_2.conv2.bias": "blocks.3.conv2.bias",
+            "first_stage_model.decoder.mid.block_2.conv2.weight": "blocks.3.conv2.weight",
+            "first_stage_model.decoder.mid.block_2.mix_factor": "blocks.4.mix_factor",
+            "first_stage_model.decoder.mid.block_2.norm1.bias": "blocks.3.norm1.bias",
+            "first_stage_model.decoder.mid.block_2.norm1.weight": "blocks.3.norm1.weight",
+            "first_stage_model.decoder.mid.block_2.norm2.bias": "blocks.3.norm2.bias",
+            "first_stage_model.decoder.mid.block_2.norm2.weight": "blocks.3.norm2.weight",
+            "first_stage_model.decoder.mid.block_2.time_stack.in_layers.0.bias": "blocks.4.norm1.bias",
+            "first_stage_model.decoder.mid.block_2.time_stack.in_layers.0.weight": "blocks.4.norm1.weight",
+            "first_stage_model.decoder.mid.block_2.time_stack.in_layers.2.bias": "blocks.4.conv1.bias",
+            "first_stage_model.decoder.mid.block_2.time_stack.in_layers.2.weight": "blocks.4.conv1.weight",
+            "first_stage_model.decoder.mid.block_2.time_stack.out_layers.0.bias": "blocks.4.norm2.bias",
+            "first_stage_model.decoder.mid.block_2.time_stack.out_layers.0.weight": "blocks.4.norm2.weight",
+            "first_stage_model.decoder.mid.block_2.time_stack.out_layers.3.bias": "blocks.4.conv2.bias",
+            "first_stage_model.decoder.mid.block_2.time_stack.out_layers.3.weight": "blocks.4.conv2.weight",
+            "first_stage_model.decoder.norm_out.bias": "conv_norm_out.bias",
+            "first_stage_model.decoder.norm_out.weight": "conv_norm_out.weight",
+            "first_stage_model.decoder.up.0.block.0.conv1.bias": "blocks.26.conv1.bias",
+            "first_stage_model.decoder.up.0.block.0.conv1.weight": "blocks.26.conv1.weight",
+            "first_stage_model.decoder.up.0.block.0.conv2.bias": "blocks.26.conv2.bias",
+            "first_stage_model.decoder.up.0.block.0.conv2.weight": "blocks.26.conv2.weight",
+            "first_stage_model.decoder.up.0.block.0.mix_factor": "blocks.27.mix_factor",
+            "first_stage_model.decoder.up.0.block.0.nin_shortcut.bias": "blocks.26.conv_shortcut.bias",
+            "first_stage_model.decoder.up.0.block.0.nin_shortcut.weight": "blocks.26.conv_shortcut.weight",
+            "first_stage_model.decoder.up.0.block.0.norm1.bias": "blocks.26.norm1.bias",
+            "first_stage_model.decoder.up.0.block.0.norm1.weight": "blocks.26.norm1.weight",
+            "first_stage_model.decoder.up.0.block.0.norm2.bias": "blocks.26.norm2.bias",
+            "first_stage_model.decoder.up.0.block.0.norm2.weight": "blocks.26.norm2.weight",
+            "first_stage_model.decoder.up.0.block.0.time_stack.in_layers.0.bias": "blocks.27.norm1.bias",
+            "first_stage_model.decoder.up.0.block.0.time_stack.in_layers.0.weight": "blocks.27.norm1.weight",
+            "first_stage_model.decoder.up.0.block.0.time_stack.in_layers.2.bias": "blocks.27.conv1.bias",
+            "first_stage_model.decoder.up.0.block.0.time_stack.in_layers.2.weight": "blocks.27.conv1.weight",
+            "first_stage_model.decoder.up.0.block.0.time_stack.out_layers.0.bias": "blocks.27.norm2.bias",
+            "first_stage_model.decoder.up.0.block.0.time_stack.out_layers.0.weight": "blocks.27.norm2.weight",
+            "first_stage_model.decoder.up.0.block.0.time_stack.out_layers.3.bias": "blocks.27.conv2.bias",
+            "first_stage_model.decoder.up.0.block.0.time_stack.out_layers.3.weight": "blocks.27.conv2.weight",
+            "first_stage_model.decoder.up.0.block.1.conv1.bias": "blocks.28.conv1.bias",
+            "first_stage_model.decoder.up.0.block.1.conv1.weight": "blocks.28.conv1.weight",
+            "first_stage_model.decoder.up.0.block.1.conv2.bias": "blocks.28.conv2.bias",
+            "first_stage_model.decoder.up.0.block.1.conv2.weight": "blocks.28.conv2.weight",
+            "first_stage_model.decoder.up.0.block.1.mix_factor": "blocks.29.mix_factor",
+            "first_stage_model.decoder.up.0.block.1.norm1.bias": "blocks.28.norm1.bias",
+            "first_stage_model.decoder.up.0.block.1.norm1.weight": "blocks.28.norm1.weight",
+            "first_stage_model.decoder.up.0.block.1.norm2.bias": "blocks.28.norm2.bias",
+            "first_stage_model.decoder.up.0.block.1.norm2.weight": "blocks.28.norm2.weight",
+            "first_stage_model.decoder.up.0.block.1.time_stack.in_layers.0.bias": "blocks.29.norm1.bias",
+            "first_stage_model.decoder.up.0.block.1.time_stack.in_layers.0.weight": "blocks.29.norm1.weight",
+            "first_stage_model.decoder.up.0.block.1.time_stack.in_layers.2.bias": "blocks.29.conv1.bias",
+            "first_stage_model.decoder.up.0.block.1.time_stack.in_layers.2.weight": "blocks.29.conv1.weight",
+            "first_stage_model.decoder.up.0.block.1.time_stack.out_layers.0.bias": "blocks.29.norm2.bias",
+            "first_stage_model.decoder.up.0.block.1.time_stack.out_layers.0.weight": "blocks.29.norm2.weight",
+            "first_stage_model.decoder.up.0.block.1.time_stack.out_layers.3.bias": "blocks.29.conv2.bias",
+            "first_stage_model.decoder.up.0.block.1.time_stack.out_layers.3.weight": "blocks.29.conv2.weight",
+            "first_stage_model.decoder.up.0.block.2.conv1.bias": "blocks.30.conv1.bias",
+            "first_stage_model.decoder.up.0.block.2.conv1.weight": "blocks.30.conv1.weight",
+            "first_stage_model.decoder.up.0.block.2.conv2.bias": "blocks.30.conv2.bias",
+            "first_stage_model.decoder.up.0.block.2.conv2.weight": "blocks.30.conv2.weight",
+            "first_stage_model.decoder.up.0.block.2.mix_factor": "blocks.31.mix_factor",
+            "first_stage_model.decoder.up.0.block.2.norm1.bias": "blocks.30.norm1.bias",
+            "first_stage_model.decoder.up.0.block.2.norm1.weight": "blocks.30.norm1.weight",
+            "first_stage_model.decoder.up.0.block.2.norm2.bias": "blocks.30.norm2.bias",
+            "first_stage_model.decoder.up.0.block.2.norm2.weight": "blocks.30.norm2.weight",
+            "first_stage_model.decoder.up.0.block.2.time_stack.in_layers.0.bias": "blocks.31.norm1.bias",
+            "first_stage_model.decoder.up.0.block.2.time_stack.in_layers.0.weight": "blocks.31.norm1.weight",
+            "first_stage_model.decoder.up.0.block.2.time_stack.in_layers.2.bias": "blocks.31.conv1.bias",
+            "first_stage_model.decoder.up.0.block.2.time_stack.in_layers.2.weight": "blocks.31.conv1.weight",
+            "first_stage_model.decoder.up.0.block.2.time_stack.out_layers.0.bias": "blocks.31.norm2.bias",
+            "first_stage_model.decoder.up.0.block.2.time_stack.out_layers.0.weight": "blocks.31.norm2.weight",
+            "first_stage_model.decoder.up.0.block.2.time_stack.out_layers.3.bias": "blocks.31.conv2.bias",
+            "first_stage_model.decoder.up.0.block.2.time_stack.out_layers.3.weight": "blocks.31.conv2.weight",
+            "first_stage_model.decoder.up.1.block.0.conv1.bias": "blocks.19.conv1.bias",
+            "first_stage_model.decoder.up.1.block.0.conv1.weight": "blocks.19.conv1.weight",
+            "first_stage_model.decoder.up.1.block.0.conv2.bias": "blocks.19.conv2.bias",
+            "first_stage_model.decoder.up.1.block.0.conv2.weight": "blocks.19.conv2.weight",
+            "first_stage_model.decoder.up.1.block.0.mix_factor": "blocks.20.mix_factor",
+            "first_stage_model.decoder.up.1.block.0.nin_shortcut.bias": "blocks.19.conv_shortcut.bias",
+            "first_stage_model.decoder.up.1.block.0.nin_shortcut.weight": "blocks.19.conv_shortcut.weight",
+            "first_stage_model.decoder.up.1.block.0.norm1.bias": "blocks.19.norm1.bias",
+            "first_stage_model.decoder.up.1.block.0.norm1.weight": "blocks.19.norm1.weight",
+            "first_stage_model.decoder.up.1.block.0.norm2.bias": "blocks.19.norm2.bias",
+            "first_stage_model.decoder.up.1.block.0.norm2.weight": "blocks.19.norm2.weight",
+            "first_stage_model.decoder.up.1.block.0.time_stack.in_layers.0.bias": "blocks.20.norm1.bias",
+            "first_stage_model.decoder.up.1.block.0.time_stack.in_layers.0.weight": "blocks.20.norm1.weight",
+            "first_stage_model.decoder.up.1.block.0.time_stack.in_layers.2.bias": "blocks.20.conv1.bias",
+            "first_stage_model.decoder.up.1.block.0.time_stack.in_layers.2.weight": "blocks.20.conv1.weight",
+            "first_stage_model.decoder.up.1.block.0.time_stack.out_layers.0.bias": "blocks.20.norm2.bias",
+            "first_stage_model.decoder.up.1.block.0.time_stack.out_layers.0.weight": "blocks.20.norm2.weight",
+            "first_stage_model.decoder.up.1.block.0.time_stack.out_layers.3.bias": "blocks.20.conv2.bias",
+            "first_stage_model.decoder.up.1.block.0.time_stack.out_layers.3.weight": "blocks.20.conv2.weight",
+            "first_stage_model.decoder.up.1.block.1.conv1.bias": "blocks.21.conv1.bias",
+            "first_stage_model.decoder.up.1.block.1.conv1.weight": "blocks.21.conv1.weight",
+            "first_stage_model.decoder.up.1.block.1.conv2.bias": "blocks.21.conv2.bias",
+            "first_stage_model.decoder.up.1.block.1.conv2.weight": "blocks.21.conv2.weight",
+            "first_stage_model.decoder.up.1.block.1.mix_factor": "blocks.22.mix_factor",
+            "first_stage_model.decoder.up.1.block.1.norm1.bias": "blocks.21.norm1.bias",
+            "first_stage_model.decoder.up.1.block.1.norm1.weight": "blocks.21.norm1.weight",
+            "first_stage_model.decoder.up.1.block.1.norm2.bias": "blocks.21.norm2.bias",
+            "first_stage_model.decoder.up.1.block.1.norm2.weight": "blocks.21.norm2.weight",
+            "first_stage_model.decoder.up.1.block.1.time_stack.in_layers.0.bias": "blocks.22.norm1.bias",
+            "first_stage_model.decoder.up.1.block.1.time_stack.in_layers.0.weight": "blocks.22.norm1.weight",
+            "first_stage_model.decoder.up.1.block.1.time_stack.in_layers.2.bias": "blocks.22.conv1.bias",
+            "first_stage_model.decoder.up.1.block.1.time_stack.in_layers.2.weight": "blocks.22.conv1.weight",
+            "first_stage_model.decoder.up.1.block.1.time_stack.out_layers.0.bias": "blocks.22.norm2.bias",
+            "first_stage_model.decoder.up.1.block.1.time_stack.out_layers.0.weight": "blocks.22.norm2.weight",
+            "first_stage_model.decoder.up.1.block.1.time_stack.out_layers.3.bias": "blocks.22.conv2.bias",
+            "first_stage_model.decoder.up.1.block.1.time_stack.out_layers.3.weight": "blocks.22.conv2.weight",
+            "first_stage_model.decoder.up.1.block.2.conv1.bias": "blocks.23.conv1.bias",
+            "first_stage_model.decoder.up.1.block.2.conv1.weight": "blocks.23.conv1.weight",
+            "first_stage_model.decoder.up.1.block.2.conv2.bias": "blocks.23.conv2.bias",
+            "first_stage_model.decoder.up.1.block.2.conv2.weight": "blocks.23.conv2.weight",
+            "first_stage_model.decoder.up.1.block.2.mix_factor": "blocks.24.mix_factor",
+            "first_stage_model.decoder.up.1.block.2.norm1.bias": "blocks.23.norm1.bias",
+            "first_stage_model.decoder.up.1.block.2.norm1.weight": "blocks.23.norm1.weight",
+            "first_stage_model.decoder.up.1.block.2.norm2.bias": "blocks.23.norm2.bias",
+            "first_stage_model.decoder.up.1.block.2.norm2.weight": "blocks.23.norm2.weight",
+            "first_stage_model.decoder.up.1.block.2.time_stack.in_layers.0.bias": "blocks.24.norm1.bias",
+            "first_stage_model.decoder.up.1.block.2.time_stack.in_layers.0.weight": "blocks.24.norm1.weight",
+            "first_stage_model.decoder.up.1.block.2.time_stack.in_layers.2.bias": "blocks.24.conv1.bias",
+            "first_stage_model.decoder.up.1.block.2.time_stack.in_layers.2.weight": "blocks.24.conv1.weight",
+            "first_stage_model.decoder.up.1.block.2.time_stack.out_layers.0.bias": "blocks.24.norm2.bias",
+            "first_stage_model.decoder.up.1.block.2.time_stack.out_layers.0.weight": "blocks.24.norm2.weight",
+            "first_stage_model.decoder.up.1.block.2.time_stack.out_layers.3.bias": "blocks.24.conv2.bias",
+            "first_stage_model.decoder.up.1.block.2.time_stack.out_layers.3.weight": "blocks.24.conv2.weight",
+            "first_stage_model.decoder.up.1.upsample.conv.bias": "blocks.25.conv.bias",
+            "first_stage_model.decoder.up.1.upsample.conv.weight": "blocks.25.conv.weight",
+            "first_stage_model.decoder.up.2.block.0.conv1.bias": "blocks.12.conv1.bias",
+            "first_stage_model.decoder.up.2.block.0.conv1.weight": "blocks.12.conv1.weight",
+            "first_stage_model.decoder.up.2.block.0.conv2.bias": "blocks.12.conv2.bias",
+            "first_stage_model.decoder.up.2.block.0.conv2.weight": "blocks.12.conv2.weight",
+            "first_stage_model.decoder.up.2.block.0.mix_factor": "blocks.13.mix_factor",
+            "first_stage_model.decoder.up.2.block.0.norm1.bias": "blocks.12.norm1.bias",
+            "first_stage_model.decoder.up.2.block.0.norm1.weight": "blocks.12.norm1.weight",
+            "first_stage_model.decoder.up.2.block.0.norm2.bias": "blocks.12.norm2.bias",
+            "first_stage_model.decoder.up.2.block.0.norm2.weight": "blocks.12.norm2.weight",
+            "first_stage_model.decoder.up.2.block.0.time_stack.in_layers.0.bias": "blocks.13.norm1.bias",
+            "first_stage_model.decoder.up.2.block.0.time_stack.in_layers.0.weight": "blocks.13.norm1.weight",
+            "first_stage_model.decoder.up.2.block.0.time_stack.in_layers.2.bias": "blocks.13.conv1.bias",
+            "first_stage_model.decoder.up.2.block.0.time_stack.in_layers.2.weight": "blocks.13.conv1.weight",
+            "first_stage_model.decoder.up.2.block.0.time_stack.out_layers.0.bias": "blocks.13.norm2.bias",
+            "first_stage_model.decoder.up.2.block.0.time_stack.out_layers.0.weight": "blocks.13.norm2.weight",
+            "first_stage_model.decoder.up.2.block.0.time_stack.out_layers.3.bias": "blocks.13.conv2.bias",
+            "first_stage_model.decoder.up.2.block.0.time_stack.out_layers.3.weight": "blocks.13.conv2.weight",
+            "first_stage_model.decoder.up.2.block.1.conv1.bias": "blocks.14.conv1.bias",
+            "first_stage_model.decoder.up.2.block.1.conv1.weight": "blocks.14.conv1.weight",
+            "first_stage_model.decoder.up.2.block.1.conv2.bias": "blocks.14.conv2.bias",
+            "first_stage_model.decoder.up.2.block.1.conv2.weight": "blocks.14.conv2.weight",
+            "first_stage_model.decoder.up.2.block.1.mix_factor": "blocks.15.mix_factor",
+            "first_stage_model.decoder.up.2.block.1.norm1.bias": "blocks.14.norm1.bias",
+            "first_stage_model.decoder.up.2.block.1.norm1.weight": "blocks.14.norm1.weight",
+            "first_stage_model.decoder.up.2.block.1.norm2.bias": "blocks.14.norm2.bias",
+            "first_stage_model.decoder.up.2.block.1.norm2.weight": "blocks.14.norm2.weight",
+            "first_stage_model.decoder.up.2.block.1.time_stack.in_layers.0.bias": "blocks.15.norm1.bias",
+            "first_stage_model.decoder.up.2.block.1.time_stack.in_layers.0.weight": "blocks.15.norm1.weight",
+            "first_stage_model.decoder.up.2.block.1.time_stack.in_layers.2.bias": "blocks.15.conv1.bias",
+            "first_stage_model.decoder.up.2.block.1.time_stack.in_layers.2.weight": "blocks.15.conv1.weight",
+            "first_stage_model.decoder.up.2.block.1.time_stack.out_layers.0.bias": "blocks.15.norm2.bias",
+            "first_stage_model.decoder.up.2.block.1.time_stack.out_layers.0.weight": "blocks.15.norm2.weight",
+            "first_stage_model.decoder.up.2.block.1.time_stack.out_layers.3.bias": "blocks.15.conv2.bias",
+            "first_stage_model.decoder.up.2.block.1.time_stack.out_layers.3.weight": "blocks.15.conv2.weight",
+            "first_stage_model.decoder.up.2.block.2.conv1.bias": "blocks.16.conv1.bias",
+            "first_stage_model.decoder.up.2.block.2.conv1.weight": "blocks.16.conv1.weight",
+            "first_stage_model.decoder.up.2.block.2.conv2.bias": "blocks.16.conv2.bias",
+            "first_stage_model.decoder.up.2.block.2.conv2.weight": "blocks.16.conv2.weight",
+            "first_stage_model.decoder.up.2.block.2.mix_factor": "blocks.17.mix_factor",
+            "first_stage_model.decoder.up.2.block.2.norm1.bias": "blocks.16.norm1.bias",
+            "first_stage_model.decoder.up.2.block.2.norm1.weight": "blocks.16.norm1.weight",
+            "first_stage_model.decoder.up.2.block.2.norm2.bias": "blocks.16.norm2.bias",
+            "first_stage_model.decoder.up.2.block.2.norm2.weight": "blocks.16.norm2.weight",
+            "first_stage_model.decoder.up.2.block.2.time_stack.in_layers.0.bias": "blocks.17.norm1.bias",
+            "first_stage_model.decoder.up.2.block.2.time_stack.in_layers.0.weight": "blocks.17.norm1.weight",
+            "first_stage_model.decoder.up.2.block.2.time_stack.in_layers.2.bias": "blocks.17.conv1.bias",
+            "first_stage_model.decoder.up.2.block.2.time_stack.in_layers.2.weight": "blocks.17.conv1.weight",
+            "first_stage_model.decoder.up.2.block.2.time_stack.out_layers.0.bias": "blocks.17.norm2.bias",
+            "first_stage_model.decoder.up.2.block.2.time_stack.out_layers.0.weight": "blocks.17.norm2.weight",
+            "first_stage_model.decoder.up.2.block.2.time_stack.out_layers.3.bias": "blocks.17.conv2.bias",
+            "first_stage_model.decoder.up.2.block.2.time_stack.out_layers.3.weight": "blocks.17.conv2.weight",
+            "first_stage_model.decoder.up.2.upsample.conv.bias": "blocks.18.conv.bias",
+            "first_stage_model.decoder.up.2.upsample.conv.weight": "blocks.18.conv.weight",
+            "first_stage_model.decoder.up.3.block.0.conv1.bias": "blocks.5.conv1.bias",
+            "first_stage_model.decoder.up.3.block.0.conv1.weight": "blocks.5.conv1.weight",
+            "first_stage_model.decoder.up.3.block.0.conv2.bias": "blocks.5.conv2.bias",
+            "first_stage_model.decoder.up.3.block.0.conv2.weight": "blocks.5.conv2.weight",
+            "first_stage_model.decoder.up.3.block.0.mix_factor": "blocks.6.mix_factor",
+            "first_stage_model.decoder.up.3.block.0.norm1.bias": "blocks.5.norm1.bias",
+            "first_stage_model.decoder.up.3.block.0.norm1.weight": "blocks.5.norm1.weight",
+            "first_stage_model.decoder.up.3.block.0.norm2.bias": "blocks.5.norm2.bias",
+            "first_stage_model.decoder.up.3.block.0.norm2.weight": "blocks.5.norm2.weight",
+            "first_stage_model.decoder.up.3.block.0.time_stack.in_layers.0.bias": "blocks.6.norm1.bias",
+            "first_stage_model.decoder.up.3.block.0.time_stack.in_layers.0.weight": "blocks.6.norm1.weight",
+            "first_stage_model.decoder.up.3.block.0.time_stack.in_layers.2.bias": "blocks.6.conv1.bias",
+            "first_stage_model.decoder.up.3.block.0.time_stack.in_layers.2.weight": "blocks.6.conv1.weight",
+            "first_stage_model.decoder.up.3.block.0.time_stack.out_layers.0.bias": "blocks.6.norm2.bias",
+            "first_stage_model.decoder.up.3.block.0.time_stack.out_layers.0.weight": "blocks.6.norm2.weight",
+            "first_stage_model.decoder.up.3.block.0.time_stack.out_layers.3.bias": "blocks.6.conv2.bias",
+            "first_stage_model.decoder.up.3.block.0.time_stack.out_layers.3.weight": "blocks.6.conv2.weight",
+            "first_stage_model.decoder.up.3.block.1.conv1.bias": "blocks.7.conv1.bias",
+            "first_stage_model.decoder.up.3.block.1.conv1.weight": "blocks.7.conv1.weight",
+            "first_stage_model.decoder.up.3.block.1.conv2.bias": "blocks.7.conv2.bias",
+            "first_stage_model.decoder.up.3.block.1.conv2.weight": "blocks.7.conv2.weight",
+            "first_stage_model.decoder.up.3.block.1.mix_factor": "blocks.8.mix_factor",
+            "first_stage_model.decoder.up.3.block.1.norm1.bias": "blocks.7.norm1.bias",
+            "first_stage_model.decoder.up.3.block.1.norm1.weight": "blocks.7.norm1.weight",
+            "first_stage_model.decoder.up.3.block.1.norm2.bias": "blocks.7.norm2.bias",
+            "first_stage_model.decoder.up.3.block.1.norm2.weight": "blocks.7.norm2.weight",
+            "first_stage_model.decoder.up.3.block.1.time_stack.in_layers.0.bias": "blocks.8.norm1.bias",
+            "first_stage_model.decoder.up.3.block.1.time_stack.in_layers.0.weight": "blocks.8.norm1.weight",
+            "first_stage_model.decoder.up.3.block.1.time_stack.in_layers.2.bias": "blocks.8.conv1.bias",
+            "first_stage_model.decoder.up.3.block.1.time_stack.in_layers.2.weight": "blocks.8.conv1.weight",
+            "first_stage_model.decoder.up.3.block.1.time_stack.out_layers.0.bias": "blocks.8.norm2.bias",
+            "first_stage_model.decoder.up.3.block.1.time_stack.out_layers.0.weight": "blocks.8.norm2.weight",
+            "first_stage_model.decoder.up.3.block.1.time_stack.out_layers.3.bias": "blocks.8.conv2.bias",
+            "first_stage_model.decoder.up.3.block.1.time_stack.out_layers.3.weight": "blocks.8.conv2.weight",
+            "first_stage_model.decoder.up.3.block.2.conv1.bias": "blocks.9.conv1.bias",
+            "first_stage_model.decoder.up.3.block.2.conv1.weight": "blocks.9.conv1.weight",
+            "first_stage_model.decoder.up.3.block.2.conv2.bias": "blocks.9.conv2.bias",
+            "first_stage_model.decoder.up.3.block.2.conv2.weight": "blocks.9.conv2.weight",
+            "first_stage_model.decoder.up.3.block.2.mix_factor": "blocks.10.mix_factor",
+            "first_stage_model.decoder.up.3.block.2.norm1.bias": "blocks.9.norm1.bias",
+            "first_stage_model.decoder.up.3.block.2.norm1.weight": "blocks.9.norm1.weight",
+            "first_stage_model.decoder.up.3.block.2.norm2.bias": "blocks.9.norm2.bias",
+            "first_stage_model.decoder.up.3.block.2.norm2.weight": "blocks.9.norm2.weight",
+            "first_stage_model.decoder.up.3.block.2.time_stack.in_layers.0.bias": "blocks.10.norm1.bias",
+            "first_stage_model.decoder.up.3.block.2.time_stack.in_layers.0.weight": "blocks.10.norm1.weight",
+            "first_stage_model.decoder.up.3.block.2.time_stack.in_layers.2.bias": "blocks.10.conv1.bias",
+            "first_stage_model.decoder.up.3.block.2.time_stack.in_layers.2.weight": "blocks.10.conv1.weight",
+            "first_stage_model.decoder.up.3.block.2.time_stack.out_layers.0.bias": "blocks.10.norm2.bias",
+            "first_stage_model.decoder.up.3.block.2.time_stack.out_layers.0.weight": "blocks.10.norm2.weight",
+            "first_stage_model.decoder.up.3.block.2.time_stack.out_layers.3.bias": "blocks.10.conv2.bias",
+            "first_stage_model.decoder.up.3.block.2.time_stack.out_layers.3.weight": "blocks.10.conv2.weight",
+            "first_stage_model.decoder.up.3.upsample.conv.bias": "blocks.11.conv.bias",
+            "first_stage_model.decoder.up.3.upsample.conv.weight": "blocks.11.conv.weight",
+        }
+        state_dict_ = {}
+        for name in state_dict:
+            if name in rename_dict:
+                param = state_dict[name]
+                if "blocks.2.transformer_blocks.0" in rename_dict[name]:
+                    param = param.squeeze()
+                state_dict_[rename_dict[name]] = param
+        return state_dict_
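The new from_civitai() branch maps the Stability-style first_stage_model.decoder.* keys onto this repo's flat blocks.* indices; the mid-block attention projections, stored as 1x1 convolutions in the source checkpoint, are squeezed down to plain linear weights. A hedged sketch of the conversion (the 512-channel mid block is an assumption about the source checkpoint):

import torch

civitai_style = {
    "first_stage_model.decoder.mid.attn_1.q.weight": torch.randn(512, 512, 1, 1),
    "first_stage_model.decoder.mid.attn_1.q.bias": torch.randn(512),
}
converted = SVDVAEDecoderStateDictConverter().from_civitai(civitai_style)
print(converted["blocks.2.transformer_blocks.0.to_q.weight"].shape)  # torch.Size([512, 512])
print(converted["blocks.2.transformer_blocks.0.to_q.bias"].shape)    # torch.Size([512])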