# Mirror of https://github.com/modelscope/DiffSynth-Studio.git
# Synced 2026-03-19 14:58:12 +00:00 (79 lines, 4.8 KiB, Python)
def WanVideoMotStateDictConverter(state_dict):
    """Convert Mot ("_mot_ref") parameters of a diffusers-style Wan video
    state dict into the native DiffSynth naming scheme.

    Only keys containing the ``"_mot_ref"`` marker are kept; every other
    entry is dropped.  The marker is stripped, the remaining diffusers-style
    key is translated via ``rename_dict`` (the block-0 entries serve as the
    template for every transformer block), and the sparse Mot block indices
    (0, 4, 8, ...) are compacted to consecutive indices (0, 1, 2, ...).

    Args:
        state_dict: Mapping of parameter name -> parameter value.  Values
            are passed through untouched (presumably tensors — any type
            works).

    Returns:
        dict: New state dict with converted names.  "_mot_ref" keys whose
        translated form is not covered by ``rename_dict`` are silently
        skipped.
    """
    rename_dict = {
        "blocks.0.attn1.norm_k.weight": "blocks.0.self_attn.norm_k.weight",
        "blocks.0.attn1.norm_q.weight": "blocks.0.self_attn.norm_q.weight",
        "blocks.0.attn1.to_k.bias": "blocks.0.self_attn.k.bias",
        "blocks.0.attn1.to_k.weight": "blocks.0.self_attn.k.weight",
        "blocks.0.attn1.to_out.0.bias": "blocks.0.self_attn.o.bias",
        "blocks.0.attn1.to_out.0.weight": "blocks.0.self_attn.o.weight",
        "blocks.0.attn1.to_q.bias": "blocks.0.self_attn.q.bias",
        "blocks.0.attn1.to_q.weight": "blocks.0.self_attn.q.weight",
        "blocks.0.attn1.to_v.bias": "blocks.0.self_attn.v.bias",
        "blocks.0.attn1.to_v.weight": "blocks.0.self_attn.v.weight",
        "blocks.0.attn2.norm_k.weight": "blocks.0.cross_attn.norm_k.weight",
        "blocks.0.attn2.norm_q.weight": "blocks.0.cross_attn.norm_q.weight",
        "blocks.0.attn2.to_k.bias": "blocks.0.cross_attn.k.bias",
        "blocks.0.attn2.to_k.weight": "blocks.0.cross_attn.k.weight",
        "blocks.0.attn2.to_out.0.bias": "blocks.0.cross_attn.o.bias",
        "blocks.0.attn2.to_out.0.weight": "blocks.0.cross_attn.o.weight",
        "blocks.0.attn2.to_q.bias": "blocks.0.cross_attn.q.bias",
        "blocks.0.attn2.to_q.weight": "blocks.0.cross_attn.q.weight",
        "blocks.0.attn2.to_v.bias": "blocks.0.cross_attn.v.bias",
        "blocks.0.attn2.to_v.weight": "blocks.0.cross_attn.v.weight",
        "blocks.0.attn2.add_k_proj.bias": "blocks.0.cross_attn.k_img.bias",
        "blocks.0.attn2.add_k_proj.weight": "blocks.0.cross_attn.k_img.weight",
        "blocks.0.attn2.add_v_proj.bias": "blocks.0.cross_attn.v_img.bias",
        "blocks.0.attn2.add_v_proj.weight": "blocks.0.cross_attn.v_img.weight",
        "blocks.0.attn2.norm_added_k.weight": "blocks.0.cross_attn.norm_k_img.weight",
        "blocks.0.ffn.net.0.proj.bias": "blocks.0.ffn.0.bias",
        "blocks.0.ffn.net.0.proj.weight": "blocks.0.ffn.0.weight",
        "blocks.0.ffn.net.2.bias": "blocks.0.ffn.2.bias",
        "blocks.0.ffn.net.2.weight": "blocks.0.ffn.2.weight",
        "blocks.0.norm2.bias": "blocks.0.norm3.bias",
        "blocks.0.norm2.weight": "blocks.0.norm3.weight",
        "blocks.0.scale_shift_table": "blocks.0.modulation",
        "condition_embedder.text_embedder.linear_1.bias": "text_embedding.0.bias",
        "condition_embedder.text_embedder.linear_1.weight": "text_embedding.0.weight",
        "condition_embedder.text_embedder.linear_2.bias": "text_embedding.2.bias",
        "condition_embedder.text_embedder.linear_2.weight": "text_embedding.2.weight",
        "condition_embedder.time_embedder.linear_1.bias": "time_embedding.0.bias",
        "condition_embedder.time_embedder.linear_1.weight": "time_embedding.0.weight",
        "condition_embedder.time_embedder.linear_2.bias": "time_embedding.2.bias",
        "condition_embedder.time_embedder.linear_2.weight": "time_embedding.2.weight",
        "condition_embedder.time_proj.bias": "time_projection.1.bias",
        "condition_embedder.time_proj.weight": "time_projection.1.weight",
        "condition_embedder.image_embedder.ff.net.0.proj.bias": "img_emb.proj.1.bias",
        "condition_embedder.image_embedder.ff.net.0.proj.weight": "img_emb.proj.1.weight",
        "condition_embedder.image_embedder.ff.net.2.bias": "img_emb.proj.3.bias",
        "condition_embedder.image_embedder.ff.net.2.weight": "img_emb.proj.3.weight",
        "condition_embedder.image_embedder.norm1.bias": "img_emb.proj.0.bias",
        "condition_embedder.image_embedder.norm1.weight": "img_emb.proj.0.weight",
        "condition_embedder.image_embedder.norm2.bias": "img_emb.proj.4.bias",
        "condition_embedder.image_embedder.norm2.weight": "img_emb.proj.4.weight",
        "patch_embedding.bias": "patch_embedding.bias",
        "patch_embedding.weight": "patch_embedding.weight",
        "scale_shift_table": "head.modulation",
        "proj_out.bias": "head.head.bias",
        "proj_out.weight": "head.head.weight",
    }
    # Mot reference blocks live at these sparse layer indices; compact them
    # to consecutive positions (0 -> 0, 4 -> 1, 8 -> 2, ...).
    mot_layers = (0, 4, 8, 12, 16, 20, 24, 28, 32, 36)
    mot_layers_mapping = {layer: new_idx for new_idx, layer in enumerate(mot_layers)}
    state_dict_ = {}
    for name, param in state_dict.items():
        if "_mot_ref" not in name:
            continue
        name = name.replace("_mot_ref", "")
        if name in rename_dict:
            state_dict_[rename_dict[name]] = param
        else:
            parts = name.split(".")
            # Guard the length so short unmatched keys (e.g. "foo") cannot
            # raise IndexError on parts[1].
            if len(parts) > 1 and parts[1].isdigit():
                block_id = int(parts[1])
                # Use the block-0 entries of rename_dict as the template.
                template = ".".join(parts[:1] + ["0"] + parts[2:])
                if template in rename_dict:
                    renamed = rename_dict[template].split(".")
                    # Substitute only the index segment.  (A whole-string
                    # str.replace on the digit could also corrupt digits
                    # appearing elsewhere in the key.)
                    renamed[1] = str(mot_layers_mapping[block_id])
                    state_dict_[".".join(renamed)] = param
    return state_dict_