diff --git a/diffsynth/configs/model_config.py b/diffsynth/configs/model_config.py index f736c1b..5066a02 100644 --- a/diffsynth/configs/model_config.py +++ b/diffsynth/configs/model_config.py @@ -657,6 +657,25 @@ preset_models_on_modelscope = { "models/HunyuanVideo/transformers/mp_rank_00_model_states.pt" ], }, + "HunyuanVideo-fp8":{ + "file_list": [ + ("AI-ModelScope/clip-vit-large-patch14", "model.safetensors", "models/HunyuanVideo/text_encoder"), + ("DiffSynth-Studio/HunyuanVideo_MLLM_text_encoder", "model-00001-of-00004.safetensors", "models/HunyuanVideo/text_encoder_2"), + ("DiffSynth-Studio/HunyuanVideo_MLLM_text_encoder", "model-00002-of-00004.safetensors", "models/HunyuanVideo/text_encoder_2"), + ("DiffSynth-Studio/HunyuanVideo_MLLM_text_encoder", "model-00003-of-00004.safetensors", "models/HunyuanVideo/text_encoder_2"), + ("DiffSynth-Studio/HunyuanVideo_MLLM_text_encoder", "model-00004-of-00004.safetensors", "models/HunyuanVideo/text_encoder_2"), + ("DiffSynth-Studio/HunyuanVideo_MLLM_text_encoder", "config.json", "models/HunyuanVideo/text_encoder_2"), + ("DiffSynth-Studio/HunyuanVideo_MLLM_text_encoder", "model.safetensors.index.json", "models/HunyuanVideo/text_encoder_2"), + ("AI-ModelScope/HunyuanVideo", "hunyuan-video-t2v-720p/vae/pytorch_model.pt", "models/HunyuanVideo/vae"), + ("AI-ModelScope/HunyuanVideo", "hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states_fp8.pt", "models/HunyuanVideo/transformers") + ], + "load_path": [ + "models/HunyuanVideo/text_encoder/model.safetensors", + "models/HunyuanVideo/text_encoder_2", + "models/HunyuanVideo/vae/pytorch_model.pt", + "models/HunyuanVideo/transformers/mp_rank_00_model_states_fp8.pt" + ], + }, } Preset_model_id: TypeAlias = Literal[ "HunyuanDiT", @@ -713,4 +732,5 @@ Preset_model_id: TypeAlias = Literal[ "StableDiffusion3.5-large", "StableDiffusion3.5-medium", "HunyuanVideo", + "HunyuanVideo-fp8", ]