diff --git a/examples/HunyuanVideo/hunyuanvideo_24G.py b/examples/HunyuanVideo/hunyuanvideo_24G.py
index 87cb5f7..e4f8b17 100644
--- a/examples/HunyuanVideo/hunyuanvideo_24G.py
+++ b/examples/HunyuanVideo/hunyuanvideo_24G.py
@@ -11,7 +11,7 @@
 model_manager.load_models(
     [
         "models/HunyuanVideo/transformers/mp_rank_00_model_states.pt"
     ],
-    torch_dtype=torch.bfloat16,
+    torch_dtype=torch.bfloat16, # you can use torch_dtype=torch.float8_e4m3fn to enable quantization.
     device="cpu"
 )
diff --git a/examples/HunyuanVideo/hunyuanvideo_6G.py b/examples/HunyuanVideo/hunyuanvideo_6G.py
index 7d895fc..a20dbd0 100644
--- a/examples/HunyuanVideo/hunyuanvideo_6G.py
+++ b/examples/HunyuanVideo/hunyuanvideo_6G.py
@@ -11,7 +11,7 @@
 model_manager.load_models(
     [
         "models/HunyuanVideo/transformers/mp_rank_00_model_states.pt"
     ],
-    torch_dtype=torch.bfloat16,
+    torch_dtype=torch.bfloat16, # you can use torch_dtype=torch.float8_e4m3fn to enable quantization.
     device="cpu"
 )
diff --git a/examples/HunyuanVideo/hunyuanvideo_80G.py b/examples/HunyuanVideo/hunyuanvideo_80G.py
index b498c94..073ee57 100644
--- a/examples/HunyuanVideo/hunyuanvideo_80G.py
+++ b/examples/HunyuanVideo/hunyuanvideo_80G.py
@@ -11,7 +11,7 @@
 model_manager.load_models(
     [
         "models/HunyuanVideo/transformers/mp_rank_00_model_states.pt"
     ],
-    torch_dtype=torch.bfloat16,
+    torch_dtype=torch.bfloat16, # you can use torch_dtype=torch.float8_e4m3fn to enable quantization.
     device="cuda"
 )