# Example script from the DiffSynth-Studio repository
# (mirror of https://github.com/modelscope/DiffSynth-Studio.git,
# synced 2026-03-18 13:58:15 +00:00).
import torch

# Project imports: the LTX-2 audio+video pipeline, its media-muxing helper,
# a generic video loader, and ModelScope's dataset downloader.
from diffsynth.pipelines.ltx2_audio_video import LTX2AudioVideoPipeline, ModelConfig
from diffsynth.utils.data.media_io_ltx2 import write_video_audio_ltx2
from diffsynth.utils.data import VideoData
from modelscope import dataset_snapshot_download
# Memory-management settings shared by every ModelConfig below: bfloat16
# weights for all four stages, offloaded modules parked on the CPU, and
# onload/preparation/computation placed on the GPU.
vram_config = {
    f"{stage}_dtype": torch.bfloat16
    for stage in ("offload", "onload", "preparing", "computation")
}
vram_config.update(
    offload_device="cpu",
    onload_device="cuda",
    preparing_device="cuda",
    computation_device="cuda",
)
# All repackaged LTX-2 components live in the same ModelScope repo; build
# their configs from a list of weight-file patterns instead of repeating the
# ModelConfig call six times. Order matches the original enumeration.
_repackage_files = [
    "transformer.safetensors",
    "text_encoder_post_modules.safetensors",
    "video_vae_decoder.safetensors",
    "audio_vae_decoder.safetensors",
    "audio_vocoder.safetensors",
    "video_vae_encoder.safetensors",
]
pipe = LTX2AudioVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        # Gemma-3 text encoder (unquantized QAT checkpoint).
        ModelConfig(model_id="google/gemma-3-12b-it-qat-q4_0-unquantized", origin_file_pattern="model-*.safetensors", **vram_config),
    ]
    + [
        ModelConfig(model_id="DiffSynth-Studio/LTX-2-Repackage", origin_file_pattern=pattern, **vram_config)
        for pattern in _repackage_files
    ]
    + [
        # x2 spatial upscaler used by the second stage, from the upstream
        # Lightricks release.
        ModelConfig(model_id="Lightricks/LTX-2", origin_file_pattern="ltx-2-spatial-upscaler-x2-1.0.safetensors", **vram_config),
    ],
    tokenizer_config=ModelConfig(model_id="google/gemma-3-12b-it-qat-q4_0-unquantized"),
    stage2_lora_config=ModelConfig(model_id="Lightricks/LTX-2", origin_file_pattern="ltx-2-19b-distilled-lora-384.safetensors"),
)
# Attach the IC-LoRA "union control" adapter to the DiT (used here with a
# depth control video), then fetch the example dataset containing that video.
pipe.load_lora(pipe.dit, ModelConfig(model_id="Lightricks/LTX-2-19b-IC-LoRA-Union-Control", origin_file_pattern="ltx-2-19b-ic-lora-union-control-ref0.5.safetensors"))
dataset_snapshot_download("DiffSynth-Studio/example_video_dataset", allow_file_pattern="ltx2/*", local_dir="data/example_video_dataset")
# Prompt with separate visual and audio descriptions, following the
# [VISUAL]/[SOUNDS] convention used by this pipeline's examples.
prompt = "[VISUAL]:Two cute orange cats, wearing boxing gloves, stand on a boxing ring and fight each other. [SOUNDS]:the sound of two cats boxing"

# Quality/artifact negative prompt covering both video and audio failure
# modes; assembled from fragments purely for readability — the joined string
# is identical to the original single literal.
_negative_fragments = (
    "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, ",
    "grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, ",
    "deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, ",
    "wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of ",
    "field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent ",
    "lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny ",
    "valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, ",
    "mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, ",
    "off-sync audio, incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward ",
    "pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, ",
    "inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.",
)
negative_prompt = "".join(_negative_fragments)
# Final output geometry: 1024x1536 spatial resolution, 121 frames. The "* 2"
# reflects the x2 spatial upscaling applied in stage 2.
height, width, num_frames = 512 * 2, 768 * 2, 121
# Downsample factor applied to the in-context (reference) video.
ref_scale_factor = 2
frame_rate = 24
# The frame rate should ideally match the reference video's frame rate.
# The reference video is loaded at the stage-1 generation resolution divided
# by ref_scale_factor; stage 1 runs at half the final resolution, hence the
# extra "// 2" here.
input_video = VideoData("data/example_video_dataset/ltx2/depth_video.mp4", height=height // ref_scale_factor // 2, width=width // ref_scale_factor // 2)
# Decode to raw frame data, which is what the pipeline consumes below.
input_video = input_video.raw_data()
# Run the two-stage pipeline with the depth video as an in-context control
# input, then mux the generated frames and waveform into one .mp4.
video, audio = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    seed=43,
    height=height,
    width=width,
    num_frames=num_frames,
    frame_rate=frame_rate,
    # Control video(s) consumed via the IC-LoRA loaded above.
    in_context_videos=[input_video],
    in_context_downsample_factor=ref_scale_factor,
    # Tiled processing — presumably to reduce peak VRAM; see pipeline docs.
    tiled=True,
    use_two_stage_pipeline=True,
    # NOTE(review): keyword is spelled "state_two" upstream — presumably
    # "stage two" (drop the control LoRA before the stage-2 pass); verify
    # against LTX2AudioVideoPipeline's signature before renaming anything.
    clear_lora_before_state_two=True,
)
write_video_audio_ltx2(
    video=video,
    audio=audio,
    output_path='ltx2_twostage_iclora.mp4',
    fps=frame_rate,
    # assumes the audio decoder/vocoder emits 24 kHz audio — TODO confirm
    audio_sample_rate=24000,
)