# Example: text + camera-control-LoRA -> audio-video generation with LTX-2.
# (Script mirrored from https://github.com/modelscope/DiffSynth-Studio.git;
# this copy removes the web-scrape line-gutter artifacts that made the file
# invalid Python.)
import torch

from diffsynth.pipelines.ltx2_audio_video import LTX2AudioVideoPipeline, ModelConfig
from diffsynth.utils.data.media_io_ltx2 import write_video_audio_ltx2


# VRAM-saving configuration shared by every model component: weights are kept
# offloaded on CPU in float8 (e5m2) and only upcast to bfloat16 on the GPU for
# computation, trading transfer/cast time for a much smaller VRAM footprint.
vram_config = {
    "offload_dtype": torch.float8_e5m2,
    "offload_device": "cpu",
    "onload_dtype": torch.float8_e5m2,
    "onload_device": "cpu",
    "preparing_dtype": torch.float8_e5m2,
    "preparing_device": "cuda",
    "computation_dtype": torch.bfloat16,
    "computation_device": "cuda",
}

# Assemble the full LTX-2 audio-video pipeline: Gemma-3 text encoder, the DiT
# transformer, post-encoder modules, video/audio VAE decoders, vocoder, video
# VAE encoder, and the x2 spatial upscaler used by the two-stage pipeline.
pipe = LTX2AudioVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="google/gemma-3-12b-it-qat-q4_0-unquantized", origin_file_pattern="model-*.safetensors", **vram_config),
        ModelConfig(model_id="DiffSynth-Studio/LTX-2-Repackage", origin_file_pattern="transformer.safetensors", **vram_config),
        ModelConfig(model_id="DiffSynth-Studio/LTX-2-Repackage", origin_file_pattern="text_encoder_post_modules.safetensors", **vram_config),
        ModelConfig(model_id="DiffSynth-Studio/LTX-2-Repackage", origin_file_pattern="video_vae_decoder.safetensors", **vram_config),
        ModelConfig(model_id="DiffSynth-Studio/LTX-2-Repackage", origin_file_pattern="audio_vae_decoder.safetensors", **vram_config),
        ModelConfig(model_id="DiffSynth-Studio/LTX-2-Repackage", origin_file_pattern="audio_vocoder.safetensors", **vram_config),
        ModelConfig(model_id="DiffSynth-Studio/LTX-2-Repackage", origin_file_pattern="video_vae_encoder.safetensors", **vram_config),
        ModelConfig(model_id="Lightricks/LTX-2", origin_file_pattern="ltx-2-spatial-upscaler-x2-1.0.safetensors", **vram_config),
    ],
    tokenizer_config=ModelConfig(model_id="google/gemma-3-12b-it-qat-q4_0-unquantized"),
    # Distilled LoRA applied during the second (upscaling) stage.
    stage2_lora_config=ModelConfig(model_id="Lightricks/LTX-2", origin_file_pattern="ltx-2-19b-distilled-lora-384.safetensors"),
)

# Attach the static-camera control LoRA to the DiT so generated shots keep a
# fixed camera position.
pipe.load_lora(
    pipe.dit,
    ModelConfig(model_id="Lightricks/LTX-2-19b-LoRA-Camera-Control-Static", origin_file_pattern="ltx-2-19b-lora-camera-control-static.safetensors"),
)

prompt = "A beautiful sunset over the ocean."
# Broad quality/artifact negative prompt covering both visual and audio flaws.
negative_prompt = (
    "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, "
    "grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, "
    "deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, "
    "wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of "
    "field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent "
    "lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny "
    "valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, "
    "mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, "
    "off-sync audio, incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward "
    "pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, "
    "inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts."
)

# Final output resolution (2x the base 512x768, produced via the two-stage
# upscaling path) and frame count.
height, width, num_frames = 512 * 2, 768 * 2, 121
video, audio = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    seed=43,
    height=height,
    width=width,
    num_frames=num_frames,
    tiled=True,  # tiled VAE decoding to bound VRAM at this resolution
    use_two_stage_pipeline=True,  # base generation + x2 spatial upscaler stage
)

# Mux the generated frames and waveform into a single MP4.
write_video_audio_ltx2(
    video=video,
    audio=audio,
    output_path='ltx2_camera_static.mp4',
    fps=24,
    audio_sample_rate=24000,
)