mirror of https://github.com/modelscope/DiffSynth-Studio.git
add diffutoon
README.md (22 changed lines)
@@ -54,25 +54,37 @@ Generate images with Stable Diffusion XL Turbo. You can see `examples/sdxl_turbo
-### Example 4: Toon Shading
+### Example 4: Toon Shading (Diffutoon)
 
-A very interesting example. Please see `examples/sd_toon_shading.py` for more details.
+This example is implemented based on [Diffutoon](https://arxiv.org/abs/2401.16224). This approach is adept at rendering high-resolution videos with rapid motion. You can easily modify the parameters in the config dict.
+
+https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/b54c05c5-d747-4709-be5e-b39af82404dd
+
+### Example 5: Toon Shading with Editing Signals (Diffutoon)
+
+Coming soon.
+
+https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/20528af5-5100-474a-8cdc-440b9efdd86c
+
+### Example 6: Toon Shading (in native Python code)
+
+This example is provided for developers. If you don't want to use the config to manage parameters, see `examples/sd_toon_shading.py` to learn how to use the pipeline in native Python code.
 
 https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/607c199b-6140-410b-a111-3e4ffb01142c
 
-### Example 5: Text to Video
+### Example 7: Text to Video
 
 Given a prompt, DiffSynth Studio can generate a video using a Stable Diffusion model and an AnimateDiff model. We can break the limitation on the number of frames! See `examples/sd_text_to_video.py`.
 
 https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/8f556355-4079-4445-9b48-e9da77699437
 
-### Example 6: Video Stylization
+### Example 8: Video Stylization
 
 We provide an example for video stylization. In this pipeline, the rendered video is completely different from the original video, so we need a powerful deflickering algorithm. We use FastBlend to implement the deflickering module. Please see `examples/sd_video_rerender.py` for more details.
 
 https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/59fb2f7b-8de0-4481-b79f-0c3a7361a1ea
 
-### Example 7: Prompt Processing
+### Example 9: Prompt Processing
 
 If you are not a native English user, we provide a translation service for you. Our prompter can translate prompts in other languages into English and refine them using "BeautifulPrompt" models. Please see `examples/sd_prompt_refining.py` for more details.
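Everything in the new example file below is driven by a single nested config dict. As a minimal sketch of what "modify the parameters in the config dict" means in practice, here is a hypothetical helper that retargets that config at a different video and resolution before running it. The helper name and the `my_video.mp4` path are illustrative, not part of the library; the dict keys match `examples/diffutoon_toon_shading.py` as added below.

```python
from diffsynth import SDVideoPipelineRunner

def run_diffutoon(config, video_file, height=1536, width=1536):
    # Retarget the input video and resolution. There is one entry in
    # config["data"]["controlnet_frames"] per ControlNet unit, so update
    # all frame sources together to keep them consistent.
    for frames in [config["data"]["input_frames"]] + config["data"]["controlnet_frames"]:
        frames["video_file"] = video_file
        frames["height"] = height
        frames["width"] = width
    SDVideoPipelineRunner().run(config)

# Usage (config is the dict defined in examples/diffutoon_toon_shading.py):
# run_diffutoon(config, "my_video.mp4")
```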
examples/diffutoon_toon_shading.py (new file, 94 lines)
@@ -0,0 +1,94 @@
from diffsynth import SDVideoPipelineRunner

# Download models
# `models/stable_diffusion/aingdiffusion_v12.safetensors`: [link](https://civitai.com/api/download/models/229575)
# `models/AnimateDiff/mm_sd_v15_v2.ckpt`: [link](https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15_v2.ckpt)
# `models/ControlNet/control_v11p_sd15_lineart.pth`: [link](https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart.pth)
# `models/ControlNet/control_v11f1e_sd15_tile.pth`: [link](https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile.pth)
# `models/Annotators/sk_model.pth`: [link](https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model.pth)
# `models/Annotators/sk_model2.pth`: [link](https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model2.pth)
# `models/textual_inversion/verybadimagenegative_v1.3.pt`: [link](https://civitai.com/api/download/models/25820?type=Model&format=PickleTensor&size=full&fp=fp16)

# The original video in the example is https://www.bilibili.com/video/BV1iG411a7sQ/.
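# The config has three sections: "models" (which weights to load and which
# ControlNet units to attach), "data" (frame input/output), and "pipeline"
# (sampling hyperparameters).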
config = {
    "models": {
        "model_list": [
            "models/stable_diffusion/aingdiffusion_v12.safetensors",
            "models/AnimateDiff/mm_sd_v15_v2.ckpt",
            "models/ControlNet/control_v11f1e_sd15_tile.pth",
            "models/ControlNet/control_v11p_sd15_lineart.pth"
        ],
        "textual_inversion_folder": "models/textual_inversion",
        "device": "cuda",
        "lora_alphas": [],
        "controlnet_units": [
            {
                "processor_id": "tile",
                "model_path": "models/ControlNet/control_v11f1e_sd15_tile.pth",
                "scale": 0.5
            },
            {
                "processor_id": "lineart",
                "model_path": "models/ControlNet/control_v11p_sd15_lineart.pth",
                "scale": 0.5
            }
        ]
    },
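    # Frame sources: "input_frames" is the video to be re-rendered; each entry
    # in "controlnet_frames" feeds one ControlNet unit above, in order
    # (tile, then lineart). Here all three read the same clip.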
"data": {
|
||||||
|
"input_frames": {
|
||||||
|
"video_file": "data/examples/diffutoon/input_video.mp4",
|
||||||
|
"image_folder": None,
|
||||||
|
"height": 1536,
|
||||||
|
"width": 1536,
|
||||||
|
"start_frame_id": 0,
|
||||||
|
"end_frame_id": 30
|
||||||
|
},
|
||||||
|
"controlnet_frames": [
|
||||||
|
{
|
||||||
|
"video_file": "data/examples/diffutoon/input_video.mp4",
|
||||||
|
"image_folder": None,
|
||||||
|
"height": 1536,
|
||||||
|
"width": 1536,
|
||||||
|
"start_frame_id": 0,
|
||||||
|
"end_frame_id": 30
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"video_file": "data/examples/diffutoon/input_video.mp4",
|
||||||
|
"image_folder": None,
|
||||||
|
"height": 1536,
|
||||||
|
"width": 1536,
|
||||||
|
"start_frame_id": 0,
|
||||||
|
"end_frame_id": 30
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"output_folder": "data/examples/diffutoon/output",
|
||||||
|
"fps": 30
|
||||||
|
},
|
||||||
|
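    # Sampling settings. "animatediff_batch_size" and "animatediff_stride"
    # appear to define an overlapping sliding window over the frames, which is
    # how long clips are processed (see the note after this file).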
"pipeline": {
|
||||||
|
"seed": 0,
|
||||||
|
"pipeline_inputs": {
|
||||||
|
"prompt": "best quality, perfect anime illustration, light, a girl is dancing, smile, solo",
|
||||||
|
"negative_prompt": "verybadimagenegative_v1.3",
|
||||||
|
"cfg_scale": 7.0,
|
||||||
|
"clip_skip": 2,
|
||||||
|
"denoising_strength": 1.0,
|
||||||
|
"num_inference_steps": 10,
|
||||||
|
"animatediff_batch_size": 16,
|
||||||
|
"animatediff_stride": 8,
|
||||||
|
"unet_batch_size": 1,
|
||||||
|
"controlnet_batch_size": 1,
|
||||||
|
"cross_frame_attention": False,
|
||||||
|
# The following parameters will be overwritten. You don't need to modify them.
|
||||||
|
"input_frames": [],
|
||||||
|
"num_frames": 30,
|
||||||
|
"width": 1536,
|
||||||
|
"height": 1536,
|
||||||
|
"controlnet_frames": []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
runner = SDVideoPipelineRunner()
|
||||||
|
runner.run(config)
|
||||||
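A note on the window parameters above: with `animatediff_batch_size: 16` and `animatediff_stride: 8`, the motion module would see overlapping 16-frame windows that advance 8 frames at a time, which is what allows clips longer than one batch. The sketch below only illustrates that tiling; the actual scheduling is internal to DiffSynth's video pipeline.

```python
# Illustration only: overlapping windows of batch_size frames, advancing by
# stride, covering a 30-frame clip. Not the library's real scheduler.
def sliding_windows(num_frames, batch_size, stride):
    windows = []
    for start in range(0, num_frames, stride):
        windows.append((start, min(start + batch_size, num_frames)))
        if start + batch_size >= num_frames:
            break
    return windows

print(sliding_windows(30, 16, 8))  # [(0, 16), (8, 24), (16, 30)]
```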