From 9cc1697d4d65338937f4401baaefb0ec28a0aed5 Mon Sep 17 00:00:00 2001 From: feng0w0 Date: Tue, 30 Dec 2025 15:57:13 +0800 Subject: [PATCH 1/9] Docs:Supplement NPU environment installation document --- docs/en/Pipeline_Usage/Setup.md | 13 +++++++++---- docs/zh/Pipeline_Usage/Setup.md | 13 +++++++++---- pyproject.toml | 12 ++++++++++++ 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/docs/en/Pipeline_Usage/Setup.md b/docs/en/Pipeline_Usage/Setup.md index c95e5b7..31fb771 100644 --- a/docs/en/Pipeline_Usage/Setup.md +++ b/docs/en/Pipeline_Usage/Setup.md @@ -30,11 +30,16 @@ pip install torch torchvision --index-url https://download.pytorch.org/whl/rocm6 * **Ascend NPU** -Ascend NPU support is provided via the `torch-npu` package. Taking version `2.1.0.post17` (as of the article update date: December 15, 2025) as an example, run the following command: +1. Install [CANN](https://www.hiascend.com/document/detail/zh/canncommercial/83RC1/softwareinst/instg/instg_quick.html?Mode=PmIns&InstallType=local&OS=openEuler&Software=cannToolKit) through official documentation. -```shell -pip install torch-npu==2.1.0.post17 -``` +2. Install from source + ```shell + git clone https://github.com/modelscope/DiffSynth-Studio.git + cd DiffSynth-Studio + # aarch64/ARM + pip install -e .[npu_aarch64] --extra-index-url "https://download.pytorch.org/whl/cpu" + # x86 + pip install -e .[npu] When using Ascend NPU, please replace `"cuda"` with `"npu"` in your Python code. For details, see [NPU Support](/docs/en/Pipeline_Usage/GPU_support.md#ascend-npu). diff --git a/docs/zh/Pipeline_Usage/Setup.md b/docs/zh/Pipeline_Usage/Setup.md index 0c99840..e4a022c 100644 --- a/docs/zh/Pipeline_Usage/Setup.md +++ b/docs/zh/Pipeline_Usage/Setup.md @@ -30,11 +30,16 @@ pip install torch torchvision --index-url https://download.pytorch.org/whl/rocm6 * Ascend NPU -Ascend NPU 通过 `torch-npu` 包提供支持,以 `2.1.0.post17` 版本(本文更新于 2025 年 12 月 15 日)为例,请运行以下命令 +1. 
通过官方文档安装[CANN](https://www.hiascend.com/document/detail/zh/canncommercial/83RC1/softwareinst/instg/instg_quick.html?Mode=PmIns&InstallType=local&OS=openEuler&Software=cannToolKit) -```shell -pip install torch-npu==2.1.0.post17 -``` +2. 从源码安装 + ```shell + git clone https://github.com/modelscope/DiffSynth-Studio.git + cd DiffSynth-Studio + # aarch64/ARM + pip install -e .[npu_aarch64] --extra-index-url "https://download.pytorch.org/whl/cpu" + # x86 + pip install -e .[npu] 使用 Ascend NPU 时,请将 Python 代码中的 `"cuda"` 改为 `"npu"`,详见[NPU 支持](/docs/zh/Pipeline_Usage/GPU_support.md#ascend-npu)。 diff --git a/pyproject.toml b/pyproject.toml index cb00b4d..2522cff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,5 +34,17 @@ classifiers = [ [tool.setuptools.packages.find] +[project.optional-dependencies] +npu_aarch64 = [ + "torch==2.7.1", + "torch-npu==2.7.1", + "torchvision==0.22.1" +] +npu = [ + "torch==2.7.1+cpu", + "torch-npu==2.7.1", + "torchvision==0.22.1+cpu" +] + [tool.setuptools] include-package-data = true From 507e7e5d361f790589499ed1eca6bbf265551313 Mon Sep 17 00:00:00 2001 From: feng0w0 Date: Tue, 30 Dec 2025 19:58:47 +0800 Subject: [PATCH 2/9] Docs:Supplement NPU training script samples and documentation instruction --- diffsynth/core/device/__init__.py | 3 +- diffsynth/core/vram/layers.py | 4 +- diffsynth/diffusion/base_pipeline.py | 3 +- docs/en/Pipeline_Usage/GPU_support.md | 30 ++++++++++++- docs/zh/Pipeline_Usage/GPU_support.md | 32 +++++++++++-- .../npu_scripts/FLUX.1-Kontext-dev-NPU.sh | 17 +++++++ .../special/npu_scripts/FLUX.1-dev-NPU.sh | 15 +++++++ .../special/npu_scripts/FLUX.2-dev-NPU.sh | 35 +++++++++++++++ .../Qwen-Image-Edit-2509-LoRA-NPU.sh | 38 ++++++++++++++++ .../npu_scripts/Qwen-Image-LoRA-NPU.sh | 38 ++++++++++++++++ .../special/npu_scripts/Wan2.1-T2V-14B-NPU.sh | 16 +++++++ .../npu_scripts/Wan2.2-T2V-A14B-NPU.sh | 38 ++++++++++++++++ .../npu_scripts/Wan2.2-VACE-Fun-A14B-NPU.sh | 45 +++++++++++++++++++ 
.../special/npu_scripts/Z-Image-Turbo-NPU.sh | 17 +++++++ 14 files changed, 322 insertions(+), 9 deletions(-) create mode 100644 examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-NPU.sh create mode 100644 examples/flux/model_training/special/npu_scripts/FLUX.1-dev-NPU.sh create mode 100644 examples/flux2/model_training/special/npu_scripts/FLUX.2-dev-NPU.sh create mode 100644 examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-Edit-2509-LoRA-NPU.sh create mode 100644 examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-LoRA-NPU.sh create mode 100644 examples/wanvideo/model_training/special/npu_scripts/Wan2.1-T2V-14B-NPU.sh create mode 100644 examples/wanvideo/model_training/special/npu_scripts/Wan2.2-T2V-A14B-NPU.sh create mode 100644 examples/wanvideo/model_training/special/npu_scripts/Wan2.2-VACE-Fun-A14B-NPU.sh create mode 100644 examples/z_image/model_training/special/npu_scripts/Z-Image-Turbo-NPU.sh diff --git a/diffsynth/core/device/__init__.py b/diffsynth/core/device/__init__.py index e53364f..8373471 100644 --- a/diffsynth/core/device/__init__.py +++ b/diffsynth/core/device/__init__.py @@ -1 +1,2 @@ -from .npu_compatible_device import parse_device_type, parse_nccl_backend, get_available_device_type \ No newline at end of file +from .npu_compatible_device import parse_device_type, parse_nccl_backend, get_available_device_type, get_device_name +from .npu_compatible_device import IS_NPU_AVAILABLE diff --git a/diffsynth/core/vram/layers.py b/diffsynth/core/vram/layers.py index 751792d..0f99b0d 100644 --- a/diffsynth/core/vram/layers.py +++ b/diffsynth/core/vram/layers.py @@ -2,7 +2,7 @@ import torch, copy from typing import Union from .initialization import skip_model_initialization from .disk_map import DiskMap -from ..device import parse_device_type +from ..device import parse_device_type, get_device_name, IS_NPU_AVAILABLE class AutoTorchModule(torch.nn.Module): @@ -63,7 +63,7 @@ class 
AutoTorchModule(torch.nn.Module): return r def check_free_vram(self): - device = self.computation_device if self.computation_device != "npu" else "npu:0" + device = self.computation_device if not IS_NPU_AVAILABLE else get_device_name() gpu_mem_state = getattr(torch, self.computation_device_type).mem_get_info(device) used_memory = (gpu_mem_state[1] - gpu_mem_state[0]) / (1024**3) return used_memory < self.vram_limit diff --git a/diffsynth/diffusion/base_pipeline.py b/diffsynth/diffusion/base_pipeline.py index fa355a1..b4e79c0 100644 --- a/diffsynth/diffusion/base_pipeline.py +++ b/diffsynth/diffusion/base_pipeline.py @@ -7,6 +7,7 @@ from ..core import AutoTorchModule, AutoWrappedLinear, load_state_dict, ModelCon from ..utils.lora import GeneralLoRALoader from ..models.model_loader import ModelPool from ..utils.controlnet import ControlNetInput +from ..core.device import get_device_name, IS_NPU_AVAILABLE class PipelineUnit: @@ -177,7 +178,7 @@ class BasePipeline(torch.nn.Module): def get_vram(self): - device = self.device if self.device != "npu" else "npu:0" + device = self.device if not IS_NPU_AVAILABLE else get_device_name() return getattr(torch, self.device_type).mem_get_info(device)[1] / (1024 ** 3) def get_module(self, model, name): diff --git a/docs/en/Pipeline_Usage/GPU_support.md b/docs/en/Pipeline_Usage/GPU_support.md index 789d26a..d6ecdd0 100644 --- a/docs/en/Pipeline_Usage/GPU_support.md +++ b/docs/en/Pipeline_Usage/GPU_support.md @@ -13,7 +13,7 @@ All sample code provided by this project supports NVIDIA GPUs by default, requir AMD provides PyTorch packages based on ROCm, so most models can run without code changes. A small number of models may not be compatible due to their reliance on CUDA-specific instructions. ## Ascend NPU - +### Inference When using Ascend NPU, you need to replace `"cuda"` with `"npu"` in your code. 
For example, here is the inference code for **Wan2.1-T2V-1.3B**, modified for Ascend NPU: @@ -22,6 +22,7 @@ For example, here is the inference code for **Wan2.1-T2V-1.3B**, modified for As import torch from diffsynth.utils.data import save_video, VideoData from diffsynth.pipelines.wan_video import WanVideoPipeline, ModelConfig +from diffsynth.core.device.npu_compatible_device import get_device_name vram_config = { "offload_dtype": "disk", @@ -46,7 +47,7 @@ pipe = WanVideoPipeline.from_pretrained( ], tokenizer_config=ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/umt5-xxl/"), - vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 2, -+ vram_limit=torch.npu.mem_get_info("npu:0")[1] / (1024 ** 3) - 2, ++ vram_limit=torch.npu.mem_get_info(get_device_name())[1] / (1024 ** 3) - 2, ) video = pipe( @@ -56,3 +57,28 @@ video = pipe( ) save_video(video, "video.mp4", fps=15, quality=5) ``` + +### Training +NPU startup script samples have been added for each type of model,the scripts are stored in the `examples/xxx/special/npu_scripts`, for examples `examples/wanvideo/model_training/special/npu_scripts/Wan2.2-T2V-A14B-NPU.sh`. + +In the NPU training scripts, NPU specific environment variables that can optimize performance have been added, and relevant parameters have been enabled for specific models. + +#### Environment variables +```shell +export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True +``` +`expandable_segments:`: Enable the memory pool expansion segment function, which is the virtual memory feature. 
+ +```shell +export CPU_AFFINITY_CONF=1 +``` +Set 0 or not set: indicates not enabling the binding function + +1: Indicates enabling coarse-grained kernel binding + +2: Indicates enabling fine-grained kernel binding + +#### Parameters for specific models +| Model | Parameter | Note | +|-----------|---------------------------|-------------------| +| Wan 14B系列 | --initialize_model_on_cpu | The 14B model needs to be initialized on the CPU | \ No newline at end of file diff --git a/docs/zh/Pipeline_Usage/GPU_support.md b/docs/zh/Pipeline_Usage/GPU_support.md index 56d78f7..b955f56 100644 --- a/docs/zh/Pipeline_Usage/GPU_support.md +++ b/docs/zh/Pipeline_Usage/GPU_support.md @@ -13,7 +13,7 @@ AMD 提供了基于 ROCm 的 torch 包,所以大多数模型无需修改代码即可运行,少数模型由于依赖特定的 cuda 指令无法运行。 ## Ascend NPU - +### 推理 使用 Ascend NPU 时,需把代码中的 `"cuda"` 改为 `"npu"`。 例如,Wan2.1-T2V-1.3B 的推理代码: @@ -22,6 +22,7 @@ AMD 提供了基于 ROCm 的 torch 包,所以大多数模型无需修改代码 import torch from diffsynth.utils.data import save_video, VideoData from diffsynth.pipelines.wan_video import WanVideoPipeline, ModelConfig +from diffsynth.core.device.npu_compatible_device import get_device_name vram_config = { "offload_dtype": "disk", @@ -33,7 +34,7 @@ vram_config = { + "preparing_device": "npu", "computation_dtype": torch.bfloat16, - "computation_device": "cuda", -+ "preparing_device": "npu", ++ "computation_device": "npu", } pipe = WanVideoPipeline.from_pretrained( torch_dtype=torch.bfloat16, @@ -46,7 +47,7 @@ pipe = WanVideoPipeline.from_pretrained( ], tokenizer_config=ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/umt5-xxl/"), - vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 2, -+ vram_limit=torch.npu.mem_get_info("npu:0")[1] / (1024 ** 3) - 2, ++ vram_limit=torch.npu.mem_get_info(get_device_name())[1] / (1024 ** 3) - 2, ) video = pipe( @@ -56,3 +57,28 @@ video = pipe( ) save_video(video, "video.mp4", fps=15, quality=5) ``` + +### 训练 +当前已为每类模型添加NPU的启动脚本样例,脚本存放在`examples/xxx/special/npu_scripts`目录下,例如 
`examples/wanvideo/model_training/special/npu_scripts/Wan2.2-T2V-A14B-NPU.sh`。 + +在NPU训练脚本中,添加了可以优化性能的NPU特有环境变量,并针对特定模型开启了相关参数。 + +#### 环境变量 +```shell +export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True +``` +`expandable_segments:`: 使能内存池扩展段功能,即虚拟内存特征。 + +```shell +export CPU_AFFINITY_CONF=1 +``` +设置0或未设置: 表示不启用绑核功能 + +1: 表示开启粗粒度绑核 + +2: 表示开启细粒度绑核 + +#### 特定模型需要开启的参数 +| 模型 | 参数 | 备注 | +|-----------|------|-------------------| +| Wan 14B系列 | --initialize_model_on_cpu | 14B模型需要在cpu上进行初始化 | \ No newline at end of file diff --git a/examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-NPU.sh b/examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-NPU.sh new file mode 100644 index 0000000..7004a15 --- /dev/null +++ b/examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-NPU.sh @@ -0,0 +1,17 @@ +export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True +export CPU_AFFINITY_CONF=1 + +accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \ + --dataset_base_path data/example_image_dataset \ + --dataset_metadata_path data/example_image_dataset/metadata_kontext.csv \ + --data_file_keys "image,kontext_images" \ + --max_pixels 1048576 \ + --dataset_repeat 400 \ + --model_id_with_origin_paths "black-forest-labs/FLUX.1-Kontext-dev:flux1-kontext-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/*.safetensors,black-forest-labs/FLUX.1-dev:ae.safetensors" \ + --learning_rate 1e-5 \ + --num_epochs 1 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/FLUX.1-Kontext-dev_full" \ + --trainable_models "dit" \ + --extra_inputs "kontext_images" \ + --use_gradient_checkpointing diff --git a/examples/flux/model_training/special/npu_scripts/FLUX.1-dev-NPU.sh b/examples/flux/model_training/special/npu_scripts/FLUX.1-dev-NPU.sh new file mode 100644 index 0000000..04e039a --- /dev/null +++ b/examples/flux/model_training/special/npu_scripts/FLUX.1-dev-NPU.sh @@ -0,0 +1,15 @@ +export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True +export CPU_AFFINITY_CONF=1 + +accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \ + --dataset_base_path data/example_image_dataset \ + --dataset_metadata_path data/example_image_dataset/metadata.csv \ + --max_pixels 1048576 \ + --dataset_repeat 400 \ + --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/*.safetensors,black-forest-labs/FLUX.1-dev:ae.safetensors" \ + --learning_rate 1e-5 \ + --num_epochs 1 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/FLUX.1-dev_full" \ + --trainable_models "dit" \ + --use_gradient_checkpointing diff --git a/examples/flux2/model_training/special/npu_scripts/FLUX.2-dev-NPU.sh b/examples/flux2/model_training/special/npu_scripts/FLUX.2-dev-NPU.sh new file mode 100644 index 0000000..c32544a --- /dev/null +++ b/examples/flux2/model_training/special/npu_scripts/FLUX.2-dev-NPU.sh @@ -0,0 +1,35 @@ +export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True +export CPU_AFFINITY_CONF=1 + +accelerate launch examples/flux2/model_training/train.py \ + --dataset_base_path data/example_image_dataset \ + --dataset_metadata_path data/example_image_dataset/metadata.csv \ + --max_pixels 1048576 \ + --dataset_repeat 1 \ + --model_id_with_origin_paths "black-forest-labs/FLUX.2-dev:text_encoder/*.safetensors,black-forest-labs/FLUX.2-dev:vae/diffusion_pytorch_model.safetensors" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." \ + --output_path "./models/train/FLUX.2-dev-LoRA-splited-cache" \ + --lora_base_model "dit" \ + --lora_target_modules 
"to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_qkv_mlp_proj,to_out.0,to_add_out,linear_in,linear_out,single_transformer_blocks.0.attn.to_out,single_transformer_blocks.1.attn.to_out,single_transformer_blocks.2.attn.to_out,single_transformer_blocks.3.attn.to_out,single_transformer_blocks.4.attn.to_out,single_transformer_blocks.5.attn.to_out,single_transformer_blocks.6.attn.to_out,single_transformer_blocks.7.attn.to_out,single_transformer_blocks.8.attn.to_out,single_transformer_blocks.9.attn.to_out,single_transformer_blocks.10.attn.to_out,single_transformer_blocks.11.attn.to_out,single_transformer_blocks.12.attn.to_out,single_transformer_blocks.13.attn.to_out,single_transformer_blocks.14.attn.to_out,single_transformer_blocks.15.attn.to_out,single_transformer_blocks.16.attn.to_out,single_transformer_blocks.17.attn.to_out,single_transformer_blocks.18.attn.to_out,single_transformer_blocks.19.attn.to_out,single_transformer_blocks.20.attn.to_out,single_transformer_blocks.21.attn.to_out,single_transformer_blocks.22.attn.to_out,single_transformer_blocks.23.attn.to_out,single_transformer_blocks.24.attn.to_out,single_transformer_blocks.25.attn.to_out,single_transformer_blocks.26.attn.to_out,single_transformer_blocks.27.attn.to_out,single_transformer_blocks.28.attn.to_out,single_transformer_blocks.29.attn.to_out,single_transformer_blocks.30.attn.to_out,single_transformer_blocks.31.attn.to_out,single_transformer_blocks.32.attn.to_out,single_transformer_blocks.33.attn.to_out,single_transformer_blocks.34.attn.to_out,single_transformer_blocks.35.attn.to_out,single_transformer_blocks.36.attn.to_out,single_transformer_blocks.37.attn.to_out,single_transformer_blocks.38.attn.to_out,single_transformer_blocks.39.attn.to_out,single_transformer_blocks.40.attn.to_out,single_transformer_blocks.41.attn.to_out,single_transformer_blocks.42.attn.to_out,single_transformer_blocks.43.attn.to_out,single_transformer_blocks.44.attn.to_out,single_transformer_blocks.45.attn.to_out,single_transformer
_blocks.46.attn.to_out,single_transformer_blocks.47.attn.to_out" \ + --lora_rank 32 \ + --use_gradient_checkpointing \ + --dataset_num_workers 8 \ + --task "sft:data_process" + +accelerate launch examples/flux2/model_training/train.py \ + --dataset_base_path "./models/train/FLUX.2-dev-LoRA-splited-cache" \ + --max_pixels 1048576 \ + --dataset_repeat 50 \ + --model_id_with_origin_paths "black-forest-labs/FLUX.2-dev:transformer/*.safetensors" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." \ + --output_path "./models/train/FLUX.2-dev-LoRA-splited" \ + --lora_base_model "dit" \ + --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_qkv_mlp_proj,to_out.0,to_add_out,linear_in,linear_out,single_transformer_blocks.0.attn.to_out,single_transformer_blocks.1.attn.to_out,single_transformer_blocks.2.attn.to_out,single_transformer_blocks.3.attn.to_out,single_transformer_blocks.4.attn.to_out,single_transformer_blocks.5.attn.to_out,single_transformer_blocks.6.attn.to_out,single_transformer_blocks.7.attn.to_out,single_transformer_blocks.8.attn.to_out,single_transformer_blocks.9.attn.to_out,single_transformer_blocks.10.attn.to_out,single_transformer_blocks.11.attn.to_out,single_transformer_blocks.12.attn.to_out,single_transformer_blocks.13.attn.to_out,single_transformer_blocks.14.attn.to_out,single_transformer_blocks.15.attn.to_out,single_transformer_blocks.16.attn.to_out,single_transformer_blocks.17.attn.to_out,single_transformer_blocks.18.attn.to_out,single_transformer_blocks.19.attn.to_out,single_transformer_blocks.20.attn.to_out,single_transformer_blocks.21.attn.to_out,single_transformer_blocks.22.attn.to_out,single_transformer_blocks.23.attn.to_out,single_transformer_blocks.24.attn.to_out,single_transformer_blocks.25.attn.to_out,single_transformer_blocks.26.attn.to_out,single_transformer_blocks.27.attn.to_out,single_transformer_blocks.28.attn.to_out,single_transformer_blocks.29.attn.to_out,single_transformer_blocks.30.att
n.to_out,single_transformer_blocks.31.attn.to_out,single_transformer_blocks.32.attn.to_out,single_transformer_blocks.33.attn.to_out,single_transformer_blocks.34.attn.to_out,single_transformer_blocks.35.attn.to_out,single_transformer_blocks.36.attn.to_out,single_transformer_blocks.37.attn.to_out,single_transformer_blocks.38.attn.to_out,single_transformer_blocks.39.attn.to_out,single_transformer_blocks.40.attn.to_out,single_transformer_blocks.41.attn.to_out,single_transformer_blocks.42.attn.to_out,single_transformer_blocks.43.attn.to_out,single_transformer_blocks.44.attn.to_out,single_transformer_blocks.45.attn.to_out,single_transformer_blocks.46.attn.to_out,single_transformer_blocks.47.attn.to_out" \ + --lora_rank 32 \ + --use_gradient_checkpointing \ + --dataset_num_workers 8 \ + --task "sft:train" diff --git a/examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-Edit-2509-LoRA-NPU.sh b/examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-Edit-2509-LoRA-NPU.sh new file mode 100644 index 0000000..6599f51 --- /dev/null +++ b/examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-Edit-2509-LoRA-NPU.sh @@ -0,0 +1,38 @@ +# Due to memory limitations, split training is required to train the model on NPU +export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True +export CPU_AFFINITY_CONF=1 + +accelerate launch examples/qwen_image/model_training/train.py \ + --dataset_base_path data/example_image_dataset \ + --dataset_metadata_path data/example_image_dataset/metadata.csv \ + --max_pixels 1048576 \ + --dataset_repeat 1 \ + --model_id_with_origin_paths "Qwen/Qwen-Image-Edit-2509:text_encoder/model*.safetensors,Qwen/Qwen-Image-Edit-2509:vae/diffusion_pytorch_model.safetensors" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Qwen-Image-Edit-2509-LoRA-splited-cache" \ + --lora_base_model "dit" \ + --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \ + --lora_rank 32 \ + --use_gradient_checkpointing \ + --dataset_num_workers 8 \ + --find_unused_parameters \ + --task "sft:data_process" + +accelerate launch examples/qwen_image/model_training/train.py \ + --dataset_base_path "./models/train/Qwen-Image-LoRA-splited-cache" \ + --max_pixels 1048576 \ + --dataset_repeat 50 \ + --model_id_with_origin_paths "Qwen/Qwen-Image-Edit-2509:transformer/diffusion_pytorch_model*.safetensors" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." \ + --output_path "./models/train/Qwen-Image-Edit-2509-LoRA-splited" \ + --lora_base_model "dit" \ + --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \ + --lora_rank 32 \ + --use_gradient_checkpointing \ + --dataset_num_workers 8 \ + --find_unused_parameters \ + --task "sft:train" diff --git a/examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-LoRA-NPU.sh b/examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-LoRA-NPU.sh new file mode 100644 index 0000000..08978c0 --- /dev/null +++ b/examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-LoRA-NPU.sh @@ -0,0 +1,38 @@ +# Due to memory limitations, split training is required to train the model on NPU +export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True +export CPU_AFFINITY_CONF=1 + +accelerate launch examples/qwen_image/model_training/train.py \ + --dataset_base_path data/example_image_dataset \ + --dataset_metadata_path data/example_image_dataset/metadata.csv \ + --max_pixels 1048576 \ + --dataset_repeat 1 \ + --model_id_with_origin_paths 
"Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." \ + --output_path "./models/train/Qwen-Image-LoRA-splited-cache" \ + --lora_base_model "dit" \ + --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \ + --lora_rank 32 \ + --use_gradient_checkpointing \ + --dataset_num_workers 8 \ + --find_unused_parameters \ + --task "sft:data_process" + +accelerate launch examples/qwen_image/model_training/train.py \ + --dataset_base_path "./models/train/Qwen-Image-LoRA-splited-cache" \ + --max_pixels 1048576 \ + --dataset_repeat 50 \ + --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors" \ + --learning_rate 1e-4 \ + --num_epochs 5 \ + --remove_prefix_in_ckpt "pipe.dit." \ + --output_path "./models/train/Qwen-Image-LoRA-splited" \ + --lora_base_model "dit" \ + --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \ + --lora_rank 32 \ + --use_gradient_checkpointing \ + --dataset_num_workers 8 \ + --find_unused_parameters \ + --task "sft:train" diff --git a/examples/wanvideo/model_training/special/npu_scripts/Wan2.1-T2V-14B-NPU.sh b/examples/wanvideo/model_training/special/npu_scripts/Wan2.1-T2V-14B-NPU.sh new file mode 100644 index 0000000..ac2d9dd --- /dev/null +++ b/examples/wanvideo/model_training/special/npu_scripts/Wan2.1-T2V-14B-NPU.sh @@ -0,0 +1,16 @@ +export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True +export CPU_AFFINITY_CONF=1 + +accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ 
+ --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-14B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-14B:Wan2.1_VAE.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." \ + --output_path "./models/train/Wan2.1-T2V-14B_full" \ + --trainable_models "dit" \ + --initialize_model_on_cpu \ No newline at end of file diff --git a/examples/wanvideo/model_training/special/npu_scripts/Wan2.2-T2V-A14B-NPU.sh b/examples/wanvideo/model_training/special/npu_scripts/Wan2.2-T2V-A14B-NPU.sh new file mode 100644 index 0000000..4748f87 --- /dev/null +++ b/examples/wanvideo/model_training/special/npu_scripts/Wan2.2-T2V-A14B-NPU.sh @@ -0,0 +1,38 @@ +export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True +export CPU_AFFINITY_CONF=1 + +accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --num_frames 49 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.2-T2V-A14B:high_noise_model/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-T2V-A14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-T2V-A14B:Wan2.1_VAE.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Wan2.2-T2V-A14B_high_noise_full" \ + --trainable_models "dit" \ + --max_timestep_boundary 0.417 \ + --min_timestep_boundary 0 \ + --initialize_model_on_cpu +# boundary corresponds to timesteps [875, 1000] + +accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata.csv \ + --height 480 \ + --width 832 \ + --num_frames 49 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "Wan-AI/Wan2.2-T2V-A14B:low_noise_model/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-T2V-A14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-T2V-A14B:Wan2.1_VAE.pth" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." \ + --output_path "./models/train/Wan2.2-T2V-A14B_low_noise_full" \ + --trainable_models "dit" \ + --max_timestep_boundary 1 \ + --min_timestep_boundary 0.417 \ + --initialize_model_on_cpu +# boundary corresponds to timesteps [0, 875) \ No newline at end of file diff --git a/examples/wanvideo/model_training/special/npu_scripts/Wan2.2-VACE-Fun-A14B-NPU.sh b/examples/wanvideo/model_training/special/npu_scripts/Wan2.2-VACE-Fun-A14B-NPU.sh new file mode 100644 index 0000000..304d53d --- /dev/null +++ b/examples/wanvideo/model_training/special/npu_scripts/Wan2.2-VACE-Fun-A14B-NPU.sh @@ -0,0 +1,45 @@ +export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True +export CPU_AFFINITY_CONF=1 + +accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata_vace.csv \ + --data_file_keys "video,vace_video,vace_reference_image" \ + --height 480 \ + --width 832 \ + --num_frames 17 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths 
"PAI/Wan2.2-VACE-Fun-A14B:high_noise_model/diffusion_pytorch_model*.safetensors,PAI/Wan2.2-VACE-Fun-A14B:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.2-VACE-Fun-A14B:Wan2.1_VAE.pth" \ + --learning_rate 1e-4 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.vace." \ + --output_path "./models/train/Wan2.2-VACE-Fun-A14B_high_noise_full" \ + --trainable_models "vace" \ + --extra_inputs "vace_video,vace_reference_image" \ + --use_gradient_checkpointing_offload \ + --max_timestep_boundary 0.358 \ + --min_timestep_boundary 0 \ + --initialize_model_on_cpu +# boundary corresponds to timesteps [900, 1000] + + +accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \ + --dataset_base_path data/example_video_dataset \ + --dataset_metadata_path data/example_video_dataset/metadata_vace.csv \ + --data_file_keys "video,vace_video,vace_reference_image" \ + --height 480 \ + --width 832 \ + --num_frames 17 \ + --dataset_repeat 100 \ + --model_id_with_origin_paths "PAI/Wan2.2-VACE-Fun-A14B:low_noise_model/diffusion_pytorch_model*.safetensors,PAI/Wan2.2-VACE-Fun-A14B:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.2-VACE-Fun-A14B:Wan2.1_VAE.pth" \ + --learning_rate 1e-4 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.vace." 
\ + --output_path "./models/train/Wan2.2-VACE-Fun-A14B_low_noise_full" \ + --trainable_models "vace" \ + --extra_inputs "vace_video,vace_reference_image" \ + --use_gradient_checkpointing_offload \ + --max_timestep_boundary 1 \ + --min_timestep_boundary 0.358 \ + --initialize_model_on_cpu +# boundary corresponds to timesteps [0, 900] \ No newline at end of file diff --git a/examples/z_image/model_training/special/npu_scripts/Z-Image-Turbo-NPU.sh b/examples/z_image/model_training/special/npu_scripts/Z-Image-Turbo-NPU.sh new file mode 100644 index 0000000..ee9d6d6 --- /dev/null +++ b/examples/z_image/model_training/special/npu_scripts/Z-Image-Turbo-NPU.sh @@ -0,0 +1,17 @@ +# This example is tested on 8*A100 +export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True +export CPU_AFFINITY_CONF=1 + +accelerate launch --config_file examples/z_image/model_training/full/accelerate_config.yaml examples/z_image/model_training/train.py \ + --dataset_base_path data/example_image_dataset \ + --dataset_metadata_path data/example_image_dataset/metadata.csv \ + --max_pixels 1048576 \ + --dataset_repeat 400 \ + --model_id_with_origin_paths "Tongyi-MAI/Z-Image-Turbo:transformer/*.safetensors,Tongyi-MAI/Z-Image-Turbo:text_encoder/*.safetensors,Tongyi-MAI/Z-Image-Turbo:vae/diffusion_pytorch_model.safetensors" \ + --learning_rate 1e-5 \ + --num_epochs 2 \ + --remove_prefix_in_ckpt "pipe.dit." 
\ + --output_path "./models/train/Z-Image-Turbo_full" \ + --trainable_models "dit" \ + --use_gradient_checkpointing \ + --dataset_num_workers 8 From 07b1f5702f4d204ac6a96e649c1d93533fc81338 Mon Sep 17 00:00:00 2001 From: feng0w0 Date: Wed, 31 Dec 2025 10:01:21 +0800 Subject: [PATCH 3/9] Docs:Supplement NPU training script samples and documentation instruction --- docs/en/Pipeline_Usage/GPU_support.md | 8 ++++---- .../special/npu_scripts/Qwen-Image-Edit-2509-LoRA-NPU.sh | 2 +- .../special/npu_scripts/Z-Image-Turbo-NPU.sh | 1 - 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/docs/en/Pipeline_Usage/GPU_support.md b/docs/en/Pipeline_Usage/GPU_support.md index d6ecdd0..6c27de7 100644 --- a/docs/en/Pipeline_Usage/GPU_support.md +++ b/docs/en/Pipeline_Usage/GPU_support.md @@ -59,7 +59,7 @@ save_video(video, "video.mp4", fps=15, quality=5) ``` ### Training -NPU startup script samples have been added for each type of model,the scripts are stored in the `examples/xxx/special/npu_scripts`, for examples `examples/wanvideo/model_training/special/npu_scripts/Wan2.2-T2V-A14B-NPU.sh`. +NPU startup script samples have been added for each type of model,the scripts are stored in the `examples/xxx/special/npu_scripts`, for example `examples/wanvideo/model_training/special/npu_scripts/Wan2.2-T2V-A14B-NPU.sh`. In the NPU training scripts, NPU specific environment variables that can optimize performance have been added, and relevant parameters have been enabled for specific models. 
@@ -79,6 +79,6 @@ Set 0 or not set: indicates not enabling the binding function 2: Indicates enabling fine-grained kernel binding #### Parameters for specific models -| Model | Parameter | Note | -|-----------|---------------------------|-------------------| -| Wan 14B系列 | --initialize_model_on_cpu | The 14B model needs to be initialized on the CPU | \ No newline at end of file +| Model | Parameter | Note | +|----------------|---------------------------|-------------------| +| Wan 14B series | --initialize_model_on_cpu | The 14B model needs to be initialized on the CPU | \ No newline at end of file diff --git a/examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-Edit-2509-LoRA-NPU.sh b/examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-Edit-2509-LoRA-NPU.sh index 6599f51..9c3f02c 100644 --- a/examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-Edit-2509-LoRA-NPU.sh +++ b/examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-Edit-2509-LoRA-NPU.sh @@ -21,7 +21,7 @@ accelerate launch examples/qwen_image/model_training/train.py \ --task "sft:data_process" accelerate launch examples/qwen_image/model_training/train.py \ - --dataset_base_path "./models/train/Qwen-Image-LoRA-splited-cache" \ + --dataset_base_path "./models/train/Qwen-Image-Edit-2509-LoRA-splited-cache" \ --max_pixels 1048576 \ --dataset_repeat 50 \ --model_id_with_origin_paths "Qwen/Qwen-Image-Edit-2509:transformer/diffusion_pytorch_model*.safetensors" \ diff --git a/examples/z_image/model_training/special/npu_scripts/Z-Image-Turbo-NPU.sh b/examples/z_image/model_training/special/npu_scripts/Z-Image-Turbo-NPU.sh index ee9d6d6..93cc645 100644 --- a/examples/z_image/model_training/special/npu_scripts/Z-Image-Turbo-NPU.sh +++ b/examples/z_image/model_training/special/npu_scripts/Z-Image-Turbo-NPU.sh @@ -1,4 +1,3 @@ -# This example is tested on 8*A100 export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True export CPU_AFFINITY_CONF=1 From 
86829120c282c2f39b3baa7d389d7aa51c2ed84f Mon Sep 17 00:00:00 2001 From: feng0w0 Date: Mon, 5 Jan 2026 09:59:11 +0800 Subject: [PATCH 4/9] Docs: Supplement NPU training script samples and documentation instructions --- ...-NPU.sh => FLUX.1-Kontext-dev-LoRA-NPU.sh} | 13 ++++--- ...UX.1-dev-NPU.sh => FLUX.1-dev-LoRA-NPU.sh} | 15 ++++---- .../special/npu_scripts/FLUX.2-dev-NPU.sh | 35 ------------------- 3 files changed, 17 insertions(+), 46 deletions(-) rename examples/flux/model_training/special/npu_scripts/{FLUX.1-Kontext-dev-NPU.sh => FLUX.1-Kontext-dev-LoRA-NPU.sh} (63%) rename examples/flux/model_training/special/npu_scripts/{FLUX.1-dev-NPU.sh => FLUX.1-dev-LoRA-NPU.sh} (57%) delete mode 100644 examples/flux2/model_training/special/npu_scripts/FLUX.2-dev-NPU.sh diff --git a/examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-NPU.sh b/examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-LoRA-NPU.sh similarity index 63% rename from examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-NPU.sh rename to examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-LoRA-NPU.sh index 7004a15..51397e6 100644 --- a/examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-NPU.sh +++ b/examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-LoRA-NPU.sh @@ -1,17 +1,20 @@ export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True export CPU_AFFINITY_CONF=1 -accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \ +accelerate launch examples/flux/model_training/train.py \ --dataset_base_path data/example_image_dataset \ --dataset_metadata_path data/example_image_dataset/metadata_kontext.csv \ --data_file_keys "image,kontext_images" \ --max_pixels 1048576 \ --dataset_repeat 400 \ --model_id_with_origin_paths 
"black-forest-labs/FLUX.1-Kontext-dev:flux1-kontext-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/*.safetensors,black-forest-labs/FLUX.1-dev:ae.safetensors" \ - --learning_rate 1e-5 \ - --num_epochs 1 \ + --learning_rate 1e-4 \ + --num_epochs 5 \ --remove_prefix_in_ckpt "pipe.dit." \ - --output_path "./models/train/FLUX.1-Kontext-dev_full" \ - --trainable_models "dit" \ + --output_path "./models/train/FLUX.1-Kontext-dev_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \ + --lora_rank 32 \ + --align_to_opensource_format \ --extra_inputs "kontext_images" \ --use_gradient_checkpointing diff --git a/examples/flux/model_training/special/npu_scripts/FLUX.1-dev-NPU.sh b/examples/flux/model_training/special/npu_scripts/FLUX.1-dev-LoRA-NPU.sh similarity index 57% rename from examples/flux/model_training/special/npu_scripts/FLUX.1-dev-NPU.sh rename to examples/flux/model_training/special/npu_scripts/FLUX.1-dev-LoRA-NPU.sh index 04e039a..c167503 100644 --- a/examples/flux/model_training/special/npu_scripts/FLUX.1-dev-NPU.sh +++ b/examples/flux/model_training/special/npu_scripts/FLUX.1-dev-LoRA-NPU.sh @@ -1,15 +1,18 @@ export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True export CPU_AFFINITY_CONF=1 -accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \ +accelerate launch examples/flux/model_training/train.py \ --dataset_base_path data/example_image_dataset \ --dataset_metadata_path data/example_image_dataset/metadata.csv \ --max_pixels 1048576 \ - --dataset_repeat 400 \ + --dataset_repeat 50 \ --model_id_with_origin_paths 
"black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/*.safetensors,black-forest-labs/FLUX.1-dev:ae.safetensors" \ - --learning_rate 1e-5 \ - --num_epochs 1 \ + --learning_rate 1e-4 \ + --num_epochs 5 \ --remove_prefix_in_ckpt "pipe.dit." \ - --output_path "./models/train/FLUX.1-dev_full" \ - --trainable_models "dit" \ + --output_path "./models/train/FLUX.1-dev_lora" \ + --lora_base_model "dit" \ + --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \ + --lora_rank 32 \ + --align_to_opensource_format \ --use_gradient_checkpointing diff --git a/examples/flux2/model_training/special/npu_scripts/FLUX.2-dev-NPU.sh b/examples/flux2/model_training/special/npu_scripts/FLUX.2-dev-NPU.sh deleted file mode 100644 index c32544a..0000000 --- a/examples/flux2/model_training/special/npu_scripts/FLUX.2-dev-NPU.sh +++ /dev/null @@ -1,35 +0,0 @@ -export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True -export CPU_AFFINITY_CONF=1 - -accelerate launch examples/flux2/model_training/train.py \ - --dataset_base_path data/example_image_dataset \ - --dataset_metadata_path data/example_image_dataset/metadata.csv \ - --max_pixels 1048576 \ - --dataset_repeat 1 \ - --model_id_with_origin_paths "black-forest-labs/FLUX.2-dev:text_encoder/*.safetensors,black-forest-labs/FLUX.2-dev:vae/diffusion_pytorch_model.safetensors" \ - --learning_rate 1e-4 \ - --num_epochs 5 \ - --remove_prefix_in_ckpt "pipe.dit." 
\ - --output_path "./models/train/FLUX.2-dev-LoRA-splited-cache" \ - --lora_base_model "dit" \ - --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_qkv_mlp_proj,to_out.0,to_add_out,linear_in,linear_out,single_transformer_blocks.0.attn.to_out,single_transformer_blocks.1.attn.to_out,single_transformer_blocks.2.attn.to_out,single_transformer_blocks.3.attn.to_out,single_transformer_blocks.4.attn.to_out,single_transformer_blocks.5.attn.to_out,single_transformer_blocks.6.attn.to_out,single_transformer_blocks.7.attn.to_out,single_transformer_blocks.8.attn.to_out,single_transformer_blocks.9.attn.to_out,single_transformer_blocks.10.attn.to_out,single_transformer_blocks.11.attn.to_out,single_transformer_blocks.12.attn.to_out,single_transformer_blocks.13.attn.to_out,single_transformer_blocks.14.attn.to_out,single_transformer_blocks.15.attn.to_out,single_transformer_blocks.16.attn.to_out,single_transformer_blocks.17.attn.to_out,single_transformer_blocks.18.attn.to_out,single_transformer_blocks.19.attn.to_out,single_transformer_blocks.20.attn.to_out,single_transformer_blocks.21.attn.to_out,single_transformer_blocks.22.attn.to_out,single_transformer_blocks.23.attn.to_out,single_transformer_blocks.24.attn.to_out,single_transformer_blocks.25.attn.to_out,single_transformer_blocks.26.attn.to_out,single_transformer_blocks.27.attn.to_out,single_transformer_blocks.28.attn.to_out,single_transformer_blocks.29.attn.to_out,single_transformer_blocks.30.attn.to_out,single_transformer_blocks.31.attn.to_out,single_transformer_blocks.32.attn.to_out,single_transformer_blocks.33.attn.to_out,single_transformer_blocks.34.attn.to_out,single_transformer_blocks.35.attn.to_out,single_transformer_blocks.36.attn.to_out,single_transformer_blocks.37.attn.to_out,single_transformer_blocks.38.attn.to_out,single_transformer_blocks.39.attn.to_out,single_transformer_blocks.40.attn.to_out,single_transformer_blocks.41.attn.to_out,single_transformer_blocks.42.attn.to_out,single_transformer_blo
cks.43.attn.to_out,single_transformer_blocks.44.attn.to_out,single_transformer_blocks.45.attn.to_out,single_transformer_blocks.46.attn.to_out,single_transformer_blocks.47.attn.to_out" \ - --lora_rank 32 \ - --use_gradient_checkpointing \ - --dataset_num_workers 8 \ - --task "sft:data_process" - -accelerate launch examples/flux2/model_training/train.py \ - --dataset_base_path "./models/train/FLUX.2-dev-LoRA-splited-cache" \ - --max_pixels 1048576 \ - --dataset_repeat 50 \ - --model_id_with_origin_paths "black-forest-labs/FLUX.2-dev:transformer/*.safetensors" \ - --learning_rate 1e-4 \ - --num_epochs 5 \ - --remove_prefix_in_ckpt "pipe.dit." \ - --output_path "./models/train/FLUX.2-dev-LoRA-splited" \ - --lora_base_model "dit" \ - --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_qkv_mlp_proj,to_out.0,to_add_out,linear_in,linear_out,single_transformer_blocks.0.attn.to_out,single_transformer_blocks.1.attn.to_out,single_transformer_blocks.2.attn.to_out,single_transformer_blocks.3.attn.to_out,single_transformer_blocks.4.attn.to_out,single_transformer_blocks.5.attn.to_out,single_transformer_blocks.6.attn.to_out,single_transformer_blocks.7.attn.to_out,single_transformer_blocks.8.attn.to_out,single_transformer_blocks.9.attn.to_out,single_transformer_blocks.10.attn.to_out,single_transformer_blocks.11.attn.to_out,single_transformer_blocks.12.attn.to_out,single_transformer_blocks.13.attn.to_out,single_transformer_blocks.14.attn.to_out,single_transformer_blocks.15.attn.to_out,single_transformer_blocks.16.attn.to_out,single_transformer_blocks.17.attn.to_out,single_transformer_blocks.18.attn.to_out,single_transformer_blocks.19.attn.to_out,single_transformer_blocks.20.attn.to_out,single_transformer_blocks.21.attn.to_out,single_transformer_blocks.22.attn.to_out,single_transformer_blocks.23.attn.to_out,single_transformer_blocks.24.attn.to_out,single_transformer_blocks.25.attn.to_out,single_transformer_blocks.26.attn.to_out,single_transformer_blocks.27.attn.to
_out,single_transformer_blocks.28.attn.to_out,single_transformer_blocks.29.attn.to_out,single_transformer_blocks.30.attn.to_out,single_transformer_blocks.31.attn.to_out,single_transformer_blocks.32.attn.to_out,single_transformer_blocks.33.attn.to_out,single_transformer_blocks.34.attn.to_out,single_transformer_blocks.35.attn.to_out,single_transformer_blocks.36.attn.to_out,single_transformer_blocks.37.attn.to_out,single_transformer_blocks.38.attn.to_out,single_transformer_blocks.39.attn.to_out,single_transformer_blocks.40.attn.to_out,single_transformer_blocks.41.attn.to_out,single_transformer_blocks.42.attn.to_out,single_transformer_blocks.43.attn.to_out,single_transformer_blocks.44.attn.to_out,single_transformer_blocks.45.attn.to_out,single_transformer_blocks.46.attn.to_out,single_transformer_blocks.47.attn.to_out" \ - --lora_rank 32 \ - --use_gradient_checkpointing \ - --dataset_num_workers 8 \ - --task "sft:train" From 62c3d406d920e7a7e6778b67059299eb90011d41 Mon Sep 17 00:00:00 2001 From: feng0w0 Date: Mon, 5 Jan 2026 15:42:55 +0800 Subject: [PATCH 5/9] Docs:Supplement NPU training script samples and documentation instruction --- .../FLUX.1-Kontext-dev-NPU.sh} | 13 +++++-------- .../FLUX.1-dev-NPU.sh} | 15 ++++++--------- .../Qwen-Image-Edit-2509-LoRA-NPU.sh | 0 .../Qwen-Image-LoRA-NPU.sh | 0 .../Wan2.1-T2V-14B-NPU.sh | 0 .../Wan2.2-T2V-A14B-NPU.sh | 0 .../Wan2.2-VACE-Fun-A14B-NPU.sh | 0 .../Z-Image-Turbo-NPU.sh | 0 8 files changed, 11 insertions(+), 17 deletions(-) rename examples/flux/model_training/special/{npu_scripts/FLUX.1-Kontext-dev-LoRA-NPU.sh => npu_training/FLUX.1-Kontext-dev-NPU.sh} (63%) rename examples/flux/model_training/special/{npu_scripts/FLUX.1-dev-LoRA-NPU.sh => npu_training/FLUX.1-dev-NPU.sh} (57%) rename examples/qwen_image/model_training/special/{npu_scripts => npu_training}/Qwen-Image-Edit-2509-LoRA-NPU.sh (100%) rename examples/qwen_image/model_training/special/{npu_scripts => npu_training}/Qwen-Image-LoRA-NPU.sh (100%) rename 
examples/wanvideo/model_training/special/{npu_scripts => npu_training}/Wan2.1-T2V-14B-NPU.sh (100%) rename examples/wanvideo/model_training/special/{npu_scripts => npu_training}/Wan2.2-T2V-A14B-NPU.sh (100%) rename examples/wanvideo/model_training/special/{npu_scripts => npu_training}/Wan2.2-VACE-Fun-A14B-NPU.sh (100%) rename examples/z_image/model_training/special/{npu_scripts => npu_training}/Z-Image-Turbo-NPU.sh (100%) diff --git a/examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-LoRA-NPU.sh b/examples/flux/model_training/special/npu_training/FLUX.1-Kontext-dev-NPU.sh similarity index 63% rename from examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-LoRA-NPU.sh rename to examples/flux/model_training/special/npu_training/FLUX.1-Kontext-dev-NPU.sh index 51397e6..7ec976d 100644 --- a/examples/flux/model_training/special/npu_scripts/FLUX.1-Kontext-dev-LoRA-NPU.sh +++ b/examples/flux/model_training/special/npu_training/FLUX.1-Kontext-dev-NPU.sh @@ -1,20 +1,17 @@ export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True export CPU_AFFINITY_CONF=1 -accelerate launch examples/flux/model_training/train.py \ +accelerate launch --config_file examples/flux/model_training/full/accelerate_config_zero2offload.yaml examples/flux/model_training/train.py \ --dataset_base_path data/example_image_dataset \ --dataset_metadata_path data/example_image_dataset/metadata_kontext.csv \ --data_file_keys "image,kontext_images" \ --max_pixels 1048576 \ --dataset_repeat 400 \ --model_id_with_origin_paths "black-forest-labs/FLUX.1-Kontext-dev:flux1-kontext-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/*.safetensors,black-forest-labs/FLUX.1-dev:ae.safetensors" \ - --learning_rate 1e-4 \ - --num_epochs 5 \ + --learning_rate 1e-5 \ + --num_epochs 1 \ --remove_prefix_in_ckpt "pipe.dit." 
\ - --output_path "./models/train/FLUX.1-Kontext-dev_lora" \ - --lora_base_model "dit" \ - --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \ - --lora_rank 32 \ - --align_to_opensource_format \ + --output_path "./models/train/FLUX.1-Kontext-dev_full" \ + --trainable_models "dit" \ --extra_inputs "kontext_images" \ --use_gradient_checkpointing diff --git a/examples/flux/model_training/special/npu_scripts/FLUX.1-dev-LoRA-NPU.sh b/examples/flux/model_training/special/npu_training/FLUX.1-dev-NPU.sh similarity index 57% rename from examples/flux/model_training/special/npu_scripts/FLUX.1-dev-LoRA-NPU.sh rename to examples/flux/model_training/special/npu_training/FLUX.1-dev-NPU.sh index c167503..8133594 100644 --- a/examples/flux/model_training/special/npu_scripts/FLUX.1-dev-LoRA-NPU.sh +++ b/examples/flux/model_training/special/npu_training/FLUX.1-dev-NPU.sh @@ -1,18 +1,15 @@ export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True export CPU_AFFINITY_CONF=1 -accelerate launch examples/flux/model_training/train.py \ +accelerate launch --config_file examples/flux/model_training/full/accelerate_config_zero2offload.yaml examples/flux/model_training/train.py \ --dataset_base_path data/example_image_dataset \ --dataset_metadata_path data/example_image_dataset/metadata.csv \ --max_pixels 1048576 \ - --dataset_repeat 50 \ + --dataset_repeat 400 \ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/*.safetensors,black-forest-labs/FLUX.1-dev:ae.safetensors" \ - --learning_rate 1e-4 \ - --num_epochs 5 \ + --learning_rate 1e-5 \ + --num_epochs 1 \ --remove_prefix_in_ckpt "pipe.dit." 
\ - --output_path "./models/train/FLUX.1-dev_lora" \ - --lora_base_model "dit" \ - --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \ - --lora_rank 32 \ - --align_to_opensource_format \ + --output_path "./models/train/FLUX.1-dev_full" \ + --trainable_models "dit" \ --use_gradient_checkpointing diff --git a/examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-Edit-2509-LoRA-NPU.sh b/examples/qwen_image/model_training/special/npu_training/Qwen-Image-Edit-2509-LoRA-NPU.sh similarity index 100% rename from examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-Edit-2509-LoRA-NPU.sh rename to examples/qwen_image/model_training/special/npu_training/Qwen-Image-Edit-2509-LoRA-NPU.sh diff --git a/examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-LoRA-NPU.sh b/examples/qwen_image/model_training/special/npu_training/Qwen-Image-LoRA-NPU.sh similarity index 100% rename from examples/qwen_image/model_training/special/npu_scripts/Qwen-Image-LoRA-NPU.sh rename to examples/qwen_image/model_training/special/npu_training/Qwen-Image-LoRA-NPU.sh diff --git a/examples/wanvideo/model_training/special/npu_scripts/Wan2.1-T2V-14B-NPU.sh b/examples/wanvideo/model_training/special/npu_training/Wan2.1-T2V-14B-NPU.sh similarity index 100% rename from examples/wanvideo/model_training/special/npu_scripts/Wan2.1-T2V-14B-NPU.sh rename to examples/wanvideo/model_training/special/npu_training/Wan2.1-T2V-14B-NPU.sh diff --git a/examples/wanvideo/model_training/special/npu_scripts/Wan2.2-T2V-A14B-NPU.sh b/examples/wanvideo/model_training/special/npu_training/Wan2.2-T2V-A14B-NPU.sh similarity index 100% rename from examples/wanvideo/model_training/special/npu_scripts/Wan2.2-T2V-A14B-NPU.sh rename to examples/wanvideo/model_training/special/npu_training/Wan2.2-T2V-A14B-NPU.sh diff --git 
a/examples/wanvideo/model_training/special/npu_scripts/Wan2.2-VACE-Fun-A14B-NPU.sh b/examples/wanvideo/model_training/special/npu_training/Wan2.2-VACE-Fun-A14B-NPU.sh similarity index 100% rename from examples/wanvideo/model_training/special/npu_scripts/Wan2.2-VACE-Fun-A14B-NPU.sh rename to examples/wanvideo/model_training/special/npu_training/Wan2.2-VACE-Fun-A14B-NPU.sh diff --git a/examples/z_image/model_training/special/npu_scripts/Z-Image-Turbo-NPU.sh b/examples/z_image/model_training/special/npu_training/Z-Image-Turbo-NPU.sh similarity index 100% rename from examples/z_image/model_training/special/npu_scripts/Z-Image-Turbo-NPU.sh rename to examples/z_image/model_training/special/npu_training/Z-Image-Turbo-NPU.sh From 3ee5f53a366c1a8321285184b0da14ec7b8b4e8c Mon Sep 17 00:00:00 2001 From: feng0w0 Date: Wed, 7 Jan 2026 11:31:22 +0800 Subject: [PATCH 6/9] [model][NPU]:Z-image model support NPU --- diffsynth/models/z_image_dit.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/diffsynth/models/z_image_dit.py b/diffsynth/models/z_image_dit.py index 7664fc5..4c5622c 100644 --- a/diffsynth/models/z_image_dit.py +++ b/diffsynth/models/z_image_dit.py @@ -8,6 +8,7 @@ from torch.nn.utils.rnn import pad_sequence from torch.nn import RMSNorm from ..core.attention import attention_forward +from ..core.device.npu_compatible_device import IS_NPU_AVAILABLE from ..core.gradient import gradient_checkpoint_forward @@ -274,7 +275,10 @@ class RopeEmbedder: result = [] for i in range(len(self.axes_dims)): index = ids[:, i] - result.append(self.freqs_cis[i][index]) + if IS_NPU_AVAILABLE: + result.append(self.freqs_cis[i][index]) + else: + result.append(torch.index_select(self.freqs_cis[i], 0, index)) return torch.cat(result, dim=-1) From c1c9a4853b2b862a3010637d16f5d9ddb241f87e Mon Sep 17 00:00:00 2001 From: feng0w0 Date: Wed, 7 Jan 2026 11:42:19 +0800 Subject: [PATCH 7/9] [model][NPU]:Z-image model support NPU --- diffsynth/models/z_image_dit.py | 4 ++-- 1 
file changed, 2 insertions(+), 2 deletions(-) diff --git a/diffsynth/models/z_image_dit.py b/diffsynth/models/z_image_dit.py index 4c5622c..d20ec51 100644 --- a/diffsynth/models/z_image_dit.py +++ b/diffsynth/models/z_image_dit.py @@ -276,9 +276,9 @@ class RopeEmbedder: for i in range(len(self.axes_dims)): index = ids[:, i] if IS_NPU_AVAILABLE: - result.append(self.freqs_cis[i][index]) - else: result.append(torch.index_select(self.freqs_cis[i], 0, index)) + else: + result.append(self.freqs_cis[i][index]) return torch.cat(result, dim=-1) From 3235393fb55b4671f658f2a9b9c831783a228ce1 Mon Sep 17 00:00:00 2001 From: Jiaqi Xu Date: Sat, 10 Jan 2026 09:28:45 +0800 Subject: [PATCH 8/9] Update package inclusion pattern in pyproject.toml Update to install all the sub-packages inside diffsynth. Otherwise, the installed packages only contain __init__.py --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 537ba77..29f2d14 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,7 @@ classifiers = [ [tool.setuptools.packages.find] where = ["./"] -include = ["diffsynth"] +include = ["diffsynth*"] [tool.setuptools] include-package-data = true From f4bf5920649d1174d83f9d5d84e25fb65f93323f Mon Sep 17 00:00:00 2001 From: Jiaqi Xu Date: Sat, 10 Jan 2026 09:32:35 +0800 Subject: [PATCH 9/9] Update pyproject.toml Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 29f2d14..6ce319d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,7 @@ classifiers = [ [tool.setuptools.packages.find] where = ["./"] -include = ["diffsynth*"] +include = ["diffsynth", "diffsynth.*"] [tool.setuptools] include-package-data = true