fix qwen_rope

This commit is contained in:
mi804
2025-08-18 17:31:18 +08:00
parent ad1da43476
commit d93de98a21
8 changed files with 36 additions and 54 deletions

View File

@@ -236,7 +236,6 @@ The script includes the following parameters:
* `--model_paths`: Model paths to load. In JSON format.
* `--model_id_with_origin_paths`: Model ID with original paths, e.g., Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors. Separate with commas.
* `--tokenizer_path`: Tokenizer path. Leave empty to auto-download.
* `--edit_model`: Whether to use Qwen-Image-Edit. If True, the model will be used for image editing.
* `--processor_path`: Path to the processor of Qwen-Image-Edit. Leave empty to auto-download.
* Training
* `--learning_rate`: Learning rate.

View File

@@ -236,7 +236,6 @@ Qwen-Image 系列模型训练通过统一的 [`./model_training/train.py`](./mod
* `--model_paths`: 要加载的模型路径。JSON 格式。
* `--model_id_with_origin_paths`: 带原始路径的模型 ID，例如 Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors。用逗号分隔。
* `--tokenizer_path`: tokenizer 路径,留空将会自动下载。
* `--edit_model`:是否使用 Qwen-Image-Edit。若为 True则将使用该模型进行图像编辑。
* `--processor_path`：Qwen-Image-Edit 的 processor 路径。留空则自动下载。
* 训练
* `--learning_rate`: 学习率。

View File

@@ -1,5 +1,4 @@
accelerate launch --config_file examples/qwen_image/model_training/full/accelerate_config_zero2offload.yaml examples/qwen_image/model_training/train.py \
--edit_model \
--dataset_base_path data/example_image_dataset \
--dataset_metadata_path data/example_image_dataset/metadata_edit.csv \
--max_pixels 1048576 \

View File

@@ -1,5 +1,4 @@
accelerate launch examples/qwen_image/model_training/train.py \
--edit_model \
--dataset_base_path data/example_image_dataset \
--dataset_metadata_path data/example_image_dataset/metadata_edit.csv \
--max_pixels 1048576 \

View File

@@ -11,7 +11,7 @@ class QwenImageTrainingModule(DiffusionTrainingModule):
def __init__(
self,
model_paths=None, model_id_with_origin_paths=None,
tokenizer_path=None, processor_path=None, edit_model=False,
tokenizer_path=None, processor_path=None,
trainable_models=None,
lora_base_model=None, lora_target_modules="", lora_rank=32, lora_checkpoint=None,
use_gradient_checkpointing=True,
@@ -28,12 +28,8 @@ class QwenImageTrainingModule(DiffusionTrainingModule):
model_id_with_origin_paths = model_id_with_origin_paths.split(",")
model_configs += [ModelConfig(model_id=i.split(":")[0], origin_file_pattern=i.split(":")[1]) for i in model_id_with_origin_paths]
if edit_model:
tokenizer_config = None
processor_config = ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/") if processor_path is None else ModelConfig(processor_path)
else:
tokenizer_config = ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/") if tokenizer_path is None else ModelConfig(tokenizer_path)
processor_config = None
tokenizer_config = ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/") if tokenizer_path is None else ModelConfig(tokenizer_path)
processor_config = ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/") if processor_path is None else ModelConfig(processor_path)
self.pipe = QwenImagePipeline.from_pretrained(torch_dtype=torch.bfloat16, device="cpu", model_configs=model_configs, tokenizer_config=tokenizer_config, processor_config=processor_config)
# Reset training scheduler (do it in each training step)
@@ -120,7 +116,6 @@ if __name__ == "__main__":
model_id_with_origin_paths=args.model_id_with_origin_paths,
tokenizer_path=args.tokenizer_path,
processor_path=args.processor_path,
edit_model=args.edit_model,
trainable_models=args.trainable_models,
lora_base_model=args.lora_base_model,
lora_target_modules=args.lora_target_modules,