# Text-to-image: download the example dataset (images plus metadata.csv) from ModelScope.
modelscope download --dataset DiffSynth-Studio/diffsynth_example_dataset --include "flux2/FLUX.2-klein-base-4B/*" --local_dir ./data/diffsynth_example_dataset
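
# For reference, a minimal sketch of how a caption dataset's metadata.csv is
# typically laid out (the exact column names here are an assumption; inspect
# the downloaded file for the authoritative schema):
#
#   image,prompt
#   image_0.jpg,"a photo of a cat sitting on a windowsill"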

# Launch LoRA training with Accelerate. The DiT weights are loaded from
# FLUX.2-klein-base-4B, while the text encoder, VAE, and tokenizer come from
# FLUX.2-klein-4B. A rank-32 LoRA is trained on the listed attention and
# projection modules, with gradient checkpointing enabled to save memory.
accelerate launch examples/flux2/model_training/train.py \
  --dataset_base_path data/diffsynth_example_dataset/flux2/FLUX.2-klein-base-4B \
  --dataset_metadata_path data/diffsynth_example_dataset/flux2/FLUX.2-klein-base-4B/metadata.csv \
  --max_pixels 1048576 \
  --dataset_repeat 50 \
  --model_id_with_origin_paths "black-forest-labs/FLUX.2-klein-4B:text_encoder/*.safetensors,black-forest-labs/FLUX.2-klein-base-4B:transformer/*.safetensors,black-forest-labs/FLUX.2-klein-4B:vae/diffusion_pytorch_model.safetensors" \
  --tokenizer_path "black-forest-labs/FLUX.2-klein-4B:tokenizer/" \
  --learning_rate 1e-4 \
  --num_epochs 5 \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/FLUX.2-klein-base-4B_lora" \
  --lora_base_model "dit" \
  --lora_target_modules "to_q,to_k,to_v,to_out.0,add_q_proj,add_k_proj,add_v_proj,to_add_out,linear_in,linear_out,to_qkv_mlp_proj,single_transformer_blocks.0.attn.to_out,single_transformer_blocks.1.attn.to_out,single_transformer_blocks.2.attn.to_out,single_transformer_blocks.3.attn.to_out,single_transformer_blocks.4.attn.to_out,single_transformer_blocks.5.attn.to_out,single_transformer_blocks.6.attn.to_out,single_transformer_blocks.7.attn.to_out,single_transformer_blocks.8.attn.to_out,single_transformer_blocks.9.attn.to_out,single_transformer_blocks.10.attn.to_out,single_transformer_blocks.11.attn.to_out,single_transformer_blocks.12.attn.to_out,single_transformer_blocks.13.attn.to_out,single_transformer_blocks.14.attn.to_out,single_transformer_blocks.15.attn.to_out,single_transformer_blocks.16.attn.to_out,single_transformer_blocks.17.attn.to_out,single_transformer_blocks.18.attn.to_out,single_transformer_blocks.19.attn.to_out" \
  --lora_rank 32 \
  --use_gradient_checkpointing
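
# The same run scales to multiple GPUs via Accelerate's standard
# --num_processes flag (sketch only; any extra distributed settings, such as
# an accelerate config file, are left to your environment):
#
# accelerate launch --num_processes 4 examples/flux2/model_training/train.py \
#   ... (same arguments as above)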

# Edit: image-editing variant of the same LoRA recipe, kept commented out.
# It trains on the Qwen-Image-Edit-2511 example dataset and passes a second
# image per sample (edit_image) to the model via --extra_inputs.
# modelscope download --dataset DiffSynth-Studio/diffsynth_example_dataset --include "qwen_image/Qwen-Image-Edit-2511/*" --local_dir ./data/diffsynth_example_dataset

# accelerate launch examples/flux2/model_training/train.py \
#   --dataset_base_path data/diffsynth_example_dataset/qwen_image/Qwen-Image-Edit-2511 \
#   --dataset_metadata_path data/diffsynth_example_dataset/qwen_image/Qwen-Image-Edit-2511/metadata.json \
#   --data_file_keys "image,edit_image" \
#   --extra_inputs "edit_image" \
#   --max_pixels 1048576 \
#   --dataset_repeat 50 \
#   --model_id_with_origin_paths "black-forest-labs/FLUX.2-klein-4B:text_encoder/*.safetensors,black-forest-labs/FLUX.2-klein-base-4B:transformer/*.safetensors,black-forest-labs/FLUX.2-klein-4B:vae/diffusion_pytorch_model.safetensors" \
#   --tokenizer_path "black-forest-labs/FLUX.2-klein-4B:tokenizer/" \
#   --learning_rate 1e-4 \
#   --num_epochs 5 \
#   --remove_prefix_in_ckpt "pipe.dit." \
#   --output_path "./models/train/FLUX.2-klein-base-4B_lora" \
#   --lora_base_model "dit" \
#   --lora_target_modules "to_q,to_k,to_v,to_out.0,add_q_proj,add_k_proj,add_v_proj,to_add_out,linear_in,linear_out,to_qkv_mlp_proj,single_transformer_blocks.0.attn.to_out,single_transformer_blocks.1.attn.to_out,single_transformer_blocks.2.attn.to_out,single_transformer_blocks.3.attn.to_out,single_transformer_blocks.4.attn.to_out,single_transformer_blocks.5.attn.to_out,single_transformer_blocks.6.attn.to_out,single_transformer_blocks.7.attn.to_out,single_transformer_blocks.8.attn.to_out,single_transformer_blocks.9.attn.to_out,single_transformer_blocks.10.attn.to_out,single_transformer_blocks.11.attn.to_out,single_transformer_blocks.12.attn.to_out,single_transformer_blocks.13.attn.to_out,single_transformer_blocks.14.attn.to_out,single_transformer_blocks.15.attn.to_out,single_transformer_blocks.16.attn.to_out,single_transformer_blocks.17.attn.to_out,single_transformer_blocks.18.attn.to_out,single_transformer_blocks.19.attn.to_out" \
#   --lora_rank 32 \
#   --use_gradient_checkpointing
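
# For reference, a minimal sketch of the metadata.json layout implied by
# --data_file_keys "image,edit_image" (the prompt field name and overall
# structure are assumptions; inspect the downloaded file for the real schema):
#
#   [
#     {"image": "target_0.jpg", "edit_image": "source_0.jpg", "prompt": "replace the sky with a sunset"}
#   ]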