fix a finetune bug

josc146 2023-11-17 22:37:21 +08:00
parent 01d3c89ea4
commit f739c61197
4 changed files with 13 additions and 6 deletions

@@ -49,6 +49,10 @@ fi
 echo "loading $loadModel"
 modelInfo=$(python3 ./finetune/get_layer_and_embd.py $loadModel)
 echo $modelInfo
-python3 ./finetune/lora/train.py $modelInfo $@ --proj_dir lora-models --data_type binidx --lora \
+if [[ $modelInfo =~ "--n_layer" ]]; then
+python3 ./finetune/lora/train.py $modelInfo $@ --proj_dir lora-models --data_type binidx --lora \
+--lora_parts=att,ffn,time,ln --strategy deepspeed_stage_2 --accelerator gpu
+else
+echo "modelInfo is invalid"
+exit 1
+fi

@@ -268,5 +268,6 @@
 "Enable WebUI": "WebUIを有効化",
 "Server is working on deployment mode, please close the terminal window manually": "サーバーはデプロイモードで動作しています、ターミナルウィンドウを手動で閉じてください",
 "Server is working on deployment mode, please exit the program manually to stop the server": "サーバーはデプロイモードで動作しています、サーバーを停止するにはプログラムを手動で終了してください",
-"You can increase the number of stored layers in Configs page to improve performance": "パフォーマンスを向上させるために、保存されるレイヤーの数を設定ページで増やすことができます"
+"You can increase the number of stored layers in Configs page to improve performance": "パフォーマンスを向上させるために、保存されるレイヤーの数を設定ページで増やすことができます",
+"Failed to load model, try to increase the virtual memory (Swap of WSL) or use a smaller base model.": "モデルの読み込みに失敗しました、仮想メモリ (WSL Swap) を増やすか小さなベースモデルを使用してみてください。"
 }

@@ -268,5 +268,6 @@
 "Enable WebUI": "启用WebUI",
 "Server is working on deployment mode, please close the terminal window manually": "服务器正在部署模式下运行,请手动关闭终端窗口",
 "Server is working on deployment mode, please exit the program manually to stop the server": "服务器正在部署模式下运行,请手动退出程序以停止服务器",
-"You can increase the number of stored layers in Configs page to improve performance": "你可以在配置页面增加载入显存层数以提升性能"
+"You can increase the number of stored layers in Configs page to improve performance": "你可以在配置页面增加载入显存层数以提升性能",
+"Failed to load model, try to increase the virtual memory (Swap of WSL) or use a smaller base model.": "模型载入失败,尝试增加虚拟内存(WSL Swap),或使用一个更小规模的基底模型"
 }

@@ -133,7 +133,8 @@ const errorsMap = Object.entries({
 'size mismatch for blocks': 'Size mismatch for blocks. You are attempting to continue training from the LoRA model, but it does not match the base model. Please set LoRA model to None.',
 'cuda_home environment variable is not set': 'Matched CUDA is not installed',
 'unsupported gpu architecture': 'Matched CUDA is not installed',
-'error building extension \'fused_adam\'': 'Matched CUDA is not installed'
+'error building extension \'fused_adam\'': 'Matched CUDA is not installed',
+'modelinfo is invalid': 'Failed to load model, try to increase the virtual memory (Swap of WSL) or use a smaller base model.'
 });
 export const wslHandler = (data: string) => {
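
Taken together, the hunks close the loop on the bug: when get_layer_and_embd.py fails to probe the base model (e.g. for lack of WSL virtual memory, as the new message suggests), the patched shell script no longer calls train.py with garbage arguments; it prints "modelInfo is invalid" and exits, and the lowercase 'modelinfo is invalid' key added to errorsMap lets the frontend surface the new localized message. The sketch below only illustrates that keyword-to-message lookup, assuming WSL output is lowercased before matching; the matchError helper is hypothetical and is not the repository's actual wslHandler.

// Minimal sketch of errorsMap-style keyword matching (illustrative only).
// Each entry pairs a lowercase keyword expected in WSL output with a
// user-facing message; the real errorsMap is likewise built via Object.entries.
const errorsMap: [string, string][] = Object.entries({
  'error building extension \'fused_adam\'': 'Matched CUDA is not installed',
  'modelinfo is invalid':
    'Failed to load model, try to increase the virtual memory (Swap of WSL) or use a smaller base model.',
});

// Hypothetical helper: return the message for the first keyword contained
// in the (lowercased) output line, or undefined if nothing matches.
function matchError(line: string): string | undefined {
  const lower = line.toLowerCase();
  return errorsMap.find(([keyword]) => lower.includes(keyword))?.[1];
}

// The line printed by the patched shell script now maps to the new message.
console.log(matchError('modelInfo is invalid'));
// -> 'Failed to load model, try to increase the virtual memory (Swap of WSL) or use a smaller base model.'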