#!/usr/bin/env bash
# RWKV-Runner/finetune/install-wsl-dep-and-train.sh
# Installs the WSL (Ubuntu) build dependencies needed for fine-tuning
# (gcc, python3-pip, python3-dev, ninja-build, CUDA 12) and then launches
# LoRA training with the arguments forwarded to this script.
# Log the forwarded CLI arguments for debugging.
echo "$@"

# When cnMirror=1 (exported by the caller), switch pip and apt to mirrors
# hosted in mainland China for faster downloads.
if [[ ${cnMirror} == 1 ]]; then
  export PIP_INDEX_URL="https://pypi.tuna.tsinghua.edu.cn/simple"
  if grep -q "mirrors.aliyun.com" /etc/apt/sources.list; then
    echo "apt cnMirror already set"
  else
    # Point Ubuntu's package archive at the Aliyun mirror, then refresh indexes.
    sudo sed -i 's/http:\/\/archive.ubuntu.com\/ubuntu\//http:\/\/mirrors.aliyun.com\/ubuntu\//g' /etc/apt/sources.list
    sudo apt update
  fi
fi
# Install gcc (needed to compile CUDA kernels) if it is not already present.
if dpkg -s "gcc" >/dev/null 2>&1; then
  echo "gcc installed"
else
  sudo apt -y install gcc
fi
# Install python3-pip if missing.
if dpkg -s "python3-pip" >/dev/null 2>&1; then
  echo "pip installed"
else
  sudo apt -y install python3-pip
fi

# Install python3-dev (Python headers, required to build native extensions).
if dpkg -s "python3-dev" >/dev/null 2>&1; then
  echo "python3-dev installed"
else
  sudo apt -y install python3-dev
fi
# Install ninja-build (build backend used when compiling CUDA extensions).
if dpkg -s "ninja-build" >/dev/null 2>&1; then
  echo "ninja installed"
else
  sudo apt -y install ninja-build
fi
# Install CUDA 12 for WSL-Ubuntu unless a CUDA 12.x package is already present.
# NOTE(review): the version test is a plain substring match on "12", so a
# hypothetical "11.12" would also pass — confirm whether a stricter
# anchor (grep -q '^12\.') is wanted before tightening it.
if dpkg -s "cuda" >/dev/null 2>&1 && dpkg -s "cuda" | grep Version | awk '{print $2}' | grep -q "12"; then
  echo "cuda 12 installed"
else
  # Pin NVIDIA's repository at high priority so its packages win over Ubuntu's.
  wget -N https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin
  sudo mv cuda-wsl-ubuntu.pin /etc/apt/preferences.d/cuda-repository-pin-600
  # Fetch and register NVIDIA's local CUDA 12.2 repository, trust its key,
  # then install the cuda meta-package from it.
  wget -N https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda-repo-wsl-ubuntu-12-2-local_12.2.0-1_amd64.deb
  sudo dpkg -i cuda-repo-wsl-ubuntu-12-2-local_12.2.0-1_amd64.deb
  sudo cp /var/cuda-repo-wsl-ubuntu-12-2-local/cuda-*-keyring.gpg /usr/share/keyrings/
  sudo apt-get update
  sudo apt-get -y install cuda
fi
# Install the pinned Python dependencies only when they are not yet satisfied.
# pkg_resources.require raises (non-zero exit) if any requirement is unmet,
# which routes us into the pip install branch.
if python3 -c "import pkg_resources; pkg_resources.require(open('./finetune/requirements.txt',mode='r'))" &>/dev/null; then
  echo "requirements satisfied"
else
  python3 -m pip install -r ./finetune/requirements.txt
fi
echo "loading $loadModel"

# Probe the checkpoint: the helper prints the training script name followed by
# its derived CLI flags (e.g. "train.py --n_layer 24 --n_embd 2048 ...").
# NOTE(review): the meaning of the literal 5.2 argument is not visible from
# this file — confirm against get_layer_and_embd.py before changing it.
modelInfo=$(python3 ./finetune/get_layer_and_embd.py "$loadModel" 5.2)
echo "$modelInfo"

# A valid probe result must contain the --n_layer flag.
if [[ $modelInfo =~ "--n_layer" ]]; then
  # Drop stale compiled torch CUDA extensions so they are rebuilt fresh.
  sudo rm -rf /root/.cache/torch_extensions
  # $modelInfo is intentionally unquoted: it must word-split into the script
  # path plus its individual flags.
  python3 ./finetune/lora/$modelInfo "$@" --proj_dir lora-models --data_type binidx --lora \
    --lora_parts=att,ffn,time,ln --strategy deepspeed_stage_2 --accelerator gpu --ds_bucket_mb 2
else
  echo "modelInfo is invalid"
  exit 1
fi