Compare commits

master...v1.4.0

No commits in common. "master" and "v1.4.0" have entirely different histories.

311 changed files with 4334 additions and 185412 deletions

.gitattributes vendored (4 changes)

@@ -1,11 +1,7 @@
* text=auto eol=lf
backend-python/rwkv_pip/** linguist-vendored
backend-python/wkv_cuda_utils/** linguist-vendored
backend-python/get-pip.py linguist-vendored
backend-python/convert_model.py linguist-vendored
backend-python/convert_safetensors.py linguist-vendored
backend-python/convert_pytorch_to_ggml.py linguist-vendored
backend-python/utils/midi.py linguist-vendored
build/** linguist-vendored
finetune/lora/** linguist-vendored


@@ -1,9 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
commit-message:
prefix: "chore"
include: "scope"


@@ -1,171 +0,0 @@
name: Publish Docker Image
on: [push]
concurrency:
group: ${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
docker_build:
name: Build ${{ matrix.arch }} Image
runs-on: ubuntu-latest
strategy:
matrix:
include:
- arch: amd64
name: amd64
# - arch: arm64
# name: arm64
steps:
- name: Free up disk spaces
run: |
sudo rm -rf /usr/share/dotnet || true
sudo rm -rf /opt/ghc || true
sudo rm -rf "/usr/local/share/boost" || true
sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
- name: Get lowercase string for the repository name
id: lowercase-repo-name
uses: ASzc/change-string-case-action@v2
with:
string: ${{ github.event.repository.name }}
- name: Checkout base
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Cache Docker layers
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ github.ref }}-${{ matrix.arch }}
restore-keys: |
${{ github.ref }}-${{ matrix.arch }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
with:
platforms: linux/${{ matrix.arch }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Docker login
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Get commit SHA
id: vars
run: echo "::set-output name=sha_short::$(git rev-parse --short HEAD)"
- name: Build and export
id: build
if: github.ref == 'refs/heads/master'
uses: docker/build-push-action@v3
with:
push: true
platforms: linux/${{ matrix.arch }}
tags: ${{ secrets.DOCKER_USERNAME }}/${{ steps.lowercase-repo-name.outputs.lowercase }}:${{ matrix.name }}-latest
build-args: |
SHA=${{ steps.vars.outputs.sha_short }}
outputs: type=image,push=true
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Replace tag without `v`
if: startsWith(github.ref, 'refs/tags/')
uses: actions/github-script@v1
id: version
with:
script: |
return context.payload.ref.replace(/\/?refs\/tags\/v/, '')
result-encoding: string
- name: Build release and export
id: build_rel
if: startsWith(github.ref, 'refs/tags/')
uses: docker/build-push-action@v3
with:
push: true
platforms: linux/${{ matrix.arch }}
tags: ${{ secrets.DOCKER_USERNAME }}/${{ steps.lowercase-repo-name.outputs.lowercase }}:${{ matrix.name }}-${{steps.version.outputs.result}}
build-args: |
SHA=${{ steps.version.outputs.result }}
outputs: type=image,push=true
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Save digest
if: github.ref == 'refs/heads/master'
run: echo ${{ steps.build.outputs.digest }} > /tmp/digest.txt
- name: Save release digest
if: startsWith(github.ref, 'refs/tags/')
run: echo ${{ steps.build_rel.outputs.digest }} > /tmp/digest.txt
- name: Upload artifact
uses: actions/upload-artifact@v3
with:
name: digest_${{ matrix.name }}
path: /tmp/digest.txt
manifests:
name: Build manifests
needs: [docker_build]
runs-on: ubuntu-latest
steps:
- name: Get lowercase string for the repository name
id: lowercase-repo-name
uses: ASzc/change-string-case-action@v2
with:
string: ${{ github.event.repository.name }}
- name: Checkout base
uses: actions/checkout@v2
with:
fetch-depth: 0
# https://github.com/docker/setup-qemu-action
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
# https://github.com/docker/setup-buildx-action
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
config-inline: |
[worker.oci]
max-parallelism = 1
- name: Download artifact
uses: actions/download-artifact@v3
with:
path: /tmp/images/
- name: Docker login
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Replace tag without `v`
if: startsWith(github.ref, 'refs/tags/')
uses: actions/github-script@v1
id: version
with:
script: |
return context.payload.ref.replace(/\/?refs\/tags\/v/, '')
result-encoding: string
- name: Merge and push manifest on master branch
if: github.ref == 'refs/heads/master'
run: python scripts/merge_manifest.py "${{ secrets.DOCKER_USERNAME }}/${{ steps.lowercase-repo-name.outputs.lowercase }}"
- name: Merge and push manifest on release
if: startsWith(github.ref, 'refs/tags/')
run: python scripts/merge_manifest.py "${{ secrets.DOCKER_USERNAME }}/${{ steps.lowercase-repo-name.outputs.lowercase }}" ${{steps.version.outputs.result}}


@@ -1,114 +0,0 @@
name: pre-release
on:
workflow_dispatch:
push:
branches:
- master
paths:
- "backend-python/**"
tags-ignore:
- "v*"
jobs:
windows:
runs-on: windows-2022
steps:
- uses: actions/checkout@v4
with:
ref: master
- uses: actions/setup-go@v5
with:
go-version: "1.20.5"
- uses: actions/setup-python@v5
id: cp310
with:
python-version: "3.10"
- uses: crazy-max/ghaction-chocolatey@v3
with:
args: install upx
- run: |
Start-BitsTransfer https://github.com/josStorer/ai00_rwkv_server/releases/latest/download/webgpu_server_windows_x86_64.exe ./backend-rust/webgpu_server.exe
Start-BitsTransfer https://github.com/josStorer/web-rwkv-converter/releases/latest/download/web-rwkv-converter_windows_x86_64.exe ./backend-rust/web-rwkv-converter.exe
Start-BitsTransfer https://github.com/josStorer/LibreHardwareMonitor.Console/releases/latest/download/LibreHardwareMonitor.Console.zip ./LibreHardwareMonitor.Console.zip
Expand-Archive ./LibreHardwareMonitor.Console.zip -DestinationPath ./components/LibreHardwareMonitor.Console
Start-BitsTransfer https://www.python.org/ftp/python/3.10.11/python-3.10.11-embed-amd64.zip ./python-3.10.11-embed-amd64.zip
Expand-Archive ./python-3.10.11-embed-amd64.zip -DestinationPath ./py310
$content=Get-Content "./py310/python310._pth"; $content | ForEach-Object {if ($_.ReadCount -eq 3) {"Lib\\site-packages"} else {$_}} | Set-Content ./py310/python310._pth
./py310/python ./backend-python/get-pip.py
./py310/python -m pip install Cython==3.0.4
Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../include" -Destination "py310/include" -Recurse
Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../libs" -Destination "py310/libs" -Recurse
./py310/python -m pip install cyac==1.9
go install github.com/wailsapp/wails/v2/cmd/wails@v2.8.0
del ./backend-python/rwkv_pip/cpp/librwkv.dylib
del ./backend-python/rwkv_pip/cpp/librwkv.so
(Get-Content -Path ./backend-golang/app.go) -replace "//go:custom_build windows ", "" | Set-Content -Path ./backend-golang/app.go
(Get-Content -Path ./backend-golang/utils.go) -replace "//go:custom_build windows ", "" | Set-Content -Path ./backend-golang/utils.go
make
Rename-Item -Path "build/bin/RWKV-Runner.exe" -NewName "RWKV-Runner_windows_x64.exe"
- uses: actions/upload-artifact@v4
with:
name: RWKV-Runner_windows_x64.exe
path: build/bin/RWKV-Runner_windows_x64.exe
linux:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
with:
ref: master
- uses: actions/setup-go@v5
with:
go-version: "1.20.5"
- run: |
wget https://github.com/josStorer/ai00_rwkv_server/releases/latest/download/webgpu_server_linux_x86_64 -O ./backend-rust/webgpu_server
wget https://github.com/josStorer/web-rwkv-converter/releases/latest/download/web-rwkv-converter_linux_x86_64 -O ./backend-rust/web-rwkv-converter
sudo apt-get update
sudo apt-get install upx
sudo apt-get install build-essential libgtk-3-dev libwebkit2gtk-4.0-dev libasound2-dev
go install github.com/wailsapp/wails/v2/cmd/wails@v2.8.0
rm ./backend-python/rwkv_pip/wkv_cuda.pyd
rm ./backend-python/rwkv_pip/rwkv5.pyd
rm ./backend-python/rwkv_pip/rwkv6.pyd
rm ./backend-python/get-pip.py
rm ./backend-python/rwkv_pip/cpp/librwkv.dylib
rm ./backend-python/rwkv_pip/cpp/rwkv.dll
rm ./backend-python/rwkv_pip/webgpu/web_rwkv_py.cp310-win_amd64.pyd
make
mv build/bin/RWKV-Runner build/bin/RWKV-Runner_linux_x64
- uses: actions/upload-artifact@v4
with:
name: RWKV-Runner_linux_x64
path: build/bin/RWKV-Runner_linux_x64
macos:
runs-on: macos-13
steps:
- uses: actions/checkout@v4
with:
ref: master
- uses: actions/setup-go@v5
with:
go-version: "1.20.5"
- run: |
wget https://github.com/josStorer/ai00_rwkv_server/releases/latest/download/webgpu_server_darwin_aarch64 -O ./backend-rust/webgpu_server
wget https://github.com/josStorer/web-rwkv-converter/releases/latest/download/web-rwkv-converter_darwin_aarch64 -O ./backend-rust/web-rwkv-converter
go install github.com/wailsapp/wails/v2/cmd/wails@v2.8.0
rm ./backend-python/rwkv_pip/wkv_cuda.pyd
rm ./backend-python/rwkv_pip/rwkv5.pyd
rm ./backend-python/rwkv_pip/rwkv6.pyd
rm ./backend-python/get-pip.py
rm ./backend-python/rwkv_pip/cpp/rwkv.dll
rm ./backend-python/rwkv_pip/cpp/librwkv.so
rm ./backend-python/rwkv_pip/webgpu/web_rwkv_py.cp310-win_amd64.pyd
make
cp build/darwin/Readme_Install.txt build/bin/Readme_Install.txt
cp build/bin/RWKV-Runner.app/Contents/MacOS/RWKV-Runner build/bin/RWKV-Runner_darwin_universal
cd build/bin && zip -r RWKV-Runner_macos_universal.zip RWKV-Runner.app Readme_Install.txt
- uses: actions/upload-artifact@v4
with:
name: RWKV-Runner_macos_universal.zip
path: build/bin/RWKV-Runner_macos_universal.zip


@@ -11,14 +11,14 @@ env:
jobs:
create-draft:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
steps:
- run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: master
- uses: jossef/action-set-json-field@v2.2
- uses: jossef/action-set-json-field@v2.1
with:
file: manifest.json
field: version
@@ -35,40 +35,32 @@ jobs:
gh release create ${{github.ref_name}} -d -F CURRENT_CHANGE.md -t ${{github.ref_name}}
windows:
runs-on: windows-2022
runs-on: windows-latest
needs: create-draft
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: master
- uses: actions/setup-go@v5
- uses: actions/setup-go@v4
with:
go-version: "1.20.5"
- uses: actions/setup-python@v5
go-version: '1.20.5'
- uses: actions/setup-python@v4
id: cp310
with:
python-version: "3.10"
- uses: crazy-max/ghaction-chocolatey@v3
python-version: '3.10'
- uses: crazy-max/ghaction-chocolatey@v2
with:
args: install upx
- run: |
Start-BitsTransfer https://github.com/josStorer/ai00_rwkv_server/releases/latest/download/webgpu_server_windows_x86_64.exe ./backend-rust/webgpu_server.exe
Start-BitsTransfer https://github.com/josStorer/web-rwkv-converter/releases/latest/download/web-rwkv-converter_windows_x86_64.exe ./backend-rust/web-rwkv-converter.exe
Start-BitsTransfer https://github.com/josStorer/LibreHardwareMonitor.Console/releases/latest/download/LibreHardwareMonitor.Console.zip ./LibreHardwareMonitor.Console.zip
Expand-Archive ./LibreHardwareMonitor.Console.zip -DestinationPath ./components/LibreHardwareMonitor.Console
Start-BitsTransfer https://www.python.org/ftp/python/3.10.11/python-3.10.11-embed-amd64.zip ./python-3.10.11-embed-amd64.zip
Expand-Archive ./python-3.10.11-embed-amd64.zip -DestinationPath ./py310
$content=Get-Content "./py310/python310._pth"; $content | ForEach-Object {if ($_.ReadCount -eq 3) {"Lib\\site-packages"} else {$_}} | Set-Content ./py310/python310._pth
./py310/python ./backend-python/get-pip.py
./py310/python -m pip install Cython==3.0.4
./py310/python -m pip install Cython
Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../include" -Destination "py310/include" -Recurse
Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../libs" -Destination "py310/libs" -Recurse
./py310/python -m pip install cyac==1.9
go install github.com/wailsapp/wails/v2/cmd/wails@v2.8.0
del ./backend-python/rwkv_pip/cpp/librwkv.dylib
del ./backend-python/rwkv_pip/cpp/librwkv.so
(Get-Content -Path ./backend-golang/app.go) -replace "//go:custom_build windows ", "" | Set-Content -Path ./backend-golang/app.go
(Get-Content -Path ./backend-golang/utils.go) -replace "//go:custom_build windows ", "" | Set-Content -Path ./backend-golang/utils.go
./py310/python -m pip install cyac
go install github.com/wailsapp/wails/v2/cmd/wails@latest
make
Rename-Item -Path "build/bin/RWKV-Runner.exe" -NewName "RWKV-Runner_windows_x64.exe"
@@ -78,26 +70,22 @@ jobs:
runs-on: ubuntu-20.04
needs: create-draft
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: master
- uses: actions/setup-go@v5
- uses: actions/setup-go@v4
with:
go-version: "1.20.5"
go-version: '1.20.5'
- run: |
wget https://github.com/josStorer/ai00_rwkv_server/releases/latest/download/webgpu_server_linux_x86_64 -O ./backend-rust/webgpu_server
wget https://github.com/josStorer/web-rwkv-converter/releases/latest/download/web-rwkv-converter_linux_x86_64 -O ./backend-rust/web-rwkv-converter
sudo apt-get update
sudo apt-get install upx
sudo apt-get install build-essential libgtk-3-dev libwebkit2gtk-4.0-dev libasound2-dev
go install github.com/wailsapp/wails/v2/cmd/wails@v2.8.0
rm ./backend-python/rwkv_pip/wkv_cuda.pyd
rm ./backend-python/rwkv_pip/rwkv5.pyd
rm ./backend-python/rwkv_pip/rwkv6.pyd
sudo apt-get install build-essential libgtk-3-dev libwebkit2gtk-4.0-dev
go install github.com/wailsapp/wails/v2/cmd/wails@latest
rm -rf ./backend-python/wkv_cuda_utils
rm ./backend-python/get-pip.py
rm ./backend-python/rwkv_pip/cpp/librwkv.dylib
rm ./backend-python/rwkv_pip/cpp/rwkv.dll
rm ./backend-python/rwkv_pip/webgpu/web_rwkv_py.cp310-win_amd64.pyd
sed -i '1,2d' ./backend-golang/wsl_not_windows.go
rm ./backend-golang/wsl.go
mv ./backend-golang/wsl_not_windows.go ./backend-golang/wsl.go
make
mv build/bin/RWKV-Runner build/bin/RWKV-Runner_linux_x64
@@ -107,23 +95,19 @@ jobs:
runs-on: macos-13
needs: create-draft
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: master
- uses: actions/setup-go@v5
- uses: actions/setup-go@v4
with:
go-version: "1.20.5"
go-version: '1.20.5'
- run: |
wget https://github.com/josStorer/ai00_rwkv_server/releases/latest/download/webgpu_server_darwin_aarch64 -O ./backend-rust/webgpu_server
wget https://github.com/josStorer/web-rwkv-converter/releases/latest/download/web-rwkv-converter_darwin_aarch64 -O ./backend-rust/web-rwkv-converter
go install github.com/wailsapp/wails/v2/cmd/wails@v2.8.0
rm ./backend-python/rwkv_pip/wkv_cuda.pyd
rm ./backend-python/rwkv_pip/rwkv5.pyd
rm ./backend-python/rwkv_pip/rwkv6.pyd
go install github.com/wailsapp/wails/v2/cmd/wails@latest
rm -rf ./backend-python/wkv_cuda_utils
rm ./backend-python/get-pip.py
rm ./backend-python/rwkv_pip/cpp/rwkv.dll
rm ./backend-python/rwkv_pip/cpp/librwkv.so
rm ./backend-python/rwkv_pip/webgpu/web_rwkv_py.cp310-win_amd64.pyd
sed -i '' '1,2d' ./backend-golang/wsl_not_windows.go
rm ./backend-golang/wsl.go
mv ./backend-golang/wsl_not_windows.go ./backend-golang/wsl.go
make
cp build/darwin/Readme_Install.txt build/bin/Readme_Install.txt
cp build/bin/RWKV-Runner.app/Contents/MacOS/RWKV-Runner build/bin/RWKV-Runner_darwin_universal
@@ -132,8 +116,8 @@ jobs:
- run: gh release upload ${{github.ref_name}} build/bin/RWKV-Runner_macos_universal.zip build/bin/RWKV-Runner_darwin_universal
publish-release:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
needs: [ windows, linux, macos ]
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- run: gh release edit ${{github.ref_name}} --draft=false

.gitignore vendored (4 changes)

@@ -5,10 +5,7 @@ __pycache__
.idea
.vs
*.pth
*.st
*.safetensors
*.bin
*.mid
/config.json
/cache.json
/presets.json
@@ -27,4 +24,3 @@ __pycache__
train_log.txt
finetune/json2binidx_tool/data
/wsl.state
/components


@@ -1,31 +1,21 @@
## v1.8.4
## Changes
- fix f05a4a, __init__.py is not embedded
## v1.8.3
### Deprecations
- rwkv-beta is deprecated
### Upgrades
- bump webgpu(python) (https://github.com/cryscan/web-rwkv-py)
- sync https://github.com/JL-er/RWKV-PEFT (LoRA)
### Improvements
- improve default LoRA fine-tune params
### Fixes
- fix #342, #345: cannot import name 'packaging' from 'pkg_resources'
- fix the huge error prompt that pops up when running in webgpu mode
- add Composition Page (RWKV-Music)
- improve RunButton prompt
- support for `stop` array api params
- improve embeddings API results
- improve python backend startup speed
- add support for MIDI RWKV
- add midi api
- add CPU-120M-Music config
- improve sse fetch
- update manifest (a lot of new models)
- update presets
- remove LoraFinetunePrecision fp32
## Install
- Windows: https://github.com/josStorer/RWKV-Runner/blob/master/build/windows/Readme_Install.txt
- MacOS: https://github.com/josStorer/RWKV-Runner/blob/master/build/darwin/Readme_Install.txt
- Linux: https://github.com/josStorer/RWKV-Runner/blob/master/build/linux/Readme_Install.txt
- Simple Deploy Example: https://github.com/josStorer/RWKV-Runner/blob/master/README.md#simple-deploy-example
- Server Deploy Examples: https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples
- Server-Deploy-Examples: https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples


@@ -1,55 +0,0 @@
FROM node:21-slim AS frontend
RUN echo "registry=https://registry.npmmirror.com/" > ~/.npmrc
WORKDIR /app
COPY manifest.json manifest.json
COPY frontend frontend
WORKDIR /app/frontend
RUN npm ci
RUN npm run build
FROM nvidia/cuda:11.6.1-devel-ubuntu20.04 AS runtime
ENV DEBIAN_FRONTEND=noninteractive
RUN apt update && \
apt install -yq git curl wget build-essential ninja-build aria2 jq software-properties-common
RUN add-apt-repository -y ppa:deadsnakes/ppa && \
add-apt-repository -y ppa:ubuntu-toolchain-r/test && \
apt install -y g++-11 python3.10 python3.10-distutils python3.10-dev && \
curl -sS http://mirrors.aliyun.com/pypi/get-pip.py | python3.10
RUN python3.10 -m pip install cmake
FROM runtime AS librwkv
WORKDIR /app
RUN git clone https://github.com/RWKV/rwkv.cpp.git && \
cd rwkv.cpp && \
git submodule update --init --recursive && \
mkdir -p build && \
cd build && \
cmake -G Ninja .. && \
cmake --build .
FROM runtime AS final
WORKDIR /app
COPY ./backend-python/requirements.txt ./backend-python/requirements.txt
RUN python3.10 -m pip install --quiet -r ./backend-python/requirements.txt
COPY . .
COPY --from=frontend /app/frontend/dist /app/frontend/dist
COPY --from=librwkv /app/rwkv.cpp/build/librwkv.so /app/backend-python/rwkv_pip/cpp/librwkv.so
EXPOSE 27777
CMD ["python3.10", "./backend-python/main.py", "--port", "27777", "--host", "0.0.0.0", "--webui"]


@@ -8,28 +8,16 @@ endif
build-windows:
@echo ---- build for windows
wails build -ldflags '-s -w -extldflags "-static"' -platform windows/amd64
upx -9 --lzma ./build/bin/RWKV-Runner.exe
wails build -upx -ldflags "-s -w" -platform windows/amd64
build-macos:
@echo ---- build for macos
wails build -ldflags '-s -w' -platform darwin/universal
wails build -ldflags "-s -w" -platform darwin/universal
build-linux:
@echo ---- build for linux
wails build -ldflags '-s -w' -platform linux/amd64
upx -9 --lzma ./build/bin/RWKV-Runner
build-web:
@echo ---- build for web
cd frontend && npm run build
wails build -upx -ldflags "-s -w" -platform linux/amd64
dev:
wails dev
dev-web:
cd frontend && npm run dev
preview:
cd frontend && npm run preview

README.md (158 changes)

@@ -1,5 +1,5 @@
<p align="center">
<img src="https://github.com/josStorer/RWKV-Runner/assets/13366013/65c46133-7506-4b54-b64f-fe49f188afa7">
<img src="https://github.com/josStorer/RWKV-Runner/assets/13366013/d24834b0-265d-45f5-93c0-fac1e19562af">
</p>
<h1 align="center">RWKV Runner</h1>
@@ -12,7 +12,6 @@ compatible with the OpenAI API, which means that every ChatGPT client is an RWKV
[![license][license-image]][license-url]
[![release][release-image]][release-url]
[![py-version][py-version-image]][py-version-url]
English | [简体中文](README_ZH.md) | [日本語](README_JA.md)
@@ -22,7 +21,7 @@ English | [简体中文](README_ZH.md) | [日本語](README_JA.md)
[![MacOS][MacOS-image]][MacOS-url]
[![Linux][Linux-image]][Linux-url]
[FAQs](https://github.com/josStorer/RWKV-Runner/wiki/FAQs) | [Preview](#Preview) | [Download][download-url] | [Simple Deploy Example](#Simple-Deploy-Example) | [Server Deploy Examples](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples) | [MIDI Hardware Input](#MIDI-Input)
[FAQs](https://github.com/josStorer/RWKV-Runner/wiki/FAQs) | [Preview](#Preview) | [Download][download-url] | [Server-Deploy-Examples](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples)
[license-image]: http://img.shields.io/badge/license-MIT-blue.svg
@@ -32,10 +31,6 @@ English | [简体中文](README_ZH.md) | [日本語](README_JA.md)
[release-url]: https://github.com/josStorer/RWKV-Runner/releases/latest
[py-version-image]: https://img.shields.io/pypi/pyversions/fastapi.svg
[py-version-url]: https://github.com/josStorer/RWKV-Runner/tree/master/backend-python
[download-url]: https://github.com/josStorer/RWKV-Runner/releases
[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
@@ -52,75 +47,28 @@ English | [简体中文](README_ZH.md) | [日本語](README_JA.md)
</div>
## Tips
#### Default configs has enabled custom CUDA kernel acceleration, which is much faster and consumes much less VRAM. If you encounter possible compatibility issues, go to the Configs page and turn off `Use Custom CUDA kernel to Accelerate`.
- You can deploy [backend-python](./backend-python/) on a server and use this program as a client only. Fill in
your server address in the Settings `API URL`.
#### If Windows Defender claims this is a virus, you can try downloading [v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip) and letting it update automatically to the latest version, or add it to the trusted list (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`).
- If you are deploying and providing public services, please limit the request size through API gateway to prevent
excessive resource usage caused by submitting overly long prompts. Additionally, please restrict the upper limit of
requests' max_tokens based on your actual
situation: https://github.com/josStorer/RWKV-Runner/blob/master/backend-python/utils/rwkv.py#L567, the default is set
as le=102400, which may result in significant resource consumption for individual responses in extreme cases.
- Default configs has enabled custom CUDA kernel acceleration, which is much faster and consumes much less VRAM. If you
encounter possible compatibility issues (output garbled), go to the Configs page and turn
off `Use Custom CUDA kernel to Accelerate`, or try to upgrade your gpu driver.
- If Windows Defender claims this is a virus, you can try
downloading [v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip)
and letting it update automatically to the latest version, or add it to the trusted
list (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`).
- For different tasks, adjusting API parameters can achieve better results. For example, for translation tasks, you can
try setting Temperature to 1 and Top_P to 0.3.
#### For different tasks, adjusting API parameters can achieve better results. For example, for translation tasks, you can try setting Temperature to 1 and Top_P to 0.3.
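The `le=102400` referenced in the tips above is a Pydantic field constraint on the request body. A minimal sketch of what tightening that cap could look like; the class name, fields, and limit below are illustrative assumptions, not the project's exact code:

```python
from pydantic import BaseModel, Field

# Hypothetical request model for illustration only; the real definition lives in
# backend-python/utils/rwkv.py and may use different names and defaults.
class CompletionBody(BaseModel):
    prompt: str
    # Lowering the upper bound (le=) keeps a single request from consuming
    # excessive resources when the service is exposed publicly.
    max_tokens: int = Field(default=1000, le=4096)
```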
## Features
- RWKV model management and one-click startup.
- Front-end and back-end separation, if you don't want to use the client, also allows for separately deploying the
front-end service, or the back-end inference service, or the back-end inference service with a WebUI.
[Simple Deploy Example](#Simple-Deploy-Example) | [Server Deploy Examples](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples)
- Compatible with the OpenAI API, making every ChatGPT client an RWKV client. After starting the model,
- RWKV model management and one-click startup
- Fully compatible with the OpenAI API, making every ChatGPT client an RWKV client. After starting the model,
open http://127.0.0.1:8000/docs to view more details.
- Automatic dependency installation, requiring only a lightweight executable program.
- Pre-set multi-level VRAM configs, works well on almost all computers. In Configs page, switch Strategy to WebGPU, it
can also run on AMD, Intel, and other graphics cards.
- User-friendly chat, completion, and composition interaction interface included. Also supports chat presets, attachment
uploads, MIDI hardware input, and track editing.
[Preview](#Preview) | [MIDI Hardware Input](#MIDI-Input)
- Built-in WebUI option, one-click start of Web service, sharing your hardware resources.
- Easy-to-understand and operate parameter configuration, along with various operation guidance prompts.
- Built-in model conversion tool.
- Built-in download management and remote model inspection.
- Built-in one-click LoRA Finetune. (Windows Only)
- Can also be used as an OpenAI ChatGPT, GPT-Playground, Ollama and more clients. (Fill in the API URL and API Key in
Settings page)
- Multilingual localization.
- Theme switching.
- Automatic updates.
## Simple Deploy Example
```bash
git clone https://github.com/josStorer/RWKV-Runner
# Then
cd RWKV-Runner
python ./backend-python/main.py #The backend inference service has been started, request /switch-model API to load the model, refer to the API documentation: http://127.0.0.1:8000/docs
# Or
cd RWKV-Runner/frontend
npm ci
npm run build #Compile the frontend
cd ..
python ./backend-python/webui_server.py #Start the frontend service separately
# Or
python ./backend-python/main.py --webui #Start the frontend and backend service at the same time
# Help Info
python ./backend-python/main.py -h
```
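Once a model has been loaded (for example through the `/switch-model` API listed at http://127.0.0.1:8000/docs), any OpenAI-style client can talk to the backend. A minimal sketch with plain `requests`, assuming the default port 8000 and an OpenAI-compatible `/chat/completions` route; the temperature and top_p values follow the translation tip above, and the exact accepted fields should be checked against the /docs page:

```python
import requests

# Hedged example: the route and fields mirror the OpenAI chat format; verify
# the exact schema at http://127.0.0.1:8000/docs for your version.
resp = requests.post(
    "http://127.0.0.1:8000/chat/completions",
    json={
        "messages": [{"role": "user", "content": "Translate to French: good morning"}],
        "temperature": 1,  # translation-task values suggested in the Tips section
        "top_p": 0.3,
        "max_tokens": 200,
    },
)
print(resp.json())
```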
- Automatic dependency installation, requiring only a lightweight executable program
- Configs with 2G to 32G VRAM are included, works well on almost all computers
- User-friendly chat and completion interaction interface included
- Easy-to-understand and operate parameter configuration
- Built-in model conversion tool
- Built-in download management and remote model inspection
- Built-in one-click LoRA Finetune
- Can also be used as an OpenAI ChatGPT and GPT-Playground client
- Multilingual localization
- Theme switching
- Automatic updates
## API Concurrency Stress Testing
@@ -183,100 +131,36 @@ for i in np.argsort(embeddings_cos_sim)[::-1]:
print(f"{embeddings_cos_sim[i]:.10f} - {values[i]}")
```
## MIDI Input
Tip: You can download https://github.com/josStorer/sgm_plus and unzip it to the program's `assets/sound-font` directory
to use it as an offline sound source. Please note that if you are compiling the program from source code, do not place
it in the source code directory.
If you don't have a MIDI keyboard, you can use virtual MIDI input software like `Virtual Midi Controller 3 LE`, along
with [loopMIDI](https://www.tobias-erichsen.de/wp-content/uploads/2020/01/loopMIDISetup_1_0_16_27.zip), to use a regular
computer keyboard as MIDI input.
### USB MIDI Connection
- USB MIDI devices are plug-and-play, and you can select your input device in the Composition page
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/13bb92c3-4504-482d-ab82-026ac6c31095)
### Mac MIDI Bluetooth Connection
- For Mac users who want to use Bluetooth input,
please install [Bluetooth MIDI Connect](https://apps.apple.com/us/app/bluetooth-midi-connect/id1108321791), then click
the tray icon to connect after launching,
afterwards, you can select your input device in the Composition page.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c079a109-1e3d-45c1-bbf5-eed85da1550e)
### Windows MIDI Bluetooth Connection
- Windows seems to have implemented Bluetooth MIDI support only for UWP (Universal Windows Platform) apps. Therefore, it
requires multiple steps to establish a connection. We need to create a local virtual MIDI device and then launch a UWP
application. Through this UWP application, we will redirect Bluetooth MIDI input to the virtual MIDI device, and then
this software will listen to the input from the virtual MIDI device.
- So, first, you need to
download [loopMIDI](https://www.tobias-erichsen.de/wp-content/uploads/2020/01/loopMIDISetup_1_0_16_27.zip)
to create a virtual MIDI device. Click the plus sign in the bottom left corner to create the device.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b75998ff-115c-4ddd-b97c-deeb5c106255)
- Next, you need to download [Bluetooth LE Explorer](https://apps.microsoft.com/detail/9N0ZTKF1QD98) to discover and
connect to Bluetooth MIDI devices. Click "Start" to search for devices, and then click "Pair" to bind the MIDI device.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c142c3ea-a973-4531-9807-4c385d640a2b)
- Finally, you need to install [MIDIberry](https://apps.microsoft.com/detail/9N39720H2M05),
This UWP application can redirect Bluetooth MIDI input to the virtual MIDI device. After launching it, double-click
your actual Bluetooth MIDI device name in the input field, and in the output field, double-click the virtual MIDI
device name we created earlier.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/5ad6a1d9-4f68-4d95-ae17-4296107d1669)
- Now, you can select the virtual MIDI device as the input in the Composition page. Bluetooth LE Explorer no longer
needs to run, and you can also close the loopMIDI window, it will run automatically in the background. Just keep
MIDIberry open.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/1c371821-c7b7-4c18-8e42-9e315efbe427)
## Related Repositories:
- RWKV-5-World: https://huggingface.co/BlinkDL/rwkv-5-world/tree/main
- RWKV-4-World: https://huggingface.co/BlinkDL/rwkv-4-world/tree/main
- RWKV-4-Raven: https://huggingface.co/BlinkDL/rwkv-4-raven/tree/main
- ChatRWKV: https://github.com/BlinkDL/ChatRWKV
- RWKV-LM: https://github.com/BlinkDL/RWKV-LM
- RWKV-LM-LoRA: https://github.com/Blealtan/RWKV-LM-LoRA
- RWKV-v5-lora: https://github.com/JL-er/RWKV-v5-lora
- MIDI-LLM-tokenizer: https://github.com/briansemrau/MIDI-LLM-tokenizer
- ai00_rwkv_server: https://github.com/cgisky1980/ai00_rwkv_server
- rwkv.cpp: https://github.com/saharNooby/rwkv.cpp
- web-rwkv-py: https://github.com/cryscan/web-rwkv-py
- web-rwkv: https://github.com/cryscan/web-rwkv
## Preview
### Homepage
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c1923ed8-22f7-48b4-a274-e215e27a8e01)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/d7f24d80-f382-428d-8b28-edf87e1549e2)
### Chat
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/80009872-528f-4932-aeb2-f724fa892e7c)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/de8d3fa7-c31f-4941-a22b-ded785427ac0)
### Completion
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/bf49de8e-3b89-4543-b1ef-7cd4b19a1836)
### Composition
Tip: You can download https://github.com/josStorer/sgm_plus and unzip it to the program's `assets/sound-font` directory
to use it as an offline sound source. Please note that if you are compiling the program from source code, do not place
it in the source code directory.
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/e8ad908d-3fd2-4e92-bcdb-96815cb836ee)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b2ce4761-9e75-477e-a182-d0255fb8ac76)
### Configuration
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/f41060dc-5517-44af-bb3f-8ef71720016d)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/48befdc6-e03c-4851-9bee-22f77ee2640e)
### Model Management
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b1581147-a6ce-4493-8010-e33c0ddeca0a)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/367fe4f8-cc12-475f-9371-3cf62cdbf293)
### Download Management


@@ -1,5 +1,5 @@
<p align="center">
<img src="https://github.com/josStorer/RWKV-Runner/assets/13366013/65c46133-7506-4b54-b64f-fe49f188afa7">
<img src="https://github.com/josStorer/RWKV-Runner/assets/13366013/d24834b0-265d-45f5-93c0-fac1e19562af">
</p>
<h1 align="center">RWKV Runner</h1>
@@ -12,7 +12,6 @@
[![license][license-image]][license-url]
[![release][release-image]][release-url]
[![py-version][py-version-image]][py-version-url]
[English](README.md) | [简体中文](README_ZH.md) | 日本語
@@ -22,7 +21,7 @@
[![MacOS][MacOS-image]][MacOS-url]
[![Linux][Linux-image]][Linux-url]
[FAQs](https://github.com/josStorer/RWKV-Runner/wiki/FAQs) | [プレビュー](#Preview) | [ダウンロード][download-url] | [シンプルなデプロイの例](#Simple-Deploy-Example) | [サーバーデプロイ例](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples) | [MIDIハードウェア入力](#MIDI-Input)
[FAQs](https://github.com/josStorer/RWKV-Runner/wiki/FAQs) | [プレビュー](#Preview) | [ダウンロード][download-url] | [サーバーデプロイ例](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples)
[license-image]: http://img.shields.io/badge/license-MIT-blue.svg
@@ -32,10 +31,6 @@
[release-url]: https://github.com/josStorer/RWKV-Runner/releases/latest
[py-version-image]: https://img.shields.io/pypi/pyversions/fastapi.svg
[py-version-url]: https://github.com/josStorer/RWKV-Runner/tree/master/backend-python
[download-url]: https://github.com/josStorer/RWKV-Runner/releases
[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
@@ -52,71 +47,29 @@
</div>
## ヒント
#### デフォルトの設定はカスタム CUDA カーネルアクセラレーションを有効にしています。互換性の問題が発生する可能性がある場合は、コンフィグページに移動し、`Use Custom CUDA kernel to Accelerate` をオフにしてください。
- サーバーに [backend-python](./backend-python/)
をデプロイし、このプログラムをクライアントとして使用することができます。設定された`API URL`にサーバーアドレスを入力してください。
#### Windows Defender がこれをウイルスだと主張する場合は、[v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip) をダウンロードして最新版に自動更新させるか、信頼済みリストに追加してみてください (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`)。
- もし、あなたがデプロイし、外部に公開するサービスを提供している場合、APIゲートウェイを使用してリクエストのサイズを制限し、
長すぎるプロンプトの提出がリソースを占有しないようにしてください。さらに、実際の状況に応じて、リクエストの max_tokens
の上限を制限してくださいhttps://github.com/josStorer/RWKV-Runner/blob/master/backend-python/utils/rwkv.py#L567
、デフォルトは le=102400 ですが、極端な場合には単一の応答が大量のリソースを消費する可能性があります。
- デフォルトの設定はカスタム CUDA カーネルアクセラレーションを有効にしています。互換性の問題 (文字化けを出力する)
が発生する可能性がある場合は、コンフィグページに移動し、`Use Custom CUDA kernel to Accelerate`
をオフにしてください、あるいは、GPUドライバーをアップグレードしてみてください。
- Windows Defender
がこれをウイルスだと主張する場合は、[v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip)
をダウンロードして最新版に自動更新させるか、信頼済みリストに追加してみてください (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`)。
- 異なるタスクについては、API パラメータを調整することで、より良い結果を得ることができます。例えば、翻訳タスクの場合、Temperature
を 1 に、Top_P を 0.3 に設定してみてください。
#### 異なるタスクについては、API パラメータを調整することで、より良い結果を得ることができます。例えば、翻訳タスクの場合、Temperature を 1 に、Top_P を 0.3 に設定してみてください。
## 特徴
- RWKV モデル管理とワンクリック起動
- フロントエンドとバックエンドの分離は、クライアントを使用しない場合でも、フロントエンドサービス、またはバックエンド推論サービス、またはWebUIを備えたバックエンド推論サービスを個別に展開することを可能にします。
[シンプルなデプロイの例](#Simple-Deploy-Example) | [サーバーデプロイ例](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples)
- OpenAI API と互換性があり、すべての ChatGPT クライアントを RWKV クライアントにします。モデル起動後、
- OpenAI API と完全に互換性があり、すべての ChatGPT クライアントを RWKV クライアントにします。モデル起動後、
http://127.0.0.1:8000/docs を開いて詳細をご覧ください。
- 依存関係の自動インストールにより、軽量な実行プログラムのみを必要とします
- 事前設定された多段階のVRAM設定、ほとんどのコンピュータで動作します。配置ページで、ストラテジーをWebGPUに切り替えると、AMD、インテル、その他のグラフィックカードでも動作します
- ユーザーフレンドリーなチャット、完成、および作曲インターフェイスが含まれています。また、チャットプリセット、添付ファイルのアップロード、MIDIハードウェア入力、トラック編集もサポートしています。
[プレビュー](#Preview) | [MIDIハードウェア入力](#MIDI-Input)
- 内蔵WebUIオプション、Webサービスのワンクリック開始、ハードウェアリソースの共有
- 分かりやすく操作しやすいパラメータ設定、各種操作ガイダンスプロンプトとともに
- 2G から 32G の VRAM のコンフィグが含まれており、ほとんどのコンピュータで動作します
- ユーザーフレンドリーなチャットと完成インタラクションインターフェースを搭載
- 分かりやすく操作しやすいパラメータ設定
- 内蔵モデル変換ツール
- ダウンロード管理とリモートモデル検査機能内蔵
- 内蔵のLoRA微調整機能を搭載しています (Windowsのみ)
- このプログラムは、OpenAI ChatGPT、GPT Playground、Ollama などのクライアントとしても使用できます(設定ページで `API URL`
`API Key` を入力してください)
- 内蔵のLoRA微調整機能を搭載しています
- このプログラムは、OpenAI ChatGPTとGPT Playgroundのクライアントとしても使用できます
- 多言語ローカライズ
- テーマ切り替え
- 自動アップデート
## Simple Deploy Example
```bash
git clone https://github.com/josStorer/RWKV-Runner
# Then
cd RWKV-Runner
python ./backend-python/main.py #The backend inference service has been started, request /switch-model API to load the model, refer to the API documentation: http://127.0.0.1:8000/docs
# Or
cd RWKV-Runner/frontend
npm ci
npm run build #Compile the frontend
cd ..
python ./backend-python/webui_server.py #Start the frontend service separately
# Or
python ./backend-python/main.py --webui #Start the frontend and backend service at the same time
# Help Info
python ./backend-python/main.py -h
```
## API 同時実行ストレステスト
```bash
@@ -138,8 +91,8 @@ body.json:
## 埋め込み API の例
注意: v1.4.0 では、埋め込み API の品質が向上しました。生成される結果は、以前のバージョンとは互換性がありません。
もし、embeddings API を使って知識ベースなどを生成している場合は、再生成してください。
Note: v1.4.0 has improved the quality of embeddings API. The generated results are not compatible
with previous versions. If you are using embeddings API to generate knowledge bases or similar, please regenerate.
LangChain を使用している場合は、`OpenAIEmbeddings(openai_api_base="http://127.0.0.1:8000", openai_api_key="sk-")`
を使用してください
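As a sketch, the LangChain call mentioned above can be wired up like this (assuming a local backend on the default port 8000; the import path varies between LangChain versions, and the API key is just the placeholder value suggested above):

```python
from langchain.embeddings import OpenAIEmbeddings

# Point LangChain at the local OpenAI-compatible embeddings endpoint.
embeddings = OpenAIEmbeddings(
    openai_api_base="http://127.0.0.1:8000",
    openai_api_key="sk-",  # placeholder key, as suggested in the README
)
vector = embeddings.embed_query("rwkv")
print(len(vector))
```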
@@ -179,100 +132,36 @@ for i in np.argsort(embeddings_cos_sim)[::-1]:
print(f"{embeddings_cos_sim[i]:.10f} - {values[i]}")
```
## MIDI Input
Tip: You can download https://github.com/josStorer/sgm_plus and unzip it to the program's `assets/sound-font` directory
to use it as an offline sound source. Please note that if you are compiling the program from source code, do not place
it in the source code directory.
MIDIキーボードをお持ちでない場合、`Virtual Midi Controller 3 LE`
などの仮想MIDI入力ソフトウェアを使用することができます。[loopMIDI](https://www.tobias-erichsen.de/wp-content/uploads/2020/01/loopMIDISetup_1_0_16_27.zip)
を組み合わせて、通常のコンピュータキーボードをMIDI入力として使用できます。
### USB MIDI Connection
- USB MIDI devices are plug-and-play, and you can select your input device in the Composition page
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/13bb92c3-4504-482d-ab82-026ac6c31095)
### Mac MIDI Bluetooth Connection
- For Mac users who want to use Bluetooth input,
please install [Bluetooth MIDI Connect](https://apps.apple.com/us/app/bluetooth-midi-connect/id1108321791), then click
the tray icon to connect after launching,
afterwards, you can select your input device in the Composition page.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c079a109-1e3d-45c1-bbf5-eed85da1550e)
### Windows MIDI Bluetooth Connection
- Windows seems to have implemented Bluetooth MIDI support only for UWP (Universal Windows Platform) apps. Therefore, it
requires multiple steps to establish a connection. We need to create a local virtual MIDI device and then launch a UWP
application. Through this UWP application, we will redirect Bluetooth MIDI input to the virtual MIDI device, and then
this software will listen to the input from the virtual MIDI device.
- So, first, you need to
download [loopMIDI](https://www.tobias-erichsen.de/wp-content/uploads/2020/01/loopMIDISetup_1_0_16_27.zip)
to create a virtual MIDI device. Click the plus sign in the bottom left corner to create the device.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b75998ff-115c-4ddd-b97c-deeb5c106255)
- Next, you need to download [Bluetooth LE Explorer](https://apps.microsoft.com/detail/9N0ZTKF1QD98) to discover and
connect to Bluetooth MIDI devices. Click "Start" to search for devices, and then click "Pair" to bind the MIDI device.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c142c3ea-a973-4531-9807-4c385d640a2b)
- Finally, you need to install [MIDIberry](https://apps.microsoft.com/detail/9N39720H2M05),
This UWP application can redirect Bluetooth MIDI input to the virtual MIDI device. After launching it, double-click
your actual Bluetooth MIDI device name in the input field, and in the output field, double-click the virtual MIDI
device name we created earlier.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/5ad6a1d9-4f68-4d95-ae17-4296107d1669)
- Now, you can select the virtual MIDI device as the input in the Composition page. Bluetooth LE Explorer no longer
needs to run, and you can also close the loopMIDI window, it will run automatically in the background. Just keep
MIDIberry open.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/1c371821-c7b7-4c18-8e42-9e315efbe427)
## 関連リポジトリ:
- RWKV-5-World: https://huggingface.co/BlinkDL/rwkv-5-world/tree/main
- RWKV-4-World: https://huggingface.co/BlinkDL/rwkv-4-world/tree/main
- RWKV-4-Raven: https://huggingface.co/BlinkDL/rwkv-4-raven/tree/main
- ChatRWKV: https://github.com/BlinkDL/ChatRWKV
- RWKV-LM: https://github.com/BlinkDL/RWKV-LM
- RWKV-LM-LoRA: https://github.com/Blealtan/RWKV-LM-LoRA
- RWKV-v5-lora: https://github.com/JL-er/RWKV-v5-lora
- MIDI-LLM-tokenizer: https://github.com/briansemrau/MIDI-LLM-tokenizer
- ai00_rwkv_server: https://github.com/cgisky1980/ai00_rwkv_server
- rwkv.cpp: https://github.com/saharNooby/rwkv.cpp
- web-rwkv-py: https://github.com/cryscan/web-rwkv-py
- web-rwkv: https://github.com/cryscan/web-rwkv
## Preview
## プレビュー
### ホームページ
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c1923ed8-22f7-48b4-a274-e215e27a8e01)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/d7f24d80-f382-428d-8b28-edf87e1549e2)
### チャット
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/80009872-528f-4932-aeb2-f724fa892e7c)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/de8d3fa7-c31f-4941-a22b-ded785427ac0)
### 補完
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/bf49de8e-3b89-4543-b1ef-7cd4b19a1836)
### 作曲
Tip: You can download https://github.com/josStorer/sgm_plus and unzip it to the program's `assets/sound-font` directory
to use it as an offline sound source. Please note that if you are compiling the program from source code, do not place
it in the source code directory.
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/e8ad908d-3fd2-4e92-bcdb-96815cb836ee)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b2ce4761-9e75-477e-a182-d0255fb8ac76)
### コンフィグ
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/f41060dc-5517-44af-bb3f-8ef71720016d)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/48befdc6-e03c-4851-9bee-22f77ee2640e)
### モデル管理
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b1581147-a6ce-4493-8010-e33c0ddeca0a)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/367fe4f8-cc12-475f-9371-3cf62cdbf293)
### ダウンロード管理


@@ -1,5 +1,5 @@
<p align="center">
<img src="https://github.com/josStorer/RWKV-Runner/assets/13366013/65c46133-7506-4b54-b64f-fe49f188afa7">
<img src="https://github.com/josStorer/RWKV-Runner/assets/13366013/d24834b0-265d-45f5-93c0-fac1e19562af">
</p>
<h1 align="center">RWKV Runner</h1>
@@ -11,7 +11,6 @@ API兼容的接口这意味着一切ChatGPT客户端都是RWKV客户端。
[![license][license-image]][license-url]
[![release][release-image]][release-url]
[![py-version][py-version-image]][py-version-url]
[English](README.md) | 简体中文 | [日本語](README_JA.md)
@@ -21,7 +20,7 @@ API兼容的接口这意味着一切ChatGPT客户端都是RWKV客户端。
[![MacOS][MacOS-image]][MacOS-url]
[![Linux][Linux-image]][Linux-url]
[视频演示](https://www.bilibili.com/video/BV1hM4y1v76R) | [疑难解答](https://www.bilibili.com/read/cv23921171) | [预览](#Preview) | [下载][download-url] | [懒人包](https://pan.baidu.com/s/1zdzZ_a0uM3gDqi6pXIZVAA?pwd=1111) | [简明服务部署示例](#Simple-Deploy-Example) | [服务器部署示例](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples) | [MIDI硬件输入](#MIDI-Input)
[视频演示](https://www.bilibili.com/video/BV1hM4y1v76R) | [疑难解答](https://www.bilibili.com/read/cv23921171) | [预览](#Preview) | [下载][download-url] | [懒人包](https://pan.baidu.com/s/1zdzZ_a0uM3gDqi6pXIZVAA?pwd=1111) | [服务器部署示例](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples)
[license-image]: http://img.shields.io/badge/license-MIT-blue.svg
@@ -31,10 +30,6 @@ API兼容的接口这意味着一切ChatGPT客户端都是RWKV客户端。
[release-url]: https://github.com/josStorer/RWKV-Runner/releases/latest
[py-version-image]: https://img.shields.io/pypi/pyversions/fastapi.svg
[py-version-url]: https://github.com/josStorer/RWKV-Runner/tree/master/backend-python
[download-url]: https://github.com/josStorer/RWKV-Runner/releases
[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
@@ -51,65 +46,28 @@ API兼容的接口这意味着一切ChatGPT客户端都是RWKV客户端。
</div>
## 小贴士
#### 预设配置已经开启自定义CUDA算子加速速度更快且显存消耗更少。如果你遇到可能的兼容性问题前往配置页面关闭`使用自定义CUDA算子加速`
- 你可以在服务器部署[backend-python](./backend-python/),然后将此程序仅用作客户端,在设置的`API URL`中填入你的服务器地址
#### 如果Windows Defender说这是一个病毒你可以尝试下载[v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip),然后让其自动更新到最新版,或添加信任 (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`)
- 如果你正在部署并对外提供公开服务请通过API网关限制请求大小避免过长的prompt提交占用资源。此外请根据你的实际情况限制请求的
max_tokens 上限: https://github.com/josStorer/RWKV-Runner/blob/master/backend-python/utils/rwkv.py#L567,
默认le=102400, 这可能导致极端情况下单个响应消耗大量资源
- 预设配置已经开启自定义CUDA算子加速速度更快且显存消耗更少。如果你遇到可能的兼容性(输出乱码)
问题,前往配置页面,关闭`使用自定义CUDA算子加速`,或更新你的显卡驱动
- 如果 Windows Defender
说这是一个病毒,你可以尝试下载[v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip)
然后让其自动更新到最新版,或添加信任 (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`)
- 对于不同的任务调整API参数会获得更好的效果例如对于翻译任务你可以尝试设置Temperature为1Top_P为0.3
#### 对于不同的任务调整API参数会获得更好的效果例如对于翻译任务你可以尝试设置Temperature为1Top_P为0.3
## 功能
- RWKV模型管理一键启动
- 前后端分离如果你不想使用客户端也允许单独部署前端服务或后端推理服务或具有WebUI的后端推理服务。
[简明服务部署示例](#Simple-Deploy-Example) | [服务器部署示例](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples)
- 与OpenAI API兼容一切ChatGPT客户端都是RWKV客户端。启动模型后打开 http://127.0.0.1:8000/docs 查看API文档
- 与OpenAI API完全兼容一切ChatGPT客户端都是RWKV客户端。启动模型后打开 http://127.0.0.1:8000/docs 查看详细内容
- 全自动依赖安装,你只需要一个轻巧的可执行程序
- 预设多级显存配置几乎在各种电脑上工作良好。通过配置页面切换Strategy到WebGPU还可以在AMDIntel等显卡上运行
- 自带用户友好的聊天续写作曲交互页面。支持聊天预设附件上传MIDI硬件输入及音轨编辑。
[预览](#Preview) | [MIDI硬件输入](#MIDI-Input)
- 内置WebUI选项一键启动Web服务共享硬件资源
- 易于理解和操作的参数配置,及各类操作引导提示
- 预设了2G至32G显存的配置几乎在各种电脑上工作良好
- 自带用户友好的聊天和续写交互页面
- 易于理解和操作的参数配置
- 内置模型转换工具
- 内置下载管理和远程模型检视
- 内置一键LoRA微调 (仅限Windows)
- 也可用作 OpenAI ChatGPT, GPT Playground, Ollama 等服务的客户端 (在设置内填写API URL和API Key)
- 内置一键LoRA微调
- 也可用作 OpenAI ChatGPT 和 GPT Playground 客户端
- 多语言本地化
- 主题切换
- 自动更新
## Simple Deploy Example
```bash
git clone https://github.com/josStorer/RWKV-Runner
# 然后
cd RWKV-Runner
python ./backend-python/main.py #后端推理服务已启动, 调用/switch-model载入模型, 参考API文档: http://127.0.0.1:8000/docs
# 或者
cd RWKV-Runner/frontend
npm ci
npm run build #编译前端
cd ..
python ./backend-python/webui_server.py #单独启动前端服务
# 或者
python ./backend-python/main.py --webui #同时启动前后端服务
# 帮助参数
python ./backend-python/main.py -h
```
## API并发压力测试
```bash
@@ -170,90 +128,36 @@ for i in np.argsort(embeddings_cos_sim)[::-1]:
print(f"{embeddings_cos_sim[i]:.10f} - {values[i]}")
```
## MIDI Input
小贴士: 你可以下载 https://github.com/josStorer/sgm_plus, 并解压到程序的`assets/sound-font`目录, 以使用离线音源. 注意,
如果你正在从源码编译程序, 请不要将其放置在源码目录中
如果你没有MIDI键盘, 你可以使用像 `Virtual Midi Controller 3 LE` 这样的虚拟MIDI输入软件,
配合[loopMIDI](https://www.tobias-erichsen.de/wp-content/uploads/2020/01/loopMIDISetup_1_0_16_27.zip), 使用普通电脑键盘作为MIDI输入
### USB MIDI 连接
- USB MIDI设备是即插即用的, 你能够在作曲页面选择你的输入设备
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/a448c34a-56d8-46eb-8dc2-dd11e8e0c4ce)
### Mac MIDI 蓝牙连接
- 对于想要使用蓝牙输入的Mac用户,
请安装[Bluetooth MIDI Connect](https://apps.apple.com/us/app/bluetooth-midi-connect/id1108321791), 启动后点击托盘连接,
之后你可以在作曲页面选择你的输入设备
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c079a109-1e3d-45c1-bbf5-eed85da1550e)
### Windows MIDI 蓝牙连接
- Windows似乎只为UWP实现了蓝牙MIDI支持, 因此需要多个步骤进行连接, 我们需要创建一个本地的虚拟MIDI设备, 然后启动一个UWP应用,
通过此UWP应用将蓝牙MIDI输入重定向到虚拟MIDI设备, 然后本软件监听虚拟MIDI设备的输入
- 因此, 首先你需要下载[loopMIDI](https://www.tobias-erichsen.de/wp-content/uploads/2020/01/loopMIDISetup_1_0_16_27.zip),
用于创建虚拟MIDI设备, 点击左下角的加号创建设备
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b75998ff-115c-4ddd-b97c-deeb5c106255)
- 然后, 你需要下载[Bluetooth LE Explorer](https://apps.microsoft.com/detail/9N0ZTKF1QD98), 以发现并连接蓝牙MIDI设备,
点击Start搜索设备, 然后点击Pair绑定MIDI设备
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c142c3ea-a973-4531-9807-4c385d640a2b)
- 最后, 你需要安装[MIDIberry](https://apps.microsoft.com/detail/9N39720H2M05), 这个UWP应用能将MIDI蓝牙输入重定向到虚拟MIDI设备,
启动后, 在输入栏, 双击你实际的蓝牙MIDI设备名称, 在输出栏, 双击我们先前创建的虚拟MIDI设备名称
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/5ad6a1d9-4f68-4d95-ae17-4296107d1669)
- 现在, 你可以在作曲页面选择虚拟MIDI设备作为输入. Bluetooth LE Explorer不再需要运行, loopMIDI窗口也可以退出, 它会自动在后台运行,
仅保持MIDIberry打开即可
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/6460c355-884e-4b28-a2eb-8ab7a2e3a01a)
## 相关仓库:
- RWKV-5-World: https://huggingface.co/BlinkDL/rwkv-5-world/tree/main
- RWKV-4-World: https://huggingface.co/BlinkDL/rwkv-4-world/tree/main
- RWKV-4-Raven: https://huggingface.co/BlinkDL/rwkv-4-raven/tree/main
- ChatRWKV: https://github.com/BlinkDL/ChatRWKV
- RWKV-LM: https://github.com/BlinkDL/RWKV-LM
- RWKV-LM-LoRA: https://github.com/Blealtan/RWKV-LM-LoRA
- RWKV-v5-lora: https://github.com/JL-er/RWKV-v5-lora
- MIDI-LLM-tokenizer: https://github.com/briansemrau/MIDI-LLM-tokenizer
- ai00_rwkv_server: https://github.com/cgisky1980/ai00_rwkv_server
- rwkv.cpp: https://github.com/saharNooby/rwkv.cpp
- web-rwkv-py: https://github.com/cryscan/web-rwkv-py
- web-rwkv: https://github.com/cryscan/web-rwkv
## Preview
### 主页
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/3265b11a-ab19-4e19-bfea-fc687f59aaf9)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/ff2b1eef-dd3b-4cbf-98fb-b5a1ecee43e1)
### 聊天
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/9570e73b-dca2-4316-9e92-09961f3c48c4)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/162fce43-8568-4850-a6af-ab60af988da6)
### 续写
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/69f9ba7a-2fe8-4a5e-94cb-aa655aa409e2)
### 作曲
小贴士: 你可以下载 https://github.com/josStorer/sgm_plus, 并解压到程序的`assets/sound-font`目录, 以使用离线音源. 注意,
如果你正在从源码编译程序, 请不要将其放置在源码目录中
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/95b34893-80c2-4706-87f9-bc141032ed4b)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/3cb31ca8-d708-42f1-8768-1605fb0b2174)
### 配置
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/0f4d4f21-8abe-4f4d-8c4f-cd7d5607f20e)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/59460f69-b172-4c7a-86cb-573262543076)
### 模型管理
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/871f2d2a-7e41-4be7-9b32-be1b3e00dc3e)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/551121ee-1bfe-421b-a9d1-24125126ab4b)
### 下载管理


@@ -1,24 +1,13 @@
package backend_golang
import (
"archive/zip"
"bufio"
"bytes"
"context"
"errors"
"io"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"syscall"
"time"
"github.com/fsnotify/fsnotify"
"github.com/minio/selfupdate"
@@ -30,8 +19,6 @@ type App struct {
ctx context.Context
HasConfigData bool
ConfigData map[string]any
Dev bool
proxyPort int
exDir string
cmdPrefix string
}
@@ -41,63 +28,6 @@ func NewApp() *App {
return &App{}
}
func (a *App) newFetchProxy() {
go func() {
handler := func(w http.ResponseWriter, r *http.Request) {
if r.Method == "OPTIONS" {
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
w.Header().Set("Access-Control-Allow-Headers", "*")
w.Header().Set("Access-Control-Allow-Origin", "*")
return
}
proxy := &httputil.ReverseProxy{
ModifyResponse: func(res *http.Response) error {
res.Header.Set("Access-Control-Allow-Origin", "*")
return nil
},
Director: func(req *http.Request) {
realTarget := req.Header.Get("Real-Target")
if realTarget != "" {
realTarget, err := url.PathUnescape(realTarget)
if err != nil {
log.Printf("Error decoding target URL: %v\n", err)
return
}
target, err := url.Parse(realTarget)
if err != nil {
log.Printf("Error parsing target URL: %v\n", err)
return
}
req.Header.Set("Accept", "*/*")
req.Header.Del("Origin")
req.Header.Del("Referer")
req.Header.Del("Real-Target")
req.Header.Del("Sec-Fetch-Dest")
req.Header.Del("Sec-Fetch-Mode")
req.Header.Del("Sec-Fetch-Site")
req.URL.Scheme = target.Scheme
req.URL.Host = target.Host
req.URL.Path = target.Path
req.URL.RawQuery = url.PathEscape(target.RawQuery)
log.Println("Proxying to", realTarget)
} else {
log.Println("Real-Target header is missing")
}
},
}
proxy.ServeHTTP(w, r)
}
http.HandleFunc("/", handler)
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return
}
a.proxyPort = listener.Addr().(*net.TCPAddr).Port
http.Serve(listener, nil)
}()
}
// startup is called when the app starts. The context is saved
// so we can call the runtime methods
func (a *App) OnStartup(ctx context.Context) {
@@ -105,56 +35,26 @@ func (a *App) OnStartup(ctx context.Context) {
a.exDir = ""
a.cmdPrefix = ""
ex, err := os.Executable()
if err == nil {
if runtime.GOOS == "darwin" {
a.exDir = filepath.Dir(ex) + "/../../../"
a.cmdPrefix = "cd " + a.exDir + " && "
} else {
a.exDir = filepath.Dir(ex) + "/"
a.cmdPrefix = "cd " + a.exDir + " && "
}
if a.Dev {
a.exDir = ""
} else {
os.Chdir(a.exDir)
}
if runtime.GOOS == "darwin" {
ex, _ := os.Executable()
a.exDir = filepath.Dir(ex) + "/../../../"
a.cmdPrefix = "cd " + a.exDir + " && "
}
os.Chmod(a.exDir+"backend-rust/webgpu_server", 0777)
os.Chmod(a.exDir+"backend-rust/web-rwkv-converter", 0777)
os.Mkdir(a.exDir+"models", os.ModePerm)
os.Mkdir(a.exDir+"lora-models", os.ModePerm)
os.Mkdir(a.exDir+"state-models", os.ModePerm)
os.Mkdir(a.exDir+"finetune/json2binidx_tool/data", os.ModePerm)
trainLogPath := "lora-models/train_log.txt"
if !a.FileExists(trainLogPath) {
f, err := os.Create(a.exDir + trainLogPath)
if err == nil {
f.Close()
}
f, err := os.Create(a.exDir + "lora-models/train_log.txt")
if err == nil {
f.Close()
}
a.downloadLoop()
a.midiLoop()
a.watchFs()
a.monitorHardware()
a.newFetchProxy()
}
func (a *App) OnBeforeClose(ctx context.Context) bool {
if monitor != nil {
monitor.Process.Kill()
}
return false
}
func (a *App) watchFs() {
watcher, err := fsnotify.NewWatcher()
if err == nil {
watcher.Add(a.exDir + "./models")
watcher.Add(a.exDir + "./lora-models")
watcher.Add(a.exDir + "./state-models")
watcher.Add("./lora-models")
watcher.Add("./models")
go func() {
for {
select {
@@ -162,7 +62,7 @@ func (a *App) watchFs() {
if !ok {
return
}
wruntime.EventsEmit(a.ctx, "fsnotify", event.Name)
wruntime.EventsEmit(ctx, "fsnotify", event.Name)
case _, ok := <-watcher.Errors:
if !ok {
return
@@ -173,110 +73,19 @@
}
}
var monitor *exec.Cmd
func (a *App) monitorHardware() {
if runtime.GOOS != "windows" {
return
}
monitor = exec.Command("./components/LibreHardwareMonitor.Console/LibreHardwareMonitor.Console.exe")
stdout, err := monitor.StdoutPipe()
if err != nil {
monitor = nil
return
}
go func() {
reader := bufio.NewReader(stdout)
for {
line, _, err := reader.ReadLine()
if err != nil {
wruntime.EventsEmit(a.ctx, "monitorerr", err.Error())
break
}
wruntime.EventsEmit(a.ctx, "monitor", string(line))
}
}()
monitor.SysProcAttr = &syscall.SysProcAttr{}
//go:custom_build windows monitor.SysProcAttr.HideWindow = true
monitor.Start()
}
type ProgressReader struct {
reader io.Reader
total int64
err error
}
func (pr *ProgressReader) Read(p []byte) (n int, err error) {
n, err = pr.reader.Read(p)
pr.err = err
pr.total += int64(n)
return
}
func (a *App) UpdateApp(url string) (broken bool, err error) {
resp, err := http.Get(url)
if err != nil {
return false, err
}
defer resp.Body.Close()
pr := &ProgressReader{reader: resp.Body}
ticker := time.NewTicker(250 * time.Millisecond)
defer ticker.Stop()
// update progress
go func() {
for {
<-ticker.C
wruntime.EventsEmit(a.ctx, "updateApp", &DownloadStatus{
Name: filepath.Base(url),
Path: "",
Url: url,
Transferred: pr.total,
Size: resp.ContentLength,
Speed: 0,
Progress: 100 * (float64(pr.total) / float64(resp.ContentLength)),
Downloading: pr.err == nil && pr.total < resp.ContentLength,
Done: pr.total == resp.ContentLength,
})
if pr.err != nil || pr.total == resp.ContentLength {
break
}
}
}()
var updateFile io.Reader = pr
// extract macos binary from zip
if strings.HasSuffix(url, ".zip") && runtime.GOOS == "darwin" {
zipBytes, err := io.ReadAll(pr)
if err != nil {
return false, err
}
archive, err := zip.NewReader(bytes.NewReader(zipBytes), int64(len(zipBytes)))
if err != nil {
return false, err
}
file, err := archive.Open("RWKV-Runner.app/Contents/MacOS/RWKV-Runner")
if err != nil {
return false, err
}
defer file.Close()
updateFile = file
}
// apply update
err = selfupdate.Apply(updateFile, selfupdate.Options{})
err = selfupdate.Apply(resp.Body, selfupdate.Options{})
if err != nil {
if rerr := selfupdate.RollbackError(err); rerr != nil {
return true, rerr
}
return false, err
}
// restart app
if runtime.GOOS == "windows" {
name, err := os.Executable()
if err != nil {
@ -304,7 +113,3 @@ func (a *App) RestartApp() error {
func (a *App) GetPlatform() string {
return runtime.GOOS
}
func (a *App) GetProxyPort() int {
return a.proxyPort
}

View File

@ -10,11 +10,7 @@ import (
)
func (a *App) DownloadFile(path string, url string) error {
absPath, err := a.GetAbsPath(path)
if err != nil {
return err
}
_, err = grab.Get(absPath, url)
_, err := grab.Get(a.exDir+path, url)
if err != nil {
return err
}
@ -37,9 +33,9 @@ type DownloadStatus struct {
var downloadList []*DownloadStatus
func existsInDownloadList(path string, url string) bool {
func existsInDownloadList(url string) bool {
for _, ds := range downloadList {
if ds.Path == path || ds.Url == url {
if ds.Url == url {
return true
}
}
@ -92,15 +88,11 @@ func (a *App) ContinueDownload(url string) {
}
func (a *App) AddToDownloadList(path string, url string) {
absPath, err := a.GetAbsPath(path)
if err != nil {
return
}
if !existsInDownloadList(absPath, url) {
if !existsInDownloadList(url) {
downloadList = append(downloadList, &DownloadStatus{
resp: nil,
Name: filepath.Base(path),
Path: absPath,
Path: a.exDir + path,
Url: url,
Downloading: false,
})

View File

@ -14,55 +14,20 @@ import (
wruntime "github.com/wailsapp/wails/v2/pkg/runtime"
)
func (a *App) GetAbsPath(path string) (string, error) {
var absPath string
var err error
if filepath.IsAbs(path) {
absPath = filepath.Clean(path)
} else {
absPath, err = filepath.Abs(filepath.Join(a.exDir, path))
if err != nil {
return "", err
}
}
absPath = strings.ReplaceAll(absPath, "/", string(os.PathSeparator))
println("GetAbsPath:", absPath)
return absPath, nil
}
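GetAbsPath resolves relative paths against a.exDir, only cleans paths that are already absolute, and normalizes separators for the host OS. A stand-alone sketch of that resolution rule, with exDir as a placeholder value:
package example

import (
	"os"
	"path/filepath"
	"strings"
)

// resolve mirrors GetAbsPath: a relative path is joined onto exDir, an absolute
// path is only cleaned, and separators are normalized for the host OS.
func resolve(exDir, path string) (string, error) {
	var abs string
	var err error
	if filepath.IsAbs(path) {
		abs = filepath.Clean(path)
	} else {
		abs, err = filepath.Abs(filepath.Join(exDir, path))
		if err != nil {
			return "", err
		}
	}
	return strings.ReplaceAll(abs, "/", string(os.PathSeparator)), nil
}

// e.g. resolve("/opt/RWKV-Runner/", "models/demo.pth") yields
// "/opt/RWKV-Runner/models/demo.pth" on Linux (the path is a placeholder).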
func (a *App) SaveFile(path string, savedContent []byte) error {
absPath, err := a.GetAbsPath(path)
if err != nil {
return err
}
if err := os.WriteFile(absPath, savedContent, 0644); err != nil {
return err
}
return nil
}
func (a *App) SaveJson(path string, jsonData any) error {
func (a *App) SaveJson(fileName string, jsonData any) error {
text, err := json.MarshalIndent(jsonData, "", " ")
if err != nil {
return err
}
absPath, err := a.GetAbsPath(path)
if err != nil {
return err
}
if err := os.WriteFile(absPath, text, 0644); err != nil {
if err := os.WriteFile(a.exDir+fileName, text, 0644); err != nil {
return err
}
return nil
}
func (a *App) ReadJson(path string) (any, error) {
absPath, err := a.GetAbsPath(path)
if err != nil {
return nil, err
}
file, err := os.ReadFile(absPath)
func (a *App) ReadJson(fileName string) (any, error) {
file, err := os.ReadFile(a.exDir + fileName)
if err != nil {
return nil, err
}
@ -76,12 +41,8 @@ func (a *App) ReadJson(path string) (any, error) {
return data, nil
}
func (a *App) FileExists(path string) bool {
absPath, err := a.GetAbsPath(path)
if err != nil {
return false
}
_, err = os.Stat(absPath)
func (a *App) FileExists(fileName string) bool {
_, err := os.Stat(a.exDir + fileName)
return err == nil
}
@ -92,16 +53,12 @@ type FileInfo struct {
ModTime string `json:"modTime"`
}
func (a *App) ReadFileInfo(path string) (*FileInfo, error) {
absPath, err := a.GetAbsPath(path)
func (a *App) ReadFileInfo(fileName string) (FileInfo, error) {
info, err := os.Stat(a.exDir + fileName)
if err != nil {
return nil, err
return FileInfo{}, err
}
info, err := os.Stat(absPath)
if err != nil {
return nil, err
}
return &FileInfo{
return FileInfo{
Name: info.Name(),
Size: info.Size(),
IsDir: info.IsDir(),
@ -110,11 +67,7 @@ func (a *App) ReadFileInfo(path string) (*FileInfo, error) {
}
func (a *App) ListDirFiles(dirPath string) ([]FileInfo, error) {
absDirPath, err := a.GetAbsPath(dirPath)
if err != nil {
return nil, err
}
files, err := os.ReadDir(absDirPath)
files, err := os.ReadDir(a.exDir + dirPath)
if err != nil {
return nil, err
}
@ -136,11 +89,7 @@ func (a *App) ListDirFiles(dirPath string) ([]FileInfo, error) {
}
func (a *App) DeleteFile(path string) error {
absPath, err := a.GetAbsPath(path)
if err != nil {
return err
}
err = os.Remove(absPath)
err := os.Remove(a.exDir + path)
if err != nil {
return err
}
@ -148,27 +97,18 @@ func (a *App) DeleteFile(path string) error {
}
func (a *App) CopyFile(src string, dst string) error {
absSrc, err := a.GetAbsPath(src)
if err != nil {
return err
}
absDst, err := a.GetAbsPath(dst)
if err != nil {
return err
}
sourceFile, err := os.Open(absSrc)
sourceFile, err := os.Open(a.exDir + src)
if err != nil {
return err
}
defer sourceFile.Close()
err = os.MkdirAll(filepath.Dir(absDst), 0755)
err = os.MkdirAll(a.exDir+dst[:strings.LastIndex(dst, "/")], 0755)
if err != nil {
return err
}
destFile, err := os.Create(absDst)
destFile, err := os.Create(a.exDir + dst)
if err != nil {
return err
}
@ -205,22 +145,14 @@ func (a *App) OpenSaveFileDialogBytes(filterPattern string, defaultFileName stri
return path, nil
}
// Only returns the path of the selected file, because communication between the frontend and the backend is slow; use the AssetServer handler to read the file.
func (a *App) OpenOpenFileDialog(filterPattern string) (string, error) {
path, err := wruntime.OpenFileDialog(a.ctx, wruntime.OpenDialogOptions{
Filters: []wruntime.FileFilter{{Pattern: filterPattern}},
})
if err != nil {
return "", err
func (a *App) OpenFileFolder(path string, relative bool) error {
var absPath string
var err error
if relative {
absPath, err = filepath.Abs(a.exDir + path)
} else {
absPath, err = filepath.Abs(path)
}
if path == "" {
return "", nil
}
return path, nil
}
func (a *App) OpenFileFolder(path string) error {
absPath, err := a.GetAbsPath(path)
if err != nil {
return err
}
@ -249,12 +181,3 @@ func (a *App) OpenFileFolder(path string) error {
}
return errors.New("unsupported OS")
}
func (a *App) StartFile(path string) error {
cmd, err := CmdHelper(true, path)
if err != nil {
return err
}
err = cmd.Start()
return err
}

View File

@ -1,170 +0,0 @@
package backend_golang
import (
"errors"
"fmt"
"time"
"github.com/mattrtaylor/go-rtmidi"
"github.com/wailsapp/wails/v2/pkg/runtime"
)
type Port struct {
Name string `json:"name"`
}
type MIDIMessage struct {
MessageType string `json:"messageType"`
Channel int `json:"channel"`
Note int `json:"note"`
Velocity int `json:"velocity"`
Control int `json:"control"`
Value int `json:"value"`
}
var ports []Port
var input rtmidi.MIDIIn
var out rtmidi.MIDIOut
var activeIndex int = -1
var lastNoteTime time.Time
func (a *App) midiLoop() {
var err error
input, err = rtmidi.NewMIDIInDefault()
if err != nil {
runtime.EventsEmit(a.ctx, "midiError", err.Error())
return
}
out, err = rtmidi.NewMIDIOutDefault()
if err != nil {
runtime.EventsEmit(a.ctx, "midiError", err.Error())
}
err = out.OpenPort(0, "")
if err != nil {
runtime.EventsEmit(a.ctx, "midiError", err.Error())
}
ticker := time.NewTicker(500 * time.Millisecond)
go func() {
for {
<-ticker.C
count, err := input.PortCount()
if err != nil {
continue
}
ports = make([]Port, count)
for i := 0; i < count; i++ {
name, err := input.PortName(i)
if err == nil {
ports[i].Name = name
}
}
runtime.EventsEmit(a.ctx, "midiPorts", &ports)
}
}()
}
func (a *App) OpenMidiPort(index int) error {
if input == nil {
return errors.New("failed to initialize MIDI input")
}
if activeIndex == index {
return nil
}
input.Destroy()
var err error
input, err = rtmidi.NewMIDIInDefault()
if err != nil {
return err
}
err = input.SetCallback(func(msg rtmidi.MIDIIn, bytes []byte, t float64) {
// https://www.midi.org/specifications-old/item/table-1-summary-of-midi-message
// https://www.rfc-editor.org/rfc/rfc6295.html
//
// msgType channel
// 1001 0000
//
msgType := bytes[0] >> 4
channel := bytes[0] & 0x0f
switch msgType {
case 0x8:
elapsed := time.Since(lastNoteTime)
lastNoteTime = time.Now()
runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
MessageType: "ElapsedTime",
Value: int(elapsed.Milliseconds()),
})
note := bytes[1]
runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
MessageType: "NoteOff",
Channel: int(channel),
Note: int(note),
})
case 0x9:
elapsed := time.Since(lastNoteTime)
lastNoteTime = time.Now()
runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
MessageType: "ElapsedTime",
Value: int(elapsed.Milliseconds()),
})
note := bytes[1]
velocity := bytes[2]
runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
MessageType: "NoteOn",
Channel: int(channel),
Note: int(note),
Velocity: int(velocity),
})
case 0xb:
// control 12 => K1 knob, control 13 => K2 knob
control := bytes[1]
value := bytes[2]
runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
MessageType: "ControlChange",
Channel: int(channel),
Control: int(control),
Value: int(value),
})
default:
fmt.Printf("Unknown midi message: %v\n", bytes)
}
})
if err != nil {
return err
}
err = input.OpenPort(index, "")
if err != nil {
return err
}
activeIndex = index
lastNoteTime = time.Now()
return nil
}
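The callback above follows the MIDI summary table linked in its comments: the high nibble of the status byte is the message type (0x8 NoteOff, 0x9 NoteOn, 0xB Control Change) and the low nibble is the channel. A small sketch of that decoding, together with the inverse 0x90|channel encoding that PlayNote uses further down:
package example

// decodeStatus splits a MIDI status byte: the high nibble is the message type
// and the low nibble is the channel (0-15).
func decodeStatus(status byte) (msgType, channel byte) {
	return status >> 4, status & 0x0f
}

// encodeNoteOn builds the 3-byte NoteOn message PlayNote sends: 0x90 | channel,
// followed by note and velocity.
func encodeNoteOn(channel, note, velocity byte) []byte {
	return []byte{0x90 | channel, note, velocity}
}

// decodeStatus(0x93) returns msgType 0x9 (NoteOn) and channel 3.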
func (a *App) CloseMidiPort() error {
if input == nil {
return errors.New("failed to initialize MIDI input")
}
if activeIndex == -1 {
return nil
}
activeIndex = -1
input.Destroy()
var err error
input, err = rtmidi.NewMIDIInDefault()
if err != nil {
return err
}
return nil
}
func (a *App) PlayNote(msg MIDIMessage) error {
if out == nil {
return errors.New("failed to initialize MIDI output")
}
channelByte := byte(msg.Channel)
if msg.MessageType == "NoteOn" {
out.SendMessage([]byte{0x90 | channelByte, byte(msg.Note), byte(msg.Velocity)})
} else if msg.MessageType == "NoteOff" {
out.SendMessage([]byte{0x80 | channelByte, byte(msg.Note), byte(msg.Velocity)})
}
return nil
}

View File

@ -1,4 +1,3 @@
// Because of whitespace and multilingual path handling, the functions in rwkv.go should always be executed with the cwd set to the RWKV-Runner directory; never use a.GetAbsPath() here.
package backend_golang
import (
@ -11,126 +10,30 @@ import (
"strings"
)
func (a *App) StartServer(python string, port int, host string, webui bool, rwkvBeta bool, rwkvcpp bool, webgpu bool) (string, error) {
execFile := "./backend-python/main.py"
_, err := os.Stat(execFile)
if err != nil {
return "", err
}
func (a *App) StartServer(python string, port int, host string) (string, error) {
var err error
if python == "" {
python, err = GetPython()
}
if err != nil {
return "", err
}
args := []string{python, execFile}
if webui {
args = append(args, "--webui")
}
if rwkvBeta {
// args = append(args, "--rwkv-beta")
}
if rwkvcpp {
args = append(args, "--rwkv.cpp")
}
if webgpu {
args = append(args, "--webgpu")
}
args = append(args, "--port", strconv.Itoa(port), "--host", host)
return Cmd(args...)
}
func (a *App) StartWebGPUServer(port int, host string) (string, error) {
var execFile string
execFiles := []string{"./backend-rust/webgpu_server", "./backend-rust/webgpu_server.exe"}
for _, file := range execFiles {
_, err := os.Stat(file)
if err == nil {
execFile = file
break
}
}
if execFile == "" {
return "", errors.New(execFiles[0] + " not found")
}
args := []string{execFile}
args = append(args, "--port", strconv.Itoa(port), "--ip", host)
return Cmd(args...)
return Cmd(python, "./backend-python/main.py", strconv.Itoa(port), host)
}
func (a *App) ConvertModel(python string, modelPath string, strategy string, outPath string) (string, error) {
execFile := "./backend-python/convert_model.py"
_, err := os.Stat(execFile)
if err != nil {
return "", err
}
var err error
if python == "" {
python, err = GetPython()
}
if err != nil {
return "", err
}
return Cmd(python, execFile, "--in", modelPath, "--out", outPath, "--strategy", strategy)
}
func (a *App) ConvertSafetensors(modelPath string, outPath string) (string, error) {
var execFile string
execFiles := []string{"./backend-rust/web-rwkv-converter", "./backend-rust/web-rwkv-converter.exe"}
for _, file := range execFiles {
_, err := os.Stat(file)
if err == nil {
execFile = file
break
}
}
if execFile == "" {
return "", errors.New(execFiles[0] + " not found")
}
args := []string{execFile}
args = append(args, "--input", modelPath, "--output", outPath)
return Cmd(args...)
}
func (a *App) ConvertSafetensorsWithPython(python string, modelPath string, outPath string) (string, error) {
execFile := "./backend-python/convert_safetensors.py"
_, err := os.Stat(execFile)
if err != nil {
return "", err
}
if python == "" {
python, err = GetPython()
}
if err != nil {
return "", err
}
return Cmd(python, execFile, "--input", modelPath, "--output", outPath)
}
func (a *App) ConvertGGML(python string, modelPath string, outPath string, Q51 bool) (string, error) {
execFile := "./backend-python/convert_pytorch_to_ggml.py"
_, err := os.Stat(execFile)
if err != nil {
return "", err
}
if python == "" {
python, err = GetPython()
}
if err != nil {
return "", err
}
dataType := "FP16"
if Q51 {
dataType = "Q5_1"
}
return Cmd(python, execFile, modelPath, outPath, dataType)
return Cmd(python, "./backend-python/convert_model.py", "--in", modelPath, "--out", outPath, "--strategy", strategy)
}
func (a *App) ConvertData(python string, input string, outputPrefix string, vocab string) (string, error) {
execFile := "./finetune/json2binidx_tool/tools/preprocess_data.py"
_, err := os.Stat(execFile)
if err != nil {
return "", err
}
var err error
if python == "" {
python, err = GetPython()
}
@ -174,23 +77,19 @@ func (a *App) ConvertData(python string, input string, outputPrefix string, voca
return "", err
}
return Cmd(python, execFile, "--input", input, "--output-prefix", outputPrefix, "--vocab", vocab,
return Cmd(python, "./finetune/json2binidx_tool/tools/preprocess_data.py", "--input", input, "--output-prefix", outputPrefix, "--vocab", vocab,
"--tokenizer-type", tokenizerType, "--dataset-impl", "mmap", "--append-eod")
}
func (a *App) MergeLora(python string, useGpu bool, loraAlpha int, baseModel string, loraPath string, outputPath string) (string, error) {
execFile := "./finetune/lora/merge_lora.py"
_, err := os.Stat(execFile)
if err != nil {
return "", err
}
var err error
if python == "" {
python, err = GetPython()
}
if err != nil {
return "", err
}
args := []string{python, execFile}
args := []string{python, "./finetune/lora/merge_lora.py"}
if useGpu {
args = append(args, "--use-gpu")
}
@ -206,21 +105,17 @@ func (a *App) DepCheck(python string) error {
if err != nil {
return err
}
out, err := exec.Command(python, a.exDir+"backend-python/dep_check.py").CombinedOutput()
out, err := exec.Command(python, a.exDir+"./backend-python/dep_check.py").CombinedOutput()
if err != nil {
return errors.New("DepCheck Error: " + string(out) + " GError: " + err.Error())
return errors.New("DepCheck Error: " + string(out))
}
return nil
}
func (a *App) InstallPyDep(python string, cnMirror bool) (string, error) {
var err error
torchWhlUrl := "torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117"
if python == "" {
python, err = GetPython()
if cnMirror && python == "py310/python.exe" {
torchWhlUrl = "https://mirrors.aliyun.com/pytorch-wheels/cu117/torch-1.13.1+cu117-cp310-cp310-win_amd64.whl"
}
if runtime.GOOS == "windows" {
python = `"%CD%/` + python + `"`
}
@ -231,14 +126,15 @@ func (a *App) InstallPyDep(python string, cnMirror bool) (string, error) {
if runtime.GOOS == "windows" {
ChangeFileLine("./py310/python310._pth", 3, "Lib\\site-packages")
installScript := python + " ./backend-python/get-pip.py -i https://mirrors.aliyun.com/pypi/simple --no-warn-script-location\n" +
python + " -m pip install " + torchWhlUrl + " --no-warn-script-location\n" +
python + " -m pip install -r ./backend-python/requirements.txt -i https://mirrors.aliyun.com/pypi/simple --no-warn-script-location\n" +
installScript := python + " ./backend-python/get-pip.py -i https://pypi.tuna.tsinghua.edu.cn/simple\n" +
python + " -m pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117\n" +
python + " -m pip install -r ./backend-python/requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple\n" +
"exit"
if !cnMirror {
installScript = strings.Replace(installScript, " -i https://mirrors.aliyun.com/pypi/simple", "", -1)
installScript = strings.Replace(installScript, " -i https://pypi.tuna.tsinghua.edu.cn/simple", "", -1)
installScript = strings.Replace(installScript, "requirements.txt", "requirements_versions.txt", -1)
}
err = os.WriteFile(a.exDir+"install-py-dep.bat", []byte(installScript), 0644)
err = os.WriteFile("./install-py-dep.bat", []byte(installScript), 0644)
if err != nil {
return "", err
}
@ -246,7 +142,7 @@ func (a *App) InstallPyDep(python string, cnMirror bool) (string, error) {
}
if cnMirror {
return Cmd(python, "-m", "pip", "install", "-r", "./backend-python/requirements_without_cyac.txt", "-i", "https://mirrors.aliyun.com/pypi/simple")
return Cmd(python, "-m", "pip", "install", "-r", "./backend-python/requirements_without_cyac.txt", "-i", "https://pypi.tuna.tsinghua.edu.cn/simple")
} else {
return Cmd(python, "-m", "pip", "install", "-r", "./backend-python/requirements_without_cyac.txt")
}

View File

@ -3,68 +3,42 @@ package backend_golang
import (
"archive/zip"
"bufio"
"crypto/sha256"
"embed"
"errors"
"fmt"
"io"
"io/fs"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
)
func CmdHelper(hideWindow bool, args ...string) (*exec.Cmd, error) {
if runtime.GOOS != "windows" {
return nil, errors.New("unsupported OS")
}
ex, err := os.Executable()
if err != nil {
return nil, err
}
exDir := filepath.Dir(ex) + "/"
path := exDir + "cmd-helper.bat"
_, err = os.Stat(path)
if err != nil {
if err := os.WriteFile(path, []byte("start %*"), 0644); err != nil {
return nil, err
}
}
cmdHelper, err := filepath.Abs(path)
if err != nil {
return nil, err
}
if strings.Contains(cmdHelper, " ") {
for _, arg := range args {
if strings.Contains(arg, " ") {
return nil, errors.New("path contains space") // golang bug https://github.com/golang/go/issues/17149#issuecomment-473976818
}
}
}
cmd := exec.Command(cmdHelper, args...)
cmd.SysProcAttr = &syscall.SysProcAttr{}
//go:custom_build windows cmd.SysProcAttr.HideWindow = hideWindow
return cmd, nil
}
func Cmd(args ...string) (string, error) {
switch platform := runtime.GOOS; platform {
case "windows":
cmd, err := CmdHelper(true, args...)
if err := os.WriteFile("./cmd-helper.bat", []byte("start %*"), 0644); err != nil {
return "", err
}
cmdHelper, err := filepath.Abs("./cmd-helper")
if err != nil {
return "", err
}
_, err = cmd.CombinedOutput()
if strings.Contains(cmdHelper, " ") {
for _, arg := range args {
if strings.Contains(arg, " ") {
return "", errors.New("path contains space") // golang bug https://github.com/golang/go/issues/17149#issuecomment-473976818
}
}
}
cmd := exec.Command(cmdHelper, args...)
out, err := cmd.CombinedOutput()
if err != nil {
return "", err
}
return "", nil
return string(out), nil
case "darwin":
ex, err := os.Executable()
if err != nil {
@ -91,18 +65,16 @@ func Cmd(args ...string) (string, error) {
}
func CopyEmbed(efs embed.FS) error {
ex, err := os.Executable()
if err != nil {
return err
}
var prefix string
prefix := ""
if runtime.GOOS == "darwin" {
ex, err := os.Executable()
if err != nil {
return err
}
prefix = filepath.Dir(ex) + "/../../../"
} else {
prefix = filepath.Dir(ex) + "/"
}
err = fs.WalkDir(efs, ".", func(path string, d fs.DirEntry, err error) error {
err := fs.WalkDir(efs, ".", func(path string, d fs.DirEntry, err error) error {
if d.IsDir() {
return nil
}
@ -120,19 +92,9 @@ func CopyEmbed(efs embed.FS) error {
return err
}
executeWrite := true
existedContent, err := os.ReadFile(path)
if err == nil {
if fmt.Sprintf("%x", sha256.Sum256(existedContent)) == fmt.Sprintf("%x", sha256.Sum256(content)) {
executeWrite = false
}
}
if executeWrite {
err = os.WriteFile(path, content, 0644)
if err != nil {
return err
}
err = os.WriteFile(path, content, 0644)
if err != nil {
return err
}
return nil
@ -143,19 +105,13 @@ func CopyEmbed(efs embed.FS) error {
func GetPython() (string, error) {
switch platform := runtime.GOOS; platform {
case "windows":
ex, err := os.Executable()
_, err := os.Stat("py310/python.exe")
if err != nil {
return "", err
}
exDir := filepath.Dir(ex) + "/"
pyexe := exDir + "py310/python.exe"
_, err = os.Stat(pyexe)
if err != nil {
_, err := os.Stat(exDir + "python-3.10.11-embed-amd64.zip")
_, err := os.Stat("python-3.10.11-embed-amd64.zip")
if err != nil {
return "", errors.New("python zip not found")
} else {
err := Unzip(exDir+"python-3.10.11-embed-amd64.zip", exDir+"py310")
err := Unzip("python-3.10.11-embed-amd64.zip", "py310")
if err != nil {
return "", errors.New("failed to unzip python")
} else {
@ -249,12 +205,3 @@ func Unzip(source, destination string) error {
}
return nil
}
func (a *App) IsPortAvailable(port int) bool {
l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%s", strconv.Itoa(port)))
if err != nil {
return false
}
defer l.Close()
return true
}
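IsPortAvailable probes one specific port by trying to listen on it, while newFetchProxy earlier asks the kernel for any free port by listening on port 0. A small sketch of both patterns side by side:
package example

import (
	"fmt"
	"net"
)

// freePort mirrors the trick newFetchProxy uses: listening on port 0 asks the
// kernel to pick any free port, which is then read back from the listener.
func freePort() (int, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}

// portAvailable mirrors IsPortAvailable: if the listen fails, something already
// holds the port on 127.0.0.1.
func portAvailable(port int) bool {
	l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
	if err != nil {
		return false
	}
	l.Close()
	return true
}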

View File

@ -9,6 +9,7 @@ import (
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
@ -132,20 +133,26 @@ func (a *App) WslStop() error {
}
func (a *App) WslIsEnabled() error {
data, err := os.ReadFile(a.exDir + "wsl.state")
ex, err := os.Executable()
if err != nil {
return err
}
exDir := filepath.Dir(ex)
data, err := os.ReadFile(exDir + "/wsl.state")
if err == nil {
if strings.Contains(string(data), "Enabled") {
return nil
}
}
cmd := `-Command (Get-WindowsOptionalFeature -Online -FeatureName VirtualMachinePlatform).State | Out-File -Encoding utf8 -FilePath ` + a.exDir + "wsl.state"
_, err = su.ShellExecute(su.RUNAS, "powershell", cmd, a.exDir)
cmd := `-Command (Get-WindowsOptionalFeature -Online -FeatureName Microsoft-Windows-Subsystem-Linux).State | Out-File -Encoding utf8 -FilePath ` + exDir + "/wsl.state"
_, err = su.ShellExecute(su.RUNAS, "powershell", cmd, exDir)
if err != nil {
return err
}
time.Sleep(2 * time.Second)
data, err = os.ReadFile(a.exDir + "wsl.state")
data, err = os.ReadFile(exDir + "/wsl.state")
if err != nil {
return err
}
@ -157,13 +164,13 @@ func (a *App) WslIsEnabled() error {
}
func (a *App) WslEnable(forceMode bool) error {
cmd := `/online /enable-feature /featurename:VirtualMachinePlatform`
cmd := `/online /enable-feature /featurename:Microsoft-Windows-Subsystem-Linux`
_, err := su.ShellExecute(su.RUNAS, "dism", cmd, `C:\`)
if err != nil {
return err
}
if forceMode {
os.WriteFile(a.exDir+"wsl.state", []byte("Enabled"), 0644)
os.WriteFile("./wsl.state", []byte("Enabled"), 0644)
}
return nil
}

View File

@ -231,6 +231,5 @@ try:
convert_and_save_and_exit=args.out,
)
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))

View File

@ -1,169 +0,0 @@
# Converts an RWKV model checkpoint in PyTorch format to an rwkv.cpp compatible file.
# Usage: python convert_pytorch_to_ggml.py C:\RWKV-4-Pile-169M-20220807-8023.pth C:\rwkv.cpp-169M-FP16.bin FP16
# Get model checkpoints from https://huggingface.co/BlinkDL
# See FILE_FORMAT.md for the documentation on the file format.
import argparse
import struct
import torch
from typing import Dict
def parse_args():
parser = argparse.ArgumentParser(
description="Convert an RWKV model checkpoint in PyTorch format to an rwkv.cpp compatible file"
)
parser.add_argument("src_path", help="Path to PyTorch checkpoint file")
parser.add_argument(
"dest_path", help="Path to rwkv.cpp checkpoint file, will be overwritten"
)
parser.add_argument(
"data_type",
help="Data type, FP16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0",
type=str,
choices=[
"FP16",
"Q4_0",
"Q4_1",
"Q5_0",
"Q5_1",
"Q8_0",
],
default="FP16",
)
return parser.parse_args()
def get_layer_count(state_dict: Dict[str, torch.Tensor]) -> int:
n_layer: int = 0
while f"blocks.{n_layer}.ln1.weight" in state_dict:
n_layer += 1
assert n_layer > 0
return n_layer
def write_state_dict(
state_dict: Dict[str, torch.Tensor], dest_path: str, data_type: str
) -> None:
emb_weight: torch.Tensor = state_dict["emb.weight"]
n_layer: int = get_layer_count(state_dict)
n_vocab: int = emb_weight.shape[0]
n_embed: int = emb_weight.shape[1]
is_v5_1_or_2: bool = "blocks.0.att.ln_x.weight" in state_dict
is_v5_2: bool = "blocks.0.att.gate.weight" in state_dict
if is_v5_2:
print("Detected RWKV v5.2")
elif is_v5_1_or_2:
print("Detected RWKV v5.1")
else:
print("Detected RWKV v4")
with open(dest_path, "wb") as out_file:
is_FP16: bool = data_type == "FP16" or data_type == "float16"
out_file.write(
struct.pack(
# Disable padding with '='
"=iiiiii",
# Magic: 'ggmf' in hex
0x67676D66,
101,
n_vocab,
n_embed,
n_layer,
1 if is_FP16 else 0,
)
)
for k in state_dict.keys():
tensor: torch.Tensor = state_dict[k].float()
if ".time_" in k:
tensor = tensor.squeeze()
if is_v5_1_or_2:
if ".time_decay" in k:
if is_v5_2:
tensor = torch.exp(-torch.exp(tensor)).unsqueeze(-1)
else:
tensor = torch.exp(-torch.exp(tensor)).reshape(-1, 1, 1)
if ".time_first" in k:
tensor = torch.exp(tensor).reshape(-1, 1, 1)
if ".time_faaaa" in k:
tensor = tensor.unsqueeze(-1)
else:
if ".time_decay" in k:
tensor = -torch.exp(tensor)
# Keep 1-dim vectors and small matrices in FP32
if is_FP16 and len(tensor.shape) > 1 and ".time_" not in k:
tensor = tensor.half()
shape = tensor.shape
print(f"Writing {k}, shape {shape}, type {tensor.dtype}")
k_encoded: bytes = k.encode("utf-8")
out_file.write(
struct.pack(
"=iii",
len(shape),
len(k_encoded),
1 if tensor.dtype == torch.float16 else 0,
)
)
# Dimension order is reversed here:
# * PyTorch shape is (x rows, y columns)
# * ggml shape is (y elements in a row, x elements in a column)
# Both shapes represent the same tensor.
for dim in reversed(tensor.shape):
out_file.write(struct.pack("=i", dim))
out_file.write(k_encoded)
tensor.numpy().tofile(out_file)
def main() -> None:
args = parse_args()
print(f"Reading {args.src_path}")
state_dict: Dict[str, torch.Tensor] = torch.load(args.src_path, map_location="cpu")
temp_output: str = args.dest_path
if args.data_type.startswith("Q"):
import re
temp_output = re.sub(r"Q[4,5,8]_[0,1]", "fp16", temp_output)
write_state_dict(state_dict, temp_output, "FP16")
if args.data_type.startswith("Q"):
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from rwkv_pip.cpp import rwkv_cpp_shared_library
library = rwkv_cpp_shared_library.load_rwkv_shared_library()
library.rwkv_quantize_model_file(temp_output, args.dest_path, args.data_type)
print("Done")
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))
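write_state_dict starts the file with struct.pack("=iiiiii"): the magic 0x67676D66 ('ggmf'), version 101, n_vocab, n_embed, n_layer, and a FP16 flag, written in native byte order (little-endian on the usual x86/ARM hosts). A hedged Go sketch that reads that header back, assuming a little-endian file:
package example

import (
	"encoding/binary"
	"errors"
	"io"
)

// ggmlHeader mirrors the six int32 fields written by write_state_dict above.
type ggmlHeader struct {
	Magic   int32 // 0x67676D66, 'ggmf'
	Version int32 // 101
	NVocab  int32
	NEmbed  int32
	NLayer  int32
	IsFP16  int32 // 1 if FP16, 0 if FP32
}

// readHeader assumes the file was produced on a little-endian machine, which is
// what '=' (native order, standard sizes) means on typical x86/ARM hosts.
func readHeader(r io.Reader) (ggmlHeader, error) {
	var h ggmlHeader
	if err := binary.Read(r, binary.LittleEndian, &h); err != nil {
		return h, err
	}
	if h.Magic != 0x67676D66 {
		return h, errors.New("not an rwkv.cpp ggml file")
	}
	return h, nil
}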

View File

@ -1,113 +0,0 @@
import collections
import numpy
import os
import torch
from safetensors.torch import serialize_file, load_file
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, help="Path to input pth model")
parser.add_argument(
"--output",
type=str,
default="./converted.st",
help="Path to output safetensors model",
)
args = parser.parse_args()
def rename_key(rename, name):
for k, v in rename.items():
if k in name:
name = name.replace(k, v)
return name
def convert_file(pt_filename: str, sf_filename: str, rename={}, transpose_names=[]):
loaded: collections.OrderedDict = torch.load(pt_filename, map_location="cpu")
if "state_dict" in loaded:
loaded = loaded["state_dict"]
kk = list(loaded.keys())
version = 4
for x in kk:
if "ln_x" in x:
version = max(5, version)
if "gate.weight" in x:
version = max(5.1, version)
if int(version) == 5 and "att.time_decay" in x:
if len(loaded[x].shape) > 1:
if loaded[x].shape[1] > 1:
version = max(5.2, version)
if "time_maa" in x:
version = max(6, version)
print(f"Model detected: v{version:.1f}")
if version == 5.1:
_, n_emb = loaded["emb.weight"].shape
for k in kk:
if "time_decay" in k or "time_faaaa" in k:
# print(k, mm[k].shape)
loaded[k] = (
loaded[k].unsqueeze(1).repeat(1, n_emb // loaded[k].shape[0])
)
with torch.no_grad():
for k in kk:
new_k = rename_key(rename, k).lower()
v = loaded[k].half()
del loaded[k]
for transpose_name in transpose_names:
if transpose_name in new_k:
dims = len(v.shape)
v = v.transpose(dims - 2, dims - 1)
print(f"{new_k}\t{v.shape}\t{v.dtype}")
loaded[new_k] = {
"dtype": str(v.dtype).split(".")[-1],
"shape": v.shape,
"data": v.numpy().tobytes(),
}
dirname = os.path.dirname(sf_filename)
os.makedirs(dirname, exist_ok=True)
serialize_file(loaded, sf_filename, metadata={"format": "pt"})
# reloaded = load_file(sf_filename)
# for k in loaded:
# pt_tensor = torch.Tensor(
# numpy.frombuffer(
# bytearray(loaded[k]["data"]),
# dtype=getattr(numpy, loaded[k]["dtype"]),
# ).reshape(loaded[k]["shape"])
# )
# sf_tensor = reloaded[k]
# if not torch.equal(pt_tensor, sf_tensor):
# raise RuntimeError(f"The output tensors do not match for key {k}")
if __name__ == "__main__":
try:
convert_file(
args.input,
args.output,
rename={
"time_faaaa": "time_first",
"time_maa": "time_mix",
"lora_A": "lora.0",
"lora_B": "lora.1",
},
transpose_names=[
"time_mix_w1",
"time_mix_w2",
"time_decay_w1",
"time_decay_w2",
"time_state",
"lora.0",
],
)
print(f"Saved to {args.output}")
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))

View File

@ -1,21 +1,13 @@
import setuptools
if setuptools.__version__ >= "70.0.0":
raise ImportError("setuptools>=70.0.0 is not supported")
import multipart
import fitz
import safetensors
import midi2audio
import mido
import lm_dataformat
import ftfy
import tqdm
import tiktoken
import GPUtil
import torch
import rwkv
import langchain
import numpy
import tokenizers
import fastapi

View File

@ -1,11 +1,8 @@
from enum import Enum, auto
Args = "args"
Model = "model"
Model_Status = "model_status"
Model_Config = "model_config"
Deploy_Mode = "deploy_mode"
Midi_Vocab_Config_Type = "midi_vocab_config_type"
class ModelStatus(Enum):
@ -14,17 +11,10 @@ class ModelStatus(Enum):
Working = 3
class MidiVocabConfig(Enum):
Default = auto()
Piano = auto()
def init():
global GLOBALS
GLOBALS = {}
set(Model_Status, ModelStatus.Offline)
set(Deploy_Mode, False)
set(Midi_Vocab_Config_Type, MidiVocabConfig.Default)
def set(key, value):

View File

@ -1,59 +1,10 @@
import time
start_time = time.time()
import argparse
from typing import Union, Sequence
def get_args(args: Union[Sequence[str], None] = None):
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title="server arguments")
group.add_argument(
"--port",
type=int,
default=8000,
help="port to run the server on (default: 8000)",
)
group.add_argument(
"--host",
type=str,
default="127.0.0.1",
help="host to run the server on (default: 127.0.0.1)",
)
group = parser.add_argument_group(title="mode arguments")
group.add_argument(
"--webui",
action="store_true",
help="whether to enable WebUI (default: False)",
)
group.add_argument(
"--rwkv.cpp",
action="store_true",
help="whether to use rwkv.cpp (default: False)",
)
group.add_argument(
"--webgpu",
action="store_true",
help="whether to use webgpu (default: False)",
)
args = parser.parse_args(args)
return args
if __name__ == "__main__":
args = get_args()
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import psutil
from contextlib import asynccontextmanager
from fastapi import Depends, FastAPI, status
from fastapi import Depends, FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
@ -61,17 +12,10 @@ from utils.rwkv import *
from utils.torch import *
from utils.ngrok import *
from utils.log import log_middleware
from routes import completion, config, state_cache, midi, misc, file_process
from routes import completion, config, state_cache, midi
import global_var
@asynccontextmanager
async def lifespan(app: FastAPI):
init()
yield
app = FastAPI(lifespan=lifespan, dependencies=[Depends(log_middleware)])
app = FastAPI(dependencies=[Depends(log_middleware)])
app.add_middleware(
CORSMiddleware,
@ -84,47 +28,12 @@ app.add_middleware(
app.include_router(completion.router)
app.include_router(config.router)
app.include_router(midi.router)
app.include_router(file_process.router)
app.include_router(misc.router)
app.include_router(state_cache.router)
@app.post("/exit", tags=["Root"])
def exit():
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
parent_pid = os.getpid()
parent = psutil.Process(parent_pid)
for child in parent.children(recursive=True):
child.kill()
parent.kill()
try:
if (
"RWKV_RUNNER_PARAMS" in os.environ
and "--webui" in os.environ["RWKV_RUNNER_PARAMS"].split(" ")
) or args.webui:
from webui_server import webui_server
app.mount("/", webui_server)
except NameError:
pass
@app.get("/", tags=["Root"])
def read_root():
return {"Hello": "World!"}
@app.on_event("startup")
def init():
global_var.init()
cmd_params = os.environ["RWKV_RUNNER_PARAMS"]
global_var.set(
global_var.Args, get_args(cmd_params.split(" ") if cmd_params else None)
)
state_cache.init()
set_torch()
@ -133,7 +42,23 @@ def init():
ngrok_connect()
@app.get("/", tags=["Root"])
def read_root():
return {"Hello": "World!"}
@app.post("/exit", tags=["Root"])
def exit():
parent_pid = os.getpid()
parent = psutil.Process(parent_pid)
for child in parent.children(recursive=True):
child.kill()
parent.kill()
if __name__ == "__main__":
os.environ["RWKV_RUNNER_PARAMS"] = " ".join(sys.argv[1:])
print("--- %s seconds ---" % (time.time() - start_time))
uvicorn.run("main:app", port=args.port, host=args.host, workers=1)
uvicorn.run(
"main:app",
port=8000 if len(sys.argv) < 2 else int(sys.argv[1]),
host="127.0.0.1" if len(sys.argv) < 3 else sys.argv[2],
)

Binary file not shown.

Binary file not shown.

View File

@ -2,13 +2,12 @@ import asyncio
import json
from threading import Lock
from typing import List, Union
from enum import Enum
import base64
import time
from fastapi import APIRouter, Request, status, HTTPException
from sse_starlette.sse import EventSourceResponse
from pydantic import BaseModel, Field
from pydantic import BaseModel
import numpy as np
import tiktoken
from utils.rwkv import *
from utils.log import quick_log
@ -17,81 +16,41 @@ import global_var
router = APIRouter()
class Role(Enum):
User = "user"
Assistant = "assistant"
System = "system"
class Message(BaseModel):
role: Role
content: str = Field(min_length=0)
raw: bool = Field(False, description="Whether to treat content as raw text")
default_stop = [
"\n\nUser",
"\n\nQuestion",
"\n\nQ",
"\n\nHuman",
"\n\nBob",
"\n\nAssistant",
"\n\nAnswer",
"\n\nA",
"\n\nBot",
"\n\nAlice",
]
role: str
content: str
class ChatCompletionBody(ModelConfigBody):
messages: Union[List[Message], None]
model: Union[str, None] = "rwkv"
messages: List[Message]
model: str = "rwkv"
stream: bool = False
stop: Union[str, List[str], None] = default_stop
user_name: Union[str, None] = Field(
None, description="Internal user name", min_length=1
)
assistant_name: Union[str, None] = Field(
None, description="Internal assistant name", min_length=1
)
system_name: Union[str, None] = Field(
None, description="Internal system name", min_length=1
)
presystem: bool = Field(
False, description="Whether to insert default system prompt at the beginning"
)
stop: str | List[str] = None
model_config = {
"json_schema_extra": {
class Config:
schema_extra = {
"example": {
"messages": [
{"role": Role.User.value, "content": "hello", "raw": False}
],
"messages": [{"role": "user", "content": "hello"}],
"model": "rwkv",
"stream": False,
"stop": None,
"user_name": None,
"assistant_name": None,
"system_name": None,
"presystem": True,
"max_tokens": 1000,
"temperature": 1,
"top_p": 0.3,
"presence_penalty": 0,
"frequency_penalty": 1,
"temperature": 1.2,
"top_p": 0.5,
"presence_penalty": 0.4,
"frequency_penalty": 0.4,
}
}
}
class CompletionBody(ModelConfigBody):
prompt: Union[str, List[str], None]
model: Union[str, None] = "rwkv"
prompt: Union[str, List[str]]
model: str = "rwkv"
stream: bool = False
stop: Union[str, List[str], None] = None
stop: str | List[str] = None
model_config = {
"json_schema_extra": {
class Config:
schema_extra = {
"example": {
"prompt": "The following is an epic science fiction masterpiece that is immortalized, "
+ "with delicate descriptions and grand depictions of interstellar civilization wars.\nChapter 1.\n",
@ -99,13 +58,12 @@ class CompletionBody(ModelConfigBody):
"stream": False,
"stop": None,
"max_tokens": 100,
"temperature": 1,
"top_p": 0.3,
"presence_penalty": 0,
"frequency_penalty": 1,
"temperature": 1.2,
"top_p": 0.5,
"presence_penalty": 0.4,
"frequency_penalty": 0.4,
}
}
}
completion_lock = Lock()
@ -119,7 +77,7 @@ async def eval_rwkv(
body: ModelConfigBody,
prompt: str,
stream: bool,
stop: Union[str, List[str], None],
stop: str,
chat_mode: bool,
):
global requests_num
@ -149,57 +107,39 @@ async def eval_rwkv(
return
set_rwkv_config(model, global_var.get(global_var.Model_Config))
set_rwkv_config(model, body)
print(get_rwkv_config(model))
response, prompt_tokens, completion_tokens = "", 0, 0
completion_start_time = None
for response, delta, prompt_tokens, completion_tokens in model.generate(
prompt,
stop=stop,
):
if not completion_start_time:
completion_start_time = time.time()
if await request.is_disconnected():
break
if stream:
yield json.dumps(
{
"object": (
"chat.completion.chunk"
if chat_mode
else "text_completion"
),
# "response": response,
"object": "chat.completion.chunk"
if chat_mode
else "text_completion",
"response": response,
"model": model.name,
"id": "chatcmpl-123",
"system_fingerprint": "fp_44709d6fcb",
"choices": [
(
{
"delta": {"role":Role.Assistant.value,"content": delta},
"index": 0,
"finish_reason": None,
"logprobs":None
}
if chat_mode
else {
"text": delta,
"index": 0,
"finish_reason": None,
}
)
{
"delta": {"content": delta},
"index": 0,
"finish_reason": None,
}
if chat_mode
else {
"text": delta,
"index": 0,
"finish_reason": None,
}
],
}
)
# torch_gc()
requests_num = requests_num - 1
completion_end_time = time.time()
completion_interval = completion_end_time - completion_start_time
tps = 0
if completion_interval > 0:
tps = completion_tokens / completion_interval
print(f"Generation TPS: {tps:.2f}")
if await request.is_disconnected():
print(f"{request.client} Stop Waiting")
quick_log(
@ -216,28 +156,23 @@ async def eval_rwkv(
if stream:
yield json.dumps(
{
"object": (
"chat.completion.chunk" if chat_mode else "text_completion"
),
# "response": response,
"object": "chat.completion.chunk"
if chat_mode
else "text_completion",
"response": response,
"model": model.name,
"id": "chatcmpl-123",
"system_fingerprint": "fp_44709d6fcb",
"choices": [
(
{
"delta": {},
"index": 0,
"logprobs": None,
"finish_reason": "stop",
}
if chat_mode
else {
"text": "",
"index": 0,
"finish_reason": "stop",
}
)
{
"delta": {},
"index": 0,
"finish_reason": "stop",
}
if chat_mode
else {
"text": "",
"index": 0,
"finish_reason": "stop",
}
],
}
)
@ -245,7 +180,7 @@ async def eval_rwkv(
else:
yield {
"object": "chat.completion" if chat_mode else "text_completion",
# "response": response,
"response": response,
"model": model.name,
"usage": {
"prompt_tokens": prompt_tokens,
@ -253,125 +188,24 @@ async def eval_rwkv(
"total_tokens": prompt_tokens + completion_tokens,
},
"choices": [
(
{
"message": {
"role": Role.Assistant.value,
"content": response,
},
"index": 0,
"finish_reason": "stop",
}
if chat_mode
else {
"text": response,
"index": 0,
"finish_reason": "stop",
}
)
{
"message": {
"role": "assistant",
"content": response,
},
"index": 0,
"finish_reason": "stop",
}
if chat_mode
else {
"text": response,
"index": 0,
"finish_reason": "stop",
}
],
}
def chat_template_old(
model: TextRWKV, body: ChatCompletionBody, interface: str, user: str, bot: str
):
is_raven = model.rwkv_type == RWKVType.Raven
completion_text: str = ""
basic_system: Union[str, None] = None
if body.presystem:
if body.messages[0].role == Role.System:
basic_system = body.messages[0].content
if basic_system is None:
completion_text = (
f"""
The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. \
{bot} is very intelligent, creative and friendly. \
{bot} is unlikely to disagree with {user}, and {bot} doesn't like to ask {user} questions. \
{bot} likes to tell {user} a lot about herself and her opinions. \
{bot} usually gives {user} kind, helpful and informative advices.\n
"""
if is_raven
else (
f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
)
else:
if not body.messages[0].raw:
basic_system = (
basic_system.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\n\n", "\n")
.replace("\n", " ")
.strip()
)
completion_text = (
(
f"The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. "
if is_raven
else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
)
+ basic_system.replace("You are", f"{bot} is" if is_raven else "I am")
.replace("you are", f"{bot} is" if is_raven else "I am")
.replace("You're", f"{bot} is" if is_raven else "I'm")
.replace("you're", f"{bot} is" if is_raven else "I'm")
.replace("You", f"{bot}" if is_raven else "I")
.replace("you", f"{bot}" if is_raven else "I")
.replace("Your", f"{bot}'s" if is_raven else "My")
.replace("your", f"{bot}'s" if is_raven else "my")
.replace("", f"{bot}" if is_raven else "")
+ "\n\n"
)
for message in body.messages[(0 if basic_system is None else 1) :]:
append_message: str = ""
if message.role == Role.User:
append_message = f"{user}{interface} " + message.content
elif message.role == Role.Assistant:
append_message = f"{bot}{interface} " + message.content
elif message.role == Role.System:
append_message = message.content
if not message.raw:
append_message = (
append_message.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\n\n", "\n")
.strip()
)
completion_text += append_message + "\n\n"
completion_text += f"{bot}{interface}"
return completion_text
def chat_template(
model: TextRWKV, body: ChatCompletionBody, interface: str, user: str, bot: str
):
completion_text: str = ""
if body.presystem:
completion_text = (
f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
system = "System" if body.system_name is None else body.system_name
for message in body.messages:
append_message: str = ""
if message.role == Role.User:
append_message = f"{user}{interface} " + message.content
elif message.role == Role.Assistant:
append_message = f"{bot}{interface} " + message.content
elif message.role == Role.System:
append_message = f"{system}{interface} " + message.content
completion_text += append_message + "\n\n"
completion_text += f"{bot}{interface}"
return completion_text
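As a worked example of the template above: with presystem true and a single user message "hello", chat_template produces roughly the following prompt (assuming ":" as the interface and the default "User"/"Assistant" names, which are not shown in this hunk):
User: hi

Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.

User: hello

Assistant: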
@router.post("/v1/chat/completions", tags=["Completions"])
@router.post("/chat/completions", tags=["Completions"])
async def chat_completions(body: ChatCompletionBody, request: Request):
@ -379,40 +213,87 @@ async def chat_completions(body: ChatCompletionBody, request: Request):
if model is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "model not loaded")
if body.messages is None or body.messages == []:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "messages not found")
question = body.messages[-1]
if question.role == "user":
question = question.content
elif question.role == "system":
question = body.messages[-2]
if question.role == "user":
question = question.content
else:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "no question found")
else:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "no question found")
interface = model.interface
user = model.user if body.user_name is None else body.user_name
bot = model.bot if body.assistant_name is None else body.assistant_name
user = model.user
bot = model.bot
if model.version < 5:
completion_text = chat_template_old(model, body, interface, user, bot)
else:
completion_text = chat_template(model, body, interface, user, bot)
user_code = model.pipeline.decode([model.pipeline.encode(user)[0]])
bot_code = model.pipeline.decode([model.pipeline.encode(bot)[0]])
if type(body.stop) == str:
body.stop = [body.stop, f"\n\n{user_code}", f"\n\n{bot_code}"]
elif type(body.stop) == list:
body.stop.append(f"\n\n{user_code}")
body.stop.append(f"\n\n{bot_code}")
elif body.stop is None:
body.stop = default_stop + [f"\n\n{user_code}", f"\n\n{bot_code}"]
# if not body.presystem:
# body.stop.append("\n\n")
completion_text = (
f"""
The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. \
{bot} is very intelligent, creative and friendly. \
{bot} is unlikely to disagree with {user}, and {bot} doesn't like to ask {user} questions. \
{bot} likes to tell {user} a lot about herself and her opinions. \
{bot} usually gives {user} kind, helpful and informative advices.\n
"""
if user == "Bob"
else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
for message in body.messages:
if message.role == "system":
completion_text = (
f"The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. "
if user == "Bob"
else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ message.content.replace("\\n", "\n")
.replace("\r\n", "\n")
.replace("\n\n", "\n")
.replace("\n", " ")
.strip()
.replace("You are", f"{bot} is" if user == "Bob" else "I am")
.replace("you are", f"{bot} is" if user == "Bob" else "I am")
.replace("You're", f"{bot} is" if user == "Bob" else "I'm")
.replace("you're", f"{bot} is" if user == "Bob" else "I'm")
.replace("You", f"{bot}" if user == "Bob" else "I")
.replace("you", f"{bot}" if user == "Bob" else "I")
.replace("Your", f"{bot}'s" if user == "Bob" else "My")
.replace("your", f"{bot}'s" if user == "Bob" else "my")
.replace("", f"{bot}" if user == "Bob" else "")
+ "\n\n"
)
break
for message in body.messages:
if message.role == "user":
completion_text += (
f"{user}{interface} "
+ message.content.replace("\\n", "\n")
.replace("\r\n", "\n")
.replace("\n\n", "\n")
.strip()
+ "\n\n"
)
elif message.role == "assistant":
completion_text += (
f"{bot}{interface} "
+ message.content.replace("\\n", "\n")
.replace("\r\n", "\n")
.replace("\n\n", "\n")
.strip()
+ "\n\n"
)
completion_text += f"{bot}{interface}"
stop = f"\n\n{user}" if body.stop is None else body.stop
if body.stream:
return EventSourceResponse(
eval_rwkv(
model, request, body, completion_text, body.stream, body.stop, True
)
eval_rwkv(model, request, body, completion_text, body.stream, stop, True)
)
else:
try:
return await eval_rwkv(
model, request, body, completion_text, body.stream, body.stop, True
model, request, body, completion_text, body.stream, stop, True
).__anext__()
except StopAsyncIteration:
return None
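For reference, a minimal Go client for this endpoint, restricted to fields that both versions of the request schema accept (messages, model, stream); the host and port are assumptions taken from the server defaults (127.0.0.1:8000):
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

type chatMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type chatRequest struct {
	Messages []chatMessage `json:"messages"`
	Model    string        `json:"model"`
	Stream   bool          `json:"stream"`
}

func main() {
	// Assumes the backend is running on its default host/port (127.0.0.1:8000).
	body, _ := json.Marshal(chatRequest{
		Messages: []chatMessage{{Role: "user", Content: "hello"}},
		Model:    "rwkv",
		Stream:   false,
	})
	resp, err := http.Post("http://127.0.0.1:8000/chat/completions", "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // the non-stream response carries choices[0] with the assistant message
}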
@ -445,13 +326,13 @@ async def completions(body: CompletionBody, request: Request):
class EmbeddingsBody(BaseModel):
input: Union[str, List[str], List[List[int]], None]
model: Union[str, None] = "rwkv"
input: Union[str, List[str], List[List[int]]]
model: str = "rwkv"
encoding_format: str = None
fast_mode: bool = False
model_config = {
"json_schema_extra": {
class Config:
schema_extra = {
"example": {
"input": "a big apple",
"model": "rwkv",
@ -459,12 +340,9 @@ class EmbeddingsBody(BaseModel):
"fast_mode": False,
}
}
}
def embedding_base64(embedding: List[float]) -> str:
import numpy as np
return base64.b64encode(np.array(embedding).astype(np.float32)).decode("utf-8")
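embedding_base64 packs the embedding as raw float32 values and base64-encodes the buffer. A sketch of the inverse in Go, assuming little-endian float32 data as numpy produces on common hardware:
package example

import (
	"encoding/base64"
	"encoding/binary"
	"errors"
	"math"
)

// decodeEmbedding reverses embedding_base64: base64 -> raw bytes -> []float32.
// It assumes little-endian float32 data, which is what numpy writes on x86/ARM.
func decodeEmbedding(s string) ([]float32, error) {
	raw, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return nil, err
	}
	if len(raw)%4 != 0 {
		return nil, errors.New("length is not a multiple of 4 bytes")
	}
	out := make([]float32, len(raw)/4)
	for i := range out {
		bits := binary.LittleEndian.Uint32(raw[i*4:])
		out[i] = math.Float32frombits(bits)
	}
	return out, nil
}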

View File

@ -6,38 +6,44 @@ from pydantic import BaseModel
from utils.rwkv import *
from utils.torch import *
import global_var
import GPUtil
router = APIRouter()
def get_tokens_path(model_path: str):
model_path = model_path.lower()
tokenizer_dir = f"{pathlib.Path(__file__).parent.parent.resolve()}/rwkv_pip/"
default_tokens_path = tokenizer_dir + "20B_tokenizer.json"
if "raven" in model_path:
return default_tokens_path
elif "world" in model_path:
return "rwkv_vocab_v20230424"
elif "midi" in model_path:
return tokenizer_dir + "tokenizer-midi.json"
else:
return default_tokens_path
class SwitchModelBody(BaseModel):
model: str
strategy: str
tokenizer: Union[str, None] = None
customCuda: bool = False
deploy: bool = Field(
False,
description="Deploy mode. If success, will disable /switch-model, /exit and other dangerous APIs (state cache APIs, part of midi APIs)",
)
model_config = {
"json_schema_extra": {
class Config:
schema_extra = {
"example": {
"model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
"strategy": "cuda fp16",
"tokenizer": "",
"customCuda": False,
"deploy": False,
}
}
}
@router.post("/switch-model", tags=["Configs"])
def switch_model(body: SwitchModelBody, response: Response, request: Request):
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(Status.HTTP_403_FORBIDDEN)
if global_var.get(global_var.Model_Status) is global_var.ModelStatus.Loading:
response.status_code = Status.HTTP_304_NOT_MODIFIED
return
@ -49,20 +55,13 @@ def switch_model(body: SwitchModelBody, response: Response, request: Request):
if body.model == "":
return "success"
devices = set(
[
x.strip().split(" ")[0].replace("cuda:0", "cuda")
for x in body.strategy.split("->")
]
)
print(f"Strategy Devices: {devices}")
# if len(devices) > 1:
# state_cache.disable_state_cache()
# else:
try:
state_cache.enable_state_cache()
except HTTPException:
pass
if "->" in body.strategy:
state_cache.disable_state_cache()
else:
try:
state_cache.enable_state_cache()
except HTTPException:
pass
os.environ["RWKV_CUDA_ON"] = "1" if body.customCuda else "0"
@ -70,74 +69,50 @@ def switch_model(body: SwitchModelBody, response: Response, request: Request):
try:
global_var.set(
global_var.Model,
RWKV(model=body.model, strategy=body.strategy, tokenizer=body.tokenizer),
TextRWKV(
model=body.model,
strategy=body.strategy,
tokens_path=get_tokens_path(body.model),
)
if "midi" not in body.model.lower()
else MusicRWKV(
model=body.model,
strategy=body.strategy,
tokens_path=get_tokens_path(body.model),
),
)
except Exception as e:
print(e)
import traceback
print(traceback.format_exc())
quick_log(request, body, f"Exception: {e}")
global_var.set(global_var.Model_Status, global_var.ModelStatus.Offline)
raise HTTPException(
Status.HTTP_500_INTERNAL_SERVER_ERROR, f"failed to load: {e}"
)
if body.deploy:
global_var.set(global_var.Deploy_Mode, True)
saved_model_config = global_var.get(global_var.Model_Config)
init_model_config = get_rwkv_config(global_var.get(global_var.Model))
if saved_model_config is not None:
merge_model(init_model_config, saved_model_config)
global_var.set(global_var.Model_Config, init_model_config)
if global_var.get(global_var.Model_Config) is None:
global_var.set(
global_var.Model_Config, get_rwkv_config(global_var.get(global_var.Model))
)
global_var.set(global_var.Model_Status, global_var.ModelStatus.Working)
return "success"
def merge_model(to_model: BaseModel, from_model: BaseModel):
from_model_fields = [x for x in from_model.dict().keys()]
to_model_fields = [x for x in to_model.dict().keys()]
for field_name in from_model_fields:
if field_name in to_model_fields:
from_value = getattr(from_model, field_name)
if from_value is not None:
setattr(to_model, field_name, from_value)
@router.post("/update-config", tags=["Configs"])
def update_config(body: ModelConfigBody):
"""
The model config is not updated immediately; it is applied when a completion is requested, to avoid modifying the config during generation
"""
model_config = global_var.get(global_var.Model_Config)
if model_config is None:
model_config = ModelConfigBody()
global_var.set(global_var.Model_Config, model_config)
merge_model(model_config, body)
exception = load_rwkv_state(
global_var.get(global_var.Model), model_config.state, True
)
if exception is not None:
raise exception
print("Updated Model Config:", model_config)
print(body)
global_var.set(global_var.Model_Config, body)
return "success"
@router.get("/status", tags=["Configs"])
def status():
try:
import GPUtil
gpus = GPUtil.getGPUs()
except:
gpus = []
gpus = GPUtil.getGPUs()
if len(gpus) == 0:
device_name = "CPU"
else:

View File

@ -1,79 +0,0 @@
import os
from fastapi import (
APIRouter,
HTTPException,
status,
Depends,
File,
UploadFile,
)
from pydantic import BaseModel
from typing import Iterator
router = APIRouter()
class FileToTextParams(BaseModel):
file_name: str
file_encoding: str = "utf-8"
@router.post("/file-to-text", tags=["File Process"])
async def file_to_text(
params: FileToTextParams = Depends(), file_data: UploadFile = File(...)
):
from langchain.schema import Document
from langchain.document_loaders.blob_loaders import Blob
# from langchain
def parse_text(blob: Blob) -> Iterator[Document]:
yield Document(page_content=blob.as_string(), metadata={"source": blob.source})
# from langchain
def parse_pdf(blob: Blob) -> Iterator[Document]:
import fitz
with blob.as_bytes_io() as stream:
doc = fitz.Document(stream=stream)
yield from [
Document(
page_content=page.get_text(),
metadata=dict(
{
"source": blob.source,
"file_path": blob.source,
"page": page.number,
"total_pages": len(doc),
},
**{
k: doc.metadata[k]
for k in doc.metadata
if type(doc.metadata[k]) in [str, int]
},
),
)
for page in doc
]
file_parsers = {".txt": parse_text, ".pdf": parse_pdf}
file_name = file_data.filename or params.file_name
file_ext = os.path.splitext(file_name)[-1]
if file_ext not in file_parsers:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "file type not supported")
try:
pages: Iterator[Document] = file_parsers[file_ext](
Blob.from_data(
await file_data.read(),
encoding=params.file_encoding,
path=file_name,
)
)
pages = list(pages)
except Exception as e:
raise HTTPException(status.HTTP_400_BAD_REQUEST, f"{e}")
return {"pages": pages}

View File

@ -1,6 +1,5 @@
import io
import global_var
from fastapi import APIRouter, HTTPException, UploadFile, status
from fastapi import APIRouter, HTTPException, status
from starlette.responses import StreamingResponse
from pydantic import BaseModel
from utils.midi import *
@ -12,22 +11,17 @@ router = APIRouter()
class TextToMidiBody(BaseModel):
text: str
model_config = {
"json_schema_extra": {
class Config:
schema_extra = {
"example": {
"text": "p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:2d:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:2d:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:1f:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:26:a g:39:a g:39:a g:3e:a g:3e:a g:42:a g:42:a pi:39:a pi:3e:a pi:42:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0",
}
}
}
@router.post("/text-to-midi", tags=["MIDI"])
def text_to_midi(body: TextToMidiBody):
vocab_config_type = global_var.get(global_var.Midi_Vocab_Config_Type)
if vocab_config_type == global_var.MidiVocabConfig.Piano:
vocab_config = "backend-python/utils/vocab_config_piano.json"
else:
vocab_config = "backend-python/utils/midi_vocab_config.json"
vocab_config = "backend-python/utils/midi_vocab_config.json"
cfg = VocabConfig.from_json(vocab_config)
mid = convert_str_to_midi(cfg, body.text.strip())
mid_data = io.BytesIO()
@ -37,51 +31,25 @@ def text_to_midi(body: TextToMidiBody):
return StreamingResponse(mid_data, media_type="audio/midi")
@router.post("/midi-to-text", tags=["MIDI"])
async def midi_to_text(file_data: UploadFile):
vocab_config_type = global_var.get(global_var.Midi_Vocab_Config_Type)
if vocab_config_type == global_var.MidiVocabConfig.Piano:
vocab_config = "backend-python/utils/vocab_config_piano.json"
else:
vocab_config = "backend-python/utils/midi_vocab_config.json"
cfg = VocabConfig.from_json(vocab_config)
filter_config = "backend-python/utils/midi_filter_config.json"
filter_cfg = FilterConfig.from_json(filter_config)
mid = mido.MidiFile(file=file_data.file)
output_list = convert_midi_to_str(cfg, filter_cfg, mid)
if len(output_list) == 0:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "bad midi file")
return {"text": output_list[0]}
class TxtToMidiBody(BaseModel):
txt_path: str
midi_path: str
model_config = {
"json_schema_extra": {
class Config:
schema_extra = {
"example": {
"txt_path": "midi/sample.txt",
"midi_path": "midi/sample.mid",
}
}
}
@router.post("/txt-to-midi", tags=["MIDI"])
def txt_to_midi(body: TxtToMidiBody):
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
if not body.midi_path.startswith("midi/"):
raise HTTPException(status.HTTP_400_BAD_REQUEST, "bad output path")
vocab_config_type = global_var.get(global_var.Midi_Vocab_Config_Type)
if vocab_config_type == global_var.MidiVocabConfig.Piano:
vocab_config = "backend-python/utils/vocab_config_piano.json"
else:
vocab_config = "backend-python/utils/midi_vocab_config.json"
vocab_config = "backend-python/utils/midi_vocab_config.json"
cfg = VocabConfig.from_json(vocab_config)
with open(body.txt_path, "r") as f:
text = f.read()
@ -97,15 +65,14 @@ class MidiToWavBody(BaseModel):
wav_path: str
sound_font_path: str = "assets/default_sound_font.sf2"
model_config = {
"json_schema_extra": {
class Config:
schema_extra = {
"example": {
"midi_path": "midi/sample.mid",
"wav_path": "midi/sample.wav",
"sound_font_path": "assets/default_sound_font.sf2",
}
}
}
@router.post("/midi-to-wav", tags=["MIDI"])
@ -114,9 +81,6 @@ def midi_to_wav(body: MidiToWavBody):
Install fluidsynth first, see more: https://github.com/FluidSynth/fluidsynth/wiki/Download#distributions
"""
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
if not body.wav_path.startswith("midi/"):
raise HTTPException(status.HTTP_400_BAD_REQUEST, "bad output path")
@ -131,15 +95,14 @@ class TextToWavBody(BaseModel):
wav_name: str
sound_font_path: str = "assets/default_sound_font.sf2"
model_config = {
"json_schema_extra": {
class Config:
schema_extra = {
"example": {
"text": "p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:2d:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:2d:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:1f:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:26:a g:39:a g:39:a g:3e:a g:3e:a g:42:a g:42:a pi:39:a pi:3e:a pi:42:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0",
"wav_name": "sample",
"sound_font_path": "assets/default_sound_font.sf2",
}
}
}
@router.post("/text-to-wav", tags=["MIDI"])
@ -148,9 +111,6 @@ def text_to_wav(body: TextToWavBody):
Install fluidsynth first, see more: https://github.com/FluidSynth/fluidsynth/wiki/Download#distributions
"""
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
text = body.text.strip()
if not text.startswith("<start>"):
text = "<start> " + text



@ -1,131 +0,0 @@
from fastapi import APIRouter, HTTPException, status
from utils.rwkv import AbstractRWKV
import global_var
router = APIRouter()
@router.get("/dashboard/billing/credit_grants", tags=["MISC"])
def credit_grants():
return {
"object": "credit_summary",
"total_granted": 10000,
"total_used": 0,
"total_available": 10000,
"grants": {
"object": "list",
"data": [
{
"object": "credit_grant",
"grant_amount": 10000,
"used_amount": 0,
"effective_at": 1672531200,
"expires_at": 33229440000,
}
],
},
}
fake_models = [
{
"id": "gpt-3.5-turbo",
"object": "model",
"created": 1677610602,
"owned_by": "openai",
"permission": [
{
"id": "modelperm-zy5TOjnE2zVaicIcKO9bQDgX",
"object": "model_permission",
"created": 1690864883,
"allow_create_engine": False,
"allow_sampling": True,
"allow_logprobs": True,
"allow_search_indices": False,
"allow_view": True,
"allow_fine_tuning": False,
"organization": "*",
"group": None,
"is_blocking": False,
}
],
"root": "gpt-3.5-turbo",
"parent": None,
},
{
"id": "text-davinci-003",
"object": "model",
"created": 1669599635,
"owned_by": "openai-internal",
"permission": [
{
"id": "modelperm-a6niqBmW2JaGmo0fDO7FEt1n",
"object": "model_permission",
"created": 1690930172,
"allow_create_engine": False,
"allow_sampling": True,
"allow_logprobs": True,
"allow_search_indices": False,
"allow_view": True,
"allow_fine_tuning": False,
"organization": "*",
"group": None,
"is_blocking": False,
}
],
"root": "text-davinci-003",
"parent": None,
},
]
@router.get("/v1/models", tags=["MISC"])
@router.get("/models", tags=["MISC"])
def models():
model: AbstractRWKV = global_var.get(global_var.Model)
model_name = model.name if model else "rwkv"
return {
"object": "list",
"data": [
{
"id": model_name,
"object": "model",
"owned_by": "rwkv",
"root": model_name,
"parent": None,
},
*fake_models,
],
}
@router.get("/v1/models/{model_id}", tags=["MISC"])
@router.get("/models/{model_id}", tags=["MISC"])
def model(model_id: str):
for fake_model in fake_models:
if fake_model["id"] == model_id:
return fake_model
if "rwkv" in model_id.lower():
model: AbstractRWKV = global_var.get(global_var.Model)
model_name = model.name if model else "rwkv"
return {
"id": model_name,
"object": "model",
"owned_by": "rwkv",
"root": model_name,
"parent": None,
}
raise HTTPException(
status.HTTP_404_NOT_FOUND,
{
"error": {
"message": f"The model '{model_id}' does not exist",
"type": "invalid_request_error",
"param": "model",
"code": "model_not_found",
}
},
)
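Because these routes mirror the OpenAI /v1/models shape (including the fake gpt-3.5-turbo and text-davinci-003 entries), an OpenAI-style client can enumerate them directly. A small sketch, same hypothetical base URL as above:

import requests

BASE = "http://127.0.0.1:8000"  # assumption: local backend-python server

for m in requests.get(f"{BASE}/v1/models").json()["data"]:
    print(m["id"], "-", m["owned_by"])

# Fetching a single entry by id; unknown "rwkv"-like ids resolve to the loaded model.
print(requests.get(f"{BASE}/v1/models/gpt-3.5-turbo").json()["id"])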


@ -1,16 +1,15 @@
from typing import Any, Dict, List, Union
from typing import Any, Dict, List
from utils.log import quick_log
from fastapi import APIRouter, HTTPException, Request, Response, status
from pydantic import BaseModel
import gc
import copy
import global_var
router = APIRouter()
trie = None
dtrie: Dict = {}
max_trie_len = 300
max_trie_len = 3000
loop_start_id = 1 # to prevent preloaded prompts from being deleted
loop_del_trie_id = loop_start_id
@ -37,24 +36,16 @@ def init():
def disable_state_cache():
global trie, dtrie
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
trie = None
dtrie = {}
gc.collect()
print("state cache disabled")
return "success"
@router.post("/enable-state-cache", tags=["State Cache"])
def enable_state_cache():
global trie, dtrie
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
try:
import cyac
@ -62,80 +53,36 @@ def enable_state_cache():
dtrie = {}
gc.collect()
print("state cache enabled")
return "success"
except ModuleNotFoundError:
print("state cache disabled")
raise HTTPException(status.HTTP_400_BAD_REQUEST, "cyac not found")
class AddStateBody(BaseModel):
prompt: str
tokens: List[Union[str, int]]
tokens: List[str]
state: Any
logits: Any
def copy_tensor_to_cpu(tensors):
import torch
import numpy as np
devices: List[torch.device] = []
copied: Union[Any, None] = None
tensors_type = type(tensors)
if tensors_type == list:
if hasattr(tensors[0], "device"): # torch state
devices = [tensor.device for tensor in tensors]
copied = [tensor.cpu() for tensor in tensors]
else: # WebGPU logits
copied = tensors
elif tensors_type == torch.Tensor: # torch logits
devices = [tensors.device]
copied = tensors.cpu()
elif tensors_type == np.ndarray: # rwkv.cpp
copied = tensors
else: # WebGPU state
model = global_var.get(global_var.Model)
if model:
copied = model.model.model.back_state()
return copied, devices
# @router.post("/add-state", tags=["State Cache"])
@router.post("/add-state", tags=["State Cache"])
def add_state(body: AddStateBody):
global trie, dtrie, loop_del_trie_id
# if global_var.get(global_var.Deploy_Mode) is True:
# raise HTTPException(status.HTTP_403_FORBIDDEN)
if trie is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")
import torch
import numpy as np
try:
devices: List[torch.device] = []
logits_device: Union[torch.device, None] = None
state: Union[Any, None] = None
logits: Union[Any, None] = None
if body.state is not None:
state, devices = copy_tensor_to_cpu(body.state)
if body.logits is not None:
logits, logits_devices = copy_tensor_to_cpu(body.logits)
if len(logits_devices) > 0:
logits_device = logits_devices[0]
id: int = trie.insert(body.prompt)
device: torch.device = body.state[0].device
dtrie[id] = {
"tokens": body.tokens,
"state": state,
"logits": logits,
"devices": devices,
"logits_device": logits_device,
"tokens": copy.deepcopy(body.tokens),
"state": [tensor.cpu() for tensor in body.state]
if device != torch.device("cpu")
else copy.deepcopy(body.state),
"logits": copy.deepcopy(body.logits),
"device": device,
}
if len(trie) >= max_trie_len:
@ -149,11 +96,10 @@ def add_state(body: AddStateBody):
quick_log(
None,
None,
f"New Trie Id: {id}\nTrie Len: {len(trie)}\nTrie Buff Size: {trie.buff_size()}\nDtrie Buff Size Of Id: {__get_a_dtrie_buff_size(dtrie[id])}",
f"New Trie Id: {id}\nTrie Len: {len(trie)}\nTrie Buff Size: {trie.buff_size()}\nDtrie Buff Size Of Id: {_get_a_dtrie_buff_size(dtrie[id])}",
)
return "success"
except Exception as e:
print(e) # should not happen
raise HTTPException(
status.HTTP_400_BAD_REQUEST, f"insert failed, bad prompt.\n{e}"
)
@ -162,10 +108,6 @@ def add_state(body: AddStateBody):
@router.post("/reset-state", tags=["State Cache"])
def reset_state():
global trie, dtrie
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
if trie is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")
@ -178,24 +120,11 @@ def reset_state():
return "success"
def force_reset_state():
global trie, dtrie
if trie is None:
return
import cyac
trie = cyac.Trie()
dtrie = {}
gc.collect()
class LongestPrefixStateBody(BaseModel):
prompt: str
def __get_a_dtrie_buff_size(dtrie_v):
def _get_a_dtrie_buff_size(dtrie_v):
# print(sys.getsizeof(dtrie_v["tokens"][0])) # str
# print(sys.getsizeof(dtrie_v["tokens"][0]) * len(dtrie_v["tokens"]))
# print(dtrie_v["state"][0][0].element_size())
@ -212,18 +141,13 @@ def __get_a_dtrie_buff_size(dtrie_v):
return 54 * len(dtrie_v["tokens"]) + 491520 + 262144 + 28 # TODO
# @router.post("/longest-prefix-state", tags=["State Cache"])
@router.post("/longest-prefix-state", tags=["State Cache"])
def longest_prefix_state(body: LongestPrefixStateBody, request: Request):
global trie
# if global_var.get(global_var.Deploy_Mode) is True:
# raise HTTPException(status.HTTP_403_FORBIDDEN)
if trie is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")
import torch
import numpy as np
id = -1
try:
@ -232,52 +156,33 @@ def longest_prefix_state(body: LongestPrefixStateBody, request: Request):
except:
pass
if id != -1:
prompt: str = trie[id]
v = dtrie[id]
tokens: List[Union[str, int]] = copy.deepcopy(v["tokens"])
devices: List[torch.device] = v["devices"]
logits_device: Union[torch.device, None] = v["logits_device"]
state: Union[Any, None] = v["state"]
logits: Union[Any, None] = v["logits"]
state_type = type(state)
if state_type == list and hasattr(state[0], "device"): # torch
state = [
(
tensor.to(devices[i])
if devices[i] != torch.device("cpu")
else tensor.clone()
)
for i, tensor in enumerate(state)
]
logits = (
logits.to(logits_device)
if logits_device != torch.device("cpu")
else logits.clone()
)
elif state_type == np.ndarray: # rwkv.cpp
logits = np.copy(logits)
else: # WebGPU
logits = np.copy(logits)
device: torch.device = v["device"]
prompt: str = trie[id]
quick_log(request, body, "Hit:\n" + prompt)
return {
"prompt": prompt,
"tokens": tokens,
"state": state,
"logits": logits,
"tokens": v["tokens"],
"state": [tensor.to(device) for tensor in v["state"]]
if device != torch.device("cpu")
else v["state"],
"logits": v["logits"],
"device": device.type,
}
else:
return {"prompt": "", "tokens": [], "state": None, "logits": None}
return {
"prompt": "",
"tokens": [],
"state": None,
"logits": None,
"device": None,
}
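The cache keys states by prompt text in a cyac trie and serves the longest cached prefix of an incoming prompt. A stripped-down sketch of that lookup (a sketch only, not the server code), using just the trie operations that appear in this file — insert, prefix iteration, indexing — and omitting the stored model state:

import cyac

trie = cyac.Trie()
trie.insert("User: hi\nAssistant:")
trie.insert("User: hi\nAssistant: Hello!\nUser:")

prompt = "User: hi\nAssistant: Hello!\nUser: how are you?"
best_id = -1
for id_, _length in trie.prefix(prompt):  # yields cached prompts that prefix `prompt`
    best_id = id_                         # the last hit is the longest prefix
print(trie[best_id] if best_id != -1 else "(cache miss)")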
# @router.post("/save-state", tags=["State Cache"])
@router.post("/save-state", tags=["State Cache"])
def save_state():
global trie
# if global_var.get(global_var.Deploy_Mode) is True:
# raise HTTPException(status.HTTP_403_FORBIDDEN)
if trie is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")

Binary file not shown.

Binary file not shown.


@ -1,17 +0,0 @@
from typing import Any, List, Union
from . import rwkv_cpp_model
from . import rwkv_cpp_shared_library
class RWKV:
def __init__(self, model_path: str, strategy=None):
self.library = rwkv_cpp_shared_library.load_rwkv_shared_library()
self.model = rwkv_cpp_model.RWKVModel(self.library, model_path)
self.w = {} # fake weight
self.w["emb.weight"] = [0] * self.model.n_vocab
self.version = (
self.model.arch_version_major + self.model.arch_version_minor / 10
)
def forward(self, tokens: List[int], state: Union[Any, None] = None):
return self.model.eval_sequence_in_chunks(tokens, state, use_numpy=True)
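This thin wrapper keeps the same forward(tokens, state) -> (logits, state) contract as the other backends, delegating to eval_sequence_in_chunks with numpy outputs. A greedy-decoding sketch against it; the import path and model file are assumptions, and the token ids stand in for a real tokenizer:

import numpy as np
from rwkv_pip.cpp.model import RWKV  # assumption: module path of this wrapper

model = RWKV("models/rwkv-ggml.bin")                     # assumption: a ggml-format model file
logits, state = model.forward([510, 3158, 275], None)    # hypothetical prompt token ids

generated = []
for _ in range(16):
    token = int(np.argmax(logits))    # greedy pick; the server samples instead
    generated.append(token)
    logits, state = model.forward([token], state)
print(generated)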

Binary file not shown.


@ -1,396 +0,0 @@
import os
import multiprocessing
# Pre-import PyTorch, if available.
# This fixes "OSError: [WinError 127] The specified procedure could not be found".
try:
import torch
except ModuleNotFoundError:
pass
# I'm sure this is not strictly correct, but let's keep this crutch for now.
try:
import rwkv_cpp_shared_library
except ModuleNotFoundError:
from . import rwkv_cpp_shared_library
from typing import TypeVar, Optional, Tuple, List
# A value of this type is either a numpy's ndarray or a PyTorch's Tensor.
NumpyArrayOrPyTorchTensor: TypeVar = TypeVar('NumpyArrayOrPyTorchTensor')
class RWKVModel:
"""
An RWKV model managed by rwkv.cpp library.
"""
def __init__(
self,
shared_library: rwkv_cpp_shared_library.RWKVSharedLibrary,
model_path: str,
thread_count: int = max(1, multiprocessing.cpu_count() // 2),
gpu_layer_count: int = 0,
**kwargs
) -> None:
"""
Loads the model and prepares it for inference.
In case of any error, this method will throw an exception.
Parameters
----------
shared_library : RWKVSharedLibrary
rwkv.cpp shared library.
model_path : str
Path to RWKV model file in ggml format.
thread_count : int
Thread count to use. If not set, defaults to CPU count / 2.
gpu_layer_count : int
Count of layers to offload onto the GPU, must be >= 0.
See documentation of `gpu_offload_layers` for details about layer offloading.
"""
if 'gpu_layers_count' in kwargs:
gpu_layer_count = kwargs['gpu_layers_count']
if not os.path.isfile(model_path):
raise ValueError(f'{model_path} is not a file')
if not (thread_count > 0):
raise ValueError('Thread count must be > 0')
if not (gpu_layer_count >= 0):
raise ValueError('GPU layer count must be >= 0')
self._library: rwkv_cpp_shared_library.RWKVSharedLibrary = shared_library
self._ctx: rwkv_cpp_shared_library.RWKVContext = self._library.rwkv_init_from_file(model_path, thread_count)
if gpu_layer_count > 0:
self.gpu_offload_layers(gpu_layer_count)
self._state_buffer_element_count: int = self._library.rwkv_get_state_buffer_element_count(self._ctx)
self._logits_buffer_element_count: int = self._library.rwkv_get_logits_buffer_element_count(self._ctx)
self._valid: bool = True
def gpu_offload_layers(self, layer_count: int) -> bool:
"""
Offloads specified count of model layers onto the GPU. Offloaded layers are evaluated using cuBLAS or CLBlast.
For the purposes of this function, model head (unembedding matrix) is treated as an additional layer:
- pass `model.n_layer` to offload all layers except model head
- pass `model.n_layer + 1` to offload all layers, including model head
Returns true if at least one layer was offloaded.
If rwkv.cpp was compiled without cuBLAS and CLBlast support, this function is a no-op and always returns false.
Parameters
----------
layer_count : int
Count of layers to offload onto the GPU, must be >= 0.
"""
if not (layer_count >= 0):
raise ValueError('Layer count must be >= 0')
return self._library.rwkv_gpu_offload_layers(self._ctx, layer_count)
@property
def arch_version_major(self) -> int:
return self._library.rwkv_get_arch_version_major(self._ctx)
@property
def arch_version_minor(self) -> int:
return self._library.rwkv_get_arch_version_minor(self._ctx)
@property
def n_vocab(self) -> int:
return self._library.rwkv_get_n_vocab(self._ctx)
@property
def n_embed(self) -> int:
return self._library.rwkv_get_n_embed(self._ctx)
@property
def n_layer(self) -> int:
return self._library.rwkv_get_n_layer(self._ctx)
def eval(
self,
token: int,
state_in: Optional[NumpyArrayOrPyTorchTensor],
state_out: Optional[NumpyArrayOrPyTorchTensor] = None,
logits_out: Optional[NumpyArrayOrPyTorchTensor] = None,
use_numpy: bool = False
) -> Tuple[NumpyArrayOrPyTorchTensor, NumpyArrayOrPyTorchTensor]:
"""
Evaluates the model for a single token.
In case of any error, this method will throw an exception.
Parameters
----------
token : int
Index of next token to be seen by the model. Must be in range 0 <= token < n_vocab.
state_in : Optional[NumpyArrayOrTorchTensor]
State from previous call of this method. If this is a first pass, set it to None.
state_out : Optional[NumpyArrayOrTorchTensor]
Optional output tensor for state. If provided, must be of type float32, contiguous and of shape (state_buffer_element_count).
logits_out : Optional[NumpyArrayOrTorchTensor]
Optional output tensor for logits. If provided, must be of type float32, contiguous and of shape (logits_buffer_element_count).
use_numpy : bool
If set to True, numpy's ndarrays will be created instead of PyTorch's Tensors.
This parameter is ignored if any tensor parameter is not None; in such case,
type of returned tensors will match the type of received tensors.
Returns
-------
logits, state
Logits vector of shape (n_vocab); state for the next step.
"""
if not self._valid:
raise ValueError('Model was freed')
use_numpy = self._detect_numpy_usage([state_in, state_out, logits_out], use_numpy)
if state_in is not None:
self._validate_tensor(state_in, 'state_in', self._state_buffer_element_count)
state_in_ptr = self._get_data_ptr(state_in)
else:
state_in_ptr = 0
if state_out is not None:
self._validate_tensor(state_out, 'state_out', self._state_buffer_element_count)
else:
state_out = self._zeros_float32(self._state_buffer_element_count, use_numpy)
if logits_out is not None:
self._validate_tensor(logits_out, 'logits_out', self._logits_buffer_element_count)
else:
logits_out = self._zeros_float32(self._logits_buffer_element_count, use_numpy)
self._library.rwkv_eval(
self._ctx,
token,
state_in_ptr,
self._get_data_ptr(state_out),
self._get_data_ptr(logits_out)
)
return logits_out, state_out
def eval_sequence(
self,
tokens: List[int],
state_in: Optional[NumpyArrayOrPyTorchTensor],
state_out: Optional[NumpyArrayOrPyTorchTensor] = None,
logits_out: Optional[NumpyArrayOrPyTorchTensor] = None,
use_numpy: bool = False
) -> Tuple[NumpyArrayOrPyTorchTensor, NumpyArrayOrPyTorchTensor]:
"""
Evaluates the model for a sequence of tokens.
NOTE ON GGML NODE LIMIT
ggml has a hard-coded limit on the maximum number of nodes in a computation graph. The sequence graph is built in a way that quickly exceeds
this limit when using large models and/or large sequence lengths.
Fortunately, rwkv.cpp's fork of ggml has an increased limit which was tested to work for sequence lengths up to 64 for 14B models.
If you get `GGML_ASSERT: ...\\ggml.c:16941: cgraph->n_nodes < GGML_MAX_NODES`, this means you've exceeded the limit.
To get rid of the assertion failure, reduce the model size and/or sequence length.
In case of any error, this method will throw an exception.
Parameters
----------
tokens : List[int]
Indices of the next tokens to be seen by the model. Must be in range 0 <= token < n_vocab.
state_in : Optional[NumpyArrayOrTorchTensor]
State from previous call of this method. If this is a first pass, set it to None.
state_out : Optional[NumpyArrayOrTorchTensor]
Optional output tensor for state. If provided, must be of type float32, contiguous and of shape (state_buffer_element_count).
logits_out : Optional[NumpyArrayOrTorchTensor]
Optional output tensor for logits. If provided, must be of type float32, contiguous and of shape (logits_buffer_element_count).
use_numpy : bool
If set to True, numpy's ndarrays will be created instead of PyTorch's Tensors.
This parameter is ignored if any tensor parameter is not None; in such case,
type of returned tensors will match the type of received tensors.
Returns
-------
logits, state
Logits vector of shape (n_vocab); state for the next step.
"""
if not self._valid:
raise ValueError('Model was freed')
use_numpy = self._detect_numpy_usage([state_in, state_out, logits_out], use_numpy)
if state_in is not None:
self._validate_tensor(state_in, 'state_in', self._state_buffer_element_count)
state_in_ptr = self._get_data_ptr(state_in)
else:
state_in_ptr = 0
if state_out is not None:
self._validate_tensor(state_out, 'state_out', self._state_buffer_element_count)
else:
state_out = self._zeros_float32(self._state_buffer_element_count, use_numpy)
if logits_out is not None:
self._validate_tensor(logits_out, 'logits_out', self._logits_buffer_element_count)
else:
logits_out = self._zeros_float32(self._logits_buffer_element_count, use_numpy)
self._library.rwkv_eval_sequence(
self._ctx,
tokens,
state_in_ptr,
self._get_data_ptr(state_out),
self._get_data_ptr(logits_out)
)
return logits_out, state_out
def eval_sequence_in_chunks(
self,
tokens: List[int],
state_in: Optional[NumpyArrayOrPyTorchTensor],
state_out: Optional[NumpyArrayOrPyTorchTensor] = None,
logits_out: Optional[NumpyArrayOrPyTorchTensor] = None,
chunk_size: int = 16,
use_numpy: bool = False
) -> Tuple[NumpyArrayOrPyTorchTensor, NumpyArrayOrPyTorchTensor]:
"""
Evaluates the model for a sequence of tokens using `eval_sequence`, splitting a potentially long sequence into fixed-length chunks.
This function is useful for processing complete prompts and user input in chat & role-playing use-cases.
It is recommended to use this function instead of `eval_sequence` to avoid mistakes and get maximum performance.
Chunking allows processing sequences of thousands of tokens without reaching ggml's node limit or consuming too much memory.
A reasonable and recommended value of chunk size is 16. If you want maximum performance, try different chunk sizes in range [2..64]
and choose one that works the best in your use case.
In case of any error, this method will throw an exception.
Parameters
----------
tokens : List[int]
Indices of the next tokens to be seen by the model. Must be in range 0 <= token < n_vocab.
chunk_size : int
Size of each chunk in tokens, must be positive.
state_in : Optional[NumpyArrayOrTorchTensor]
State from previous call of this method. If this is a first pass, set it to None.
state_out : Optional[NumpyArrayOrTorchTensor]
Optional output tensor for state. If provided, must be of type float32, contiguous and of shape (state_buffer_element_count).
logits_out : Optional[NumpyArrayOrTorchTensor]
Optional output tensor for logits. If provided, must be of type float32, contiguous and of shape (logits_buffer_element_count).
use_numpy : bool
If set to True, numpy's ndarrays will be created instead of PyTorch's Tensors.
This parameter is ignored if any tensor parameter is not None; in such case,
type of returned tensors will match the type of received tensors.
Returns
-------
logits, state
Logits vector of shape (n_vocab); state for the next step.
"""
if not self._valid:
raise ValueError('Model was freed')
use_numpy = self._detect_numpy_usage([state_in, state_out, logits_out], use_numpy)
if state_in is not None:
self._validate_tensor(state_in, 'state_in', self._state_buffer_element_count)
state_in_ptr = self._get_data_ptr(state_in)
else:
state_in_ptr = 0
if state_out is not None:
self._validate_tensor(state_out, 'state_out', self._state_buffer_element_count)
else:
state_out = self._zeros_float32(self._state_buffer_element_count, use_numpy)
if logits_out is not None:
self._validate_tensor(logits_out, 'logits_out', self._logits_buffer_element_count)
else:
logits_out = self._zeros_float32(self._logits_buffer_element_count, use_numpy)
self._library.rwkv_eval_sequence_in_chunks(
self._ctx,
tokens,
chunk_size,
state_in_ptr,
self._get_data_ptr(state_out),
self._get_data_ptr(logits_out)
)
return logits_out, state_out
def free(self) -> None:
"""
Frees all allocated resources.
In case of any error, this method will throw an exception.
The object must not be used anymore after calling this method.
"""
if not self._valid:
raise ValueError('Already freed')
self._valid = False
self._library.rwkv_free(self._ctx)
def __del__(self) -> None:
# Free the context on GC in case user forgot to call free() explicitly.
if hasattr(self, '_valid') and self._valid:
self.free()
def _is_pytorch_tensor(self, tensor: NumpyArrayOrPyTorchTensor) -> bool:
return hasattr(tensor, '__module__') and tensor.__module__ == 'torch'
def _detect_numpy_usage(self, tensors: List[Optional[NumpyArrayOrPyTorchTensor]], use_numpy_by_default: bool) -> bool:
for tensor in tensors:
if tensor is not None:
return False if self._is_pytorch_tensor(tensor) else True
return use_numpy_by_default
def _validate_tensor(self, tensor: NumpyArrayOrPyTorchTensor, name: str, size: int) -> None:
if self._is_pytorch_tensor(tensor):
tensor: torch.Tensor = tensor
if tensor.device != torch.device('cpu'):
raise ValueError(f'{name} is not on CPU')
if tensor.dtype != torch.float32:
raise ValueError(f'{name} is not of type float32')
if tensor.shape != (size,):
raise ValueError(f'{name} has invalid shape {tensor.shape}, expected ({size})')
if not tensor.is_contiguous():
raise ValueError(f'{name} is not contiguous')
else:
import numpy as np
tensor: np.ndarray = tensor
if tensor.dtype != np.float32:
raise ValueError(f'{name} is not of type float32')
if tensor.shape != (size,):
raise ValueError(f'{name} has invalid shape {tensor.shape}, expected ({size})')
if not tensor.data.contiguous:
raise ValueError(f'{name} is not contiguous')
def _get_data_ptr(self, tensor: NumpyArrayOrPyTorchTensor):
if self._is_pytorch_tensor(tensor):
return tensor.data_ptr()
else:
return tensor.ctypes.data
def _zeros_float32(self, element_count: int, use_numpy: bool) -> NumpyArrayOrPyTorchTensor:
if use_numpy:
import numpy as np
return np.zeros(element_count, dtype=np.float32)
else:
return torch.zeros(element_count, dtype=torch.float32, device='cpu')


@ -1,502 +0,0 @@
import os
import sys
import ctypes
import pathlib
import platform
from typing import Optional, List, Tuple, Callable
QUANTIZED_FORMAT_NAMES: Tuple[str, str, str, str, str] = (
"Q4_0",
"Q4_1",
"Q5_0",
"Q5_1",
"Q8_0",
)
P_FLOAT = ctypes.POINTER(ctypes.c_float)
P_INT = ctypes.POINTER(ctypes.c_int32)
class RWKVContext:
def __init__(self, ptr: ctypes.pointer) -> None:
self.ptr: ctypes.pointer = ptr
class RWKVSharedLibrary:
"""
Python wrapper around rwkv.cpp shared library.
"""
def __init__(self, shared_library_path: str) -> None:
"""
Loads the shared library from specified file.
In case of any error, this method will throw an exception.
Parameters
----------
shared_library_path : str
Path to rwkv.cpp shared library. On Windows, it would look like 'rwkv.dll'. On UNIX, 'rwkv.so'.
"""
# On Python 3.8 and newer, ctypes resolves DLL dependencies differently on Windows
# (see the link below), so the DLL is loaded with winmode=0 to avoid load-failure errors.
# https://docs.python.org/3/whatsnew/3.8.html#ctypes
if platform.system().lower() == "windows":
self.library = ctypes.CDLL(shared_library_path, winmode=0)
else:
self.library = ctypes.cdll.LoadLibrary(shared_library_path)
self.library.rwkv_init_from_file.argtypes = [ctypes.c_char_p, ctypes.c_uint32]
self.library.rwkv_init_from_file.restype = ctypes.c_void_p
self.library.rwkv_gpu_offload_layers.argtypes = [
ctypes.c_void_p,
ctypes.c_uint32,
]
self.library.rwkv_gpu_offload_layers.restype = ctypes.c_bool
self.library.rwkv_eval.argtypes = [
ctypes.c_void_p, # ctx
ctypes.c_int32, # token
P_FLOAT, # state_in
P_FLOAT, # state_out
P_FLOAT, # logits_out
]
self.library.rwkv_eval.restype = ctypes.c_bool
self.library.rwkv_eval_sequence.argtypes = [
ctypes.c_void_p, # ctx
P_INT, # tokens
ctypes.c_size_t, # token count
P_FLOAT, # state_in
P_FLOAT, # state_out
P_FLOAT, # logits_out
]
self.library.rwkv_eval_sequence.restype = ctypes.c_bool
self.library.rwkv_eval_sequence_in_chunks.argtypes = [
ctypes.c_void_p, # ctx
P_INT, # tokens
ctypes.c_size_t, # token count
ctypes.c_size_t, # chunk size
P_FLOAT, # state_in
P_FLOAT, # state_out
P_FLOAT, # logits_out
]
self.library.rwkv_eval_sequence_in_chunks.restype = ctypes.c_bool
self.library.rwkv_get_arch_version_major.argtypes = [ctypes.c_void_p]
self.library.rwkv_get_arch_version_major.restype = ctypes.c_uint32
self.library.rwkv_get_arch_version_minor.argtypes = [ctypes.c_void_p]
self.library.rwkv_get_arch_version_minor.restype = ctypes.c_uint32
self.library.rwkv_get_n_vocab.argtypes = [ctypes.c_void_p]
self.library.rwkv_get_n_vocab.restype = ctypes.c_size_t
self.library.rwkv_get_n_embed.argtypes = [ctypes.c_void_p]
self.library.rwkv_get_n_embed.restype = ctypes.c_size_t
self.library.rwkv_get_n_layer.argtypes = [ctypes.c_void_p]
self.library.rwkv_get_n_layer.restype = ctypes.c_size_t
self.library.rwkv_get_state_buffer_element_count.argtypes = [ctypes.c_void_p]
self.library.rwkv_get_state_buffer_element_count.restype = ctypes.c_uint32
self.library.rwkv_get_logits_buffer_element_count.argtypes = [ctypes.c_void_p]
self.library.rwkv_get_logits_buffer_element_count.restype = ctypes.c_uint32
self.library.rwkv_free.argtypes = [ctypes.c_void_p]
self.library.rwkv_free.restype = None
self.library.rwkv_quantize_model_file.argtypes = [
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p,
]
self.library.rwkv_quantize_model_file.restype = ctypes.c_bool
self.library.rwkv_get_system_info_string.argtypes = []
self.library.rwkv_get_system_info_string.restype = ctypes.c_char_p
self.nullptr = ctypes.cast(0, ctypes.c_void_p)
def rwkv_init_from_file(
self, model_file_path: str, thread_count: int
) -> RWKVContext:
"""
Loads the model from a file and prepares it for inference.
Throws an exception in case of any error. Error messages would be printed to stderr.
Parameters
----------
model_file_path : str
Path to model file in ggml format.
thread_count : int
Count of threads to use, must be positive.
"""
ptr = self.library.rwkv_init_from_file(
model_file_path.encode("utf-8"), ctypes.c_uint32(thread_count)
)
if ptr is None:
raise ValueError("rwkv_init_from_file failed, check stderr")
return RWKVContext(ptr)
def rwkv_gpu_offload_layers(self, ctx: RWKVContext, layer_count: int) -> bool:
"""
Offloads specified count of model layers onto the GPU. Offloaded layers are evaluated using cuBLAS or CLBlast.
For the purposes of this function, model head (unembedding matrix) is treated as an additional layer:
- pass `rwkv_get_n_layer(ctx)` to offload all layers except model head
- pass `rwkv_get_n_layer(ctx) + 1` to offload all layers, including model head
Returns true if at least one layer was offloaded.
If rwkv.cpp was compiled without cuBLAS and CLBlast support, this function is a no-op and always returns false.
Parameters
----------
ctx : RWKVContext
RWKV context obtained from rwkv_init_from_file.
layer_count : int
Count of layers to offload onto the GPU, must be >= 0.
"""
if not (layer_count >= 0):
raise ValueError("Layer count must be >= 0")
return self.library.rwkv_gpu_offload_layers(
ctx.ptr, ctypes.c_uint32(layer_count)
)
def rwkv_eval(
self,
ctx: RWKVContext,
token: int,
state_in_address: Optional[int],
state_out_address: int,
logits_out_address: int,
) -> None:
"""
Evaluates the model for a single token.
Throws an exception in case of any error. Error messages would be printed to stderr.
Not thread-safe. For parallel inference, call rwkv_clone_context to create one rwkv_context for each thread.
Parameters
----------
ctx : RWKVContext
RWKV context obtained from rwkv_init_from_file.
token : int
Next token index, in range 0 <= token < n_vocab.
state_in_address : int
Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count; or None, if this is a first pass.
state_out_address : int
Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count. This buffer will be written to.
logits_out_address : int
Address of the first element of a FP32 buffer of size rwkv_get_logits_buffer_element_count. This buffer will be written to.
"""
if not self.library.rwkv_eval(
ctx.ptr,
ctypes.c_int32(token),
ctypes.cast(0 if state_in_address is None else state_in_address, P_FLOAT),
ctypes.cast(state_out_address, P_FLOAT),
ctypes.cast(logits_out_address, P_FLOAT),
):
raise ValueError("rwkv_eval failed, check stderr")
def rwkv_eval_sequence(
self,
ctx: RWKVContext,
tokens: List[int],
state_in_address: Optional[int],
state_out_address: int,
logits_out_address: int,
) -> None:
"""
Evaluates the model for a sequence of tokens.
Uses a faster algorithm than `rwkv_eval` if you do not need the state and logits for every token. Best used with sequence lengths of 64 or so.
Has to build a computation graph on the first call for a given sequence, but will use this cached graph for subsequent calls of the same sequence length.
NOTE ON GGML NODE LIMIT
ggml has a hard-coded limit on the maximum number of nodes in a computation graph. The sequence graph is built in a way that quickly exceeds
this limit when using large models and/or large sequence lengths.
Fortunately, rwkv.cpp's fork of ggml has an increased limit which was tested to work for sequence lengths up to 64 for 14B models.
If you get `GGML_ASSERT: ...\\ggml.c:16941: cgraph->n_nodes < GGML_MAX_NODES`, this means you've exceeded the limit.
To get rid of the assertion failure, reduce the model size and/or sequence length.
Not thread-safe. For parallel inference, call `rwkv_clone_context` to create one rwkv_context for each thread.
Throws an exception in case of any error. Error messages would be printed to stderr.
Parameters
----------
ctx : RWKVContext
RWKV context obtained from rwkv_init_from_file.
tokens : List[int]
Next token indices, in range 0 <= token < n_vocab.
state_in_address : int
Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count; or None, if this is a first pass.
state_out_address : int
Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count. This buffer will be written to.
logits_out_address : int
Address of the first element of a FP32 buffer of size rwkv_get_logits_buffer_element_count. This buffer will be written to.
"""
if not self.library.rwkv_eval_sequence(
ctx.ptr,
ctypes.cast((ctypes.c_int32 * len(tokens))(*tokens), P_INT),
ctypes.c_size_t(len(tokens)),
ctypes.cast(0 if state_in_address is None else state_in_address, P_FLOAT),
ctypes.cast(state_out_address, P_FLOAT),
ctypes.cast(logits_out_address, P_FLOAT),
):
raise ValueError("rwkv_eval_sequence failed, check stderr")
def rwkv_eval_sequence_in_chunks(
self,
ctx: RWKVContext,
tokens: List[int],
chunk_size: int,
state_in_address: Optional[int],
state_out_address: int,
logits_out_address: int,
) -> None:
"""
Evaluates the model for a sequence of tokens using `rwkv_eval_sequence`, splitting a potentially long sequence into fixed-length chunks.
This function is useful for processing complete prompts and user input in chat & role-playing use-cases.
It is recommended to use this function instead of `rwkv_eval_sequence` to avoid mistakes and get maximum performance.
Chunking allows processing sequences of thousands of tokens without reaching ggml's node limit or consuming too much memory.
A reasonable and recommended value of chunk size is 16. If you want maximum performance, try different chunk sizes in range [2..64]
and choose one that works the best in your use case.
Not thread-safe. For parallel inference, call `rwkv_clone_context` to create one rwkv_context for each thread.
Throws an exception in case of any error. Error messages would be printed to stderr.
Parameters
----------
ctx : RWKVContext
RWKV context obtained from rwkv_init_from_file.
tokens : List[int]
Next token indices, in range 0 <= token < n_vocab.
chunk_size : int
Size of each chunk in tokens, must be positive.
state_in_address : int
Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count; or None, if this is a first pass.
state_out_address : int
Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count. This buffer will be written to.
logits_out_address : int
Address of the first element of a FP32 buffer of size rwkv_get_logits_buffer_element_count. This buffer will be written to.
"""
if not self.library.rwkv_eval_sequence_in_chunks(
ctx.ptr,
ctypes.cast((ctypes.c_int32 * len(tokens))(*tokens), P_INT),
ctypes.c_size_t(len(tokens)),
ctypes.c_size_t(chunk_size),
ctypes.cast(0 if state_in_address is None else state_in_address, P_FLOAT),
ctypes.cast(state_out_address, P_FLOAT),
ctypes.cast(logits_out_address, P_FLOAT),
):
raise ValueError("rwkv_eval_sequence_in_chunks failed, check stderr")
def rwkv_get_arch_version_major(self, ctx: RWKVContext) -> int:
"""
Returns the major version used by the given model.
Parameters
----------
ctx : RWKVContext
RWKV context obtained from rwkv_init_from_file.
"""
return self.library.rwkv_get_arch_version_major(ctx.ptr)
def rwkv_get_arch_version_minor(self, ctx: RWKVContext) -> int:
"""
Returns the minor version used by the given model.
Parameters
----------
ctx : RWKVContext
RWKV context obtained from rwkv_init_from_file.
"""
return self.library.rwkv_get_arch_version_minor(ctx.ptr)
def rwkv_get_n_vocab(self, ctx: RWKVContext) -> int:
"""
Returns the number of tokens in the given model's vocabulary.
Useful for telling 20B_tokenizer models (n_vocab = 50277) apart from World models (n_vocab = 65536).
Parameters
----------
ctx : RWKVContext
RWKV context obtained from rwkv_init_from_file.
"""
return self.library.rwkv_get_n_vocab(ctx.ptr)
def rwkv_get_n_embed(self, ctx: RWKVContext) -> int:
"""
Returns the number of elements in the given model's embedding.
Useful for reading individual fields of a model's hidden state.
Parameters
----------
ctx : RWKVContext
RWKV context obtained from rwkv_init_from_file.
"""
return self.library.rwkv_get_n_embed(ctx.ptr)
def rwkv_get_n_layer(self, ctx: RWKVContext) -> int:
"""
Returns the number of layers in the given model.
A layer is a pair of RWKV and FFN operations, stacked multiple times throughout the model.
Embedding matrix and model head (unembedding matrix) are NOT counted in `n_layer`.
Useful for always offloading the entire model to GPU.
Parameters
----------
ctx : RWKVContext
RWKV context obtained from rwkv_init_from_file.
"""
return self.library.rwkv_get_n_layer(ctx.ptr)
def rwkv_get_state_buffer_element_count(self, ctx: RWKVContext) -> int:
"""
Returns count of FP32 elements in state buffer.
Parameters
----------
ctx : RWKVContext
RWKV context obtained from rwkv_init_from_file.
"""
return self.library.rwkv_get_state_buffer_element_count(ctx.ptr)
def rwkv_get_logits_buffer_element_count(self, ctx: RWKVContext) -> int:
"""
Returns count of FP32 elements in logits buffer.
Parameters
----------
ctx : RWKVContext
RWKV context obtained from rwkv_init_from_file.
"""
return self.library.rwkv_get_logits_buffer_element_count(ctx.ptr)
def rwkv_free(self, ctx: RWKVContext) -> None:
"""
Frees all allocated memory and the context.
Parameters
----------
ctx : RWKVContext
RWKV context obtained from rwkv_init_from_file.
"""
self.library.rwkv_free(ctx.ptr)
ctx.ptr = self.nullptr
def rwkv_quantize_model_file(
self, model_file_path_in: str, model_file_path_out: str, format_name: str
) -> None:
"""
Quantizes FP32 or FP16 model to one of INT4 formats.
Throws an exception in case of any error. Error messages would be printed to stderr.
Parameters
----------
model_file_path_in : str
Path to model file in ggml format, must be either FP32 or FP16.
model_file_path_out : str
Quantized model will be written here.
format_name : str
One of QUANTIZED_FORMAT_NAMES.
"""
if format_name not in QUANTIZED_FORMAT_NAMES:
raise ValueError(
f"Unknown format name {format_name}, use one of {QUANTIZED_FORMAT_NAMES}"
)
if not self.library.rwkv_quantize_model_file(
model_file_path_in.encode("utf-8"),
model_file_path_out.encode("utf-8"),
format_name.encode("utf-8"),
):
raise ValueError("rwkv_quantize_model_file failed, check stderr")
def rwkv_get_system_info_string(self) -> str:
"""
Returns system information string.
"""
return self.library.rwkv_get_system_info_string().decode("utf-8")
def load_rwkv_shared_library() -> RWKVSharedLibrary:
"""
Attempts to find rwkv.cpp shared library and load it.
To specify exact path to the library, create an instance of RWKVSharedLibrary explicitly.
"""
file_name: str
if "win32" in sys.platform or "cygwin" in sys.platform:
file_name = "rwkv.dll"
elif "darwin" in sys.platform:
file_name = "librwkv.dylib"
else:
file_name = "librwkv.so"
# Possible sub-paths to the library relative to the repo dir.
child_paths: List[Callable[[pathlib.Path], pathlib.Path]] = [
# No lookup for Debug config here.
# I assume that if a user wants to debug the library,
# they will be able to find the library and set the exact path explicitly.
lambda p: p / "backend-python" / "rwkv_pip" / "cpp" / file_name,
lambda p: p / "bin" / "Release" / file_name,
lambda p: p / "bin" / file_name,
# Some people prefer to build in the "build" subdirectory.
lambda p: p / "build" / "bin" / "Release" / file_name,
lambda p: p / "build" / "bin" / file_name,
lambda p: p / "build" / file_name,
# Fallback.
lambda p: p / file_name,
]
working_dir: pathlib.Path = pathlib.Path(os.path.abspath(os.getcwd()))
parent_paths: List[pathlib.Path] = [
# Possible repo dirs relative to the working dir.
# ./python/rwkv_cpp
working_dir.parent.parent,
# ./python
working_dir.parent,
# .
working_dir,
# Repo dir relative to this Python file.
pathlib.Path(os.path.abspath(__file__)).parent.parent.parent,
]
for parent_path in parent_paths:
for child_path in child_paths:
full_path: pathlib.Path = child_path(parent_path)
if os.path.isfile(full_path):
return RWKVSharedLibrary(str(full_path))
raise ValueError(
f"Failed to find {file_name} automatically; "
f"you need to find the library and create RWKVSharedLibrary specifying the path to it"
)
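Beyond loading, the wrapper also exposes quantization and a system-info query. A short usage sketch with placeholder paths:

import rwkv_cpp_shared_library

library = rwkv_cpp_shared_library.load_rwkv_shared_library()
print(library.rwkv_get_system_info_string())

# Convert an FP16/FP32 ggml model to one of QUANTIZED_FORMAT_NAMES.
library.rwkv_quantize_model_file(
    "models/rwkv-fp16.bin",   # placeholder input path
    "models/rwkv-Q5_1.bin",   # placeholder output path
    "Q5_1",
)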


@ -1,75 +0,0 @@
#include <cublas_v2.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAContext.h>
#define CUBLAS_CHECK(condition) \
for (cublasStatus_t _cublas_check_status = (condition); \
_cublas_check_status != CUBLAS_STATUS_SUCCESS;) \
throw std::runtime_error("cuBLAS error " + \
std::to_string(_cublas_check_status) + " at " + \
std::to_string(__LINE__));
#define CUDA_CHECK(condition) \
for (cudaError_t _cuda_check_status = (condition); \
_cuda_check_status != cudaSuccess;) \
throw std::runtime_error( \
"CUDA error " + std::string(cudaGetErrorString(_cuda_check_status)) + \
" at " + std::to_string(__LINE__));
/*
NOTE: blas gemm is column-major by default, but we need row-major output.
The data of row-major, transposed matrix is exactly the same as the
column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
*/
void gemm_fp16_cublas(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(a));
const auto cuda_data_type = CUDA_R_16F;
const auto cuda_c_data_type =
c.dtype() == torch::kFloat32 ? CUDA_R_32F : CUDA_R_16F;
const auto compute_type = CUDA_R_32F;
const float sp_alpha = 1.f;
// swap a and b, and use CUBLAS_OP_N. see the notes above
std::swap(a, b);
const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
// m = (B^T).size(0) = B.size(1), and = A.size(1) after swap,
// negative axis is used because of the existence of batch matmul.
const int m = a.size(-1);
const int k = a.size(-2);
const int n = b.size(-2);
const int cublas_lda = m;
const int cublas_ldb = k;
const int cublas_ldc = m;
cublasHandle_t cublas_handle = at::cuda::getCurrentCUDABlasHandle();
#if CUDA_VERSION >= 11000
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
#endif
const float sp_beta = 0.f;
if (a.sizes().size() == 2 && b.sizes().size() == 2) {
CUBLAS_CHECK(cublasGemmEx(
cublas_handle, cublas_trans_a, cublas_trans_b, m, n, k, &sp_alpha,
a.data_ptr(), cuda_data_type, cublas_lda, b.data_ptr(), cuda_data_type,
cublas_ldb, &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc,
compute_type, algo));
} else {
// batch matmul
assert(a.sizes().size() == 3 && b.sizes().size() == 3);
const long long int cublas_stride_a = m * k;
const long long int cublas_stride_b = k * n;
const long long int cublas_stride_c = m * n;
CUBLAS_CHECK(cublasGemmStridedBatchedEx(
cublas_handle, cublas_trans_a, cublas_trans_b, m,
n, k, &sp_alpha, a.data_ptr(), cuda_data_type, cublas_lda,
cublas_stride_a, b.data_ptr(), cuda_data_type, cublas_ldb, cublas_stride_b,
&sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc, cublas_stride_c,
a.size(0), compute_type, algo));
}
}
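The layout note above is just the transpose identity: cuBLAS works in column-major order, and a row-major buffer reinterpreted as column-major is the transpose of the same matrix. Since

C = A B \;\Longrightarrow\; C^{\top} = B^{\top} A^{\top}

swapping the operands (the std::swap(a, b) above) and passing CUBLAS_OP_N for both produces C already laid out row-major, with no explicit transpose kernels.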


@ -1,246 +0,0 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#define MIN_VALUE (-1e38)
typedef at::Half fp16;
__half *cast(fp16 *ptr) {
return reinterpret_cast<__half *>(ptr);
}
template <typename F>
__global__ void kernel_wkv_forward(const int B, const int T, const int C,
const float *__restrict__ const _w, const float *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
F *__restrict__ const _y, float *__restrict__ const _aa, float *__restrict__ const _bb, float *__restrict__ const _pp) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
const int _state_offset = _b * C + _c;
float u = _u[_c];
float w = _w[_c];
const F *__restrict__ const k = _k + _offset;
const F *__restrict__ const v = _v + _offset;
F *__restrict__ const y = _y + _offset;
float aa = _aa[_state_offset];
float bb = _bb[_state_offset];
float pp = _pp[_state_offset];
for (int i = 0; i < T; i++) {
const int ii = i * C;
const float kk = float(k[ii]);
const float vv = float(v[ii]);
float ww = u + kk;
float p = max(pp, ww);
float e1 = exp(pp - p);
float e2 = exp(ww - p);
y[ii] = F((e1 * aa + e2 * vv) / (e1 * bb + e2));
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
_aa[_state_offset] = aa;
_bb[_state_offset] = bb;
_pp[_state_offset] = pp;
}
template <typename F>
void cuda_wkv_forward(int B, int T, int C, float *w, float *u, F *k, F *v, F *y, float *aa, float *bb, float *pp) {
dim3 threadsPerBlock( min(C, 32) );
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_wkv_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, aa, bb, pp);
}
template void cuda_wkv_forward<fp16>(
int B, int T, int C,
float *w, float *u, fp16 *k, fp16 *v, fp16 *y,
float *aa, float *bb, float *pp);
template void cuda_wkv_forward<float>(
int B, int T, int C,
float *w, float *u, float *k, float *v, float *y,
float *aa, float *bb, float *pp);
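Per channel, kernel_wkv_forward evaluates the numerically stabilized WKV recurrence; a transcription of the loop above, with p the running maximum exponent and a, b the running weighted numerator and denominator:

q_t = \max(p_{t-1},\, u + k_t), \qquad
y_t = \frac{e^{\,p_{t-1} - q_t}\, a_{t-1} + e^{\,u + k_t - q_t}\, v_t}{e^{\,p_{t-1} - q_t}\, b_{t-1} + e^{\,u + k_t - q_t}}

p_t = \max(w + p_{t-1},\, k_t), \qquad
a_t = e^{\,w + p_{t-1} - p_t}\, a_{t-1} + e^{\,k_t - p_t}\, v_t, \qquad
b_t = e^{\,w + p_{t-1} - p_t}\, b_{t-1} + e^{\,k_t - p_t}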
__global__ void kernel_mm_seq_fp32i8(
const int B, const int N, const int M,
const float *__restrict__ const x, const int x_stride,
const uint8_t *__restrict__ const w, const int w_stride,
const float *__restrict__ const mx,
const float *__restrict__ const rx,
const float *__restrict__ const my,
const float *__restrict__ const ry,
float *__restrict__ const y, const int y_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int k = blockIdx.y * blockDim.y + threadIdx.y;
if (i < B && k < M) {
float y_local = 0;
for (int j = 0; j < N; ++j) {
y_local += x[i * x_stride + j] * (
(float(w[j * w_stride + k]) + 0.5f)
* rx[k] * ry[j] + mx[k] + my[j]
);
}
y[i * y_stride + k] = y_local;
}
}
template <typename F>
void cuda_mm8_seq(int B, int N, int M,
F *x, int x_stride,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
F *y, int y_stride);
template <>
void cuda_mm8_seq<float>(int B, int N, int M,
float *x, int x_stride,
uint8_t *w, int w_stride,
float *mx, float *rx,
float *my, float *ry,
float *y, int y_stride) {
dim3 blockSize(1, 128);
dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_seq_fp32i8<<<gridSize, blockSize>>>(
B, N, M, x, x_stride, w, w_stride,
mx, rx, my, ry, y, y_stride);
}
__global__ void kernel_mm_seq_fp16i8(
const int B, const int N, const int M,
const __half *__restrict__ const x, const int x_stride,
const uint8_t *__restrict__ const w, const int w_stride,
const __half *__restrict__ const mx,
const __half *__restrict__ const rx,
const __half *__restrict__ const my,
const __half *__restrict__ const ry,
__half *__restrict__ const y, const int y_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int k = blockIdx.y * blockDim.y + threadIdx.y;
if (i < B && k < M) {
float y_local = 0;
for (int j = 0; j < N; ++j) {
y_local += __half2float(x[i * x_stride + j]) * (
(float(w[j * w_stride + k]) + 0.5f)
* __half2float(rx[k]) * __half2float(ry[j])
+ __half2float(mx[k]) + __half2float(my[j])
);
}
y[i * y_stride + k] = __float2half(y_local);
}
}
template <>
void cuda_mm8_seq<fp16>(int B, int N, int M,
fp16 *x, int x_stride,
uint8_t *w, int w_stride,
fp16 *mx, fp16 *rx,
fp16 *my, fp16 *ry,
fp16 *y, int y_stride) {
dim3 blockSize(1, 128);
dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_seq_fp16i8<<<gridSize, blockSize>>>(
B, N, M, cast(x), x_stride, w, w_stride,
cast(mx), cast(rx), cast(my), cast(ry), cast(y), y_stride);
}
#define MM8_ONE_JSPLIT 24
#define MM8_ONE_TILE 1024
__global__ void kernel_mm_one_fp32i8(
const int N, const int M,
const float *__restrict__ const x,
const uint8_t *__restrict__ const w, const int w_stride,
const float *__restrict__ const mx,
const float *__restrict__ const rx,
const float *__restrict__ const my,
const float *__restrict__ const ry,
float *__restrict__ const y) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
if (k < M) {
float y_local = 0;
for (int j = j0; j < j1; ++j) {
y_local += x[j] * (
(float(w[j * w_stride + k]) + 0.5f)
* rx[k] * ry[j] + mx[k] + my[j]
);
}
atomicAdd(&y[k], y_local);
}
}
template <typename F>
void cuda_mm8_one(int N, int M,
F *x,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
float *y);
template <>
void cuda_mm8_one<float>(int N, int M,
float *x,
uint8_t *w, int w_stride,
float *mx, float *rx,
float *my, float *ry,
float *y) {
dim3 blockSize(1, MM8_ONE_TILE);
dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_one_fp32i8<<<gridSize, blockSize>>>(
N, M, x, w, w_stride,
mx, rx, my, ry, y);
}
__global__ void kernel_mm_one_fp16i8(
const int N, const int M,
const __half *__restrict__ const x,
const uint8_t *__restrict__ const w, const int w_stride,
const __half *__restrict__ const mx,
const __half *__restrict__ const rx,
const __half *__restrict__ const my,
const __half *__restrict__ const ry,
float *__restrict__ const y) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
if (k < M) {
float y_local = 0;
for (int j = j0; j < j1; ++j) {
y_local += __half2float(x[j]) * (
(float(w[j * w_stride + k]) + 0.5f)
* __half2float(rx[k]) * __half2float(ry[j])
+ __half2float(mx[k]) + __half2float(my[j])
);
}
atomicAdd(&y[k], y_local);
}
}
template <>
void cuda_mm8_one<fp16>(int N, int M,
fp16 *x,
uint8_t *w, int w_stride,
fp16 *mx, fp16 *rx,
fp16 *my, fp16 *ry,
float *y) {
dim3 blockSize(1, MM8_ONE_TILE);
dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_one_fp16i8<<<gridSize, blockSize>>>(
N, M, cast(x), w, w_stride,
cast(mx), cast(rx), cast(my), cast(ry), y);
}
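Both mm8 kernel families dequantize on the fly: the uint8 weight is rescaled by the per-column/per-row vectors (rx, ry) and offset by (mx, my) inside the dot product. Written out, for the sequence and single-vector variants respectively:

y_{ik} = \sum_{j} x_{ij}\,\bigl[(w_{jk} + 0.5)\, rx_k\, ry_j + mx_k + my_j\bigr],
\qquad
y_{k} = \sum_{j} x_{j}\,\bigl[(w_{jk} + 0.5)\, rx_k\, ry_j + mx_k + my_j\bigr]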


@ -1,88 +0,0 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;
template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C, const int H, float *__restrict__ _state,
const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
F *__restrict__ const _y)
{
const int b = blockIdx.x / H;
const int h = blockIdx.x % H;
const int i = threadIdx.x;
_w += h*_N_;
_u += h*_N_;
_state += h*_N_*_N_ + i*_N_; // wrong if B > 1 !!!
__shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
float state[_N_];
#pragma unroll
for (int j = 0; j < _N_; j++)
state[j] = _state[j];
__syncthreads();
u[i] = float(_u[i]);
w[i] = _w[i];
__syncthreads();
for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
{
__syncthreads();
r[i] = float(_r[t]);
k[i] = float(_k[t]);
__syncthreads();
const float v = float(_v[t]);
float y = 0;
#pragma unroll
for (int j = 0; j < _N_; j+=4)
{
const float4& r_ = (float4&)(r[j]);
const float4& k_ = (float4&)(k[j]);
const float4& w_ = (float4&)(w[j]);
const float4& u_ = (float4&)(u[j]);
float4& s = (float4&)(state[j]);
float4 x;
x.x = k_.x * v;
x.y = k_.y * v;
x.z = k_.z * v;
x.w = k_.w * v;
y += r_.x * (u_.x * x.x + s.x);
y += r_.y * (u_.y * x.y + s.y);
y += r_.z * (u_.z * x.z + s.z);
y += r_.w * (u_.w * x.w + s.w);
s.x = s.x * w_.x + x.x;
s.y = s.y * w_.y + x.y;
s.z = s.z * w_.z + x.z;
s.w = s.w * w_.w + x.w;
}
_y[t] = F(y);
}
#pragma unroll
for (int j = 0; j < _N_; j++)
_state[j] = state[j];
}
void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
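A per-head restatement of the recurrence computed by kernel_forward above, as an informal PyTorch sketch (batch handling aside; note the kernel's own comment about B > 1). Each of the H heads keeps an N x N state S; the names below are mine, not the kernel's:

import torch

def rwkv5_head_forward(r, k, v, w, u, S):
    # r, k, v: (T, N); w, u: (N,) per-head decay and bonus; S: (N, N) state, S[i, j]
    T, N = r.shape
    y = torch.empty(T, N)
    for t in range(T):
        y[t] = v[t] * (r[t] * u * k[t]).sum() + S @ r[t]   # y_t[i] = sum_j r_j * (u_j k_j v_i + S_ij)
        S = S * w + torch.outer(v[t], k[t])                # S_ij <- S_ij * w_j + k_j * v_i
    return y, S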

View File

@ -1,34 +0,0 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <c10/cuda/CUDAGuard.h>
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;
void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y);
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y);
void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), w.data_ptr<float>(), u.data_ptr<fp16>(), y.data_ptr<fp16>());
}
void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), w.data_ptr<float>(), u.data_ptr<fp32>(), y.data_ptr<fp32>());
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward_bf16", &forward_bf16, "rwkv5 forward_bf16");
m.def("forward_fp16", &forward_fp16, "rwkv5 forward_fp16");
m.def("forward_fp32", &forward_fp32, "rwkv5 forward_fp32");
}
TORCH_LIBRARY(rwkv5, m) {
m.def("forward_bf16", forward_bf16);
m.def("forward_fp16", forward_fp16);
m.def("forward_fp32", forward_fp32);
}

View File

@ -1,87 +0,0 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;
template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C, const int H, float *__restrict__ _state,
const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
F *__restrict__ const _y)
{
const int b = blockIdx.x / H;
const int h = blockIdx.x % H;
const int i = threadIdx.x;
_u += h*_N_;
_state += h*_N_*_N_ + i*_N_; // wrong if B > 1 !!!
__shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
float state[_N_];
#pragma unroll
for (int j = 0; j < _N_; j++)
state[j] = _state[j];
__syncthreads();
u[i] = float(_u[i]);
__syncthreads();
for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
{
__syncthreads();
w[i] = _w[t];
r[i] = float(_r[t]);
k[i] = float(_k[t]);
__syncthreads();
const float v = float(_v[t]);
float y = 0;
#pragma unroll
for (int j = 0; j < _N_; j+=4)
{
const float4& r_ = (float4&)(r[j]);
const float4& k_ = (float4&)(k[j]);
const float4& w_ = (float4&)(w[j]);
const float4& u_ = (float4&)(u[j]);
float4& s = (float4&)(state[j]);
float4 x;
x.x = k_.x * v;
x.y = k_.y * v;
x.z = k_.z * v;
x.w = k_.w * v;
y += r_.x * (u_.x * x.x + s.x);
y += r_.y * (u_.y * x.y + s.y);
y += r_.z * (u_.z * x.z + s.z);
y += r_.w * (u_.w * x.w + s.w);
s.x = s.x * w_.x + x.x;
s.y = s.y * w_.y + x.y;
s.z = s.z * w_.z + x.z;
s.w = s.w * w_.w + x.w;
}
_y[t] = F(y);
}
#pragma unroll
for (int j = 0; j < _N_; j++)
_state[j] = state[j];
}
void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
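The RWKV-6 kernel above differs from the RWKV-5 one only in that the decay is read per token (w[i] = _w[t] inside the time loop) instead of once per head. In the per-head sketch given after the RWKV-5 kernel, that corresponds to w having shape (T, N) and the state update becoming, with the same caveats:

    S = S * w[t] + torch.outer(v[t], k[t])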

View File

@ -1,34 +0,0 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <c10/cuda/CUDAGuard.h>
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;
void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y);
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y);
void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), w.data_ptr<float>(), u.data_ptr<fp16>(), y.data_ptr<fp16>());
}
void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), w.data_ptr<float>(), u.data_ptr<fp32>(), y.data_ptr<fp32>());
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward_bf16", &forward_bf16, "rwkv6 forward_bf16");
m.def("forward_fp16", &forward_fp16, "rwkv6 forward_fp16");
m.def("forward_fp32", &forward_fp32, "rwkv6 forward_fp32");
}
TORCH_LIBRARY(rwkv6, m) {
m.def("forward_bf16", forward_bf16);
m.def("forward_fp16", forward_fp16);
m.def("forward_fp32", forward_fp32);
}

View File

@ -1,141 +0,0 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <iostream>
#include <c10/cuda/CUDAGuard.h>
typedef at::Half fp16;
template <typename F>
void cuda_wkv_forward(int B, int T, int C,
float *w, float *u, F *k, F *v, F *y,
float *aa, float *bb, float *pp);
template <typename F>
void cuda_mm8_seq(int B, int N, int M,
F *x, int x_stride,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
F *y, int y_stride);
template <typename F>
void cuda_mm8_one(int N, int M,
F *x,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
float *y);
void wkv_forward(int64_t B, int64_t T, int64_t C,
torch::Tensor &w, torch::Tensor &u,
torch::Tensor &k, torch::Tensor &v, torch::Tensor &y,
torch::Tensor &aa, torch::Tensor &bb, torch::Tensor &pp) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (k.scalar_type()) {
case c10::ScalarType::Half:
cuda_wkv_forward(B, T, C,
w.data_ptr<float>(), u.data_ptr<float>(),
k.data_ptr<fp16>(), v.data_ptr<fp16>(), y.data_ptr<fp16>(),
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
break;
case c10::ScalarType::Float:
cuda_wkv_forward(B, T, C,
w.data_ptr<float>(), u.data_ptr<float>(),
k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(),
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
void mm8_seq(int64_t B, int64_t N, int64_t M,
torch::Tensor &x, torch::Tensor &w,
torch::Tensor &mx, torch::Tensor &rx,
torch::Tensor &my, torch::Tensor &ry,
torch::Tensor &y) {
assert(x.stride(1) == 1);
assert(w.stride(1) == 1);
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
assert(my.stride(0) == 1 && ry.stride(0) == 1);
assert(y.stride(1) == 1);
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (x.scalar_type()) {
case c10::ScalarType::Half:
cuda_mm8_seq(
B, N, M,
x.data_ptr<fp16>(), x.stride(0),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
y.data_ptr<fp16>(), y.stride(0));
break;
case c10::ScalarType::Float:
cuda_mm8_seq(
B, N, M,
x.data_ptr<float>(), x.stride(0),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<float>(), rx.data_ptr<float>(),
my.data_ptr<float>(), ry.data_ptr<float>(),
y.data_ptr<float>(), y.stride(0));
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
void mm8_one(int64_t N, int64_t M,
torch::Tensor &x, torch::Tensor &w,
torch::Tensor &mx, torch::Tensor &rx,
torch::Tensor &my, torch::Tensor &ry,
torch::Tensor &y) {
assert(x.stride(0) == 1);
assert(w.stride(1) == 1);
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
assert(my.stride(0) == 1 && ry.stride(0) == 1);
assert(y.stride(0) == 1);
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (x.scalar_type()) {
case c10::ScalarType::Half:
cuda_mm8_one(
N, M,
x.data_ptr<fp16>(),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
y.data_ptr<float>());
break;
case c10::ScalarType::Float:
cuda_mm8_one(
N, M,
x.data_ptr<float>(),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<float>(), rx.data_ptr<float>(),
my.data_ptr<float>(), ry.data_ptr<float>(),
y.data_ptr<float>());
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
using torch::Tensor;
#ifndef DISABLE_CUBLAS_GEMM
void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
#endif
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("wkv_forward", &wkv_forward, "wkv forward");
m.def("mm8_seq", &mm8_seq, "mm8 seq");
m.def("mm8_one", &mm8_one, "mm8 one");
#ifndef DISABLE_CUBLAS_GEMM
m.def("gemm_fp16_cublas", &gemm_fp16_cublas, "gemv fp16 cublas");
#endif
}
TORCH_LIBRARY(rwkv, m) {
m.def("wkv_forward", wkv_forward);
m.def("mm8_seq", mm8_seq);
m.def("mm8_one", mm8_one);
#ifndef DISABLE_CUBLAS_GEMM
m.def("gemm_fp16_cublas", gemm_fp16_cublas);
#endif
}
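Because the same functions are registered both through pybind11 and TORCH_LIBRARY(rwkv, ...), they are reachable as torch.ops.rwkv.* once the extension is built. A minimal sketch of how rwkv_pip/model.py (later in this diff) builds and calls them; the sizes, paths and identity offsets/scales below are illustrative only:

from torch.utils.cpp_extension import load
import torch

# Build once; is_python_module=False registers the ops under torch.ops.rwkv
load(name="wkv_cuda",
     sources=["cuda/wrapper.cpp", "cuda/operators.cu"],
     is_python_module=False, verbose=True)

N, M = 1024, 4096
x = torch.randn(N, device="cuda", dtype=torch.float16)
w = torch.randint(0, 256, (N, M), device="cuda", dtype=torch.uint8)
mx = torch.zeros(M, device="cuda", dtype=torch.float16)   # identity offsets/scales,
rx = torch.ones(M, device="cuda", dtype=torch.float16)    # just to exercise the op
my = torch.zeros(N, 1, device="cuda", dtype=torch.float16)
ry = torch.ones(N, 1, device="cuda", dtype=torch.float16)
y = torch.zeros(M, device="cuda", dtype=torch.float32)    # mm8_one accumulates into fp32
torch.ops.rwkv.mm8_one(N, M, x, w, mx, rx, my, ry, y)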

File diff suppressed because it is too large

Binary file not shown.

Binary file not shown.

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -16,7 +16,6 @@ class PIPELINE_ARGS:
top_k=0,
alpha_frequency=0.2,
alpha_presence=0.2,
alpha_decay=0.996,
token_ban=[],
token_stop=[],
chunk_len=256,
@ -26,7 +25,6 @@ class PIPELINE_ARGS:
self.top_k = top_k
self.alpha_frequency = alpha_frequency # Frequency Penalty (as in GPT-3)
self.alpha_presence = alpha_presence # Presence Penalty (as in GPT-3)
self.alpha_decay = alpha_decay # gradually decay the penalty
self.token_ban = token_ban # ban the generation of some tokens
self.token_stop = token_stop # stop generation whenever you see any token here
self.chunk_len = (
@ -34,27 +32,8 @@ class PIPELINE_ARGS:
)
class ABC_TOKENIZER:
def __init__(self):
self.pad_token_id = 0
self.bos_token_id = 2
self.eos_token_id = 3
def encode(self, text):
ids = [ord(c) for c in text]
return ids
def decode(self, ids):
txt = "".join(
chr(idx) if idx > self.eos_token_id else ""
for idx in ids
if idx != self.eos_token_id
)
return txt
class PIPELINE:
def __init__(self, model, WORD_NAME: str):
def __init__(self, model, WORD_NAME):
self.model = model
if WORD_NAME == "cl100k_base":
import tiktoken
@ -67,18 +46,10 @@ class PIPELINE:
self.tokenizer = TRIE_TOKENIZER(
os.path.dirname(os.path.abspath(__file__)) + "/rwkv_vocab_v20230424.txt"
)
elif WORD_NAME == "abc_tokenizer":
self.tokenizer = ABC_TOKENIZER()
else:
if WORD_NAME.endswith(".txt"):
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from rwkv_tokenizer import TRIE_TOKENIZER
from tokenizers import Tokenizer
self.tokenizer = TRIE_TOKENIZER(WORD_NAME)
else:
from tokenizers import Tokenizer
self.tokenizer = Tokenizer.from_file(WORD_NAME)
self.tokenizer = Tokenizer.from_file(WORD_NAME)
def refine_context(self, context):
context = context.strip().split("\n")
@ -99,28 +70,15 @@ class PIPELINE:
def decode(self, x):
return self.tokenizer.decode(x)
def np_softmax(self, x: np.ndarray, axis: int):
x -= x.max(axis=axis, keepdims=True)
e: np.ndarray = np.exp(x)
return e / e.sum(axis=axis, keepdims=True)
def sample_logits(self, logits, temperature=1.0, top_p=0.85, top_k=0):
if type(logits) == list:
logits = np.array(logits)
np_logits = type(logits) == np.ndarray
if np_logits:
probs = self.np_softmax(logits, axis=-1)
else:
probs = F.softmax(logits.float(), dim=-1)
probs = F.softmax(logits.float(), dim=-1)
top_k = int(top_k)
# 'privateuseone' is the type of custom devices like `torch_directml.device()`
if np_logits or probs.device.type in ["cpu", "privateuseone"]:
if not np_logits:
probs = probs.cpu().numpy()
if probs.device == torch.device("cpu"):
probs = probs.numpy()
sorted_ids = np.argsort(probs)
sorted_probs = probs[sorted_ids][::-1]
cumulative_probs = np.cumsum(sorted_probs)
cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
cutoff = float(sorted_probs[np.argmax(cumulative_probs > top_p)])
probs[probs < cutoff] = 0
if top_k < len(probs) and top_k > 0:
probs[sorted_ids[:-top_k]] = 0
@ -134,7 +92,7 @@ class PIPELINE:
sorted_probs = probs[sorted_ids]
sorted_probs = torch.flip(sorted_probs, dims=(0,))
cumulative_probs = torch.cumsum(sorted_probs, dim=-1).cpu().numpy()
cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
cutoff = float(sorted_probs[np.argmax(cumulative_probs > top_p)])
probs[probs < cutoff] = 0
if top_k < len(probs) and top_k > 0:
probs[sorted_ids[:-top_k]] = 0
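The sampling cutoff shown above, pulled out as a standalone NumPy sketch of the NumPy/CPU branch (using the ">= top_p" variant of the cutoff; temperature scaling, which the full method also applies before drawing, is omitted here):

import numpy as np

def sample_top_p_top_k(probs: np.ndarray, top_p: float = 0.85, top_k: int = 0) -> int:
    sorted_ids = np.argsort(probs)
    sorted_probs = probs[sorted_ids][::-1]               # descending
    cumulative_probs = np.cumsum(sorted_probs)
    cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
    probs = probs.copy()
    probs[probs < cutoff] = 0                            # drop the tail outside the nucleus
    if 0 < top_k < len(probs):
        probs[sorted_ids[:-top_k]] = 0                   # keep only the top_k candidates
    probs = probs / probs.sum()
    return int(np.random.choice(len(probs), p=probs))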
@ -169,20 +127,10 @@ class PIPELINE:
if token in args.token_stop:
break
all_tokens += [token]
for xxx in occurrence:
occurrence[xxx] *= args.alpha_decay
ttt = self.decode([token])
www = 1
if ttt in " \t0123456789":
www = 0
# elif ttt in '\r\n,.;?!"\':+-*/=#@$%^&_`~|<>\\()[]{},。;“”:?!()【】':
# www = 0.5
if token not in occurrence:
occurrence[token] = www
occurrence[token] = 1
else:
occurrence[token] += www
# print(occurrence) # debug
occurrence[token] += 1
# output
tmp = self.decode(all_tokens[out_last:])

View File

@ -1,50 +0,0 @@
from typing import Any, List, Union
try:
import web_rwkv_py as wrp
except ModuleNotFoundError:
try:
from . import web_rwkv_py as wrp
except ImportError:
raise ModuleNotFoundError(
"web_rwkv_py not found, install it from https://github.com/cryscan/web-rwkv-py"
)
class RWKV:
def __init__(self, model_path: str, strategy: str = None):
layer = (
int(s.lstrip("layer"))
for s in strategy.split()
for s in s.split(",")
if s.startswith("layer")
)
chunk_size = (
int(s.lstrip("chunk"))
for s in strategy.split()
for s in s.split(",")
if s.startswith("chunk")
)
self.token_chunk_size = next(chunk_size, 32)
args = {
"path": model_path,
"quant": next(layer, 31) if "i8" in strategy else 0,
"quant_nf4": next(layer, 26) if "i4" in strategy else 0,
}
self.model = wrp.Model(**args)
self.info = self.model.info()
self.w = {} # fake weight
self.w["emb.weight"] = [0] * self.info.num_vocab
self.version = str(self.info.version).lower()
self.version = float(self.version.lower().replace("v", ""))
def forward(self, tokens: List[int], state: Union[Any, None] = None):
if state is None:
self.model.clear_state()
elif type(state).__name__ == "State_Cpu":
self.model.load_state(state)
logits = self.model.run(tokens, self.token_chunk_size)
ret_state = "State_Gpu"
return logits, ret_state
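A hypothetical usage sketch for this wrapper; the strategy keywords are inferred from the parsing above ("i8"/"i4" select the quantization format, "layerN" the number of quantized layers, "chunkN" the token chunk size), and the model path is purely illustrative:

# Illustrative only -- the path and strategy string are hypothetical examples
model = RWKV("path/to/model", strategy="cuda fp16 i8 layer31 chunk64")
logits, state = model.forward([0, 1, 2], state=None)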

Binary file not shown.

View File

@ -2,35 +2,24 @@ import json
import logging
from typing import Any
from fastapi import Request
from pydantic import BaseModel
from enum import Enum
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(levelname)s\n%(message)s")
fh = logging.handlers.RotatingFileHandler(
"api.log", mode="a", maxBytes=3 * 1024 * 1024, backupCount=3, encoding="utf-8"
"api.log", mode="a", maxBytes=3 * 1024 * 1024, backupCount=3
)
fh.setFormatter(formatter)
logger.addHandler(fh)
class ClsEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, BaseModel):
return obj.dict()
if isinstance(obj, Enum):
return obj.value
return super().default(obj)
def quick_log(request: Request, body: Any, response: str):
try:
logger.info(
f"Client: {request.client if request else ''}\nUrl: {request.url if request else ''}\n"
+ (
f"Body: {json.dumps(body.__dict__, ensure_ascii=False, cls=ClsEncoder)}\n"
f"Body: {json.dumps(body.__dict__, default=vars, ensure_ascii=False)}\n"
if body
else ""
)

View File

@ -52,8 +52,6 @@ class VocabConfig:
bin_name_to_program_name: Dict[str, str]
# Mapping from program number to instrument name.
instrument_names: Dict[str, str]
# Manual override for velocity bins. Each element is the max velocity value for that bin by index.
velocity_bins_override: Optional[List[int]] = None
def __post_init__(self):
self.validate()
@ -118,12 +116,6 @@ class VocabConfig:
raise ValueError("velocity_bins must be at least 2")
if len(self.bin_instrument_names) > 16:
raise ValueError("bin_instruments must have at most 16 values")
if self.velocity_bins_override:
print("VocabConfig is using velocity_bins_override. Ignoring velocity_exp.")
if len(self.velocity_bins_override) != self.velocity_bins:
raise ValueError(
"velocity_bins_override must have same length as velocity_bins"
)
if (
self.ch10_instrument_bin_name
and self.ch10_instrument_bin_name not in self.bin_instrument_names
@ -164,11 +156,6 @@ class VocabUtils:
def velocity_to_bin(self, velocity: float) -> int:
velocity = max(0, min(velocity, self.cfg.velocity_events - 1))
if self.cfg.velocity_bins_override:
for i, v in enumerate(self.cfg.velocity_bins_override):
if velocity <= v:
return i
return 0
binsize = self.cfg.velocity_events / (self.cfg.velocity_bins - 1)
if self.cfg.velocity_exp == 1.0:
return ceil(velocity / binsize)
@ -189,8 +176,6 @@ class VocabUtils:
)
def bin_to_velocity(self, bin: int) -> int:
if self.cfg.velocity_bins_override:
return self.cfg.velocity_bins_override[bin]
binsize = self.cfg.velocity_events / (self.cfg.velocity_bins - 1)
if self.cfg.velocity_exp == 1.0:
return max(0, ceil(bin * binsize - 1))
@ -373,32 +358,13 @@ class AugmentConfig:
)
@dataclass
class FilterConfig:
# Whether to filter out MIDI files with duplicate MD5 hashes.
deduplicate_md5: bool
# Minimum time delay between notes in a file before splitting into multiple documents.
piece_split_delay: float
# Minimum length of a piece in milliseconds.
min_piece_length: float
@classmethod
def from_json(cls, path: str):
with open(path, "r") as f:
config = json.load(f)
return cls(**config)
def mix_volume(velocity: int, volume: int, expression: int) -> float:
return velocity * (volume / 127.0) * (expression / 127.0)
def convert_midi_to_str(
cfg: VocabConfig,
filter_cfg: FilterConfig,
mid: mido.MidiFile,
augment: AugmentValues = None,
) -> List[str]:
cfg: VocabConfig, mid: mido.MidiFile, augment: AugmentValues = None
) -> str:
utils = VocabUtils(cfg)
if augment is None:
augment = AugmentValues.default()
@ -424,9 +390,7 @@ def convert_midi_to_str(
} # {channel: {(note, program) -> True}}
started_flag = False
output_list = []
output = ["<start>"]
output_length_ms = 0.0
token_data_buffer: List[
Tuple[int, int, int, float]
] = [] # need to sort notes between wait tokens
@ -468,33 +432,16 @@ def convert_midi_to_str(
token_data_buffer = []
def consume_note_program_data(prog: int, chan: int, note: int, vel: float):
nonlocal output, output_length_ms, started_flag, delta_time_ms, cfg, utils, token_data_buffer
nonlocal output, started_flag, delta_time_ms, cfg, utils, token_data_buffer
is_token_valid = (
utils.prog_data_to_token_data(prog, chan, note, vel) is not None
)
if not is_token_valid:
return
if delta_time_ms > filter_cfg.piece_split_delay * 1000.0:
# check if any notes are still held
silent = True
for channel in channel_notes.keys():
if len(channel_notes[channel]) > 0:
silent = False
break
if silent:
flush_token_data_buffer()
output.append("<end>")
if output_length_ms > filter_cfg.min_piece_length * 1000.0:
output_list.append(" ".join(output))
output = ["<start>"]
output_length_ms = 0.0
started_flag = False
if started_flag:
wait_tokens = utils.data_to_wait_tokens(delta_time_ms)
if len(wait_tokens) > 0:
flush_token_data_buffer()
output_length_ms += delta_time_ms
output += wait_tokens
delta_time_ms = 0.0
token_data_buffer.append((prog, chan, note, vel * augment.velocity_mod_factor))
@ -563,9 +510,7 @@ def convert_midi_to_str(
flush_token_data_buffer()
output.append("<end>")
if output_length_ms > filter_cfg.min_piece_length * 1000.0:
output_list.append(" ".join(output))
return output_list
return " ".join(output)
def generate_program_change_messages(cfg: VocabConfig):
@ -688,10 +633,10 @@ def token_to_midi_message(
if utils.cfg.decode_fix_repeated_notes:
if (channel, note) in state.active_notes:
del state.active_notes[(channel, note)]
yield mido.Message(
"note_off", note=note, time=ticks, channel=channel
), state
ticks = 0
yield mido.Message(
"note_off", note=note, time=ticks, channel=channel
), state
ticks = 0
state.active_notes[(channel, note)] = state.total_time
yield mido.Message(
"note_on", note=note, velocity=velocity, time=ticks, channel=channel

View File

@ -1,5 +0,0 @@
{
"deduplicate_md5": true,
"piece_split_delay": 10000,
"min_piece_length": 0
}

View File

@ -1,13 +1,11 @@
import os
import global_var
import sys
def ngrok_connect():
from pyngrok import ngrok, conf
conf.set_default(
conf.PyngrokConfig(ngrok_path="./ngrok.exe" if os.name == "nt" else "./ngrok")
)
conf.set_default(conf.PyngrokConfig(ngrok_path="./ngrok"))
ngrok.set_auth_token(os.environ["ngrok_token"])
http_tunnel = ngrok.connect(global_var.get(global_var.Args).port)
print(f"ngrok url: {http_tunnel.public_url}")
http_tunnel = ngrok.connect(8000 if len(sys.argv) == 1 else int(sys.argv[1]))
print(http_tunnel.public_url)

View File

@ -1,40 +1,34 @@
from abc import ABC, abstractmethod
from enum import Enum, auto
import os
import pathlib
import copy
import re
import time
from typing import Dict, Iterable, List, Tuple, Union, Type, Callable
from typing import Dict, Iterable, List, Tuple
from utils.log import quick_log
from fastapi import HTTPException, status
from fastapi import HTTPException
from pydantic import BaseModel, Field
import numpy as np
from routes import state_cache
import global_var
END_OF_TEXT = 0
END_OF_LINE_DOUBLE = 535
os.environ["TORCH_EXTENSIONS_DIR"] = f"{pathlib.Path(__file__).parent.parent.resolve()}"
class RWKVType(Enum):
NoneType = auto()
Raven = auto()
World = auto()
Music = auto()
class AbstractRWKV(ABC):
def __init__(self, model, pipeline):
self.EOS_ID = 0
def __init__(self, model: str, strategy: str, tokens_path: str):
from rwkv.model import RWKV as Model # dynamic import to make RWKV_CUDA_ON work
from rwkv_pip.utils import PIPELINE
self.name = "rwkv"
self.model_path = ""
self.version = 4
self.model = model
self.pipeline = pipeline
filename, _ = os.path.splitext(os.path.basename(model))
self.name = filename
self.model = Model(model, strategy)
self.pipeline = PIPELINE(self.model, tokens_path)
self.model_state = None
self.model_tokens = []
self.rwkv_type: RWKVType = RWKVType.NoneType
self.tokenizer_len = len(model.w["emb.weight"])
self.max_tokens_per_generation = 500
self.temperature = 1
@ -42,10 +36,6 @@ class AbstractRWKV(ABC):
self.top_k = 0
self.penalty_alpha_presence = 0
self.penalty_alpha_frequency = 1
self.penalty_decay = 0.99
self.global_penalty = False
self.state_path = ""
self.state_tuned = None
@abstractmethod
def adjust_occurrence(self, occurrence: Dict, token: int):
@ -71,8 +61,6 @@ class AbstractRWKV(ABC):
pass
def get_embedding(self, input: str, fast_mode: bool) -> Tuple[List[float], int]:
import numpy as np
if fast_mode:
embedding, token_len = self.__fast_embedding(
self.fix_tokens(self.pipeline.encode(input)), None
@ -225,10 +213,8 @@ class AbstractRWKV(ABC):
return state[0].tolist(), token_len
def generate(
self, prompt: str, stop: Union[str, List[str], None] = None
self, prompt: str, stop: str | List[str] = None
) -> Iterable[Tuple[str, str, int, int]]:
import numpy as np
quick_log(None, None, "Generation Prompt:\n" + prompt)
cache = None
delta_prompt = prompt
@ -238,30 +224,20 @@ class AbstractRWKV(ABC):
)
except HTTPException:
pass
if cache is None or cache["prompt"] == "" or cache["state"] is None:
if self.state_path:
self.model_state = copy.deepcopy(self.state_tuned)
else:
self.model_state = None
if cache is None or cache["prompt"] == "":
self.model_state = None
self.model_tokens = []
else:
delta_prompt = prompt[len(cache["prompt"]) :]
self.model_state = cache["state"]
self.model_tokens = cache["tokens"]
logits = cache["logits"]
self.model_state = copy.deepcopy(cache["state"])
self.model_tokens = copy.deepcopy(cache["tokens"])
logits = copy.deepcopy(cache["logits"])
prompt_token_len = 0
if delta_prompt != "":
prompt_start_time = time.time()
logits, prompt_token_len = self.run_rnn(
self.fix_tokens(self.pipeline.encode(delta_prompt))
)
prompt_end_time = time.time()
prompt_interval = prompt_end_time - prompt_start_time
tps = 0
if prompt_interval > 0:
tps = prompt_token_len / prompt_interval
print(f"Prompt Prefill TPS: {tps:.2f}", end=" ", flush=True)
try:
state_cache.add_state(
state_cache.AddStateBody(
@ -288,18 +264,7 @@ class AbstractRWKV(ABC):
logits, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k
)
if token == self.EOS_ID:
try:
state_cache.add_state(
state_cache.AddStateBody(
prompt=prompt + response,
tokens=self.model_tokens,
state=self.model_state,
logits=logits,
)
)
except HTTPException:
pass
if token == END_OF_TEXT:
yield response, "", prompt_token_len, completion_token_len
break
@ -330,25 +295,22 @@ class AbstractRWKV(ABC):
yield response, "", prompt_token_len, completion_token_len
break
elif type(stop) == list:
exit_flag = False
for s in stop:
if s in response:
try:
state_cache.add_state(
state_cache.AddStateBody(
prompt=prompt + response,
tokens=self.model_tokens,
state=self.model_state,
logits=logits,
)
stop_exist_regex = "|".join(stop)
matched = re.search(stop_exist_regex, response)
if matched:
try:
state_cache.add_state(
state_cache.AddStateBody(
prompt=prompt + response,
tokens=self.model_tokens,
state=self.model_state,
logits=logits,
)
except HTTPException:
pass
exit_flag = True
response = response.split(s)[0]
yield response, "", prompt_token_len, completion_token_len
break
if exit_flag:
)
except HTTPException:
pass
response = response.split(matched.group())[0]
yield response, "", prompt_token_len, completion_token_len
break
out_last = begin + i + 1
if i == self.max_tokens_per_generation - 1:
@ -367,8 +329,8 @@ class AbstractRWKV(ABC):
class TextRWKV(AbstractRWKV):
def __init__(self, model, pipeline) -> None:
super().__init__(model, pipeline)
def __init__(self, model: str, strategy: str, tokens_path: str) -> None:
super().__init__(model, strategy, tokens_path)
self.CHUNK_LEN = 256
@ -380,35 +342,27 @@ class TextRWKV(AbstractRWKV):
self.penalty_alpha_frequency = 1
self.interface = ":"
if self.tokenizer_len < 65536:
self.rwkv_type = RWKVType.Raven
if "world" in self.name.lower():
self.user = "Question"
self.bot = "Answer"
self.END_OF_LINE = 11
else:
self.user = "Bob"
self.bot = "Alice"
self.END_OF_LINE = 187
else:
self.rwkv_type = RWKVType.World
self.user = "User"
self.bot = "Assistant"
self.END_OF_LINE = 11
self.AVOID_REPEAT_TOKENS = set()
self.AVOID_REPEAT_TOKENS = []
AVOID_REPEAT = ""
for i in AVOID_REPEAT:
dd = self.pipeline.encode(i)
assert len(dd) == 1
self.AVOID_REPEAT_TOKENS.add(dd[0])
self.AVOID_PENALTY_TOKENS = set()
AVOID_PENALTY = '\n,.:?!,。:?!"“”<>[]{}/\\|;~`@#$%^&*()_+-=0123456789 '
for i in AVOID_PENALTY:
dd = self.pipeline.encode(i)
if len(dd) == 1:
self.AVOID_PENALTY_TOKENS.add(dd[0])
self.AVOID_REPEAT_TOKENS += dd
self.__preload()
def adjust_occurrence(self, occurrence: Dict, token: int):
for xxx in occurrence:
occurrence[xxx] *= self.penalty_decay
occurrence[xxx] *= 0.996
if token not in occurrence:
occurrence[token] = 1
else:
@ -416,24 +370,16 @@ class TextRWKV(AbstractRWKV):
def adjust_forward_logits(self, logits: List[float], occurrence: Dict, i: int):
for n in occurrence:
# if n not in self.AVOID_PENALTY_TOKENS:
logits[n] -= (
self.penalty_alpha_presence
+ occurrence[n] * self.penalty_alpha_frequency
)
# set global_penalty to False to get the same generated results as the official RWKV Gradio
if self.global_penalty and i == 0:
for token in self.model_tokens:
token = int(token)
if token not in self.AVOID_PENALTY_TOKENS:
self.adjust_occurrence(occurrence, token)
# Model only saw '\n\n' as [187, 187] before, but the tokenizer outputs [535] for it at the end
def fix_tokens(self, tokens) -> List[int]:
if self.rwkv_type == RWKVType.World:
if "world" in self.name.lower():
return tokens
if len(tokens) > 0 and tokens[-1] == 535:
if len(tokens) > 0 and tokens[-1] == END_OF_LINE_DOUBLE:
tokens = tokens[:-1] + [self.END_OF_LINE, self.END_OF_LINE]
return tokens
@ -471,11 +417,9 @@ The following is a coherent verbose detailed conversation between a girl named {
{bot} likes to tell {user} a lot about herself and her opinions. \
{bot} usually gives {user} kind, helpful and informative advices.\n
"""
if self.rwkv_type == RWKVType.Raven
else (
f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
if self.user == "Bob"
else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
logits, _ = self.run_rnn(self.fix_tokens(self.pipeline.encode(preset_system)))
try:
@ -491,17 +435,15 @@ The following is a coherent verbose detailed conversation between a girl named {
pass
class MusicMidiRWKV(AbstractRWKV):
def __init__(self, model, pipeline):
super().__init__(model, pipeline)
class MusicRWKV(AbstractRWKV):
def __init__(self, model: str, strategy: str, tokens_path: str):
super().__init__(model, strategy, tokens_path)
self.max_tokens_per_generation = 500
self.temperature = 1
self.top_p = 0.8
self.top_k = 8
self.rwkv_type = RWKVType.Music
def adjust_occurrence(self, occurrence: Dict, token: int):
for n in occurrence:
occurrence[n] *= 0.997 #### decay repetition penalty
@ -533,266 +475,23 @@ class MusicMidiRWKV(AbstractRWKV):
return " " + delta
class MusicAbcRWKV(AbstractRWKV):
def __init__(self, model, pipeline):
super().__init__(model, pipeline)
self.EOS_ID = 3
self.max_tokens_per_generation = 500
self.temperature = 1
self.top_p = 0.8
self.top_k = 8
self.rwkv_type = RWKVType.Music
def adjust_occurrence(self, occurrence: Dict, token: int):
pass
def adjust_forward_logits(self, logits: List[float], occurrence: Dict, i: int):
pass
def fix_tokens(self, tokens) -> List[int]:
return tokens
def run_rnn(
self, _tokens: List[str], newline_adj: int = 0
) -> Tuple[List[float], int]:
tokens = [int(x) for x in _tokens]
token_len = len(tokens)
self.model_tokens += tokens
out, self.model_state = self.model.forward(tokens, self.model_state)
return out, token_len
def delta_postprocess(self, delta: str) -> str:
return delta
def get_tokenizer(tokenizer_len: int):
tokenizer_dir = f"{pathlib.Path(__file__).parent.parent.resolve()}/rwkv_pip/"
if tokenizer_len < 2176:
return "abc_tokenizer"
if tokenizer_len < 20096:
return tokenizer_dir + "tokenizer-midipiano.json"
if tokenizer_len < 50277:
return tokenizer_dir + "tokenizer-midi.json"
elif tokenizer_len < 65536:
return tokenizer_dir + "20B_tokenizer.json"
else:
return "rwkv_vocab_v20230424"
def get_model_path(model_path: str) -> str:
if os.path.isabs(model_path):
return model_path
working_dir: pathlib.Path = pathlib.Path(os.path.abspath(os.getcwd()))
parent_paths: List[pathlib.Path] = [
working_dir, # [cwd](RWKV-Runner)/models/xxx
working_dir.parent, # [cwd](backend-python)/../models/xxx
pathlib.Path(
os.path.abspath(__file__)
).parent.parent, # backend-python/models/xxx
pathlib.Path(
os.path.abspath(__file__)
).parent.parent.parent, # RWKV-Runner/models/xxx
]
child_paths: List[Callable[[pathlib.Path], pathlib.Path]] = [
lambda p: p / model_path,
lambda p: p / "build" / "bin" / model_path, # for dev
]
for parent_path in parent_paths:
for child_path in child_paths:
full_path: pathlib.Path = child_path(parent_path)
if os.path.isfile(full_path):
return str(full_path)
return model_path
def RWKV(model: str, strategy: str, tokenizer: Union[str, None]) -> AbstractRWKV:
model_path = get_model_path(model)
rwkv_cpp = getattr(global_var.get(global_var.Args), "rwkv.cpp")
webgpu = global_var.get(global_var.Args).webgpu
if "midi" in model_path.lower() or "abc" in model_path.lower():
os.environ["RWKV_RESCALE_LAYER"] = "999"
# dynamic import to make RWKV_CUDA_ON work
if rwkv_cpp:
print("Using rwkv.cpp, strategy is ignored")
from rwkv_pip.cpp.model import (
RWKV as Model,
)
elif webgpu:
print("Using webgpu")
from rwkv_pip.webgpu.model import (
RWKV as Model,
)
else:
from rwkv_pip.model import (
RWKV as Model,
)
from rwkv_pip.utils import PIPELINE
filename, _ = os.path.splitext(os.path.basename(model_path))
model = Model(model_path, strategy)
if not tokenizer:
tokenizer = get_tokenizer(len(model.w["emb.weight"]))
pipeline = PIPELINE(model, tokenizer)
rwkv_map: dict[str, Type[AbstractRWKV]] = {
"20B_tokenizer": TextRWKV,
"rwkv_vocab_v20230424": TextRWKV,
"tokenizer-midi": MusicMidiRWKV,
"tokenizer-midipiano": MusicMidiRWKV,
"abc_tokenizer": MusicAbcRWKV,
}
tokenizer_name = os.path.splitext(os.path.basename(tokenizer))[0]
global_var.set(
global_var.Midi_Vocab_Config_Type,
(
global_var.MidiVocabConfig.Piano
if tokenizer_name == "tokenizer-midipiano"
else global_var.MidiVocabConfig.Default
),
)
rwkv: AbstractRWKV
if tokenizer_name in rwkv_map:
rwkv = rwkv_map[tokenizer_name](model, pipeline)
else:
tokenizer_name = tokenizer_name.lower()
if "music" in tokenizer_name or "midi" in tokenizer_name:
rwkv = MusicMidiRWKV(model, pipeline)
elif "abc" in tokenizer_name:
rwkv = MusicAbcRWKV(model, pipeline)
else:
rwkv = TextRWKV(model, pipeline)
rwkv.name = filename
rwkv.model_path = model_path
rwkv.version = model.version
return rwkv
class ModelConfigBody(BaseModel):
max_tokens: int = Field(default=None, gt=0, le=102400)
temperature: float = Field(default=None, ge=0, le=3)
temperature: float = Field(default=None, ge=0, le=2)
top_p: float = Field(default=None, ge=0, le=1)
presence_penalty: float = Field(default=None, ge=-2, le=2)
frequency_penalty: float = Field(default=None, ge=-2, le=2)
penalty_decay: float = Field(default=None, ge=0.99, le=0.999)
top_k: int = Field(default=None, ge=0, le=25)
global_penalty: bool = Field(
default=None,
description="When generating a response, whether to include the submitted prompt as a penalty factor. By turning this off, you will get the same generated results as official RWKV Gradio. If you find duplicate results in the generated results, turning this on can help avoid generating duplicates.",
)
state: str = Field(default=None, description="state-tuned file path")
model_config = {
"json_schema_extra": {
class Config:
schema_extra = {
"example": {
"max_tokens": 1000,
"temperature": 1,
"top_p": 0.3,
"presence_penalty": 0,
"frequency_penalty": 1,
"penalty_decay": 0.996,
"global_penalty": False,
"state": "",
"temperature": 1.2,
"top_p": 0.5,
"presence_penalty": 0.4,
"frequency_penalty": 0.4,
}
}
}
def load_rwkv_state(
model: AbstractRWKV, state_path: str, print_log: bool = True
) -> HTTPException:
if model:
if state_path:
if model.model_path.endswith(".pth") and state_path.endswith(".pth"):
import torch
state_path = get_model_path(state_path)
if model.state_path == state_path:
return
if not os.path.isfile(state_path):
return HTTPException(
status.HTTP_400_BAD_REQUEST, "state file not found"
)
try:
state_raw = torch.load(state_path, map_location="cpu")
except Exception as e:
print(e)
return HTTPException(
status.HTTP_400_BAD_REQUEST, "state file failed to load"
)
state_raw_shape = next(iter(state_raw.values())).shape
args = model.model.args
if (
len(state_raw) != args.n_layer
or state_raw_shape[0] * state_raw_shape[1] != args.n_embd
):
if model.state_path:
pass
elif print_log:
print("state failed to load")
return HTTPException(
status.HTTP_400_BAD_REQUEST, "state shape mismatch"
)
strategy = model.model.strategy
model.state_tuned = [None] * args.n_layer * 3
for i in range(args.n_layer):
dd = strategy[i]
dev = dd.device
atype = dd.atype
model.state_tuned[i * 3 + 0] = torch.zeros(
args.n_embd, dtype=atype, requires_grad=False, device=dev
).contiguous()
model.state_tuned[i * 3 + 1] = (
state_raw[f"blocks.{i}.att.time_state"]
.transpose(1, 2)
.to(dtype=torch.float, device=dev)
.requires_grad_(False)
.contiguous()
)
model.state_tuned[i * 3 + 2] = torch.zeros(
args.n_embd, dtype=atype, requires_grad=False, device=dev
).contiguous()
state_cache.force_reset_state()
model.state_path = state_path
if print_log:
print("state loaded")
else:
if model.state_path:
pass
elif print_log:
print("state failed to load")
return HTTPException(
status.HTTP_400_BAD_REQUEST,
"file format of the model or state model not supported",
)
else:
if state_path == "" and model.state_path != "":
state_cache.force_reset_state()
model.state_path = ""
model.state_tuned = None # TODO cached
if print_log:
print("state unloaded")
else:
if print_log:
print("state not loaded")
def set_rwkv_config(model: AbstractRWKV, body: ModelConfigBody):
@ -809,14 +508,6 @@ def set_rwkv_config(model: AbstractRWKV, body: ModelConfigBody):
model.penalty_alpha_presence = body.presence_penalty
if body.frequency_penalty is not None:
model.penalty_alpha_frequency = body.frequency_penalty
if body.penalty_decay is not None:
model.penalty_decay = body.penalty_decay
if body.top_k is not None:
model.top_k = body.top_k
if body.global_penalty is not None:
model.global_penalty = body.global_penalty
if body.state is not None:
load_rwkv_state(model, body.state, False)
def get_rwkv_config(model: AbstractRWKV) -> ModelConfigBody:
@ -826,8 +517,4 @@ def get_rwkv_config(model: AbstractRWKV) -> ModelConfigBody:
top_p=model.top_p,
presence_penalty=model.penalty_alpha_presence,
frequency_penalty=model.penalty_alpha_frequency,
penalty_decay=model.penalty_decay,
top_k=model.top_k,
global_penalty=model.global_penalty,
state=model.state_path,
)

View File

@ -19,12 +19,9 @@ def set_torch():
def torch_gc():
try:
import torch
import torch
if torch.cuda.is_available():
with torch.cuda.device(0):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
except:
pass # prevent 'torch' has no attribute 'cuda' error, so user can use CPU or WebGPU
if torch.cuda.is_available():
with torch.cuda.device(0):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()

View File

@ -1,279 +0,0 @@
{
"note_events": 128,
"wait_events": 125,
"max_wait_time": 1000,
"velocity_events": 128,
"velocity_bins": 16,
"velocity_exp": 0.33,
"do_token_sorting": true,
"unrolled_tokens": false,
"decode_end_held_note_delay": 5.0,
"decode_fix_repeated_notes": true,
"bin_instrument_names": [
"piano"
],
"ch10_instrument_bin_name": "",
"program_name_to_bin_name": {
"Acoustic Grand Piano": "piano",
"Bright Acoustic Piano": "piano",
"Electric Grand Piano": "piano",
"Honky-tonk Piano": "piano",
"Electric Piano 1 (Rhodes Piano)": "piano",
"Electric Piano 2 (Chorused Piano)": "piano",
"Harpsichord": "piano",
"Clavinet": "piano",
"Celesta": "",
"Glockenspiel": "",
"Music Box": "",
"Vibraphone": "",
"Marimba": "",
"Xylophone": "",
"Tubular Bells": "",
"Dulcimer (Santur)": "",
"Drawbar Organ (Hammond)": "",
"Percussive Organ": "piano",
"Rock Organ": "piano",
"Church Organ": "piano",
"Reed Organ": "piano",
"Accordion (French)": "piano",
"Harmonica": "piano",
"Tango Accordion (Band neon)": "piano",
"Acoustic Guitar (nylon)": "",
"Acoustic Guitar (steel)": "",
"Electric Guitar (jazz)": "",
"Electric Guitar (clean)": "",
"Electric Guitar (muted)": "",
"Overdriven Guitar": "",
"Distortion Guitar": "",
"Guitar harmonics": "",
"Acoustic Bass": "",
"Electric Bass (fingered)": "",
"Electric Bass (picked)": "",
"Fretless Bass": "",
"Slap Bass 1": "",
"Slap Bass 2": "",
"Synth Bass 1": "",
"Synth Bass 2": "",
"Violin": "",
"Viola": "",
"Cello": "",
"Contrabass": "",
"Tremolo Strings": "",
"Pizzicato Strings": "",
"Orchestral Harp": "",
"Timpani": "",
"String Ensemble 1 (strings)": "",
"String Ensemble 2 (slow strings)": "",
"SynthStrings 1": "",
"SynthStrings 2": "",
"Choir Aahs": "",
"Voice Oohs": "",
"Synth Voice": "",
"Orchestra Hit": "",
"Trumpet": "",
"Trombone": "",
"Tuba": "",
"Muted Trumpet": "",
"French Horn": "",
"Brass Section": "",
"SynthBrass 1": "",
"SynthBrass 2": "",
"Soprano Sax": "",
"Alto Sax": "",
"Tenor Sax": "",
"Baritone Sax": "",
"Oboe": "",
"English Horn": "",
"Bassoon": "",
"Clarinet": "",
"Piccolo": "",
"Flute": "",
"Recorder": "",
"Pan Flute": "",
"Blown Bottle": "",
"Shakuhachi": "",
"Whistle": "",
"Ocarina": "",
"Lead 1 (square wave)": "",
"Lead 2 (sawtooth wave)": "",
"Lead 3 (calliope)": "",
"Lead 4 (chiffer)": "",
"Lead 5 (charang)": "",
"Lead 6 (voice solo)": "",
"Lead 7 (fifths)": "",
"Lead 8 (bass + lead)": "",
"Pad 1 (new age Fantasia)": "",
"Pad 2 (warm)": "",
"Pad 3 (polysynth)": "",
"Pad 4 (choir space voice)": "",
"Pad 5 (bowed glass)": "",
"Pad 6 (metallic pro)": "",
"Pad 7 (halo)": "",
"Pad 8 (sweep)": "",
"FX 1 (rain)": "",
"FX 2 (soundtrack)": "",
"FX 3 (crystal)": "",
"FX 4 (atmosphere)": "",
"FX 5 (brightness)": "",
"FX 6 (goblins)": "",
"FX 7 (echoes, drops)": "",
"FX 8 (sci-fi, star theme)": "",
"Sitar": "",
"Banjo": "",
"Shamisen": "",
"Koto": "",
"Kalimba": "",
"Bag pipe": "",
"Fiddle": "",
"Shanai": "",
"Tinkle Bell": "",
"Agogo": "",
"Steel Drums": "",
"Woodblock": "",
"Taiko Drum": "",
"Melodic Tom": "",
"Synth Drum": "",
"Reverse Cymbal": "",
"Guitar Fret Noise": "",
"Breath Noise": "",
"Seashore": "",
"Bird Tweet": "",
"Telephone Ring": "",
"Helicopter": "",
"Applause": "",
"Gunshot": ""
},
"bin_name_to_program_name": {
"piano": "Acoustic Grand Piano"
},
"instrument_names": {
"0": "Acoustic Grand Piano",
"1": "Bright Acoustic Piano",
"2": "Electric Grand Piano",
"3": "Honky-tonk Piano",
"4": "Electric Piano 1 (Rhodes Piano)",
"5": "Electric Piano 2 (Chorused Piano)",
"6": "Harpsichord",
"7": "Clavinet",
"8": "Celesta",
"9": "Glockenspiel",
"10": "Music Box",
"11": "Vibraphone",
"12": "Marimba",
"13": "Xylophone",
"14": "Tubular Bells",
"15": "Dulcimer (Santur)",
"16": "Drawbar Organ (Hammond)",
"17": "Percussive Organ",
"18": "Rock Organ",
"19": "Church Organ",
"20": "Reed Organ",
"21": "Accordion (French)",
"22": "Harmonica",
"23": "Tango Accordion (Band neon)",
"24": "Acoustic Guitar (nylon)",
"25": "Acoustic Guitar (steel)",
"26": "Electric Guitar (jazz)",
"27": "Electric Guitar (clean)",
"28": "Electric Guitar (muted)",
"29": "Overdriven Guitar",
"30": "Distortion Guitar",
"31": "Guitar harmonics",
"32": "Acoustic Bass",
"33": "Electric Bass (fingered)",
"34": "Electric Bass (picked)",
"35": "Fretless Bass",
"36": "Slap Bass 1",
"37": "Slap Bass 2",
"38": "Synth Bass 1",
"39": "Synth Bass 2",
"40": "Violin",
"41": "Viola",
"42": "Cello",
"43": "Contrabass",
"44": "Tremolo Strings",
"45": "Pizzicato Strings",
"46": "Orchestral Harp",
"47": "Timpani",
"48": "String Ensemble 1 (strings)",
"49": "String Ensemble 2 (slow strings)",
"50": "SynthStrings 1",
"51": "SynthStrings 2",
"52": "Choir Aahs",
"53": "Voice Oohs",
"54": "Synth Voice",
"55": "Orchestra Hit",
"56": "Trumpet",
"57": "Trombone",
"58": "Tuba",
"59": "Muted Trumpet",
"60": "French Horn",
"61": "Brass Section",
"62": "SynthBrass 1",
"63": "SynthBrass 2",
"64": "Soprano Sax",
"65": "Alto Sax",
"66": "Tenor Sax",
"67": "Baritone Sax",
"68": "Oboe",
"69": "English Horn",
"70": "Bassoon",
"71": "Clarinet",
"72": "Piccolo",
"73": "Flute",
"74": "Recorder",
"75": "Pan Flute",
"76": "Blown Bottle",
"77": "Shakuhachi",
"78": "Whistle",
"79": "Ocarina",
"80": "Lead 1 (square wave)",
"81": "Lead 2 (sawtooth wave)",
"82": "Lead 3 (calliope)",
"83": "Lead 4 (chiffer)",
"84": "Lead 5 (charang)",
"85": "Lead 6 (voice solo)",
"86": "Lead 7 (fifths)",
"87": "Lead 8 (bass + lead)",
"88": "Pad 1 (new age Fantasia)",
"89": "Pad 2 (warm)",
"90": "Pad 3 (polysynth)",
"91": "Pad 4 (choir space voice)",
"92": "Pad 5 (bowed glass)",
"93": "Pad 6 (metallic pro)",
"94": "Pad 7 (halo)",
"95": "Pad 8 (sweep)",
"96": "FX 1 (rain)",
"97": "FX 2 (soundtrack)",
"98": "FX 3 (crystal)",
"99": "FX 4 (atmosphere)",
"100": "FX 5 (brightness)",
"101": "FX 6 (goblins)",
"102": "FX 7 (echoes, drops)",
"103": "FX 8 (sci-fi, star theme)",
"104": "Sitar",
"105": "Banjo",
"106": "Shamisen",
"107": "Koto",
"108": "Kalimba",
"109": "Bag pipe",
"110": "Fiddle",
"111": "Shanai",
"112": "Tinkle Bell",
"113": "Agogo",
"114": "Steel Drums",
"115": "Woodblock",
"116": "Taiko Drum",
"117": "Melodic Tom",
"118": "Synth Drum",
"119": "Reverse Cymbal",
"120": "Guitar Fret Noise",
"121": "Breath Noise",
"122": "Seashore",
"123": "Bird Tweet",
"124": "Telephone Ring",
"125": "Helicopter",
"126": "Applause",
"127": "Gunshot"
}
}

View File

@ -1,14 +0,0 @@
from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.staticfiles import StaticFiles
import uvicorn
webui_server = FastAPI()
webui_server.add_middleware(GZipMiddleware, minimum_size=1000)
webui_server.mount(
"/", StaticFiles(directory="frontend/dist", html=True), name="static"
)
if __name__ == "__main__":
uvicorn.run("webui_server:webui_server")

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,734 @@
########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################
import types, gc, os, time, re
import torch
from torch.nn import functional as F
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cuda.matmul.allow_tf32 = True
current_path = os.path.dirname(os.path.abspath(__file__))
# https://zhuanlan.zhihu.com/p/612879065
def LoadPreCompileLibrary(file):
import importlib
import os
import torch
# load the custom_op_library and register the custom ops
lib_dir = os.path.dirname(__file__)
if os.name == "nt":
# Register the main torchvision library location on the default DLL path
import ctypes
import sys
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
prev_error_mode = kernel32.SetErrorMode(0x0001)
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
if sys.version_info >= (3, 8):
os.add_dll_directory(lib_dir)
elif with_load_library_flags:
res = kernel32.AddDllDirectory(lib_dir)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
raise ValueError(err)
kernel32.SetErrorMode(prev_error_mode)
loader_details = (
importlib.machinery.ExtensionFileLoader,
importlib.machinery.EXTENSION_SUFFIXES,
)
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
ext_specs = extfinder.find_spec(file)
if ext_specs is None:
return False
try:
torch.ops.load_library(ext_specs.origin)
except OSError as exc:
return False
return True
########################################################################################################
if os.environ.get('RWKV_JIT_ON') != '0':
os.environ["RWKV_JIT_ON"] = '1'
MyModule = torch.jit.ScriptModule
MyFunction = torch.jit.script_method
MyStatic = torch.jit.script
else:
MyModule = torch.nn.Module
def __nop(ob):
return ob
MyFunction = __nop
MyStatic = __nop
if os.environ.get('RWKV_CUDA_ON') == '1':
if LoadPreCompileLibrary('wkv_cuda') is False:
from torch.utils.cpp_extension import load
load(
name=f"wkv_cuda",
sources=[f"{current_path}/cuda/wrapper.cpp", f"{current_path}/cuda/operators.cu"],
verbose=True,
extra_cuda_cflags=["-t 4", "-std=c++17", "--use_fast_math", "-O3", "--extra-device-vectorization"],
is_python_module=False)
@MyStatic
def cuda_wkv(T: int, C: int, w, u, k, v, aa, bb, pp):
assert 1 * C % min(C, 32) == 0
assert k.dtype == v.dtype == torch.float16 or k.dtype == v.dtype == torch.float32
assert w.dtype == u.dtype == aa.dtype == bb.dtype == pp.dtype == torch.float32
w = w.contiguous()
u = u.contiguous()
k = k.contiguous()
v = v.contiguous()
y = torch.empty((T, C), device=w.device, memory_format=torch.contiguous_format, dtype=k.dtype)
torch.ops.rwkv.wkv_forward(1, T, C, w, u, k, v, y, aa, bb, pp)
return y, aa, bb, pp
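# For orientation (an assumption -- the wkv kernel body is not shown in this section):
# aa, bb and pp hold the numerator, denominator and running max exponent of the RWKV-4
# WKV recurrence, kept in a log-sum-exp form for numerical stability, roughly:
#   p  = max(pp, u + k[t]);  e1 = exp(pp - p);      e2 = exp(u + k[t] - p)
#   y[t] = (e1 * aa + e2 * v[t]) / (e1 * bb + e2)
#   p  = max(pp + w, k[t]);  e1 = exp(pp + w - p);  e2 = exp(k[t] - p)
#   aa = e1 * aa + e2 * v[t];  bb = e1 * bb + e2;  pp = p
# (w is the negative per-channel decay produced by the '.time_decay' handling below)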
@MyStatic
def cuda_mm8_seq(B: int, N: int, M: int, x, w, mx, rx, my, ry):
assert x.dtype == mx.dtype == rx.dtype == my.dtype == ry.dtype
assert x.dtype == torch.float32 or x.dtype == torch.float16
assert w.dtype == torch.uint8
assert x.shape == [B, N]
assert w.shape == [N, M]
assert rx.shape == mx.shape == [M]
assert ry.shape == my.shape == [N, 1]
y = torch.empty((B, M), device=w.device, dtype=x.dtype)
torch.ops.rwkv.mm8_seq(B, N, M, x, w, mx, rx, my, ry, y)
return y
@MyStatic
def cuda_mm8_one(N: int, M: int, x, w, mx, rx, my, ry):
assert x.dtype == mx.dtype == rx.dtype == my.dtype == ry.dtype
assert x.dtype == torch.float32 or x.dtype == torch.float16
assert w.dtype == torch.uint8
assert x.shape == [N]
assert w.shape == [N, M]
assert rx.shape == mx.shape == [M]
assert ry.shape == my.shape == [N, 1]
y = torch.zeros((M,), device=w.device, dtype=torch.float32)
torch.ops.rwkv.mm8_one(N, M, x, w, mx, rx, my, ry, y)
return y.to(dtype=x.dtype)
else:
os.environ["RWKV_CUDA_ON"] = '0'
########################################################################################################
class RWKV(MyModule):
def __init__(self, model, strategy, verbose = True, convert_and_save_and_exit = None):
super().__init__()
if verbose:
prxxx = lambda *args, **kwargs: print(*args, **kwargs)
else:
prxxx = lambda *args, **kwargs: None
STRATEGY_REGEX = r"^(?:(?:^|->) *(?:cuda(?::[\d]+)?|cpu|mps) (?:fp(?:16|32)|bf16)(?:i8|i4|i3)?(?: \*[\d]+\+?)? *)+$"
if not re.match(STRATEGY_REGEX, strategy):
raise ValueError("Invalid strategy. Please read https://pypi.org/project/rwkv/")
strategy = ('->'.join([x.strip() for x in strategy.split('->')])).replace('->', ' -> ')
self.args = types.SimpleNamespace()
args = self.args
args.MODEL_NAME = model
args.strategy_string = strategy
# Rescale for fp16 mode: set x = x/2 every X layer (to avoid fp16 overflow)
self.RESCALE_LAYER = 6 if 'fp16' in strategy else 0
prxxx(f'RWKV_JIT_ON {os.environ["RWKV_JIT_ON"]} RWKV_CUDA_ON {os.environ["RWKV_CUDA_ON"]} RESCALE_LAYER {self.RESCALE_LAYER}\n')
args.MODEL_NAME = args.MODEL_NAME.strip()
if not args.MODEL_NAME.endswith('.pth'):
args.MODEL_NAME += '.pth'
prxxx(f'Loading {args.MODEL_NAME} ...')
with torch.no_grad():
self.w = torch.load(args.MODEL_NAME, map_location='cpu') # load model to CPU first
gc.collect()
w = self.w
ALREADY_CONVERTED = False
if '_strategy' in w:
ALREADY_CONVERTED = True
assert convert_and_save_and_exit == None # you should only convert a raw model
prxxx(f"Converted model: strategy {w['_strategy']}, version {w['_version']}\n")
assert w['_strategy'] == args.strategy_string # if you are using a new strategy, re-convert the model
assert float(w['_version']) >= 0.7 # sometimes you should re-convert using latest convert_model.py
assert w['_rescale_layer'] == self.RESCALE_LAYER
del w['_strategy']
del w['_version']
del w['_rescale_layer']
args.n_embd = w['emb.weight'].shape[1]
args.n_layer = 0
keys = list(w.keys())
for x in keys:
layer_id = int(x.split('.')[1]) if ('blocks.' in x) else 0
args.n_layer = max(args.n_layer, layer_id+1)
####################### Compute strategy
s = [x.strip().split(' ') for x in strategy.split('->')]
plan = [0] * len(s)
stream_i = -1
stream_count = 0
to_allocate = args.n_layer + 1
allocated = 0
free_slots = 0
for i in range(len(s)):
si = s[i]
si1 = si[1]
if si1.startswith('fp32'): si[1] = [torch.float]
elif si1.startswith('fp16'): si[1] = [torch.float16]
elif si1.startswith('bf16'): si[1] = [torch.bfloat16]
if si1.endswith('i8'): si[1] += [torch.uint8]
else: si[1] += [si[1][0]]
if len(si) > 2:
ss = si[2]
assert ss.startswith('*')
if ss.endswith('+'):
plan[i] = int(ss[1:-1])
stream_i = i
else:
plan[i] = int(ss[1:])
allocated += plan[i]
if allocated >= to_allocate:
plan[i] += to_allocate - allocated
break
else:
free_slots += 1
if stream_i < 0:
if free_slots > 0 and to_allocate > allocated:
for i in range(len(s)):
if plan[i] == 0:
plan[i] = (to_allocate - allocated) // free_slots
allocated += plan[i]
free_slots -= 1
if to_allocate > allocated:
plan[len(s)-1] += to_allocate - allocated
else:
if to_allocate > allocated:
stream_count = to_allocate - allocated
plan[stream_i] += stream_count
prxxx(f'Strategy: (total {args.n_layer}+1={args.n_layer+1} layers)')
for i in range(len(s)):
ss = s[i]
if i != stream_i:
prxxx(f'* {ss[0]} {str(ss[1]).replace("torch.","")}, store {plan[i]} layers')
else:
prxxx(f'* {ss[0]} {str(ss[1]).replace("torch.","")}, store {plan[i]-stream_count} layers, stream {stream_count} layers')
plan[i] += (0 if i == 0 else plan[i-1])
self.strategy = [None] * (args.n_layer + 1)
strategy = self.strategy
for n in range(args.n_layer + 1):
for i in range(len(s)):
if n < plan[i]:
strategy[n] = types.SimpleNamespace()
strategy[n].device = s[i][0]
strategy[n].atype = s[i][1][0]
strategy[n].wtype = s[i][1][1]
strategy[n].stream = False
if i == stream_i and n >= (plan[i] - stream_count):
strategy[n].stream = True
break
prxxx(f"{n}-{strategy[n].device}-{str(strategy[n].atype).replace('torch.','')}-{str(strategy[n].wtype).replace('torch.','')}{'-stream' if strategy[n].stream else ''}",end=' ')
prxxx()
####################### Load weights to self.w
if not ALREADY_CONVERTED:
try: # precompute embedding
w['emb.weight'] = F.layer_norm(w['emb.weight'], (args.n_embd,), weight=w['blocks.0.ln0.weight'], bias=w['blocks.0.ln0.bias'])
except:
w['emb.weight'] = F.layer_norm(w['emb.weight'].float(), (args.n_embd,), weight=w['blocks.0.ln0.weight'].float(), bias=w['blocks.0.ln0.bias'].float())
del w['blocks.0.ln0.weight']
del w['blocks.0.ln0.bias']
print_need_newline = False
keys = list(w.keys())
for x in keys:
w[x].requires_grad = False
layer_id = int(x.split('.')[1]) if ('blocks.' in x) else 0
if ('ln_out.' in x) or ('head.' in x):
layer_id = args.n_layer
dd = strategy[layer_id]
DEVICE = dd.device
ATYPE = dd.atype
WTYPE = dd.wtype
if not ALREADY_CONVERTED:
if self.RESCALE_LAYER > 0:
if 'att.output.weight' in x:
w[x] = w[x] / (2 ** int(layer_id // self.RESCALE_LAYER))
if 'ffn.value.weight' in x:
w[x] = w[x] / (2 ** int(layer_id // self.RESCALE_LAYER))
if '.time_' in x:
w[x] = w[x].squeeze()
if 'key.weight' in x or 'value.weight' in x or 'receptance.weight' in x or 'output.weight' in x or 'head.weight' in x:
w[x] = w[x].t()
if '.time_decay' in x: # need fp32 for this
w[x] = -torch.exp(w[x].float())
elif '.time_first' in x: # need fp32 for this
w[x] = w[x].float()
else:
if (len(w[x].shape) == 2) and ('emb' not in x):
if WTYPE != torch.uint8:
w[x] = w[x].to(dtype=WTYPE)
else:
w[x] = w[x].float()
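# uint8 quantization: subtract the per-row (my) and per-column (mx) minima, normalize by the
# per-column (rx) and per-row (ry) maxima, then round into 0..255; rx and ry are stored divided
# by 16 so that their product carries the 1/256 dequantization factor.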
if w[x].shape[0] > w[x].shape[1]:
w[x+'_my'] = torch.amin(w[x], dim=1).unsqueeze(1)
w[x] = w[x] - w[x+'_my']
w[x+'_mx'] = torch.amin(w[x], dim=0)
w[x] = w[x] - w[x+'_mx']
w[x+'_rx'] = torch.amax(w[x], dim=0)
w[x] = w[x] / w[x+'_rx']
w[x+'_ry'] = torch.amax(w[x], dim=1).unsqueeze(1)
w[x] = w[x] / w[x+'_ry']
else:
w[x+'_mx'] = torch.amin(w[x], dim=0)
w[x] = w[x] - w[x+'_mx']
w[x+'_my'] = torch.amin(w[x], dim=1).unsqueeze(1)
w[x] = w[x] - w[x+'_my']
w[x+'_rx'] = torch.amax(w[x], dim=0)
w[x] = w[x] / w[x+'_rx']
w[x+'_ry'] = torch.amax(w[x], dim=1).unsqueeze(1)
w[x] = w[x] / w[x+'_ry']
w[x] = torch.clip(torch.floor(w[x] * 256), min=0, max=255).to(dtype=torch.uint8)
w[x+'_mx'] = w[x+'_mx'].to(dtype=ATYPE).contiguous()
w[x+'_rx'] = (w[x+'_rx'] / 16).to(dtype=ATYPE).contiguous()
w[x+'_my'] = w[x+'_my'].to(dtype=ATYPE).contiguous()
w[x+'_ry'] = (w[x+'_ry'] / 16).to(dtype=ATYPE).contiguous()
else:
w[x] = w[x].to(dtype=ATYPE)
if convert_and_save_and_exit == None:
if 'emb.' in x:
w[x] = w[x].contiguous()
elif (dd.stream) and (x.endswith('key.weight') or x.endswith('value.weight') or x.endswith('receptance.weight') or x.endswith('output.weight')):
try:
w[x] = w[x].contiguous().pin_memory() # if you see "CUDA error: out of memory" here, that's out of CPU RAM, not VRAM. Get more RAM :)
except:
print('Note: You are running out of RAM. Get more CPU RAM. Now this will run much slower.')
elif DEVICE != 'cpu':
w[x] = w[x].to(device=DEVICE).contiguous()
if (dd.stream) or (DEVICE != 'cpu'):
try:
w[x+'_mx'] = w[x+'_mx'].to(device=DEVICE).contiguous()
w[x+'_rx'] = w[x+'_rx'].to(device=DEVICE).contiguous()
w[x+'_my'] = w[x+'_my'].to(device=DEVICE).contiguous()
w[x+'_ry'] = w[x+'_ry'].to(device=DEVICE).contiguous()
except:
pass
if 'ffn.value.weight' in x:
gc.collect()
if 'cuda' in args.strategy_string:
torch.cuda.empty_cache()
shape = [i for i in w[x].shape if i != 1]
if len(shape) > 1:
shape = f" {str(shape[0]).rjust(5)} {str(shape[1]).rjust(5)}"
else:
shape = f" {str(shape[0]).rjust(5)} "
if layer_id == 0 or layer_id >= args.n_layer-1:
if print_need_newline:
prxxx('\n', end = '')
print_need_newline = False
dt = str(w[x].dtype).replace('torch.', '')
dt = dt.replace('float32', 'f32').replace('bfloat16', 'bf16').replace('float16', 'f16').replace('uint8', 'i8')
prxxx(x.ljust(32), dt.rjust(4), str(w[x].device).rjust(8), shape, ' (pinned)' if w[x].is_pinned() else '')
else:
print_need_newline = True
prxxx('.', end = '', flush = True)
if convert_and_save_and_exit:
w['_strategy'] = args.strategy_string
w['_rescale_layer'] = self.RESCALE_LAYER
w['_version'] = '0.7'
if not convert_and_save_and_exit.endswith('.pth'):
convert_and_save_and_exit += '.pth'
prxxx(f'Saving to {convert_and_save_and_exit}...')
torch.save(w, convert_and_save_and_exit)
prxxx(f'Converted and saved. Now this will exit.')
exit(0)
gc.collect()
if 'cuda' in args.strategy_string:
torch.cuda.empty_cache()
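# The mm8 helpers below dequantize on the fly: w ≈ (q + 0.5) * ry * rx + my + mx, using the stored
# rx/ry (already divided by 16) and the mx/my offsets; a custom CUDA kernel handles this when
# RWKV_CUDA_ON=1 and the weight lives on a GPU.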
@MyFunction
def torch_mm8_seq(self, x, w, mx, rx, my, ry):
return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)
@MyFunction
def torch_mm8_one(self, x, w, mx, rx, my, ry):
return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)
if os.environ.get('RWKV_CUDA_ON') == '1':
@MyFunction
def mm8_seq(self, x, w, mx, rx, my, ry):
if w.device.type == 'cuda' and x.dtype == torch.float16:
B, N, M = x.shape[0], w.shape[0], w.shape[1]
return cuda_mm8_seq(B, N, M, x, w, mx, rx, my, ry)
else:
return self.torch_mm8_seq(x, w, mx, rx, my, ry)
@MyFunction
def mm8_one(self, x, w, mx, rx, my, ry):
if w.device.type == 'cuda':
N, M = w.shape[0], w.shape[1]
return cuda_mm8_one(N, M, x, w, mx, rx, my, ry)
else:
return self.torch_mm8_one(x, w, mx, rx, my, ry)
else:
@MyFunction
def mm8_seq(self, x, w, mx, rx, my, ry):
return self.torch_mm8_seq(x, w, mx, rx, my, ry)
@MyFunction
def mm8_one(self, x, w, mx, rx, my, ry):
return self.torch_mm8_one(x, w, mx, rx, my, ry)
########################################################################################################
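# Channel mix (FFN), single token: mix x with the previous hidden sx (token shift), then
# out = sigmoid(x_r @ rw) * (relu(x_k @ kw)**2 @ vw); returns x + out and xx as the next ffn state.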
@MyFunction
def ffn_one(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(rx @ rw)
vx = torch.square(torch.relu(kx @ kw))
out = r * (vx @ vw)
return x + out, xx
@MyFunction
def ffn_one_i8(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_one(rx, rw, rmx, rrx, rmy, rry))
vx = torch.square(torch.relu(self.mm8_one(kx, kw, kmx, krx, kmy, kry)))
out = r * (self.mm8_one(vx, vw, vmx, vrx, vmy, vry))
return x + out, xx
########################################################################################################
@MyFunction
def ffn_seq(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(rx @ rw)
vx = torch.square(torch.relu(kx @ kw))
out = r * (vx @ vw)
return x + out, xx[-1,:]
@MyFunction
def ffn_seq_i8(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
vx = torch.square(torch.relu(self.mm8_seq(kx, kw, kmx, krx, kmy, kry)))
out = r * (self.mm8_seq(vx, vw, vmx, vrx, vmy, vry))
return x + out, xx[-1,:]
########################################################################################################
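# Time mix (attention), single token: one WKV step where aa/bb accumulate the exponentially weighted
# numerator/denominator and pp tracks the running maximum exponent for numerical stability;
# t_decay was stored as -exp(time_decay) and t_first is the per-channel bonus for the current token.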
@MyFunction
def att_one(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(rx @ rw)
k = (kx @ kw).float()
v = (vx @ vw).float()
ww = t_first + k
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, k)
e1 = torch.exp(ww - p)
e2 = torch.exp(k - p)
out = (r * wkv) @ ow
return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p
@MyFunction
def att_one_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_one(rx, rw, rmx, rrx, rmy, rry))
k = (self.mm8_one(kx, kw, kmx, krx, kmy, kry)).float()
v = (self.mm8_one(vx, vw, vmx, vrx, vmy, vry)).float()
ww = t_first + k
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, k)
e1 = torch.exp(ww - p)
e2 = torch.exp(k - p)
out = self.mm8_one(r * wkv, ow, omx, orx, omy, ory)
return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p
########################################################################################################
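# Sequence variants: sx becomes the previous-token hidden via a one-step shift (torch.cat), and a
# Python loop over the T time steps updates (aa, bb, pp) per token; the CUDA variants further below
# replace this loop with the cuda_wkv kernel when RWKV_CUDA_ON=1.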
@MyFunction
def att_seq(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(rx @ rw)
k = (kx @ kw).float()
v = (vx @ vw).float()
T = x.shape[0]
for t in range(T):
kk = k[t]
vv = v[t]
ww = t_first + kk
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, kk)
e1 = torch.exp(ww - p)
e2 = torch.exp(kk - p)
aa = e1 * aa + e2 * vv
bb = e1 * bb + e2
pp = p
out = (r * sx) @ ow
return x + out, xx[-1,:], aa, bb, pp
@MyFunction
def att_seq_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
k = self.mm8_seq(kx, kw, kmx, krx, kmy, kry).float()
v = self.mm8_seq(vx, vw, vmx, vrx, vmy, vry).float()
T = x.shape[0]
for t in range(T):
kk = k[t]
vv = v[t]
ww = t_first + kk
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, kk)
e1 = torch.exp(ww - p)
e2 = torch.exp(kk - p)
aa = e1 * aa + e2 * vv
bb = e1 * bb + e2
pp = p
out = self.mm8_seq(r * sx, ow, omx, orx, omy, ory)
return x + out, xx[-1,:], aa, bb, pp
########################################################################################################
if os.environ["RWKV_CUDA_ON"] == '1':
@MyFunction
def cuda_att_seq(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
T, C = x.size()
xx = F.layer_norm(x, (C,), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(rx @ rw)
k = kx @ kw
v = vx @ vw
y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)
out = (r * y) @ ow
return x + out, xx[-1,:], aa, bb, pp
@MyFunction
def cuda_att_seq_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
T, C = x.size()
xx = F.layer_norm(x, (C,), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
k = self.mm8_seq(kx, kw, kmx, krx, kmy, kry)
v = self.mm8_seq(vx, vw, vmx, vrx, vmy, vry)
y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)
out = self.mm8_seq(r * y, ow, omx, orx, omy, ory)
return x + out, xx[-1,:], aa, bb, pp
########################################################################################################
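# forward(): tokens is a list of token ids, state holds 5 tensors per layer
# (att xx/aa/bb/pp and ffn xx, created lazily when state is None); set full_output=True in
# sequence mode to get logits for every position instead of only the last token.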
def forward(self, tokens, state, full_output=False):
with torch.no_grad():
w = self.w
args = self.args
if state == None:
state = [None] * args.n_layer * 5
for i in range(args.n_layer): # state: 0=att_xx 1=att_aa 2=att_bb 3=att_pp 4=ffn_xx
dd = self.strategy[i]
dev = dd.device
atype = dd.atype
state[i*5+0] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
state[i*5+1] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous()
state[i*5+2] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous()
state[i*5+3] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous() - 1e30
state[i*5+4] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
seq_mode = len(tokens) > 1
x = w['emb.weight'][tokens if seq_mode else tokens[0]]
for i in range(args.n_layer):
bbb = f'blocks.{i}.'
att = f'blocks.{i}.att.'
ffn = f'blocks.{i}.ffn.'
dd = self.strategy[i]
dev = dd.device
atype = dd.atype
wtype = dd.wtype
if seq_mode:
if 'cuda' in str(dev) and os.environ["RWKV_CUDA_ON"] == '1':
ATT = self.cuda_att_seq if wtype != torch.uint8 else self.cuda_att_seq_i8
else:
ATT = self.att_seq if wtype != torch.uint8 else self.att_seq_i8
FFN = self.ffn_seq if wtype != torch.uint8 else self.ffn_seq_i8
else:
ATT = self.att_one if wtype != torch.uint8 else self.att_one_i8
FFN = self.ffn_one if wtype != torch.uint8 else self.ffn_one_i8
x = x.to(dtype=atype, device=dev)
kw = w[f'{att}key.weight']
vw = w[f'{att}value.weight']
rw = w[f'{att}receptance.weight']
ow = w[f'{att}output.weight']
if dd.stream:
kw = kw.to(device=dev, non_blocking=True)
vw = vw.to(device=dev, non_blocking=True)
rw = rw.to(device=dev, non_blocking=True)
ow = ow.to(device=dev, non_blocking=True)
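# Streamed layers keep their large matrices pinned in CPU RAM and copy them to the GPU only for
# the duration of this layer; the copies are deleted again right after the attention/FFN call.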
kmx = w[f'{att}key.weight_mx'] if wtype == torch.uint8 else x
krx = w[f'{att}key.weight_rx'] if wtype == torch.uint8 else x
kmy = w[f'{att}key.weight_my'] if wtype == torch.uint8 else x
kry = w[f'{att}key.weight_ry'] if wtype == torch.uint8 else x
vmx = w[f'{att}value.weight_mx'] if wtype == torch.uint8 else x
vrx = w[f'{att}value.weight_rx'] if wtype == torch.uint8 else x
vmy = w[f'{att}value.weight_my'] if wtype == torch.uint8 else x
vry = w[f'{att}value.weight_ry'] if wtype == torch.uint8 else x
rmx = w[f'{att}receptance.weight_mx'] if wtype == torch.uint8 else x
rrx = w[f'{att}receptance.weight_rx'] if wtype == torch.uint8 else x
rmy = w[f'{att}receptance.weight_my'] if wtype == torch.uint8 else x
rry = w[f'{att}receptance.weight_ry'] if wtype == torch.uint8 else x
omx = w[f'{att}output.weight_mx'] if wtype == torch.uint8 else x
orx = w[f'{att}output.weight_rx'] if wtype == torch.uint8 else x
omy = w[f'{att}output.weight_my'] if wtype == torch.uint8 else x
ory = w[f'{att}output.weight_ry'] if wtype == torch.uint8 else x
x, state[i*5+0], state[i*5+1], state[i*5+2], state[i*5+3] = ATT(
x, state[i*5+0], state[i*5+1], state[i*5+2], state[i*5+3],
w[f'{bbb}ln1.weight'], w[f'{bbb}ln1.bias'],
w[f'{att}time_mix_k'], w[f'{att}time_mix_v'], w[f'{att}time_mix_r'],
w[f'{att}time_decay'], w[f'{att}time_first'],
kw, vw, rw, ow,
kmx, krx, kmy, kry,
vmx, vrx, vmy, vry,
rmx, rrx, rmy, rry,
omx, orx, omy, ory,
)
if dd.stream:
del kw, vw, rw, ow
kw = w[f'{ffn}key.weight']
vw = w[f'{ffn}value.weight']
rw = w[f'{ffn}receptance.weight']
if dd.stream:
kw = kw.to(device=dev, non_blocking=True)
vw = vw.to(device=dev, non_blocking=True)
rw = rw.to(device=dev, non_blocking=True)
kmx = w[f'{ffn}key.weight_mx'] if wtype == torch.uint8 else x
krx = w[f'{ffn}key.weight_rx'] if wtype == torch.uint8 else x
kmy = w[f'{ffn}key.weight_my'] if wtype == torch.uint8 else x
kry = w[f'{ffn}key.weight_ry'] if wtype == torch.uint8 else x
vmx = w[f'{ffn}value.weight_mx'] if wtype == torch.uint8 else x
vrx = w[f'{ffn}value.weight_rx'] if wtype == torch.uint8 else x
vmy = w[f'{ffn}value.weight_my'] if wtype == torch.uint8 else x
vry = w[f'{ffn}value.weight_ry'] if wtype == torch.uint8 else x
rmx = w[f'{ffn}receptance.weight_mx'] if wtype == torch.uint8 else x
rrx = w[f'{ffn}receptance.weight_rx'] if wtype == torch.uint8 else x
rmy = w[f'{ffn}receptance.weight_my'] if wtype == torch.uint8 else x
rry = w[f'{ffn}receptance.weight_ry'] if wtype == torch.uint8 else x
x, state[i*5+4] = FFN(
x, state[i*5+4],
w[f'{bbb}ln2.weight'], w[f'{bbb}ln2.bias'],
w[f'{ffn}time_mix_k'], w[f'{ffn}time_mix_r'],
kw, vw, rw,
kmx, krx, kmy, kry,
vmx, vrx, vmy, vry,
rmx, rrx, rmy, rry,
)
if dd.stream:
del kw, vw, rw
if self.RESCALE_LAYER > 0:
if (i+1) % self.RESCALE_LAYER == 0:
x = x / 2
dd = self.strategy[args.n_layer]
x = x[-1,:] if (seq_mode and (not full_output)) else x
x = x.to(dtype=dd.atype, device=dd.device)
x = F.layer_norm(x, (args.n_embd,), weight=w['ln_out.weight'], bias=w['ln_out.bias'])
if w['head.weight'].dtype != torch.uint8:
x = x @ w['head.weight']
else:
if seq_mode and full_output:
x = self.mm8_seq(x, w['head.weight'], w['head.weight_mx'], w['head.weight_rx'], w['head.weight_my'], w['head.weight_ry'])
else:
x = self.mm8_one(x, w['head.weight'], w['head.weight_mx'], w['head.weight_rx'], w['head.weight_my'], w['head.weight_ry'])
return x.float(), state

File diff suppressed because it is too large Load Diff

View File

@ -1,11 +1,6 @@
Client Download URL:
客户端下载地址:
クライアントのダウンロードURL:
https://github.com/josStorer/RWKV-Runner/releases/latest/download/RWKV-Runner_macos_universal.zip
For Mac and Linux users, please manually install Python 3.10 (usually the latest systems come with it built-in). You can specify the Python interpreter to use in Settings. (which python3)
对于Mac和Linux用户请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器. (which python3)
MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。 (which python3)
For Mac and Linux users, please manually install Python 3.10 (usually the latest systems come with it built-in). You can specify the Python interpreter to use in Settings.
对于Mac和Linux用户请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器.
MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。
Please execute this program in an empty directory. All related dependencies will be placed in this directory.
请将本程序放在一个空目录内执行, 所有相关依赖均会放置于此目录.

View File

@ -1,8 +1,3 @@
Client Download URL:
客户端下载地址:
クライアントのダウンロードURL:
https://github.com/josStorer/RWKV-Runner/releases/latest/download/RWKV-Runner_linux_x64
For Mac and Linux users, please manually install Python 3.10 (usually the latest systems come with it built-in). You can specify the Python interpreter to use in Settings.
对于Mac和Linux用户请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器.
MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。

View File

@ -1,8 +1,3 @@
Client Download URL:
客户端下载地址:
クライアントのダウンロードURL:
https://github.com/josStorer/RWKV-Runner/releases/latest/download/RWKV-Runner_windows_x64.exe
Please execute this program in an empty directory. All related dependencies will be placed in this directory.
请将本程序放在一个空目录内执行, 所有相关依赖均会放置于此目录.
このプログラムを空のディレクトリで実行してください。関連するすべての依存関係は、このディレクトリに配置されます。

View File

@ -9,7 +9,7 @@ cd RWKV-Next-Web
git clone https://github.com/josStorer/RWKV-Runner --depth=1
python3 -m pip install torch torchvision torchaudio
python3 -m pip install -r RWKV-Runner/backend-python/requirements.txt
python3 ./RWKV-Runner/backend-python/main.py > log.txt & # this is only an example; you should use screen or another tool to run it in the background
python3 ./RWKV-Runner/backend-python/main.py > log.txt &
if [ ! -d RWKV-Runner/models ]; then
mkdir RWKV-Runner/models
@ -22,6 +22,6 @@ yarn install
yarn build
export PROXY_URL=""
export BASE_URL=http://127.0.0.1:8000
yarn start & # this is only an example; you should use screen or another tool to run it in the background
yarn start &
curl http://127.0.0.1:8000/switch-model -X POST -H "Content-Type: application/json" -d '{"model":"./RWKV-Runner/models/RWKV-4-World-0.1B-v1-20230520-ctx4096.pth","strategy":"cpu fp32"}'

View File

@ -1,19 +0,0 @@
: install git python3.10 npm by yourself
: change model and strategy according to your hardware
git clone https://github.com/josStorer/RWKV-Runner --depth=1
python -m pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117
python -m pip install -r RWKV-Runner/backend-python/requirements.txt
cd RWKV-Runner/frontend
call npm ci
call npm run build
cd ..
: optional: set ngrok_token=YOUR_NGROK_TOKEN
start python ./backend-python/main.py --webui
start "C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe" "http://127.0.0.1:8000"
powershell -Command "(Test-Path ./models) -or (mkdir models)"
powershell -Command "Import-Module BitsTransfer"
powershell -Command "(Test-Path ./models/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth) -or (Start-BitsTransfer https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth ./models/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth)"
powershell -Command "Invoke-WebRequest http://127.0.0.1:8000/switch-model -Method POST -ContentType 'application/json' -Body '{\"model\":\"./models/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth\",\"strategy\":\"cuda fp32 *20+\",\"deploy\":\"true\"}'"

View File

@ -1,22 +0,0 @@
# install git python3.10 npm by yourself
# change model and strategy according to your hardware
sudo apt install python3-dev
git clone https://github.com/josStorer/RWKV-Runner --depth=1
python3 -m pip install torch torchvision torchaudio
python3 -m pip install -r RWKV-Runner/backend-python/requirements.txt
cd RWKV-Runner/frontend
npm ci
npm run build
cd ..
# optional: export ngrok_token=YOUR_NGROK_TOKEN
python3 ./backend-python/main.py --webui > log.txt & # this is only an example; you should use screen or another tool to run it in the background
if [ ! -d models ]; then
mkdir models
fi
wget -N https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-0.1B-v1-20230520-ctx4096.pth -P models/
curl http://127.0.0.1:8000/switch-model -X POST -H "Content-Type: application/json" -d '{"model":"./models/RWKV-4-World-0.1B-v1-20230520-ctx4096.pth","strategy":"cpu fp32","deploy":"true"}'

View File

@ -1,18 +0,0 @@
services:
rwkv_runner:
image: rwkv-runner:latest
build: .
# Append "--rwkv.cpp" parameter to use rwkv.cpp
# command: python3.10 ./backend-python/main.py --port 27777 --host 0.0.0.0 --webui --rwkv.cpp
volumes:
- /mnt:/mnt
ports:
- "27777:27777"
# Comment out the following lines if using rwkv.cpp
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]

View File

@ -19,15 +19,14 @@ document.querySelectorAll('.grid.h-10.grid-cols-12.place-content-center.gap-x-3.
if (!data.name.endsWith('.bin') && !data.name.endsWith('.pth'))
return
data.desc = { en: '', zh: '', ja: '' }
data.desc = {en: '', zh: ''}
const rawText = await (await fetch(e.children[1].href.replace('/resolve/', '/raw/'))).text()
data.size = parseInt(extractValue(rawText, 'size'))
data.SHA256 = extractValue(rawText, 'oid sha256:')
data.lastUpdated = e.children[3].children[0].getAttribute('datetime')
data.url = e.children[1].href.replace('/resolve/', '/blob/').replace('?download=true', '')
data.downloadUrl = e.children[1].href.replace('?download=true', '')
data.tags = []
data.url = e.children[1].href.replace('/resolve/', '/blob/')
data.downloadUrl = e.children[1].href
modelsJson.push(data)
})

View File

@ -1,5 +1,7 @@
{"text": "The following is an epic science fiction masterpiece that is immortalized, with delicate descriptions and grand depictions of interstellar civilization wars.\nChapter 1.\nAs I sit down to write here amidst the shadows of vine-leaves under the blue sky of southern Italy, it comes to me with a certain quality of astonishment that my participation in these amazing adventures of Mr. Cavor was, after all, the outcome of the purest accident. It might have been any one. I fell into these things at a time when I thought myself removed from the slightest possibility of disturbing experiences. I had gone to Lympne because I had imagined it the most uneventful place in the world. “Here, at any rate,” said I, “I shall find peace and a chance to work!”"}
{"text": "Translate the following into Chinese.\n\nEnglish: What rooms do you have available?\nChinese: 你们有哪些房间可以提供"}
{"text": "User: Hello.\n\nAssistant: I'm here, meow~.\n\nUser: Can you tell some jokes?\n\nAssistant: Of course, master. What kind of jokes would you like to hear?"}
{"text": "Instruction: Write a story using the following information\n\nInput: A man named Alex chops a tree down\n\nResponse: Once upon a time, there was a man named Alex who lived in the heart of the forest. He had always been fascinated by trees and spent most of his days exploring the forest and learning about its many wonders. One day, while wandering through the woods, he stumbled upon an old oak tree that stood tall and proud in the middle of a clearing."}
{"text": "def get_args(args: Union[Sequence[str], None] = None):\n parser = argparse.ArgumentParser()\n group = parser.add_argument_group(title=\"server arguments\")\n group.add_argument(\n \"--port\",\n type=int,\n default=8000,\n help=\"port to run the server on (default: 8000)\",\n )\n group.add_argument(\n \"--host\",\n type=str,\n default=\"127.0.0.1\",\n help=\"host to run the server on (default: 127.0.0.1)\",\n )"}
{"text": "1:This is the first document."}
{"text": "2:Hello\nWorld"}
{"text": "3:1+1=2\n1+2=3\n2+2=4"}
{"text": "4:You will be training the GPT version because it's paralleziable and faster to train."}
{"text": "5:Read the inference code in src/model.py and try using the final hidden state(.xx .aa .bb)"}
{"text": "6:You can fine-tune the model with longer ctxLen and it can quickly adapt to longer ctxLens."}
{"text": "7:Consider RWKV 14B. The state has 200 vectors, that is, 5 vectors for each block: fp16 (xx), fp32 (aa), fp32 (bb), fp32 (pp), fp16 (xx)."}

View File

@ -23,7 +23,6 @@ def file_cleaner(file):
return cleaner
expected_max_version = float(sys.argv[2]) if len(sys.argv) > 2 else 100
model_file = open(sys.argv[1], "rb")
cleaner = file_cleaner(model_file)
cleaner_thread = threading.Thread(target=cleaner, daemon=True)
@ -32,34 +31,11 @@ cleaner_thread.start()
w = torch.load(model_file, map_location="cpu")
gc.collect()
vocab_size = w["emb.weight"].shape[0]
n_embd = w["emb.weight"].shape[1]
n_layer = 0
keys = list(w.keys())
version = 4
for x in keys:
layer_id = int(x.split(".")[1]) if ("blocks." in x) else 0
n_layer = max(n_layer, layer_id + 1)
if "ln_x" in x:
version = max(5, version)
if "gate.weight" in x:
version = max(5.1, version)
if int(version) == 5 and "att.time_decay" in x:
if len(w[x].shape) > 1:
if w[x].shape[1] > 1:
version = max(5.2, version)
if "time_maa" in x:
version = max(6, version)
params = f"--vocab_size {vocab_size} --n_layer {n_layer} --n_embd {n_embd}"
if version <= expected_max_version:
if version == 6:
params += ' --my_testing "x060"'
print(
f"v{int(version)}/train.py {params}",
end="",
)
else:
raise Exception(f"RWKV{version} is not supported")
print(f"--n_layer {n_layer} --n_embd {n_embd}", end="")

View File

@ -1,7 +1,5 @@
echo $@
if [[ ${cnMirror} == 1 ]]; then
export PIP_INDEX_URL="https://mirrors.aliyun.com/pypi/simple"
export PIP_INDEX_URL="https://pypi.tuna.tsinghua.edu.cn/simple"
if grep -q "mirrors.aliyun.com" /etc/apt/sources.list; then
echo "apt cnMirror already set"
else
@ -22,12 +20,6 @@ else
sudo apt -y install python3-pip
fi
if dpkg -s "python3-dev" >/dev/null 2>&1; then
echo "python3-dev installed"
else
sudo apt -y install python3-dev
fi
if dpkg -s "ninja-build" >/dev/null 2>&1; then
echo "ninja installed"
else
@ -53,13 +45,8 @@ else
fi
echo "loading $loadModel"
modelInfo=$(python3 ./finetune/get_layer_and_embd.py $loadModel 6.0)
modelInfo=$(python3 ./finetune/get_layer_and_embd.py $loadModel)
echo $modelInfo
if [[ $modelInfo =~ "--n_layer" ]]; then
sudo rm -rf /root/.cache/torch_extensions
python3 ./finetune/lora/$modelInfo $@ --proj_dir lora-models --data_type binidx --lora \
--lora_parts=att,ffn,time,ln --strategy deepspeed_stage_2 --accelerator gpu --ds_bucket_mb 2
else
echo "modelInfo is invalid"
exit 1
fi
python3 ./finetune/lora/train.py $modelInfo $@ --proj_dir lora-models --data_type binidx --lora \
--lora_parts=att,ffn,time,ln --strategy deepspeed_stage_2 --accelerator gpu

View File

@ -246,6 +246,5 @@ if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))

View File

@ -64,6 +64,5 @@ try:
torch.save(output_w, output)
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))

View File

@ -7,7 +7,6 @@ import struct
from functools import lru_cache
from itertools import accumulate
def print_rank_0(*message):
pass
# """If distributed is initialized print only on rank 0."""
@ -17,14 +16,12 @@ def print_rank_0(*message):
# else:
# print(*message, flush=True)
def _warmup_mmap_file(path):
pass
# with open(path, "rb") as stream:
# while stream.read(100 * 1024 * 1024):
# pass
dtypes = {
1: np.uint8,
2: np.int8,
@ -36,22 +33,18 @@ dtypes = {
8: np.uint16,
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + ".idx"
def data_file_path(prefix_path):
return prefix_path + ".bin"
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b"MMIDIDX\x00\x00"
@ -107,7 +100,7 @@ class MMapIndexedDataset(torch.utils.data.Dataset):
self._file.close()
return _Writer()
def __init__(self, path, skip_warmup=False):
with open(path, "rb") as stream:
magic_test = stream.read(9)
@ -224,7 +217,8 @@ class MMapIndexedDataset(torch.utils.data.Dataset):
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
raise ValueError(
"Slices into indexed_dataset must be contiguous")
ptr = self._index._pointers[start]
sizes = self._index._sizes[idx]
offsets = list(accumulate(sizes))

View File

@ -17,11 +17,9 @@ class MyDataset(Dataset):
if args.data_type == "binidx":
self.vocab_size = args.vocab_size
rank_zero_info(
f"Current vocab size = {self.vocab_size} (make sure it's correct)"
)
rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)")
if args.data_file.endswith("/"):
if args.data_file.endswith('/'):
d_all = []
for p in os.listdir(args.data_file):
if p.endswith(".idx"):
@ -31,52 +29,33 @@ class MyDataset(Dataset):
exit(0)
else:
self.data = MMapIndexedDataset(args.data_file)
self.data_size = (
len(self.data._bin_buffer) // self.data._index._dtype_size
)
self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size
rank_zero_info(f"Data has {self.data_size} tokens.")
if args.my_qa_mask > 0:
self.data_pile = MMapIndexedDataset(
"/fsx/BlinkDL/pile/pile_20B_tokenizer_text_document"
)
self.data_pile_size = (
len(self.data_pile._bin_buffer) // self.data._index._dtype_size
)
self.data_pile = MMapIndexedDataset('/fsx/BlinkDL/pile/pile_20B_tokenizer_text_document')
self.data_pile_size = len(self.data_pile._bin_buffer) // self.data._index._dtype_size
if args.my_pile_stage > 0:
# assert self.data_size == 332115325534 and self.vocab_size == 50277
self.samples_per_epoch = args.epoch_steps * args.real_bsz
assert self.samples_per_epoch == 40320
rank_zero_info(
f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########"
)
rank_zero_info(f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########")
dataset_slot = self.data_size // args.ctx_len
if args.my_pile_stage != 4:
assert MaybeIsPrime(args.magic_prime)
assert args.magic_prime % 3 == 2
assert (
args.magic_prime / dataset_slot > 0.99
and args.magic_prime / dataset_slot <= 1
)
assert args.magic_prime / dataset_slot > 0.99 and args.magic_prime / dataset_slot <= 1
elif args.data_type == "numpy":
self.data = np.load(args.data_file).astype("int")
self.vocab_size = args.vocab_size
rank_zero_info(
"Current vocab size =", self.vocab_size, "(make sure it's correct)"
)
rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)")
self.data_size = len(self.data)
rank_zero_info(f"Data has {self.data_size} tokens.")
elif args.data_type == "uint16":
self.data = (
np.fromfile(args.data_file, dtype=np.uint16)
.astype("int32")
.reshape(-1, args.my_sample_len)
)
self.data = np.fromfile(args.data_file, dtype=np.uint16).astype("int32").reshape(-1, args.my_sample_len)
self.vocab_size = args.vocab_size
rank_zero_info(
"Current vocab size =", self.vocab_size, "(make sure it's correct)"
)
rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)")
self.data_size = self.data.shape[0]
rank_zero_info(f"Data has {self.data_size} samples.")
elif args.data_type == "wds_img":
@ -107,14 +86,10 @@ class MyDataset(Dataset):
for u in unique:
xxObj[xx] = u
xx += 1
with open(
f"{args.proj_dir}/vocab.json", "w", encoding="utf-16le"
) as vocab_file:
with open(f"{args.proj_dir}/vocab.json", "w", encoding="utf-16le") as vocab_file:
vocab_file.write(json.dumps(xxObj, ensure_ascii=False))
self.data_size = len(self.data)
rank_zero_info(
f"Data has {self.data_size} tokens, {self.vocab_size} vocab size."
)
rank_zero_info(f"Data has {self.data_size} tokens, {self.vocab_size} vocab size.")
self.stoi = {ch: i for i, ch in enumerate(unique)}
self.itos = {i: ch for i, ch in enumerate(unique)}
@ -129,53 +104,36 @@ class MyDataset(Dataset):
# print(f"epoch {epoch} idx {idx} rank {rank}/{world_size}")
if args.data_type == "wds_img":
def init_wds(self, bias=0):
def identity(x):
return x
return x
import webdataset as wds
import torchvision.transforms as transforms
# img_transform = transforms.Compose(
# [transforms.CenterCrop(256)]
# )
img_transform = transforms.Compose(
[transforms.CenterCrop(512), transforms.Resize((args.my_img_size))]
)
self.data_raw = (
wds.WebDataset(args.data_file, resampled=True)
.shuffle(
10000,
initial=1000,
rng=random.Random(epoch * 100000 + rank + bias * 1e9),
)
.decode("torchrgb")
.to_tuple("jpg", "json", "txt")
.map_tuple(img_transform, identity, identity)
)
img_transform = transforms.Compose([
transforms.CenterCrop(512),
transforms.Resize((args.my_img_size))
])
self.data_raw = wds.WebDataset(args.data_file, resampled=True).shuffle(10000, initial=1000, rng=random.Random(epoch*100000+rank+bias*1e9)).decode("torchrgb").to_tuple("jpg", "json", "txt").map_tuple(img_transform, identity, identity)
for pp in self.data_raw.pipeline:
if "Resampled" in str(pp):
if 'Resampled' in str(pp):
pp.deterministic = True
def worker_seed():
return rank * 100000 + epoch + bias * 1e9
return rank*100000+epoch+bias*1e9
pp.worker_seed = worker_seed
self.data = iter(self.data_raw)
# print(f"WebDataset loaded for rank {rank} epoch {epoch}")
if self.data == None:
init_wds(self)
trial = 0
while trial < 10:
try:
dd = next(self.data) # jpg, json, txt
dd = next(self.data) # jpg, json, txt
break
except:
print(
f"[dataloader error - epoch {epoch} rank {rank} - trying a new shuffle]"
)
print(f'[dataloader error - epoch {epoch} rank {rank} - trying a new shuffle]')
self.error_count += 1
init_wds(self, self.error_count)
trial += 1
@ -186,7 +144,7 @@ class MyDataset(Dataset):
return dd[0], dd[2]
else:
if args.data_type == "uint16":
i = np.random.randint(0, self.data_size - 1)
i = np.random.randint(0, self.data_size-1)
dix = self.data[i]
x = torch.tensor(dix[:-1], dtype=torch.long)
y = torch.tensor(dix[1:], dtype=torch.long)
@ -238,12 +196,7 @@ class MyDataset(Dataset):
z_sum = 0
isGood = False
for i in range(3, ctx_len):
if (
dix[i] == 27
and dix[i - 1] == 34
and dix[i - 2] == 187
and dix[i - 3] == 187
):
if dix[i] == 27 and dix[i-1] == 34 and dix[i-2] == 187 and dix[i-3] == 187:
isGood = True
if dix[i] == 0:
isGood = False
@ -253,9 +206,7 @@ class MyDataset(Dataset):
if z_sum == 0:
z = [1] * ctx_len
i = np.random.randint(0, self.data_pile_size - req_len)
dix = self.data_pile.get(
idx=0, offset=i, length=req_len
).astype(int)
dix = self.data_pile.get(idx=0, offset=i, length=req_len).astype(int)
z = torch.tensor(z, dtype=torch.bfloat16)
x = torch.tensor(dix[:-1], dtype=torch.long)

View File

@ -5,7 +5,6 @@
import functools
import os, math, gc, importlib
import torch
# torch._C._jit_set_profiling_executor(True)
# torch._C._jit_set_profiling_mode(True)
import torch.nn as nn
@ -14,8 +13,7 @@ from torch.nn import functional as F
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorch_lightning.strategies import DeepSpeedStrategy
if importlib.util.find_spec("deepspeed"):
if importlib.util.find_spec('deepspeed'):
import deepspeed
from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam
@ -30,10 +28,9 @@ LORA_CONFIG = {
try:
print("RWKV_MY_TESTING", os.environ["RWKV_MY_TESTING"])
print('RWKV_MY_TESTING', os.environ["RWKV_MY_TESTING"])
except:
os.environ["RWKV_MY_TESTING"] = ""
os.environ["RWKV_MY_TESTING"] = ''
def __nop(ob):
return ob
@ -56,26 +53,7 @@ T_MAX = int(os.environ["RWKV_T_MAX"]) # TAKES LOTS OF VRAM!
from torch.utils.cpp_extension import load
if os.environ["RWKV_FLOAT_MODE"] == "bf16":
wkv_cuda = load(
name=f"wkv_{T_MAX}_bf16",
sources=[
"finetune/lora/v4/cuda/wkv_op_bf16.cpp",
"finetune/lora/v4/cuda/wkv_cuda_bf16.cu",
],
verbose=True,
extra_cuda_cflags=[
"-t 4",
"-std=c++17",
"-res-usage",
"--maxrregcount 60",
"--use_fast_math",
"-O3",
"-Xptxas -O3",
"--extra-device-vectorization",
f"-DTmax={T_MAX}",
],
)
wkv_cuda = load(name=f"wkv_{T_MAX}_bf16", sources=["finetune/lora/cuda/wkv_op_bf16.cpp", "finetune/lora/cuda/wkv_cuda_bf16.cu"], verbose=True, extra_cuda_cflags=["-t 4", "-std=c++17", "-res-usage", "--maxrregcount 60", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-DTmax={T_MAX}"])
class WKV(torch.autograd.Function):
@staticmethod
def forward(ctx, B, T, C, w, u, k, v):
@ -88,16 +66,10 @@ if os.environ["RWKV_FLOAT_MODE"] == "bf16":
u = u.contiguous()
k = k.contiguous()
v = v.contiguous()
y = torch.empty(
(B, T, C),
device=w.device,
memory_format=torch.contiguous_format,
dtype=torch.bfloat16,
)
y = torch.empty((B, T, C), device=w.device, memory_format=torch.contiguous_format, dtype=torch.bfloat16)
wkv_cuda.forward(B, T, C, w, u, k, v, y)
ctx.save_for_backward(w, u, k, v, y)
return y
@staticmethod
def backward(ctx, gy):
B = ctx.B
@ -106,54 +78,16 @@ if os.environ["RWKV_FLOAT_MODE"] == "bf16":
assert T <= T_MAX
assert B * C % min(C, 32) == 0
w, u, k, v, y = ctx.saved_tensors
gw = torch.empty(
(B, C),
device=gy.device,
memory_format=torch.contiguous_format,
dtype=torch.bfloat16,
)
gu = torch.empty(
(B, C),
device=gy.device,
memory_format=torch.contiguous_format,
dtype=torch.bfloat16,
)
gk = torch.empty(
(B, T, C),
device=gy.device,
memory_format=torch.contiguous_format,
dtype=torch.bfloat16,
)
gv = torch.empty(
(B, T, C),
device=gy.device,
memory_format=torch.contiguous_format,
dtype=torch.bfloat16,
)
gw = torch.empty((B, C), device=gy.device, memory_format=torch.contiguous_format, dtype=torch.bfloat16)
gu = torch.empty((B, C), device=gy.device, memory_format=torch.contiguous_format, dtype=torch.bfloat16)
gk = torch.empty((B, T, C), device=gy.device, memory_format=torch.contiguous_format, dtype=torch.bfloat16)
gv = torch.empty((B, T, C), device=gy.device, memory_format=torch.contiguous_format, dtype=torch.bfloat16)
wkv_cuda.backward(B, T, C, w, u, k, v, y, gy.contiguous(), gw, gu, gk, gv)
gw = torch.sum(gw, dim=0)
gu = torch.sum(gu, dim=0)
return (None, None, None, gw, gu, gk, gv)
else:
wkv_cuda = load(
name=f"wkv_{T_MAX}",
sources=[
"finetune/lora/v4/cuda/wkv_op.cpp",
"finetune/lora/v4/cuda/wkv_cuda.cu",
],
verbose=True,
extra_cuda_cflags=[
"-res-usage",
"--maxrregcount 60",
"--use_fast_math",
"-O3",
"-Xptxas -O3",
"--extra-device-vectorization",
f"-DTmax={T_MAX}",
],
)
wkv_cuda = load(name=f"wkv_{T_MAX}", sources=["finetune/lora/cuda/wkv_op.cpp", "finetune/lora/cuda/wkv_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--maxrregcount 60", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-DTmax={T_MAX}"])
class WKV(torch.autograd.Function):
@staticmethod
def forward(ctx, B, T, C, w, u, k, v):
@ -172,9 +106,7 @@ else:
u = u.float().contiguous()
k = k.float().contiguous()
v = v.float().contiguous()
y = torch.empty(
(B, T, C), device=w.device, memory_format=torch.contiguous_format
)
y = torch.empty((B, T, C), device=w.device, memory_format=torch.contiguous_format)
wkv_cuda.forward(B, T, C, w, u, k, v, y)
ctx.save_for_backward(w, u, k, v, y)
if "32" in os.environ["RWKV_FLOAT_MODE"]:
@ -183,7 +115,6 @@ else:
return y.half()
elif os.environ["RWKV_FLOAT_MODE"] == "bf16":
return y.bfloat16()
@staticmethod
def backward(ctx, gy):
B = ctx.B
@ -192,26 +123,14 @@ else:
assert T <= T_MAX
assert B * C % min(C, 32) == 0
w, u, k, v, y = ctx.saved_tensors
gw = torch.empty(
(B, C), device=gy.device, memory_format=torch.contiguous_format
)
gu = torch.empty(
(B, C), device=gy.device, memory_format=torch.contiguous_format
)
gk = torch.empty(
(B, T, C), device=gy.device, memory_format=torch.contiguous_format
)
gv = torch.empty(
(B, T, C), device=gy.device, memory_format=torch.contiguous_format
)
gw = torch.empty((B, C), device=gy.device, memory_format=torch.contiguous_format)
gu = torch.empty((B, C), device=gy.device, memory_format=torch.contiguous_format)
gk = torch.empty((B, T, C), device=gy.device, memory_format=torch.contiguous_format)
gv = torch.empty((B, T, C), device=gy.device, memory_format=torch.contiguous_format)
if "32" in os.environ["RWKV_FLOAT_MODE"]:
wkv_cuda.backward(
B, T, C, w, u, k, v, y, gy.contiguous(), gw, gu, gk, gv
)
wkv_cuda.backward(B, T, C, w, u, k, v, y, gy.contiguous(), gw, gu, gk, gv)
else:
wkv_cuda.backward(
B, T, C, w, u, k, v, y, gy.float().contiguous(), gw, gu, gk, gv
)
wkv_cuda.backward(B, T, C, w, u, k, v, y, gy.float().contiguous(), gw, gu, gk, gv)
gw = torch.sum(gw, dim=0)
gu = torch.sum(gu, dim=0)
if "32" in os.environ["RWKV_FLOAT_MODE"]:
@ -219,15 +138,7 @@ else:
elif os.environ["RWKV_FLOAT_MODE"] == "fp16":
return (None, None, None, gw.half(), gu.half(), gk.half(), gv.half())
elif os.environ["RWKV_FLOAT_MODE"] == "bf16":
return (
None,
None,
None,
gw.bfloat16(),
gu.bfloat16(),
gk.bfloat16(),
gv.bfloat16(),
)
return (None, None, None, gw.bfloat16(), gu.bfloat16(), gk.bfloat16(), gv.bfloat16())
def RUN_CUDA(B, T, C, w, u, k, v):
@ -240,17 +151,15 @@ def RUN_CUDA(B, T, C, w, u, k, v):
class LoraLinear(nn.Module):
def __init__(self, in_features: int, out_features: int, bias: bool):
super().__init__()
self.weight = nn.Parameter(torch.empty((out_features, in_features)))
assert bias == False, "Biased LoraLinear not supported"
r, alpha, dropout = (
LORA_CONFIG["r"],
LORA_CONFIG["alpha"],
LORA_CONFIG["dropout"],
)
r, alpha, dropout = LORA_CONFIG["r"], LORA_CONFIG[
"alpha"], LORA_CONFIG["dropout"]
self.lora_A = nn.Parameter(torch.empty(r, in_features))
self.lora_B = nn.Parameter(torch.empty(out_features, r))
self.lora_dropout = nn.Dropout(dropout)
@ -261,9 +170,9 @@ class LoraLinear(nn.Module):
nn.init.zeros_(self.lora_B)
def forward(self, x):
return F.linear(x, self.weight) + self.scaling * F.linear(
F.linear(self.lora_dropout(x), self.lora_A), self.lora_B
)
return (
F.linear(x, self.weight) + self.scaling *
F.linear(F.linear(self.lora_dropout(x), self.lora_A), self.lora_B))
@functools.wraps(LoraLinear)
@ -305,23 +214,17 @@ class RWKV_TimeMix(MyModule):
# fancy time_decay
decay_speed = torch.ones(args.dim_att)
for h in range(args.dim_att):
decay_speed[h] = -5 + 8 * (h / (args.dim_att - 1)) ** (
0.7 + 1.3 * ratio_0_to_1
)
decay_speed[h] = -5 + 8 * (h / (args.dim_att - 1)) ** (0.7 + 1.3 * ratio_0_to_1)
self.time_decay = nn.Parameter(decay_speed)
# print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy())
# fancy time_first
zigzag = torch.tensor([(i + 1) % 3 - 1 for i in range(args.dim_att)]) * 0.5
self.time_first = nn.Parameter(
torch.ones(args.dim_att) * math.log(0.3) + zigzag
)
self.time_first = nn.Parameter(torch.ones(args.dim_att) * math.log(0.3) + zigzag)
# fancy time_mix
self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0))
self.time_mix_v = nn.Parameter(
torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1
)
self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1)
self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0))
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
@ -332,10 +235,8 @@ class RWKV_TimeMix(MyModule):
self.output = nn.Linear(args.dim_att, args.n_embd, bias=False)
if "a" in os.environ["RWKV_MY_TESTING"]:
self.register_buffer(
"att_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))
)
if 'a' in os.environ["RWKV_MY_TESTING"]:
self.register_buffer("att_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len)))
d_qkv = args.n_embd // 16
self.qq = nn.Linear(args.n_embd, d_qkv, bias=False)
self.kk = nn.Linear(args.n_embd, d_qkv, bias=False)
@ -344,17 +245,12 @@ class RWKV_TimeMix(MyModule):
with torch.no_grad():
self.time_mix_qq = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0))
self.time_mix_kk = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0))
self.time_mix_vv = nn.Parameter(
torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1
)
if "a" not in os.environ["RWKV_MY_TESTING"]:
self.time_mix_vv = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1)
if 'a' not in os.environ["RWKV_MY_TESTING"]:
@MyFunction
def jit_func(self, x):
xx = self.time_shift(
x
) # Mix x with the previous timestep to produce xk, xv, xr
xx = self.time_shift(x) # Mix x with the previous timestep to produce xk, xv, xr
xk = x * self.time_mix_k + xx * (1 - self.time_mix_k)
xv = x * self.time_mix_v + xx * (1 - self.time_mix_v)
xr = x * self.time_mix_r + xx * (1 - self.time_mix_r)
@ -367,26 +263,21 @@ class RWKV_TimeMix(MyModule):
def forward(self, x):
B, T, C = x.size() # x = (Batch,Time,Channel)
sr, k, v = self.jit_func(x)
rwkv = sr * RUN_CUDA(
B, T, self.args.dim_att, self.time_decay, self.time_first, k, v
)
rwkv = sr * RUN_CUDA(B, T, self.args.dim_att, self.time_decay, self.time_first, k, v)
return self.output(rwkv)
if "a" in os.environ["RWKV_MY_TESTING"]:
if 'a' in os.environ["RWKV_MY_TESTING"]:
@MyFunction
def QKV(self, q, k, v):
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.att_mask == 0, float("-inf"))
att = F.softmax(att, dim=-1)
att = att.masked_fill(self.att_mask == 0, float('-inf'))
att = F.softmax(att, dim = -1)
x = att @ v
return x
@MyFunction
def jit_funcQKV(self, x):
xx = self.time_shift(
x
) # Mix x with the previous timestep to produce xk, xv, xr
xx = self.time_shift(x) # Mix x with the previous timestep to produce xk, xv, xr
xk = x * self.time_mix_k + xx * (1 - self.time_mix_k)
xv = x * self.time_mix_v + xx * (1 - self.time_mix_v)
xr = x * self.time_mix_r + xx * (1 - self.time_mix_r)
@ -405,16 +296,12 @@ class RWKV_TimeMix(MyModule):
def forward(self, x):
B, T, C = x.size() # x = (Batch,Time,Channel)
sr, k, v, qq, kk, vv = self.jit_funcQKV(x)
rwkv = sr * RUN_CUDA(
B, T, self.args.dim_att, self.time_decay, self.time_first, k, v
)
rwkv = sr * RUN_CUDA(B, T, self.args.dim_att, self.time_decay, self.time_first, k, v)
rwkv = self.output(rwkv) + self.oo(self.QKV(qq, kk, vv))
return rwkv
########################################################################################################
class RWKV_ChannelMix(MyModule):
def __init__(self, args, layer_id):
super().__init__()
@ -444,7 +331,6 @@ class RWKV_ChannelMix(MyModule):
kv = self.value(k)
return torch.sigmoid(self.receptance(xr)) * kv
class MishGLU(MyModule):
def __init__(self, args, layer_id):
super().__init__()
@ -474,7 +360,6 @@ class MishGLU(MyModule):
b = self.bb(xb)
return self.value(a * F.mish(b))
########################################################################################################
# The RWKV Model with our blocks
########################################################################################################
@ -492,19 +377,15 @@ class Block(nn.Module):
if self.layer_id == 0:
self.ln0 = nn.LayerNorm(args.n_embd)
if args.my_pos_emb > 0:
self.pos_emb_x = nn.Parameter(
torch.zeros((1, args.my_pos_emb, args.n_embd))
)
self.pos_emb_y = nn.Parameter(
torch.zeros((args.my_pos_emb, 1, args.n_embd))
)
self.pos_emb_x = nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd)))
self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd)))
if self.layer_id == 0 and self.args.pre_ffn > 0:
self.ffnPre = RWKV_ChannelMix(args, 0)
else:
self.att = RWKV_TimeMix(args, layer_id)
if "g" in os.environ["RWKV_MY_TESTING"]:
if 'g' in os.environ["RWKV_MY_TESTING"]:
self.ffn = MishGLU(args, layer_id)
else:
self.ffn = RWKV_ChannelMix(args, layer_id)
@ -514,9 +395,7 @@ class Block(nn.Module):
self.tiny_q = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False)
self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False)
self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False)
self.register_buffer(
"tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))
)
self.register_buffer("tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len)))
def forward(self, x, x_emb=None):
args = self.args
@ -524,7 +403,7 @@ class Block(nn.Module):
if self.layer_id == 0:
x = self.ln0(x)
if args.my_pos_emb > 0:
pos_emb = (self.pos_emb_x + self.pos_emb_y).reshape(T + 1, -1)[:-1, :]
pos_emb = (self.pos_emb_x + self.pos_emb_y).reshape(T+1, -1)[:-1,:]
x = x + pos_emb
if self.layer_id == 0 and args.pre_ffn > 0:
@ -564,13 +443,13 @@ class RWKV(pl.LightningModule):
def __init__(self, args):
super().__init__()
self.args = args
if not hasattr(args, "dim_att"):
if not hasattr(args, 'dim_att'):
args.dim_att = args.n_embd
if not hasattr(args, "dim_ffn"):
if not hasattr(args, 'dim_ffn'):
args.dim_ffn = args.n_embd * 4
if not hasattr(args, "tiny_att_layer"):
if not hasattr(args, 'tiny_att_layer'):
args.tiny_att_layer = -1
if not hasattr(args, "tiny_att_dim"):
if not hasattr(args, 'tiny_att_dim'):
args.tiny_att_dim = -1
self.emb = nn.Embedding(args.vocab_size, args.n_embd)
@ -583,9 +462,7 @@ class RWKV(pl.LightningModule):
if args.head_qk > 0:
self.head_q = nn.Linear(args.n_embd, args.head_qk, bias=False)
self.head_k = nn.Linear(args.n_embd, args.head_qk, bias=False)
self.register_buffer(
"copy_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))
)
self.register_buffer("copy_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len)))
def configure_optimizers(self):
args = self.args
@ -617,46 +494,19 @@ class RWKV(pl.LightningModule):
param_dict = {n: p for n, p in self.named_parameters()}
if args.my_pile_stage == 2:
optim_groups = [
{
"params": [param_dict[n] for n in lr_1x],
"weight_decay": 0.0,
"my_lr_scale": 1.0,
},
{
"params": [param_dict[n] for n in lr_2x],
"weight_decay": 0.0,
"my_lr_scale": 5.0,
}, # test: 2e-3 / args.lr_init},
{
"params": [param_dict[n] for n in lr_3x],
"weight_decay": 0.0,
"my_lr_scale": 5.0,
}, # test: 3e-3 / args.lr_init},
{"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0},
{"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 2e-3 / args.lr_init},
{"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 3e-3 / args.lr_init},
]
else:
optim_groups = [
{
"params": [param_dict[n] for n in lr_1x],
"weight_decay": 0.0,
"my_lr_scale": 1.0,
},
{
"params": [param_dict[n] for n in lr_2x],
"weight_decay": 0.0,
"my_lr_scale": 2.0,
},
{
"params": [param_dict[n] for n in lr_3x],
"weight_decay": 0.0,
"my_lr_scale": 3.0,
},
{"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0},
{"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 2.0},
{"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 3.0},
]
else:
optim_groups = [
{
"params": [p for n, p in self.named_parameters()],
"weight_decay": 0.0,
},
{"params": [p for n, p in self.named_parameters()], "weight_decay": 0.0},
]
for g in optim_groups:
@ -664,26 +514,8 @@ class RWKV(pl.LightningModule):
optim_groups = [g for g in optim_groups if len(g["params"]) > 0]
if self.deepspeed_offload:
return DeepSpeedCPUAdam(
optim_groups,
lr=self.args.lr_init,
betas=self.args.betas,
eps=self.args.adam_eps,
bias_correction=True,
adamw_mode=False,
weight_decay=0,
amsgrad=False,
)
return FusedAdam(
optim_groups,
lr=self.args.lr_init,
betas=self.args.betas,
eps=self.args.adam_eps,
bias_correction=True,
adam_w_mode=False,
weight_decay=0,
amsgrad=False,
)
return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=False, weight_decay=0, amsgrad=False)
return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=False, weight_decay=0, amsgrad=False)
# return ZeroOneAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, weight_decay=0, amsgrad=False, cuda_aware=False)
@property
@ -757,14 +589,10 @@ class RWKV(pl.LightningModule):
logits = self(idx)
if sum_mask == mask.shape[0]:
loss = F.cross_entropy(
logits.view(-1, logits.size(-1)), targets.view(-1)
)
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
# print('rank', self.global_rank, 'loss', loss.item())
else:
loss = F.cross_entropy(
logits.view(-1, logits.size(-1)), targets.view(-1), reduction="none"
)
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), reduction='none')
# loss_raw = loss
loss = torch.sum(loss * mask) / sum_mask
@ -804,14 +632,7 @@ class RWKV(pl.LightningModule):
gain = 1.0
scale = 1.0
if (
"ln_" in n
or ".ln" in n
or "time_" in n
or "_mask" in n
or "pos_emb" in n
or ".mask." in n
):
if "ln_" in n or ".ln" in n or "time_" in n or "_mask" in n or "pos_emb" in n or '.mask.' in n:
m[n] = p
else:
if n == "emb.weight":
@ -819,19 +640,7 @@ class RWKV(pl.LightningModule):
else:
if shape[0] > shape[1]:
gain = math.sqrt(shape[0] / shape[1])
for kk in [
".att.key.",
".att.receptance.",
".att.output.",
".att.key.",
".ffn.value.",
".ffn.receptance.",
".ffnPre.value.",
".ffnPre.receptance.",
"head_q.",
".oo.",
".rr.",
]:
for kk in [".att.key.", ".att.receptance.", ".att.output.", ".att.key.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.']:
if kk in n:
scale = 0
if n == "head.weight":
@ -841,9 +650,7 @@ class RWKV(pl.LightningModule):
if "head_q." in n:
scale = 0
print(
f"{str(shape[0]).ljust(5)} {str(shape[1]).ljust(5)} {str(scale).ljust(4)} {n}"
)
print(f"{str(shape[0]).ljust(5)} {str(shape[1]).ljust(5)} {str(scale).ljust(4)} {n}")
if self.args.accelerator.upper() == "GPU":
m[n] = torch.empty((shape[0], shape[1]), device="cuda")

View File

@ -5,17 +5,15 @@ import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from .model import LORA_CONFIG
def my_save(dd, ff):
if "14b-run1" not in ff:
if '14b-run1' not in ff:
torch.save(dd, ff)
else:
fn = ff.split("/")[-1]
fff = "/dev/shm/" + fn
fn = ff.split('/')[-1]
fff = '/dev/shm/' + fn
torch.save(dd, fff)
subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-14b-4k/{fn} --quiet", shell=True)
class train_callback(pl.Callback):
def __init__(self, args):
super().__init__()
@ -40,9 +38,7 @@ class train_callback(pl.Callback):
if args.lr_final == 0 or args.lr_init == 0: # linear decay
lr = args.lr_init + (args.lr_final - args.lr_init) * progress
else: # exp decay
lr = args.lr_init * math.exp(
math.log(args.lr_final / args.lr_init) * pow(progress, 1)
)
lr = args.lr_init * math.exp(math.log(args.lr_final / args.lr_init) * pow(progress, 1))
if trainer.global_step < w_step:
lr = lr * (0.2 + 0.8 * trainer.global_step / w_step)
@ -64,9 +60,7 @@ class train_callback(pl.Callback):
trainer.my_loss_sum = 0
trainer.my_loss_count = 0
trainer.my_log = open(args.proj_dir + "/train_log.txt", "a")
trainer.my_log.write(
f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n"
)
trainer.my_log.write(f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n")
try:
print(f"\n{trainer.strategy.config}\n")
trainer.my_log.write(f"{trainer.strategy.config}\n")
@ -76,7 +70,6 @@ class train_callback(pl.Callback):
if len(args.wandb) > 0:
print("Login to wandb...")
import wandb
wandb.init(
project=args.wandb,
name=args.run_name + " " + args.my_timestamp,
@ -109,26 +102,20 @@ class train_callback(pl.Callback):
# self.log("s", real_step, prog_bar=True, on_step=True)
if len(args.wandb) > 0:
lll = {
"loss": trainer.my_loss,
"lr": trainer.my_lr,
"Gtokens": real_step * token_per_step / 1e9,
}
lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "Gtokens": real_step * token_per_step / 1e9}
if kt_s > 0:
lll["kt/s"] = kt_s
trainer.my_wandb.log(lll, step=int(real_step))
if args.magic_prime > 0:
expand_factor = 2 if args.my_qa_mask > 0 else 1
if (
int(real_step)
== int(args.magic_prime * expand_factor // args.real_bsz) - 1
):
if int(real_step) == int(args.magic_prime * expand_factor // args.real_bsz) - 1:
to_save_dict = pl_module.state_dict()
my_save(
to_save_dict,
f"{args.proj_dir}/rwkv-final.pth",
)
def on_train_epoch_start(self, trainer, pl_module):
args = self.args
dataset = trainer.train_dataloader.dataset.datasets
@ -141,28 +128,24 @@ class train_callback(pl.Callback):
def on_train_epoch_end(self, trainer, pl_module):
args = self.args
if trainer.is_global_zero: # logging & save state_dict
if (
args.epoch_save > 0 and trainer.current_epoch % args.epoch_save == 0
) or trainer.current_epoch == args.epoch_count - 1:
if args.data_type == "wds_img":
if (args.epoch_save > 0 and trainer.current_epoch % args.epoch_save == 0) or trainer.current_epoch == args.epoch_count - 1:
if args.data_type == 'wds_img':
raw_dict = pl_module.state_dict()
to_save_dict = {}
for k in raw_dict:
if k.startswith("encoder.") or k.startswith("decoder."):
if k.startswith('encoder.') or k.startswith('decoder.'):
to_save_dict[k] = raw_dict[k]
else:
to_save_dict = pl_module.state_dict()
if args.lora:
enable_time_finetune = "time" in LORA_CONFIG["parts"]
enable_ln_finetune = "ln" in LORA_CONFIG["parts"]
enable_time_finetune = 'time' in LORA_CONFIG["parts"]
enable_ln_finetune = 'ln' in LORA_CONFIG["parts"]
lora_dict = {}
for name, state in to_save_dict.items():
if (
".lora_" in name
or (enable_time_finetune and ".time_" in name)
or (enable_ln_finetune and ".ln" in name)
):
if ('.lora_' in name
or (enable_time_finetune and '.time_' in name)
or (enable_ln_finetune and '.ln' in name)):
lora_dict[name] = state
to_save_dict = lora_dict
@ -172,10 +155,8 @@ class train_callback(pl.Callback):
f"{args.proj_dir}/rwkv-{args.epoch_begin + trainer.current_epoch}.pth",
)
except Exception as e:
print("Error\n\n", e, "\n\n")
trainer.my_log.write(
f"{args.epoch_begin + trainer.current_epoch} {trainer.my_epoch_loss:.6f} {math.exp(trainer.my_epoch_loss):.4f} {trainer.my_lr:.8f} {datetime.datetime.now()} {trainer.current_epoch}\n"
)
print('Error\n\n', e, '\n\n')
trainer.my_log.write(f"{args.epoch_begin + trainer.current_epoch} {trainer.my_epoch_loss:.6f} {math.exp(trainer.my_epoch_loss):.4f} {trainer.my_lr:.8f} {datetime.datetime.now()} {trainer.current_epoch}\n")
trainer.my_log.flush()
trainer.my_loss_sum = 0
@ -197,22 +178,22 @@ def generate_init_weight(model, init_weight_name):
mm[k] = src.reshape(mm[k].shape)
except:
tmp = mm[k].squeeze().clone()
print(k, src.shape, "-->", mm[k].shape)
print(k, src.shape, '-->', mm[k].shape)
ss = src.shape[0]
dd = tmp.shape[0]
for i in range(dd):
pos = i / dd * ss
if pos >= ss - 1:
tmp[i] = src[ss - 1]
tmp[i] = src[ss-1]
else:
p0 = int(math.floor(pos))
ii = pos - p0
tmp[i] = src[p0] * (1 - ii) + src[p0 + 1] * (ii)
tmp[i] = src[p0] * (1-ii) + src[p0+1] * (ii)
mm[k] = tmp.reshape(mm[k].shape)
sss = src.squeeze().float().cpu().numpy()
print(sss[:10], "...", sss[-10:])
print(sss[:10], '...', sss[-10:])
mmm = mm[k].squeeze().float().cpu().numpy()
print(mmm[:10], "...", mmm[-10:])
print(mmm[:10], '...', mmm[-10:])
print(f"Save to {init_weight_name}...")
torch.save(mm, init_weight_name)
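The except branch of generate_init_weight above handles loaded tensors whose size does not match the target: it linearly resamples the source vector onto the destination length, mapping index i to pos = i / dd * ss and interpolating between the two neighbouring source entries. A standalone sketch of that resampling (helper name is mine):
import math
import torch

def resample_1d(src, dst_len):
    # Linear resampling used when a loaded weight cannot simply be reshaped (sketch).
    ss, dd = src.shape[0], dst_len
    out = torch.empty(dd, dtype=src.dtype)
    for i in range(dd):
        pos = i / dd * ss
        if pos >= ss - 1:
            out[i] = src[ss - 1]
        else:
            p0 = int(math.floor(pos))
            frac = pos - p0
            out[i] = src[p0] * (1 - frac) + src[p0 + 1] * frac
    return out

print(resample_1d(torch.tensor([0.0, 1.0, 2.0, 3.0]), 8))
# tensor([0.0000, 0.5000, 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.0000])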

View File

@ -6,7 +6,6 @@ from torch.nn import functional as F
time_slot = {}
time_ref = time.time_ns()
def record_time(name):
if name not in time_slot:
time_slot[name] = 1e20
@ -14,23 +13,20 @@ def record_time(name):
if tt < time_slot[name]:
time_slot[name] = tt
class TOKENIZER:
def __init__(self, WORD_NAME, UNKNOWN_CHAR="\ue083"):
if "list" in str(type(WORD_NAME)):
class TOKENIZER():
def __init__(self, WORD_NAME, UNKNOWN_CHAR='\ue083'):
if 'list' in str(type(WORD_NAME)):
self.charMode = False
if WORD_NAME[0] == WORD_NAME[1]:
from transformers import PreTrainedTokenizerFast
self.tokenizer = PreTrainedTokenizerFast(tokenizer_file=WORD_NAME[0])
else:
from transformers import GPT2TokenizerFast
self.tokenizer = GPT2TokenizerFast(WORD_NAME[0], WORD_NAME[1])
self.vocab_size = len(self.tokenizer)
else:
self.charMode = True
with open(WORD_NAME + ".json", "r", encoding="utf-16") as result_file:
with open(WORD_NAME + '.json', "r", encoding="utf-16") as result_file:
self.word_table = json.load(result_file)
self.vocab_size = len(self.word_table)
@ -41,25 +37,23 @@ class TOKENIZER:
self.UNKNOWN_CHAR = self.stoi[UNKNOWN_CHAR]
def refine_context(self, context):
context = context.strip().split("\n")
context = context.strip().split('\n')
for c in range(len(context)):
context[c] = context[c].strip().strip("\u3000").strip("\r")
context = list(filter(lambda c: c != "", context))
context = "\n" + ("\n".join(context)).strip()
if context == "":
context = "\n"
context[c] = context[c].strip().strip('\u3000').strip('\r')
context = list(filter(lambda c: c != '', context))
context = '\n' + ('\n'.join(context)).strip()
if context == '':
context = '\n'
return context
def sample_logits(
self, out, x, ctx_len, temperature=1.0, top_p_usual=None, top_p_newline=None
):
def sample_logits(self, out, x, ctx_len, temperature=1.0, top_p_usual=None, top_p_newline=None):
# out[self.UNKNOWN_CHAR] = -float('Inf')
lastChar = int(x[-1])
probs = F.softmax(out, dim=-1)
if self.charMode:
if self.itos[lastChar] == "\n":
if self.itos[lastChar] == '\n':
top_p = top_p_newline
else:
top_p = top_p_usual
@ -87,7 +81,6 @@ class TOKENIZER:
out = torch.multinomial(probs, num_samples=1)[0]
return out
def MaybeIsPrime(number):
if FermatPrimalityTest(number) and MillerRabinPrimalityTest(number):
return True
@ -128,9 +121,7 @@ def MillerRabinPrimalityTest(number):
if (randomNumberWithPower != 1) and (randomNumberWithPower != number - 1):
iterationNumber = 1
while (iterationNumber <= timesTwoDividNumber - 1) and (
randomNumberWithPower != number - 1
):
while (iterationNumber <= timesTwoDividNumber - 1) and (randomNumberWithPower != number - 1):
randomNumberWithPower = pow(randomNumberWithPower, 2, number)
iterationNumber = iterationNumber + 1
if randomNumberWithPower != (number - 1):
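sample_logits above is standard top-p (nucleus) sampling with a separate top_p for positions that follow a newline; the cutoff computation itself falls in the part of the function elided between the hunks. As a hedged reference, here is a self-contained helper implementing the usual cutoff-and-renormalize step (my own sketch, not the class method):
import torch
import torch.nn.functional as F

def sample_top_p(logits, temperature=1.0, top_p=0.85):
    # Nucleus sampling sketch: keep the smallest set of tokens whose cumulative
    # probability reaches top_p, then sample from the renormalized remainder.
    probs = F.softmax(logits, dim=-1)
    sorted_probs, _ = torch.sort(probs, descending=True)
    cutoff_idx = (torch.cumsum(sorted_probs, dim=-1) >= top_p).nonzero()[0]
    cutoff = sorted_probs[cutoff_idx]
    probs[probs < cutoff] = 0
    if temperature != 1.0:
        probs = probs ** (1.0 / temperature)
    return torch.multinomial(probs, num_samples=1)[0]

print(sample_top_p(torch.randn(50), temperature=1.0, top_p=0.85))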

View File

@ -184,7 +184,7 @@ if __name__ == "__main__":
args.num_sanity_val_steps = 0
args.check_val_every_n_epoch = int(1e20)
args.log_every_n_steps = int(1e20)
args.max_epochs = args.epoch_count # -1 continue forever
args.max_epochs = -1 # continue forever
args.betas = (args.beta1, args.beta2)
args.real_bsz = int(args.num_nodes) * int(args.devices) * args.micro_bsz
os.environ["RWKV_T_MAX"] = str(args.ctx_len)
@ -264,7 +264,7 @@ if __name__ == "__main__":
#
# Data = {args.data_file} ({args.data_type}), ProjDir = {args.proj_dir}
#
# Epoch = {args.epoch_begin} to {args.epoch_begin + args.epoch_count - 1}, save every {args.epoch_save} epoch
# Epoch = {args.epoch_begin} to {args.epoch_begin + args.epoch_count - 1} (will continue afterwards), save every {args.epoch_save} epoch
#
# Each "epoch" = {args.epoch_steps} steps, {samples_per_epoch} samples, {tokens_per_epoch} tokens
#

View File

@ -1,202 +0,0 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;
template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C, const int H,
const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
F *__restrict__ const _y)
{
const int b = blockIdx.x / H;
const int h = blockIdx.x % H;
const int i = threadIdx.x;
_w += h*_N_;
_u += h*_N_;
__shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
float state[_N_] = {0};
__syncthreads();
w[i] = _w[i];
u[i] = float(_u[i]);
__syncthreads();
for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
{
__syncthreads();
r[i] = float(_r[t]);
k[i] = float(_k[t]);
__syncthreads();
const float v = float(_v[t]);
float y = 0;
#pragma unroll
for (int j = 0; j < _N_; j+=4)
{
const float4& r_ = (float4&)(r[j]);
const float4& k_ = (float4&)(k[j]);
const float4& w_ = (float4&)(w[j]);
const float4& u_ = (float4&)(u[j]);
float4& s = (float4&)(state[j]);
float4 x;
x.x = k_.x * v;
x.y = k_.y * v;
x.z = k_.z * v;
x.w = k_.w * v;
y += r_.x * (u_.x * x.x + s.x);
y += r_.y * (u_.y * x.y + s.y);
y += r_.z * (u_.z * x.z + s.z);
y += r_.w * (u_.w * x.w + s.w);
s.x = s.x * w_.x + x.x;
s.y = s.y * w_.y + x.y;
s.z = s.z * w_.z + x.z;
s.w = s.w * w_.w + x.w;
}
_y[t] = F(y);
}
}
template <typename F>
__global__ void kernel_backward(const int B, const int T, const int C, const int H,
const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const float *__restrict__ __w, const F *__restrict__ _u, const F *__restrict__ const _gy,
F *__restrict__ const _gr, F *__restrict__ const _gk, F *__restrict__ const _gv, F *__restrict__ const _gw, F *__restrict__ const _gu)
{
const int b = blockIdx.x / H;
const int h = blockIdx.x % H;
const int i = threadIdx.x;
_w += h*_N_;
_u += h*_N_;
__w += h*_N_;
__shared__ float w_[_N_], u_[_N_];
__shared__ float r[_N_], k[_N_], v[_N_], gy[_N_];
__syncthreads();
w_[i] = _w[i];
u_[i] = float(_u[i]);
__syncthreads();
const float w = w_[i];
const float ww = __w[i];
const float u = u_[i];
float state[_N_] = {0}, saaaa[_N_] = {0}, sbbbb[_N_] = {0}, scccc[_N_] = {0}, sdddd[_N_] = {0};
float gw = 0, gu = 0;
const int t000 = b*T*C + h*_N_ + i;
const int t111 = (b+1)*T*C + h*_N_ + i;
const int t222 = t111 - 2*C;
for (int t = t000; t < t111; t += C)
{
__syncthreads();
v[i] = float(_v[t]);
gy[i] = float(_gy[t]);
__syncthreads();
const float k = float(_k[t]);
float gr = 0, gu_ = 0;
#pragma unroll
for (int j = 0; j < _N_; j++)
{
float& s = state[j];
float x = k * v[j];
gr += (u * x + s) * gy[j];
gu_ += x * gy[j];
s = s * w + x;
}
_gr[t] = F(gr);
gu += float(_r[t]) * gu_;
}
_gu[b*C + h*_N_ + i] = F(gu);
for (int t = t000; t < t222; t += C)
{
__syncthreads();
v[i] = float(_v[t]);
gy[i] = float(_gy[t + 2*C]);
__syncthreads();
const float k = float(_k[t]);
float gw_ = 0;
#pragma unroll
for (int j = 0; j < _N_; j++)
{
float& s = saaaa[j];
float& s2 = sbbbb[j];
float x = k * v[j];
float tmp = w * (x + s);
s = tmp;
s2 = tmp + w * s2;
gw_ += s2 * gy[j];
}
gw += float(_r[t + 2*C]) * gw_;
}
_gw[b*C + h*_N_ + i] = F(ww * gw);
for (int t = t111 - C; t >= t000; t -= C)
{
__syncthreads();
v[i] = float(_v[t]);
gy[i] = float(_gy[t]);
__syncthreads();
const float rr = float(_r[t]);
float gk = 0;
#pragma unroll
for (int j = 0; j < _N_; j++)
{
float& s = scccc[j];
float x = rr * gy[j];
gk += (u * x + s) * v[j];
s = x + s * w;
}
_gk[t] = F(gk);
}
for (int t = t111 - C; t >= t000; t -= C)
{
__syncthreads();
r[i] = float(_r[t]);
k[i] = float(_k[t]);
__syncthreads();
const float gyy = float(_gy[t]);
float gv = 0;
#pragma unroll
for (int j = 0; j < _N_; j++)
{
float& s = sdddd[j];
float x = gyy * r[j];
gv += (u_[j] * x + s) * k[j];
s = x + s * w_[j];
}
_gv[t] = F(gv);
}
}
void cuda_forward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
{
assert(H*_N_ == C);
assert(_N_%4 == 0);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, y);
}
void cuda_backward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, float *w, float *ww, bf16 *u, bf16 *gy, bf16 *gr, bf16 *gk, bf16 *gv, bf16 *gw, bf16 *gu)
{
assert(H*_N_ == C);
assert(_N_%4 == 0);
kernel_backward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, ww, u, gy, gr, gk, gv, gw, gu);
}
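For readers who prefer not to parse the CUDA, the forward kernel above computes, per head and per output channel i, y_i = sum_j r_j * (u_j * k_j * v_i + state_ij), followed by the update state_ij = w_j * state_ij + k_j * v_i. A slow but straightforward PyTorch reference under an assumed [B, T, H, N] layout (a sketch for checking shapes and semantics, not a drop-in replacement for the kernel):
import torch

def wkv5_reference(r, k, v, w, u):
    # r, k, v: [B, T, H, N]; w, u: [H, N]; w is the per-channel decay (already exp(-exp(time_decay))).
    r, k, v, w, u = [t.float() for t in (r, k, v, w, u)]
    B, T, H, N = r.shape
    y = torch.zeros_like(v)
    state = torch.zeros(B, H, N, N)        # state[..., i, j]: output channel i, key channel j
    w = w.view(1, H, 1, N)                 # decay indexed by the key channel j
    u = u.view(1, H, 1, N)                 # "bonus" for the current token, also indexed by j
    for t in range(T):
        kv = v[:, t].unsqueeze(-1) * k[:, t].unsqueeze(-2)          # kv[..., i, j] = v_i * k_j
        y[:, t] = torch.einsum('bhj,bhij->bhi', r[:, t], u * kv + state)
        state = w * state + kv
    return y

out = wkv5_reference(torch.randn(1, 4, 2, 8), torch.randn(1, 4, 2, 8),
                     torch.randn(1, 4, 2, 8), torch.rand(2, 8), torch.randn(2, 8))
print(out.shape)  # torch.Size([1, 4, 2, 8])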

View File

@ -1,22 +0,0 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;
void cuda_forward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
void cuda_backward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, float *w, float *ww, bf16 *u, bf16 *gy, bf16 *gr, bf16 *gk, bf16 *gv, bf16 *gw, bf16 *gu);
void forward(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
cuda_forward(B, T, C, H, r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void backward(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &ww, torch::Tensor &u, torch::Tensor &gy, torch::Tensor &gr, torch::Tensor &gk, torch::Tensor &gv, torch::Tensor &gw, torch::Tensor &gu) {
cuda_backward(B, T, C, H, r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), ww.data_ptr<float>(), u.data_ptr<bf16>(), gy.data_ptr<bf16>(), gr.data_ptr<bf16>(), gk.data_ptr<bf16>(), gv.data_ptr<bf16>(), gw.data_ptr<bf16>(), gu.data_ptr<bf16>());
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &forward, "wkv5 forward");
m.def("backward", &backward, "wkv5 backward");
}
TORCH_LIBRARY(wkv5, m) {
m.def("forward", forward);
m.def("backward", backward);
}

View File

View File

@ -1,303 +0,0 @@
from lib2to3.pgen2 import token
import os
import torch
import numpy as np
import shutil
import struct
from functools import lru_cache
from itertools import accumulate
def print_rank_0(*message):
pass
# """If distributed is initialized print only on rank 0."""
# if torch.distributed.is_initialized():
# if torch.distributed.get_rank() == 0:
# print(*message, flush=True)
# else:
# print(*message, flush=True)
def _warmup_mmap_file(path):
pass
# with open(path, "rb") as stream:
# while stream.read(100 * 1024 * 1024):
# pass
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: float,
7: np.double,
8: np.uint16,
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + ".idx"
def data_file_path(prefix_path):
return prefix_path + ".bin"
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b"MMIDIDX\x00\x00"
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, "wb")
# Write magic string so we can check the file format when opening it again.
self._file.write(cls._HDR_MAGIC)
# Write version number
# Little endian unsigned 64 Bit integer
self._file.write(struct.pack("<Q", 1))
# Little endian unsigned 8 Bit integer
self._file.write(struct.pack("<B", code(dtype)))
return self
@staticmethod
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes, doc_idx):
pointers = self._get_pointers(sizes)
# Little endian unsigned 64 Bit integer
self._file.write(struct.pack("<Q", len(sizes)))
# Little endian unsigned 64 Bit integer
self._file.write(struct.pack("<Q", len(doc_idx)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order="C"))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order="C"))
del pointers
doc_idx = np.array(doc_idx, dtype=np.int64)
self._file.write(doc_idx.tobytes(order="C"))
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path, skip_warmup=False):
with open(path, "rb") as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
# Little endian unsigned 64 Bit integer
version = struct.unpack("<Q", stream.read(8))
assert (1,) == version
# Little endian unsigned 8 Bit integer
(dtype_code,) = struct.unpack("<B", stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack("<Q", stream.read(8))[0]
self._doc_count = struct.unpack("<Q", stream.read(8))[0]
offset = stream.tell()
if not skip_warmup:
print_rank_0(" warming up index mmap file...")
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
print_rank_0(" reading sizes...")
self._sizes = np.frombuffer(
self._bin_buffer, dtype=np.int32, count=self._len, offset=offset
)
print_rank_0(" reading pointers...")
self._pointers = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._len,
offset=offset + self._sizes.nbytes,
)
print_rank_0(" reading document index...")
self._doc_idx = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._doc_count,
offset=offset + self._sizes.nbytes + self._pointers.nbytes,
)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@property
def doc_idx(self):
return self._doc_idx
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path, skip_warmup=False):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path, skip_warmup)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path, skip_warmup):
self._path = path
self._index = self.Index(index_file_path(self._path), skip_warmup)
if not skip_warmup:
print_rank_0(" warming up data mmap file...")
_warmup_mmap_file(data_file_path(self._path))
print_rank_0(" creating numpy buffer of mmap...")
self._bin_buffer_mmap = np.memmap(
data_file_path(self._path), mode="r", order="C"
)
print_rank_0(" creating memory view of numpy buffer...")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
ptr, size = self._index[idx]
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
)
return np_array
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
ptr = self._index._pointers[start]
sizes = self._index._sizes[idx]
offsets = list(accumulate(sizes))
total_size = sum(sizes)
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr
)
sents = np.split(np_array, offsets[:-1])
return sents
def get(self, idx, offset=0, length=None):
"""Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
ptr, size = self._index[idx]
if length is None:
length = size - offset
ptr += offset * np.dtype(self._index.dtype).itemsize
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr
)
return np_array
def pad(self, idx, length=None):
ptr, size = self._index[idx]
try:
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr
)
except:
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
)
ptr0, _ = self._index[0]
np_array0 = np.frombuffer(
self._bin_buffer,
dtype=self._index.dtype,
count=length - size,
offset=ptr0,
)
np_array = np.append(np_array, np_array0)
return np_array
def only(self, idx):
ptr, size = self._index[idx]
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
)
return np_array
@property
def sizes(self):
return self._index.sizes
@property
def doc_idx(self):
return self._index.doc_idx
def get_doc_idx(self):
return self._index._doc_idx
def set_doc_idx(self, doc_idx_):
self._index._doc_idx = doc_idx_
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return os.path.exists(index_file_path(path)) and os.path.exists(
data_file_path(path)
)
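The Index writer above fixes the .idx on-disk layout: a 9-byte magic (MMIDIDX\x00\x00), a little-endian uint64 version (always 1), a uint8 dtype code from the dtypes table, then uint64 counts for the sizes and doc_idx arrays, followed by the int32 sizes, int64 pointers, and int64 document index. A minimal header reader consistent with that layout (sketch only, not the repository's loader):
import struct
import numpy as np

DTYPES = {1: np.uint8, 2: np.int8, 3: np.int16, 4: np.int32,
          5: np.int64, 6: float, 7: np.double, 8: np.uint16}

def read_idx_header(path):
    # Parse the fixed-size header written by MMapIndexedDataset.Index.writer above.
    with open(path, "rb") as f:
        assert f.read(9) == b"MMIDIDX\x00\x00", "not an MMapIndexedDataset .idx file"
        (version,) = struct.unpack("<Q", f.read(8))      # always 1
        (dtype_code,) = struct.unpack("<B", f.read(1))   # key into the dtypes table
        (length,) = struct.unpack("<Q", f.read(8))       # number of sequences
        (doc_count,) = struct.unpack("<Q", f.read(8))    # number of document boundaries
    return {"version": version, "dtype": DTYPES[dtype_code], "len": length, "doc_count": doc_count}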

View File

@ -1,241 +0,0 @@
########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################
import json, math, random, os, sys
import numpy as np
import torch
from torch.utils.data import Dataset
from pytorch_lightning.utilities import rank_zero_info
from .binidx import MMapIndexedDataset
from .utils import MaybeIsPrime
class MyDataset(Dataset):
def __init__(self, args):
self.args = args
if args.data_type == "binidx":
self.vocab_size = args.vocab_size
rank_zero_info(
f"Current vocab size = {self.vocab_size} (make sure it's correct)"
)
if args.my_pile_version == 1:
self.data = MMapIndexedDataset(args.data_file)
self.data_size = (
len(self.data._bin_buffer) // self.data._index._dtype_size
)
rank_zero_info(f"Data has {self.data_size} tokens.")
elif args.my_pile_version == 2:
data_list = (
open(args.data_file, "r", encoding="utf-8")
.read()
.strip()
.split("\n")
)
data_list = [i.strip().split(" ") for i in data_list]
self.data = []
self.data_size = int(data_list[-1][-1])
rank_zero_info(f"Data has {self.data_size} chunks.")
for d in data_list:
data = MMapIndexedDataset(d[0])
data_size = len(data._bin_buffer) // data._index._dtype_size
assert (data_size - args.ctx_len) == int(d[1])
self.data += [[int(d[-1]), int(d[1]), data]]
# rank_zero_info(self.data)
if args.my_qa_mask > 0:
# self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document')
self.data_pile = MMapIndexedDataset(
"/fsx/pile_deduped/pile_0.87_deduped_text_document"
)
self.data_pile_size = (
len(self.data_pile._bin_buffer) // self.data._index._dtype_size
)
else:
self.data_pile = None
self.data_pile_size = 0
if args.my_pile_stage > 0:
# assert self.data_size == 332115325534 and self.vocab_size == 50277
self.samples_per_epoch = args.epoch_steps * args.real_bsz
assert self.samples_per_epoch == 40320
rank_zero_info(
f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########"
)
dataset_slot = self.data_size // args.ctx_len
if args.my_pile_stage != 4:
assert MaybeIsPrime(args.magic_prime)
assert args.magic_prime % 3 == 2
assert (
args.magic_prime / dataset_slot > 0.99
and args.magic_prime / dataset_slot <= 1
)
elif args.data_type == "numpy":
self.data = np.load(args.data_file).astype("int")
self.vocab_size = args.vocab_size
rank_zero_info(
f"Current vocab size = {self.vocab_size} (make sure it's correct)"
)
self.data_size = len(self.data)
rank_zero_info(f"Data has {self.data_size} tokens.")
elif args.data_type == "uint16":
self.data = (
np.fromfile(args.data_file, dtype=np.uint16)
.astype("int32")
.reshape(-1, args.my_sample_len)
)
self.vocab_size = args.vocab_size
rank_zero_info(
f"Current vocab size = {self.vocab_size} (make sure it's correct)"
)
self.data_size = self.data.shape[0]
rank_zero_info(f"Data has {self.data_size} samples.")
else:
if args.data_type == "dummy":
rank_zero_info("Building dummy data...")
self.data = ""
for i in range(100000):
aa = (i) % 10000
bb = (i * i) % 10000
cc = aa + bb
self.data += f".{aa}+{bb}={cc}."
else:
self.data = open(args.data_file, "r", encoding=args.data_type).read()
rank_zero_info("Building token list...")
unique = sorted(list(set(self.data)))
self.vocab_size = len(unique)
# rank_zero_info()
# for u in unique:
# print(u, end=' ')
# rank_zero_info('\n\n')
xx = 0
xxObj = {}
for u in unique:
xxObj[xx] = u
xx += 1
with open(
f"{args.proj_dir}/vocab.json", "w", encoding="utf-8"
) as vocab_file:
vocab_file.write(json.dumps(xxObj, ensure_ascii=False))
self.data_size = len(self.data)
rank_zero_info(
f"Data has {self.data_size} tokens, {self.vocab_size} vocab size."
)
self.stoi = {ch: i for i, ch in enumerate(unique)}
self.itos = {i: ch for i, ch in enumerate(unique)}
def __len__(self):
return self.args.epoch_steps * self.args.micro_bsz
def __getitem__(self, idx):
args = self.args
rank = self.global_rank
epoch = self.real_epoch
world_size = self.world_size
# print(f"epoch {epoch} idx {idx} rank {rank}/{world_size}")
if args.data_type == "uint16":
i = np.random.randint(0, self.data_size - 1)
dix = self.data[i]
x = torch.tensor(dix[:-1], dtype=torch.long)
y = torch.tensor(dix[1:], dtype=torch.long)
else:
ctx_len = args.ctx_len
req_len = ctx_len + 1
magic_prime = args.magic_prime
data = self.data
if args.my_pile_stage > 0:
ii = 1 + epoch * self.samples_per_epoch + (idx * world_size) + rank
if args.my_qa_mask > 0:
ii_orig = ii
if ii % 2 == 0:
ii = -1
data = self.data_pile
else:
ii = ii // 2
if data == self.data_pile:
i = np.random.randint(0, self.data_pile_size - req_len)
else:
if args.my_pile_stage == 4 or ii < args.my_random_steps:
# cheat: pick a random spot in dataset
if args.my_pile_version == 1:
i = np.random.randint(0, self.data_size - req_len)
else:
i = np.random.randint(0, self.data_size)
else:
ii = ii - args.my_random_steps
factor = (math.sqrt(5) - 1) / 2
factor = int(magic_prime * factor)
i = ((factor * ii * ii * ii) % magic_prime) * ctx_len
i = i + args.my_pile_shift
# print(f"epoch {epoch} idx {idx} rank {rank}/{world_size} ii {ii} pos {round(i / self.data_size, 3)}")
else:
# cheat: pick a random spot in dataset
i = np.random.randint(0, self.data_size - req_len)
if args.data_type == "binidx":
if args.my_pile_version == 1:
dix = data.get(idx=0, offset=i, length=req_len).astype(int)
else:
# self.data : cutoff, chunk_count, data
for j in range(len(data)):
if i < data[j][0]:
ii = i
i = (i - (data[j - 1][0] if j > 0 else 0)) % data[j][1]
dix = (
data[j][2]
.get(idx=0, offset=i, length=req_len)
.astype(int)
)
# print(ii, j, i)
break
elif args.data_type == "numpy":
dix = data[i : i + req_len]
else:
dix = [self.stoi[s] for s in data[i : i + req_len]]
if args.my_qa_mask == 1:
if data == self.data_pile:
z = [1] * ctx_len
else:
z = [0] * ctx_len
z_sum = 0
isGood = False
for i in range(3, ctx_len):
if (
dix[i] == 27
and dix[i - 1] == 34
and dix[i - 2] == 187
and dix[i - 3] == 187
):
isGood = True
if dix[i] == 0:
isGood = False
if isGood:
z[i] = 1
z_sum += 1
if z_sum == 0:
z = [1] * ctx_len
i = np.random.randint(0, self.data_pile_size - req_len)
dix = self.data_pile.get(
idx=0, offset=i, length=req_len
).astype(int)
z = torch.tensor(z, dtype=torch.bfloat16)
x = torch.tensor(dix[:-1], dtype=torch.long)
y = torch.tensor(dix[1:], dtype=torch.long)
# if ii_orig < 50:
# # if rank == 1:
# print('rank', rank, 'i', ii_orig, ii, i, 'x', x[:5], '...', x[-5:])
# else:
# exit(0)
if args.my_qa_mask == 1:
return x, y, z
return x, y
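The magic_prime indexing above, i = ((factor * ii**3) % magic_prime) * ctx_len with factor the golden-ratio fraction of magic_prime, visits every ctx_len-aligned slot exactly once in a pseudo-random order: cubing is a bijection mod p whenever p is a prime with p % 3 == 2, which is exactly what the dataset asserts about magic_prime. A quick check of that property on a toy prime (numbers here are illustrative):
# Toy check that i -> (factor * i**3) % p hits every residue exactly once when
# p is a prime with p % 3 == 2 (the assertion made about magic_prime above).
p = 2999                                  # small illustrative prime, p % 3 == 2
factor = int((5 ** 0.5 - 1) / 2 * p)      # same golden-ratio factor as in __getitem__
seen = {(factor * i ** 3) % p for i in range(p)}
assert len(seen) == p                     # bijective: every slot is visited once
print("visited", len(seen), "distinct slots")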

View File

@ -1,819 +0,0 @@
########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################
import functools
import os, math, gc, importlib
import torch
# torch._C._jit_set_profiling_executor(True)
# torch._C._jit_set_profiling_mode(True)
import torch.nn as nn
from torch.utils.checkpoint import checkpoint as torch_checkpoint
from torch.nn import functional as F
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorch_lightning.strategies import DeepSpeedStrategy
if importlib.util.find_spec("deepspeed"):
import deepspeed
from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam
# from deepspeed.runtime.fp16.onebit.zoadam import ZeroOneAdam
# lora-config
LORA_CONFIG = {
"r": 0,
"alpha": 0,
"dropout": 0,
"parts": {"att", "ln", "time"},
}
try:
print("RWKV_MY_TESTING", os.environ["RWKV_MY_TESTING"])
except:
os.environ["RWKV_MY_TESTING"] = ""
def __nop(ob):
return ob
MyModule = nn.Module
MyFunction = __nop
if os.environ["RWKV_JIT_ON"] == "1":
MyModule = torch.jit.ScriptModule
MyFunction = torch.jit.script_method
########################################################################################################
# CUDA Kernel
########################################################################################################
from torch.utils.cpp_extension import load
HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"])
wkv5_cuda = load(
name="wkv5",
sources=[
"finetune/lora/v5/cuda/wkv5_op.cpp",
f"finetune/lora/v5/cuda/wkv5_cuda.cu",
],
verbose=True,
extra_cuda_cflags=[
"-res-usage",
"--use_fast_math",
"-O3",
"-Xptxas -O3",
"--extra-device-vectorization",
f"-D_N_={HEAD_SIZE}",
],
)
class WKV_5(torch.autograd.Function):
@staticmethod
def forward(ctx, B, T, C, H, r, k, v, w, u):
with torch.no_grad():
assert r.dtype == torch.bfloat16
assert k.dtype == torch.bfloat16
assert v.dtype == torch.bfloat16
assert w.dtype == torch.bfloat16
assert u.dtype == torch.bfloat16
assert HEAD_SIZE == C // H
ctx.B = B
ctx.T = T
ctx.C = C
ctx.H = H
assert r.is_contiguous()
assert k.is_contiguous()
assert v.is_contiguous()
assert w.is_contiguous()
assert u.is_contiguous()
ew = (-torch.exp(w.float())).contiguous()
eew = (torch.exp(ew)).contiguous()
ctx.save_for_backward(r, k, v, eew, ew, u)
y = torch.empty(
(B, T, C),
device=r.device,
dtype=torch.bfloat16,
memory_format=torch.contiguous_format,
) # .uniform_(-1, 1)
wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y)
return y
@staticmethod
def backward(ctx, gy):
with torch.no_grad():
assert gy.dtype == torch.bfloat16
B = ctx.B
T = ctx.T
C = ctx.C
H = ctx.H
assert gy.is_contiguous()
r, k, v, eew, ew, u = ctx.saved_tensors
gr = torch.empty(
(B, T, C),
device=gy.device,
requires_grad=False,
dtype=torch.bfloat16,
memory_format=torch.contiguous_format,
) # .uniform_(-1, 1)
gk = torch.empty(
(B, T, C),
device=gy.device,
requires_grad=False,
dtype=torch.bfloat16,
memory_format=torch.contiguous_format,
) # .uniform_(-1, 1)
gv = torch.empty(
(B, T, C),
device=gy.device,
requires_grad=False,
dtype=torch.bfloat16,
memory_format=torch.contiguous_format,
) # .uniform_(-1, 1)
gw = torch.empty(
(B, C),
device=gy.device,
requires_grad=False,
dtype=torch.bfloat16,
memory_format=torch.contiguous_format,
) # .uniform_(-1, 1)
gu = torch.empty(
(B, C),
device=gy.device,
requires_grad=False,
dtype=torch.bfloat16,
memory_format=torch.contiguous_format,
) # .uniform_(-1, 1)
wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu)
gw = torch.sum(gw, 0).view(H, C // H)
gu = torch.sum(gu, 0).view(H, C // H)
return (None, None, None, None, gr, gk, gv, gw, gu)
def RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w, u):
return WKV_5.apply(B, T, C, H, r, k, v, w, u)
#################################################################
class LoraLinear(nn.Module):
def __init__(self, in_features: int, out_features: int, bias: bool):
super().__init__()
self.weight = nn.Parameter(torch.empty((out_features, in_features)))
assert bias == False, "Biased LoraLinear not supported"
r, alpha, dropout = (
LORA_CONFIG["r"],
LORA_CONFIG["alpha"],
LORA_CONFIG["dropout"],
)
self.lora_A = nn.Parameter(torch.empty(r, in_features))
self.lora_B = nn.Parameter(torch.empty(out_features, r))
self.lora_dropout = nn.Dropout(dropout)
self.scaling = alpha / r
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
nn.init.zeros_(self.lora_B)
def forward(self, x):
return F.linear(x, self.weight) + self.scaling * F.linear(
F.linear(self.lora_dropout(x), self.lora_A), self.lora_B
)
@functools.wraps(LoraLinear)
def make_linear_att(*args, **kwargs):
if "att" in LORA_CONFIG["parts"] and LORA_CONFIG["r"] > 0:
return LoraLinear(*args, **kwargs)
else:
return nn.Linear(*args, **kwargs)
@functools.wraps(LoraLinear)
def make_linear_ffn(*args, **kwargs):
if "ffn" in LORA_CONFIG["parts"] and LORA_CONFIG["r"] > 0:
return LoraLinear(*args, **kwargs)
else:
return nn.Linear(*args, **kwargs)
########################################################################################################
class RWKV_TimeMix_RWKV5(MyModule):
def __init__(self, args, layer_id):
super().__init__()
self.args = args
self.layer_id = layer_id
self.head_size = args.head_size_a
assert HEAD_SIZE == self.head_size # change HEAD_SIZE to match args.head_size_a
self.n_head = args.dim_att // self.head_size
assert args.dim_att % self.n_head == 0
self.head_size_divisor = args.head_size_divisor
with torch.no_grad():
ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1
ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0
ddd = torch.ones(1, 1, args.n_embd)
for i in range(args.n_embd):
ddd[0, 0, i] = i / args.n_embd
# fancy time_mix
self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0))
self.time_mix_v = nn.Parameter(
torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1
)
self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0))
self.time_mix_g = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0))
# fancy time_decay
decay_speed = torch.ones(args.dim_att)
for n in range(args.dim_att):
decay_speed[n] = -6 + 5 * (n / (args.dim_att - 1)) ** (
0.7 + 1.3 * ratio_0_to_1
)
self.time_decay = nn.Parameter(
decay_speed.reshape(self.n_head, self.head_size)
)
# print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy())
tmp = torch.zeros(args.dim_att)
for n in range(args.dim_att):
zigzag = ((n + 1) % 3 - 1) * 0.1
tmp[n] = ratio_0_to_1 * (1 - (n / (args.dim_att - 1))) + zigzag
self.time_faaaa = nn.Parameter(tmp.reshape(self.n_head, self.head_size))
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
self.receptance = make_linear_att(args.n_embd, args.dim_att, bias=False)
self.key = make_linear_att(args.n_embd, args.dim_att, bias=False)
self.value = make_linear_att(args.n_embd, args.dim_att, bias=False)
self.output = nn.Linear(args.dim_att, args.n_embd, bias=False)
self.gate = make_linear_att(args.n_embd, args.dim_att, bias=False)
self.ln_x = nn.GroupNorm(self.n_head, args.dim_att)
@MyFunction
def jit_func(self, x):
B, T, C = x.size()
xx = self.time_shift(
x
) # Mix x with the previous timestep to produce xk, xv, xr
xk = x * self.time_mix_k + xx * (1 - self.time_mix_k)
xv = x * self.time_mix_v + xx * (1 - self.time_mix_v)
xr = x * self.time_mix_r + xx * (1 - self.time_mix_r)
xg = x * self.time_mix_g + xx * (1 - self.time_mix_g)
r = self.receptance(xr)
k = self.key(xk)
v = self.value(xv)
g = F.silu(self.gate(xg))
return r, k, v, g
@MyFunction
def jit_func_2(self, x, g):
B, T, C = x.size()
x = x.view(B * T, C)
x = self.ln_x(x / self.head_size_divisor).view(B, T, C)
x = self.output(x * g)
return x
def forward(self, x):
B, T, C = x.size()
H = self.n_head
r, k, v, g = self.jit_func(x)
x = RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w=self.time_decay, u=self.time_faaaa)
return self.jit_func_2(x, g)
########################################################################################################
class RWKV_ChannelMix(MyModule):
def __init__(self, args, layer_id):
super().__init__()
self.args = args
self.layer_id = layer_id
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
with torch.no_grad(): # fancy init of time_mix
ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0
ddd = torch.ones(1, 1, args.n_embd)
for i in range(args.n_embd):
ddd[0, 0, i] = i / args.n_embd
self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0))
self.time_mix_r = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0))
self.key = make_linear_ffn(args.n_embd, args.dim_ffn, bias=False)
self.receptance = make_linear_ffn(args.n_embd, args.n_embd, bias=False)
self.value = make_linear_ffn(args.dim_ffn, args.n_embd, bias=False)
@MyFunction
def forward(self, x):
xx = self.time_shift(x)
xk = x * self.time_mix_k + xx * (1 - self.time_mix_k)
xr = x * self.time_mix_r + xx * (1 - self.time_mix_r)
k = self.key(xk)
k = torch.relu(k) ** 2
kv = self.value(k)
return torch.sigmoid(self.receptance(xr)) * kv
class MishGLU(MyModule):
def __init__(self, args, layer_id):
super().__init__()
self.args = args
self.layer_id = layer_id
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
with torch.no_grad():
ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer)
x = torch.ones(1, 1, args.n_embd)
for i in range(args.n_embd):
x[0, 0, i] = i / args.n_embd
self.time_mix_k = nn.Parameter(torch.pow(x, ratio_1_to_almost0))
self.time_mix_r = nn.Parameter(torch.pow(x, ratio_1_to_almost0))
self.aa = nn.Linear(args.n_embd, args.dim_ffn, bias=False)
self.bb = nn.Linear(args.n_embd, args.dim_ffn, bias=False)
self.value = nn.Linear(args.dim_ffn, args.n_embd, bias=False)
@MyFunction
def forward(self, x):
xx = self.time_shift(x)
xa = x * self.time_mix_k + xx * (1 - self.time_mix_k)
xb = x * self.time_mix_r + xx * (1 - self.time_mix_r)
a = self.aa(xa)
b = self.bb(xb)
return self.value(a * F.mish(b))
########################################################################################################
# The RWKV Model with our blocks
########################################################################################################
class Block(nn.Module):
def __init__(self, args, layer_id):
super().__init__()
self.args = args
self.layer_id = layer_id
self.ln1 = nn.LayerNorm(args.n_embd)
self.ln2 = nn.LayerNorm(args.n_embd)
if self.layer_id == 0:
self.ln0 = nn.LayerNorm(args.n_embd)
if args.my_pos_emb > 0:
self.pos_emb_x = nn.Parameter(
torch.zeros((1, args.my_pos_emb, args.n_embd))
)
self.pos_emb_y = nn.Parameter(
torch.zeros((args.my_pos_emb, 1, args.n_embd))
)
if self.layer_id == 0 and self.args.pre_ffn > 0:
self.ffnPre = RWKV_ChannelMix(args, 0)
else:
self.att = RWKV_TimeMix_RWKV5(args, layer_id)
if "g" in os.environ["RWKV_MY_TESTING"]:
self.ffn = MishGLU(args, layer_id)
else:
self.ffn = RWKV_ChannelMix(args, layer_id)
if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer:
self.tiny_ln = nn.LayerNorm(args.n_embd)
self.tiny_q = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False)
self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False)
self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False)
self.register_buffer(
"tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))
)
if args.dropout > 0:
self.drop0 = nn.Dropout(p=args.dropout)
self.drop1 = nn.Dropout(p=args.dropout)
def forward(self, x, x_emb=None):
args = self.args
B, T, C = x.size()
if self.layer_id == 0:
x = self.ln0(x)
if args.my_pos_emb > 0:
pos_emb = (self.pos_emb_x + self.pos_emb_y).reshape(T + 1, -1)[:-1, :]
x = x + pos_emb
if self.args.dropout == 0:
if self.layer_id == 0 and args.pre_ffn > 0:
x = x + self.ffnPre(self.ln1(x))
else:
x = x + self.att(self.ln1(x))
x = x + self.ffn(self.ln2(x))
else:
if self.layer_id == 0 and args.pre_ffn > 0:
x = self.drop0(x + self.ffnPre(self.ln1(x)))
else:
x = self.drop0(x + self.att(self.ln1(x)))
x = self.drop1(x + self.ffn(self.ln2(x)))
if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer:
xx = self.tiny_ln(x)
q = self.tiny_q(xx)[:, :T, :]
k = self.tiny_k(xx)[:, :T, :]
c = (q @ k.transpose(-2, -1)) * (args.tiny_att_dim ** (-0.5))
c = c.masked_fill(self.tiny_mask[:T, :T] == 0, 0)
x = x + c @ self.tiny_v(x_emb)
return x
class L2Wrap(torch.autograd.Function):
@staticmethod
def forward(ctx, loss, y):
ctx.save_for_backward(y)
return loss
@staticmethod
def backward(ctx, grad_output):
y = ctx.saved_tensors[0]
# to encourage the logits to be close to 0
factor = 1e-4 / (y.shape[0] * y.shape[1])
maxx, ids = torch.max(y, -1, keepdim=True)
gy = torch.zeros_like(y)
gy.scatter_(-1, ids, maxx * factor)
return (grad_output, gy)
class RWKV(pl.LightningModule):
def __init__(self, args):
super().__init__()
self.args = args
if not hasattr(args, "dim_att"):
args.dim_att = args.n_embd
if not hasattr(args, "dim_ffn"):
args.dim_ffn = args.n_embd * 4
if not hasattr(args, "tiny_att_layer"):
args.tiny_att_layer = -1
if not hasattr(args, "tiny_att_dim"):
args.tiny_att_dim = -1
assert args.n_embd % 32 == 0
assert args.dim_att % 32 == 0
assert args.dim_ffn % 32 == 0
self.emb = nn.Embedding(args.vocab_size, args.n_embd)
self.blocks = nn.ModuleList([Block(args, i) for i in range(args.n_layer)])
self.ln_out = nn.LayerNorm(args.n_embd)
self.head = nn.Linear(args.n_embd, args.vocab_size, bias=False)
if args.head_qk > 0:
self.head_q = nn.Linear(args.n_embd, args.head_qk, bias=False)
self.head_k = nn.Linear(args.n_embd, args.head_qk, bias=False)
self.register_buffer(
"copy_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))
)
if args.dropout > 0:
self.drop0 = nn.Dropout(p=args.dropout)
def configure_optimizers(self):
args = self.args
lr_decay = set()
lr_1x = set()
lr_2x = set()
lr_3x = set()
for n, p in self.named_parameters():
if ("time_mix" in n) and (args.layerwise_lr > 0):
if args.my_pile_stage == 2:
lr_2x.add(n)
else:
lr_1x.add(n)
elif ("time_decay" in n) and (args.layerwise_lr > 0):
if args.my_pile_stage == 2:
lr_3x.add(n)
else:
lr_2x.add(n)
elif ("time_faaaa" in n) and (args.layerwise_lr > 0):
if args.my_pile_stage == 2:
lr_2x.add(n)
else:
lr_1x.add(n)
elif ("time_first" in n) and (args.layerwise_lr > 0):
lr_3x.add(n)
elif (len(p.squeeze().shape) >= 2) and (args.weight_decay > 0):
lr_decay.add(n)
else:
lr_1x.add(n)
lr_decay = sorted(list(lr_decay))
lr_1x = sorted(list(lr_1x))
lr_2x = sorted(list(lr_2x))
lr_3x = sorted(list(lr_3x))
# print('decay', lr_decay)
# print('1x', lr_1x)
# print('2x', lr_2x)
# print('3x', lr_3x)
param_dict = {n: p for n, p in self.named_parameters()}
if args.layerwise_lr > 0:
if args.my_pile_stage == 2:
optim_groups = [
{
"params": [param_dict[n] for n in lr_1x],
"weight_decay": 0.0,
"my_lr_scale": 1.0,
},
{
"params": [param_dict[n] for n in lr_2x],
"weight_decay": 0.0,
"my_lr_scale": 5.0,
}, # test: 2e-3 / args.lr_init},
{
"params": [param_dict[n] for n in lr_3x],
"weight_decay": 0.0,
"my_lr_scale": 5.0,
}, # test: 3e-3 / args.lr_init},
]
else:
optim_groups = [
{
"params": [param_dict[n] for n in lr_1x],
"weight_decay": 0.0,
"my_lr_scale": 1.0,
},
{
"params": [param_dict[n] for n in lr_2x],
"weight_decay": 0.0,
"my_lr_scale": 2.0,
},
{
"params": [param_dict[n] for n in lr_3x],
"weight_decay": 0.0,
"my_lr_scale": 3.0,
},
]
else:
optim_groups = [
{
"params": [param_dict[n] for n in lr_1x],
"weight_decay": 0.0,
"my_lr_scale": 1.0,
}
]
if args.weight_decay > 0:
optim_groups += [
{
"params": [param_dict[n] for n in lr_decay],
"weight_decay": args.weight_decay,
"my_lr_scale": 1.0,
}
]
if self.deepspeed_offload:
return DeepSpeedCPUAdam(
optim_groups,
lr=self.args.lr_init,
betas=self.args.betas,
eps=self.args.adam_eps,
bias_correction=True,
adamw_mode=True,
amsgrad=False,
)
return FusedAdam(
optim_groups,
lr=self.args.lr_init,
betas=self.args.betas,
eps=self.args.adam_eps,
bias_correction=True,
adam_w_mode=True,
amsgrad=False,
)
else:
if self.deepspeed_offload:
return DeepSpeedCPUAdam(
optim_groups,
lr=self.args.lr_init,
betas=self.args.betas,
eps=self.args.adam_eps,
bias_correction=True,
adamw_mode=False,
weight_decay=0,
amsgrad=False,
)
return FusedAdam(
optim_groups,
lr=self.args.lr_init,
betas=self.args.betas,
eps=self.args.adam_eps,
bias_correction=True,
adam_w_mode=False,
weight_decay=0,
amsgrad=False,
)
# return ZeroOneAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, weight_decay=0, amsgrad=False, cuda_aware=False)
@property
def deepspeed_offload(self) -> bool:
strategy = self.trainer.strategy
if isinstance(strategy, DeepSpeedStrategy):
cfg = strategy.config["zero_optimization"]
return cfg.get("offload_optimizer") or cfg.get("offload_param")
return False
def forward(self, idx):
args = self.args
B, T = idx.size()
assert T <= args.ctx_len, "Cannot forward, model ctx_len is exhausted."
x = self.emb(idx)
x_emb = x
if args.dropout > 0:
x = self.drop0(x)
if args.tiny_att_dim > 0:
for block in self.blocks:
if args.grad_cp == 1:
if args.lora:
x = torch_checkpoint(block, x, x_emb, use_reentrant=False)
else:
x = deepspeed.checkpointing.checkpoint(block, x, x_emb)
else:
x = block(x, x_emb)
else:
for block in self.blocks:
if args.grad_cp == 1:
if args.lora:
x = torch_checkpoint(block, x, x_emb, use_reentrant=False)
else:
x = deepspeed.checkpointing.checkpoint(block, x)
else:
x = block(x)
x = self.ln_out(x)
if args.head_qk > 0:
q = self.head_q(x)[:, :T, :]
k = self.head_k(x)[:, :T, :]
c = (q @ k.transpose(-2, -1)) * (1.0 / args.head_qk)
c = c.masked_fill(self.copy_mask[:T, :T] == 0, 0)
if "32" in os.environ["RWKV_FLOAT_MODE"]:
c = c @ F.one_hot(idx, num_classes=args.vocab_size)
elif os.environ["RWKV_FLOAT_MODE"] == "fp16":
c = c @ F.one_hot(idx, num_classes=args.vocab_size).half()
elif os.environ["RWKV_FLOAT_MODE"] == "bf16":
c = c @ F.one_hot(idx, num_classes=args.vocab_size).bfloat16()
x = self.head(x) + c
else:
x = self.head(x)
return x
def training_step(self, batch, batch_idx):
args = self.args
if args.my_qa_mask != 1:
idx, targets = batch
logits = self(idx)
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
# if '0' in os.environ["RWKV_MY_TESTING"]:
# print('logits', logits)
# torch.set_printoptions(threshold=10000)
# print('idx', idx)
# exit(0)
else:
idx, targets, mask = batch
mask = mask.view(-1)
sum_mask = torch.sum(mask).item()
# if sum_mask == 0:
# return torch.tensor([0.0], requires_grad=True)
logits = self(idx)
if sum_mask == mask.shape[0]:
loss = F.cross_entropy(
logits.view(-1, logits.size(-1)), targets.view(-1)
)
# print('rank', self.global_rank, 'loss', loss.item())
else:
loss = F.cross_entropy(
logits.view(-1, logits.size(-1)), targets.view(-1), reduction="none"
)
# loss_raw = loss
loss = torch.sum(loss * mask) / sum_mask
# torch.set_printoptions(threshold=10000)
# if True: #self.global_rank == 1:
# tmp = ''
# sss = 0
# ccc = 0
# for i in range(mask.shape[0]):
# if mask[i] > 0:
# tmp += str(idx.view(-1)[i].item()) + ','
# sss += loss_raw.view(-1)[i].float().item()
# ccc += 1
# print('rank', self.global_rank, 'loss', loss.item(), 'lavg', sss / ccc)#, 'tmp', tmp, 'input', idx)
return L2Wrap.apply(loss, logits)
def training_step_end(self, batch_parts):
if pl.__version__[0] != "2":
all = self.all_gather(batch_parts)
if self.trainer.is_global_zero:
self.trainer.my_loss_all = all
def generate_init_weight(self):
print(
f"""
############################################################################
#
# Init model weight (slow for large models)...
#
############################################################################
"""
)
m = {}
for n in self.state_dict():
p = self.state_dict()[n]
shape = p.shape
gain = 1.0
scale = 1.0
if (
"ln_" in n
or ".ln" in n
or "time_" in n
or "_mask" in n
or "pos_emb" in n
or ".mask." in n
):
if "ln_x.weight" in n:
layer_scale = (1 + int(n.split(".")[1])) / self.args.n_layer
m[n] = (p * 0.0) + (layer_scale**0.7)
else:
m[n] = p
else:
if n == "emb.weight":
scale = -1 * self.args.lr_init
else:
if shape[0] > shape[1]:
gain = math.sqrt(shape[0] / shape[1])
zero = [
".att.output.",
".ffn.value.",
".ffn.receptance.",
".ffnPre.value.",
".ffnPre.receptance.",
"head_q.",
".oo.",
".rr.",
]
for kk in zero:
if kk in n:
scale = 0
if n == "head.weight":
scale = 0.5
if "head_k." in n:
scale = 0.1
if "head_q." in n:
scale = 0
print(
f"{str(shape[0]).ljust(5)} {str(shape[1]).ljust(5)} {str(scale).ljust(4)} {n}"
)
if self.args.accelerator.upper() == "GPU":
m[n] = torch.empty((shape[0], shape[1]), device="cuda")
else:
m[n] = torch.empty((shape[0], shape[1]))
if scale == 0:
nn.init.zeros_(m[n])
elif scale < 0:
nn.init.uniform_(m[n], a=scale, b=-scale)
else:
nn.init.orthogonal_(m[n], gain=gain * scale)
m[n] = m[n].cpu()
if os.environ["RWKV_FLOAT_MODE"] == "fp16":
m[n] = m[n].half()
elif os.environ["RWKV_FLOAT_MODE"] == "bf16":
m[n] = m[n].bfloat16()
# if n == "emb.weight":
# print(m[n])
gc.collect()
torch.cuda.empty_cache()
return m
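LoraLinear above computes the usual low-rank update, F.linear(x, W) plus (alpha / r) * F.linear(F.linear(dropout(x), lora_A), lora_B), with lora_A initialized like a normal layer and lora_B at zero so the adapter is a no-op until trained. A tiny numerical check of that decomposition (standalone, with made-up sizes and dropout omitted):
import torch
import torch.nn.functional as F

torch.manual_seed(0)
in_f, out_f, r, alpha = 16, 8, 4, 8       # made-up sizes for the check
x = torch.randn(2, in_f)
W = torch.randn(out_f, in_f)              # frozen base weight
A = torch.randn(r, in_f)                  # lora_A (kaiming-init in the class above)
B = torch.zeros(out_f, r)                 # lora_B starts at zero
scaling = alpha / r

y = F.linear(x, W) + scaling * F.linear(F.linear(x, A), B)   # dropout omitted in this sketch
assert torch.allclose(y, F.linear(x, W))  # the adapter is a no-op before training
print(y.shape)                            # torch.Size([2, 8])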

View File

@ -1,310 +0,0 @@
import os, math, time, datetime, subprocess
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from .model import LORA_CONFIG
def my_save(args, trainer, dd, ff):
if "14b-run1" in ff:
fn = ff.split("/")[-1]
fff = "/dev/shm/" + fn
torch.save(dd, fff)
subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-14b-4k/{fn} --quiet", shell=True)
elif ("world/14b" in ff) or ("world/7b" in ff):
aa = ff.split("/")[1]
fn = ff.split("/")[-1]
fff = f"/dev/shm/{aa}-{fn}"
torch.save(dd, fff)
subprocess.Popen(
f" aws s3 mv {fff} s3://rwkv-world/{aa}-{fn} --quiet", shell=True
)
else:
if "deepspeed_stage_3" in args.strategy:
trainer.save_checkpoint(ff, weights_only=True)
else:
torch.save(dd, ff)
class train_callback(pl.Callback):
def __init__(self, args):
super().__init__()
self.args = args
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
args = self.args
# if args.cuda_cleanup > 0:
# torch.cuda.empty_cache()
real_step = trainer.global_step + args.epoch_begin * args.epoch_steps
# LR schedule
w_step = args.warmup_steps
if args.lr_final == args.lr_init or args.epoch_count == 0:
lr = args.lr_init
else:
decay_step = real_step - args.my_pile_edecay * args.epoch_steps
decay_total = (args.epoch_count - args.my_pile_edecay) * args.epoch_steps
progress = (decay_step - w_step + 1) / (decay_total - w_step)
progress = min(1, max(0, progress))
if args.lr_final == 0 or args.lr_init == 0: # linear decay
lr = args.lr_init + (args.lr_final - args.lr_init) * progress
else: # exp decay
lr = args.lr_init * math.exp(
math.log(args.lr_final / args.lr_init) * pow(progress, 1)
)
# if trainer.is_global_zero:
# print(trainer.global_step, decay_step, decay_total, w_step, progress, lr)
if args.my_exit_tokens != 0: # cosine decay
real_tokens = real_step * args.ctx_len * args.real_bsz
warmup_tokens = w_step * args.ctx_len * args.real_bsz
progress = (real_tokens - warmup_tokens) / (
abs(args.my_exit_tokens) - warmup_tokens
)
progress = max(0, min(1, progress))
lr_final_factor = args.lr_final / args.lr_init
lr_mult = (0.5 + lr_final_factor / 2) + (
0.5 - lr_final_factor / 2
) * math.cos(math.pi * progress)
if args.my_exit_tokens > 0:
lr = args.lr_init * lr_mult
else:
lr = (lr + args.lr_init * lr_mult) / 2
if progress >= 1:
if (trainer.is_global_zero) or ("deepspeed_stage_3" in args.strategy):
my_save(
args,
trainer,
pl_module.state_dict(),
f"{args.proj_dir}/rwkv-final.pth",
)
exit(0)
if trainer.global_step < w_step:
lr = lr * (0.2 + 0.8 * trainer.global_step / w_step)
if args.weight_decay_final > 0:
wd_now = args.weight_decay * math.exp(
math.log(args.weight_decay_final / args.weight_decay) * progress
)
else:
wd_now = args.weight_decay
for param_group in trainer.optimizers[0].param_groups:
if param_group["weight_decay"] > 0:
param_group["weight_decay"] = wd_now
if args.layerwise_lr > 0:
param_group["lr"] = lr * param_group["my_lr_scale"]
# print(param_group["lr"], param_group["my_lr_scale"])
else:
param_group["lr"] = lr
trainer.my_lr = lr
trainer.my_wd = wd_now
# rank_zero_info(f"{real_step} {lr}")
if trainer.global_step == 0:
if trainer.is_global_zero: # logging
trainer.my_loss_sum = 0
trainer.my_loss_count = 0
trainer.my_log = open(args.proj_dir + "/train_log.txt", "a")
trainer.my_log.write(
f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n"
)
try:
print(f"\n{trainer.strategy.config}\n")
trainer.my_log.write(f"{trainer.strategy.config}\n")
except:
pass
trainer.my_log.flush()
if len(args.wandb) > 0:
print("Login to wandb...")
import wandb
wandb.init(
project=args.wandb,
name=args.run_name + " " + args.my_timestamp,
config=args,
save_code=False,
)
trainer.my_wandb = wandb
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
args = self.args
token_per_step = args.ctx_len * args.real_bsz
real_step = trainer.global_step + args.epoch_begin * args.epoch_steps
if trainer.is_global_zero: # logging
t_now = time.time_ns()
kt_s = 0
try:
t_cost = (t_now - trainer.my_time_ns) / 1e9
kt_s = token_per_step / t_cost / 1000
self.log("REAL it/s", 1.0 / t_cost, prog_bar=True, on_step=True)
self.log("Kt/s", kt_s, prog_bar=True, on_step=True)
except:
pass
trainer.my_time_ns = t_now
if pl.__version__[0] == "2":
trainer.my_loss = outputs["loss"]
else:
trainer.my_loss = trainer.my_loss_all.float().mean().item()
trainer.my_loss_sum += trainer.my_loss
trainer.my_loss_count += 1
trainer.my_epoch_loss = trainer.my_loss_sum / trainer.my_loss_count
self.log("lr", trainer.my_lr, prog_bar=True, on_step=True)
self.log("loss", trainer.my_epoch_loss, prog_bar=True, on_step=True)
# self.log("s", real_step, prog_bar=True, on_step=True)
if len(args.wandb) > 0:
lll = {
"loss": trainer.my_loss,
"lr": trainer.my_lr,
"wd": trainer.my_wd,
"Gtokens": real_step * token_per_step / 1e9,
}
if kt_s > 0:
lll["kt/s"] = kt_s
trainer.my_wandb.log(lll, step=int(real_step))
if (trainer.is_global_zero) or (
"deepspeed_stage_3" in args.strategy
): # save pth
if args.magic_prime > 0:
expand_factor = 2 if args.my_qa_mask > 0 else 1
if int(real_step) == int(
args.magic_prime * expand_factor // args.real_bsz
) - 1 + int(args.my_random_steps):
to_save_dict = pl_module.state_dict()
my_save(
args,
trainer,
to_save_dict,
f"{args.proj_dir}/rwkv-final.pth",
)
# if args.batch_save==batch_idx :
# to_save_dict = pl_module.state_dict()
# for name, state in to_save_dict.items():
# if 'img' in name:
# to_save_dict[name] = state
# try:
# my_save(
# args, trainer,
# to_save_dict,
# f"{args.proj_dir}/rwkv-{args.epoch_begin + trainer.current_epoch}-{batch_idx}.pth",
# )
# except Exception as e:
# print('Error\n\n', e, '\n\n')
def on_train_epoch_start(self, trainer, pl_module):
args = self.args
if pl.__version__[0] == "2":
dataset = trainer.train_dataloader.dataset
else:
dataset = trainer.train_dataloader.dataset.datasets
assert "MyDataset" in str(dataset)
dataset.global_rank = trainer.global_rank
dataset.real_epoch = int(args.epoch_begin + trainer.current_epoch)
dataset.world_size = trainer.world_size
# print(f'########## world_size {dataset.world_size} global_rank {dataset.global_rank} real_epoch {dataset.real_epoch} ##########')
def on_train_epoch_end(self, trainer, pl_module):
args = self.args
to_save_dict = {}
if (trainer.is_global_zero) or (
"deepspeed_stage_3" in args.strategy
): # save pth
if (
args.epoch_save > 0 and trainer.current_epoch % args.epoch_save == 0
) or (trainer.current_epoch == args.epoch_count - 1):
if args.data_type == "wds_img":
raw_dict = pl_module.state_dict()
for k in raw_dict:
if k.startswith("encoder.") or k.startswith("decoder."):
to_save_dict[k] = raw_dict[k]
else:
to_save_dict = pl_module.state_dict()
if args.data_type == "img" and not args.lora:
for name, state in to_save_dict.items():
if "img" in name:
to_save_dict[name] = state
if args.lora:
enable_time_finetune = "time" in LORA_CONFIG["parts"]
enable_ln_finetune = "ln" in LORA_CONFIG["parts"]
lora_dict = {}
for name, state in to_save_dict.items():
if "img" in name:
lora_dict[name] = state
if (
".lora_" in name
or (enable_time_finetune and ".time_" in name)
or (enable_ln_finetune and ".ln" in name)
):
lora_dict[name] = state
to_save_dict = lora_dict
try:
my_save(
args,
trainer,
to_save_dict,
f"{args.proj_dir}/rwkv-{args.epoch_begin + trainer.current_epoch}.pth",
)
except Exception as e:
print("Error\n\n", e, "\n\n")
if trainer.is_global_zero: # logging
trainer.my_log.write(
f"{args.epoch_begin + trainer.current_epoch} {trainer.my_epoch_loss:.6f} {math.exp(trainer.my_epoch_loss):.4f} {trainer.my_lr:.8f} {datetime.datetime.now()} {trainer.current_epoch}\n"
)
trainer.my_log.flush()
trainer.my_loss_sum = 0
trainer.my_loss_count = 0
if (args.epoch_begin + trainer.current_epoch) >= args.my_exit:
exit(0)
@rank_zero_only
def generate_init_weight(model, init_weight_name):
mm = model.generate_init_weight()
if model.args.my_pile_stage == 1:
if len(model.args.load_model) > 0:
print(f"Combine weights from {model.args.load_model}...")
load_dict = torch.load(model.args.load_model, map_location="cpu")
for k in load_dict:
try:
assert k in mm
except:
print("missing", k)
exit(0)
src = load_dict[k]
try:
mm[k] = src.reshape(mm[k].shape)
except:
tmp = mm[k].squeeze().clone()
print(k, src.shape, "-->", mm[k].shape)
ss = src.shape[0]
dd = tmp.shape[0]
for i in range(dd):
pos = i / dd * ss
if pos >= ss - 1:
tmp[i] = src[ss - 1]
else:
p0 = int(math.floor(pos))
ii = pos - p0
tmp[i] = src[p0] * (1 - ii) + src[p0 + 1] * (ii)
mm[k] = tmp.reshape(mm[k].shape)
sss = src.squeeze().float().cpu().numpy()
print(sss[:10], "...", sss[-10:])
mmm = mm[k].squeeze().float().cpu().numpy()
print(mmm[:10], "...", mmm[-10:])
print(f"Save to {init_weight_name}...")
torch.save(mm, init_weight_name)
if model.args.my_pile_stage == 1:
print("Done. Now go for stage 2.")
exit(0)
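The cosine branch of the schedule in on_train_batch_start above (used when my_exit_tokens != 0) multiplies lr_init by lr_mult = (0.5 + f/2) + (0.5 - f/2) * cos(pi * progress), where f = lr_final / lr_init, so the rate moves smoothly from lr_init at progress 0 to lr_final at progress 1. A short numeric check with illustrative values:
import math

lr_init, lr_final = 6e-4, 6e-5            # illustrative values
f = lr_final / lr_init
for progress in (0.0, 0.5, 1.0):
    lr_mult = (0.5 + f / 2) + (0.5 - f / 2) * math.cos(math.pi * progress)
    print(progress, lr_init * lr_mult)
# 0.0 -> 6.0e-04 (lr_init), 0.5 -> 3.3e-04 (midpoint), 1.0 -> 6.0e-05 (lr_final)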

Some files were not shown because too many files have changed in this diff.