Compare commits
166 Commits
| SHA1 |
|---|
| ad65765ba8 |
| d04fd7cb87 |
| b398cbb591 |
| 19b97e985c |
| 93bf74a320 |
| 7daae23bbb |
| 0d0a3f15cc |
| 04fbb38861 |
| d666c6032b |
| 93e8660d69 |
| e687cf02bb |
| e858f1477a |
| a2062ae9cc |
| 34112c79c7 |
| b625b8a6d1 |
| 14a13d5768 |
| 7ce464ecda |
| 2c1f89383f |
| e666c50f77 |
| 1b441752b0 |
| e01897b24d |
| 6146d910b4 |
| 0063c171f3 |
| bea3c29c1c |
| 5f543c2545 |
| 177b2c54d9 |
| 645e8e2f44 |
| f2d0dda2ff |
| 3a449e7b46 |
| 18d2ecb7a7 |
| bb3a93b419 |
| 1334f0e5ba |
| 8781416cfb |
| a9819139b8 |
| 66e43c9d9b |
| 41e5bd5eb8 |
| 48fef0235b |
| d435436525 |
| cd7a9896dc |
| bbcc6b07b6 |
| 646bcd81c0 |
| dbf0dccc9d |
| 437de2be20 |
| f739c61197 |
| 01d3c89ea4 |
| d18218f21a |
| c8470e77fd |
| 9ede7d7c6d |
| a59c4436c8 |
| 068be2bfc4 |
| 94a5dc4fb7 |
| 9f288de951 |
| 3d5c3dcd31 |
| 0a4876a564 |
| 4f0558ae34 |
| f03c9cf25f |
| 07797537d1 |
| 0c3a50cb07 |
| c7dcff52a1 |
| c6ef32958e |
| 7235e1067b |
| 0594290b92 |
| d249a4c29a |
| 02ba37fab4 |
| b5a6f8a425 |
| 1ad86d737c |
| cfa3669f6f |
| 26d4c9f0ed |
| 3ddcf9f62e |
| e734fce64f |
| 150beb578c |
| db6fbe8366 |
| 46f52923c3 |
| 893be5cf43 |
| 384e4ce4d0 |
| b8712e0b89 |
| 37dda4333d |
| 64826b9af7 |
| 47b0c35441 |
| 1dcda47013 |
| 1f81a1e5a8 |
| 35e92d2aef |
| 0d99e5549e |
| fed1594ddc |
| 14b90bb36b |
| f86b7f1f08 |
| 54355d5a7a |
| ff7306349a |
| 77df56cddc |
| 97ae139de5 |
| afd15ef2c5 |
| 6c73eae9f6 |
| 7078f47f72 |
| d43954cc88 |
| c87de93498 |
| 810843a5ab |
| f7cbd2c803 |
| faf1852012 |
| 43cfab5d4b |
| 627a20936d |
| 1d7f19ffaf |
| d80565d780 |
| d7ba88953d |
| 30e1c3171e |
| 1f058b16ac |
| 4a192f4057 |
| 0331bf47f7 |
| 2acdaa96b2 |
| 1d200d53ab |
| df9e1f408e |
| 4a18696686 |
| 46b3b285f5 |
| 1d6aeab9dc |
| ab110ba30b |
| 2f0fa4ee56 |
| 0005816c1d |
| f70672e5a0 |
| ee057071a5 |
| 4f26404002 |
| df7652856a |
| de755463e3 |
| 2fe98d9a2c |
| 2e42039607 |
| 71abd357a4 |
| 68228a4552 |
| 79851433f8 |
| bd4de12e05 |
| c0aa6aaba9 |
| d7abe5f0d1 |
| 5e5e1e9651 |
| f8388a0527 |
| f8b764ef8f |
| fcfaa5944e |
| f89e89c1c9 |
| a25965530c |
| 971124d0d7 |
| d7dcc90008 |
| df969fcfc6 |
| c4042bbfd8 |
| 4112200b4c |
| 3f9a54e36f |
| 3ed4456135 |
| e0df9ae47b |
| 87b2c3ed7d |
| 50ff7ef6bc |
| c7a580ca8a |
| eaae7624a7 |
| fcd59de6fb |
| 1bbe127209 |
| b868adc058 |
| a24b78e8c3 |
| c8025f1cff |
| fe0860dbf0 |
| 02d5d641d1 |
| a057bb6c5b |
| c9e4ae7fa1 |
| 79a97b2bc4 |
| ef53951a16 |
| 74f1a1c033 |
| ce986cfc6d |
| 61cea2a784 |
| 8a13bd3c1e |
| da68926e9c |
| e0b7453883 |
| 91e2828a95 |
| bcf6409536 |
.gitattributes (vendored, 1 change)

```diff
@@ -2,6 +2,7 @@ backend-python/rwkv_pip/** linguist-vendored
 backend-python/wkv_cuda_utils/** linguist-vendored
 backend-python/get-pip.py linguist-vendored
 backend-python/convert_model.py linguist-vendored
+backend-python/convert_safetensors.py linguist-vendored
 backend-python/utils/midi.py linguist-vendored
 build/** linguist-vendored
 finetune/lora/** linguist-vendored
```
.github/workflows/release.yml (vendored, 57 changes)

```diff
@@ -48,19 +48,32 @@ jobs:
         id: cp310
         with:
           python-version: '3.10'
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+          target: wasm32-unknown-unknown
       - uses: crazy-max/ghaction-chocolatey@v2
         with:
           args: install upx
       - run: |
+          Start-BitsTransfer https://github.com/josStorer/LibreHardwareMonitor.Console/releases/download/v0.1.0/LibreHardwareMonitor.Console.zip ./LibreHardwareMonitor.Console.zip
+          Expand-Archive ./LibreHardwareMonitor.Console.zip -DestinationPath ./components/LibreHardwareMonitor.Console
          Start-BitsTransfer https://www.python.org/ftp/python/3.10.11/python-3.10.11-embed-amd64.zip ./python-3.10.11-embed-amd64.zip
          Expand-Archive ./python-3.10.11-embed-amd64.zip -DestinationPath ./py310
          $content=Get-Content "./py310/python310._pth"; $content | ForEach-Object {if ($_.ReadCount -eq 3) {"Lib\\site-packages"} else {$_}} | Set-Content ./py310/python310._pth
          ./py310/python ./backend-python/get-pip.py
-          ./py310/python -m pip install Cython==0.29.36
+          ./py310/python -m pip install Cython==3.0.4
          Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../include" -Destination "py310/include" -Recurse
          Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../libs" -Destination "py310/libs" -Recurse
-          ./py310/python -m pip install cyac==1.7
+          ./py310/python -m pip install cyac==1.9
+          git clone https://github.com/josStorer/ai00_rwkv_server --depth=1
+          cd ai00_rwkv_server
+          cargo build --release
+          mv ./target/release/ai00_server.exe ../backend-rust/webgpu_server.exe
+          cd ..
          go install github.com/wailsapp/wails/v2/cmd/wails@latest
+          (Get-Content -Path ./backend-golang/app.go) -replace "//go:custom_build windows ", "" | Set-Content -Path ./backend-golang/app.go
          make
          Rename-Item -Path "build/bin/RWKV-Runner.exe" -NewName "RWKV-Runner_windows_x64.exe"
@@ -76,16 +89,29 @@ jobs:
       - uses: actions/setup-go@v4
         with:
           go-version: '1.20.5'
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+          target: wasm32-unknown-unknown
       - run: |
          sudo apt-get update
          sudo apt-get install upx
          sudo apt-get install build-essential libgtk-3-dev libwebkit2gtk-4.0-dev
+          git clone https://github.com/josStorer/ai00_rwkv_server --depth=1
+          cd ai00_rwkv_server
+          sudo apt-get install libudev-dev
+          sudo apt-get install libasound2-dev
+          rustup target add x86_64-unknown-linux-gnu
+          cargo build --release --target x86_64-unknown-linux-gnu
+          mv ./target/x86_64-unknown-linux-gnu/release/ai00_server ../backend-rust/webgpu_server
+          cd ..
          go install github.com/wailsapp/wails/v2/cmd/wails@latest
          rm -rf ./backend-python/wkv_cuda_utils
          rm ./backend-python/rwkv_pip/wkv_cuda.pyd
+          rm ./backend-python/rwkv_pip/rwkv5.pyd
+          rm ./backend-python/rwkv_pip/rwkv6.pyd
          rm ./backend-python/rwkv_pip/beta/wkv_cuda.pyd
          rm ./backend-python/get-pip.py
          sed -i '1,2d' ./backend-golang/wsl_not_windows.go
          rm ./backend-golang/wsl.go
          mv ./backend-golang/wsl_not_windows.go ./backend-golang/wsl.go
          make
          mv build/bin/RWKV-Runner build/bin/RWKV-Runner_linux_x64
@@ -101,13 +127,24 @@ jobs:
       - uses: actions/setup-go@v4
         with:
           go-version: '1.20.5'
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+          target: wasm32-unknown-unknown
       - run: |
+          git clone https://github.com/josStorer/ai00_rwkv_server --depth=1
+          cd ai00_rwkv_server
+          rustup target add aarch64-apple-darwin
+          cargo build --release --target aarch64-apple-darwin
+          mv ./target/aarch64-apple-darwin/release/ai00_server ../backend-rust/webgpu_server
+          cd ..
          go install github.com/wailsapp/wails/v2/cmd/wails@latest
          rm -rf ./backend-python/wkv_cuda_utils
          rm ./backend-python/rwkv_pip/wkv_cuda.pyd
+          rm ./backend-python/rwkv_pip/rwkv5.pyd
+          rm ./backend-python/rwkv_pip/rwkv6.pyd
          rm ./backend-python/rwkv_pip/beta/wkv_cuda.pyd
          rm ./backend-python/get-pip.py
          sed -i '' '1,2d' ./backend-golang/wsl_not_windows.go
          rm ./backend-golang/wsl.go
          mv ./backend-golang/wsl_not_windows.go ./backend-golang/wsl.go
          make
          cp build/darwin/Readme_Install.txt build/bin/Readme_Install.txt
          cp build/bin/RWKV-Runner.app/Contents/MacOS/RWKV-Runner build/bin/RWKV-Runner_darwin_universal
```
.gitignore (vendored, 4 additions)

```diff
@@ -5,6 +5,8 @@ __pycache__
 .idea
 .vs
 *.pth
+*.st
+*.safetensors
 *.bin
 /config.json
 /cache.json
@@ -16,6 +18,7 @@ __pycache__
 /cmd-helper.bat
 /install-py-dep.bat
 /backend-python/wkv_cuda
+/backend-python/rwkv*
 *.exe
 *.old
 .DS_Store
@@ -24,3 +27,4 @@ __pycache__
 train_log.txt
 finetune/json2binidx_tool/data
 /wsl.state
+/components
```
```diff
@@ -1,13 +1,17 @@
-## Changes
+## Changes (v1.5.7 is a patch for v1.5.5 & v1.5.6)
 
 - japanese UI
 - global penalty
 - allow custom user_name and assistant_name (`/chat/completions` API)
 - update defaultConfigs
+- MIDI Input Audio Tracks (Experimental, playing tracks is not supported yet, please save to generation area to preview)
+- fix autoPlayed midi cannot be stopped
+- try to use local soundfont by default
+- improve details
 
 ## Install
 
 - Windows: https://github.com/josStorer/RWKV-Runner/blob/master/build/windows/Readme_Install.txt
 - MacOS: https://github.com/josStorer/RWKV-Runner/blob/master/build/darwin/Readme_Install.txt
 - Linux: https://github.com/josStorer/RWKV-Runner/blob/master/build/linux/Readme_Install.txt
 - Server-Deploy-Examples: https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples
+
+#### MIDI Input Audio Tracks
+
+
```
Makefile (16 changes)

```diff
@@ -8,16 +8,26 @@ endif
 
 build-windows:
 	@echo ---- build for windows
-	wails build -upx -ldflags "-s -w" -platform windows/amd64
+	wails build -upx -ldflags '-s -w -extldflags "-static"' -platform windows/amd64
 
 build-macos:
 	@echo ---- build for macos
-	wails build -ldflags "-s -w" -platform darwin/universal
+	wails build -ldflags '-s -w' -platform darwin/universal
 
 build-linux:
 	@echo ---- build for linux
-	wails build -upx -ldflags "-s -w" -platform linux/amd64
+	wails build -upx -ldflags '-s -w' -platform linux/amd64
 
+build-web:
+	@echo ---- build for web
+	cd frontend && npm run build
+
 dev:
 	wails dev
 
+dev-web:
+	cd frontend && npm run dev
+
+preview:
+	cd frontend && npm run preview
```
README.md

```diff
@@ -47,7 +47,9 @@ English | [简体中文](README_ZH.md) | [日本語](README_JA.md)
 
 </div>
 
-#### Default configs has enabled custom CUDA kernel acceleration, which is much faster and consumes much less VRAM. If you encounter possible compatibility issues, go to the Configs page and turn off `Use Custom CUDA kernel to Accelerate`.
 #### Tip: You can deploy [backend-python](./backend-python/) on a server and use this program as a client only. Fill in your server address in the Settings `API URL`.
 
+#### Default configs has enabled custom CUDA kernel acceleration, which is much faster and consumes much less VRAM. If you encounter possible compatibility issues (output garbled), go to the Configs page and turn off `Use Custom CUDA kernel to Accelerate`, or try to upgrade your gpu driver.
+
+#### If Windows Defender claims this is a virus, you can try downloading [v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip) and letting it update automatically to the latest version, or add it to the trusted list (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`).
```
README_JA.md

```diff
@@ -47,7 +47,9 @@
 
 </div>
 
-#### デフォルトの設定はカスタム CUDA カーネルアクセラレーションを有効にしています。互換性の問題が発生する可能性がある場合は、コンフィグページに移動し、`Use Custom CUDA kernel to Accelerate` をオフにしてください。
 #### ヒント:サーバーに[backend-python](./backend-python/)をデプロイし、このプログラムをクライアントとして使用することができます。設定された`API URL`にサーバーアドレスを入力してください。
 
+#### デフォルトの設定はカスタム CUDA カーネルアクセラレーションを有効にしています。互換性の問題 (文字化けを出力する) が発生する可能性がある場合は、コンフィグページに移動し、`Use Custom CUDA kernel to Accelerate` をオフにしてください、あるいは、GPUドライバーをアップグレードしてみてください。
+
+#### Windows Defender がこれをウイルスだと主張する場合は、[v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip) をダウンロードして最新版に自動更新させるか、信頼済みリストに追加してみてください (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`)。
@@ -91,8 +93,8 @@ body.json:
 
 ## 埋め込み API の例
 
-Note: v1.4.0 has improved the quality of embeddings API. The generated results are not compatible
-with previous versions. If you are using embeddings API to generate knowledge bases or similar, please regenerate.
+注意: v1.4.0 では、埋め込み API の品質が向上しました。生成される結果は、以前のバージョンとは互換性がありません。
+もし、embeddings API を使って知識ベースなどを生成している場合は、再生成してください。
 
 LangChain を使用している場合は、`OpenAIEmbeddings(openai_api_base="http://127.0.0.1:8000", openai_api_key="sk-")`
 を使用してください
```
README_ZH.md

```diff
@@ -46,7 +46,9 @@ API兼容的接口,这意味着一切ChatGPT客户端都是RWKV客户端。
 
 </div>
 
-#### 预设配置已经开启自定义CUDA算子加速,速度更快,且显存消耗更少。如果你遇到可能的兼容性问题,前往配置页面,关闭`使用自定义CUDA算子加速`
 #### 小贴士:你可以在服务器部署[backend-python](./backend-python/),然后将此程序仅用作客户端,在设置的`API URL`中填入你的服务器地址
 
+#### 预设配置已经开启自定义CUDA算子加速,速度更快,且显存消耗更少。如果你遇到可能的兼容性(输出乱码)问题,前往配置页面,关闭`使用自定义CUDA算子加速`,或更新你的显卡驱动
+
+#### 如果Windows Defender说这是一个病毒,你可以尝试下载[v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip),然后让其自动更新到最新版,或添加信任 (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`)
```
backend-golang/app.go

```diff
@@ -1,13 +1,17 @@
 package backend_golang
 
 import (
+	"bufio"
 	"context"
 	"errors"
 	"io"
 	"net/http"
 	"os"
+	"os/exec"
 	"path/filepath"
+	"runtime"
+	"syscall"
 	"time"
 
 	"github.com/fsnotify/fsnotify"
 	"github.com/minio/selfupdate"
@@ -41,6 +45,7 @@ func (a *App) OnStartup(ctx context.Context) {
 		a.cmdPrefix = "cd " + a.exDir + " && "
 	}
 
+	os.Chmod(a.exDir+"backend-rust/webgpu_server", 0777)
 	os.Mkdir(a.exDir+"models", os.ModePerm)
 	os.Mkdir(a.exDir+"lora-models", os.ModePerm)
 	os.Mkdir(a.exDir+"finetune/json2binidx_tool/data", os.ModePerm)
@@ -50,11 +55,23 @@ func (a *App) OnStartup(ctx context.Context) {
 	}
 
 	a.downloadLoop()
+	a.midiLoop()
+	a.watchFs()
+	a.monitorHardware()
+}
+
+func (a *App) OnBeforeClose(ctx context.Context) bool {
+	if monitor != nil {
+		monitor.Process.Kill()
+	}
+	return false
+}
+
+func (a *App) watchFs() {
 	watcher, err := fsnotify.NewWatcher()
 	if err == nil {
-		watcher.Add("./lora-models")
-		watcher.Add("./models")
+		watcher.Add(a.exDir + "./lora-models")
+		watcher.Add(a.exDir + "./models")
 		go func() {
 			for {
 				select {
@@ -62,7 +79,7 @@ func (a *App) OnStartup(ctx context.Context) {
 					if !ok {
 						return
 					}
-					wruntime.EventsEmit(ctx, "fsnotify", event.Name)
+					wruntime.EventsEmit(a.ctx, "fsnotify", event.Name)
 				case _, ok := <-watcher.Errors:
 					if !ok {
 						return
@@ -73,13 +90,81 @@ func (a *App) OnStartup(ctx context.Context) {
 	}
 }
 
+var monitor *exec.Cmd
+
+func (a *App) monitorHardware() {
+	if runtime.GOOS != "windows" {
+		return
+	}
+
+	monitor = exec.Command("./components/LibreHardwareMonitor.Console/LibreHardwareMonitor.Console.exe")
+	stdout, err := monitor.StdoutPipe()
+	if err != nil {
+		monitor = nil
+		return
+	}
+
+	go func() {
+		reader := bufio.NewReader(stdout)
+		for {
+			line, _, err := reader.ReadLine()
+			if err != nil {
+				wruntime.EventsEmit(a.ctx, "monitorerr", err.Error())
+				break
+			}
+			wruntime.EventsEmit(a.ctx, "monitor", string(line))
+		}
+	}()
+
+	monitor.SysProcAttr = &syscall.SysProcAttr{}
+	//go:custom_build windows monitor.SysProcAttr.HideWindow = true
+	monitor.Start()
+}
+
+type ProgressReader struct {
+	reader io.Reader
+	total  int64
+	err    error
+}
+
+func (pr *ProgressReader) Read(p []byte) (n int, err error) {
+	n, err = pr.reader.Read(p)
+	pr.err = err
+	pr.total += int64(n)
+	return
+}
+
 func (a *App) UpdateApp(url string) (broken bool, err error) {
 	resp, err := http.Get(url)
 	if err != nil {
 		return false, err
 	}
 	defer resp.Body.Close()
-	err = selfupdate.Apply(resp.Body, selfupdate.Options{})
+	pr := &ProgressReader{reader: resp.Body}
+
+	ticker := time.NewTicker(250 * time.Millisecond)
+	defer ticker.Stop()
+
+	go func() {
+		for {
+			<-ticker.C
+			wruntime.EventsEmit(a.ctx, "updateApp", &DownloadStatus{
+				Name:        filepath.Base(url),
+				Path:        "",
+				Url:         url,
+				Transferred: pr.total,
+				Size:        resp.ContentLength,
+				Speed:       0,
+				Progress:    100 * (float64(pr.total) / float64(resp.ContentLength)),
+				Downloading: pr.err == nil && pr.total < resp.ContentLength,
+				Done:        pr.total == resp.ContentLength,
+			})
+			if pr.err != nil || pr.total == resp.ContentLength {
+				break
+			}
+		}
+	}()
+	err = selfupdate.Apply(pr, selfupdate.Options{})
 	if err != nil {
 		if rerr := selfupdate.RollbackError(err); rerr != nil {
 			return true, rerr
```
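The `UpdateApp` change above stops handing the raw response body to `selfupdate.Apply` and instead wraps it in a `ProgressReader`, so a ticker goroutine can emit download progress while the updater consumes the stream. A minimal Python sketch of the same wrap-the-stream idea (the `CountingReader` name and the download URL are illustrative, not part of the repo):

```python
import shutil
import urllib.request


class CountingReader:
    """Wraps a file-like object and counts the bytes read through it."""

    def __init__(self, raw):
        self.raw = raw
        self.total = 0

    def read(self, size=-1):
        data = self.raw.read(size)
        self.total += len(data)  # every consumer read updates the counter
        return data


# Download while tracking progress; a timer or thread could poll pr.total,
# just as the Go code polls pr.total from its ticker goroutine.
with urllib.request.urlopen("https://example.com/file.bin") as resp:
    pr = CountingReader(resp)
    with open("file.bin", "wb") as f:
        shutil.copyfileobj(pr, f)
    print(f"transferred {pr.total} bytes")
```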
```diff
@@ -33,9 +33,9 @@ type DownloadStatus struct {
 
 var downloadList []*DownloadStatus
 
-func existsInDownloadList(url string) bool {
+func existsInDownloadList(path string, url string) bool {
 	for _, ds := range downloadList {
-		if ds.Url == url {
+		if ds.Path == path || ds.Url == url {
 			return true
 		}
 	}
@@ -88,7 +88,7 @@ func (a *App) ContinueDownload(url string) {
 }
 
 func (a *App) AddToDownloadList(path string, url string) {
-	if !existsInDownloadList(url) {
+	if !existsInDownloadList(a.exDir+path, url) {
 		downloadList = append(downloadList, &DownloadStatus{
 			resp: nil,
 			Name: filepath.Base(path),
```
```diff
@@ -53,12 +53,12 @@ type FileInfo struct {
 	ModTime string `json:"modTime"`
 }
 
-func (a *App) ReadFileInfo(fileName string) (FileInfo, error) {
+func (a *App) ReadFileInfo(fileName string) (*FileInfo, error) {
 	info, err := os.Stat(a.exDir + fileName)
 	if err != nil {
-		return FileInfo{}, err
+		return nil, err
 	}
-	return FileInfo{
+	return &FileInfo{
 		Name:  info.Name(),
 		Size:  info.Size(),
 		IsDir: info.IsDir(),
@@ -145,6 +145,20 @@ func (a *App) OpenSaveFileDialogBytes(filterPattern string, defaultFileName stri
 	return path, nil
 }
 
+// Only return the path of the selected file, because communication between frontend and backend is slow. Use AssetServer Handler to read the file.
+func (a *App) OpenOpenFileDialog(filterPattern string) (string, error) {
+	path, err := wruntime.OpenFileDialog(a.ctx, wruntime.OpenDialogOptions{
+		Filters: []wruntime.FileFilter{{Pattern: filterPattern}},
+	})
+	if err != nil {
+		return "", err
+	}
+	if path == "" {
+		return "", nil
+	}
+	return path, nil
+}
+
 func (a *App) OpenFileFolder(path string, relative bool) error {
 	var absPath string
 	var err error
```
backend-golang/midi.go (new file, 164 lines)

```go
package backend_golang

import (
	"errors"
	"fmt"
	"time"

	"github.com/mattrtaylor/go-rtmidi"
	"github.com/wailsapp/wails/v2/pkg/runtime"
)

type Port struct {
	Name string `json:"name"`
}

type MIDIMessage struct {
	MessageType string `json:"messageType"`
	Channel     int    `json:"channel"`
	Note        int    `json:"note"`
	Velocity    int    `json:"velocity"`
	Control     int    `json:"control"`
	Value       int    `json:"value"`
}

var ports []Port
var input rtmidi.MIDIIn
var out rtmidi.MIDIOut
var activeIndex int = -1
var lastNoteTime time.Time

func (a *App) midiLoop() {
	var err error
	input, err = rtmidi.NewMIDIInDefault()
	if err != nil {
		runtime.EventsEmit(a.ctx, "midiError", err.Error())
		return
	}
	out, err = rtmidi.NewMIDIOutDefault()
	if err != nil {
		runtime.EventsEmit(a.ctx, "midiError", err.Error())
	}
	err = out.OpenPort(0, "")
	if err != nil {
		runtime.EventsEmit(a.ctx, "midiError", err.Error())
	}
	ticker := time.NewTicker(500 * time.Millisecond)
	go func() {
		for {
			<-ticker.C
			count, err := input.PortCount()
			if err != nil {
				continue
			}
			ports = make([]Port, count)
			for i := 0; i < count; i++ {
				name, err := input.PortName(i)
				if err == nil {
					ports[i].Name = name
				}
			}
			runtime.EventsEmit(a.ctx, "midiPorts", &ports)
		}
	}()
}

func (a *App) OpenMidiPort(index int) error {
	if input == nil {
		return errors.New("failed to initialize MIDI input")
	}
	if activeIndex == index {
		return nil
	}
	input.Destroy()
	var err error
	input, err = rtmidi.NewMIDIInDefault()
	if err != nil {
		return err
	}
	err = input.SetCallback(func(msg rtmidi.MIDIIn, bytes []byte, t float64) {
		// https://www.midi.org/specifications-old/item/table-1-summary-of-midi-message
		// https://www.rfc-editor.org/rfc/rfc6295.html
		//
		// msgType channel
		// 1001    0000
		//
		msgType := bytes[0] >> 4
		channel := bytes[0] & 0x0f
		switch msgType {
		case 0x8:
			note := bytes[1]
			runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
				MessageType: "NoteOff",
				Channel:     int(channel),
				Note:        int(note),
			})
		case 0x9:
			elapsed := time.Since(lastNoteTime)
			lastNoteTime = time.Now()
			runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
				MessageType: "ElapsedTime",
				Value:       int(elapsed.Milliseconds()),
			})
			note := bytes[1]
			velocity := bytes[2]
			runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
				MessageType: "NoteOn",
				Channel:     int(channel),
				Note:        int(note),
				Velocity:    int(velocity),
			})
		case 0xb:
			// control 12 => K1 knob, control 13 => K2 knob
			control := bytes[1]
			value := bytes[2]
			runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
				MessageType: "ControlChange",
				Channel:     int(channel),
				Control:     int(control),
				Value:       int(value),
			})
		default:
			fmt.Printf("Unknown midi message: %v\n", bytes)
		}
	})
	if err != nil {
		return err
	}
	err = input.OpenPort(index, "")
	if err != nil {
		return err
	}
	activeIndex = index
	lastNoteTime = time.Now()
	return nil
}

func (a *App) CloseMidiPort() error {
	if input == nil {
		return errors.New("failed to initialize MIDI input")
	}
	if activeIndex == -1 {
		return nil
	}
	activeIndex = -1
	input.Destroy()
	var err error
	input, err = rtmidi.NewMIDIInDefault()
	if err != nil {
		return err
	}
	return nil
}

func (a *App) PlayNote(msg MIDIMessage) error {
	if out == nil {
		return errors.New("failed to initialize MIDI output")
	}
	channelByte := byte(msg.Channel)
	if msg.MessageType == "NoteOn" {
		out.SendMessage([]byte{0x90 | channelByte, byte(msg.Note), byte(msg.Velocity)})
	} else if msg.MessageType == "NoteOff" {
		out.SendMessage([]byte{0x80 | channelByte, byte(msg.Note), byte(msg.Velocity)})
	}
	return nil
}
```
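The callback above splits each MIDI status byte into a message type (high nibble) and channel (low nibble), and `PlayNote` rebuilds the byte with `0x90 | channel` or `0x80 | channel`. A quick sketch of that bit arithmetic (the raw byte values are chosen for illustration):

```python
# Decode: status byte 0x93 = NoteOn (0x9) on channel 3
status, note, velocity = 0x93, 60, 100
msg_type = status >> 4   # 0x9 -> NoteOn
channel = status & 0x0F  # 3

# Encode: rebuild NoteOn/NoteOff status bytes for the same channel
note_on = bytes([0x90 | channel, note, velocity])
note_off = bytes([0x80 | channel, note, velocity])
assert note_on[0] == status  # round-trips back to the original status byte
```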
```diff
@@ -10,7 +10,7 @@ import (
 	"strings"
 )
 
-func (a *App) StartServer(python string, port int, host string) (string, error) {
+func (a *App) StartServer(python string, port int, host string, webui bool, rwkvBeta bool) (string, error) {
 	var err error
 	if python == "" {
 		python, err = GetPython()
@@ -18,7 +18,21 @@ func (a *App) StartServer(python string, port int, host string) (string, error)
 	if err != nil {
 		return "", err
 	}
-	return Cmd(python, "./backend-python/main.py", strconv.Itoa(port), host)
+	args := []string{python, "./backend-python/main.py"}
+	if webui {
+		args = append(args, "--webui")
+	}
+	if rwkvBeta {
+		args = append(args, "--rwkv-beta")
+	}
+	args = append(args, "--port", strconv.Itoa(port), "--host", host)
+	return Cmd(args...)
 }
 
+func (a *App) StartWebGPUServer(port int, host string) (string, error) {
+	args := []string{"./backend-rust/webgpu_server"}
+	args = append(args, "--port", strconv.Itoa(port), "--ip", host)
+	return Cmd(args...)
+}
+
 func (a *App) ConvertModel(python string, modelPath string, strategy string, outPath string) (string, error) {
@@ -32,6 +46,17 @@ func (a *App) ConvertModel(python string, modelPath string, strategy string, out
 	return Cmd(python, "./backend-python/convert_model.py", "--in", modelPath, "--out", outPath, "--strategy", strategy)
 }
 
+func (a *App) ConvertSafetensors(python string, modelPath string, outPath string) (string, error) {
+	var err error
+	if python == "" {
+		python, err = GetPython()
+	}
+	if err != nil {
+		return "", err
+	}
+	return Cmd(python, "./backend-python/convert_safetensors.py", "--input", modelPath, "--output", outPath)
+}
+
 func (a *App) ConvertData(python string, input string, outputPrefix string, vocab string) (string, error) {
 	var err error
 	if python == "" {
@@ -126,13 +151,24 @@ func (a *App) InstallPyDep(python string, cnMirror bool) (string, error) {
 
 	if runtime.GOOS == "windows" {
 		ChangeFileLine("./py310/python310._pth", 3, "Lib\\site-packages")
-		installScript := python + " ./backend-python/get-pip.py -i https://pypi.tuna.tsinghua.edu.cn/simple\n" +
-			python + " -m pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117\n" +
-			python + " -m pip install -r ./backend-python/requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple\n" +
+		installScript := python + " ./backend-python/get-pip.py -i https://pypi.tuna.tsinghua.edu.cn/simple --no-warn-script-location\n" +
+			python + " -m pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117 --no-warn-script-location\n" +
+			python + " -m pip install -r ./backend-python/requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple --no-warn-script-location\n" +
 			"exit"
 		if !cnMirror {
 			installScript = strings.Replace(installScript, " -i https://pypi.tuna.tsinghua.edu.cn/simple", "", -1)
+			installScript = strings.Replace(installScript, "requirements.txt", "requirements_versions.txt", -1)
 		}
 		err = os.WriteFile("./install-py-dep.bat", []byte(installScript), 0644)
 		if err != nil {
```
```diff
@@ -5,12 +5,15 @@ import (
 	"bufio"
 	"embed"
 	"errors"
+	"fmt"
 	"io"
 	"io/fs"
+	"net"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"runtime"
+	"strconv"
 	"strings"
 )
@@ -205,3 +208,12 @@ func Unzip(source, destination string) error {
 	}
 	return nil
 }
+
+func (a *App) IsPortAvailable(port int) bool {
+	l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%s", strconv.Itoa(port)))
+	if err != nil {
+		return false
+	}
+	defer l.Close()
+	return true
+}
```
backend-python/convert_model.py (vendored, 1 change)

```diff
@@ -231,5 +231,6 @@ try:
+        convert_and_save_and_exit=args.out,
     )
 except Exception as e:
     print(e)
     with open("error.txt", "w") as f:
         f.write(str(e))
```
backend-python/convert_safetensors.py (new file, vendored, 70 lines)

```python
import json
import os
import sys
import copy
import torch
from safetensors.torch import load_file, save_file

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, help="Path to input pth model")
parser.add_argument(
    "--output",
    type=str,
    default="./converted.st",
    help="Path to output safetensors model",
)
args = parser.parse_args()


def rename_key(rename, name):
    for k, v in rename.items():
        if k in name:
            name = name.replace(k, v)
    return name


def convert_file(pt_filename: str, sf_filename: str, transpose_names=[], rename={}):
    loaded = torch.load(pt_filename, map_location="cpu")
    if "state_dict" in loaded:
        loaded = loaded["state_dict"]

    loaded = {k: v.clone().half() for k, v in loaded.items()}
    # for k, v in loaded.items():
    #     print(f'{k}\t{v.shape}\t{v.dtype}')

    # For tensors to be contiguous
    for k, v in loaded.items():
        for transpose_name in transpose_names:
            if transpose_name in k:
                loaded[k] = v.transpose(0, 1)
    loaded = {rename_key(rename, k).lower(): v.contiguous() for k, v in loaded.items()}

    for k, v in loaded.items():
        print(f"{k}\t{v.shape}\t{v.dtype}")

    dirname = os.path.dirname(sf_filename)
    os.makedirs(dirname, exist_ok=True)
    save_file(loaded, sf_filename, metadata={"format": "pt"})
    reloaded = load_file(sf_filename)
    for k in loaded:
        pt_tensor = loaded[k]
        sf_tensor = reloaded[k]
        if not torch.equal(pt_tensor, sf_tensor):
            raise RuntimeError(f"The output tensors do not match for key {k}")


if __name__ == "__main__":
    try:
        convert_file(
            args.input,
            args.output,
            ["lora_A"],
            {"time_faaaa": "time_first", "lora_A": "lora.0", "lora_B": "lora.1"},
        )
        print(f"Saved to {args.output}")
    except Exception as e:
        print(e)
        with open("error.txt", "w") as f:
            f.write(str(e))
```
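Going by the argparse definitions above, the script would be invoked roughly like this (the model file names are placeholders):

```python
import subprocess

# Convert a .pth checkpoint to safetensors; --output defaults to ./converted.st
subprocess.run(
    [
        "python",
        "./backend-python/convert_safetensors.py",
        "--input", "models/my-model.pth",
        "--output", "models/my-model.st",
    ],
    check=True,
)
```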
```diff
@@ -1,3 +1,6 @@
 import multipart
+import fitz
+import safetensors
 import midi2audio
 import mido
+import lm_dataformat
@@ -8,6 +11,7 @@ import GPUtil
 
 import torch
 import rwkv
+import langchain
 import numpy
 import tokenizers
 import fastapi
```
backend-python/global_var.py

```diff
@@ -1,8 +1,10 @@
 from enum import Enum, auto
 
+Args = "args"
 Model = "model"
 Model_Status = "model_status"
 Model_Config = "model_config"
+Deploy_Mode = "deploy_mode"
 
 
 class ModelStatus(Enum):
@@ -15,6 +17,7 @@ def init():
     global GLOBALS
     GLOBALS = {}
     set(Model_Status, ModelStatus.Offline)
+    set(Deploy_Mode, False)
 
 
 def set(key, value):
```
backend-python/main.py

```diff
@@ -1,10 +1,54 @@
+import time
+
+start_time = time.time()
+
+import argparse
+from typing import Union, Sequence
+
+
+def get_args(args: Union[Sequence[str], None] = None):
+    parser = argparse.ArgumentParser()
+    group = parser.add_argument_group(title="server arguments")
+    group.add_argument(
+        "--port",
+        type=int,
+        default=8000,
+        help="port to run the server on (default: 8000)",
+    )
+    group.add_argument(
+        "--host",
+        type=str,
+        default="127.0.0.1",
+        help="host to run the server on (default: 127.0.0.1)",
+    )
+    group = parser.add_argument_group(title="mode arguments")
+    group.add_argument(
+        "--webui",
+        action="store_true",
+        help="whether to enable WebUI (default: False)",
+    )
+    group.add_argument(
+        "--rwkv-beta",
+        action="store_true",
+        help="whether to use rwkv-beta (default: False)",
+    )
+    args = parser.parse_args(args)
+
+    return args
+
+
+if __name__ == "__main__":
+    args = get_args()
+
+
 import os
 import sys
 
 sys.path.append(os.path.dirname(os.path.realpath(__file__)))
 
 import psutil
-from fastapi import Depends, FastAPI
+from contextlib import asynccontextmanager
+from fastapi import Depends, FastAPI, status
 from fastapi.middleware.cors import CORSMiddleware
 import uvicorn
@@ -12,10 +56,17 @@ from utils.rwkv import *
 from utils.torch import *
 from utils.ngrok import *
 from utils.log import log_middleware
-from routes import completion, config, state_cache, midi
+from routes import completion, config, state_cache, midi, misc, file_process
 import global_var
 
-app = FastAPI(dependencies=[Depends(log_middleware)])
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    init()
+    yield
+
+
+app = FastAPI(lifespan=lifespan, dependencies=[Depends(log_middleware)])
 
 app.add_middleware(
     CORSMiddleware,
@@ -28,12 +79,47 @@ app.add_middleware(
 app.include_router(completion.router)
 app.include_router(config.router)
 app.include_router(midi.router)
+app.include_router(file_process.router)
+app.include_router(misc.router)
 app.include_router(state_cache.router)
 
 
-@app.on_event("startup")
+@app.post("/exit", tags=["Root"])
+def exit():
+    if global_var.get(global_var.Deploy_Mode) is True:
+        raise HTTPException(status.HTTP_403_FORBIDDEN)
+
+    parent_pid = os.getpid()
+    parent = psutil.Process(parent_pid)
+    for child in parent.children(recursive=True):
+        child.kill()
+    parent.kill()
+
+
+try:
+    if (
+        "RWKV_RUNNER_PARAMS" in os.environ
+        and "--webui" in os.environ["RWKV_RUNNER_PARAMS"].split(" ")
+    ) or args.webui:
+        from webui_server import webui_server
+
+        app.mount("/", webui_server)
+except NameError:
+    pass
+
+
+@app.get("/", tags=["Root"])
+def read_root():
+    return {"Hello": "World!"}
+
+
 def init():
     global_var.init()
+    cmd_params = os.environ["RWKV_RUNNER_PARAMS"]
+    global_var.set(
+        global_var.Args, get_args(cmd_params.split(" ") if cmd_params else None)
+    )
+
     state_cache.init()
 
     set_torch()
@@ -42,23 +128,7 @@ def init():
     ngrok_connect()
 
 
-@app.get("/", tags=["Root"])
-def read_root():
-    return {"Hello": "World!"}
-
-
-@app.post("/exit", tags=["Root"])
-def exit():
-    parent_pid = os.getpid()
-    parent = psutil.Process(parent_pid)
-    for child in parent.children(recursive=True):
-        child.kill()
-    parent.kill()
-
-
 if __name__ == "__main__":
-    uvicorn.run(
-        "main:app",
-        port=8000 if len(sys.argv) < 2 else int(sys.argv[1]),
-        host="127.0.0.1" if len(sys.argv) < 3 else sys.argv[2],
-    )
+    os.environ["RWKV_RUNNER_PARAMS"] = " ".join(sys.argv[1:])
+    print("--- %s seconds ---" % (time.time() - start_time))
+    uvicorn.run("main:app", port=args.port, host=args.host, workers=1)
```
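With `get_args` in place, the server reads its configuration from explicit flags (or from `RWKV_RUNNER_PARAMS` when spawned by the Go backend) instead of positional `sys.argv` entries. A sketch of both entry paths, with example flag values:

```python
# Parse flags programmatically, as init() does with RWKV_RUNNER_PARAMS:
args = get_args(["--port", "8000", "--host", "127.0.0.1", "--webui"])
assert args.port == 8000 and args.webui and not args.rwkv_beta

# Equivalent command line, matching the args the Go StartServer builds:
#   python ./backend-python/main.py --webui --port 8000 --host 127.0.0.1
```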
3 binary files not shown.
backend-python/routes/completion.py

```diff
@@ -2,11 +2,12 @@ import asyncio
 import json
 from threading import Lock
 from typing import List, Union
+from enum import Enum
 import base64
 
 from fastapi import APIRouter, Request, status, HTTPException
 from sse_starlette.sse import EventSourceResponse
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 import numpy as np
 import tiktoken
 from utils.rwkv import *
@@ -16,34 +17,59 @@ import global_var
 router = APIRouter()
 
 
+class Role(Enum):
+    User = "user"
+    Assistant = "assistant"
+    System = "system"
+
+
 class Message(BaseModel):
-    role: str
-    content: str
+    role: Role
+    content: str = Field(min_length=0)
+    raw: bool = Field(False, description="Whether to treat content as raw text")
+
+
+default_stop = [
+    "\n\nUser",
+    "\n\nQuestion",
+    "\n\nQ",
+    "\n\nHuman",
+    "\n\nBob",
+    "\n\nAssistant",
+    "\n\nAnswer",
+    "\n\nA",
+    "\n\nBot",
+    "\n\nAlice",
+]
 
 
 class ChatCompletionBody(ModelConfigBody):
-    messages: List[Message]
-    model: str = "rwkv"
+    messages: Union[List[Message], None]
+    model: Union[str, None] = "rwkv"
     stream: bool = False
-    stop: Union[str, List[str]] = [
-        "\n\nUser",
-        "\n\nQuestion",
-        "\n\nQ",
-        "\n\nHuman",
-        "\n\nBob",
-    ]
-    user_name: str = None
-    assistant_name: str = None
+    stop: Union[str, List[str], None] = default_stop
+    user_name: Union[str, None] = Field(
+        None, description="Internal user name", min_length=1
+    )
+    assistant_name: Union[str, None] = Field(
+        None, description="Internal assistant name", min_length=1
+    )
+    presystem: bool = Field(
+        True, description="Whether to insert default system prompt at the beginning"
+    )
 
-    class Config:
-        schema_extra = {
+    model_config = {
+        "json_schema_extra": {
             "example": {
-                "messages": [{"role": "user", "content": "hello"}],
+                "messages": [
+                    {"role": Role.User.value, "content": "hello", "raw": False}
+                ],
                 "model": "rwkv",
                 "stream": False,
                 "stop": None,
                 "user_name": None,
                 "assistant_name": None,
+                "presystem": True,
                 "max_tokens": 1000,
                 "temperature": 1.2,
                 "top_p": 0.5,
@@ -51,16 +77,17 @@ class ChatCompletionBody(ModelConfigBody):
                 "frequency_penalty": 0.4,
             }
         }
+    }
 
 
 class CompletionBody(ModelConfigBody):
-    prompt: Union[str, List[str]]
-    model: str = "rwkv"
+    prompt: Union[str, List[str], None]
+    model: Union[str, None] = "rwkv"
     stream: bool = False
-    stop: Union[str, List[str]] = None
+    stop: Union[str, List[str], None] = None
 
-    class Config:
-        schema_extra = {
+    model_config = {
+        "json_schema_extra": {
             "example": {
                 "prompt": "The following is an epic science fiction masterpiece that is immortalized, "
                 + "with delicate descriptions and grand depictions of interstellar civilization wars.\nChapter 1.\n",
@@ -74,6 +101,7 @@ class CompletionBody(ModelConfigBody):
                 "frequency_penalty": 0.4,
             }
         }
+    }
 
 
 completion_lock = Lock()
@@ -87,7 +115,7 @@ async def eval_rwkv(
     body: ModelConfigBody,
     prompt: str,
     stream: bool,
-    stop: Union[str, List[str]],
+    stop: Union[str, List[str], None],
     chat_mode: bool,
 ):
     global requests_num
@@ -200,7 +228,7 @@ async def eval_rwkv(
                     "choices": [
                         {
                             "message": {
-                                "role": "assistant",
+                                "role": Role.Assistant.value,
                                 "content": response,
                             },
                             "index": 0,
@@ -223,17 +251,8 @@ async def chat_completions(body: ChatCompletionBody, request: Request):
     if model is None:
         raise HTTPException(status.HTTP_400_BAD_REQUEST, "model not loaded")
 
-    question = body.messages[-1]
-    if question.role == "user":
-        question = question.content
-    elif question.role == "system":
-        question = body.messages[-2]
-        if question.role == "user":
-            question = question.content
-        else:
-            raise HTTPException(status.HTTP_400_BAD_REQUEST, "no question found")
-    else:
-        raise HTTPException(status.HTTP_400_BAD_REQUEST, "no question found")
+    if body.messages is None or body.messages == []:
+        raise HTTPException(status.HTTP_400_BAD_REQUEST, "messages not found")
 
     interface = model.interface
     user = model.user if body.user_name is None else body.user_name
@@ -241,30 +260,43 @@ async def chat_completions(body: ChatCompletionBody, request: Request):
 
     is_raven = model.rwkv_type == RWKVType.Raven
 
-    completion_text = (
-        f"""
+    completion_text: str = ""
+    basic_system: Union[str, None] = None
+    if body.presystem:
+        if body.messages[0].role == Role.System:
+            basic_system = body.messages[0].content
+
+        if basic_system is None:
+            completion_text = (
+                f"""
 The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. \
 {bot} is very intelligent, creative and friendly. \
 {bot} is unlikely to disagree with {user}, and {bot} doesn't like to ask {user} questions. \
 {bot} likes to tell {user} a lot about herself and her opinions. \
 {bot} usually gives {user} kind, helpful and informative advices.\n
 """
-        if is_raven
-        else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
-        + "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
-    )
-    for message in body.messages:
-        if message.role == "system":
-            completion_text = (
-                f"The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. "
                 if is_raven
                 else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
-                + message.content.replace("\\n", "\n")
-                .replace("\r\n", "\n")
-                .replace("\n\n", "\n")
-                .replace("\n", " ")
-                .strip()
-                .replace("You are", f"{bot} is" if is_raven else "I am")
-                else (
-                    f"{user}{interface} hi\n\n{bot}{interface} Hi. "
-                    + "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
-                )
+                + "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
             )
+        else:
+            if not body.messages[0].raw:
+                basic_system = (
+                    basic_system.replace("\r\n", "\n")
+                    .replace("\r", "\n")
+                    .replace("\n\n", "\n")
+                    .replace("\n", " ")
+                    .strip()
+                )
+            completion_text = (
+                (
+                    f"The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. "
+                    if is_raven
+                    else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+                )
+                + basic_system.replace("You are", f"{bot} is" if is_raven else "I am")
+                .replace("you are", f"{bot} is" if is_raven else "I am")
+                .replace("You're", f"{bot} is" if is_raven else "I'm")
+                .replace("you're", f"{bot} is" if is_raven else "I'm")
@@ -275,33 +307,34 @@ The following is a coherent verbose detailed conversation between a girl named {
                 .replace("你", f"{bot}" if is_raven else "我")
                 + "\n\n"
             )
-            break
-    for message in body.messages:
-        if message.role == "user":
-            completion_text += (
-                f"{user}{interface} "
-                + message.content.replace("\\n", "\n")
-                .replace("\r\n", "\n")
+
+    for message in body.messages[(0 if basic_system is None else 1) :]:
+        append_message: str = ""
+        if message.role == Role.User:
+            append_message = f"{user}{interface} " + message.content
+        elif message.role == Role.Assistant:
+            append_message = f"{bot}{interface} " + message.content
+        elif message.role == Role.System:
+            append_message = message.content
+        if not message.raw:
+            append_message = (
+                append_message.replace("\r\n", "\n")
+                .replace("\r", "\n")
                 .replace("\n\n", "\n")
                 .strip()
-                + "\n\n"
             )
-        elif message.role == "assistant":
-            completion_text += (
-                f"{bot}{interface} "
-                + message.content.replace("\\n", "\n")
-                .replace("\r\n", "\n")
-                .replace("\n\n", "\n")
-                .strip()
-                + "\n\n"
-            )
+        completion_text += append_message + "\n\n"
     completion_text += f"{bot}{interface}"
 
+    user_code = model.pipeline.decode([model.pipeline.encode(user)[0]])
+    bot_code = model.pipeline.decode([model.pipeline.encode(bot)[0]])
     if type(body.stop) == str:
-        body.stop = [body.stop, f"\n\n{user}", f"\n\n{bot}"]
-    else:
-        body.stop.append(f"\n\n{user}")
-        body.stop.append(f"\n\n{bot}")
+        body.stop = [body.stop, f"\n\n{user_code}", f"\n\n{bot_code}"]
+    elif type(body.stop) == list:
+        body.stop.append(f"\n\n{user_code}")
+        body.stop.append(f"\n\n{bot_code}")
+    elif body.stop is None:
+        body.stop = default_stop
 
     if body.stream:
         return EventSourceResponse(
@@ -345,13 +378,13 @@ async def completions(body: CompletionBody, request: Request):
 
 
 class EmbeddingsBody(BaseModel):
-    input: Union[str, List[str], List[List[int]]]
-    model: str = "rwkv"
+    input: Union[str, List[str], List[List[int]], None]
+    model: Union[str, None] = "rwkv"
     encoding_format: str = None
    fast_mode: bool = False
 
-    class Config:
-        schema_extra = {
+    model_config = {
+        "json_schema_extra": {
             "example": {
                 "input": "a big apple",
                 "model": "rwkv",
@@ -359,6 +392,7 @@ class EmbeddingsBody(BaseModel):
                 "fast_mode": False,
             }
         }
+    }
 
 
 def embedding_base64(embedding: List[float]) -> str:
```
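Together with the release note's custom `user_name`/`assistant_name` support, a request against the reworked body model might look like this (host and port assume a local server on the defaults):

```python
import requests

resp = requests.post(
    "http://127.0.0.1:8000/chat/completions",
    json={
        "messages": [{"role": "user", "content": "hello", "raw": False}],
        "model": "rwkv",
        "stream": False,
        "user_name": None,       # override the internal user name if desired
        "assistant_name": None,  # override the internal assistant name
        "presystem": True,       # insert the default system prompt
    },
)
print(resp.json()["choices"][0]["message"]["content"])
```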
backend-python/routes/config.py

```diff
@@ -10,39 +10,34 @@ import global_var
 router = APIRouter()
 
 
-def get_tokens_path(model_path: str):
-    model_path = model_path.lower()
-    tokenizer_dir = f"{pathlib.Path(__file__).parent.parent.resolve()}/rwkv_pip/"
-
-    default_tokens_path = tokenizer_dir + "20B_tokenizer.json"
-
-    if "raven" in model_path:
-        return default_tokens_path
-    elif "world" in model_path:
-        return "rwkv_vocab_v20230424"
-    elif "midi" in model_path:
-        return tokenizer_dir + "tokenizer-midi.json"
-    else:
-        return default_tokens_path
-
-
 class SwitchModelBody(BaseModel):
     model: str
     strategy: str
+    tokenizer: Union[str, None] = None
     customCuda: bool = False
+    deploy: bool = Field(
+        False,
+        description="Deploy mode. If success, will disable /switch-model, /exit and other dangerous APIs (state cache APIs, part of midi APIs)",
+    )
 
-    class Config:
-        schema_extra = {
+    model_config = {
+        "json_schema_extra": {
             "example": {
                 "model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
                 "strategy": "cuda fp16",
+                "tokenizer": "",
                 "customCuda": False,
+                "deploy": False,
             }
         }
+    }
 
 
 @router.post("/switch-model", tags=["Configs"])
 def switch_model(body: SwitchModelBody, response: Response, request: Request):
+    if global_var.get(global_var.Deploy_Mode) is True:
+        raise HTTPException(Status.HTTP_403_FORBIDDEN)
+
     if global_var.get(global_var.Model_Status) is global_var.ModelStatus.Loading:
         response.status_code = Status.HTTP_304_NOT_MODIFIED
         return
@@ -68,17 +63,7 @@ def switch_model(body: SwitchModelBody, response: Response, request: Request):
     try:
         global_var.set(
             global_var.Model,
-            TextRWKV(
-                model=body.model,
-                strategy=body.strategy,
-                tokens_path=get_tokens_path(body.model),
-            )
-            if "midi" not in body.model.lower()
-            else MusicRWKV(
-                model=body.model,
-                strategy=body.strategy,
-                tokens_path=get_tokens_path(body.model),
-            ),
+            RWKV(model=body.model, strategy=body.strategy, tokenizer=body.tokenizer),
         )
     except Exception as e:
         print(e)
@@ -88,6 +73,8 @@ def switch_model(body: SwitchModelBody, response: Response, request: Request):
             Status.HTTP_500_INTERNAL_SERVER_ERROR, f"failed to load: {e}"
         )
 
+    if body.deploy:
+        global_var.set(global_var.Deploy_Mode, True)
     if global_var.get(global_var.Model_Config) is None:
         global_var.set(
             global_var.Model_Config, get_rwkv_config(global_var.get(global_var.Model))
```
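Loading a model through the updated endpoint follows the `json_schema_extra` example above; with `deploy` set, the dangerous APIs are disabled afterwards (a local default URL is assumed):

```python
import requests

requests.post(
    "http://127.0.0.1:8000/switch-model",
    json={
        "model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
        "strategy": "cuda fp16",
        "tokenizer": "",      # empty: let the backend pick a tokenizer
        "customCuda": False,
        "deploy": False,      # True would lock /switch-model, /exit, etc.
    },
)
```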
backend-python/routes/file_process.py (new file, 79 lines)

```python
import os
from fastapi import (
    APIRouter,
    HTTPException,
    status,
    Depends,
    File,
    UploadFile,
)
from pydantic import BaseModel
from typing import Iterator

router = APIRouter()


class FileToTextParams(BaseModel):
    file_name: str
    file_encoding: str = "utf-8"


@router.post("/file-to-text", tags=["File Process"])
async def file_to_text(
    params: FileToTextParams = Depends(), file_data: UploadFile = File(...)
):
    from langchain.schema import Document
    from langchain.document_loaders.blob_loaders import Blob

    # from langchain
    def parse_text(blob: Blob) -> Iterator[Document]:
        yield Document(page_content=blob.as_string(), metadata={"source": blob.source})

    # from langchain
    def parse_pdf(blob: Blob) -> Iterator[Document]:
        import fitz

        with blob.as_bytes_io() as stream:
            doc = fitz.Document(stream=stream)

            yield from [
                Document(
                    page_content=page.get_text(),
                    metadata=dict(
                        {
                            "source": blob.source,
                            "file_path": blob.source,
                            "page": page.number,
                            "total_pages": len(doc),
                        },
                        **{
                            k: doc.metadata[k]
                            for k in doc.metadata
                            if type(doc.metadata[k]) in [str, int]
                        },
                    ),
                )
                for page in doc
            ]

    file_parsers = {".txt": parse_text, ".pdf": parse_pdf}

    file_name = file_data.filename or params.file_name
    file_ext = os.path.splitext(file_name)[-1]

    if file_ext not in file_parsers:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "file type not supported")

    try:
        pages: Iterator[Document] = file_parsers[file_ext](
            Blob.from_data(
                await file_data.read(),
                encoding=params.file_encoding,
                path=file_name,
            )
        )
        pages = list(pages)
    except Exception as e:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, f"{e}")

    return {"pages": pages}
```
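Since `FileToTextParams` comes in via `Depends()`, its fields arrive as query parameters while the file itself is multipart form data. A hedged example call (the file name is a placeholder):

```python
import requests

with open("manual.pdf", "rb") as f:
    resp = requests.post(
        "http://127.0.0.1:8000/file-to-text",
        params={"file_name": "manual.pdf", "file_encoding": "utf-8"},
        files={"file_data": f},  # parsed per page for .pdf, whole-file for .txt
    )
print(resp.json()["pages"])
```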
@@ -1,4 +1,5 @@
|
||||
import io
|
||||
import global_var
|
||||
from fastapi import APIRouter, HTTPException, status
|
||||
from starlette.responses import StreamingResponse
|
||||
from pydantic import BaseModel
|
||||
@@ -11,12 +12,13 @@ router = APIRouter()
|
||||
class TextToMidiBody(BaseModel):
|
||||
text: str
|
||||
|
||||
class Config:
|
||||
schema_extra = {
|
||||
model_config = {
|
||||
"json_schema_extra": {
|
||||
"example": {
|
||||
"text": "p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:2d:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:2d:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:1f:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:26:a g:39:a g:39:a g:3e:a g:3e:a g:42:a g:42:a pi:39:a pi:3e:a pi:42:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0",
                }
            }
        }


@router.post("/text-to-midi", tags=["MIDI"])
@@ -35,17 +37,21 @@ class TxtToMidiBody(BaseModel):
    txt_path: str
    midi_path: str

    class Config:
        schema_extra = {
    model_config = {
        "json_schema_extra": {
            "example": {
                "txt_path": "midi/sample.txt",
                "midi_path": "midi/sample.mid",
            }
        }
    }


@router.post("/txt-to-midi", tags=["MIDI"])
def txt_to_midi(body: TxtToMidiBody):
    if global_var.get(global_var.Deploy_Mode) is True:
        raise HTTPException(status.HTTP_403_FORBIDDEN)

    if not body.midi_path.startswith("midi/"):
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "bad output path")

@@ -65,14 +71,15 @@ class MidiToWavBody(BaseModel):
    wav_path: str
    sound_font_path: str = "assets/default_sound_font.sf2"

    class Config:
        schema_extra = {
    model_config = {
        "json_schema_extra": {
            "example": {
                "midi_path": "midi/sample.mid",
                "wav_path": "midi/sample.wav",
                "sound_font_path": "assets/default_sound_font.sf2",
            }
        }
    }


@router.post("/midi-to-wav", tags=["MIDI"])
@@ -81,6 +88,9 @@ def midi_to_wav(body: MidiToWavBody):
    Install fluidsynth first, see more: https://github.com/FluidSynth/fluidsynth/wiki/Download#distributions
    """

    if global_var.get(global_var.Deploy_Mode) is True:
        raise HTTPException(status.HTTP_403_FORBIDDEN)

    if not body.wav_path.startswith("midi/"):
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "bad output path")

@@ -95,14 +105,15 @@ class TextToWavBody(BaseModel):
    wav_name: str
    sound_font_path: str = "assets/default_sound_font.sf2"

    class Config:
        schema_extra = {
    model_config = {
        "json_schema_extra": {
            "example": {
"text": "p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:2d:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:2d:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:1f:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:26:a g:39:a g:39:a g:3e:a g:3e:a g:42:a g:42:a pi:39:a pi:3e:a pi:42:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0",
                "wav_name": "sample",
                "sound_font_path": "assets/default_sound_font.sf2",
            }
        }
    }


@router.post("/text-to-wav", tags=["MIDI"])
@@ -111,6 +122,9 @@ def text_to_wav(body: TextToWavBody):
    Install fluidsynth first, see more: https://github.com/FluidSynth/fluidsynth/wiki/Download#distributions
    """

    if global_var.get(global_var.Deploy_Mode) is True:
        raise HTTPException(status.HTTP_403_FORBIDDEN)

    text = body.text.strip()
    if not text.startswith("<start>"):
        text = "<start> " + text

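The repeated `class Config:`/`schema_extra` → `model_config`/`json_schema_extra` replacements in this file follow the pydantic v1-to-v2 migration: per-model configuration becomes a plain dict named `model_config`. A minimal sketch of the new pattern, using the same fields as the diff above:

from pydantic import BaseModel

class TxtToMidiBody(BaseModel):
    txt_path: str
    midi_path: str

    # pydantic v2 style; replaces `class Config: schema_extra = {...}`
    model_config = {
        "json_schema_extra": {
            "example": {
                "txt_path": "midi/sample.txt",
                "midi_path": "midi/sample.mid",
            }
        }
    }
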
131 backend-python/routes/misc.py Normal file
@@ -0,0 +1,131 @@
from fastapi import APIRouter, HTTPException, status
from utils.rwkv import AbstractRWKV
import global_var

router = APIRouter()


@router.get("/dashboard/billing/credit_grants", tags=["MISC"])
def credit_grants():
    return {
        "object": "credit_summary",
        "total_granted": 10000,
        "total_used": 0,
        "total_available": 10000,
        "grants": {
            "object": "list",
            "data": [
                {
                    "object": "credit_grant",
                    "grant_amount": 10000,
                    "used_amount": 0,
                    "effective_at": 1672531200,
                    "expires_at": 33229440000,
                }
            ],
        },
    }


fake_models = [
    {
        "id": "gpt-3.5-turbo",
        "object": "model",
        "created": 1677610602,
        "owned_by": "openai",
        "permission": [
            {
                "id": "modelperm-zy5TOjnE2zVaicIcKO9bQDgX",
                "object": "model_permission",
                "created": 1690864883,
                "allow_create_engine": False,
                "allow_sampling": True,
                "allow_logprobs": True,
                "allow_search_indices": False,
                "allow_view": True,
                "allow_fine_tuning": False,
                "organization": "*",
                "group": None,
                "is_blocking": False,
            }
        ],
        "root": "gpt-3.5-turbo",
        "parent": None,
    },
    {
        "id": "text-davinci-003",
        "object": "model",
        "created": 1669599635,
        "owned_by": "openai-internal",
        "permission": [
            {
                "id": "modelperm-a6niqBmW2JaGmo0fDO7FEt1n",
                "object": "model_permission",
                "created": 1690930172,
                "allow_create_engine": False,
                "allow_sampling": True,
                "allow_logprobs": True,
                "allow_search_indices": False,
                "allow_view": True,
                "allow_fine_tuning": False,
                "organization": "*",
                "group": None,
                "is_blocking": False,
            }
        ],
        "root": "text-davinci-003",
        "parent": None,
    },
]


@router.get("/v1/models", tags=["MISC"])
@router.get("/models", tags=["MISC"])
def models():
    model: AbstractRWKV = global_var.get(global_var.Model)
    model_name = model.name if model else "rwkv"

    return {
        "object": "list",
        "data": [
            {
                "id": model_name,
                "object": "model",
                "owned_by": "rwkv",
                "root": model_name,
                "parent": None,
            },
            *fake_models,
        ],
    }


@router.get("/v1/models/{model_id}", tags=["MISC"])
@router.get("/models/{model_id}", tags=["MISC"])
def model(model_id: str):
    for fake_model in fake_models:
        if fake_model["id"] == model_id:
            return fake_model

    if "rwkv" in model_id.lower():
        model: AbstractRWKV = global_var.get(global_var.Model)
        model_name = model.name if model else "rwkv"
        return {
            "id": model_name,
            "object": "model",
            "owned_by": "rwkv",
            "root": model_name,
            "parent": None,
        }

    raise HTTPException(
        status.HTTP_404_NOT_FOUND,
        {
            "error": {
                "message": f"The model '{model_id}' does not exist",
                "type": "invalid_request_error",
                "param": "model",
                "code": "model_not_found",
            }
        },
    )
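misc.py mimics the OpenAI billing and model-listing endpoints so that OpenAI-compatible clients accept the local server unchanged. A usage sketch, assuming the backend listens on 127.0.0.1:8000 (the port is an assumption):

import requests

base = "http://127.0.0.1:8000"  # assumed local backend address

print(requests.get(f"{base}/v1/models").json())                 # local model + fake_models
print(requests.get(f"{base}/v1/models/gpt-3.5-turbo").json())   # served from fake_models
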
@@ -1,15 +1,16 @@
from typing import Any, Dict, List
from typing import Any, Dict, List, Union
from utils.log import quick_log
from fastapi import APIRouter, HTTPException, Request, Response, status
from pydantic import BaseModel
import gc
import copy
import global_var

router = APIRouter()

trie = None
dtrie: Dict = {}
max_trie_len = 3000
max_trie_len = 300
loop_start_id = 1  # to prevent preloaded prompts from being deleted
loop_del_trie_id = loop_start_id

@@ -36,6 +37,9 @@ def init():
def disable_state_cache():
    global trie, dtrie

    if global_var.get(global_var.Deploy_Mode) is True:
        raise HTTPException(status.HTTP_403_FORBIDDEN)

    trie = None
    dtrie = {}
    gc.collect()
@@ -46,6 +50,10 @@ def disable_state_cache():
@router.post("/enable-state-cache", tags=["State Cache"])
def enable_state_cache():
    global trie, dtrie

    if global_var.get(global_var.Deploy_Mode) is True:
        raise HTTPException(status.HTTP_403_FORBIDDEN)

    try:
        import cyac

@@ -60,14 +68,18 @@ def enable_state_cache():

class AddStateBody(BaseModel):
    prompt: str
    tokens: List[str]
    tokens: List[Union[str, int]]
    state: Any
    logits: Any


@router.post("/add-state", tags=["State Cache"])
# @router.post("/add-state", tags=["State Cache"])
def add_state(body: AddStateBody):
    global trie, dtrie, loop_del_trie_id

    # if global_var.get(global_var.Deploy_Mode) is True:
    #     raise HTTPException(status.HTTP_403_FORBIDDEN)

    if trie is None:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")

@@ -96,7 +108,7 @@ def add_state(body: AddStateBody):
        quick_log(
            None,
            None,
            f"New Trie Id: {id}\nTrie Len: {len(trie)}\nTrie Buff Size: {trie.buff_size()}\nDtrie Buff Size Of Id: {_get_a_dtrie_buff_size(dtrie[id])}",
            f"New Trie Id: {id}\nTrie Len: {len(trie)}\nTrie Buff Size: {trie.buff_size()}\nDtrie Buff Size Of Id: {__get_a_dtrie_buff_size(dtrie[id])}",
        )
        return "success"
    except Exception as e:
@@ -108,6 +120,10 @@ def add_state(body: AddStateBody):
@router.post("/reset-state", tags=["State Cache"])
def reset_state():
    global trie, dtrie

    if global_var.get(global_var.Deploy_Mode) is True:
        raise HTTPException(status.HTTP_403_FORBIDDEN)

    if trie is None:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")

@@ -124,7 +140,7 @@ class LongestPrefixStateBody(BaseModel):
    prompt: str


def _get_a_dtrie_buff_size(dtrie_v):
def __get_a_dtrie_buff_size(dtrie_v):
    # print(sys.getsizeof(dtrie_v["tokens"][0])) # str
    # print(sys.getsizeof(dtrie_v["tokens"][0]) * len(dtrie_v["tokens"]))
    # print(dtrie_v["state"][0][0].element_size())
@@ -141,9 +157,13 @@ def _get_a_dtrie_buff_size(dtrie_v):
    return 54 * len(dtrie_v["tokens"]) + 491520 + 262144 + 28  # TODO


@router.post("/longest-prefix-state", tags=["State Cache"])
# @router.post("/longest-prefix-state", tags=["State Cache"])
def longest_prefix_state(body: LongestPrefixStateBody, request: Request):
    global trie

    # if global_var.get(global_var.Deploy_Mode) is True:
    #     raise HTTPException(status.HTTP_403_FORBIDDEN)

    if trie is None:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")

@@ -180,9 +200,13 @@ def longest_prefix_state(body: LongestPrefixStateBody, request: Request):
        }


@router.post("/save-state", tags=["State Cache"])
# @router.post("/save-state", tags=["State Cache"])
def save_state():
    global trie

    # if global_var.get(global_var.Deploy_Mode) is True:
    #     raise HTTPException(status.HTTP_403_FORBIDDEN)

    if trie is None:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")

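The state cache keeps generated RWKV states in a trie keyed by prompt text (via `cyac`), so a request whose prompt extends a cached prompt resumes from the saved state instead of re-evaluating the whole prefix. A simplified sketch of the lookup idea, using a plain dict in place of the real trie:

# Minimal sketch of longest-prefix state reuse. Assumes dtrie-style entries
# {prompt: {"state": ..., "tokens": [...]}} instead of the real cyac trie.
def find_longest_prefix_state(cache: dict, prompt: str):
    best = None
    for cached_prompt, entry in cache.items():
        if prompt.startswith(cached_prompt):
            if best is None or len(cached_prompt) > len(best[0]):
                best = (cached_prompt, entry)
    if best is None:
        return None, prompt  # nothing cached: evaluate from scratch
    cached_prompt, entry = best
    return entry["state"], prompt[len(cached_prompt):]  # only the tail is new
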
124 backend-python/rwkv_pip/beta/cuda/att_one.cu vendored Normal file
@@ -0,0 +1,124 @@
#include "ATen/ATen.h"
|
||||
#include <cuda_fp16.h>
|
||||
#include <cuda_runtime.h>
|
||||
#include <torch/extension.h>
|
||||
|
||||
#include "element_wise.h"
|
||||
#include "util.h"
|
||||
|
||||
// Equivalent Python code:
|
||||
// ww = t_first + k
|
||||
// p = torch.maximum(pp, ww)
|
||||
// e1 = torch.exp(pp - p)
|
||||
// e2 = torch.exp(ww - p)
|
||||
// wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
|
||||
// ww = t_decay + pp
|
||||
// p = torch.maximum(ww, k)
|
||||
// e1 = torch.exp(ww - p)
|
||||
// e2 = torch.exp(k - p)
|
||||
// t1 = e1 * aa + e2 * v
|
||||
// t2 = e1 * bb + e2
|
||||
// r = r * wkv
|
||||
// return t1, t2, p, r
|
||||
struct WkvForwardOne {
|
||||
const float *t_first;
|
||||
const float *k;
|
||||
const float *pp;
|
||||
const float *aa;
|
||||
const float *bb;
|
||||
const float *t_decay;
|
||||
const float *v;
|
||||
/* out */ float *t1;
|
||||
/* out */ float *t2;
|
||||
/* out */ float *p;
|
||||
/* in & out */ half *r;
|
||||
|
||||
__device__ void operator()(int i) const {
|
||||
float ww = t_first[i] + k[i];
|
||||
float pp_ = pp[i];
|
||||
float p_ = (pp_ > ww) ? pp_ : ww;
|
||||
float e1 = expf(pp_ - p_);
|
||||
float e2 = expf(ww - p_);
|
||||
float aa_ = aa[i];
|
||||
float bb_ = bb[i];
|
||||
float v_ = v[i];
|
||||
r[i] = __hmul(r[i], __float2half(((e1 * aa_ + e2 * v_) / (e1 * bb_ + e2))));
|
||||
ww = t_decay[i] + pp_;
|
||||
float k_ = k[i];
|
||||
p_ = (ww > k_) ? ww : k_;
|
||||
e1 = expf(ww - p_);
|
||||
e2 = expf(k_ - p_);
|
||||
t1[i] = e1 * aa_ + e2 * v_;
|
||||
t2[i] = e1 * bb_ + e2;
|
||||
p[i] = p_;
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
Equivalent Python code:
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
vx = xx * v_mix + sx * (1 - v_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
*/
|
||||
|
||||
struct Mix {
|
||||
const half *xx;
|
||||
const half *sx;
|
||||
const half *k_mix;
|
||||
const half *v_mix;
|
||||
const half *r_mix;
|
||||
/* out */ half *kx;
|
||||
/* out */ half *vx;
|
||||
/* out */ half *rx;
|
||||
|
||||
__device__ void operator()(int i) const {
|
||||
half xx_ = xx[i];
|
||||
half sx_ = sx[i];
|
||||
half k_mix_ = k_mix[i];
|
||||
half v_mix_ = v_mix[i];
|
||||
half r_mix_ = r_mix[i];
|
||||
kx[i] = __hadd(__hmul(xx_, k_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
|
||||
vx[i] = __hadd(__hmul(xx_, v_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
|
||||
rx[i] = __hadd(__hmul(xx_, r_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
|
||||
}
|
||||
};
|
||||
|
||||
using torch::Tensor;
|
||||
|
||||
void gemm_fp16_cublas_tensor(Tensor a, Tensor b, Tensor c);
|
||||
|
||||
Tensor att_one(Tensor x, Tensor ln_w, Tensor ln_b, Tensor sx, Tensor k_mix,
|
||||
Tensor v_mix, Tensor r_mix, Tensor kw,
|
||||
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx, Tensor rw,
|
||||
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
|
||||
/* imm */ Tensor k, Tensor pp, Tensor ww, Tensor aa, Tensor bb,
|
||||
Tensor t_decay, /* imm */ Tensor v, /* in & out */ Tensor r,
|
||||
/* out */ Tensor x_plus_out, /* out */ Tensor t1,
|
||||
/* out */ Tensor t2, /* out */ Tensor p) {
|
||||
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
|
||||
element_wise(Mix{data_ptr<half>(xx), data_ptr<half>(sx),
|
||||
data_ptr<half>(k_mix), data_ptr<half>(v_mix),
|
||||
data_ptr<half>(r_mix), data_ptr<half>(kx),
|
||||
data_ptr<half>(vx), data_ptr<half>(rx)},
|
||||
x.numel());
|
||||
|
||||
gemm_fp16_cublas_tensor(kx, kw, k);
|
||||
gemm_fp16_cublas_tensor(vx, vw, v);
|
||||
gemm_fp16_cublas_tensor(rx, rw, r);
|
||||
at::sigmoid_(r);
|
||||
|
||||
element_wise(WkvForwardOne{data_ptr<float>(t_first), data_ptr<float>(k),
|
||||
data_ptr<float>(pp), data_ptr<float>(aa),
|
||||
data_ptr<float>(bb), data_ptr<float>(t_decay),
|
||||
data_ptr<float>(v), data_ptr<float>(t1),
|
||||
data_ptr<float>(t2), data_ptr<float>(p),
|
||||
data_ptr<half>(r)},
|
||||
x.numel());
|
||||
|
||||
gemm_fp16_cublas_tensor(r, ow, x_plus_out);
|
||||
x_plus_out += x;
|
||||
return xx;
|
||||
}
|
||||
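For reference, the single-token WKV recurrence that att_one.cu fuses can be written directly in NumPy; the running maximum `pp` keeps both exponentials bounded, which is why the kernel carries it as part of the state. This is a sketch of the math, not the project's API:

import numpy as np

def wkv_one(t_first, t_decay, k, v, aa, bb, pp):
    # Mirrors the "Equivalent Python code" comment in att_one.cu.
    ww = t_first + k
    p = np.maximum(pp, ww)
    e1, e2 = np.exp(pp - p), np.exp(ww - p)
    wkv = (e1 * aa + e2 * v) / (e1 * bb + e2)
    ww = t_decay + pp
    p = np.maximum(ww, k)
    e1, e2 = np.exp(ww - p), np.exp(k - p)
    return wkv, e1 * aa + e2 * v, e1 * bb + e2, p  # wkv, aa', bb', pp'
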
109 backend-python/rwkv_pip/beta/cuda/att_one_v5.cu vendored Normal file
@@ -0,0 +1,109 @@
#include "ATen/ATen.h"
|
||||
#include <cuda_fp16.h>
|
||||
#include <cuda_runtime.h>
|
||||
#include <torch/extension.h>
|
||||
|
||||
#include "element_wise.h"
|
||||
#include "util.h"
|
||||
|
||||
// Equivalent Python code:
|
||||
// s1 = t_first * a + s
|
||||
// s2 = a + t_decay * s
|
||||
struct Fused1 {
|
||||
const float *t_first;
|
||||
const float *t_decay;
|
||||
const float *a;
|
||||
const float *s;
|
||||
const int32_t inner_size;
|
||||
/* out */ float *s1;
|
||||
/* out */ float *s2;
|
||||
|
||||
__device__ void operator()(int i) const {
|
||||
const int j = i / inner_size;
|
||||
s1[i] = t_first[j] * a[i] + s[i];
|
||||
s2[i] = a[i] + t_decay[j] * s[i];
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
Equivalent Python code:
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
vx = xx * v_mix + sx * (1 - v_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
*/
|
||||
|
||||
struct Mix {
|
||||
const half *xx;
|
||||
const half *sx;
|
||||
const half *k_mix;
|
||||
const half *v_mix;
|
||||
const half *r_mix;
|
||||
/* out */ half *kx;
|
||||
/* out */ half *vx;
|
||||
/* out */ half *rx;
|
||||
|
||||
__device__ void operator()(int i) const {
|
||||
half xx_ = xx[i];
|
||||
half sx_ = sx[i];
|
||||
half k_mix_ = k_mix[i];
|
||||
half v_mix_ = v_mix[i];
|
||||
half r_mix_ = r_mix[i];
|
||||
kx[i] = __hadd(__hmul(xx_, k_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
|
||||
vx[i] = __hadd(__hmul(xx_, v_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
|
||||
rx[i] = __hadd(__hmul(xx_, r_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
|
||||
}
|
||||
};
|
||||
|
||||
using torch::Tensor;
|
||||
|
||||
void gemm_fp16_cublas_tensor(Tensor a, Tensor b, Tensor c);
|
||||
|
||||
Tensor att_one_v5(Tensor x, Tensor sx, Tensor s, Tensor ln_w, Tensor ln_b,
|
||||
Tensor lx_w, Tensor lx_b, Tensor k_mix, Tensor v_mix,
|
||||
Tensor r_mix, Tensor kw,
|
||||
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx,
|
||||
Tensor rw,
|
||||
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
|
||||
/* imm */ Tensor k, Tensor t_decay, /* imm */ Tensor v,
|
||||
/* imm */ Tensor r, /* imm */ Tensor s1,
|
||||
/* out */ Tensor x_plus_out, /* out */ Tensor s2) {
|
||||
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
|
||||
element_wise(Mix{data_ptr<half>(xx), data_ptr<half>(sx),
|
||||
data_ptr<half>(k_mix), data_ptr<half>(v_mix),
|
||||
data_ptr<half>(r_mix), data_ptr<half>(kx),
|
||||
data_ptr<half>(vx), data_ptr<half>(rx)},
|
||||
x.numel());
|
||||
|
||||
int H = t_decay.size(0);
|
||||
int S = x.size(-1) / H;
|
||||
gemm_fp16_cublas_tensor(rx, rw, r);
|
||||
r = at::reshape(r, {H, 1, S});
|
||||
gemm_fp16_cublas_tensor(kx, kw, k);
|
||||
k = at::reshape(k, {H, S, 1});
|
||||
gemm_fp16_cublas_tensor(vx, vw, v);
|
||||
v = at::reshape(v, {H, 1, S});
|
||||
|
||||
{
|
||||
Tensor a = at::matmul(k, v);
|
||||
|
||||
// s1 = t_first * a + s
|
||||
// s2 = a + t_decay * s
|
||||
element_wise(Fused1{data_ptr<float>(t_first), data_ptr<float>(t_decay),
|
||||
data_ptr<float>(a), data_ptr<float>(s),
|
||||
static_cast<int32_t>(a.size(1) * a.size(2)),
|
||||
data_ptr<float>(s1), data_ptr<float>(s2)},
|
||||
a.numel());
|
||||
}
|
||||
|
||||
Tensor out = at::matmul(r, s1);
|
||||
out = at::flatten(out);
|
||||
out = at::squeeze(at::group_norm(at::unsqueeze(out, 0), H, lx_w, lx_b), 0);
|
||||
out = at::_cast_Half(out);
|
||||
|
||||
gemm_fp16_cublas_tensor(out, ow, x_plus_out);
|
||||
x_plus_out += x;
|
||||
return xx;
|
||||
}
|
||||
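In the v5 path each head carries an S-by-S state matrix: the token contributes the outer product k·vᵀ, `t_first` weights its immediate contribution, and `t_decay` decays the carried state. A NumPy sketch of that update, with shapes matching the reshapes above (per-head scalars for `t_first`/`t_decay`, as this kernel indexes them):

import numpy as np

def att_one_v5_state(r, k, v, s, t_first, t_decay):
    # r: (H, 1, S), k: (H, S, 1), v: (H, 1, S), s: (H, S, S)
    a = k @ v                                # per-head outer product
    s1 = t_first[:, None, None] * a + s      # state used for this token
    s2 = a + t_decay[:, None, None] * s      # state carried to the next token
    out = r @ s1                             # (H, 1, S), then group-normed in C++
    return out.reshape(-1), s2
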
178 backend-python/rwkv_pip/beta/cuda/att_seq.cu vendored Normal file
@@ -0,0 +1,178 @@
#include "ATen/ATen.h"
|
||||
#include <cuda_fp16.h>
|
||||
#include <cuda_runtime.h>
|
||||
#include <torch/extension.h>
|
||||
|
||||
#include "util.h"
|
||||
#include "element_wise.h"
|
||||
|
||||
using torch::Tensor;
|
||||
|
||||
void gemm_fp16_cublas(const void *a, const void *b, void *c, int m,
|
||||
int n, int k, bool output_fp32);
|
||||
|
||||
// based on `kernel_wkv_forward`, fusing more operations
|
||||
__global__ void kernel_wkv_forward_new(
|
||||
const int B, const int T, const int C, const float *__restrict__ const _w,
|
||||
const float *__restrict__ const _u, const float *__restrict__ const _k,
|
||||
const float *__restrict__ const _v, const half *__restrict__ const r,
|
||||
half *__restrict__ const _y, float *__restrict__ const _aa,
|
||||
float *__restrict__ const _bb, float *__restrict__ const _pp) {
|
||||
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
const int _b = idx / C;
|
||||
const int _c = idx % C;
|
||||
const int _offset = _b * T * C + _c;
|
||||
const int _state_offset = _b * C + _c;
|
||||
|
||||
float u = _u[_c];
|
||||
float w = _w[_c];
|
||||
const float *__restrict__ const k = _k + _offset;
|
||||
const float *__restrict__ const v = _v + _offset;
|
||||
half *__restrict__ const y = _y + _offset;
|
||||
|
||||
float aa = _aa[_state_offset];
|
||||
float bb = _bb[_state_offset];
|
||||
float pp = _pp[_state_offset];
|
||||
for (int i = 0; i < T; i++) {
|
||||
const int ii = i * C;
|
||||
const float kk = k[ii];
|
||||
const float vv = v[ii];
|
||||
float ww = u + kk;
|
||||
float p = max(pp, ww);
|
||||
float e1 = exp(pp - p);
|
||||
float e2 = exp(ww - p);
|
||||
y[ii] = __float2half((e1 * aa + e2 * vv) / (e1 * bb + e2));
|
||||
ww = w + pp;
|
||||
p = max(ww, kk);
|
||||
e1 = exp(ww - p);
|
||||
e2 = exp(kk - p);
|
||||
aa = e1 * aa + e2 * vv;
|
||||
bb = e1 * bb + e2;
|
||||
pp = p;
|
||||
}
|
||||
_aa[_state_offset] = aa;
|
||||
_bb[_state_offset] = bb;
|
||||
_pp[_state_offset] = pp;
|
||||
}
|
||||
|
||||
void cuda_wkv_forward_new(int B, int T, int C, float *w, float *u, float *k,
|
||||
float *v, half *r, half *y, float *aa, float *bb,
|
||||
float *pp) {
|
||||
dim3 threadsPerBlock(min(C, 32));
|
||||
assert(B * C % threadsPerBlock.x == 0);
|
||||
dim3 numBlocks(B * C / threadsPerBlock.x);
|
||||
kernel_wkv_forward_new<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, r,
|
||||
y, aa, bb, pp);
|
||||
}
|
||||
|
||||
__global__ void _att_mix(const half *xx, const half *sx, const half *k_mix,
|
||||
const half *v_mix, const half *r_mix,
|
||||
const int outer_size, const int inner_size, half *kx,
|
||||
half *vx, half *rx) {
|
||||
for (int idx2 = blockIdx.x * blockDim.x + threadIdx.x; idx2 < inner_size;
|
||||
idx2 += blockDim.x * gridDim.x) {
|
||||
half k_mix_ = k_mix[idx2];
|
||||
half v_mix_ = v_mix[idx2];
|
||||
half r_mix_ = r_mix[idx2];
|
||||
for (int row = 0; row < outer_size; ++row) {
|
||||
int idx1 = row * inner_size + idx2;
|
||||
half xx_ = xx[idx1];
|
||||
half sx_ = sx[idx1];
|
||||
kx[idx1] = __hadd(__hmul(xx_, k_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
|
||||
vx[idx1] = __hadd(__hmul(xx_, v_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
|
||||
rx[idx1] = __hadd(__hmul(xx_, r_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void att_mix(const half *xx, const half *sx, const half *k_mix,
|
||||
const half *v_mix, const half *r_mix, const int outer_size,
|
||||
const int inner_size, half *kx, half *vx, half *rx) {
|
||||
// 256 is good enough on most GPUs
|
||||
const int32_t BLOCK_SIZE = 256;
|
||||
assert(inner_size % BLOCK_SIZE == 0);
|
||||
_att_mix<<<inner_size / BLOCK_SIZE, BLOCK_SIZE>>>(
|
||||
xx, sx, k_mix, v_mix, r_mix, outer_size, inner_size, kx, vx, rx);
|
||||
}
|
||||
|
||||
struct InplaceSigmoid {
  __device__ __forceinline__ void operator()(int i) const {
    ptr[i] = __float2half(1.0 / (1.0 + exp(-__half2float(ptr[i]))));
  }
  half *ptr;
};

struct InplaceMul {
  __device__ __forceinline__ void operator()(int i) const {
    y[i] = __hmul(x[i], y[i]);
  }
  half *y;
  half *x;
};

/*
   Equivalent Python code:

   xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
   sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
   kx = xx * k_mix + sx * (1 - k_mix)
   vx = xx * v_mix + sx * (1 - v_mix)
   rx = xx * r_mix + sx * (1 - r_mix)

   r = torch.sigmoid(gemm(rx, rw))
   k = gemm(kx, kw, output_dtype=torch.float32)
   v = gemm(vx, vw, output_dtype=torch.float32)

   T = x.shape[0]
   for t in range(T):
       kk = k[t]
       vv = v[t]
       ww = t_first + kk
       p = torch.maximum(pp, ww)
       e1 = torch.exp(pp - p)
       e2 = torch.exp(ww - p)
       sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
       ww = t_decay + pp
       p = torch.maximum(ww, kk)
       e1 = torch.exp(ww - p)
       e2 = torch.exp(kk - p)
       aa = e1 * aa + e2 * vv
       bb = e1 * bb + e2
       pp = p
   out = gemm(r * sx, ow)
   return x + out, xx[-1,:], aa, bb, pp
*/
Tensor att_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
               Tensor v_mix, Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
               Tensor ow, Tensor t_first, Tensor pp, Tensor aa, Tensor bb,
               Tensor t_decay, /* imm */ Tensor buf, /* out */ Tensor x_plus_out) {
  Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
  sx = at::cat({sx.unsqueeze(0), xx.slice(0, 0, -1)}, 0);
  char* buf_ptr = (char*)buf.data_ptr();
  half* kx = (half*)buf_ptr;
  half* vx = kx + x.numel();
  half* rx = vx + x.numel();
  half* wkv_y = rx + x.numel();
  att_mix(data_ptr<half>(xx), data_ptr<half>(sx), data_ptr<half>(k_mix),
          data_ptr<half>(v_mix), data_ptr<half>(r_mix), xx.size(0), xx.size(1),
          kx, vx, rx);
  float* k = reinterpret_cast<float*>(wkv_y + x.numel());
  float* v = k + x.size(0) * kw.size(1);
  half* r = reinterpret_cast<half*>(v + x.size(0) * vw.size(1));

  gemm_fp16_cublas(kx, kw.data_ptr(), k, x.size(0), kw.size(1), kw.size(0), true);
  gemm_fp16_cublas(vx, vw.data_ptr(), v, x.size(0), vw.size(1), vw.size(0), true);
  gemm_fp16_cublas(rx, rw.data_ptr(), r, x.size(0), rw.size(1), rw.size(0), false);
  element_wise(InplaceSigmoid{r}, x.size(0) * rw.size(1));
  cuda_wkv_forward_new(1, x.size(0), x.size(1), data_ptr<float>(t_decay),
                       data_ptr<float>(t_first), k, v, r,
                       wkv_y, data_ptr<float>(aa),
                       data_ptr<float>(bb), data_ptr<float>(pp));
  element_wise(InplaceMul{wkv_y, r}, x.numel());
  gemm_fp16_cublas(wkv_y, ow.data_ptr(), x_plus_out.data_ptr(), x.size(0), ow.size(1), ow.size(0), false);
  x_plus_out += x;
  return xx;
}
21 backend-python/rwkv_pip/beta/cuda/element_wise.h vendored Normal file
@@ -0,0 +1,21 @@
#include <cassert>
#include <cstddef>
#include <cstdint>

template <typename Func> __global__ void _element_wise(Func func, int n) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    func(i);
  }
}

// NOTE: packed data type (e.g. float4) is overkill for current sizes
// (4096 in 7B model and 768 in 0.1B model),
// and is not faster than the plain float version.
template <typename Func>
void element_wise(Func func, int n) {
  // 256 is good enough on most GPUs
  const int32_t BLOCK_SIZE = 256;
  assert(n % BLOCK_SIZE == 0);
  _element_wise<<<n / BLOCK_SIZE, BLOCK_SIZE>>>(func, n);
}
165 backend-python/rwkv_pip/beta/cuda/ffn.cu vendored Normal file
@@ -0,0 +1,165 @@
#include "ATen/ATen.h"
|
||||
#include <cuda_fp16.h>
|
||||
#include <cuda_runtime.h>
|
||||
#include <torch/extension.h>
|
||||
|
||||
#include "element_wise.h"
|
||||
#include "util.h"
|
||||
|
||||
using torch::Tensor;
|
||||
|
||||
void gemm_fp16_cublas(const void *a, const void *b, void *c, int ori_m,
|
||||
int ori_n, int ori_k, bool output_fp32);
|
||||
|
||||
__global__ void _ffn_seq_mix(const half *xx, const half *sx, const half *k_mix,
|
||||
const half *r_mix, const int outer_size,
|
||||
const int inner_size, half *kx, half *rx) {
|
||||
for (int idx2 = blockIdx.x * blockDim.x + threadIdx.x; idx2 < inner_size;
|
||||
idx2 += blockDim.x * gridDim.x) {
|
||||
half k_mix_ = k_mix[idx2];
|
||||
half r_mix_ = r_mix[idx2];
|
||||
for (int row = 0; row < outer_size; ++row) {
|
||||
int idx1 = row * inner_size + idx2;
|
||||
half xx_ = xx[idx1];
|
||||
half sx_ = sx[idx1];
|
||||
kx[idx1] = __hadd(__hmul(xx_, k_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
|
||||
rx[idx1] = __hadd(__hmul(xx_, r_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ffn_seq_mix(const half *xx, const half *sx, const half *k_mix,
|
||||
const half *r_mix, const int outer_size, const int inner_size,
|
||||
half *kx, half *rx) {
|
||||
// 256 is good enough on most GPUs
|
||||
const int32_t BLOCK_SIZE = 256;
|
||||
assert(inner_size % BLOCK_SIZE == 0);
|
||||
_ffn_seq_mix<<<inner_size / BLOCK_SIZE, BLOCK_SIZE>>>(
|
||||
xx, sx, k_mix, r_mix, outer_size, inner_size, kx, rx);
|
||||
}
|
||||
|
||||
struct InplaceSigmoid {
|
||||
__device__ __forceinline__ void operator()(int i) const {
|
||||
ptr[i] = __float2half(1.0 / (1.0 + exp(-__half2float(ptr[i]))));
|
||||
}
|
||||
half *ptr;
|
||||
};
|
||||
|
||||
struct InplaceReLUAndSquare {
|
||||
__device__ __forceinline__ void operator()(int i) const {
|
||||
// __hmax is not defined in old cuda
|
||||
if (__hgt(ptr[i], __float2half(0))) {
|
||||
ptr[i] = __hmul(ptr[i], ptr[i]);
|
||||
} else {
|
||||
ptr[i] = __float2half(0);
|
||||
}
|
||||
}
|
||||
half *ptr;
|
||||
};
|
||||
|
||||
struct InplaceFma {
|
||||
__device__ __forceinline__ void operator()(int i) const {
|
||||
a[i] = __hfma(a[i], b[i], c[i]);
|
||||
}
|
||||
half *a;
|
||||
const half *b;
|
||||
const half *c;
|
||||
};
|
||||
|
||||
/*
|
||||
Equivalent Python code:
|
||||
|
||||
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
|
||||
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
|
||||
r = torch.sigmoid(gemm(rx, rw))
|
||||
vx = torch.square(torch.relu(gemm(kx, kw)))
|
||||
out = r * gemm(vx, vw)
|
||||
return x + out, xx[-1,:]
|
||||
*/
|
||||
Tensor ffn_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
|
||||
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
|
||||
/* imm */ Tensor buf,
|
||||
/* out */ Tensor x_plus_out) {
|
||||
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
|
||||
sx = at::cat({sx.unsqueeze(0), xx.slice(0, 0, -1)}, 0);
|
||||
char *buf_ptr = (char *)buf.data_ptr();
|
||||
half *kx = (half *)buf_ptr;
|
||||
half *rx = kx + x.numel();
|
||||
half *vx = rx + x.numel();
|
||||
half *r = vx + x.size(0) * kw.size(1);
|
||||
ffn_seq_mix(data_ptr<half>(xx), data_ptr<half>(sx), data_ptr<half>(k_mix),
|
||||
data_ptr<half>(r_mix), xx.size(0), xx.size(1), kx, rx);
|
||||
|
||||
gemm_fp16_cublas(rx, rw.data_ptr(), r, x.size(0), rw.size(1), x.size(1),
|
||||
false);
|
||||
element_wise(InplaceSigmoid{r}, x.size(0) * rw.size(1));
|
||||
gemm_fp16_cublas(kx, kw.data_ptr(), vx, x.size(0), kw.size(1), x.size(1),
|
||||
false);
|
||||
element_wise(InplaceReLUAndSquare{vx}, x.size(0) * kw.size(1));
|
||||
gemm_fp16_cublas(vx, vw.data_ptr(), x_plus_out.data_ptr(), x.size(0),
|
||||
vw.size(1), vw.size(0), false);
|
||||
element_wise(InplaceFma{data_ptr<half>(x_plus_out), r, data_ptr<half>(x)},
|
||||
x_plus_out.numel());
|
||||
return xx;
|
||||
}
|
||||
|
||||
struct FfnOneMix {
|
||||
__device__ __forceinline__ void operator()(int idx) {
|
||||
half k_mix_ = k_mix[idx];
|
||||
half r_mix_ = r_mix[idx];
|
||||
half xx_ = xx[idx];
|
||||
half sx_ = sx[idx];
|
||||
kx[idx] = __hadd(__hmul(xx_, k_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
|
||||
rx[idx] = __hadd(__hmul(xx_, r_mix_),
|
||||
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
|
||||
}
|
||||
half *k_mix;
|
||||
half *r_mix;
|
||||
half *xx;
|
||||
half *sx;
|
||||
half *kx;
|
||||
half *rx;
|
||||
};
|
||||
|
||||
/*
|
||||
Equivalent Python code:
|
||||
|
||||
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
|
||||
r = torch.sigmoid(gemm(rx, rw))
|
||||
vx = torch.square(torch.relu(gemm(kx, kw)))
|
||||
out = r * gemm(vx, vw)
|
||||
return x + out, xx
|
||||
*/
|
||||
Tensor ffn_one(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
|
||||
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
|
||||
/* imm */ Tensor buf,
|
||||
/* out */ Tensor x_plus_out) {
|
||||
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
|
||||
char *buf_ptr = (char *)buf.data_ptr();
|
||||
half *kx = (half *)buf_ptr;
|
||||
half *rx = kx + x.numel();
|
||||
half *vx = rx + x.numel();
|
||||
half *r = vx + x.size(0) * kw.size(1);
|
||||
element_wise(FfnOneMix{data_ptr<half>(k_mix), data_ptr<half>(r_mix),
|
||||
data_ptr<half>(xx), data_ptr<half>(sx), kx, rx},
|
||||
x.numel());
|
||||
// vector * matrix, so m = 1
|
||||
gemm_fp16_cublas(rx, rw.data_ptr(), r, 1, rw.size(1), rw.size(0), false);
|
||||
element_wise(InplaceSigmoid{r}, rw.size(1));
|
||||
gemm_fp16_cublas(kx, kw.data_ptr(), vx, 1, kw.size(1), kw.size(0), false);
|
||||
element_wise(InplaceReLUAndSquare{vx}, kw.size(1));
|
||||
gemm_fp16_cublas(vx, vw.data_ptr(), x_plus_out.data_ptr(), 1, vw.size(1),
|
||||
vw.size(0), false);
|
||||
element_wise(InplaceFma{data_ptr<half>(x_plus_out), r, data_ptr<half>(x)},
|
||||
x_plus_out.numel());
|
||||
return xx;
|
||||
}
|
||||
128 backend-python/rwkv_pip/beta/cuda/gemm_fp16_cublas.cpp vendored Normal file
@@ -0,0 +1,128 @@
#include <cublas_v2.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>

#define CUBLAS_CHECK(condition)                                   \
  for (cublasStatus_t _cublas_check_status = (condition);         \
       _cublas_check_status != CUBLAS_STATUS_SUCCESS;)            \
    throw std::runtime_error("cuBLAS error " +                    \
                             std::to_string(_cublas_check_status) + " at " + \
                             std::to_string(__LINE__));

#define CUDA_CHECK(condition)                                     \
  for (cudaError_t _cuda_check_status = (condition);              \
       _cuda_check_status != cudaSuccess;)                        \
    throw std::runtime_error(                                     \
        "CUDA error " + std::string(cudaGetErrorString(_cuda_check_status)) + \
        " at " + std::to_string(__LINE__));

cublasHandle_t get_cublas_handle() {
  static cublasHandle_t cublas_handle = []() {
    cublasHandle_t handle = nullptr;
    CUBLAS_CHECK(cublasCreate(&handle));
#if CUDA_VERSION < 11000
    CUBLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
#else
    CUBLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // CUDA_VERSION < 11000
    return handle;
  }();
  return cublas_handle;
}

/*
  NOTE: blas gemm is column-major by default, but we need row-major output.
  The data of row-major, transposed matrix is exactly the same as the
  column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
*/
void gemm_fp16_cublas(const void *a, const void *b, void *c, int ori_m,
                      int ori_n, int ori_k, bool output_fp32) {
  const auto cuda_data_type = CUDA_R_16F;
  const auto cuda_c_data_type = output_fp32 ? CUDA_R_32F : CUDA_R_16F;
  const auto compute_type = CUDA_R_32F;
  const float sp_alpha = 1.f;
  // use CUBLAS_OP_N. see the notes above
  const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
  const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
  // m = (B^T).size(0) = B.size(1) = n;
  const int cublas_m = ori_n;
  const int cublas_k = ori_k;
  // compatible with rwkv one mode, where 1-D tensor * 2-D tensor
  // const int n = a.dense_dim() == 1 ? 1 : a.size(0);
  const int cublas_n = ori_m;
  const int cublas_lda = cublas_m;
  const int cublas_ldb = cublas_k;
  const int cublas_ldc = cublas_m;
  cublasHandle_t cublas_handle = get_cublas_handle();

#if CUDA_VERSION >= 11000
  cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
#else
  cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
#endif
  const float sp_beta = 0.f;
  CUBLAS_CHECK(cublasGemmEx(
      cublas_handle, cublas_trans_a, cublas_trans_b, cublas_m, cublas_n,
      cublas_k, &sp_alpha, b, cuda_data_type, cublas_lda,
      a, cuda_data_type, cublas_ldb, &sp_beta, c,
      cuda_c_data_type, cublas_ldc, compute_type, algo));
}

/*
  NOTE: blas gemm is column-major by default, but we need row-major output.
  The data of row-major, transposed matrix is exactly the same as the
  column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
*/
void gemm_fp16_cublas_tensor(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
  if (a.sizes().size() == 1) {
    assert(b.sizes().size() == 2);
    a = at::unsqueeze(a, 0);
  }
  const auto cuda_data_type = CUDA_R_16F;
  const auto cuda_c_data_type =
      c.dtype() == torch::kFloat32 ? CUDA_R_32F : CUDA_R_16F;
  const auto compute_type = CUDA_R_32F;
  const float sp_alpha = 1.f;
  // swap a and b, and use CUBLAS_OP_N. see the notes above
  std::swap(a, b);
  const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
  const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
  // m = (B^T).size(0) = B.size(1), and = A.size(1) after swap,
  // negative axis is used because of the existence of batch matmul.
  const int m = a.size(-1);
  const int k = a.size(-2);
  const int n = b.size(-2);
  const int cublas_lda = m;
  const int cublas_ldb = k;
  const int cublas_ldc = m;
  cublasHandle_t cublas_handle = get_cublas_handle();

#if CUDA_VERSION >= 11000
  cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
#else
  cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
#endif
  const float sp_beta = 0.f;
  if (a.sizes().size() == 2 && b.sizes().size() == 2) {
    CUBLAS_CHECK(cublasGemmEx(
        cublas_handle, cublas_trans_a, cublas_trans_b, m, n, k, &sp_alpha,
        a.data_ptr(), cuda_data_type, cublas_lda, b.data_ptr(), cuda_data_type,
        cublas_ldb, &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc,
        compute_type, algo));
  } else {
    // batch matmul
    assert(a.sizes().size() == 3 && b.sizes().size() == 3);

    const long long int cublas_stride_a = m * k;
    const long long int cublas_stride_b = k * n;
    const long long int cublas_stride_c = m * n;
    CUBLAS_CHECK(cublasGemmStridedBatchedEx(
        cublas_handle, cublas_trans_a, cublas_trans_b, m,
        n, k, &sp_alpha, a.data_ptr(), cuda_data_type, cublas_lda,
        cublas_stride_a, b.data_ptr(), cuda_data_type, cublas_ldb, cublas_stride_b,
        &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc, cublas_stride_c,
        a.size(0), compute_type, algo));
  }
}
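Both gemm wrappers rely on the identity C = A·B ⇒ Cᵀ = Bᵀ·Aᵀ: a row-major buffer read as column-major is its transpose, so passing (b, a) to a column-major GEMM with CUBLAS_OP_N writes A·B directly in row-major order. A NumPy sanity check of the identity:

import numpy as np

a = np.random.rand(4, 3).astype(np.float32)
b = np.random.rand(3, 5).astype(np.float32)

# A column-major view of a row-major buffer is the transpose, so computing
# B^T @ A^T in "column-major land" produces exactly the row-major bytes of A @ B.
c_T = b.T @ a.T
assert np.allclose(c_T.T, a @ b)
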
246 backend-python/rwkv_pip/beta/cuda/operators.cu vendored Normal file
@@ -0,0 +1,246 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#define MIN_VALUE (-1e38)
typedef at::Half fp16;
__half *cast(fp16 *ptr) {
  return reinterpret_cast<__half *>(ptr);
}

template <typename F>
__global__ void kernel_wkv_forward(const int B, const int T, const int C,
                                   const float *__restrict__ const _w, const float *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
                                   F *__restrict__ const _y, float *__restrict__ const _aa, float *__restrict__ const _bb, float *__restrict__ const _pp) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int _b = idx / C;
    const int _c = idx % C;
    const int _offset = _b * T * C + _c;
    const int _state_offset = _b * C + _c;

    float u = _u[_c];
    float w = _w[_c];
    const F *__restrict__ const k = _k + _offset;
    const F *__restrict__ const v = _v + _offset;
    F *__restrict__ const y = _y + _offset;

    float aa = _aa[_state_offset];
    float bb = _bb[_state_offset];
    float pp = _pp[_state_offset];
    for (int i = 0; i < T; i++) {
        const int ii = i * C;
        const float kk = float(k[ii]);
        const float vv = float(v[ii]);
        float ww = u + kk;
        float p = max(pp, ww);
        float e1 = exp(pp - p);
        float e2 = exp(ww - p);
        y[ii] = F((e1 * aa + e2 * vv) / (e1 * bb + e2));
        ww = w + pp;
        p = max(ww, kk);
        e1 = exp(ww - p);
        e2 = exp(kk - p);
        aa = e1 * aa + e2 * vv;
        bb = e1 * bb + e2;
        pp = p;
    }
    _aa[_state_offset] = aa;
    _bb[_state_offset] = bb;
    _pp[_state_offset] = pp;
}

template <typename F>
void cuda_wkv_forward(int B, int T, int C, float *w, float *u, F *k, F *v, F *y, float *aa, float *bb, float *pp) {
    dim3 threadsPerBlock( min(C, 32) );
    assert(B * C % threadsPerBlock.x == 0);
    dim3 numBlocks(B * C / threadsPerBlock.x);
    kernel_wkv_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, aa, bb, pp);
}

template void cuda_wkv_forward<fp16>(
    int B, int T, int C,
    float *w, float *u, fp16 *k, fp16 *v, fp16 *y,
    float *aa, float *bb, float *pp);
template void cuda_wkv_forward<float>(
    int B, int T, int C,
    float *w, float *u, float *k, float *v, float *y,
    float *aa, float *bb, float *pp);

__global__ void kernel_mm_seq_fp32i8(
    const int B, const int N, const int M,
    const float *__restrict__ const x, const int x_stride,
    const uint8_t *__restrict__ const w, const int w_stride,
    const float *__restrict__ const mx,
    const float *__restrict__ const rx,
    const float *__restrict__ const my,
    const float *__restrict__ const ry,
    float *__restrict__ const y, const int y_stride) {

    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int k = blockIdx.y * blockDim.y + threadIdx.y;

    if (i < B && k < M) {
        float y_local = 0;
        for (int j = 0; j < N; ++j) {
            y_local += x[i * x_stride + j] * (
                (float(w[j * w_stride + k]) + 0.5f)
                * rx[k] * ry[j] + mx[k] + my[j]
            );
        }
        y[i * y_stride + k] = y_local;
    }
}

template <typename F>
void cuda_mm8_seq(int B, int N, int M,
                  F *x, int x_stride,
                  uint8_t *w, int w_stride,
                  F *mx, F *rx,
                  F *my, F *ry,
                  F *y, int y_stride);

template <>
void cuda_mm8_seq<float>(int B, int N, int M,
                         float *x, int x_stride,
                         uint8_t *w, int w_stride,
                         float *mx, float *rx,
                         float *my, float *ry,
                         float *y, int y_stride) {
    dim3 blockSize(1, 128);
    dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
    kernel_mm_seq_fp32i8<<<gridSize, blockSize>>>(
        B, N, M, x, x_stride, w, w_stride,
        mx, rx, my, ry, y, y_stride);
}

__global__ void kernel_mm_seq_fp16i8(
    const int B, const int N, const int M,
    const __half *__restrict__ const x, const int x_stride,
    const uint8_t *__restrict__ const w, const int w_stride,
    const __half *__restrict__ const mx,
    const __half *__restrict__ const rx,
    const __half *__restrict__ const my,
    const __half *__restrict__ const ry,
    __half *__restrict__ const y, const int y_stride) {

    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int k = blockIdx.y * blockDim.y + threadIdx.y;

    if (i < B && k < M) {
        float y_local = 0;
        for (int j = 0; j < N; ++j) {
            y_local += __half2float(x[i * x_stride + j]) * (
                (float(w[j * w_stride + k]) + 0.5f)
                * __half2float(rx[k]) * __half2float(ry[j])
                + __half2float(mx[k]) + __half2float(my[j])
            );
        }
        y[i * y_stride + k] = __float2half(y_local);
    }
}

template <>
void cuda_mm8_seq<fp16>(int B, int N, int M,
                        fp16 *x, int x_stride,
                        uint8_t *w, int w_stride,
                        fp16 *mx, fp16 *rx,
                        fp16 *my, fp16 *ry,
                        fp16 *y, int y_stride) {
    dim3 blockSize(1, 128);
    dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
    kernel_mm_seq_fp16i8<<<gridSize, blockSize>>>(
        B, N, M, cast(x), x_stride, w, w_stride,
        cast(mx), cast(rx), cast(my), cast(ry), cast(y), y_stride);
}

#define MM8_ONE_JSPLIT 24
#define MM8_ONE_TILE 1024

__global__ void kernel_mm_one_fp32i8(
    const int N, const int M,
    const float *__restrict__ const x,
    const uint8_t *__restrict__ const w, const int w_stride,
    const float *__restrict__ const mx,
    const float *__restrict__ const rx,
    const float *__restrict__ const my,
    const float *__restrict__ const ry,
    float *__restrict__ const y) {

    const int k = blockIdx.y * blockDim.y + threadIdx.y;
    const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
    const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));

    if (k < M) {
        float y_local = 0;
        for (int j = j0; j < j1; ++j) {
            y_local += x[j] * (
                (float(w[j * w_stride + k]) + 0.5f)
                * rx[k] * ry[j] + mx[k] + my[j]
            );
        }
        atomicAdd(&y[k], y_local);
    }
}

template <typename F>
void cuda_mm8_one(int N, int M,
                  F *x,
                  uint8_t *w, int w_stride,
                  F *mx, F *rx,
                  F *my, F *ry,
                  float *y);

template <>
void cuda_mm8_one<float>(int N, int M,
                         float *x,
                         uint8_t *w, int w_stride,
                         float *mx, float *rx,
                         float *my, float *ry,
                         float *y) {
    dim3 blockSize(1, MM8_ONE_TILE);
    dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
    kernel_mm_one_fp32i8<<<gridSize, blockSize>>>(
        N, M, x, w, w_stride,
        mx, rx, my, ry, y);
}

__global__ void kernel_mm_one_fp16i8(
    const int N, const int M,
    const __half *__restrict__ const x,
    const uint8_t *__restrict__ const w, const int w_stride,
    const __half *__restrict__ const mx,
    const __half *__restrict__ const rx,
    const __half *__restrict__ const my,
    const __half *__restrict__ const ry,
    float *__restrict__ const y) {

    const int k = blockIdx.y * blockDim.y + threadIdx.y;
    const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
    const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));

    if (k < M) {
        float y_local = 0;
        for (int j = j0; j < j1; ++j) {
            y_local += __half2float(x[j]) * (
                (float(w[j * w_stride + k]) + 0.5f)
                * __half2float(rx[k]) * __half2float(ry[j])
                + __half2float(mx[k]) + __half2float(my[j])
            );
        }
        atomicAdd(&y[k], y_local);
    }
}

template <>
void cuda_mm8_one<fp16>(int N, int M,
                        fp16 *x,
                        uint8_t *w, int w_stride,
                        fp16 *mx, fp16 *rx,
                        fp16 *my, fp16 *ry,
                        float *y) {
    dim3 blockSize(1, MM8_ONE_TILE);
    dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
    kernel_mm_one_fp16i8<<<gridSize, blockSize>>>(
        N, M, cast(x), w, w_stride,
        cast(mx), cast(rx), cast(my), cast(ry), y);
}
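The mm8 kernels above dequantize uint8 weights on the fly as `(w + 0.5) * rx[k] * ry[j] + mx[k] + my[j]`, i.e. a per-column scale/offset (`rx`/`mx`) combined with a per-row scale/offset (`ry`/`my`). A NumPy sketch of the same arithmetic, for checking the kernels against a reference:

import numpy as np

def mm8_seq_ref(x, w, mx, rx, my, ry):
    # x: (B, N) float, w: (N, M) uint8; rx/mx: (M,), ry/my: (N,)
    w_deq = (w.astype(np.float32) + 0.5) * np.outer(ry, rx) \
            + my[:, None] + mx[None, :]
    return x @ w_deq
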
7 backend-python/rwkv_pip/beta/cuda/util.h vendored Normal file
@@ -0,0 +1,7 @@
#include "ATen/ATen.h"
|
||||
#include <cuda_fp16.h>
|
||||
|
||||
template <typename T> T *data_ptr(torch::Tensor x) { return x.data_ptr<T>(); }
|
||||
template <> inline half *data_ptr(torch::Tensor x) {
|
||||
return reinterpret_cast<half *>(x.data_ptr<at::Half>());
|
||||
}
|
||||
181 backend-python/rwkv_pip/beta/cuda/wrapper.cpp vendored Normal file
@@ -0,0 +1,181 @@
#include <torch/extension.h>
|
||||
#include "ATen/ATen.h"
|
||||
#include <iostream>
|
||||
#include <c10/cuda/CUDAGuard.h>
|
||||
|
||||
typedef at::Half fp16;
|
||||
|
||||
template <typename F>
|
||||
void cuda_wkv_forward(int B, int T, int C,
|
||||
float *w, float *u, F *k, F *v, F *y,
|
||||
float *aa, float *bb, float *pp);
|
||||
template <typename F>
|
||||
void cuda_mm8_seq(int B, int N, int M,
|
||||
F *x, int x_stride,
|
||||
uint8_t *w, int w_stride,
|
||||
F *mx, F *rx,
|
||||
F *my, F *ry,
|
||||
F *y, int y_stride);
|
||||
template <typename F>
|
||||
void cuda_mm8_one(int N, int M,
|
||||
F *x,
|
||||
uint8_t *w, int w_stride,
|
||||
F *mx, F *rx,
|
||||
F *my, F *ry,
|
||||
float *y);
|
||||
|
||||
void wkv_forward(int64_t B, int64_t T, int64_t C,
|
||||
torch::Tensor &w, torch::Tensor &u,
|
||||
torch::Tensor &k, torch::Tensor &v, torch::Tensor &y,
|
||||
torch::Tensor &aa, torch::Tensor &bb, torch::Tensor &pp) {
|
||||
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
|
||||
switch (k.scalar_type()) {
|
||||
case c10::ScalarType::Half:
|
||||
cuda_wkv_forward(B, T, C,
|
||||
w.data_ptr<float>(), u.data_ptr<float>(),
|
||||
k.data_ptr<fp16>(), v.data_ptr<fp16>(), y.data_ptr<fp16>(),
|
||||
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
|
||||
break;
|
||||
case c10::ScalarType::Float:
|
||||
cuda_wkv_forward(B, T, C,
|
||||
w.data_ptr<float>(), u.data_ptr<float>(),
|
||||
k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(),
|
||||
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
|
||||
break;
|
||||
default:
|
||||
assert(false && "Only FP16 and FP32 are currently supported");
|
||||
}
|
||||
}
|
||||
|
||||
void mm8_seq(int64_t B, int64_t N, int64_t M,
|
||||
torch::Tensor &x, torch::Tensor &w,
|
||||
torch::Tensor &mx, torch::Tensor &rx,
|
||||
torch::Tensor &my, torch::Tensor &ry,
|
||||
torch::Tensor &y) {
|
||||
assert(x.stride(1) == 1);
|
||||
assert(w.stride(1) == 1);
|
||||
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
|
||||
assert(my.stride(0) == 1 && ry.stride(0) == 1);
|
||||
assert(y.stride(1) == 1);
|
||||
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
|
||||
switch (x.scalar_type()) {
|
||||
case c10::ScalarType::Half:
|
||||
cuda_mm8_seq(
|
||||
B, N, M,
|
||||
        x.data_ptr<fp16>(), x.stride(0),
        w.data_ptr<uint8_t>(), w.stride(0),
        mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
        my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
        y.data_ptr<fp16>(), y.stride(0));
    break;
  case c10::ScalarType::Float:
    cuda_mm8_seq(
        B, N, M,
        x.data_ptr<float>(), x.stride(0),
        w.data_ptr<uint8_t>(), w.stride(0),
        mx.data_ptr<float>(), rx.data_ptr<float>(),
        my.data_ptr<float>(), ry.data_ptr<float>(),
        y.data_ptr<float>(), y.stride(0));
    break;
  default:
    assert(false && "Only FP16 and FP32 are currently supported");
  }
}

void mm8_one(int64_t N, int64_t M,
             torch::Tensor &x, torch::Tensor &w,
             torch::Tensor &mx, torch::Tensor &rx,
             torch::Tensor &my, torch::Tensor &ry,
             torch::Tensor &y) {
  assert(x.stride(0) == 1);
  assert(w.stride(1) == 1);
  assert(mx.stride(0) == 1 && rx.stride(0) == 1);
  assert(my.stride(0) == 1 && ry.stride(0) == 1);
  assert(y.stride(0) == 1);
  const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
  switch (x.scalar_type()) {
  case c10::ScalarType::Half:
    cuda_mm8_one(
        N, M,
        x.data_ptr<fp16>(),
        w.data_ptr<uint8_t>(), w.stride(0),
        mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
        my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
        y.data_ptr<float>());
    break;
  case c10::ScalarType::Float:
    cuda_mm8_one(
        N, M,
        x.data_ptr<float>(),
        w.data_ptr<uint8_t>(), w.stride(0),
        mx.data_ptr<float>(), rx.data_ptr<float>(),
        my.data_ptr<float>(), ry.data_ptr<float>(),
        y.data_ptr<float>());
    break;
  default:
    assert(false && "Only FP16 and FP32 are currently supported");
  }
}

using torch::Tensor;

#ifndef DISABLE_CUBLAS_GEMM
void gemm_fp16_cublas_tensor(Tensor a, Tensor b, Tensor c);
#endif

Tensor att_one(Tensor x, Tensor ln_w, Tensor ln_b, Tensor sx, Tensor k_mix,
               Tensor v_mix, Tensor r_mix, Tensor kw,
               /* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx, Tensor rw,
               /* imm */ Tensor rx, Tensor ow, Tensor t_first,
               /* imm */ Tensor k, Tensor pp, Tensor ww, Tensor aa, Tensor bb,
               Tensor t_decay, /* imm */ Tensor v, /* in & out */ Tensor r,
               /* out */ Tensor x_plus_out, /* out */ Tensor t1,
               /* out */ Tensor t2, /* out */ Tensor p);

Tensor att_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
               Tensor v_mix, Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
               Tensor ow, Tensor t_first, Tensor pp, Tensor aa, Tensor bb,
               Tensor t_decay, /* imm */ Tensor buf, /* out */ Tensor x_plus_out);

Tensor att_one_v5(Tensor x, Tensor sx, Tensor s, Tensor ln_w, Tensor ln_b,
                  Tensor lx_w, Tensor lx_b, Tensor k_mix, Tensor v_mix,
                  Tensor r_mix, Tensor kw,
                  /* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx,
                  Tensor rw,
                  /* imm */ Tensor rx, Tensor ow, Tensor t_first,
                  /* imm */ Tensor k, Tensor t_decay, /* imm */ Tensor v,
                  /* imm */ Tensor r, /* imm */ Tensor s1,
                  /* out */ Tensor x_plus_out, /* out */ Tensor s2);

Tensor ffn_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
               Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
               /* imm */ Tensor buf,
               /* out */ Tensor x_plus_out);

Tensor ffn_one(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
               Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
               /* imm */ Tensor buf,
               /* out */ Tensor x_plus_out);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("wkv_forward", &wkv_forward, "wkv forward");
  m.def("mm8_seq", &mm8_seq, "mm8 seq");
  m.def("mm8_one", &mm8_one, "mm8 one");
  m.def("gemm_fp16_cublas", &gemm_fp16_cublas_tensor, "gemv fp16 cublas");
  m.def("att_one", &att_one, "att one");
  m.def("att_one_v5", &att_one_v5, "att one v5");
  m.def("att_seq", &att_seq, "att seq");
  m.def("ffn_seq", &ffn_seq, "ffn seq");
  m.def("ffn_one", &ffn_one, "ffn one");
}

TORCH_LIBRARY(rwkv, m) {
  m.def("wkv_forward", wkv_forward);
  m.def("mm8_seq", mm8_seq);
  m.def("mm8_one", mm8_one);
  m.def("gemm_fp16_cublas", gemm_fp16_cublas_tensor);
  m.def("att_one", att_one);
  m.def("att_one_v5", &att_one_v5);
  m.def("att_seq", att_seq);
  m.def("ffn_seq", ffn_seq);
  m.def("ffn_one", ffn_one);
}
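For context, a minimal sketch of how ops registered through TORCH_LIBRARY like the ones above are reached from Python once the extension is built (the actual build and call sites appear in wkv_cuda_model.py further down in this diff; shapes follow the asserts there, and the library path and CUDA device are assumptions):

import torch

torch.ops.load_library("wkv_cuda.pyd")  # illustrative path; see LoadPreCompileLibrary below

N, M = 4, 8
x = torch.randn(N, dtype=torch.float16, device="cuda")
w = torch.randint(0, 256, (N, M), dtype=torch.uint8, device="cuda")
mx = torch.randn(M, dtype=torch.float16, device="cuda")
rx = torch.randn(M, dtype=torch.float16, device="cuda")
my = torch.randn(N, 1, dtype=torch.float16, device="cuda")
ry = torch.randn(N, 1, dtype=torch.float16, device="cuda")
y = torch.zeros(M, dtype=torch.float32, device="cuda")  # mm8_one accumulates into float32
torch.ops.rwkv.mm8_one(N, M, x, w, mx, rx, my, ry, y)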
backend-python/rwkv_pip/beta/model.py (vendored, new file, 1821 lines): File diff suppressed because it is too large.
BIN backend-python/rwkv_pip/beta/wkv_cuda.pyd (vendored, new file): Binary file not shown.
backend-python/rwkv_pip/cuda/gemm_fp16_cublas.cpp (vendored, new file, 75 lines)
@@ -0,0 +1,75 @@
#include <cublas_v2.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAContext.h>

#define CUBLAS_CHECK(condition)                                                \
  for (cublasStatus_t _cublas_check_status = (condition);                      \
       _cublas_check_status != CUBLAS_STATUS_SUCCESS;)                         \
    throw std::runtime_error("cuBLAS error " +                                 \
                             std::to_string(_cublas_check_status) + " at " +   \
                             std::to_string(__LINE__));

#define CUDA_CHECK(condition)                                                  \
  for (cudaError_t _cuda_check_status = (condition);                           \
       _cuda_check_status != cudaSuccess;)                                     \
    throw std::runtime_error(                                                  \
        "CUDA error " + std::string(cudaGetErrorString(_cuda_check_status)) +  \
        " at " + std::to_string(__LINE__));

/*
NOTE: blas gemm is column-major by default, but we need row-major output.
The data of a row-major, transposed matrix is exactly the same as the
column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
*/
void gemm_fp16_cublas(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
  const at::cuda::OptionalCUDAGuard device_guard(device_of(a));
  const auto cuda_data_type = CUDA_R_16F;
  const auto cuda_c_data_type =
      c.dtype() == torch::kFloat32 ? CUDA_R_32F : CUDA_R_16F;
  const auto compute_type = CUDA_R_32F;
  const float sp_alpha = 1.f;
  // swap a and b, and use CUBLAS_OP_N. see the notes above
  std::swap(a, b);
  const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
  const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
  // m = (B^T).size(0) = B.size(1), which is A.size(1) after the swap;
  // negative axes are used because of the existence of batch matmul.
  const int m = a.size(-1);
  const int k = a.size(-2);
  const int n = b.size(-2);
  const int cublas_lda = m;
  const int cublas_ldb = k;
  const int cublas_ldc = m;
  cublasHandle_t cublas_handle = at::cuda::getCurrentCUDABlasHandle();

#if CUDA_VERSION >= 11000
  cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
#else
  cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
#endif
  const float sp_beta = 0.f;
  if (a.sizes().size() == 2 && b.sizes().size() == 2) {
    CUBLAS_CHECK(cublasGemmEx(
        cublas_handle, cublas_trans_a, cublas_trans_b, m, n, k, &sp_alpha,
        a.data_ptr(), cuda_data_type, cublas_lda, b.data_ptr(), cuda_data_type,
        cublas_ldb, &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc,
        compute_type, algo));
  } else {
    // batch matmul
    assert(a.sizes().size() == 3 && b.sizes().size() == 3);

    const long long int cublas_stride_a = m * k;
    const long long int cublas_stride_b = k * n;
    const long long int cublas_stride_c = m * n;
    CUBLAS_CHECK(cublasGemmStridedBatchedEx(
        cublas_handle, cublas_trans_a, cublas_trans_b, m,
        n, k, &sp_alpha, a.data_ptr(), cuda_data_type, cublas_lda,
        cublas_stride_a, b.data_ptr(), cuda_data_type, cublas_ldb, cublas_stride_b,
        &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc, cublas_stride_c,
        a.size(0), compute_type, algo));
  }
}
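The NOTE in this file is the standard trick for getting row-major output from a column-major BLAS; a quick NumPy check of the identity it relies on (a sketch, independent of the code above):

import numpy as np

A = np.random.rand(3, 4).astype(np.float32)
B = np.random.rand(4, 5).astype(np.float32)
# A row-major buffer read as column-major is the transpose, so computing
# B^T @ A^T in column-major order leaves C = A @ B laid out row-major.
assert np.allclose((B.T @ A.T).T, A @ B)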
backend-python/rwkv_pip/cuda/operators.cu (vendored, new file, 246 lines)
@@ -0,0 +1,246 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#define MIN_VALUE (-1e38)
typedef at::Half fp16;
__half *cast(fp16 *ptr) {
  return reinterpret_cast<__half *>(ptr);
}

template <typename F>
__global__ void kernel_wkv_forward(const int B, const int T, const int C,
                                   const float *__restrict__ const _w, const float *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
                                   F *__restrict__ const _y, float *__restrict__ const _aa, float *__restrict__ const _bb, float *__restrict__ const _pp) {
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  const int _b = idx / C;
  const int _c = idx % C;
  const int _offset = _b * T * C + _c;
  const int _state_offset = _b * C + _c;

  float u = _u[_c];
  float w = _w[_c];
  const F *__restrict__ const k = _k + _offset;
  const F *__restrict__ const v = _v + _offset;
  F *__restrict__ const y = _y + _offset;

  float aa = _aa[_state_offset];
  float bb = _bb[_state_offset];
  float pp = _pp[_state_offset];
  for (int i = 0; i < T; i++) {
    const int ii = i * C;
    const float kk = float(k[ii]);
    const float vv = float(v[ii]);
    float ww = u + kk;
    float p = max(pp, ww);
    float e1 = exp(pp - p);
    float e2 = exp(ww - p);
    y[ii] = F((e1 * aa + e2 * vv) / (e1 * bb + e2));
    ww = w + pp;
    p = max(ww, kk);
    e1 = exp(ww - p);
    e2 = exp(kk - p);
    aa = e1 * aa + e2 * vv;
    bb = e1 * bb + e2;
    pp = p;
  }
  _aa[_state_offset] = aa;
  _bb[_state_offset] = bb;
  _pp[_state_offset] = pp;
}

template <typename F>
void cuda_wkv_forward(int B, int T, int C, float *w, float *u, F *k, F *v, F *y, float *aa, float *bb, float *pp) {
  dim3 threadsPerBlock( min(C, 32) );
  assert(B * C % threadsPerBlock.x == 0);
  dim3 numBlocks(B * C / threadsPerBlock.x);
  kernel_wkv_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, aa, bb, pp);
}

template void cuda_wkv_forward<fp16>(
    int B, int T, int C,
    float *w, float *u, fp16 *k, fp16 *v, fp16 *y,
    float *aa, float *bb, float *pp);
template void cuda_wkv_forward<float>(
    int B, int T, int C,
    float *w, float *u, float *k, float *v, float *y,
    float *aa, float *bb, float *pp);

__global__ void kernel_mm_seq_fp32i8(
    const int B, const int N, const int M,
    const float *__restrict__ const x, const int x_stride,
    const uint8_t *__restrict__ const w, const int w_stride,
    const float *__restrict__ const mx,
    const float *__restrict__ const rx,
    const float *__restrict__ const my,
    const float *__restrict__ const ry,
    float *__restrict__ const y, const int y_stride) {

  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  const int k = blockIdx.y * blockDim.y + threadIdx.y;

  if (i < B && k < M) {
    float y_local = 0;
    for (int j = 0; j < N; ++j) {
      y_local += x[i * x_stride + j] * (
          (float(w[j * w_stride + k]) + 0.5f)
          * rx[k] * ry[j] + mx[k] + my[j]
      );
    }
    y[i * y_stride + k] = y_local;
  }
}

template <typename F>
void cuda_mm8_seq(int B, int N, int M,
                  F *x, int x_stride,
                  uint8_t *w, int w_stride,
                  F *mx, F *rx,
                  F *my, F *ry,
                  F *y, int y_stride);

template <>
void cuda_mm8_seq<float>(int B, int N, int M,
                         float *x, int x_stride,
                         uint8_t *w, int w_stride,
                         float *mx, float *rx,
                         float *my, float *ry,
                         float *y, int y_stride) {
  dim3 blockSize(1, 128);
  dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
  kernel_mm_seq_fp32i8<<<gridSize, blockSize>>>(
      B, N, M, x, x_stride, w, w_stride,
      mx, rx, my, ry, y, y_stride);
}

__global__ void kernel_mm_seq_fp16i8(
    const int B, const int N, const int M,
    const __half *__restrict__ const x, const int x_stride,
    const uint8_t *__restrict__ const w, const int w_stride,
    const __half *__restrict__ const mx,
    const __half *__restrict__ const rx,
    const __half *__restrict__ const my,
    const __half *__restrict__ const ry,
    __half *__restrict__ const y, const int y_stride) {

  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  const int k = blockIdx.y * blockDim.y + threadIdx.y;

  if (i < B && k < M) {
    float y_local = 0;
    for (int j = 0; j < N; ++j) {
      y_local += __half2float(x[i * x_stride + j]) * (
          (float(w[j * w_stride + k]) + 0.5f)
          * __half2float(rx[k]) * __half2float(ry[j])
          + __half2float(mx[k]) + __half2float(my[j])
      );
    }
    y[i * y_stride + k] = __float2half(y_local);
  }
}

template <>
void cuda_mm8_seq<fp16>(int B, int N, int M,
                        fp16 *x, int x_stride,
                        uint8_t *w, int w_stride,
                        fp16 *mx, fp16 *rx,
                        fp16 *my, fp16 *ry,
                        fp16 *y, int y_stride) {
  dim3 blockSize(1, 128);
  dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
  kernel_mm_seq_fp16i8<<<gridSize, blockSize>>>(
      B, N, M, cast(x), x_stride, w, w_stride,
      cast(mx), cast(rx), cast(my), cast(ry), cast(y), y_stride);
}

#define MM8_ONE_JSPLIT 24
#define MM8_ONE_TILE 1024

__global__ void kernel_mm_one_fp32i8(
    const int N, const int M,
    const float *__restrict__ const x,
    const uint8_t *__restrict__ const w, const int w_stride,
    const float *__restrict__ const mx,
    const float *__restrict__ const rx,
    const float *__restrict__ const my,
    const float *__restrict__ const ry,
    float *__restrict__ const y) {

  const int k = blockIdx.y * blockDim.y + threadIdx.y;
  const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
  const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));

  if (k < M) {
    float y_local = 0;
    for (int j = j0; j < j1; ++j) {
      y_local += x[j] * (
          (float(w[j * w_stride + k]) + 0.5f)
          * rx[k] * ry[j] + mx[k] + my[j]
      );
    }
    atomicAdd(&y[k], y_local);
  }
}

template <typename F>
void cuda_mm8_one(int N, int M,
                  F *x,
                  uint8_t *w, int w_stride,
                  F *mx, F *rx,
                  F *my, F *ry,
                  float *y);

template <>
void cuda_mm8_one<float>(int N, int M,
                         float *x,
                         uint8_t *w, int w_stride,
                         float *mx, float *rx,
                         float *my, float *ry,
                         float *y) {
  dim3 blockSize(1, MM8_ONE_TILE);
  dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
  kernel_mm_one_fp32i8<<<gridSize, blockSize>>>(
      N, M, x, w, w_stride,
      mx, rx, my, ry, y);
}

__global__ void kernel_mm_one_fp16i8(
    const int N, const int M,
    const __half *__restrict__ const x,
    const uint8_t *__restrict__ const w, const int w_stride,
    const __half *__restrict__ const mx,
    const __half *__restrict__ const rx,
    const __half *__restrict__ const my,
    const __half *__restrict__ const ry,
    float *__restrict__ const y) {

  const int k = blockIdx.y * blockDim.y + threadIdx.y;
  const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
  const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));

  if (k < M) {
    float y_local = 0;
    for (int j = j0; j < j1; ++j) {
      y_local += __half2float(x[j]) * (
          (float(w[j * w_stride + k]) + 0.5f)
          * __half2float(rx[k]) * __half2float(ry[j])
          + __half2float(mx[k]) + __half2float(my[j])
      );
    }
    atomicAdd(&y[k], y_local);
  }
}

template <>
void cuda_mm8_one<fp16>(int N, int M,
                        fp16 *x,
                        uint8_t *w, int w_stride,
                        fp16 *mx, fp16 *rx,
                        fp16 *my, fp16 *ry,
                        float *y) {
  dim3 blockSize(1, MM8_ONE_TILE);
  dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
  kernel_mm_one_fp16i8<<<gridSize, blockSize>>>(
      N, M, cast(x), w, w_stride,
      cast(mx), cast(rx), cast(my), cast(ry), y);
}
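In plain PyTorch, the INT8 kernels above compute the following dequantized product; this reference form (it also appears verbatim as the torch fallback in wkv_cuda_model.py below) makes the (w + 0.5) * rx * ry + mx + my scheme explicit, with rx/mx per output column and ry/my per input row:

import torch

def torch_mm8_seq(x, w, mx, rx, my, ry):
    # x: (B, N) activations; w: (N, M) uint8 weights;
    # rx, mx: (M,) column scale/offset; ry, my: (N, 1) row scale/offset
    return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)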
backend-python/rwkv_pip/cuda/rwkv5.cu (vendored, new file, 88 lines)
@@ -0,0 +1,88 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;

template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C, const int H, float *__restrict__ _state,
                               const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
                               F *__restrict__ const _y)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;
    _w += h*_N_;
    _u += h*_N_;
    _state += h*_N_*_N_ + i*_N_; // wrong if B > 1 !!!

    __shared__ float r[_N_], k[_N_], u[_N_], w[_N_];

    float state[_N_];
#pragma unroll
    for (int j = 0; j < _N_; j++)
        state[j] = _state[j];

    __syncthreads();
    u[i] = float(_u[i]);
    w[i] = _w[i];
    __syncthreads();

    for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
    {
        __syncthreads();
        r[i] = float(_r[t]);
        k[i] = float(_k[t]);
        __syncthreads();

        const float v = float(_v[t]);
        float y = 0;

#pragma unroll
        for (int j = 0; j < _N_; j+=4)
        {
            const float4& r_ = (float4&)(r[j]);
            const float4& k_ = (float4&)(k[j]);
            const float4& w_ = (float4&)(w[j]);
            const float4& u_ = (float4&)(u[j]);
            float4& s = (float4&)(state[j]);
            float4 x;

            x.x = k_.x * v;
            x.y = k_.y * v;
            x.z = k_.z * v;
            x.w = k_.w * v;

            y += r_.x * (u_.x * x.x + s.x);
            y += r_.y * (u_.y * x.y + s.y);
            y += r_.z * (u_.z * x.z + s.z);
            y += r_.w * (u_.w * x.w + s.w);

            s.x = s.x * w_.x + x.x;
            s.y = s.y * w_.y + x.y;
            s.z = s.z * w_.z + x.z;
            s.w = s.w * w_.w + x.w;
        }
        _y[t] = F(y);
    }
#pragma unroll
    for (int j = 0; j < _N_; j++)
        _state[j] = state[j];
}

void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
{
    assert(H*_N_ == C);
    kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y)
{
    assert(H*_N_ == C);
    kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y)
{
    assert(H*_N_ == C);
    kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
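Per head, the kernel above implements the RWKV-5 linear-attention recurrence y_t = r_t . (u * (k_t v_t^T) + S), S <- diag(w) S + k_t v_t^T. A reference sketch of that recurrence (mine, following the kernel's indexing, not part of the diff), with r, k, v of shape (T, N) and per-channel decay w and bonus u:

import torch

def rwkv5_head_reference(r, k, v, w, u, S):
    # S: (N, N) state; rows indexed by key channel, columns by value channel
    T, N = r.shape
    y = torch.zeros(T, N)
    for t in range(T):
        kv = torch.outer(k[t], v[t])           # rank-1 update k_t v_t^T
        y[t] = r[t] @ (u[:, None] * kv + S)    # current token gets the u bonus
        S = w[:, None] * S + kv                # decay the state, then accumulate
    return y, S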
backend-python/rwkv_pip/cuda/rwkv5_op.cpp (vendored, new file, 34 lines)
@@ -0,0 +1,34 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <c10/cuda/CUDAGuard.h>
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;

void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y);
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y);

void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
  const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
  cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
  const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
  cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), w.data_ptr<float>(), u.data_ptr<fp16>(), y.data_ptr<fp16>());
}
void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
  const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
  cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), w.data_ptr<float>(), u.data_ptr<fp32>(), y.data_ptr<fp32>());
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward_bf16", &forward_bf16, "rwkv5 forward_bf16");
  m.def("forward_fp16", &forward_fp16, "rwkv5 forward_fp16");
  m.def("forward_fp32", &forward_fp32, "rwkv5 forward_fp32");
}
TORCH_LIBRARY(rwkv5, m) {
  m.def("forward_bf16", forward_bf16);
  m.def("forward_fp16", forward_fp16);
  m.def("forward_fp32", forward_fp32);
}
backend-python/rwkv_pip/cuda/rwkv6.cu (vendored, new file, 87 lines)
@@ -0,0 +1,87 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;

template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C, const int H, float *__restrict__ _state,
                               const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
                               F *__restrict__ const _y)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;
    _u += h*_N_;
    _state += h*_N_*_N_ + i*_N_; // wrong if B > 1 !!!

    __shared__ float r[_N_], k[_N_], u[_N_], w[_N_];

    float state[_N_];
#pragma unroll
    for (int j = 0; j < _N_; j++)
        state[j] = _state[j];

    __syncthreads();
    u[i] = float(_u[i]);
    __syncthreads();

    for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
    {
        __syncthreads();
        w[i] = _w[t];
        r[i] = float(_r[t]);
        k[i] = float(_k[t]);
        __syncthreads();

        const float v = float(_v[t]);
        float y = 0;

#pragma unroll
        for (int j = 0; j < _N_; j+=4)
        {
            const float4& r_ = (float4&)(r[j]);
            const float4& k_ = (float4&)(k[j]);
            const float4& w_ = (float4&)(w[j]);
            const float4& u_ = (float4&)(u[j]);
            float4& s = (float4&)(state[j]);
            float4 x;

            x.x = k_.x * v;
            x.y = k_.y * v;
            x.z = k_.z * v;
            x.w = k_.w * v;

            y += r_.x * (u_.x * x.x + s.x);
            y += r_.y * (u_.y * x.y + s.y);
            y += r_.z * (u_.z * x.z + s.z);
            y += r_.w * (u_.w * x.w + s.w);

            s.x = s.x * w_.x + x.x;
            s.y = s.y * w_.y + x.y;
            s.z = s.z * w_.z + x.z;
            s.w = s.w * w_.w + x.w;
        }
        _y[t] = F(y);
    }
#pragma unroll
    for (int j = 0; j < _N_; j++)
        _state[j] = state[j];
}

void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
{
    assert(H*_N_ == C);
    kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y)
{
    assert(H*_N_ == C);
    kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y)
{
    assert(H*_N_ == C);
    kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
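rwkv6.cu is the same kernel as rwkv5.cu except that the decay is data-dependent: w is re-read per token inside the time loop (w[i] = _w[t]) instead of once per head. In terms of the reference sketch after rwkv5.cu, only the state update changes (a sketch, assuming w now has shape (T, N)):

def rwkv6_head_reference(r, k, v, w, u, S):
    # identical to rwkv5_head_reference, but w is (T, N): per-token decay
    T, N = r.shape
    y = torch.zeros(T, N)
    for t in range(T):
        kv = torch.outer(k[t], v[t])
        y[t] = r[t] @ (u[:, None] * kv + S)
        S = w[t][:, None] * S + kv
    return y, S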
backend-python/rwkv_pip/cuda/rwkv6_op.cpp (vendored, new file, 34 lines)
@@ -0,0 +1,34 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <c10/cuda/CUDAGuard.h>
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;

void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y);
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y);

void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
  const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
  cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
  const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
  cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), w.data_ptr<float>(), u.data_ptr<fp16>(), y.data_ptr<fp16>());
}
void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
  const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
  cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), w.data_ptr<float>(), u.data_ptr<fp32>(), y.data_ptr<fp32>());
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward_bf16", &forward_bf16, "rwkv6 forward_bf16");
  m.def("forward_fp16", &forward_fp16, "rwkv6 forward_fp16");
  m.def("forward_fp32", &forward_fp32, "rwkv6 forward_fp32");
}
TORCH_LIBRARY(rwkv6, m) {
  m.def("forward_bf16", forward_bf16);
  m.def("forward_fp16", forward_fp16);
  m.def("forward_fp32", forward_fp32);
}
backend-python/rwkv_pip/cuda/wrapper.cpp (vendored, new file, 141 lines)
@@ -0,0 +1,141 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <iostream>
#include <c10/cuda/CUDAGuard.h>

typedef at::Half fp16;

template <typename F>
void cuda_wkv_forward(int B, int T, int C,
                      float *w, float *u, F *k, F *v, F *y,
                      float *aa, float *bb, float *pp);
template <typename F>
void cuda_mm8_seq(int B, int N, int M,
                  F *x, int x_stride,
                  uint8_t *w, int w_stride,
                  F *mx, F *rx,
                  F *my, F *ry,
                  F *y, int y_stride);
template <typename F>
void cuda_mm8_one(int N, int M,
                  F *x,
                  uint8_t *w, int w_stride,
                  F *mx, F *rx,
                  F *my, F *ry,
                  float *y);

void wkv_forward(int64_t B, int64_t T, int64_t C,
                 torch::Tensor &w, torch::Tensor &u,
                 torch::Tensor &k, torch::Tensor &v, torch::Tensor &y,
                 torch::Tensor &aa, torch::Tensor &bb, torch::Tensor &pp) {
  const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
  switch (k.scalar_type()) {
  case c10::ScalarType::Half:
    cuda_wkv_forward(B, T, C,
                     w.data_ptr<float>(), u.data_ptr<float>(),
                     k.data_ptr<fp16>(), v.data_ptr<fp16>(), y.data_ptr<fp16>(),
                     aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
    break;
  case c10::ScalarType::Float:
    cuda_wkv_forward(B, T, C,
                     w.data_ptr<float>(), u.data_ptr<float>(),
                     k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(),
                     aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
    break;
  default:
    assert(false && "Only FP16 and FP32 are currently supported");
  }
}

void mm8_seq(int64_t B, int64_t N, int64_t M,
             torch::Tensor &x, torch::Tensor &w,
             torch::Tensor &mx, torch::Tensor &rx,
             torch::Tensor &my, torch::Tensor &ry,
             torch::Tensor &y) {
  assert(x.stride(1) == 1);
  assert(w.stride(1) == 1);
  assert(mx.stride(0) == 1 && rx.stride(0) == 1);
  assert(my.stride(0) == 1 && ry.stride(0) == 1);
  assert(y.stride(1) == 1);
  const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
  switch (x.scalar_type()) {
  case c10::ScalarType::Half:
    cuda_mm8_seq(
        B, N, M,
        x.data_ptr<fp16>(), x.stride(0),
        w.data_ptr<uint8_t>(), w.stride(0),
        mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
        my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
        y.data_ptr<fp16>(), y.stride(0));
    break;
  case c10::ScalarType::Float:
    cuda_mm8_seq(
        B, N, M,
        x.data_ptr<float>(), x.stride(0),
        w.data_ptr<uint8_t>(), w.stride(0),
        mx.data_ptr<float>(), rx.data_ptr<float>(),
        my.data_ptr<float>(), ry.data_ptr<float>(),
        y.data_ptr<float>(), y.stride(0));
    break;
  default:
    assert(false && "Only FP16 and FP32 are currently supported");
  }
}
void mm8_one(int64_t N, int64_t M,
             torch::Tensor &x, torch::Tensor &w,
             torch::Tensor &mx, torch::Tensor &rx,
             torch::Tensor &my, torch::Tensor &ry,
             torch::Tensor &y) {
  assert(x.stride(0) == 1);
  assert(w.stride(1) == 1);
  assert(mx.stride(0) == 1 && rx.stride(0) == 1);
  assert(my.stride(0) == 1 && ry.stride(0) == 1);
  assert(y.stride(0) == 1);
  const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
  switch (x.scalar_type()) {
  case c10::ScalarType::Half:
    cuda_mm8_one(
        N, M,
        x.data_ptr<fp16>(),
        w.data_ptr<uint8_t>(), w.stride(0),
        mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
        my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
        y.data_ptr<float>());
    break;
  case c10::ScalarType::Float:
    cuda_mm8_one(
        N, M,
        x.data_ptr<float>(),
        w.data_ptr<uint8_t>(), w.stride(0),
        mx.data_ptr<float>(), rx.data_ptr<float>(),
        my.data_ptr<float>(), ry.data_ptr<float>(),
        y.data_ptr<float>());
    break;
  default:
    assert(false && "Only FP16 and FP32 are currently supported");
  }
}

using torch::Tensor;

#ifndef DISABLE_CUBLAS_GEMM
void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
#endif

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("wkv_forward", &wkv_forward, "wkv forward");
  m.def("mm8_seq", &mm8_seq, "mm8 seq");
  m.def("mm8_one", &mm8_one, "mm8 one");
#ifndef DISABLE_CUBLAS_GEMM
  m.def("gemm_fp16_cublas", &gemm_fp16_cublas, "gemv fp16 cublas");
#endif
}

TORCH_LIBRARY(rwkv, m) {
  m.def("wkv_forward", wkv_forward);
  m.def("mm8_seq", mm8_seq);
  m.def("mm8_one", mm8_one);
#ifndef DISABLE_CUBLAS_GEMM
  m.def("gemm_fp16_cublas", gemm_fp16_cublas);
#endif
}
backend-python/rwkv_pip/model.py (vendored, new file, 2480 lines): File diff suppressed because it is too large.
BIN backend-python/rwkv_pip/rwkv5.pyd (vendored, new file): Binary file not shown.
BIN backend-python/rwkv_pip/rwkv6.pyd (vendored, new file): Binary file not shown.
backend-python/rwkv_pip/utils.py (vendored, 26 changes)
@@ -16,6 +16,7 @@ class PIPELINE_ARGS:
         top_k=0,
         alpha_frequency=0.2,
         alpha_presence=0.2,
+        alpha_decay=0.996,
         token_ban=[],
         token_stop=[],
         chunk_len=256,
@@ -25,6 +26,7 @@ class PIPELINE_ARGS:
         self.top_k = top_k
         self.alpha_frequency = alpha_frequency  # Frequency Penalty (as in GPT-3)
         self.alpha_presence = alpha_presence  # Presence Penalty (as in GPT-3)
+        self.alpha_decay = alpha_decay  # gradually decay the penalty
         self.token_ban = token_ban  # ban the generation of some tokens
         self.token_stop = token_stop  # stop generation whenever you see any token here
         self.chunk_len = (
@@ -33,7 +35,7 @@ class PIPELINE_ARGS:


 class PIPELINE:
-    def __init__(self, model, WORD_NAME):
+    def __init__(self, model, WORD_NAME: str):
         self.model = model
         if WORD_NAME == "cl100k_base":
             import tiktoken
@@ -47,9 +49,15 @@ class PIPELINE:
                 os.path.dirname(os.path.abspath(__file__)) + "/rwkv_vocab_v20230424.txt"
             )
         else:
-            from tokenizers import Tokenizer
+            if WORD_NAME.endswith(".txt"):
+                sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+                from rwkv_tokenizer import TRIE_TOKENIZER

-            self.tokenizer = Tokenizer.from_file(WORD_NAME)
+                self.tokenizer = TRIE_TOKENIZER(WORD_NAME)
+            else:
+                from tokenizers import Tokenizer
+
+                self.tokenizer = Tokenizer.from_file(WORD_NAME)

     def refine_context(self, context):
         context = context.strip().split("\n")
@@ -73,12 +81,13 @@ class PIPELINE:
     def sample_logits(self, logits, temperature=1.0, top_p=0.85, top_k=0):
         probs = F.softmax(logits.float(), dim=-1)
         top_k = int(top_k)
-        if probs.device == torch.device("cpu"):
-            probs = probs.numpy()
+        # 'privateuseone' is the type of custom devices like `torch_directml.device()`
+        if probs.device.type in ["cpu", "privateuseone"]:
+            probs = probs.cpu().numpy()
             sorted_ids = np.argsort(probs)
             sorted_probs = probs[sorted_ids][::-1]
             cumulative_probs = np.cumsum(sorted_probs)
-            cutoff = float(sorted_probs[np.argmax(cumulative_probs > top_p)])
+            cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
             probs[probs < cutoff] = 0
             if top_k < len(probs) and top_k > 0:
                 probs[sorted_ids[:-top_k]] = 0
@@ -92,7 +101,7 @@ class PIPELINE:
             sorted_probs = probs[sorted_ids]
             sorted_probs = torch.flip(sorted_probs, dims=(0,))
             cumulative_probs = torch.cumsum(sorted_probs, dim=-1).cpu().numpy()
-            cutoff = float(sorted_probs[np.argmax(cumulative_probs > top_p)])
+            cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
             probs[probs < cutoff] = 0
             if top_k < len(probs) and top_k > 0:
                 probs[sorted_ids[:-top_k]] = 0
@@ -127,10 +136,13 @@ class PIPELINE:
             if token in args.token_stop:
                 break
             all_tokens += [token]
+            for xxx in occurrence:
+                occurrence[xxx] *= args.alpha_decay
             if token not in occurrence:
                 occurrence[token] = 1
             else:
                 occurrence[token] += 1
+            # print(occurrence) # debug

             # output
             tmp = self.decode(all_tokens[out_last:])
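The two cutoff changes above (from > to >=) matter exactly at the boundary; a small worked case: with sorted probabilities [0.5, 0.3, 0.2] and top_p = 0.5, the cumulative sums are [0.5, 0.8, 1.0]. np.argmax(cum > 0.5) is 1, giving cutoff 0.3 and keeping two tokens, while np.argmax(cum >= 0.5) is 0, giving cutoff 0.5, so exactly the smallest set whose mass reaches top_p survives:

import numpy as np

sorted_probs = np.array([0.5, 0.3, 0.2])
cum = np.cumsum(sorted_probs)
assert float(sorted_probs[np.argmax(cum >= 0.5)]) == 0.5  # new behavior
assert float(sorted_probs[np.argmax(cum > 0.5)]) == 0.3   # old behavior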
BIN backend-python/rwkv_pip/wkv_cuda.pyd (vendored, new file): Binary file not shown.
@@ -2,24 +2,35 @@ import json
 import logging
 from typing import Any
 from fastapi import Request
 from pydantic import BaseModel
+from enum import Enum


 logger = logging.getLogger()
 logger.setLevel(logging.INFO)
 formatter = logging.Formatter("%(asctime)s - %(levelname)s\n%(message)s")
 fh = logging.handlers.RotatingFileHandler(
-    "api.log", mode="a", maxBytes=3 * 1024 * 1024, backupCount=3
+    "api.log", mode="a", maxBytes=3 * 1024 * 1024, backupCount=3, encoding="utf-8"
 )
 fh.setFormatter(formatter)
 logger.addHandler(fh)


+class ClsEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, BaseModel):
+            return obj.dict()
+        if isinstance(obj, Enum):
+            return obj.value
+        return super().default(obj)
+
+
 def quick_log(request: Request, body: Any, response: str):
     try:
         logger.info(
             f"Client: {request.client if request else ''}\nUrl: {request.url if request else ''}\n"
             + (
-                f"Body: {json.dumps(body.__dict__, default=vars, ensure_ascii=False)}\n"
+                f"Body: {json.dumps(body.__dict__, ensure_ascii=False, cls=ClsEncoder)}\n"
                 if body
                 else ""
             )
@@ -1,11 +1,13 @@
 import os
 import sys
+import global_var


 def ngrok_connect():
     from pyngrok import ngrok, conf

-    conf.set_default(conf.PyngrokConfig(ngrok_path="./ngrok"))
+    conf.set_default(
+        conf.PyngrokConfig(ngrok_path="./ngrok.exe" if os.name == "nt" else "./ngrok")
+    )
     ngrok.set_auth_token(os.environ["ngrok_token"])
-    http_tunnel = ngrok.connect(8000 if len(sys.argv) == 1 else int(sys.argv[1]))
-    print(http_tunnel.public_url)
+    http_tunnel = ngrok.connect(global_var.get(global_var.Args).port)
+    print(f"ngrok url: {http_tunnel.public_url}")
@@ -4,12 +4,13 @@ import os
 import pathlib
 import copy
 import re
-from typing import Dict, Iterable, List, Tuple, Union
+from typing import Dict, Iterable, List, Tuple, Union, Type
 from utils.log import quick_log
 from fastapi import HTTPException
 from pydantic import BaseModel, Field
 import numpy as np
 from routes import state_cache
+import global_var


 END_OF_TEXT = 0
@@ -20,23 +21,21 @@ os.environ["TORCH_EXTENSIONS_DIR"] = f"{pathlib.Path(__file__).parent.parent.res


 class RWKVType(Enum):
+    NoneType = auto()
     Raven = auto()
     World = auto()
     Music = auto()


 class AbstractRWKV(ABC):
-    def __init__(self, model: str, strategy: str, tokens_path: str):
-        from rwkv.model import RWKV as Model  # dynamic import to make RWKV_CUDA_ON work
-        from rwkv_pip.utils import PIPELINE
-
-        filename, _ = os.path.splitext(os.path.basename(model))
-        self.name = filename
-        self.model = Model(model, strategy)
-        self.pipeline = PIPELINE(self.model, tokens_path)
+    def __init__(self, model, pipeline):
+        self.name = "rwkv"
+        self.model = model
+        self.pipeline = pipeline
         self.model_state = None
         self.model_tokens = []
-        self.rwkv_type: RWKVType = None
+        self.rwkv_type: RWKVType = RWKVType.NoneType
+        self.tokenizer_len = len(model.w["emb.weight"])

         self.max_tokens_per_generation = 500
         self.temperature = 1
@@ -221,7 +220,7 @@ class AbstractRWKV(ABC):
         return state[0].tolist(), token_len

     def generate(
-        self, prompt: str, stop: Union[str, List[str]] = None
+        self, prompt: str, stop: Union[str, List[str], None] = None
     ) -> Iterable[Tuple[str, str, int, int]]:
         quick_log(None, None, "Generation Prompt:\n" + prompt)
         cache = None
@@ -337,8 +336,8 @@ class AbstractRWKV(ABC):


 class TextRWKV(AbstractRWKV):
-    def __init__(self, model: str, strategy: str, tokens_path: str) -> None:
-        super().__init__(model, strategy, tokens_path)
+    def __init__(self, model, pipeline) -> None:
+        super().__init__(model, pipeline)

         self.CHUNK_LEN = 256
@@ -350,16 +349,16 @@ class TextRWKV(AbstractRWKV):
         self.penalty_alpha_frequency = 1

         self.interface = ":"
-        if "world" in self.name.lower():
-            self.rwkv_type = RWKVType.World
-            self.user = "Question"
-            self.bot = "Answer"
-            self.END_OF_LINE = 11
-        else:
+        if self.tokenizer_len < 65536:
             self.rwkv_type = RWKVType.Raven
             self.user = "Bob"
             self.bot = "Alice"
             self.END_OF_LINE = 187
+        else:
+            self.rwkv_type = RWKVType.World
+            self.user = "User"
+            self.bot = "Assistant"
+            self.END_OF_LINE = 11

         self.AVOID_REPEAT_TOKENS = []
         AVOID_REPEAT = ",:?!"
@@ -438,8 +437,10 @@ The following is a coherent verbose detailed conversation between a girl named {
 {bot} usually gives {user} kind, helpful and informative advices.\n
 """
             if self.rwkv_type == RWKVType.Raven
-            else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
-            + "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
+            else (
+                f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+                + "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
+            )
         )
         logits, _ = self.run_rnn(self.fix_tokens(self.pipeline.encode(preset_system)))
         try:
@@ -456,8 +457,8 @@ The following is a coherent verbose detailed conversation between a girl named {


 class MusicRWKV(AbstractRWKV):
-    def __init__(self, model: str, strategy: str, tokens_path: str):
-        super().__init__(model, strategy, tokens_path)
+    def __init__(self, model, pipeline):
+        super().__init__(model, pipeline)

         self.max_tokens_per_generation = 500
         self.temperature = 1
@@ -497,6 +498,52 @@ class MusicRWKV(AbstractRWKV):
         return " " + delta


+def get_tokenizer(tokenizer_len: int):
+    tokenizer_dir = f"{pathlib.Path(__file__).parent.parent.resolve()}/rwkv_pip/"
+    if tokenizer_len < 50277:
+        return tokenizer_dir + "tokenizer-midi.json"
+    elif tokenizer_len < 65536:
+        return tokenizer_dir + "20B_tokenizer.json"
+    else:
+        return "rwkv_vocab_v20230424"
+
+
+def RWKV(model: str, strategy: str, tokenizer: Union[str, None]) -> AbstractRWKV:
+    rwkv_beta = global_var.get(global_var.Args).rwkv_beta
+
+    # dynamic import to make RWKV_CUDA_ON work
+    if rwkv_beta:
+        from rwkv_pip.beta.model import (
+            RWKV as Model,
+        )
+    else:
+        from rwkv_pip.model import (
+            RWKV as Model,
+        )
+    from rwkv_pip.utils import PIPELINE
+
+    filename, _ = os.path.splitext(os.path.basename(model))
+    model = Model(model, strategy)
+    if not tokenizer:
+        tokenizer = get_tokenizer(len(model.w["emb.weight"]))
+    pipeline = PIPELINE(model, tokenizer)
+
+    rwkv_map: dict[str, Type[AbstractRWKV]] = {
+        "20B_tokenizer": TextRWKV,
+        "rwkv_vocab_v20230424": TextRWKV,
+        "tokenizer-midi": MusicRWKV,
+    }
+    tokenizer_name = os.path.splitext(os.path.basename(tokenizer))[0]
+    rwkv: AbstractRWKV
+    if tokenizer_name in rwkv_map:
+        rwkv = rwkv_map[tokenizer_name](model, pipeline)
+    else:
+        rwkv = TextRWKV(model, pipeline)
+    rwkv.name = filename
+
+    return rwkv
+
+
 class ModelConfigBody(BaseModel):
     max_tokens: int = Field(default=None, gt=0, le=102400)
     temperature: float = Field(default=None, ge=0, le=2)
@@ -504,8 +551,8 @@ class ModelConfigBody(BaseModel):
     presence_penalty: float = Field(default=None, ge=-2, le=2)
     frequency_penalty: float = Field(default=None, ge=-2, le=2)

-    class Config:
-        schema_extra = {
+    model_config = {
+        "json_schema_extra": {
             "example": {
                 "max_tokens": 1000,
                 "temperature": 1.2,
@@ -514,6 +561,7 @@ class ModelConfigBody(BaseModel):
                 "frequency_penalty": 0.4,
             }
         }
+    }


 def set_rwkv_config(model: AbstractRWKV, body: ModelConfigBody):
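A hypothetical call of the new RWKV factory above (the model path and strategy here are placeholders): the tokenizer is derived from the embedding size when None is passed, and the wrapper class is chosen from rwkv_map by tokenizer file name:

rwkv = RWKV("models/rwkv-model.pth", "cuda fp16", None)
print(type(rwkv).__name__, rwkv.name)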
backend-python/webui_server.py (new file, 14 lines)
@@ -0,0 +1,14 @@
from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.staticfiles import StaticFiles
import uvicorn

webui_server = FastAPI()

webui_server.add_middleware(GZipMiddleware, minimum_size=1000)
webui_server.mount(
    "/", StaticFiles(directory="frontend/dist", html=True), name="static"
)

if __name__ == "__main__":
    uvicorn.run("webui_server:webui_server")
BIN backend-python/wkv_cuda_utils/wkv_cuda10_30.pyd (vendored): Binary file not shown.
BIN backend-python/wkv_cuda_utils/wkv_cuda40.pyd (vendored): Binary file not shown.
734
backend-python/wkv_cuda_utils/wkv_cuda_model.py
vendored
734
backend-python/wkv_cuda_utils/wkv_cuda_model.py
vendored
@@ -1,734 +0,0 @@
|
||||
########################################################################################################
|
||||
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
|
||||
########################################################################################################
|
||||
|
||||
import types, gc, os, time, re
|
||||
import torch
|
||||
from torch.nn import functional as F
|
||||
torch.backends.cudnn.benchmark = True
|
||||
torch.backends.cudnn.allow_tf32 = True
|
||||
torch.backends.cuda.matmul.allow_tf32 = True
|
||||
current_path = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
# https://zhuanlan.zhihu.com/p/612879065
|
||||
def LoadPreCompileLibrary(file):
|
||||
import importlib
|
||||
import os
|
||||
|
||||
import torch
|
||||
|
||||
# load the custom_op_library and register the custom ops
|
||||
lib_dir = os.path.dirname(__file__)
|
||||
if os.name == "nt":
|
||||
# Register the main torchvision library location on the default DLL path
|
||||
import ctypes
|
||||
import sys
|
||||
|
||||
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
|
||||
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
|
||||
prev_error_mode = kernel32.SetErrorMode(0x0001)
|
||||
|
||||
if with_load_library_flags:
|
||||
kernel32.AddDllDirectory.restype = ctypes.c_void_p
|
||||
|
||||
if sys.version_info >= (3, 8):
|
||||
os.add_dll_directory(lib_dir)
|
||||
elif with_load_library_flags:
|
||||
res = kernel32.AddDllDirectory(lib_dir)
|
||||
if res is None:
|
||||
err = ctypes.WinError(ctypes.get_last_error())
|
||||
err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
|
||||
raise ValueError(err)
|
||||
|
||||
kernel32.SetErrorMode(prev_error_mode)
|
||||
|
||||
loader_details = (
|
||||
importlib.machinery.ExtensionFileLoader,
|
||||
importlib.machinery.EXTENSION_SUFFIXES,
|
||||
)
|
||||
|
||||
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
|
||||
ext_specs = extfinder.find_spec(file)
|
||||
if ext_specs is None:
|
||||
return False
|
||||
|
||||
try:
|
||||
torch.ops.load_library(ext_specs.origin)
|
||||
except OSError as exc:
|
||||
return False
|
||||
return True
|
||||
|
||||
########################################################################################################
|
||||
|
||||
if os.environ.get('RWKV_JIT_ON') != '0':
|
||||
os.environ["RWKV_JIT_ON"] = '1'
|
||||
MyModule = torch.jit.ScriptModule
|
||||
MyFunction = torch.jit.script_method
|
||||
MyStatic = torch.jit.script
|
||||
else:
|
||||
MyModule = torch.nn.Module
|
||||
def __nop(ob):
|
||||
return ob
|
||||
MyFunction = __nop
|
||||
MyStatic = __nop
|
||||
|
||||
if os.environ.get('RWKV_CUDA_ON') == '1':
|
||||
if LoadPreCompileLibrary('wkv_cuda') is False:
|
||||
from torch.utils.cpp_extension import load
|
||||
load(
|
||||
name=f"wkv_cuda",
|
||||
sources=[f"{current_path}/cuda/wrapper.cpp", f"{current_path}/cuda/operators.cu"],
|
||||
verbose=True,
|
||||
extra_cuda_cflags=["-t 4", "-std=c++17", "--use_fast_math", "-O3", "--extra-device-vectorization"],
|
||||
is_python_module=False)
|
||||
|
||||
@MyStatic
|
||||
def cuda_wkv(T: int, C: int, w, u, k, v, aa, bb, pp):
|
||||
assert 1 * C % min(C, 32) == 0
|
||||
assert k.dtype == v.dtype == torch.float16 or k.dtype == v.dtype == torch.float32
|
||||
assert w.dtype == u.dtype == aa.dtype == bb.dtype == pp.dtype == torch.float32
|
||||
w = w.contiguous()
|
||||
u = u.contiguous()
|
||||
k = k.contiguous()
|
||||
v = v.contiguous()
|
||||
y = torch.empty((T, C), device=w.device, memory_format=torch.contiguous_format, dtype=k.dtype)
|
||||
torch.ops.rwkv.wkv_forward(1, T, C, w, u, k, v, y, aa, bb, pp)
|
||||
return y, aa, bb, pp
|
||||
@MyStatic
|
||||
def cuda_mm8_seq(B: int, N: int, M: int, x, w, mx, rx, my, ry):
|
||||
assert x.dtype == mx.dtype == rx.dtype == my.dtype == ry.dtype
|
||||
assert x.dtype == torch.float32 or x.dtype == torch.float16
|
||||
assert w.dtype == torch.uint8
|
||||
assert x.shape == [B, N]
|
||||
assert w.shape == [N, M]
|
||||
assert rx.shape == mx.shape == [M]
|
||||
assert ry.shape == my.shape == [N, 1]
|
||||
y = torch.empty((B, M), device=w.device, dtype=x.dtype)
|
||||
torch.ops.rwkv.mm8_seq(B, N, M, x, w, mx, rx, my, ry, y)
|
||||
return y
|
||||
@MyStatic
|
||||
def cuda_mm8_one(N: int, M: int, x, w, mx, rx, my, ry):
|
||||
assert x.dtype == mx.dtype == rx.dtype == my.dtype == ry.dtype
|
||||
assert x.dtype == torch.float32 or x.dtype == torch.float16
|
||||
assert w.dtype == torch.uint8
|
||||
assert x.shape == [N]
|
||||
assert w.shape == [N, M]
|
||||
assert rx.shape == mx.shape == [M]
|
||||
assert ry.shape == my.shape == [N, 1]
|
||||
y = torch.zeros((M,), device=w.device, dtype=torch.float32)
|
||||
torch.ops.rwkv.mm8_one(N, M, x, w, mx, rx, my, ry, y)
|
||||
return y.to(dtype=x.dtype)
|
||||
else:
|
||||
os.environ["RWKV_CUDA_ON"] = '0'
|
||||
|
||||
########################################################################################################
|
||||
|
||||
class RWKV(MyModule):
|
||||
def __init__(self, model, strategy, verbose = True, convert_and_save_and_exit = None):
|
||||
super().__init__()
|
||||
if verbose:
|
||||
prxxx = lambda *args, **kwargs: print(*args, **kwargs)
|
||||
else:
|
||||
prxxx = lambda *args, **kwargs: None
|
||||
|
||||
STRATEGY_REGEX = r"^(?:(?:^|->) *(?:cuda(?::[\d]+)?|cpu|mps) (?:fp(?:16|32)|bf16)(?:i8|i4|i3)?(?: \*[\d]+\+?)? *)+$"
|
||||
if not re.match(STRATEGY_REGEX, strategy):
|
||||
raise ValueError("Invalid strategy. Please read https://pypi.org/project/rwkv/")
|
||||
|
||||
strategy = ('->'.join([x.strip() for x in strategy.split('->')])).replace('->', ' -> ')
|
||||
self.args = types.SimpleNamespace()
|
||||
args = self.args
|
||||
args.MODEL_NAME = model
|
||||
args.strategy_string = strategy
|
||||
|
||||
# Rescale for fp16 mode: set x = x/2 every X layer (to avoid fp16 overflow)
|
||||
self.RESCALE_LAYER = 6 if 'fp16' in strategy else 0
|
||||
prxxx(f'RWKV_JIT_ON {os.environ["RWKV_JIT_ON"]} RWKV_CUDA_ON {os.environ["RWKV_CUDA_ON"]} RESCALE_LAYER {self.RESCALE_LAYER}\n')
|
||||
|
||||
args.MODEL_NAME = args.MODEL_NAME.strip()
|
||||
if not args.MODEL_NAME.endswith('.pth'):
|
||||
args.MODEL_NAME += '.pth'
|
||||
prxxx(f'Loading {args.MODEL_NAME} ...')
|
||||
with torch.no_grad():
|
||||
self.w = torch.load(args.MODEL_NAME, map_location='cpu') # load model to CPU first
|
||||
gc.collect()
|
||||
w = self.w
|
||||
|
||||
ALREADY_CONVERTED = False
|
||||
if '_strategy' in w:
|
||||
ALREADY_CONVERTED = True
|
||||
assert convert_and_save_and_exit == None # you should only convert a raw model
|
||||
prxxx(f"Converted model: strategy {w['_strategy']}, version {w['_version']}\n")
|
||||
assert w['_strategy'] == args.strategy_string # if you are using a new strategy, re-convert the model
|
||||
assert float(w['_version']) >= 0.7 # sometimes you should re-convert using latest convert_model.py
|
||||
assert w['_rescale_layer'] == self.RESCALE_LAYER
|
||||
del w['_strategy']
|
||||
del w['_version']
|
||||
del w['_rescale_layer']
|
||||
|
||||
args.n_embd = w['emb.weight'].shape[1]
|
||||
args.n_layer = 0
|
||||
keys = list(w.keys())
|
||||
for x in keys:
|
||||
layer_id = int(x.split('.')[1]) if ('blocks.' in x) else 0
|
||||
args.n_layer = max(args.n_layer, layer_id+1)
|
||||
|
||||
####################### Compute strategy
|
||||
|
||||
s = [x.strip().split(' ') for x in strategy.split('->')]
|
||||
plan = [0] * len(s)
|
||||
stream_i = -1
|
||||
stream_count = 0
|
||||
to_allocate = args.n_layer + 1
|
||||
allocated = 0
|
||||
free_slots = 0
|
||||
for i in range(len(s)):
|
||||
si = s[i]
|
||||
si1 = si[1]
|
||||
if si1.startswith('fp32'): si[1] = [torch.float]
|
||||
elif si1.startswith('fp16'): si[1] = [torch.float16]
|
||||
elif si1.startswith('bf16'): si[1] = [torch.bfloat16]
|
||||
if si1.endswith('i8'): si[1] += [torch.uint8]
|
||||
else: si[1] += [si[1][0]]
|
||||
if len(si) > 2:
|
||||
ss = si[2]
|
||||
assert ss.startswith('*')
|
||||
if ss.endswith('+'):
|
||||
plan[i] = int(ss[1:-1])
|
||||
stream_i = i
|
||||
else:
|
||||
plan[i] = int(ss[1:])
|
||||
allocated += plan[i]
|
||||
if allocated >= to_allocate:
|
||||
plan[i] += to_allocate - allocated
|
||||
break
|
||||
else:
|
||||
free_slots += 1
|
||||
if stream_i < 0:
|
||||
if free_slots > 0 and to_allocate > allocated:
|
||||
for i in range(len(s)):
|
||||
if plan[i] == 0:
|
||||
plan[i] = (to_allocate - allocated) // free_slots
|
||||
allocated += plan[i]
|
||||
free_slots -= 1
|
||||
if to_allocate > allocated:
|
||||
plan[len(s)-1] += to_allocate - allocated
|
||||
else:
|
||||
if to_allocate > allocated:
|
||||
stream_count = to_allocate - allocated
|
||||
plan[stream_i] += stream_count
|
||||
prxxx(f'Strategy: (total {args.n_layer}+1={args.n_layer+1} layers)')
|
||||
for i in range(len(s)):
|
||||
ss = s[i]
|
||||
if i != stream_i:
|
||||
prxxx(f'* {ss[0]} {str(ss[1]).replace("torch.","")}, store {plan[i]} layers')
|
||||
else:
|
||||
prxxx(f'* {ss[0]} {str(ss[1]).replace("torch.","")}, store {plan[i]-stream_count} layers, stream {stream_count} layers')
|
||||
plan[i] += (0 if i == 0 else plan[i-1])
|
||||
self.strategy = [None] * (args.n_layer + 1)
|
||||
strategy = self.strategy
|
||||
for n in range(args.n_layer + 1):
|
||||
for i in range(len(s)):
|
||||
if n < plan[i]:
|
||||
strategy[n] = types.SimpleNamespace()
|
||||
strategy[n].device = s[i][0]
|
||||
strategy[n].atype = s[i][1][0]
|
||||
strategy[n].wtype = s[i][1][1]
|
||||
strategy[n].stream = False
|
||||
if i == stream_i and n >= (plan[i] - stream_count):
|
||||
strategy[n].stream = True
|
||||
break
|
||||
prxxx(f"{n}-{strategy[n].device}-{str(strategy[n].atype).replace('torch.','')}-{str(strategy[n].wtype).replace('torch.','')}{'-stream' if strategy[n].stream else ''}",end=' ')
|
||||
prxxx()
|
||||
|
||||
####################### Load weights to self.w
|
||||
|
||||
if not ALREADY_CONVERTED:
|
||||
try: # precompute embedding
|
||||
w['emb.weight'] = F.layer_norm(w['emb.weight'], (args.n_embd,), weight=w['blocks.0.ln0.weight'], bias=w['blocks.0.ln0.bias'])
|
||||
except:
|
||||
w['emb.weight'] = F.layer_norm(w['emb.weight'].float(), (args.n_embd,), weight=w['blocks.0.ln0.weight'].float(), bias=w['blocks.0.ln0.bias'].float())
|
||||
del w['blocks.0.ln0.weight']
|
||||
del w['blocks.0.ln0.bias']
|
||||
|
||||
print_need_newline = False
|
||||
keys = list(w.keys())
|
||||
for x in keys:
|
||||
w[x].requires_grad = False
|
||||
layer_id = int(x.split('.')[1]) if ('blocks.' in x) else 0
|
||||
if ('ln_out.' in x) or ('head.' in x):
|
||||
layer_id = args.n_layer
|
||||
dd = strategy[layer_id]
|
||||
DEVICE = dd.device
|
||||
ATYPE = dd.atype
|
||||
WTYPE = dd.wtype
|
||||
|
||||
if not ALREADY_CONVERTED:
|
||||
if self.RESCALE_LAYER > 0:
|
||||
if 'att.output.weight' in x:
|
||||
w[x] = w[x] / (2 ** int(layer_id // self.RESCALE_LAYER))
|
||||
if 'ffn.value.weight' in x:
|
||||
w[x] = w[x] / (2 ** int(layer_id // self.RESCALE_LAYER))
|
||||
|
||||
if '.time_' in x:
|
||||
w[x] = w[x].squeeze()
|
||||
if 'key.weight' in x or 'value.weight' in x or 'receptance.weight' in x or 'output.weight' in x or 'head.weight' in x:
|
||||
w[x] = w[x].t()
|
||||
|
||||
if '.time_decay' in x: # need fp32 for this
|
||||
w[x] = -torch.exp(w[x].float())
|
||||
elif '.time_first' in x: # need fp32 for this
|
||||
w[x] = w[x].float()
|
||||
else:
|
||||
if (len(w[x].shape) == 2) and ('emb' not in x):
|
||||
if WTYPE != torch.uint8:
|
||||
w[x] = w[x].to(dtype=WTYPE)
|
||||
else:
|
||||
w[x] = w[x].float()
|
||||
|
||||
if w[x].shape[0] > w[x].shape[1]:
|
||||
w[x+'_my'] = torch.amin(w[x], dim=1).unsqueeze(1)
|
||||
w[x] = w[x] - w[x+'_my']
|
||||
w[x+'_mx'] = torch.amin(w[x], dim=0)
|
||||
w[x] = w[x] - w[x+'_mx']
|
||||
w[x+'_rx'] = torch.amax(w[x], dim=0)
|
||||
w[x] = w[x] / w[x+'_rx']
|
||||
w[x+'_ry'] = torch.amax(w[x], dim=1).unsqueeze(1)
|
||||
w[x] = w[x] / w[x+'_ry']
|
||||
else:
|
||||
w[x+'_mx'] = torch.amin(w[x], dim=0)
|
||||
w[x] = w[x] - w[x+'_mx']
|
||||
w[x+'_my'] = torch.amin(w[x], dim=1).unsqueeze(1)
|
||||
w[x] = w[x] - w[x+'_my']
|
||||
w[x+'_rx'] = torch.amax(w[x], dim=0)
|
||||
w[x] = w[x] / w[x+'_rx']
|
||||
w[x+'_ry'] = torch.amax(w[x], dim=1).unsqueeze(1)
|
||||
w[x] = w[x] / w[x+'_ry']
|
||||
|
||||
w[x] = torch.clip(torch.floor(w[x] * 256), min=0, max=255).to(dtype=torch.uint8)
|
||||
w[x+'_mx'] = w[x+'_mx'].to(dtype=ATYPE).contiguous()
|
||||
w[x+'_rx'] = (w[x+'_rx'] / 16).to(dtype=ATYPE).contiguous()
|
||||
w[x+'_my'] = w[x+'_my'].to(dtype=ATYPE).contiguous()
|
||||
w[x+'_ry'] = (w[x+'_ry'] / 16).to(dtype=ATYPE).contiguous()
|
||||
else:
|
||||
w[x] = w[x].to(dtype=ATYPE)
|
||||
|
||||
                if convert_and_save_and_exit == None:
                    if 'emb.' in x:
                        w[x] = w[x].contiguous()
                    elif (dd.stream) and (x.endswith('key.weight') or x.endswith('value.weight') or x.endswith('receptance.weight') or x.endswith('output.weight')):
                        try:
                            w[x] = w[x].contiguous().pin_memory() # if you see "CUDA error: out of memory" here, that's out of CPU RAM, not VRAM. Get more RAM :)
                        except:
                            print('Note: You are running out of RAM. Get more CPU RAM. Now this will run much slower.')
                    elif DEVICE != 'cpu':
                        w[x] = w[x].to(device=DEVICE).contiguous()

                    if (dd.stream) or (DEVICE != 'cpu'):
                        try:
                            w[x+'_mx'] = w[x+'_mx'].to(device=DEVICE).contiguous()
                            w[x+'_rx'] = w[x+'_rx'].to(device=DEVICE).contiguous()
                            w[x+'_my'] = w[x+'_my'].to(device=DEVICE).contiguous()
                            w[x+'_ry'] = w[x+'_ry'].to(device=DEVICE).contiguous()
                        except:
                            pass

                if 'ffn.value.weight' in x:
                    gc.collect()
                    if 'cuda' in args.strategy_string:
                        torch.cuda.empty_cache()

                shape = [i for i in w[x].shape if i != 1]
                if len(shape) > 1:
                    shape = f" {str(shape[0]).rjust(5)} {str(shape[1]).rjust(5)}"
                else:
                    shape = f" {str(shape[0]).rjust(5)} "
                if layer_id == 0 or layer_id >= args.n_layer-1:
                    if print_need_newline:
                        prxxx('\n', end = '')
                        print_need_newline = False
                    dt = str(w[x].dtype).replace('torch.', '')
                    dt = dt.replace('float32', 'f32').replace('bfloat16', 'bf16').replace('float16', 'f16').replace('uint8', 'i8')
                    prxxx(x.ljust(32), dt.rjust(4), str(w[x].device).rjust(8), shape, ' (pinned)' if w[x].is_pinned() else '')
                else:
                    print_need_newline = True
                    prxxx('.', end = '', flush = True)

            if convert_and_save_and_exit:
                w['_strategy'] = args.strategy_string
                w['_rescale_layer'] = self.RESCALE_LAYER
                w['_version'] = '0.7'
                if not convert_and_save_and_exit.endswith('.pth'):
                    convert_and_save_and_exit += '.pth'
                prxxx(f'Saving to {convert_and_save_and_exit}...')
                torch.save(w, convert_and_save_and_exit)
                prxxx(f'Converted and saved. Now this will exit.')
                exit(0)

            gc.collect()
            if 'cuda' in args.strategy_string:
                torch.cuda.empty_cache()

    @MyFunction
    def torch_mm8_seq(self, x, w, mx, rx, my, ry):
        return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)

    @MyFunction
    def torch_mm8_one(self, x, w, mx, rx, my, ry):
        return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)

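    # torch_mm8_seq/torch_mm8_one undo the uint8 packing on the fly: (w + 0.5) picks the midpoint
    # of each quantization bucket (halving the worst-case rounding error), then the per-axis
    # scales rx/ry and offsets mx/my restore an approximation of the original weight before the
    # matmul. This materializes a temporary the size of the full-precision weight, which is the
    # cost the fused CUDA kernels below avoid.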
    if os.environ.get('RWKV_CUDA_ON') == '1':
        @MyFunction
        def mm8_seq(self, x, w, mx, rx, my, ry):
            if w.device.type == 'cuda' and x.dtype == torch.float16:
                B, N, M = x.shape[0], w.shape[0], w.shape[1]
                return cuda_mm8_seq(B, N, M, x, w, mx, rx, my, ry)
            else:
                return self.torch_mm8_seq(x, w, mx, rx, my, ry)
        @MyFunction
        def mm8_one(self, x, w, mx, rx, my, ry):
            if w.device.type == 'cuda':
                N, M = w.shape[0], w.shape[1]
                return cuda_mm8_one(N, M, x, w, mx, rx, my, ry)
            else:
                return self.torch_mm8_one(x, w, mx, rx, my, ry)
    else:
        @MyFunction
        def mm8_seq(self, x, w, mx, rx, my, ry):
            return self.torch_mm8_seq(x, w, mx, rx, my, ry)
        @MyFunction
        def mm8_one(self, x, w, mx, rx, my, ry):
            return self.torch_mm8_one(x, w, mx, rx, my, ry)

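    # Dispatch summary: with RWKV_CUDA_ON=1, mm8_seq takes the fused cuda_mm8_seq kernel only for
    # fp16 activations with the weight resident on a CUDA device, and mm8_one takes cuda_mm8_one
    # whenever the weight is on CUDA; every other case (and RWKV_CUDA_ON unset) falls back to the
    # pure-PyTorch versions above.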
    ########################################################################################################

    @MyFunction
    def ffn_one(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
        xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
        kx = xx * k_mix + sx * (1 - k_mix)
        rx = xx * r_mix + sx * (1 - r_mix)

        r = torch.sigmoid(rx @ rw)
        vx = torch.square(torch.relu(kx @ kw))
        out = r * (vx @ vw)
        return x + out, xx

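    # ffn_one is RWKV's channel mixing for a single token: k_mix/r_mix blend the normalized input
    # xx with the previous token's xx (sx, the "token shift"), r applies a sigmoid gate, and the
    # hidden activation is squared ReLU. With the weights stored transposed at load time, the
    # shapes run [n_embd] -> kx @ kw -> [ffn hidden] -> vx @ vw -> [n_embd]. ffn_one_i8 below is
    # identical except that every matmul goes through mm8_one.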
    @MyFunction
    def ffn_one_i8(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
        xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
        kx = xx * k_mix + sx * (1 - k_mix)
        rx = xx * r_mix + sx * (1 - r_mix)

        r = torch.sigmoid(self.mm8_one(rx, rw, rmx, rrx, rmy, rry))
        vx = torch.square(torch.relu(self.mm8_one(kx, kw, kmx, krx, kmy, kry)))
        out = r * (self.mm8_one(vx, vw, vmx, vrx, vmy, vry))
        return x + out, xx

    ########################################################################################################

    @MyFunction
    def ffn_seq(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
        xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
        sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
        kx = xx * k_mix + sx * (1 - k_mix)
        rx = xx * r_mix + sx * (1 - r_mix)

        r = torch.sigmoid(rx @ rw)
        vx = torch.square(torch.relu(kx @ kw))
        out = r * (vx @ vw)
        return x + out, xx[-1,:]

    @MyFunction
    def ffn_seq_i8(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
        xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
        sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
        kx = xx * k_mix + sx * (1 - k_mix)
        rx = xx * r_mix + sx * (1 - r_mix)

        r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
        vx = torch.square(torch.relu(self.mm8_seq(kx, kw, kmx, krx, kmy, kry)))
        out = r * (self.mm8_seq(vx, vw, vmx, vrx, vmy, vry))
        return x + out, xx[-1,:]

    ########################################################################################################

    @MyFunction
    def att_one(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
        xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
        kx = xx * k_mix + sx * (1 - k_mix)
        vx = xx * v_mix + sx * (1 - v_mix)
        rx = xx * r_mix + sx * (1 - r_mix)

        r = torch.sigmoid(rx @ rw)
        k = (kx @ kw).float()
        v = (vx @ vw).float()

        ww = t_first + k
        p = torch.maximum(pp, ww)
        e1 = torch.exp(pp - p)
        e2 = torch.exp(ww - p)
        wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
        ww = t_decay + pp
        p = torch.maximum(ww, k)
        e1 = torch.exp(ww - p)
        e2 = torch.exp(k - p)

        out = (r * wkv) @ ow
        return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p

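    # The att_* functions keep the WKV state (aa, bb, pp) in max-shifted form: pp tracks the
    # running maximum exponent, and each step rescales the accumulators by exp(old_max - new_max)
    # before adding the new exp terms. This is the standard log-sum-exp stabilization; without
    # it, exp(t_first + k) and the accumulated decay terms would overflow fp32. Schematically,
    # wkv is a softmax-weighted average of past values:
    #     wkv_t = sum_i(exp(w_i) * v_i) / sum_i(exp(w_i)), with every exponent shifted by p.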
    @MyFunction
    def att_one_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
        xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
        kx = xx * k_mix + sx * (1 - k_mix)
        vx = xx * v_mix + sx * (1 - v_mix)
        rx = xx * r_mix + sx * (1 - r_mix)

        r = torch.sigmoid(self.mm8_one(rx, rw, rmx, rrx, rmy, rry))
        k = (self.mm8_one(kx, kw, kmx, krx, kmy, kry)).float()
        v = (self.mm8_one(vx, vw, vmx, vrx, vmy, vry)).float()

        ww = t_first + k
        p = torch.maximum(pp, ww)
        e1 = torch.exp(pp - p)
        e2 = torch.exp(ww - p)
        wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
        ww = t_decay + pp
        p = torch.maximum(ww, k)
        e1 = torch.exp(ww - p)
        e2 = torch.exp(k - p)

        out = self.mm8_one(r * wkv, ow, omx, orx, omy, ory)
        return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p

    ########################################################################################################

    @MyFunction
    def att_seq(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
        xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
        sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
        kx = xx * k_mix + sx * (1 - k_mix)
        vx = xx * v_mix + sx * (1 - v_mix)
        rx = xx * r_mix + sx * (1 - r_mix)

        r = torch.sigmoid(rx @ rw)
        k = (kx @ kw).float()
        v = (vx @ vw).float()

        T = x.shape[0]
        for t in range(T):
            kk = k[t]
            vv = v[t]
            ww = t_first + kk
            p = torch.maximum(pp, ww)
            e1 = torch.exp(pp - p)
            e2 = torch.exp(ww - p)
            sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
            ww = t_decay + pp
            p = torch.maximum(ww, kk)
            e1 = torch.exp(ww - p)
            e2 = torch.exp(kk - p)
            aa = e1 * aa + e2 * vv
            bb = e1 * bb + e2
            pp = p
        out = (r * sx) @ ow
        return x + out, xx[-1,:], aa, bb, pp

    @MyFunction
    def att_seq_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
        xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
        sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
        kx = xx * k_mix + sx * (1 - k_mix)
        vx = xx * v_mix + sx * (1 - v_mix)
        rx = xx * r_mix + sx * (1 - r_mix)

        r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
        k = self.mm8_seq(kx, kw, kmx, krx, kmy, kry).float()
        v = self.mm8_seq(vx, vw, vmx, vrx, vmy, vry).float()

        T = x.shape[0]
        for t in range(T):
            kk = k[t]
            vv = v[t]
            ww = t_first + kk
            p = torch.maximum(pp, ww)
            e1 = torch.exp(pp - p)
            e2 = torch.exp(ww - p)
            sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
            ww = t_decay + pp
            p = torch.maximum(ww, kk)
            e1 = torch.exp(ww - p)
            e2 = torch.exp(kk - p)
            aa = e1 * aa + e2 * vv
            bb = e1 * bb + e2
            pp = p
        out = self.mm8_seq(r * sx, ow, omx, orx, omy, ory)
        return x + out, xx[-1,:], aa, bb, pp

    ########################################################################################################

    if os.environ["RWKV_CUDA_ON"] == '1':
        @MyFunction
        def cuda_att_seq(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
            T, C = x.size()
            xx = F.layer_norm(x, (C,), weight=ln_w, bias=ln_b)
            sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
            kx = xx * k_mix + sx * (1 - k_mix)
            vx = xx * v_mix + sx * (1 - v_mix)
            rx = xx * r_mix + sx * (1 - r_mix)

            r = torch.sigmoid(rx @ rw)
            k = kx @ kw
            v = vx @ vw
            y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)

            out = (r * y) @ ow
            return x + out, xx[-1,:], aa, bb, pp

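        # cuda_att_seq replaces the per-token Python loop of att_seq with a single cuda_wkv
        # kernel launch over all T positions, which is where most of the sequence-mode speedup
        # from RWKV_CUDA_ON comes from; the layer norm, token-shift mixing, and output
        # projection around it are unchanged.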
        @MyFunction
        def cuda_att_seq_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
            T, C = x.size()
            xx = F.layer_norm(x, (C,), weight=ln_w, bias=ln_b)
            sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
            kx = xx * k_mix + sx * (1 - k_mix)
            vx = xx * v_mix + sx * (1 - v_mix)
            rx = xx * r_mix + sx * (1 - r_mix)

            r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
            k = self.mm8_seq(kx, kw, kmx, krx, kmy, kry)
            v = self.mm8_seq(vx, vw, vmx, vrx, vmy, vry)
            y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)

            out = self.mm8_seq(r * y, ow, omx, orx, omy, ory)
            return x + out, xx[-1,:], aa, bb, pp

    ########################################################################################################

    def forward(self, tokens, state, full_output=False):
        with torch.no_grad():
            w = self.w
            args = self.args

            if state == None:
                state = [None] * args.n_layer * 5
                for i in range(args.n_layer): # state: 0=att_xx 1=att_aa 2=att_bb 3=att_pp 4=ffn_xx
                    dd = self.strategy[i]
                    dev = dd.device
                    atype = dd.atype
                    state[i*5+0] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
                    state[i*5+1] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous()
                    state[i*5+2] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous()
                    state[i*5+3] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous() - 1e30
                    state[i*5+4] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()

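            # A fresh state holds five tensors per layer (att_xx, att_aa, att_bb, att_pp, ffn_xx),
            # each on that layer's device. att_pp starts at -1e30 so it acts as -inf in the
            # torch.maximum() calls of att_one/att_seq, making the first token's max-shift a no-op.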
            seq_mode = len(tokens) > 1

            x = w['emb.weight'][tokens if seq_mode else tokens[0]]

            for i in range(args.n_layer):
                bbb = f'blocks.{i}.'
                att = f'blocks.{i}.att.'
                ffn = f'blocks.{i}.ffn.'
                dd = self.strategy[i]
                dev = dd.device
                atype = dd.atype
                wtype = dd.wtype
                if seq_mode:
                    if 'cuda' in str(dev) and os.environ["RWKV_CUDA_ON"] == '1':
                        ATT = self.cuda_att_seq if wtype != torch.uint8 else self.cuda_att_seq_i8
                    else:
                        ATT = self.att_seq if wtype != torch.uint8 else self.att_seq_i8
                    FFN = self.ffn_seq if wtype != torch.uint8 else self.ffn_seq_i8
                else:
                    ATT = self.att_one if wtype != torch.uint8 else self.att_one_i8
                    FFN = self.ffn_one if wtype != torch.uint8 else self.ffn_one_i8

                x = x.to(dtype=atype, device=dev)

                kw = w[f'{att}key.weight']
                vw = w[f'{att}value.weight']
                rw = w[f'{att}receptance.weight']
                ow = w[f'{att}output.weight']
                if dd.stream:
                    kw = kw.to(device=dev, non_blocking=True)
                    vw = vw.to(device=dev, non_blocking=True)
                    rw = rw.to(device=dev, non_blocking=True)
                    ow = ow.to(device=dev, non_blocking=True)
                kmx = w[f'{att}key.weight_mx'] if wtype == torch.uint8 else x
                krx = w[f'{att}key.weight_rx'] if wtype == torch.uint8 else x
                kmy = w[f'{att}key.weight_my'] if wtype == torch.uint8 else x
                kry = w[f'{att}key.weight_ry'] if wtype == torch.uint8 else x
                vmx = w[f'{att}value.weight_mx'] if wtype == torch.uint8 else x
                vrx = w[f'{att}value.weight_rx'] if wtype == torch.uint8 else x
                vmy = w[f'{att}value.weight_my'] if wtype == torch.uint8 else x
                vry = w[f'{att}value.weight_ry'] if wtype == torch.uint8 else x
                rmx = w[f'{att}receptance.weight_mx'] if wtype == torch.uint8 else x
                rrx = w[f'{att}receptance.weight_rx'] if wtype == torch.uint8 else x
                rmy = w[f'{att}receptance.weight_my'] if wtype == torch.uint8 else x
                rry = w[f'{att}receptance.weight_ry'] if wtype == torch.uint8 else x
                omx = w[f'{att}output.weight_mx'] if wtype == torch.uint8 else x
                orx = w[f'{att}output.weight_rx'] if wtype == torch.uint8 else x
                omy = w[f'{att}output.weight_my'] if wtype == torch.uint8 else x
                ory = w[f'{att}output.weight_ry'] if wtype == torch.uint8 else x
                x, state[i*5+0], state[i*5+1], state[i*5+2], state[i*5+3] = ATT(
                    x, state[i*5+0], state[i*5+1], state[i*5+2], state[i*5+3],
                    w[f'{bbb}ln1.weight'], w[f'{bbb}ln1.bias'],
                    w[f'{att}time_mix_k'], w[f'{att}time_mix_v'], w[f'{att}time_mix_r'],
                    w[f'{att}time_decay'], w[f'{att}time_first'],
                    kw, vw, rw, ow,
                    kmx, krx, kmy, kry,
                    vmx, vrx, vmy, vry,
                    rmx, rrx, rmy, rry,
                    omx, orx, omy, ory,
                    )
                if dd.stream:
                    del kw, vw, rw, ow

                kw = w[f'{ffn}key.weight']
                vw = w[f'{ffn}value.weight']
                rw = w[f'{ffn}receptance.weight']
                if dd.stream:
                    kw = kw.to(device=dev, non_blocking=True)
                    vw = vw.to(device=dev, non_blocking=True)
                    rw = rw.to(device=dev, non_blocking=True)
                kmx = w[f'{ffn}key.weight_mx'] if wtype == torch.uint8 else x
                krx = w[f'{ffn}key.weight_rx'] if wtype == torch.uint8 else x
                kmy = w[f'{ffn}key.weight_my'] if wtype == torch.uint8 else x
                kry = w[f'{ffn}key.weight_ry'] if wtype == torch.uint8 else x
                vmx = w[f'{ffn}value.weight_mx'] if wtype == torch.uint8 else x
                vrx = w[f'{ffn}value.weight_rx'] if wtype == torch.uint8 else x
                vmy = w[f'{ffn}value.weight_my'] if wtype == torch.uint8 else x
                vry = w[f'{ffn}value.weight_ry'] if wtype == torch.uint8 else x
                rmx = w[f'{ffn}receptance.weight_mx'] if wtype == torch.uint8 else x
                rrx = w[f'{ffn}receptance.weight_rx'] if wtype == torch.uint8 else x
                rmy = w[f'{ffn}receptance.weight_my'] if wtype == torch.uint8 else x
                rry = w[f'{ffn}receptance.weight_ry'] if wtype == torch.uint8 else x
                x, state[i*5+4] = FFN(
                    x, state[i*5+4],
                    w[f'{bbb}ln2.weight'], w[f'{bbb}ln2.bias'],
                    w[f'{ffn}time_mix_k'], w[f'{ffn}time_mix_r'],
                    kw, vw, rw,
                    kmx, krx, kmy, kry,
                    vmx, vrx, vmy, vry,
                    rmx, rrx, rmy, rry,
                    )
                if dd.stream:
                    del kw, vw, rw

                if self.RESCALE_LAYER > 0:
                    if (i+1) % self.RESCALE_LAYER == 0:
                        x = x / 2

            dd = self.strategy[args.n_layer]
            x = x[-1,:] if (seq_mode and (not full_output)) else x
            x = x.to(dtype=dd.atype, device=dd.device)

            x = F.layer_norm(x, (args.n_embd,), weight=w['ln_out.weight'], bias=w['ln_out.bias'])
            if w['head.weight'].dtype != torch.uint8:
                x = x @ w['head.weight']
            else:
                if seq_mode and full_output:
                    x = self.mm8_seq(x, w['head.weight'], w['head.weight_mx'], w['head.weight_rx'], w['head.weight_my'], w['head.weight_ry'])
                else:
                    x = self.mm8_one(x, w['head.weight'], w['head.weight_mx'], w['head.weight_rx'], w['head.weight_my'], w['head.weight_ry'])

            return x.float(), state
66861 backend-rust/assets/rwkv_vocab_v20230424.json (new file; diff suppressed because it is too large)
6 build/darwin/Readme_Install.txt (vendored)
@@ -1,6 +1,6 @@
-For Mac and Linux users, please manually install Python 3.10 (usually the latest systems come with it built-in). You can specify the Python interpreter to use in Settings.
-对于Mac和Linux用户,请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器.
-MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。
+For Mac and Linux users, please manually install Python 3.10 (usually the latest systems come with it built-in). You can specify the Python interpreter to use in Settings. (which python3)
+对于Mac和Linux用户,请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器. (which python3)
+MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。 (which python3)

Please execute this program in an empty directory. All related dependencies will be placed in this directory.
请将本程序放在一个空目录内执行, 所有相关依赖均会放置于此目录.

@@ -9,7 +9,7 @@ cd RWKV-Next-Web
git clone https://github.com/josStorer/RWKV-Runner --depth=1
python3 -m pip install torch torchvision torchaudio
python3 -m pip install -r RWKV-Runner/backend-python/requirements.txt
-python3 ./RWKV-Runner/backend-python/main.py > log.txt &
+python3 ./RWKV-Runner/backend-python/main.py > log.txt & # this is only an example, you should use screen or other tools to run it in background

if [ ! -d RWKV-Runner/models ]; then
mkdir RWKV-Runner/models

@@ -22,6 +22,6 @@ yarn install
yarn build
export PROXY_URL=""
export BASE_URL=http://127.0.0.1:8000
-yarn start &
+yarn start & # this is only an example, you should use screen or other tools to run it in background

curl http://127.0.0.1:8000/switch-model -X POST -H "Content-Type: application/json" -d '{"model":"./RWKV-Runner/models/RWKV-4-World-0.1B-v1-20230520-ctx4096.pth","strategy":"cpu fp32"}'
19 deploy-examples/RWKV-Runner-WebUI/setup.bat (new file)
@@ -0,0 +1,19 @@
: install git python3.10 npm by yourself
: change model and strategy according to your hardware

git clone https://github.com/josStorer/RWKV-Runner --depth=1
python -m pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117
python -m pip install -r RWKV-Runner/backend-python/requirements.txt
cd RWKV-Runner/frontend
call npm ci
call npm run build
cd ..

: optional: set ngrok_token=YOUR_NGROK_TOKEN
start python ./backend-python/main.py --webui
start "C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe" "http://127.0.0.1:8000"

powershell -Command "(Test-Path ./models) -or (mkdir models)"
powershell -Command "Import-Module BitsTransfer"
powershell -Command "(Test-Path ./models/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth) -or (Start-BitsTransfer https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth ./models/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth)"
powershell -Command "Invoke-WebRequest http://127.0.0.1:8000/switch-model -Method POST -ContentType 'application/json' -Body '{\"model\":\"./models/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth\",\"strategy\":\"cuda fp32 *20+\",\"deploy\":\"true\"}'"
22 deploy-examples/RWKV-Runner-WebUI/setup.sh (new file)
@@ -0,0 +1,22 @@
# install git python3.10 npm by yourself
# change model and strategy according to your hardware

sudo apt install python3-dev

git clone https://github.com/josStorer/RWKV-Runner --depth=1
python3 -m pip install torch torchvision torchaudio
python3 -m pip install -r RWKV-Runner/backend-python/requirements.txt
cd RWKV-Runner/frontend
npm ci
npm run build
cd ..

# optional: export ngrok_token=YOUR_NGROK_TOKEN
python3 ./backend-python/main.py --webui > log.txt & # this is only an example, you should use screen or other tools to run it in background

if [ ! -d models ]; then
mkdir models
fi
wget -N https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-0.1B-v1-20230520-ctx4096.pth -P models/

curl http://127.0.0.1:8000/switch-model -X POST -H "Content-Type: application/json" -d '{"model":"./models/RWKV-4-World-0.1B-v1-20230520-ctx4096.pth","strategy":"cpu fp32","deploy":"true"}'
@@ -1,7 +1,5 @@
-{"text": "1:This is the first document."}
-{"text": "2:Hello\nWorld"}
-{"text": "3:1+1=2\n1+2=3\n2+2=4"}
-{"text": "4:You will be training the GPT version because it's paralleziable and faster to train."}
-{"text": "5:Read the inference code in src/model.py and try using the final hidden state(.xx .aa .bb)"}
-{"text": "6:You can fine-tune the model with longer ctxLen and it can quickly adapt to longer ctxLens."}
-{"text": "7:Consider RWKV 14B. The state has 200 vectors, that is, 5 vectors for each block: fp16 (xx), fp32 (aa), fp32 (bb), fp32 (pp), fp16 (xx)."}
+{"text": "The following is an epic science fiction masterpiece that is immortalized, with delicate descriptions and grand depictions of interstellar civilization wars.\nChapter 1.\nAs I sit down to write here amidst the shadows of vine-leaves under the blue sky of southern Italy, it comes to me with a certain quality of astonishment that my participation in these amazing adventures of Mr. Cavor was, after all, the outcome of the purest accident. It might have been any one. I fell into these things at a time when I thought myself removed from the slightest possibility of disturbing experiences. I had gone to Lympne because I had imagined it the most uneventful place in the world. “Here, at any rate,” said I, “I shall find peace and a chance to work!”"}
+{"text": "Translate the following into Chinese.\n\nEnglish: What rooms do you have available?\nChinese: 你们有哪些房间可以提供"}
+{"text": "User: Hello.\n\nAssistant: I'm here, meow~.\n\nUser: Can you tell some jokes?\n\nAssistant: Of course, master. What kind of jokes would you like to hear?"}
+{"text": "Instruction: Write a story using the following information\n\nInput: A man named Alex chops a tree down\n\nResponse: Once upon a time, there was a man named Alex who lived in the heart of the forest. He had always been fascinated by trees and spent most of his days exploring the forest and learning about its many wonders. One day, while wandering through the woods, he stumbled upon an old oak tree that stood tall and proud in the middle of a clearing."}
+{"text": "def get_args(args: Union[Sequence[str], None] = None):\n    parser = argparse.ArgumentParser()\n    group = parser.add_argument_group(title=\"server arguments\")\n    group.add_argument(\n        \"--port\",\n        type=int,\n        default=8000,\n        help=\"port to run the server on (default: 8000)\",\n    )\n    group.add_argument(\n        \"--host\",\n        type=str,\n        default=\"127.0.0.1\",\n        help=\"host to run the server on (default: 127.0.0.1)\",\n    )"}
@@ -1,3 +1,5 @@
+echo $@
+
if [[ ${cnMirror} == 1 ]]; then
export PIP_INDEX_URL="https://pypi.tuna.tsinghua.edu.cn/simple"
if grep -q "mirrors.aliyun.com" /etc/apt/sources.list; then
@@ -47,6 +49,10 @@ fi
echo "loading $loadModel"
modelInfo=$(python3 ./finetune/get_layer_and_embd.py $loadModel)
echo $modelInfo

-python3 ./finetune/lora/train.py $modelInfo $@ --proj_dir lora-models --data_type binidx --lora \
---lora_parts=att,ffn,time,ln --strategy deepspeed_stage_2 --accelerator gpu
+if [[ $modelInfo =~ "--n_layer" ]]; then
+python3 ./finetune/lora/train.py $modelInfo $@ --proj_dir lora-models --data_type binidx --lora \
+--lora_parts=att,ffn,time,ln --strategy deepspeed_stage_2 --accelerator gpu
+else
+echo "modelInfo is invalid"
+exit 1
+fi
@@ -246,5 +246,6 @@ if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        print(e)
+        with open("error.txt", "w") as f:
+            f.write(str(e))
1 finetune/lora/merge_lora.py (vendored)
@@ -64,5 +64,6 @@ try:

    torch.save(output_w, output)
except Exception as e:
    print(e)
+    with open("error.txt", "w") as f:
+        f.write(str(e))
16 finetune/lora/train.py (vendored)
@@ -184,7 +184,7 @@ if __name__ == "__main__":
    args.num_sanity_val_steps = 0
    args.check_val_every_n_epoch = int(1e20)
    args.log_every_n_steps = int(1e20)
-   args.max_epochs = -1 # continue forever
+   args.max_epochs = args.epoch_count # continue forever
    args.betas = (args.beta1, args.beta2)
    args.real_bsz = int(args.num_nodes) * int(args.devices) * args.micro_bsz
    os.environ["RWKV_T_MAX"] = str(args.ctx_len)
@@ -264,7 +264,7 @@ if __name__ == "__main__":
#
# Data = {args.data_file} ({args.data_type}), ProjDir = {args.proj_dir}
#
-# Epoch = {args.epoch_begin} to {args.epoch_begin + args.epoch_count - 1} (will continue afterwards), save every {args.epoch_save} epoch
+# Epoch = {args.epoch_begin} to {args.epoch_begin + args.epoch_count - 1}, save every {args.epoch_save} epoch
#
# Each "epoch" = {args.epoch_steps} steps, {samples_per_epoch} samples, {tokens_per_epoch} tokens
#
@@ -373,7 +373,7 @@ if __name__ == "__main__":
            for param in module.parameters():
                param.requires_grad = True
        elif enable_time_finetune and any(
-           n.startswith("time") for n, _ in module.named_parameters()
+           n.startswith("time") for n, _ in module.named_parameters()
        ):
            for pname, param in module.named_parameters():
                if pname.startswith("time"):
@@ -381,7 +381,7 @@ if __name__ == "__main__":
                    param.requires_grad = True

    if (
-       len(args.load_model) == 0 or args.my_pile_stage == 1
+       len(args.load_model) == 0 or args.my_pile_stage == 1
    ): # shall we build the initial weights?
        init_weight_name = f"{args.proj_dir}/rwkv-init.pth"
        generate_init_weight(model, init_weight_name) # save initial weights
@@ -423,8 +423,8 @@ if __name__ == "__main__":
    )

    if (
-       args.lr_init > 1e-4
-       or trainer.world_size * args.micro_bsz * trainer.accumulate_grad_batches < 8
+       args.lr_init > 1e-4
+       or trainer.world_size * args.micro_bsz * trainer.accumulate_grad_batches < 8
    ):
        if "I_KNOW_WHAT_IM_DOING" in os.environ:
            if trainer.global_rank == 0:
@@ -459,10 +459,10 @@

    if "deepspeed" in args.strategy:
        trainer.strategy.config["zero_optimization"]["allgather_bucket_size"] = (
-           args.ds_bucket_mb * 1000 * 1000
+           args.ds_bucket_mb * 1000 * 1000
        )
        trainer.strategy.config["zero_optimization"]["reduce_bucket_size"] = (
-           args.ds_bucket_mb * 1000 * 1000
+           args.ds_bucket_mb * 1000 * 1000
        )

    # must set shuffle=False, persistent_workers=False (because worker is in another thread)
@@ -1,3 +1,3 @@
torch==1.13.1
pytorch_lightning==1.9.5
-deepspeed
+deepspeed==0.11.2
@@ -1,9 +1,10 @@
<!DOCTYPE html>
<html lang="en">
<head>
-    <meta charset="UTF-8"/>
-    <meta content="width=device-width, initial-scale=1.0" name="viewport"/>
-    <title>RWKV-Runner</title>
+    <meta charset="UTF-8" />
+    <meta content="width=device-width, initial-scale=1.0" name="viewport" />
+    <title>RWKV-Runner</title>
+    <link href="./src/assets/images/logo.png" rel="icon" type="image/x-icon">
</head>
<body>
<div id="root"></div>
946 frontend/package-lock.json (generated; diff suppressed because it is too large)
@@ -16,15 +16,17 @@
"@primer/octicons-react": "^19.1.0",
"chart.js": "^4.3.0",
"classnames": "^2.3.2",
-"github-markdown-css": "^5.2.0",
+"file-saver": "^2.0.5",
"html-midi-player": "^1.5.0",
"i18next": "^22.4.15",
"mobx": "^6.9.0",
"mobx-react-lite": "^3.4.3",
+"pdfjs-dist": "^4.0.189",
"react": "^18.2.0",
"react-beautiful-dnd": "^13.1.1",
"react-chartjs-2": "^5.2.0",
"react-dom": "^18.2.0",
+"react-draggable": "^4.4.6",
"react-i18next": "^12.2.2",
"react-markdown": "^8.0.7",
"react-router": "^6.11.1",

@@ -38,6 +40,7 @@
"uuid": "^9.0.0"
},
"devDependencies": {
+"@types/file-saver": "^2.0.7",
"@types/react": "^18.2.6",
"@types/react-beautiful-dnd": "^13.1.4",
"@types/react-dom": "^18.2.4",

@@ -49,6 +52,7 @@
"sass": "^1.62.1",
"tailwindcss": "^3.3.2",
"typescript": "^5.0.4",
-"vite": "^4.3.6"
+"vite": "^4.3.6",
+"vite-plugin-top-level-await": "^1.3.1"
}
}
@@ -26,18 +26,22 @@
import { FluentProvider, Tab, TabList, webDarkTheme, webLightTheme } from '@fluentui/react-components';
import { FC, useEffect, useState } from 'react';
import { Route, Routes, useLocation, useNavigate } from 'react-router';
-import { pages } from './pages';
+import { pages as clientPages } from './pages';
import { useMediaQuery } from 'usehooks-ts';
import commonStore from './stores/commonStore';
import { observer } from 'mobx-react-lite';
import { useTranslation } from 'react-i18next';
import { CustomToastContainer } from './components/CustomToastContainer';
+import { LazyImportComponent } from './components/LazyImportComponent';

const App: FC = observer(() => {
  const { t } = useTranslation();
  const navigate = useNavigate();
  const location = useLocation();
  const mq = useMediaQuery('(min-width: 640px)');
+  const pages = commonStore.platform === 'web' ? clientPages.filter(page =>
+    !['/configs', '/models', '/downloads', '/train', '/about'].some(path => page.path === path)
+  ) : clientPages;

  const [path, setPath] = useState<string>(pages[0].path);
@@ -47,10 +51,10 @@ const App: FC = observer(() => {
  useEffect(() => setPath(location.pathname), [location]);

  return (
-    <FluentProvider className="h-screen"
+    <FluentProvider
      theme={commonStore.settings.darkMode ? webDarkTheme : webLightTheme}
      data-theme={commonStore.settings.darkMode ? 'dark' : 'light'}>
-      <div className="flex h-full">
+      <div className="flex h-screen">
        <div className="flex flex-col w-16 sm:w-48 p-2 justify-between">
          <TabList
            size="large"
@@ -82,7 +86,7 @@ const App: FC = observer(() => {
        <div className="h-full w-full p-2 box-border overflow-y-hidden">
          <Routes>
            {pages.map(({ path, element }, index) => (
-              <Route key={`${path}-${index}`} path={path} element={element} />
+              <Route key={`${path}-${index}`} path={path} element={<LazyImportComponent lazyChildren={element} />} />
            ))}
          </Routes>
        </div>
@@ -13,7 +13,7 @@
"Working": "動作中",
"Stop": "停止",
"Enable High Precision For Last Layer": "最後の層で高精度を有効にする",
-"Stored Layers": "メモリ層読み込み",
+"Stored Layers": "保存されるレイヤー",
"Precision": "精度",
"Device": "デバイス",
"Convert model with these configs. Using a converted model will greatly improve the loading speed, but model parameters of the converted model cannot be modified.": "これらの設定でモデルを変換します。変換されたモデルを使用すると、読み込み速度が大幅に向上しますが、変換したモデルのパラメータを変更することはできません。",

@@ -82,7 +82,7 @@
"Just like feeding sedatives to the model. Consider the results of the top n% probability mass, 0.1 considers the top 10%, with higher quality but more conservative, 1 considers all results, with lower quality but more diverse.": "モデルに鎮静剤を与えるようなもの。上位n%の確率質量の結果を考えてみてください。0.1は上位10%を考えており、質が高いが保守的で、1は全ての結果を考慮しており、質は低いが多様性があります。",
"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.": "ポジティヴ値は、新しいトークンが今までのテキストに出現していたかどうかに基づいてこれらをペナルティとし、新しいトピックについて話す可能性を増加させます。",
"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "ポジティブ値は、新しいトークンが既存のテキストでどれだけ頻繁に使われているかに基づいてペナルティを与え、モデルが同じ行を完全に繰り返す可能性を減らします。",
-"int8 uses less VRAM, but has slightly lower quality. fp16 has higher quality, and fp32 has the best quality.": "int8はVRAMの使用量が少ないですが、質が若干低いです。fp16は高品質、fp32は最高品質です。",
+"int8 uses less VRAM, but has slightly lower quality. fp16 has higher quality.": "int8はVRAMの使用量が少ないですが、質が若干低いです。fp16は高品質。",
"Number of the neural network layers loaded into VRAM, the more you load, the faster the speed, but it consumes more VRAM. (If your VRAM is not enough, it will fail to load)": "VRAMにロードされるニューラルネットワークの層の数。ロードする量が多いほど速度は速くなりますが、VRAMを多く消費します。(VRAMが不足している場合、ロードに失敗します)",
"Whether to use CPU to calculate the last output layer of the neural network with FP32 precision to obtain better quality.": "ネットワークの最終出力層をFP32精度で計算するためにCPUを使用するかどうか。",
"Downloads": "ダウンロード",

@@ -100,7 +100,7 @@
"Model Config Exception": "モデル設定例外",
"Use Gitee Updates Source": "Gitee更新ソースを使用",
"Use Custom CUDA kernel to Accelerate": "カスタムCUDAカーネルを使用して加速",
-"Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues. If it fails to start, please turn off this option.": "このオプションを有効にすると、推論速度が大幅に向上し、一部のVRAMを節約できますが、互換性の問題が生じる可能性があります。起動に失敗した場合は、このオプションをオフにしてください。",
+"Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues (output garbled). If it fails to start, please turn off this option, or try to upgrade your gpu driver.": "このオプションを有効にすると、推論速度が大幅に向上し、一部のVRAMを節約できますが、互換性の問題 (文字化けを出力する) が生じる可能性があります。起動に失敗した場合は、このオプションを無効にするか、GPUドライバーをアップグレードしてみてください。",
"Supported custom cuda file not found": "対応しているカスタムCUDAファイルが見つかりません",
"Failed to copy custom cuda file": "カスタムCUDAファイルのコピーに失敗しました",
"Downloading update, please wait. If it is not completed, please manually download the program from GitHub and replace the original program.": "更新をダウンロード中です、お待ちください。完了しない場合は、GitHubから手動でプログラムをダウンロードし、元のプログラムを置き換えてください。",
@@ -128,6 +128,7 @@
"Chinese Kongfu": "中国武術",
"Allow external access to the API (service must be restarted)": "APIへの外部アクセスを許可する (サービスを再起動する必要があります)",
"Custom": "カスタム",
+"CUDA (Beta, Faster)": "CUDA (ベータ、高速)",
"Reset All Configs": "すべての設定をリセット",
"Cancel": "キャンセル",
"Confirm": "確認",
@@ -177,7 +178,7 @@
"Failed to import. Please copy a preset to the clipboard.": "インポートに失敗しました。プリセットをクリップボードにコピーしてください。",
"Clipboard is empty.": "クリップボードが空です。",
"Successfully copied to clipboard.": "クリップボードにコピーしました。",
-"Edit Messages": "メッセージの編集",
+"Edit Character Settings": "キャラクター設定を編集",
"Go Back": "戻る",
"Description": "説明",
"Avatar Url": "アバターURL",
@@ -225,14 +226,14 @@
"Please select a LoRA model": "LoRAモデルを選択してください",
"You are using sample data for training. For formal training, please make sure to create your own jsonl file.": "トレーニングにはサンプルデータを使用しています。正式なトレーニングのためには、自身でjsonlファイルを作成してください。",
"WSL is not running, please retry. If it keeps happening, it means you may be using an outdated version of WSL, run \"wsl --update\" to update.": "WSLが実行されていません、もう一度試してください。これが続く場合、古いバージョンのWSLを使用している可能性があります。\"wsl --update\"を実行して更新してください。",
-"Memory is not enough, try to increase the virtual memory or use a smaller base model.": "メモリが不足しています、仮想メモリを増やすか小さなベースモデルを使用してみてください。",
+"Memory is not enough, try to increase the virtual memory (Swap of WSL) or use a smaller base model.": "メモリが不足しています、仮想メモリ (WSL Swap) を増やすか小さなベースモデルを使用してみてください。",
"VRAM is not enough": "ビデオRAMが不足しています",
"Training data is not enough, reduce context length or add more data for training": "トレーニングデータが不足しています、コンテキストの長さを減らすか、トレーニング用のデータをさらに追加してください",
"You are using WSL 1 for training, please upgrade to WSL 2. e.g. Run \"wsl --set-version Ubuntu-22.04 2\"": "トレーニングにWSL 1を使用しています、WSL 2にアップグレードしてください。例:\"wsl --set-version Ubuntu-22.04 2\"を実行する",
"Can not find an Nvidia GPU. Perhaps the gpu driver of windows is too old, or you are using WSL 1 for training, please upgrade to WSL 2. e.g. Run \"wsl --set-version Ubuntu-22.04 2\"": "Nvidia GPUが見つかりません。WindowsのGPUドライバが古すぎるか、トレーニングにWSL 1を使用している可能性があります。WSL 2にアップグレードしてください。例:\"wsl --set-version Ubuntu-22.04 2\"を実行してください",
"Matched CUDA is not installed": "対応するCUDAがインストールされていません",
"Failed to convert data": "データの変換に失敗しました",
"Failed to merge model": "モデルのマージに失敗しました",
-"The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\nWhen you provide a directory path, all the txt files within that directory will be automatically converted into training data. This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\nThe jsonl format file can be referenced at https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl.\nYou can also write it similar to OpenAI's playground format, as shown in https://platform.openai.com/playground/p/default-chat.\nEven for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. If they are different dialogues or topics, they should be written in separate lines.": "データのパスはディレクトリまたはjsonl形式のファイルでなければなりません(将来的にはより多くの形式がサポートされる予定です)。ディレクトリパスを提供した場合、そのディレクトリ内のすべてのtxtファイルが自動的にトレーニングデータに変換されます。これは大規模なライティング、コード生成、または知識ベースのトレーニングで一般的に使用されます。jsonl形式のファイルは、https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl を参照してください。\nhttps://platform.openai.com/playground/p/default-chat のように、OpenAIのプレイグラウンド形式に似た形式で書くこともできます。複数ターンの対話であっても、一行で書く必要があり、行の区切りを示すために`\\n`を使用します。それらが異なる対話やトピックであれば、それらは別々の行に書かれるべきです。",
+"The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\nWhen you provide a directory path, all the txt files within that directory will be automatically converted into training data. This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\nThe jsonl format file can be referenced at https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl.\nYou can also write it similar to OpenAI's playground format, as shown in https://platform.openai.com/playground/p/default-chat.\nEven for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. If they are different dialogues or topics, they should be written in separate lines.": "データのパスはディレクトリまたはjsonl形式のファイルでなければなりません(将来的にはより多くの形式がサポートされる予定です)。ディレクトリパスを提供した場合、そのディレクトリ内のすべてのtxtファイルが自動的にトレーニングデータに変換されます。これは大規模なライティング、コード生成、または知識ベースのトレーニングで一般的に使用されます。jsonl形式のファイルは、https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl を参照してください。\nhttps://platform.openai.com/playground/p/default-chat のように、OpenAIのプレイグラウンド形式に似た形式で書くこともできます。複数ターンの対話であっても、一行で書く必要があり、行の区切りを示すために`\\n`を使用します。それらが異なる対話やトピックであれば、それらは別々の行に書かれるべきです。",
"Size mismatch for blocks. You are attempting to continue training from the LoRA model, but it does not match the base model. Please set LoRA model to None.": "ブロックのサイズが一致しません。LoRAモデルからトレーニングを続けようとしていますが、それはベースモデルと一致しません。LoRAモデルをNoneに設定してください。",
"Instruction: Write a story using the following information\n\nInput: A man named Alex chops a tree down\n\nResponse:": "Instruction: Write a story using the following information\n\nInput: アレックスという男が木を切り倒す\n\nResponse:",
"Composition": "作曲",
@@ -240,5 +241,64 @@
"Auto Play At The End": "最後に自動再生",
"No File to save": "保存するファイルがありません",
"File Saved": "ファイルが保存されました",
-"Failed to load local sound font, please check if the files exist - assets/sound-font": "ローカルサウンドフォントの読み込みに失敗しました、ファイルが存在するか確認してください - assets/sound-font"
+"Failed to load local sound font, please check if the files exist - assets/sound-font": "ローカルサウンドフォントの読み込みに失敗しました、ファイルが存在するか確認してください - assets/sound-font",
+"Please convert model to safe tensors format first": "モデルを安全なテンソル形式に変換してください",
+"Convert To Safe Tensors Format": "安全なテンソル形式に変換",
+"Please change Strategy to WebGPU to use safetensors format": "StrategyをWebGPUに変更して、安全なテンソル形式を使用してください",
+"Preview Only": "プレビューのみ",
+"RAM": "RAM",
+"VRAM": "VRAM",
+"GPU Usage": "GPU使用率",
+"Use Custom Tokenizer": "カスタムトークナイザーを使用する",
+"Tokenizer Path (e.g. backend-python/rwkv_pip/20B_tokenizer.json)": "トークナイザーパス (例: backend-python/rwkv_pip/20B_tokenizer.json)",
+"User Name": "ユーザー名",
+"Assistant Name": "アシスタント名",
+"Insert default system prompt at the beginning": "最初にデフォルトのシステムプロンプトを挿入",
+"Format Content": "内容フォーマットの規格化",
+"Add An Attachment (Accepts pdf, txt)": "添付ファイルを追加 (pdf, txtを受け付けます)",
+"Uploading Attachment": "添付ファイルアップロード中",
+"Remove Attachment": "添付ファイルを削除",
+"The content of file": "ファイル",
+"is as follows. When replying to me, consider the file content and respond accordingly:": "の内容は以下の通りです。私に返信する際は、ファイルの内容を考慮して適切に返信してください:",
+"What's the file name": "ファイル名は何ですか",
+"The file name is: ": "ファイル名は次のとおりです: ",
+"Port is occupied. Change it in Configs page or close the program that occupies the port.": "ポートが占有されています。設定ページで変更するか、ポートを占有しているプログラムを終了してください。",
+"Loading...": "読み込み中...",
+"Hello, what can I do for you?": "こんにちは、何かお手伝いできますか?",
+"Enable WebUI": "WebUIを有効化",
+"Server is working on deployment mode, please close the terminal window manually": "サーバーはデプロイモードで動作しています、ターミナルウィンドウを手動で閉じてください",
+"Server is working on deployment mode, please exit the program manually to stop the server": "サーバーはデプロイモードで動作しています、サーバーを停止するにはプログラムを手動で終了してください",
+"You can increase the number of stored layers in Configs page to improve performance": "パフォーマンスを向上させるために、保存されるレイヤーの数を設定ページで増やすことができます",
+"Failed to load model, try to increase the virtual memory (Swap of WSL) or use a smaller base model.": "モデルの読み込みに失敗しました、仮想メモリ (WSL Swap) を増やすか小さなベースモデルを使用してみてください。",
+"Save Conversation": "会話を保存",
+"Use Hugging Face Mirror": "Hugging Faceミラーを使用",
+"File is empty": "ファイルが空です",
+"Open MIDI Input Audio Tracks": "MIDI入力オーディオトラックを開く",
+"Track": "トラック",
+"Play All": "すべて再生",
+"Clear All": "すべてクリア",
+"Scale View": "スケールビュー",
+"Record": "録音",
+"Play": "再生",
+"New Track": "新規トラック",
+"Select a track to preview the content": "トラックを選択して内容をプレビュー",
+"Save to generation area": "生成エリアに保存",
+"Piano": "ピアノ",
+"Percussion": "パーカッション",
+"Drum": "ドラム",
+"Tuba": "チューバ",
+"Marimba": "マリンバ",
+"Bass": "ベース",
+"Guitar": "ギター",
+"Violin": "バイオリン",
+"Trumpet": "トランペット",
+"Sax": "サックス",
+"Flute": "フルート",
+"Lead": "リード",
+"Pad": "パッド",
+"MIDI Input": "MIDI入力",
+"Select the MIDI input device to be used.": "使用するMIDI入力デバイスを選択します。",
+"Start Time": "開始時間",
+"Content Duration": "内容の長さ",
+"Please select a MIDI device first": "まずMIDIデバイスを選択してください"
}
@@ -82,7 +82,7 @@
"Just like feeding sedatives to the model. Consider the results of the top n% probability mass, 0.1 considers the top 10%, with higher quality but more conservative, 1 considers all results, with lower quality but more diverse.": "就像给模型喂镇静剂. 考虑前 n% 概率质量的结果, 0.1 考虑前 10%, 质量更高, 但更保守, 1 考虑所有质量结果, 质量降低, 但更多样",
"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.": "存在惩罚. 正值根据新token在至今的文本中是否出现过, 来对其进行惩罚, 从而增加了模型涉及新话题的可能性",
"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "频率惩罚. 正值根据新token在至今的文本中出现的频率/次数, 来对其进行惩罚, 从而减少模型原封不动地重复相同句子的可能性",
-"int8 uses less VRAM, but has slightly lower quality. fp16 has higher quality, and fp32 has the best quality.": "int8占用显存更低, 但质量略微下降. fp16质量更好, fp32质量最好",
+"int8 uses less VRAM, but has slightly lower quality. fp16 has higher quality.": "int8占用显存更低, 但质量略微下降. fp16质量更好",
"Number of the neural network layers loaded into VRAM, the more you load, the faster the speed, but it consumes more VRAM. (If your VRAM is not enough, it will fail to load)": "载入显存的神经网络层数, 载入越多, 速度越快, 但显存消耗越大 (如果你的显存不够, 会载入失败)",
"Whether to use CPU to calculate the last output layer of the neural network with FP32 precision to obtain better quality.": "是否使用cpu以fp32精度计算神经网络的最后一层输出层, 以获得更好的质量",
"Downloads": "下载",

@@ -100,7 +100,7 @@
"Model Config Exception": "模型配置异常",
"Use Gitee Updates Source": "使用Gitee更新源",
"Use Custom CUDA kernel to Accelerate": "使用自定义CUDA算子加速",
-"Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues. If it fails to start, please turn off this option.": "开启这个选项能大大提升推理速度并节省显存,但可能存在兼容性问题,如果启动失败,请关闭此选项",
+"Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues (output garbled). If it fails to start, please turn off this option, or try to upgrade your gpu driver.": "开启这个选项能大大提升推理速度并节省显存,但可能存在兼容性(回复乱码)问题,如果发生相关问题,请关闭此选项。或更新你的显卡驱动",
"Supported custom cuda file not found": "没有找到支持的自定义cuda文件",
"Failed to copy custom cuda file": "自定义cuda文件复制失败",
"Downloading update, please wait. If it is not completed, please manually download the program from GitHub and replace the original program.": "正在下载更新,请等待。如果一直未完成,请从Github手动下载并覆盖原程序",
@@ -128,6 +128,7 @@
"Chinese Kongfu": "情境冒险",
"Allow external access to the API (service must be restarted)": "允许外部访问API (必须重启服务)",
"Custom": "自定义",
+"CUDA (Beta, Faster)": "CUDA (Beta, 更快)",
"Reset All Configs": "重置所有配置",
"Cancel": "取消",
"Confirm": "确认",
@@ -177,7 +178,7 @@
"Failed to import. Please copy a preset to the clipboard.": "导入失败。请复制一个预设到剪贴板",
"Clipboard is empty.": "剪贴板没有内容",
"Successfully copied to clipboard.": "成功复制到剪贴板",
-"Edit Messages": "编辑对话",
+"Edit Character Settings": "编辑人设",
"Go Back": "返回",
"Description": "描述",
"Avatar Url": "头像图片地址",
@@ -225,14 +226,14 @@
"Please select a LoRA model": "请选择一个LoRA模型",
"You are using sample data for training. For formal training, please make sure to create your own jsonl file.": "你正在使用示例数据训练,对于正式训练场合,请务必创建你自己的jsonl训练数据",
"WSL is not running, please retry. If it keeps happening, it means you may be using an outdated version of WSL, run \"wsl --update\" to update.": "WSL没有运行,请重试。如果一直出现此错误,意味着你可能正在使用旧版本的WSL,请在cmd执行\"wsl --update\"以更新",
-"Memory is not enough, try to increase the virtual memory or use a smaller base model.": "内存不足,尝试增加虚拟内存,或使用一个更小规模的基底模型",
+"Memory is not enough, try to increase the virtual memory (Swap of WSL) or use a smaller base model.": "内存不足,尝试增加虚拟内存(WSL Swap),或使用一个更小规模的基底模型",
"VRAM is not enough": "显存不足",
"Training data is not enough, reduce context length or add more data for training": "训练数据不足,请减小上下文长度或增加训练数据",
"You are using WSL 1 for training, please upgrade to WSL 2. e.g. Run \"wsl --set-version Ubuntu-22.04 2\"": "你正在使用WSL 1进行训练,请升级到WSL 2。例如,运行\"wsl --set-version Ubuntu-22.04 2\"",
"Can not find an Nvidia GPU. Perhaps the gpu driver of windows is too old, or you are using WSL 1 for training, please upgrade to WSL 2. e.g. Run \"wsl --set-version Ubuntu-22.04 2\"": "没有找到Nvidia显卡。可能是因为你的windows显卡驱动太旧,或者你正在使用WSL 1进行训练,请升级到WSL 2。例如,执行\"wsl --set-version Ubuntu-22.04 2\"",
"Matched CUDA is not installed": "未安装匹配的CUDA",
"Failed to convert data": "数据转换失败",
"Failed to merge model": "合并模型失败",
-"The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\nWhen you provide a directory path, all the txt files within that directory will be automatically converted into training data. This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\nThe jsonl format file can be referenced at https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl.\nYou can also write it similar to OpenAI's playground format, as shown in https://platform.openai.com/playground/p/default-chat.\nEven for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. If they are different dialogues or topics, they should be written in separate lines.": "数据路径必须是一个文件夹,或者jsonl格式文件 (未来会支持更多格式)\n\n当你填写的路径是一个文件夹时,该文件夹内的所有txt文件会被自动转换为训练数据,通常这用于大批量训练写作,代码生成或知识库\n\njsonl文件的格式参考 https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl\n你也可以仿照openai的playground编写,参考 https://platform.openai.com/playground/p/default-chat\n即使是多轮对话也必须写在一行,用`\\n`表示换行,如果是不同对话或主题,则另起一行",
+"The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\nWhen you provide a directory path, all the txt files within that directory will be automatically converted into training data. This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\nThe jsonl format file can be referenced at https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl.\nYou can also write it similar to OpenAI's playground format, as shown in https://platform.openai.com/playground/p/default-chat.\nEven for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. If they are different dialogues or topics, they should be written in separate lines.": "数据路径必须是一个文件夹,或者jsonl格式文件 (未来会支持更多格式)\n\n当你填写的路径是一个文件夹时,该文件夹内的所有txt文件会被自动转换为训练数据,通常这用于大批量训练写作,代码生成或知识库\n\njsonl文件的格式参考 https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl 以及 https://zhuanlan.zhihu.com/p/643433851\n你也可以仿照openai的playground编写,参考 https://platform.openai.com/playground/p/default-chat\n即使是多轮对话也必须写在一行,用`\\n`表示换行,如果是不同对话或主题,则另起一行",
"Size mismatch for blocks. You are attempting to continue training from the LoRA model, but it does not match the base model. Please set LoRA model to None.": "尺寸不匹配块。你正在尝试从LoRA模型继续训练,但该LoRA模型与基底模型不匹配,请将LoRA模型设为空",
"Instruction: Write a story using the following information\n\nInput: A man named Alex chops a tree down\n\nResponse:": "Instruction: Write a story using the following information\n\nInput: 艾利克斯砍倒了一棵树\n\nResponse:",
"Composition": "作曲",
@@ -240,5 +241,64 @@
"Auto Play At The End": "结束时自动播放",
"No File to save": "无文件可保存",
"File Saved": "文件已保存",
-"Failed to load local sound font, please check if the files exist - assets/sound-font": "加载本地音色资源失败,请检查文件是否存在 - assets/sound-font"
+"Failed to load local sound font, please check if the files exist - assets/sound-font": "加载本地音色资源失败,请检查文件是否存在 - assets/sound-font",
+"Please convert model to safe tensors format first": "请先将模型转换为Safetensors格式",
+"Convert To Safe Tensors Format": "转换为Safetensors格式",
+"Please change Strategy to WebGPU to use safetensors format": "请将Strategy改为WebGPU以使用safetensors格式",
+"Preview Only": "仅预览",
+"RAM": "内存",
+"VRAM": "显存",
+"GPU Usage": "GPU占用",
+"Use Custom Tokenizer": "使用自定义Tokenizer",
+"Tokenizer Path (e.g. backend-python/rwkv_pip/20B_tokenizer.json)": "Tokenizer路径 (例如: backend-python/rwkv_pip/20B_tokenizer.json)",
+"User Name": "用户名称",
+"Assistant Name": "AI名称",
+"Insert default system prompt at the beginning": "在开头自动插入默认系统提示",
+"Format Content": "规范格式",
+"Add An Attachment (Accepts pdf, txt)": "添加一个附件 (支持pdf, txt)",
+"Uploading Attachment": "正在上传附件",
+"Remove Attachment": "移除附件",
+"The content of file": "文件",
+"is as follows. When replying to me, consider the file content and respond accordingly:": "内容如下。回复时考虑文件内容并做出相应回复:",
+"What's the file name": "文件名是什么",
+"The file name is: ": "文件名是:",
+"Port is occupied. Change it in Configs page or close the program that occupies the port.": "端口被占用。请在配置页面更改端口,或关闭占用端口的程序",
+"Loading...": "加载中...",
+"Hello, what can I do for you?": "你好,有什么要我帮忙的吗?",
+"Enable WebUI": "启用WebUI",
+"Server is working on deployment mode, please close the terminal window manually": "服务器正在部署模式下运行,请手动关闭终端窗口",
+"Server is working on deployment mode, please exit the program manually to stop the server": "服务器正在部署模式下运行,请手动退出程序以停止服务器",
+"You can increase the number of stored layers in Configs page to improve performance": "你可以在配置页面增加载入显存层数以提升性能",
+"Failed to load model, try to increase the virtual memory (Swap of WSL) or use a smaller base model.": "模型载入失败,尝试增加虚拟内存(WSL Swap),或使用一个更小规模的基底模型",
+"Save Conversation": "保存对话",
+"Use Hugging Face Mirror": "使用Hugging Face镜像源",
+"File is empty": "文件为空",
+"Open MIDI Input Audio Tracks": "打开MIDI输入音轨",
+"Track": "音轨",
+"Play All": "播放全部",
+"Clear All": "清空",
+"Scale View": "缩放视图",
+"Record": "录制",
+"Play": "播放",
+"New Track": "新建音轨",
+"Select a track to preview the content": "选择一个音轨以预览内容",
+"Save to generation area": "保存到生成区",
+"Piano": "钢琴",
+"Percussion": "打击乐",
+"Drum": "鼓",
+"Tuba": "大号",
+"Marimba": "马林巴",
+"Bass": "贝斯",
+"Guitar": "吉他",
+"Violin": "小提琴",
+"Trumpet": "小号",
+"Sax": "萨克斯",
+"Flute": "长笛",
+"Lead": "主音",
+"Pad": "和音",
+"MIDI Input": "MIDI输入",
+"Select the MIDI input device to be used.": "选择要使用的MIDI输入设备",
+"Start Time": "开始时间",
+"Content Duration": "内容时长",
+"Please select a MIDI device first": "请先选择一个MIDI设备"
}
@@ -4,7 +4,7 @@ import { Dropdown, Option } from '@fluentui/react-components';
import commonStore from '../stores/commonStore';

export const ConfigSelector: FC<{ size?: 'small' | 'medium' | 'large' }> = observer(({ size }) => {
-  return <Dropdown size={size} style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
+  return <Dropdown size={size} style={{ minWidth: 0 }} listbox={{ style: { minWidth: 'fit-content' } }}
    value={commonStore.getCurrentModelConfig().name}
    selectedOptions={[commonStore.currentModelConfigIndex.toString()]}
    onOptionSelect={(_, data) => {
@@ -1,4 +1,4 @@
import { FC, ReactElement } from 'react';
import React, { FC, ReactElement } from 'react';
import {
Button,
Dialog,
@@ -11,7 +11,9 @@ import {
} from '@fluentui/react-components';
import { ToolTipButton } from './ToolTipButton';
import { useTranslation } from 'react-i18next';
import MarkdownRender from './MarkdownRender';
import { LazyImportComponent } from './LazyImportComponent';

const MarkdownRender = React.lazy(() => import('./MarkdownRender'));

export const DialogButton: FC<{
text?: string | null
@@ -45,7 +47,9 @@ export const DialogButton: FC<{
<DialogContent>
{
markdown ?
<MarkdownRender>{contentText}</MarkdownRender> :
<LazyImportComponent lazyChildren={MarkdownRender}>
{contentText}
</LazyImportComponent> :
contentText
}
</DialogContent>
24
frontend/src/components/LazyImportComponent.tsx
Normal file
@@ -0,0 +1,24 @@
import { FC, LazyExoticComponent, ReactNode, Suspense } from 'react';
import { useTranslation } from 'react-i18next';
import { Spinner } from '@fluentui/react-components';

interface LazyImportComponentProps {
lazyChildren: LazyExoticComponent<FC<any>>;
lazyProps?: any;
children?: ReactNode;
}

export const LazyImportComponent: FC<LazyImportComponentProps> = (props) => {
const { t } = useTranslation();

return (
<Suspense fallback={
<div className="flex justify-center items-center h-full w-full">
<Spinner size="huge" label={t('Loading...')} />
</div>}>
<props.lazyChildren {...props.lazyProps}>
{props.children}
</props.lazyChildren>
</Suspense>
);
};
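// Usage sketch (illustrative, not part of this diff): any React.lazy component
// can be wrapped so a localized Spinner renders while its chunk loads, e.g.
//   const MarkdownRender = React.lazy(() => import('./MarkdownRender'));
//   <LazyImportComponent lazyChildren={MarkdownRender}>{text}</LazyImportComponent>
// This mirrors how the DialogButton change above consumes the component.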
@@ -21,7 +21,7 @@ const Hyperlink: FC<any> = ({ href, children }) => {
);
};

export const MarkdownRender: FC<ReactMarkdownOptions> = (props) => {
const MarkdownRender: FC<ReactMarkdownOptions> = (props) => {
return (
<div dir="auto" className="markdown-body">
<ReactMarkdown

@@ -40,6 +40,8 @@ export const ReadButton: FC<{
voice = voices.find((v) => v.name.toLowerCase().includes('microsoft aria'));
else if (lang === 'zh')
voice = voices.find((v) => v.name.toLowerCase().includes('xiaoyi'));
else if (lang === 'ja')
voice = voices.find((v) => v.name.toLowerCase().includes('nanami'));
if (!voice) voice = voices.find((v) => v.lang.substring(0, 2) === lang);
if (!voice) voice = voices.find((v) => v.lang === navigator.language);
@@ -11,7 +11,7 @@ export const ResetConfigsButton: FC<{ afterConfirm?: () => void }> = ({ afterCon
return <DialogButton icon={<ArrowReset20Regular />} tooltip={t('Reset All Configs')} title={t('Reset All Configs')}
contentText={t('Are you sure you want to reset all configs? This will obtain the latest preset configs, but will override your custom configs and cannot be undone.')}
onConfirm={() => {
commonStore.setModelConfigs(commonStore.platform != 'darwin' ? defaultModelConfigs : defaultModelConfigsMac, false);
commonStore.setModelConfigs(commonStore.platform !== 'darwin' ? defaultModelConfigs : defaultModelConfigsMac, false);
commonStore.setCurrentConfigIndex(0, true);
afterConfirm?.();
}} />;
@@ -1,16 +1,23 @@
import React, { FC, MouseEventHandler, ReactElement } from 'react';
import commonStore, { ModelStatus } from '../stores/commonStore';
import { AddToDownloadList, CopyFile, FileExists, StartServer } from '../../wailsjs/go/backend_golang/App';
import {
AddToDownloadList,
FileExists,
IsPortAvailable,
StartServer,
StartWebGPUServer
} from '../../wailsjs/go/backend_golang/App';
import { Button } from '@fluentui/react-components';
import { observer } from 'mobx-react-lite';
import { exit, getStatus, readRoot, switchModel, updateConfig } from '../apis';
import { toast } from 'react-toastify';
import { checkDependencies, getStrategy, getSupportedCustomCudaFile, toastWithButton } from '../utils';
import { checkDependencies, getHfDownloadUrl, getStrategy, toastWithButton } from '../utils';
import { useTranslation } from 'react-i18next';
import { ToolTipButton } from './ToolTipButton';
import { Play16Regular, Stop16Regular } from '@fluentui/react-icons';
import { useNavigate } from 'react-router';
import { WindowShow } from '../../wailsjs/runtime/runtime';
import { WindowShow } from '../../wailsjs/runtime';
import { convertToSt } from '../utils/convert-to-st';

const mainButtonText = {
[ModelStatus.Offline]: 'Run',
@@ -39,6 +46,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
commonStore.setStatus({ status: ModelStatus.Starting });

const modelConfig = commonStore.getCurrentModelConfig();
const webgpu = modelConfig.modelParameters.device === 'WebGPU';
let modelName = '';
let modelPath = '';
if (modelConfig && modelConfig.modelParameters) {
@@ -50,10 +58,6 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
return;
}

const ok = await checkDependencies(navigate);
if (!ok)
return;

const currentModelSource = commonStore.modelSourceList.find(item => item.name === modelName);

const showDownloadPrompt = (promptInfo: string, downloadName: string) => {
@@ -64,18 +68,57 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
navigate({ pathname: '/downloads' });
},
{ autoClose: 3000 });
AddToDownloadList(modelPath, downloadUrl);
AddToDownloadList(modelPath, getHfDownloadUrl(downloadUrl));
} else {
toast(t('Can not find download url'), { type: 'error' });
}
});
};

if (webgpu) {
if (!['.st', '.safetensors'].some(ext => modelPath.endsWith(ext))) {
const stModelPath = modelPath.replace(/\.pth$/, '.st');
if (await FileExists(stModelPath)) {
modelPath = stModelPath;
} else if (!await FileExists(modelPath)) {
showDownloadPrompt(t('Model file not found'), modelName);
commonStore.setStatus({ status: ModelStatus.Offline });
return;
} else if (!currentModelSource?.isComplete) {
showDownloadPrompt(t('Model file download is not complete'), modelName);
commonStore.setStatus({ status: ModelStatus.Offline });
return;
} else {
toastWithButton(t('Please convert model to safe tensors format first'), t('Convert'), () => {
convertToSt(navigate, modelConfig);
});
commonStore.setStatus({ status: ModelStatus.Offline });
return;
}
}
}
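// Hedged reading of the WebGPU branch above: for a selected .pth model it
// first prefers an already-converted sibling .st file; if the .pth itself is
// missing it prompts a download, if the download is incomplete it prompts to
// finish it, and only then does it offer the safetensors conversion.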

if (!webgpu) {
if (['.st', '.safetensors'].some(ext => modelPath.endsWith(ext))) {
toast(t('Please change Strategy to WebGPU to use safetensors format'), { type: 'error' });
commonStore.setStatus({ status: ModelStatus.Offline });
return;
}
}

if (!webgpu) {
const ok = await checkDependencies(navigate);
if (!ok)
return;
}

if (!await FileExists(modelPath)) {
showDownloadPrompt(t('Model file not found'), modelName);
commonStore.setStatus({ status: ModelStatus.Offline });
return;
} else if (!currentModelSource?.isComplete) {
} else // If the user selects the .pth model with WebGPU mode, modelPath will be set to the .st model.
// However, if the .pth model is deleted, modelPath will exist and isComplete will be false.
if (!currentModelSource?.isComplete && modelPath.endsWith('.pth')) {
showDownloadPrompt(t('Model file download is not complete'), modelName);
commonStore.setStatus({ status: ModelStatus.Offline });
return;
@@ -83,9 +126,24 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean

const port = modelConfig.apiParameters.apiPort;

await exit(1000).catch(() => {
});
StartServer(commonStore.settings.customPythonPath, port, commonStore.settings.host !== '127.0.0.1' ? '0.0.0.0' : '127.0.0.1').catch((e) => {
if (!await IsPortAvailable(port)) {
await exit(1000).catch(() => {
});
if (!await IsPortAvailable(port)) {
toast(t('Port is occupied. Change it in Configs page or close the program that occupies the port.'), { type: 'error' });
commonStore.setStatus({ status: ModelStatus.Offline });
return;
}
}
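// Hedged note: when the port is taken, any previous backend instance is first
// asked to shut down via the exit() API call and the port is probed again;
// the "Port is occupied" error is only shown if it is still unavailable after
// that attempt.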

const startServer = webgpu ?
(_: string, port: number, host: string) => StartWebGPUServer(port, host)
: StartServer;
const isUsingCudaBeta = modelConfig.modelParameters.device === 'CUDA-Beta';

startServer(commonStore.settings.customPythonPath, port, commonStore.settings.host !== '127.0.0.1' ? '0.0.0.0' : '127.0.0.1',
!!modelConfig.enableWebUI, isUsingCudaBeta
).catch((e) => {
const errMsg = e.message || e;
if (errMsg.includes('path contains space'))
toast(`${t('Error')} - ${t('File Path Cannot Contain Space')}`, { type: 'error' });
@@ -93,6 +151,8 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
toast(t('Error') + ' - ' + errMsg, { type: 'error' });
});
setTimeout(WindowShow, 1000);
setTimeout(WindowShow, 2000);
setTimeout(WindowShow, 3000);

let timeoutCount = 6;
let loading = false;
@@ -102,41 +162,49 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
if (r.ok && !loading) {
loading = true;
clearInterval(intervalId);
await getStatus().then(status => {
if (status)
commonStore.setStatus(status);
});
if (!webgpu) {
await getStatus().then(status => {
if (status)
commonStore.setStatus(status);
});
}
commonStore.setStatus({ status: ModelStatus.Loading });
toast(t('Loading Model'), { type: 'info' });
updateConfig({
max_tokens: modelConfig.apiParameters.maxResponseToken,
temperature: modelConfig.apiParameters.temperature,
top_p: modelConfig.apiParameters.topP,
presence_penalty: modelConfig.apiParameters.presencePenalty,
frequency_penalty: modelConfig.apiParameters.frequencyPenalty
});
const loadingId = toast(t('Loading Model'), { type: 'info' });
if (!webgpu) {
updateConfig({
max_tokens: modelConfig.apiParameters.maxResponseToken,
temperature: modelConfig.apiParameters.temperature,
top_p: modelConfig.apiParameters.topP,
presence_penalty: modelConfig.apiParameters.presencePenalty,
frequency_penalty: modelConfig.apiParameters.frequencyPenalty
});
}

const strategy = getStrategy(modelConfig);
let customCudaFile = '';
if ((modelConfig.modelParameters.device === 'CUDA' || modelConfig.modelParameters.device === 'Custom')
if ((modelConfig.modelParameters.device.includes('CUDA') || modelConfig.modelParameters.device === 'Custom')
&& modelConfig.modelParameters.useCustomCuda && !strategy.includes('fp32')) {
if (commonStore.platform === 'windows') {
customCudaFile = getSupportedCustomCudaFile();
if (customCudaFile) {
FileExists('./py310/Lib/site-packages/rwkv/model.py').then((exist) => {
// defensive measure. As Python has already been launched, will only take effect the next time it runs.
if (!exist) CopyFile('./backend-python/wkv_cuda_utils/wkv_cuda_model.py', './py310/Lib/site-packages/rwkv/model.py');
});
await CopyFile(customCudaFile, './py310/Lib/site-packages/rwkv/wkv_cuda.pyd').catch(() => {
FileExists('./py310/Lib/site-packages/rwkv/wkv_cuda.pyd').then((exist) => {
if (!exist) {
customCudaFile = '';
toast(t('Failed to copy custom cuda file'), { type: 'error' });
}
});
});
} else
toast(t('Supported custom cuda file not found'), { type: 'warning' });
// this part is currently unused because there's no longer a need to use different kernels for different GPUs, but it might still be needed in the future
//
// customCudaFile = getSupportedCustomCudaFile(isUsingCudaBeta);
// if (customCudaFile) {
// let kernelTargetPath: string;
// if (isUsingCudaBeta)
// kernelTargetPath = './backend-python/rwkv_pip/beta/wkv_cuda.pyd';
// else
// kernelTargetPath = './backend-python/rwkv_pip/wkv_cuda.pyd';
// await CopyFile(customCudaFile, kernelTargetPath).catch(() => {
// FileExists(kernelTargetPath).then((exist) => {
// if (!exist) {
// customCudaFile = '';
// toast(t('Failed to copy custom cuda file'), { type: 'error' });
// }
// });
// });
// } else
// toast(t('Supported custom cuda file not found'), { type: 'warning' });
customCudaFile = 'any';
} else {
customCudaFile = 'any';
}
@@ -145,7 +213,9 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
switchModel({
model: modelPath,
strategy: strategy,
customCuda: customCudaFile !== ''
tokenizer: modelConfig.modelParameters.useCustomTokenizer ? modelConfig.modelParameters.customTokenizer : undefined,
customCuda: customCudaFile !== '',
deploy: modelConfig.enableWebUI
}).then(async (r) => {
if (r.ok) {
commonStore.setStatus({ status: ModelStatus.Working });
@@ -158,6 +228,12 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
const buttonFn = () => {
navigate({ pathname: '/' + buttonName.toLowerCase() });
};

if ((modelConfig.modelParameters.device === 'CUDA' || modelConfig.modelParameters.device === 'CUDA-Beta') &&
modelConfig.modelParameters.storedLayers < modelConfig.modelParameters.maxStoredLayers &&
commonStore.monitorData && commonStore.monitorData.totalVram !== 0 &&
(commonStore.monitorData.usedVram / commonStore.monitorData.totalVram) < 0.85)
toast(t('You can increase the number of stored layers in Configs page to improve performance'), { type: 'info' });
toastWithButton(t('Startup Completed'), t(buttonName), buttonFn, { type: 'success', autoClose: 3000 });
} else if (r.status === 304) {
toast(t('Loading Model'), { type: 'info' });
@@ -179,6 +255,8 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
}).catch((e) => {
commonStore.setStatus({ status: ModelStatus.Offline });
toast(t('Failed to switch model') + ' - ' + (e.message || e), { type: 'error' });
}).finally(() => {
toast.dismiss(loadingId);
});
}
}).catch(() => {
@@ -192,7 +270,13 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
}, 1000);
} else {
commonStore.setStatus({ status: ModelStatus.Offline });
exit();
exit().then(r => {
if (r.status === 403)
if (commonStore.platform !== 'linux')
toast(t('Server is working on deployment mode, please close the terminal window manually'), { type: 'info' });
else
toast(t('Server is working on deployment mode, please exit the program manually to stop the server'), { type: 'info' });
});
}
};
@@ -6,6 +6,7 @@ import { ConfigSelector } from './ConfigSelector';
import { RunButton } from './RunButton';
import { PresenceBadgeStatus } from '@fluentui/react-badge';
import { useTranslation } from 'react-i18next';
import { useMediaQuery } from 'usehooks-ts';

const statusText = {
[ModelStatus.Offline]: 'Offline',
@@ -23,15 +24,21 @@ const badgeStatus: { [modelStatus: number]: PresenceBadgeStatus } = {

export const WorkHeader: FC = observer(() => {
const { t } = useTranslation();
const mq = useMediaQuery('(min-width: 640px)');
const port = commonStore.getCurrentModelConfig().apiParameters.apiPort;

return (
return commonStore.platform === 'web' ?
<div /> :
<div className="flex flex-col gap-1">
<div className="flex justify-between items-center">
<div className="flex items-center gap-2">
<PresenceBadge status={badgeStatus[commonStore.status.status]} />
<Text size={100}>{t('Model Status') + ': ' + t(statusText[commonStore.status.status])}</Text>
</div>
{commonStore.lastModelName && mq &&
<Text size={100}>
{commonStore.lastModelName}
</Text>}
<div className="flex items-center gap-2">
<ConfigSelector size="small" />
<RunButton iconMode />
@@ -42,5 +49,5 @@ export const WorkHeader: FC = observer(() => {
</Text>
<Divider style={{ flexGrow: 0 }} />
</div>
);
;
});
@@ -1,3 +1,4 @@
import './webWails';
import React from 'react';
import { createRoot } from 'react-dom/client';
import './style.scss';
@@ -6,7 +7,6 @@ import App from './App';
import { HashRouter } from 'react-router-dom';
import { startup } from './startup';
import './_locales/i18n-react';
import 'html-midi-player';
import { WindowShow } from '../wailsjs/runtime';

startup().then(() => {
@@ -5,9 +5,7 @@ import MarkdownRender from '../components/MarkdownRender';
import { observer } from 'mobx-react-lite';
import commonStore from '../stores/commonStore';

export type AboutContent = { [lang: string]: string }

export const About: FC = observer(() => {
const About: FC = observer(() => {
const { t } = useTranslation();
const lang: string = commonStore.settings.language;

@@ -21,3 +19,5 @@ export const About: FC = observer(() => {
} />
);
});

export default About;
40
frontend/src/pages/AudiotrackManager/AudiotrackButton.tsx
Normal file
@@ -0,0 +1,40 @@
import React, { FC, lazy } from 'react';
import { useTranslation } from 'react-i18next';
import { Button, Dialog, DialogBody, DialogContent, DialogSurface, DialogTrigger } from '@fluentui/react-components';
import { CustomToastContainer } from '../../components/CustomToastContainer';
import { LazyImportComponent } from '../../components/LazyImportComponent';
import { flushMidiRecordingContent } from '../../utils';
import commonStore from '../../stores/commonStore';

const AudiotrackEditor = lazy(() => import('./AudiotrackEditor'));

export const AudiotrackButton: FC<{
size?: 'small' | 'medium' | 'large',
shape?: 'rounded' | 'circular' | 'square';
appearance?: 'secondary' | 'primary' | 'outline' | 'subtle' | 'transparent';
setPrompt: (prompt: string) => void;
}> = ({ size, shape, appearance, setPrompt }) => {
const { t } = useTranslation();

return <Dialog onOpenChange={(e, data) => {
if (!data.open) {
flushMidiRecordingContent();
commonStore.setRecordingTrackId('');
commonStore.setPlayingTrackId('');
}
}}>
<DialogTrigger disableButtonEnhancement>
<Button size={size} shape={shape} appearance={appearance}>
{t('Open MIDI Input Audio Tracks')}
</Button>
</DialogTrigger>
<DialogSurface style={{ paddingTop: 0, maxWidth: '90vw', width: 'fit-content' }}>
<DialogBody>
<DialogContent className="overflow-hidden">
<CustomToastContainer />
<LazyImportComponent lazyChildren={AudiotrackEditor} lazyProps={{ setPrompt }} />
</DialogContent>
</DialogBody>
</DialogSurface>
</Dialog>;
};
499
frontend/src/pages/AudiotrackManager/AudiotrackEditor.tsx
Normal file
@@ -0,0 +1,499 @@
import React, { FC, useEffect, useRef, useState } from 'react';
import { observer } from 'mobx-react-lite';
import { useTranslation } from 'react-i18next';
import Draggable from 'react-draggable';
import { ToolTipButton } from '../../components/ToolTipButton';
import { v4 as uuid } from 'uuid';
import {
Add16Regular,
ArrowAutofitWidth20Regular,
Delete16Regular,
MusicNote220Regular,
Pause16Regular,
Play16Filled,
Play16Regular,
Record16Regular,
Stop16Filled
} from '@fluentui/react-icons';
import { Button, Card, DialogTrigger, Slider, Text, Tooltip } from '@fluentui/react-components';
import { useWindowSize } from 'usehooks-ts';
import commonStore from '../../stores/commonStore';
import classnames from 'classnames';
import {
InstrumentTypeNameMap,
InstrumentTypeTokenMap,
MidiMessage,
tracksMinimalTotalTime
} from '../../types/composition';
import { toast } from 'react-toastify';
import { ToastOptions } from 'react-toastify/dist/types';
import { flushMidiRecordingContent, refreshTracksTotalTime } from '../../utils';
import { PlayNote } from '../../../wailsjs/go/backend_golang/App';
import { t } from 'i18next';

const snapValue = 25;
const minimalMoveTime = 8; // 1000/125=8ms wait_events=125
const scaleMin = 0.05;
const scaleMax = 3;
const baseMoveTime = Math.round(minimalMoveTime / scaleMin);
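// Assumed arithmetic for the constants above (derived, not stated in the diff):
// baseMoveTime = round(8 / 0.05) = 160, so one 25px snap on the timeline
// represents baseMoveTime * scale milliseconds at the current zoom level.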

const velocityEvents = 128;
const velocityBins = 12;
const velocityExp = 0.5;

const minimalTrackWidth = 80;
const trackInitOffsetPx = 10;
const pixelFix = 0.5;
const topToArrowIcon = 19;
const arrowIconToTracks = 23;

type TrackProps = {
id: string;
right: number;
scale: number;
isSelected: boolean;
onSelect: (id: string) => void;
};

const displayCurrentInstrumentType = () => {
const displayPanelId = 'instrument_panel_id';
const content: React.ReactNode =
<div className="flex gap-2 items-center">
{InstrumentTypeNameMap.map((name, i) =>
<Text key={name} style={{ whiteSpace: 'nowrap' }}
className={commonStore.instrumentType === i ? 'text-blue-600' : ''}
weight={commonStore.instrumentType === i ? 'bold' : 'regular'}
size={commonStore.instrumentType === i ? 300 : 100}
>{t(name)}</Text>)}
</div>;
const options: ToastOptions = {
type: 'default',
autoClose: 2000,
toastId: displayPanelId,
position: 'top-left',
style: {
width: 'fit-content'
}
};
if (toast.isActive(displayPanelId))
toast.update(displayPanelId, {
render: content,
...options
});
else
toast(content, options);
};

const velocityToBin = (velocity: number) => {
velocity = Math.max(0, Math.min(velocity, velocityEvents - 1));
const binsize = velocityEvents / (velocityBins - 1);
return Math.ceil((velocityEvents * ((Math.pow(velocityExp, (velocity / velocityEvents)) - 1.0) / (velocityExp - 1.0))) / binsize);
};
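// Sketch of the curve above, assuming velocityEvents=128, velocityBins=12 and
// velocityExp=0.5: binsize = 128/11 and
//   bin(v) = ceil(128 * (0.5^(v/128) - 1) / (0.5 - 1) / binsize)
// e.g. bin(0) = 0, bin(64) = 7, bin(127) = 11, compressing the 128 raw MIDI
// velocity levels into 12 bins with slightly finer resolution at low velocities.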

const midiMessageToToken = (msg: MidiMessage) => {
if (msg.messageType === 'NoteOn' || msg.messageType === 'NoteOff') {
const instrument = InstrumentTypeTokenMap[commonStore.instrumentType];
const note = msg.note.toString(16);
const velocity = velocityToBin(msg.velocity).toString(16);
return `${instrument}:${note}:${velocity} `;
} else if (msg.messageType === 'ElapsedTime') {
let time = Math.round(msg.value / minimalMoveTime);
const num = Math.floor(time / 125); // wait_events=125
time -= num * 125;
let ret = '';
for (let i = 0; i < num; i++) {
ret += 't125 ';
}
if (time > 0)
ret += `t${time} `;
return ret;
} else
return '';
};
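// Token format sketch (inferred from the code above): a NoteOn/NoteOff becomes
// "<instrument-token>:<note-hex>:<velocity-bin-hex> ", and an ElapsedTime of
// N ms becomes round(N/8) ticks emitted as "t125 " chunks plus a remainder,
// e.g. 1030 ms -> 129 ticks -> "t125 t4 ".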

let dropRecordingTime = false;

export const midiMessageHandler = async (data: MidiMessage) => {
if (data.messageType === 'ControlChange') {
commonStore.setInstrumentType(Math.round(data.value / 127 * (InstrumentTypeNameMap.length - 1)));
displayCurrentInstrumentType();
return;
}
if (commonStore.recordingTrackId) {
if (dropRecordingTime && data.messageType === 'ElapsedTime') {
dropRecordingTime = false;
return;
}
data = {
...data,
instrument: commonStore.instrumentType
};
commonStore.setRecordingRawContent([...commonStore.recordingRawContent, data]);
commonStore.setRecordingContent(commonStore.recordingContent + midiMessageToToken(data));

//TODO data.channel = data.instrument;
PlayNote(data);
}
};

const Track: React.FC<TrackProps> = observer(({
id,
right,
scale,
isSelected,
onSelect
}) => {
const { t } = useTranslation();
const trackIndex = commonStore.tracks.findIndex(t => t.id === id)!;
const track = commonStore.tracks[trackIndex];
const trackClass = isSelected ? 'bg-blue-600' : 'bg-gray-700';
const controlX = useRef(0);

return (
<Draggable
axis="x"
bounds={{ left: 0, right }}
grid={[snapValue, snapValue]}
position={{
x: (track.offsetTime - commonStore.trackCurrentTime) / (baseMoveTime * scale) * snapValue,
y: 0
}}
onStart={(e, data) => {
controlX.current = data.lastX;
}}
onStop={(e, data) => {
const delta = data.lastX - controlX.current;
let offsetTime = Math.round(Math.round(delta / snapValue * baseMoveTime * scale) / minimalMoveTime) * minimalMoveTime;
offsetTime = Math.min(Math.max(
offsetTime,
-track.offsetTime), commonStore.trackTotalTime - track.offsetTime);

const tracks = commonStore.tracks.slice();
tracks[trackIndex].offsetTime += offsetTime;
commonStore.setTracks(tracks);
refreshTracksTotalTime();
}}
>
<div
className={`p-1 cursor-move rounded whitespace-nowrap overflow-hidden ${trackClass}`}
style={{
width: `${Math.max(minimalTrackWidth,
track.contentTime / (baseMoveTime * scale) * snapValue
)}px`
}}
onClick={() => onSelect(id)}
>
<span className="text-white">{t('Track') + ' ' + (track.content || id)}</span>
</div>
</Draggable>
);
});

const AudiotrackEditor: FC<{ setPrompt: (prompt: string) => void }> = observer(({ setPrompt }) => {
const { t } = useTranslation();

const viewControlsContainerRef = useRef<HTMLDivElement>(null);
const currentTimeControlRef = useRef<HTMLDivElement>(null);
const playStartTimeControlRef = useRef<HTMLDivElement>(null);
const tracksEndLineRef = useRef<HTMLDivElement>(null);
const tracksRef = useRef<HTMLDivElement>(null);
const toolbarRef = useRef<HTMLDivElement>(null);
const toolbarButtonRef = useRef<HTMLDivElement>(null);
const toolbarSliderRef = useRef<HTMLInputElement>(null);
const contentPreviewRef = useRef<HTMLDivElement>(null);

const [refreshRef, setRefreshRef] = useState(false);

const windowSize = useWindowSize();
const scale = (scaleMin + scaleMax) - commonStore.trackScale;
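// Hedged note: the slider stores trackScale directly, so the effective zoom
// factor is mirrored here as (scaleMin + scaleMax) - trackScale; moving the
// slider toward scaleMax shrinks `scale`, i.e. fewer ms per 25px snap and a
// more zoomed-in timeline.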

const [selectedTrackId, setSelectedTrackId] = useState<string>('');
const playStartTimeControlX = useRef(0);
const selectedTrack = selectedTrackId ? commonStore.tracks.find(t => t.id === selectedTrackId) : undefined;

useEffect(() => {
if (toolbarSliderRef.current && toolbarSliderRef.current.parentElement)
toolbarSliderRef.current.parentElement.style.removeProperty('--fui-Slider--steps-percent');
}, []);

const scrollContentToBottom = () => {
if (contentPreviewRef.current)
contentPreviewRef.current.scrollTop = contentPreviewRef.current.scrollHeight;
};

useEffect(() => {
scrollContentToBottom();
}, [commonStore.recordingContent]);

useEffect(() => {
setRefreshRef(!refreshRef);
}, [windowSize, commonStore.tracks]);

const viewControlsContainerWidth = (toolbarRef.current && toolbarButtonRef.current && toolbarSliderRef.current) ?
toolbarRef.current.clientWidth - toolbarButtonRef.current.clientWidth - toolbarSliderRef.current.clientWidth - 16 // 16 = ml-2 mr-2
: 0;
const tracksWidth = viewControlsContainerWidth;
const timeOfTracksWidth = Math.floor(tracksWidth / snapValue) // number of moves
* baseMoveTime * scale;
const currentTimeControlWidth = (timeOfTracksWidth < commonStore.trackTotalTime)
? timeOfTracksWidth / commonStore.trackTotalTime * viewControlsContainerWidth
: 0;
const playStartTimeControlPosition = (commonStore.trackPlayStartTime - commonStore.trackCurrentTime) / (baseMoveTime * scale) * snapValue;
const tracksEndPosition = (commonStore.trackTotalTime - commonStore.trackCurrentTime) / (baseMoveTime * scale) * snapValue;
const moveableTracksWidth = (tracksEndLineRef.current && viewControlsContainerRef.current &&
((tracksEndLineRef.current.getBoundingClientRect().left - (viewControlsContainerRef.current.getBoundingClientRect().left + trackInitOffsetPx)) > 0))
? tracksEndLineRef.current.getBoundingClientRect().left - (viewControlsContainerRef.current.getBoundingClientRect().left + trackInitOffsetPx)
: Infinity;

return (
<div className="flex flex-col gap-2 overflow-hidden" style={{ width: '80vw', height: '80vh' }}>
<div className="mx-auto">
<Text size={100}>{`${commonStore.trackPlayStartTime} ms / ${commonStore.trackTotalTime} ms`}</Text>
</div>
<div className="flex pb-2 border-b" ref={toolbarRef}>
<div className="flex gap-2" ref={toolbarButtonRef}>
<ToolTipButton disabled desc={t('Play All') + ' (Unavailable)'} icon={<Play16Regular />} />
<ToolTipButton desc={t('Clear All')} icon={<Delete16Regular />} onClick={() => {
commonStore.setTracks([]);
commonStore.setTrackScale(1);
commonStore.setTrackTotalTime(tracksMinimalTotalTime);
commonStore.setTrackCurrentTime(0);
commonStore.setTrackPlayStartTime(0);
}} />
</div>
<div className="grow">
<div className="flex flex-col ml-2 mr-2" ref={viewControlsContainerRef}>
<div className="relative">
<Tooltip content={`${commonStore.trackTotalTime} ms`} showDelay={0} hideDelay={0}
relationship="description">
<div className="border-l absolute"
ref={tracksEndLineRef}
style={{
height: (tracksRef.current && commonStore.tracks.length > 0)
? tracksRef.current.clientHeight - arrowIconToTracks
: 0,
top: `${topToArrowIcon + arrowIconToTracks}px`,
left: `${tracksEndPosition + trackInitOffsetPx - pixelFix}px`
}} />
</Tooltip>
</div>
<Draggable axis="x" bounds={{
left: 0,
right: viewControlsContainerWidth - currentTimeControlWidth
}}
position={{
x: commonStore.trackCurrentTime / commonStore.trackTotalTime * viewControlsContainerWidth,
y: 0
}}
onDrag={(e, data) => {
setTimeout(() => {
let offset = 0;
if (currentTimeControlRef.current) {
const match = currentTimeControlRef.current.style.transform.match(/translate\((.+)px,/);
if (match)
offset = parseFloat(match[1]);
}
const offsetTime = commonStore.trackTotalTime / viewControlsContainerWidth * offset;
commonStore.setTrackCurrentTime(offsetTime);
}, 1);
}}
>
<div ref={currentTimeControlRef} className="h-2 bg-gray-700 cursor-move rounded"
style={{ width: currentTimeControlWidth }} />
</Draggable>
<div className={classnames(
'flex',
(playStartTimeControlPosition < 0 || playStartTimeControlPosition > viewControlsContainerWidth)
&& 'hidden'
)}>
<Draggable axis="x" bounds={{
left: 0,
right: (playStartTimeControlRef.current)
? Math.min(viewControlsContainerWidth - playStartTimeControlRef.current.clientWidth, moveableTracksWidth)
: 0
}}
grid={[snapValue, snapValue]}
position={{ x: playStartTimeControlPosition, y: 0 }}
onStart={(e, data) => {
playStartTimeControlX.current = data.lastX;
}}
onStop={(e, data) => {
const delta = data.lastX - playStartTimeControlX.current;
let offsetTime = Math.round(Math.round(delta / snapValue * baseMoveTime * scale) / minimalMoveTime) * minimalMoveTime;
offsetTime = Math.min(Math.max(
offsetTime,
-commonStore.trackPlayStartTime), commonStore.trackTotalTime - commonStore.trackPlayStartTime);
commonStore.setTrackPlayStartTime(commonStore.trackPlayStartTime + offsetTime);
}}
>
<div className="relative cursor-move"
ref={playStartTimeControlRef}>
<ArrowAutofitWidth20Regular />
<div className="border-l absolute border-gray-700"
style={{
height: (tracksRef.current && commonStore.tracks.length > 0)
? tracksRef.current.clientHeight
: 0,
top: '50%',
left: `calc(50% - ${pixelFix}px)`
}} />
</div>
</Draggable>
</div>
</div>
</div>
<Tooltip content={t('Scale View')! + ': ' + commonStore.trackScale} showDelay={0} hideDelay={0}
relationship="description">
<Slider ref={toolbarSliderRef} value={commonStore.trackScale} step={scaleMin} max={scaleMax} min={scaleMin}
onChange={(e, data) => {
commonStore.setTrackScale(data.value);
}}
/>
</Tooltip>
</div>
<div className="flex flex-col overflow-y-auto gap-1" ref={tracksRef}>
{commonStore.tracks.map(track =>
<div key={track.id} className="flex gap-2 pb-1 border-b">
<div className="flex gap-1 border-r h-7">
<ToolTipButton desc={commonStore.recordingTrackId === track.id ? t('Stop') : t('Record')}
icon={commonStore.recordingTrackId === track.id ? <Stop16Filled /> : <Record16Regular />}
size="small" shape="circular" appearance="subtle"
onClick={() => {
flushMidiRecordingContent();
commonStore.setPlayingTrackId('');

if (commonStore.recordingTrackId === track.id) {
commonStore.setRecordingTrackId('');
} else {
if (commonStore.activeMidiDeviceIndex === -1) {
toast(t('Please select a MIDI device first'), { type: 'warning' });
return;
}

dropRecordingTime = true;
setSelectedTrackId(track.id);

commonStore.setRecordingTrackId(track.id);
commonStore.setRecordingContent(track.content);
commonStore.setRecordingRawContent(track.rawContent.slice());
}
}} />
<ToolTipButton disabled
desc={commonStore.playingTrackId === track.id ? t('Stop') : t('Play') + ' (Unavailable)'}
icon={commonStore.playingTrackId === track.id ? <Pause16Regular /> : <Play16Filled />}
size="small" shape="circular" appearance="subtle"
onClick={() => {
flushMidiRecordingContent();
commonStore.setRecordingTrackId('');

if (commonStore.playingTrackId === track.id) {
commonStore.setPlayingTrackId('');
} else {
setSelectedTrackId(track.id);

commonStore.setPlayingTrackId(track.id);
}
}} />
<ToolTipButton desc={t('Delete')} icon={<Delete16Regular />} size="small" shape="circular"
appearance="subtle" onClick={() => {
const tracks = commonStore.tracks.slice().filter(t => t.id !== track.id);
commonStore.setTracks(tracks);
refreshTracksTotalTime();
}} />
</div>
<div className="relative grow overflow-hidden">
<div className="absolute" style={{ left: -0 }}>
<Track
id={track.id}
scale={scale}
right={Math.min(tracksWidth, moveableTracksWidth)}
isSelected={selectedTrackId === track.id}
onSelect={setSelectedTrackId}
/>
</div>
</div>
</div>)}
<div className="flex justify-between items-center">
<Button icon={<Add16Regular />} size="small" shape="circular"
appearance="subtle"
onClick={() => {
commonStore.setTracks([...commonStore.tracks, {
id: uuid(),
content: '',
rawContent: [],
offsetTime: 0,
contentTime: 0
}]);
}}>
{t('New Track')}
</Button>
<Text size={100}>
{t('Select a track to preview the content')}
</Text>
</div>
</div>
<div className="grow"></div>
{selectedTrack &&
<Card size="small" appearance="outline" style={{ minHeight: '150px', maxHeight: '200px' }}>
<div className="flex flex-col gap-1 overflow-hidden">
<Text size={100}>{`${t('Start Time')}: ${selectedTrack.offsetTime} ms`}</Text>
<Text size={100}>{`${t('Content Duration')}: ${selectedTrack.contentTime} ms`}</Text>
<div className="overflow-y-auto overflow-x-hidden" ref={contentPreviewRef}>
{selectedTrackId === commonStore.recordingTrackId
? commonStore.recordingContent
: selectedTrack.content}
</div>
</div>
</Card>
}
<DialogTrigger disableButtonEnhancement>
<Button icon={<MusicNote220Regular />} style={{ minHeight: '32px' }} onClick={() => {
flushMidiRecordingContent();
commonStore.setRecordingTrackId('');
commonStore.setPlayingTrackId('');

const timestamp = [];
const sortedTracks = commonStore.tracks.slice().sort((a, b) => a.offsetTime - b.offsetTime);
for (const track of sortedTracks) {
timestamp.push(track.offsetTime);
let accContentTime = 0;
for (const msg of track.rawContent) {
if (msg.messageType === 'ElapsedTime') {
accContentTime += msg.value;
timestamp.push(track.offsetTime + accContentTime);
}
}
}
const sortedTimestamp = timestamp.slice().sort((a, b) => a - b);
const globalMessages: MidiMessage[] = sortedTimestamp.reduce((messages, current, i) =>
[...messages, {
messageType: 'ElapsedTime',
value: current - (i === 0 ? 0 : sortedTimestamp[i - 1])
} as MidiMessage]
, [] as MidiMessage[]);
for (const track of sortedTracks) {
let currentTime = track.offsetTime;
let accContentTime = 0;
for (const msg of track.rawContent) {
if (msg.messageType === 'ElapsedTime') {
accContentTime += msg.value;
currentTime = track.offsetTime + accContentTime;
} else if (msg.messageType === 'NoteOn' || msg.messageType === 'NoteOff') {
const insertIndex = sortedTimestamp.findIndex(t => t >= currentTime);
globalMessages.splice(insertIndex + 1, 0, msg);
sortedTimestamp.splice(insertIndex + 1, 0, 0); // placeholder
}
}
}
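// Hedged reading of the merge above: every ElapsedTime boundary across all
// tracks is flattened into one sorted timeline, deltas between consecutive
// timestamps become the global ElapsedTime stream, and each note is spliced
// in right after the timestamp it lands on; the 0 pushed into sortedTimestamp
// appears to serve only to keep the two arrays index-aligned for later splices.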
const result = globalMessages.map(m => midiMessageToToken(m)).join('');
commonStore.setCompositionSubmittedPrompt(result);
setPrompt(result);
}}>
{t('Save to generation area')}
</Button>
</DialogTrigger>
</div>
);
});

export default AudiotrackEditor;
@@ -10,56 +10,40 @@ import { KebabHorizontalIcon, PencilIcon, SyncIcon, TrashIcon } from '@primer/oc
import logo from '../assets/images/logo.png';
import MarkdownRender from '../components/MarkdownRender';
import { ToolTipButton } from '../components/ToolTipButton';
import { ArrowCircleUp28Regular, Delete28Regular, RecordStop28Regular, Save28Regular } from '@fluentui/react-icons';
import {
ArrowCircleUp28Regular,
ArrowClockwise16Regular,
Attach16Regular,
Delete28Regular,
Dismiss16Regular,
Dismiss24Regular,
RecordStop28Regular,
SaveRegular,
TextAlignJustify24Regular,
TextAlignJustifyRotate9024Regular
} from '@fluentui/react-icons';
import { CopyButton } from '../components/CopyButton';
import { ReadButton } from '../components/ReadButton';
import { toast } from 'react-toastify';
import { WorkHeader } from '../components/WorkHeader';
import { DialogButton } from '../components/DialogButton';
import { OpenFileFolder, OpenSaveFileDialog } from '../../wailsjs/go/backend_golang/App';
import { toastWithButton } from '../utils';
import { PresetsButton } from './PresetsManager/PresetsButton';
import { OpenFileFolder, OpenOpenFileDialog, OpenSaveFileDialog } from '../../wailsjs/go/backend_golang/App';
import { absPathAsset, bytesToReadable, getServerRoot, toastWithButton } from '../utils';
import { useMediaQuery } from 'usehooks-ts';
import { botName, ConversationMessage, MessageType, userName, welcomeUuid } from '../types/chat';
import { Labeled } from '../components/Labeled';
import { ValuedSlider } from '../components/ValuedSlider';
import { PresetsButton } from './PresetsManager/PresetsButton';
import { webOpenOpenFileDialog } from '../utils/web-file-operations';

export const userName = 'M E';
export const botName = 'A I';
let chatSseControllers: {
[id: string]: AbortController
} = {};
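// Hedged note: keying AbortControllers by answer uuid (instead of the single
// chatSseController removed below) lets several completions stream at once;
// Retry aborts only its own request, and `generating` can be derived from
// whether this map is empty.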
|
||||
|
||||
export const welcomeUuid = 'welcome';
|
||||
|
||||
export enum MessageType {
|
||||
Normal,
|
||||
Error
|
||||
}
|
||||
|
||||
export type Side = 'left' | 'right'
|
||||
|
||||
export type Color = 'neutral' | 'brand' | 'colorful'
|
||||
|
||||
export type MessageItem = {
|
||||
sender: string,
|
||||
type: MessageType,
|
||||
color: Color,
|
||||
avatarImg?: string,
|
||||
time: string,
|
||||
content: string,
|
||||
side: Side,
|
||||
done: boolean
|
||||
}
|
||||
|
||||
export type Conversation = {
|
||||
[uuid: string]: MessageItem
|
||||
}
|
||||
|
||||
export type Role = 'assistant' | 'user' | 'system';
|
||||
|
||||
export type ConversationMessage = {
|
||||
role: Role;
|
||||
content: string;
|
||||
}
|
||||
|
||||
let chatSseController: AbortController | null = null;
|
||||
|
||||
const MoreUtilsButton: FC<{ uuid: string, setEditing: (editing: boolean) => void }> = observer(({
|
||||
const MoreUtilsButton: FC<{
|
||||
uuid: string,
|
||||
setEditing: (editing: boolean) => void
|
||||
}> = observer(({
|
||||
uuid,
|
||||
setEditing
|
||||
}) => {
|
||||
@@ -83,13 +67,15 @@ const MoreUtilsButton: FC<{ uuid: string, setEditing: (editing: boolean) => void
|
||||
onClick={() => {
|
||||
commonStore.conversationOrder.splice(commonStore.conversationOrder.indexOf(uuid), 1);
|
||||
delete commonStore.conversation[uuid];
|
||||
commonStore.setAttachment(uuid, null);
|
||||
}} />
|
||||
</MenuPopover>
|
||||
</Menu>;
|
||||
});
|
||||
|
||||
const ChatMessageItem: FC<{
|
||||
uuid: string, onSubmit: (message: string | null, answerId: string | null,
|
||||
uuid: string,
|
||||
onSubmit: (message: string | null, answerId: string | null,
|
||||
startUuid: string | null, endUuid: string | null, includeEndUuid: boolean) => void
|
||||
}> = observer(({ uuid, onSubmit }) => {
|
||||
const { t } = useTranslation();
|
||||
@@ -114,6 +100,13 @@ const ChatMessageItem: FC<{
|
||||
}
|
||||
};
|
||||
|
||||
let avatarImg: string | undefined;
|
||||
if (commonStore.activePreset && messageItem.sender === botName) {
|
||||
avatarImg = absPathAsset(commonStore.activePreset.avatarImg);
|
||||
} else if (messageItem.avatarImg) {
|
||||
avatarImg = messageItem.avatarImg;
|
||||
}
|
||||
|
||||
return <div
|
||||
className={classnames(
|
||||
'flex gap-2 mb-2 overflow-hidden',
|
||||
@@ -131,7 +124,7 @@ const ChatMessageItem: FC<{
|
||||
<Avatar
|
||||
color={messageItem.color}
|
||||
name={messageItem.sender}
|
||||
image={(commonStore.activePreset && messageItem.sender === botName) ? { src: commonStore.activePreset.avatarImg } : messageItem.avatarImg ? { src: messageItem.avatarImg } : undefined}
|
||||
image={avatarImg ? { src: avatarImg } : undefined}
|
||||
/>
|
||||
<div
|
||||
className={classnames(
|
||||
@@ -142,13 +135,31 @@ const ChatMessageItem: FC<{
|
||||
)}
|
||||
>
|
||||
{!editing ?
|
||||
<MarkdownRender>{messageItem.content}</MarkdownRender> :
|
||||
<div className="flex flex-col">
|
||||
<MarkdownRender>{messageItem.content}</MarkdownRender>
|
||||
{uuid in commonStore.attachments &&
|
||||
<div className="flex grow">
|
||||
<div className="grow" />
|
||||
<ToolTipButton className="whitespace-nowrap"
|
||||
text={
|
||||
commonStore.attachments[uuid][0].name.replace(
|
||||
new RegExp('(^[^\\.]{5})[^\\.]+'), '$1...')
|
||||
}
|
||||
desc={`${commonStore.attachments[uuid][0].name} (${bytesToReadable(commonStore.attachments[uuid][0].size)})`}
|
||||
size="small" shape="circular" appearance="secondary" />
|
||||
</div>
|
||||
}
|
||||
</div> :
|
||||
<Textarea ref={textareaRef}
|
||||
className="grow"
|
||||
style={{ minWidth: 0 }}
|
||||
value={messageItem.content}
|
||||
onChange={(e) => {
|
||||
messageItem.content = e.target.value;
|
||||
commonStore.conversation[uuid].type = MessageType.Normal;
|
||||
commonStore.conversation[uuid].done = true;
|
||||
commonStore.setConversation(commonStore.conversation);
|
||||
commonStore.setConversationOrder([...commonStore.conversationOrder]);
|
||||
}}
|
||||
onBlur={() => {
|
||||
setEditingInner(false);
|
||||
@@ -166,6 +177,10 @@ const ChatMessageItem: FC<{
|
||||
messageItem.sender === botName && uuid !== welcomeUuid &&
|
||||
<ToolTipButton desc={t('Retry')} size="small" appearance="subtle"
|
||||
icon={<SyncIcon />} onClick={() => {
|
||||
if (uuid in chatSseControllers) {
|
||||
chatSseControllers[uuid].abort();
|
||||
delete chatSseControllers[uuid];
|
||||
}
|
||||
onSubmit(null, uuid, null, uuid, false);
|
||||
}} />
|
||||
}
|
||||
@@ -179,23 +194,129 @@ const ChatMessageItem: FC<{
|
||||
</div>;
|
||||
});
|
||||
|
||||
const SidePanel: FC = observer(() => {
|
||||
const [t] = useTranslation();
|
||||
const mq = useMediaQuery('(min-width: 640px)');
|
||||
const params = commonStore.chatParams;
|
||||
|
||||
return <div
|
||||
className={classnames(
|
||||
'flex flex-col gap-1 h-full flex-shrink-0 transition-width duration-300 ease-in-out',
|
||||
commonStore.sidePanelCollapsed ? 'w-0' : (mq ? 'w-64' : 'w-full'),
|
||||
!commonStore.sidePanelCollapsed && 'ml-1')
|
||||
}>
|
||||
<div className="flex m-1">
|
||||
<div className="grow" />
|
||||
<PresetsButton tab="Chat" size="medium" shape="circular" appearance="subtle" />
|
||||
<Button size="medium" shape="circular" appearance="subtle" icon={<Dismiss24Regular />}
|
||||
onClick={() => commonStore.setSidePanelCollapsed(true)}
|
||||
/>
|
||||
</div>
|
||||
<div className="flex flex-col gap-1 overflow-x-hidden overflow-y-auto p-1">
|
||||
<Labeled flex breakline label={t('Max Response Token')}
|
||||
desc={t('By default, the maximum number of tokens that can be answered in a single response, it can be changed by the user by specifying API parameters.')}
|
||||
content={
|
||||
<ValuedSlider value={params.maxResponseToken} min={100} max={8100}
|
||||
step={100}
|
||||
input
|
||||
onChange={(e, data) => {
|
||||
commonStore.setChatParams({
|
||||
maxResponseToken: data.value
|
||||
});
|
||||
}} />
|
||||
} />
|
||||
<Labeled flex breakline label={t('Temperature')}
|
||||
desc={t('Sampling temperature, it\'s like giving alcohol to a model, the higher the stronger the randomness and creativity, while the lower, the more focused and deterministic it will be.')}
|
||||
content={
|
||||
<ValuedSlider value={params.temperature} min={0} max={2} step={0.1}
|
||||
input
|
||||
onChange={(e, data) => {
|
||||
commonStore.setChatParams({
|
||||
temperature: data.value
|
||||
});
|
||||
}} />
|
||||
} />
|
||||
<Labeled flex breakline label={t('Top_P')}
|
||||
desc={t('Just like feeding sedatives to the model. Consider the results of the top n% probability mass, 0.1 considers the top 10%, with higher quality but more conservative, 1 considers all results, with lower quality but more diverse.')}
|
||||
content={
|
||||
<ValuedSlider value={params.topP} min={0} max={1} step={0.1} input
|
||||
onChange={(e, data) => {
|
||||
commonStore.setChatParams({
|
||||
topP: data.value
|
||||
});
|
||||
}} />
|
||||
} />
|
||||
<Labeled flex breakline label={t('Presence Penalty')}
|
||||
desc={t('Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.')}
|
||||
content={
|
||||
<ValuedSlider value={params.presencePenalty} min={0} max={2}
|
||||
step={0.1} input
|
||||
onChange={(e, data) => {
|
||||
commonStore.setChatParams({
|
||||
presencePenalty: data.value
|
||||
});
|
||||
}} />
|
||||
} />
|
||||
<Labeled flex breakline label={t('Frequency Penalty')}
|
||||
desc={t('Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.')}
|
||||
content={
|
||||
<ValuedSlider value={params.frequencyPenalty} min={0} max={2}
|
||||
step={0.1} input
|
||||
onChange={(e, data) => {
|
||||
commonStore.setChatParams({
|
||||
frequencyPenalty: data.value
|
||||
});
|
||||
}} />
|
||||
} />
|
||||
</div>
|
||||
<div className="grow" />
|
||||
{/*<Button*/}
|
||||
{/* icon={<FolderOpenVerticalRegular />}*/}
|
||||
{/* onClick={() => {*/}
|
||||
{/* }}>*/}
|
||||
{/* {t('Load Conversation')}*/}
|
||||
{/*</Button>*/}
|
||||
<Button
|
||||
icon={<SaveRegular />}
|
||||
onClick={() => {
|
||||
let savedContent: string = '';
|
||||
const isWorldModel = commonStore.getCurrentModelConfig().modelParameters.modelName.toLowerCase().includes('world');
|
||||
const user = isWorldModel ? 'User' : 'Bob';
|
||||
const bot = isWorldModel ? 'Assistant' : 'Alice';
|
||||
commonStore.conversationOrder.forEach((uuid) => {
|
||||
if (uuid === welcomeUuid)
|
||||
return;
|
||||
const messageItem = commonStore.conversation[uuid];
|
||||
if (messageItem.type !== MessageType.Error) {
|
||||
savedContent += `${messageItem.sender === userName ? user : bot}: ${messageItem.content}\n\n`;
|
||||
}
|
||||
});
|
||||
|
||||
OpenSaveFileDialog('*.txt', 'conversation.txt', savedContent).then((path) => {
|
||||
if (path)
|
||||
toastWithButton(t('Conversation Saved'), t('Open'), () => {
|
||||
OpenFileFolder(path, false);
|
||||
});
|
||||
}).catch(e => {
|
||||
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
|
||||
});
|
||||
}}>
|
||||
{t('Save Conversation')}
|
||||
</Button>
|
||||
</div>;
|
||||
});
|
||||
|
||||
const ChatPanel: FC = observer(() => {
|
||||
const { t } = useTranslation();
|
||||
const bodyRef = useRef<HTMLDivElement>(null);
|
||||
const inputRef = useRef<HTMLTextAreaElement>(null);
|
||||
const mq = useMediaQuery('(min-width: 640px)');
|
||||
if (commonStore.sidePanelCollapsed === 'auto')
|
||||
commonStore.setSidePanelCollapsed(!mq);
|
||||
const currentConfig = commonStore.getCurrentModelConfig();
|
||||
const apiParams = currentConfig.apiParameters;
|
||||
const port = apiParams.apiPort;
|
||||
|
||||
let lastMessageId: string;
|
||||
let generating: boolean = false;
|
||||
if (commonStore.conversationOrder.length > 0) {
|
||||
lastMessageId = commonStore.conversationOrder[commonStore.conversationOrder.length - 1];
|
||||
const lastMessage = commonStore.conversation[lastMessageId];
|
||||
if (lastMessage.sender === botName)
generating = !lastMessage.done;
}
const generating: boolean = Object.keys(chatSseControllers).length > 0;

useEffect(() => {
if (inputRef.current)
@@ -213,7 +334,7 @@ const ChatPanel: FC = observer(() => {
color: 'colorful',
avatarImg: logo,
time: new Date().toISOString(),
content: t('Hello! I\'m RWKV, an open-source and commercially usable large language model.'),
content: commonStore.platform === 'web' ? t('Hello, what can I do for you?') : t('Hello! I\'m RWKV, an open-source and commercially usable large language model.'),
side: 'left',
done: true
}
@@ -230,7 +351,7 @@ const ChatPanel: FC = observer(() => {
e.stopPropagation();
if (e.type === 'click' || (e.keyCode === 13 && !e.shiftKey)) {
e.preventDefault();
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl) {
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl && commonStore.platform !== 'web') {
toast(t('Please click the button in the top right corner to start the model'), { type: 'warning' });
return;
}
@@ -260,6 +381,11 @@ const ChatPanel: FC = observer(() => {
commonStore.setConversation(commonStore.conversation);
commonStore.conversationOrder.push(newId);
commonStore.setConversationOrder(commonStore.conversationOrder);

if (commonStore.currentTempAttachment) {
commonStore.setAttachment(newId, [commonStore.currentTempAttachment]);
commonStore.setCurrentTempAttachment(null);
}
}

let startIndex = startUuid ? commonStore.conversationOrder.indexOf(startUuid) : 0;
@@ -271,6 +397,17 @@ const ChatPanel: FC = observer(() => {
if (uuid === welcomeUuid)
return;
const messageItem = commonStore.conversation[uuid];
if (uuid in commonStore.attachments) {
const attachment = commonStore.attachments[uuid][0];
messages.push({
role: 'user',
content: t('The content of file') + ` "${attachment.name}" `
+ t('is as follows. When replying to me, consider the file content and respond accordingly:')
+ '\n\n' + attachment.content
});
messages.push({ role: 'user', content: t('What\'s the file name') });
messages.push({ role: 'assistant', content: t('The file name is: ') + attachment.name });
}
if (messageItem.done && messageItem.type === MessageType.Normal && messageItem.sender === userName) {
messages.push({ role: 'user', content: messageItem.content });
} else if (messageItem.done && messageItem.type === MessageType.Normal && messageItem.sender === botName) {
@@ -296,11 +433,10 @@ const ChatPanel: FC = observer(() => {
commonStore.setConversationOrder(commonStore.conversationOrder);
setTimeout(scrollToBottom);
let answer = '';
chatSseController = new AbortController();
fetchEventSource( // https://api.openai.com/v1/chat/completions || http://127.0.0.1:${port}/chat/completions
commonStore.settings.apiUrl ?
commonStore.settings.apiUrl + '/v1/chat/completions' :
`http://127.0.0.1:${port}/chat/completions`,
const chatSseController = new AbortController();
chatSseControllers[answerId] = chatSseController;
fetchEventSource( // https://api.openai.com/v1/chat/completions || http://127.0.0.1:${port}/v1/chat/completions
getServerRoot(port) + '/v1/chat/completions',
{
method: 'POST',
headers: {
@@ -311,13 +447,20 @@ const ChatPanel: FC = observer(() => {
messages,
stream: true,
model: commonStore.settings.apiChatModelName, // 'gpt-3.5-turbo'
temperature: apiParams.temperature,
top_p: apiParams.topP
temperature: commonStore.chatParams.temperature,
top_p: commonStore.chatParams.topP,
presence_penalty: commonStore.chatParams.presencePenalty,
frequency_penalty: commonStore.chatParams.frequencyPenalty,
user_name: commonStore.activePreset?.userName || undefined,
assistant_name: commonStore.activePreset?.assistantName || undefined,
presystem: commonStore.activePreset?.presystem && undefined
}),
signal: chatSseController?.signal,
onmessage(e) {
scrollToBottom();
if (e.data.trim() === '[DONE]') {
if (answerId! in chatSseControllers)
delete chatSseControllers[answerId!];
commonStore.conversation[answerId!].done = true;
commonStore.conversation[answerId!].content = commonStore.conversation[answerId!].content.trim();
commonStore.setConversation(commonStore.conversation);
@@ -331,6 +474,8 @@ const ChatPanel: FC = observer(() => {
console.debug('json error', error);
return;
}
if (data.model)
commonStore.setLastModelName(data.model);
if (data.choices && Array.isArray(data.choices) && data.choices.length > 0) {
answer += data.choices[0]?.delta?.content || '';
commonStore.conversation[answerId!].content = answer;
@@ -347,9 +492,13 @@ const ChatPanel: FC = observer(() => {
}
},
onclose() {
if (answerId! in chatSseControllers)
delete chatSseControllers[answerId!];
console.log('Connection closed');
},
onerror(err) {
if (answerId! in chatSseControllers)
delete chatSseControllers[answerId!];
commonStore.conversation[answerId!].type = MessageType.Error;
commonStore.conversation[answerId!].done = true;
err = err.message || err;
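The key structural change in these hunks is swapping the single module-level `chatSseController` for a `chatSseControllers` map keyed by the answer message id, so several conversations can stream at once and each can be aborted independently. A minimal standalone sketch of the pattern, assuming only what the diff shows (the wiring below is illustrative, not the app's actual module):

// Sketch of the controller-map pattern; only the map-keyed-by-message-id idea
// and the `generating` derivation come from the diff above.
const controllers: { [messageId: string]: AbortController } = {};

function startGeneration(messageId: string, run: (signal: AbortSignal) => Promise<void>) {
  const controller = new AbortController();
  controllers[messageId] = controller;
  run(controller.signal).finally(() => {
    // Drop the entry once the stream settles so `generating` flips back to false.
    delete controllers[messageId];
  });
}

function stopAll() {
  for (const id in controllers) controllers[id].abort();
}

// Any live controller means something is generating (recomputed per render above).
const isGenerating = () => Object.keys(controllers).length > 0;

This is also why `[DONE]`, `onclose` and `onerror` each delete their own entry: the boolean is derived from the map rather than tracked separately.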
@@ -364,82 +513,176 @@ const ChatPanel: FC = observer(() => {
}, []);

return (
<div className="flex flex-col w-full grow gap-4 pt-4 overflow-hidden">
<div ref={bodyRef} className="grow overflow-y-scroll overflow-x-hidden pr-2">
{commonStore.conversationOrder.map(uuid =>
<ChatMessageItem key={uuid} uuid={uuid} onSubmit={onSubmit} />
)}
</div>
<div className={classnames('flex items-end', mq ? 'gap-2' : '')}>
<PresetsButton tab="Chat" size={mq ? 'large' : 'small'} shape="circular" appearance="subtle" />
<DialogButton tooltip={t('Clear')}
icon={<Delete28Regular />}
size={mq ? 'large' : 'small'} shape="circular" appearance="subtle" title={t('Clear')}
contentText={t('Are you sure you want to clear the conversation? It cannot be undone.')}
onConfirm={() => {
if (generating)
chatSseController?.abort();
commonStore.setConversation({});
commonStore.setConversationOrder([]);
}} />
<Textarea
ref={inputRef}
style={{ minWidth: 0 }}
className="grow"
resize="vertical"
placeholder={t('Type your message here')!}
value={commonStore.currentInput}
onChange={(e) => commonStore.setCurrentInput(e.target.value)}
onKeyDown={handleKeyDownOrClick}
/>
<ToolTipButton desc={generating ? t('Stop') : t('Send')}
icon={generating ? <RecordStop28Regular /> : <ArrowCircleUp28Regular />}
size={mq ? 'large' : 'small'} shape="circular" appearance="subtle"
onClick={(e) => {
if (generating) {
chatSseController?.abort();
if (lastMessageId) {
commonStore.conversation[lastMessageId].type = MessageType.Error;
commonStore.conversation[lastMessageId].done = true;
<div className="flex h-full grow pt-4 overflow-hidden">
<div className="relative flex flex-col w-full grow gap-4 overflow-hidden">
<Button className="absolute top-1 right-1" size="medium" shape="circular" appearance="subtle"
style={{ zIndex: 1 }}
icon={commonStore.sidePanelCollapsed ? <TextAlignJustify24Regular /> : <TextAlignJustifyRotate9024Regular />}
onClick={() => commonStore.setSidePanelCollapsed(!commonStore.sidePanelCollapsed)} />
<div ref={bodyRef} className="grow overflow-y-scroll overflow-x-hidden pr-2">
{commonStore.conversationOrder.map(uuid =>
<ChatMessageItem key={uuid} uuid={uuid} onSubmit={onSubmit} />
)}
</div>
<div className={classnames('flex items-end', mq ? 'gap-2' : '')}>
<DialogButton tooltip={t('Clear')}
icon={<Delete28Regular />}
size={mq ? 'large' : 'small'} shape="circular" appearance="subtle" title={t('Clear')}
contentText={t('Are you sure you want to clear the conversation? It cannot be undone.')}
onConfirm={() => {
if (generating) {
for (const id in chatSseControllers) {
chatSseControllers[id].abort();
}
chatSseControllers = {};
}
commonStore.setConversation({});
commonStore.setConversationOrder([]);
}} />
<div className="relative flex grow">
<Textarea
ref={inputRef}
style={{ minWidth: 0 }}
className="grow"
resize="vertical"
placeholder={t('Type your message here')!}
value={commonStore.currentInput}
onChange={(e) => commonStore.setCurrentInput(e.target.value)}
onKeyDown={handleKeyDownOrClick}
/>
<div className="absolute right-2 bottom-2">
{!commonStore.currentTempAttachment ?
<ToolTipButton
desc={commonStore.attachmentUploading ?
t('Uploading Attachment') :
t('Add An Attachment (Accepts pdf, txt)')}
icon={commonStore.attachmentUploading ?
<ArrowClockwise16Regular className="animate-spin" />
: <Attach16Regular />}
size="small" shape="circular" appearance="secondary"
onClick={() => {
if (commonStore.attachmentUploading)
return;

const filterPattern = '*.txt;*.pdf';
const setUploading = () => commonStore.setAttachmentUploading(true);
// actually, status of web platform is always Offline
if (commonStore.platform === 'web' || commonStore.status.status === ModelStatus.Offline || currentConfig.modelParameters.device === 'WebGPU') {
webOpenOpenFileDialog({ filterPattern, fnStartLoading: setUploading }).then(webReturn => {
if (webReturn.content)
commonStore.setCurrentTempAttachment(
{
name: webReturn.blob.name,
size: webReturn.blob.size,
content: webReturn.content
});
else
toast(t('File is empty'), {
type: 'info',
autoClose: 1000
});
}).catch(e => {
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
}).finally(() => {
commonStore.setAttachmentUploading(false);
});
} else {
OpenOpenFileDialog(filterPattern).then(async filePath => {
if (!filePath)
return;

setUploading();

// Both are slow. Communication between frontend and backend is slow. Use AssetServer Handler to read the file.
// const blob = new Blob([atob(info.content as unknown as string)]); // await fetch(`data:application/octet-stream;base64,${info.content}`).then(r => r.blob());
const blob = await fetch(absPathAsset(filePath)).then(r => r.blob());
const attachmentName = filePath.split(/[\\/]/).pop();
const urlPath = `/file-to-text?file_name=${attachmentName}`;
const bodyForm = new FormData();
bodyForm.append('file_data', blob, attachmentName);
fetch(getServerRoot(port) + urlPath, {
method: 'POST',
body: bodyForm
}).then(async r => {
if (r.status === 200) {
const pages = (await r.json()).pages as any[];
let attachmentContent: string;
if (pages.length === 1)
attachmentContent = pages[0].page_content;
else
attachmentContent = pages.map((p, i) => `Page ${i + 1}:\n${p.page_content}`).join('\n\n');
if (attachmentContent)
commonStore.setCurrentTempAttachment(
{
name: attachmentName!,
size: blob.size,
content: attachmentContent!
});
else
toast(t('File is empty'), {
type: 'info',
autoClose: 1000
});
} else {
toast(r.statusText + '\n' + (await r.text()), {
type: 'error'
});
}
}
).catch(e => {
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
}).finally(() => {
commonStore.setAttachmentUploading(false);
});
}).catch(e => {
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
});
}
}}
/> :
<div>
<ToolTipButton
text={
commonStore.currentTempAttachment.name.replace(
new RegExp('(^[^\\.]{5})[^\\.]+'), '$1...')
}
desc={`${commonStore.currentTempAttachment.name} (${bytesToReadable(commonStore.currentTempAttachment.size)})`}
size="small" shape="circular" appearance="secondary" />
<ToolTipButton desc={t('Remove Attachment')}
icon={<Dismiss16Regular />}
size="small" shape="circular" appearance="subtle"
onClick={() => {
commonStore.setCurrentTempAttachment(null);
}} />
</div>
}
</div>
</div>
<ToolTipButton desc={generating ? t('Stop') : t('Send')}
icon={generating ? <RecordStop28Regular /> : <ArrowCircleUp28Regular />}
size={mq ? 'large' : 'small'} shape="circular" appearance="subtle"
onClick={(e) => {
if (generating) {
for (const id in chatSseControllers) {
chatSseControllers[id].abort();
commonStore.conversation[id].type = MessageType.Error;
commonStore.conversation[id].done = true;
}
chatSseControllers = {};
commonStore.setConversation(commonStore.conversation);
commonStore.setConversationOrder([...commonStore.conversationOrder]);
} else {
handleKeyDownOrClick(e);
}
} else {
handleKeyDownOrClick(e);
}
}} />
<ToolTipButton desc={t('Save')}
icon={<Save28Regular />}
size={mq ? 'large' : 'small'} shape="circular" appearance="subtle"
onClick={() => {
let savedContent: string = '';
const isWorldModel = commonStore.getCurrentModelConfig().modelParameters.modelName.toLowerCase().includes('world');
const user = isWorldModel ? 'Question' : 'Bob';
const bot = isWorldModel ? 'Answer' : 'Alice';
commonStore.conversationOrder.forEach((uuid) => {
if (uuid === welcomeUuid)
return;
const messageItem = commonStore.conversation[uuid];
if (messageItem.type !== MessageType.Error) {
savedContent += `${messageItem.sender === userName ? user : bot}: ${messageItem.content}\n\n`;
}
});

OpenSaveFileDialog('*.txt', 'conversation.txt', savedContent).then((path) => {
if (path)
toastWithButton(t('Conversation Saved'), t('Open'), () => {
OpenFileFolder(path, false);
});
}).catch(e => {
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
});
}} />
}} />
</div>
</div>
<SidePanel />
</div>
);
});

export const Chat: FC = observer(() => {
const Chat: FC = observer(() => {
return (
<div className="flex flex-col gap-1 p-2 h-full overflow-hidden">
<WorkHeader />
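For reference, the Save handler above serializes the conversation with speaker labels that depend on the model family. The same logic as a pure function (types simplified; `MessageType`, `userName` and `welcomeUuid` come from the surrounding file, not from this sketch):

type SavedMessage = { sender: string; content: string; isError: boolean };

function serializeConversation(
  order: string[],
  conversation: { [uuid: string]: SavedMessage },
  welcomeUuid: string,
  userName: string,
  isWorldModel: boolean
): string {
  // "World" models expect Question/Answer prompts; older ones use Bob/Alice.
  const user = isWorldModel ? 'Question' : 'Bob';
  const bot = isWorldModel ? 'Answer' : 'Alice';
  let out = '';
  for (const uuid of order) {
    if (uuid === welcomeUuid) continue; // the greeting is not part of the transcript
    const m = conversation[uuid];
    if (m.isError) continue; // error placeholders are skipped, as in the handler
    out += `${m.sender === userName ? user : bot}: ${m.content}\n\n`;
  }
  return out;
}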
@@ -447,3 +690,5 @@ export const Chat: FC = observer(() => {
</div>
);
});

export default Chat;

@@ -5,7 +5,6 @@ import { Button, Dropdown, Input, Option, Textarea } from '@fluentui/react-compo
import { Labeled } from '../components/Labeled';
import { ValuedSlider } from '../components/ValuedSlider';
import { useTranslation } from 'react-i18next';
import { ApiParameters } from './Configs';
import commonStore, { ModelStatus } from '../stores/commonStore';
import { fetchEventSource } from '@microsoft/fetch-event-source';
import { toast } from 'react-toastify';
@@ -14,18 +13,8 @@ import { PresetsButton } from './PresetsManager/PresetsButton';
import { ToolTipButton } from '../components/ToolTipButton';
import { ArrowSync20Regular } from '@fluentui/react-icons';
import { defaultPresets } from './defaultConfigs';

export type CompletionParams = Omit<ApiParameters, 'apiPort'> & {
stop: string,
injectStart: string,
injectEnd: string
};

export type CompletionPreset = {
name: string,
prompt: string,
params: CompletionParams
}
import { CompletionParams, CompletionPreset } from '../types/completion';
import { getServerRoot } from '../utils';

let completionSseController: AbortController | null = null;
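Every request site in this changeset collapses the old `apiUrl ? apiUrl + path : http://127.0.0.1:${port} + path` ternary into `getServerRoot(port) + path`. The helper itself is not shown in the diff; a plausible minimal sketch, assuming it only needs the port plus the user-configured apiUrl (the real implementation in ../utils may also special-case the web build):

// Hypothetical reconstruction of getServerRoot based on its call sites above;
// the actual helper reads commonStore internally and may differ.
function getServerRoot(port: number, apiUrl?: string): string {
  return apiUrl ? apiUrl : `http://127.0.0.1:${port}`;
}

Centralizing this also explains the endpoint normalization visible in the diff: all paths now uniformly start with `/v1/...` instead of mixing `/completions` and `/v1/completions`.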
@@ -40,8 +29,10 @@ const CompletionPanel: FC = observer(() => {
};

useEffect(() => {
if (inputRef.current)
if (inputRef.current) {
inputRef.current.style.height = '100%';
inputRef.current.style.maxHeight = '100%';
}
scrollToBottom();
}, []);

@@ -80,7 +71,7 @@ const CompletionPanel: FC = observer(() => {
const onSubmit = (prompt: string) => {
commonStore.setCompletionSubmittedPrompt(prompt);

if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl) {
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl && commonStore.platform !== 'web') {
toast(t('Please click the button in the top right corner to start the model'), { type: 'warning' });
commonStore.setCompletionGenerating(false);
return;
@@ -90,10 +81,8 @@ const CompletionPanel: FC = observer(() => {

let answer = '';
completionSseController = new AbortController();
fetchEventSource( // https://api.openai.com/v1/completions || http://127.0.0.1:${port}/completions
commonStore.settings.apiUrl ?
commonStore.settings.apiUrl + '/v1/completions' :
`http://127.0.0.1:${port}/completions`,
fetchEventSource( // https://api.openai.com/v1/completions || http://127.0.0.1:${port}/v1/completions
getServerRoot(port) + '/v1/completions',
{
method: 'POST',
headers: {
@@ -125,6 +114,8 @@ const CompletionPanel: FC = observer(() => {
console.debug('json error', error);
return;
}
if (data.model)
commonStore.setLastModelName(data.model);
if (data.choices && Array.isArray(data.choices) && data.choices.length > 0) {
answer += data.choices[0]?.text || data.choices[0]?.delta?.content || '';
setPrompt(prompt + answer.replace(/\s+$/, '') + params.injectEnd.replaceAll('\\n', '\n'));
@@ -186,7 +177,7 @@ const CompletionPanel: FC = observer(() => {
desc={t('By default, the maximum number of tokens that can be answered in a single response, it can be changed by the user by specifying API parameters.')}
content={
<ValuedSlider value={params.maxResponseToken} min={100} max={8100}
step={400}
step={100}
input
onChange={(e, data) => {
setParams({
@@ -269,6 +260,13 @@ const CompletionPanel: FC = observer(() => {
} />
</div>
<div className="grow" />
<div className="hidden justify-between gap-2 sm:flex">
<Button className="grow" onClick={() => {
const newPrompt = prompt.replace(/\n+\ /g, '\n').split('\n').map((line) => line.trim()).join('\n');
setPrompt(newPrompt);
commonStore.setCompletionSubmittedPrompt(newPrompt);
}}>{t('Format Content')}</Button>
</div>
<div className="flex justify-between gap-2">
<ToolTipButton desc={t('Regenerate')} icon={<ArrowSync20Regular />} onClick={() => {
completionSseController?.abort();
@@ -296,7 +294,7 @@ const CompletionPanel: FC = observer(() => {
);
});

export const Completion: FC = observer(() => {
const Completion: FC = observer(() => {
return (
<div className="flex flex-col gap-1 p-2 h-full overflow-hidden">
<WorkHeader />
@@ -304,3 +302,5 @@ export const Completion: FC = observer(() => {
</div>
);
});

export default Completion;
@@ -1,7 +1,8 @@
import 'html-midi-player';
import React, { FC, useEffect, useRef } from 'react';
import { observer } from 'mobx-react-lite';
import { WorkHeader } from '../components/WorkHeader';
import { Button, Checkbox, Textarea } from '@fluentui/react-components';
import { Button, Checkbox, Dropdown, Option, Textarea } from '@fluentui/react-components';
import { Labeled } from '../components/Labeled';
import { ValuedSlider } from '../components/ValuedSlider';
import { useTranslation } from 'react-i18next';
@@ -15,24 +16,23 @@ import { PlayerElement, VisualizerElement } from 'html-midi-player';
import * as mm from '@magenta/music/esm/core.js';
import { NoteSequence } from '@magenta/music/esm/protobuf.js';
import { defaultCompositionPrompt } from './defaultConfigs';
import { FileExists, OpenFileFolder, OpenSaveFileDialogBytes } from '../../wailsjs/go/backend_golang/App';
import { toastWithButton } from '../utils';

export type CompositionParams = {
prompt: string,
maxResponseToken: number,
temperature: number,
topP: number,
autoPlay: boolean,
useLocalSoundFont: boolean,
midi: ArrayBuffer | null,
ns: NoteSequence | null
}
import {
CloseMidiPort,
FileExists,
OpenFileFolder,
OpenMidiPort,
OpenSaveFileDialogBytes
} from '../../wailsjs/go/backend_golang/App';
import { getServerRoot, getSoundFont, toastWithButton } from '../utils';
import { CompositionParams } from '../types/composition';
import { useMediaQuery } from 'usehooks-ts';
import { AudiotrackButton } from './AudiotrackManager/AudiotrackButton';

let compositionSseController: AbortController | null = null;

const CompositionPanel: FC = observer(() => {
const { t } = useTranslation();
const mq = useMediaQuery('(min-width: 640px)');
const inputRef = useRef<HTMLTextAreaElement>(null);
const port = commonStore.getCurrentModelConfig().apiParameters.apiPort;
const visualizerRef = useRef<VisualizerElement>(null);
@@ -71,26 +71,16 @@ const CompositionPanel: FC = observer(() => {
};

const setSoundFont = async () => {
let soundUrl: string;
if (commonStore.compositionParams.useLocalSoundFont)
soundUrl = 'assets/sound-font';
else
soundUrl = !commonStore.settings.giteeUpdatesSource ?
`https://raw.githubusercontent.com/josStorer/sgm_plus/master` :
`https://gitee.com/josc146/sgm_plus/raw/master`;
const fallbackUrl = 'https://cdn.jsdelivr.net/gh/josstorer/sgm_plus';
await fetch(soundUrl + '/soundfont.json').then(r => {
if (!r.ok)
soundUrl = fallbackUrl;
}).catch(() => soundUrl = fallbackUrl);
if (playerRef.current) {
playerRef.current.soundFont = soundUrl;
playerRef.current.soundFont = await getSoundFont();
}
};

useEffect(() => {
if (inputRef.current)
if (inputRef.current) {
inputRef.current.style.height = '100%';
inputRef.current.style.maxHeight = '100%';
}
scrollToBottom();

if (playerRef.current && visualizerRef.current) {
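The deleted sound-font resolution logic above moves into a shared `getSoundFont()` helper. Its body is not shown in this diff; a sketch of the probe-then-fallback behavior it would need, reusing the URLs from the removed lines (the parameterization is an assumption, since the real helper reads commonStore itself):

// Factored the way a shared getSoundFont() could work; URLs copied from the
// deleted inline code, signature is hypothetical.
async function getSoundFont(useLocal: boolean, useGitee: boolean): Promise<string> {
  if (useLocal) return 'assets/sound-font';
  let url = useGitee
    ? 'https://gitee.com/josc146/sgm_plus/raw/master'
    : 'https://raw.githubusercontent.com/josStorer/sgm_plus/master';
  const fallback = 'https://cdn.jsdelivr.net/gh/josstorer/sgm_plus';
  try {
    const r = await fetch(url + '/soundfont.json'); // probe the primary CDN
    if (!r.ok) url = fallback;
  } catch {
    url = fallback;
  }
  return url;
}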
@@ -109,9 +99,7 @@ const CompositionPanel: FC = observer(() => {
}, []);

const generateNs = (autoPlay: boolean) => {
fetch(commonStore.settings.apiUrl ?
commonStore.settings.apiUrl + '/text-to-midi' :
`http://127.0.0.1:${port}/text-to-midi`, {
fetch(getServerRoot(port) + '/text-to-midi', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
@@ -128,7 +116,9 @@ const CompositionPanel: FC = observer(() => {
});
updateNs(ns);
if (autoPlay) {
playerRef.current?.start();
setTimeout(() => {
playerRef.current?.start();
});
}
});
});
@@ -137,7 +127,7 @@ const CompositionPanel: FC = observer(() => {
const onSubmit = (prompt: string) => {
commonStore.setCompositionSubmittedPrompt(prompt);

if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl) {
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl && commonStore.platform !== 'web') {
toast(t('Please click the button in the top right corner to start the model'), { type: 'warning' });
commonStore.setCompositionGenerating(false);
return;
@@ -145,10 +135,8 @@ const CompositionPanel: FC = observer(() => {

let answer = '';
compositionSseController = new AbortController();
fetchEventSource( // https://api.openai.com/v1/completions || http://127.0.0.1:${port}/completions
commonStore.settings.apiUrl ?
commonStore.settings.apiUrl + '/v1/completions' :
`http://127.0.0.1:${port}/completions`,
fetchEventSource( // https://api.openai.com/v1/completions || http://127.0.0.1:${port}/v1/completions
getServerRoot(port) + '/v1/completions',
{
method: 'POST',
headers: {
@@ -178,6 +166,8 @@ const CompositionPanel: FC = observer(() => {
console.debug('json error', error);
return;
}
if (data.model)
commonStore.setLastModelName(data.model);
if (data.choices && Array.isArray(data.choices) && data.choices.length > 0) {
answer += data.choices[0]?.text || data.choices[0]?.delta?.content || '';
setPrompt(prompt + answer.replace(/\s+$/, ''));
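Chat, Completion, and Composition all parse streamed chunks the same way; the new `data.model` branch additionally records which model actually answered. The shared parsing step in isolation (the `SseChunk` shape is inferred from the OpenAI-style API used here, not taken from the codebase):

type SseChunk = {
  model?: string;
  choices?: { text?: string; delta?: { content?: string } }[];
};

function extractDelta(raw: string): { model?: string; delta: string } | null {
  let data: SseChunk;
  try {
    data = JSON.parse(raw);
  } catch {
    return null; // mirrors the console.debug('json error') early return above
  }
  let delta = '';
  if (data.choices && Array.isArray(data.choices) && data.choices.length > 0)
    delta = data.choices[0].text || data.choices[0].delta?.content || '';
  return { model: data.model, delta };
}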
@@ -217,62 +207,94 @@ const CompositionPanel: FC = observer(() => {
setPrompt(e.target.value);
}}
/>
<div className="flex flex-col gap-1 max-h-48 sm:max-w-sm sm:max-h-full overflow-x-hidden overflow-y-auto p-1">
<Labeled flex breakline label={t('Max Response Token')}
desc={t('By default, the maximum number of tokens that can be answered in a single response, it can be changed by the user by specifying API parameters.')}
content={
<ValuedSlider value={params.maxResponseToken} min={100} max={4100}
step={100}
input
onChange={(e, data) => {
setParams({
maxResponseToken: data.value
});
}} />
} />
<Labeled flex breakline label={t('Temperature')}
desc={t('Sampling temperature, it\'s like giving alcohol to a model, the higher the stronger the randomness and creativity, while the lower, the more focused and deterministic it will be.')}
content={
<ValuedSlider value={params.temperature} min={0} max={2} step={0.1}
input
onChange={(e, data) => {
setParams({
temperature: data.value
});
}} />
} />
<Labeled flex breakline label={t('Top_P')}
desc={t('Just like feeding sedatives to the model. Consider the results of the top n% probability mass, 0.1 considers the top 10%, with higher quality but more conservative, 1 considers all results, with lower quality but more diverse.')}
content={
<ValuedSlider value={params.topP} min={0} max={1} step={0.1} input
onChange={(e, data) => {
setParams({
topP: data.value
});
}} />
} />
<div className="grow" />
<Checkbox className="select-none"
size="large" label={t('Use Local Sound Font')} checked={params.useLocalSoundFont}
onChange={async (_, data) => {
if (data.checked) {
if (!await FileExists('assets/sound-font/accordion/instrument.json')) {
toast(t('Failed to load local sound font, please check if the files exist - assets/sound-font'),
{ type: 'warning' });
return;
<div className="flex flex-col gap-1 max-h-48 sm:max-w-sm sm:max-h-full">
<div className="flex flex-col gap-1 grow overflow-x-hidden overflow-y-auto p-1">
<Labeled flex breakline label={t('Max Response Token')}
desc={t('By default, the maximum number of tokens that can be answered in a single response, it can be changed by the user by specifying API parameters.')}
content={
<ValuedSlider value={params.maxResponseToken} min={100} max={4100}
step={100}
input
onChange={(e, data) => {
setParams({
maxResponseToken: data.value
});
}} />
} />
<Labeled flex breakline label={t('Temperature')}
desc={t('Sampling temperature, it\'s like giving alcohol to a model, the higher the stronger the randomness and creativity, while the lower, the more focused and deterministic it will be.')}
content={
<ValuedSlider value={params.temperature} min={0} max={2} step={0.1}
input
onChange={(e, data) => {
setParams({
temperature: data.value
});
}} />
} />
<Labeled flex breakline label={t('Top_P')}
desc={t('Just like feeding sedatives to the model. Consider the results of the top n% probability mass, 0.1 considers the top 10%, with higher quality but more conservative, 1 considers all results, with lower quality but more diverse.')}
content={
<ValuedSlider value={params.topP} min={0} max={1} step={0.1} input
onChange={(e, data) => {
setParams({
topP: data.value
});
}} />
} />
<div className="grow" />
<Checkbox className="select-none"
size="large" label={t('Use Local Sound Font')} checked={params.useLocalSoundFont}
onChange={async (_, data) => {
if (data.checked) {
if (!await FileExists('assets/sound-font/accordion/instrument.json')) {
toast(t('Failed to load local sound font, please check if the files exist - assets/sound-font'),
{ type: 'warning' });
return;
}
}
}
setParams({
useLocalSoundFont: data.checked as boolean
});
setSoundFont();
}} />
<Checkbox className="select-none"
size="large" label={t('Auto Play At The End')} checked={params.autoPlay} onChange={(_, data) => {
setParams({
useLocalSoundFont: data.checked as boolean
autoPlay: data.checked as boolean
});
setSoundFont();
}} />
<Checkbox className="select-none"
size="large" label={t('Auto Play At The End')} checked={params.autoPlay} onChange={(_, data) => {
setParams({
autoPlay: data.checked as boolean
});
}} />
{commonStore.platform !== 'web' &&
<Labeled flex breakline label={t('MIDI Input')}
desc={t('Select the MIDI input device to be used.')}
content={
<div className="flex flex-col gap-1">
<Dropdown style={{ minWidth: 0 }}
value={commonStore.activeMidiDeviceIndex === -1 ? t('None')! : commonStore.midiPorts[commonStore.activeMidiDeviceIndex].name}
selectedOptions={[commonStore.activeMidiDeviceIndex.toString()]}
onOptionSelect={(_, data) => {
if (data.optionValue) {
const index = Number(data.optionValue);
let action = (index === -1)
? () => CloseMidiPort()
: () => OpenMidiPort(index);
action().then(() => {
commonStore.setActiveMidiDeviceIndex(index);
}).catch((e) => {
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
});
}
}}>
<Option value={'-1'}>{t('None')!}</Option>
{commonStore.midiPorts.map((p, i) =>
<Option key={i} value={i.toString()}>{p.name}</Option>)
}
</Dropdown>
<AudiotrackButton setPrompt={setPrompt} />
</div>
} />
}
</div>
<div className="flex justify-between gap-2">
<ToolTipButton desc={t('Regenerate')} icon={<ArrowSync20Regular />} onClick={() => {
compositionSseController?.abort();
@@ -311,7 +333,7 @@ const CompositionPanel: FC = observer(() => {
ref={playerRef}
style={{ width: '100%' }}
/>
<Button icon={<Save28Regular />}
<Button icon={<Save28Regular />} size={mq ? 'large' : 'medium'} appearance={mq ? 'secondary' : 'subtle'}
onClick={() => {
if (params.midi) {
OpenSaveFileDialogBytes('*.mid', 'music.mid', Array.from(new Uint8Array(params.midi))).then((path) => {
@@ -319,7 +341,7 @@ const CompositionPanel: FC = observer(() => {
toastWithButton(t('File Saved'), t('Open'), () => {
OpenFileFolder(path, false);
});
}).catch((e: any) => {
}).catch((e) => {
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
});
} else {
@@ -327,7 +349,7 @@ const CompositionPanel: FC = observer(() => {
}
}}
>
{t('Save')}
{mq ? t('Save') : ''}
</Button>
</div>
</div>
@@ -335,7 +357,7 @@ const CompositionPanel: FC = observer(() => {
);
});

export const Composition: FC = observer(() => {
const Composition: FC = observer(() => {
return (
<div className="flex flex-col gap-1 p-2 h-full overflow-hidden">
<WorkHeader />
@@ -343,3 +365,5 @@ export const Composition: FC = observer(() => {
</div>
);
});

export default Composition;
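The new MIDI Input dropdown drives the `OpenMidiPort`/`CloseMidiPort` Wails bindings imported at the top of the file: index -1 closes the active port, anything else opens that device, and the store index is only committed once the native call succeeds. The same flow as a standalone function (the bindings are passed in here only to keep the sketch self-contained):

async function selectMidiDevice(
  index: number,
  openPort: (i: number) => Promise<void>,
  closePort: () => Promise<void>,
  onSelected: (i: number) => void
): Promise<void> {
  const action = index === -1 ? () => closePort() : () => openPort(index);
  await action();
  onSelected(index); // commit the index only after the port call succeeds
}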
@@ -1,6 +1,19 @@
import { Dropdown, Input, Label, Option, Select, Switch, Text } from '@fluentui/react-components';
import {
Accordion,
AccordionHeader,
AccordionItem,
AccordionPanel,
Checkbox,
Dropdown,
Input,
Label,
Option,
Select,
Switch,
Text
} from '@fluentui/react-components';
import { AddCircle20Regular, DataUsageSettings20Regular, Delete20Regular, Save20Regular } from '@fluentui/react-icons';
import React, { FC } from 'react';
import React, { FC, useEffect, useRef } from 'react';
import { Section } from '../components/Section';
import { Labeled } from '../components/Labeled';
import { ToolTipButton } from '../components/ToolTipButton';
@@ -14,51 +27,31 @@ import { useNavigate } from 'react-router';
import { RunButton } from '../components/RunButton';
import { updateConfig } from '../apis';
import { ConvertModel, FileExists, GetPyError } from '../../wailsjs/go/backend_golang/App';
import { getStrategy } from '../utils';
import { checkDependencies, getStrategy } from '../utils';
import { useTranslation } from 'react-i18next';
import { WindowShow } from '../../wailsjs/runtime/runtime';
import { WindowShow } from '../../wailsjs/runtime';
import strategyImg from '../assets/images/strategy.jpg';
import strategyZhImg from '../assets/images/strategy_zh.jpg';
import { ResetConfigsButton } from '../components/ResetConfigsButton';
import { useMediaQuery } from 'usehooks-ts';
import { ApiParameters, Device, ModelParameters, Precision } from '../types/configs';
import { convertToSt } from '../utils/convert-to-st';

export type ApiParameters = {
apiPort: number
maxResponseToken: number;
temperature: number;
topP: number;
presencePenalty: number;
frequencyPenalty: number;
}

export type Device = 'CPU' | 'CUDA' | 'MPS' | 'Custom';
export type Precision = 'fp16' | 'int8' | 'fp32';

export type ModelParameters = {
// different models can not have the same name
modelName: string;
device: Device;
precision: Precision;
storedLayers: number;
maxStoredLayers: number;
useCustomCuda?: boolean;
customStrategy?: string;
}

export type ModelConfig = {
// different configs can have the same name
name: string;
apiParameters: ApiParameters
modelParameters: ModelParameters
}

export const Configs: FC = observer(() => {
const Configs: FC = observer(() => {
const { t } = useTranslation();
const [selectedIndex, setSelectedIndex] = React.useState(commonStore.currentModelConfigIndex);
const [selectedConfig, setSelectedConfig] = React.useState(commonStore.modelConfigs[selectedIndex]);
const [displayStrategyImg, setDisplayStrategyImg] = React.useState(false);
const advancedHeaderRef = useRef<HTMLDivElement>(null);
const mq = useMediaQuery('(min-width: 640px)');
const navigate = useNavigate();
const port = selectedConfig.apiParameters.apiPort;

useEffect(() => {
if (advancedHeaderRef.current)
(advancedHeaderRef.current.firstElementChild as HTMLElement).style.padding = '0';
}, []);

const updateSelectedIndex = (newIndex: number) => {
setSelectedIndex(newIndex);
setSelectedConfig(commonStore.modelConfigs[newIndex]);
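The inline config types above move to a shared ../types/configs module. Its exact contents are not part of this diff; a reconstruction from the deleted definitions, with the widened unions inferred from the dropdown options later in this changeset (the `CUDA-Beta`/`WebGPU` devices, `nf4` precision, and the tokenizer fields are educated guesses, not confirmed by the diff):

// Plausible shape of ../types/configs after the move.
export type ApiParameters = {
  apiPort: number;
  maxResponseToken: number;
  temperature: number;
  topP: number;
  presencePenalty: number;
  frequencyPenalty: number;
};

export type Device = 'CPU' | 'CUDA' | 'CUDA-Beta' | 'WebGPU' | 'MPS' | 'Custom';
export type Precision = 'fp16' | 'int8' | 'fp32' | 'nf4';

export type ModelParameters = {
  modelName: string; // different models can not have the same name
  device: Device;
  precision: Precision;
  storedLayers: number;
  maxStoredLayers: number;
  useCustomCuda?: boolean;
  customStrategy?: string;
  useCustomTokenizer?: boolean; // referenced by the new Advanced accordion below
  customTokenizer?: string;
};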
@@ -128,7 +121,8 @@ export const Configs: FC = observer(() => {
setSelectedIndex(0);
setSelectedConfig(commonStore.modelConfigs[0]);
}} />
<ToolTipButton desc={t('Save Config')} icon={<Save20Regular />} onClick={onClickSave} />
<ToolTipButton desc={mq ? '' : t('Save Config')} icon={<Save20Regular />} text={mq ? t('Save Config') : null}
onClick={onClickSave} />
</div>
<div className="flex items-center gap-4">
<Label>{t('Config Name')}</Label>
@@ -157,7 +151,7 @@ export const Configs: FC = observer(() => {
desc={t('By default, the maximum number of tokens that can be answered in a single response, it can be changed by the user by specifying API parameters.')}
content={
<ValuedSlider value={selectedConfig.apiParameters.maxResponseToken} min={100} max={8100}
step={400}
step={100}
input
onChange={(e, data) => {
setSelectedConfigApiParams({
@@ -237,40 +231,50 @@ export const Configs: FC = observer(() => {
}} />
</div>
} />
<ToolTipButton text={t('Convert')}
desc={t('Convert model with these configs. Using a converted model will greatly improve the loading speed, but model parameters of the converted model cannot be modified.')}
onClick={async () => {
if (commonStore.platform == 'darwin') {
toast(t('MacOS is not yet supported for performing this operation, please do it manually.'), { type: 'info' });
return;
} else if (commonStore.platform == 'linux') {
toast(t('Linux is not yet supported for performing this operation, please do it manually.'), { type: 'info' });
return;
}

const modelPath = `${commonStore.settings.customModelsPath}/${selectedConfig.modelParameters.modelName}`;
if (await FileExists(modelPath)) {
const strategy = getStrategy(selectedConfig);
const newModelPath = modelPath + '-' + strategy.replace(/[:> *+]/g, '-');
toast(t('Start Converting'), { autoClose: 1000, type: 'info' });
ConvertModel(commonStore.settings.customPythonPath, modelPath, strategy, newModelPath).then(async () => {
if (!await FileExists(newModelPath + '.pth')) {
toast(t('Convert Failed') + ' - ' + await GetPyError(), { type: 'error' });
} else {
toast(`${t('Convert Success')} - ${newModelPath}`, { type: 'success' });
{
selectedConfig.modelParameters.device !== 'WebGPU' ?
<ToolTipButton text={t('Convert')}
desc={t('Convert model with these configs. Using a converted model will greatly improve the loading speed, but model parameters of the converted model cannot be modified.')}
onClick={async () => {
if (commonStore.platform === 'darwin') {
toast(t('MacOS is not yet supported for performing this operation, please do it manually.') + ' (backend-python/convert_model.py)', { type: 'info' });
return;
} else if (commonStore.platform === 'linux') {
toast(t('Linux is not yet supported for performing this operation, please do it manually.') + ' (backend-python/convert_model.py)', { type: 'info' });
return;
}
}).catch(e => {
const errMsg = e.message || e;
if (errMsg.includes('path contains space'))
toast(`${t('Convert Failed')} - ${t('File Path Cannot Contain Space')}`, { type: 'error' });
else
toast(`${t('Convert Failed')} - ${e.message || e}`, { type: 'error' });
});
setTimeout(WindowShow, 1000);
} else {
toast(`${t('Model Not Found')} - ${modelPath}`, { type: 'error' });
}
}} />

const ok = await checkDependencies(navigate);
if (!ok)
return;

const modelPath = `${commonStore.settings.customModelsPath}/${selectedConfig.modelParameters.modelName}`;
if (await FileExists(modelPath)) {
const strategy = getStrategy(selectedConfig);
const newModelPath = modelPath + '-' + strategy.replace(/[:> *+]/g, '-');
toast(t('Start Converting'), { autoClose: 1000, type: 'info' });
ConvertModel(commonStore.settings.customPythonPath, modelPath, strategy, newModelPath).then(async () => {
if (!await FileExists(newModelPath + '.pth')) {
toast(t('Convert Failed') + ' - ' + await GetPyError(), { type: 'error' });
} else {
toast(`${t('Convert Success')} - ${newModelPath}`, { type: 'success' });
}
}).catch(e => {
const errMsg = e.message || e;
if (errMsg.includes('path contains space'))
toast(`${t('Convert Failed')} - ${t('File Path Cannot Contain Space')}`, { type: 'error' });
else
toast(`${t('Convert Failed')} - ${e.message || e}`, { type: 'error' });
});
setTimeout(WindowShow, 1000);
} else {
toast(`${t('Model Not Found')} - ${modelPath}`, { type: 'error' });
}
}} /> :
<ToolTipButton text={t('Convert To Safe Tensors Format')}
desc=""
onClick={() => convertToSt(navigate, selectedConfig)} />
}
<Labeled label={t('Strategy')} content={
<Dropdown style={{ minWidth: 0 }} className="grow" value={t(selectedConfig.modelParameters.device)!}
selectedOptions={[selectedConfig.modelParameters.device]}
@@ -284,12 +288,14 @@ export const Configs: FC = observer(() => {
<Option value="CPU">CPU</Option>
{commonStore.platform === 'darwin' && <Option value="MPS">MPS</Option>}
<Option value="CUDA">CUDA</Option>
<Option value="CUDA-Beta">{t('CUDA (Beta, Faster)')!}</Option>
<Option value="WebGPU">WebGPU</Option>
<Option value="Custom">{t('Custom')!}</Option>
</Dropdown>
} />
{
selectedConfig.modelParameters.device != 'Custom' && <Labeled label={t('Precision')}
desc={t('int8 uses less VRAM, but has slightly lower quality. fp16 has higher quality, and fp32 has the best quality.')}
selectedConfig.modelParameters.device !== 'Custom' && <Labeled label={t('Precision')}
desc={t('int8 uses less VRAM, but has slightly lower quality. fp16 has higher quality.')}
content={
<Dropdown style={{ minWidth: 0 }} className="grow"
value={selectedConfig.modelParameters.precision}
@@ -301,19 +307,21 @@ export const Configs: FC = observer(() => {
});
}
}}>
<Option>fp16</Option>
{selectedConfig.modelParameters.device !== 'CPU' && selectedConfig.modelParameters.device !== 'MPS' &&
<Option>fp16</Option>}
<Option>int8</Option>
<Option>fp32</Option>
{selectedConfig.modelParameters.device === 'WebGPU' && <Option>nf4</Option>}
{selectedConfig.modelParameters.device !== 'WebGPU' && <Option>fp32</Option>}
</Dropdown>
} />
}
{
selectedConfig.modelParameters.device == 'CUDA' &&
selectedConfig.modelParameters.device.includes('CUDA') &&
<Labeled label={t('Current Strategy')}
content={<Text> {getStrategy(selectedConfig)} </Text>} />
}
{
selectedConfig.modelParameters.device == 'CUDA' &&
selectedConfig.modelParameters.device.includes('CUDA') &&
<Labeled label={t('Stored Layers')}
desc={t('Number of the neural network layers loaded into VRAM, the more you load, the faster the speed, but it consumes more VRAM. (If your VRAM is not enough, it will fail to load)')}
content={
@@ -326,9 +334,7 @@ export const Configs: FC = observer(() => {
}} />
} />
}
{
selectedConfig.modelParameters.device == 'CUDA' && <div />
}
{selectedConfig.modelParameters.device.includes('CUDA') && <div />}
{
displayStrategyImg &&
<img style={{ width: '80vh', height: 'auto', zIndex: 100 }}
@@ -336,13 +342,13 @@ export const Configs: FC = observer(() => {
src={commonStore.settings.language === 'zh' ? strategyZhImg : strategyImg} />
}
{
selectedConfig.modelParameters.device == 'Custom' &&
selectedConfig.modelParameters.device === 'Custom' &&
<Labeled label="Strategy"
onMouseEnter={() => setDisplayStrategyImg(true)}
onMouseLeave={() => setDisplayStrategyImg(false)}
content={
<Input className="grow"
placeholder={commonStore.platform != 'darwin' ? 'cuda:0 fp16 *20 -> cuda:1 fp16' : 'mps fp32'}
placeholder={commonStore.platform !== 'darwin' ? 'cuda:0 fp16 *20 -> cuda:1 fp16' : 'mps fp32'}
value={selectedConfig.modelParameters.customStrategy}
onChange={(e, data) => {
setSelectedConfigModelParams({
@@ -351,11 +357,11 @@ export const Configs: FC = observer(() => {
}} />
} />
}
{selectedConfig.modelParameters.device == 'Custom' && <div />}
{selectedConfig.modelParameters.device === 'Custom' && <div />}
{
selectedConfig.modelParameters.device != 'CPU' && selectedConfig.modelParameters.device != 'MPS' &&
(selectedConfig.modelParameters.device.includes('CUDA') || selectedConfig.modelParameters.device === 'Custom') &&
<Labeled label={t('Use Custom CUDA kernel to Accelerate')}
desc={t('Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues. If it fails to start, please turn off this option.')}
desc={t('Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues (output garbled). If it fails to start, please turn off this option, or try to upgrade your gpu driver.')}
content={
<Switch checked={selectedConfig.modelParameters.useCustomCuda}
onChange={(e, data) => {
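The precision dropdown above now filters its options per device: fp16 is hidden on CPU/MPS, nf4 appears only for WebGPU, and fp32 disappears for WebGPU. The same rules as a pure function, derived directly from the JSX conditions (not from the real codebase):

type StrategyDevice = 'CPU' | 'CUDA' | 'CUDA-Beta' | 'WebGPU' | 'MPS' | 'Custom';

function precisionOptions(device: StrategyDevice): string[] {
  const options: string[] = [];
  if (device !== 'CPU' && device !== 'MPS') options.push('fp16'); // no half precision on CPU/MPS
  options.push('int8');
  if (device === 'WebGPU') options.push('nf4'); // 4-bit quantization, WebGPU only
  if (device !== 'WebGPU') options.push('fp32');
  return options;
}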
@@ -365,14 +371,62 @@ export const Configs: FC = observer(() => {
}} />
} />
}
{selectedConfig.modelParameters.device !== 'WebGPU' &&
<Accordion className="sm:col-span-2" collapsible
openItems={!commonStore.modelParamsCollapsed && 'advanced'}
onToggle={(e, data) => {
if (data.value === 'advanced')
commonStore.setModelParamsCollapsed(!commonStore.modelParamsCollapsed);
}}>
<AccordionItem value="advanced">
<AccordionHeader ref={advancedHeaderRef} size="small">{t('Advanced')}</AccordionHeader>
<AccordionPanel>
<div className="flex flex-col">
<div className="flex grow">
<Checkbox className="select-none"
size="large" label={t('Use Custom Tokenizer')}
checked={selectedConfig.modelParameters.useCustomTokenizer}
onChange={(_, data) => {
setSelectedConfigModelParams({
useCustomTokenizer: data.checked as boolean
});
}} />
<Input className="grow"
placeholder={t('Tokenizer Path (e.g. backend-python/rwkv_pip/20B_tokenizer.json)')!}
value={selectedConfig.modelParameters.customTokenizer}
onChange={(e, data) => {
setSelectedConfigModelParams({
customTokenizer: data.value
});
}} />
</div>
</div>
</AccordionPanel>
</AccordionItem>
</Accordion>
}
</div>
}
/>
</div>
<div className="flex flex-row-reverse sm:fixed bottom-2 right-2">
<RunButton onClickRun={onClickSave} />
<div className="flex gap-2">
{selectedConfig.modelParameters.device !== 'WebGPU'
&& <Checkbox className="select-none"
size="large" label={t('Enable WebUI')}
checked={selectedConfig.enableWebUI}
onChange={(_, data) => {
setSelectedConfig({
...selectedConfig,
enableWebUI: data.checked as boolean
});
}} />}
<RunButton onClickRun={onClickSave} />
</div>
</div>
</div>
} />
);
});

export default Configs;
@@ -1,28 +1,22 @@
import React, { FC } from 'react';
import React, { FC, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { Page } from '../components/Page';
import { observer } from 'mobx-react-lite';
import commonStore from '../stores/commonStore';
import { Divider, Field, ProgressBar } from '@fluentui/react-components';
import { bytesToGb, bytesToKb, bytesToMb } from '../utils';
import { bytesToGb, bytesToKb, bytesToMb, refreshLocalModels } from '../utils';
import { ToolTipButton } from '../components/ToolTipButton';
import { Folder20Regular, Pause20Regular, Play20Regular } from '@fluentui/react-icons';
import { AddToDownloadList, OpenFileFolder, PauseDownload } from '../../wailsjs/go/backend_golang/App';

export type DownloadStatus = {
name: string;
path: string;
url: string;
transferred: number;
size: number;
speed: number;
progress: number;
downloading: boolean;
done: boolean;
}

export const Downloads: FC = observer(() => {
const Downloads: FC = observer(() => {
const { t } = useTranslation();
const finishedModelsLen = commonStore.downloadList.filter((status) => status.done && status.name.endsWith('.pth')).length;
useEffect(() => {
if (finishedModelsLen > 0)
refreshLocalModels({ models: commonStore.modelSourceList }, false);
console.log('finishedModelsLen:', finishedModelsLen);
}, [finishedModelsLen]);

let displayList = commonStore.downloadList.slice();
const downloadListNames = displayList.map(s => s.name);
@@ -85,3 +79,5 @@ export const Downloads: FC = observer(() => {
} />
);
});

export default Downloads;
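The new effect is keyed on a derived count rather than the whole list: `refreshLocalModels` only re-runs when the number of finished .pth downloads changes, not on every progress tick. The counting step in isolation (the two-field `DownloadStatus` here is a slimmed-down stand-in for the full type this diff moves out of the file):

type DownloadStatusLite = { name: string; done: boolean };

function countFinishedModels(list: DownloadStatusLite[]): number {
  // Only completed .pth files count as newly available local models.
  return list.filter((s) => s.done && s.name.endsWith('.pth')).length;
}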
@@ -1,11 +1,13 @@
import { CompoundButton, Link, Text } from '@fluentui/react-components';
import React, { FC, ReactElement } from 'react';
import React, { FC } from 'react';
import banner from '../assets/images/banner.jpg';
import {
Chat20Regular,
ClipboardEdit20Regular,
DataUsageSettings20Regular,
DocumentSettings20Regular
DocumentSettings20Regular,
MusicNote220Regular,
Settings20Regular
} from '@fluentui/react-icons';
import { useNavigate } from 'react-router';
import { observer } from 'mobx-react-lite';
@@ -14,21 +16,13 @@ import manifest from '../../../manifest.json';
import { BrowserOpenURL } from '../../wailsjs/runtime';
import { useTranslation } from 'react-i18next';
import { ConfigSelector } from '../components/ConfigSelector';
import MarkdownRender from '../components/MarkdownRender';
import commonStore from '../stores/commonStore';
import { Completion } from './Completion';
import { ResetConfigsButton } from '../components/ResetConfigsButton';
import { AdvancedGeneralSettings } from './Settings';
import { NavCard } from '../types/home';
import { LazyImportComponent } from '../components/LazyImportComponent';

export type IntroductionContent = { [lang: string]: string }

type NavCard = {
label: string;
desc: string;
path: string;
icon: ReactElement;
};

const navCards: NavCard[] = [
const clientNavCards: NavCard[] = [
{
label: 'Chat',
desc: 'Go to chat page',
@@ -55,7 +49,36 @@ const navCards: NavCard[] = [
}
];

export const Home: FC = observer(() => {
const webNavCards: NavCard[] = [
{
label: 'Chat',
desc: 'Go to chat page',
path: '/chat',
icon: <Chat20Regular />
},
{
label: 'Completion',
desc: 'Writer, Translator, Role-playing',
path: '/completion',
icon: <ClipboardEdit20Regular />
},
{
label: 'Composition',
desc: '',
path: '/composition',
icon: <MusicNote220Regular />
},
{
label: 'Settings',
desc: '',
path: '/settings',
icon: <Settings20Regular />
}
];

const MarkdownRender = React.lazy(() => import('../components/MarkdownRender'));

const Home: FC = observer(() => {
const { t } = useTranslation();
const navigate = useNavigate();
const lang: string = commonStore.settings.language;
@@ -64,39 +87,64 @@ export const Home: FC = observer(() => {
navigate({ pathname: path });
};

return (
<div className="flex flex-col justify-between h-full">
<img className="rounded-xl select-none hidden sm:block"
style={{ maxHeight: '40%', margin: '0 auto' }} src={banner} />
<div className="flex flex-col gap-2">
<Text size={600} weight="medium">{t('Introduction')}</Text>
<div className="h-40 overflow-y-auto overflow-x-hidden p-1">
<MarkdownRender>
{lang in commonStore.introduction ? commonStore.introduction[lang] : commonStore.introduction['en']}
</MarkdownRender>
return commonStore.platform === 'web' ?
(
<div className="flex flex-col gap-2 h-full overflow-x-hidden overflow-y-auto">
<img className="rounded-xl select-none object-cover grow"
style={{ maxHeight: '40%' }} src={banner} />
<div className="grow"></div>
<div className="grid grid-cols-2 sm:grid-cols-4 gap-5">
{webNavCards.map(({ label, path, icon, desc }, index) => (
<CompoundButton icon={icon} secondaryContent={t(desc)} key={`${path}-${index}`} value={path}
size="large" onClick={() => onClickNavCard(path)}>
{t(label)}
</CompoundButton>
))}
</div>
</div>
<div className="grid grid-cols-2 sm:grid-cols-4 gap-5">
{navCards.map(({ label, path, icon, desc }, index) => (
<CompoundButton icon={icon} secondaryContent={t(desc)} key={`${path}-${index}`} value={path}
size="large" onClick={() => onClickNavCard(path)}>
{t(label)}
</CompoundButton>
))}
</div>
<div className="flex flex-col gap-2">
<div className="flex flex-row-reverse sm:fixed bottom-2 right-2">
<div className="flex gap-3">
<ResetConfigsButton />
<ConfigSelector />
<RunButton />
<div className="flex flex-col gap-2">
<AdvancedGeneralSettings />
<div className="flex gap-4 items-end">
{t('Version')}: {manifest.version}
<Link onClick={() => BrowserOpenURL('https://github.com/josStorer/RWKV-Runner')}>{t('Help')}</Link>
</div>
</div>
<div className="flex gap-4 items-end">
{t('Version')}: {manifest.version}
<Link onClick={() => BrowserOpenURL('https://github.com/josStorer/RWKV-Runner')}>{t('Help')}</Link>
</div>
)
: (
<div className="flex flex-col justify-between h-full">
<img className="rounded-xl select-none object-cover hidden sm:block"
style={{ maxHeight: '40%' }} src={banner} />
<div className="flex flex-col gap-2">
<Text size={600} weight="medium">{t('Introduction')}</Text>
<div className="h-40 overflow-y-auto overflow-x-hidden p-1">
<LazyImportComponent lazyChildren={MarkdownRender}>
{lang in commonStore.introduction ? commonStore.introduction[lang] : commonStore.introduction['en']}
</LazyImportComponent>
</div>
</div>
<div className="grid grid-cols-2 sm:grid-cols-4 gap-5">
{clientNavCards.map(({ label, path, icon, desc }, index) => (
<CompoundButton icon={icon} secondaryContent={t(desc)} key={`${path}-${index}`} value={path}
size="large" onClick={() => onClickNavCard(path)}>
{t(label)}
</CompoundButton>
))}
</div>
<div className="flex flex-col gap-2">
<div className="flex flex-row-reverse sm:fixed bottom-2 right-2">
<div className="flex gap-3">
<ResetConfigsButton />
<ConfigSelector />
<RunButton />
</div>
</div>
<div className="flex gap-4 items-end">
{t('Version')}: {manifest.version}
<Link onClick={() => BrowserOpenURL('https://github.com/josStorer/RWKV-Runner')}>{t('Help')}</Link>
</div>
</div>
</div>
</div>
);
);
});

export default Home;
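Home now code-splits the Markdown renderer via `React.lazy` and renders it through a `LazyImportComponent`, which presumably just wraps the lazy component in `React.Suspense`. A minimal sketch of that wrapper under those assumptions (the project's actual props and fallback may differ):

// Hypothetical shape of LazyImportComponent; only the prop name `lazyChildren`
// is taken from the usage above.
import React, { FC, LazyExoticComponent, ReactNode, Suspense } from 'react';

const LazyImportComponent: FC<{
  lazyChildren: LazyExoticComponent<FC<any>>;
  children?: ReactNode;
}> = ({ lazyChildren: Lazy, children }) => (
  <Suspense fallback={null}>
    <Lazy>{children}</Lazy>
  </Suspense>
);

The benefit is that the Markdown/syntax-highlighting bundle is only fetched when the Introduction panel is actually shown, which matters most for the new web build.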
@@ -1,5 +1,6 @@
 import React, { FC } from 'react';
 import {
+  Checkbox,
   createTableColumn,
   DataGrid,
   DataGridBody,
@@ -19,24 +20,10 @@ import commonStore from '../stores/commonStore';
 import { BrowserOpenURL } from '../../wailsjs/runtime';
 import { AddToDownloadList, OpenFileFolder } from '../../wailsjs/go/backend_golang/App';
 import { Page } from '../components/Page';
-import { bytesToGb, refreshModels, saveConfigs, toastWithButton } from '../utils';
+import { bytesToGb, getHfDownloadUrl, refreshModels, saveConfigs, toastWithButton } from '../utils';
 import { useTranslation } from 'react-i18next';
 import { useNavigate } from 'react-router';
 
-export type ModelSourceItem = {
-  name: string;
-  size: number;
-  lastUpdated: string;
-  desc?: { [lang: string]: string | undefined; };
-  SHA256?: string;
-  url?: string;
-  downloadUrl?: string;
-  isComplete?: boolean;
-  isLocal?: boolean;
-  localSize?: number;
-  lastUpdatedMs?: number;
-  hide?: boolean;
-};
+import { ModelSourceItem } from '../types/models';
 
 const columns: TableColumnDefinition<ModelSourceItem>[] = [
   createTableColumn<ModelSourceItem>({
@@ -153,7 +140,7 @@ const columns: TableColumnDefinition<ModelSourceItem>[] = [
           navigate({ pathname: '/downloads' });
         },
         { autoClose: 3000 });
-      AddToDownloadList(`${commonStore.settings.customModelsPath}/${item.name}`, item.downloadUrl!);
+      AddToDownloadList(`${commonStore.settings.customModelsPath}/${item.name}`, getHfDownloadUrl(item.downloadUrl!));
     }} />}
     {item.url && <ToolTipButton desc={t('Open Url')} icon={<Open20Regular />} onClick={() => {
       BrowserOpenURL(item.url!);
@@ -165,7 +152,7 @@ const columns: TableColumnDefinition<ModelSourceItem>[] = [
   })
 ];
 
-export const Models: FC = observer(() => {
+const Models: FC = observer(() => {
   const { t } = useTranslation();
 
   return (
@@ -174,10 +161,21 @@ export const Models: FC = observer(() => {
       <div className="flex flex-col gap-1">
        <div className="flex justify-between items-center">
          <Text weight="medium">{t('Model Source Manifest List')}</Text>
-         <ToolTipButton desc={t('Refresh')} icon={<ArrowClockwise20Regular />} onClick={() => {
-           refreshModels(false);
-           saveConfigs();
-         }} />
+         <div className="flex">
+           {commonStore.settings.language === 'zh' &&
+             <Checkbox className="select-none"
+               size="large" label={t('Use Hugging Face Mirror')}
+               checked={commonStore.settings.useHfMirror}
+               onChange={(_, data) => {
+                 commonStore.setSettings({
+                   useHfMirror: data.checked as boolean
+                 });
+               }} />}
+           <ToolTipButton desc={t('Refresh')} icon={<ArrowClockwise20Regular />} onClick={() => {
+             refreshModels(false);
+             saveConfigs();
+           }} />
+         </div>
        </div>
        <Text size={100}>
          {t('Provide JSON file URLs for the models manifest. Separate URLs with semicolons. The "models" field in JSON files will be parsed into the following table.')}
@@ -220,3 +218,5 @@ export const Models: FC = observer(() => {
     } />
   );
 });
 
+export default Models;

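The `getHfDownloadUrl` helper added to `../utils` is not shown in this diff. A plausible sketch, assuming it simply rewrites the huggingface.co host to a mirror when the new "Use Hugging Face Mirror" setting is enabled; the mirror host below is an assumption, and the real helper presumably reads `commonStore.settings.useHfMirror` itself, since the call site above passes only the URL:

```ts
const HF_MIRROR_HOST = 'hf-mirror.com'; // assumed mirror host

// Sketch: route a Hugging Face download URL through the mirror when the
// user has opted in; otherwise return the URL unchanged.
export function getHfDownloadUrl(url: string, useHfMirror: boolean): string {
  if (useHfMirror && url.includes('huggingface.co'))
    return url.replace('huggingface.co', HF_MIRROR_HOST);
  return url;
}
```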
@@ -1,14 +1,14 @@
 import React, { FC, useState } from 'react';
 import { DragDropContext, Draggable, Droppable, DropResult } from 'react-beautiful-dnd';
 import commonStore from '../../stores/commonStore';
-import { Preset } from './PresetsButton';
 import { observer } from 'mobx-react-lite';
 import { v4 as uuid } from 'uuid';
 import { Button, Card, Dropdown, Option, Textarea } from '@fluentui/react-components';
 import { useTranslation } from 'react-i18next';
 import { ToolTipButton } from '../../components/ToolTipButton';
 import { Delete20Regular, ReOrderDotsVertical20Regular } from '@fluentui/react-icons';
-import { ConversationMessage, Role } from '../Chat';
+import { Preset } from '../../types/presets';
+import { ConversationMessage, Role } from '../../types/chat';
 
 type Item = {
   id: string;
@@ -31,7 +31,7 @@ const reorder = (list: Item[], startIndex: number, endIndex: number) => {
   return result;
 };
 
-export const MessagesEditor: FC = observer(() => {
+const MessagesEditor: FC = observer(() => {
   const { t } = useTranslation();
 
   const editingPreset = commonStore.editingPreset!;
@@ -108,7 +108,8 @@ export const MessagesEditor: FC = observer(() => {
             style={{ borderTopRightRadius: 0, borderBottomRightRadius: 0 }}>
             <ReOrderDotsVertical20Regular />
           </Card>
-          <Dropdown style={{ minWidth: 0, borderRadius: 0 }} listbox={{ style: { minWidth: 0 } }}
+          <Dropdown style={{ minWidth: 0, borderRadius: 0 }}
+            listbox={{ style: { minWidth: 'fit-content' } }}
             value={t(item.role)!}
             selectedOptions={[item.role]}
             onOptionSelect={(_, data) => {
@@ -152,3 +153,5 @@ export const MessagesEditor: FC = observer(() => {
     </div>
   );
 });
 
+export default MessagesEditor;

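A recurring pattern in this changeset: pages and editors switch from named exports to default exports because `React.lazy` resolves a module's `default` export. For comparison (the `.then` remapping shown for a named export is the standard workaround, not code from this diff):

```ts
import { lazy } from 'react';

// Default export: what this changeset enables.
const MessagesEditor = lazy(() => import('./MessagesEditor'));

// Keeping a named export would require remapping it to `default` instead:
const MessagesEditorNamed = lazy(() =>
  import('./MessagesEditor').then((m: any) => ({ default: m.MessagesEditor }))
);
```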
@@ -1,6 +1,6 @@
 // TODO refactor
 
-import React, { FC, PropsWithChildren, ReactElement, useState } from 'react';
+import React, { FC, lazy, PropsWithChildren, ReactElement, useState } from 'react';
 import {
   Button,
   Dialog,
@@ -25,40 +25,21 @@ import {
 } from '@fluentui/react-icons';
 import { ToolTipButton } from '../../components/ToolTipButton';
 import { useTranslation } from 'react-i18next';
-import { botName, Conversation, ConversationMessage, MessageType, userName } from '../Chat';
 import { SelectTabEventHandler } from '@fluentui/react-tabs';
 import { Labeled } from '../../components/Labeled';
 import commonStore from '../../stores/commonStore';
 import logo from '../../assets/images/logo.png';
 import { observer } from 'mobx-react-lite';
-import { MessagesEditor } from './MessagesEditor';
 import { ClipboardGetText, ClipboardSetText } from '../../../wailsjs/runtime';
 import { toast } from 'react-toastify';
 import { CustomToastContainer } from '../../components/CustomToastContainer';
 import { v4 as uuid } from 'uuid';
+import { absPathAsset } from '../../utils';
+import { Preset, PresetsNavigationItem } from '../../types/presets';
+import { botName, Conversation, MessageType, userName } from '../../types/chat';
+import { LazyImportComponent } from '../../components/LazyImportComponent';
 
-export type PresetType = 'chat' | 'completion' | 'chatInCompletion'
-
-export type Preset = {
-  name: string,
-  tag: string,
-  // if name and sourceUrl are same, it will be overridden when importing
-  sourceUrl: string,
-  desc: string,
-  avatarImg: string,
-  type: PresetType,
-  // chat
-  welcomeMessage: string,
-  messages: ConversationMessage[],
-  displayPresetMessages: boolean,
-  // completion
-  prompt: string,
-  stop: string,
-  injectStart: string,
-  injectEnd: string,
-}
-
-export const defaultPreset: Preset = {
+const defaultPreset: Preset = {
   name: 'RWKV',
   tag: 'default',
   sourceUrl: '',
@@ -74,6 +55,8 @@ export const defaultPreset: Preset = {
   injectEnd: ''
 };
 
+const MessagesEditor = lazy(() => import('./MessagesEditor'));
+
 const setActivePreset = (preset: Preset) => {
   commonStore.setActivePreset(preset);
   //TODO if (preset.displayPresetMessages) {
@@ -97,7 +80,7 @@ const setActivePreset = (preset: Preset) => {
   //}
 };
 
-export const PresetCardFrame: FC<PropsWithChildren & { onClick?: () => void }> = (props) => {
+const PresetCardFrame: FC<PropsWithChildren & { onClick?: () => void }> = (props) => {
   return <Button
     className="flex flex-col gap-1 w-32 h-56 break-all"
     style={{ minWidth: 0, borderRadius: '0.75rem', justifyContent: 'unset' }}
@@ -107,7 +90,7 @@ export const PresetCardFrame: FC<PropsWithChildren & { onClick?: () => void }> =
   </Button>;
 };
 
-export const PresetCard: FC<{
+const PresetCard: FC<{
   avatarImg: string,
   name: string,
   desc: string,
@@ -121,7 +104,7 @@ export const PresetCard: FC<{
   const { t } = useTranslation();
 
   return <PresetCardFrame onClick={onClick}>
-    <img src={avatarImg} className="rounded-xl select-none ml-auto mr-auto h-28" />
+    <img src={absPathAsset(avatarImg)} className="rounded-xl select-none ml-auto mr-auto h-28" />
     <Text size={400}>{name}</Text>
     <Text size={200} style={{
       overflow: 'hidden', textOverflow: 'ellipsis',
@@ -143,7 +126,7 @@ export const PresetCard: FC<{
   </PresetCardFrame>;
 });
 
-export const ChatPresetEditor: FC<{
+const ChatPresetEditor: FC<{
   triggerButton: ReactElement,
   presetIndex: number
 }> = observer(({ triggerButton, presetIndex }) => {
@@ -164,8 +147,14 @@ export const ChatPresetEditor: FC<{
   const importPreset = () => {
     ClipboardGetText().then((text) => {
       try {
+        if (!text.trim().startsWith('{'))
+          text = new TextDecoder().decode(
+            new Uint8Array(atob(text)
+              .split('')
+              .map((c) => c.charCodeAt(0))));
         const preset = JSON.parse(text);
         setEditingPreset(preset);
+        setEditingMessages(false);
         toast(t('Imported successfully'), {
           type: 'success',
           autoClose: 1000
@@ -239,7 +228,7 @@ export const ChatPresetEditor: FC<{
         <Button appearance="subtle" icon={<Dismiss20Regular />} />
       </DialogTrigger>
     </div>
-    <img src={editingPreset.avatarImg} className="rounded-xl select-none ml-auto mr-auto h-28" />
+    <img src={absPathAsset(editingPreset.avatarImg)} className="rounded-xl select-none ml-auto mr-auto h-28" />
     <Labeled flex breakline label={t('Name')}
       content={
         <div className="flex gap-2">
@@ -250,14 +239,41 @@ export const ChatPresetEditor: FC<{
           }} />
          <Button onClick={() => {
            setEditingMessages(!editingMessages);
-         }}>{!editingMessages ? t('Edit Messages') : t('Go Back')}</Button>
+         }}>{!editingMessages ? t('Edit Character Settings') : t('Go Back')}</Button>
         </div>
       } />
     {
       editingMessages ?
-        <MessagesEditor /> :
+        <div className="flex flex-col gap-1">
+          <Labeled flex spaceBetween label={t('Insert default system prompt at the beginning')}
+            content={
+              <Switch checked={editingPreset.presystem === undefined ? true : editingPreset.presystem}
+                onChange={(e, data) => {
+                  setEditingPreset({
+                    presystem: data.checked
+                  });
+                }} />
+            } />
+          <Labeled flex breakline label={t('User Name')}
+            content={
+              <Input placeholder="User" value={editingPreset.userName} onChange={(e, data) => {
+                setEditingPreset({
+                  userName: data.value
+                });
+              }} />
+            } />
+          <Labeled flex breakline label={t('Assistant Name')}
+            content={
+              <Input placeholder="Assistant" value={editingPreset.assistantName} onChange={(e, data) => {
+                setEditingPreset({
+                  assistantName: data.value
+                });
+              }} />
+            } />
+          <LazyImportComponent lazyChildren={MessagesEditor} />
+        </div> :
         <div className="flex flex-col gap-1 p-2 overflow-x-hidden overflow-y-auto">
-          <Labeled flex breakline label={t('Description')}
+          <Labeled flex breakline label={`${t('Description')} (${t('Preview Only')})`}
             content={
               <Input value={editingPreset.desc} onChange={(e, data) => {
                 setEditingPreset({
@@ -319,7 +335,7 @@ export const ChatPresetEditor: FC<{
   </Dialog>;
 });
 
-export const ChatPresets: FC = observer(() => {
+const ChatPresets: FC = observer(() => {
   const { t } = useTranslation();
 
   return <div className="flex flex-wrap gap-2">
@@ -355,11 +371,6 @@ export const ChatPresets: FC = observer(() => {
   </div>;
 });
 
-type PresetsNavigationItem = {
-  icon: ReactElement;
-  element: ReactElement;
-};
-
 const pages: { [label: string]: PresetsNavigationItem } = {
   Chat: {
     icon: <Chat20Regular />,
@@ -375,7 +386,7 @@ const pages: { [label: string]: PresetsNavigationItem } = {
   }
 };
 
-export const PresetsManager: FC<{ initTab: string }> = ({ initTab }) => {
+const PresetsManager: FC<{ initTab: string }> = ({ initTab }) => {
   const { t } = useTranslation();
   const [tab, setTab] = useState(initTab);

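The import path above now accepts either raw JSON or base64-encoded JSON from the clipboard, using the `atob`/`TextDecoder` round-trip to survive non-ASCII preset content. A matching encoder (a hypothetical helper, not part of this diff) would UTF-8 encode the preset before calling `btoa`:

```ts
// Illustrative counterpart to the import path: serialize a preset, UTF-8
// encode it, then base64 it so the clipboard payload is ASCII-safe.
export function encodePresetForSharing(preset: object): string {
  const json = JSON.stringify(preset);
  const bytes = new TextEncoder().encode(json);
  let binary = '';
  bytes.forEach((b) => binary += String.fromCharCode(b));
  return btoa(binary);
}
```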
@@ -16,33 +16,181 @@ import { observer } from 'mobx-react-lite';
 import { useTranslation } from 'react-i18next';
 import { checkUpdate, toastWithButton } from '../utils';
 import { RestartApp } from '../../wailsjs/go/backend_golang/App';
+import { Language, Languages } from '../types/settings';
 
-export const Languages = {
-  dev: 'English', // i18n default
-  zh: '简体中文',
-  ja: '日本語'
-};
+export const GeneralSettings: FC = observer(() => {
+  const { t } = useTranslation();
 
-export type Language = keyof typeof Languages;
+  return <div className="flex flex-col gap-2">
+    <Labeled label={t('Language')} flex spaceBetween content={
+      <Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 'fit-content' } }}
+        value={Languages[commonStore.settings.language]}
+        selectedOptions={[commonStore.settings.language]}
+        onOptionSelect={(_, data) => {
+          if (data.optionValue) {
+            const lang = data.optionValue as Language;
+            commonStore.setSettings({
+              language: lang
+            });
+          }
+        }}>
+        {
+          Object.entries(Languages).map(([langKey, desc]) =>
+            <Option key={langKey} value={langKey}>{desc}</Option>)
+        }
+      </Dropdown>
+    } />
+    {
+      commonStore.platform === 'windows' &&
+      <Labeled label={t('DPI Scaling')} flex spaceBetween content={
+        <Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 'fit-content' } }}
+          value={commonStore.settings.dpiScaling + '%'}
+          selectedOptions={[commonStore.settings.dpiScaling.toString()]}
+          onOptionSelect={(_, data) => {
+            if (data.optionValue) {
+              commonStore.setSettings({
+                dpiScaling: Number(data.optionValue)
+              });
+              toastWithButton(t('Restart the app to apply DPI Scaling.'), t('Restart'), () => {
+                RestartApp();
+              }, {
+                autoClose: 5000
+              });
+            }
+          }}>
+          {
+            Array.from({ length: 7 }, (_, i) => (i + 2) * 25).map((v, i) =>
+              <Option key={i} value={v.toString()}>{v + '%'}</Option>)
+          }
+        </Dropdown>
+      } />
+    }
+    <Labeled label={t('Dark Mode')} flex spaceBetween content={
+      <Switch checked={commonStore.settings.darkMode}
+        onChange={(e, data) => {
+          commonStore.setSettings({
+            darkMode: data.checked
+          });
+        }} />
+    } />
+  </div>;
+});
 
-export type SettingsType = {
-  language: Language
-  darkMode: boolean
-  autoUpdatesCheck: boolean
-  giteeUpdatesSource: boolean
-  cnMirror: boolean
-  host: string
-  dpiScaling: number
-  customModelsPath: string
-  customPythonPath: string
-  apiUrl: string
-  apiKey: string
-  apiChatModelName: string
-  apiCompletionModelName: string
-}
+export const AdvancedGeneralSettings: FC = observer(() => {
+  const { t } = useTranslation();
 
-export const Settings: FC = observer(() => {
-  const { t, i18n } = useTranslation();
+  return <div className="flex flex-col gap-2">
+    <Labeled label={'API URL'}
+      content={
+        <div className="flex gap-2">
+          <Input style={{ minWidth: 0 }} className="grow" value={commonStore.settings.apiUrl}
+            onChange={(e, data) => {
+              commonStore.setSettings({
+                apiUrl: data.value
+              });
+            }} />
+          <Dropdown style={{ minWidth: '33px' }} listbox={{ style: { minWidth: 'fit-content' } }}
+            value="..." selectedOptions={[]} expandIcon={null}
+            onOptionSelect={(_, data) => {
+              commonStore.setSettings({
+                apiUrl: data.optionValue
+              });
+              if (data.optionText === 'OpenAI') {
+                if (commonStore.settings.apiChatModelName === 'rwkv')
+                  commonStore.setSettings({
+                    apiChatModelName: 'gpt-3.5-turbo'
+                  });
+                if (commonStore.settings.apiCompletionModelName === 'rwkv')
+                  commonStore.setSettings({
+                    apiCompletionModelName: 'gpt-3.5-turbo-instruct'
+                  });
+              } else if (data.optionText === 'RWKV') {
+                if (commonStore.settings.apiChatModelName === 'gpt-3.5-turbo')
+                  commonStore.setSettings({
+                    apiChatModelName: 'rwkv'
+                  });
+                if (commonStore.settings.apiCompletionModelName === 'gpt-3.5-turbo-instruct' || commonStore.settings.apiCompletionModelName === 'text-davinci-003')
+                  commonStore.setSettings({
+                    apiCompletionModelName: 'rwkv'
+                  });
+              }
+            }}>
+            <Option value="">{t('Localhost')!}</Option>
+            <Option value="https://rwkv.ai-creator.net/chntuned">RWKV</Option>
+            <Option value="https://api.openai.com">OpenAI</Option>
+          </Dropdown>
+        </div>
+      } />
+    <Labeled label={'API Key'}
+      content={
+        <Input type="password" className="grow" placeholder="sk-" value={commonStore.settings.apiKey}
+          onChange={(e, data) => {
+            commonStore.setSettings({
+              apiKey: data.value
+            });
+          }} />
+      } />
+    <Labeled label={t('API Chat Model Name')}
+      content={
+        <div className="flex gap-2">
+          <Input style={{ minWidth: 0 }} className="grow" placeholder="rwkv"
+            value={commonStore.settings.apiChatModelName}
+            onChange={(e, data) => {
+              commonStore.setSettings({
+                apiChatModelName: data.value
+              });
+            }} />
+          <Dropdown style={{ minWidth: '33px' }} listbox={{ style: { minWidth: 'fit-content' } }}
+            value="..." selectedOptions={[]} expandIcon={null}
+            onOptionSelect={(_, data) => {
+              if (data.optionValue) {
+                commonStore.setSettings({
+                  apiChatModelName: data.optionValue
+                });
+              }
+            }}>
+            {
+              ['rwkv', 'gpt-4-1106-preview', 'gpt-4', 'gpt-4-32k', 'gpt-4-0613', 'gpt-4-32k-0613', 'gpt-3.5-turbo-1106', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k']
+                .map((v, i) =>
+                  <Option key={i} value={v}>{v}</Option>
+                )
+            }
+          </Dropdown>
+        </div>
+      } />
+    <Labeled label={t('API Completion Model Name')}
+      content={
+        <div className="flex gap-2">
+          <Input style={{ minWidth: 0 }} className="grow" placeholder="rwkv"
+            value={commonStore.settings.apiCompletionModelName}
+            onChange={(e, data) => {
+              commonStore.setSettings({
+                apiCompletionModelName: data.value
+              });
+            }} />
+          <Dropdown style={{ minWidth: '33px' }} listbox={{ style: { minWidth: 'fit-content', minHeight: 0 } }}
+            value="..." selectedOptions={[]} expandIcon={null}
+            onOptionSelect={(_, data) => {
+              if (data.optionValue) {
+                commonStore.setSettings({
+                  apiCompletionModelName: data.optionValue
+                });
+              }
+            }}>
+            {
+              ['rwkv', 'gpt-3.5-turbo-instruct', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002', 'text-curie-001', 'text-babbage-001', 'text-ada-001']
+                .map((v, i) =>
+                  <Option key={i} value={v}>{v}</Option>
+                )
+            }
+          </Dropdown>
+        </div>
+      } />
+  </div>;
+});
 
+const Settings: FC = observer(() => {
+  const { t } = useTranslation();
   const advancedHeaderRef = useRef<HTMLDivElement>(null);
 
   useEffect(() => {
@@ -53,227 +201,101 @@ export const Settings: FC = observer(() => {
   return (
     <Page title={t('Settings')} content={
       <div className="flex flex-col gap-2 overflow-y-auto overflow-x-hidden p-1">
-        <Labeled label={t('Language')} flex spaceBetween content={
-          <Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
-            value={Languages[commonStore.settings.language]}
-            selectedOptions={[commonStore.settings.language]}
-            onOptionSelect={(_, data) => {
-              if (data.optionValue) {
-                const lang = data.optionValue as Language;
-                commonStore.setSettings({
-                  language: lang
-                });
-              }
-            }}>
-            {
-              Object.entries(Languages).map(([langKey, desc]) =>
-                <Option key={langKey} value={langKey}>{desc}</Option>)
-            }
-          </Dropdown>
-        } />
-        {
-          commonStore.platform === 'windows' &&
-          <Labeled label={t('DPI Scaling')} flex spaceBetween content={
-            <Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
-              value={commonStore.settings.dpiScaling + '%'}
-              selectedOptions={[commonStore.settings.dpiScaling.toString()]}
-              onOptionSelect={(_, data) => {
-                if (data.optionValue) {
-                  commonStore.setSettings({
-                    dpiScaling: Number(data.optionValue)
-                  });
-                  toastWithButton(t('Restart the app to apply DPI Scaling.'), t('Restart'), () => {
-                    RestartApp();
-                  }, {
-                    autoClose: 5000
-                  });
-                }
-              }}>
-              {
-                Array.from({ length: 7 }, (_, i) => (i + 2) * 25).map((v, i) =>
-                  <Option key={i} value={v.toString()}>{v + '%'}</Option>)
-              }
-            </Dropdown>
-          } />
-        }
-        <Labeled label={t('Dark Mode')} flex spaceBetween content={
-          <Switch checked={commonStore.settings.darkMode}
-            onChange={(e, data) => {
-              commonStore.setSettings({
-                darkMode: data.checked
-              });
-            }} />
-        } />
-        <Labeled label={t('Automatic Updates Check')} flex spaceBetween content={
-          <Switch checked={commonStore.settings.autoUpdatesCheck}
-            onChange={(e, data) => {
-              commonStore.setSettings({
-                autoUpdatesCheck: data.checked
-              });
-              if (data.checked)
-                checkUpdate(true);
-            }} />
-        } />
-        {
-          commonStore.settings.language === 'zh' &&
-          <Labeled label={t('Use Gitee Updates Source')} flex spaceBetween content={
-            <Switch checked={commonStore.settings.giteeUpdatesSource}
-              onChange={(e, data) => {
-                commonStore.setSettings({
-                  giteeUpdatesSource: data.checked
-                });
-              }} />
-          } />
-        }
-        {
-          commonStore.settings.language === 'zh' && commonStore.platform != 'linux' &&
-          <Labeled label={t('Use Tsinghua Pip Mirrors')} flex spaceBetween content={
-            <Switch checked={commonStore.settings.cnMirror}
-              onChange={(e, data) => {
-                commonStore.setSettings({
-                  cnMirror: data.checked
-                });
-              }} />
-          } />
-        }
-        <Labeled label={t('Allow external access to the API (service must be restarted)')} flex spaceBetween content={
-          <Switch checked={commonStore.settings.host !== '127.0.0.1'}
-            onChange={(e, data) => {
-              commonStore.setSettings({
-                host: data.checked ? '0.0.0.0' : '127.0.0.1'
-              });
-            }} />
-        } />
-        <Accordion collapsible openItems={!commonStore.advancedCollapsed && 'advanced'} onToggle={(e, data) => {
-          if (data.value === 'advanced')
-            commonStore.setAdvancedCollapsed(!commonStore.advancedCollapsed);
-        }}>
-          <AccordionItem value="advanced">
-            <AccordionHeader ref={advancedHeaderRef} size="large">{t('Advanced')}</AccordionHeader>
-            <AccordionPanel>
-              <div className="flex flex-col gap-2 overflow-hidden">
-                {commonStore.platform !== 'darwin' &&
-                  <Labeled label={t('Custom Models Path')}
-                    content={
-                      <Input className="grow" placeholder="./models" value={commonStore.settings.customModelsPath}
-                        onChange={(e, data) => {
-                          commonStore.setSettings({
-                            customModelsPath: data.value
-                          });
-                        }} />
-                    } />
-                }
-                <Labeled label={t('Custom Python Path')} // if set, will not use precompiled cuda kernel
-                  content={
-                    <Input className="grow" placeholder="./py310/python" value={commonStore.settings.customPythonPath}
-                      onChange={(e, data) => {
-                        commonStore.setDepComplete(false);
-                        commonStore.setSettings({
-                          customPythonPath: data.value
-                        });
-                      }} />
-                  } />
-                <Labeled label={'API URL'}
-                  content={
-                    <div className="flex gap-2">
-                      <Input style={{ minWidth: 0 }} className="grow" value={commonStore.settings.apiUrl}
-                        onChange={(e, data) => {
-                          commonStore.setSettings({
-                            apiUrl: data.value
-                          });
-                        }} />
-                      <Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
-                        value="..." selectedOptions={[]} expandIcon={null}
-                        onOptionSelect={(_, data) => {
-                          commonStore.setSettings({
-                            apiUrl: data.optionValue
-                          });
-                          if (data.optionText === 'OpenAI') {
-                            if (commonStore.settings.apiChatModelName === 'rwkv')
-                              commonStore.setSettings({
-                                apiChatModelName: 'gpt-3.5-turbo'
-                              });
-                            if (commonStore.settings.apiCompletionModelName === 'rwkv')
-                              commonStore.setSettings({
-                                apiCompletionModelName: 'text-davinci-003'
-                              });
-                          }
-                        }}>
-                        <Option value="">{t('Localhost')!}</Option>
-                        <Option value="https://api.openai.com">OpenAI</Option>
-                      </Dropdown>
-                    </div>
-                  } />
-                <Labeled label={'API Key'}
-                  content={
-                    <Input className="grow" placeholder="sk-" value={commonStore.settings.apiKey}
-                      onChange={(e, data) => {
-                        commonStore.setSettings({
-                          apiKey: data.value
-                        });
-                      }} />
-                  } />
-                <Labeled label={t('API Chat Model Name')}
-                  content={
-                    <div className="flex gap-2">
-                      <Input style={{ minWidth: 0 }} className="grow" placeholder="rwkv"
-                        value={commonStore.settings.apiChatModelName}
-                        onChange={(e, data) => {
-                          commonStore.setSettings({
-                            apiChatModelName: data.value
-                          });
-                        }} />
-                      <Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
-                        value="..." selectedOptions={[]} expandIcon={null}
-                        onOptionSelect={(_, data) => {
-                          if (data.optionValue) {
-                            commonStore.setSettings({
-                              apiChatModelName: data.optionValue
-                            });
-                          }
-                        }}>
-                        {
-                          ['rwkv', 'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-32k-0613', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613']
-                            .map((v, i) =>
-                              <Option key={i} value={v}>{v}</Option>
-                            )
-                        }
-                      </Dropdown>
-                    </div>
-                  } />
-                <Labeled label={t('API Completion Model Name')}
-                  content={
-                    <div className="flex gap-2">
-                      <Input style={{ minWidth: 0 }} className="grow" placeholder="rwkv"
-                        value={commonStore.settings.apiCompletionModelName}
-                        onChange={(e, data) => {
-                          commonStore.setSettings({
-                            apiCompletionModelName: data.value
-                          });
-                        }} />
-                      <Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
-                        value="..." selectedOptions={[]} expandIcon={null}
-                        onOptionSelect={(_, data) => {
-                          if (data.optionValue) {
-                            commonStore.setSettings({
-                              apiCompletionModelName: data.optionValue
-                            });
-                          }
-                        }}>
-                        {
-                          ['rwkv', 'text-davinci-003', 'text-davinci-002', 'text-curie-001', 'text-babbage-001', 'text-ada-001']
-                            .map((v, i) =>
-                              <Option key={i} value={v}>{v}</Option>
-                            )
-                        }
-                      </Dropdown>
-                    </div>
-                  } />
+          commonStore.platform === 'web' ?
+            (
+              <div className="flex flex-col gap-2">
+                <GeneralSettings />
+                <AdvancedGeneralSettings />
               </div>
-            </AccordionPanel>
-          </AccordionItem>
-        </Accordion>
+            )
+            :
+            (
+              <div className="flex flex-col gap-2">
+                <GeneralSettings />
+                <Labeled label={t('Automatic Updates Check')} flex spaceBetween content={
+                  <Switch checked={commonStore.settings.autoUpdatesCheck}
+                    onChange={(e, data) => {
+                      commonStore.setSettings({
+                        autoUpdatesCheck: data.checked
+                      });
+                      if (data.checked)
+                        checkUpdate(true);
+                    }} />
+                } />
+                {
+                  commonStore.settings.language === 'zh' &&
+                  <Labeled label={t('Use Gitee Updates Source')} flex spaceBetween content={
+                    <Switch checked={commonStore.settings.giteeUpdatesSource}
+                      onChange={(e, data) => {
+                        commonStore.setSettings({
+                          giteeUpdatesSource: data.checked
+                        });
+                      }} />
+                  } />
+                }
+                {
+                  commonStore.settings.language === 'zh' && commonStore.platform !== 'linux' &&
+                  <Labeled label={t('Use Tsinghua Pip Mirrors')} flex spaceBetween content={
+                    <Switch checked={commonStore.settings.cnMirror}
+                      onChange={(e, data) => {
+                        commonStore.setSettings({
+                          cnMirror: data.checked
+                        });
+                      }} />
+                  } />
+                }
+                <Labeled label={t('Allow external access to the API (service must be restarted)')} flex spaceBetween
+                  content={
+                    <Switch checked={commonStore.settings.host !== '127.0.0.1'}
+                      onChange={(e, data) => {
+                        commonStore.setSettings({
+                          host: data.checked ? '0.0.0.0' : '127.0.0.1'
+                        });
+                      }} />
+                  } />
+                <Accordion collapsible openItems={!commonStore.advancedCollapsed && 'advanced'} onToggle={(e, data) => {
+                  if (data.value === 'advanced')
+                    commonStore.setAdvancedCollapsed(!commonStore.advancedCollapsed);
+                }}>
+                  <AccordionItem value="advanced">
+                    <AccordionHeader ref={advancedHeaderRef} size="large">{t('Advanced')}</AccordionHeader>
+                    <AccordionPanel>
+                      <div className="flex flex-col gap-2 overflow-hidden">
+                        {commonStore.platform !== 'darwin' &&
+                          <Labeled label={t('Custom Models Path')}
+                            content={
+                              <Input className="grow" placeholder="./models"
+                                value={commonStore.settings.customModelsPath}
+                                onChange={(e, data) => {
+                                  commonStore.setSettings({
+                                    customModelsPath: data.value
+                                  });
+                                }} />
+                            } />
+                        }
+                        <Labeled label={t('Custom Python Path')} // if set, will not use precompiled cuda kernel
+                          content={
+                            <Input className="grow" placeholder="./py310/python"
+                              value={commonStore.settings.customPythonPath}
+                              onChange={(e, data) => {
+                                commonStore.setDepComplete(false);
+                                commonStore.setSettings({
+                                  customPythonPath: data.value
+                                });
+                              }} />
+                          } />
+                        <AdvancedGeneralSettings />
+                      </div>
+                    </AccordionPanel>
+                  </AccordionItem>
+                </Accordion>
+              </div>
+            )
+        }
       </div>
     } />
   );
 });
 
+export default Settings;

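The `Languages` map, `Language` type, and `SettingsType` removed above are presumably what now ship from `../types/settings`; the module path is the only assumption here, since the bodies can be reconstructed verbatim from the deleted lines:

```ts
// Reconstructed from the lines deleted in the Settings diff above.
export const Languages = {
  dev: 'English', // i18n default
  zh: '简体中文',
  ja: '日本語'
};

// keyof typeof gives the union of map keys: 'dev' | 'zh' | 'ja'.
export type Language = keyof typeof Languages;
```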
@@ -1,4 +1,4 @@
-import React, { FC, ReactElement, useEffect, useRef, useState } from 'react';
+import React, { FC, useEffect, useRef, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Button, Dropdown, Input, Option, Select, Switch, Tab, TabList } from '@fluentui/react-components';
 import {
@@ -24,7 +24,6 @@ import { Labeled } from '../components/Labeled';
 import { ToolTipButton } from '../components/ToolTipButton';
 import { DataUsageSettings20Regular, Folder20Regular } from '@fluentui/react-icons';
 import { useNavigate } from 'react-router';
-import { Precision } from './Configs';
 import {
   CategoryScale,
   Chart as ChartJS,
@@ -40,6 +39,12 @@ import { ChartJSOrUndefined } from 'react-chartjs-2/dist/types';
 import { WindowShow } from '../../wailsjs/runtime';
 import { t } from 'i18next';
 import { DialogButton } from '../components/DialogButton';
+import {
+  DataProcessParameters,
+  LoraFinetuneParameters,
+  LoraFinetunePrecision,
+  TrainNavigationItem
+} from '../types/train';
 
 ChartJS.register(
   CategoryScale,
@@ -86,39 +91,6 @@ const addLossDataToChart = (epoch: number, loss: number) => {
   commonStore.setChartData(commonStore.chartData);
 };
 
-export type DataProcessParameters = {
-  dataPath: string;
-  vocabPath: string;
-}
-
-export type LoraFinetunePrecision = 'bf16' | 'fp16' | 'tf32';
-
-export type LoraFinetuneParameters = {
-  baseModel: string;
-  ctxLen: number;
-  epochSteps: number;
-  epochCount: number;
-  epochBegin: number;
-  epochSave: number;
-  microBsz: number;
-  accumGradBatches: number;
-  preFfn: boolean;
-  headQk: boolean;
-  lrInit: string;
-  lrFinal: string;
-  warmupSteps: number;
-  beta1: number;
-  beta2: number;
-  adamEps: string;
-  devices: number;
-  precision: LoraFinetunePrecision;
-  gradCp: boolean;
-  loraR: number;
-  loraAlpha: number;
-  loraDropout: number;
-  loraLoad: string
-}
-
 const loraFinetuneParametersOptions: Array<[key: keyof LoraFinetuneParameters, type: string, name: string]> = [
   ['devices', 'number', 'Devices'],
   ['precision', 'LoraFinetunePrecision', 'Precision'],
@@ -154,14 +126,15 @@ const showError = (e: any) => {
 };
 
 const errorsMap = Object.entries({
-  'python3 ./finetune/lora/train.py': 'Memory is not enough, try to increase the virtual memory or use a smaller base model.',
+  'python3 ./finetune/lora/train.py': 'Memory is not enough, try to increase the virtual memory (Swap of WSL) or use a smaller base model.',
   'cuda out of memory': 'VRAM is not enough',
   'valueerror: high <= 0': 'Training data is not enough, reduce context length or add more data for training',
-  '+= \'+ptx\'': 'You are using WSL 1 for training, please upgrade to WSL 2. e.g. Run "wsl --set-version Ubuntu-22.04 2"',
+  '+= \'+ptx\'': 'Can not find an Nvidia GPU. Perhaps the gpu driver of windows is too old, or you are using WSL 1 for training, please upgrade to WSL 2. e.g. Run "wsl --set-version Ubuntu-22.04 2"',
   'size mismatch for blocks': 'Size mismatch for blocks. You are attempting to continue training from the LoRA model, but it does not match the base model. Please set LoRA model to None.',
   'cuda_home environment variable is not set': 'Matched CUDA is not installed',
   'unsupported gpu architecture': 'Matched CUDA is not installed',
-  'error building extension \'fused_adam\'': 'Matched CUDA is not installed'
+  'error building extension \'fused_adam\'': 'Matched CUDA is not installed',
+  'modelinfo is invalid': 'Failed to load model, try to increase the virtual memory (Swap of WSL) or use a smaller base model.'
 });
 
 export const wslHandler = (data: string) => {
@@ -219,7 +192,7 @@ const Terminal: FC = observer(() => {
     WslStart().then(() => {
       addWslMessage('WSL> ' + input);
       setInput('');
-      WslCommand(input).catch(showError);
+      WslCommand(input).then(WindowShow).catch(showError);
     }).catch(showError);
   }
 };
@@ -414,7 +387,7 @@ const LoraFinetune: FC = observer(() => {
       contentText={t('The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\n' +
         'When you provide a directory path, all the txt files within that directory will be automatically converted into training data. ' +
         'This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\n' +
-        'The jsonl format file can be referenced at https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl.\n' +
+        'The jsonl format file can be referenced at https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl.\n' +
         'You can also write it similar to OpenAI\'s playground format, as shown in https://platform.openai.com/playground/p/default-chat.\n' +
         'Even for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. ' +
         'If they are different dialogues or topics, they should be written in separate lines.')} />
@@ -568,10 +541,6 @@ const LoraFinetune: FC = observer(() => {
   );
 });
 
-type TrainNavigationItem = {
-  element: ReactElement;
-};
-
 const pages: { [label: string]: TrainNavigationItem } = {
   'LoRA Finetune': {
     element: <LoraFinetune />
@@ -582,7 +551,7 @@ const pages: { [label: string]: TrainNavigationItem } = {
 };
 
 
-export const Train: FC = () => {
+const Train: FC = () => {
   const { t } = useTranslation();
   const [tab, setTab] = useState('LoRA Finetune');
 
@@ -607,3 +576,5 @@ export const Train: FC = () => {
     </div>
   </div>;
 };
 
+export default Train;

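Not shown in this diff: how `wslHandler` consumes `errorsMap`. A hypothetical sketch of the matching step, assuming it scans each chunk of WSL output for a known substring and surfaces the friendly hint (`matchKnownError` is illustrative, not code from the repo; the two entries are copied from the map above):

```ts
// Illustrative only: map low-level log fragments to actionable hints.
const errorsMap = Object.entries({
  'cuda out of memory': 'VRAM is not enough',
  'unsupported gpu architecture': 'Matched CUDA is not installed'
});

// Return the friendly message for the first known error found in a chunk
// of terminal output, or undefined if nothing matches.
export function matchKnownError(data: string): string | undefined {
  const line = data.toLowerCase();
  return errorsMap.find(([needle]) => line.includes(needle))?.[1];
}
```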