Compare commits

..

60 Commits

Author SHA1 Message Date
josc146
c7dcff52a1 release v1.5.1 2023-11-08 23:41:17 +08:00
josc146
c6ef32958e when client webUI enabled, set server into deployment mode 2023-11-08 23:31:13 +08:00
josc146
7235e1067b add deployment mode. If /switch-model with deploy: true, will disable /switch-model, /exit and other dangerous APIs (state cache APIs, part of midi APIs) 2023-11-08 23:29:42 +08:00
josc146
0594290b92 disable WebUI Option of WebGPU Mode (webgpu not supported yet) 2023-11-08 23:05:59 +08:00
josc146
d249a4c29a print error.txt 2023-11-08 22:57:38 +08:00
josc146
02ba37fab4 improve api url getter 2023-11-08 22:25:41 +08:00
josc146
b5a6f8a425 set deepspeed to 0.11.2 to avoid finetune error 2023-11-08 22:20:11 +08:00
josc146
1ad86d737c chore 2023-11-08 22:18:49 +08:00
josc146
cfa3669f6f fix /docs default api params (Pydantic v2) 2023-11-07 22:53:11 +08:00
josc146
26d4c9f0ed chore 2023-11-07 22:28:13 +08:00
josc146
3ddcf9f62e add webui entry 2023-11-07 22:24:06 +08:00
josc146
e734fce64f create webui assets 2023-11-07 22:23:26 +08:00
josc146
150beb578c chore 2023-11-07 22:23:00 +08:00
josc146
db6fbe8366 add python webui server 2023-11-07 22:22:29 +08:00
josc146
46f52923c3 improve webui 2023-11-07 22:21:41 +08:00
josc146
893be5cf43 webui build 2023-11-07 19:27:21 +08:00
github-actions[bot]
384e4ce4d0 release v1.5.0 2023-11-05 13:10:50 +00:00
josc146
b8712e0b89 release v1.5.0 2023-11-05 21:10:21 +08:00
josc146
37dda4333d chat attachment is now related to single message 2023-11-05 21:05:06 +08:00
josc146
64826b9af7 fix log encoding error 2023-11-05 21:00:31 +08:00
josc146
47b0c35441 update ngrok_connect 2023-11-04 20:22:28 +08:00
josc146
1dcda47013 improve startup process 2023-11-04 20:21:55 +08:00
josc146
1f81a1e5a8 upgrade to rwkv 0.8.20 2023-11-03 23:27:14 +08:00
josc146
35e92d2aef chore 2023-11-03 23:22:52 +08:00
josc146
0d99e5549e port occupied detection 2023-11-03 21:18:42 +08:00
josc146
fed1594ddc fix stop button status of Chat page 2023-10-30 21:09:23 +08:00
josc146
14b90bb36b improve dml mode performance (20% faster, https://github.com/BlinkDL/ChatRWKV/pull/181) 2023-10-30 20:24:57 +08:00
josc146
f86b7f1f08 python38 compatibility 2023-10-29 14:11:11 +08:00
josc146
54355d5a7a improve the compatibility between frontend presets and chatgpt api 2023-10-28 23:06:19 +08:00
josc146
ff7306349a improve memory usage of state cache 2023-10-28 23:04:49 +08:00
github-actions[bot]
77df56cddc release v1.4.9 2023-10-27 06:04:00 +00:00
josc146
97ae139de5 release v1.4.9 2023-10-27 14:03:28 +08:00
josc146
afd15ef2c5 base64 preset support 2023-10-27 13:35:29 +08:00
josc146
6c73eae9f6 edited chat message now is marked as Normal 2023-10-27 13:11:12 +08:00
josc146
7078f47f72 allow avatarImg to be local absolute path 2023-10-27 12:53:20 +08:00
josc146
d43954cc88 improve message interruption and retry for Chat page 2023-10-27 12:13:05 +08:00
josc146
c87de93498 allow conversation with some document (.pdf, .txt) 2023-10-27 11:36:29 +08:00
josc146
810843a5ab update manifest.json 2023-10-27 00:48:37 +08:00
josc146
f7cbd2c803 update manifest.json 2023-10-26 18:04:06 +08:00
josc146
faf1852012 update stop strategy 2023-10-26 17:47:40 +08:00
josc146
43cfab5d4b change default World series prefix to User/Assistant 2023-10-26 16:58:53 +08:00
josc146
627a20936d RWKVType now no longer relies on the file name 2023-10-26 16:55:33 +08:00
josc146
1d7f19ffaf update sample.jsonl 2023-10-26 14:08:16 +08:00
josc146
d80565d780 mark rwkv raven series as old model 2023-10-26 13:32:59 +08:00
josc146
d7ba88953d chore 2023-10-25 22:53:14 +08:00
josc146
30e1c3171e update kernel (CUDA Compute Capability 5.3) 2023-10-25 22:53:14 +08:00
josc146
1f058b16ac update kernel (CUDA Compute Capability 6.1, Previously 7.5) 2023-10-25 22:53:13 +08:00
josc146
4a192f4057 upgrade to webgpu 0.2.2 (https://github.com/josStorer/ai00_rwkv_server) 2023-10-25 21:02:44 +08:00
josc146
0331bf47f7 upgrade rwkv 0.8.16 (DirectML support; rwkv 5.2 no longer needs to ensure custom cuda kernel enabled) 2023-10-25 17:56:18 +08:00
josc146
2acdaa96b2 chore 2023-10-25 17:51:59 +08:00
josc146
1d200d53ab fix beta linux kernel 2023-10-25 17:51:13 +08:00
josc146
df9e1f408e add /file-to-text api 2023-10-25 17:14:33 +08:00
josc146
4a18696686 add pip --no-warn-script-location 2023-10-25 17:08:50 +08:00
josc146
46b3b285f5 upgrade packages 2023-10-25 17:07:40 +08:00
josc146
1d6aeab9dc fix the make command on Linux and macOS, no longer need manual operations on the wsl.go file. (#158, #173, #207) 2023-10-25 16:12:34 +08:00
josc146
ab110ba30b chore 2023-10-24 23:41:18 +08:00
josc146
2f0fa4ee56 update readme 2023-10-24 21:11:55 +08:00
josc146
0005816c1d fix linux kernel (partial revert 68228a45) 2023-10-05 00:08:18 +08:00
josc146
f70672e5a0 update .gitignore 2023-10-05 00:08:02 +08:00
github-actions[bot]
ee057071a5 release v1.4.8 2023-10-03 07:05:41 +00:00
94 changed files with 2186 additions and 1809 deletions

View File

@@ -63,10 +63,10 @@ jobs:
Expand-Archive ./python-3.10.11-embed-amd64.zip -DestinationPath ./py310
$content=Get-Content "./py310/python310._pth"; $content | ForEach-Object {if ($_.ReadCount -eq 3) {"Lib\\site-packages"} else {$_}} | Set-Content ./py310/python310._pth
./py310/python ./backend-python/get-pip.py
./py310/python -m pip install Cython==0.29.36
./py310/python -m pip install Cython==3.0.4
Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../include" -Destination "py310/include" -Recurse
Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../libs" -Destination "py310/libs" -Recurse
./py310/python -m pip install cyac==1.7
./py310/python -m pip install cyac==1.9
git clone https://github.com/josStorer/ai00_rwkv_server --depth=1
cd ai00_rwkv_server
cargo build --release
@@ -111,9 +111,6 @@ jobs:
rm ./backend-python/rwkv_pip/rwkv5.pyd
rm ./backend-python/rwkv_pip/beta/wkv_cuda.pyd
rm ./backend-python/get-pip.py
sed -i '1,2d' ./backend-golang/wsl_not_windows.go
rm ./backend-golang/wsl.go
mv ./backend-golang/wsl_not_windows.go ./backend-golang/wsl.go
make
mv build/bin/RWKV-Runner build/bin/RWKV-Runner_linux_x64
@@ -145,9 +142,6 @@ jobs:
rm ./backend-python/rwkv_pip/rwkv5.pyd
rm ./backend-python/rwkv_pip/beta/wkv_cuda.pyd
rm ./backend-python/get-pip.py
sed -i '' '1,2d' ./backend-golang/wsl_not_windows.go
rm ./backend-golang/wsl.go
mv ./backend-golang/wsl_not_windows.go ./backend-golang/wsl.go
make
cp build/darwin/Readme_Install.txt build/bin/Readme_Install.txt
cp build/bin/RWKV-Runner.app/Contents/MacOS/RWKV-Runner build/bin/RWKV-Runner_darwin_universal

.gitignore vendored
View File

@@ -18,6 +18,7 @@ __pycache__
/cmd-helper.bat
/install-py-dep.bat
/backend-python/wkv_cuda
/backend-python/rwkv5
*.exe
*.old
.DS_Store

View File

@@ -1,8 +1,22 @@
## Changes
- latest rwkv-5.2 is now supported (with pre-compiled kernel for windows)
- completion page: add format content button
- chore
### Features
- add webUI for easier service sharing (enable it in the Configs page or with the `--webui` command line parameter; compile it with `make build-web`)
- add deployment mode. If `/switch-model` is called with `deploy: true`, the server disables `/switch-model`, `/exit` and other dangerous APIs (state cache APIs, part of the MIDI APIs); see the client sketch below
### Chores
- print `error.txt` to the console when a script fails
- improve the API URL getter
### Fixes
- pin deepspeed to 0.11.2 to avoid a finetuning error
- fix `/docs` default API params (Pydantic v2)
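
A minimal client sketch of deployment mode, assuming a local server (the address is illustrative; the field names follow the `SwitchModelBody` example further down in this diff):

```python
import requests  # third-party HTTP client, assumed available

base = "http://127.0.0.1:8000"  # assumed server address

# Load a model with deploy=True; on success the server locks itself down.
requests.post(base + "/switch-model", json={
    "model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
    "strategy": "cuda fp16",
    "deploy": True,
}).raise_for_status()

# Dangerous APIs now return 403, including /exit and /switch-model itself.
assert requests.post(base + "/exit").status_code == 403
```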
## Install

View File

@@ -18,6 +18,16 @@ build-linux:
@echo ---- build for linux
wails build -upx -ldflags "-s -w" -platform linux/amd64
build-web:
@echo ---- build for web
cd frontend && npm run build
dev:
wails dev
dev-web:
cd frontend && npm run dev
preview:
cd frontend && npm run preview

View File

@@ -47,7 +47,9 @@ English | [简体中文](README_ZH.md) | [日本語](README_JA.md)
</div>
#### Default configs has enabled custom CUDA kernel acceleration, which is much faster and consumes much less VRAM. If you encounter possible compatibility issues, go to the Configs page and turn off `Use Custom CUDA kernel to Accelerate`.
#### Tip: You can deploy [backend-python](./backend-python/) on a server and use this program as a client only. Fill in your server address in the Settings `API URL`.
#### Default configs has enabled custom CUDA kernel acceleration, which is much faster and consumes much less VRAM. If you encounter possible compatibility issues (output garbled), go to the Configs page and turn off `Use Custom CUDA kernel to Accelerate`, or try to upgrade your gpu driver.
#### If Windows Defender claims this is a virus, you can try downloading [v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip) and letting it update automatically to the latest version, or add it to the trusted list (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`).

View File

@@ -47,7 +47,9 @@
</div>
#### The default config enables custom CUDA kernel acceleration. If you encounter possible compatibility issues, go to the Configs page and turn off `Use Custom CUDA kernel to Accelerate`.
#### Tip: you can deploy [backend-python](./backend-python/) on a server and use this program as a client only. Fill in your server address in the Settings `API URL`.
#### The default config enables custom CUDA kernel acceleration. If you encounter possible compatibility issues (garbled output), go to the Configs page and turn off `Use Custom CUDA kernel to Accelerate`, or try upgrading your GPU driver.
#### If Windows Defender claims this is a virus, you can try downloading [v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip) and letting it update automatically to the latest version, or add it to the trusted list (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`).

View File

@@ -46,7 +46,7 @@ an API-compatible interface, which means every ChatGPT client is an RWKV client.
</div>
#### The default config enables custom CUDA kernel acceleration, which is much faster and consumes much less VRAM. If you encounter possible compatibility issues, go to the Configs page and turn off `使用自定义CUDA算子加速` (Use Custom CUDA kernel to Accelerate).
#### Tip: you can deploy [backend-python](./backend-python/) on a server and use this program as a client only; fill in your server address in the Settings `API URL`.
#### The default config enables custom CUDA kernel acceleration, which is much faster and consumes much less VRAM. If you encounter possible compatibility issues (garbled output), go to the Configs page and turn off `使用自定义CUDA算子加速` (Use Custom CUDA kernel to Accelerate), or update your GPU driver.
#### If Windows Defender says this is a virus, you can try downloading [v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip) and letting it update automatically to the latest version, or add it to the trusted list (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`).

View File

@@ -53,12 +53,12 @@ type FileInfo struct {
ModTime string `json:"modTime"`
}
func (a *App) ReadFileInfo(fileName string) (FileInfo, error) {
func (a *App) ReadFileInfo(fileName string) (*FileInfo, error) {
info, err := os.Stat(a.exDir + fileName)
if err != nil {
return FileInfo{}, err
return nil, err
}
return FileInfo{
return &FileInfo{
Name: info.Name(),
Size: info.Size(),
IsDir: info.IsDir(),
@@ -145,6 +145,20 @@ func (a *App) OpenSaveFileDialogBytes(filterPattern string, defaultFileName stri
return path, nil
}
// Only return the path of the selected file, because communication between frontend and backend is slow. Use AssetServer Handler to read the file.
func (a *App) OpenOpenFileDialog(filterPattern string) (string, error) {
path, err := wruntime.OpenFileDialog(a.ctx, wruntime.OpenDialogOptions{
Filters: []wruntime.FileFilter{{Pattern: filterPattern}},
})
if err != nil {
return "", err
}
if path == "" {
return "", nil
}
return path, nil
}
func (a *App) OpenFileFolder(path string, relative bool) error {
var absPath string
var err error

View File

@@ -10,7 +10,7 @@ import (
"strings"
)
func (a *App) StartServer(python string, port int, host string, rwkvBeta bool) (string, error) {
func (a *App) StartServer(python string, port int, host string, webui bool, rwkvBeta bool) (string, error) {
var err error
if python == "" {
python, err = GetPython()
@@ -19,6 +19,9 @@ func (a *App) StartServer(python string, port int, host string, rwkvBeta bool) (
return "", err
}
args := []string{python, "./backend-python/main.py"}
if webui {
args = append(args, "--webui")
}
if rwkvBeta {
args = append(args, "--rwkv-beta")
}
@@ -28,8 +31,7 @@ func (a *App) StartServer(python string, port int, host string, rwkvBeta bool) (
func (a *App) StartWebGPUServer(port int, host string) (string, error) {
args := []string{"./backend-rust/webgpu_server"}
args = append(args, "-a", "0", "-t", "backend-rust/assets/rwkv_vocab_v20230424.json",
"--port", strconv.Itoa(port), "--ip", host)
args = append(args, "--port", strconv.Itoa(port), "--ip", host)
return Cmd(args...)
}
@@ -149,9 +151,9 @@ func (a *App) InstallPyDep(python string, cnMirror bool) (string, error) {
if runtime.GOOS == "windows" {
ChangeFileLine("./py310/python310._pth", 3, "Lib\\site-packages")
installScript := python + " ./backend-python/get-pip.py -i https://pypi.tuna.tsinghua.edu.cn/simple\n" +
python + " -m pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117\n" +
python + " -m pip install -r ./backend-python/requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple\n" +
installScript := python + " ./backend-python/get-pip.py -i https://pypi.tuna.tsinghua.edu.cn/simple --no-warn-script-location\n" +
python + " -m pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117 --no-warn-script-location\n" +
python + " -m pip install -r ./backend-python/requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple --no-warn-script-location\n" +
"exit"
if !cnMirror {
installScript = strings.Replace(installScript, " -i https://pypi.tuna.tsinghua.edu.cn/simple", "", -1)

View File

@@ -5,12 +5,15 @@ import (
"bufio"
"embed"
"errors"
"fmt"
"io"
"io/fs"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
)
@@ -205,3 +208,12 @@ func Unzip(source, destination string) error {
}
return nil
}
func (a *App) IsPortAvailable(port int) bool {
l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%s", strconv.Itoa(port)))
if err != nil {
return false
}
defer l.Close()
return true
}
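
For reference, a hedged Python equivalent of the port-occupied detection added above (the Go version treats a failed Listen on 127.0.0.1 as "port taken"):

```python
# Minimal sketch: bind 127.0.0.1:<port>; failure means the port is occupied.
import socket

def is_port_available(port: int) -> bool:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.bind(("127.0.0.1", port))
        except OSError:
            return False
        return True
```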

View File

@@ -231,5 +231,6 @@ try:
convert_and_save_and_exit=args.out,
)
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))

View File

@@ -18,20 +18,31 @@ parser.add_argument(
args = parser.parse_args()
def convert_file(
pt_filename: str,
sf_filename: str,
):
def rename_key(rename, name):
for k, v in rename.items():
if k in name:
name = name.replace(k, v)
return name
def convert_file(pt_filename: str, sf_filename: str, transpose_names=[], rename={}):
loaded = torch.load(pt_filename, map_location="cpu")
if "state_dict" in loaded:
loaded = loaded["state_dict"]
loaded = {k: v.clone().half() for k, v in loaded.items()}
for k, v in loaded.items():
print(f"{k}\t{v.shape}\t{v.dtype}")
# for k, v in loaded.items():
# print(f'{k}\t{v.shape}\t{v.dtype}')
# For tensors to be contiguous
loaded = {k: v.contiguous() for k, v in loaded.items()}
for k, v in loaded.items():
for transpose_name in transpose_names:
if transpose_name in k:
loaded[k] = v.transpose(0, 1)
loaded = {rename_key(rename, k).lower(): v.contiguous() for k, v in loaded.items()}
for k, v in loaded.items():
print(f"{k}\t{v.shape}\t{v.dtype}")
dirname = os.path.dirname(sf_filename)
os.makedirs(dirname, exist_ok=True)
@@ -46,8 +57,14 @@ def convert_file(
if __name__ == "__main__":
try:
convert_file(args.input, args.output)
convert_file(
args.input,
args.output,
["lora_A"],
{"time_faaaa": "time_first", "lora_A": "lora.0", "lora_B": "lora.1"},
)
print(f"Saved to {args.output}")
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))
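
To make the new renaming behavior concrete, a small standalone sketch (the mapping comes from the call site above; the tensor names are illustrative):

```python
rename = {"time_faaaa": "time_first", "lora_A": "lora.0", "lora_B": "lora.1"}

def rename_key(rename, name):
    # Substring replacement, exactly as in the diff above.
    for k, v in rename.items():
        if k in name:
            name = name.replace(k, v)
    return name

print(rename_key(rename, "blocks.0.att.time_faaaa").lower())  # blocks.0.att.time_first
print(rename_key(rename, "blocks.0.att.lora_A").lower())      # blocks.0.att.lora.0
```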

View File

@@ -1,3 +1,5 @@
import multipart
import fitz
import safetensors
import midi2audio
import mido
@@ -9,6 +11,7 @@ import GPUtil
import torch
import rwkv
import langchain
import numpy
import tokenizers
import fastapi

View File

@@ -4,6 +4,7 @@ Args = "args"
Model = "model"
Model_Status = "model_status"
Model_Config = "model_config"
Deploy_Mode = "deploy_mode"
class ModelStatus(Enum):
@@ -16,6 +17,7 @@ def init():
global GLOBALS
GLOBALS = {}
set(Model_Status, ModelStatus.Offline)
set(Deploy_Mode, False)
def set(key, value):

View File

@@ -2,70 +2,8 @@ import time
start_time = time.time()
import os
import sys
import argparse
from typing import Sequence
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import psutil
from fastapi import Depends, FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from utils.rwkv import *
from utils.torch import *
from utils.ngrok import *
from utils.log import log_middleware
from routes import completion, config, state_cache, midi, misc
import global_var
app = FastAPI(dependencies=[Depends(log_middleware)])
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(completion.router)
app.include_router(config.router)
app.include_router(midi.router)
app.include_router(misc.router)
app.include_router(state_cache.router)
@app.on_event("startup")
def init():
global_var.init()
cmd_params = os.environ["RWKV_RUNNER_PARAMS"]
global_var.set(
global_var.Args, get_args(cmd_params.split(" ") if cmd_params else None)
)
state_cache.init()
set_torch()
if os.environ.get("ngrok_token") is not None:
ngrok_connect()
@app.get("/", tags=["Root"])
def read_root():
return {"Hello": "World!"}
@app.post("/exit", tags=["Root"])
def exit():
parent_pid = os.getpid()
parent = psutil.Process(parent_pid)
for child in parent.children(recursive=True):
child.kill()
parent.kill()
from typing import Union, Sequence
def get_args(args: Union[Sequence[str], None] = None):
@@ -84,6 +22,11 @@ def get_args(args: Union[Sequence[str], None] = None):
help="host to run the server on (default: 127.0.0.1)",
)
group = parser.add_argument_group(title="mode arguments")
group.add_argument(
"--webui",
action="store_true",
help="whether to enable WebUI (default: False)",
)
group.add_argument(
"--rwkv-beta",
action="store_true",
@@ -96,6 +39,96 @@ def get_args(args: Union[Sequence[str], None] = None):
if __name__ == "__main__":
args = get_args()
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import psutil
from contextlib import asynccontextmanager
from fastapi import Depends, FastAPI, status
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from utils.rwkv import *
from utils.torch import *
from utils.ngrok import *
from utils.log import log_middleware
from routes import completion, config, state_cache, midi, misc, file_process
import global_var
@asynccontextmanager
async def lifespan(app: FastAPI):
init()
yield
app = FastAPI(lifespan=lifespan, dependencies=[Depends(log_middleware)])
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(completion.router)
app.include_router(config.router)
app.include_router(midi.router)
app.include_router(file_process.router)
app.include_router(misc.router)
app.include_router(state_cache.router)
@app.post("/exit", tags=["Root"])
def exit():
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
parent_pid = os.getpid()
parent = psutil.Process(parent_pid)
for child in parent.children(recursive=True):
child.kill()
parent.kill()
try:
if (
"RWKV_RUNNER_PARAMS" in os.environ
and "--webui" in os.environ["RWKV_RUNNER_PARAMS"].split(" ")
) or args.webui:
from webui_server import webui_server
app.mount("/", webui_server)
except NameError:
pass
@app.get("/", tags=["Root"])
def read_root():
return {"Hello": "World!"}
def init():
global_var.init()
cmd_params = os.environ["RWKV_RUNNER_PARAMS"]
global_var.set(
global_var.Args, get_args(cmd_params.split(" ") if cmd_params else None)
)
state_cache.init()
set_torch()
if os.environ.get("ngrok_token") is not None:
ngrok_connect()
if __name__ == "__main__":
os.environ["RWKV_RUNNER_PARAMS"] = " ".join(sys.argv[1:])
print("--- %s seconds ---" % (time.time() - start_time))
uvicorn.run("main:app", port=args.port, host=args.host, workers=1)
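
With the restructured entry point above, the server can be launched directly, e.g. `python ./backend-python/main.py --webui --port 8000` (the port value is illustrative). `--webui` mounts the compiled frontend at `/`; the API routers still take precedence because they are registered before the mount.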

Binary file not shown.

View File

@@ -43,16 +43,18 @@ class ChatCompletionBody(ModelConfigBody):
model: Union[str, None] = "rwkv"
stream: bool = False
stop: Union[str, List[str], None] = default_stop
user_name: Union[str, None] = Field(None, description="Internal user name")
user_name: Union[str, None] = Field(
None, description="Internal user name", min_length=1
)
assistant_name: Union[str, None] = Field(
None, description="Internal assistant name"
None, description="Internal assistant name", min_length=1
)
presystem: bool = Field(
True, description="Whether to insert default system prompt at the beginning"
)
class Config:
schema_extra = {
model_config = {
"json_schema_extra": {
"example": {
"messages": [
{"role": Role.User.value, "content": "hello", "raw": False}
@@ -70,6 +72,7 @@ class ChatCompletionBody(ModelConfigBody):
"frequency_penalty": 0.4,
}
}
}
class CompletionBody(ModelConfigBody):
@@ -78,8 +81,8 @@ class CompletionBody(ModelConfigBody):
stream: bool = False
stop: Union[str, List[str], None] = None
class Config:
schema_extra = {
model_config = {
"json_schema_extra": {
"example": {
"prompt": "The following is an epic science fiction masterpiece that is immortalized, "
+ "with delicate descriptions and grand depictions of interstellar civilization wars.\nChapter 1.\n",
@@ -93,6 +96,7 @@ class CompletionBody(ModelConfigBody):
"frequency_penalty": 0.4,
}
}
}
completion_lock = Lock()
@@ -317,11 +321,13 @@ The following is a coherent verbose detailed conversation between a girl named {
completion_text += append_message + "\n\n"
completion_text += f"{bot}{interface}"
user_code = model.pipeline.decode([model.pipeline.encode(user)[0]])
bot_code = model.pipeline.decode([model.pipeline.encode(bot)[0]])
if type(body.stop) == str:
body.stop = [body.stop, f"\n\n{user}", f"\n\n{bot}"]
body.stop = [body.stop, f"\n\n{user_code}", f"\n\n{bot_code}"]
elif type(body.stop) == list:
body.stop.append(f"\n\n{user}")
body.stop.append(f"\n\n{bot}")
body.stop.append(f"\n\n{user_code}")
body.stop.append(f"\n\n{bot_code}")
elif body.stop is None:
body.stop = default_stop
@@ -372,8 +378,8 @@ class EmbeddingsBody(BaseModel):
encoding_format: str = None
fast_mode: bool = False
class Config:
schema_extra = {
model_config = {
"json_schema_extra": {
"example": {
"input": "a big apple",
"model": "rwkv",
@@ -381,6 +387,7 @@ class EmbeddingsBody(BaseModel):
"fast_mode": False,
}
}
}
def embedding_base64(embedding: List[float]) -> str:
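
The recurring `class Config` → `model_config` edits in this file are the standard Pydantic v2 migration; a minimal standalone sketch of the pattern (the model and its fields are hypothetical):

```python
from pydantic import BaseModel

class ExampleBody(BaseModel):
    prompt: str            # hypothetical fields, for illustration only
    max_tokens: int = 100

    # Pydantic v1 (removed):
    #     class Config:
    #         schema_extra = {"example": {"prompt": "hi", "max_tokens": 100}}
    # Pydantic v2 (added): a plain dict is accepted as ConfigDict.
    model_config = {
        "json_schema_extra": {"example": {"prompt": "hi", "max_tokens": 100}}
    }
```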

View File

@@ -10,41 +10,34 @@ import global_var
router = APIRouter()
def get_tokens_path(model_path: str):
model_path = model_path.lower()
tokenizer_dir = f"{pathlib.Path(__file__).parent.parent.resolve()}/rwkv_pip/"
default_tokens_path = tokenizer_dir + "20B_tokenizer.json"
if "raven" in model_path:
return default_tokens_path
elif "world" in model_path:
return "rwkv_vocab_v20230424"
elif "midi" in model_path:
return tokenizer_dir + "tokenizer-midi.json"
else:
return default_tokens_path
class SwitchModelBody(BaseModel):
model: str
strategy: str
tokenizer: Union[str, None] = None
customCuda: bool = False
deploy: bool = Field(
False,
description="Deploy mode. If success, will disable /switch-model, /exit and other dangerous APIs (state cache APIs, part of midi APIs)",
)
class Config:
schema_extra = {
model_config = {
"json_schema_extra": {
"example": {
"model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
"strategy": "cuda fp16",
"tokenizer": None,
"customCuda": False,
"deploy": False,
}
}
}
@router.post("/switch-model", tags=["Configs"])
def switch_model(body: SwitchModelBody, response: Response, request: Request):
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(Status.HTTP_403_FORBIDDEN)
if global_var.get(global_var.Model_Status) is global_var.ModelStatus.Loading:
response.status_code = Status.HTTP_304_NOT_MODIFIED
return
@@ -67,25 +60,10 @@ def switch_model(body: SwitchModelBody, response: Response, request: Request):
os.environ["RWKV_CUDA_ON"] = "1" if body.customCuda else "0"
global_var.set(global_var.Model_Status, global_var.ModelStatus.Loading)
tokenizer = (
get_tokens_path(body.model)
if body.tokenizer is None or body.tokenizer == ""
else body.tokenizer
)
try:
global_var.set(
global_var.Model,
TextRWKV(
model=body.model,
strategy=body.strategy,
tokens_path=tokenizer,
)
if "midi" not in body.model.lower()
else MusicRWKV(
model=body.model,
strategy=body.strategy,
tokens_path=tokenizer,
),
RWKV(model=body.model, strategy=body.strategy, tokenizer=body.tokenizer),
)
except Exception as e:
print(e)
@@ -95,6 +73,8 @@ def switch_model(body: SwitchModelBody, response: Response, request: Request):
Status.HTTP_500_INTERNAL_SERVER_ERROR, f"failed to load: {e}"
)
if body.deploy:
global_var.set(global_var.Deploy_Mode, True)
if global_var.get(global_var.Model_Config) is None:
global_var.set(
global_var.Model_Config, get_rwkv_config(global_var.get(global_var.Model))

View File

@@ -0,0 +1,79 @@
import os
from fastapi import (
APIRouter,
HTTPException,
status,
Depends,
File,
UploadFile,
)
from pydantic import BaseModel
from typing import Iterator
router = APIRouter()
class FileToTextParams(BaseModel):
file_name: str
file_encoding: str = "utf-8"
@router.post("/file-to-text", tags=["File Process"])
async def file_to_text(
params: FileToTextParams = Depends(), file_data: UploadFile = File(...)
):
from langchain.schema import Document
from langchain.document_loaders.blob_loaders import Blob
# from langchain
def parse_text(blob: Blob) -> Iterator[Document]:
yield Document(page_content=blob.as_string(), metadata={"source": blob.source})
# from langchain
def parse_pdf(blob: Blob) -> Iterator[Document]:
import fitz
with blob.as_bytes_io() as stream:
doc = fitz.Document(stream=stream)
yield from [
Document(
page_content=page.get_text(),
metadata=dict(
{
"source": blob.source,
"file_path": blob.source,
"page": page.number,
"total_pages": len(doc),
},
**{
k: doc.metadata[k]
for k in doc.metadata
if type(doc.metadata[k]) in [str, int]
},
),
)
for page in doc
]
file_parsers = {".txt": parse_text, ".pdf": parse_pdf}
file_name = file_data.filename or params.file_name
file_ext = os.path.splitext(file_name)[-1]
if file_ext not in file_parsers:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "file type not supported")
try:
pages: Iterator[Document] = file_parsers[file_ext](
Blob.from_data(
await file_data.read(),
encoding=params.file_encoding,
path=file_name,
)
)
pages = list(pages)
except Exception as e:
raise HTTPException(status.HTTP_400_BAD_REQUEST, f"{e}")
return {"pages": pages}
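
A hedged client-side sketch of the new `/file-to-text` endpoint (server address and file are assumptions; `FileToTextParams` is injected with `Depends()`, so its fields arrive as query parameters):

```python
import requests  # third-party HTTP client, assumed available

with open("sample.pdf", "rb") as f:  # hypothetical local file
    r = requests.post(
        "http://127.0.0.1:8000/file-to-text",  # assumed server address
        params={"file_name": "sample.pdf", "file_encoding": "utf-8"},
        files={"file_data": ("sample.pdf", f, "application/pdf")},
    )
r.raise_for_status()
print(r.json()["pages"][0]["page_content"])  # pages are serialized Documents
```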

View File

@@ -1,4 +1,5 @@
import io
import global_var
from fastapi import APIRouter, HTTPException, status
from starlette.responses import StreamingResponse
from pydantic import BaseModel
@@ -11,12 +12,13 @@ router = APIRouter()
class TextToMidiBody(BaseModel):
text: str
class Config:
schema_extra = {
model_config = {
"json_schema_extra": {
"example": {
"text": "p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:2d:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:2d:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:1f:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:26:a g:39:a g:39:a g:3e:a g:3e:a g:42:a g:42:a pi:39:a pi:3e:a pi:42:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0",
}
}
}
@router.post("/text-to-midi", tags=["MIDI"])
@@ -35,17 +37,21 @@ class TxtToMidiBody(BaseModel):
txt_path: str
midi_path: str
class Config:
schema_extra = {
model_config = {
"json_schema_extra": {
"example": {
"txt_path": "midi/sample.txt",
"midi_path": "midi/sample.mid",
}
}
}
@router.post("/txt-to-midi", tags=["MIDI"])
def txt_to_midi(body: TxtToMidiBody):
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
if not body.midi_path.startswith("midi/"):
raise HTTPException(status.HTTP_400_BAD_REQUEST, "bad output path")
@@ -65,14 +71,15 @@ class MidiToWavBody(BaseModel):
wav_path: str
sound_font_path: str = "assets/default_sound_font.sf2"
class Config:
schema_extra = {
model_config = {
"json_schema_extra": {
"example": {
"midi_path": "midi/sample.mid",
"wav_path": "midi/sample.wav",
"sound_font_path": "assets/default_sound_font.sf2",
}
}
}
@router.post("/midi-to-wav", tags=["MIDI"])
@@ -81,6 +88,9 @@ def midi_to_wav(body: MidiToWavBody):
Install fluidsynth first, see more: https://github.com/FluidSynth/fluidsynth/wiki/Download#distributions
"""
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
if not body.wav_path.startswith("midi/"):
raise HTTPException(status.HTTP_400_BAD_REQUEST, "bad output path")
@@ -95,14 +105,15 @@ class TextToWavBody(BaseModel):
wav_name: str
sound_font_path: str = "assets/default_sound_font.sf2"
class Config:
schema_extra = {
model_config = {
"json_schema_extra": {
"example": {
"text": "p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:2d:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:2d:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:1f:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:26:a g:39:a g:39:a g:3e:a g:3e:a g:42:a g:42:a pi:39:a pi:3e:a pi:42:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0",
"wav_name": "sample",
"sound_font_path": "assets/default_sound_font.sf2",
}
}
}
@router.post("/text-to-wav", tags=["MIDI"])
@@ -111,6 +122,9 @@ def text_to_wav(body: TextToWavBody):
Install fluidsynth first, see more: https://github.com/FluidSynth/fluidsynth/wiki/Download#distributions
"""
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
text = body.text.strip()
if not text.startswith("<start>"):
text = "<start> " + text

View File

@@ -4,12 +4,13 @@ from fastapi import APIRouter, HTTPException, Request, Response, status
from pydantic import BaseModel
import gc
import copy
import global_var
router = APIRouter()
trie = None
dtrie: Dict = {}
max_trie_len = 3000
max_trie_len = 300
loop_start_id = 1 # to prevent preloaded prompts from being deleted
loop_del_trie_id = loop_start_id
@@ -36,6 +37,9 @@ def init():
def disable_state_cache():
global trie, dtrie
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
trie = None
dtrie = {}
gc.collect()
@@ -46,6 +50,10 @@ def disable_state_cache():
@router.post("/enable-state-cache", tags=["State Cache"])
def enable_state_cache():
global trie, dtrie
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
try:
import cyac
@@ -68,6 +76,10 @@ class AddStateBody(BaseModel):
@router.post("/add-state", tags=["State Cache"])
def add_state(body: AddStateBody):
global trie, dtrie, loop_del_trie_id
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
if trie is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")
@@ -108,6 +120,10 @@ def add_state(body: AddStateBody):
@router.post("/reset-state", tags=["State Cache"])
def reset_state():
global trie, dtrie
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
if trie is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")
@@ -144,6 +160,10 @@ def __get_a_dtrie_buff_size(dtrie_v):
@router.post("/longest-prefix-state", tags=["State Cache"])
def longest_prefix_state(body: LongestPrefixStateBody, request: Request):
global trie
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
if trie is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")
@@ -183,6 +203,10 @@ def longest_prefix_state(body: LongestPrefixStateBody, request: Request):
@router.post("/save-state", tags=["State Cache"])
def save_state():
global trie
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
if trie is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")

View File

@@ -94,7 +94,7 @@ if os.environ.get("RWKV_CUDA_ON") == "1":
f"{current_path}/cuda/att_one_v5.cu",
],
verbose=True,
extra_ldflags=["cublas.lib"],
extra_ldflags=["cublas.lib" if os.name == "nt" else ""],
extra_cuda_cflags=[
"-t 4",
"-std=c++17",

Binary file not shown.

View File

@@ -1,124 +0,0 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "element_wise.h"
#include "util.h"
// Equivalent Python code:
// ww = t_first + k
// p = torch.maximum(pp, ww)
// e1 = torch.exp(pp - p)
// e2 = torch.exp(ww - p)
// wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
// ww = t_decay + pp
// p = torch.maximum(ww, k)
// e1 = torch.exp(ww - p)
// e2 = torch.exp(k - p)
// t1 = e1 * aa + e2 * v
// t2 = e1 * bb + e2
// r = r * wkv
// return t1, t2, p, r
struct WkvForwardOne {
const float *t_first;
const float *k;
const float *pp;
const float *aa;
const float *bb;
const float *t_decay;
const float *v;
/* out */ float *t1;
/* out */ float *t2;
/* out */ float *p;
/* in & out */ half *r;
__device__ void operator()(int i) const {
float ww = t_first[i] + k[i];
float pp_ = pp[i];
float p_ = (pp_ > ww) ? pp_ : ww;
float e1 = expf(pp_ - p_);
float e2 = expf(ww - p_);
float aa_ = aa[i];
float bb_ = bb[i];
float v_ = v[i];
r[i] = __hmul(r[i], __float2half(((e1 * aa_ + e2 * v_) / (e1 * bb_ + e2))));
ww = t_decay[i] + pp_;
float k_ = k[i];
p_ = (ww > k_) ? ww : k_;
e1 = expf(ww - p_);
e2 = expf(k_ - p_);
t1[i] = e1 * aa_ + e2 * v_;
t2[i] = e1 * bb_ + e2;
p[i] = p_;
}
};
/*
Equivalent Python code:
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
*/
struct Mix {
const half *xx;
const half *sx;
const half *k_mix;
const half *v_mix;
const half *r_mix;
/* out */ half *kx;
/* out */ half *vx;
/* out */ half *rx;
__device__ void operator()(int i) const {
half xx_ = xx[i];
half sx_ = sx[i];
half k_mix_ = k_mix[i];
half v_mix_ = v_mix[i];
half r_mix_ = r_mix[i];
kx[i] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
vx[i] = __hadd(__hmul(xx_, v_mix_),
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
rx[i] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
};
using torch::Tensor;
void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
Tensor att_one(Tensor x, Tensor ln_w, Tensor ln_b, Tensor sx, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw,
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx, Tensor rw,
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
/* imm */ Tensor k, Tensor pp, Tensor ww, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor v, /* in & out */ Tensor r,
/* out */ Tensor x_plus_out, /* out */ Tensor t1,
/* out */ Tensor t2, /* out */ Tensor p) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
element_wise(Mix{data_ptr<half>(xx), data_ptr<half>(sx),
data_ptr<half>(k_mix), data_ptr<half>(v_mix),
data_ptr<half>(r_mix), data_ptr<half>(kx),
data_ptr<half>(vx), data_ptr<half>(rx)},
x.numel());
gemm_fp16_cublas(kx, kw, k);
gemm_fp16_cublas(vx, vw, v);
gemm_fp16_cublas(rx, rw, r);
at::sigmoid_(r);
element_wise(WkvForwardOne{data_ptr<float>(t_first), data_ptr<float>(k),
data_ptr<float>(pp), data_ptr<float>(aa),
data_ptr<float>(bb), data_ptr<float>(t_decay),
data_ptr<float>(v), data_ptr<float>(t1),
data_ptr<float>(t2), data_ptr<float>(p),
data_ptr<half>(r)},
x.numel());
gemm_fp16_cublas(r, ow, x_plus_out);
x_plus_out += x;
return xx;
}

View File

@@ -1,179 +0,0 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "util.h"
#include "element_wise.h"
using torch::Tensor;
void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
void gemm_fp16_cublas(const void *a, const void *b, void *c, int m,
int n, int k, bool output_fp32);
// based on `kernel_wkv_forward`, fusing more operations
__global__ void kernel_wkv_forward_new(
const int B, const int T, const int C, const float *__restrict__ const _w,
const float *__restrict__ const _u, const float *__restrict__ const _k,
const float *__restrict__ const _v, const half *__restrict__ const r,
half *__restrict__ const _y, float *__restrict__ const _aa,
float *__restrict__ const _bb, float *__restrict__ const _pp) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
const int _state_offset = _b * C + _c;
float u = _u[_c];
float w = _w[_c];
const float *__restrict__ const k = _k + _offset;
const float *__restrict__ const v = _v + _offset;
half *__restrict__ const y = _y + _offset;
float aa = _aa[_state_offset];
float bb = _bb[_state_offset];
float pp = _pp[_state_offset];
for (int i = 0; i < T; i++) {
const int ii = i * C;
const float kk = k[ii];
const float vv = v[ii];
float ww = u + kk;
float p = max(pp, ww);
float e1 = exp(pp - p);
float e2 = exp(ww - p);
y[ii] = __float2half((e1 * aa + e2 * vv) / (e1 * bb + e2));
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
_aa[_state_offset] = aa;
_bb[_state_offset] = bb;
_pp[_state_offset] = pp;
}
void cuda_wkv_forward_new(int B, int T, int C, float *w, float *u, float *k,
float *v, half *r, half *y, float *aa, float *bb,
float *pp) {
dim3 threadsPerBlock(min(C, 32));
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_wkv_forward_new<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, r,
y, aa, bb, pp);
}
__global__ void _att_mix(const half *xx, const half *sx, const half *k_mix,
const half *v_mix, const half *r_mix,
const int outer_size, const int inner_size, half *kx,
half *vx, half *rx) {
for (int idx2 = blockIdx.x * blockDim.x + threadIdx.x; idx2 < inner_size;
idx2 += blockDim.x * gridDim.x) {
half k_mix_ = k_mix[idx2];
half v_mix_ = v_mix[idx2];
half r_mix_ = r_mix[idx2];
for (int row = 0; row < outer_size; ++row) {
int idx1 = row * inner_size + idx2;
half xx_ = xx[idx1];
half sx_ = sx[idx1];
kx[idx1] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
vx[idx1] = __hadd(__hmul(xx_, v_mix_),
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
rx[idx1] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
}
}
void att_mix(const half *xx, const half *sx, const half *k_mix,
const half *v_mix, const half *r_mix, const int outer_size,
const int inner_size, half *kx, half *vx, half *rx) {
// 256 is good enough on most GPUs
const int32_t BLOCK_SIZE = 256;
assert(inner_size % BLOCK_SIZE == 0);
_att_mix<<<inner_size / BLOCK_SIZE, BLOCK_SIZE>>>(
xx, sx, k_mix, v_mix, r_mix, outer_size, inner_size, kx, vx, rx);
}
struct InplaceSigmoid {
__device__ __forceinline__ half operator()(int i) const {
ptr[i] = __float2half(1.0 / (1.0 + exp(-__half2float(ptr[i]))));
}
half *ptr;
};
struct InplaceMul {
__device__ __forceinline__ half operator()(int i) const {
y[i] = __hmul(x[i], y[i]);
}
half *y;
half *x;
};
/*
Equivalent Python code:
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
k = gemm(kx, kw, output_dtype=torch.float32)
v = gemm(vx, vw, output_dtype=torch.float32)
T = x.shape[0]
for t in range(T):
kk = k[t]
vv = v[t]
ww = t_first + kk
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, kk)
e1 = torch.exp(ww - p)
e2 = torch.exp(kk - p)
aa = e1 * aa + e2 * vv
bb = e1 * bb + e2
pp = p
out = gemm(r * sx, ow)
return x + out, xx[-1,:], aa, bb, pp
*/
Tensor att_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
Tensor ow, Tensor t_first, Tensor pp, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor buf, /* out */ Tensor x_plus_out) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
sx = at::cat({sx.unsqueeze(0), xx.slice(0, 0, -1)}, 0);
char* buf_ptr = (char*)buf.data_ptr();
half* kx = (half*)buf_ptr;
half* vx = kx + x.numel();
half* rx = vx + x.numel();
half* wkv_y = rx + x.numel();
att_mix(data_ptr<half>(xx), data_ptr<half>(sx), data_ptr<half>(k_mix),
data_ptr<half>(v_mix), data_ptr<half>(r_mix), xx.size(0), xx.size(1),
kx, vx, rx);
float* k = reinterpret_cast<float*>(wkv_y + x.numel());
float* v = k + x.size(0) * kw.size(1);
half* r = reinterpret_cast<half*>(v + x.size(0) * vw.size(1));
gemm_fp16_cublas(kx, kw.data_ptr(), k, x.size(0), kw.size(1), kw.size(0), true);
gemm_fp16_cublas(vx, vw.data_ptr(), v, x.size(0), vw.size(1), vw.size(0), true);
gemm_fp16_cublas(rx, rw.data_ptr(), r, x.size(0), rw.size(1), rw.size(0), false);
element_wise(InplaceSigmoid{r}, x.size(0) * rw.size(1));
cuda_wkv_forward_new(1, x.size(0), x.size(1), data_ptr<float>(t_decay),
data_ptr<float>(t_first), k, v, r,
wkv_y, data_ptr<float>(aa),
data_ptr<float>(bb), data_ptr<float>(pp));
element_wise(InplaceMul{wkv_y, r}, x.numel());
gemm_fp16_cublas(wkv_y, ow.data_ptr(), x_plus_out.data_ptr(), x.size(0), ow.size(1), ow.size(0), false);
x_plus_out += x;
return xx;
}

View File

@@ -1,21 +0,0 @@
#include <cassert>
#include <cstddef>
#include <cstdint>
template <typename Func> __global__ void _element_wise(Func func, int n) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
i += blockDim.x * gridDim.x) {
func(i);
}
}
// NOTE: packed data type (e.g. float4) is a overkill for current sizes
// (4096 in 7B model and 768 in 0.1B model),
// and is not faster than the plain float version.
template <typename Func>
void element_wise(Func func, int n) {
// 256 is good enough on most GPUs
const int32_t BLOCK_SIZE = 256;
assert(n % BLOCK_SIZE == 0);
_element_wise<<<n / BLOCK_SIZE, BLOCK_SIZE>>>(func, n);
}

View File

@@ -1,165 +0,0 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "element_wise.h"
#include "util.h"
using torch::Tensor;
void gemm_fp16_cublas(const void *a, const void *b, void *c, int ori_m,
int ori_n, int ori_k, bool output_fp32);
__global__ void _ffn_seq_mix(const half *xx, const half *sx, const half *k_mix,
const half *r_mix, const int outer_size,
const int inner_size, half *kx, half *rx) {
for (int idx2 = blockIdx.x * blockDim.x + threadIdx.x; idx2 < inner_size;
idx2 += blockDim.x * gridDim.x) {
half k_mix_ = k_mix[idx2];
half r_mix_ = r_mix[idx2];
for (int row = 0; row < outer_size; ++row) {
int idx1 = row * inner_size + idx2;
half xx_ = xx[idx1];
half sx_ = sx[idx1];
kx[idx1] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
rx[idx1] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
}
}
void ffn_seq_mix(const half *xx, const half *sx, const half *k_mix,
const half *r_mix, const int outer_size, const int inner_size,
half *kx, half *rx) {
// 256 is good enough on most GPUs
const int32_t BLOCK_SIZE = 256;
assert(inner_size % BLOCK_SIZE == 0);
_ffn_seq_mix<<<inner_size / BLOCK_SIZE, BLOCK_SIZE>>>(
xx, sx, k_mix, r_mix, outer_size, inner_size, kx, rx);
}
struct InplaceSigmoid {
__device__ __forceinline__ void operator()(int i) const {
ptr[i] = __float2half(1.0 / (1.0 + exp(-__half2float(ptr[i]))));
}
half *ptr;
};
struct InplaceReLUAndSquare {
__device__ __forceinline__ void operator()(int i) const {
// __hmax is not defined in old cuda
if (__hgt(ptr[i], __float2half(0))) {
ptr[i] = __hmul(ptr[i], ptr[i]);
} else {
ptr[i] = __float2half(0);
}
}
half *ptr;
};
struct InplaceFma {
__device__ __forceinline__ void operator()(int i) const {
a[i] = __hfma(a[i], b[i], c[i]);
}
half *a;
const half *b;
const half *c;
};
/*
Equivalent Python code:
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
vx = torch.square(torch.relu(gemm(kx, kw)))
out = r * gemm(vx, vw)
return x + out, xx[-1,:]
*/
Tensor ffn_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
/* out */ Tensor x_plus_out) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
sx = at::cat({sx.unsqueeze(0), xx.slice(0, 0, -1)}, 0);
char *buf_ptr = (char *)buf.data_ptr();
half *kx = (half *)buf_ptr;
half *rx = kx + x.numel();
half *vx = rx + x.numel();
half *r = vx + x.size(0) * kw.size(1);
ffn_seq_mix(data_ptr<half>(xx), data_ptr<half>(sx), data_ptr<half>(k_mix),
data_ptr<half>(r_mix), xx.size(0), xx.size(1), kx, rx);
gemm_fp16_cublas(rx, rw.data_ptr(), r, x.size(0), rw.size(1), x.size(1),
false);
element_wise(InplaceSigmoid{r}, x.size(0) * rw.size(1));
gemm_fp16_cublas(kx, kw.data_ptr(), vx, x.size(0), kw.size(1), x.size(1),
false);
element_wise(InplaceReLUAndSquare{vx}, x.size(0) * kw.size(1));
gemm_fp16_cublas(vx, vw.data_ptr(), x_plus_out.data_ptr(), x.size(0),
vw.size(1), vw.size(0), false);
element_wise(InplaceFma{data_ptr<half>(x_plus_out), r, data_ptr<half>(x)},
x_plus_out.numel());
return xx;
}
struct FfnOneMix {
__device__ __forceinline__ void operator()(int idx) {
half k_mix_ = k_mix[idx];
half r_mix_ = r_mix[idx];
half xx_ = xx[idx];
half sx_ = sx[idx];
kx[idx] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
rx[idx] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
half *k_mix;
half *r_mix;
half *xx;
half *sx;
half *kx;
half *rx;
};
/*
Equivalent Python code:
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
vx = torch.square(torch.relu(gemm(kx, kw)))
out = r * gemm(vx, vw)
return x + out, xx
*/
Tensor ffn_one(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
/* out */ Tensor x_plus_out) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
char *buf_ptr = (char *)buf.data_ptr();
half *kx = (half *)buf_ptr;
half *rx = kx + x.numel();
half *vx = rx + x.numel();
half *r = vx + x.size(0) * kw.size(1);
element_wise(FfnOneMix{data_ptr<half>(k_mix), data_ptr<half>(r_mix),
data_ptr<half>(xx), data_ptr<half>(sx), kx, rx},
x.numel());
// vector * matrix, so m = 1
gemm_fp16_cublas(rx, rw.data_ptr(), r, 1, rw.size(1), rw.size(0), false);
element_wise(InplaceSigmoid{r}, rw.size(1));
gemm_fp16_cublas(kx, kw.data_ptr(), vx, 1, kw.size(1), kw.size(0), false);
element_wise(InplaceReLUAndSquare{vx}, kw.size(1));
gemm_fp16_cublas(vx, vw.data_ptr(), x_plus_out.data_ptr(), 1, vw.size(1),
vw.size(0), false);
element_wise(InplaceFma{data_ptr<half>(x_plus_out), r, data_ptr<half>(x)},
x_plus_out.numel());
return xx;
}

View File

@@ -3,6 +3,8 @@
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAContext.h>
#define CUBLAS_CHECK(condition) \
for (cublasStatus_t _cublas_check_status = (condition); \
@@ -18,26 +20,13 @@
"CUDA error " + std::string(cudaGetErrorString(_cuda_check_status)) + \
" at " + std::to_string(__LINE__));
cublasHandle_t get_cublas_handle() {
static cublasHandle_t cublas_handle = []() {
cublasHandle_t handle = nullptr;
CUBLAS_CHECK(cublasCreate(&handle));
#if CUDA_VERSION < 11000
CUBLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
#else
CUBLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // CUDA_VERSION < 11000
return handle;
}();
return cublas_handle;
}
/*
NOTE: blas gemm is column-major by default, but we need row-major output.
The data of row-major, transposed matrix is exactly the same as the
column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
*/
void gemm_fp16_cublas(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(a));
const auto cuda_data_type = CUDA_R_16F;
const auto cuda_c_data_type =
c.dtype() == torch::kFloat32 ? CUDA_R_32F : CUDA_R_16F;
@@ -55,7 +44,7 @@ void gemm_fp16_cublas(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
const int cublas_lda = m;
const int cublas_ldb = k;
const int cublas_ldc = m;
cublasHandle_t cublas_handle = get_cublas_handle();
cublasHandle_t cublas_handle = at::cuda::getCurrentCUDABlasHandle();
#if CUDA_VERSION >= 11000
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;

View File

@@ -1,5 +1,6 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <c10/cuda/CUDAGuard.h>
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;
@@ -9,12 +10,15 @@ void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y);
void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), w.data_ptr<float>(), u.data_ptr<fp16>(), y.data_ptr<fp16>());
}
void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), w.data_ptr<float>(), u.data_ptr<fp32>(), y.data_ptr<fp32>());
}

View File

@@ -1,7 +0,0 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
template <typename T> T *data_ptr(torch::Tensor x) { return x.data_ptr<T>(); }
template <> inline half *data_ptr(torch::Tensor x) {
return reinterpret_cast<half *>(x.data_ptr<at::Half>());
}

View File

@@ -92,7 +92,7 @@ if os.environ.get("RWKV_CUDA_ON") == "1":
f"{current_path}/cuda/gemm_fp16_cublas.cpp",
],
verbose=True,
extra_ldflags=["cublas.lib"],
extra_ldflags=["cublas.lib" if os.name == "nt" else ""],
extra_cuda_cflags=[
"--use_fast_math",
"-O3",
@@ -171,10 +171,86 @@ if os.environ.get("RWKV_CUDA_ON") == "1":
else:
os.environ["RWKV_CUDA_ON"] = "0"
if os.environ.get("RWKV_CUDA_ON") == "1" and not DISABLE_CUBLAS_GEMM:
@MyStatic
def torch_mm8_seq(x, w, mx, rx, my, ry):
return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)
@MyStatic
def torch_mm8_one(x, w, mx, rx, my, ry):
return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)
if os.environ.get("RWKV_CUDA_ON") == "1":
@MyStatic
def gemm(a, b, output_dtype: Optional[torch.dtype] = None):
def mm8_seq(x, w, mx, rx, my, ry):
if w.device.type == "cuda" and x.dtype == torch.float16:
B, N, M = x.shape[0], w.shape[0], w.shape[1]
return cuda_mm8_seq(B, N, M, x, w, mx, rx, my, ry)
else:
return torch_mm8_seq(x, w, mx, rx, my, ry)
@MyStatic
def mm8_one(x, w, mx, rx, my, ry):
if w.device.type == "cuda":
N, M = w.shape[0], w.shape[1]
return cuda_mm8_one(N, M, x, w, mx, rx, my, ry)
else:
return torch_mm8_one(x, w, mx, rx, my, ry)
else:
@MyStatic
def mm8_seq(x, w, mx, rx, my, ry):
return torch_mm8_seq(x, w, mx, rx, my, ry)
@MyStatic
def mm8_one(x, w, mx, rx, my, ry):
return torch_mm8_one(x, w, mx, rx, my, ry)
def mm8(
    x: torch.Tensor,
    w: torch.Tensor,
    mx: torch.Tensor,
    rx: torch.Tensor,
    my: torch.Tensor,
    ry: torch.Tensor,
):
    if len(x.shape) == 1:
        return mm8_one(x, w, mx, rx, my, ry)
    return mm8_seq(x, w, mx, rx, my, ry)

def matmul(
    a,
    b,
    mx: Optional[torch.Tensor] = None,
    rx: Optional[torch.Tensor] = None,
    my: Optional[torch.Tensor] = None,
    ry: Optional[torch.Tensor] = None,
    output_dtype: Optional[torch.dtype] = None,
) -> torch.Tensor:
    if output_dtype is None:
        output_dtype = a.dtype
    if b.dtype in [torch.float16, torch.bfloat16, torch.float32]:
        assert a.dtype == b.dtype
        return matmul_float(a, b, output_dtype=output_dtype)
    elif b.dtype == torch.uint8:
        assert mx is not None
        assert rx is not None
        assert my is not None
        assert ry is not None
        return mm8(a, b, mx, rx, my, ry).to(output_dtype)
    else:
        raise ValueError("Unsupported dtype")
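
A usage sketch of the dispatcher above (shapes and quantization parameters are invented, identity scales keep the dequantization trivial; assumes matmul/mm8 from this hunk are in scope):

import torch

x = torch.randn(4, 8)                                  # (B, N) activations
w_fp = torch.randn(8, 8)                               # float weights
w_i8 = torch.randint(0, 256, (8, 8), dtype=torch.uint8)
mx, rx = torch.zeros(8), torch.ones(8)                 # per-column offset/scale
my, ry = torch.zeros(8, 1), torch.ones(8, 1)           # per-row offset/scale

y_fp = matmul(x, w_fp)                  # float dtypes -> matmul_float
y_i8 = matmul(x, w_i8, mx, rx, my, ry)  # uint8 -> mm8, dequantizing on the fly
                                        # via (w + 0.5) * ry * rx + my + mx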
if os.environ.get("RWKV_CUDA_ON") == "1" and not DISABLE_CUBLAS_GEMM:
def matmul_float(a, b, output_dtype: Optional[torch.dtype] = None):
if output_dtype is None:
output_dtype = a.dtype
if a.dtype == b.dtype == torch.float16 and a.device.type == "cuda":
@@ -203,9 +279,7 @@ if os.environ.get("RWKV_CUDA_ON") == "1" and not DISABLE_CUBLAS_GEMM:
else:
def gemm(a, b, output_dtype: Optional[torch.dtype] = None):
if output_dtype is None:
output_dtype = a.dtype
def matmul_float(a, b, output_dtype: Optional[torch.dtype] = None):
return (a @ b).to(output_dtype)
@@ -220,7 +294,7 @@ class RWKV(MyModule):
else:
prxxx = lambda *args, **kwargs: None
STRATEGY_REGEX = r"^(?:(?:^|->) *(?:cuda(?::[\d]+)?|cpu|mps) (?:fp(?:16|32)|bf16)(?:i8|i4|i3)?(?: \*[\d]+\+?)? *)+$"
STRATEGY_REGEX = r"^(?:(?:^|->) *(?:cuda(?::[\d]+)?|cpu|mps|dml) (?:fp(?:16|32)|bf16)(?:i8|i4|i3)?(?: \*[\d]+\+?)? *)+$"
if not re.match(STRATEGY_REGEX, strategy):
raise ValueError(
"Invalid strategy. Please read https://pypi.org/project/rwkv/"
@@ -372,6 +446,10 @@ class RWKV(MyModule):
strategy[n].atype = s[i][1][0]
strategy[n].wtype = s[i][1][1]
strategy[n].stream = False
if strategy[n].device == "dml":
import torch_directml
strategy[n].device = torch_directml.device()
if i == stream_i and n >= (plan[i] - stream_count):
strategy[n].stream = True
break
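
For reference, a minimal sketch of what the dml substitution above resolves to (assumes the optional torch-directml package is installed; the default adapter is used):

import torch
import torch_directml  # optional dependency; an assumption of this sketch

dev = torch_directml.device()      # a torch.device of type 'privateuseone'
x = torch.ones(2, 2, device=dev)   # tensors now live on the DirectML adapter
print(x.device.type)               # -> 'privateuseone'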
@@ -577,10 +655,7 @@ class RWKV(MyModule):
prxxx(f"Converted and saved. Now this will exit.")
exit(0)
if self.version == 5.2:
assert (
os.environ["RWKV_CUDA_ON"] == "1"
), "Please Enable Custom CUDA Kernel. Latest RWKV-5 requires os.environ['RWKV_CUDA_ON'] == '1' (will fix soon)"
if self.version == 5.2 and os.environ["RWKV_CUDA_ON"] == "1":
HEAD_SIZE = args.n_att // args.n_head
if LoadPreCompileLibrary("rwkv5") is True:
rwkv5 = torch.ops.rwkv5
@@ -596,6 +671,7 @@ class RWKV(MyModule):
"-res-usage",
"--use_fast_math",
"-O3",
"-Xptxas -O3" if os.name != "nt" else "",
"--extra-device-vectorization",
f"-D_N_={HEAD_SIZE}",
],
@@ -642,42 +718,6 @@ class RWKV(MyModule):
def RUN_RWKV_5(self, B, T, C, H, state, r, k, v, w, u):
return self.RWKV_5.apply(B, T, C, H, state, r, k, v, w, u)
@MyFunction
def torch_mm8_seq(self, x, w, mx, rx, my, ry):
return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)
@MyFunction
def torch_mm8_one(self, x, w, mx, rx, my, ry):
return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)
if os.environ.get("RWKV_CUDA_ON") == "1":
@MyFunction
def mm8_seq(self, x, w, mx, rx, my, ry):
if w.device.type == "cuda" and x.dtype == torch.float16:
B, N, M = x.shape[0], w.shape[0], w.shape[1]
return cuda_mm8_seq(B, N, M, x, w, mx, rx, my, ry)
else:
return self.torch_mm8_seq(x, w, mx, rx, my, ry)
@MyFunction
def mm8_one(self, x, w, mx, rx, my, ry):
if w.device.type == "cuda":
N, M = w.shape[0], w.shape[1]
return cuda_mm8_one(N, M, x, w, mx, rx, my, ry)
else:
return self.torch_mm8_one(x, w, mx, rx, my, ry)
else:
@MyFunction
def mm8_seq(self, x, w, mx, rx, my, ry):
return self.torch_mm8_seq(x, w, mx, rx, my, ry)
@MyFunction
def mm8_one(self, x, w, mx, rx, my, ry):
return self.torch_mm8_one(x, w, mx, rx, my, ry)
########################################################################################################
@MyFunction
@@ -709,43 +749,9 @@ class RWKV(MyModule):
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
vx = torch.square(torch.relu(gemm(kx, kw)))
out = r * gemm(vx, vw)
return x + out, xx
@MyFunction
def ffn_one_i8(
self,
x,
sx,
ln_w,
ln_b,
k_mix,
r_mix,
kw,
vw,
rw,
kmx,
krx,
kmy,
kry,
vmx,
vrx,
vmy,
vry,
rmx,
rrx,
rmy,
rry,
):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_one(rx, rw, rmx, rrx, rmy, rry))
vx = torch.square(torch.relu(self.mm8_one(kx, kw, kmx, krx, kmy, kry)))
out = r * (self.mm8_one(vx, vw, vmx, vrx, vmy, vry))
r = torch.sigmoid(matmul(rx, rw, rmx, rrx, rmy, rry))
vx = torch.square(torch.relu(matmul(kx, kw, kmx, krx, kmy, kry)))
out = r * matmul(vx, vw, vmx, vrx, vmy, vry)
return x + out, xx
########################################################################################################
@@ -780,44 +786,9 @@ class RWKV(MyModule):
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
vx = torch.square(torch.relu(gemm(kx, kw)))
out = r * gemm(vx, vw)
return x + out, xx[-1, :]
@MyFunction
def ffn_seq_i8(
self,
x,
sx,
ln_w,
ln_b,
k_mix,
r_mix,
kw,
vw,
rw,
kmx,
krx,
kmy,
kry,
vmx,
vrx,
vmy,
vry,
rmx,
rrx,
rmy,
rry,
):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1, :]))
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
vx = torch.square(torch.relu(self.mm8_seq(kx, kw, kmx, krx, kmy, kry)))
out = r * (self.mm8_seq(vx, vw, vmx, vrx, vmy, vry))
r = torch.sigmoid(matmul(rx, rw, rmx, rrx, rmy, rry))
vx = torch.square(torch.relu(matmul(kx, kw, kmx, krx, kmy, kry)))
out = r * matmul(vx, vw, vmx, vrx, vmy, vry)
return x + out, xx[-1, :]
########################################################################################################
@@ -863,9 +834,9 @@ class RWKV(MyModule):
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
k = gemm(kx, kw, output_dtype=torch.float32)
v = gemm(vx, vw, output_dtype=torch.float32)
r = torch.sigmoid(matmul(rx, rw, rmx, rrx, rmy, rry))
k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32)
v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32)
ww = t_first + k
p = torch.maximum(pp, ww)
@@ -877,65 +848,7 @@ class RWKV(MyModule):
e1 = torch.exp(ww - p)
e2 = torch.exp(k - p)
out = gemm(r * wkv, ow)
return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p
@MyFunction
def att_one_i8(
self,
x,
sx,
aa,
bb,
pp,
ln_w,
ln_b,
k_mix,
v_mix,
r_mix,
t_decay,
t_first,
kw,
vw,
rw,
ow,
kmx,
krx,
kmy,
kry,
vmx,
vrx,
vmy,
vry,
rmx,
rrx,
rmy,
rry,
omx,
orx,
omy,
ory,
):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_one(rx, rw, rmx, rrx, rmy, rry))
k = (self.mm8_one(kx, kw, kmx, krx, kmy, kry)).float()
v = (self.mm8_one(vx, vw, vmx, vrx, vmy, vry)).float()
ww = t_first + k
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, k)
e1 = torch.exp(ww - p)
e2 = torch.exp(k - p)
out = self.mm8_one(r * wkv, ow, omx, orx, omy, ory)
out = matmul(r * wkv, ow, omx, orx, omy, ory)
return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p
########################################################################################################
@@ -982,9 +895,9 @@ class RWKV(MyModule):
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
k = gemm(kx, kw, output_dtype=torch.float32)
v = gemm(vx, vw, output_dtype=torch.float32)
r = torch.sigmoid(matmul(rx, rw, rmx, rrx, rmy, rry))
k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32)
v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32)
T = x.shape[0]
for t in range(T):
@@ -1002,72 +915,7 @@ class RWKV(MyModule):
aa = e1 * aa + e2 * vv
bb = e1 * bb + e2
pp = p
out = gemm(r * sx, ow)
return x + out, xx[-1, :], aa, bb, pp
@MyFunction
def att_seq_i8(
self,
x,
sx,
aa,
bb,
pp,
ln_w,
ln_b,
k_mix,
v_mix,
r_mix,
t_decay,
t_first,
kw,
vw,
rw,
ow,
kmx,
krx,
kmy,
kry,
vmx,
vrx,
vmy,
vry,
rmx,
rrx,
rmy,
rry,
omx,
orx,
omy,
ory,
):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1, :]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
k = self.mm8_seq(kx, kw, kmx, krx, kmy, kry).float()
v = self.mm8_seq(vx, vw, vmx, vrx, vmy, vry).float()
T = x.shape[0]
for t in range(T):
kk = k[t]
vv = v[t]
ww = t_first + kk
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, kk)
e1 = torch.exp(ww - p)
e2 = torch.exp(kk - p)
aa = e1 * aa + e2 * vv
bb = e1 * bb + e2
pp = p
out = self.mm8_seq(r * sx, ow, omx, orx, omy, ory)
out = matmul(r * sx, ow, omx, orx, omy, ory)
return x + out, xx[-1, :], aa, bb, pp
########################################################################################################
@@ -1116,11 +964,11 @@ class RWKV(MyModule):
H = t_decay.shape[0]
S = x.shape[-1] // H
r = gemm(rx, rw, output_dtype=torch.float32).view(H, 1, S)
k = gemm(kx, kw, output_dtype=torch.float32).view(H, S, 1)
v = gemm(vx, vw, output_dtype=torch.float32).view(H, 1, S)
r = matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32).view(H, 1, S)
k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32).view(H, S, 1)
v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32).view(H, 1, S)
a = gemm(k, v)
a = matmul(k, v)
out = r @ (t_first * a + s)
s = a + t_decay * s
@@ -1129,7 +977,7 @@ class RWKV(MyModule):
out.unsqueeze(0), num_groups=H, weight=lx_w, bias=lx_b
).squeeze(0)
out = out.to(dtype=x.dtype)
out = gemm(out, ow)
out = matmul(out, ow, omx, orx, omy, ory)
return x + out, xx, s
@@ -1192,14 +1040,22 @@ class RWKV(MyModule):
w = w[:, :-T].reshape(-1, T, 2 * T - 1)
w = w[:, :, T - 1 :].reshape(H, T, T)
r = gemm(rx, rw, output_dtype=torch.float32).view(T, H, S).transpose(0, 1)
r = (
matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32)
.view(T, H, S)
.transpose(0, 1)
)
k = (
gemm(kx, kw, output_dtype=torch.float32)
matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32)
.view(T, H, S)
.transpose(0, 1)
.transpose(-2, -1)
)
v = gemm(vx, vw, output_dtype=torch.float32).view(T, H, S).transpose(0, 1)
v = (
matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32)
.view(T, H, S)
.transpose(0, 1)
)
out = ((r @ k) * w) @ v + (r @ s) * wb
s = ws * s + (k * wk) @ v
@@ -1207,7 +1063,7 @@ class RWKV(MyModule):
out = out.transpose(0, 1).contiguous().reshape(T, H * S)
out = F.group_norm(out, num_groups=H, weight=lx_w, bias=lx_b)
out = out.to(dtype=x.dtype)
out = gemm(out, ow)
out = matmul(out, ow, omx, orx, omy, ory)
return x + out, xx[-1, :], s
@@ -1246,6 +1102,10 @@ class RWKV(MyModule):
rrx,
rmy,
rry,
gmx,
grx,
gmy,
gry,
omx,
orx,
omy,
@@ -1260,12 +1120,12 @@ class RWKV(MyModule):
H = t_decay.shape[0]
S = x.shape[-1] // H
r = gemm(rx, rw, output_dtype=torch.float32).view(H, 1, S)
k = gemm(kx, kw, output_dtype=torch.float32).view(H, S, 1)
v = gemm(vx, vw, output_dtype=torch.float32).view(H, 1, S)
g = F.silu(gemm(gx, gw))
r = matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32).view(H, 1, S)
k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32).view(H, S, 1)
v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32).view(H, 1, S)
g = F.silu(matmul(gx, gw, gmx, grx, gmy, gry))
a = gemm(k, v)
a = matmul(k, v)
out = r @ (t_first * a + s)
s = a + t_decay * s
@@ -1274,7 +1134,7 @@ class RWKV(MyModule):
out.unsqueeze(0), num_groups=H, weight=lx_w, bias=lx_b
).squeeze(0)
out = out.to(dtype=x.dtype) * g
out = gemm(out, ow)
out = matmul(out, ow, omx, orx, omy, ory)
return x + out, xx, s
@@ -1311,6 +1171,10 @@ class RWKV(MyModule):
rrx,
rmy,
rry,
gmx,
grx,
gmy,
gry,
omx,
orx,
omy,
@@ -1340,15 +1204,23 @@ class RWKV(MyModule):
w = w[:, :-T].reshape(-1, T, 2 * T - 1)
w = w[:, :, T - 1 :].reshape(H, T, T)
r = gemm(rx, rw, output_dtype=torch.float32).view(T, H, S).transpose(0, 1)
r = (
matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32)
.view(T, H, S)
.transpose(0, 1)
)
k = (
gemm(kx, kw, output_dtype=torch.float32)
matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32)
.view(T, H, S)
.transpose(0, 1)
.transpose(-2, -1)
)
v = gemm(vx, vw, output_dtype=torch.float32).view(T, H, S).transpose(0, 1)
g = F.silu(gemm(gx, gw))
v = (
matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32)
.view(T, H, S)
.transpose(0, 1)
)
g = F.silu(matmul(gx, gw, gmx, grx, gmy, gry))
out = ((r @ k) * w) @ v + (r @ s) * wb
s = ws * s + (k * wk) @ v
@@ -1356,12 +1228,13 @@ class RWKV(MyModule):
out = out.transpose(0, 1).contiguous().reshape(T, H * S)
out = F.group_norm(out, num_groups=H, weight=lx_w, bias=lx_b)
out = out.to(dtype=x.dtype) * g
out = gemm(out, ow)
out = matmul(out, ow, omx, orx, omy, ory)
return x + out, xx[-1, :], s
########################################################################################################
@MyFunction
def att_seq_v5_2(
self,
x,
@@ -1394,6 +1267,10 @@ class RWKV(MyModule):
rrx,
rmy,
rry,
gmx,
grx,
gmy,
gry,
omx,
orx,
omy,
@@ -1407,32 +1284,40 @@ class RWKV(MyModule):
gx = xx * g_mix + sx * (1 - g_mix)
H = t_decay.shape[0]
N = x.shape[-1] // H
S = x.shape[-1] // H
T = x.shape[0]
r = gemm(rx, rw, output_dtype=torch.float32)
k = gemm(kx, kw, output_dtype=torch.float32)
v = gemm(vx, vw, output_dtype=torch.float32)
g = F.silu(gemm(gx, gw))
out, s = self.RUN_RWKV_5(
1,
T,
self.args.n_att,
H,
s.transpose(-1, -2).contiguous(),
r,
k,
v,
w=t_decay,
u=t_first,
r = (
matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32)
.view(T, H, S)
.transpose(0, 1)
)
s = s.transpose(-1, -2)
k = (
matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32)
.view(T, H, S)
.transpose(0, 1)
.transpose(-2, -1)
)
v = (
matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32)
.view(T, H, S)
.transpose(0, 1)
)
g = F.silu(matmul(gx, gw, gmx, grx, gmy, gry))
out = out.reshape(T, H * N)
out = torch.empty((T, H, S), dtype=r.dtype, device=r.device)
for t in range(T):
rt = r[:, t : t + 1, :]
kt = k[:, :, t : t + 1]
vt = v[:, t : t + 1, :]
at = matmul(kt, vt)
out[t] = (rt @ (t_first * at + s)).squeeze(1)
s = at + t_decay * s
out = out.reshape(T, H * S)
out = F.group_norm(out, num_groups=H, weight=lx_w, bias=lx_b)
out = out.to(dtype=x.dtype) * g
out = gemm(out, ow)
out = matmul(out, ow, omx, orx, omy, ory)
return x + out, xx[-1, :], s
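
The loop introduced above is the plain RWKV-5 linear-attention recurrence. A standalone numpy sketch of the same state update for a single head (tiny invented sizes; per-head parameter shapes are an assumption):

import numpy as np

T, S = 4, 3                     # sequence length, head size (invented)
r = np.random.rand(T, 1, S)     # receptance rows
k = np.random.rand(T, S, 1)     # key columns
v = np.random.rand(T, 1, S)     # value rows
t_first = np.random.rand(S, 1)  # bonus applied to the current token only
t_decay = np.random.rand(S, 1)  # per-channel decay of the carried state
s = np.zeros((S, S))            # running state

out = np.empty((T, S))
for t in range(T):
    at = k[t] @ v[t]                         # rank-1 update, shape (S, S)
    out[t] = (r[t] @ (t_first * at + s))[0]  # current token gets the bonus
    s = at + t_decay * s                     # older contributions decay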
@@ -1483,32 +1368,34 @@ class RWKV(MyModule):
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
k = gemm(kx, kw, output_dtype=torch.float32)
v = gemm(vx, vw, output_dtype=torch.float32)
y, aa, bb, pp = cuda_wkv(T, aa.shape[0], t_decay, t_first, k, v, aa, bb, pp)
r = torch.sigmoid(matmul(rx, rw, rmx, rrx, rmy, rry))
k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32)
v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32)
y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)
out = gemm(r * y.to(x.dtype), ow)
out = matmul(r * y.to(x.dtype), ow, omx, orx, omy, ory)
return x + out, xx[-1, :], aa, bb, pp
@MyFunction
def cuda_att_seq_i8(
# NOTE: decorating this with @MyFunction causes a JIT error
def cuda_att_seq_v5_2(
self,
x,
sx,
aa,
bb,
pp,
s,
ln_w,
ln_b,
lx_w,
lx_b,
k_mix,
v_mix,
r_mix,
g_mix,
t_decay,
t_first,
kw,
vw,
rw,
gw,
ow,
kmx,
krx,
@@ -1522,25 +1409,51 @@ class RWKV(MyModule):
rrx,
rmy,
rry,
gmx,
grx,
gmy,
gry,
omx,
orx,
omy,
ory,
):
T, C = x.shape
xx = F.layer_norm(x, (C,), weight=ln_w, bias=ln_b)
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1, :]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
gx = xx * g_mix + sx * (1 - g_mix)
r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
k = self.mm8_seq(kx, kw, kmx, krx, kmy, kry)
v = self.mm8_seq(vx, vw, vmx, vrx, vmy, vry)
y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)
H = t_decay.shape[0]
N = x.shape[-1] // H
T = x.shape[0]
out = self.mm8_seq(r * y, ow, omx, orx, omy, ory)
return x + out, xx[-1, :], aa, bb, pp
r = matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32)
k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32)
v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32)
g = F.silu(matmul(gx, gw, gmx, grx, gmy, gry))
out, s = self.RUN_RWKV_5(
1,
T,
self.args.n_att,
H,
s.transpose(-1, -2).contiguous(),
r,
k,
v,
w=t_decay,
u=t_first,
)
s = s.transpose(-1, -2)
out = out.reshape(T, H * N)
out = F.group_norm(out, num_groups=H, weight=lx_w, bias=lx_b)
out = out.to(dtype=x.dtype) * g
out = matmul(out, ow, omx, orx, omy, ory)
return x + out, xx[-1, :], s
########################################################################################################
@@ -1621,30 +1534,31 @@ class RWKV(MyModule):
atype = dd.atype
wtype = dd.wtype
if seq_mode:
if "cuda" in str(dev) and os.environ["RWKV_CUDA_ON"] == "1":
ATT = (
self.cuda_att_seq
if wtype != torch.uint8
else self.cuda_att_seq_i8
)
cuda_applicable = os.environ[
"RWKV_CUDA_ON"
] == "1" and "cuda" in str(dev)
if cuda_applicable:
ATT = self.cuda_att_seq
else:
ATT = self.att_seq if wtype != torch.uint8 else self.att_seq_i8
ATT = self.att_seq
if self.version == 5:
ATT = self.att_seq_v5
elif self.version == 5.1:
ATT = self.att_seq_v5_1
elif self.version == 5.2:
ATT = self.att_seq_v5_2
FFN = self.ffn_seq if wtype != torch.uint8 else self.ffn_seq_i8
if cuda_applicable:
ATT = self.cuda_att_seq_v5_2
FFN = self.ffn_seq
else:
ATT = self.att_one if wtype != torch.uint8 else self.att_one_i8
ATT = self.att_one
if self.version == 5:
ATT = self.att_one_v5
elif self.version == 5.1:
ATT = self.att_one_v5_1
elif self.version == 5.2:
ATT = self.att_one_v5_1 # same as v5.1
FFN = self.ffn_one if wtype != torch.uint8 else self.ffn_one_i8
FFN = self.ffn_one
x = x.to(dtype=atype, device=dev)
@@ -1789,6 +1703,10 @@ class RWKV(MyModule):
rrx,
rmy,
rry,
gmx,
grx,
gmy,
gry,
omx,
orx,
omy,
@@ -1861,7 +1779,7 @@ class RWKV(MyModule):
x = x @ w["head.weight"]
else:
if seq_mode and full_output:
x = self.mm8_seq(
x = mm8_seq(
x,
w["head.weight"],
w["head.weight_mx"],
@@ -1870,7 +1788,7 @@ class RWKV(MyModule):
w["head.weight_ry"],
)
else:
x = self.mm8_one(
x = mm8_one(
x,
w["head.weight"],
w["head.weight_mx"],

Binary file not shown.

View File

@@ -81,8 +81,9 @@ class PIPELINE:
def sample_logits(self, logits, temperature=1.0, top_p=0.85, top_k=0):
probs = F.softmax(logits.float(), dim=-1)
top_k = int(top_k)
if probs.device == torch.device("cpu"):
probs = probs.numpy()
# 'privateuseone' is the device type of custom devices such as `torch_directml.device()`
if probs.device.type in ["cpu", "privateuseone"]:
probs = probs.cpu().numpy()
sorted_ids = np.argsort(probs)
sorted_probs = probs[sorted_ids][::-1]
cumulative_probs = np.cumsum(sorted_probs)
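
Continuing the numpy path above, a self-contained top-p sketch in the same spirit (illustrative values; the real sample_logits also applies temperature and top_k):

import numpy as np

probs = np.array([0.5, 0.3, 0.15, 0.05])
top_p = 0.85
sorted_ids = np.argsort(probs)
sorted_probs = probs[sorted_ids][::-1]  # descending
cumulative_probs = np.cumsum(sorted_probs)
cutoff = sorted_probs[np.argmax(cumulative_probs >= top_p)]
probs[probs < cutoff] = 0               # drop the low-probability tail
probs = probs / probs.sum()
token = np.random.choice(len(probs), p=probs)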

Binary file not shown.

View File

@@ -10,7 +10,7 @@ logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(levelname)s\n%(message)s")
fh = logging.handlers.RotatingFileHandler(
"api.log", mode="a", maxBytes=3 * 1024 * 1024, backupCount=3
"api.log", mode="a", maxBytes=3 * 1024 * 1024, backupCount=3, encoding="utf-8"
)
fh.setFormatter(formatter)
logger.addHandler(fh)

View File

@@ -1,11 +1,13 @@
import os
import sys
import global_var
def ngrok_connect():
from pyngrok import ngrok, conf
conf.set_default(conf.PyngrokConfig(ngrok_path="./ngrok"))
conf.set_default(
conf.PyngrokConfig(ngrok_path="./ngrok.exe" if os.name == "nt" else "./ngrok")
)
ngrok.set_auth_token(os.environ["ngrok_token"])
http_tunnel = ngrok.connect(8000 if len(sys.argv) == 1 else int(sys.argv[1]))
print(http_tunnel.public_url)
http_tunnel = ngrok.connect(global_var.get(global_var.Args).port)
print(f"ngrok url: {http_tunnel.public_url}")

View File

@@ -4,7 +4,7 @@ import os
import pathlib
import copy
import re
from typing import Dict, Iterable, List, Tuple, Union
from typing import Dict, Iterable, List, Tuple, Union, Type
from utils.log import quick_log
from fastapi import HTTPException
from pydantic import BaseModel, Field
@@ -21,33 +21,21 @@ os.environ["TORCH_EXTENSIONS_DIR"] = f"{pathlib.Path(__file__).parent.parent.res
class RWKVType(Enum):
NoneType = auto()
Raven = auto()
World = auto()
Music = auto()
class AbstractRWKV(ABC):
def __init__(self, model: str, strategy: str, tokens_path: str):
rwkv_beta = global_var.get(global_var.Args).rwkv_beta
# dynamic import to make RWKV_CUDA_ON work
if rwkv_beta:
from rwkv_pip.beta.model import (
RWKV as Model,
)
else:
from rwkv_pip.model import (
RWKV as Model,
)
from rwkv_pip.utils import PIPELINE
filename, _ = os.path.splitext(os.path.basename(model))
self.name = filename
self.model = Model(model, strategy)
self.pipeline = PIPELINE(self.model, tokens_path)
def __init__(self, model, pipeline):
self.name = "rwkv"
self.model = model
self.pipeline = pipeline
self.model_state = None
self.model_tokens = []
self.rwkv_type: RWKVType = None
self.rwkv_type: RWKVType = RWKVType.NoneType
self.tokenizer_len = len(model.w["emb.weight"])
self.max_tokens_per_generation = 500
self.temperature = 1
@@ -348,8 +336,8 @@ class AbstractRWKV(ABC):
class TextRWKV(AbstractRWKV):
def __init__(self, model: str, strategy: str, tokens_path: str) -> None:
super().__init__(model, strategy, tokens_path)
def __init__(self, model, pipeline) -> None:
super().__init__(model, pipeline)
self.CHUNK_LEN = 256
@@ -361,16 +349,16 @@ class TextRWKV(AbstractRWKV):
self.penalty_alpha_frequency = 1
self.interface = ":"
if "world" in self.name.lower():
self.rwkv_type = RWKVType.World
self.user = "Question"
self.bot = "Answer"
self.END_OF_LINE = 11
else:
if self.tokenizer_len < 65536:
self.rwkv_type = RWKVType.Raven
self.user = "Bob"
self.bot = "Alice"
self.END_OF_LINE = 187
else:
self.rwkv_type = RWKVType.World
self.user = "User"
self.bot = "Assistant"
self.END_OF_LINE = 11
self.AVOID_REPEAT_TOKENS = []
AVOID_REPEAT = ""
@@ -469,8 +457,8 @@ The following is a coherent verbose detailed conversation between a girl named {
class MusicRWKV(AbstractRWKV):
def __init__(self, model: str, strategy: str, tokens_path: str):
super().__init__(model, strategy, tokens_path)
def __init__(self, model, pipeline):
super().__init__(model, pipeline)
self.max_tokens_per_generation = 500
self.temperature = 1
@@ -510,6 +498,52 @@ class MusicRWKV(AbstractRWKV):
return " " + delta
def get_tokenizer(tokenizer_len: int):
    tokenizer_dir = f"{pathlib.Path(__file__).parent.parent.resolve()}/rwkv_pip/"
    if tokenizer_len < 50277:
        return tokenizer_dir + "tokenizer-midi.json"
    elif tokenizer_len < 65536:
        return tokenizer_dir + "20B_tokenizer.json"
    else:
        return "rwkv_vocab_v20230424"
def RWKV(model: str, strategy: str, tokenizer: Union[str, None]) -> AbstractRWKV:
    rwkv_beta = global_var.get(global_var.Args).rwkv_beta
    # dynamic import to make RWKV_CUDA_ON work
    if rwkv_beta:
        from rwkv_pip.beta.model import (
            RWKV as Model,
        )
    else:
        from rwkv_pip.model import (
            RWKV as Model,
        )
    from rwkv_pip.utils import PIPELINE

    filename, _ = os.path.splitext(os.path.basename(model))
    model = Model(model, strategy)
    if not tokenizer:
        tokenizer = get_tokenizer(len(model.w["emb.weight"]))
    pipeline = PIPELINE(model, tokenizer)

    rwkv_map: dict[str, Type[AbstractRWKV]] = {
        "20B_tokenizer": TextRWKV,
        "rwkv_vocab_v20230424": TextRWKV,
        "tokenizer-midi": MusicRWKV,
    }
    tokenizer_name = os.path.splitext(os.path.basename(tokenizer))[0]
    rwkv: AbstractRWKV
    if tokenizer_name in rwkv_map:
        rwkv = rwkv_map[tokenizer_name](model, pipeline)
    else:
        rwkv = TextRWKV(model, pipeline)
    rwkv.name = filename
    return rwkv
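
A hypothetical construction call (model path and strategy invented), showing that the concrete class is now picked by tokenizer rather than by file name:

rwkv = RWKV(
    model="models/RWKV-5-World-3B.pth",  # hypothetical path
    strategy="cuda fp16",
    tokenizer=None,  # auto-selected from the emb.weight vocab size
)
print(type(rwkv).__name__)  # TextRWKV for world/20B vocabs, MusicRWKV for midi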
class ModelConfigBody(BaseModel):
max_tokens: int = Field(default=None, gt=0, le=102400)
temperature: float = Field(default=None, ge=0, le=2)
@@ -517,8 +551,8 @@ class ModelConfigBody(BaseModel):
presence_penalty: float = Field(default=None, ge=-2, le=2)
frequency_penalty: float = Field(default=None, ge=-2, le=2)
class Config:
schema_extra = {
model_config = {
"json_schema_extra": {
"example": {
"max_tokens": 1000,
"temperature": 1.2,
@@ -527,6 +561,7 @@ class ModelConfigBody(BaseModel):
"frequency_penalty": 0.4,
}
}
}
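
The change above is the Pydantic v2 idiom: a model_config dict replaces the v1 nested class Config, which is what restores the example payload in /docs. A minimal standalone repro (field and values invented):

from pydantic import BaseModel, Field

class DemoBody(BaseModel):
    max_tokens: int = Field(default=None, gt=0, le=102400)
    # v2 style: model_config dict instead of the v1 nested `class Config`
    model_config = {"json_schema_extra": {"example": {"max_tokens": 1000}}}

print(DemoBody.model_json_schema()["example"])  # -> {'max_tokens': 1000}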
def set_rwkv_config(model: AbstractRWKV, body: ModelConfigBody):

View File

@@ -0,0 +1,14 @@
from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.staticfiles import StaticFiles
import uvicorn

webui_server = FastAPI()
webui_server.add_middleware(GZipMiddleware, minimum_size=1000)
webui_server.mount(
    "/", StaticFiles(directory="frontend/dist", html=True), name="static"
)

if __name__ == "__main__":
    uvicorn.run("webui_server:webui_server")

View File

@@ -1,7 +1,5 @@
{"text": "1:This is the first document."}
{"text": "2:Hello\nWorld"}
{"text": "3:1+1=2\n1+2=3\n2+2=4"}
{"text": "4:You will be training the GPT version because it's paralleziable and faster to train."}
{"text": "5:Read the inference code in src/model.py and try using the final hidden state(.xx .aa .bb)"}
{"text": "6:You can fine-tune the model with longer ctxLen and it can quickly adapt to longer ctxLens."}
{"text": "7:Consider RWKV 14B. The state has 200 vectors, that is, 5 vectors for each block: fp16 (xx), fp32 (aa), fp32 (bb), fp32 (pp), fp16 (xx)."}
{"text": "The following is an epic science fiction masterpiece that is immortalized, with delicate descriptions and grand depictions of interstellar civilization wars.\nChapter 1.\nAs I sit down to write here amidst the shadows of vine-leaves under the blue sky of southern Italy, it comes to me with a certain quality of astonishment that my participation in these amazing adventures of Mr. Cavor was, after all, the outcome of the purest accident. It might have been any one. I fell into these things at a time when I thought myself removed from the slightest possibility of disturbing experiences. I had gone to Lympne because I had imagined it the most uneventful place in the world. “Here, at any rate,” said I, “I shall find peace and a chance to work!”"}
{"text": "Translate the following into Chinese.\n\nEnglish: What rooms do you have available?\nChinese: 你们有哪些房间可以提供"}
{"text": "User: Hello.\n\nAssistant: I'm here, meow~.\n\nUser: Can you tell some jokes?\n\nAssistant: Of course, master. What kind of jokes would you like to hear?"}
{"text": "Instruction: Write a story using the following information\n\nInput: A man named Alex chops a tree down\n\nResponse: Once upon a time, there was a man named Alex who lived in the heart of the forest. He had always been fascinated by trees and spent most of his days exploring the forest and learning about its many wonders. One day, while wandering through the woods, he stumbled upon an old oak tree that stood tall and proud in the middle of a clearing."}
{"text": "def get_args(args: Union[Sequence[str], None] = None):\n parser = argparse.ArgumentParser()\n group = parser.add_argument_group(title=\"server arguments\")\n group.add_argument(\n \"--port\",\n type=int,\n default=8000,\n help=\"port to run the server on (default: 8000)\",\n )\n group.add_argument(\n \"--host\",\n type=str,\n default=\"127.0.0.1\",\n help=\"host to run the server on (default: 127.0.0.1)\",\n )"}

View File

@@ -246,5 +246,6 @@ if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))

View File

@@ -64,5 +64,6 @@ try:
torch.save(output_w, output)
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))

View File

@@ -264,7 +264,7 @@ if __name__ == "__main__":
#
# Data = {args.data_file} ({args.data_type}), ProjDir = {args.proj_dir}
#
# Epoch = {args.epoch_begin} to {args.epoch_begin + args.epoch_count - 1} (will continue afterwards), save every {args.epoch_save} epoch
# Epoch = {args.epoch_begin} to {args.epoch_begin + args.epoch_count - 1}, save every {args.epoch_save} epoch
#
# Each "epoch" = {args.epoch_steps} steps, {samples_per_epoch} samples, {tokens_per_epoch} tokens
#

View File

@@ -1,3 +1,3 @@
torch==1.13.1
pytorch_lightning==1.9.5
deepspeed
deepspeed==0.11.2

View File

@@ -1,9 +1,10 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8"/>
<meta content="width=device-width, initial-scale=1.0" name="viewport"/>
<title>RWKV-Runner</title>
<meta charset="UTF-8" />
<meta content="width=device-width, initial-scale=1.0" name="viewport" />
<title>RWKV-Runner</title>
<link href="./src/assets/images/logo.png" rel="icon" type="image/x-icon">
</head>
<body>
<div id="root"></div>

View File

@@ -15,7 +15,7 @@
"@primer/octicons-react": "^19.1.0",
"chart.js": "^4.3.0",
"classnames": "^2.3.2",
"github-markdown-css": "^5.2.0",
"file-saver": "^2.0.5",
"html-midi-player": "^1.5.0",
"i18next": "^22.4.15",
"mobx": "^6.9.0",
@@ -37,6 +37,7 @@
"uuid": "^9.0.0"
},
"devDependencies": {
"@types/file-saver": "^2.0.7",
"@types/react": "^18.2.6",
"@types/react-beautiful-dnd": "^13.1.4",
"@types/react-dom": "^18.2.4",
@@ -74,12 +75,13 @@
}
},
"node_modules/@babel/code-frame": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.5.tgz",
"integrity": "sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==",
"version": "7.22.13",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz",
"integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==",
"dev": true,
"dependencies": {
"@babel/highlight": "^7.22.5"
"@babel/highlight": "^7.22.13",
"chalk": "^2.4.2"
},
"engines": {
"node": ">=6.9.0"
@@ -125,12 +127,12 @@
}
},
"node_modules/@babel/generator": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.5.tgz",
"integrity": "sha512-+lcUbnTRhd0jOewtFSedLyiPsD5tswKkbgcezOqqWFUVNEwoUTlpPOBmvhG7OXWLR4jMdv0czPGH5XbflnD1EA==",
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz",
"integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==",
"dev": true,
"dependencies": {
"@babel/types": "^7.22.5",
"@babel/types": "^7.23.0",
"@jridgewell/gen-mapping": "^0.3.2",
"@jridgewell/trace-mapping": "^0.3.17",
"jsesc": "^2.5.1"
@@ -159,22 +161,22 @@
}
},
"node_modules/@babel/helper-environment-visitor": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.5.tgz",
"integrity": "sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q==",
"version": "7.22.20",
"resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz",
"integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==",
"dev": true,
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-function-name": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.22.5.tgz",
"integrity": "sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ==",
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz",
"integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==",
"dev": true,
"dependencies": {
"@babel/template": "^7.22.5",
"@babel/types": "^7.22.5"
"@babel/template": "^7.22.15",
"@babel/types": "^7.23.0"
},
"engines": {
"node": ">=6.9.0"
@@ -245,9 +247,9 @@
}
},
"node_modules/@babel/helper-split-export-declaration": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.5.tgz",
"integrity": "sha512-thqK5QFghPKWLhAV321lxF95yCg2K3Ob5yw+M3VHWfdia0IkPXUtoLH8x/6Fh486QUvzhb8YOWHChTVen2/PoQ==",
"version": "7.22.6",
"resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz",
"integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==",
"dev": true,
"dependencies": {
"@babel/types": "^7.22.5"
@@ -266,9 +268,9 @@
}
},
"node_modules/@babel/helper-validator-identifier": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz",
"integrity": "sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==",
"version": "7.22.20",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz",
"integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==",
"dev": true,
"engines": {
"node": ">=6.9.0"
@@ -298,13 +300,13 @@
}
},
"node_modules/@babel/highlight": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.5.tgz",
"integrity": "sha512-BSKlD1hgnedS5XRnGOljZawtag7H1yPfQp0tdNJCHoH6AZ+Pcm9VvkrK59/Yy593Ypg0zMxH2BxD1VPYUQ7UIw==",
"version": "7.22.20",
"resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz",
"integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==",
"dev": true,
"dependencies": {
"@babel/helper-validator-identifier": "^7.22.5",
"chalk": "^2.0.0",
"@babel/helper-validator-identifier": "^7.22.20",
"chalk": "^2.4.2",
"js-tokens": "^4.0.0"
},
"engines": {
@@ -312,9 +314,9 @@
}
},
"node_modules/@babel/parser": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.5.tgz",
"integrity": "sha512-DFZMC9LJUG9PLOclRC32G63UXwzqS2koQC8dkx+PLdmt1xSePYpbT/NbsrJy8Q/muXz7o/h/d4A7Fuyixm559Q==",
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz",
"integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==",
"dev": true,
"bin": {
"parser": "bin/babel-parser.js"
@@ -365,33 +367,33 @@
}
},
"node_modules/@babel/template": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.5.tgz",
"integrity": "sha512-X7yV7eiwAxdj9k94NEylvbVHLiVG1nvzCV2EAowhxLTwODV1jl9UzZ48leOC0sH7OnuHrIkllaBgneUykIcZaw==",
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz",
"integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==",
"dev": true,
"dependencies": {
"@babel/code-frame": "^7.22.5",
"@babel/parser": "^7.22.5",
"@babel/types": "^7.22.5"
"@babel/code-frame": "^7.22.13",
"@babel/parser": "^7.22.15",
"@babel/types": "^7.22.15"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/traverse": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.5.tgz",
"integrity": "sha512-7DuIjPgERaNo6r+PZwItpjCZEa5vyw4eJGufeLxrPdBXBoLcCJCIasvK6pK/9DVNrLZTLFhUGqaC6X/PA007TQ==",
"version": "7.23.2",
"resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz",
"integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==",
"dev": true,
"dependencies": {
"@babel/code-frame": "^7.22.5",
"@babel/generator": "^7.22.5",
"@babel/helper-environment-visitor": "^7.22.5",
"@babel/helper-function-name": "^7.22.5",
"@babel/code-frame": "^7.22.13",
"@babel/generator": "^7.23.0",
"@babel/helper-environment-visitor": "^7.22.20",
"@babel/helper-function-name": "^7.23.0",
"@babel/helper-hoist-variables": "^7.22.5",
"@babel/helper-split-export-declaration": "^7.22.5",
"@babel/parser": "^7.22.5",
"@babel/types": "^7.22.5",
"@babel/helper-split-export-declaration": "^7.22.6",
"@babel/parser": "^7.23.0",
"@babel/types": "^7.23.0",
"debug": "^4.1.0",
"globals": "^11.1.0"
},
@@ -400,13 +402,13 @@
}
},
"node_modules/@babel/types": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.5.tgz",
"integrity": "sha512-zo3MIHGOkPOfoRXitsgHLjEXmlDaD/5KU1Uzuc9GNiZPhSqVxVRtxuPaSBZDsYZ9qV88AjtMtWW7ww98loJ9KA==",
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz",
"integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==",
"dev": true,
"dependencies": {
"@babel/helper-string-parser": "^7.22.5",
"@babel/helper-validator-identifier": "^7.22.5",
"@babel/helper-validator-identifier": "^7.22.20",
"to-fast-properties": "^2.0.0"
},
"engines": {
@@ -2279,6 +2281,12 @@
"@types/ms": "*"
}
},
"node_modules/@types/file-saver": {
"version": "2.0.7",
"resolved": "https://registry.npmjs.org/@types/file-saver/-/file-saver-2.0.7.tgz",
"integrity": "sha512-dNKVfHd/jk0SkR/exKGj2ggkB45MAkzvWCaqLUUgkyjITkGNzH8H+yUwr+BLJUBjZOe9w8X3wgmXhZDRg1ED6A==",
"dev": true
},
"node_modules/@types/hast": {
"version": "2.3.4",
"resolved": "https://registry.npmmirror.com/@types/hast/-/hast-2.3.4.tgz",
@@ -2288,9 +2296,9 @@
}
},
"node_modules/@types/hoist-non-react-statics": {
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/@types/hoist-non-react-statics/-/hoist-non-react-statics-3.3.1.tgz",
"integrity": "sha512-iMIqiko6ooLrTh1joXodJK5X9xeEALT1kM5G3ZLhD3hszxBdIEd5C75U834D9mLcINgD4OyZf5uQXjkuYydWvA==",
"version": "3.3.5",
"resolved": "https://registry.npmjs.org/@types/hoist-non-react-statics/-/hoist-non-react-statics-3.3.5.tgz",
"integrity": "sha512-SbcrWzkKBw2cdwRTwQAswfpB9g9LJWfjtUeW/jvNwbhC8cpmmNYVePa+ncbUe0rGTQ7G3Ff6mYUN2VMfLVr+Sg==",
"dependencies": {
"@types/react": "*",
"hoist-non-react-statics": "^3.3.0"
@@ -2371,9 +2379,9 @@
}
},
"node_modules/@types/react-redux": {
"version": "7.1.25",
"resolved": "https://registry.npmjs.org/@types/react-redux/-/react-redux-7.1.25.tgz",
"integrity": "sha512-bAGh4e+w5D8dajd6InASVIyCo4pZLJ66oLb80F9OBLO1gKESbZcRCJpTT6uLXX+HAB57zw1WTdwJdAsewuTweg==",
"version": "7.1.29",
"resolved": "https://registry.npmjs.org/@types/react-redux/-/react-redux-7.1.29.tgz",
"integrity": "sha512-orHCOWqBBQ1LP1uD6JVdXL+ZRTEWhGGne+VOPcXef03rC+QYdzktLhxR3ozymPDyZK0CNCUuQs9tyQhfg1ku+w==",
"dependencies": {
"@types/hoist-non-react-statics": "^3.3.0",
"@types/react": "*",
@@ -2649,10 +2657,24 @@
}
},
"node_modules/caniuse-lite": {
"version": "1.0.30001482",
"resolved": "https://registry.npmmirror.com/caniuse-lite/-/caniuse-lite-1.0.30001482.tgz",
"integrity": "sha512-F1ZInsg53cegyjroxLNW9DmrEQ1SuGRTO1QlpA0o2/6OpQ0gFeDRoq1yFmnr8Sakn9qwwt9DmbxHB6w167OSuQ==",
"dev": true
"version": "1.0.30001561",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001561.tgz",
"integrity": "sha512-NTt0DNoKe958Q0BE0j0c1V9jbUzhBxHIEJy7asmGrpE0yG63KTV7PLHPnK2E1O9RsQrQ081I3NLuXGS6zht3cw==",
"dev": true,
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/caniuse-lite"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
]
},
"node_modules/ccount": {
"version": "2.0.1",
@@ -3232,6 +3254,11 @@
"resolved": "https://registry.npmjs.org/fft.js/-/fft.js-4.0.4.tgz",
"integrity": "sha512-f9c00hphOgeQTlDyavwTtu6RiK8AIFjD6+jvXkNkpeQ7rirK3uFWVpalkoS4LAwbdX7mfZ8aoBfFVQX1Re/8aw=="
},
"node_modules/file-saver": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/file-saver/-/file-saver-2.0.5.tgz",
"integrity": "sha512-P9bmyZ3h/PRG+Nzga+rbdI4OEpNDzAVyy74uVO9ATgzLK6VtAsYybF/+TOCvrc0MO793d6+42lLyZTw7/ArVzA=="
},
"node_modules/fill-range": {
"version": "7.0.1",
"resolved": "https://registry.npmmirror.com/fill-range/-/fill-range-7.0.1.tgz",
@@ -3316,11 +3343,6 @@
"node": "6.* || 8.* || >= 10.*"
}
},
"node_modules/github-markdown-css": {
"version": "5.2.0",
"resolved": "https://registry.npmmirror.com/github-markdown-css/-/github-markdown-css-5.2.0.tgz",
"integrity": "sha512-hq5RaCInSUZ48bImOZpkppW2/MT44StRgsbsZ8YA4vJFwLKB/Vo3k7R2t+pUGqO+ThG0QDMi96TewV/B3vyItg=="
},
"node_modules/glob": {
"version": "7.1.6",
"resolved": "https://registry.npmmirror.com/glob/-/glob-7.1.6.tgz",
@@ -4591,10 +4613,24 @@
}
},
"node_modules/postcss": {
"version": "8.4.23",
"resolved": "https://registry.npmmirror.com/postcss/-/postcss-8.4.23.tgz",
"integrity": "sha512-bQ3qMcpF6A/YjR55xtoTr0jGOlnPOKAIMdOWiv0EIT6HVPEaJiJB4NLljSbiHoC2RX7DN5Uvjtpbg1NPdwv1oA==",
"version": "8.4.31",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
"integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==",
"dev": true,
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/postcss"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"nanoid": "^3.3.6",
"picocolors": "^1.0.0",
@@ -4696,9 +4732,9 @@
"integrity": "sha512-kma4U7AFCTwpqq5twzC1YVIDXSqg6qQK6JN0smOw8fgRy1OkMi0CYSzFmsy6dnqSenamAtj0CyXMUJ1Mf6oROg=="
},
"node_modules/protobufjs": {
"version": "6.11.3",
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.3.tgz",
"integrity": "sha512-xL96WDdCZYdU7Slin569tFX712BxsxslWwAfAhCYjQKGTq7dAU91Lomy6nLLhh/dyGhk/YH4TwTSRxTzhuHyZg==",
"version": "6.11.4",
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.4.tgz",
"integrity": "sha512-5kQWPaJHi1WoCpjTGszzQ32PG2F4+wRY6BmAT4Vfw56Q2FZ4YZzK20xUYQH4YkfehY1e6QSICrJquM6xXZNcrw==",
"hasInstallScript": true,
"dependencies": {
"@protobufjs/aspromise": "^1.1.2",

View File

@@ -16,7 +16,7 @@
"@primer/octicons-react": "^19.1.0",
"chart.js": "^4.3.0",
"classnames": "^2.3.2",
"github-markdown-css": "^5.2.0",
"file-saver": "^2.0.5",
"html-midi-player": "^1.5.0",
"i18next": "^22.4.15",
"mobx": "^6.9.0",
@@ -38,6 +38,7 @@
"uuid": "^9.0.0"
},
"devDependencies": {
"@types/file-saver": "^2.0.7",
"@types/react": "^18.2.6",
"@types/react-beautiful-dnd": "^13.1.4",
"@types/react-dom": "^18.2.4",

View File

@@ -26,18 +26,22 @@
import { FluentProvider, Tab, TabList, webDarkTheme, webLightTheme } from '@fluentui/react-components';
import { FC, useEffect, useState } from 'react';
import { Route, Routes, useLocation, useNavigate } from 'react-router';
import { pages } from './pages';
import { pages as clientPages } from './pages';
import { useMediaQuery } from 'usehooks-ts';
import commonStore from './stores/commonStore';
import { observer } from 'mobx-react-lite';
import { useTranslation } from 'react-i18next';
import { CustomToastContainer } from './components/CustomToastContainer';
import { LazyImportComponent } from './components/LazyImportComponent';
const App: FC = observer(() => {
const { t } = useTranslation();
const navigate = useNavigate();
const location = useLocation();
const mq = useMediaQuery('(min-width: 640px)');
const pages = commonStore.platform === 'web' ? clientPages.filter(page =>
!['/configs', '/models', '/downloads', '/train', '/about'].some(path => page.path === path)
) : clientPages;
const [path, setPath] = useState<string>(pages[0].path);
@@ -82,7 +86,7 @@ const App: FC = observer(() => {
<div className="h-full w-full p-2 box-border overflow-y-hidden">
<Routes>
{pages.map(({ path, element }, index) => (
<Route key={`${path}-${index}`} path={path} element={element} />
<Route key={`${path}-${index}`} path={path} element={<LazyImportComponent lazyChildren={element} />} />
))}
</Routes>
</div>

View File

@@ -100,7 +100,7 @@
"Model Config Exception": "モデル設定例外",
"Use Gitee Updates Source": "Gitee更新ソースを使用",
"Use Custom CUDA kernel to Accelerate": "カスタムCUDAカーネルを使用して加速",
"Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues. If it fails to start, please turn off this option.": "このオプションを有効にすると、推論速度が大幅に向上し、一部のVRAMを節約できますが、互換性の問題が生じる可能性があります。起動に失敗した場合は、このオプションをオフにしてください。",
"Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues (output garbled). If it fails to start, please turn off this option, or try to upgrade your gpu driver.": "このオプションを有効にすると、推論速度が大幅に向上し、一部のVRAMを節約できますが、互換性の問題 (文字化けを出力する) が生じる可能性があります。起動に失敗した場合は、このオプションを無効にするか、GPUドライバーをアップグレードしてみてください。",
"Supported custom cuda file not found": "対応しているカスタムCUDAファイルが見つかりません",
"Failed to copy custom cuda file": "カスタムCUDAファイルのコピーに失敗しました",
"Downloading update, please wait. If it is not completed, please manually download the program from GitHub and replace the original program.": "更新をダウンロード中です、お待ちください。完了しない場合は、GitHubから手動でプログラムをダウンロードし、元のプログラムを置き換えてください。",
@@ -233,7 +233,7 @@
"Matched CUDA is not installed": "対応するCUDAがインストールされていません",
"Failed to convert data": "データの変換に失敗しました",
"Failed to merge model": "モデルのマージに失敗しました",
"The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\nWhen you provide a directory path, all the txt files within that directory will be automatically converted into training data. This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\nThe jsonl format file can be referenced at https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl.\nYou can also write it similar to OpenAI's playground format, as shown in https://platform.openai.com/playground/p/default-chat.\nEven for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. If they are different dialogues or topics, they should be written in separate lines.": "データのパスはディレクトリまたはjsonl形式のファイルでなければなりません将来的にはより多くの形式がサポートされる予定です。ディレクトリパスを提供した場合、そのディレクトリ内のすべてのtxtファイルが自動的にトレーニングデータに変換されます。これは大規模なライティング、コード生成、または知識ベースのトレーニングで一般的に使用されます。jsonl形式のファイルは、https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl を参照してください。\nhttps://platform.openai.com/playground/p/default-chat のように、OpenAIのプレイグラウンド形式に似た形式で書くこともできます。複数ターンの対話であっても、一行で書く必要があり、行の区切りを示すために`\\n`を使用します。それらが異なる対話やトピックであれば、それらは別々の行に書かれるべきです。",
"The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\nWhen you provide a directory path, all the txt files within that directory will be automatically converted into training data. This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\nThe jsonl format file can be referenced at https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl.\nYou can also write it similar to OpenAI's playground format, as shown in https://platform.openai.com/playground/p/default-chat.\nEven for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. If they are different dialogues or topics, they should be written in separate lines.": "データのパスはディレクトリまたはjsonl形式のファイルでなければなりません将来的にはより多くの形式がサポートされる予定です。ディレクトリパスを提供した場合、そのディレクトリ内のすべてのtxtファイルが自動的にトレーニングデータに変換されます。これは大規模なライティング、コード生成、または知識ベースのトレーニングで一般的に使用されます。jsonl形式のファイルは、https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl を参照してください。\nhttps://platform.openai.com/playground/p/default-chat のように、OpenAIのプレイグラウンド形式に似た形式で書くこともできます。複数ターンの対話であっても、一行で書く必要があり、行の区切りを示すために`\\n`を使用します。それらが異なる対話やトピックであれば、それらは別々の行に書かれるべきです。",
"Size mismatch for blocks. You are attempting to continue training from the LoRA model, but it does not match the base model. Please set LoRA model to None.": "ブロックのサイズが一致しません。LoRAモデルからトレーニングを続けようとしていますが、それはベースモデルと一致しません。LoRAモデルをNoneに設定してください。",
"Instruction: Write a story using the following information\n\nInput: A man named Alex chops a tree down\n\nResponse:": "Instruction: Write a story using the following information\n\nInput: アレックスという男が木を切り倒す\n\nResponse:",
"Composition": "作曲",
@@ -254,6 +254,18 @@
"User Name": "ユーザー名",
"Assistant Name": "アシスタント名",
"Insert default system prompt at the beginning": "最初にデフォルトのシステムプロンプトを挿入",
"Please Enable Custom CUDA Kernel. Latest RWKV-5 requires os.environ['RWKV_CUDA_ON'] == '1' (will fix soon).": "カスタムCUDAカーネルを有効にしてください。最新のRWKV-5ではos.environ['RWKV_CUDA_ON'] == '1'が必要です(近日中に修正します)。",
"Format Content": "内容フォーマットの規格化"
"Format Content": "内容フォーマットの規格化",
"Add An Attachment (Accepts pdf, txt)": "添付ファイルを追加 (pdf, txtを受け付けます)",
"Uploading Attachment": "添付ファイルアップロード中",
"Remove Attachment": "添付ファイルを削除",
"The content of file": "ファイル",
"is as follows. When replying to me, consider the file content and respond accordingly:": "の内容は以下の通りです。私に返信する際は、ファイルの内容を考慮して適切に返信してください:",
"What's the file name": "ファイル名は何ですか",
"The file name is: ": "ファイル名は次のとおりです: ",
"Port is occupied. Change it in Configs page or close the program that occupies the port.": "ポートが占有されています。設定ページで変更するか、ポートを占有しているプログラムを終了してください。",
"Loading...": "読み込み中...",
"Hello, what can I do for you?": "こんにちは、何かお手伝いできますか?",
"Enable WebUI": "WebUIを有効化",
"Server is working on deployment mode, please close the terminal window manually": "サーバーはデプロイモードで動作しています、ターミナルウィンドウを手動で閉じてください",
"Server is working on deployment mode, please exit the program manually to stop the server": "サーバーはデプロイモードで動作しています、サーバーを停止するにはプログラムを手動で終了してください"
}

View File

@@ -100,7 +100,7 @@
"Model Config Exception": "模型配置异常",
"Use Gitee Updates Source": "使用Gitee更新源",
"Use Custom CUDA kernel to Accelerate": "使用自定义CUDA算子加速",
"Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues. If it fails to start, please turn off this option.": "开启这个选项能大大提升推理速度并节省显存,但可能存在兼容性问题,如果启动失败,请关闭此选项",
"Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues (output garbled). If it fails to start, please turn off this option, or try to upgrade your gpu driver.": "开启这个选项能大大提升推理速度并节省显存,但可能存在兼容性(回复乱码)问题,如果发生相关问题,请关闭此选项。或更新你的显卡驱动",
"Supported custom cuda file not found": "没有找到支持的自定义cuda文件",
"Failed to copy custom cuda file": "自定义cuda文件复制失败",
"Downloading update, please wait. If it is not completed, please manually download the program from GitHub and replace the original program.": "正在下载更新请等待。如果一直未完成请从Github手动下载并覆盖原程序",
@@ -233,7 +233,7 @@
"Matched CUDA is not installed": "未安装匹配的CUDA",
"Failed to convert data": "数据转换失败",
"Failed to merge model": "合并模型失败",
"The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\nWhen you provide a directory path, all the txt files within that directory will be automatically converted into training data. This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\nThe jsonl format file can be referenced at https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl.\nYou can also write it similar to OpenAI's playground format, as shown in https://platform.openai.com/playground/p/default-chat.\nEven for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. If they are different dialogues or topics, they should be written in separate lines.": "数据路径必须是一个文件夹或者jsonl格式文件 (未来会支持更多格式)\n\n当你填写的路径是一个文件夹时该文件夹内的所有txt文件会被自动转换为训练数据通常这用于大批量训练写作代码生成或知识库\n\njsonl文件的格式参考 https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl\n你也可以仿照openai的playground编写参考 https://platform.openai.com/playground/p/default-chat\n即使是多轮对话也必须写在一行用`\\n`表示换行,如果是不同对话或主题,则另起一行",
"The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\nWhen you provide a directory path, all the txt files within that directory will be automatically converted into training data. This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\nThe jsonl format file can be referenced at https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl.\nYou can also write it similar to OpenAI's playground format, as shown in https://platform.openai.com/playground/p/default-chat.\nEven for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. If they are different dialogues or topics, they should be written in separate lines.": "数据路径必须是一个文件夹或者jsonl格式文件 (未来会支持更多格式)\n\n当你填写的路径是一个文件夹时该文件夹内的所有txt文件会被自动转换为训练数据通常这用于大批量训练写作代码生成或知识库\n\njsonl文件的格式参考 https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl 以及 https://zhuanlan.zhihu.com/p/643433851\n你也可以仿照openai的playground编写参考 https://platform.openai.com/playground/p/default-chat\n即使是多轮对话也必须写在一行用`\\n`表示换行,如果是不同对话或主题,则另起一行",
"Size mismatch for blocks. You are attempting to continue training from the LoRA model, but it does not match the base model. Please set LoRA model to None.": "尺寸不匹配块。你正在尝试从LoRA模型继续训练但该LoRA模型与基底模型不匹配请将LoRA模型设为空",
"Instruction: Write a story using the following information\n\nInput: A man named Alex chops a tree down\n\nResponse:": "Instruction: Write a story using the following information\n\nInput: 艾利克斯砍倒了一棵树\n\nResponse:",
"Composition": "作曲",
@@ -254,6 +254,18 @@
"User Name": "用户名称",
"Assistant Name": "AI名称",
"Insert default system prompt at the beginning": "在开头自动插入默认系统提示",
"Please Enable Custom CUDA Kernel. Latest RWKV-5 requires os.environ['RWKV_CUDA_ON'] == '1' (will fix soon).": "请启用自定义CUDA算子。最新的RWKV-5需要os.environ['RWKV_CUDA_ON'] == '1' (未来会修复)",
"Format Content": "规范格式"
"Format Content": "规范格式",
"Add An Attachment (Accepts pdf, txt)": "添加一个附件 (支持pdf, txt)",
"Uploading Attachment": "正在上传附件",
"Remove Attachment": "移除附件",
"The content of file": "文件",
"is as follows. When replying to me, consider the file content and respond accordingly:": "内容如下。回复时考虑文件内容并做出相应回复:",
"What's the file name": "文件名是什么",
"The file name is: ": "文件名是:",
"Port is occupied. Change it in Configs page or close the program that occupies the port.": "端口被占用。请在配置页面更改端口,或关闭占用端口的程序",
"Loading...": "加载中...",
"Hello, what can I do for you?": "你好,有什么要我帮忙的吗?",
"Enable WebUI": "启用WebUI",
"Server is working on deployment mode, please close the terminal window manually": "服务器正在部署模式下运行,请手动关闭终端窗口",
"Server is working on deployment mode, please exit the program manually to stop the server": "服务器正在部署模式下运行,请手动退出程序以停止服务器"
}

View File

@@ -1,4 +1,4 @@
import { FC, ReactElement } from 'react';
import React, { FC, ReactElement } from 'react';
import {
Button,
Dialog,
@@ -11,7 +11,9 @@ import {
} from '@fluentui/react-components';
import { ToolTipButton } from './ToolTipButton';
import { useTranslation } from 'react-i18next';
import MarkdownRender from './MarkdownRender';
import { LazyImportComponent } from './LazyImportComponent';
const MarkdownRender = React.lazy(() => import('./MarkdownRender'));
export const DialogButton: FC<{
text?: string | null
@@ -45,7 +47,9 @@ export const DialogButton: FC<{
<DialogContent>
{
markdown ?
<MarkdownRender>{contentText}</MarkdownRender> :
<LazyImportComponent lazyChildren={MarkdownRender}>
{contentText}
</LazyImportComponent> :
contentText
}
</DialogContent>

View File

@@ -0,0 +1,20 @@
import { FC, LazyExoticComponent, ReactNode, Suspense } from 'react';
import { useTranslation } from 'react-i18next';

interface LazyImportComponentProps {
  lazyChildren: LazyExoticComponent<FC<any>>;
  lazyProps?: any;
  children?: ReactNode;
}

export const LazyImportComponent: FC<LazyImportComponentProps> = (props) => {
  const { t } = useTranslation();
  return (
    <Suspense fallback={<div>{t('Loading...')}</div>}>
      <props.lazyChildren {...props.lazyProps}>
        {props.children}
      </props.lazyChildren>
    </Suspense>
  );
};

View File

@@ -21,7 +21,7 @@ const Hyperlink: FC<any> = ({ href, children }) => {
);
};
export const MarkdownRender: FC<ReactMarkdownOptions> = (props) => {
const MarkdownRender: FC<ReactMarkdownOptions> = (props) => {
return (
<div dir="auto" className="markdown-body">
<ReactMarkdown

View File

@@ -40,6 +40,8 @@ export const ReadButton: FC<{
voice = voices.find((v) => v.name.toLowerCase().includes('microsoft aria'));
else if (lang === 'zh')
voice = voices.find((v) => v.name.toLowerCase().includes('xiaoyi'));
else if (lang === 'ja')
voice = voices.find((v) => v.name.toLowerCase().includes('nanami'));
if (!voice) voice = voices.find((v) => v.lang.substring(0, 2) === lang);
if (!voice) voice = voices.find((v) => v.lang === navigator.language);
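The lookup above now prefers a named voice per language (aria, xiaoyi, nanami), then any voice whose two-letter code matches, then the browser locale. Consuming the result is the standard Web Speech API; a minimal sketch of what the surrounding component presumably does:

function speakWith(voice: SpeechSynthesisVoice | undefined, text: string) {
  const utterance = new SpeechSynthesisUtterance(text);
  if (voice) utterance.voice = voice; // otherwise the browser default is used
  speechSynthesis.speak(utterance);
}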

View File

@@ -1,6 +1,12 @@
import React, { FC, MouseEventHandler, ReactElement } from 'react';
import commonStore, { ModelStatus } from '../stores/commonStore';
import { AddToDownloadList, FileExists, StartServer, StartWebGPUServer } from '../../wailsjs/go/backend_golang/App';
import {
AddToDownloadList,
FileExists,
IsPortAvailable,
StartServer,
StartWebGPUServer
} from '../../wailsjs/go/backend_golang/App';
import { Button } from '@fluentui/react-components';
import { observer } from 'mobx-react-lite';
import { exit, getStatus, readRoot, switchModel, updateConfig } from '../apis';
@@ -10,7 +16,7 @@ import { useTranslation } from 'react-i18next';
import { ToolTipButton } from './ToolTipButton';
import { Play16Regular, Stop16Regular } from '@fluentui/react-icons';
import { useNavigate } from 'react-router';
import { WindowShow } from '../../wailsjs/runtime/runtime';
import { WindowShow } from '../../wailsjs/runtime';
const mainButtonText = {
[ModelStatus.Offline]: 'Run',
@@ -107,8 +113,15 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
const port = modelConfig.apiParameters.apiPort;
await exit(1000).catch(() => {
});
if (!await IsPortAvailable(port)) {
await exit(1000).catch(() => {
});
if (!await IsPortAvailable(port)) {
toast(t('Port is occupied. Change it in Configs page or close the program that occupies the port.'), { type: 'error' });
commonStore.setStatus({ status: ModelStatus.Offline });
return;
}
}
const startServer = webgpu ?
(_: string, port: number, host: string) => StartWebGPUServer(port, host)
@@ -116,7 +129,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
const isUsingCudaBeta = modelConfig.modelParameters.device === 'CUDA-Beta';
startServer(commonStore.settings.customPythonPath, port, commonStore.settings.host !== '127.0.0.1' ? '0.0.0.0' : '127.0.0.1',
isUsingCudaBeta
!!modelConfig.enableWebUI, isUsingCudaBeta
).catch((e) => {
const errMsg = e.message || e;
if (errMsg.includes('path contains space'))
@@ -186,7 +199,8 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
model: modelPath,
strategy: strategy,
tokenizer: modelConfig.modelParameters.useCustomTokenizer ? modelConfig.modelParameters.customTokenizer : undefined,
customCuda: customCudaFile !== ''
customCuda: customCudaFile !== '',
deploy: modelConfig.enableWebUI
}).then(async (r) => {
if (r.ok) {
commonStore.setStatus({ status: ModelStatus.Working });
@@ -211,8 +225,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
'invalid header or archive is corrupted': 'The model file is corrupted, please download again.',
'no NVIDIA driver': 'Found no NVIDIA driver, please install the latest driver.',
'CUDA out of memory': 'VRAM is not enough, please reduce stored layers or use a lower precision in Configs page.',
'Ninja is required to load C++ extensions': 'Failed to enable custom CUDA kernel, ninja is required to load C++ extensions. You may be using the CPU version of PyTorch, please reinstall PyTorch with CUDA. Or if you are using a custom Python interpreter, you must compile the CUDA kernel by yourself or disable Custom CUDA kernel acceleration.',
'Please Enable Custom CUDA Kernel': 'Please Enable Custom CUDA Kernel. Latest RWKV-5 requires os.environ[\'RWKV_CUDA_ON\'] == \'1\' (will fix soon).'
'Ninja is required to load C++ extensions': 'Failed to enable custom CUDA kernel, ninja is required to load C++ extensions. You may be using the CPU version of PyTorch, please reinstall PyTorch with CUDA. Or if you are using a custom Python interpreter, you must compile the CUDA kernel by yourself or disable Custom CUDA kernel acceleration.'
};
const matchedError = Object.entries(errorsMap).find(([key, _]) => error.includes(key));
const message = matchedError ? t(matchedError[1]) : error;
@@ -234,7 +247,13 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
}, 1000);
} else {
commonStore.setStatus({ status: ModelStatus.Offline });
exit();
exit().then(r => {
if (r.status === 403)
if (commonStore.platform !== 'linux')
toast(t('Server is working on deployment mode, please close the terminal window manually'), { type: 'info' });
else
toast(t('Server is working on deployment mode, please exit the program manually to stop the server'), { type: 'info' });
});
}
};
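Context for the two changes above: with Enable WebUI checked, the client sends deploy: true to /switch-model, and a server running in deployment mode answers /exit with HTTP 403, so the stop handler falls back to asking the user to close the terminal window (or, on Linux, to exit the program) manually. Likewise, the IsPortAvailable double-check retries exit once before concluding that some other program owns the port.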

View File

@@ -25,7 +25,8 @@ export const WorkHeader: FC = observer(() => {
const { t } = useTranslation();
const port = commonStore.getCurrentModelConfig().apiParameters.apiPort;
return (
return commonStore.platform === 'web' ?
<div /> :
<div className="flex flex-col gap-1">
<div className="flex justify-between items-center">
<div className="flex items-center gap-2">
@@ -42,5 +43,5 @@ export const WorkHeader: FC = observer(() => {
</Text>
<Divider style={{ flexGrow: 0 }} />
</div>
);
;
});

View File

@@ -1,3 +1,4 @@
import './webWails';
import React from 'react';
import { createRoot } from 'react-dom/client';
import './style.scss';
@@ -6,7 +7,6 @@ import App from './App';
import { HashRouter } from 'react-router-dom';
import { startup } from './startup';
import './_locales/i18n-react';
import 'html-midi-player';
import { WindowShow } from '../wailsjs/runtime';
startup().then(() => {
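The new './webWails' import is loaded before everything else; together with the commonStore.platform === 'web' branches in the surrounding diffs, it appears to be a browser shim for the Wails runtime so the same frontend can be served by the Python WebUI server. That is an inference from this compare view; the shim's source is not shown here.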

View File

@@ -5,9 +5,7 @@ import MarkdownRender from '../components/MarkdownRender';
import { observer } from 'mobx-react-lite';
import commonStore from '../stores/commonStore';
export type AboutContent = { [lang: string]: string }
export const About: FC = observer(() => {
const About: FC = observer(() => {
const { t } = useTranslation();
const lang: string = commonStore.settings.language;
@@ -21,3 +19,5 @@ export const About: FC = observer(() => {
} />
);
});
export default About;

View File

@@ -10,56 +10,34 @@ import { KebabHorizontalIcon, PencilIcon, SyncIcon, TrashIcon } from '@primer/oc
import logo from '../assets/images/logo.png';
import MarkdownRender from '../components/MarkdownRender';
import { ToolTipButton } from '../components/ToolTipButton';
import { ArrowCircleUp28Regular, Delete28Regular, RecordStop28Regular, Save28Regular } from '@fluentui/react-icons';
import {
ArrowCircleUp28Regular,
ArrowClockwise16Regular,
Attach16Regular,
Delete28Regular,
Dismiss16Regular,
RecordStop28Regular,
Save28Regular
} from '@fluentui/react-icons';
import { CopyButton } from '../components/CopyButton';
import { ReadButton } from '../components/ReadButton';
import { toast } from 'react-toastify';
import { WorkHeader } from '../components/WorkHeader';
import { DialogButton } from '../components/DialogButton';
import { OpenFileFolder, OpenSaveFileDialog } from '../../wailsjs/go/backend_golang/App';
import { toastWithButton } from '../utils';
import { OpenFileFolder, OpenOpenFileDialog, OpenSaveFileDialog } from '../../wailsjs/go/backend_golang/App';
import { absPathAsset, bytesToReadable, getServerRoot, toastWithButton } from '../utils';
import { PresetsButton } from './PresetsManager/PresetsButton';
import { useMediaQuery } from 'usehooks-ts';
import { botName, ConversationMessage, MessageType, userName, welcomeUuid } from '../types/chat';
export const userName = 'M E';
export const botName = 'A I';
let chatSseControllers: {
[id: string]: AbortController
} = {};
export const welcomeUuid = 'welcome';
export enum MessageType {
Normal,
Error
}
export type Side = 'left' | 'right'
export type Color = 'neutral' | 'brand' | 'colorful'
export type MessageItem = {
sender: string,
type: MessageType,
color: Color,
avatarImg?: string,
time: string,
content: string,
side: Side,
done: boolean
}
export type Conversation = {
[uuid: string]: MessageItem
}
export type Role = 'assistant' | 'user' | 'system';
export type ConversationMessage = {
role: Role;
content: string;
}
let chatSseController: AbortController | null = null;
const MoreUtilsButton: FC<{ uuid: string, setEditing: (editing: boolean) => void }> = observer(({
const MoreUtilsButton: FC<{
uuid: string,
setEditing: (editing: boolean) => void
}> = observer(({
uuid,
setEditing
}) => {
@@ -83,13 +61,15 @@ const MoreUtilsButton: FC<{ uuid: string, setEditing: (editing: boolean) => void
onClick={() => {
commonStore.conversationOrder.splice(commonStore.conversationOrder.indexOf(uuid), 1);
delete commonStore.conversation[uuid];
commonStore.setAttachment(uuid, null);
}} />
</MenuPopover>
</Menu>;
});
const ChatMessageItem: FC<{
uuid: string, onSubmit: (message: string | null, answerId: string | null,
uuid: string,
onSubmit: (message: string | null, answerId: string | null,
startUuid: string | null, endUuid: string | null, includeEndUuid: boolean) => void
}> = observer(({ uuid, onSubmit }) => {
const { t } = useTranslation();
@@ -114,6 +94,13 @@ const ChatMessageItem: FC<{
}
};
let avatarImg: string | undefined;
if (commonStore.activePreset && messageItem.sender === botName) {
avatarImg = absPathAsset(commonStore.activePreset.avatarImg);
} else if (messageItem.avatarImg) {
avatarImg = messageItem.avatarImg;
}
return <div
className={classnames(
'flex gap-2 mb-2 overflow-hidden',
@@ -131,7 +118,7 @@ const ChatMessageItem: FC<{
<Avatar
color={messageItem.color}
name={messageItem.sender}
image={(commonStore.activePreset && messageItem.sender === botName) ? { src: commonStore.activePreset.avatarImg } : messageItem.avatarImg ? { src: messageItem.avatarImg } : undefined}
image={avatarImg ? { src: avatarImg } : undefined}
/>
<div
className={classnames(
@@ -142,13 +129,31 @@ const ChatMessageItem: FC<{
)}
>
{!editing ?
<MarkdownRender>{messageItem.content}</MarkdownRender> :
<div className="flex flex-col">
<MarkdownRender>{messageItem.content}</MarkdownRender>
{uuid in commonStore.attachments &&
<div className="flex grow">
<div className="grow" />
<ToolTipButton className="whitespace-nowrap"
text={
commonStore.attachments[uuid][0].name.replace(
new RegExp('(^[^\\.]{5})[^\\.]+'), '$1...')
}
desc={`${commonStore.attachments[uuid][0].name} (${bytesToReadable(commonStore.attachments[uuid][0].size)})`}
size="small" shape="circular" appearance="secondary" />
</div>
}
</div> :
<Textarea ref={textareaRef}
className="grow"
style={{ minWidth: 0 }}
value={messageItem.content}
onChange={(e) => {
messageItem.content = e.target.value;
commonStore.conversation[uuid].type = MessageType.Normal;
commonStore.conversation[uuid].done = true;
commonStore.setConversation(commonStore.conversation);
commonStore.setConversationOrder([...commonStore.conversationOrder]);
}}
onBlur={() => {
setEditingInner(false);
@@ -166,6 +171,10 @@ const ChatMessageItem: FC<{
messageItem.sender === botName && uuid !== welcomeUuid &&
<ToolTipButton desc={t('Retry')} size="small" appearance="subtle"
icon={<SyncIcon />} onClick={() => {
if (uuid in chatSseControllers) {
chatSseControllers[uuid].abort();
delete chatSseControllers[uuid];
}
onSubmit(null, uuid, null, uuid, false);
}} />
}
@@ -187,15 +196,7 @@ const ChatPanel: FC = observer(() => {
const currentConfig = commonStore.getCurrentModelConfig();
const apiParams = currentConfig.apiParameters;
const port = apiParams.apiPort;
let lastMessageId: string;
let generating: boolean = false;
if (commonStore.conversationOrder.length > 0) {
lastMessageId = commonStore.conversationOrder[commonStore.conversationOrder.length - 1];
const lastMessage = commonStore.conversation[lastMessageId];
if (lastMessage.sender === botName)
generating = !lastMessage.done;
}
const generating: boolean = Object.keys(chatSseControllers).length > 0;
useEffect(() => {
if (inputRef.current)
@@ -213,7 +214,7 @@ const ChatPanel: FC = observer(() => {
color: 'colorful',
avatarImg: logo,
time: new Date().toISOString(),
content: t('Hello! I\'m RWKV, an open-source and commercially usable large language model.'),
content: commonStore.platform === 'web' ? t('Hello, what can I do for you?') : t('Hello! I\'m RWKV, an open-source and commercially usable large language model.'),
side: 'left',
done: true
}
@@ -230,7 +231,7 @@ const ChatPanel: FC = observer(() => {
e.stopPropagation();
if (e.type === 'click' || (e.keyCode === 13 && !e.shiftKey)) {
e.preventDefault();
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl) {
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl && commonStore.platform !== 'web') {
toast(t('Please click the button in the top right corner to start the model'), { type: 'warning' });
return;
}
@@ -260,6 +261,11 @@ const ChatPanel: FC = observer(() => {
commonStore.setConversation(commonStore.conversation);
commonStore.conversationOrder.push(newId);
commonStore.setConversationOrder(commonStore.conversationOrder);
if (commonStore.currentTempAttachment) {
commonStore.setAttachment(newId, [commonStore.currentTempAttachment]);
commonStore.setCurrentTempAttachment(null);
}
}
let startIndex = startUuid ? commonStore.conversationOrder.indexOf(startUuid) : 0;
@@ -271,6 +277,17 @@ const ChatPanel: FC = observer(() => {
if (uuid === welcomeUuid)
return;
const messageItem = commonStore.conversation[uuid];
if (uuid in commonStore.attachments) {
const attachment = commonStore.attachments[uuid][0];
messages.push({
role: 'user',
content: t('The content of file') + ` "${attachment.name}" `
+ t('is as follows. When replying to me, consider the file content and respond accordingly:')
+ '\n\n' + attachment.content
});
messages.push({ role: 'user', content: t('What\'s the file name') });
messages.push({ role: 'assistant', content: t('The file name is: ') + attachment.name });
}
if (messageItem.done && messageItem.type === MessageType.Normal && messageItem.sender === userName) {
messages.push({ role: 'user', content: messageItem.content });
} else if (messageItem.done && messageItem.type === MessageType.Normal && messageItem.sender === botName) {
@@ -296,11 +313,10 @@ const ChatPanel: FC = observer(() => {
commonStore.setConversationOrder(commonStore.conversationOrder);
setTimeout(scrollToBottom);
let answer = '';
chatSseController = new AbortController();
fetchEventSource( // https://api.openai.com/v1/chat/completions || http://127.0.0.1:${port}/chat/completions
commonStore.settings.apiUrl ?
commonStore.settings.apiUrl + '/v1/chat/completions' :
`http://127.0.0.1:${port}/chat/completions`,
const chatSseController = new AbortController();
chatSseControllers[answerId] = chatSseController;
fetchEventSource( // https://api.openai.com/v1/chat/completions || http://127.0.0.1:${port}/v1/chat/completions
getServerRoot(port) + '/v1/chat/completions',
{
method: 'POST',
headers: {
@@ -313,14 +329,16 @@ const ChatPanel: FC = observer(() => {
model: commonStore.settings.apiChatModelName, // 'gpt-3.5-turbo'
temperature: apiParams.temperature,
top_p: apiParams.topP,
user_name: commonStore.activePreset?.userName,
assistant_name: commonStore.activePreset?.assistantName,
presystem: commonStore.activePreset?.presystem
user_name: commonStore.activePreset?.userName || undefined,
assistant_name: commonStore.activePreset?.assistantName || undefined,
presystem: commonStore.activePreset?.presystem && undefined
}),
signal: chatSseController?.signal,
onmessage(e) {
scrollToBottom();
if (e.data.trim() === '[DONE]') {
if (answerId! in chatSseControllers)
delete chatSseControllers[answerId!];
commonStore.conversation[answerId!].done = true;
commonStore.conversation[answerId!].content = commonStore.conversation[answerId!].content.trim();
commonStore.setConversation(commonStore.conversation);
@@ -350,9 +368,13 @@ const ChatPanel: FC = observer(() => {
}
},
onclose() {
if (answerId! in chatSseControllers)
delete chatSseControllers[answerId!];
console.log('Connection closed');
},
onerror(err) {
if (answerId! in chatSseControllers)
delete chatSseControllers[answerId!];
commonStore.conversation[answerId!].type = MessageType.Error;
commonStore.conversation[answerId!].done = true;
err = err.message || err;
@@ -380,33 +402,141 @@ const ChatPanel: FC = observer(() => {
size={mq ? 'large' : 'small'} shape="circular" appearance="subtle" title={t('Clear')}
contentText={t('Are you sure you want to clear the conversation? It cannot be undone.')}
onConfirm={() => {
if (generating)
chatSseController?.abort();
if (generating) {
for (const id in chatSseControllers) {
chatSseControllers[id].abort();
}
chatSseControllers = {};
}
commonStore.setConversation({});
commonStore.setConversationOrder([]);
}} />
<Textarea
ref={inputRef}
style={{ minWidth: 0 }}
className="grow"
resize="vertical"
placeholder={t('Type your message here')!}
value={commonStore.currentInput}
onChange={(e) => commonStore.setCurrentInput(e.target.value)}
onKeyDown={handleKeyDownOrClick}
/>
<div className="relative flex grow">
<Textarea
ref={inputRef}
style={{ minWidth: 0 }}
className="grow"
resize="vertical"
placeholder={t('Type your message here')!}
value={commonStore.currentInput}
onChange={(e) => commonStore.setCurrentInput(e.target.value)}
onKeyDown={handleKeyDownOrClick}
/>
<div className="absolute right-2 bottom-2">
{!commonStore.currentTempAttachment ?
<ToolTipButton
desc={commonStore.attachmentUploading ?
t('Uploading Attachment') :
t('Add An Attachment (Accepts pdf, txt)')}
icon={commonStore.attachmentUploading ?
<ArrowClockwise16Regular className="animate-spin" />
: <Attach16Regular />}
size="small" shape="circular" appearance="secondary"
onClick={() => {
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl && commonStore.platform !== 'web') {
toast(t('Please click the button in the top right corner to start the model'), { type: 'warning' });
return;
}
if (commonStore.attachmentUploading)
return;
OpenOpenFileDialog('*.txt;*.pdf').then(async filePath => {
if (!filePath)
return;
commonStore.setAttachmentUploading(true);
let blob: Blob;
let attachmentName: string | undefined;
let attachmentContent: string | undefined;
if (commonStore.platform === 'web') {
const webReturn = filePath as any;
blob = webReturn.blob;
attachmentName = blob.name;
attachmentContent = webReturn.content;
} else {
// Both are slow. Communication between frontend and backend is slow. Use AssetServer Handler to read the file.
// const blob = new Blob([atob(info.content as unknown as string)]); // await fetch(`data:application/octet-stream;base64,${info.content}`).then(r => r.blob());
blob = await fetch(absPathAsset(filePath)).then(r => r.blob());
attachmentName = filePath.split(/[\\/]/).pop();
}
if (attachmentContent) {
commonStore.setCurrentTempAttachment(
{
name: attachmentName!,
size: blob.size,
content: attachmentContent
});
commonStore.setAttachmentUploading(false);
} else {
const urlPath = `/file-to-text?file_name=${attachmentName}`;
const bodyForm = new FormData();
bodyForm.append('file_data', blob, attachmentName);
fetch(getServerRoot(port) + urlPath, {
method: 'POST',
body: bodyForm
}).then(async r => {
if (r.status === 200) {
const pages = (await r.json()).pages as any[];
if (pages.length === 1)
attachmentContent = pages[0].page_content;
else
attachmentContent = pages.map((p, i) => `Page ${i + 1}:\n${p.page_content}`).join('\n\n');
commonStore.setCurrentTempAttachment(
{
name: attachmentName!,
size: blob.size,
content: attachmentContent!
});
} else {
toast(r.statusText + '\n' + (await r.text()), {
type: 'error'
});
}
commonStore.setAttachmentUploading(false);
}
).catch(e => {
commonStore.setAttachmentUploading(false);
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
});
}
}).catch(e => {
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
});
}}
/> :
<div>
<ToolTipButton
text={
commonStore.currentTempAttachment.name.replace(
new RegExp('(^[^\\.]{5})[^\\.]+'), '$1...')
}
desc={`${commonStore.currentTempAttachment.name} (${bytesToReadable(commonStore.currentTempAttachment.size)})`}
size="small" shape="circular" appearance="secondary" />
<ToolTipButton desc={t('Remove Attachment')}
icon={<Dismiss16Regular />}
size="small" shape="circular" appearance="subtle"
onClick={() => {
commonStore.setCurrentTempAttachment(null);
}} />
</div>
}
</div>
</div>
<ToolTipButton desc={generating ? t('Stop') : t('Send')}
icon={generating ? <RecordStop28Regular /> : <ArrowCircleUp28Regular />}
size={mq ? 'large' : 'small'} shape="circular" appearance="subtle"
onClick={(e) => {
if (generating) {
chatSseController?.abort();
if (lastMessageId) {
commonStore.conversation[lastMessageId].type = MessageType.Error;
commonStore.conversation[lastMessageId].done = true;
commonStore.setConversation(commonStore.conversation);
commonStore.setConversationOrder([...commonStore.conversationOrder]);
for (const id in chatSseControllers) {
chatSseControllers[id].abort();
commonStore.conversation[id].type = MessageType.Error;
commonStore.conversation[id].done = true;
}
chatSseControllers = {};
commonStore.setConversation(commonStore.conversation);
commonStore.setConversationOrder([...commonStore.conversationOrder]);
} else {
handleKeyDownOrClick(e);
}
@@ -417,8 +547,8 @@ const ChatPanel: FC = observer(() => {
onClick={() => {
let savedContent: string = '';
const isWorldModel = commonStore.getCurrentModelConfig().modelParameters.modelName.toLowerCase().includes('world');
const user = isWorldModel ? 'Question' : 'Bob';
const bot = isWorldModel ? 'Answer' : 'Alice';
const user = isWorldModel ? 'User' : 'Bob';
const bot = isWorldModel ? 'Assistant' : 'Alice';
commonStore.conversationOrder.forEach((uuid) => {
if (uuid === welcomeUuid)
return;
@@ -442,7 +572,7 @@ const ChatPanel: FC = observer(() => {
);
});
export const Chat: FC = observer(() => {
const Chat: FC = observer(() => {
return (
<div className="flex flex-col gap-1 p-2 h-full overflow-hidden">
<WorkHeader />
@@ -450,3 +580,5 @@ export const Chat: FC = observer(() => {
</div>
);
});
export default Chat;
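The attachment handling above injects three synthetic messages ahead of the user's turn, so the model sees the extracted text plus a scripted exchange about the file name. For a hypothetical upload named report.pdf, the request's messages array would begin like this (file name and contents invented for illustration):

import { ConversationMessage } from '../types/chat';

const injected: ConversationMessage[] = [
  {
    role: 'user',
    content: 'The content of file "report.pdf" is as follows. When replying to me, '
      + 'consider the file content and respond accordingly:\n\n<text returned by /file-to-text>'
  },
  // Scripted Q&A so the model can answer questions about the file name.
  { role: 'user', content: 'What\'s the file name' },
  { role: 'assistant', content: 'The file name is: report.pdf' },
  // The user's actual message follows as usual.
  { role: 'user', content: 'Summarize this document.' }
];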

View File

@@ -5,7 +5,6 @@ import { Button, Dropdown, Input, Option, Textarea } from '@fluentui/react-compo
import { Labeled } from '../components/Labeled';
import { ValuedSlider } from '../components/ValuedSlider';
import { useTranslation } from 'react-i18next';
import { ApiParameters } from './Configs';
import commonStore, { ModelStatus } from '../stores/commonStore';
import { fetchEventSource } from '@microsoft/fetch-event-source';
import { toast } from 'react-toastify';
@@ -14,18 +13,8 @@ import { PresetsButton } from './PresetsManager/PresetsButton';
import { ToolTipButton } from '../components/ToolTipButton';
import { ArrowSync20Regular } from '@fluentui/react-icons';
import { defaultPresets } from './defaultConfigs';
export type CompletionParams = Omit<ApiParameters, 'apiPort'> & {
stop: string,
injectStart: string,
injectEnd: string
};
export type CompletionPreset = {
name: string,
prompt: string,
params: CompletionParams
}
import { CompletionParams, CompletionPreset } from '../types/completion';
import { getServerRoot } from '../utils';
let completionSseController: AbortController | null = null;
@@ -80,7 +69,7 @@ const CompletionPanel: FC = observer(() => {
const onSubmit = (prompt: string) => {
commonStore.setCompletionSubmittedPrompt(prompt);
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl) {
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl && commonStore.platform !== 'web') {
toast(t('Please click the button in the top right corner to start the model'), { type: 'warning' });
commonStore.setCompletionGenerating(false);
return;
@@ -90,10 +79,8 @@ const CompletionPanel: FC = observer(() => {
let answer = '';
completionSseController = new AbortController();
fetchEventSource( // https://api.openai.com/v1/completions || http://127.0.0.1:${port}/completions
commonStore.settings.apiUrl ?
commonStore.settings.apiUrl + '/v1/completions' :
`http://127.0.0.1:${port}/completions`,
fetchEventSource( // https://api.openai.com/v1/completions || http://127.0.0.1:${port}/v1/completions
getServerRoot(port) + '/v1/completions',
{
method: 'POST',
headers: {
@@ -269,7 +256,7 @@ const CompletionPanel: FC = observer(() => {
} />
</div>
<div className="grow" />
<div className="flex justify-between gap-2">
<div className="hidden justify-between gap-2 sm:flex">
<Button className="grow" onClick={() => {
const newPrompt = prompt.replace(/\n+\ /g, '\n').split('\n').map((line) => line.trim()).join('\n');
setPrompt(newPrompt);
@@ -303,7 +290,7 @@ const CompletionPanel: FC = observer(() => {
);
});
export const Completion: FC = observer(() => {
const Completion: FC = observer(() => {
return (
<div className="flex flex-col gap-1 p-2 h-full overflow-hidden">
<WorkHeader />
@@ -311,3 +298,5 @@ export const Completion: FC = observer(() => {
</div>
);
});
export default Completion;
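Chat, Completion, Composition and RunButton all route requests through getServerRoot(port) from ../utils, whose implementation is not part of this compare view. A minimal sketch consistent with the inline URL logic it replaces; the real helper may differ (the web build, for instance, probably derives the root from the page's own origin):

import commonStore from '../stores/commonStore';

// Inferred from the URLs this helper replaces; not the actual source.
export function getServerRoot(port: number): string {
  if (commonStore.settings.apiUrl)
    return commonStore.settings.apiUrl; // remote endpoint, e.g. https://api.openai.com
  return `http://127.0.0.1:${port}`; // local server started by RunButton
}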

View File

@@ -1,3 +1,4 @@
import 'html-midi-player';
import React, { FC, useEffect, useRef } from 'react';
import { observer } from 'mobx-react-lite';
import { WorkHeader } from '../components/WorkHeader';
@@ -16,18 +17,8 @@ import * as mm from '@magenta/music/esm/core.js';
import { NoteSequence } from '@magenta/music/esm/protobuf.js';
import { defaultCompositionPrompt } from './defaultConfigs';
import { FileExists, OpenFileFolder, OpenSaveFileDialogBytes } from '../../wailsjs/go/backend_golang/App';
import { toastWithButton } from '../utils';
export type CompositionParams = {
prompt: string,
maxResponseToken: number,
temperature: number,
topP: number,
autoPlay: boolean,
useLocalSoundFont: boolean,
midi: ArrayBuffer | null,
ns: NoteSequence | null
}
import { getServerRoot, toastWithButton } from '../utils';
import { CompositionParams } from '../types/composition';
let compositionSseController: AbortController | null = null;
@@ -109,9 +100,7 @@ const CompositionPanel: FC = observer(() => {
}, []);
const generateNs = (autoPlay: boolean) => {
fetch(commonStore.settings.apiUrl ?
commonStore.settings.apiUrl + '/text-to-midi' :
`http://127.0.0.1:${port}/text-to-midi`, {
fetch(getServerRoot(port) + '/text-to-midi', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
@@ -137,7 +126,7 @@ const CompositionPanel: FC = observer(() => {
const onSubmit = (prompt: string) => {
commonStore.setCompositionSubmittedPrompt(prompt);
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl) {
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl && commonStore.platform !== 'web') {
toast(t('Please click the button in the top right corner to start the model'), { type: 'warning' });
commonStore.setCompositionGenerating(false);
return;
@@ -145,10 +134,8 @@ const CompositionPanel: FC = observer(() => {
let answer = '';
compositionSseController = new AbortController();
fetchEventSource( // https://api.openai.com/v1/completions || http://127.0.0.1:${port}/completions
commonStore.settings.apiUrl ?
commonStore.settings.apiUrl + '/v1/completions' :
`http://127.0.0.1:${port}/completions`,
fetchEventSource( // https://api.openai.com/v1/completions || http://127.0.0.1:${port}/v1/completions
getServerRoot(port) + '/v1/completions',
{
method: 'POST',
headers: {
@@ -319,7 +306,7 @@ const CompositionPanel: FC = observer(() => {
toastWithButton(t('File Saved'), t('Open'), () => {
OpenFileFolder(path, false);
});
}).catch((e: any) => {
}).catch((e) => {
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
});
} else {
@@ -335,7 +322,7 @@ const CompositionPanel: FC = observer(() => {
);
});
export const Composition: FC = observer(() => {
const Composition: FC = observer(() => {
return (
<div className="flex flex-col gap-1 p-2 h-full overflow-hidden">
<WorkHeader />
@@ -343,3 +330,5 @@ export const Composition: FC = observer(() => {
</div>
);
});
export default Composition;
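generateNs above posts the composition prompt to /text-to-midi and feeds the result to the html-midi-player/@magenta pipeline. A hedged sketch of the request from outside the component; the endpoint, method and headers come from the diff, while the body field name and binary response handling are assumptions:

import { getServerRoot } from '../utils';

async function renderMidi(port: number, prompt: string): Promise<ArrayBuffer> {
  // 'text' field name and arrayBuffer response are assumptions.
  const res = await fetch(getServerRoot(port) + '/text-to-midi', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ text: prompt })
  });
  return res.arrayBuffer();
}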

View File

@@ -29,45 +29,14 @@ import { updateConfig } from '../apis';
import { ConvertModel, ConvertSafetensors, FileExists, GetPyError } from '../../wailsjs/go/backend_golang/App';
import { checkDependencies, getStrategy } from '../utils';
import { useTranslation } from 'react-i18next';
import { WindowShow } from '../../wailsjs/runtime/runtime';
import { WindowShow } from '../../wailsjs/runtime';
import strategyImg from '../assets/images/strategy.jpg';
import strategyZhImg from '../assets/images/strategy_zh.jpg';
import { ResetConfigsButton } from '../components/ResetConfigsButton';
import { useMediaQuery } from 'usehooks-ts';
import { ApiParameters, Device, ModelParameters, Precision } from '../types/configs';
export type ApiParameters = {
apiPort: number
maxResponseToken: number;
temperature: number;
topP: number;
presencePenalty: number;
frequencyPenalty: number;
}
export type Device = 'CPU' | 'CUDA' | 'CUDA-Beta' | 'WebGPU' | 'MPS' | 'Custom';
export type Precision = 'fp16' | 'int8' | 'fp32';
export type ModelParameters = {
// different models can not have the same name
modelName: string;
device: Device;
precision: Precision;
storedLayers: number;
maxStoredLayers: number;
useCustomCuda?: boolean;
customStrategy?: string;
useCustomTokenizer?: boolean;
customTokenizer?: string;
}
export type ModelConfig = {
// different configs can have the same name
name: string;
apiParameters: ApiParameters
modelParameters: ModelParameters
}
export const Configs: FC = observer(() => {
const Configs: FC = observer(() => {
const { t } = useTranslation();
const [selectedIndex, setSelectedIndex] = React.useState(commonStore.currentModelConfigIndex);
const [selectedConfig, setSelectedConfig] = React.useState(commonStore.modelConfigs[selectedIndex]);
@@ -423,7 +392,7 @@ export const Configs: FC = observer(() => {
{
(selectedConfig.modelParameters.device.includes('CUDA') || selectedConfig.modelParameters.device === 'Custom') &&
<Labeled label={t('Use Custom CUDA kernel to Accelerate')}
desc={t('Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues. If it fails to start, please turn off this option.')}
desc={t('Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues (output garbled). If it fails to start, please turn off this option, or try to upgrade your gpu driver.')}
content={
<Switch checked={selectedConfig.modelParameters.useCustomCuda}
onChange={(e, data) => {
@@ -472,9 +441,23 @@ export const Configs: FC = observer(() => {
/>
</div>
<div className="flex flex-row-reverse sm:fixed bottom-2 right-2">
<RunButton onClickRun={onClickSave} />
<div className="flex gap-2">
{selectedConfig.modelParameters.device !== 'WebGPU'
&& <Checkbox className="select-none"
size="large" label={t('Enable WebUI')}
checked={selectedConfig.enableWebUI}
onChange={(_, data) => {
setSelectedConfig({
...selectedConfig,
enableWebUI: data.checked as boolean
});
}} />}
<RunButton onClickRun={onClickSave} />
</div>
</div>
</div>
} />
);
});
export default Configs;

View File

@@ -9,19 +9,7 @@ import { ToolTipButton } from '../components/ToolTipButton';
import { Folder20Regular, Pause20Regular, Play20Regular } from '@fluentui/react-icons';
import { AddToDownloadList, OpenFileFolder, PauseDownload } from '../../wailsjs/go/backend_golang/App';
export type DownloadStatus = {
name: string;
path: string;
url: string;
transferred: number;
size: number;
speed: number;
progress: number;
downloading: boolean;
done: boolean;
}
export const Downloads: FC = observer(() => {
const Downloads: FC = observer(() => {
const { t } = useTranslation();
const finishedModelsLen = commonStore.downloadList.filter((status) => status.done && status.name.endsWith('.pth')).length;
useEffect(() => {
@@ -91,3 +79,5 @@ export const Downloads: FC = observer(() => {
} />
);
});
export default Downloads;

View File

@@ -1,11 +1,13 @@
import { CompoundButton, Link, Text } from '@fluentui/react-components';
import React, { FC, ReactElement } from 'react';
import React, { FC } from 'react';
import banner from '../assets/images/banner.jpg';
import {
Chat20Regular,
ClipboardEdit20Regular,
DataUsageSettings20Regular,
DocumentSettings20Regular
DocumentSettings20Regular,
MusicNote220Regular,
Settings20Regular
} from '@fluentui/react-icons';
import { useNavigate } from 'react-router';
import { observer } from 'mobx-react-lite';
@@ -14,21 +16,13 @@ import manifest from '../../../manifest.json';
import { BrowserOpenURL } from '../../wailsjs/runtime';
import { useTranslation } from 'react-i18next';
import { ConfigSelector } from '../components/ConfigSelector';
import MarkdownRender from '../components/MarkdownRender';
import commonStore from '../stores/commonStore';
import { Completion } from './Completion';
import { ResetConfigsButton } from '../components/ResetConfigsButton';
import { AdvancedGeneralSettings } from './Settings';
import { NavCard } from '../types/home';
import { LazyImportComponent } from '../components/LazyImportComponent';
export type IntroductionContent = { [lang: string]: string }
type NavCard = {
label: string;
desc: string;
path: string;
icon: ReactElement;
};
const navCards: NavCard[] = [
const clientNavCards: NavCard[] = [
{
label: 'Chat',
desc: 'Go to chat page',
@@ -55,7 +49,36 @@ const navCards: NavCard[] = [
}
];
export const Home: FC = observer(() => {
const webNavCards: NavCard[] = [
{
label: 'Chat',
desc: 'Go to chat page',
path: '/chat',
icon: <Chat20Regular />
},
{
label: 'Completion',
desc: 'Writer, Translator, Role-playing',
path: '/completion',
icon: <ClipboardEdit20Regular />
},
{
label: 'Composition',
desc: '',
path: '/composition',
icon: <MusicNote220Regular />
},
{
label: 'Settings',
desc: '',
path: '/settings',
icon: <Settings20Regular />
}
];
const MarkdownRender = React.lazy(() => import('../components/MarkdownRender'));
const Home: FC = observer(() => {
const { t } = useTranslation();
const navigate = useNavigate();
const lang: string = commonStore.settings.language;
@@ -64,39 +87,64 @@ export const Home: FC = observer(() => {
navigate({ pathname: path });
};
return (
<div className="flex flex-col justify-between h-full">
<img className="rounded-xl select-none hidden sm:block"
style={{ maxHeight: '40%', margin: '0 auto' }} src={banner} />
<div className="flex flex-col gap-2">
<Text size={600} weight="medium">{t('Introduction')}</Text>
<div className="h-40 overflow-y-auto overflow-x-hidden p-1">
<MarkdownRender>
{lang in commonStore.introduction ? commonStore.introduction[lang] : commonStore.introduction['en']}
</MarkdownRender>
return commonStore.platform === 'web' ?
(
<div className="flex flex-col gap-2 h-full">
<img className="rounded-xl select-none object-cover grow"
style={{ maxHeight: '40%' }} src={banner} />
<div className="grow"></div>
<div className="grid grid-cols-2 sm:grid-cols-4 gap-5">
{webNavCards.map(({ label, path, icon, desc }, index) => (
<CompoundButton icon={icon} secondaryContent={t(desc)} key={`${path}-${index}`} value={path}
size="large" onClick={() => onClickNavCard(path)}>
{t(label)}
</CompoundButton>
))}
</div>
</div>
<div className="grid grid-cols-2 sm:grid-cols-4 gap-5">
{navCards.map(({ label, path, icon, desc }, index) => (
<CompoundButton icon={icon} secondaryContent={t(desc)} key={`${path}-${index}`} value={path}
size="large" onClick={() => onClickNavCard(path)}>
{t(label)}
</CompoundButton>
))}
</div>
<div className="flex flex-col gap-2">
<div className="flex flex-row-reverse sm:fixed bottom-2 right-2">
<div className="flex gap-3">
<ResetConfigsButton />
<ConfigSelector />
<RunButton />
<div className="flex flex-col gap-2">
<AdvancedGeneralSettings />
<div className="flex gap-4 items-end">
{t('Version')}: {manifest.version}
<Link onClick={() => BrowserOpenURL('https://github.com/josStorer/RWKV-Runner')}>{t('Help')}</Link>
</div>
</div>
<div className="flex gap-4 items-end">
{t('Version')}: {manifest.version}
<Link onClick={() => BrowserOpenURL('https://github.com/josStorer/RWKV-Runner')}>{t('Help')}</Link>
</div>
)
: (
<div className="flex flex-col justify-between h-full">
<img className="rounded-xl select-none object-cover hidden sm:block"
style={{ maxHeight: '40%' }} src={banner} />
<div className="flex flex-col gap-2">
<Text size={600} weight="medium">{t('Introduction')}</Text>
<div className="h-40 overflow-y-auto overflow-x-hidden p-1">
<LazyImportComponent lazyChildren={MarkdownRender}>
{lang in commonStore.introduction ? commonStore.introduction[lang] : commonStore.introduction['en']}
</LazyImportComponent>
</div>
</div>
<div className="grid grid-cols-2 sm:grid-cols-4 gap-5">
{clientNavCards.map(({ label, path, icon, desc }, index) => (
<CompoundButton icon={icon} secondaryContent={t(desc)} key={`${path}-${index}`} value={path}
size="large" onClick={() => onClickNavCard(path)}>
{t(label)}
</CompoundButton>
))}
</div>
<div className="flex flex-col gap-2">
<div className="flex flex-row-reverse sm:fixed bottom-2 right-2">
<div className="flex gap-3">
<ResetConfigsButton />
<ConfigSelector />
<RunButton />
</div>
</div>
<div className="flex gap-4 items-end">
{t('Version')}: {manifest.version}
<Link onClick={() => BrowserOpenURL('https://github.com/josStorer/RWKV-Runner')}>{t('Help')}</Link>
</div>
</div>
</div>
</div>
);
);
});
export default Home;

View File

@@ -22,21 +22,7 @@ import { Page } from '../components/Page';
import { bytesToGb, refreshModels, saveConfigs, toastWithButton } from '../utils';
import { useTranslation } from 'react-i18next';
import { useNavigate } from 'react-router';
export type ModelSourceItem = {
name: string;
size: number;
lastUpdated: string;
desc?: { [lang: string]: string | undefined; };
SHA256?: string;
url?: string;
downloadUrl?: string;
isComplete?: boolean;
isLocal?: boolean;
localSize?: number;
lastUpdatedMs?: number;
hide?: boolean;
};
import { ModelSourceItem } from '../types/models';
const columns: TableColumnDefinition<ModelSourceItem>[] = [
createTableColumn<ModelSourceItem>({
@@ -165,7 +151,7 @@ const columns: TableColumnDefinition<ModelSourceItem>[] = [
})
];
export const Models: FC = observer(() => {
const Models: FC = observer(() => {
const { t } = useTranslation();
return (
@@ -220,3 +206,5 @@ export const Models: FC = observer(() => {
} />
);
});
export default Models;

View File

@@ -1,14 +1,14 @@
import React, { FC, useState } from 'react';
import { DragDropContext, Draggable, Droppable, DropResult } from 'react-beautiful-dnd';
import commonStore from '../../stores/commonStore';
import { Preset } from './PresetsButton';
import { observer } from 'mobx-react-lite';
import { v4 as uuid } from 'uuid';
import { Button, Card, Dropdown, Option, Textarea } from '@fluentui/react-components';
import { useTranslation } from 'react-i18next';
import { ToolTipButton } from '../../components/ToolTipButton';
import { Delete20Regular, ReOrderDotsVertical20Regular } from '@fluentui/react-icons';
import { ConversationMessage, Role } from '../Chat';
import { Preset } from '../../types/presets';
import { ConversationMessage, Role } from '../../types/chat';
type Item = {
id: string;
@@ -31,7 +31,7 @@ const reorder = (list: Item[], startIndex: number, endIndex: number) => {
return result;
};
export const MessagesEditor: FC = observer(() => {
const MessagesEditor: FC = observer(() => {
const { t } = useTranslation();
const editingPreset = commonStore.editingPreset!;
@@ -152,3 +152,5 @@ export const MessagesEditor: FC = observer(() => {
</div>
);
});
export default MessagesEditor;

View File

@@ -1,6 +1,6 @@
// TODO refactor
import React, { FC, PropsWithChildren, ReactElement, useState } from 'react';
import React, { FC, lazy, PropsWithChildren, ReactElement, useState } from 'react';
import {
Button,
Dialog,
@@ -25,43 +25,21 @@ import {
} from '@fluentui/react-icons';
import { ToolTipButton } from '../../components/ToolTipButton';
import { useTranslation } from 'react-i18next';
import { botName, Conversation, ConversationMessage, MessageType, userName } from '../Chat';
import { SelectTabEventHandler } from '@fluentui/react-tabs';
import { Labeled } from '../../components/Labeled';
import commonStore from '../../stores/commonStore';
import logo from '../../assets/images/logo.png';
import { observer } from 'mobx-react-lite';
import { MessagesEditor } from './MessagesEditor';
import { ClipboardGetText, ClipboardSetText } from '../../../wailsjs/runtime';
import { toast } from 'react-toastify';
import { CustomToastContainer } from '../../components/CustomToastContainer';
import { v4 as uuid } from 'uuid';
import { absPathAsset } from '../../utils';
import { Preset, PresetsNavigationItem } from '../../types/presets';
import { botName, Conversation, MessageType, userName } from '../../types/chat';
import { LazyImportComponent } from '../../components/LazyImportComponent';
export type PresetType = 'chat' | 'completion' | 'chatInCompletion'
export type Preset = {
name: string,
tag: string,
// if name and sourceUrl are same, it will be overridden when importing
sourceUrl: string,
desc: string,
avatarImg: string,
type: PresetType,
// chat
welcomeMessage: string,
messages: ConversationMessage[],
displayPresetMessages: boolean,
// completion
prompt: string,
stop: string,
injectStart: string,
injectEnd: string,
presystem?: boolean,
userName?: string,
assistantName?: string
}
export const defaultPreset: Preset = {
const defaultPreset: Preset = {
name: 'RWKV',
tag: 'default',
sourceUrl: '',
@@ -77,6 +55,8 @@ export const defaultPreset: Preset = {
injectEnd: ''
};
const MessagesEditor = lazy(() => import('./MessagesEditor'));
const setActivePreset = (preset: Preset) => {
commonStore.setActivePreset(preset);
//TODO if (preset.displayPresetMessages) {
@@ -100,7 +80,7 @@ const setActivePreset = (preset: Preset) => {
//}
};
export const PresetCardFrame: FC<PropsWithChildren & { onClick?: () => void }> = (props) => {
const PresetCardFrame: FC<PropsWithChildren & { onClick?: () => void }> = (props) => {
return <Button
className="flex flex-col gap-1 w-32 h-56 break-all"
style={{ minWidth: 0, borderRadius: '0.75rem', justifyContent: 'unset' }}
@@ -110,7 +90,7 @@ export const PresetCardFrame: FC<PropsWithChildren & { onClick?: () => void }> =
</Button>;
};
export const PresetCard: FC<{
const PresetCard: FC<{
avatarImg: string,
name: string,
desc: string,
@@ -124,7 +104,7 @@ export const PresetCard: FC<{
const { t } = useTranslation();
return <PresetCardFrame onClick={onClick}>
<img src={avatarImg} className="rounded-xl select-none ml-auto mr-auto h-28" />
<img src={absPathAsset(avatarImg)} className="rounded-xl select-none ml-auto mr-auto h-28" />
<Text size={400}>{name}</Text>
<Text size={200} style={{
overflow: 'hidden', textOverflow: 'ellipsis',
@@ -146,7 +126,7 @@ export const PresetCard: FC<{
</PresetCardFrame>;
});
export const ChatPresetEditor: FC<{
const ChatPresetEditor: FC<{
triggerButton: ReactElement,
presetIndex: number
}> = observer(({ triggerButton, presetIndex }) => {
@@ -167,8 +147,14 @@ export const ChatPresetEditor: FC<{
const importPreset = () => {
ClipboardGetText().then((text) => {
try {
if (!text.trim().startsWith('{'))
text = new TextDecoder().decode(
new Uint8Array(atob(text)
.split('')
.map((c) => c.charCodeAt(0))));
const preset = JSON.parse(text);
setEditingPreset(preset);
setEditingMessages(false);
toast(t('Imported successfully'), {
type: 'success',
autoClose: 1000
@@ -242,7 +228,7 @@ export const ChatPresetEditor: FC<{
<Button appearance="subtle" icon={<Dismiss20Regular />} />
</DialogTrigger>
</div>
<img src={editingPreset.avatarImg} className="rounded-xl select-none ml-auto mr-auto h-28" />
<img src={absPathAsset(editingPreset.avatarImg)} className="rounded-xl select-none ml-auto mr-auto h-28" />
<Labeled flex breakline label={t('Name')}
content={
<div className="flex gap-2">
@@ -284,7 +270,7 @@ export const ChatPresetEditor: FC<{
});
}} />
} />
<MessagesEditor />
<LazyImportComponent lazyChildren={MessagesEditor} />
</div> :
<div className="flex flex-col gap-1 p-2 overflow-x-hidden overflow-y-auto">
<Labeled flex breakline label={`${t('Description')} (${t('Preview Only')})`}
@@ -349,7 +335,7 @@ export const ChatPresetEditor: FC<{
</Dialog>;
});
export const ChatPresets: FC = observer(() => {
const ChatPresets: FC = observer(() => {
const { t } = useTranslation();
return <div className="flex flex-wrap gap-2">
@@ -385,11 +371,6 @@ export const ChatPresets: FC = observer(() => {
</div>;
});
type PresetsNavigationItem = {
icon: ReactElement;
element: ReactElement;
};
const pages: { [label: string]: PresetsNavigationItem } = {
Chat: {
icon: <Chat20Regular />,
@@ -405,7 +386,7 @@ const pages: { [label: string]: PresetsNavigationItem } = {
}
};
export const PresetsManager: FC<{ initTab: string }> = ({ initTab }) => {
const PresetsManager: FC<{ initTab: string }> = ({ initTab }) => {
const { t } = useTranslation();
const [tab, setTab] = useState(initTab);
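importPreset above now accepts base64 payloads: any clipboard text not starting with { is atob-decoded into bytes and then UTF-8 decoded. The byte dance is needed because atob/btoa only handle Latin-1, while preset fields can contain arbitrary UTF-8. A sketch of the matching encoder (hypothetical helper, not part of this diff):

import { Preset } from '../../types/presets';

// Hypothetical counterpart to the decoding in importPreset.
const encodePreset = (preset: Preset): string => {
  const bytes = new TextEncoder().encode(JSON.stringify(preset)); // UTF-8 bytes
  let binary = '';
  bytes.forEach((b) => binary += String.fromCharCode(b)); // bytes -> Latin-1 string
  return btoa(binary); // now safe for btoa
};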

View File

@@ -16,33 +16,171 @@ import { observer } from 'mobx-react-lite';
import { useTranslation } from 'react-i18next';
import { checkUpdate, toastWithButton } from '../utils';
import { RestartApp } from '../../wailsjs/go/backend_golang/App';
import { Language, Languages } from '../types/settings';
export const Languages = {
dev: 'English', // i18n default
zh: '简体中文',
ja: '日本語'
};
export type Language = keyof typeof Languages;
export const GeneralSettings: FC = observer(() => {
const { t } = useTranslation();
return <div className="flex flex-col gap-2">
<Labeled label={t('Language')} flex spaceBetween content={
<Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
value={Languages[commonStore.settings.language]}
selectedOptions={[commonStore.settings.language]}
onOptionSelect={(_, data) => {
if (data.optionValue) {
const lang = data.optionValue as Language;
commonStore.setSettings({
language: lang
});
}
}}>
{
Object.entries(Languages).map(([langKey, desc]) =>
<Option key={langKey} value={langKey}>{desc}</Option>)
}
</Dropdown>
} />
{
commonStore.platform === 'windows' &&
<Labeled label={t('DPI Scaling')} flex spaceBetween content={
<Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
value={commonStore.settings.dpiScaling + '%'}
selectedOptions={[commonStore.settings.dpiScaling.toString()]}
onOptionSelect={(_, data) => {
if (data.optionValue) {
commonStore.setSettings({
dpiScaling: Number(data.optionValue)
});
toastWithButton(t('Restart the app to apply DPI Scaling.'), t('Restart'), () => {
RestartApp();
}, {
autoClose: 5000
});
}
}}>
{
Array.from({ length: 7 }, (_, i) => (i + 2) * 25).map((v, i) =>
<Option key={i} value={v.toString()}>{v + '%'}</Option>)
}
</Dropdown>
} />
}
<Labeled label={t('Dark Mode')} flex spaceBetween content={
<Switch checked={commonStore.settings.darkMode}
onChange={(e, data) => {
commonStore.setSettings({
darkMode: data.checked
});
}} />
} />
</div>;
});
export type SettingsType = {
language: Language
darkMode: boolean
autoUpdatesCheck: boolean
giteeUpdatesSource: boolean
cnMirror: boolean
host: string
dpiScaling: number
customModelsPath: string
customPythonPath: string
apiUrl: string
apiKey: string
apiChatModelName: string
apiCompletionModelName: string
}
export const Settings: FC = observer(() => {
const { t, i18n } = useTranslation();
export const AdvancedGeneralSettings: FC = observer(() => {
const { t } = useTranslation();
return <div className="flex flex-col gap-2">
<Labeled label={'API URL'}
content={
<div className="flex gap-2">
<Input style={{ minWidth: 0 }} className="grow" value={commonStore.settings.apiUrl}
onChange={(e, data) => {
commonStore.setSettings({
apiUrl: data.value
});
}} />
<Dropdown style={{ minWidth: '33px' }} listbox={{ style: { minWidth: 0 } }}
value="..." selectedOptions={[]} expandIcon={null}
onOptionSelect={(_, data) => {
commonStore.setSettings({
apiUrl: data.optionValue
});
if (data.optionText === 'OpenAI') {
if (commonStore.settings.apiChatModelName === 'rwkv')
commonStore.setSettings({
apiChatModelName: 'gpt-3.5-turbo'
});
if (commonStore.settings.apiCompletionModelName === 'rwkv')
commonStore.setSettings({
apiCompletionModelName: 'text-davinci-003'
});
}
}}>
<Option value="">{t('Localhost')!}</Option>
<Option value="https://api.openai.com">OpenAI</Option>
</Dropdown>
</div>
} />
<Labeled label={'API Key'}
content={
<Input type="password" className="grow" placeholder="sk-" value={commonStore.settings.apiKey}
onChange={(e, data) => {
commonStore.setSettings({
apiKey: data.value
});
}} />
} />
<Labeled label={t('API Chat Model Name')}
content={
<div className="flex gap-2">
<Input style={{ minWidth: 0 }} className="grow" placeholder="rwkv"
value={commonStore.settings.apiChatModelName}
onChange={(e, data) => {
commonStore.setSettings({
apiChatModelName: data.value
});
}} />
<Dropdown style={{ minWidth: '33px' }} listbox={{ style: { minWidth: 0 } }}
value="..." selectedOptions={[]} expandIcon={null}
onOptionSelect={(_, data) => {
if (data.optionValue) {
commonStore.setSettings({
apiChatModelName: data.optionValue
});
}
}}>
{
['rwkv', 'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-32k-0613', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613']
.map((v, i) =>
<Option key={i} value={v}>{v}</Option>
)
}
</Dropdown>
</div>
} />
<Labeled label={t('API Completion Model Name')}
content={
<div className="flex gap-2">
<Input style={{ minWidth: 0 }} className="grow" placeholder="rwkv"
value={commonStore.settings.apiCompletionModelName}
onChange={(e, data) => {
commonStore.setSettings({
apiCompletionModelName: data.value
});
}} />
<Dropdown style={{ minWidth: '33px' }} listbox={{ style: { minWidth: 0 } }}
value="..." selectedOptions={[]} expandIcon={null}
onOptionSelect={(_, data) => {
if (data.optionValue) {
commonStore.setSettings({
apiCompletionModelName: data.optionValue
});
}
}}>
{
['rwkv', 'text-davinci-003', 'text-davinci-002', 'text-curie-001', 'text-babbage-001', 'text-ada-001']
.map((v, i) =>
<Option key={i} value={v}>{v}</Option>
)
}
</Dropdown>
</div>
} />
</div>;
});
const Settings: FC = observer(() => {
const { t } = useTranslation();
const advancedHeaderRef = useRef<HTMLDivElement>(null);
useEffect(() => {
@@ -53,227 +191,101 @@ export const Settings: FC = observer(() => {
return (
<Page title={t('Settings')} content={
<div className="flex flex-col gap-2 overflow-y-auto overflow-x-hidden p-1">
<Labeled label={t('Language')} flex spaceBetween content={
<Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
value={Languages[commonStore.settings.language]}
selectedOptions={[commonStore.settings.language]}
onOptionSelect={(_, data) => {
if (data.optionValue) {
const lang = data.optionValue as Language;
commonStore.setSettings({
language: lang
});
}
}}>
{
Object.entries(Languages).map(([langKey, desc]) =>
<Option key={langKey} value={langKey}>{desc}</Option>)
}
</Dropdown>
} />
{
commonStore.platform === 'windows' &&
<Labeled label={t('DPI Scaling')} flex spaceBetween content={
<Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
value={commonStore.settings.dpiScaling + '%'}
selectedOptions={[commonStore.settings.dpiScaling.toString()]}
onOptionSelect={(_, data) => {
if (data.optionValue) {
commonStore.setSettings({
dpiScaling: Number(data.optionValue)
});
toastWithButton(t('Restart the app to apply DPI Scaling.'), t('Restart'), () => {
RestartApp();
}, {
autoClose: 5000
});
}
}}>
{
Array.from({ length: 7 }, (_, i) => (i + 2) * 25).map((v, i) =>
<Option key={i} value={v.toString()}>{v + '%'}</Option>)
}
</Dropdown>
} />
}
<Labeled label={t('Dark Mode')} flex spaceBetween content={
<Switch checked={commonStore.settings.darkMode}
onChange={(e, data) => {
commonStore.setSettings({
darkMode: data.checked
});
}} />
} />
<Labeled label={t('Automatic Updates Check')} flex spaceBetween content={
<Switch checked={commonStore.settings.autoUpdatesCheck}
onChange={(e, data) => {
commonStore.setSettings({
autoUpdatesCheck: data.checked
});
if (data.checked)
checkUpdate(true);
}} />
} />
{
commonStore.settings.language === 'zh' &&
<Labeled label={t('Use Gitee Updates Source')} flex spaceBetween content={
<Switch checked={commonStore.settings.giteeUpdatesSource}
onChange={(e, data) => {
commonStore.setSettings({
giteeUpdatesSource: data.checked
});
}} />
} />
}
{
commonStore.settings.language === 'zh' && commonStore.platform !== 'linux' &&
<Labeled label={t('Use Tsinghua Pip Mirrors')} flex spaceBetween content={
<Switch checked={commonStore.settings.cnMirror}
onChange={(e, data) => {
commonStore.setSettings({
cnMirror: data.checked
});
}} />
} />
}
<Labeled label={t('Allow external access to the API (service must be restarted)')} flex spaceBetween content={
<Switch checked={commonStore.settings.host !== '127.0.0.1'}
onChange={(e, data) => {
commonStore.setSettings({
host: data.checked ? '0.0.0.0' : '127.0.0.1'
});
}} />
} />
<Accordion collapsible openItems={!commonStore.advancedCollapsed && 'advanced'} onToggle={(e, data) => {
if (data.value === 'advanced')
commonStore.setAdvancedCollapsed(!commonStore.advancedCollapsed);
}}>
<AccordionItem value="advanced">
<AccordionHeader ref={advancedHeaderRef} size="large">{t('Advanced')}</AccordionHeader>
<AccordionPanel>
<div className="flex flex-col gap-2 overflow-hidden">
{commonStore.platform !== 'darwin' &&
<Labeled label={t('Custom Models Path')}
content={
<Input className="grow" placeholder="./models" value={commonStore.settings.customModelsPath}
onChange={(e, data) => {
commonStore.setSettings({
customModelsPath: data.value
});
}} />
} />
}
<Labeled label={t('Custom Python Path')} // if set, will not use precompiled cuda kernel
content={
<Input className="grow" placeholder="./py310/python" value={commonStore.settings.customPythonPath}
onChange={(e, data) => {
commonStore.setDepComplete(false);
commonStore.setSettings({
customPythonPath: data.value
});
}} />
} />
<Labeled label={'API URL'}
content={
<div className="flex gap-2">
<Input style={{ minWidth: 0 }} className="grow" value={commonStore.settings.apiUrl}
onChange={(e, data) => {
commonStore.setSettings({
apiUrl: data.value
});
}} />
<Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
value="..." selectedOptions={[]} expandIcon={null}
onOptionSelect={(_, data) => {
commonStore.setSettings({
apiUrl: data.optionValue
});
if (data.optionText === 'OpenAI') {
if (commonStore.settings.apiChatModelName === 'rwkv')
commonStore.setSettings({
apiChatModelName: 'gpt-3.5-turbo'
});
if (commonStore.settings.apiCompletionModelName === 'rwkv')
commonStore.setSettings({
apiCompletionModelName: 'text-davinci-003'
});
}
}}>
<Option value="">{t('Localhost')!}</Option>
<Option value="https://api.openai.com">OpenAI</Option>
</Dropdown>
</div>
} />
<Labeled label={'API Key'}
content={
<Input className="grow" placeholder="sk-" value={commonStore.settings.apiKey}
onChange={(e, data) => {
commonStore.setSettings({
apiKey: data.value
});
}} />
} />
<Labeled label={t('API Chat Model Name')}
content={
<div className="flex gap-2">
<Input style={{ minWidth: 0 }} className="grow" placeholder="rwkv"
value={commonStore.settings.apiChatModelName}
onChange={(e, data) => {
commonStore.setSettings({
apiChatModelName: data.value
});
}} />
<Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
value="..." selectedOptions={[]} expandIcon={null}
onOptionSelect={(_, data) => {
if (data.optionValue) {
commonStore.setSettings({
apiChatModelName: data.optionValue
});
}
}}>
{
['rwkv', 'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-32k-0613', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613']
.map((v, i) =>
<Option key={i} value={v}>{v}</Option>
)
}
</Dropdown>
</div>
} />
<Labeled label={t('API Completion Model Name')}
content={
<div className="flex gap-2">
<Input style={{ minWidth: 0 }} className="grow" placeholder="rwkv"
value={commonStore.settings.apiCompletionModelName}
onChange={(e, data) => {
commonStore.setSettings({
apiCompletionModelName: data.value
});
}} />
<Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
value="..." selectedOptions={[]} expandIcon={null}
onOptionSelect={(_, data) => {
if (data.optionValue) {
commonStore.setSettings({
apiCompletionModelName: data.optionValue
});
}
}}>
{
['rwkv', 'text-davinci-003', 'text-davinci-002', 'text-curie-001', 'text-babbage-001', 'text-ada-001']
.map((v, i) =>
<Option key={i} value={v}>{v}</Option>
)
}
</Dropdown>
</div>
} />
commonStore.platform === 'web' ?
(
<div className="flex flex-col gap-2">
<GeneralSettings />
<AdvancedGeneralSettings />
</div>
</AccordionPanel>
</AccordionItem>
</Accordion>
)
:
(
<div className="flex flex-col gap-2">
<GeneralSettings />
<Labeled label={t('Automatic Updates Check')} flex spaceBetween content={
<Switch checked={commonStore.settings.autoUpdatesCheck}
onChange={(e, data) => {
commonStore.setSettings({
autoUpdatesCheck: data.checked
});
if (data.checked)
checkUpdate(true);
}} />
} />
{
commonStore.settings.language === 'zh' &&
<Labeled label={t('Use Gitee Updates Source')} flex spaceBetween content={
<Switch checked={commonStore.settings.giteeUpdatesSource}
onChange={(e, data) => {
commonStore.setSettings({
giteeUpdatesSource: data.checked
});
}} />
} />
}
{
commonStore.settings.language === 'zh' && commonStore.platform !== 'linux' &&
<Labeled label={t('Use Tsinghua Pip Mirrors')} flex spaceBetween content={
<Switch checked={commonStore.settings.cnMirror}
onChange={(e, data) => {
commonStore.setSettings({
cnMirror: data.checked
});
}} />
} />
}
<Labeled label={t('Allow external access to the API (service must be restarted)')} flex spaceBetween
content={
<Switch checked={commonStore.settings.host !== '127.0.0.1'}
onChange={(e, data) => {
commonStore.setSettings({
host: data.checked ? '0.0.0.0' : '127.0.0.1'
});
}} />
} />
<Accordion collapsible openItems={!commonStore.advancedCollapsed && 'advanced'} onToggle={(e, data) => {
if (data.value === 'advanced')
commonStore.setAdvancedCollapsed(!commonStore.advancedCollapsed);
}}>
<AccordionItem value="advanced">
<AccordionHeader ref={advancedHeaderRef} size="large">{t('Advanced')}</AccordionHeader>
<AccordionPanel>
<div className="flex flex-col gap-2 overflow-hidden">
{commonStore.platform !== 'darwin' &&
<Labeled label={t('Custom Models Path')}
content={
<Input className="grow" placeholder="./models"
value={commonStore.settings.customModelsPath}
onChange={(e, data) => {
commonStore.setSettings({
customModelsPath: data.value
});
}} />
} />
}
<Labeled label={t('Custom Python Path')} // if set, will not use precompiled cuda kernel
content={
<Input className="grow" placeholder="./py310/python"
value={commonStore.settings.customPythonPath}
onChange={(e, data) => {
commonStore.setDepComplete(false);
commonStore.setSettings({
customPythonPath: data.value
});
}} />
} />
<AdvancedGeneralSettings />
</div>
</AccordionPanel>
</AccordionItem>
</Accordion>
</div>
)
}
</div>
} />
);
});
export default Settings;
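
A note on how these settings are consumed: the API URL dropdown above only writes apiUrl into settings (and, when OpenAI is picked, swaps the default rwkv model names); the actual routing happens in the new getServerRoot helper in utils, shown further below. A minimal sketch of that flow — the endpoint path and the 8000 default port are assumptions for illustration, not the project's actual request code:

// Sketch only: how a request is expected to be routed once apiUrl is set.
// getServerRoot and commonStore are the names used elsewhere in this diff.
async function chatCompletions(body: unknown): Promise<Response> {
  const root = getServerRoot(8000); // custom apiUrl wins; '' on web; else http://127.0.0.1:8000
  return fetch(`${root}/v1/chat/completions`, { // path illustrative
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${commonStore.settings.apiKey}`
    },
    body: JSON.stringify(body)
  });
}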

View File

@@ -1,4 +1,4 @@
import React, { FC, ReactElement, useEffect, useRef, useState } from 'react';
import React, { FC, useEffect, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { Button, Dropdown, Input, Option, Select, Switch, Tab, TabList } from '@fluentui/react-components';
import {
@@ -24,7 +24,6 @@ import { Labeled } from '../components/Labeled';
import { ToolTipButton } from '../components/ToolTipButton';
import { DataUsageSettings20Regular, Folder20Regular } from '@fluentui/react-icons';
import { useNavigate } from 'react-router';
import { Precision } from './Configs';
import {
CategoryScale,
Chart as ChartJS,
@@ -40,6 +39,12 @@ import { ChartJSOrUndefined } from 'react-chartjs-2/dist/types';
import { WindowShow } from '../../wailsjs/runtime';
import { t } from 'i18next';
import { DialogButton } from '../components/DialogButton';
import {
DataProcessParameters,
LoraFinetuneParameters,
LoraFinetunePrecision,
TrainNavigationItem
} from '../types/train';
ChartJS.register(
CategoryScale,
@@ -86,39 +91,6 @@ const addLossDataToChart = (epoch: number, loss: number) => {
commonStore.setChartData(commonStore.chartData);
};
export type DataProcessParameters = {
dataPath: string;
vocabPath: string;
}
export type LoraFinetunePrecision = 'bf16' | 'fp16' | 'tf32';
export type LoraFinetuneParameters = {
baseModel: string;
ctxLen: number;
epochSteps: number;
epochCount: number;
epochBegin: number;
epochSave: number;
microBsz: number;
accumGradBatches: number;
preFfn: boolean;
headQk: boolean;
lrInit: string;
lrFinal: string;
warmupSteps: number;
beta1: number;
beta2: number;
adamEps: string;
devices: number;
precision: LoraFinetunePrecision;
gradCp: boolean;
loraR: number;
loraAlpha: number;
loraDropout: number;
loraLoad: string
}
const loraFinetuneParametersOptions: Array<[key: keyof LoraFinetuneParameters, type: string, name: string]> = [
['devices', 'number', 'Devices'],
['precision', 'LoraFinetunePrecision', 'Precision'],
@@ -414,7 +386,7 @@ const LoraFinetune: FC = observer(() => {
contentText={t('The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\n' +
'When you provide a directory path, all the txt files within that directory will be automatically converted into training data. ' +
'This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\n' +
'The jsonl format file can be referenced at https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl.\n' +
'The jsonl format file can be referenced at https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl.\n' +
'You can also write it similar to OpenAI\'s playground format, as shown in https://platform.openai.com/playground/p/default-chat.\n' +
'Even for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. ' +
'If they are different dialogues or topics, they should be written in separate lines.')} />
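
For readers unfamiliar with the single-line requirement described in the text above, here is a minimal illustration of such training lines, assuming the {"text": ...} field shape used by the linked sample.jsonl (consult that file for the authoritative format):

// Illustrative only: two jsonl training samples, one conversation per line,
// with \n escapes standing in for line breaks as the help text describes.
const sampleLines = [
  '{"text": "User: Hello\\n\\nAssistant: Hi, how can I help you?"}',
  '{"text": "User: What is RWKV?\\n\\nAssistant: An RNN with Transformer-level performance."}'
];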
@@ -568,10 +540,6 @@ const LoraFinetune: FC = observer(() => {
);
});
type TrainNavigationItem = {
element: ReactElement;
};
const pages: { [label: string]: TrainNavigationItem } = {
'LoRA Finetune': {
element: <LoraFinetune />
@@ -582,7 +550,7 @@ const pages: { [label: string]: TrainNavigationItem } = {
};
export const Train: FC = () => {
const Train: FC = () => {
const { t } = useTranslation();
const [tab, setTab] = useState('LoRA Finetune');
@@ -607,3 +575,5 @@ export const Train: FC = () => {
</div>
</div>;
};
export default Train;

View File

@@ -1,5 +1,5 @@
import { ModelConfig } from './Configs';
import { CompletionPreset } from './Completion';
import { CompletionPreset } from '../types/completion';
import { ModelConfig } from '../types/configs';
export const defaultCompositionPrompt = '<pad>';

View File

@@ -1,5 +1,4 @@
import { ReactElement } from 'react';
import { Configs } from './Configs';
import { FC, lazy, LazyExoticComponent, ReactElement } from 'react';
import {
ArrowDownload20Regular,
Chat20Regular,
@@ -12,21 +11,12 @@ import {
Settings20Regular,
Storage20Regular
} from '@fluentui/react-icons';
import { Home } from './Home';
import { Chat } from './Chat';
import { Models } from './Models';
import { Train } from './Train';
import { Settings } from './Settings';
import { About } from './About';
import { Downloads } from './Downloads';
import { Completion } from './Completion';
import { Composition } from './Composition';
type NavigationItem = {
label: string;
path: string;
icon: ReactElement;
element: ReactElement;
element: LazyExoticComponent<FC>;
top: boolean;
};
@@ -35,70 +25,70 @@ export const pages: NavigationItem[] = [
label: 'Home',
path: '/',
icon: <Home20Regular />,
element: <Home />,
element: lazy(() => import('./Home')),
top: true
},
{
label: 'Chat',
path: '/chat',
icon: <Chat20Regular />,
element: <Chat />,
element: lazy(() => import('./Chat')),
top: true
},
{
label: 'Completion',
path: '/completion',
icon: <ClipboardEdit20Regular />,
element: <Completion />,
element: lazy(() => import('./Completion')),
top: true
},
{
label: 'Composition',
path: '/composition',
icon: <MusicNote220Regular />,
element: <Composition />,
element: lazy(() => import('./Composition')),
top: true
},
{
label: 'Configs',
path: '/configs',
icon: <DocumentSettings20Regular />,
element: <Configs />,
element: lazy(() => import('./Configs')),
top: true
},
{
label: 'Models',
path: '/models',
icon: <DataUsageSettings20Regular />,
element: <Models />,
element: lazy(() => import('./Models')),
top: true
},
{
label: 'Downloads',
path: '/downloads',
icon: <ArrowDownload20Regular />,
element: <Downloads />,
element: lazy(() => import('./Downloads')),
top: true
},
{
label: 'Train',
path: '/train',
icon: <Storage20Regular />,
element: <Train />,
element: lazy(() => import('./Train')),
top: true
},
{
label: 'Settings',
path: '/settings',
icon: <Settings20Regular />,
element: <Settings />,
element: lazy(() => import('./Settings')),
top: false
},
{
label: 'About',
path: '/about',
icon: <Info20Regular />,
element: <About />,
element: lazy(() => import('./About')),
top: false
}
];

View File

@@ -5,39 +5,42 @@ import { getStatus } from './apis';
import { EventsOn, WindowSetTitle } from '../wailsjs/runtime';
import manifest from '../../manifest.json';
import { defaultModelConfigs, defaultModelConfigsMac } from './pages/defaultConfigs';
import { Preset } from './pages/PresetsManager/PresetsButton';
import { wslHandler } from './pages/Train';
import { t } from 'i18next';
import { Preset } from './types/presets';
export async function startup() {
downloadProgramFiles();
EventsOn('downloadList', (data) => {
if (data)
commonStore.setDownloadList(data);
});
EventsOn('wsl', wslHandler);
EventsOn('wslerr', (e) => {
console.log(e);
});
initLocalModelsNotify();
initLoraModels();
initPresets();
initHardwareMonitor();
await GetPlatform().then(p => commonStore.setPlatform(p as Platform));
if (commonStore.platform !== 'web') {
downloadProgramFiles();
EventsOn('downloadList', (data) => {
if (data)
commonStore.setDownloadList(data);
});
EventsOn('wsl', (await import('./pages/Train')).wslHandler);
EventsOn('wslerr', (e) => {
console.log(e);
});
initLocalModelsNotify();
initLoraModels();
initHardwareMonitor();
}
await initConfig();
initCache(true).then(initRemoteText); // depends on config customModelsPath
if (commonStore.platform !== 'web') {
initCache(true).then(initRemoteText); // depends on config customModelsPath
if (commonStore.settings.autoUpdatesCheck) // depends on config settings
checkUpdate();
if (commonStore.settings.autoUpdatesCheck) // depends on config settings
checkUpdate();
getStatus(1000).then(status => { // depends on config api port
if (status)
commonStore.setStatus(status);
});
getStatus(1000).then(status => { // depends on config api port
if (status)
commonStore.setStatus(status);
});
}
}
async function initRemoteText() {
@@ -88,7 +91,8 @@ async function initCache(initUnfinishedModels: boolean) {
async function initPresets() {
await ReadJson('presets.json').then((presets: Preset[]) => {
commonStore.setPresets(presets, false);
if (Array.isArray(presets))
commonStore.setPresets(presets, false);
}).catch(() => {
});
}

View File

@@ -2,21 +2,20 @@ import { makeAutoObservable } from 'mobx';
import { getUserLanguage, isSystemLightMode, saveCache, saveConfigs, savePresets } from '../utils';
import { WindowSetDarkTheme, WindowSetLightTheme } from '../../wailsjs/runtime';
import manifest from '../../../manifest.json';
import { ModelConfig } from '../pages/Configs';
import { Conversation } from '../pages/Chat';
import { ModelSourceItem } from '../pages/Models';
import { DownloadStatus } from '../pages/Downloads';
import { SettingsType } from '../pages/Settings';
import { IntroductionContent } from '../pages/Home';
import { AboutContent } from '../pages/About';
import i18n from 'i18next';
import { CompletionPreset } from '../pages/Completion';
import { defaultCompositionPrompt, defaultModelConfigs, defaultModelConfigsMac } from '../pages/defaultConfigs';
import commonStore from './commonStore';
import { Preset } from '../pages/PresetsManager/PresetsButton';
import { DataProcessParameters, LoraFinetuneParameters } from '../pages/Train';
import { ChartData } from 'chart.js';
import { CompositionParams } from '../pages/Composition';
import { Preset } from '../types/presets';
import { AboutContent } from '../types/about';
import { Conversation } from '../types/chat';
import { CompletionPreset } from '../types/completion';
import { CompositionParams } from '../types/composition';
import { ModelConfig } from '../types/configs';
import { DownloadStatus } from '../types/downloads';
import { IntroductionContent } from '../types/home';
import { ModelSourceItem } from '../types/models';
import { SettingsType } from '../types/settings';
import { DataProcessParameters, LoraFinetuneParameters } from '../types/train';
export enum ModelStatus {
Offline,
@@ -31,9 +30,13 @@ export type Status = {
device_name: string;
}
export type Platform = 'windows' | 'darwin' | 'linux';
export type Attachment = {
name: string;
size: number;
content: string;
}
const labels = ['January', 'February', 'March', 'April', 'May', 'June', 'July'];
export type Platform = 'windows' | 'darwin' | 'linux' | 'web';
class CommonStore {
// global
@@ -54,6 +57,9 @@ class CommonStore {
conversation: Conversation = {};
conversationOrder: string[] = [];
activePreset: Preset | null = null;
attachmentUploading: boolean = false;
attachments: { [uuid: string]: Attachment[] } = {};
currentTempAttachment: Attachment | null = null;
// completion
completionPreset: CompletionPreset | null = null;
completionGenerating: boolean = false;
@@ -96,7 +102,7 @@ class CommonStore {
epochSteps: 200,
epochCount: 20,
epochBegin: 0,
epochSave: 2,
epochSave: 1,
microBsz: 1,
accumGradBatches: 8,
preFfn: false,
@@ -128,7 +134,7 @@ class CommonStore {
customModelsPath: './models',
customPythonPath: '',
apiUrl: '',
apiKey: 'sk-',
apiKey: '',
apiChatModelName: 'rwkv',
apiCompletionModelName: 'rwkv'
};
@@ -168,7 +174,7 @@ class CommonStore {
createModelConfig = (config: ModelConfig = defaultModelConfigs[0], saveConfig: boolean = true) => {
if (config.name === defaultModelConfigs[0].name) {
// deep copy
config = JSON.parse(JSON.stringify(commonStore.platform !== 'darwin' ? defaultModelConfigs[0] : defaultModelConfigsMac[0]));
config = JSON.parse(JSON.stringify(this.platform !== 'darwin' ? defaultModelConfigs[0] : defaultModelConfigsMac[0]));
config.name = new Date().toLocaleString();
}
this.modelConfigs.push(config);
@@ -325,6 +331,25 @@ class CommonStore {
setLoraModels(value: string[]) {
this.loraModels = value;
}
setAttachmentUploading(value: boolean) {
this.attachmentUploading = value;
}
setAttachments(value: { [uuid: string]: Attachment[] }) {
this.attachments = value;
}
setAttachment(uuid: string, value: Attachment[] | null) {
if (value === null)
delete this.attachments[uuid];
else
this.attachments[uuid] = value;
}
setCurrentTempAttachment(value: Attachment | null) {
this.currentTempAttachment = value;
}
}
export default new CommonStore();
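
The new attachment state is keyed by message uuid, and passing null to setAttachment deletes the key outright. A short usage sketch (the uuid and file values are made up):

// Sketch: attaching a document to one chat message, then removing it.
const uuid = 'msg-123'; // illustrative message id
const doc: Attachment = { name: 'notes.txt', size: 42, content: 'hello' };
commonStore.setAttachment(uuid, [doc]); // attach
commonStore.setAttachment(uuid, null);  // passing null deletes the entry entirely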

View File

@@ -1,12 +1,10 @@
[data-theme='dark'] {
@import 'highlight.js/scss/github-dark.scss';
@import 'github-markdown-css/github-markdown-dark.css';
--color-neutral-muted: rgba(110, 118, 129, 0.4);
}
[data-theme='light'] {
@import 'highlight.js/scss/github.scss';
@import 'github-markdown-css/github-markdown-light.css';
--color-neutral-muted: rgba(150, 160, 170, 0.3);
}

View File

@@ -0,0 +1 @@
export type AboutContent = { [lang: string]: string }

View File

@@ -0,0 +1,29 @@
export const userName = 'M E';
export const botName = 'A I';
export const welcomeUuid = 'welcome';
export enum MessageType {
Normal,
Error
}
export type Side = 'left' | 'right'
export type Color = 'neutral' | 'brand' | 'colorful'
export type MessageItem = {
sender: string,
type: MessageType,
color: Color,
avatarImg?: string,
time: string,
content: string,
side: Side,
done: boolean
}
export type Conversation = {
[uuid: string]: MessageItem
}
export type Role = 'assistant' | 'user' | 'system';
export type ConversationMessage = {
role: Role;
content: string;
}
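
As an illustration of how these types compose (the message content and color are made up; welcomeUuid, botName and MessageType come from the definitions above):

// Sketch: a finished bot message as it would sit in a Conversation map.
const conversation: Conversation = {
  [welcomeUuid]: {
    sender: botName,
    type: MessageType.Normal,
    color: 'colorful',
    time: new Date().toLocaleString(),
    content: 'Hello! How can I assist you today?',
    side: 'left',
    done: true
  }
};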

View File

@@ -0,0 +1,12 @@
import { ApiParameters } from './configs';
export type CompletionParams = Omit<ApiParameters, 'apiPort'> & {
stop: string,
injectStart: string,
injectEnd: string
};
export type CompletionPreset = {
name: string,
prompt: string,
params: CompletionParams
}

View File

@@ -0,0 +1,12 @@
import { NoteSequence } from '@magenta/music/esm/protobuf';
export type CompositionParams = {
prompt: string,
maxResponseToken: number,
temperature: number,
topP: number,
autoPlay: boolean,
useLocalSoundFont: boolean,
midi: ArrayBuffer | null,
ns: NoteSequence | null
}

View File

@@ -0,0 +1,29 @@
export type ApiParameters = {
apiPort: number
maxResponseToken: number;
temperature: number;
topP: number;
presencePenalty: number;
frequencyPenalty: number;
}
export type Device = 'CPU' | 'CUDA' | 'CUDA-Beta' | 'WebGPU' | 'MPS' | 'Custom';
export type Precision = 'fp16' | 'int8' | 'fp32';
export type ModelParameters = {
// different models can not have the same name
modelName: string;
device: Device;
precision: Precision;
storedLayers: number;
maxStoredLayers: number;
useCustomCuda?: boolean;
customStrategy?: string;
useCustomTokenizer?: boolean;
customTokenizer?: string;
}
export type ModelConfig = {
// different configs can have the same name
name: string;
apiParameters: ApiParameters;
modelParameters: ModelParameters;
enableWebUI?: boolean;
}
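
A config literal that satisfies these types, purely illustrative (the parameter values are not taken from the repo's presets; the config name follows the device-VRAM-size-language rule described in manifest.json):

// Sketch: one ModelConfig as the types above define it.
const exampleConfig: ModelConfig = {
  name: 'GPU-8G-3B-EN', // configs may share names, unlike models
  apiParameters: {
    apiPort: 8000,
    maxResponseToken: 1000,
    temperature: 1,
    topP: 0.3,
    presencePenalty: 0,
    frequencyPenalty: 1
  },
  modelParameters: {
    modelName: 'RWKV-5-World-1B5-v2-20231025-ctx4096.pth',
    device: 'CUDA',
    precision: 'fp16',
    storedLayers: 24,
    maxStoredLayers: 24
  },
  enableWebUI: false
};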

View File

@@ -0,0 +1,11 @@
export type DownloadStatus = {
name: string;
path: string;
url: string;
transferred: number;
size: number;
speed: number;
progress: number;
downloading: boolean;
done: boolean;
}

View File

@@ -0,0 +1,11 @@
import { ReactElement } from 'react';
export type IntroductionContent = {
[lang: string]: string
}
export type NavCard = {
label: string;
desc: string;
path: string;
icon: ReactElement;
};

View File

@@ -0,0 +1,14 @@
export type ModelSourceItem = {
name: string;
size: number;
lastUpdated: string;
desc?: { [lang: string]: string | undefined; };
SHA256?: string;
url?: string;
downloadUrl?: string;
isComplete?: boolean;
isLocal?: boolean;
localSize?: number;
lastUpdatedMs?: number;
hide?: boolean;
};

View File

@@ -0,0 +1,30 @@
import { ReactElement } from 'react';
import { ConversationMessage } from './chat';
export type PresetType = 'chat' | 'completion' | 'chatInCompletion'
export type Preset = {
name: string,
tag: string,
// if name and sourceUrl are same, it will be overridden when importing
sourceUrl: string,
desc: string,
avatarImg: string,
type: PresetType,
// chat
welcomeMessage: string,
messages: ConversationMessage[],
displayPresetMessages: boolean,
// completion
prompt: string,
stop: string,
injectStart: string,
injectEnd: string,
presystem?: boolean,
userName?: string,
assistantName?: string
}
export type PresetsNavigationItem = {
icon: ReactElement;
element: ReactElement;
};

View File

@@ -0,0 +1,21 @@
export const Languages = {
dev: 'English', // i18n default
zh: '简体中文',
ja: '日本語'
};
export type Language = keyof typeof Languages;
export type SettingsType = {
language: Language
darkMode: boolean
autoUpdatesCheck: boolean
giteeUpdatesSource: boolean
cnMirror: boolean
host: string
dpiScaling: number
customModelsPath: string
customPythonPath: string
apiUrl: string
apiKey: string
apiChatModelName: string
apiCompletionModelName: string
}

View File

@@ -0,0 +1,35 @@
import { ReactElement } from 'react';
export type DataProcessParameters = {
dataPath: string;
vocabPath: string;
}
export type LoraFinetunePrecision = 'bf16' | 'fp16' | 'tf32';
export type LoraFinetuneParameters = {
baseModel: string;
ctxLen: number;
epochSteps: number;
epochCount: number;
epochBegin: number;
epochSave: number;
microBsz: number;
accumGradBatches: number;
preFfn: boolean;
headQk: boolean;
lrInit: string;
lrFinal: string;
warmupSteps: number;
beta1: number;
beta2: number;
adamEps: string;
devices: number;
precision: LoraFinetunePrecision;
gradCp: boolean;
loraR: number;
loraAlpha: number;
loraDropout: number;
loraLoad: string
}
export type TrainNavigationItem = {
element: ReactElement;
};

View File

@@ -15,13 +15,13 @@ import { toast } from 'react-toastify';
import { t } from 'i18next';
import { ToastOptions } from 'react-toastify/dist/types';
import { Button } from '@fluentui/react-components';
import { Language, Languages, SettingsType } from '../pages/Settings';
import { ModelSourceItem } from '../pages/Models';
import { ModelConfig, ModelParameters } from '../pages/Configs';
import { DownloadStatus } from '../pages/Downloads';
import { DataProcessParameters, LoraFinetuneParameters } from '../pages/Train';
import { BrowserOpenURL, WindowShow } from '../../wailsjs/runtime';
import { NavigateFunction } from 'react-router';
import { ModelConfig, ModelParameters } from '../types/configs';
import { DownloadStatus } from '../types/downloads';
import { ModelSourceItem } from '../types/models';
import { Language, Languages, SettingsType } from '../types/settings';
import { DataProcessParameters, LoraFinetuneParameters } from '../types/train';
export type Cache = {
version: string
@@ -282,6 +282,32 @@ export function bytesToKb(size: number) {
return (size / 1024).toFixed(2);
}
export function bytesToReadable(size: number) {
if (size < 1024) return size + ' B';
else if (size < 1024 * 1024) return bytesToKb(size) + ' KB';
else if (size < 1024 * 1024 * 1024) return bytesToMb(size) + ' MB';
else return bytesToGb(size) + ' GB';
}
export function getServerRoot(defaultLocalPort: number) {
const customApiUrl = commonStore.settings.apiUrl.trim().replace(/\/$/, '');
if (customApiUrl)
return customApiUrl;
if (commonStore.platform === 'web')
return '';
return `http://127.0.0.1:${defaultLocalPort}`;
}
export function absPathAsset(path: string) {
if (commonStore.platform === 'web')
return path;
if ((path.length > 0 && path[0] === '/') ||
(path.length > 1 && path[1] === ':')) {
return '=>' + path;
}
return path;
}
export async function checkUpdate(notifyEvenLatest: boolean = false) {
fetch(!commonStore.settings.giteeUpdatesSource ?
'https://api.github.com/repos/josstorer/RWKV-Runner/releases/latest' :
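
Putting the new helpers together (expected results are shown in comments; the '=>' marker is what the FileLoader change in main.go below strips off again in order to serve absolute-path assets):

// Sketches of the helpers added above, evaluated on a desktop build.
getServerRoot(8000);                 // trimmed custom apiUrl if set; '' on web; else 'http://127.0.0.1:8000'
absPathAsset('/home/me/avatar.png'); // '=>/home/me/avatar.png' (leading '/' marks an absolute path)
absPathAsset('C:/avatars/a.png');    // '=>C:/avatars/a.png' (path[1] === ':' catches drive letters)
absPathAsset('assets/a.png');        // 'assets/a.png' (relative paths pass through untouched)
bytesToReadable(3155590194);         // '2.94 GB' (the 1B5 World model size listed in manifest.json)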

frontend/src/webWails.js Normal file
View File

@@ -0,0 +1,157 @@
function defineRuntime(name, func) {
window.runtime[name] = func
}
function defineApp(name, func) {
window.go['backend_golang']['App'][name] = func
}
if (!window.runtime) {
window.runtime = {}
document.title += ' WebUI'
// not implemented
defineRuntime('EventsOnMultiple', () => {
})
defineRuntime('WindowSetLightTheme', () => {
})
defineRuntime('WindowSetDarkTheme', () => {
})
defineRuntime('WindowShow', () => {
})
defineRuntime('WindowHide', () => {
})
// implemented
defineRuntime('ClipboardGetText', async () => {
return await navigator.clipboard.readText()
})
defineRuntime('ClipboardSetText', async (text) => {
await navigator.clipboard.writeText(text)
return true
})
defineRuntime('WindowSetTitle', (title) => {
document.title = title
})
defineRuntime('BrowserOpenURL', (url) => {
window.open(url, '_blank', 'noopener, noreferrer')
})
}
if (!window.go) {
window.go = {}
window.go['backend_golang'] = {}
window.go['backend_golang']['App'] = {}
// not implemented
defineApp('AddToDownloadList', async () => {
})
defineApp('ContinueDownload', async () => {
})
defineApp('ConvertData', async () => {
})
defineApp('ConvertModel', async () => {
})
defineApp('ConvertSafetensors', async () => {
})
defineApp('CopyFile', async () => {
})
defineApp('DeleteFile', async () => {
})
defineApp('DepCheck', async () => {
})
defineApp('DownloadFile', async () => {
})
defineApp('GetPyError', async () => {
})
defineApp('InstallPyDep', async () => {
})
defineApp('IsPortAvailable', async () => {
})
defineApp('MergeLora', async () => {
})
defineApp('OpenFileFolder', async () => {
})
defineApp('PauseDownload', async () => {
})
defineApp('ReadFileInfo', async () => {
})
defineApp('RestartApp', async () => {
})
defineApp('StartServer', async () => {
})
defineApp('StartWebGPUServer', async () => {
})
defineApp('UpdateApp', async () => {
})
defineApp('WslCommand', async () => {
})
defineApp('WslEnable', async () => {
})
defineApp('WslInstallUbuntu', async () => {
})
defineApp('WslIsEnabled', async () => {
})
defineApp('WslStart', async () => {
})
defineApp('WslStop', async () => {
})
// implemented
defineApp('FileExists', async () => {
return false
})
defineApp('GetPlatform', async () => {
return 'web'
})
defineApp('ListDirFiles', async () => {
return []
})
defineApp('OpenOpenFileDialog', async (filterPattern) => {
return new Promise((resolve, reject) => {
const input = document.createElement('input')
input.type = 'file'
input.accept = filterPattern
.replaceAll('*.txt', 'text/plain')
.replaceAll('*.', 'application/')
.replaceAll(';', ',')
input.onchange = e => {
const file = e.target?.files[0]
if (file.type === 'text/plain') {
const reader = new FileReader()
reader.readAsText(file, 'UTF-8')
reader.onload = readerEvent => {
const content = readerEvent.target?.result
resolve({
blob: file,
content: content
})
}
} else {
resolve({
blob: file
})
}
}
input.click()
})
})
defineApp('OpenSaveFileDialog', async (filterPattern, defaultFileName, savedContent) => {
const saver = await import('file-saver')
saver.saveAs(new Blob([savedContent], { type: 'text/plain;charset=utf-8' }), defaultFileName)
return ''
})
defineApp('OpenSaveFileDialogBytes', async (filterPattern, defaultFileName, savedContent) => {
const saver = await import('file-saver')
saver.saveAs(new Blob([new Uint8Array(savedContent)], { type: 'octet/stream' }), defaultFileName)
return ''
})
defineApp('ReadJson', async (fileName) => {
return JSON.parse(localStorage.getItem(fileName))
})
defineApp('SaveJson', async (fileName, data) => {
localStorage.setItem(fileName, JSON.stringify(data))
})
}
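
With this shim in place the rest of the frontend can call the generated Wails bindings unconditionally; in a browser the stubs answer instead of the Go backend. For example (a sketch; the import path follows the usual Wails layout and may differ):

// Sketch: the same calls work in desktop and web builds.
import { GetPlatform, ReadJson } from '../wailsjs/go/backend_golang/App';

const platform = await GetPlatform();       // 'web' when the shim above is active
const cache = await ReadJson('cache.json'); // localStorage-backed in the browser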

View File

@@ -1,6 +1,37 @@
import {defineConfig} from 'vite';
// @ts-ignore
import { dependencies } from './package.json';
import { defineConfig } from 'vite';
import react from '@vitejs/plugin-react';
import {visualizer} from 'rollup-plugin-visualizer';
import { visualizer } from 'rollup-plugin-visualizer';
// dependencies that exist anywhere
const vendor = [
'react', 'react-dom', 'react-router', 'react-router-dom',
'@fluentui/react-icons',
'mobx', 'mobx-react-lite',
'i18next', 'react-i18next',
'usehooks-ts', 'react-toastify',
'classnames'
];
const embedded = [
// split @fluentui/react-components by components
'@fluentui/react-components',
// dependencies that exist in single component
'react-beautiful-dnd',
'@magenta/music', 'html-midi-player',
'react-markdown', 'rehype-highlight', 'rehype-raw', 'remark-breaks', 'remark-gfm'
];
function renderChunks(deps: Record<string, string>) {
let chunks = {};
Object.keys(deps).forEach((key) => {
if ([...vendor, ...embedded].includes(key)) return;
chunks[key] = [key];
});
return chunks;
}
// https://vitejs.dev/config/
export default defineConfig({
@@ -9,5 +40,16 @@ export default defineConfig({
template: 'treemap',
gzipSize: true,
brotliSize: true
})]
})],
build: {
chunkSizeWarningLimit: 3000,
rollupOptions: {
output: {
manualChunks: {
vendor,
...renderChunks(dependencies)
}
}
}
}
});
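
renderChunks gives every remaining dependency its own chunk; only the vendor and embedded lists are excluded. An illustration under an assumed package.json (axios is a hypothetical dependency here):

// Sketch: react and mobx are in the vendor list, so only axios gets a dedicated chunk.
renderChunks({ react: '18.2.0', axios: '1.6.0', mobx: '6.10.0' });
// => { axios: ['axios'] }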

View File

@@ -28,12 +28,16 @@ export function GetPyError():Promise<string>;
export function InstallPyDep(arg1:string,arg2:boolean):Promise<string>;
export function IsPortAvailable(arg1:number):Promise<boolean>;
export function ListDirFiles(arg1:string):Promise<Array<backend_golang.FileInfo>>;
export function MergeLora(arg1:string,arg2:boolean,arg3:number,arg4:string,arg5:string,arg6:string):Promise<string>;
export function OpenFileFolder(arg1:string,arg2:boolean):Promise<void>;
export function OpenOpenFileDialog(arg1:string):Promise<string>;
export function OpenSaveFileDialog(arg1:string,arg2:string,arg3:string):Promise<string>;
export function OpenSaveFileDialogBytes(arg1:string,arg2:string,arg3:Array<number>):Promise<string>;
@@ -48,7 +52,7 @@ export function RestartApp():Promise<void>;
export function SaveJson(arg1:string,arg2:any):Promise<void>;
export function StartServer(arg1:string,arg2:number,arg3:string,arg4:boolean):Promise<string>;
export function StartServer(arg1:string,arg2:number,arg3:string,arg4:boolean,arg5:boolean):Promise<string>;
export function StartWebGPUServer(arg1:number,arg2:string):Promise<string>;

View File

@@ -54,6 +54,10 @@ export function InstallPyDep(arg1, arg2) {
return window['go']['backend_golang']['App']['InstallPyDep'](arg1, arg2);
}
export function IsPortAvailable(arg1) {
return window['go']['backend_golang']['App']['IsPortAvailable'](arg1);
}
export function ListDirFiles(arg1) {
return window['go']['backend_golang']['App']['ListDirFiles'](arg1);
}
@@ -66,6 +70,10 @@ export function OpenFileFolder(arg1, arg2) {
return window['go']['backend_golang']['App']['OpenFileFolder'](arg1, arg2);
}
export function OpenOpenFileDialog(arg1) {
return window['go']['backend_golang']['App']['OpenOpenFileDialog'](arg1);
}
export function OpenSaveFileDialog(arg1, arg2, arg3) {
return window['go']['backend_golang']['App']['OpenSaveFileDialog'](arg1, arg2, arg3);
}
@@ -94,8 +102,8 @@ export function SaveJson(arg1, arg2) {
return window['go']['backend_golang']['App']['SaveJson'](arg1, arg2);
}
export function StartServer(arg1, arg2, arg3, arg4) {
return window['go']['backend_golang']['App']['StartServer'](arg1, arg2, arg3, arg4);
export function StartServer(arg1, arg2, arg3, arg4, arg5) {
return window['go']['backend_golang']['App']['StartServer'](arg1, arg2, arg3, arg4, arg5);
}
export function StartWebGPUServer(arg1, arg2) {

main.go
View File

@@ -27,6 +27,7 @@ func NewFileLoader() *FileLoader {
func (h *FileLoader) ServeHTTP(res http.ResponseWriter, req *http.Request) {
var err error
requestedFilename := strings.TrimPrefix(req.URL.Path, "/")
requestedFilename = strings.TrimPrefix(requestedFilename, "=>") // absolute path
println("Requesting file:", requestedFilename)
fileData, err := os.ReadFile(requestedFilename)
if err != nil {
@@ -43,7 +44,7 @@ var assets embed.FS
//go:embed all:py310/Lib/site-packages/cyac
var cyac embed.FS
//go:embed all:py310/Lib/site-packages/cyac-1.7.dist-info
//go:embed all:py310/Lib/site-packages/cyac-1.9.dist-info
var cyacInfo embed.FS
//go:embed backend-python
@@ -66,6 +67,8 @@ var components embed.FS
func main() {
if buildInfo, ok := debug.ReadBuildInfo(); !ok || strings.Contains(buildInfo.String(), "-ldflags") {
backend.CopyEmbed(assets)
os.RemoveAll("./py310/Lib/site-packages/cyac-1.7.dist-info")
backend.CopyEmbed(cyac)
backend.CopyEmbed(cyacInfo)
backend.CopyEmbed(py)
@@ -93,11 +96,11 @@ func main() {
// Create application with options
err = wails.Run(&options.App{
Title: "RWKV-Runner",
Width: 1024,
Height: 680,
MinWidth: 375,
MinHeight: 640,
Title: "RWKV-Runner",
Width: 1024,
Height: 680,
MinWidth: 375,
MinHeight: 640,
EnableDefaultContextMenu: true,
Windows: &windows.Options{
ZoomFactor: zoomFactor,

View File

@@ -1,5 +1,5 @@
{
"version": "1.4.7",
"version": "1.5.0",
"introduction": {
"en": "RWKV is an open-source, commercially usable large language model with high flexibility and great potential for development.\n### About This Tool\nThis tool aims to lower the barrier of entry for using large language models, making it accessible to everyone. It provides fully automated dependency and model management. You simply need to click and run, following the instructions, to deploy a local large language model. The tool itself is very compact and only requires a single executable file for one-click deployment.\nAdditionally, this tool offers an interface that is fully compatible with the OpenAI API. This means you can use any ChatGPT client as a client for RWKV, enabling capability expansion beyond just chat functionality.\n### Preset Configuration Rules at the Bottom\nThis tool comes with a series of preset configurations to reduce complexity. The naming rules for each configuration represent the following in order: device - required VRAM/memory - model size - model language.\nFor example, \"GPU-8G-3B-EN\" indicates that this configuration is for a graphics card with 8GB of VRAM, a model size of 3 billion parameters, and it uses an English language model.\nLarger model sizes have higher performance and VRAM requirements. Among configurations with the same model size, those with higher VRAM usage will have faster runtime.\nFor example, if you have 12GB of VRAM but running the \"GPU-12G-7B-EN\" configuration is slow, you can downgrade to \"GPU-8G-3B-EN\" for a significant speed improvement.\n### About RWKV\nRWKV is an RNN with Transformer-level LLM performance, which can also be directly trained like a GPT transformer (parallelizable). And it's 100% attention-free. You only need the hidden state at position t to compute the state at position t+1. You can use the \"GPT\" mode to quickly compute the hidden state for the \"RNN\" mode.<br/>So it's combining the best of RNN and transformer - great performance, fast inference, saves VRAM, fast training, \"infinite\" ctx_len, and free sentence embedding (using the final hidden state).",
"zh": "RWKV是一个开源且允许商用的大语言模型灵活性很高且极具发展潜力。\n### 关于本工具\n本工具旨在降低大语言模型的使用门槛做到人人可用本工具提供了全自动化的依赖和模型管理你只需要直接点击运行跟随引导即可完成本地大语言模型的部署工具本身体积极小只需要一个exe即可完成一键部署。\n此外本工具提供了与OpenAI API完全兼容的接口这意味着你可以把任意ChatGPT客户端用作RWKV的客户端实现能力拓展而不局限于聊天。\n### 底部的预设配置规则\n本工具内置了一系列预设配置以降低使用难度每个配置名的规则依次代表着设备-所需显存/内存-模型规模-模型语言。\n例如GPU-8G-3B-CN表示该配置用于显卡需要8G显存模型规模为30亿参数使用的是中文模型。\n模型规模越大性能要求越高显存要求也越高而同样模型规模的配置中显存占用越高的运行速度越快。\n例如当你有12G显存但运行GPU-12G-7B-CN配置速度比较慢可降级成GPU-8G-3B-CN将会大幅提速。\n### 关于RWKV\nRWKV是具有Transformer级别LLM性能的RNN也可以像GPT Transformer一样直接进行训练可并行化。而且它是100% attention-free的。你只需在位置t处获得隐藏状态即可计算位置t + 1处的状态。你可以使用“GPT”模式快速计算用于“RNN”模式的隐藏状态。\n因此它将RNN和Transformer的优点结合起来 - 高性能、快速推理、节省显存、快速训练、“无限”上下文长度以及免费的语句嵌入(使用最终隐藏状态)。"
@@ -15,6 +15,19 @@
}
],
"models": [
{
"name": "RWKV-5-World-1B5-v2-20231025-ctx4096.pth",
"desc": {
"en": "RWKV-5 Global Languages 1.5B v2",
"zh": "RWKV-5 全球语言 1.5B v2",
"ja": "RWKV-5 グローバル言語 1.5B v2"
},
"size": 3155590194,
"SHA256": "5a89f56be7f82ab9dd0835af9a6838f788477471616c02f7b041e3aea0c57435",
"lastUpdated": "2023-10-26T05:49:30",
"url": "https://huggingface.co/BlinkDL/rwkv-5-world/blob/main/RWKV-5-World-1B5-v2-20231025-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-5-world/resolve/main/RWKV-5-World-1B5-v2-20231025-ctx4096.pth"
},
{
"name": "RWKV-4-World-CHNtuned-0.1B-v1-20230617-ctx4096.pth",
"desc": {
@@ -507,8 +520,8 @@
{
"name": "RWKV-4-Raven-1B5-v11-Eng99%-Other1%-20230425-ctx4096.pth",
"desc": {
"en": "English 1.5B v11",
"zh": "英文 1.5B v11"
"en": "English 1.5B v11 (Old Model)",
"zh": "英文 1.5B v11 (旧模型)"
},
"size": 3030279730,
"SHA256": "4ac715aecc5b1c90e8e37eebb8163392699066ec23b18144416e91cb4e78675a",
@@ -520,8 +533,8 @@
{
"name": "RWKV-4-Raven-1B5-v12-Eng98%-Other2%-20230520-ctx4096.pth",
"desc": {
"en": "English 1B5 v12",
"zh": "英文 1B5 v12"
"en": "English 1B5 v12 (Old Model)",
"zh": "英文 1B5 v12 (旧模型)"
},
"size": 3030279730,
"SHA256": "6bbbffb3ee2372dfa9ef49c599e9a2bc0a01b94b6a264ba9bf5bd524fc38f723",
@@ -532,8 +545,8 @@
{
"name": "RWKV-4-Raven-3B-v11-Eng99%-Other1%-20230425-ctx4096.pth",
"desc": {
"en": "English 3B v11",
"zh": "英文 3B v11"
"en": "English 3B v11 (Old Model)",
"zh": "英文 3B v11 (旧模型)"
},
"size": 5969345074,
"SHA256": "982ad3d794efe58992db23c6d694c57a9e62d54718264ec6d6acfae5eb0eea12",
@@ -545,8 +558,8 @@
{
"name": "RWKV-4-Raven-3B-v12-Eng98%-Other2%-20230520-ctx4096.pth",
"desc": {
"en": "English 3B v12",
"zh": "英文 3B v12"
"en": "English 3B v12 (Old Model)",
"zh": "英文 3B v12 (旧模型)"
},
"size": 5969345074,
"SHA256": "1eea1845acfe9729dfdaec66a8d1aeb91a1287d94bebbca5529c13c050540b33",
@@ -557,8 +570,8 @@
{
"name": "RWKV-4-Raven-3B-v11-Eng49%-Chn49%-Jpn1%-Other1%-20230429-ctx4096.pth",
"desc": {
"en": "Chinese 3B v11",
"zh": "中文 3B v11"
"en": "Chinese 3B v11 (Old Model)",
"zh": "中文 3B v11 (旧模型)"
},
"size": 5969345074,
"SHA256": "af12300d9875e0e166c23d6e9b20928db435073060bf1d36f874060de92ada98",
@@ -570,8 +583,8 @@
{
"name": "RWKV-4-Raven-3B-v12-Eng49%-Chn49%-Jpn1%-Other1%-20230527-ctx4096.pth",
"desc": {
"en": "Chinese 3B v12",
"zh": "中文 3B v12"
"en": "Chinese 3B v12 (Old Model)",
"zh": "中文 3B v12 (旧模型)"
},
"size": 5969345330,
"SHA256": "c0abb4b745ba3523b9d8b3e1293110867ee55b1ef3dc8c122212f78396755721",
@@ -582,8 +595,8 @@
{
"name": "RWKV-4-Raven-7B-v11x-Eng99%-Other1%-20230429-ctx8192.pth",
"desc": {
"en": "English 7B v11x",
"zh": "英文 7B v11x"
"en": "English 7B v11x (Old Model)",
"zh": "英文 7B v11x (旧模型)"
},
"size": 14785389874,
"SHA256": "f00d5c75b453f2b20ad875fb5a324564c34024eea25a015f5eb441e4f364c3fe",
@@ -595,8 +608,8 @@
{
"name": "RWKV-4-Raven-7B-v12-Eng98%-Other2%-20230521-ctx8192.pth",
"desc": {
"en": "English 7B v12",
"zh": "英文 7B v12"
"en": "English 7B v12 (Old Model)",
"zh": "英文 7B v12 (旧模型)"
},
"size": 14785389618,
"SHA256": "5a725eaeb9e09b724de6c97e6845dd0283097c7920acd05b46852ab7afa9ec32",
@@ -607,8 +620,8 @@
{
"name": "RWKV-4-Raven-7B-v10x-Eng49%-Chn50%-Other1%-20230423-ctx4096.pth",
"desc": {
"en": "Chinese 7B v10x",
"zh": "中文 7B v10x"
"en": "Chinese 7B v10x (Old Model)",
"zh": "中文 7B v10x (旧模型)"
},
"size": 14785389874,
"SHA256": "7aaf40bb3d440a949db3a146b0a5bbb3e925942b496775b51f5630a582fc236d",
@@ -620,8 +633,8 @@
{
"name": "RWKV-4-Raven-7B-v11-Eng49%-Chn49%-Jpn1%-Other1%-20230430-ctx8192.pth",
"desc": {
"en": "Chinese 7B v11",
"zh": "中文 7B v11"
"en": "Chinese 7B v11 (Old Model)",
"zh": "中文 7B v11 (旧模型)"
},
"size": 14785389874,
"SHA256": "9e67a74964abcb4463711e447ddf47735561d7b40592d2d02b29d2e796a4fd14",
@@ -633,8 +646,8 @@
{
"name": "RWKV-4-Raven-7B-v12-Eng49%-Chn49%-Jpn1%-Other1%-20230530-ctx8192.pth",
"desc": {
"en": "Chinese 7B v12",
"zh": "中文 7B v12"
"en": "Chinese 7B v12 (Old Model)",
"zh": "中文 7B v12 (旧模型)"
},
"size": 14785389874,
"SHA256": "6d4a089ff36d5d9d96b669d425fc5e4e3959cab426535b52e2364df08f58b407",
@@ -645,8 +658,8 @@
{
"name": "RWKV-4-Raven-14B-v11x-Eng99%-Other1%-20230501-ctx8192.pth",
"desc": {
"en": "English 14B v11x",
"zh": "英文 14B v11x"
"en": "English 14B v11x (Old Model)",
"zh": "英文 14B v11x (旧模型)"
},
"size": 28297309490,
"SHA256": "c4bc72406c3c62613e8e2592e8d07ac045f8a88381c728f8eb60af890e299f4d",
@@ -658,8 +671,8 @@
{
"name": "RWKV-4-Raven-14B-v12-Eng98%-Other2%-20230523-ctx8192.pth",
"desc": {
"en": "English 14B v12",
"zh": "英文 14B v12"
"en": "English 14B v12 (Old Model)",
"zh": "英文 14B v12 (旧模型)"
},
"size": 28297309490,
"SHA256": "1193b5a9ceab572e4dbb9ed1d798eab7bf4793d18904d08bd4bf183579338ae7",
@@ -692,6 +705,32 @@
"lastUpdated": "2023-07-17T15:02:08",
"url": "https://huggingface.co/BlinkDL/rwkv-4-music/blob/main/RWKV-4-MIDI-560M-v1-20230717-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-music/resolve/main/RWKV-4-MIDI-560M-v1-20230717-ctx4096.pth"
},
{
"name": "RWKV-5-MIDI-120M-v1-20230728-ctx4096.pth",
"desc": {
"en": "RWKV-5 Music 120M v1",
"zh": "RWKV-5 作曲 120M v1",
"ja": "RWKV-5 作曲 120M v1"
},
"size": 245070513,
"SHA256": "c43d4a2ee7a71a331d05d6cd818dd75f7c48c716e4b98c58e4d27231614b0144",
"lastUpdated": "2023-07-29T02:17:27",
"url": "https://huggingface.co/BlinkDL/rwkv-5-music/blob/main/RWKV-5-MIDI-120M-v1-20230728-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-5-music/resolve/main/RWKV-5-MIDI-120M-v1-20230728-ctx4096.pth"
},
{
"name": "RWKV-5-MIDI-560M-v1-20230902-ctx4096.pth",
"desc": {
"en": "RWKV-5 Music 560M v1",
"zh": "RWKV-5 作曲 560M v1",
"ja": "RWKV-5 作曲 560M v1"
},
"size": 1179631346,
"SHA256": "cb4f2fd8956ca8496d6b2e33bff290c2047759b6fe74884903dbf9c73a11cc77",
"lastUpdated": "2023-09-03T04:48:41",
"url": "https://huggingface.co/BlinkDL/rwkv-5-music/blob/main/RWKV-5-MIDI-560M-v1-20230902-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-5-music/resolve/main/RWKV-5-MIDI-560M-v1-20230902-ctx4096.pth"
}
]
}