Compare commits
56 Commits
Commit SHA1s:

b8712e0b89, 37dda4333d, 64826b9af7, 47b0c35441, 1dcda47013, 1f81a1e5a8, 35e92d2aef, 0d99e5549e,
fed1594ddc, 14b90bb36b, f86b7f1f08, 54355d5a7a, ff7306349a, 77df56cddc, 97ae139de5, afd15ef2c5,
6c73eae9f6, 7078f47f72, d43954cc88, c87de93498, 810843a5ab, f7cbd2c803, faf1852012, 43cfab5d4b,
627a20936d, 1d7f19ffaf, d80565d780, d7ba88953d, 30e1c3171e, 1f058b16ac, 4a192f4057, 0331bf47f7,
2acdaa96b2, 1d200d53ab, df9e1f408e, 4a18696686, 46b3b285f5, 1d6aeab9dc, ab110ba30b, 2f0fa4ee56,
0005816c1d, f70672e5a0, ee057071a5, 4f26404002, df7652856a, de755463e3, 2fe98d9a2c, 2e42039607,
71abd357a4, 68228a4552, 79851433f8, bd4de12e05, c0aa6aaba9, d7abe5f0d1, 5e5e1e9651, f8388a0527
.github/workflows/release.yml (vendored, 18 lines changed)
@@ -63,10 +63,10 @@ jobs:
Expand-Archive ./python-3.10.11-embed-amd64.zip -DestinationPath ./py310
$content=Get-Content "./py310/python310._pth"; $content | ForEach-Object {if ($_.ReadCount -eq 3) {"Lib\\site-packages"} else {$_}} | Set-Content ./py310/python310._pth
./py310/python ./backend-python/get-pip.py
./py310/python -m pip install Cython==0.29.36
./py310/python -m pip install Cython==3.0.4
Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../include" -Destination "py310/include" -Recurse
Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../libs" -Destination "py310/libs" -Recurse
./py310/python -m pip install cyac==1.7
./py310/python -m pip install cyac==1.9
git clone https://github.com/josStorer/ai00_rwkv_server --depth=1
cd ai00_rwkv_server
cargo build --release

@@ -107,11 +107,10 @@ jobs:
mv ./target/x86_64-unknown-linux-gnu/release/ai00_server ../backend-rust/webgpu_server
cd ..
go install github.com/wailsapp/wails/v2/cmd/wails@latest
rm -rf ./backend-python/wkv_cuda_utils
rm ./backend-python/rwkv_pip/wkv_cuda.pyd
rm ./backend-python/rwkv_pip/rwkv5.pyd
rm ./backend-python/rwkv_pip/beta/wkv_cuda.pyd
rm ./backend-python/get-pip.py
sed -i '1,2d' ./backend-golang/wsl_not_windows.go
rm ./backend-golang/wsl.go
mv ./backend-golang/wsl_not_windows.go ./backend-golang/wsl.go
make
mv build/bin/RWKV-Runner build/bin/RWKV-Runner_linux_x64

@@ -139,11 +138,10 @@ jobs:
mv ./target/release/ai00_server ../backend-rust/webgpu_server
cd ..
go install github.com/wailsapp/wails/v2/cmd/wails@latest
rm -rf ./backend-python/wkv_cuda_utils
rm ./backend-python/rwkv_pip/wkv_cuda.pyd
rm ./backend-python/rwkv_pip/rwkv5.pyd
rm ./backend-python/rwkv_pip/beta/wkv_cuda.pyd
rm ./backend-python/get-pip.py
sed -i '' '1,2d' ./backend-golang/wsl_not_windows.go
rm ./backend-golang/wsl.go
mv ./backend-golang/wsl_not_windows.go ./backend-golang/wsl.go
make
cp build/darwin/Readme_Install.txt build/bin/Readme_Install.txt
cp build/bin/RWKV-Runner.app/Contents/MacOS/RWKV-Runner build/bin/RWKV-Runner_darwin_universal
.gitignore (vendored, 1 line changed)

@@ -18,6 +18,7 @@ __pycache__
/cmd-helper.bat
/install-py-dep.bat
/backend-python/wkv_cuda
/backend-python/rwkv5
*.exe
*.old
.DS_Store
@@ -1,12 +1,29 @@
## Changes

- frontend adaptation for api params (user_name, assistant_name, presystem)
- custom tokenizer (#77)
- enable right-click context menu
- upgrade cuda-beta
- revert(2d5456): refresh local models when download complete (for macOS)
- improve ui desc
- chore

### Features

- chat attachment is now related to single message (Experimental)
- port occupied detection

### Upgrades

- upgrade to rwkv 0.8.20

### Improvements

- improve the compatibility between frontend presets and chatgpt api
- improve memory usage of state cache

### Chores

- update ngrok_connect
- python38 compatibility
- adjust startup process

### Fixes

- fix log encoding error
- fix stop button status of Chat page

## Install
@@ -47,7 +47,9 @@ English | [简体中文](README_ZH.md) | [日本語](README_JA.md)
</div>

#### Default configs has enabled custom CUDA kernel acceleration, which is much faster and consumes much less VRAM. If you encounter possible compatibility issues, go to the Configs page and turn off `Use Custom CUDA kernel to Accelerate`.
#### Tip: You can deploy [backend-python](./backend-python/) on a server and use this program as a client only. Fill in your server address in the Settings `API URL`.

#### Default configs has enabled custom CUDA kernel acceleration, which is much faster and consumes much less VRAM. If you encounter possible compatibility issues (output garbled), go to the Configs page and turn off `Use Custom CUDA kernel to Accelerate`, or try to upgrade your gpu driver.

#### If Windows Defender claims this is a virus, you can try downloading [v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip) and letting it update automatically to the latest version, or add it to the trusted list (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`).
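The README tip above describes a client-only setup. Below is a minimal sketch of what that looks like from the client side, assuming a backend-python instance is reachable at a hypothetical address and exposes its OpenAI-style chat completions route; the exact host, port, and path are assumptions for illustration, use whatever you enter as the `API URL` in Settings.

```python
import requests

API_URL = "http://your-server:8000"  # hypothetical server address, matches the Settings `API URL`

resp = requests.post(
    f"{API_URL}/chat/completions",  # assumed OpenAI-compatible path
    json={
        "model": "rwkv",
        "messages": [{"role": "user", "content": "hello"}],
        "stream": False,
    },
    timeout=60,
)
print(resp.json())
```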
@@ -47,7 +47,9 @@
</div>

#### デフォルトの設定はカスタム CUDA カーネルアクセラレーションを有効にしています。互換性の問題が発生する可能性がある場合は、コンフィグページに移動し、`Use Custom CUDA kernel to Accelerate` をオフにしてください。
#### ヒント:サーバーに[backend-python](./backend-python/)をデプロイし、このプログラムをクライアントとして使用することができます。設定された`API URL`にサーバーアドレスを入力してください。

#### デフォルトの設定はカスタム CUDA カーネルアクセラレーションを有効にしています。互換性の問題 (文字化けを出力する) が発生する可能性がある場合は、コンフィグページに移動し、`Use Custom CUDA kernel to Accelerate` をオフにしてください、あるいは、GPUドライバーをアップグレードしてみてください。

#### Windows Defender がこれをウイルスだと主張する場合は、[v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip) をダウンロードして最新版に自動更新させるか、信頼済みリストに追加してみてください (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`)。
@@ -46,7 +46,9 @@ API兼容的接口,这意味着一切ChatGPT客户端都是RWKV客户端。
</div>

#### 预设配置已经开启自定义CUDA算子加速,速度更快,且显存消耗更少。如果你遇到可能的兼容性问题,前往配置页面,关闭`使用自定义CUDA算子加速`
#### 小贴士:你可以在服务器部署[backend-python](./backend-python/),然后将此程序仅用作客户端,在设置的`API URL`中填入你的服务器地址

#### 预设配置已经开启自定义CUDA算子加速,速度更快,且显存消耗更少。如果你遇到可能的兼容性(输出乱码)问题,前往配置页面,关闭`使用自定义CUDA算子加速`,或更新你的显卡驱动

#### 如果Windows Defender说这是一个病毒,你可以尝试下载[v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip),然后让其自动更新到最新版,或添加信任 (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`)
@@ -53,12 +53,12 @@ type FileInfo struct {
	ModTime string `json:"modTime"`
}

func (a *App) ReadFileInfo(fileName string) (FileInfo, error) {
func (a *App) ReadFileInfo(fileName string) (*FileInfo, error) {
	info, err := os.Stat(a.exDir + fileName)
	if err != nil {
		return FileInfo{}, err
		return nil, err
	}
	return FileInfo{
	return &FileInfo{
		Name: info.Name(),
		Size: info.Size(),
		IsDir: info.IsDir(),

@@ -145,6 +145,20 @@ func (a *App) OpenSaveFileDialogBytes(filterPattern string, defaultFileName stri
	return path, nil
}

// Only return the path of the selected file, because communication between frontend and backend is slow. Use AssetServer Handler to read the file.
func (a *App) OpenOpenFileDialog(filterPattern string) (string, error) {
	path, err := wruntime.OpenFileDialog(a.ctx, wruntime.OpenDialogOptions{
		Filters: []wruntime.FileFilter{{Pattern: filterPattern}},
	})
	if err != nil {
		return "", err
	}
	if path == "" {
		return "", nil
	}
	return path, nil
}

func (a *App) OpenFileFolder(path string, relative bool) error {
	var absPath string
	var err error
@@ -28,8 +28,7 @@ func (a *App) StartServer(python string, port int, host string, rwkvBeta bool) (

func (a *App) StartWebGPUServer(port int, host string) (string, error) {
	args := []string{"./backend-rust/webgpu_server"}
	args = append(args, "-a", "0", "-t", "backend-rust/assets/rwkv_vocab_v20230424.json",
		"--port", strconv.Itoa(port), "--ip", host)
	args = append(args, "--port", strconv.Itoa(port), "--ip", host)
	return Cmd(args...)
}
@@ -149,9 +148,9 @@ func (a *App) InstallPyDep(python string, cnMirror bool) (string, error) {

	if runtime.GOOS == "windows" {
		ChangeFileLine("./py310/python310._pth", 3, "Lib\\site-packages")
		installScript := python + " ./backend-python/get-pip.py -i https://pypi.tuna.tsinghua.edu.cn/simple\n" +
			python + " -m pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117\n" +
			python + " -m pip install -r ./backend-python/requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple\n" +
		installScript := python + " ./backend-python/get-pip.py -i https://pypi.tuna.tsinghua.edu.cn/simple --no-warn-script-location\n" +
			python + " -m pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117 --no-warn-script-location\n" +
			python + " -m pip install -r ./backend-python/requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple --no-warn-script-location\n" +
			"exit"
		if !cnMirror {
			installScript = strings.Replace(installScript, " -i https://pypi.tuna.tsinghua.edu.cn/simple", "", -1)
@@ -5,12 +5,15 @@ import (
	"bufio"
	"embed"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"net"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
)

@@ -205,3 +208,12 @@ func Unzip(source, destination string) error {
	}
	return nil
}

func (a *App) IsPortAvailable(port int) bool {
	l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%s", strconv.Itoa(port)))
	if err != nil {
		return false
	}
	defer l.Close()
	return true
}
backend-python/convert_safetensors.py (vendored, 32 lines changed)

@@ -18,20 +18,31 @@ parser.add_argument(
args = parser.parse_args()


def convert_file(
    pt_filename: str,
    sf_filename: str,
):
def rename_key(rename, name):
    for k, v in rename.items():
        if k in name:
            name = name.replace(k, v)
    return name


def convert_file(pt_filename: str, sf_filename: str, transpose_names=[], rename={}):
    loaded = torch.load(pt_filename, map_location="cpu")
    if "state_dict" in loaded:
        loaded = loaded["state_dict"]

    loaded = {k: v.clone().half() for k, v in loaded.items()}
    for k, v in loaded.items():
        print(f"{k}\t{v.shape}\t{v.dtype}")
    # for k, v in loaded.items():
    #     print(f'{k}\t{v.shape}\t{v.dtype}')

    # For tensors to be contiguous
    loaded = {k: v.contiguous() for k, v in loaded.items()}
    for k, v in loaded.items():
        for transpose_name in transpose_names:
            if transpose_name in k:
                loaded[k] = v.transpose(0, 1)
    loaded = {rename_key(rename, k).lower(): v.contiguous() for k, v in loaded.items()}

    for k, v in loaded.items():
        print(f"{k}\t{v.shape}\t{v.dtype}")

    dirname = os.path.dirname(sf_filename)
    os.makedirs(dirname, exist_ok=True)

@@ -46,7 +57,12 @@ def convert_file(

if __name__ == "__main__":
    try:
        convert_file(args.input, args.output)
        convert_file(
            args.input,
            args.output,
            ["lora_A"],
            {"time_faaaa": "time_first", "lora_A": "lora.0", "lora_B": "lora.1"},
        )
        print(f"Saved to {args.output}")
    except Exception as e:
        with open("error.txt", "w") as f:
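The new `convert_file` signature above takes `transpose_names` and a `rename` map. A small self-contained sketch of what that renaming does is below; the example key names are invented for illustration and are not taken from a real checkpoint.

```python
# Illustrative only: shows how the rename map and the .lower() call in
# convert_file above rewrite checkpoint keys before saving to safetensors.
def rename_key(rename, name):
    for k, v in rename.items():
        if k in name:
            name = name.replace(k, v)
    return name

rename = {"time_faaaa": "time_first", "lora_A": "lora.0", "lora_B": "lora.1"}

for key in [
    "blocks.0.att.time_faaaa",   # hypothetical RWKV v5 key
    "blocks.0.att.key.lora_A",   # hypothetical LoRA key (also transposed, since "lora_A" is in transpose_names)
    "blocks.0.att.key.lora_B",
]:
    print(key, "->", rename_key(rename, key).lower())

# blocks.0.att.time_faaaa -> blocks.0.att.time_first
# blocks.0.att.key.lora_A -> blocks.0.att.key.lora.0
# blocks.0.att.key.lora_B -> blocks.0.att.key.lora.1
```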
@@ -1,3 +1,5 @@
import multipart
import fitz
import safetensors
import midi2audio
import mido

@@ -9,6 +11,7 @@ import GPUtil

import torch
import rwkv
import langchain
import numpy
import tokenizers
import fastapi
@@ -2,70 +2,8 @@ import time

start_time = time.time()

import os
import sys
import argparse
from typing import Sequence

sys.path.append(os.path.dirname(os.path.realpath(__file__)))

import psutil
from fastapi import Depends, FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn

from utils.rwkv import *
from utils.torch import *
from utils.ngrok import *
from utils.log import log_middleware
from routes import completion, config, state_cache, midi, misc
import global_var

app = FastAPI(dependencies=[Depends(log_middleware)])

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.include_router(completion.router)
app.include_router(config.router)
app.include_router(midi.router)
app.include_router(misc.router)
app.include_router(state_cache.router)


@app.on_event("startup")
def init():
    global_var.init()
    cmd_params = os.environ["RWKV_RUNNER_PARAMS"]
    global_var.set(
        global_var.Args, get_args(cmd_params.split(" ") if cmd_params else None)
    )

    state_cache.init()

    set_torch()

    if os.environ.get("ngrok_token") is not None:
        ngrok_connect()


@app.get("/", tags=["Root"])
def read_root():
    return {"Hello": "World!"}


@app.post("/exit", tags=["Root"])
def exit():
    parent_pid = os.getpid()
    parent = psutil.Process(parent_pid)
    for child in parent.children(recursive=True):
        child.kill()
    parent.kill()
from typing import Union, Sequence


def get_args(args: Union[Sequence[str], None] = None):

@@ -96,6 +34,81 @@ def get_args(args: Union[Sequence[str], None] = None):

if __name__ == "__main__":
    args = get_args()


import os
import sys

sys.path.append(os.path.dirname(os.path.realpath(__file__)))

import psutil
from contextlib import asynccontextmanager
from fastapi import Depends, FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn

from utils.rwkv import *
from utils.torch import *
from utils.ngrok import *
from utils.log import log_middleware
from routes import completion, config, state_cache, midi, misc, file_process
import global_var


@asynccontextmanager
async def lifespan(app: FastAPI):
    init()
    yield


app = FastAPI(lifespan=lifespan, dependencies=[Depends(log_middleware)])

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.include_router(completion.router)
app.include_router(config.router)
app.include_router(midi.router)
app.include_router(file_process.router)
app.include_router(misc.router)
app.include_router(state_cache.router)


def init():
    global_var.init()
    cmd_params = os.environ["RWKV_RUNNER_PARAMS"]
    global_var.set(
        global_var.Args, get_args(cmd_params.split(" ") if cmd_params else None)
    )

    state_cache.init()

    set_torch()

    if os.environ.get("ngrok_token") is not None:
        ngrok_connect()


@app.get("/", tags=["Root"])
def read_root():
    return {"Hello": "World!"}


@app.post("/exit", tags=["Root"])
def exit():
    parent_pid = os.getpid()
    parent = psutil.Process(parent_pid)
    for child in parent.children(recursive=True):
        child.kill()
    parent.kill()


if __name__ == "__main__":
    os.environ["RWKV_RUNNER_PARAMS"] = " ".join(sys.argv[1:])
    print("--- %s seconds ---" % (time.time() - start_time))
    uvicorn.run("main:app", port=args.port, host=args.host, workers=1)
@@ -43,16 +43,18 @@ class ChatCompletionBody(ModelConfigBody):
    model: Union[str, None] = "rwkv"
    stream: bool = False
    stop: Union[str, List[str], None] = default_stop
    user_name: Union[str, None] = Field(None, description="Internal user name")
    user_name: Union[str, None] = Field(
        None, description="Internal user name", min_length=1
    )
    assistant_name: Union[str, None] = Field(
        None, description="Internal assistant name"
        None, description="Internal assistant name", min_length=1
    )
    presystem: bool = Field(
        True, description="Whether to insert default system prompt at the beginning"
    )

    class Config:
        schema_extra = {
        json_schema_extra = {
            "example": {
                "messages": [
                    {"role": Role.User.value, "content": "hello", "raw": False}

@@ -79,7 +81,7 @@ class CompletionBody(ModelConfigBody):
    stop: Union[str, List[str], None] = None

    class Config:
        schema_extra = {
        json_schema_extra = {
            "example": {
                "prompt": "The following is an epic science fiction masterpiece that is immortalized, "
                + "with delicate descriptions and grand depictions of interstellar civilization wars.\nChapter 1.\n",

@@ -317,11 +319,13 @@ The following is a coherent verbose detailed conversation between a girl named {
        completion_text += append_message + "\n\n"
    completion_text += f"{bot}{interface}"

    user_code = model.pipeline.decode([model.pipeline.encode(user)[0]])
    bot_code = model.pipeline.decode([model.pipeline.encode(bot)[0]])
    if type(body.stop) == str:
        body.stop = [body.stop, f"\n\n{user}", f"\n\n{bot}"]
        body.stop = [body.stop, f"\n\n{user_code}", f"\n\n{bot_code}"]
    elif type(body.stop) == list:
        body.stop.append(f"\n\n{user}")
        body.stop.append(f"\n\n{bot}")
        body.stop.append(f"\n\n{user_code}")
        body.stop.append(f"\n\n{bot_code}")
    elif body.stop is None:
        body.stop = default_stop

@@ -373,7 +377,7 @@ class EmbeddingsBody(BaseModel):
    fast_mode: bool = False

    class Config:
        schema_extra = {
        json_schema_extra = {
            "example": {
                "input": "a big apple",
                "model": "rwkv",
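The `user_code`/`bot_code` change above derives the stop strings from the first token of each role name rather than from the raw strings. Below is a self-contained sketch of the idea, using an invented toy tokenizer that stands in for `model.pipeline` (which exposes `encode(str) -> list[int]` and `decode(list[int]) -> str`); the vocabulary and names are purely illustrative.

```python
# Sketch only: the stop string is built from the *first token* of each role
# name, so generation stops as soon as the model starts a "\n\nAlice" turn,
# even if "Alice" is split across several tokens.
class ToyPipeline:
    vocab = {"Al": 1, "ice": 2, "Bob": 3}

    def encode(self, text):
        # pretend BPE: "Alice" -> ["Al", "ice"], "Bob" -> ["Bob"]
        return [self.vocab[p] for p in (["Al", "ice"] if text == "Alice" else [text])]

    def decode(self, tokens):
        inv = {v: k for k, v in self.vocab.items()}
        return "".join(inv[t] for t in tokens)

pipeline = ToyPipeline()
user, bot = "Bob", "Alice"

user_code = pipeline.decode([pipeline.encode(user)[0]])  # "Bob"
bot_code = pipeline.decode([pipeline.encode(bot)[0]])    # "Al"

stop = [f"\n\n{user_code}", f"\n\n{bot_code}"]
print(stop)  # ['\n\nBob', '\n\nAl']
```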
@@ -10,22 +10,6 @@ import global_var
router = APIRouter()


def get_tokens_path(model_path: str):
    model_path = model_path.lower()
    tokenizer_dir = f"{pathlib.Path(__file__).parent.parent.resolve()}/rwkv_pip/"

    default_tokens_path = tokenizer_dir + "20B_tokenizer.json"

    if "raven" in model_path:
        return default_tokens_path
    elif "world" in model_path:
        return "rwkv_vocab_v20230424"
    elif "midi" in model_path:
        return tokenizer_dir + "tokenizer-midi.json"
    else:
        return default_tokens_path


class SwitchModelBody(BaseModel):
    model: str
    strategy: str

@@ -33,7 +17,7 @@ class SwitchModelBody(BaseModel):
    customCuda: bool = False

    class Config:
        schema_extra = {
        json_schema_extra = {
            "example": {
                "model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
                "strategy": "cuda fp16",

@@ -67,25 +51,10 @@ def switch_model(body: SwitchModelBody, response: Response, request: Request):
    os.environ["RWKV_CUDA_ON"] = "1" if body.customCuda else "0"

    global_var.set(global_var.Model_Status, global_var.ModelStatus.Loading)
    tokenizer = (
        get_tokens_path(body.model)
        if body.tokenizer is None or body.tokenizer == ""
        else body.tokenizer
    )
    try:
        global_var.set(
            global_var.Model,
            TextRWKV(
                model=body.model,
                strategy=body.strategy,
                tokens_path=tokenizer,
            )
            if "midi" not in body.model.lower()
            else MusicRWKV(
                model=body.model,
                strategy=body.strategy,
                tokens_path=tokenizer,
            ),
            RWKV(model=body.model, strategy=body.strategy, tokenizer=body.tokenizer),
        )
    except Exception as e:
        print(e)
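The `get_tokens_path` removal above goes together with the `tokenizer` value on `SwitchModelBody` (the custom tokenizer item in the release notes): the value is now passed straight into the `RWKV` wrapper via `body.tokenizer`, and an empty value presumably falls back to the built-in choice. A hedged usage sketch follows; the server address is illustrative and the `/switch-model` route path is an assumption, since the decorator is not shown in this hunk.

```python
import requests

API_URL = "http://127.0.0.1:8000"  # hypothetical local backend address

resp = requests.post(
    f"{API_URL}/switch-model",  # assumed route path for switch_model above
    json={
        "model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
        "strategy": "cuda fp16",
        "customCuda": False,
        # Empty string: let the backend pick the tokenizer for the model;
        # otherwise pass a tokenizer file path or vocab name of your own.
        "tokenizer": "",
    },
    timeout=600,
)
print(resp.status_code)
```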
backend-python/routes/file_process.py (new file, 79 lines)

@@ -0,0 +1,79 @@
import os
from fastapi import (
    APIRouter,
    HTTPException,
    status,
    Depends,
    File,
    UploadFile,
)
from pydantic import BaseModel
from typing import Iterator

router = APIRouter()


class FileToTextParams(BaseModel):
    file_name: str
    file_encoding: str = "utf-8"


@router.post("/file-to-text", tags=["File Process"])
async def file_to_text(
    params: FileToTextParams = Depends(), file_data: UploadFile = File(...)
):
    from langchain.schema import Document
    from langchain.document_loaders.blob_loaders import Blob

    # from langchain
    def parse_text(blob: Blob) -> Iterator[Document]:
        yield Document(page_content=blob.as_string(), metadata={"source": blob.source})

    # from langchain
    def parse_pdf(blob: Blob) -> Iterator[Document]:
        import fitz

        with blob.as_bytes_io() as stream:
            doc = fitz.Document(stream=stream)

            yield from [
                Document(
                    page_content=page.get_text(),
                    metadata=dict(
                        {
                            "source": blob.source,
                            "file_path": blob.source,
                            "page": page.number,
                            "total_pages": len(doc),
                        },
                        **{
                            k: doc.metadata[k]
                            for k in doc.metadata
                            if type(doc.metadata[k]) in [str, int]
                        },
                    ),
                )
                for page in doc
            ]

    file_parsers = {".txt": parse_text, ".pdf": parse_pdf}

    file_name = file_data.filename or params.file_name
    file_ext = os.path.splitext(file_name)[-1]

    if file_ext not in file_parsers:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "file type not supported")

    try:
        pages: Iterator[Document] = file_parsers[file_ext](
            Blob.from_data(
                await file_data.read(),
                encoding=params.file_encoding,
                path=file_name,
            )
        )
        pages = list(pages)
    except Exception as e:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, f"{e}")

    return {"pages": pages}
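A hedged usage sketch for the new `/file-to-text` route above, uploading a local PDF with `requests`. The server address and file name are illustrative; `.txt` and `.pdf` are the only extensions the parser map accepts, and the query parameters come from `FileToTextParams`.

```python
import requests

API_URL = "http://127.0.0.1:8000"  # hypothetical backend address

with open("paper.pdf", "rb") as f:  # hypothetical local file
    resp = requests.post(
        f"{API_URL}/file-to-text",
        params={"file_name": "paper.pdf", "file_encoding": "utf-8"},
        files={"file_data": f},  # field name matches the UploadFile parameter
    )

# The route returns {"pages": [...]}, one Document per text blob / PDF page.
for page in resp.json()["pages"]:
    print(page["metadata"].get("page"), page["page_content"][:80])
```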
@@ -12,7 +12,7 @@ class TextToMidiBody(BaseModel):
    text: str

    class Config:
        schema_extra = {
        json_schema_extra = {
            "example": {
"text": "p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:2d:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:2d:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:1f:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:26:a g:39:a g:39:a g:3e:a g:3e:a g:42:a g:42:a pi:39:a pi:3e:a pi:42:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0",
            }

@@ -36,7 +36,7 @@ class TxtToMidiBody(BaseModel):
    midi_path: str

    class Config:
        schema_extra = {
        json_schema_extra = {
            "example": {
                "txt_path": "midi/sample.txt",
                "midi_path": "midi/sample.mid",

@@ -66,7 +66,7 @@ class MidiToWavBody(BaseModel):
    sound_font_path: str = "assets/default_sound_font.sf2"

    class Config:
        schema_extra = {
        json_schema_extra = {
            "example": {
                "midi_path": "midi/sample.mid",
                "wav_path": "midi/sample.wav",

@@ -96,7 +96,7 @@ class TextToWavBody(BaseModel):
    sound_font_path: str = "assets/default_sound_font.sf2"

    class Config:
        schema_extra = {
        json_schema_extra = {
            "example": {
"text": "p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:2d:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:2d:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:1f:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:26:a g:39:a g:39:a g:3e:a g:3e:a g:42:a g:42:a pi:39:a pi:3e:a pi:42:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0",
                "wav_name": "sample",
@@ -9,7 +9,7 @@ router = APIRouter()

trie = None
dtrie: Dict = {}
max_trie_len = 3000
max_trie_len = 300
loop_start_id = 1  # to prevent preloaded prompts from being deleted
loop_del_trie_id = loop_start_id
backend-python/rwkv_pip/beta/model.py (vendored, 1 line changed)

@@ -94,6 +94,7 @@ if os.environ.get("RWKV_CUDA_ON") == "1":
            f"{current_path}/cuda/att_one_v5.cu",
        ],
        verbose=True,
        extra_ldflags=["cublas.lib" if os.name == "nt" else ""],
        extra_cuda_cflags=[
            "-t 4",
            "-std=c++17",
BIN backend-python/rwkv_pip/beta/wkv_cuda.pyd (vendored, new file; binary not shown)
backend-python/rwkv_pip/cuda/gemm_fp16_cublas.cpp (vendored, new file, 75 lines)

@@ -0,0 +1,75 @@
|
||||
#include <cublas_v2.h>
|
||||
#include <cuda.h>
|
||||
#include <cuda_fp16.h>
|
||||
#include <cuda_runtime.h>
|
||||
#include <torch/extension.h>
|
||||
#include <c10/cuda/CUDAGuard.h>
|
||||
#include <ATen/cuda/CUDAContext.h>
|
||||
|
||||
#define CUBLAS_CHECK(condition) \
|
||||
for (cublasStatus_t _cublas_check_status = (condition); \
|
||||
_cublas_check_status != CUBLAS_STATUS_SUCCESS;) \
|
||||
throw std::runtime_error("cuBLAS error " + \
|
||||
std::to_string(_cublas_check_status) + " at " + \
|
||||
std::to_string(__LINE__));
|
||||
|
||||
#define CUDA_CHECK(condition) \
|
||||
for (cudaError_t _cuda_check_status = (condition); \
|
||||
_cuda_check_status != cudaSuccess;) \
|
||||
throw std::runtime_error( \
|
||||
"CUDA error " + std::string(cudaGetErrorString(_cuda_check_status)) + \
|
||||
" at " + std::to_string(__LINE__));
|
||||
|
||||
/*
|
||||
NOTE: blas gemm is column-major by default, but we need row-major output.
|
||||
The data of row-major, transposed matrix is exactly the same as the
|
||||
column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
|
||||
*/
|
||||
void gemm_fp16_cublas(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
|
||||
const at::cuda::OptionalCUDAGuard device_guard(device_of(a));
|
||||
const auto cuda_data_type = CUDA_R_16F;
|
||||
const auto cuda_c_data_type =
|
||||
c.dtype() == torch::kFloat32 ? CUDA_R_32F : CUDA_R_16F;
|
||||
const auto compute_type = CUDA_R_32F;
|
||||
const float sp_alpha = 1.f;
|
||||
// swap a and b, and use CUBLAS_OP_N. see the notes above
|
||||
std::swap(a, b);
|
||||
const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
|
||||
const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
|
||||
// m = (B^T).size(0) = B.size(1), and = A.size(1) after swap,
|
||||
// negative axis is used because of the existence of batch matmul.
|
||||
const int m = a.size(-1);
|
||||
const int k = a.size(-2);
|
||||
const int n = b.size(-2);
|
||||
const int cublas_lda = m;
|
||||
const int cublas_ldb = k;
|
||||
const int cublas_ldc = m;
|
||||
cublasHandle_t cublas_handle = at::cuda::getCurrentCUDABlasHandle();
|
||||
|
||||
#if CUDA_VERSION >= 11000
|
||||
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
|
||||
#else
|
||||
cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
|
||||
#endif
|
||||
const float sp_beta = 0.f;
|
||||
if (a.sizes().size() == 2 && b.sizes().size() == 2) {
|
||||
CUBLAS_CHECK(cublasGemmEx(
|
||||
cublas_handle, cublas_trans_a, cublas_trans_b, m, n, k, &sp_alpha,
|
||||
a.data_ptr(), cuda_data_type, cublas_lda, b.data_ptr(), cuda_data_type,
|
||||
cublas_ldb, &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc,
|
||||
compute_type, algo));
|
||||
} else {
|
||||
// batch matmul
|
||||
assert(a.sizes().size() == 3 && b.sizes().size() == 3);
|
||||
|
||||
const long long int cublas_stride_a = m * k;
|
||||
const long long int cublas_stride_b = k * n;
|
||||
const long long int cublas_stride_c = m * n;
|
||||
CUBLAS_CHECK(cublasGemmStridedBatchedEx(
|
||||
cublas_handle, cublas_trans_a, cublas_trans_b, m,
|
||||
n, k, &sp_alpha, a.data_ptr(), cuda_data_type, cublas_lda,
|
||||
cublas_stride_a, b.data_ptr(), cuda_data_type, cublas_ldb, cublas_stride_b,
|
||||
&sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc, cublas_stride_c,
|
||||
a.size(0), compute_type, algo));
|
||||
}
|
||||
}
|
||||
backend-python/rwkv_pip/cuda/operators.cu (vendored, new file, 246 lines)

@@ -0,0 +1,246 @@
|
||||
#include <stdio.h>
|
||||
#include <assert.h>
|
||||
#include "ATen/ATen.h"
|
||||
#include <cuda_fp16.h>
|
||||
#define MIN_VALUE (-1e38)
|
||||
typedef at::Half fp16;
|
||||
__half *cast(fp16 *ptr) {
|
||||
return reinterpret_cast<__half *>(ptr);
|
||||
}
|
||||
|
||||
template <typename F>
|
||||
__global__ void kernel_wkv_forward(const int B, const int T, const int C,
|
||||
const float *__restrict__ const _w, const float *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
|
||||
F *__restrict__ const _y, float *__restrict__ const _aa, float *__restrict__ const _bb, float *__restrict__ const _pp) {
|
||||
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
const int _b = idx / C;
|
||||
const int _c = idx % C;
|
||||
const int _offset = _b * T * C + _c;
|
||||
const int _state_offset = _b * C + _c;
|
||||
|
||||
float u = _u[_c];
|
||||
float w = _w[_c];
|
||||
const F *__restrict__ const k = _k + _offset;
|
||||
const F *__restrict__ const v = _v + _offset;
|
||||
F *__restrict__ const y = _y + _offset;
|
||||
|
||||
float aa = _aa[_state_offset];
|
||||
float bb = _bb[_state_offset];
|
||||
float pp = _pp[_state_offset];
|
||||
for (int i = 0; i < T; i++) {
|
||||
const int ii = i * C;
|
||||
const float kk = float(k[ii]);
|
||||
const float vv = float(v[ii]);
|
||||
float ww = u + kk;
|
||||
float p = max(pp, ww);
|
||||
float e1 = exp(pp - p);
|
||||
float e2 = exp(ww - p);
|
||||
y[ii] = F((e1 * aa + e2 * vv) / (e1 * bb + e2));
|
||||
ww = w + pp;
|
||||
p = max(ww, kk);
|
||||
e1 = exp(ww - p);
|
||||
e2 = exp(kk - p);
|
||||
aa = e1 * aa + e2 * vv;
|
||||
bb = e1 * bb + e2;
|
||||
pp = p;
|
||||
}
|
||||
_aa[_state_offset] = aa;
|
||||
_bb[_state_offset] = bb;
|
||||
_pp[_state_offset] = pp;
|
||||
}
|
||||
|
||||
template <typename F>
|
||||
void cuda_wkv_forward(int B, int T, int C, float *w, float *u, F *k, F *v, F *y, float *aa, float *bb, float *pp) {
|
||||
dim3 threadsPerBlock( min(C, 32) );
|
||||
assert(B * C % threadsPerBlock.x == 0);
|
||||
dim3 numBlocks(B * C / threadsPerBlock.x);
|
||||
kernel_wkv_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, aa, bb, pp);
|
||||
}
|
||||
|
||||
template void cuda_wkv_forward<fp16>(
|
||||
int B, int T, int C,
|
||||
float *w, float *u, fp16 *k, fp16 *v, fp16 *y,
|
||||
float *aa, float *bb, float *pp);
|
||||
template void cuda_wkv_forward<float>(
|
||||
int B, int T, int C,
|
||||
float *w, float *u, float *k, float *v, float *y,
|
||||
float *aa, float *bb, float *pp);
|
||||
|
||||
__global__ void kernel_mm_seq_fp32i8(
|
||||
const int B, const int N, const int M,
|
||||
const float *__restrict__ const x, const int x_stride,
|
||||
const uint8_t *__restrict__ const w, const int w_stride,
|
||||
const float *__restrict__ const mx,
|
||||
const float *__restrict__ const rx,
|
||||
const float *__restrict__ const my,
|
||||
const float *__restrict__ const ry,
|
||||
float *__restrict__ const y, const int y_stride) {
|
||||
|
||||
const int i = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
const int k = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
|
||||
if (i < B && k < M) {
|
||||
float y_local = 0;
|
||||
for (int j = 0; j < N; ++j) {
|
||||
y_local += x[i * x_stride + j] * (
|
||||
(float(w[j * w_stride + k]) + 0.5f)
|
||||
* rx[k] * ry[j] + mx[k] + my[j]
|
||||
);
|
||||
}
|
||||
y[i * y_stride + k] = y_local;
|
||||
}
|
||||
}
|
||||
|
||||
template <typename F>
|
||||
void cuda_mm8_seq(int B, int N, int M,
|
||||
F *x, int x_stride,
|
||||
uint8_t *w, int w_stride,
|
||||
F *mx, F *rx,
|
||||
F *my, F *ry,
|
||||
F *y, int y_stride);
|
||||
|
||||
template <>
|
||||
void cuda_mm8_seq<float>(int B, int N, int M,
|
||||
float *x, int x_stride,
|
||||
uint8_t *w, int w_stride,
|
||||
float *mx, float *rx,
|
||||
float *my, float *ry,
|
||||
float *y, int y_stride) {
|
||||
dim3 blockSize(1, 128);
|
||||
dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
|
||||
kernel_mm_seq_fp32i8<<<gridSize, blockSize>>>(
|
||||
B, N, M, x, x_stride, w, w_stride,
|
||||
mx, rx, my, ry, y, y_stride);
|
||||
}
|
||||
|
||||
__global__ void kernel_mm_seq_fp16i8(
|
||||
const int B, const int N, const int M,
|
||||
const __half *__restrict__ const x, const int x_stride,
|
||||
const uint8_t *__restrict__ const w, const int w_stride,
|
||||
const __half *__restrict__ const mx,
|
||||
const __half *__restrict__ const rx,
|
||||
const __half *__restrict__ const my,
|
||||
const __half *__restrict__ const ry,
|
||||
__half *__restrict__ const y, const int y_stride) {
|
||||
|
||||
const int i = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
const int k = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
|
||||
if (i < B && k < M) {
|
||||
float y_local = 0;
|
||||
for (int j = 0; j < N; ++j) {
|
||||
y_local += __half2float(x[i * x_stride + j]) * (
|
||||
(float(w[j * w_stride + k]) + 0.5f)
|
||||
* __half2float(rx[k]) * __half2float(ry[j])
|
||||
+ __half2float(mx[k]) + __half2float(my[j])
|
||||
);
|
||||
}
|
||||
y[i * y_stride + k] = __float2half(y_local);
|
||||
}
|
||||
}
|
||||
|
||||
template <>
|
||||
void cuda_mm8_seq<fp16>(int B, int N, int M,
|
||||
fp16 *x, int x_stride,
|
||||
uint8_t *w, int w_stride,
|
||||
fp16 *mx, fp16 *rx,
|
||||
fp16 *my, fp16 *ry,
|
||||
fp16 *y, int y_stride) {
|
||||
dim3 blockSize(1, 128);
|
||||
dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
|
||||
kernel_mm_seq_fp16i8<<<gridSize, blockSize>>>(
|
||||
B, N, M, cast(x), x_stride, w, w_stride,
|
||||
cast(mx), cast(rx), cast(my), cast(ry), cast(y), y_stride);
|
||||
}
|
||||
|
||||
#define MM8_ONE_JSPLIT 24
|
||||
#define MM8_ONE_TILE 1024
|
||||
|
||||
__global__ void kernel_mm_one_fp32i8(
|
||||
const int N, const int M,
|
||||
const float *__restrict__ const x,
|
||||
const uint8_t *__restrict__ const w, const int w_stride,
|
||||
const float *__restrict__ const mx,
|
||||
const float *__restrict__ const rx,
|
||||
const float *__restrict__ const my,
|
||||
const float *__restrict__ const ry,
|
||||
float *__restrict__ const y) {
|
||||
|
||||
const int k = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
|
||||
const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
|
||||
|
||||
if (k < M) {
|
||||
float y_local = 0;
|
||||
for (int j = j0; j < j1; ++j) {
|
||||
y_local += x[j] * (
|
||||
(float(w[j * w_stride + k]) + 0.5f)
|
||||
* rx[k] * ry[j] + mx[k] + my[j]
|
||||
);
|
||||
}
|
||||
atomicAdd(&y[k], y_local);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename F>
|
||||
void cuda_mm8_one(int N, int M,
|
||||
F *x,
|
||||
uint8_t *w, int w_stride,
|
||||
F *mx, F *rx,
|
||||
F *my, F *ry,
|
||||
float *y);
|
||||
|
||||
template <>
|
||||
void cuda_mm8_one<float>(int N, int M,
|
||||
float *x,
|
||||
uint8_t *w, int w_stride,
|
||||
float *mx, float *rx,
|
||||
float *my, float *ry,
|
||||
float *y) {
|
||||
dim3 blockSize(1, MM8_ONE_TILE);
|
||||
dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
|
||||
kernel_mm_one_fp32i8<<<gridSize, blockSize>>>(
|
||||
N, M, x, w, w_stride,
|
||||
mx, rx, my, ry, y);
|
||||
}
|
||||
|
||||
__global__ void kernel_mm_one_fp16i8(
|
||||
const int N, const int M,
|
||||
const __half *__restrict__ const x,
|
||||
const uint8_t *__restrict__ const w, const int w_stride,
|
||||
const __half *__restrict__ const mx,
|
||||
const __half *__restrict__ const rx,
|
||||
const __half *__restrict__ const my,
|
||||
const __half *__restrict__ const ry,
|
||||
float *__restrict__ const y) {
|
||||
|
||||
const int k = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
|
||||
const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
|
||||
|
||||
if (k < M) {
|
||||
float y_local = 0;
|
||||
for (int j = j0; j < j1; ++j) {
|
||||
y_local += __half2float(x[j]) * (
|
||||
(float(w[j * w_stride + k]) + 0.5f)
|
||||
* __half2float(rx[k]) * __half2float(ry[j])
|
||||
+ __half2float(mx[k]) + __half2float(my[j])
|
||||
);
|
||||
}
|
||||
atomicAdd(&y[k], y_local);
|
||||
}
|
||||
}
|
||||
|
||||
template <>
|
||||
void cuda_mm8_one<fp16>(int N, int M,
|
||||
fp16 *x,
|
||||
uint8_t *w, int w_stride,
|
||||
fp16 *mx, fp16 *rx,
|
||||
fp16 *my, fp16 *ry,
|
||||
float *y) {
|
||||
dim3 blockSize(1, MM8_ONE_TILE);
|
||||
dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
|
||||
kernel_mm_one_fp16i8<<<gridSize, blockSize>>>(
|
||||
N, M, cast(x), w, w_stride,
|
||||
cast(mx), cast(rx), cast(my), cast(ry), y);
|
||||
}
|
||||
backend-python/rwkv_pip/cuda/rwkv5.cu (vendored, new file, 88 lines)

@@ -0,0 +1,88 @@
|
||||
#include <stdio.h>
|
||||
#include <assert.h>
|
||||
#include "ATen/ATen.h"
|
||||
typedef at::BFloat16 bf16;
|
||||
typedef at::Half fp16;
|
||||
typedef float fp32;
|
||||
|
||||
template <typename F>
|
||||
__global__ void kernel_forward(const int B, const int T, const int C, const int H, float *__restrict__ _state,
|
||||
const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
|
||||
F *__restrict__ const _y)
|
||||
{
|
||||
const int b = blockIdx.x / H;
|
||||
const int h = blockIdx.x % H;
|
||||
const int i = threadIdx.x;
|
||||
_w += h*_N_;
|
||||
_u += h*_N_;
|
||||
_state += h*_N_*_N_ + i*_N_; // wrong if B > 1 !!!
|
||||
|
||||
__shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
|
||||
|
||||
float state[_N_];
|
||||
#pragma unroll
|
||||
for (int j = 0; j < _N_; j++)
|
||||
state[j] = _state[j];
|
||||
|
||||
__syncthreads();
|
||||
u[i] = float(_u[i]);
|
||||
w[i] = _w[i];
|
||||
__syncthreads();
|
||||
|
||||
for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
|
||||
{
|
||||
__syncthreads();
|
||||
r[i] = float(_r[t]);
|
||||
k[i] = float(_k[t]);
|
||||
__syncthreads();
|
||||
|
||||
const float v = float(_v[t]);
|
||||
float y = 0;
|
||||
|
||||
#pragma unroll
|
||||
for (int j = 0; j < _N_; j+=4)
|
||||
{
|
||||
const float4& r_ = (float4&)(r[j]);
|
||||
const float4& k_ = (float4&)(k[j]);
|
||||
const float4& w_ = (float4&)(w[j]);
|
||||
const float4& u_ = (float4&)(u[j]);
|
||||
float4& s = (float4&)(state[j]);
|
||||
float4 x;
|
||||
|
||||
x.x = k_.x * v;
|
||||
x.y = k_.y * v;
|
||||
x.z = k_.z * v;
|
||||
x.w = k_.w * v;
|
||||
|
||||
y += r_.x * (u_.x * x.x + s.x);
|
||||
y += r_.y * (u_.y * x.y + s.y);
|
||||
y += r_.z * (u_.z * x.z + s.z);
|
||||
y += r_.w * (u_.w * x.w + s.w);
|
||||
|
||||
s.x = s.x * w_.x + x.x;
|
||||
s.y = s.y * w_.y + x.y;
|
||||
s.z = s.z * w_.z + x.z;
|
||||
s.w = s.w * w_.w + x.w;
|
||||
}
|
||||
_y[t] = F(y);
|
||||
}
|
||||
#pragma unroll
|
||||
for (int j = 0; j < _N_; j++)
|
||||
_state[j] = state[j];
|
||||
}
|
||||
|
||||
void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
|
||||
{
|
||||
assert(H*_N_ == C);
|
||||
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
|
||||
}
|
||||
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y)
|
||||
{
|
||||
assert(H*_N_ == C);
|
||||
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
|
||||
}
|
||||
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y)
|
||||
{
|
||||
assert(H*_N_ == C);
|
||||
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
|
||||
}
|
||||
backend-python/rwkv_pip/cuda/rwkv5_op.cpp (vendored, new file, 34 lines)

@@ -0,0 +1,34 @@
|
||||
#include <torch/extension.h>
|
||||
#include "ATen/ATen.h"
|
||||
#include <c10/cuda/CUDAGuard.h>
|
||||
typedef at::BFloat16 bf16;
|
||||
typedef at::Half fp16;
|
||||
typedef float fp32;
|
||||
|
||||
void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
|
||||
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y);
|
||||
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y);
|
||||
|
||||
void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
|
||||
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
|
||||
cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
|
||||
}
|
||||
void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
|
||||
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
|
||||
cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), w.data_ptr<float>(), u.data_ptr<fp16>(), y.data_ptr<fp16>());
|
||||
}
|
||||
void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
|
||||
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
|
||||
cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), w.data_ptr<float>(), u.data_ptr<fp32>(), y.data_ptr<fp32>());
|
||||
}
|
||||
|
||||
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
|
||||
m.def("forward_bf16", &forward_bf16, "rwkv5 forward_bf16");
|
||||
m.def("forward_fp16", &forward_fp16, "rwkv5 forward_fp16");
|
||||
m.def("forward_fp32", &forward_fp32, "rwkv5 forward_fp32");
|
||||
}
|
||||
TORCH_LIBRARY(rwkv5, m) {
|
||||
m.def("forward_bf16", forward_bf16);
|
||||
m.def("forward_fp16", forward_fp16);
|
||||
m.def("forward_fp32", forward_fp32);
|
||||
}
|
||||
backend-python/rwkv_pip/cuda/wrapper.cpp (vendored, new file, 141 lines)

@@ -0,0 +1,141 @@
|
||||
#include <torch/extension.h>
|
||||
#include "ATen/ATen.h"
|
||||
#include <iostream>
|
||||
#include <c10/cuda/CUDAGuard.h>
|
||||
|
||||
typedef at::Half fp16;
|
||||
|
||||
template <typename F>
|
||||
void cuda_wkv_forward(int B, int T, int C,
|
||||
float *w, float *u, F *k, F *v, F *y,
|
||||
float *aa, float *bb, float *pp);
|
||||
template <typename F>
|
||||
void cuda_mm8_seq(int B, int N, int M,
|
||||
F *x, int x_stride,
|
||||
uint8_t *w, int w_stride,
|
||||
F *mx, F *rx,
|
||||
F *my, F *ry,
|
||||
F *y, int y_stride);
|
||||
template <typename F>
|
||||
void cuda_mm8_one(int N, int M,
|
||||
F *x,
|
||||
uint8_t *w, int w_stride,
|
||||
F *mx, F *rx,
|
||||
F *my, F *ry,
|
||||
float *y);
|
||||
|
||||
void wkv_forward(int64_t B, int64_t T, int64_t C,
|
||||
torch::Tensor &w, torch::Tensor &u,
|
||||
torch::Tensor &k, torch::Tensor &v, torch::Tensor &y,
|
||||
torch::Tensor &aa, torch::Tensor &bb, torch::Tensor &pp) {
|
||||
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
|
||||
switch (k.scalar_type()) {
|
||||
case c10::ScalarType::Half:
|
||||
cuda_wkv_forward(B, T, C,
|
||||
w.data_ptr<float>(), u.data_ptr<float>(),
|
||||
k.data_ptr<fp16>(), v.data_ptr<fp16>(), y.data_ptr<fp16>(),
|
||||
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
|
||||
break;
|
||||
case c10::ScalarType::Float:
|
||||
cuda_wkv_forward(B, T, C,
|
||||
w.data_ptr<float>(), u.data_ptr<float>(),
|
||||
k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(),
|
||||
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
|
||||
break;
|
||||
default:
|
||||
assert(false && "Only FP16 and FP32 are currently supported");
|
||||
}
|
||||
}
|
||||
|
||||
void mm8_seq(int64_t B, int64_t N, int64_t M,
|
||||
torch::Tensor &x, torch::Tensor &w,
|
||||
torch::Tensor &mx, torch::Tensor &rx,
|
||||
torch::Tensor &my, torch::Tensor &ry,
|
||||
torch::Tensor &y) {
|
||||
assert(x.stride(1) == 1);
|
||||
assert(w.stride(1) == 1);
|
||||
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
|
||||
assert(my.stride(0) == 1 && ry.stride(0) == 1);
|
||||
assert(y.stride(1) == 1);
|
||||
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
|
||||
switch (x.scalar_type()) {
|
||||
case c10::ScalarType::Half:
|
||||
cuda_mm8_seq(
|
||||
B, N, M,
|
||||
x.data_ptr<fp16>(), x.stride(0),
|
||||
w.data_ptr<uint8_t>(), w.stride(0),
|
||||
mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
|
||||
my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
|
||||
y.data_ptr<fp16>(), y.stride(0));
|
||||
break;
|
||||
case c10::ScalarType::Float:
|
||||
cuda_mm8_seq(
|
||||
B, N, M,
|
||||
x.data_ptr<float>(), x.stride(0),
|
||||
w.data_ptr<uint8_t>(), w.stride(0),
|
||||
mx.data_ptr<float>(), rx.data_ptr<float>(),
|
||||
my.data_ptr<float>(), ry.data_ptr<float>(),
|
||||
y.data_ptr<float>(), y.stride(0));
|
||||
break;
|
||||
default:
|
||||
assert(false && "Only FP16 and FP32 are currently supported");
|
||||
}
|
||||
}
|
||||
void mm8_one(int64_t N, int64_t M,
|
||||
torch::Tensor &x, torch::Tensor &w,
|
||||
torch::Tensor &mx, torch::Tensor &rx,
|
||||
torch::Tensor &my, torch::Tensor &ry,
|
||||
torch::Tensor &y) {
|
||||
assert(x.stride(0) == 1);
|
||||
assert(w.stride(1) == 1);
|
||||
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
|
||||
assert(my.stride(0) == 1 && ry.stride(0) == 1);
|
||||
assert(y.stride(0) == 1);
|
||||
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
|
||||
switch (x.scalar_type()) {
|
||||
case c10::ScalarType::Half:
|
||||
cuda_mm8_one(
|
||||
N, M,
|
||||
x.data_ptr<fp16>(),
|
||||
w.data_ptr<uint8_t>(), w.stride(0),
|
||||
mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
|
||||
my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
|
||||
y.data_ptr<float>());
|
||||
break;
|
||||
case c10::ScalarType::Float:
|
||||
cuda_mm8_one(
|
||||
N, M,
|
||||
x.data_ptr<float>(),
|
||||
w.data_ptr<uint8_t>(), w.stride(0),
|
||||
mx.data_ptr<float>(), rx.data_ptr<float>(),
|
||||
my.data_ptr<float>(), ry.data_ptr<float>(),
|
||||
y.data_ptr<float>());
|
||||
break;
|
||||
default:
|
||||
assert(false && "Only FP16 and FP32 are currently supported");
|
||||
}
|
||||
}
|
||||
|
||||
using torch::Tensor;
|
||||
|
||||
#ifndef DISABLE_CUBLAS_GEMM
|
||||
void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
|
||||
#endif
|
||||
|
||||
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
|
||||
m.def("wkv_forward", &wkv_forward, "wkv forward");
|
||||
m.def("mm8_seq", &mm8_seq, "mm8 seq");
|
||||
m.def("mm8_one", &mm8_one, "mm8 one");
|
||||
#ifndef DISABLE_CUBLAS_GEMM
|
||||
m.def("gemm_fp16_cublas", &gemm_fp16_cublas, "gemv fp16 cublas");
|
||||
#endif
|
||||
}
|
||||
|
||||
TORCH_LIBRARY(rwkv, m) {
|
||||
m.def("wkv_forward", wkv_forward);
|
||||
m.def("mm8_seq", mm8_seq);
|
||||
m.def("mm8_one", mm8_one);
|
||||
#ifndef DISABLE_CUBLAS_GEMM
|
||||
m.def("gemm_fp16_cublas", gemm_fp16_cublas);
|
||||
#endif
|
||||
}
|
||||
backend-python/rwkv_pip/model.py (vendored, new file, 1800 lines; file diff suppressed because it is too large)
BIN backend-python/rwkv_pip/rwkv5.pyd (vendored, new file; binary not shown)
26
backend-python/rwkv_pip/utils.py
vendored
26
backend-python/rwkv_pip/utils.py
vendored
@@ -16,6 +16,7 @@ class PIPELINE_ARGS:
|
||||
top_k=0,
|
||||
alpha_frequency=0.2,
|
||||
alpha_presence=0.2,
|
||||
alpha_decay=0.996,
|
||||
token_ban=[],
|
||||
token_stop=[],
|
||||
chunk_len=256,
|
||||
@@ -25,6 +26,7 @@ class PIPELINE_ARGS:
|
||||
self.top_k = top_k
|
||||
self.alpha_frequency = alpha_frequency # Frequency Penalty (as in GPT-3)
|
||||
self.alpha_presence = alpha_presence # Presence Penalty (as in GPT-3)
|
||||
self.alpha_decay = alpha_decay # gradually decay the penalty
|
||||
self.token_ban = token_ban # ban the generation of some tokens
|
||||
self.token_stop = token_stop # stop generation whenever you see any token here
|
||||
self.chunk_len = (
|
||||
@@ -33,7 +35,7 @@ class PIPELINE_ARGS:
|
||||
|
||||
|
||||
class PIPELINE:
|
||||
def __init__(self, model, WORD_NAME):
|
||||
def __init__(self, model, WORD_NAME: str):
|
||||
self.model = model
|
||||
if WORD_NAME == "cl100k_base":
|
||||
import tiktoken
|
||||
@@ -47,9 +49,15 @@ class PIPELINE:
|
||||
os.path.dirname(os.path.abspath(__file__)) + "/rwkv_vocab_v20230424.txt"
|
||||
)
|
||||
else:
|
||||
from tokenizers import Tokenizer
|
||||
if WORD_NAME.endswith(".txt"):
|
||||
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
||||
from rwkv_tokenizer import TRIE_TOKENIZER
|
||||
|
||||
self.tokenizer = Tokenizer.from_file(WORD_NAME)
|
||||
self.tokenizer = TRIE_TOKENIZER(WORD_NAME)
|
||||
else:
|
||||
from tokenizers import Tokenizer
|
||||
|
||||
self.tokenizer = Tokenizer.from_file(WORD_NAME)
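Because the rendered diff interleaves the old and new constructor lines, here is an approximate sketch of the tokenizer dispatch PIPELINE ends up with after this change (not the verbatim code; the .txt branch routing to TRIE_TOKENIZER is the new part):

def pick_tokenizer(WORD_NAME: str):
    if WORD_NAME == "cl100k_base":
        import tiktoken
        return tiktoken.get_encoding("cl100k_base")
    elif WORD_NAME == "rwkv_vocab_v20230424":
        from rwkv_tokenizer import TRIE_TOKENIZER  # bundled world-vocab trie tokenizer
        return TRIE_TOKENIZER("rwkv_vocab_v20230424.txt")
    elif WORD_NAME.endswith(".txt"):  # new: any trie vocab file passed by path
        from rwkv_tokenizer import TRIE_TOKENIZER
        return TRIE_TOKENIZER(WORD_NAME)
    else:  # a HuggingFace tokenizers JSON file, e.g. 20B_tokenizer.json
        from tokenizers import Tokenizer
        return Tokenizer.from_file(WORD_NAME)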
def refine_context(self, context):
|
||||
context = context.strip().split("\n")
|
||||
@@ -73,12 +81,13 @@ class PIPELINE:
|
||||
def sample_logits(self, logits, temperature=1.0, top_p=0.85, top_k=0):
|
||||
probs = F.softmax(logits.float(), dim=-1)
|
||||
top_k = int(top_k)
|
||||
if probs.device == torch.device("cpu"):
|
||||
probs = probs.numpy()
|
||||
# 'privateuseone' is the type of custom devices like `torch_directml.device()`
|
||||
if probs.device.type in ["cpu", "privateuseone"]:
|
||||
probs = probs.cpu().numpy()
|
||||
sorted_ids = np.argsort(probs)
|
||||
sorted_probs = probs[sorted_ids][::-1]
|
||||
cumulative_probs = np.cumsum(sorted_probs)
|
||||
cutoff = float(sorted_probs[np.argmax(cumulative_probs > top_p)])
|
||||
cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
|
||||
probs[probs < cutoff] = 0
|
||||
if top_k < len(probs) and top_k > 0:
|
||||
probs[sorted_ids[:-top_k]] = 0
|
||||
@@ -92,7 +101,7 @@ class PIPELINE:
|
||||
sorted_probs = probs[sorted_ids]
|
||||
sorted_probs = torch.flip(sorted_probs, dims=(0,))
|
||||
cumulative_probs = torch.cumsum(sorted_probs, dim=-1).cpu().numpy()
|
||||
cutoff = float(sorted_probs[np.argmax(cumulative_probs > top_p)])
|
||||
cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
|
||||
probs[probs < cutoff] = 0
|
||||
if top_k < len(probs) and top_k > 0:
|
||||
probs[sorted_ids[:-top_k]] = 0
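Both sampling paths change the nucleus cutoff from cumulative_probs > top_p to >=. With >=, the token whose inclusion brings the cumulative mass exactly to top_p becomes the last one kept, instead of also pulling in the token after it. A small NumPy illustration with invented values (probabilities already sorted in descending order, as in the code above):

import numpy as np

probs = np.array([0.5, 0.3, 0.2])
top_p = 0.8
cum = np.cumsum(probs)                        # [0.5, 0.8, 1.0]
cutoff_old = probs[np.argmax(cum > top_p)]    # 0.2 -> nothing is zeroed, all 3 tokens kept
cutoff_new = probs[np.argmax(cum >= top_p)]   # 0.3 -> the 0.2 token is zeroed, 2 tokens kept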
@@ -127,10 +136,13 @@ class PIPELINE:
|
||||
if token in args.token_stop:
|
||||
break
|
||||
all_tokens += [token]
|
||||
for xxx in occurrence:
|
||||
occurrence[xxx] *= args.alpha_decay
|
||||
if token not in occurrence:
|
||||
occurrence[token] = 1
|
||||
else:
|
||||
occurrence[token] += 1
|
||||
# print(occurrence) # debug
|
||||
|
||||
# output
|
||||
tmp = self.decode(all_tokens[out_last:])
BIN backend-python/rwkv_pip/wkv_cuda.pyd vendored Normal file
Binary file not shown.
@@ -10,7 +10,7 @@ logger = logging.getLogger()
|
||||
logger.setLevel(logging.INFO)
|
||||
formatter = logging.Formatter("%(asctime)s - %(levelname)s\n%(message)s")
|
||||
fh = logging.handlers.RotatingFileHandler(
|
||||
"api.log", mode="a", maxBytes=3 * 1024 * 1024, backupCount=3
|
||||
"api.log", mode="a", maxBytes=3 * 1024 * 1024, backupCount=3, encoding="utf-8"
|
||||
)
|
||||
fh.setFormatter(formatter)
|
||||
logger.addHandler(fh)
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
import os
|
||||
import sys
|
||||
import global_var
|
||||
|
||||
|
||||
def ngrok_connect():
|
||||
from pyngrok import ngrok, conf
|
||||
|
||||
conf.set_default(conf.PyngrokConfig(ngrok_path="./ngrok"))
|
||||
conf.set_default(
|
||||
conf.PyngrokConfig(ngrok_path="./ngrok.exe" if os.name == "nt" else "./ngrok")
|
||||
)
|
||||
ngrok.set_auth_token(os.environ["ngrok_token"])
|
||||
http_tunnel = ngrok.connect(8000 if len(sys.argv) == 1 else int(sys.argv[1]))
|
||||
print(http_tunnel.public_url)
|
||||
http_tunnel = ngrok.connect(global_var.get(global_var.Args).port)
|
||||
print(f"ngrok url: {http_tunnel.public_url}")
|
||||
|
||||
@@ -4,7 +4,7 @@ import os
|
||||
import pathlib
|
||||
import copy
|
||||
import re
|
||||
from typing import Dict, Iterable, List, Tuple, Union
|
||||
from typing import Dict, Iterable, List, Tuple, Union, Type
|
||||
from utils.log import quick_log
|
||||
from fastapi import HTTPException
|
||||
from pydantic import BaseModel, Field
|
||||
@@ -21,33 +21,21 @@ os.environ["TORCH_EXTENSIONS_DIR"] = f"{pathlib.Path(__file__).parent.parent.res
|
||||
|
||||
|
||||
class RWKVType(Enum):
|
||||
NoneType = auto()
|
||||
Raven = auto()
|
||||
World = auto()
|
||||
Music = auto()
|
||||
|
||||
|
||||
class AbstractRWKV(ABC):
|
||||
def __init__(self, model: str, strategy: str, tokens_path: str):
|
||||
rwkv_beta = global_var.get(global_var.Args).rwkv_beta
|
||||
|
||||
# dynamic import to make RWKV_CUDA_ON work
|
||||
if rwkv_beta:
|
||||
from rwkv_pip.beta.model import (
|
||||
RWKV as Model,
|
||||
)
|
||||
else:
|
||||
from rwkv.model import (
|
||||
RWKV as Model,
|
||||
)
|
||||
from rwkv_pip.utils import PIPELINE
|
||||
|
||||
filename, _ = os.path.splitext(os.path.basename(model))
|
||||
self.name = filename
|
||||
self.model = Model(model, strategy)
|
||||
self.pipeline = PIPELINE(self.model, tokens_path)
|
||||
def __init__(self, model, pipeline):
|
||||
self.name = "rwkv"
|
||||
self.model = model
|
||||
self.pipeline = pipeline
|
||||
self.model_state = None
|
||||
self.model_tokens = []
|
||||
self.rwkv_type: RWKVType = None
|
||||
self.rwkv_type: RWKVType = RWKVType.NoneType
|
||||
self.tokenizer_len = len(model.w["emb.weight"])
|
||||
|
||||
self.max_tokens_per_generation = 500
|
||||
self.temperature = 1
|
||||
@@ -348,8 +336,8 @@ class AbstractRWKV(ABC):
|
||||
|
||||
|
||||
class TextRWKV(AbstractRWKV):
|
||||
def __init__(self, model: str, strategy: str, tokens_path: str) -> None:
|
||||
super().__init__(model, strategy, tokens_path)
|
||||
def __init__(self, model, pipeline) -> None:
|
||||
super().__init__(model, pipeline)
|
||||
|
||||
self.CHUNK_LEN = 256
|
||||
|
||||
@@ -361,16 +349,16 @@ class TextRWKV(AbstractRWKV):
|
||||
self.penalty_alpha_frequency = 1
|
||||
|
||||
self.interface = ":"
|
||||
if "world" in self.name.lower():
|
||||
self.rwkv_type = RWKVType.World
|
||||
self.user = "Question"
|
||||
self.bot = "Answer"
|
||||
self.END_OF_LINE = 11
|
||||
else:
|
||||
if self.tokenizer_len < 65536:
|
||||
self.rwkv_type = RWKVType.Raven
|
||||
self.user = "Bob"
|
||||
self.bot = "Alice"
|
||||
self.END_OF_LINE = 187
|
||||
else:
|
||||
self.rwkv_type = RWKVType.World
|
||||
self.user = "User"
|
||||
self.bot = "Assistant"
|
||||
self.END_OF_LINE = 11
|
||||
|
||||
self.AVOID_REPEAT_TOKENS = []
|
||||
AVOID_REPEAT = ",:?!"
|
||||
@@ -469,8 +457,8 @@ The following is a coherent verbose detailed conversation between a girl named {
|
||||
|
||||
|
||||
class MusicRWKV(AbstractRWKV):
|
||||
def __init__(self, model: str, strategy: str, tokens_path: str):
|
||||
super().__init__(model, strategy, tokens_path)
|
||||
def __init__(self, model, pipeline):
|
||||
super().__init__(model, pipeline)
|
||||
|
||||
self.max_tokens_per_generation = 500
|
||||
self.temperature = 1
|
||||
@@ -510,6 +498,52 @@ class MusicRWKV(AbstractRWKV):
|
||||
return " " + delta
|
||||
|
||||
|
||||
def get_tokenizer(tokenizer_len: int):
|
||||
tokenizer_dir = f"{pathlib.Path(__file__).parent.parent.resolve()}/rwkv_pip/"
|
||||
if tokenizer_len < 50277:
|
||||
return tokenizer_dir + "tokenizer-midi.json"
|
||||
elif tokenizer_len < 65536:
|
||||
return tokenizer_dir + "20B_tokenizer.json"
|
||||
else:
|
||||
return "rwkv_vocab_v20230424"
|
||||
|
||||
|
||||
def RWKV(model: str, strategy: str, tokenizer: Union[str, None]) -> AbstractRWKV:
|
||||
rwkv_beta = global_var.get(global_var.Args).rwkv_beta
|
||||
|
||||
# dynamic import to make RWKV_CUDA_ON work
|
||||
if rwkv_beta:
|
||||
from rwkv_pip.beta.model import (
|
||||
RWKV as Model,
|
||||
)
|
||||
else:
|
||||
from rwkv_pip.model import (
|
||||
RWKV as Model,
|
||||
)
|
||||
from rwkv_pip.utils import PIPELINE
|
||||
|
||||
filename, _ = os.path.splitext(os.path.basename(model))
|
||||
model = Model(model, strategy)
|
||||
if not tokenizer:
|
||||
tokenizer = get_tokenizer(len(model.w["emb.weight"]))
|
||||
pipeline = PIPELINE(model, tokenizer)
|
||||
|
||||
rwkv_map: dict[str, Type[AbstractRWKV]] = {
|
||||
"20B_tokenizer": TextRWKV,
|
||||
"rwkv_vocab_v20230424": TextRWKV,
|
||||
"tokenizer-midi": MusicRWKV,
|
||||
}
|
||||
tokenizer_name = os.path.splitext(os.path.basename(tokenizer))[0]
|
||||
rwkv: AbstractRWKV
|
||||
if tokenizer_name in rwkv_map:
|
||||
rwkv = rwkv_map[tokenizer_name](model, pipeline)
|
||||
else:
|
||||
rwkv = TextRWKV(model, pipeline)
|
||||
rwkv.name = filename
|
||||
|
||||
return rwkv
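A hedged usage sketch of the new factory (paths are placeholders; this assumes it runs inside the backend where global_var has already been initialized): when tokenizer is None, get_tokenizer picks one from the embedding size (midi below 50277, 20B below 65536, the world vocab otherwise), and the tokenizer's basename selects the concrete subclass via rwkv_map.

rwkv = RWKV(
    model="models/RWKV-4-World-3B-v1.pth",  # placeholder path
    strategy="cuda fp16",
    tokenizer=None,  # auto-selected via get_tokenizer(len(model.w["emb.weight"]))
)
print(type(rwkv).__name__, rwkv.name)  # e.g. TextRWKV RWKV-4-World-3B-v1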
class ModelConfigBody(BaseModel):
|
||||
max_tokens: int = Field(default=None, gt=0, le=102400)
|
||||
temperature: float = Field(default=None, ge=0, le=2)
|
||||
@@ -518,7 +552,7 @@ class ModelConfigBody(BaseModel):
|
||||
frequency_penalty: float = Field(default=None, ge=-2, le=2)
|
||||
|
||||
class Config:
|
||||
schema_extra = {
|
||||
json_schema_extra = {
|
||||
"example": {
|
||||
"max_tokens": 1000,
|
||||
"temperature": 1.2,
|
||||
|
||||
BIN backend-python/wkv_cuda_utils/wkv_cuda10_30.pyd vendored
Binary file not shown.
BIN backend-python/wkv_cuda_utils/wkv_cuda40.pyd vendored
Binary file not shown.
734 backend-python/wkv_cuda_utils/wkv_cuda_model.py vendored
@@ -1,734 +0,0 @@
|
||||
########################################################################################################
|
||||
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
|
||||
########################################################################################################
|
||||
|
||||
import types, gc, os, time, re
|
||||
import torch
|
||||
from torch.nn import functional as F
|
||||
torch.backends.cudnn.benchmark = True
|
||||
torch.backends.cudnn.allow_tf32 = True
|
||||
torch.backends.cuda.matmul.allow_tf32 = True
|
||||
current_path = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
# https://zhuanlan.zhihu.com/p/612879065
|
||||
def LoadPreCompileLibrary(file):
|
||||
import importlib
|
||||
import os
|
||||
|
||||
import torch
|
||||
|
||||
# load the custom_op_library and register the custom ops
|
||||
lib_dir = os.path.dirname(__file__)
|
||||
if os.name == "nt":
|
||||
# Register the main torchvision library location on the default DLL path
|
||||
import ctypes
|
||||
import sys
|
||||
|
||||
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
|
||||
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
|
||||
prev_error_mode = kernel32.SetErrorMode(0x0001)
|
||||
|
||||
if with_load_library_flags:
|
||||
kernel32.AddDllDirectory.restype = ctypes.c_void_p
|
||||
|
||||
if sys.version_info >= (3, 8):
|
||||
os.add_dll_directory(lib_dir)
|
||||
elif with_load_library_flags:
|
||||
res = kernel32.AddDllDirectory(lib_dir)
|
||||
if res is None:
|
||||
err = ctypes.WinError(ctypes.get_last_error())
|
||||
err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
|
||||
raise ValueError(err)
|
||||
|
||||
kernel32.SetErrorMode(prev_error_mode)
|
||||
|
||||
loader_details = (
|
||||
importlib.machinery.ExtensionFileLoader,
|
||||
importlib.machinery.EXTENSION_SUFFIXES,
|
||||
)
|
||||
|
||||
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
|
||||
ext_specs = extfinder.find_spec(file)
|
||||
if ext_specs is None:
|
||||
return False
|
||||
|
||||
try:
|
||||
torch.ops.load_library(ext_specs.origin)
|
||||
except OSError as exc:
|
||||
return False
|
||||
return True
|
||||
|
||||
########################################################################################################
|
||||
|
||||
if os.environ.get('RWKV_JIT_ON') != '0':
|
||||
os.environ["RWKV_JIT_ON"] = '1'
|
||||
MyModule = torch.jit.ScriptModule
|
||||
MyFunction = torch.jit.script_method
|
||||
MyStatic = torch.jit.script
|
||||
else:
|
||||
MyModule = torch.nn.Module
|
||||
def __nop(ob):
|
||||
return ob
|
||||
MyFunction = __nop
|
||||
MyStatic = __nop
|
||||
|
||||
if os.environ.get('RWKV_CUDA_ON') == '1':
|
||||
if LoadPreCompileLibrary('wkv_cuda') is False:
|
||||
from torch.utils.cpp_extension import load
|
||||
load(
|
||||
name=f"wkv_cuda",
|
||||
sources=[f"{current_path}/cuda/wrapper.cpp", f"{current_path}/cuda/operators.cu"],
|
||||
verbose=True,
|
||||
extra_cuda_cflags=["-t 4", "-std=c++17", "--use_fast_math", "-O3", "--extra-device-vectorization"],
|
||||
is_python_module=False)
|
||||
|
||||
@MyStatic
|
||||
def cuda_wkv(T: int, C: int, w, u, k, v, aa, bb, pp):
|
||||
assert 1 * C % min(C, 32) == 0
|
||||
assert k.dtype == v.dtype == torch.float16 or k.dtype == v.dtype == torch.float32
|
||||
assert w.dtype == u.dtype == aa.dtype == bb.dtype == pp.dtype == torch.float32
|
||||
w = w.contiguous()
|
||||
u = u.contiguous()
|
||||
k = k.contiguous()
|
||||
v = v.contiguous()
|
||||
y = torch.empty((T, C), device=w.device, memory_format=torch.contiguous_format, dtype=k.dtype)
|
||||
torch.ops.rwkv.wkv_forward(1, T, C, w, u, k, v, y, aa, bb, pp)
|
||||
return y, aa, bb, pp
|
||||
@MyStatic
|
||||
def cuda_mm8_seq(B: int, N: int, M: int, x, w, mx, rx, my, ry):
|
||||
assert x.dtype == mx.dtype == rx.dtype == my.dtype == ry.dtype
|
||||
assert x.dtype == torch.float32 or x.dtype == torch.float16
|
||||
assert w.dtype == torch.uint8
|
||||
assert x.shape == [B, N]
|
||||
assert w.shape == [N, M]
|
||||
assert rx.shape == mx.shape == [M]
|
||||
assert ry.shape == my.shape == [N, 1]
|
||||
y = torch.empty((B, M), device=w.device, dtype=x.dtype)
|
||||
torch.ops.rwkv.mm8_seq(B, N, M, x, w, mx, rx, my, ry, y)
|
||||
return y
|
||||
@MyStatic
|
||||
def cuda_mm8_one(N: int, M: int, x, w, mx, rx, my, ry):
|
||||
assert x.dtype == mx.dtype == rx.dtype == my.dtype == ry.dtype
|
||||
assert x.dtype == torch.float32 or x.dtype == torch.float16
|
||||
assert w.dtype == torch.uint8
|
||||
assert x.shape == [N]
|
||||
assert w.shape == [N, M]
|
||||
assert rx.shape == mx.shape == [M]
|
||||
assert ry.shape == my.shape == [N, 1]
|
||||
y = torch.zeros((M,), device=w.device, dtype=torch.float32)
|
||||
torch.ops.rwkv.mm8_one(N, M, x, w, mx, rx, my, ry, y)
|
||||
return y.to(dtype=x.dtype)
|
||||
else:
|
||||
os.environ["RWKV_CUDA_ON"] = '0'
|
||||
|
||||
########################################################################################################
|
||||
|
||||
class RWKV(MyModule):
|
||||
def __init__(self, model, strategy, verbose = True, convert_and_save_and_exit = None):
|
||||
super().__init__()
|
||||
if verbose:
|
||||
prxxx = lambda *args, **kwargs: print(*args, **kwargs)
|
||||
else:
|
||||
prxxx = lambda *args, **kwargs: None
|
||||
|
||||
STRATEGY_REGEX = r"^(?:(?:^|->) *(?:cuda(?::[\d]+)?|cpu|mps) (?:fp(?:16|32)|bf16)(?:i8|i4|i3)?(?: \*[\d]+\+?)? *)+$"
|
||||
if not re.match(STRATEGY_REGEX, strategy):
|
||||
raise ValueError("Invalid strategy. Please read https://pypi.org/project/rwkv/")
|
||||
|
||||
strategy = ('->'.join([x.strip() for x in strategy.split('->')])).replace('->', ' -> ')
|
||||
self.args = types.SimpleNamespace()
|
||||
args = self.args
|
||||
args.MODEL_NAME = model
|
||||
args.strategy_string = strategy
|
||||
|
||||
# Rescale for fp16 mode: set x = x/2 every X layer (to avoid fp16 overflow)
|
||||
self.RESCALE_LAYER = 6 if 'fp16' in strategy else 0
|
||||
prxxx(f'RWKV_JIT_ON {os.environ["RWKV_JIT_ON"]} RWKV_CUDA_ON {os.environ["RWKV_CUDA_ON"]} RESCALE_LAYER {self.RESCALE_LAYER}\n')
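Because every block re-normalizes its input with LayerNorm, halving the residual stream does not change what a block computes; dividing att.output.weight and ffn.value.weight by 2 ** (layer_id // RESCALE_LAYER) at load time (see the weight-loading loop below) keeps each block's residual contribution on the same shrinking scale, so fp16 activations stay well below the ~65504 overflow limit. A small sketch of the schedule, assuming a 24-layer model:

RESCALE_LAYER = 6
for i in range(24):
    weight_divisor = 2 ** (i // RESCALE_LAYER)    # applied to att.output / ffn.value weights
    halve_stream = (i + 1) % RESCALE_LAYER == 0   # x = x / 2 after blocks 5, 11, 17, 23
    print(i, weight_divisor, halve_stream)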
args.MODEL_NAME = args.MODEL_NAME.strip()
|
||||
if not args.MODEL_NAME.endswith('.pth'):
|
||||
args.MODEL_NAME += '.pth'
|
||||
prxxx(f'Loading {args.MODEL_NAME} ...')
|
||||
with torch.no_grad():
|
||||
self.w = torch.load(args.MODEL_NAME, map_location='cpu') # load model to CPU first
|
||||
gc.collect()
|
||||
w = self.w
|
||||
|
||||
ALREADY_CONVERTED = False
|
||||
if '_strategy' in w:
|
||||
ALREADY_CONVERTED = True
|
||||
assert convert_and_save_and_exit == None # you should only convert a raw model
|
||||
prxxx(f"Converted model: strategy {w['_strategy']}, version {w['_version']}\n")
|
||||
assert w['_strategy'] == args.strategy_string # if you are using a new strategy, re-convert the model
|
||||
assert float(w['_version']) >= 0.7 # sometimes you should re-convert using latest convert_model.py
|
||||
assert w['_rescale_layer'] == self.RESCALE_LAYER
|
||||
del w['_strategy']
|
||||
del w['_version']
|
||||
del w['_rescale_layer']
|
||||
|
||||
args.n_embd = w['emb.weight'].shape[1]
|
||||
args.n_layer = 0
|
||||
keys = list(w.keys())
|
||||
for x in keys:
|
||||
layer_id = int(x.split('.')[1]) if ('blocks.' in x) else 0
|
||||
args.n_layer = max(args.n_layer, layer_id+1)
|
||||
|
||||
####################### Compute strategy
|
||||
|
||||
s = [x.strip().split(' ') for x in strategy.split('->')]
|
||||
plan = [0] * len(s)
|
||||
stream_i = -1
|
||||
stream_count = 0
|
||||
to_allocate = args.n_layer + 1
|
||||
allocated = 0
|
||||
free_slots = 0
|
||||
for i in range(len(s)):
|
||||
si = s[i]
|
||||
si1 = si[1]
|
||||
if si1.startswith('fp32'): si[1] = [torch.float]
|
||||
elif si1.startswith('fp16'): si[1] = [torch.float16]
|
||||
elif si1.startswith('bf16'): si[1] = [torch.bfloat16]
|
||||
if si1.endswith('i8'): si[1] += [torch.uint8]
|
||||
else: si[1] += [si[1][0]]
|
||||
if len(si) > 2:
|
||||
ss = si[2]
|
||||
assert ss.startswith('*')
|
||||
if ss.endswith('+'):
|
||||
plan[i] = int(ss[1:-1])
|
||||
stream_i = i
|
||||
else:
|
||||
plan[i] = int(ss[1:])
|
||||
allocated += plan[i]
|
||||
if allocated >= to_allocate:
|
||||
plan[i] += to_allocate - allocated
|
||||
break
|
||||
else:
|
||||
free_slots += 1
|
||||
if stream_i < 0:
|
||||
if free_slots > 0 and to_allocate > allocated:
|
||||
for i in range(len(s)):
|
||||
if plan[i] == 0:
|
||||
plan[i] = (to_allocate - allocated) // free_slots
|
||||
allocated += plan[i]
|
||||
free_slots -= 1
|
||||
if to_allocate > allocated:
|
||||
plan[len(s)-1] += to_allocate - allocated
|
||||
else:
|
||||
if to_allocate > allocated:
|
||||
stream_count = to_allocate - allocated
|
||||
plan[stream_i] += stream_count
|
||||
prxxx(f'Strategy: (total {args.n_layer}+1={args.n_layer+1} layers)')
|
||||
for i in range(len(s)):
|
||||
ss = s[i]
|
||||
if i != stream_i:
|
||||
prxxx(f'* {ss[0]} {str(ss[1]).replace("torch.","")}, store {plan[i]} layers')
|
||||
else:
|
||||
prxxx(f'* {ss[0]} {str(ss[1]).replace("torch.","")}, store {plan[i]-stream_count} layers, stream {stream_count} layers')
|
||||
plan[i] += (0 if i == 0 else plan[i-1])
|
||||
self.strategy = [None] * (args.n_layer + 1)
|
||||
strategy = self.strategy
|
||||
for n in range(args.n_layer + 1):
|
||||
for i in range(len(s)):
|
||||
if n < plan[i]:
|
||||
strategy[n] = types.SimpleNamespace()
|
||||
strategy[n].device = s[i][0]
|
||||
strategy[n].atype = s[i][1][0]
|
||||
strategy[n].wtype = s[i][1][1]
|
||||
strategy[n].stream = False
|
||||
if i == stream_i and n >= (plan[i] - stream_count):
|
||||
strategy[n].stream = True
|
||||
break
|
||||
prxxx(f"{n}-{strategy[n].device}-{str(strategy[n].atype).replace('torch.','')}-{str(strategy[n].wtype).replace('torch.','')}{'-stream' if strategy[n].stream else ''}",end=' ')
|
||||
prxxx()
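As a worked example of the allocation above (hypothetical strategy strings, not taken from this diff): for a 24-layer model, to_allocate is 25 slots (n_layer plus one for ln_out/head). "cuda fp16 *10 -> cpu fp32" pins 10 layers on the GPU in fp16 and leaves the remaining 15 slots to the cpu fp32 entry; "cuda fp16 *10+ -> cpu fp32" marks the cuda entry as the streaming device with the trailing "+", so it stores 10 layers and streams the other 15 from pinned CPU memory on demand; a bare "cuda fp16i8" keeps everything on the GPU with the large weights quantized to uint8.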
####################### Load weights to self.w
|
||||
|
||||
if not ALREADY_CONVERTED:
|
||||
try: # precompute embedding
|
||||
w['emb.weight'] = F.layer_norm(w['emb.weight'], (args.n_embd,), weight=w['blocks.0.ln0.weight'], bias=w['blocks.0.ln0.bias'])
|
||||
except:
|
||||
w['emb.weight'] = F.layer_norm(w['emb.weight'].float(), (args.n_embd,), weight=w['blocks.0.ln0.weight'].float(), bias=w['blocks.0.ln0.bias'].float())
|
||||
del w['blocks.0.ln0.weight']
|
||||
del w['blocks.0.ln0.bias']
|
||||
|
||||
print_need_newline = False
|
||||
keys = list(w.keys())
|
||||
for x in keys:
|
||||
w[x].requires_grad = False
|
||||
layer_id = int(x.split('.')[1]) if ('blocks.' in x) else 0
|
||||
if ('ln_out.' in x) or ('head.' in x):
|
||||
layer_id = args.n_layer
|
||||
dd = strategy[layer_id]
|
||||
DEVICE = dd.device
|
||||
ATYPE = dd.atype
|
||||
WTYPE = dd.wtype
|
||||
|
||||
if not ALREADY_CONVERTED:
|
||||
if self.RESCALE_LAYER > 0:
|
||||
if 'att.output.weight' in x:
|
||||
w[x] = w[x] / (2 ** int(layer_id // self.RESCALE_LAYER))
|
||||
if 'ffn.value.weight' in x:
|
||||
w[x] = w[x] / (2 ** int(layer_id // self.RESCALE_LAYER))
|
||||
|
||||
if '.time_' in x:
|
||||
w[x] = w[x].squeeze()
|
||||
if 'key.weight' in x or 'value.weight' in x or 'receptance.weight' in x or 'output.weight' in x or 'head.weight' in x:
|
||||
w[x] = w[x].t()
|
||||
|
||||
if '.time_decay' in x: # need fp32 for this
|
||||
w[x] = -torch.exp(w[x].float())
|
||||
elif '.time_first' in x: # need fp32 for this
|
||||
w[x] = w[x].float()
|
||||
else:
|
||||
if (len(w[x].shape) == 2) and ('emb' not in x):
|
||||
if WTYPE != torch.uint8:
|
||||
w[x] = w[x].to(dtype=WTYPE)
|
||||
else:
|
||||
w[x] = w[x].float()
|
||||
|
||||
if w[x].shape[0] > w[x].shape[1]:
|
||||
w[x+'_my'] = torch.amin(w[x], dim=1).unsqueeze(1)
|
||||
w[x] = w[x] - w[x+'_my']
|
||||
w[x+'_mx'] = torch.amin(w[x], dim=0)
|
||||
w[x] = w[x] - w[x+'_mx']
|
||||
w[x+'_rx'] = torch.amax(w[x], dim=0)
|
||||
w[x] = w[x] / w[x+'_rx']
|
||||
w[x+'_ry'] = torch.amax(w[x], dim=1).unsqueeze(1)
|
||||
w[x] = w[x] / w[x+'_ry']
|
||||
else:
|
||||
w[x+'_mx'] = torch.amin(w[x], dim=0)
|
||||
w[x] = w[x] - w[x+'_mx']
|
||||
w[x+'_my'] = torch.amin(w[x], dim=1).unsqueeze(1)
|
||||
w[x] = w[x] - w[x+'_my']
|
||||
w[x+'_rx'] = torch.amax(w[x], dim=0)
|
||||
w[x] = w[x] / w[x+'_rx']
|
||||
w[x+'_ry'] = torch.amax(w[x], dim=1).unsqueeze(1)
|
||||
w[x] = w[x] / w[x+'_ry']
|
||||
|
||||
w[x] = torch.clip(torch.floor(w[x] * 256), min=0, max=255).to(dtype=torch.uint8)
|
||||
w[x+'_mx'] = w[x+'_mx'].to(dtype=ATYPE).contiguous()
|
||||
w[x+'_rx'] = (w[x+'_rx'] / 16).to(dtype=ATYPE).contiguous()
|
||||
w[x+'_my'] = w[x+'_my'].to(dtype=ATYPE).contiguous()
|
||||
w[x+'_ry'] = (w[x+'_ry'] / 16).to(dtype=ATYPE).contiguous()
|
||||
else:
|
||||
w[x] = w[x].to(dtype=ATYPE)
|
||||
|
||||
if convert_and_save_and_exit == None:
|
||||
if 'emb.' in x:
|
||||
w[x] = w[x].contiguous()
|
||||
elif (dd.stream) and (x.endswith('key.weight') or x.endswith('value.weight') or x.endswith('receptance.weight') or x.endswith('output.weight')):
|
||||
try:
|
||||
w[x] = w[x].contiguous().pin_memory() # if you see "CUDA error: out of memory" here, that's out of CPU RAM, not VRAM. Get more RAM :)
|
||||
except:
|
||||
print('Note: You are running out of RAM. Get more CPU RAM. Now this will run much slower.')
|
||||
elif DEVICE != 'cpu':
|
||||
w[x] = w[x].to(device=DEVICE).contiguous()
|
||||
|
||||
if (dd.stream) or (DEVICE != 'cpu'):
|
||||
try:
|
||||
w[x+'_mx'] = w[x+'_mx'].to(device=DEVICE).contiguous()
|
||||
w[x+'_rx'] = w[x+'_rx'].to(device=DEVICE).contiguous()
|
||||
w[x+'_my'] = w[x+'_my'].to(device=DEVICE).contiguous()
|
||||
w[x+'_ry'] = w[x+'_ry'].to(device=DEVICE).contiguous()
|
||||
except:
|
||||
pass
|
||||
|
||||
if 'ffn.value.weight' in x:
|
||||
gc.collect()
|
||||
if 'cuda' in args.strategy_string:
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
shape = [i for i in w[x].shape if i != 1]
|
||||
if len(shape) > 1:
|
||||
shape = f" {str(shape[0]).rjust(5)} {str(shape[1]).rjust(5)}"
|
||||
else:
|
||||
shape = f" {str(shape[0]).rjust(5)} "
|
||||
if layer_id == 0 or layer_id >= args.n_layer-1:
|
||||
if print_need_newline:
|
||||
prxxx('\n', end = '')
|
||||
print_need_newline = False
|
||||
dt = str(w[x].dtype).replace('torch.', '')
|
||||
dt = dt.replace('float32', 'f32').replace('bfloat16', 'bf16').replace('float16', 'f16').replace('uint8', 'i8')
|
||||
prxxx(x.ljust(32), dt.rjust(4), str(w[x].device).rjust(8), shape, ' (pinned)' if w[x].is_pinned() else '')
|
||||
else:
|
||||
print_need_newline = True
|
||||
prxxx('.', end = '', flush = True)
|
||||
|
||||
if convert_and_save_and_exit:
|
||||
w['_strategy'] = args.strategy_string
|
||||
w['_rescale_layer'] = self.RESCALE_LAYER
|
||||
w['_version'] = '0.7'
|
||||
if not convert_and_save_and_exit.endswith('.pth'):
|
||||
convert_and_save_and_exit += '.pth'
|
||||
prxxx(f'Saving to {convert_and_save_and_exit}...')
|
||||
torch.save(w, convert_and_save_and_exit)
|
||||
prxxx(f'Converted and saved. Now this will exit.')
|
||||
exit(0)
|
||||
|
||||
gc.collect()
|
||||
if 'cuda' in args.strategy_string:
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
@MyFunction
|
||||
def torch_mm8_seq(self, x, w, mx, rx, my, ry):
|
||||
return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)
|
||||
|
||||
@MyFunction
|
||||
def torch_mm8_one(self, x, w, mx, rx, my, ry):
|
||||
return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)
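The uint8 path inverts the per-row/per-column quantization performed during loading (the _mx/_rx/_my/_ry tensors built in __init__ below): weights are shifted by row and column minima, scaled into [0, 1] by column and row maxima, and stored as floor(w * 256); the stored rx and ry are each divided by 16, so (w + 0.5) * ry * rx supplies the missing 1/256 (16 * 16 == 256) and the +0.5 centers each integer bin. A round-trip sketch, illustrative rather than the library code:

import torch

W0 = torch.randn(1024, 512)
W = W0.clone()
my = W.amin(dim=1, keepdim=True); W = W - my      # subtract per-row minimum
mx = W.amin(dim=0);               W = W - mx      # subtract per-column minimum
rx = W.amax(dim=0);               W = W / rx      # scale columns into [0, 1]
ry = W.amax(dim=1, keepdim=True); W = W / ry      # scale rows into [0, 1]
Wq = torch.clip(torch.floor(W * 256), min=0, max=255).to(torch.uint8)

W_hat = (Wq.float() + 0.5) * (ry / 16) * (rx / 16) + my + mx
print((W0 - W_hat).abs().max())  # small quantization error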
if os.environ.get('RWKV_CUDA_ON') == '1':
|
||||
@MyFunction
|
||||
def mm8_seq(self, x, w, mx, rx, my, ry):
|
||||
if w.device.type == 'cuda' and x.dtype == torch.float16:
|
||||
B, N, M = x.shape[0], w.shape[0], w.shape[1]
|
||||
return cuda_mm8_seq(B, N, M, x, w, mx, rx, my, ry)
|
||||
else:
|
||||
return self.torch_mm8_seq(x, w, mx, rx, my, ry)
|
||||
@MyFunction
|
||||
def mm8_one(self, x, w, mx, rx, my, ry):
|
||||
if w.device.type == 'cuda':
|
||||
N, M = w.shape[0], w.shape[1]
|
||||
return cuda_mm8_one(N, M, x, w, mx, rx, my, ry)
|
||||
else:
|
||||
return self.torch_mm8_one(x, w, mx, rx, my, ry)
|
||||
else:
|
||||
@MyFunction
|
||||
def mm8_seq(self, x, w, mx, rx, my, ry):
|
||||
return self.torch_mm8_seq(x, w, mx, rx, my, ry)
|
||||
@MyFunction
|
||||
def mm8_one(self, x, w, mx, rx, my, ry):
|
||||
return self.torch_mm8_one(x, w, mx, rx, my, ry)
|
||||
|
||||
########################################################################################################
|
||||
|
||||
@MyFunction
|
||||
def ffn_one(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
|
||||
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
|
||||
r = torch.sigmoid(rx @ rw)
|
||||
vx = torch.square(torch.relu(kx @ kw))
|
||||
out = r * (vx @ vw)
|
||||
return x + out, xx
|
||||
|
||||
@MyFunction
|
||||
def ffn_one_i8(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
|
||||
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
|
||||
r = torch.sigmoid(self.mm8_one(rx, rw, rmx, rrx, rmy, rry))
|
||||
vx = torch.square(torch.relu(self.mm8_one(kx, kw, kmx, krx, kmy, kry)))
|
||||
out = r * (self.mm8_one(vx, vw, vmx, vrx, vmy, vry))
|
||||
return x + out, xx
|
||||
|
||||
########################################################################################################
|
||||
|
||||
@MyFunction
|
||||
def ffn_seq(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
|
||||
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
|
||||
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
|
||||
r = torch.sigmoid(rx @ rw)
|
||||
vx = torch.square(torch.relu(kx @ kw))
|
||||
out = r * (vx @ vw)
|
||||
return x + out, xx[-1,:]
|
||||
|
||||
@MyFunction
|
||||
def ffn_seq_i8(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
|
||||
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
|
||||
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
|
||||
r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
|
||||
vx = torch.square(torch.relu(self.mm8_seq(kx, kw, kmx, krx, kmy, kry)))
|
||||
out = r * (self.mm8_seq(vx, vw, vmx, vrx, vmy, vry))
|
||||
return x + out, xx[-1,:]
|
||||
|
||||
########################################################################################################
|
||||
|
||||
@MyFunction
|
||||
def att_one(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
|
||||
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
vx = xx * v_mix + sx * (1 - v_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
|
||||
r = torch.sigmoid(rx @ rw)
|
||||
k = (kx @ kw).float()
|
||||
v = (vx @ vw).float()
|
||||
|
||||
ww = t_first + k
|
||||
p = torch.maximum(pp, ww)
|
||||
e1 = torch.exp(pp - p)
|
||||
e2 = torch.exp(ww - p)
|
||||
wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
|
||||
ww = t_decay + pp
|
||||
p = torch.maximum(ww, k)
|
||||
e1 = torch.exp(ww - p)
|
||||
e2 = torch.exp(k - p)
|
||||
|
||||
out = (r * wkv) @ ow
|
||||
return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p
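A scalar restatement of the single-token WKV update above (u is time_first, w is time_decay after the -exp() transform applied at load time; the running exponent p keeps both exp() calls inside fp32 range):

import math

def wkv_step(u, w, k, v, a, b, p):
    q = max(p, u + k)                                # shared exponent for the output
    e1, e2 = math.exp(p - q), math.exp(u + k - q)
    out = (e1 * a + e2 * v) / (e1 * b + e2)
    q = max(w + p, k)                                # shared exponent for the state decay
    a = math.exp(w + p - q) * a + math.exp(k - q) * v
    b = math.exp(w + p - q) * b + math.exp(k - q)
    return out, (a, b, q)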
@MyFunction
|
||||
def att_one_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
|
||||
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
vx = xx * v_mix + sx * (1 - v_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
|
||||
r = torch.sigmoid(self.mm8_one(rx, rw, rmx, rrx, rmy, rry))
|
||||
k = (self.mm8_one(kx, kw, kmx, krx, kmy, kry)).float()
|
||||
v = (self.mm8_one(vx, vw, vmx, vrx, vmy, vry)).float()
|
||||
|
||||
ww = t_first + k
|
||||
p = torch.maximum(pp, ww)
|
||||
e1 = torch.exp(pp - p)
|
||||
e2 = torch.exp(ww - p)
|
||||
wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
|
||||
ww = t_decay + pp
|
||||
p = torch.maximum(ww, k)
|
||||
e1 = torch.exp(ww - p)
|
||||
e2 = torch.exp(k - p)
|
||||
|
||||
out = self.mm8_one(r * wkv, ow, omx, orx, omy, ory)
|
||||
return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p
|
||||
|
||||
########################################################################################################
|
||||
|
||||
@MyFunction
|
||||
def att_seq(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
|
||||
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
|
||||
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
vx = xx * v_mix + sx * (1 - v_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
|
||||
r = torch.sigmoid(rx @ rw)
|
||||
k = (kx @ kw).float()
|
||||
v = (vx @ vw).float()
|
||||
|
||||
T = x.shape[0]
|
||||
for t in range(T):
|
||||
kk = k[t]
|
||||
vv = v[t]
|
||||
ww = t_first + kk
|
||||
p = torch.maximum(pp, ww)
|
||||
e1 = torch.exp(pp - p)
|
||||
e2 = torch.exp(ww - p)
|
||||
sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
|
||||
ww = t_decay + pp
|
||||
p = torch.maximum(ww, kk)
|
||||
e1 = torch.exp(ww - p)
|
||||
e2 = torch.exp(kk - p)
|
||||
aa = e1 * aa + e2 * vv
|
||||
bb = e1 * bb + e2
|
||||
pp = p
|
||||
out = (r * sx) @ ow
|
||||
return x + out, xx[-1,:], aa, bb, pp
|
||||
|
||||
@MyFunction
|
||||
def att_seq_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
|
||||
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
|
||||
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
vx = xx * v_mix + sx * (1 - v_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
|
||||
r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
|
||||
k = self.mm8_seq(kx, kw, kmx, krx, kmy, kry).float()
|
||||
v = self.mm8_seq(vx, vw, vmx, vrx, vmy, vry).float()
|
||||
|
||||
T = x.shape[0]
|
||||
for t in range(T):
|
||||
kk = k[t]
|
||||
vv = v[t]
|
||||
ww = t_first + kk
|
||||
p = torch.maximum(pp, ww)
|
||||
e1 = torch.exp(pp - p)
|
||||
e2 = torch.exp(ww - p)
|
||||
sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
|
||||
ww = t_decay + pp
|
||||
p = torch.maximum(ww, kk)
|
||||
e1 = torch.exp(ww - p)
|
||||
e2 = torch.exp(kk - p)
|
||||
aa = e1 * aa + e2 * vv
|
||||
bb = e1 * bb + e2
|
||||
pp = p
|
||||
out = self.mm8_seq(r * sx, ow, omx, orx, omy, ory)
|
||||
return x + out, xx[-1,:], aa, bb, pp
|
||||
|
||||
########################################################################################################
|
||||
|
||||
if os.environ["RWKV_CUDA_ON"] == '1':
|
||||
@MyFunction
|
||||
def cuda_att_seq(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
|
||||
T, C = x.size()
|
||||
xx = F.layer_norm(x, (C,), weight=ln_w, bias=ln_b)
|
||||
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
vx = xx * v_mix + sx * (1 - v_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
|
||||
r = torch.sigmoid(rx @ rw)
|
||||
k = kx @ kw
|
||||
v = vx @ vw
|
||||
y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)
|
||||
|
||||
out = (r * y) @ ow
|
||||
return x + out, xx[-1,:], aa, bb, pp
|
||||
|
||||
@MyFunction
|
||||
def cuda_att_seq_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
|
||||
T, C = x.size()
|
||||
xx = F.layer_norm(x, (C,), weight=ln_w, bias=ln_b)
|
||||
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
|
||||
kx = xx * k_mix + sx * (1 - k_mix)
|
||||
vx = xx * v_mix + sx * (1 - v_mix)
|
||||
rx = xx * r_mix + sx * (1 - r_mix)
|
||||
|
||||
r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
|
||||
k = self.mm8_seq(kx, kw, kmx, krx, kmy, kry)
|
||||
v = self.mm8_seq(vx, vw, vmx, vrx, vmy, vry)
|
||||
y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)
|
||||
|
||||
out = self.mm8_seq(r * y, ow, omx, orx, omy, ory)
|
||||
return x + out, xx[-1,:], aa, bb, pp
|
||||
|
||||
########################################################################################################
|
||||
|
||||
def forward(self, tokens, state, full_output=False):
|
||||
with torch.no_grad():
|
||||
w = self.w
|
||||
args = self.args
|
||||
|
||||
if state == None:
|
||||
state = [None] * args.n_layer * 5
|
||||
for i in range(args.n_layer): # state: 0=att_xx 1=att_aa 2=att_bb 3=att_pp 4=ffn_xx
|
||||
dd = self.strategy[i]
|
||||
dev = dd.device
|
||||
atype = dd.atype
|
||||
state[i*5+0] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
|
||||
state[i*5+1] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous()
|
||||
state[i*5+2] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous()
|
||||
state[i*5+3] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous() - 1e30
|
||||
state[i*5+4] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
|
||||
|
||||
seq_mode = len(tokens) > 1
|
||||
|
||||
x = w['emb.weight'][tokens if seq_mode else tokens[0]]
|
||||
|
||||
for i in range(args.n_layer):
|
||||
bbb = f'blocks.{i}.'
|
||||
att = f'blocks.{i}.att.'
|
||||
ffn = f'blocks.{i}.ffn.'
|
||||
dd = self.strategy[i]
|
||||
dev = dd.device
|
||||
atype = dd.atype
|
||||
wtype = dd.wtype
|
||||
if seq_mode:
|
||||
if 'cuda' in str(dev) and os.environ["RWKV_CUDA_ON"] == '1':
|
||||
ATT = self.cuda_att_seq if wtype != torch.uint8 else self.cuda_att_seq_i8
|
||||
else:
|
||||
ATT = self.att_seq if wtype != torch.uint8 else self.att_seq_i8
|
||||
FFN = self.ffn_seq if wtype != torch.uint8 else self.ffn_seq_i8
|
||||
else:
|
||||
ATT = self.att_one if wtype != torch.uint8 else self.att_one_i8
|
||||
FFN = self.ffn_one if wtype != torch.uint8 else self.ffn_one_i8
|
||||
|
||||
x = x.to(dtype=atype, device=dev)
|
||||
|
||||
kw = w[f'{att}key.weight']
|
||||
vw = w[f'{att}value.weight']
|
||||
rw = w[f'{att}receptance.weight']
|
||||
ow = w[f'{att}output.weight']
|
||||
if dd.stream:
|
||||
kw = kw.to(device=dev, non_blocking=True)
|
||||
vw = vw.to(device=dev, non_blocking=True)
|
||||
rw = rw.to(device=dev, non_blocking=True)
|
||||
ow = ow.to(device=dev, non_blocking=True)
|
||||
kmx = w[f'{att}key.weight_mx'] if wtype == torch.uint8 else x
|
||||
krx = w[f'{att}key.weight_rx'] if wtype == torch.uint8 else x
|
||||
kmy = w[f'{att}key.weight_my'] if wtype == torch.uint8 else x
|
||||
kry = w[f'{att}key.weight_ry'] if wtype == torch.uint8 else x
|
||||
vmx = w[f'{att}value.weight_mx'] if wtype == torch.uint8 else x
|
||||
vrx = w[f'{att}value.weight_rx'] if wtype == torch.uint8 else x
|
||||
vmy = w[f'{att}value.weight_my'] if wtype == torch.uint8 else x
|
||||
vry = w[f'{att}value.weight_ry'] if wtype == torch.uint8 else x
|
||||
rmx = w[f'{att}receptance.weight_mx'] if wtype == torch.uint8 else x
|
||||
rrx = w[f'{att}receptance.weight_rx'] if wtype == torch.uint8 else x
|
||||
rmy = w[f'{att}receptance.weight_my'] if wtype == torch.uint8 else x
|
||||
rry = w[f'{att}receptance.weight_ry'] if wtype == torch.uint8 else x
|
||||
omx = w[f'{att}output.weight_mx'] if wtype == torch.uint8 else x
|
||||
orx = w[f'{att}output.weight_rx'] if wtype == torch.uint8 else x
|
||||
omy = w[f'{att}output.weight_my'] if wtype == torch.uint8 else x
|
||||
ory = w[f'{att}output.weight_ry'] if wtype == torch.uint8 else x
|
||||
x, state[i*5+0], state[i*5+1], state[i*5+2], state[i*5+3] = ATT(
|
||||
x, state[i*5+0], state[i*5+1], state[i*5+2], state[i*5+3],
|
||||
w[f'{bbb}ln1.weight'], w[f'{bbb}ln1.bias'],
|
||||
w[f'{att}time_mix_k'], w[f'{att}time_mix_v'], w[f'{att}time_mix_r'],
|
||||
w[f'{att}time_decay'], w[f'{att}time_first'],
|
||||
kw, vw, rw, ow,
|
||||
kmx, krx, kmy, kry,
|
||||
vmx, vrx, vmy, vry,
|
||||
rmx, rrx, rmy, rry,
|
||||
omx, orx, omy, ory,
|
||||
)
|
||||
if dd.stream:
|
||||
del kw, vw, rw, ow
|
||||
|
||||
kw = w[f'{ffn}key.weight']
|
||||
vw = w[f'{ffn}value.weight']
|
||||
rw = w[f'{ffn}receptance.weight']
|
||||
if dd.stream:
|
||||
kw = kw.to(device=dev, non_blocking=True)
|
||||
vw = vw.to(device=dev, non_blocking=True)
|
||||
rw = rw.to(device=dev, non_blocking=True)
|
||||
kmx = w[f'{ffn}key.weight_mx'] if wtype == torch.uint8 else x
|
||||
krx = w[f'{ffn}key.weight_rx'] if wtype == torch.uint8 else x
|
||||
kmy = w[f'{ffn}key.weight_my'] if wtype == torch.uint8 else x
|
||||
kry = w[f'{ffn}key.weight_ry'] if wtype == torch.uint8 else x
|
||||
vmx = w[f'{ffn}value.weight_mx'] if wtype == torch.uint8 else x
|
||||
vrx = w[f'{ffn}value.weight_rx'] if wtype == torch.uint8 else x
|
||||
vmy = w[f'{ffn}value.weight_my'] if wtype == torch.uint8 else x
|
||||
vry = w[f'{ffn}value.weight_ry'] if wtype == torch.uint8 else x
|
||||
rmx = w[f'{ffn}receptance.weight_mx'] if wtype == torch.uint8 else x
|
||||
rrx = w[f'{ffn}receptance.weight_rx'] if wtype == torch.uint8 else x
|
||||
rmy = w[f'{ffn}receptance.weight_my'] if wtype == torch.uint8 else x
|
||||
rry = w[f'{ffn}receptance.weight_ry'] if wtype == torch.uint8 else x
|
||||
x, state[i*5+4] = FFN(
|
||||
x, state[i*5+4],
|
||||
w[f'{bbb}ln2.weight'], w[f'{bbb}ln2.bias'],
|
||||
w[f'{ffn}time_mix_k'], w[f'{ffn}time_mix_r'],
|
||||
kw, vw, rw,
|
||||
kmx, krx, kmy, kry,
|
||||
vmx, vrx, vmy, vry,
|
||||
rmx, rrx, rmy, rry,
|
||||
)
|
||||
if dd.stream:
|
||||
del kw, vw, rw
|
||||
|
||||
if self.RESCALE_LAYER > 0:
|
||||
if (i+1) % self.RESCALE_LAYER == 0:
|
||||
x = x / 2
|
||||
|
||||
dd = self.strategy[args.n_layer]
|
||||
x = x[-1,:] if (seq_mode and (not full_output)) else x
|
||||
x = x.to(dtype=dd.atype, device=dd.device)
|
||||
|
||||
x = F.layer_norm(x, (args.n_embd,), weight=w['ln_out.weight'], bias=w['ln_out.bias'])
|
||||
if w['head.weight'].dtype != torch.uint8:
|
||||
x = x @ w['head.weight']
|
||||
else:
|
||||
if seq_mode and full_output:
|
||||
x = self.mm8_seq(x, w['head.weight'], w['head.weight_mx'], w['head.weight_rx'], w['head.weight_my'], w['head.weight_ry'])
|
||||
else:
|
||||
x = self.mm8_one(x, w['head.weight'], w['head.weight_mx'], w['head.weight_rx'], w['head.weight_my'], w['head.weight_ry'])
|
||||
|
||||
return x.float(), state
|
||||
@@ -1,7 +1,5 @@
|
||||
{"text": "1:This is the first document."}
|
||||
{"text": "2:Hello\nWorld"}
|
||||
{"text": "3:1+1=2\n1+2=3\n2+2=4"}
|
||||
{"text": "4:You will be training the GPT version because it's paralleziable and faster to train."}
|
||||
{"text": "5:Read the inference code in src/model.py and try using the final hidden state(.xx .aa .bb)"}
|
||||
{"text": "6:You can fine-tune the model with longer ctxLen and it can quickly adapt to longer ctxLens."}
|
||||
{"text": "7:Consider RWKV 14B. The state has 200 vectors, that is, 5 vectors for each block: fp16 (xx), fp32 (aa), fp32 (bb), fp32 (pp), fp16 (xx)."}
|
||||
{"text": "The following is an epic science fiction masterpiece that is immortalized, with delicate descriptions and grand depictions of interstellar civilization wars.\nChapter 1.\nAs I sit down to write here amidst the shadows of vine-leaves under the blue sky of southern Italy, it comes to me with a certain quality of astonishment that my participation in these amazing adventures of Mr. Cavor was, after all, the outcome of the purest accident. It might have been any one. I fell into these things at a time when I thought myself removed from the slightest possibility of disturbing experiences. I had gone to Lympne because I had imagined it the most uneventful place in the world. “Here, at any rate,” said I, “I shall find peace and a chance to work!”"}
|
||||
{"text": "Translate the following into Chinese.\n\nEnglish: What rooms do you have available?\nChinese: 你们有哪些房间可以提供"}
|
||||
{"text": "User: Hello.\n\nAssistant: I'm here, meow~.\n\nUser: Can you tell some jokes?\n\nAssistant: Of course, master. What kind of jokes would you like to hear?"}
|
||||
{"text": "Instruction: Write a story using the following information\n\nInput: A man named Alex chops a tree down\n\nResponse: Once upon a time, there was a man named Alex who lived in the heart of the forest. He had always been fascinated by trees and spent most of his days exploring the forest and learning about its many wonders. One day, while wandering through the woods, he stumbled upon an old oak tree that stood tall and proud in the middle of a clearing."}
|
||||
{"text": "def get_args(args: Union[Sequence[str], None] = None):\n parser = argparse.ArgumentParser()\n group = parser.add_argument_group(title=\"server arguments\")\n group.add_argument(\n \"--port\",\n type=int,\n default=8000,\n help=\"port to run the server on (default: 8000)\",\n )\n group.add_argument(\n \"--host\",\n type=str,\n default=\"127.0.0.1\",\n help=\"host to run the server on (default: 127.0.0.1)\",\n )"}
|
||||
@@ -100,7 +100,7 @@
|
||||
"Model Config Exception": "モデル設定例外",
|
||||
"Use Gitee Updates Source": "Gitee更新ソースを使用",
|
||||
"Use Custom CUDA kernel to Accelerate": "カスタムCUDAカーネルを使用して加速",
|
||||
"Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues. If it fails to start, please turn off this option.": "このオプションを有効にすると、推論速度が大幅に向上し、一部のVRAMを節約できますが、互換性の問題が生じる可能性があります。起動に失敗した場合は、このオプションをオフにしてください。",
|
||||
"Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues (output garbled). If it fails to start, please turn off this option, or try to upgrade your gpu driver.": "このオプションを有効にすると、推論速度が大幅に向上し、一部のVRAMを節約できますが、互換性の問題 (文字化けを出力する) が生じる可能性があります。起動に失敗した場合は、このオプションを無効にするか、GPUドライバーをアップグレードしてみてください。",
|
||||
"Supported custom cuda file not found": "対応しているカスタムCUDAファイルが見つかりません",
|
||||
"Failed to copy custom cuda file": "カスタムCUDAファイルのコピーに失敗しました",
|
||||
"Downloading update, please wait. If it is not completed, please manually download the program from GitHub and replace the original program.": "更新をダウンロード中です、お待ちください。完了しない場合は、GitHubから手動でプログラムをダウンロードし、元のプログラムを置き換えてください。",
|
||||
@@ -233,7 +233,7 @@
|
||||
"Matched CUDA is not installed": "対応するCUDAがインストールされていません",
|
||||
"Failed to convert data": "データの変換に失敗しました",
|
||||
"Failed to merge model": "モデルのマージに失敗しました",
|
||||
"The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\nWhen you provide a directory path, all the txt files within that directory will be automatically converted into training data. This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\nThe jsonl format file can be referenced at https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl.\nYou can also write it similar to OpenAI's playground format, as shown in https://platform.openai.com/playground/p/default-chat.\nEven for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. If they are different dialogues or topics, they should be written in separate lines.": "データのパスはディレクトリまたはjsonl形式のファイルでなければなりません(将来的にはより多くの形式がサポートされる予定です)。ディレクトリパスを提供した場合、そのディレクトリ内のすべてのtxtファイルが自動的にトレーニングデータに変換されます。これは大規模なライティング、コード生成、または知識ベースのトレーニングで一般的に使用されます。jsonl形式のファイルは、https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl を参照してください。\nhttps://platform.openai.com/playground/p/default-chat のように、OpenAIのプレイグラウンド形式に似た形式で書くこともできます。複数ターンの対話であっても、一行で書く必要があり、行の区切りを示すために`\\n`を使用します。それらが異なる対話やトピックであれば、それらは別々の行に書かれるべきです。",
|
||||
"The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\nWhen you provide a directory path, all the txt files within that directory will be automatically converted into training data. This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\nThe jsonl format file can be referenced at https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl.\nYou can also write it similar to OpenAI's playground format, as shown in https://platform.openai.com/playground/p/default-chat.\nEven for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. If they are different dialogues or topics, they should be written in separate lines.": "データのパスはディレクトリまたはjsonl形式のファイルでなければなりません(将来的にはより多くの形式がサポートされる予定です)。ディレクトリパスを提供した場合、そのディレクトリ内のすべてのtxtファイルが自動的にトレーニングデータに変換されます。これは大規模なライティング、コード生成、または知識ベースのトレーニングで一般的に使用されます。jsonl形式のファイルは、https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl を参照してください。\nhttps://platform.openai.com/playground/p/default-chat のように、OpenAIのプレイグラウンド形式に似た形式で書くこともできます。複数ターンの対話であっても、一行で書く必要があり、行の区切りを示すために`\\n`を使用します。それらが異なる対話やトピックであれば、それらは別々の行に書かれるべきです。",
|
||||
"Size mismatch for blocks. You are attempting to continue training from the LoRA model, but it does not match the base model. Please set LoRA model to None.": "ブロックのサイズが一致しません。LoRAモデルからトレーニングを続けようとしていますが、それはベースモデルと一致しません。LoRAモデルをNoneに設定してください。",
|
||||
"Instruction: Write a story using the following information\n\nInput: A man named Alex chops a tree down\n\nResponse:": "Instruction: Write a story using the following information\n\nInput: アレックスという男が木を切り倒す\n\nResponse:",
|
||||
"Composition": "作曲",
|
||||
@@ -253,5 +253,14 @@
|
||||
"Tokenizer Path (e.g. backend-python/rwkv_pip/20B_tokenizer.json)": "トークナイザーパス (例: backend-python/rwkv_pip/20B_tokenizer.json)",
|
||||
"User Name": "ユーザー名",
|
||||
"Assistant Name": "アシスタント名",
|
||||
"Insert default system prompt at the beginning": "最初にデフォルトのシステムプロンプトを挿入"
|
||||
"Insert default system prompt at the beginning": "最初にデフォルトのシステムプロンプトを挿入",
|
||||
"Format Content": "内容フォーマットの規格化",
|
||||
"Add An Attachment (Accepts pdf, txt)": "添付ファイルを追加 (pdf, txtを受け付けます)",
|
||||
"Uploading Attachment": "添付ファイルアップロード中",
|
||||
"Remove Attachment": "添付ファイルを削除",
|
||||
"The content of file": "ファイル",
|
||||
"is as follows. When replying to me, consider the file content and respond accordingly:": "の内容は以下の通りです。私に返信する際は、ファイルの内容を考慮して適切に返信してください:",
|
||||
"What's the file name": "ファイル名は何ですか",
|
||||
"The file name is: ": "ファイル名は次のとおりです: ",
|
||||
"Port is occupied. Change it in Configs page or close the program that occupies the port.": "ポートが占有されています。設定ページで変更するか、ポートを占有しているプログラムを終了してください。"
|
||||
}
|
||||
@@ -100,7 +100,7 @@
|
||||
"Model Config Exception": "模型配置异常",
|
||||
"Use Gitee Updates Source": "使用Gitee更新源",
|
||||
"Use Custom CUDA kernel to Accelerate": "使用自定义CUDA算子加速",
|
||||
"Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues. If it fails to start, please turn off this option.": "开启这个选项能大大提升推理速度并节省显存,但可能存在兼容性问题,如果启动失败,请关闭此选项",
|
||||
"Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues (output garbled). If it fails to start, please turn off this option, or try to upgrade your gpu driver.": "开启这个选项能大大提升推理速度并节省显存,但可能存在兼容性(回复乱码)问题,如果发生相关问题,请关闭此选项。或更新你的显卡驱动",
|
||||
"Supported custom cuda file not found": "没有找到支持的自定义cuda文件",
|
||||
"Failed to copy custom cuda file": "自定义cuda文件复制失败",
|
||||
"Downloading update, please wait. If it is not completed, please manually download the program from GitHub and replace the original program.": "正在下载更新,请等待。如果一直未完成,请从Github手动下载并覆盖原程序",
|
||||
@@ -233,7 +233,7 @@
|
||||
"Matched CUDA is not installed": "未安装匹配的CUDA",
|
||||
"Failed to convert data": "数据转换失败",
|
||||
"Failed to merge model": "合并模型失败",
|
||||
"The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\nWhen you provide a directory path, all the txt files within that directory will be automatically converted into training data. This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\nThe jsonl format file can be referenced at https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl.\nYou can also write it similar to OpenAI's playground format, as shown in https://platform.openai.com/playground/p/default-chat.\nEven for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. If they are different dialogues or topics, they should be written in separate lines.": "数据路径必须是一个文件夹,或者jsonl格式文件 (未来会支持更多格式)\n\n当你填写的路径是一个文件夹时,该文件夹内的所有txt文件会被自动转换为训练数据,通常这用于大批量训练写作,代码生成或知识库\n\njsonl文件的格式参考 https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl\n你也可以仿照openai的playground编写,参考 https://platform.openai.com/playground/p/default-chat\n即使是多轮对话也必须写在一行,用`\\n`表示换行,如果是不同对话或主题,则另起一行",
|
||||
"The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\nWhen you provide a directory path, all the txt files within that directory will be automatically converted into training data. This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\nThe jsonl format file can be referenced at https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl.\nYou can also write it similar to OpenAI's playground format, as shown in https://platform.openai.com/playground/p/default-chat.\nEven for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. If they are different dialogues or topics, they should be written in separate lines.": "数据路径必须是一个文件夹,或者jsonl格式文件 (未来会支持更多格式)\n\n当你填写的路径是一个文件夹时,该文件夹内的所有txt文件会被自动转换为训练数据,通常这用于大批量训练写作,代码生成或知识库\n\njsonl文件的格式参考 https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl 以及 https://zhuanlan.zhihu.com/p/643433851\n你也可以仿照openai的playground编写,参考 https://platform.openai.com/playground/p/default-chat\n即使是多轮对话也必须写在一行,用`\\n`表示换行,如果是不同对话或主题,则另起一行",
|
||||
"Size mismatch for blocks. You are attempting to continue training from the LoRA model, but it does not match the base model. Please set LoRA model to None.": "尺寸不匹配块。你正在尝试从LoRA模型继续训练,但该LoRA模型与基底模型不匹配,请将LoRA模型设为空",
|
||||
"Instruction: Write a story using the following information\n\nInput: A man named Alex chops a tree down\n\nResponse:": "Instruction: Write a story using the following information\n\nInput: 艾利克斯砍倒了一棵树\n\nResponse:",
|
||||
"Composition": "作曲",
|
||||
@@ -253,5 +253,14 @@
|
||||
"Tokenizer Path (e.g. backend-python/rwkv_pip/20B_tokenizer.json)": "Tokenizer路径 (例如: backend-python/rwkv_pip/20B_tokenizer.json)",
|
||||
"User Name": "用户名称",
|
||||
"Assistant Name": "AI名称",
|
||||
"Insert default system prompt at the beginning": "在开头自动插入默认系统提示"
|
||||
"Insert default system prompt at the beginning": "在开头自动插入默认系统提示",
|
||||
"Format Content": "规范格式",
|
||||
"Add An Attachment (Accepts pdf, txt)": "添加一个附件 (支持pdf, txt)",
|
||||
"Uploading Attachment": "正在上传附件",
|
||||
"Remove Attachment": "移除附件",
|
||||
"The content of file": "文件",
|
||||
"is as follows. When replying to me, consider the file content and respond accordingly:": "内容如下。回复时考虑文件内容并做出相应回复:",
|
||||
"What's the file name": "文件名是什么",
|
||||
"The file name is: ": "文件名是:",
|
||||
"Port is occupied. Change it in Configs page or close the program that occupies the port.": "端口被占用。请在配置页面更改端口,或关闭占用端口的程序"
|
||||
}
|
||||
@@ -2,8 +2,8 @@ import React, { FC, MouseEventHandler, ReactElement } from 'react';
|
||||
import commonStore, { ModelStatus } from '../stores/commonStore';
|
||||
import {
|
||||
AddToDownloadList,
|
||||
CopyFile,
|
||||
FileExists,
|
||||
IsPortAvailable,
|
||||
StartServer,
|
||||
StartWebGPUServer
|
||||
} from '../../wailsjs/go/backend_golang/App';
|
||||
@@ -11,7 +11,7 @@ import { Button } from '@fluentui/react-components';
|
||||
import { observer } from 'mobx-react-lite';
|
||||
import { exit, getStatus, readRoot, switchModel, updateConfig } from '../apis';
|
||||
import { toast } from 'react-toastify';
|
||||
import { checkDependencies, getStrategy, getSupportedCustomCudaFile, toastWithButton } from '../utils';
|
||||
import { checkDependencies, getStrategy, toastWithButton } from '../utils';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { ToolTipButton } from './ToolTipButton';
|
||||
import { Play16Regular, Stop16Regular } from '@fluentui/react-icons';
|
||||
@@ -113,15 +113,23 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
|
||||
|
||||
const port = modelConfig.apiParameters.apiPort;
|
||||
|
||||
await exit(1000).catch(() => {
|
||||
});
|
||||
if (!await IsPortAvailable(port)) {
|
||||
await exit(1000).catch(() => {
|
||||
});
|
||||
if (!await IsPortAvailable(port)) {
|
||||
toast(t('Port is occupied. Change it in Configs page or close the program that occupies the port.'), { type: 'error' });
|
||||
commonStore.setStatus({ status: ModelStatus.Offline });
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const startServer = webgpu ?
|
||||
(_: string, port: number, host: string) => StartWebGPUServer(port, host)
|
||||
: StartServer;
|
||||
const isUsingCudaBeta = modelConfig.modelParameters.device === 'CUDA-Beta';
|
||||
|
||||
startServer(commonStore.settings.customPythonPath, port, commonStore.settings.host !== '127.0.0.1' ? '0.0.0.0' : '127.0.0.1',
|
||||
modelConfig.modelParameters.device === 'CUDA-Beta'
|
||||
isUsingCudaBeta
|
||||
).catch((e) => {
|
||||
const errMsg = e.message || e;
|
||||
if (errMsg.includes('path contains space'))
|
||||
@@ -162,22 +170,26 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
|
||||
if ((modelConfig.modelParameters.device.includes('CUDA') || modelConfig.modelParameters.device === 'Custom')
|
||||
&& modelConfig.modelParameters.useCustomCuda && !strategy.includes('fp32')) {
|
||||
if (commonStore.platform === 'windows') {
|
||||
customCudaFile = getSupportedCustomCudaFile();
|
||||
if (customCudaFile) {
|
||||
FileExists('./py310/Lib/site-packages/rwkv/model.py').then((exist) => {
|
||||
// defensive measure. As Python has already been launched, will only take effect the next time it runs.
|
||||
if (!exist) CopyFile('./backend-python/wkv_cuda_utils/wkv_cuda_model.py', './py310/Lib/site-packages/rwkv/model.py');
|
||||
});
|
||||
await CopyFile(customCudaFile, './py310/Lib/site-packages/rwkv/wkv_cuda.pyd').catch(() => {
|
||||
FileExists('./py310/Lib/site-packages/rwkv/wkv_cuda.pyd').then((exist) => {
|
||||
if (!exist) {
|
||||
customCudaFile = '';
|
||||
toast(t('Failed to copy custom cuda file'), { type: 'error' });
|
||||
}
|
||||
});
|
||||
});
|
||||
} else
|
||||
toast(t('Supported custom cuda file not found'), { type: 'warning' });
|
||||
// this part is currently unused because there's no longer a need to use different kernels for different GPUs, but it might still be needed in the future
|
||||
//
|
||||
// customCudaFile = getSupportedCustomCudaFile(isUsingCudaBeta);
|
||||
// if (customCudaFile) {
|
||||
// let kernelTargetPath: string;
|
||||
// if (isUsingCudaBeta)
|
||||
// kernelTargetPath = './backend-python/rwkv_pip/beta/wkv_cuda.pyd';
|
||||
// else
|
||||
// kernelTargetPath = './backend-python/rwkv_pip/wkv_cuda.pyd';
|
||||
// await CopyFile(customCudaFile, kernelTargetPath).catch(() => {
|
||||
// FileExists(kernelTargetPath).then((exist) => {
|
||||
// if (!exist) {
|
||||
// customCudaFile = '';
|
||||
// toast(t('Failed to copy custom cuda file'), { type: 'error' });
|
||||
// }
|
||||
// });
|
||||
// });
|
||||
// } else
|
||||
// toast(t('Supported custom cuda file not found'), { type: 'warning' });
|
||||
customCudaFile = 'any';
|
||||
} else {
|
||||
customCudaFile = 'any';
|
||||
}
|
||||
|
||||
@@ -10,14 +10,22 @@ import { KebabHorizontalIcon, PencilIcon, SyncIcon, TrashIcon } from '@primer/oc
|
||||
import logo from '../assets/images/logo.png';
|
||||
import MarkdownRender from '../components/MarkdownRender';
|
||||
import { ToolTipButton } from '../components/ToolTipButton';
|
||||
import { ArrowCircleUp28Regular, Delete28Regular, RecordStop28Regular, Save28Regular } from '@fluentui/react-icons';
|
||||
import {
|
||||
ArrowCircleUp28Regular,
|
||||
ArrowClockwise16Regular,
|
||||
Attach16Regular,
|
||||
Delete28Regular,
|
||||
Dismiss16Regular,
|
||||
RecordStop28Regular,
|
||||
Save28Regular
|
||||
} from '@fluentui/react-icons';
|
||||
import { CopyButton } from '../components/CopyButton';
|
||||
import { ReadButton } from '../components/ReadButton';
|
||||
import { toast } from 'react-toastify';
|
||||
import { WorkHeader } from '../components/WorkHeader';
|
||||
import { DialogButton } from '../components/DialogButton';
|
||||
import { OpenFileFolder, OpenSaveFileDialog } from '../../wailsjs/go/backend_golang/App';
|
||||
import { toastWithButton } from '../utils';
|
||||
import { OpenFileFolder, OpenOpenFileDialog, OpenSaveFileDialog } from '../../wailsjs/go/backend_golang/App';
|
||||
import { absPathAsset, bytesToReadable, toastWithButton } from '../utils';
|
||||
import { PresetsButton } from './PresetsManager/PresetsButton';
|
||||
import { useMediaQuery } from 'usehooks-ts';
|
||||
|
||||
@@ -57,7 +65,7 @@ export type ConversationMessage = {
|
||||
content: string;
|
||||
}
|
||||
|
||||
let chatSseController: AbortController | null = null;
|
||||
let chatSseControllers: { [id: string]: AbortController } = {};
|
||||
|
||||
const MoreUtilsButton: FC<{ uuid: string, setEditing: (editing: boolean) => void }> = observer(({
|
||||
uuid,
|
||||
@@ -83,6 +91,7 @@ const MoreUtilsButton: FC<{ uuid: string, setEditing: (editing: boolean) => void
|
||||
onClick={() => {
|
||||
commonStore.conversationOrder.splice(commonStore.conversationOrder.indexOf(uuid), 1);
|
||||
delete commonStore.conversation[uuid];
|
||||
commonStore.setAttachment(uuid, null);
|
||||
}} />
|
||||
</MenuPopover>
|
||||
</Menu>;
|
||||
@@ -114,6 +123,13 @@ const ChatMessageItem: FC<{
|
||||
}
|
||||
};
|
||||
|
||||
let avatarImg: string | undefined;
|
||||
if (commonStore.activePreset && messageItem.sender === botName) {
|
||||
avatarImg = absPathAsset(commonStore.activePreset.avatarImg);
|
||||
} else if (messageItem.avatarImg) {
|
||||
avatarImg = messageItem.avatarImg;
|
||||
}
|
||||
|
||||
return <div
|
||||
className={classnames(
|
||||
'flex gap-2 mb-2 overflow-hidden',
|
||||
@@ -131,7 +147,7 @@ const ChatMessageItem: FC<{
|
||||
<Avatar
|
||||
color={messageItem.color}
|
||||
name={messageItem.sender}
|
||||
image={(commonStore.activePreset && messageItem.sender === botName) ? { src: commonStore.activePreset.avatarImg } : messageItem.avatarImg ? { src: messageItem.avatarImg } : undefined}
|
||||
image={avatarImg ? { src: avatarImg } : undefined}
|
||||
/>
|
||||
<div
|
||||
className={classnames(
|
||||
@@ -142,13 +158,31 @@ const ChatMessageItem: FC<{
|
||||
)}
|
||||
>
|
||||
{!editing ?
|
||||
<MarkdownRender>{messageItem.content}</MarkdownRender> :
|
||||
<div className="flex flex-col">
|
||||
<MarkdownRender>{messageItem.content}</MarkdownRender>
|
||||
{uuid in commonStore.attachments &&
|
||||
<div className="flex grow">
|
||||
<div className="grow" />
|
||||
<ToolTipButton className="whitespace-nowrap"
|
||||
text={
|
||||
commonStore.attachments[uuid][0].name.replace(
|
||||
new RegExp('(^[^\\.]{5})[^\\.]+'), '$1...')
|
||||
}
|
||||
desc={`${commonStore.attachments[uuid][0].name} (${bytesToReadable(commonStore.attachments[uuid][0].size)})`}
|
||||
size="small" shape="circular" appearance="secondary" />
|
||||
</div>
|
||||
}
|
||||
</div> :
|
||||
<Textarea ref={textareaRef}
|
||||
className="grow"
|
||||
style={{ minWidth: 0 }}
|
||||
value={messageItem.content}
|
||||
onChange={(e) => {
|
||||
messageItem.content = e.target.value;
|
||||
commonStore.conversation[uuid].type = MessageType.Normal;
|
||||
commonStore.conversation[uuid].done = true;
|
||||
commonStore.setConversation(commonStore.conversation);
|
||||
commonStore.setConversationOrder([...commonStore.conversationOrder]);
|
||||
}}
|
||||
onBlur={() => {
|
||||
setEditingInner(false);
|
||||
@@ -166,6 +200,10 @@ const ChatMessageItem: FC<{
|
||||
messageItem.sender === botName && uuid !== welcomeUuid &&
|
||||
<ToolTipButton desc={t('Retry')} size="small" appearance="subtle"
|
||||
icon={<SyncIcon />} onClick={() => {
|
||||
if (uuid in chatSseControllers) {
|
||||
chatSseControllers[uuid].abort();
|
||||
delete chatSseControllers[uuid];
|
||||
}
|
||||
onSubmit(null, uuid, null, uuid, false);
|
||||
}} />
|
||||
}
|
||||
@@ -187,15 +225,7 @@ const ChatPanel: FC = observer(() => {
|
||||
const currentConfig = commonStore.getCurrentModelConfig();
|
||||
const apiParams = currentConfig.apiParameters;
|
||||
const port = apiParams.apiPort;
|
||||
|
||||
let lastMessageId: string;
|
||||
let generating: boolean = false;
|
||||
if (commonStore.conversationOrder.length > 0) {
|
||||
lastMessageId = commonStore.conversationOrder[commonStore.conversationOrder.length - 1];
|
||||
const lastMessage = commonStore.conversation[lastMessageId];
|
||||
if (lastMessage.sender === botName)
|
||||
generating = !lastMessage.done;
|
||||
}
|
||||
const generating: boolean = Object.keys(chatSseControllers).length > 0;
|
||||
|
||||
useEffect(() => {
|
||||
if (inputRef.current)
|
||||
@@ -260,6 +290,11 @@ const ChatPanel: FC = observer(() => {
|
||||
commonStore.setConversation(commonStore.conversation);
|
||||
commonStore.conversationOrder.push(newId);
|
||||
commonStore.setConversationOrder(commonStore.conversationOrder);
|
||||
|
||||
if (commonStore.currentTempAttachment) {
|
||||
commonStore.setAttachment(newId, [commonStore.currentTempAttachment]);
|
||||
commonStore.setCurrentTempAttachment(null);
|
||||
}
|
||||
}
|
||||
|
||||
let startIndex = startUuid ? commonStore.conversationOrder.indexOf(startUuid) : 0;
|
||||
@@ -271,6 +306,17 @@ const ChatPanel: FC = observer(() => {
|
||||
if (uuid === welcomeUuid)
|
||||
return;
|
||||
const messageItem = commonStore.conversation[uuid];
|
||||
if (uuid in commonStore.attachments) {
|
||||
const attachment = commonStore.attachments[uuid][0];
|
||||
messages.push({
|
||||
role: 'user',
|
||||
content: t('The content of file') + ` "${attachment.name}" `
|
||||
+ t('is as follows. When replying to me, consider the file content and respond accordingly:')
|
||||
+ '\n\n' + attachment.content
|
||||
});
|
||||
messages.push({ role: 'user', content: t('What\'s the file name') });
|
||||
messages.push({ role: 'assistant', content: t('The file name is: ') + attachment.name });
|
||||
}
|
||||
if (messageItem.done && messageItem.type === MessageType.Normal && messageItem.sender === userName) {
|
||||
messages.push({ role: 'user', content: messageItem.content });
|
||||
} else if (messageItem.done && messageItem.type === MessageType.Normal && messageItem.sender === botName) {
|
||||
@@ -296,7 +342,8 @@ const ChatPanel: FC = observer(() => {
|
||||
commonStore.setConversationOrder(commonStore.conversationOrder);
|
||||
setTimeout(scrollToBottom);
|
||||
let answer = '';
|
||||
chatSseController = new AbortController();
|
||||
const chatSseController = new AbortController();
|
||||
chatSseControllers[answerId] = chatSseController;
|
||||
fetchEventSource( // https://api.openai.com/v1/chat/completions || http://127.0.0.1:${port}/chat/completions
|
||||
commonStore.settings.apiUrl ?
|
||||
commonStore.settings.apiUrl + '/v1/chat/completions' :
|
||||
@@ -313,14 +360,16 @@ const ChatPanel: FC = observer(() => {
|
||||
model: commonStore.settings.apiChatModelName, // 'gpt-3.5-turbo'
|
||||
temperature: apiParams.temperature,
|
||||
top_p: apiParams.topP,
|
||||
user_name: commonStore.activePreset?.userName,
|
||||
assistant_name: commonStore.activePreset?.assistantName,
|
||||
presystem: commonStore.activePreset?.presystem
|
||||
user_name: commonStore.activePreset?.userName || undefined,
|
||||
assistant_name: commonStore.activePreset?.assistantName || undefined,
|
||||
presystem: commonStore.activePreset?.presystem && undefined
|
||||
}),
|
||||
signal: chatSseController?.signal,
|
||||
onmessage(e) {
|
||||
scrollToBottom();
|
||||
if (e.data.trim() === '[DONE]') {
|
||||
if (answerId! in chatSseControllers)
|
||||
delete chatSseControllers[answerId!];
|
||||
commonStore.conversation[answerId!].done = true;
|
||||
commonStore.conversation[answerId!].content = commonStore.conversation[answerId!].content.trim();
|
||||
commonStore.setConversation(commonStore.conversation);
|
||||
@@ -350,9 +399,13 @@ const ChatPanel: FC = observer(() => {
|
||||
}
|
||||
},
|
||||
onclose() {
|
||||
if (answerId! in chatSseControllers)
|
||||
delete chatSseControllers[answerId!];
|
||||
console.log('Connection closed');
|
||||
},
|
||||
onerror(err) {
|
||||
if (answerId! in chatSseControllers)
|
||||
delete chatSseControllers[answerId!];
|
||||
commonStore.conversation[answerId!].type = MessageType.Error;
|
||||
commonStore.conversation[answerId!].done = true;
|
||||
err = err.message || err;
|
||||
@@ -380,33 +433,124 @@ const ChatPanel: FC = observer(() => {
|
||||
size={mq ? 'large' : 'small'} shape="circular" appearance="subtle" title={t('Clear')}
|
||||
contentText={t('Are you sure you want to clear the conversation? It cannot be undone.')}
|
||||
onConfirm={() => {
|
||||
if (generating)
|
||||
chatSseController?.abort();
|
||||
if (generating) {
|
||||
for (const id in chatSseControllers) {
|
||||
chatSseControllers[id].abort();
|
||||
}
|
||||
chatSseControllers = {};
|
||||
}
|
||||
commonStore.setConversation({});
|
||||
commonStore.setConversationOrder([]);
|
||||
}} />
|
||||
<Textarea
|
||||
ref={inputRef}
|
||||
style={{ minWidth: 0 }}
|
||||
className="grow"
|
||||
resize="vertical"
|
||||
placeholder={t('Type your message here')!}
|
||||
value={commonStore.currentInput}
|
||||
onChange={(e) => commonStore.setCurrentInput(e.target.value)}
|
||||
onKeyDown={handleKeyDownOrClick}
|
||||
/>
|
||||
<div className="relative flex grow">
|
||||
<Textarea
|
||||
ref={inputRef}
|
||||
style={{ minWidth: 0 }}
|
||||
className="grow"
|
||||
resize="vertical"
|
||||
placeholder={t('Type your message here')!}
|
||||
value={commonStore.currentInput}
|
||||
onChange={(e) => commonStore.setCurrentInput(e.target.value)}
|
||||
onKeyDown={handleKeyDownOrClick}
|
||||
/>
|
||||
<div className="absolute right-2 bottom-2">
|
||||
{!commonStore.currentTempAttachment ?
|
||||
<ToolTipButton
|
||||
desc={commonStore.attachmentUploading ?
|
||||
t('Uploading Attachment') :
|
||||
t('Add An Attachment (Accepts pdf, txt)')}
|
||||
icon={commonStore.attachmentUploading ?
|
||||
<ArrowClockwise16Regular className="animate-spin" />
|
||||
: <Attach16Regular />}
|
||||
size="small" shape="circular" appearance="secondary"
|
||||
onClick={() => {
|
||||
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl) {
|
||||
toast(t('Please click the button in the top right corner to start the model'), { type: 'warning' });
|
||||
return;
|
||||
}
|
||||
|
||||
if (commonStore.attachmentUploading)
|
||||
return;
|
||||
|
||||
OpenOpenFileDialog('*.txt;*.pdf').then(async filePath => {
|
||||
if (!filePath)
|
||||
return;
|
||||
|
||||
commonStore.setAttachmentUploading(true);
|
||||
|
||||
// Both are slow. Communication between frontend and backend is slow. Use AssetServer Handler to read the file.
|
||||
// const blob = new Blob([atob(info.content as unknown as string)]); // await fetch(`data:application/octet-stream;base64,${info.content}`).then(r => r.blob());
|
||||
const blob = await fetch(absPathAsset(filePath)).then(r => r.blob());
|
||||
const attachmentName = filePath.split(/[\\/]/).pop();
|
||||
const urlPath = `/file-to-text?file_name=${attachmentName}`;
|
||||
const bodyForm = new FormData();
|
||||
bodyForm.append('file_data', blob, attachmentName);
|
||||
fetch(commonStore.settings.apiUrl ?
|
||||
commonStore.settings.apiUrl + urlPath :
|
||||
`http://127.0.0.1:${port}${urlPath}`, {
|
||||
method: 'POST',
|
||||
body: bodyForm
|
||||
}).then(async r => {
|
||||
if (r.status === 200) {
|
||||
const pages = (await r.json()).pages as any[];
|
||||
let attachmentContent: string;
|
||||
if (pages.length === 1)
|
||||
attachmentContent = pages[0].page_content;
|
||||
else
|
||||
attachmentContent = pages.map((p, i) => `Page ${i + 1}:\n${p.page_content}`).join('\n\n');
|
||||
commonStore.setCurrentTempAttachment(
|
||||
{
|
||||
name: attachmentName!,
|
||||
size: blob.size,
|
||||
content: attachmentContent
|
||||
});
|
||||
} else {
|
||||
toast(r.statusText + '\n' + (await r.text()), {
|
||||
type: 'error'
|
||||
});
|
||||
}
|
||||
commonStore.setAttachmentUploading(false);
|
||||
}
|
||||
).catch(e => {
|
||||
commonStore.setAttachmentUploading(false);
|
||||
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
|
||||
});
|
||||
}).catch(e => {
|
||||
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
|
||||
});
|
||||
}}
|
||||
/> :
|
||||
<div>
|
||||
<ToolTipButton
|
||||
text={
|
||||
commonStore.currentTempAttachment.name.replace(
|
||||
new RegExp('(^[^\\.]{5})[^\\.]+'), '$1...')
|
||||
}
|
||||
desc={`${commonStore.currentTempAttachment.name} (${bytesToReadable(commonStore.currentTempAttachment.size)})`}
|
||||
size="small" shape="circular" appearance="secondary" />
|
||||
<ToolTipButton desc={t('Remove Attachment')}
|
||||
icon={<Dismiss16Regular />}
|
||||
size="small" shape="circular" appearance="subtle"
|
||||
onClick={() => {
|
||||
commonStore.setCurrentTempAttachment(null);
|
||||
}} />
|
||||
</div>
|
||||
}
|
||||
</div>
|
||||
</div>
|
||||
<ToolTipButton desc={generating ? t('Stop') : t('Send')}
|
||||
icon={generating ? <RecordStop28Regular /> : <ArrowCircleUp28Regular />}
|
||||
size={mq ? 'large' : 'small'} shape="circular" appearance="subtle"
|
||||
onClick={(e) => {
|
||||
if (generating) {
|
||||
chatSseController?.abort();
|
||||
if (lastMessageId) {
|
||||
commonStore.conversation[lastMessageId].type = MessageType.Error;
|
||||
commonStore.conversation[lastMessageId].done = true;
|
||||
commonStore.setConversation(commonStore.conversation);
|
||||
commonStore.setConversationOrder([...commonStore.conversationOrder]);
|
||||
for (const id in chatSseControllers) {
|
||||
chatSseControllers[id].abort();
|
||||
commonStore.conversation[id].type = MessageType.Error;
|
||||
commonStore.conversation[id].done = true;
|
||||
}
|
||||
chatSseControllers = {};
|
||||
commonStore.setConversation(commonStore.conversation);
|
||||
commonStore.setConversationOrder([...commonStore.conversationOrder]);
|
||||
} else {
|
||||
handleKeyDownOrClick(e);
|
||||
}
|
||||
@@ -417,8 +561,8 @@ const ChatPanel: FC = observer(() => {
|
||||
onClick={() => {
|
||||
let savedContent: string = '';
|
||||
const isWorldModel = commonStore.getCurrentModelConfig().modelParameters.modelName.toLowerCase().includes('world');
|
||||
const user = isWorldModel ? 'Question' : 'Bob';
|
||||
const bot = isWorldModel ? 'Answer' : 'Alice';
|
||||
const user = isWorldModel ? 'User' : 'Bob';
|
||||
const bot = isWorldModel ? 'Assistant' : 'Alice';
|
||||
commonStore.conversationOrder.forEach((uuid) => {
|
||||
if (uuid === welcomeUuid)
|
||||
return;
|
||||
|
||||
@@ -269,6 +269,13 @@ const CompletionPanel: FC = observer(() => {
|
||||
} />
|
||||
</div>
|
||||
<div className="grow" />
|
||||
<div className="flex justify-between gap-2">
|
||||
<Button className="grow" onClick={() => {
|
||||
const newPrompt = prompt.replace(/\n+\ /g, '\n').split('\n').map((line) => line.trim()).join('\n');
|
||||
setPrompt(newPrompt);
|
||||
commonStore.setCompletionSubmittedPrompt(newPrompt);
|
||||
}}>{t('Format Content')}</Button>
|
||||
</div>
|
||||
<div className="flex justify-between gap-2">
|
||||
<ToolTipButton desc={t('Regenerate')} icon={<ArrowSync20Regular />} onClick={() => {
|
||||
completionSseController?.abort();
|
||||
|
||||
@@ -319,7 +319,7 @@ const CompositionPanel: FC = observer(() => {
|
||||
toastWithButton(t('File Saved'), t('Open'), () => {
|
||||
OpenFileFolder(path, false);
|
||||
});
|
||||
}).catch((e: any) => {
|
||||
}).catch((e) => {
|
||||
toast(t('Error') + ' - ' + (e.message || e), { type: 'error', autoClose: 2500 });
|
||||
});
|
||||
} else {
|
||||
|
||||
@@ -423,7 +423,7 @@ export const Configs: FC = observer(() => {
|
||||
{
|
||||
(selectedConfig.modelParameters.device.includes('CUDA') || selectedConfig.modelParameters.device === 'Custom') &&
|
||||
<Labeled label={t('Use Custom CUDA kernel to Accelerate')}
|
||||
desc={t('Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues. If it fails to start, please turn off this option.')}
|
||||
desc={t('Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues (output garbled). If it fails to start, please turn off this option, or try to upgrade your gpu driver.')}
|
||||
content={
|
||||
<Switch checked={selectedConfig.modelParameters.useCustomCuda}
|
||||
onChange={(e, data) => {
|
||||
|
||||
@@ -36,6 +36,7 @@ import { ClipboardGetText, ClipboardSetText } from '../../../wailsjs/runtime';
|
||||
import { toast } from 'react-toastify';
|
||||
import { CustomToastContainer } from '../../components/CustomToastContainer';
|
||||
import { v4 as uuid } from 'uuid';
|
||||
import { absPathAsset } from '../../utils';
|
||||
|
||||
export type PresetType = 'chat' | 'completion' | 'chatInCompletion'
|
||||
|
||||
@@ -124,7 +125,7 @@ export const PresetCard: FC<{
|
||||
const { t } = useTranslation();
|
||||
|
||||
return <PresetCardFrame onClick={onClick}>
|
||||
<img src={avatarImg} className="rounded-xl select-none ml-auto mr-auto h-28" />
|
||||
<img src={absPathAsset(avatarImg)} className="rounded-xl select-none ml-auto mr-auto h-28" />
|
||||
<Text size={400}>{name}</Text>
|
||||
<Text size={200} style={{
|
||||
overflow: 'hidden', textOverflow: 'ellipsis',
|
||||
@@ -167,8 +168,14 @@ export const ChatPresetEditor: FC<{
|
||||
const importPreset = () => {
|
||||
ClipboardGetText().then((text) => {
|
||||
try {
|
||||
if (!text.trim().startsWith('{'))
|
||||
text = new TextDecoder().decode(
|
||||
new Uint8Array(atob(text)
|
||||
.split('')
|
||||
.map((c) => c.charCodeAt(0))));
|
||||
const preset = JSON.parse(text);
|
||||
setEditingPreset(preset);
|
||||
setEditingMessages(false);
|
||||
toast(t('Imported successfully'), {
|
||||
type: 'success',
|
||||
autoClose: 1000
|
||||
@@ -242,7 +249,7 @@ export const ChatPresetEditor: FC<{
|
||||
<Button appearance="subtle" icon={<Dismiss20Regular />} />
|
||||
</DialogTrigger>
|
||||
</div>
|
||||
<img src={editingPreset.avatarImg} className="rounded-xl select-none ml-auto mr-auto h-28" />
|
||||
<img src={absPathAsset(editingPreset.avatarImg)} className="rounded-xl select-none ml-auto mr-auto h-28" />
|
||||
<Labeled flex breakline label={t('Name')}
|
||||
content={
|
||||
<div className="flex gap-2">
|
||||
|
||||
@@ -42,7 +42,7 @@ export type SettingsType = {
|
||||
}
|
||||
|
||||
export const Settings: FC = observer(() => {
|
||||
const { t, i18n } = useTranslation();
|
||||
const { t } = useTranslation();
|
||||
const advancedHeaderRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
useEffect(() => {
|
||||
|
||||
@@ -414,7 +414,7 @@ const LoraFinetune: FC = observer(() => {
|
||||
contentText={t('The data path should be a directory or a file in jsonl format (more formats will be supported in the future).\n\n' +
|
||||
'When you provide a directory path, all the txt files within that directory will be automatically converted into training data. ' +
|
||||
'This is commonly used for large-scale training in writing, code generation, or knowledge bases.\n\n' +
|
||||
'The jsonl format file can be referenced at https://github.com/Abel2076/json2binidx_tool/blob/main/sample.jsonl.\n' +
|
||||
'The jsonl format file can be referenced at https://github.com/josStorer/RWKV-Runner/blob/master/finetune/data/sample.jsonl.\n' +
|
||||
'You can also write it similar to OpenAI\'s playground format, as shown in https://platform.openai.com/playground/p/default-chat.\n' +
|
||||
'Even for multi-turn conversations, they must be written in a single line using `\\n` to indicate line breaks. ' +
|
||||
'If they are different dialogues or topics, they should be written in separate lines.')} />
|
||||
|
||||
@@ -31,9 +31,13 @@ export type Status = {
|
||||
device_name: string;
|
||||
}
|
||||
|
||||
export type Platform = 'windows' | 'darwin' | 'linux';
|
||||
export type Attachment = {
|
||||
name: string;
|
||||
size: number;
|
||||
content: string;
|
||||
}
|
||||
|
||||
const labels = ['January', 'February', 'March', 'April', 'May', 'June', 'July'];
|
||||
export type Platform = 'windows' | 'darwin' | 'linux';
|
||||
|
||||
class CommonStore {
|
||||
// global
|
||||
@@ -54,6 +58,9 @@ class CommonStore {
|
||||
conversation: Conversation = {};
|
||||
conversationOrder: string[] = [];
|
||||
activePreset: Preset | null = null;
|
||||
attachmentUploading: boolean = false;
|
||||
attachments: { [uuid: string]: Attachment[] } = {};
|
||||
currentTempAttachment: Attachment | null = null;
|
||||
// completion
|
||||
completionPreset: CompletionPreset | null = null;
|
||||
completionGenerating: boolean = false;
|
||||
@@ -325,6 +332,25 @@ class CommonStore {
|
||||
setLoraModels(value: string[]) {
|
||||
this.loraModels = value;
|
||||
}
|
||||
|
||||
setAttachmentUploading(value: boolean) {
|
||||
this.attachmentUploading = value;
|
||||
}
|
||||
|
||||
setAttachments(value: { [uuid: string]: Attachment[] }) {
|
||||
this.attachments = value;
|
||||
}
|
||||
|
||||
setAttachment(uuid: string, value: Attachment[] | null) {
|
||||
if (value === null)
|
||||
delete this.attachments[uuid];
|
||||
else
|
||||
this.attachments[uuid] = value;
|
||||
}
|
||||
|
||||
setCurrentTempAttachment(value: Attachment | null) {
|
||||
this.currentTempAttachment = value;
|
||||
}
|
||||
}
|
||||
|
||||
export default new CommonStore();
|
||||
@@ -1,6 +1,5 @@
|
||||
import {
|
||||
AddToDownloadList,
|
||||
CopyFile,
|
||||
DeleteFile,
|
||||
DepCheck,
|
||||
InstallPyDep,
|
||||
@@ -184,7 +183,7 @@ export const getStrategy = (modelConfig: ModelConfig | undefined = undefined) =>
|
||||
case 'CUDA':
|
||||
case 'CUDA-Beta':
|
||||
if (avoidOverflow)
|
||||
strategy = 'cuda fp32 *1 -> ';
|
||||
strategy = params.useCustomCuda ? 'cuda fp16 *1 -> ' : 'cuda fp32 *1 -> ';
|
||||
strategy += 'cuda ';
|
||||
strategy += params.precision === 'fp16' ? 'fp16' : params.precision === 'int8' ? 'fp16i8' : 'fp32';
|
||||
if (params.storedLayers < params.maxStoredLayers)
|
||||
@@ -283,6 +282,21 @@ export function bytesToKb(size: number) {
|
||||
return (size / 1024).toFixed(2);
|
||||
}
|
||||
|
||||
export function bytesToReadable(size: number) {
|
||||
if (size < 1024) return size + ' B';
|
||||
else if (size < 1024 * 1024) return bytesToKb(size) + ' KB';
|
||||
else if (size < 1024 * 1024 * 1024) return bytesToMb(size) + ' MB';
|
||||
else return bytesToGb(size) + ' GB';
|
||||
}
|
||||
|
||||
export function absPathAsset(path: string) {
|
||||
if ((path.length > 0 && path[0] === '/') ||
|
||||
(path.length > 1 && path[1] === ':')) {
|
||||
return '=>' + path;
|
||||
}
|
||||
return path;
|
||||
}
|
||||
|
||||
export async function checkUpdate(notifyEvenLatest: boolean = false) {
|
||||
fetch(!commonStore.settings.giteeUpdatesSource ?
|
||||
'https://api.github.com/repos/josstorer/RWKV-Runner/releases/latest' :
|
||||
@@ -402,8 +416,6 @@ export const checkDependencies = async (navigate: NavigateFunction) => {
|
||||
return false;
|
||||
}
|
||||
commonStore.setDepComplete(true);
|
||||
if (commonStore.platform === 'windows')
|
||||
CopyFile('./backend-python/wkv_cuda_utils/wkv_cuda_model.py', './py310/Lib/site-packages/rwkv/model.py');
|
||||
}
|
||||
return true;
|
||||
};
|
||||
@@ -428,12 +440,16 @@ export function toastWithButton(text: string, buttonText: string, onClickButton:
|
||||
return id;
|
||||
}
|
||||
|
||||
export function getSupportedCustomCudaFile() {
|
||||
export function getSupportedCustomCudaFile(isBeta: boolean) {
|
||||
if ([' 10', ' 16', ' 20', ' 30', 'MX', 'Tesla P', 'Quadro P', 'NVIDIA P', 'TITAN X', 'TITAN RTX', 'RTX A',
|
||||
'Quadro RTX 4000', 'Quadro RTX 5000', 'Tesla T4', 'NVIDIA A10', 'NVIDIA A40'].some(v => commonStore.status.device_name.includes(v)))
|
||||
return './backend-python/wkv_cuda_utils/wkv_cuda10_30.pyd';
|
||||
return isBeta ?
|
||||
'./backend-python/wkv_cuda_utils/beta/wkv_cuda10_30.pyd' :
|
||||
'./backend-python/wkv_cuda_utils/wkv_cuda10_30.pyd';
|
||||
else if ([' 40', 'RTX 5000 Ada', 'RTX 6000 Ada', 'RTX TITAN Ada', 'NVIDIA L40'].some(v => commonStore.status.device_name.includes(v)))
|
||||
return './backend-python/wkv_cuda_utils/wkv_cuda40.pyd';
|
||||
return isBeta ?
|
||||
'./backend-python/wkv_cuda_utils/beta/wkv_cuda40.pyd' :
|
||||
'./backend-python/wkv_cuda_utils/wkv_cuda40.pyd';
|
||||
else
|
||||
return '';
|
||||
}
|
||||
4
frontend/wailsjs/go/backend_golang/App.d.ts
generated
vendored
4
frontend/wailsjs/go/backend_golang/App.d.ts
generated
vendored
@@ -28,12 +28,16 @@ export function GetPyError():Promise<string>;
|
||||
|
||||
export function InstallPyDep(arg1:string,arg2:boolean):Promise<string>;
|
||||
|
||||
export function IsPortAvailable(arg1:number):Promise<boolean>;
|
||||
|
||||
export function ListDirFiles(arg1:string):Promise<Array<backend_golang.FileInfo>>;
|
||||
|
||||
export function MergeLora(arg1:string,arg2:boolean,arg3:number,arg4:string,arg5:string,arg6:string):Promise<string>;
|
||||
|
||||
export function OpenFileFolder(arg1:string,arg2:boolean):Promise<void>;
|
||||
|
||||
export function OpenOpenFileDialog(arg1:string):Promise<string>;
|
||||
|
||||
export function OpenSaveFileDialog(arg1:string,arg2:string,arg3:string):Promise<string>;
|
||||
|
||||
export function OpenSaveFileDialogBytes(arg1:string,arg2:string,arg3:Array<number>):Promise<string>;
|
||||
|
||||
8
frontend/wailsjs/go/backend_golang/App.js
generated
8
frontend/wailsjs/go/backend_golang/App.js
generated
@@ -54,6 +54,10 @@ export function InstallPyDep(arg1, arg2) {
|
||||
return window['go']['backend_golang']['App']['InstallPyDep'](arg1, arg2);
|
||||
}
|
||||
|
||||
export function IsPortAvailable(arg1) {
|
||||
return window['go']['backend_golang']['App']['IsPortAvailable'](arg1);
|
||||
}
|
||||
|
||||
export function ListDirFiles(arg1) {
|
||||
return window['go']['backend_golang']['App']['ListDirFiles'](arg1);
|
||||
}
|
||||
@@ -66,6 +70,10 @@ export function OpenFileFolder(arg1, arg2) {
|
||||
return window['go']['backend_golang']['App']['OpenFileFolder'](arg1, arg2);
|
||||
}
|
||||
|
||||
export function OpenOpenFileDialog(arg1) {
|
||||
return window['go']['backend_golang']['App']['OpenOpenFileDialog'](arg1);
|
||||
}
|
||||
|
||||
export function OpenSaveFileDialog(arg1, arg2, arg3) {
|
||||
return window['go']['backend_golang']['App']['OpenSaveFileDialog'](arg1, arg2, arg3);
|
||||
}
|
||||
|
||||
14
main.go
14
main.go
@@ -27,6 +27,7 @@ func NewFileLoader() *FileLoader {
|
||||
func (h *FileLoader) ServeHTTP(res http.ResponseWriter, req *http.Request) {
|
||||
var err error
|
||||
requestedFilename := strings.TrimPrefix(req.URL.Path, "/")
|
||||
requestedFilename = strings.TrimPrefix(requestedFilename, "=>") // absolute path
|
||||
println("Requesting file:", requestedFilename)
|
||||
fileData, err := os.ReadFile(requestedFilename)
|
||||
if err != nil {
|
||||
@@ -43,7 +44,7 @@ var assets embed.FS
|
||||
//go:embed all:py310/Lib/site-packages/cyac
|
||||
var cyac embed.FS
|
||||
|
||||
//go:embed all:py310/Lib/site-packages/cyac-1.7.dist-info
|
||||
//go:embed all:py310/Lib/site-packages/cyac-1.9.dist-info
|
||||
var cyacInfo embed.FS
|
||||
|
||||
//go:embed backend-python
|
||||
@@ -66,6 +67,7 @@ var components embed.FS
|
||||
|
||||
func main() {
|
||||
if buildInfo, ok := debug.ReadBuildInfo(); !ok || strings.Contains(buildInfo.String(), "-ldflags") {
|
||||
os.RemoveAll("./py310/Lib/site-packages/cyac-1.7.dist-info")
|
||||
backend.CopyEmbed(cyac)
|
||||
backend.CopyEmbed(cyacInfo)
|
||||
backend.CopyEmbed(py)
|
||||
@@ -93,11 +95,11 @@ func main() {
|
||||
|
||||
// Create application with options
|
||||
err = wails.Run(&options.App{
|
||||
Title: "RWKV-Runner",
|
||||
Width: 1024,
|
||||
Height: 680,
|
||||
MinWidth: 375,
|
||||
MinHeight: 640,
|
||||
Title: "RWKV-Runner",
|
||||
Width: 1024,
|
||||
Height: 680,
|
||||
MinWidth: 375,
|
||||
MinHeight: 640,
|
||||
EnableDefaultContextMenu: true,
|
||||
Windows: &windows.Options{
|
||||
ZoomFactor: zoomFactor,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"version": "1.4.5",
|
||||
"version": "1.4.9",
|
||||
"introduction": {
|
||||
"en": "RWKV is an open-source, commercially usable large language model with high flexibility and great potential for development.\n### About This Tool\nThis tool aims to lower the barrier of entry for using large language models, making it accessible to everyone. It provides fully automated dependency and model management. You simply need to click and run, following the instructions, to deploy a local large language model. The tool itself is very compact and only requires a single executable file for one-click deployment.\nAdditionally, this tool offers an interface that is fully compatible with the OpenAI API. This means you can use any ChatGPT client as a client for RWKV, enabling capability expansion beyond just chat functionality.\n### Preset Configuration Rules at the Bottom\nThis tool comes with a series of preset configurations to reduce complexity. The naming rules for each configuration represent the following in order: device - required VRAM/memory - model size - model language.\nFor example, \"GPU-8G-3B-EN\" indicates that this configuration is for a graphics card with 8GB of VRAM, a model size of 3 billion parameters, and it uses an English language model.\nLarger model sizes have higher performance and VRAM requirements. Among configurations with the same model size, those with higher VRAM usage will have faster runtime.\nFor example, if you have 12GB of VRAM but running the \"GPU-12G-7B-EN\" configuration is slow, you can downgrade to \"GPU-8G-3B-EN\" for a significant speed improvement.\n### About RWKV\nRWKV is an RNN with Transformer-level LLM performance, which can also be directly trained like a GPT transformer (parallelizable). And it's 100% attention-free. You only need the hidden state at position t to compute the state at position t+1. You can use the \"GPT\" mode to quickly compute the hidden state for the \"RNN\" mode.<br/>So it's combining the best of RNN and transformer - great performance, fast inference, saves VRAM, fast training, \"infinite\" ctx_len, and free sentence embedding (using the final hidden state).",
|
||||
"zh": "RWKV是一个开源且允许商用的大语言模型,灵活性很高且极具发展潜力。\n### 关于本工具\n本工具旨在降低大语言模型的使用门槛,做到人人可用,本工具提供了全自动化的依赖和模型管理,你只需要直接点击运行,跟随引导,即可完成本地大语言模型的部署,工具本身体积极小,只需要一个exe即可完成一键部署。\n此外,本工具提供了与OpenAI API完全兼容的接口,这意味着你可以把任意ChatGPT客户端用作RWKV的客户端,实现能力拓展,而不局限于聊天。\n### 底部的预设配置规则\n本工具内置了一系列预设配置,以降低使用难度,每个配置名的规则,依次代表着:设备-所需显存/内存-模型规模-模型语言。\n例如,GPU-8G-3B-CN,表示该配置用于显卡,需要8G显存,模型规模为30亿参数,使用的是中文模型。\n模型规模越大,性能要求越高,显存要求也越高,而同样模型规模的配置中,显存占用越高的,运行速度越快。\n例如当你有12G显存,但运行GPU-12G-7B-CN配置速度比较慢,可降级成GPU-8G-3B-CN,将会大幅提速。\n### 关于RWKV\nRWKV是具有Transformer级别LLM性能的RNN,也可以像GPT Transformer一样直接进行训练(可并行化)。而且它是100% attention-free的。你只需在位置t处获得隐藏状态即可计算位置t + 1处的状态。你可以使用“GPT”模式快速计算用于“RNN”模式的隐藏状态。\n因此,它将RNN和Transformer的优点结合起来 - 高性能、快速推理、节省显存、快速训练、“无限”上下文长度以及免费的语句嵌入(使用最终隐藏状态)。"
|
||||
@@ -15,6 +15,19 @@
|
||||
}
|
||||
],
|
||||
"models": [
|
||||
{
|
||||
"name": "RWKV-5-World-1B5-v2-20231025-ctx4096.pth",
|
||||
"desc": {
|
||||
"en": "RWKV-5 Global Languages 1.5B v2",
|
||||
"zh": "RWKV-5 全球语言 1.5B v2",
|
||||
"ja": "RWKV-5 グローバル言語 1.5B v2"
|
||||
},
|
||||
"size": 3155590194,
|
||||
"SHA256": "5a89f56be7f82ab9dd0835af9a6838f788477471616c02f7b041e3aea0c57435",
|
||||
"lastUpdated": "2023-10-26T05:49:30",
|
||||
"url": "https://huggingface.co/BlinkDL/rwkv-5-world/blob/main/RWKV-5-World-1B5-v2-20231025-ctx4096.pth",
|
||||
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-5-world/resolve/main/RWKV-5-World-1B5-v2-20231025-ctx4096.pth"
|
||||
},
|
||||
{
|
||||
"name": "RWKV-4-World-CHNtuned-0.1B-v1-20230617-ctx4096.pth",
|
||||
"desc": {
|
||||
@@ -507,8 +520,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-1B5-v11-Eng99%-Other1%-20230425-ctx4096.pth",
|
||||
"desc": {
|
||||
"en": "English 1.5B v11",
|
||||
"zh": "英文 1.5B v11"
|
||||
"en": "English 1.5B v11 (Old Model)",
|
||||
"zh": "英文 1.5B v11 (旧模型)"
|
||||
},
|
||||
"size": 3030279730,
|
||||
"SHA256": "4ac715aecc5b1c90e8e37eebb8163392699066ec23b18144416e91cb4e78675a",
|
||||
@@ -520,8 +533,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-1B5-v12-Eng98%-Other2%-20230520-ctx4096.pth",
|
||||
"desc": {
|
||||
"en": "English 1B5 v12",
|
||||
"zh": "英文 1B5 v12"
|
||||
"en": "English 1B5 v12 (Old Model)",
|
||||
"zh": "英文 1B5 v12 (旧模型)"
|
||||
},
|
||||
"size": 3030279730,
|
||||
"SHA256": "6bbbffb3ee2372dfa9ef49c599e9a2bc0a01b94b6a264ba9bf5bd524fc38f723",
|
||||
@@ -532,8 +545,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-3B-v11-Eng99%-Other1%-20230425-ctx4096.pth",
|
||||
"desc": {
|
||||
"en": "English 3B v11",
|
||||
"zh": "英文 3B v11"
|
||||
"en": "English 3B v11 (Old Model)",
|
||||
"zh": "英文 3B v11 (旧模型)"
|
||||
},
|
||||
"size": 5969345074,
|
||||
"SHA256": "982ad3d794efe58992db23c6d694c57a9e62d54718264ec6d6acfae5eb0eea12",
|
||||
@@ -545,8 +558,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-3B-v12-Eng98%-Other2%-20230520-ctx4096.pth",
|
||||
"desc": {
|
||||
"en": "English 3B v12",
|
||||
"zh": "英文 3B v12"
|
||||
"en": "English 3B v12 (Old Model)",
|
||||
"zh": "英文 3B v12 (旧模型)"
|
||||
},
|
||||
"size": 5969345074,
|
||||
"SHA256": "1eea1845acfe9729dfdaec66a8d1aeb91a1287d94bebbca5529c13c050540b33",
|
||||
@@ -557,8 +570,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-3B-v11-Eng49%-Chn49%-Jpn1%-Other1%-20230429-ctx4096.pth",
|
||||
"desc": {
|
||||
"en": "Chinese 3B v11",
|
||||
"zh": "中文 3B v11"
|
||||
"en": "Chinese 3B v11 (Old Model)",
|
||||
"zh": "中文 3B v11 (旧模型)"
|
||||
},
|
||||
"size": 5969345074,
|
||||
"SHA256": "af12300d9875e0e166c23d6e9b20928db435073060bf1d36f874060de92ada98",
|
||||
@@ -570,8 +583,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-3B-v12-Eng49%-Chn49%-Jpn1%-Other1%-20230527-ctx4096.pth",
|
||||
"desc": {
|
||||
"en": "Chinese 3B v12",
|
||||
"zh": "中文 3B v12"
|
||||
"en": "Chinese 3B v12 (Old Model)",
|
||||
"zh": "中文 3B v12 (旧模型)"
|
||||
},
|
||||
"size": 5969345330,
|
||||
"SHA256": "c0abb4b745ba3523b9d8b3e1293110867ee55b1ef3dc8c122212f78396755721",
|
||||
@@ -582,8 +595,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-7B-v11x-Eng99%-Other1%-20230429-ctx8192.pth",
|
||||
"desc": {
|
||||
"en": "English 7B v11x",
|
||||
"zh": "英文 7B v11x"
|
||||
"en": "English 7B v11x (Old Model)",
|
||||
"zh": "英文 7B v11x (旧模型)"
|
||||
},
|
||||
"size": 14785389874,
|
||||
"SHA256": "f00d5c75b453f2b20ad875fb5a324564c34024eea25a015f5eb441e4f364c3fe",
|
||||
@@ -595,8 +608,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-7B-v12-Eng98%-Other2%-20230521-ctx8192.pth",
|
||||
"desc": {
|
||||
"en": "English 7B v12",
|
||||
"zh": "英文 7B v12"
|
||||
"en": "English 7B v12 (Old Model)",
|
||||
"zh": "英文 7B v12 (旧模型)"
|
||||
},
|
||||
"size": 14785389618,
|
||||
"SHA256": "5a725eaeb9e09b724de6c97e6845dd0283097c7920acd05b46852ab7afa9ec32",
|
||||
@@ -607,8 +620,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-7B-v10x-Eng49%-Chn50%-Other1%-20230423-ctx4096.pth",
|
||||
"desc": {
|
||||
"en": "Chinese 7B v10x",
|
||||
"zh": "中文 7B v10x"
|
||||
"en": "Chinese 7B v10x (Old Model)",
|
||||
"zh": "中文 7B v10x (旧模型)"
|
||||
},
|
||||
"size": 14785389874,
|
||||
"SHA256": "7aaf40bb3d440a949db3a146b0a5bbb3e925942b496775b51f5630a582fc236d",
|
||||
@@ -620,8 +633,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-7B-v11-Eng49%-Chn49%-Jpn1%-Other1%-20230430-ctx8192.pth",
|
||||
"desc": {
|
||||
"en": "Chinese 7B v11",
|
||||
"zh": "中文 7B v11"
|
||||
"en": "Chinese 7B v11 (Old Model)",
|
||||
"zh": "中文 7B v11 (旧模型)"
|
||||
},
|
||||
"size": 14785389874,
|
||||
"SHA256": "9e67a74964abcb4463711e447ddf47735561d7b40592d2d02b29d2e796a4fd14",
|
||||
@@ -633,8 +646,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-7B-v12-Eng49%-Chn49%-Jpn1%-Other1%-20230530-ctx8192.pth",
|
||||
"desc": {
|
||||
"en": "Chinese 7B v12",
|
||||
"zh": "中文 7B v12"
|
||||
"en": "Chinese 7B v12 (Old Model)",
|
||||
"zh": "中文 7B v12 (旧模型)"
|
||||
},
|
||||
"size": 14785389874,
|
||||
"SHA256": "6d4a089ff36d5d9d96b669d425fc5e4e3959cab426535b52e2364df08f58b407",
|
||||
@@ -645,8 +658,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-14B-v11x-Eng99%-Other1%-20230501-ctx8192.pth",
|
||||
"desc": {
|
||||
"en": "English 14B v11x",
|
||||
"zh": "英文 14B v11x"
|
||||
"en": "English 14B v11x (Old Model)",
|
||||
"zh": "英文 14B v11x (旧模型)"
|
||||
},
|
||||
"size": 28297309490,
|
||||
"SHA256": "c4bc72406c3c62613e8e2592e8d07ac045f8a88381c728f8eb60af890e299f4d",
|
||||
@@ -658,8 +671,8 @@
|
||||
{
|
||||
"name": "RWKV-4-Raven-14B-v12-Eng98%-Other2%-20230523-ctx8192.pth",
|
||||
"desc": {
|
||||
"en": "English 14B v12",
|
||||
"zh": "英文 14B v12"
|
||||
"en": "English 14B v12 (Old Model)",
|
||||
"zh": "英文 14B v12 (旧模型)"
|
||||
},
|
||||
"size": 28297309490,
|
||||
"SHA256": "1193b5a9ceab572e4dbb9ed1d798eab7bf4793d18904d08bd4bf183579338ae7",
|
||||
@@ -692,6 +705,32 @@
|
||||
"lastUpdated": "2023-07-17T15:02:08",
|
||||
"url": "https://huggingface.co/BlinkDL/rwkv-4-music/blob/main/RWKV-4-MIDI-560M-v1-20230717-ctx4096.pth",
|
||||
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-music/resolve/main/RWKV-4-MIDI-560M-v1-20230717-ctx4096.pth"
|
||||
},
|
||||
{
|
||||
"name": "RWKV-5-MIDI-120M-v1-20230728-ctx4096.pth",
|
||||
"desc": {
|
||||
"en": "RWKV-5 Music 120M v1",
|
||||
"zh": "RWKV-5 作曲 120M v1",
|
||||
"ja": "RWKV-5 作曲 120M v1"
|
||||
},
|
||||
"size": 245070513,
|
||||
"SHA256": "c43d4a2ee7a71a331d05d6cd818dd75f7c48c716e4b98c58e4d27231614b0144",
|
||||
"lastUpdated": "2023-07-29T02:17:27",
|
||||
"url": "https://huggingface.co/BlinkDL/rwkv-5-music/blob/main/RWKV-5-MIDI-120M-v1-20230728-ctx4096.pth",
|
||||
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-5-music/resolve/main/RWKV-5-MIDI-120M-v1-20230728-ctx4096.pth"
|
||||
},
|
||||
{
|
||||
"name": "RWKV-5-MIDI-560M-v1-20230902-ctx4096.pth",
|
||||
"desc": {
|
||||
"en": "RWKV-5 Music 560M v1",
|
||||
"zh": "RWKV-5 作曲 560M v1",
|
||||
"ja": "RWKV-5 作曲 560M v1"
|
||||
},
|
||||
"size": 1179631346,
|
||||
"SHA256": "cb4f2fd8956ca8496d6b2e33bff290c2047759b6fe74884903dbf9c73a11cc77",
|
||||
"lastUpdated": "2023-09-03T04:48:41",
|
||||
"url": "https://huggingface.co/BlinkDL/rwkv-5-music/blob/main/RWKV-5-MIDI-560M-v1-20230902-ctx4096.pth",
|
||||
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-5-music/resolve/main/RWKV-5-MIDI-560M-v1-20230902-ctx4096.pth"
|
||||
}
|
||||
]
|
||||
}
|
||||
Reference in New Issue
Block a user