Compare commits

...

48 Commits

Author SHA1 Message Date
josc146 4f26404002 release v1.4.8 2023-10-03 15:05:13 +08:00
josc146 df7652856a completion page: add format content button 2023-10-03 14:54:36 +08:00
josc146 de755463e3 improve overflow 2023-10-03 14:27:44 +08:00
josc146 2fe98d9a2c add rwkv5 cuda kernel error prompt 2023-10-03 14:25:31 +08:00
josc146 2e42039607 chore 2023-10-03 14:04:46 +08:00
josc146 71abd357a4 update startup 2023-10-03 13:50:58 +08:00
josc146 68228a4552 rwkv5 pre-compiled kernel (for windows) 2023-10-03 13:39:07 +08:00
josc146 79851433f8 upgrade rwkv pip (0.8.13) 2023-10-03 13:33:55 +08:00
github-actions[bot] bd4de12e05 release v1.4.7 2023-09-18 15:04:47 +00:00
josc146 c0aa6aaba9 release v1.4.7 2023-09-18 23:03:54 +08:00
josc146 d7abe5f0d1 add pre-compiled beta cuda kernel (rwkv-beta==0.8.5, 40%+ faster for fp16) (thanks to #180, pre-compiled kernel of RTX 40 Series will be included later) 2023-09-18 23:02:49 +08:00
josc146 5e5e1e9651 custom tokenizer .txt support 2023-09-18 17:20:55 +08:00
github-actions[bot] f8388a0527 release v1.4.6 2023-09-16 05:06:08 +00:00
josc146 f8b764ef8f release v1.4.6 2023-09-16 13:05:34 +08:00
josc146 fcfaa5944e frontend feature adaptation for api params (user_name, assistant_name, presystem) 2023-09-16 13:02:06 +08:00
josc146 f89e89c1c9 chore 2023-09-16 12:23:16 +08:00
josc146 a25965530c custom tokenizer (#77) 2023-09-16 00:34:11 +08:00
josc146 971124d0d7 upgrade to wails@v2.6.0 (EnableDefaultContextMenu: true) 2023-09-16 00:29:45 +08:00
josc146 d7dcc90008 chore 2023-09-15 16:31:14 +08:00
josc146 df969fcfc6 upgrade cuda-beta 2023-09-15 16:30:11 +08:00
josc146 c4042bbfd8 improve ui desc 2023-09-15 16:26:32 +08:00
josc146 4112200b4c revert(2d5456): refresh local models when download complete (for macOS) 2023-09-15 16:25:04 +08:00
Ikko Eltociear Ashimine 3f9a54e36f Update README_JA.md (add translation.) 2023-09-13 16:11:43 +08:00
github-actions[bot] 3ed4456135 release v1.4.5 2023-08-27 15:57:18 +00:00
josc146 e0df9ae47b release v1.4.5 2023-08-27 23:56:37 +08:00
josc146 87b2c3ed7d fix build 2023-08-27 23:56:30 +08:00
josc146 50ff7ef6bc always use requirements.txt 2023-08-27 23:52:52 +08:00
josc146 c7a580ca8a update manifest 2023-08-27 23:16:56 +08:00
josc146 eaae7624a7 add HardwareMonitor (Windows Only) 2023-08-27 22:53:18 +08:00
josc146 fcd59de6fb correct Preset UI description 2023-08-27 21:37:32 +08:00
josc146 1bbe127209 fix webgpu_server file permissions of linux and macos 2023-08-27 21:22:26 +08:00
josc146 b868adc058 chore 2023-08-27 21:21:34 +08:00
josc146 a24b78e8c3 python-backend: extra ChatCompletionBody params (raw, presystem); add default_stop when stop is null 2023-08-27 21:21:11 +08:00
josc146 c8025f1cff allow message content to be empty 2023-08-27 21:02:54 +08:00
josc146 fe0860dbf0 fix lora finetune max_epochs (#170) 2023-08-24 22:49:57 +08:00
josc146 02d5d641d1 chore 2023-08-24 22:48:54 +08:00
github-actions[bot] a057bb6c5b release v1.4.4 2023-08-16 15:33:53 +00:00
josc146 c9e4ae7fa1 release v1.4.4 2023-08-16 23:33:22 +08:00
josc146 79a97b2bc4 webgpu release support 2023-08-16 23:31:04 +08:00
josc146 ef53951a16 webgpu support 2023-08-16 23:07:58 +08:00
josc146 74f1a1c033 chore 2023-08-16 21:11:58 +08:00
josc146 ce986cfc6d chore 2023-08-16 12:50:22 +08:00
josc146 61cea2a784 add misc API (/models and /dashboard/billing/credit_grants) 2023-08-14 23:37:55 +08:00
josc146 8a13bd3c1e add rwkv-cuda-beta support (faster) 2023-08-14 22:07:15 +08:00
josc146 da68926e9c chore (AddStateBody class) 2023-08-13 21:27:29 +08:00
josc146 e0b7453883 allow multiple systems 2023-08-04 22:27:55 +08:00
josc146 91e2828a95 allow completions input to be null 2023-08-04 22:22:59 +08:00
github-actions[bot] bcf6409536 release v1.4.3 2023-07-31 14:51:01 +00:00
73 changed files with 73834 additions and 977 deletions

.gitattributes (vendored, 1 line changed)

@@ -2,6 +2,7 @@ backend-python/rwkv_pip/** linguist-vendored
backend-python/wkv_cuda_utils/** linguist-vendored
backend-python/get-pip.py linguist-vendored
backend-python/convert_model.py linguist-vendored
backend-python/convert_safetensors.py linguist-vendored
backend-python/utils/midi.py linguist-vendored
build/** linguist-vendored
finetune/lora/** linguist-vendored


@@ -48,10 +48,17 @@ jobs:
id: cp310
with:
python-version: '3.10'
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
target: wasm32-unknown-unknown
- uses: crazy-max/ghaction-chocolatey@v2
with:
args: install upx
- run: |
Start-BitsTransfer https://github.com/josStorer/LibreHardwareMonitor.Console/releases/download/v0.1.0/LibreHardwareMonitor.Console.zip ./LibreHardwareMonitor.Console.zip
Expand-Archive ./LibreHardwareMonitor.Console.zip -DestinationPath ./components/LibreHardwareMonitor.Console
Start-BitsTransfer https://www.python.org/ftp/python/3.10.11/python-3.10.11-embed-amd64.zip ./python-3.10.11-embed-amd64.zip
Expand-Archive ./python-3.10.11-embed-amd64.zip -DestinationPath ./py310
$content=Get-Content "./py310/python310._pth"; $content | ForEach-Object {if ($_.ReadCount -eq 3) {"Lib\\site-packages"} else {$_}} | Set-Content ./py310/python310._pth
@@ -60,7 +67,13 @@ jobs:
Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../include" -Destination "py310/include" -Recurse
Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../libs" -Destination "py310/libs" -Recurse
./py310/python -m pip install cyac==1.7
git clone https://github.com/josStorer/ai00_rwkv_server --depth=1
cd ai00_rwkv_server
cargo build --release
mv ./target/release/ai00_server.exe ../backend-rust/webgpu_server.exe
cd ..
go install github.com/wailsapp/wails/v2/cmd/wails@latest
(Get-Content -Path ./backend-golang/app.go) -replace "//go:custom_build windows ", "" | Set-Content -Path ./backend-golang/app.go
make
Rename-Item -Path "build/bin/RWKV-Runner.exe" -NewName "RWKV-Runner_windows_x64.exe"
@@ -76,12 +89,27 @@ jobs:
- uses: actions/setup-go@v4
with:
go-version: '1.20.5'
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
target: wasm32-unknown-unknown
- run: |
sudo apt-get update
sudo apt-get install upx
sudo apt-get install build-essential libgtk-3-dev libwebkit2gtk-4.0-dev
git clone https://github.com/josStorer/ai00_rwkv_server --depth=1
cd ai00_rwkv_server
sudo apt-get install libudev-dev
sudo apt-get install libasound2-dev
rustup target add x86_64-unknown-linux-gnu
cargo build --release --target x86_64-unknown-linux-gnu
mv ./target/x86_64-unknown-linux-gnu/release/ai00_server ../backend-rust/webgpu_server
cd ..
go install github.com/wailsapp/wails/v2/cmd/wails@latest
rm -rf ./backend-python/wkv_cuda_utils
rm ./backend-python/rwkv_pip/wkv_cuda.pyd
rm ./backend-python/rwkv_pip/rwkv5.pyd
rm ./backend-python/rwkv_pip/beta/wkv_cuda.pyd
rm ./backend-python/get-pip.py
sed -i '1,2d' ./backend-golang/wsl_not_windows.go
rm ./backend-golang/wsl.go
@@ -101,9 +129,21 @@ jobs:
- uses: actions/setup-go@v4
with:
go-version: '1.20.5'
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
target: wasm32-unknown-unknown
- run: |
git clone https://github.com/josStorer/ai00_rwkv_server --depth=1
cd ai00_rwkv_server
cargo build --release
mv ./target/release/ai00_server ../backend-rust/webgpu_server
cd ..
go install github.com/wailsapp/wails/v2/cmd/wails@latest
rm -rf ./backend-python/wkv_cuda_utils
rm ./backend-python/rwkv_pip/wkv_cuda.pyd
rm ./backend-python/rwkv_pip/rwkv5.pyd
rm ./backend-python/rwkv_pip/beta/wkv_cuda.pyd
rm ./backend-python/get-pip.py
sed -i '' '1,2d' ./backend-golang/wsl_not_windows.go
rm ./backend-golang/wsl.go

.gitignore (vendored, 3 lines changed)

@@ -5,6 +5,8 @@ __pycache__
.idea
.vs
*.pth
*.st
*.safetensors
*.bin
/config.json
/cache.json
@@ -24,3 +26,4 @@ __pycache__
train_log.txt
finetune/json2binidx_tool/data
/wsl.state
/components


@@ -1,9 +1,8 @@
## Changes
- Japanese UI
- global penalty
- allow custom user_name and assistant_name (`/chat/completions` API)
- update defaultConfigs
- latest rwkv-5.2 is now supported (with pre-compiled kernel for Windows)
- completion page: add format content button
- chore
## Install


@@ -91,8 +91,8 @@ body.json:
## Embeddings API Example
Note: v1.4.0 has improved the quality of the embeddings API. The generated results are not compatible
with previous versions. If you are using the embeddings API to generate knowledge bases or similar, please regenerate.
If you are using LangChain, please use
`OpenAIEmbeddings(openai_api_base="http://127.0.0.1:8000", openai_api_key="sk-")`
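
To try this end to end, a minimal LangChain sketch along the lines of the call above (assuming a local RWKV-Runner backend on port 8000 and the 2023-era `langchain` package):

```python
# A minimal sketch, assuming the classic langchain OpenAIEmbeddings wrapper
# and a local RWKV-Runner server; the key is a placeholder the local server accepts.
from langchain.embeddings import OpenAIEmbeddings

embeddings = OpenAIEmbeddings(
    openai_api_base="http://127.0.0.1:8000",
    openai_api_key="sk-",
)
vector = embeddings.embed_query("hello world")  # one embedding vector
print(len(vector))
```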


@@ -1,6 +1,7 @@
package backend_golang
import (
"bufio"
"context"
"errors"
"net/http"
@@ -8,6 +9,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
"syscall"
"github.com/fsnotify/fsnotify"
"github.com/minio/selfupdate"
@@ -41,6 +43,7 @@ func (a *App) OnStartup(ctx context.Context) {
a.cmdPrefix = "cd " + a.exDir + " && "
}
os.Chmod("./backend-rust/webgpu_server", 0777)
os.Mkdir(a.exDir+"models", os.ModePerm)
os.Mkdir(a.exDir+"lora-models", os.ModePerm)
os.Mkdir(a.exDir+"finetune/json2binidx_tool/data", os.ModePerm)
@@ -50,7 +53,18 @@ func (a *App) OnStartup(ctx context.Context) {
}
a.downloadLoop()
a.watchFs()
a.monitorHardware()
}
func (a *App) OnBeforeClose(ctx context.Context) bool {
if monitor != nil {
monitor.Process.Kill()
}
return false
}
func (a *App) watchFs() {
watcher, err := fsnotify.NewWatcher()
if err == nil {
watcher.Add("./lora-models")
@@ -62,7 +76,7 @@ func (a *App) OnStartup(ctx context.Context) {
if !ok {
return
}
- wruntime.EventsEmit(ctx, "fsnotify", event.Name)
+ wruntime.EventsEmit(a.ctx, "fsnotify", event.Name)
case _, ok := <-watcher.Errors:
if !ok {
return
@@ -73,6 +87,37 @@ func (a *App) OnStartup(ctx context.Context) {
}
}
var monitor *exec.Cmd
func (a *App) monitorHardware() {
if runtime.GOOS != "windows" {
return
}
monitor = exec.Command("./components/LibreHardwareMonitor.Console/LibreHardwareMonitor.Console.exe")
stdout, err := monitor.StdoutPipe()
if err != nil {
monitor = nil
return
}
go func() {
reader := bufio.NewReader(stdout)
for {
line, _, err := reader.ReadLine()
if err != nil {
wruntime.EventsEmit(a.ctx, "monitorerr", err.Error())
break
}
wruntime.EventsEmit(a.ctx, "monitor", string(line))
}
}()
monitor.SysProcAttr = &syscall.SysProcAttr{}
//go:custom_build windows monitor.SysProcAttr.HideWindow = true
monitor.Start()
}
func (a *App) UpdateApp(url string) (broken bool, err error) {
resp, err := http.Get(url)
if err != nil {


@@ -10,7 +10,7 @@ import (
"strings"
)
-func (a *App) StartServer(python string, port int, host string) (string, error) {
+func (a *App) StartServer(python string, port int, host string, rwkvBeta bool) (string, error) {
var err error
if python == "" {
python, err = GetPython()
@@ -18,7 +18,19 @@ func (a *App) StartServer(python string, port int, host string) (string, error)
if err != nil {
return "", err
}
-	return Cmd(python, "./backend-python/main.py", strconv.Itoa(port), host)
+	args := []string{python, "./backend-python/main.py"}
+	if rwkvBeta {
+		args = append(args, "--rwkv-beta")
+	}
+	args = append(args, "--port", strconv.Itoa(port), "--host", host)
+	return Cmd(args...)
}
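
For reference, the argument list assembled by the new StartServer corresponds to a launch like the following (a hypothetical Python sketch mirroring the Go logic above):

```python
import subprocess

def start_server(python: str, port: int, host: str, rwkv_beta: bool) -> subprocess.Popen:
    # Mirrors StartServer above: --rwkv-beta is inserted before --port/--host
    # only when requested.
    args = [python, "./backend-python/main.py"]
    if rwkv_beta:
        args.append("--rwkv-beta")
    args += ["--port", str(port), "--host", host]
    return subprocess.Popen(args)

# start_server("python3", 8000, "127.0.0.1", rwkv_beta=True)
```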
func (a *App) StartWebGPUServer(port int, host string) (string, error) {
args := []string{"./backend-rust/webgpu_server"}
args = append(args, "-a", "0", "-t", "backend-rust/assets/rwkv_vocab_v20230424.json",
"--port", strconv.Itoa(port), "--ip", host)
return Cmd(args...)
}
func (a *App) ConvertModel(python string, modelPath string, strategy string, outPath string) (string, error) {
@@ -32,6 +44,17 @@ func (a *App) ConvertModel(python string, modelPath string, strategy string, out
return Cmd(python, "./backend-python/convert_model.py", "--in", modelPath, "--out", outPath, "--strategy", strategy)
}
func (a *App) ConvertSafetensors(python string, modelPath string, outPath string) (string, error) {
var err error
if python == "" {
python, err = GetPython()
}
if err != nil {
return "", err
}
return Cmd(python, "./backend-python/convert_safetensors.py", "--input", modelPath, "--output", outPath)
}
func (a *App) ConvertData(python string, input string, outputPrefix string, vocab string) (string, error) {
var err error
if python == "" {
@@ -132,7 +155,6 @@ func (a *App) InstallPyDep(python string, cnMirror bool) (string, error) {
"exit"
if !cnMirror {
installScript = strings.Replace(installScript, " -i https://pypi.tuna.tsinghua.edu.cn/simple", "", -1)
installScript = strings.Replace(installScript, "requirements.txt", "requirements_versions.txt", -1)
}
err = os.WriteFile("./install-py-dep.bat", []byte(installScript), 0644)
if err != nil {

backend-python/convert_safetensors.py (new vendored file, 53 lines)

@@ -0,0 +1,53 @@
import json
import os
import sys
import copy
import torch
from safetensors.torch import load_file, save_file
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, help="Path to input pth model")
parser.add_argument(
"--output",
type=str,
default="./converted.st",
help="Path to output safetensors model",
)
args = parser.parse_args()
def convert_file(
pt_filename: str,
sf_filename: str,
):
loaded = torch.load(pt_filename, map_location="cpu")
if "state_dict" in loaded:
loaded = loaded["state_dict"]
loaded = {k: v.clone().half() for k, v in loaded.items()}
for k, v in loaded.items():
print(f"{k}\t{v.shape}\t{v.dtype}")
# For tensors to be contiguous
loaded = {k: v.contiguous() for k, v in loaded.items()}
dirname = os.path.dirname(sf_filename)
os.makedirs(dirname, exist_ok=True)
save_file(loaded, sf_filename, metadata={"format": "pt"})
reloaded = load_file(sf_filename)
for k in loaded:
pt_tensor = loaded[k]
sf_tensor = reloaded[k]
if not torch.equal(pt_tensor, sf_tensor):
raise RuntimeError(f"The output tensors do not match for key {k}")
if __name__ == "__main__":
try:
convert_file(args.input, args.output)
print(f"Saved to {args.output}")
except Exception as e:
with open("error.txt", "w") as f:
f.write(str(e))
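
The Go ConvertSafetensors wrapper above invokes this script with `--input` and `--output`; the equivalent standalone call looks like this (paths are placeholders):

```python
import subprocess

# Sketch of the CLI invocation the Go wrapper performs; paths are illustrative.
subprocess.run(
    [
        "python", "./backend-python/convert_safetensors.py",
        "--input", "models/example.pth",  # source PyTorch checkpoint
        "--output", "models/example.st",  # destination safetensors file
    ],
    check=True,
)
```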


@@ -1,3 +1,4 @@
import safetensors
import midi2audio
import mido
import lm_dataformat


@@ -1,5 +1,6 @@
from enum import Enum, auto
Args = "args"
Model = "model"
Model_Status = "model_status"
Model_Config = "model_config"


@@ -1,5 +1,11 @@
import time
start_time = time.time()
import os
import sys
import argparse
from typing import Sequence
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
@@ -12,7 +18,7 @@ from utils.rwkv import *
from utils.torch import *
from utils.ngrok import *
from utils.log import log_middleware
-from routes import completion, config, state_cache, midi
+from routes import completion, config, state_cache, midi, misc
import global_var
app = FastAPI(dependencies=[Depends(log_middleware)])
@@ -28,12 +34,18 @@ app.add_middleware(
app.include_router(completion.router)
app.include_router(config.router)
app.include_router(midi.router)
app.include_router(misc.router)
app.include_router(state_cache.router)
@app.on_event("startup")
def init():
global_var.init()
cmd_params = os.environ["RWKV_RUNNER_PARAMS"]
global_var.set(
global_var.Args, get_args(cmd_params.split(" ") if cmd_params else None)
)
state_cache.init()
set_torch()
@@ -56,9 +68,34 @@ def exit():
parent.kill()
-if __name__ == "__main__":
-    uvicorn.run(
-        "main:app",
-        port=8000 if len(sys.argv) < 2 else int(sys.argv[1]),
-        host="127.0.0.1" if len(sys.argv) < 3 else sys.argv[2],
-    )
+def get_args(args: Union[Sequence[str], None] = None):
+    parser = argparse.ArgumentParser()
+    group = parser.add_argument_group(title="server arguments")
+    group.add_argument(
+        "--port",
+        type=int,
+        default=8000,
+        help="port to run the server on (default: 8000)",
+    )
+    group.add_argument(
+        "--host",
+        type=str,
+        default="127.0.0.1",
+        help="host to run the server on (default: 127.0.0.1)",
+    )
+    group = parser.add_argument_group(title="mode arguments")
+    group.add_argument(
+        "--rwkv-beta",
+        action="store_true",
+        help="whether to use rwkv-beta (default: False)",
+    )
+    args = parser.parse_args(args)
+    return args
+
+
+if __name__ == "__main__":
+    args = get_args()
+    os.environ["RWKV_RUNNER_PARAMS"] = " ".join(sys.argv[1:])
+    print("--- %s seconds ---" % (time.time() - start_time))
+    uvicorn.run("main:app", port=args.port, host=args.host, workers=1)
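
A quick sanity check of the new parser (illustrative values):

```python
# get_args as defined above; the argument vector is illustrative.
args = get_args(["--rwkv-beta", "--port", "8001"])
assert args.rwkv_beta is True       # store_true flag
assert args.port == 8001
assert args.host == "127.0.0.1"     # default host
```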

Binary file not shown.


@@ -2,11 +2,12 @@ import asyncio
import json
from threading import Lock
from typing import List, Union
from enum import Enum
import base64
from fastapi import APIRouter, Request, status, HTTPException
from sse_starlette.sse import EventSourceResponse
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
import numpy as np
import tiktoken
from utils.rwkv import *
@@ -16,34 +17,52 @@ import global_var
router = APIRouter()
class Role(Enum):
User = "user"
Assistant = "assistant"
System = "system"
 class Message(BaseModel):
-    role: str
-    content: str
+    role: Role
+    content: str = Field(min_length=0)
+    raw: bool = Field(False, description="Whether to treat content as raw text")
default_stop = [
"\n\nUser",
"\n\nQuestion",
"\n\nQ",
"\n\nHuman",
"\n\nBob",
]
 class ChatCompletionBody(ModelConfigBody):
-    messages: List[Message]
-    model: str = "rwkv"
+    messages: Union[List[Message], None]
+    model: Union[str, None] = "rwkv"
     stream: bool = False
-    stop: Union[str, List[str]] = [
-        "\n\nUser",
-        "\n\nQuestion",
-        "\n\nQ",
-        "\n\nHuman",
-        "\n\nBob",
-    ]
-    user_name: str = None
-    assistant_name: str = None
+    stop: Union[str, List[str], None] = default_stop
+    user_name: Union[str, None] = Field(None, description="Internal user name")
+    assistant_name: Union[str, None] = Field(
+        None, description="Internal assistant name"
+    )
+    presystem: bool = Field(
+        True, description="Whether to insert default system prompt at the beginning"
+    )
     class Config:
         schema_extra = {
             "example": {
-                "messages": [{"role": "user", "content": "hello"}],
+                "messages": [
+                    {"role": Role.User.value, "content": "hello", "raw": False}
+                ],
                 "model": "rwkv",
                 "stream": False,
                 "stop": None,
                 "user_name": None,
                 "assistant_name": None,
+                "presystem": True,
                 "max_tokens": 1000,
                 "temperature": 1.2,
                 "top_p": 0.5,
@@ -54,10 +73,10 @@ class ChatCompletionBody(ModelConfigBody):
 class CompletionBody(ModelConfigBody):
-    prompt: Union[str, List[str]]
-    model: str = "rwkv"
+    prompt: Union[str, List[str], None]
+    model: Union[str, None] = "rwkv"
     stream: bool = False
-    stop: Union[str, List[str]] = None
+    stop: Union[str, List[str], None] = None
class Config:
schema_extra = {
@@ -87,7 +106,7 @@ async def eval_rwkv(
body: ModelConfigBody,
prompt: str,
stream: bool,
- stop: Union[str, List[str]],
+ stop: Union[str, List[str], None],
chat_mode: bool,
):
global requests_num
@@ -200,7 +219,7 @@ async def eval_rwkv(
"choices": [
{
"message": {
"role": "assistant",
"role": Role.Assistant.value,
"content": response,
},
"index": 0,
@@ -223,17 +242,8 @@ async def chat_completions(body: ChatCompletionBody, request: Request):
     if model is None:
         raise HTTPException(status.HTTP_400_BAD_REQUEST, "model not loaded")
-    question = body.messages[-1]
-    if question.role == "user":
-        question = question.content
-    elif question.role == "system":
-        question = body.messages[-2]
-        if question.role == "user":
-            question = question.content
-        else:
-            raise HTTPException(status.HTTP_400_BAD_REQUEST, "no question found")
-    else:
-        raise HTTPException(status.HTTP_400_BAD_REQUEST, "no question found")
+    if body.messages is None or body.messages == []:
+        raise HTTPException(status.HTTP_400_BAD_REQUEST, "messages not found")
interface = model.interface
user = model.user if body.user_name is None else body.user_name
@@ -241,30 +251,43 @@ async def chat_completions(body: ChatCompletionBody, request: Request):
is_raven = model.rwkv_type == RWKVType.Raven
completion_text = (
f"""
completion_text: str = ""
basic_system: Union[str, None] = None
if body.presystem:
if body.messages[0].role == Role.System:
basic_system = body.messages[0].content
if basic_system is None:
completion_text = (
f"""
The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. \
{bot} is very intelligent, creative and friendly. \
{bot} is unlikely to disagree with {user}, and {bot} doesn't like to ask {user} questions. \
{bot} likes to tell {user} a lot about herself and her opinions. \
{bot} usually gives {user} kind, helpful and informative advices.\n
"""
if is_raven
else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
for message in body.messages:
if message.role == "system":
completion_text = (
f"The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. "
if is_raven
else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ message.content.replace("\\n", "\n")
.replace("\r\n", "\n")
.replace("\n\n", "\n")
.replace("\n", " ")
.strip()
.replace("You are", f"{bot} is" if is_raven else "I am")
else (
f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
)
else:
if not body.messages[0].raw:
basic_system = (
basic_system.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\n\n", "\n")
.replace("\n", " ")
.strip()
)
completion_text = (
(
f"The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. "
if is_raven
else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
)
+ basic_system.replace("You are", f"{bot} is" if is_raven else "I am")
.replace("you are", f"{bot} is" if is_raven else "I am")
.replace("You're", f"{bot} is" if is_raven else "I'm")
.replace("you're", f"{bot} is" if is_raven else "I'm")
@@ -275,33 +298,32 @@ The following is a coherent verbose detailed conversation between a girl named {
.replace("", f"{bot}" if is_raven else "")
+ "\n\n"
)
break
for message in body.messages:
if message.role == "user":
completion_text += (
f"{user}{interface} "
+ message.content.replace("\\n", "\n")
.replace("\r\n", "\n")
for message in body.messages[(0 if basic_system is None else 1) :]:
append_message: str = ""
if message.role == Role.User:
append_message = f"{user}{interface} " + message.content
elif message.role == Role.Assistant:
append_message = f"{bot}{interface} " + message.content
elif message.role == Role.System:
append_message = message.content
if not message.raw:
append_message = (
append_message.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\n\n", "\n")
.strip()
+ "\n\n"
)
elif message.role == "assistant":
completion_text += (
f"{bot}{interface} "
+ message.content.replace("\\n", "\n")
.replace("\r\n", "\n")
.replace("\n\n", "\n")
.strip()
+ "\n\n"
)
completion_text += append_message + "\n\n"
completion_text += f"{bot}{interface}"
if type(body.stop) == str:
body.stop = [body.stop, f"\n\n{user}", f"\n\n{bot}"]
-else:
+elif type(body.stop) == list:
body.stop.append(f"\n\n{user}")
body.stop.append(f"\n\n{bot}")
+elif body.stop is None:
+body.stop = default_stop
if body.stream:
return EventSourceResponse(
@@ -345,8 +367,8 @@ async def completions(body: CompletionBody, request: Request):
class EmbeddingsBody(BaseModel):
-    input: Union[str, List[str], List[List[int]]]
-    model: str = "rwkv"
+    input: Union[str, List[str], List[List[int]], None]
+    model: Union[str, None] = "rwkv"
encoding_format: str = None
fast_mode: bool = False
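
Putting the new body fields together, a request that exercises `raw`, `presystem`, and the name overrides might look like this (a sketch using `requests` against a local server; values are illustrative):

```python
import requests

body = {
    "messages": [
        {"role": "system", "content": "You are a helpful assistant.", "raw": False},
        {"role": "user", "content": "hello"},
    ],
    "model": "rwkv",
    "stream": False,
    "stop": None,            # server falls back to default_stop
    "user_name": None,       # optional override of the internal user name
    "assistant_name": None,  # optional override of the internal assistant name
    "presystem": True,       # keep the default system-prompt behavior
}
r = requests.post("http://127.0.0.1:8000/chat/completions", json=body)
print(r.json()["choices"][0]["message"]["content"])
```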


@@ -29,6 +29,7 @@ def get_tokens_path(model_path: str):
class SwitchModelBody(BaseModel):
model: str
strategy: str
tokenizer: Union[str, None] = None
customCuda: bool = False
class Config:
@@ -36,6 +37,7 @@ class SwitchModelBody(BaseModel):
"example": {
"model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
"strategy": "cuda fp16",
"tokenizer": None,
"customCuda": False,
}
}
@@ -65,19 +67,24 @@ def switch_model(body: SwitchModelBody, response: Response, request: Request):
os.environ["RWKV_CUDA_ON"] = "1" if body.customCuda else "0"
global_var.set(global_var.Model_Status, global_var.ModelStatus.Loading)
tokenizer = (
get_tokens_path(body.model)
if body.tokenizer is None or body.tokenizer == ""
else body.tokenizer
)
try:
global_var.set(
global_var.Model,
TextRWKV(
model=body.model,
strategy=body.strategy,
- tokens_path=get_tokens_path(body.model),
+ tokens_path=tokenizer,
)
if "midi" not in body.model.lower()
else MusicRWKV(
model=body.model,
strategy=body.strategy,
- tokens_path=get_tokens_path(body.model),
+ tokens_path=tokenizer,
),
)
except Exception as e:
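
With the optional `tokenizer` field, a switch request that supplies a custom vocabulary simply sets it; leaving it null keeps the `get_tokens_path` lookup (a sketch, assuming the route backing `switch_model` is `/switch-model`):

```python
import requests

# Sketch of a model-switch request; the model and tokenizer paths are placeholders.
body = {
    "model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
    "strategy": "cuda fp16",
    "tokenizer": None,   # or e.g. "custom_vocab.txt" to override the default lookup
    "customCuda": False,
}
requests.post("http://127.0.0.1:8000/switch-model", json=body)
```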


@@ -0,0 +1,131 @@
from fastapi import APIRouter, HTTPException, status
from utils.rwkv import AbstractRWKV
import global_var
router = APIRouter()
@router.get("/dashboard/billing/credit_grants", tags=["MISC"])
def credit_grants():
return {
"object": "credit_summary",
"total_granted": 10000,
"total_used": 0,
"total_available": 10000,
"grants": {
"object": "list",
"data": [
{
"object": "credit_grant",
"grant_amount": 10000,
"used_amount": 0,
"effective_at": 1672531200,
"expires_at": 33229440000,
}
],
},
}
fake_models = [
{
"id": "gpt-3.5-turbo",
"object": "model",
"created": 1677610602,
"owned_by": "openai",
"permission": [
{
"id": "modelperm-zy5TOjnE2zVaicIcKO9bQDgX",
"object": "model_permission",
"created": 1690864883,
"allow_create_engine": False,
"allow_sampling": True,
"allow_logprobs": True,
"allow_search_indices": False,
"allow_view": True,
"allow_fine_tuning": False,
"organization": "*",
"group": None,
"is_blocking": False,
}
],
"root": "gpt-3.5-turbo",
"parent": None,
},
{
"id": "text-davinci-003",
"object": "model",
"created": 1669599635,
"owned_by": "openai-internal",
"permission": [
{
"id": "modelperm-a6niqBmW2JaGmo0fDO7FEt1n",
"object": "model_permission",
"created": 1690930172,
"allow_create_engine": False,
"allow_sampling": True,
"allow_logprobs": True,
"allow_search_indices": False,
"allow_view": True,
"allow_fine_tuning": False,
"organization": "*",
"group": None,
"is_blocking": False,
}
],
"root": "text-davinci-003",
"parent": None,
},
]
@router.get("/v1/models", tags=["MISC"])
@router.get("/models", tags=["MISC"])
def models():
model: AbstractRWKV = global_var.get(global_var.Model)
model_name = model.name if model else "rwkv"
return {
"object": "list",
"data": [
{
"id": model_name,
"object": "model",
"owned_by": "rwkv",
"root": model_name,
"parent": None,
},
*fake_models,
],
}
@router.get("/v1/models/{model_id}", tags=["MISC"])
@router.get("/models/{model_id}", tags=["MISC"])
def model(model_id: str):
for fake_model in fake_models:
if fake_model["id"] == model_id:
return fake_model
if "rwkv" in model_id.lower():
model: AbstractRWKV = global_var.get(global_var.Model)
model_name = model.name if model else "rwkv"
return {
"id": model_name,
"object": "model",
"owned_by": "rwkv",
"root": model_name,
"parent": None,
}
raise HTTPException(
status.HTTP_404_NOT_FOUND,
{
"error": {
"message": f"The model '{model_id}' does not exist",
"type": "invalid_request_error",
"param": "model",
"code": "model_not_found",
}
},
)
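
These endpoints mirror OpenAI's response shapes so existing clients keep working unmodified; a quick check against a local server:

```python
import requests

base = "http://127.0.0.1:8000"  # local RWKV-Runner backend assumed

models = requests.get(f"{base}/models").json()
print([m["id"] for m in models["data"]])  # loaded model name plus the fake entries

grants = requests.get(f"{base}/dashboard/billing/credit_grants").json()
print(grants["total_available"])          # always 10000 in this stub
```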


@@ -1,4 +1,4 @@
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Union
from utils.log import quick_log
from fastapi import APIRouter, HTTPException, Request, Response, status
from pydantic import BaseModel
@@ -60,7 +60,7 @@ def enable_state_cache():
class AddStateBody(BaseModel):
prompt: str
- tokens: List[str]
+ tokens: List[Union[str, int]]
state: Any
logits: Any
@@ -96,7 +96,7 @@ def add_state(body: AddStateBody):
quick_log(
None,
None,
f"New Trie Id: {id}\nTrie Len: {len(trie)}\nTrie Buff Size: {trie.buff_size()}\nDtrie Buff Size Of Id: {_get_a_dtrie_buff_size(dtrie[id])}",
f"New Trie Id: {id}\nTrie Len: {len(trie)}\nTrie Buff Size: {trie.buff_size()}\nDtrie Buff Size Of Id: {__get_a_dtrie_buff_size(dtrie[id])}",
)
return "success"
except Exception as e:
@@ -124,7 +124,7 @@ class LongestPrefixStateBody(BaseModel):
prompt: str
-def _get_a_dtrie_buff_size(dtrie_v):
+def __get_a_dtrie_buff_size(dtrie_v):
# print(sys.getsizeof(dtrie_v["tokens"][0])) # str
# print(sys.getsizeof(dtrie_v["tokens"][0]) * len(dtrie_v["tokens"]))
# print(dtrie_v["state"][0][0].element_size())


@@ -0,0 +1,124 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "element_wise.h"
#include "util.h"
// Equivalent Python code:
// ww = t_first + k
// p = torch.maximum(pp, ww)
// e1 = torch.exp(pp - p)
// e2 = torch.exp(ww - p)
// wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
// ww = t_decay + pp
// p = torch.maximum(ww, k)
// e1 = torch.exp(ww - p)
// e2 = torch.exp(k - p)
// t1 = e1 * aa + e2 * v
// t2 = e1 * bb + e2
// r = r * wkv
// return t1, t2, p, r
struct WkvForwardOne {
const float *t_first;
const float *k;
const float *pp;
const float *aa;
const float *bb;
const float *t_decay;
const float *v;
/* out */ float *t1;
/* out */ float *t2;
/* out */ float *p;
/* in & out */ half *r;
__device__ void operator()(int i) const {
float ww = t_first[i] + k[i];
float pp_ = pp[i];
float p_ = (pp_ > ww) ? pp_ : ww;
float e1 = expf(pp_ - p_);
float e2 = expf(ww - p_);
float aa_ = aa[i];
float bb_ = bb[i];
float v_ = v[i];
r[i] = __hmul(r[i], __float2half(((e1 * aa_ + e2 * v_) / (e1 * bb_ + e2))));
ww = t_decay[i] + pp_;
float k_ = k[i];
p_ = (ww > k_) ? ww : k_;
e1 = expf(ww - p_);
e2 = expf(k_ - p_);
t1[i] = e1 * aa_ + e2 * v_;
t2[i] = e1 * bb_ + e2;
p[i] = p_;
}
};
/*
Equivalent Python code:
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
*/
struct Mix {
const half *xx;
const half *sx;
const half *k_mix;
const half *v_mix;
const half *r_mix;
/* out */ half *kx;
/* out */ half *vx;
/* out */ half *rx;
__device__ void operator()(int i) const {
half xx_ = xx[i];
half sx_ = sx[i];
half k_mix_ = k_mix[i];
half v_mix_ = v_mix[i];
half r_mix_ = r_mix[i];
kx[i] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
vx[i] = __hadd(__hmul(xx_, v_mix_),
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
rx[i] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
};
using torch::Tensor;
void gemm_fp16_cublas_tensor(Tensor a, Tensor b, Tensor c);
Tensor att_one(Tensor x, Tensor ln_w, Tensor ln_b, Tensor sx, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw,
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx, Tensor rw,
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
/* imm */ Tensor k, Tensor pp, Tensor ww, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor v, /* in & out */ Tensor r,
/* out */ Tensor x_plus_out, /* out */ Tensor t1,
/* out */ Tensor t2, /* out */ Tensor p) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
element_wise(Mix{data_ptr<half>(xx), data_ptr<half>(sx),
data_ptr<half>(k_mix), data_ptr<half>(v_mix),
data_ptr<half>(r_mix), data_ptr<half>(kx),
data_ptr<half>(vx), data_ptr<half>(rx)},
x.numel());
gemm_fp16_cublas_tensor(kx, kw, k);
gemm_fp16_cublas_tensor(vx, vw, v);
gemm_fp16_cublas_tensor(rx, rw, r);
at::sigmoid_(r);
element_wise(WkvForwardOne{data_ptr<float>(t_first), data_ptr<float>(k),
data_ptr<float>(pp), data_ptr<float>(aa),
data_ptr<float>(bb), data_ptr<float>(t_decay),
data_ptr<float>(v), data_ptr<float>(t1),
data_ptr<float>(t2), data_ptr<float>(p),
data_ptr<half>(r)},
x.numel());
gemm_fp16_cublas_tensor(r, ow, x_plus_out);
x_plus_out += x;
return xx;
}


@@ -0,0 +1,109 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "element_wise.h"
#include "util.h"
// Equivalent Python code:
// s1 = t_first * a + s
// s2 = a + t_decay * s
struct Fused1 {
const float *t_first;
const float *t_decay;
const float *a;
const float *s;
const int32_t inner_size;
/* out */ float *s1;
/* out */ float *s2;
__device__ void operator()(int i) const {
const int j = i / inner_size;
s1[i] = t_first[j] * a[i] + s[i];
s2[i] = a[i] + t_decay[j] * s[i];
}
};
/*
Equivalent Python code:
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
*/
struct Mix {
const half *xx;
const half *sx;
const half *k_mix;
const half *v_mix;
const half *r_mix;
/* out */ half *kx;
/* out */ half *vx;
/* out */ half *rx;
__device__ void operator()(int i) const {
half xx_ = xx[i];
half sx_ = sx[i];
half k_mix_ = k_mix[i];
half v_mix_ = v_mix[i];
half r_mix_ = r_mix[i];
kx[i] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
vx[i] = __hadd(__hmul(xx_, v_mix_),
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
rx[i] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
};
using torch::Tensor;
void gemm_fp16_cublas_tensor(Tensor a, Tensor b, Tensor c);
Tensor att_one_v5(Tensor x, Tensor sx, Tensor s, Tensor ln_w, Tensor ln_b,
Tensor lx_w, Tensor lx_b, Tensor k_mix, Tensor v_mix,
Tensor r_mix, Tensor kw,
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx,
Tensor rw,
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
/* imm */ Tensor k, Tensor t_decay, /* imm */ Tensor v,
/* imm */ Tensor r, /* imm */ Tensor s1,
/* out */ Tensor x_plus_out, /* out */ Tensor s2) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
element_wise(Mix{data_ptr<half>(xx), data_ptr<half>(sx),
data_ptr<half>(k_mix), data_ptr<half>(v_mix),
data_ptr<half>(r_mix), data_ptr<half>(kx),
data_ptr<half>(vx), data_ptr<half>(rx)},
x.numel());
int H = t_decay.size(0);
int S = x.size(-1) / H;
gemm_fp16_cublas_tensor(rx, rw, r);
r = at::reshape(r, {H, 1, S});
gemm_fp16_cublas_tensor(kx, kw, k);
k = at::reshape(k, {H, S, 1});
gemm_fp16_cublas_tensor(vx, vw, v);
v = at::reshape(v, {H, 1, S});
{
Tensor a = at::matmul(k, v);
// s1 = t_first * a + s
// s2 = a + t_decay * s
element_wise(Fused1{data_ptr<float>(t_first), data_ptr<float>(t_decay),
data_ptr<float>(a), data_ptr<float>(s),
static_cast<int32_t>(a.size(1) * a.size(2)),
data_ptr<float>(s1), data_ptr<float>(s2)},
a.numel());
}
Tensor out = at::matmul(r, s1);
out = at::flatten(out);
out = at::squeeze(at::group_norm(at::unsqueeze(out, 0), H, lx_w, lx_b), 0);
out = at::_cast_Half(out);
gemm_fp16_cublas_tensor(out, ow, x_plus_out);
x_plus_out += x;
return xx;
}


@@ -0,0 +1,178 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "util.h"
#include "element_wise.h"
using torch::Tensor;
void gemm_fp16_cublas(const void *a, const void *b, void *c, int m,
int n, int k, bool output_fp32);
// based on `kernel_wkv_forward`, fusing more operations
__global__ void kernel_wkv_forward_new(
const int B, const int T, const int C, const float *__restrict__ const _w,
const float *__restrict__ const _u, const float *__restrict__ const _k,
const float *__restrict__ const _v, const half *__restrict__ const r,
half *__restrict__ const _y, float *__restrict__ const _aa,
float *__restrict__ const _bb, float *__restrict__ const _pp) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
const int _state_offset = _b * C + _c;
float u = _u[_c];
float w = _w[_c];
const float *__restrict__ const k = _k + _offset;
const float *__restrict__ const v = _v + _offset;
half *__restrict__ const y = _y + _offset;
float aa = _aa[_state_offset];
float bb = _bb[_state_offset];
float pp = _pp[_state_offset];
for (int i = 0; i < T; i++) {
const int ii = i * C;
const float kk = k[ii];
const float vv = v[ii];
float ww = u + kk;
float p = max(pp, ww);
float e1 = exp(pp - p);
float e2 = exp(ww - p);
y[ii] = __float2half((e1 * aa + e2 * vv) / (e1 * bb + e2));
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
_aa[_state_offset] = aa;
_bb[_state_offset] = bb;
_pp[_state_offset] = pp;
}
void cuda_wkv_forward_new(int B, int T, int C, float *w, float *u, float *k,
float *v, half *r, half *y, float *aa, float *bb,
float *pp) {
dim3 threadsPerBlock(min(C, 32));
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_wkv_forward_new<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, r,
y, aa, bb, pp);
}
__global__ void _att_mix(const half *xx, const half *sx, const half *k_mix,
const half *v_mix, const half *r_mix,
const int outer_size, const int inner_size, half *kx,
half *vx, half *rx) {
for (int idx2 = blockIdx.x * blockDim.x + threadIdx.x; idx2 < inner_size;
idx2 += blockDim.x * gridDim.x) {
half k_mix_ = k_mix[idx2];
half v_mix_ = v_mix[idx2];
half r_mix_ = r_mix[idx2];
for (int row = 0; row < outer_size; ++row) {
int idx1 = row * inner_size + idx2;
half xx_ = xx[idx1];
half sx_ = sx[idx1];
kx[idx1] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
vx[idx1] = __hadd(__hmul(xx_, v_mix_),
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
rx[idx1] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
}
}
void att_mix(const half *xx, const half *sx, const half *k_mix,
const half *v_mix, const half *r_mix, const int outer_size,
const int inner_size, half *kx, half *vx, half *rx) {
// 256 is good enough on most GPUs
const int32_t BLOCK_SIZE = 256;
assert(inner_size % BLOCK_SIZE == 0);
_att_mix<<<inner_size / BLOCK_SIZE, BLOCK_SIZE>>>(
xx, sx, k_mix, v_mix, r_mix, outer_size, inner_size, kx, vx, rx);
}
struct InplaceSigmoid {
__device__ __forceinline__ void operator()(int i) const {
ptr[i] = __float2half(1.0 / (1.0 + exp(-__half2float(ptr[i]))));
}
half *ptr;
};
struct InplaceMul {
__device__ __forceinline__ half operator()(int i) const {
y[i] = __hmul(x[i], y[i]);
}
half *y;
half *x;
};
/*
Equivalent Python code:
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
k = gemm(kx, kw, output_dtype=torch.float32)
v = gemm(vx, vw, output_dtype=torch.float32)
T = x.shape[0]
for t in range(T):
kk = k[t]
vv = v[t]
ww = t_first + kk
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, kk)
e1 = torch.exp(ww - p)
e2 = torch.exp(kk - p)
aa = e1 * aa + e2 * vv
bb = e1 * bb + e2
pp = p
out = gemm(r * sx, ow)
return x + out, xx[-1,:], aa, bb, pp
*/
Tensor att_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
Tensor ow, Tensor t_first, Tensor pp, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor buf, /* out */ Tensor x_plus_out) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
sx = at::cat({sx.unsqueeze(0), xx.slice(0, 0, -1)}, 0);
char* buf_ptr = (char*)buf.data_ptr();
half* kx = (half*)buf_ptr;
half* vx = kx + x.numel();
half* rx = vx + x.numel();
half* wkv_y = rx + x.numel();
att_mix(data_ptr<half>(xx), data_ptr<half>(sx), data_ptr<half>(k_mix),
data_ptr<half>(v_mix), data_ptr<half>(r_mix), xx.size(0), xx.size(1),
kx, vx, rx);
float* k = reinterpret_cast<float*>(wkv_y + x.numel());
float* v = k + x.size(0) * kw.size(1);
half* r = reinterpret_cast<half*>(v + x.size(0) * vw.size(1));
gemm_fp16_cublas(kx, kw.data_ptr(), k, x.size(0), kw.size(1), kw.size(0), true);
gemm_fp16_cublas(vx, vw.data_ptr(), v, x.size(0), vw.size(1), vw.size(0), true);
gemm_fp16_cublas(rx, rw.data_ptr(), r, x.size(0), rw.size(1), rw.size(0), false);
element_wise(InplaceSigmoid{r}, x.size(0) * rw.size(1));
cuda_wkv_forward_new(1, x.size(0), x.size(1), data_ptr<float>(t_decay),
data_ptr<float>(t_first), k, v, r,
wkv_y, data_ptr<float>(aa),
data_ptr<float>(bb), data_ptr<float>(pp));
element_wise(InplaceMul{wkv_y, r}, x.numel());
gemm_fp16_cublas(wkv_y, ow.data_ptr(), x_plus_out.data_ptr(), x.size(0), ow.size(1), ow.size(0), false);
x_plus_out += x;
return xx;
}


@@ -0,0 +1,21 @@
#include <cassert>
#include <cstddef>
#include <cstdint>
template <typename Func> __global__ void _element_wise(Func func, int n) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
i += blockDim.x * gridDim.x) {
func(i);
}
}
// NOTE: packed data type (e.g. float4) is overkill for current sizes
// (4096 in 7B model and 768 in 0.1B model),
// and is not faster than the plain float version.
template <typename Func>
void element_wise(Func func, int n) {
// 256 is good enough on most GPUs
const int32_t BLOCK_SIZE = 256;
assert(n % BLOCK_SIZE == 0);
_element_wise<<<n / BLOCK_SIZE, BLOCK_SIZE>>>(func, n);
}

backend-python/rwkv_pip/beta/cuda/ffn.cu (new vendored file, 165 lines)

@@ -0,0 +1,165 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "element_wise.h"
#include "util.h"
using torch::Tensor;
void gemm_fp16_cublas(const void *a, const void *b, void *c, int ori_m,
int ori_n, int ori_k, bool output_fp32);
__global__ void _ffn_seq_mix(const half *xx, const half *sx, const half *k_mix,
const half *r_mix, const int outer_size,
const int inner_size, half *kx, half *rx) {
for (int idx2 = blockIdx.x * blockDim.x + threadIdx.x; idx2 < inner_size;
idx2 += blockDim.x * gridDim.x) {
half k_mix_ = k_mix[idx2];
half r_mix_ = r_mix[idx2];
for (int row = 0; row < outer_size; ++row) {
int idx1 = row * inner_size + idx2;
half xx_ = xx[idx1];
half sx_ = sx[idx1];
kx[idx1] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
rx[idx1] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
}
}
void ffn_seq_mix(const half *xx, const half *sx, const half *k_mix,
const half *r_mix, const int outer_size, const int inner_size,
half *kx, half *rx) {
// 256 is good enough on most GPUs
const int32_t BLOCK_SIZE = 256;
assert(inner_size % BLOCK_SIZE == 0);
_ffn_seq_mix<<<inner_size / BLOCK_SIZE, BLOCK_SIZE>>>(
xx, sx, k_mix, r_mix, outer_size, inner_size, kx, rx);
}
struct InplaceSigmoid {
__device__ __forceinline__ void operator()(int i) const {
ptr[i] = __float2half(1.0 / (1.0 + exp(-__half2float(ptr[i]))));
}
half *ptr;
};
struct InplaceReLUAndSquare {
__device__ __forceinline__ void operator()(int i) const {
// __hmax is not defined in old cuda
if (__hgt(ptr[i], __float2half(0))) {
ptr[i] = __hmul(ptr[i], ptr[i]);
} else {
ptr[i] = __float2half(0);
}
}
half *ptr;
};
struct InplaceFma {
__device__ __forceinline__ void operator()(int i) const {
a[i] = __hfma(a[i], b[i], c[i]);
}
half *a;
const half *b;
const half *c;
};
/*
Equivalent Python code:
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
vx = torch.square(torch.relu(gemm(kx, kw)))
out = r * gemm(vx, vw)
return x + out, xx[-1,:]
*/
Tensor ffn_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
/* out */ Tensor x_plus_out) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
sx = at::cat({sx.unsqueeze(0), xx.slice(0, 0, -1)}, 0);
char *buf_ptr = (char *)buf.data_ptr();
half *kx = (half *)buf_ptr;
half *rx = kx + x.numel();
half *vx = rx + x.numel();
half *r = vx + x.size(0) * kw.size(1);
ffn_seq_mix(data_ptr<half>(xx), data_ptr<half>(sx), data_ptr<half>(k_mix),
data_ptr<half>(r_mix), xx.size(0), xx.size(1), kx, rx);
gemm_fp16_cublas(rx, rw.data_ptr(), r, x.size(0), rw.size(1), x.size(1),
false);
element_wise(InplaceSigmoid{r}, x.size(0) * rw.size(1));
gemm_fp16_cublas(kx, kw.data_ptr(), vx, x.size(0), kw.size(1), x.size(1),
false);
element_wise(InplaceReLUAndSquare{vx}, x.size(0) * kw.size(1));
gemm_fp16_cublas(vx, vw.data_ptr(), x_plus_out.data_ptr(), x.size(0),
vw.size(1), vw.size(0), false);
element_wise(InplaceFma{data_ptr<half>(x_plus_out), r, data_ptr<half>(x)},
x_plus_out.numel());
return xx;
}
struct FfnOneMix {
__device__ __forceinline__ void operator()(int idx) {
half k_mix_ = k_mix[idx];
half r_mix_ = r_mix[idx];
half xx_ = xx[idx];
half sx_ = sx[idx];
kx[idx] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
rx[idx] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
half *k_mix;
half *r_mix;
half *xx;
half *sx;
half *kx;
half *rx;
};
/*
Equivalent Python code:
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
vx = torch.square(torch.relu(gemm(kx, kw)))
out = r * gemm(vx, vw)
return x + out, xx
*/
Tensor ffn_one(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
/* out */ Tensor x_plus_out) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
char *buf_ptr = (char *)buf.data_ptr();
half *kx = (half *)buf_ptr;
half *rx = kx + x.numel();
half *vx = rx + x.numel();
half *r = vx + x.size(0) * kw.size(1);
element_wise(FfnOneMix{data_ptr<half>(k_mix), data_ptr<half>(r_mix),
data_ptr<half>(xx), data_ptr<half>(sx), kx, rx},
x.numel());
// vector * matrix, so m = 1
gemm_fp16_cublas(rx, rw.data_ptr(), r, 1, rw.size(1), rw.size(0), false);
element_wise(InplaceSigmoid{r}, rw.size(1));
gemm_fp16_cublas(kx, kw.data_ptr(), vx, 1, kw.size(1), kw.size(0), false);
element_wise(InplaceReLUAndSquare{vx}, kw.size(1));
gemm_fp16_cublas(vx, vw.data_ptr(), x_plus_out.data_ptr(), 1, vw.size(1),
vw.size(0), false);
element_wise(InplaceFma{data_ptr<half>(x_plus_out), r, data_ptr<half>(x)},
x_plus_out.numel());
return xx;
}


@@ -0,0 +1,128 @@
#include <cublas_v2.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#define CUBLAS_CHECK(condition) \
for (cublasStatus_t _cublas_check_status = (condition); \
_cublas_check_status != CUBLAS_STATUS_SUCCESS;) \
throw std::runtime_error("cuBLAS error " + \
std::to_string(_cublas_check_status) + " at " + \
std::to_string(__LINE__));
#define CUDA_CHECK(condition) \
for (cudaError_t _cuda_check_status = (condition); \
_cuda_check_status != cudaSuccess;) \
throw std::runtime_error( \
"CUDA error " + std::string(cudaGetErrorString(_cuda_check_status)) + \
" at " + std::to_string(__LINE__));
cublasHandle_t get_cublas_handle() {
static cublasHandle_t cublas_handle = []() {
cublasHandle_t handle = nullptr;
CUBLAS_CHECK(cublasCreate(&handle));
#if CUDA_VERSION < 11000
CUBLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
#else
CUBLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // CUDA_VERSION < 11000
return handle;
}();
return cublas_handle;
}
/*
NOTE: blas gemm is column-major by default, but we need row-major output.
The data of row-major, transposed matrix is exactly the same as the
column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
*/
void gemm_fp16_cublas(const void *a, const void *b, void *c, int ori_m,
int ori_n, int ori_k, bool output_fp32) {
const auto cuda_data_type = CUDA_R_16F;
const auto cuda_c_data_type = output_fp32 ? CUDA_R_32F : CUDA_R_16F;
const auto compute_type = CUDA_R_32F;
const float sp_alpha = 1.f;
// use CUBLAS_OP_N. see the notes above
const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
// m = (B^T).size(0) = B.size(1) = n;
const int cublas_m = ori_n;
const int cublas_k = ori_k;
// compatible with rwkv one mode, where a 1-D tensor * 2-D tensor
// const int n = a.dense_dim() == 1 ? 1 : a.size(0);
const int cublas_n = ori_m;
const int cublas_lda = cublas_m;
const int cublas_ldb = cublas_k;
const int cublas_ldc = cublas_m;
cublasHandle_t cublas_handle = get_cublas_handle();
#if CUDA_VERSION >= 11000
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
#endif
const float sp_beta = 0.f;
CUBLAS_CHECK(cublasGemmEx(
cublas_handle, cublas_trans_a, cublas_trans_b, cublas_m, cublas_n,
cublas_k, &sp_alpha, b, cuda_data_type, cublas_lda,
a, cuda_data_type, cublas_ldb, &sp_beta, c,
cuda_c_data_type, cublas_ldc, compute_type, algo));
}
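
The row-major/column-major identity the comment relies on can be checked numerically; a small NumPy sketch:

```python
import numpy as np

# C = A @ B stored row-major is byte-identical to C^T = B^T @ A^T stored
# column-major, which is why the wrapper swaps the operands and keeps CUBLAS_OP_N.
A = np.random.rand(4, 3).astype(np.float32)
B = np.random.rand(3, 5).astype(np.float32)
C = A @ B
assert np.allclose((B.T @ A.T).T, C)
```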
/*
NOTE: blas gemm is column-major by default, but we need row-major output.
The data of row-major, transposed matrix is exactly the same as the
column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
*/
void gemm_fp16_cublas_tensor(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
if (a.sizes().size() == 1) {
assert(b.sizes().size() == 2);
a = at::unsqueeze(a, 0);
}
const auto cuda_data_type = CUDA_R_16F;
const auto cuda_c_data_type =
c.dtype() == torch::kFloat32 ? CUDA_R_32F : CUDA_R_16F;
const auto compute_type = CUDA_R_32F;
const float sp_alpha = 1.f;
// swap a and b, and use CUBLAS_OP_N. see the notes above
std::swap(a, b);
const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
// m = (B^T).size(0) = B.size(1), and = A.size(1) after swap,
// negative axis is used because of the existence of batch matmul.
const int m = a.size(-1);
const int k = a.size(-2);
const int n = b.size(-2);
const int cublas_lda = m;
const int cublas_ldb = k;
const int cublas_ldc = m;
cublasHandle_t cublas_handle = get_cublas_handle();
#if CUDA_VERSION >= 11000
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
#endif
const float sp_beta = 0.f;
if (a.sizes().size() == 2 && b.sizes().size() == 2) {
CUBLAS_CHECK(cublasGemmEx(
cublas_handle, cublas_trans_a, cublas_trans_b, m, n, k, &sp_alpha,
a.data_ptr(), cuda_data_type, cublas_lda, b.data_ptr(), cuda_data_type,
cublas_ldb, &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc,
compute_type, algo));
} else {
// batch matmul
assert(a.sizes().size() == 3 && b.sizes().size() == 3);
const long long int cublas_stride_a = m * k;
const long long int cublas_stride_b = k * n;
const long long int cublas_stride_c = m * n;
CUBLAS_CHECK(cublasGemmStridedBatchedEx(
cublas_handle, cublas_trans_a, cublas_trans_b, m,
n, k, &sp_alpha, a.data_ptr(), cuda_data_type, cublas_lda,
cublas_stride_a, b.data_ptr(), cuda_data_type, cublas_ldb, cublas_stride_b,
&sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc, cublas_stride_c,
a.size(0), compute_type, algo));
}
}


@@ -0,0 +1,246 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#define MIN_VALUE (-1e38)
typedef at::Half fp16;
__half *cast(fp16 *ptr) {
return reinterpret_cast<__half *>(ptr);
}
template <typename F>
__global__ void kernel_wkv_forward(const int B, const int T, const int C,
const float *__restrict__ const _w, const float *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
F *__restrict__ const _y, float *__restrict__ const _aa, float *__restrict__ const _bb, float *__restrict__ const _pp) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
const int _state_offset = _b * C + _c;
float u = _u[_c];
float w = _w[_c];
const F *__restrict__ const k = _k + _offset;
const F *__restrict__ const v = _v + _offset;
F *__restrict__ const y = _y + _offset;
float aa = _aa[_state_offset];
float bb = _bb[_state_offset];
float pp = _pp[_state_offset];
for (int i = 0; i < T; i++) {
const int ii = i * C;
const float kk = float(k[ii]);
const float vv = float(v[ii]);
float ww = u + kk;
float p = max(pp, ww);
float e1 = exp(pp - p);
float e2 = exp(ww - p);
y[ii] = F((e1 * aa + e2 * vv) / (e1 * bb + e2));
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
_aa[_state_offset] = aa;
_bb[_state_offset] = bb;
_pp[_state_offset] = pp;
}
template <typename F>
void cuda_wkv_forward(int B, int T, int C, float *w, float *u, F *k, F *v, F *y, float *aa, float *bb, float *pp) {
dim3 threadsPerBlock( min(C, 32) );
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_wkv_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, aa, bb, pp);
}
template void cuda_wkv_forward<fp16>(
int B, int T, int C,
float *w, float *u, fp16 *k, fp16 *v, fp16 *y,
float *aa, float *bb, float *pp);
template void cuda_wkv_forward<float>(
int B, int T, int C,
float *w, float *u, float *k, float *v, float *y,
float *aa, float *bb, float *pp);
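
For readers following the kernel, here is a NumPy reference of the per-(batch, channel) recurrence it implements; the running max subtraction (`p`) keeps both exponentials bounded, exactly as in the CUDA loop:

```python
import numpy as np

def wkv_forward_ref(w, u, k, v, aa, bb, pp):
    # w: t_decay, u: t_first (scalars for one channel); k, v: [T] sequences;
    # aa/bb: running numerator/denominator state, pp: its log-scale offset.
    T = len(k)
    y = np.empty(T, dtype=np.float32)
    for t in range(T):
        ww = u + k[t]
        p = max(pp, ww)
        e1, e2 = np.exp(pp - p), np.exp(ww - p)
        y[t] = (e1 * aa + e2 * v[t]) / (e1 * bb + e2)
        ww = w + pp
        p = max(ww, k[t])
        e1, e2 = np.exp(ww - p), np.exp(k[t] - p)
        aa, bb, pp = e1 * aa + e2 * v[t], e1 * bb + e2, p
    return y, aa, bb, pp
```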
__global__ void kernel_mm_seq_fp32i8(
const int B, const int N, const int M,
const float *__restrict__ const x, const int x_stride,
const uint8_t *__restrict__ const w, const int w_stride,
const float *__restrict__ const mx,
const float *__restrict__ const rx,
const float *__restrict__ const my,
const float *__restrict__ const ry,
float *__restrict__ const y, const int y_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int k = blockIdx.y * blockDim.y + threadIdx.y;
if (i < B && k < M) {
float y_local = 0;
for (int j = 0; j < N; ++j) {
y_local += x[i * x_stride + j] * (
(float(w[j * w_stride + k]) + 0.5f)
* rx[k] * ry[j] + mx[k] + my[j]
);
}
y[i * y_stride + k] = y_local;
}
}
template <typename F>
void cuda_mm8_seq(int B, int N, int M,
F *x, int x_stride,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
F *y, int y_stride);
template <>
void cuda_mm8_seq<float>(int B, int N, int M,
float *x, int x_stride,
uint8_t *w, int w_stride,
float *mx, float *rx,
float *my, float *ry,
float *y, int y_stride) {
dim3 blockSize(1, 128);
dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_seq_fp32i8<<<gridSize, blockSize>>>(
B, N, M, x, x_stride, w, w_stride,
mx, rx, my, ry, y, y_stride);
}
__global__ void kernel_mm_seq_fp16i8(
const int B, const int N, const int M,
const __half *__restrict__ const x, const int x_stride,
const uint8_t *__restrict__ const w, const int w_stride,
const __half *__restrict__ const mx,
const __half *__restrict__ const rx,
const __half *__restrict__ const my,
const __half *__restrict__ const ry,
__half *__restrict__ const y, const int y_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int k = blockIdx.y * blockDim.y + threadIdx.y;
if (i < B && k < M) {
float y_local = 0;
for (int j = 0; j < N; ++j) {
y_local += __half2float(x[i * x_stride + j]) * (
(float(w[j * w_stride + k]) + 0.5f)
* __half2float(rx[k]) * __half2float(ry[j])
+ __half2float(mx[k]) + __half2float(my[j])
);
}
y[i * y_stride + k] = __float2half(y_local);
}
}
template <>
void cuda_mm8_seq<fp16>(int B, int N, int M,
fp16 *x, int x_stride,
uint8_t *w, int w_stride,
fp16 *mx, fp16 *rx,
fp16 *my, fp16 *ry,
fp16 *y, int y_stride) {
dim3 blockSize(1, 128);
dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_seq_fp16i8<<<gridSize, blockSize>>>(
B, N, M, cast(x), x_stride, w, w_stride,
cast(mx), cast(rx), cast(my), cast(ry), cast(y), y_stride);
}
#define MM8_ONE_JSPLIT 24
#define MM8_ONE_TILE 1024
__global__ void kernel_mm_one_fp32i8(
const int N, const int M,
const float *__restrict__ const x,
const uint8_t *__restrict__ const w, const int w_stride,
const float *__restrict__ const mx,
const float *__restrict__ const rx,
const float *__restrict__ const my,
const float *__restrict__ const ry,
float *__restrict__ const y) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
if (k < M) {
float y_local = 0;
for (int j = j0; j < j1; ++j) {
y_local += x[j] * (
(float(w[j * w_stride + k]) + 0.5f)
* rx[k] * ry[j] + mx[k] + my[j]
);
}
atomicAdd(&y[k], y_local);
}
}
template <typename F>
void cuda_mm8_one(int N, int M,
F *x,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
float *y);
template <>
void cuda_mm8_one<float>(int N, int M,
float *x,
uint8_t *w, int w_stride,
float *mx, float *rx,
float *my, float *ry,
float *y) {
dim3 blockSize(1, MM8_ONE_TILE);
dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_one_fp32i8<<<gridSize, blockSize>>>(
N, M, x, w, w_stride,
mx, rx, my, ry, y);
}
__global__ void kernel_mm_one_fp16i8(
const int N, const int M,
const __half *__restrict__ const x,
const uint8_t *__restrict__ const w, const int w_stride,
const __half *__restrict__ const mx,
const __half *__restrict__ const rx,
const __half *__restrict__ const my,
const __half *__restrict__ const ry,
float *__restrict__ const y) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
if (k < M) {
float y_local = 0;
for (int j = j0; j < j1; ++j) {
y_local += __half2float(x[j]) * (
(float(w[j * w_stride + k]) + 0.5f)
* __half2float(rx[k]) * __half2float(ry[j])
+ __half2float(mx[k]) + __half2float(my[j])
);
}
atomicAdd(&y[k], y_local);
}
}
template <>
void cuda_mm8_one<fp16>(int N, int M,
fp16 *x,
uint8_t *w, int w_stride,
fp16 *mx, fp16 *rx,
fp16 *my, fp16 *ry,
float *y) {
dim3 blockSize(1, MM8_ONE_TILE);
dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_one_fp16i8<<<gridSize, blockSize>>>(
N, M, cast(x), w, w_stride,
cast(mx), cast(rx), cast(my), cast(ry), y);
}
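The INT8 kernels above all share one dequantization formula: each uint8 weight is shifted by half a bucket and rescaled by per-row/per-column statistics before the multiply. For reference, the same computation in PyTorch looks like the torch_mm8_* fallback that appears in model.py further down this diff; a minimal sketch (illustrative, not part of the diff):

import torch

# x: activations; w: uint8 weights; mx/my: per-column/per-row minima;
# rx/ry: per-column/per-row scales (already divided by 16 at quantization time).
# Adding 0.5 recenters each uint8 bucket before rescaling.
def mm8_reference(x, w, mx, rx, my, ry):
    return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)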


@@ -0,0 +1,7 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
template <typename T> T *data_ptr(torch::Tensor x) { return x.data_ptr<T>(); }
template <> inline half *data_ptr(torch::Tensor x) {
return reinterpret_cast<half *>(x.data_ptr<at::Half>());
}


@@ -0,0 +1,181 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <iostream>
#include <c10/cuda/CUDAGuard.h>
typedef at::Half fp16;
template <typename F>
void cuda_wkv_forward(int B, int T, int C,
float *w, float *u, F *k, F *v, F *y,
float *aa, float *bb, float *pp);
template <typename F>
void cuda_mm8_seq(int B, int N, int M,
F *x, int x_stride,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
F *y, int y_stride);
template <typename F>
void cuda_mm8_one(int N, int M,
F *x,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
float *y);
void wkv_forward(int64_t B, int64_t T, int64_t C,
torch::Tensor &w, torch::Tensor &u,
torch::Tensor &k, torch::Tensor &v, torch::Tensor &y,
torch::Tensor &aa, torch::Tensor &bb, torch::Tensor &pp) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (k.scalar_type()) {
case c10::ScalarType::Half:
cuda_wkv_forward(B, T, C,
w.data_ptr<float>(), u.data_ptr<float>(),
k.data_ptr<fp16>(), v.data_ptr<fp16>(), y.data_ptr<fp16>(),
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
break;
case c10::ScalarType::Float:
cuda_wkv_forward(B, T, C,
w.data_ptr<float>(), u.data_ptr<float>(),
k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(),
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
void mm8_seq(int64_t B, int64_t N, int64_t M,
torch::Tensor &x, torch::Tensor &w,
torch::Tensor &mx, torch::Tensor &rx,
torch::Tensor &my, torch::Tensor &ry,
torch::Tensor &y) {
assert(x.stride(1) == 1);
assert(w.stride(1) == 1);
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
assert(my.stride(0) == 1 && ry.stride(0) == 1);
assert(y.stride(1) == 1);
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (x.scalar_type()) {
case c10::ScalarType::Half:
cuda_mm8_seq(
B, N, M,
x.data_ptr<fp16>(), x.stride(0),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
y.data_ptr<fp16>(), y.stride(0));
break;
case c10::ScalarType::Float:
cuda_mm8_seq(
B, N, M,
x.data_ptr<float>(), x.stride(0),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<float>(), rx.data_ptr<float>(),
my.data_ptr<float>(), ry.data_ptr<float>(),
y.data_ptr<float>(), y.stride(0));
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
void mm8_one(int64_t N, int64_t M,
torch::Tensor &x, torch::Tensor &w,
torch::Tensor &mx, torch::Tensor &rx,
torch::Tensor &my, torch::Tensor &ry,
torch::Tensor &y) {
assert(x.stride(0) == 1);
assert(w.stride(1) == 1);
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
assert(my.stride(0) == 1 && ry.stride(0) == 1);
assert(y.stride(0) == 1);
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (x.scalar_type()) {
case c10::ScalarType::Half:
cuda_mm8_one(
N, M,
x.data_ptr<fp16>(),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
y.data_ptr<float>());
break;
case c10::ScalarType::Float:
cuda_mm8_one(
N, M,
x.data_ptr<float>(),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<float>(), rx.data_ptr<float>(),
my.data_ptr<float>(), ry.data_ptr<float>(),
y.data_ptr<float>());
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
using torch::Tensor;
#ifndef DISABLE_CUBLAS_GEMM
void gemm_fp16_cublas_tensor(Tensor a, Tensor b, Tensor c);
#endif
Tensor att_one(Tensor x, Tensor ln_w, Tensor ln_b, Tensor sx, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw,
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx, Tensor rw,
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
/* imm */ Tensor k, Tensor pp, Tensor ww, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor v, /* in & out */ Tensor r,
/* out */ Tensor x_plus_out, /* out */ Tensor t1,
/* out */ Tensor t2, /* out */ Tensor p);
Tensor att_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
Tensor ow, Tensor t_first, Tensor pp, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor buf, /* out */ Tensor x_plus_out);
Tensor att_one_v5(Tensor x, Tensor sx, Tensor s, Tensor ln_w, Tensor ln_b,
Tensor lx_w, Tensor lx_b, Tensor k_mix, Tensor v_mix,
Tensor r_mix, Tensor kw,
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx,
Tensor rw,
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
/* imm */ Tensor k, Tensor t_decay, /* imm */ Tensor v,
/* imm */ Tensor r, /* imm */ Tensor s1,
/* out */ Tensor x_plus_out, /* out */ Tensor s2);
Tensor ffn_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
/* out */ Tensor x_plus_out);
Tensor ffn_one(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
/* out */ Tensor x_plus_out);
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("wkv_forward", &wkv_forward, "wkv forward");
m.def("mm8_seq", &mm8_seq, "mm8 seq");
m.def("mm8_one", &mm8_one, "mm8 one");
m.def("gemm_fp16_cublas", &gemm_fp16_cublas_tensor, "gemv fp16 cublas");
m.def("att_one", &att_one, "att one");
m.def("att_one_v5", &att_one_v5, "att one v5");
m.def("att_seq", &att_seq, "att seq");
m.def("ffn_seq", &ffn_seq, "ffn seq");
m.def("ffn_one", &ffn_one, "ffn one");
}
TORCH_LIBRARY(rwkv, m) {
m.def("wkv_forward", wkv_forward);
m.def("mm8_seq", mm8_seq);
m.def("mm8_one", mm8_one);
m.def("gemm_fp16_cublas", gemm_fp16_cublas_tensor);
m.def("att_one", att_one);
m.def("att_one_v5", &att_one_v5);
m.def("att_seq", att_seq);
m.def("ffn_seq", ffn_seq);
m.def("ffn_one", ffn_one);
}
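With the PYBIND11_MODULE and TORCH_LIBRARY registrations above, the compiled extension is callable from Python either as a module or through torch.ops.rwkv. A minimal sketch of the torch.ops route, assuming the extension is built with torch.utils.cpp_extension.load the way the loader code later in this diff does (shapes and initial values are illustrative):

import torch
from torch.utils.cpp_extension import load

load(name="wkv_cuda", sources=["wrapper.cpp", "operators.cu"],
     is_python_module=False, verbose=True)

T, C = 8, 768
w = -torch.exp(torch.randn(C, device="cuda"))  # time_decay (fp32)
u = torch.randn(C, device="cuda")              # time_first (fp32)
k = torch.randn(T, C, device="cuda")           # fp32; fp16 is also supported
v = torch.randn(T, C, device="cuda")
y = torch.empty(T, C, device="cuda")
aa = torch.zeros(C, device="cuda")             # wkv state
bb = torch.zeros(C, device="cuda")
pp = torch.full((C,), -1e38, device="cuda")    # matches MIN_VALUE in operators.cu
torch.ops.rwkv.wkv_forward(1, T, C, w, u, k, v, y, aa, bb, pp)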

backend-python/rwkv_pip/beta/model.py vendored Normal file

File diff suppressed because it is too large


backend-python/rwkv_pip/cuda/att_one.cu vendored Normal file

@@ -0,0 +1,124 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "element_wise.h"
#include "util.h"
// Equivalent Python code:
// ww = t_first + k
// p = torch.maximum(pp, ww)
// e1 = torch.exp(pp - p)
// e2 = torch.exp(ww - p)
// wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
// ww = t_decay + pp
// p = torch.maximum(ww, k)
// e1 = torch.exp(ww - p)
// e2 = torch.exp(k - p)
// t1 = e1 * aa + e2 * v
// t2 = e1 * bb + e2
// r = r * wkv
// return t1, t2, p, r
struct WkvForwardOne {
const float *t_first;
const float *k;
const float *pp;
const float *aa;
const float *bb;
const float *t_decay;
const float *v;
/* out */ float *t1;
/* out */ float *t2;
/* out */ float *p;
/* in & out */ half *r;
__device__ void operator()(int i) const {
float ww = t_first[i] + k[i];
float pp_ = pp[i];
float p_ = (pp_ > ww) ? pp_ : ww;
float e1 = expf(pp_ - p_);
float e2 = expf(ww - p_);
float aa_ = aa[i];
float bb_ = bb[i];
float v_ = v[i];
r[i] = __hmul(r[i], __float2half(((e1 * aa_ + e2 * v_) / (e1 * bb_ + e2))));
ww = t_decay[i] + pp_;
float k_ = k[i];
p_ = (ww > k_) ? ww : k_;
e1 = expf(ww - p_);
e2 = expf(k_ - p_);
t1[i] = e1 * aa_ + e2 * v_;
t2[i] = e1 * bb_ + e2;
p[i] = p_;
}
};
/*
Equivalent Python code:
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
*/
struct Mix {
const half *xx;
const half *sx;
const half *k_mix;
const half *v_mix;
const half *r_mix;
/* out */ half *kx;
/* out */ half *vx;
/* out */ half *rx;
__device__ void operator()(int i) const {
half xx_ = xx[i];
half sx_ = sx[i];
half k_mix_ = k_mix[i];
half v_mix_ = v_mix[i];
half r_mix_ = r_mix[i];
kx[i] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
vx[i] = __hadd(__hmul(xx_, v_mix_),
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
rx[i] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
};
using torch::Tensor;
void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
Tensor att_one(Tensor x, Tensor ln_w, Tensor ln_b, Tensor sx, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw,
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx, Tensor rw,
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
/* imm */ Tensor k, Tensor pp, Tensor ww, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor v, /* in & out */ Tensor r,
/* out */ Tensor x_plus_out, /* out */ Tensor t1,
/* out */ Tensor t2, /* out */ Tensor p) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
element_wise(Mix{data_ptr<half>(xx), data_ptr<half>(sx),
data_ptr<half>(k_mix), data_ptr<half>(v_mix),
data_ptr<half>(r_mix), data_ptr<half>(kx),
data_ptr<half>(vx), data_ptr<half>(rx)},
x.numel());
gemm_fp16_cublas(kx, kw, k);
gemm_fp16_cublas(vx, vw, v);
gemm_fp16_cublas(rx, rw, r);
at::sigmoid_(r);
element_wise(WkvForwardOne{data_ptr<float>(t_first), data_ptr<float>(k),
data_ptr<float>(pp), data_ptr<float>(aa),
data_ptr<float>(bb), data_ptr<float>(t_decay),
data_ptr<float>(v), data_ptr<float>(t1),
data_ptr<float>(t2), data_ptr<float>(p),
data_ptr<half>(r)},
x.numel());
gemm_fp16_cublas(r, ow, x_plus_out);
x_plus_out += x;
return xx;
}
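The WkvForwardOne functor is a numerically stabilized transcription of the Python comments above: rather than accumulating raw exponentials, it carries a running exponent p and keeps the state pair (a, b) implicitly scaled by e^{-p}. With u = t_first and w = t_decay, one token step computes

    p' = \max(p,\; u + k_t), \qquad
    wkv_t = \frac{e^{p - p'}\, a + e^{u + k_t - p'}\, v_t}{e^{p - p'}\, b + e^{u + k_t - p'}}

followed by the state update

    q = \max(w + p,\; k_t), \qquad
    a \leftarrow e^{w + p - q}\, a + e^{k_t - q}\, v_t, \qquad
    b \leftarrow e^{w + p - q}\, b + e^{k_t - q}, \qquad
    p \leftarrow q

Because numerator and denominator are divided by the same e^{p'}, the max-shift never changes the result; it only keeps the exponentials in range.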

backend-python/rwkv_pip/cuda/att_seq.cu vendored Normal file

@@ -0,0 +1,179 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "util.h"
#include "element_wise.h"
using torch::Tensor;
void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
void gemm_fp16_cublas(const void *a, const void *b, void *c, int m,
int n, int k, bool output_fp32);
// based on `kernel_wkv_forward`, fusing more operations
__global__ void kernel_wkv_forward_new(
const int B, const int T, const int C, const float *__restrict__ const _w,
const float *__restrict__ const _u, const float *__restrict__ const _k,
const float *__restrict__ const _v, const half *__restrict__ const r,
half *__restrict__ const _y, float *__restrict__ const _aa,
float *__restrict__ const _bb, float *__restrict__ const _pp) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
const int _state_offset = _b * C + _c;
float u = _u[_c];
float w = _w[_c];
const float *__restrict__ const k = _k + _offset;
const float *__restrict__ const v = _v + _offset;
half *__restrict__ const y = _y + _offset;
float aa = _aa[_state_offset];
float bb = _bb[_state_offset];
float pp = _pp[_state_offset];
for (int i = 0; i < T; i++) {
const int ii = i * C;
const float kk = k[ii];
const float vv = v[ii];
float ww = u + kk;
float p = max(pp, ww);
float e1 = exp(pp - p);
float e2 = exp(ww - p);
y[ii] = __float2half((e1 * aa + e2 * vv) / (e1 * bb + e2));
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
_aa[_state_offset] = aa;
_bb[_state_offset] = bb;
_pp[_state_offset] = pp;
}
void cuda_wkv_forward_new(int B, int T, int C, float *w, float *u, float *k,
float *v, half *r, half *y, float *aa, float *bb,
float *pp) {
dim3 threadsPerBlock(min(C, 32));
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_wkv_forward_new<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, r,
y, aa, bb, pp);
}
__global__ void _att_mix(const half *xx, const half *sx, const half *k_mix,
const half *v_mix, const half *r_mix,
const int outer_size, const int inner_size, half *kx,
half *vx, half *rx) {
for (int idx2 = blockIdx.x * blockDim.x + threadIdx.x; idx2 < inner_size;
idx2 += blockDim.x * gridDim.x) {
half k_mix_ = k_mix[idx2];
half v_mix_ = v_mix[idx2];
half r_mix_ = r_mix[idx2];
for (int row = 0; row < outer_size; ++row) {
int idx1 = row * inner_size + idx2;
half xx_ = xx[idx1];
half sx_ = sx[idx1];
kx[idx1] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
vx[idx1] = __hadd(__hmul(xx_, v_mix_),
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
rx[idx1] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
}
}
void att_mix(const half *xx, const half *sx, const half *k_mix,
const half *v_mix, const half *r_mix, const int outer_size,
const int inner_size, half *kx, half *vx, half *rx) {
// 256 is good enough on most GPUs
const int32_t BLOCK_SIZE = 256;
assert(inner_size % BLOCK_SIZE == 0);
_att_mix<<<inner_size / BLOCK_SIZE, BLOCK_SIZE>>>(
xx, sx, k_mix, v_mix, r_mix, outer_size, inner_size, kx, vx, rx);
}
struct InplaceSigmoid {
__device__ __forceinline__ void operator()(int i) const {
ptr[i] = __float2half(1.0 / (1.0 + exp(-__half2float(ptr[i]))));
}
half *ptr;
};
struct InplaceMul {
__device__ __forceinline__ void operator()(int i) const {
y[i] = __hmul(x[i], y[i]);
}
half *y;
half *x;
};
/*
Equivalent Python code:
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
k = gemm(kx, kw, output_dtype=torch.float32)
v = gemm(vx, vw, output_dtype=torch.float32)
T = x.shape[0]
for t in range(T):
kk = k[t]
vv = v[t]
ww = t_first + kk
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, kk)
e1 = torch.exp(ww - p)
e2 = torch.exp(kk - p)
aa = e1 * aa + e2 * vv
bb = e1 * bb + e2
pp = p
out = gemm(r * sx, ow)
return x + out, xx[-1,:], aa, bb, pp
*/
Tensor att_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
Tensor ow, Tensor t_first, Tensor pp, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor buf, /* out */ Tensor x_plus_out) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
sx = at::cat({sx.unsqueeze(0), xx.slice(0, 0, -1)}, 0);
char* buf_ptr = (char*)buf.data_ptr();
half* kx = (half*)buf_ptr;
half* vx = kx + x.numel();
half* rx = vx + x.numel();
half* wkv_y = rx + x.numel();
att_mix(data_ptr<half>(xx), data_ptr<half>(sx), data_ptr<half>(k_mix),
data_ptr<half>(v_mix), data_ptr<half>(r_mix), xx.size(0), xx.size(1),
kx, vx, rx);
float* k = reinterpret_cast<float*>(wkv_y + x.numel());
float* v = k + x.size(0) * kw.size(1);
half* r = reinterpret_cast<half*>(v + x.size(0) * vw.size(1));
gemm_fp16_cublas(kx, kw.data_ptr(), k, x.size(0), kw.size(1), kw.size(0), true);
gemm_fp16_cublas(vx, vw.data_ptr(), v, x.size(0), vw.size(1), vw.size(0), true);
gemm_fp16_cublas(rx, rw.data_ptr(), r, x.size(0), rw.size(1), rw.size(0), false);
element_wise(InplaceSigmoid{r}, x.size(0) * rw.size(1));
cuda_wkv_forward_new(1, x.size(0), x.size(1), data_ptr<float>(t_decay),
data_ptr<float>(t_first), k, v, r,
wkv_y, data_ptr<float>(aa),
data_ptr<float>(bb), data_ptr<float>(pp));
element_wise(InplaceMul{wkv_y, r}, x.numel());
gemm_fp16_cublas(wkv_y, ow.data_ptr(), x_plus_out.data_ptr(), x.size(0), ow.size(1), ow.size(0), false);
x_plus_out += x;
return xx;
}
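Note that att_seq allocates nothing itself: every intermediate is carved out of the caller-provided buf tensor by the pointer arithmetic above. A hypothetical helper showing the minimum buffer size implied by that layout (T = x.size(0), C = x.size(1), Mk/Mv/Mr = kw/vw/rw.size(1); names are illustrative, not from the diff):

def att_seq_buf_bytes(T, C, Mk, Mv, Mr):
    n = T * C
    return (2 * 4 * n      # kx, vx, rx, wkv_y: four half (2-byte) buffers
            + 4 * T * Mk   # k: fp32 GEMM output
            + 4 * T * Mv   # v: fp32 GEMM output
            + 2 * T * Mr)  # r: half GEMM output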


@@ -0,0 +1,21 @@
#include <cassert>
#include <cstddef>
#include <cstdint>
template <typename Func> __global__ void _element_wise(Func func, int n) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
i += blockDim.x * gridDim.x) {
func(i);
}
}
// NOTE: a packed data type (e.g. float4) is overkill for current sizes
// (4096 in the 7B model and 768 in the 0.1B model),
// and is not faster than the plain float version.
template <typename Func>
void element_wise(Func func, int n) {
// 256 is good enough on most GPUs
const int32_t BLOCK_SIZE = 256;
assert(n % BLOCK_SIZE == 0);
_element_wise<<<n / BLOCK_SIZE, BLOCK_SIZE>>>(func, n);
}

backend-python/rwkv_pip/cuda/ffn.cu vendored Normal file

@@ -0,0 +1,165 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "element_wise.h"
#include "util.h"
using torch::Tensor;
void gemm_fp16_cublas(const void *a, const void *b, void *c, int ori_m,
int ori_n, int ori_k, bool output_fp32);
__global__ void _ffn_seq_mix(const half *xx, const half *sx, const half *k_mix,
const half *r_mix, const int outer_size,
const int inner_size, half *kx, half *rx) {
for (int idx2 = blockIdx.x * blockDim.x + threadIdx.x; idx2 < inner_size;
idx2 += blockDim.x * gridDim.x) {
half k_mix_ = k_mix[idx2];
half r_mix_ = r_mix[idx2];
for (int row = 0; row < outer_size; ++row) {
int idx1 = row * inner_size + idx2;
half xx_ = xx[idx1];
half sx_ = sx[idx1];
kx[idx1] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
rx[idx1] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
}
}
void ffn_seq_mix(const half *xx, const half *sx, const half *k_mix,
const half *r_mix, const int outer_size, const int inner_size,
half *kx, half *rx) {
// 256 is good enough on most GPUs
const int32_t BLOCK_SIZE = 256;
assert(inner_size % BLOCK_SIZE == 0);
_ffn_seq_mix<<<inner_size / BLOCK_SIZE, BLOCK_SIZE>>>(
xx, sx, k_mix, r_mix, outer_size, inner_size, kx, rx);
}
struct InplaceSigmoid {
__device__ __forceinline__ void operator()(int i) const {
ptr[i] = __float2half(1.0 / (1.0 + exp(-__half2float(ptr[i]))));
}
half *ptr;
};
struct InplaceReLUAndSquare {
__device__ __forceinline__ void operator()(int i) const {
// __hmax is not defined in old cuda
if (__hgt(ptr[i], __float2half(0))) {
ptr[i] = __hmul(ptr[i], ptr[i]);
} else {
ptr[i] = __float2half(0);
}
}
half *ptr;
};
struct InplaceFma {
__device__ __forceinline__ void operator()(int i) const {
a[i] = __hfma(a[i], b[i], c[i]);
}
half *a;
const half *b;
const half *c;
};
/*
Equivalent Python code:
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
vx = torch.square(torch.relu(gemm(kx, kw)))
out = r * gemm(vx, vw)
return x + out, xx[-1,:]
*/
Tensor ffn_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
/* out */ Tensor x_plus_out) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
sx = at::cat({sx.unsqueeze(0), xx.slice(0, 0, -1)}, 0);
char *buf_ptr = (char *)buf.data_ptr();
half *kx = (half *)buf_ptr;
half *rx = kx + x.numel();
half *vx = rx + x.numel();
half *r = vx + x.size(0) * kw.size(1);
ffn_seq_mix(data_ptr<half>(xx), data_ptr<half>(sx), data_ptr<half>(k_mix),
data_ptr<half>(r_mix), xx.size(0), xx.size(1), kx, rx);
gemm_fp16_cublas(rx, rw.data_ptr(), r, x.size(0), rw.size(1), x.size(1),
false);
element_wise(InplaceSigmoid{r}, x.size(0) * rw.size(1));
gemm_fp16_cublas(kx, kw.data_ptr(), vx, x.size(0), kw.size(1), x.size(1),
false);
element_wise(InplaceReLUAndSquare{vx}, x.size(0) * kw.size(1));
gemm_fp16_cublas(vx, vw.data_ptr(), x_plus_out.data_ptr(), x.size(0),
vw.size(1), vw.size(0), false);
element_wise(InplaceFma{data_ptr<half>(x_plus_out), r, data_ptr<half>(x)},
x_plus_out.numel());
return xx;
}
struct FfnOneMix {
__device__ __forceinline__ void operator()(int idx) {
half k_mix_ = k_mix[idx];
half r_mix_ = r_mix[idx];
half xx_ = xx[idx];
half sx_ = sx[idx];
kx[idx] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
rx[idx] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
half *k_mix;
half *r_mix;
half *xx;
half *sx;
half *kx;
half *rx;
};
/*
Equivalent Python code:
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
vx = torch.square(torch.relu(gemm(kx, kw)))
out = r * gemm(vx, vw)
return x + out, xx
*/
Tensor ffn_one(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
/* out */ Tensor x_plus_out) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
char *buf_ptr = (char *)buf.data_ptr();
half *kx = (half *)buf_ptr;
half *rx = kx + x.numel();
half *vx = rx + x.numel();
half *r = vx + x.size(0) * kw.size(1);
element_wise(FfnOneMix{data_ptr<half>(k_mix), data_ptr<half>(r_mix),
data_ptr<half>(xx), data_ptr<half>(sx), kx, rx},
x.numel());
// vector * matrix, so m = 1
gemm_fp16_cublas(rx, rw.data_ptr(), r, 1, rw.size(1), rw.size(0), false);
element_wise(InplaceSigmoid{r}, rw.size(1));
gemm_fp16_cublas(kx, kw.data_ptr(), vx, 1, kw.size(1), kw.size(0), false);
element_wise(InplaceReLUAndSquare{vx}, kw.size(1));
gemm_fp16_cublas(vx, vw.data_ptr(), x_plus_out.data_ptr(), 1, vw.size(1),
vw.size(0), false);
element_wise(InplaceFma{data_ptr<half>(x_plus_out), r, data_ptr<half>(x)},
x_plus_out.numel());
return xx;
}


@@ -0,0 +1,86 @@
#include <cublas_v2.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#define CUBLAS_CHECK(condition) \
for (cublasStatus_t _cublas_check_status = (condition); \
_cublas_check_status != CUBLAS_STATUS_SUCCESS;) \
throw std::runtime_error("cuBLAS error " + \
std::to_string(_cublas_check_status) + " at " + \
std::to_string(__LINE__));
#define CUDA_CHECK(condition) \
for (cudaError_t _cuda_check_status = (condition); \
_cuda_check_status != cudaSuccess;) \
throw std::runtime_error( \
"CUDA error " + std::string(cudaGetErrorString(_cuda_check_status)) + \
" at " + std::to_string(__LINE__));
cublasHandle_t get_cublas_handle() {
static cublasHandle_t cublas_handle = []() {
cublasHandle_t handle = nullptr;
CUBLAS_CHECK(cublasCreate(&handle));
#if CUDA_VERSION < 11000
CUBLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
#else
CUBLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // CUDA_VERSION < 11000
return handle;
}();
return cublas_handle;
}
/*
NOTE: blas gemm is column-major by default, but we need row-major output.
The data of row-major, transposed matrix is exactly the same as the
column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
*/
void gemm_fp16_cublas(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
const auto cuda_data_type = CUDA_R_16F;
const auto cuda_c_data_type =
c.dtype() == torch::kFloat32 ? CUDA_R_32F : CUDA_R_16F;
const auto compute_type = CUDA_R_32F;
const float sp_alpha = 1.f;
// swap a and b, and use CUBLAS_OP_N. see the notes above
std::swap(a, b);
const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
// m = (B^T).size(0) = B.size(1), which equals a.size(-1) after the swap;
// negative axes are used because batch matmul is also supported.
const int m = a.size(-1);
const int k = a.size(-2);
const int n = b.size(-2);
const int cublas_lda = m;
const int cublas_ldb = k;
const int cublas_ldc = m;
cublasHandle_t cublas_handle = get_cublas_handle();
#if CUDA_VERSION >= 11000
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
#endif
const float sp_beta = 0.f;
if (a.sizes().size() == 2 && b.sizes().size() == 2) {
CUBLAS_CHECK(cublasGemmEx(
cublas_handle, cublas_trans_a, cublas_trans_b, m, n, k, &sp_alpha,
a.data_ptr(), cuda_data_type, cublas_lda, b.data_ptr(), cuda_data_type,
cublas_ldb, &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc,
compute_type, algo));
} else {
// batch matmul
assert(a.sizes().size() == 3 && b.sizes().size() == 3);
const long long int cublas_stride_a = m * k;
const long long int cublas_stride_b = k * n;
const long long int cublas_stride_c = m * n;
CUBLAS_CHECK(cublasGemmStridedBatchedEx(
cublas_handle, cublas_trans_a, cublas_trans_b, m,
n, k, &sp_alpha, a.data_ptr(), cuda_data_type, cublas_lda,
cublas_stride_a, b.data_ptr(), cuda_data_type, cublas_ldb, cublas_stride_b,
&sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc, cublas_stride_c,
a.size(0), compute_type, algo));
}
}
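The operand swap above can be sanity-checked outside CUDA: a row-major C and a column-major Cᵀ occupy identical bytes, so computing Cᵀ = BᵀAᵀ in column-major order (with no explicit transposes, hence CUBLAS_OP_N) yields row-major C. A quick NumPy verification of the layout identity (illustrative only, not part of the extension):

import numpy as np

A = np.random.rand(3, 4).astype(np.float32)  # row-major
B = np.random.rand(4, 5).astype(np.float32)
C = A @ B                                    # row-major product
# Column-major C^T is byte-for-byte identical to row-major C:
assert np.asfortranarray(B.T @ A.T).tobytes() == C.tobytes()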


@@ -0,0 +1,246 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#define MIN_VALUE (-1e38)
typedef at::Half fp16;
__half *cast(fp16 *ptr) {
return reinterpret_cast<__half *>(ptr);
}
template <typename F>
__global__ void kernel_wkv_forward(const int B, const int T, const int C,
const float *__restrict__ const _w, const float *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
F *__restrict__ const _y, float *__restrict__ const _aa, float *__restrict__ const _bb, float *__restrict__ const _pp) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
const int _state_offset = _b * C + _c;
float u = _u[_c];
float w = _w[_c];
const F *__restrict__ const k = _k + _offset;
const F *__restrict__ const v = _v + _offset;
F *__restrict__ const y = _y + _offset;
float aa = _aa[_state_offset];
float bb = _bb[_state_offset];
float pp = _pp[_state_offset];
for (int i = 0; i < T; i++) {
const int ii = i * C;
const float kk = float(k[ii]);
const float vv = float(v[ii]);
float ww = u + kk;
float p = max(pp, ww);
float e1 = exp(pp - p);
float e2 = exp(ww - p);
y[ii] = F((e1 * aa + e2 * vv) / (e1 * bb + e2));
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
_aa[_state_offset] = aa;
_bb[_state_offset] = bb;
_pp[_state_offset] = pp;
}
template <typename F>
void cuda_wkv_forward(int B, int T, int C, float *w, float *u, F *k, F *v, F *y, float *aa, float *bb, float *pp) {
dim3 threadsPerBlock( min(C, 32) );
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_wkv_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, aa, bb, pp);
}
template void cuda_wkv_forward<fp16>(
int B, int T, int C,
float *w, float *u, fp16 *k, fp16 *v, fp16 *y,
float *aa, float *bb, float *pp);
template void cuda_wkv_forward<float>(
int B, int T, int C,
float *w, float *u, float *k, float *v, float *y,
float *aa, float *bb, float *pp);
__global__ void kernel_mm_seq_fp32i8(
const int B, const int N, const int M,
const float *__restrict__ const x, const int x_stride,
const uint8_t *__restrict__ const w, const int w_stride,
const float *__restrict__ const mx,
const float *__restrict__ const rx,
const float *__restrict__ const my,
const float *__restrict__ const ry,
float *__restrict__ const y, const int y_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int k = blockIdx.y * blockDim.y + threadIdx.y;
if (i < B && k < M) {
float y_local = 0;
for (int j = 0; j < N; ++j) {
y_local += x[i * x_stride + j] * (
(float(w[j * w_stride + k]) + 0.5f)
* rx[k] * ry[j] + mx[k] + my[j]
);
}
y[i * y_stride + k] = y_local;
}
}
template <typename F>
void cuda_mm8_seq(int B, int N, int M,
F *x, int x_stride,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
F *y, int y_stride);
template <>
void cuda_mm8_seq<float>(int B, int N, int M,
float *x, int x_stride,
uint8_t *w, int w_stride,
float *mx, float *rx,
float *my, float *ry,
float *y, int y_stride) {
dim3 blockSize(1, 128);
dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_seq_fp32i8<<<gridSize, blockSize>>>(
B, N, M, x, x_stride, w, w_stride,
mx, rx, my, ry, y, y_stride);
}
__global__ void kernel_mm_seq_fp16i8(
const int B, const int N, const int M,
const __half *__restrict__ const x, const int x_stride,
const uint8_t *__restrict__ const w, const int w_stride,
const __half *__restrict__ const mx,
const __half *__restrict__ const rx,
const __half *__restrict__ const my,
const __half *__restrict__ const ry,
__half *__restrict__ const y, const int y_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int k = blockIdx.y * blockDim.y + threadIdx.y;
if (i < B && k < M) {
float y_local = 0;
for (int j = 0; j < N; ++j) {
y_local += __half2float(x[i * x_stride + j]) * (
(float(w[j * w_stride + k]) + 0.5f)
* __half2float(rx[k]) * __half2float(ry[j])
+ __half2float(mx[k]) + __half2float(my[j])
);
}
y[i * y_stride + k] = __float2half(y_local);
}
}
template <>
void cuda_mm8_seq<fp16>(int B, int N, int M,
fp16 *x, int x_stride,
uint8_t *w, int w_stride,
fp16 *mx, fp16 *rx,
fp16 *my, fp16 *ry,
fp16 *y, int y_stride) {
dim3 blockSize(1, 128);
dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_seq_fp16i8<<<gridSize, blockSize>>>(
B, N, M, cast(x), x_stride, w, w_stride,
cast(mx), cast(rx), cast(my), cast(ry), cast(y), y_stride);
}
#define MM8_ONE_JSPLIT 24
#define MM8_ONE_TILE 1024
__global__ void kernel_mm_one_fp32i8(
const int N, const int M,
const float *__restrict__ const x,
const uint8_t *__restrict__ const w, const int w_stride,
const float *__restrict__ const mx,
const float *__restrict__ const rx,
const float *__restrict__ const my,
const float *__restrict__ const ry,
float *__restrict__ const y) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
if (k < M) {
float y_local = 0;
for (int j = j0; j < j1; ++j) {
y_local += x[j] * (
(float(w[j * w_stride + k]) + 0.5f)
* rx[k] * ry[j] + mx[k] + my[j]
);
}
atomicAdd(&y[k], y_local);
}
}
template <typename F>
void cuda_mm8_one(int N, int M,
F *x,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
float *y);
template <>
void cuda_mm8_one<float>(int N, int M,
float *x,
uint8_t *w, int w_stride,
float *mx, float *rx,
float *my, float *ry,
float *y) {
dim3 blockSize(1, MM8_ONE_TILE);
dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_one_fp32i8<<<gridSize, blockSize>>>(
N, M, x, w, w_stride,
mx, rx, my, ry, y);
}
__global__ void kernel_mm_one_fp16i8(
const int N, const int M,
const __half *__restrict__ const x,
const uint8_t *__restrict__ const w, const int w_stride,
const __half *__restrict__ const mx,
const __half *__restrict__ const rx,
const __half *__restrict__ const my,
const __half *__restrict__ const ry,
float *__restrict__ const y) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
if (k < M) {
float y_local = 0;
for (int j = j0; j < j1; ++j) {
y_local += __half2float(x[j]) * (
(float(w[j * w_stride + k]) + 0.5f)
* __half2float(rx[k]) * __half2float(ry[j])
+ __half2float(mx[k]) + __half2float(my[j])
);
}
atomicAdd(&y[k], y_local);
}
}
template <>
void cuda_mm8_one<fp16>(int N, int M,
fp16 *x,
uint8_t *w, int w_stride,
fp16 *mx, fp16 *rx,
fp16 *my, fp16 *ry,
float *y) {
dim3 blockSize(1, MM8_ONE_TILE);
dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_one_fp16i8<<<gridSize, blockSize>>>(
N, M, cast(x), w, w_stride,
cast(mx), cast(rx), cast(my), cast(ry), y);
}

backend-python/rwkv_pip/cuda/rwkv5.cu vendored Normal file

@@ -0,0 +1,88 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;
template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C, const int H, float *__restrict__ _state,
const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
F *__restrict__ const _y)
{
const int b = blockIdx.x / H;
const int h = blockIdx.x % H;
const int i = threadIdx.x;
_w += h*_N_;
_u += h*_N_;
_state += h*_N_*_N_ + i*_N_; // wrong if B > 1 !!!
__shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
float state[_N_];
#pragma unroll
for (int j = 0; j < _N_; j++)
state[j] = _state[j];
__syncthreads();
u[i] = float(_u[i]);
w[i] = _w[i];
__syncthreads();
for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
{
__syncthreads();
r[i] = float(_r[t]);
k[i] = float(_k[t]);
__syncthreads();
const float v = float(_v[t]);
float y = 0;
#pragma unroll
for (int j = 0; j < _N_; j+=4)
{
const float4& r_ = (float4&)(r[j]);
const float4& k_ = (float4&)(k[j]);
const float4& w_ = (float4&)(w[j]);
const float4& u_ = (float4&)(u[j]);
float4& s = (float4&)(state[j]);
float4 x;
x.x = k_.x * v;
x.y = k_.y * v;
x.z = k_.z * v;
x.w = k_.w * v;
y += r_.x * (u_.x * x.x + s.x);
y += r_.y * (u_.y * x.y + s.y);
y += r_.z * (u_.z * x.z + s.z);
y += r_.w * (u_.w * x.w + s.w);
s.x = s.x * w_.x + x.x;
s.y = s.y * w_.y + x.y;
s.z = s.z * w_.z + x.z;
s.w = s.w * w_.w + x.w;
}
_y[t] = F(y);
}
#pragma unroll
for (int j = 0; j < _N_; j++)
_state[j] = state[j];
}
void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
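Written per head with head size N = _N_ and state matrix S ∈ R^{N×N}, the loop above implements the RWKV-5 linear-attention recurrence (a transcription of the kernel, with ⊙ the element-wise product):

    y_t = S_t\, r_t + \bigl(r_t^\top (u \odot k_t)\bigr)\, v_t, \qquad
    S_{t+1} = S_t\, \mathrm{diag}(w) + v_t\, k_t^\top

The float4 casts merely vectorize the inner j-loop four channels at a time; note the kernel's own caveat that the per-thread state offset is wrong for B > 1.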


@@ -0,0 +1,30 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;
void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y);
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y);
void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), w.data_ptr<float>(), u.data_ptr<fp16>(), y.data_ptr<fp16>());
}
void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), w.data_ptr<float>(), u.data_ptr<fp32>(), y.data_ptr<fp32>());
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward_bf16", &forward_bf16, "rwkv5 forward_bf16");
m.def("forward_fp16", &forward_fp16, "rwkv5 forward_fp16");
m.def("forward_fp32", &forward_fp32, "rwkv5 forward_fp32");
}
TORCH_LIBRARY(rwkv5, m) {
m.def("forward_bf16", forward_bf16);
m.def("forward_fp16", forward_fp16);
m.def("forward_fp32", forward_fp32);
}

backend-python/rwkv_pip/cuda/util.h vendored Normal file

@@ -0,0 +1,7 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
template <typename T> T *data_ptr(torch::Tensor x) { return x.data_ptr<T>(); }
template <> inline half *data_ptr(torch::Tensor x) {
return reinterpret_cast<half *>(x.data_ptr<at::Half>());
}

backend-python/rwkv_pip/cuda/wrapper.cpp vendored Normal file

@@ -0,0 +1,141 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <iostream>
#include <c10/cuda/CUDAGuard.h>
typedef at::Half fp16;
template <typename F>
void cuda_wkv_forward(int B, int T, int C,
float *w, float *u, F *k, F *v, F *y,
float *aa, float *bb, float *pp);
template <typename F>
void cuda_mm8_seq(int B, int N, int M,
F *x, int x_stride,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
F *y, int y_stride);
template <typename F>
void cuda_mm8_one(int N, int M,
F *x,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
float *y);
void wkv_forward(int64_t B, int64_t T, int64_t C,
torch::Tensor &w, torch::Tensor &u,
torch::Tensor &k, torch::Tensor &v, torch::Tensor &y,
torch::Tensor &aa, torch::Tensor &bb, torch::Tensor &pp) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (k.scalar_type()) {
case c10::ScalarType::Half:
cuda_wkv_forward(B, T, C,
w.data_ptr<float>(), u.data_ptr<float>(),
k.data_ptr<fp16>(), v.data_ptr<fp16>(), y.data_ptr<fp16>(),
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
break;
case c10::ScalarType::Float:
cuda_wkv_forward(B, T, C,
w.data_ptr<float>(), u.data_ptr<float>(),
k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(),
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
void mm8_seq(int64_t B, int64_t N, int64_t M,
torch::Tensor &x, torch::Tensor &w,
torch::Tensor &mx, torch::Tensor &rx,
torch::Tensor &my, torch::Tensor &ry,
torch::Tensor &y) {
assert(x.stride(1) == 1);
assert(w.stride(1) == 1);
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
assert(my.stride(0) == 1 && ry.stride(0) == 1);
assert(y.stride(1) == 1);
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (x.scalar_type()) {
case c10::ScalarType::Half:
cuda_mm8_seq(
B, N, M,
x.data_ptr<fp16>(), x.stride(0),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
y.data_ptr<fp16>(), y.stride(0));
break;
case c10::ScalarType::Float:
cuda_mm8_seq(
B, N, M,
x.data_ptr<float>(), x.stride(0),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<float>(), rx.data_ptr<float>(),
my.data_ptr<float>(), ry.data_ptr<float>(),
y.data_ptr<float>(), y.stride(0));
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
void mm8_one(int64_t N, int64_t M,
torch::Tensor &x, torch::Tensor &w,
torch::Tensor &mx, torch::Tensor &rx,
torch::Tensor &my, torch::Tensor &ry,
torch::Tensor &y) {
assert(x.stride(0) == 1);
assert(w.stride(1) == 1);
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
assert(my.stride(0) == 1 && ry.stride(0) == 1);
assert(y.stride(0) == 1);
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (x.scalar_type()) {
case c10::ScalarType::Half:
cuda_mm8_one(
N, M,
x.data_ptr<fp16>(),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
y.data_ptr<float>());
break;
case c10::ScalarType::Float:
cuda_mm8_one(
N, M,
x.data_ptr<float>(),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<float>(), rx.data_ptr<float>(),
my.data_ptr<float>(), ry.data_ptr<float>(),
y.data_ptr<float>());
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
using torch::Tensor;
#ifndef DISABLE_CUBLAS_GEMM
void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
#endif
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("wkv_forward", &wkv_forward, "wkv forward");
m.def("mm8_seq", &mm8_seq, "mm8 seq");
m.def("mm8_one", &mm8_one, "mm8 one");
#ifndef DISABLE_CUBLAS_GEMM
m.def("gemm_fp16_cublas", &gemm_fp16_cublas, "gemv fp16 cublas");
#endif
}
TORCH_LIBRARY(rwkv, m) {
m.def("wkv_forward", wkv_forward);
m.def("mm8_seq", mm8_seq);
m.def("mm8_one", mm8_one);
#ifndef DISABLE_CUBLAS_GEMM
m.def("gemm_fp16_cublas", gemm_fp16_cublas);
#endif
}

backend-python/rwkv_pip/model.py vendored Normal file

File diff suppressed because it is too large

backend-python/rwkv_pip/rwkv5.pyd vendored Normal file

Binary file not shown.


@@ -16,6 +16,7 @@ class PIPELINE_ARGS:
         top_k=0,
         alpha_frequency=0.2,
         alpha_presence=0.2,
+        alpha_decay=0.996,
         token_ban=[],
         token_stop=[],
         chunk_len=256,
@@ -25,6 +26,7 @@ class PIPELINE_ARGS:
         self.top_k = top_k
         self.alpha_frequency = alpha_frequency  # Frequency Penalty (as in GPT-3)
         self.alpha_presence = alpha_presence  # Presence Penalty (as in GPT-3)
+        self.alpha_decay = alpha_decay  # gradually decay the penalty
         self.token_ban = token_ban  # ban the generation of some tokens
         self.token_stop = token_stop  # stop generation whenever you see any token here
         self.chunk_len = (
@@ -33,7 +35,7 @@ class PIPELINE_ARGS:
 class PIPELINE:
-    def __init__(self, model, WORD_NAME):
+    def __init__(self, model, WORD_NAME: str):
         self.model = model
         if WORD_NAME == "cl100k_base":
             import tiktoken
@@ -47,9 +49,15 @@ class PIPELINE:
                 os.path.dirname(os.path.abspath(__file__)) + "/rwkv_vocab_v20230424.txt"
             )
         else:
-            from tokenizers import Tokenizer
-            self.tokenizer = Tokenizer.from_file(WORD_NAME)
+            if WORD_NAME.endswith(".txt"):
+                sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+                from rwkv_tokenizer import TRIE_TOKENIZER
+                self.tokenizer = TRIE_TOKENIZER(WORD_NAME)
+            else:
+                from tokenizers import Tokenizer
+                self.tokenizer = Tokenizer.from_file(WORD_NAME)
def refine_context(self, context):
context = context.strip().split("\n")
@@ -78,7 +86,7 @@ class PIPELINE:
         sorted_ids = np.argsort(probs)
         sorted_probs = probs[sorted_ids][::-1]
         cumulative_probs = np.cumsum(sorted_probs)
-        cutoff = float(sorted_probs[np.argmax(cumulative_probs > top_p)])
+        cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
         probs[probs < cutoff] = 0
         if top_k < len(probs) and top_k > 0:
             probs[sorted_ids[:-top_k]] = 0
@@ -92,7 +100,7 @@ class PIPELINE:
         sorted_probs = probs[sorted_ids]
         sorted_probs = torch.flip(sorted_probs, dims=(0,))
         cumulative_probs = torch.cumsum(sorted_probs, dim=-1).cpu().numpy()
-        cutoff = float(sorted_probs[np.argmax(cumulative_probs > top_p)])
+        cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
         probs[probs < cutoff] = 0
         if top_k < len(probs) and top_k > 0:
             probs[sorted_ids[:-top_k]] = 0
@@ -127,10 +135,13 @@ class PIPELINE:
             if token in args.token_stop:
                 break
             all_tokens += [token]
+            for xxx in occurrence:
+                occurrence[xxx] *= args.alpha_decay
             if token not in occurrence:
                 occurrence[token] = 1
             else:
                 occurrence[token] += 1
             # print(occurrence) # debug
             # output
             tmp = self.decode(all_tokens[out_last:])
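Two behavioral changes are worth calling out in this file: the cumulative_probs >= top_p comparison tightens the nucleus cutoff to the first token at which the cumulative mass reaches top_p, and the new alpha_decay makes the repetition penalty fade geometrically instead of accumulating forever. A rough illustration of the decay (not from the diff): at the default 0.996, an accumulated occurrence count halves after about 173 generated tokens, since 0.996^173 ≈ 0.5.

# Illustration only: geometric decay of an accumulated occurrence count.
occurrence = {123: 1.0}            # hypothetical token id
for _ in range(173):
    for t in occurrence:
        occurrence[t] *= 0.996     # args.alpha_decay
print(round(occurrence[123], 3))   # ~0.5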

backend-python/rwkv_pip/wkv_cuda.pyd vendored Normal file

Binary file not shown.


@@ -2,6 +2,8 @@ import json
 import logging
 from typing import Any
 from fastapi import Request
+from pydantic import BaseModel
+from enum import Enum
logger = logging.getLogger()
@@ -14,12 +16,21 @@ fh.setFormatter(formatter)
 logger.addHandler(fh)
+class ClsEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, BaseModel):
+            return obj.dict()
+        if isinstance(obj, Enum):
+            return obj.value
+        return super().default(obj)
 def quick_log(request: Request, body: Any, response: str):
     try:
         logger.info(
             f"Client: {request.client if request else ''}\nUrl: {request.url if request else ''}\n"
             + (
-                f"Body: {json.dumps(body.__dict__, default=vars, ensure_ascii=False)}\n"
+                f"Body: {json.dumps(body.__dict__, ensure_ascii=False, cls=ClsEncoder)}\n"
                 if body
                 else ""
             )
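ClsEncoder lets quick_log serialize request bodies whose fields are pydantic models or enums without a custom default= hook. A small usage sketch (Role and Message are hypothetical stand-ins, not types from the diff):

import json
from enum import Enum
from pydantic import BaseModel

class Role(Enum):
    user = "user"

class Message(BaseModel):
    role: Role
    content: str

body = {"messages": [Message(role=Role.user, content="hi")]}
print(json.dumps(body, ensure_ascii=False, cls=ClsEncoder))
# -> {"messages": [{"role": "user", "content": "hi"}]}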


@@ -10,6 +10,7 @@ from fastapi import HTTPException
 from pydantic import BaseModel, Field
 import numpy as np
 from routes import state_cache
+import global_var
 END_OF_TEXT = 0
@@ -27,7 +28,17 @@ class RWKVType(Enum):
 class AbstractRWKV(ABC):
     def __init__(self, model: str, strategy: str, tokens_path: str):
-        from rwkv.model import RWKV as Model  # dynamic import to make RWKV_CUDA_ON work
+        rwkv_beta = global_var.get(global_var.Args).rwkv_beta
+        # dynamic import to make RWKV_CUDA_ON work
+        if rwkv_beta:
+            from rwkv_pip.beta.model import (
+                RWKV as Model,
+            )
+        else:
+            from rwkv_pip.model import (
+                RWKV as Model,
+            )
         from rwkv_pip.utils import PIPELINE
         filename, _ = os.path.splitext(os.path.basename(model))
@@ -221,7 +232,7 @@ class AbstractRWKV(ABC):
         return state[0].tolist(), token_len
     def generate(
-        self, prompt: str, stop: Union[str, List[str]] = None
+        self, prompt: str, stop: Union[str, List[str], None] = None
     ) -> Iterable[Tuple[str, str, int, int]]:
         quick_log(None, None, "Generation Prompt:\n" + prompt)
         cache = None
@@ -438,8 +449,10 @@ The following is a coherent verbose detailed conversation between a girl named {
 {bot} usually gives {user} kind, helpful and informative advices.\n
 """
             if self.rwkv_type == RWKVType.Raven
-            else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
-            + "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
+            else (
+                f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+                + "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
+            )
         )
         logits, _ = self.run_rnn(self.fix_tokens(self.pipeline.encode(preset_system)))
         try:




@@ -1,734 +0,0 @@
########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################
import types, gc, os, time, re
import torch
from torch.nn import functional as F
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cuda.matmul.allow_tf32 = True
current_path = os.path.dirname(os.path.abspath(__file__))
# https://zhuanlan.zhihu.com/p/612879065
def LoadPreCompileLibrary(file):
import importlib
import os
import torch
# load the custom_op_library and register the custom ops
lib_dir = os.path.dirname(__file__)
if os.name == "nt":
# Register this library's location on the default DLL path (adapted from torchvision)
import ctypes
import sys
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
prev_error_mode = kernel32.SetErrorMode(0x0001)
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
if sys.version_info >= (3, 8):
os.add_dll_directory(lib_dir)
elif with_load_library_flags:
res = kernel32.AddDllDirectory(lib_dir)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
raise ValueError(err)
kernel32.SetErrorMode(prev_error_mode)
loader_details = (
importlib.machinery.ExtensionFileLoader,
importlib.machinery.EXTENSION_SUFFIXES,
)
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
ext_specs = extfinder.find_spec(file)
if ext_specs is None:
return False
try:
torch.ops.load_library(ext_specs.origin)
except OSError:
return False
return True
########################################################################################################
if os.environ.get('RWKV_JIT_ON') != '0':
os.environ["RWKV_JIT_ON"] = '1'
MyModule = torch.jit.ScriptModule
MyFunction = torch.jit.script_method
MyStatic = torch.jit.script
else:
MyModule = torch.nn.Module
def __nop(ob):
return ob
MyFunction = __nop
MyStatic = __nop
if os.environ.get('RWKV_CUDA_ON') == '1':
if LoadPreCompileLibrary('wkv_cuda') is False:
from torch.utils.cpp_extension import load
load(
name=f"wkv_cuda",
sources=[f"{current_path}/cuda/wrapper.cpp", f"{current_path}/cuda/operators.cu"],
verbose=True,
extra_cuda_cflags=["-t 4", "-std=c++17", "--use_fast_math", "-O3", "--extra-device-vectorization"],
is_python_module=False)
@MyStatic
def cuda_wkv(T: int, C: int, w, u, k, v, aa, bb, pp):
assert 1 * C % min(C, 32) == 0
assert k.dtype == v.dtype == torch.float16 or k.dtype == v.dtype == torch.float32
assert w.dtype == u.dtype == aa.dtype == bb.dtype == pp.dtype == torch.float32
w = w.contiguous()
u = u.contiguous()
k = k.contiguous()
v = v.contiguous()
y = torch.empty((T, C), device=w.device, memory_format=torch.contiguous_format, dtype=k.dtype)
torch.ops.rwkv.wkv_forward(1, T, C, w, u, k, v, y, aa, bb, pp)
return y, aa, bb, pp
@MyStatic
def cuda_mm8_seq(B: int, N: int, M: int, x, w, mx, rx, my, ry):
assert x.dtype == mx.dtype == rx.dtype == my.dtype == ry.dtype
assert x.dtype == torch.float32 or x.dtype == torch.float16
assert w.dtype == torch.uint8
assert x.shape == [B, N]
assert w.shape == [N, M]
assert rx.shape == mx.shape == [M]
assert ry.shape == my.shape == [N, 1]
y = torch.empty((B, M), device=w.device, dtype=x.dtype)
torch.ops.rwkv.mm8_seq(B, N, M, x, w, mx, rx, my, ry, y)
return y
@MyStatic
def cuda_mm8_one(N: int, M: int, x, w, mx, rx, my, ry):
assert x.dtype == mx.dtype == rx.dtype == my.dtype == ry.dtype
assert x.dtype == torch.float32 or x.dtype == torch.float16
assert w.dtype == torch.uint8
assert x.shape == [N]
assert w.shape == [N, M]
assert rx.shape == mx.shape == [M]
assert ry.shape == my.shape == [N, 1]
y = torch.zeros((M,), device=w.device, dtype=torch.float32)
torch.ops.rwkv.mm8_one(N, M, x, w, mx, rx, my, ry, y)
return y.to(dtype=x.dtype)
else:
os.environ["RWKV_CUDA_ON"] = '0'
########################################################################################################
class RWKV(MyModule):
def __init__(self, model, strategy, verbose = True, convert_and_save_and_exit = None):
super().__init__()
if verbose:
prxxx = lambda *args, **kwargs: print(*args, **kwargs)
else:
prxxx = lambda *args, **kwargs: None
STRATEGY_REGEX = r"^(?:(?:^|->) *(?:cuda(?::[\d]+)?|cpu|mps) (?:fp(?:16|32)|bf16)(?:i8|i4|i3)?(?: \*[\d]+\+?)? *)+$"
if not re.match(STRATEGY_REGEX, strategy):
raise ValueError("Invalid strategy. Please read https://pypi.org/project/rwkv/")
strategy = ('->'.join([x.strip() for x in strategy.split('->')])).replace('->', ' -> ')
self.args = types.SimpleNamespace()
args = self.args
args.MODEL_NAME = model
args.strategy_string = strategy
# Rescale for fp16 mode: set x = x/2 every X layer (to avoid fp16 overflow)
self.RESCALE_LAYER = 6 if 'fp16' in strategy else 0
prxxx(f'RWKV_JIT_ON {os.environ["RWKV_JIT_ON"]} RWKV_CUDA_ON {os.environ["RWKV_CUDA_ON"]} RESCALE_LAYER {self.RESCALE_LAYER}\n')
args.MODEL_NAME = args.MODEL_NAME.strip()
if not args.MODEL_NAME.endswith('.pth'):
args.MODEL_NAME += '.pth'
prxxx(f'Loading {args.MODEL_NAME} ...')
with torch.no_grad():
self.w = torch.load(args.MODEL_NAME, map_location='cpu') # load model to CPU first
gc.collect()
w = self.w
ALREADY_CONVERTED = False
if '_strategy' in w:
ALREADY_CONVERTED = True
assert convert_and_save_and_exit == None # you should only convert a raw model
prxxx(f"Converted model: strategy {w['_strategy']}, version {w['_version']}\n")
assert w['_strategy'] == args.strategy_string # if you are using a new strategy, re-convert the model
assert float(w['_version']) >= 0.7 # sometimes you should re-convert using latest convert_model.py
assert w['_rescale_layer'] == self.RESCALE_LAYER
del w['_strategy']
del w['_version']
del w['_rescale_layer']
args.n_embd = w['emb.weight'].shape[1]
args.n_layer = 0
keys = list(w.keys())
for x in keys:
layer_id = int(x.split('.')[1]) if ('blocks.' in x) else 0
args.n_layer = max(args.n_layer, layer_id+1)
####################### Compute strategy
s = [x.strip().split(' ') for x in strategy.split('->')]
plan = [0] * len(s)
stream_i = -1
stream_count = 0
to_allocate = args.n_layer + 1
allocated = 0
free_slots = 0
for i in range(len(s)):
si = s[i]
si1 = si[1]
if si1.startswith('fp32'): si[1] = [torch.float]
elif si1.startswith('fp16'): si[1] = [torch.float16]
elif si1.startswith('bf16'): si[1] = [torch.bfloat16]
if si1.endswith('i8'): si[1] += [torch.uint8]
else: si[1] += [si[1][0]]
if len(si) > 2:
ss = si[2]
assert ss.startswith('*')
if ss.endswith('+'):
plan[i] = int(ss[1:-1])
stream_i = i
else:
plan[i] = int(ss[1:])
allocated += plan[i]
if allocated >= to_allocate:
plan[i] += to_allocate - allocated
break
else:
free_slots += 1
if stream_i < 0:
if free_slots > 0 and to_allocate > allocated:
for i in range(len(s)):
if plan[i] == 0:
plan[i] = (to_allocate - allocated) // free_slots
allocated += plan[i]
free_slots -= 1
if to_allocate > allocated:
plan[len(s)-1] += to_allocate - allocated
else:
if to_allocate > allocated:
stream_count = to_allocate - allocated
plan[stream_i] += stream_count
prxxx(f'Strategy: (total {args.n_layer}+1={args.n_layer+1} layers)')
for i in range(len(s)):
ss = s[i]
if i != stream_i:
prxxx(f'* {ss[0]} {str(ss[1]).replace("torch.","")}, store {plan[i]} layers')
else:
prxxx(f'* {ss[0]} {str(ss[1]).replace("torch.","")}, store {plan[i]-stream_count} layers, stream {stream_count} layers')
plan[i] += (0 if i == 0 else plan[i-1])
self.strategy = [None] * (args.n_layer + 1)
strategy = self.strategy
for n in range(args.n_layer + 1):
for i in range(len(s)):
if n < plan[i]:
strategy[n] = types.SimpleNamespace()
strategy[n].device = s[i][0]
strategy[n].atype = s[i][1][0]
strategy[n].wtype = s[i][1][1]
strategy[n].stream = False
if i == stream_i and n >= (plan[i] - stream_count):
strategy[n].stream = True
break
prxxx(f"{n}-{strategy[n].device}-{str(strategy[n].atype).replace('torch.','')}-{str(strategy[n].wtype).replace('torch.','')}{'-stream' if strategy[n].stream else ''}",end=' ')
prxxx()
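# Illustrative walk-through (comment added for clarity, not in the original file):
# for a 24-layer model and strategy "cuda fp16 *10 -> cpu fp32", to_allocate is
# 25 (n_layer + 1, counting ln_out/head as one extra "layer"). The explicit '*10'
# pins layers 0-9 to cuda in fp16; the cpu entry carries no count, so the
# free-slot pass hands it the remaining 15 layers in fp32. A trailing '+'
# ("cuda fp16 *10+") would instead mark cuda as the streaming device, and the
# leftover layers would be kept in pinned CPU memory and streamed in on demand.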
####################### Load weights to self.w
if not ALREADY_CONVERTED:
try: # precompute embedding
w['emb.weight'] = F.layer_norm(w['emb.weight'], (args.n_embd,), weight=w['blocks.0.ln0.weight'], bias=w['blocks.0.ln0.bias'])
except:
w['emb.weight'] = F.layer_norm(w['emb.weight'].float(), (args.n_embd,), weight=w['blocks.0.ln0.weight'].float(), bias=w['blocks.0.ln0.bias'].float())
del w['blocks.0.ln0.weight']
del w['blocks.0.ln0.bias']
print_need_newline = False
keys = list(w.keys())
for x in keys:
w[x].requires_grad = False
layer_id = int(x.split('.')[1]) if ('blocks.' in x) else 0
if ('ln_out.' in x) or ('head.' in x):
layer_id = args.n_layer
dd = strategy[layer_id]
DEVICE = dd.device
ATYPE = dd.atype
WTYPE = dd.wtype
if not ALREADY_CONVERTED:
if self.RESCALE_LAYER > 0:
if 'att.output.weight' in x:
w[x] = w[x] / (2 ** int(layer_id // self.RESCALE_LAYER))
if 'ffn.value.weight' in x:
w[x] = w[x] / (2 ** int(layer_id // self.RESCALE_LAYER))
if '.time_' in x:
w[x] = w[x].squeeze()
if 'key.weight' in x or 'value.weight' in x or 'receptance.weight' in x or 'output.weight' in x or 'head.weight' in x:
w[x] = w[x].t()
if '.time_decay' in x: # need fp32 for this
w[x] = -torch.exp(w[x].float())
elif '.time_first' in x: # need fp32 for this
w[x] = w[x].float()
else:
if (len(w[x].shape) == 2) and ('emb' not in x):
if WTYPE != torch.uint8:
w[x] = w[x].to(dtype=WTYPE)
else:
w[x] = w[x].float()
if w[x].shape[0] > w[x].shape[1]:
w[x+'_my'] = torch.amin(w[x], dim=1).unsqueeze(1)
w[x] = w[x] - w[x+'_my']
w[x+'_mx'] = torch.amin(w[x], dim=0)
w[x] = w[x] - w[x+'_mx']
w[x+'_rx'] = torch.amax(w[x], dim=0)
w[x] = w[x] / w[x+'_rx']
w[x+'_ry'] = torch.amax(w[x], dim=1).unsqueeze(1)
w[x] = w[x] / w[x+'_ry']
else:
w[x+'_mx'] = torch.amin(w[x], dim=0)
w[x] = w[x] - w[x+'_mx']
w[x+'_my'] = torch.amin(w[x], dim=1).unsqueeze(1)
w[x] = w[x] - w[x+'_my']
w[x+'_rx'] = torch.amax(w[x], dim=0)
w[x] = w[x] / w[x+'_rx']
w[x+'_ry'] = torch.amax(w[x], dim=1).unsqueeze(1)
w[x] = w[x] / w[x+'_ry']
w[x] = torch.clip(torch.floor(w[x] * 256), min=0, max=255).to(dtype=torch.uint8)
w[x+'_mx'] = w[x+'_mx'].to(dtype=ATYPE).contiguous()
w[x+'_rx'] = (w[x+'_rx'] / 16).to(dtype=ATYPE).contiguous()
w[x+'_my'] = w[x+'_my'].to(dtype=ATYPE).contiguous()
w[x+'_ry'] = (w[x+'_ry'] / 16).to(dtype=ATYPE).contiguous()
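# Round-trip of the INT8 scheme above (comment added for clarity, not in the
# original file): after subtracting the per-row minima (_my) and per-column
# minima (_mx) and dividing by the per-column (_rx) and per-row (_ry) maxima,
# w lies in [0, 1] and is quantized as clip(floor(w * 256), 0, 255) in uint8.
# The mm8 kernels dequantize with (w_q + 0.5) * rx * ry + mx + my; since _rx and
# _ry are stored pre-divided by 16, rx * ry carries the missing 1/256 factor,
# and the +0.5 recenters each quantization bucket.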
else:
w[x] = w[x].to(dtype=ATYPE)
if convert_and_save_and_exit == None:
if 'emb.' in x:
w[x] = w[x].contiguous()
elif (dd.stream) and (x.endswith('key.weight') or x.endswith('value.weight') or x.endswith('receptance.weight') or x.endswith('output.weight')):
try:
w[x] = w[x].contiguous().pin_memory() # if you see "CUDA error: out of memory" here, that's out of CPU RAM, not VRAM. Get more RAM :)
except:
print('Note: You are running out of RAM. Get more CPU RAM. Now this will run much slower.')
elif DEVICE != 'cpu':
w[x] = w[x].to(device=DEVICE).contiguous()
if (dd.stream) or (DEVICE != 'cpu'):
try:
w[x+'_mx'] = w[x+'_mx'].to(device=DEVICE).contiguous()
w[x+'_rx'] = w[x+'_rx'].to(device=DEVICE).contiguous()
w[x+'_my'] = w[x+'_my'].to(device=DEVICE).contiguous()
w[x+'_ry'] = w[x+'_ry'].to(device=DEVICE).contiguous()
except:
pass
if 'ffn.value.weight' in x:
gc.collect()
if 'cuda' in args.strategy_string:
torch.cuda.empty_cache()
shape = [i for i in w[x].shape if i != 1]
if len(shape) > 1:
shape = f" {str(shape[0]).rjust(5)} {str(shape[1]).rjust(5)}"
else:
shape = f" {str(shape[0]).rjust(5)} "
if layer_id == 0 or layer_id >= args.n_layer-1:
if print_need_newline:
prxxx('\n', end = '')
print_need_newline = False
dt = str(w[x].dtype).replace('torch.', '')
dt = dt.replace('float32', 'f32').replace('bfloat16', 'bf16').replace('float16', 'f16').replace('uint8', 'i8')
prxxx(x.ljust(32), dt.rjust(4), str(w[x].device).rjust(8), shape, ' (pinned)' if w[x].is_pinned() else '')
else:
print_need_newline = True
prxxx('.', end = '', flush = True)
if convert_and_save_and_exit:
w['_strategy'] = args.strategy_string
w['_rescale_layer'] = self.RESCALE_LAYER
w['_version'] = '0.7'
if not convert_and_save_and_exit.endswith('.pth'):
convert_and_save_and_exit += '.pth'
prxxx(f'Saving to {convert_and_save_and_exit}...')
torch.save(w, convert_and_save_and_exit)
prxxx(f'Converted and saved. Now this will exit.')
exit(0)
gc.collect()
if 'cuda' in args.strategy_string:
torch.cuda.empty_cache()
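# Editor's note (not part of the original file): the int8 path above stores, next to the
# uint8 matrix, four small tensors -- per-column/per-row minima (_mx/_my) and ranges
# (_rx/_ry, each pre-divided by 16) -- which the mm8_* helpers defined just below use to
# dequantize on the fly. A minimal standalone sketch of that round trip, with toy shapes:
#
#     import torch
#
#     W0 = torch.randn(1024, 4096)                          # wider than tall -> the 'else' branch above
#     W = W0.clone()
#     mx = torch.amin(W, dim=0);               W = W - mx   # per-column minimum
#     my = torch.amin(W, dim=1, keepdim=True); W = W - my   # per-row minimum
#     rx = torch.amax(W, dim=0);               W = W / rx   # per-column range
#     ry = torch.amax(W, dim=1, keepdim=True); W = W / ry   # per-row range
#     q = torch.clip(torch.floor(W * 256), min=0, max=255).to(torch.uint8)
#     rx, ry = rx / 16, ry / 16   # stored with a 1/16 factor each (1/256 combined, undoing the *256)
#
#     deq = (q.float() + 0.5) * ry * rx + my + mx           # exactly the torch_mm8_* formula below
#     print((deq - W0).abs().max())                         # prints a small quantization error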
@MyFunction
def torch_mm8_seq(self, x, w, mx, rx, my, ry):
    return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)

@MyFunction
def torch_mm8_one(self, x, w, mx, rx, my, ry):
    return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)

if os.environ.get('RWKV_CUDA_ON') == '1':
    @MyFunction
    def mm8_seq(self, x, w, mx, rx, my, ry):
        if w.device.type == 'cuda' and x.dtype == torch.float16:
            B, N, M = x.shape[0], w.shape[0], w.shape[1]
            return cuda_mm8_seq(B, N, M, x, w, mx, rx, my, ry)
        else:
            return self.torch_mm8_seq(x, w, mx, rx, my, ry)
    @MyFunction
    def mm8_one(self, x, w, mx, rx, my, ry):
        if w.device.type == 'cuda':
            N, M = w.shape[0], w.shape[1]
            return cuda_mm8_one(N, M, x, w, mx, rx, my, ry)
        else:
            return self.torch_mm8_one(x, w, mx, rx, my, ry)
else:
    @MyFunction
    def mm8_seq(self, x, w, mx, rx, my, ry):
        return self.torch_mm8_seq(x, w, mx, rx, my, ry)
    @MyFunction
    def mm8_one(self, x, w, mx, rx, my, ry):
        return self.torch_mm8_one(x, w, mx, rx, my, ry)
########################################################################################################
@MyFunction
def ffn_one(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
    xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
    kx = xx * k_mix + sx * (1 - k_mix)
    rx = xx * r_mix + sx * (1 - r_mix)

    r = torch.sigmoid(rx @ rw)
    vx = torch.square(torch.relu(kx @ kw))
    out = r * (vx @ vw)
    return x + out, xx

@MyFunction
def ffn_one_i8(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
    xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
    kx = xx * k_mix + sx * (1 - k_mix)
    rx = xx * r_mix + sx * (1 - r_mix)

    r = torch.sigmoid(self.mm8_one(rx, rw, rmx, rrx, rmy, rry))
    vx = torch.square(torch.relu(self.mm8_one(kx, kw, kmx, krx, kmy, kry)))
    out = r * (self.mm8_one(vx, vw, vmx, vrx, vmy, vry))
    return x + out, xx
########################################################################################################
@MyFunction
def ffn_seq(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
    xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
    sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
    kx = xx * k_mix + sx * (1 - k_mix)
    rx = xx * r_mix + sx * (1 - r_mix)

    r = torch.sigmoid(rx @ rw)
    vx = torch.square(torch.relu(kx @ kw))
    out = r * (vx @ vw)
    return x + out, xx[-1,:]

@MyFunction
def ffn_seq_i8(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
    xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
    sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
    kx = xx * k_mix + sx * (1 - k_mix)
    rx = xx * r_mix + sx * (1 - r_mix)

    r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
    vx = torch.square(torch.relu(self.mm8_seq(kx, kw, kmx, krx, kmy, kry)))
    out = r * (self.mm8_seq(vx, vw, vmx, vrx, vmy, vry))
    return x + out, xx[-1,:]
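# Editor's note (not part of the original file): in equation form, with x~t the
# layer-normed input and x~{t-1} the previous token's value supplied by the ffn_xx
# state (token shift), ffn_one/ffn_seq compute
#
#     k_t = k_mix * x~t + (1 - k_mix) * x~{t-1}
#     r_t = r_mix * x~t + (1 - r_mix) * x~{t-1}
#     y_t = x_t + sigmoid(r_t @ rw) * (relu(k_t @ kw)**2 @ vw)
#
# i.e. a squared-ReLU channel mix, gated elementwise by a sigmoid "receptance".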
########################################################################################################
@MyFunction
def att_one(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
    xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
    kx = xx * k_mix + sx * (1 - k_mix)
    vx = xx * v_mix + sx * (1 - v_mix)
    rx = xx * r_mix + sx * (1 - r_mix)

    r = torch.sigmoid(rx @ rw)
    k = (kx @ kw).float()
    v = (vx @ vw).float()

    ww = t_first + k
    p = torch.maximum(pp, ww)
    e1 = torch.exp(pp - p)
    e2 = torch.exp(ww - p)
    wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
    ww = t_decay + pp
    p = torch.maximum(ww, k)
    e1 = torch.exp(ww - p)
    e2 = torch.exp(k - p)

    out = (r * wkv) @ ow
    return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p

@MyFunction
def att_one_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
    xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
    kx = xx * k_mix + sx * (1 - k_mix)
    vx = xx * v_mix + sx * (1 - v_mix)
    rx = xx * r_mix + sx * (1 - r_mix)

    r = torch.sigmoid(self.mm8_one(rx, rw, rmx, rrx, rmy, rry))
    k = (self.mm8_one(kx, kw, kmx, krx, kmy, kry)).float()
    v = (self.mm8_one(vx, vw, vmx, vrx, vmy, vry)).float()

    ww = t_first + k
    p = torch.maximum(pp, ww)
    e1 = torch.exp(pp - p)
    e2 = torch.exp(ww - p)
    wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
    ww = t_decay + pp
    p = torch.maximum(ww, k)
    e1 = torch.exp(ww - p)
    e2 = torch.exp(k - p)

    out = self.mm8_one(r * wkv, ow, omx, orx, omy, ory)
    return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p
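# Editor's note (not part of the original file): the aa/bb/pp bookkeeping above is a
# numerically stable form of the WKV recurrence. The state encodes a scaled
# numerator/denominator pair A = aa * exp(pp), B = bb * exp(pp), where pp tracks the
# largest exponent seen so far, so every torch.exp argument stays <= 0 and fp32 never
# overflows. With u = t_first and w = t_decay (negative, since time_decay was stored
# as -exp(...) during loading):
#
#     output:  q  = max(pp, u + k)
#              wkv = (exp(pp - q) * aa + exp(u + k - q) * v) / (exp(pp - q) * bb + exp(u + k - q))
#     update:  q' = max(pp + w, k)
#              aa' = exp(pp + w - q') * aa + exp(k - q') * v
#              bb' = exp(pp + w - q') * bb + exp(k - q')
#              pp' = q'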
########################################################################################################
@MyFunction
def att_seq(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
    xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
    sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
    kx = xx * k_mix + sx * (1 - k_mix)
    vx = xx * v_mix + sx * (1 - v_mix)
    rx = xx * r_mix + sx * (1 - r_mix)

    r = torch.sigmoid(rx @ rw)
    k = (kx @ kw).float()
    v = (vx @ vw).float()

    T = x.shape[0]
    for t in range(T):
        kk = k[t]
        vv = v[t]
        ww = t_first + kk
        p = torch.maximum(pp, ww)
        e1 = torch.exp(pp - p)
        e2 = torch.exp(ww - p)
        sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
        ww = t_decay + pp
        p = torch.maximum(ww, kk)
        e1 = torch.exp(ww - p)
        e2 = torch.exp(kk - p)
        aa = e1 * aa + e2 * vv
        bb = e1 * bb + e2
        pp = p
    out = (r * sx) @ ow
    return x + out, xx[-1,:], aa, bb, pp

@MyFunction
def att_seq_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
    xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
    sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
    kx = xx * k_mix + sx * (1 - k_mix)
    vx = xx * v_mix + sx * (1 - v_mix)
    rx = xx * r_mix + sx * (1 - r_mix)

    r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
    k = self.mm8_seq(kx, kw, kmx, krx, kmy, kry).float()
    v = self.mm8_seq(vx, vw, vmx, vrx, vmy, vry).float()

    T = x.shape[0]
    for t in range(T):
        kk = k[t]
        vv = v[t]
        ww = t_first + kk
        p = torch.maximum(pp, ww)
        e1 = torch.exp(pp - p)
        e2 = torch.exp(ww - p)
        sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
        ww = t_decay + pp
        p = torch.maximum(ww, kk)
        e1 = torch.exp(ww - p)
        e2 = torch.exp(kk - p)
        aa = e1 * aa + e2 * vv
        bb = e1 * bb + e2
        pp = p
    out = self.mm8_seq(r * sx, ow, omx, orx, omy, ory)
    return x + out, xx[-1,:], aa, bb, pp
########################################################################################################
if os.environ["RWKV_CUDA_ON"] == '1':
    @MyFunction
    def cuda_att_seq(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
        T, C = x.size()
        xx = F.layer_norm(x, (C,), weight=ln_w, bias=ln_b)
        sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
        kx = xx * k_mix + sx * (1 - k_mix)
        vx = xx * v_mix + sx * (1 - v_mix)
        rx = xx * r_mix + sx * (1 - r_mix)

        r = torch.sigmoid(rx @ rw)
        k = kx @ kw
        v = vx @ vw
        y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)

        out = (r * y) @ ow
        return x + out, xx[-1,:], aa, bb, pp

    @MyFunction
    def cuda_att_seq_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
        T, C = x.size()
        xx = F.layer_norm(x, (C,), weight=ln_w, bias=ln_b)
        sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
        kx = xx * k_mix + sx * (1 - k_mix)
        vx = xx * v_mix + sx * (1 - v_mix)
        rx = xx * r_mix + sx * (1 - r_mix)

        r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
        k = self.mm8_seq(kx, kw, kmx, krx, kmy, kry)
        v = self.mm8_seq(vx, vw, vmx, vrx, vmy, vry)
        y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)

        out = self.mm8_seq(r * y, ow, omx, orx, omy, ory)
        return x + out, xx[-1,:], aa, bb, pp
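# Editor's note (not part of the original file): cuda_wkv / cuda_mm8_seq / cuda_mm8_one
# come from a custom CUDA extension that replaces the per-token Python loop in att_seq;
# this app ships it pre-compiled (wkv_cuda.pyd), while the upstream rwkv package can
# JIT-build it when RWKV_CUDA_ON=1. A hedged sketch of such a build with torch's C++
# extension loader (source paths are illustrative, not the actual ones):
#
#     from torch.utils.cpp_extension import load
#     wkv_cuda = load(name="wkv_cuda",
#                     sources=["cuda/wrapper.cpp", "cuda/operators.cu"],  # illustrative
#                     extra_cuda_cflags=["-O3", "--use_fast_math"])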
########################################################################################################
def forward(self, tokens, state, full_output=False):
    with torch.no_grad():
        w = self.w
        args = self.args

        if state == None:
            state = [None] * args.n_layer * 5
            for i in range(args.n_layer): # state: 0=att_xx 1=att_aa 2=att_bb 3=att_pp 4=ffn_xx
                dd = self.strategy[i]
                dev = dd.device
                atype = dd.atype
                state[i*5+0] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
                state[i*5+1] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous()
                state[i*5+2] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous()
                state[i*5+3] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous() - 1e30
                state[i*5+4] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()

        seq_mode = len(tokens) > 1

        x = w['emb.weight'][tokens if seq_mode else tokens[0]]

        for i in range(args.n_layer):
            bbb = f'blocks.{i}.'
            att = f'blocks.{i}.att.'
            ffn = f'blocks.{i}.ffn.'
            dd = self.strategy[i]
            dev = dd.device
            atype = dd.atype
            wtype = dd.wtype
            if seq_mode:
                if 'cuda' in str(dev) and os.environ["RWKV_CUDA_ON"] == '1':
                    ATT = self.cuda_att_seq if wtype != torch.uint8 else self.cuda_att_seq_i8
                else:
                    ATT = self.att_seq if wtype != torch.uint8 else self.att_seq_i8
                FFN = self.ffn_seq if wtype != torch.uint8 else self.ffn_seq_i8
            else:
                ATT = self.att_one if wtype != torch.uint8 else self.att_one_i8
                FFN = self.ffn_one if wtype != torch.uint8 else self.ffn_one_i8

            x = x.to(dtype=atype, device=dev)

            kw = w[f'{att}key.weight']
            vw = w[f'{att}value.weight']
            rw = w[f'{att}receptance.weight']
            ow = w[f'{att}output.weight']
            if dd.stream:
                kw = kw.to(device=dev, non_blocking=True)
                vw = vw.to(device=dev, non_blocking=True)
                rw = rw.to(device=dev, non_blocking=True)
                ow = ow.to(device=dev, non_blocking=True)
            kmx = w[f'{att}key.weight_mx'] if wtype == torch.uint8 else x
            krx = w[f'{att}key.weight_rx'] if wtype == torch.uint8 else x
            kmy = w[f'{att}key.weight_my'] if wtype == torch.uint8 else x
            kry = w[f'{att}key.weight_ry'] if wtype == torch.uint8 else x
            vmx = w[f'{att}value.weight_mx'] if wtype == torch.uint8 else x
            vrx = w[f'{att}value.weight_rx'] if wtype == torch.uint8 else x
            vmy = w[f'{att}value.weight_my'] if wtype == torch.uint8 else x
            vry = w[f'{att}value.weight_ry'] if wtype == torch.uint8 else x
            rmx = w[f'{att}receptance.weight_mx'] if wtype == torch.uint8 else x
            rrx = w[f'{att}receptance.weight_rx'] if wtype == torch.uint8 else x
            rmy = w[f'{att}receptance.weight_my'] if wtype == torch.uint8 else x
            rry = w[f'{att}receptance.weight_ry'] if wtype == torch.uint8 else x
            omx = w[f'{att}output.weight_mx'] if wtype == torch.uint8 else x
            orx = w[f'{att}output.weight_rx'] if wtype == torch.uint8 else x
            omy = w[f'{att}output.weight_my'] if wtype == torch.uint8 else x
            ory = w[f'{att}output.weight_ry'] if wtype == torch.uint8 else x
            x, state[i*5+0], state[i*5+1], state[i*5+2], state[i*5+3] = ATT(
                x, state[i*5+0], state[i*5+1], state[i*5+2], state[i*5+3],
                w[f'{bbb}ln1.weight'], w[f'{bbb}ln1.bias'],
                w[f'{att}time_mix_k'], w[f'{att}time_mix_v'], w[f'{att}time_mix_r'],
                w[f'{att}time_decay'], w[f'{att}time_first'],
                kw, vw, rw, ow,
                kmx, krx, kmy, kry,
                vmx, vrx, vmy, vry,
                rmx, rrx, rmy, rry,
                omx, orx, omy, ory,
                )
            if dd.stream:
                del kw, vw, rw, ow

            kw = w[f'{ffn}key.weight']
            vw = w[f'{ffn}value.weight']
            rw = w[f'{ffn}receptance.weight']
            if dd.stream:
                kw = kw.to(device=dev, non_blocking=True)
                vw = vw.to(device=dev, non_blocking=True)
                rw = rw.to(device=dev, non_blocking=True)
            kmx = w[f'{ffn}key.weight_mx'] if wtype == torch.uint8 else x
            krx = w[f'{ffn}key.weight_rx'] if wtype == torch.uint8 else x
            kmy = w[f'{ffn}key.weight_my'] if wtype == torch.uint8 else x
            kry = w[f'{ffn}key.weight_ry'] if wtype == torch.uint8 else x
            vmx = w[f'{ffn}value.weight_mx'] if wtype == torch.uint8 else x
            vrx = w[f'{ffn}value.weight_rx'] if wtype == torch.uint8 else x
            vmy = w[f'{ffn}value.weight_my'] if wtype == torch.uint8 else x
            vry = w[f'{ffn}value.weight_ry'] if wtype == torch.uint8 else x
            rmx = w[f'{ffn}receptance.weight_mx'] if wtype == torch.uint8 else x
            rrx = w[f'{ffn}receptance.weight_rx'] if wtype == torch.uint8 else x
            rmy = w[f'{ffn}receptance.weight_my'] if wtype == torch.uint8 else x
            rry = w[f'{ffn}receptance.weight_ry'] if wtype == torch.uint8 else x
            x, state[i*5+4] = FFN(
                x, state[i*5+4],
                w[f'{bbb}ln2.weight'], w[f'{bbb}ln2.bias'],
                w[f'{ffn}time_mix_k'], w[f'{ffn}time_mix_r'],
                kw, vw, rw,
                kmx, krx, kmy, kry,
                vmx, vrx, vmy, vry,
                rmx, rrx, rmy, rry,
                )
            if dd.stream:
                del kw, vw, rw

            if self.RESCALE_LAYER > 0:
                if (i+1) % self.RESCALE_LAYER == 0:
                    x = x / 2

        dd = self.strategy[args.n_layer]
        x = x[-1,:] if (seq_mode and (not full_output)) else x
        x = x.to(dtype=dd.atype, device=dd.device)

        x = F.layer_norm(x, (args.n_embd,), weight=w['ln_out.weight'], bias=w['ln_out.bias'])
        if w['head.weight'].dtype != torch.uint8:
            x = x @ w['head.weight']
        else:
            if seq_mode and full_output:
                x = self.mm8_seq(x, w['head.weight'], w['head.weight_mx'], w['head.weight_rx'], w['head.weight_my'], w['head.weight_ry'])
            else:
                x = self.mm8_one(x, w['head.weight'], w['head.weight_mx'], w['head.weight_rx'], w['head.weight_my'], w['head.weight_ry'])

        return x.float(), state
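# Editor's note (not part of the original file): a hypothetical driver for the forward
# above, following the rwkv pip API this backend wraps (the model path and token ids
# are made up):
#
#     from rwkv.model import RWKV
#
#     model = RWKV(model='models/RWKV-4-World-1.5B.pth', strategy='cuda fp16')
#     state = None                                # forward allocates 5 tensors per layer on first use
#     logits, state = model.forward([101, 102, 103], state)  # seq_mode: prefill a prompt in one call
#     logits, state = model.forward([104], state)            # then decode one token at a time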

(diff of a large file not shown)

@@ -1,6 +1,6 @@
For Mac and Linux users, please manually install Python 3.10 (usually the latest systems come with it built-in). You can specify the Python interpreter to use in Settings.
对于Mac和Linux用户请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器.
MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。
For Mac and Linux users, please manually install Python 3.10 (usually the latest systems come with it built-in). You can specify the Python interpreter to use in Settings. (which python3)
对于Mac和Linux用户请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器. (which python3)
MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。 (which python3)
Please execute this program in an empty directory. All related dependencies will be placed in this directory.
请将本程序放在一个空目录内执行, 所有相关依赖均会放置于此目录.


@@ -1,3 +1,5 @@
echo $@
if [[ ${cnMirror} == 1 ]]; then
export PIP_INDEX_URL="https://pypi.tuna.tsinghua.edu.cn/simple"
if grep -q "mirrors.aliyun.com" /etc/apt/sources.list; then


@@ -184,7 +184,7 @@ if __name__ == "__main__":
args.num_sanity_val_steps = 0
args.check_val_every_n_epoch = int(1e20)
args.log_every_n_steps = int(1e20)
args.max_epochs = -1 # continue forever
args.max_epochs = args.epoch_count # continue forever
args.betas = (args.beta1, args.beta2)
args.real_bsz = int(args.num_nodes) * int(args.devices) * args.micro_bsz
os.environ["RWKV_T_MAX"] = str(args.ctx_len)
@@ -373,7 +373,7 @@ if __name__ == "__main__":
for param in module.parameters():
param.requires_grad = True
elif enable_time_finetune and any(
n.startswith("time") for n, _ in module.named_parameters()
n.startswith("time") for n, _ in module.named_parameters()
):
for pname, param in module.named_parameters():
if pname.startswith("time"):
@@ -381,7 +381,7 @@ if __name__ == "__main__":
param.requires_grad = True
if (
len(args.load_model) == 0 or args.my_pile_stage == 1
len(args.load_model) == 0 or args.my_pile_stage == 1
): # shall we build the initial weights?
init_weight_name = f"{args.proj_dir}/rwkv-init.pth"
generate_init_weight(model, init_weight_name) # save initial weights
@@ -423,8 +423,8 @@ if __name__ == "__main__":
)
if (
args.lr_init > 1e-4
or trainer.world_size * args.micro_bsz * trainer.accumulate_grad_batches < 8
args.lr_init > 1e-4
or trainer.world_size * args.micro_bsz * trainer.accumulate_grad_batches < 8
):
if "I_KNOW_WHAT_IM_DOING" in os.environ:
if trainer.global_rank == 0:
@@ -459,10 +459,10 @@ if __name__ == "__main__":
if "deepspeed" in args.strategy:
trainer.strategy.config["zero_optimization"]["allgather_bucket_size"] = (
args.ds_bucket_mb * 1000 * 1000
args.ds_bucket_mb * 1000 * 1000
)
trainer.strategy.config["zero_optimization"]["reduce_bucket_size"] = (
args.ds_bucket_mb * 1000 * 1000
args.ds_bucket_mb * 1000 * 1000
)
# must set shuffle=False, persistent_workers=False (because worker is in another thread)


@@ -128,6 +128,7 @@
"Chinese Kongfu": "中国武術",
"Allow external access to the API (service must be restarted)": "APIへの外部アクセスを許可する (サービスを再起動する必要があります)",
"Custom": "カスタム",
"CUDA (Beta, Faster)": "CUDA (ベータ、高速)",
"Reset All Configs": "すべての設定をリセット",
"Cancel": "キャンセル",
"Confirm": "確認",
@@ -177,7 +178,7 @@
"Failed to import. Please copy a preset to the clipboard.": "インポートに失敗しました。プリセットをクリップボードにコピーしてください。",
"Clipboard is empty.": "クリップボードが空です。",
"Successfully copied to clipboard.": "クリップボードにコピーしました。",
"Edit Messages": "メッセージの編集",
"Edit Character Settings": "キャラクター設定を編集",
"Go Back": "戻る",
"Description": "説明",
"Avatar Url": "アバターURL",
@@ -225,7 +226,7 @@
"Please select a LoRA model": "LoRAモデルを選択してください",
"You are using sample data for training. For formal training, please make sure to create your own jsonl file.": "トレーニングにはサンプルデータを使用しています。正式なトレーニングのためには、自身でjsonlファイルを作成してください。",
"WSL is not running, please retry. If it keeps happening, it means you may be using an outdated version of WSL, run \"wsl --update\" to update.": "WSLが実行されていません、もう一度試してください。これが続く場合、古いバージョンのWSLを使用している可能性があります。\"wsl --update\"を実行して更新してください。",
"Memory is not enough, try to increase the virtual memory or use a smaller base model.": "メモリが不足しています、仮想メモリを増やすか小さなベースモデルを使用してみてください。",
"Memory is not enough, try to increase the virtual memory (Swap of WSL) or use a smaller base model.": "メモリが不足しています、仮想メモリ (WSL Swap) を増やすか小さなベースモデルを使用してみてください。",
"VRAM is not enough": "ビデオRAMが不足しています",
"Training data is not enough, reduce context length or add more data for training": "トレーニングデータが不足しています、コンテキストの長さを減らすか、トレーニング用のデータをさらに追加してください",
"You are using WSL 1 for training, please upgrade to WSL 2. e.g. Run \"wsl --set-version Ubuntu-22.04 2\"": "トレーニングにWSL 1を使用しています、WSL 2にアップグレードしてください。例:\"wsl --set-version Ubuntu-22.04 2\"を実行する",
@@ -240,5 +241,19 @@
"Auto Play At The End": "最後に自動再生",
"No File to save": "保存するファイルがありません",
"File Saved": "ファイルが保存されました",
"Failed to load local sound font, please check if the files exist - assets/sound-font": "ローカルサウンドフォントの読み込みに失敗しました、ファイルが存在するか確認してください - assets/sound-font"
"Failed to load local sound font, please check if the files exist - assets/sound-font": "ローカルサウンドフォントの読み込みに失敗しました、ファイルが存在するか確認してください - assets/sound-font",
"Please convert model to safe tensors format first": "モデルを安全なテンソル形式に変換してください",
"Convert To Safe Tensors Format": "安全なテンソル形式に変換",
"Please change Strategy to WebGPU to use safetensors format": "StrategyをWebGPUに変更して、安全なテンソル形式を使用してください",
"Preview Only": "プレビューのみ",
"RAM": "RAM",
"VRAM": "VRAM",
"GPU Usage": "GPU使用率",
"Use Custom Tokenizer": "カスタムトークナイザーを使用する",
"Tokenizer Path (e.g. backend-python/rwkv_pip/20B_tokenizer.json)": "トークナイザーパス (例: backend-python/rwkv_pip/20B_tokenizer.json)",
"User Name": "ユーザー名",
"Assistant Name": "アシスタント名",
"Insert default system prompt at the beginning": "最初にデフォルトのシステムプロンプトを挿入",
"Please Enable Custom CUDA Kernel. Latest RWKV-5 requires os.environ['RWKV_CUDA_ON'] == '1' (will fix soon).": "カスタムCUDAカーネルを有効にしてください。最新のRWKV-5ではos.environ['RWKV_CUDA_ON'] == '1'が必要です(近日中に修正します)。",
"Format Content": "内容フォーマットの規格化"
}


@@ -128,6 +128,7 @@
"Chinese Kongfu": "情境冒险",
"Allow external access to the API (service must be restarted)": "允许外部访问API (必须重启服务)",
"Custom": "自定义",
"CUDA (Beta, Faster)": "CUDA (Beta, 更快)",
"Reset All Configs": "重置所有配置",
"Cancel": "取消",
"Confirm": "确认",
@@ -177,7 +178,7 @@
"Failed to import. Please copy a preset to the clipboard.": "导入失败。请复制一个预设到剪贴板",
"Clipboard is empty.": "剪贴板没有内容",
"Successfully copied to clipboard.": "成功复制到剪贴板",
"Edit Messages": "编辑对话",
"Edit Character Settings": "编辑人设",
"Go Back": "返回",
"Description": "描述",
"Avatar Url": "头像图片地址",
@@ -225,7 +226,7 @@
"Please select a LoRA model": "请选择一个LoRA模型",
"You are using sample data for training. For formal training, please make sure to create your own jsonl file.": "你正在使用示例数据训练对于正式训练场合请务必创建你自己的jsonl训练数据",
"WSL is not running, please retry. If it keeps happening, it means you may be using an outdated version of WSL, run \"wsl --update\" to update.": "WSL没有运行请重试。如果一直出现此错误意味着你可能正在使用旧版本的WSL请在cmd执行\"wsl --update\"以更新",
"Memory is not enough, try to increase the virtual memory or use a smaller base model.": "内存不足,尝试增加虚拟内存,或使用一个更小规模的基底模型",
"Memory is not enough, try to increase the virtual memory (Swap of WSL) or use a smaller base model.": "内存不足,尝试增加虚拟内存(WSL Swap),或使用一个更小规模的基底模型",
"VRAM is not enough": "显存不足",
"Training data is not enough, reduce context length or add more data for training": "训练数据不足,请减小上下文长度或增加训练数据",
"You are using WSL 1 for training, please upgrade to WSL 2. e.g. Run \"wsl --set-version Ubuntu-22.04 2\"": "你正在使用WSL 1进行训练请升级到WSL 2。例如运行\"wsl --set-version Ubuntu-22.04 2\"",
@@ -240,5 +241,19 @@
"Auto Play At The End": "结束时自动播放",
"No File to save": "无文件可保存",
"File Saved": "文件已保存",
"Failed to load local sound font, please check if the files exist - assets/sound-font": "加载本地音色资源失败,请检查文件是否存在 - assets/sound-font"
"Failed to load local sound font, please check if the files exist - assets/sound-font": "加载本地音色资源失败,请检查文件是否存在 - assets/sound-font",
"Please convert model to safe tensors format first": "请先将模型转换为Safetensors格式",
"Convert To Safe Tensors Format": "转换为Safetensors格式",
"Please change Strategy to WebGPU to use safetensors format": "请将Strategy改为WebGPU以使用safetensors格式",
"Preview Only": "仅预览",
"RAM": "内存",
"VRAM": "显存",
"GPU Usage": "GPU占用",
"Use Custom Tokenizer": "使用自定义Tokenizer",
"Tokenizer Path (e.g. backend-python/rwkv_pip/20B_tokenizer.json)": "Tokenizer路径 (例如: backend-python/rwkv_pip/20B_tokenizer.json)",
"User Name": "用户名称",
"Assistant Name": "AI名称",
"Insert default system prompt at the beginning": "在开头自动插入默认系统提示",
"Please Enable Custom CUDA Kernel. Latest RWKV-5 requires os.environ['RWKV_CUDA_ON'] == '1' (will fix soon).": "请启用自定义CUDA算子。最新的RWKV-5需要os.environ['RWKV_CUDA_ON'] == '1' (未来会修复)",
"Format Content": "规范格式"
}


@@ -11,7 +11,7 @@ export const ResetConfigsButton: FC<{ afterConfirm?: () => void }> = ({ afterCon
return <DialogButton icon={<ArrowReset20Regular />} tooltip={t('Reset All Configs')} title={t('Reset All Configs')}
contentText={t('Are you sure you want to reset all configs? This will obtain the latest preset configs, but will override your custom configs and cannot be undone.')}
onConfirm={() => {
commonStore.setModelConfigs(commonStore.platform != 'darwin' ? defaultModelConfigs : defaultModelConfigsMac, false);
commonStore.setModelConfigs(commonStore.platform !== 'darwin' ? defaultModelConfigs : defaultModelConfigsMac, false);
commonStore.setCurrentConfigIndex(0, true);
afterConfirm?.();
}} />;


@@ -1,11 +1,11 @@
import React, { FC, MouseEventHandler, ReactElement } from 'react';
import commonStore, { ModelStatus } from '../stores/commonStore';
import { AddToDownloadList, CopyFile, FileExists, StartServer } from '../../wailsjs/go/backend_golang/App';
import { AddToDownloadList, FileExists, StartServer, StartWebGPUServer } from '../../wailsjs/go/backend_golang/App';
import { Button } from '@fluentui/react-components';
import { observer } from 'mobx-react-lite';
import { exit, getStatus, readRoot, switchModel, updateConfig } from '../apis';
import { toast } from 'react-toastify';
import { checkDependencies, getStrategy, getSupportedCustomCudaFile, toastWithButton } from '../utils';
import { checkDependencies, getStrategy, toastWithButton } from '../utils';
import { useTranslation } from 'react-i18next';
import { ToolTipButton } from './ToolTipButton';
import { Play16Regular, Stop16Regular } from '@fluentui/react-icons';
@@ -39,6 +39,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
commonStore.setStatus({ status: ModelStatus.Starting });
const modelConfig = commonStore.getCurrentModelConfig();
const webgpu = modelConfig.modelParameters.device === 'WebGPU';
let modelName = '';
let modelPath = '';
if (modelConfig && modelConfig.modelParameters) {
@@ -50,9 +51,32 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
return;
}
const ok = await checkDependencies(navigate);
if (!ok)
return;
if (webgpu) {
if (!['.st', '.safetensors'].some(ext => modelPath.endsWith(ext))) {
const stModelPath = modelPath.replace(/\.pth$/, '.st');
if (await FileExists(stModelPath)) {
modelPath = stModelPath;
} else {
toast(t('Please convert model to safe tensors format first'), { type: 'error' });
commonStore.setStatus({ status: ModelStatus.Offline });
return;
}
}
}
if (!webgpu) {
if (['.st', '.safetensors'].some(ext => modelPath.endsWith(ext))) {
toast(t('Please change Strategy to WebGPU to use safetensors format'), { type: 'error' });
commonStore.setStatus({ status: ModelStatus.Offline });
return;
}
}
if (!webgpu) {
const ok = await checkDependencies(navigate);
if (!ok)
return;
}
const currentModelSource = commonStore.modelSourceList.find(item => item.name === modelName);
@@ -85,7 +109,15 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
await exit(1000).catch(() => {
});
StartServer(commonStore.settings.customPythonPath, port, commonStore.settings.host !== '127.0.0.1' ? '0.0.0.0' : '127.0.0.1').catch((e) => {
const startServer = webgpu ?
(_: string, port: number, host: string) => StartWebGPUServer(port, host)
: StartServer;
const isUsingCudaBeta = modelConfig.modelParameters.device === 'CUDA-Beta';
startServer(commonStore.settings.customPythonPath, port, commonStore.settings.host !== '127.0.0.1' ? '0.0.0.0' : '127.0.0.1',
isUsingCudaBeta
).catch((e) => {
const errMsg = e.message || e;
if (errMsg.includes('path contains space'))
toast(`${t('Error')} - ${t('File Path Cannot Contain Space')}`, { type: 'error' });
@@ -102,41 +134,49 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
if (r.ok && !loading) {
loading = true;
clearInterval(intervalId);
await getStatus().then(status => {
if (status)
commonStore.setStatus(status);
});
if (!webgpu) {
await getStatus().then(status => {
if (status)
commonStore.setStatus(status);
});
}
commonStore.setStatus({ status: ModelStatus.Loading });
toast(t('Loading Model'), { type: 'info' });
updateConfig({
max_tokens: modelConfig.apiParameters.maxResponseToken,
temperature: modelConfig.apiParameters.temperature,
top_p: modelConfig.apiParameters.topP,
presence_penalty: modelConfig.apiParameters.presencePenalty,
frequency_penalty: modelConfig.apiParameters.frequencyPenalty
});
if (!webgpu) {
updateConfig({
max_tokens: modelConfig.apiParameters.maxResponseToken,
temperature: modelConfig.apiParameters.temperature,
top_p: modelConfig.apiParameters.topP,
presence_penalty: modelConfig.apiParameters.presencePenalty,
frequency_penalty: modelConfig.apiParameters.frequencyPenalty
});
}
const strategy = getStrategy(modelConfig);
let customCudaFile = '';
if ((modelConfig.modelParameters.device === 'CUDA' || modelConfig.modelParameters.device === 'Custom')
if ((modelConfig.modelParameters.device.includes('CUDA') || modelConfig.modelParameters.device === 'Custom')
&& modelConfig.modelParameters.useCustomCuda && !strategy.includes('fp32')) {
if (commonStore.platform === 'windows') {
customCudaFile = getSupportedCustomCudaFile();
if (customCudaFile) {
FileExists('./py310/Lib/site-packages/rwkv/model.py').then((exist) => {
// defensive measure. As Python has already been launched, will only take effect the next time it runs.
if (!exist) CopyFile('./backend-python/wkv_cuda_utils/wkv_cuda_model.py', './py310/Lib/site-packages/rwkv/model.py');
});
await CopyFile(customCudaFile, './py310/Lib/site-packages/rwkv/wkv_cuda.pyd').catch(() => {
FileExists('./py310/Lib/site-packages/rwkv/wkv_cuda.pyd').then((exist) => {
if (!exist) {
customCudaFile = '';
toast(t('Failed to copy custom cuda file'), { type: 'error' });
}
});
});
} else
toast(t('Supported custom cuda file not found'), { type: 'warning' });
// this part is currently unused because there's no longer a need to use different kernels for different GPUs, but it might still be needed in the future
//
// customCudaFile = getSupportedCustomCudaFile(isUsingCudaBeta);
// if (customCudaFile) {
// let kernelTargetPath: string;
// if (isUsingCudaBeta)
// kernelTargetPath = './backend-python/rwkv_pip/beta/wkv_cuda.pyd';
// else
// kernelTargetPath = './backend-python/rwkv_pip/wkv_cuda.pyd';
// await CopyFile(customCudaFile, kernelTargetPath).catch(() => {
// FileExists(kernelTargetPath).then((exist) => {
// if (!exist) {
// customCudaFile = '';
// toast(t('Failed to copy custom cuda file'), { type: 'error' });
// }
// });
// });
// } else
// toast(t('Supported custom cuda file not found'), { type: 'warning' });
customCudaFile = 'any';
} else {
customCudaFile = 'any';
}
@@ -145,6 +185,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
switchModel({
model: modelPath,
strategy: strategy,
tokenizer: modelConfig.modelParameters.useCustomTokenizer ? modelConfig.modelParameters.customTokenizer : undefined,
customCuda: customCudaFile !== ''
}).then(async (r) => {
if (r.ok) {
@@ -170,7 +211,8 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
'invalid header or archive is corrupted': 'The model file is corrupted, please download again.',
'no NVIDIA driver': 'Found no NVIDIA driver, please install the latest driver.',
'CUDA out of memory': 'VRAM is not enough, please reduce stored layers or use a lower precision in Configs page.',
'Ninja is required to load C++ extensions': 'Failed to enable custom CUDA kernel, ninja is required to load C++ extensions. You may be using the CPU version of PyTorch, please reinstall PyTorch with CUDA. Or if you are using a custom Python interpreter, you must compile the CUDA kernel by yourself or disable Custom CUDA kernel acceleration.'
'Ninja is required to load C++ extensions': 'Failed to enable custom CUDA kernel, ninja is required to load C++ extensions. You may be using the CPU version of PyTorch, please reinstall PyTorch with CUDA. Or if you are using a custom Python interpreter, you must compile the CUDA kernel by yourself or disable Custom CUDA kernel acceleration.',
'Please Enable Custom CUDA Kernel': 'Please Enable Custom CUDA Kernel. Latest RWKV-5 requires os.environ[\'RWKV_CUDA_ON\'] == \'1\' (will fix soon).'
};
const matchedError = Object.entries(errorsMap).find(([key, _]) => error.includes(key));
const message = matchedError ? t(matchedError[1]) : error;


@@ -312,7 +312,10 @@ const ChatPanel: FC = observer(() => {
stream: true,
model: commonStore.settings.apiChatModelName, // 'gpt-3.5-turbo'
temperature: apiParams.temperature,
top_p: apiParams.topP
top_p: apiParams.topP,
user_name: commonStore.activePreset?.userName,
assistant_name: commonStore.activePreset?.assistantName,
presystem: commonStore.activePreset?.presystem
}),
signal: chatSseController?.signal,
onmessage(e) {


@@ -269,6 +269,13 @@ const CompletionPanel: FC = observer(() => {
} />
</div>
<div className="grow" />
<div className="flex justify-between gap-2">
<Button className="grow" onClick={() => {
const newPrompt = prompt.replace(/\n+\ /g, '\n').split('\n').map((line) => line.trim()).join('\n');
setPrompt(newPrompt);
commonStore.setCompletionSubmittedPrompt(newPrompt);
}}>{t('Format Content')}</Button>
</div>
<div className="flex justify-between gap-2">
<ToolTipButton desc={t('Regenerate')} icon={<ArrowSync20Regular />} onClick={() => {
completionSseController?.abort();


@@ -1,6 +1,19 @@
import { Dropdown, Input, Label, Option, Select, Switch, Text } from '@fluentui/react-components';
import {
Accordion,
AccordionHeader,
AccordionItem,
AccordionPanel,
Checkbox,
Dropdown,
Input,
Label,
Option,
Select,
Switch,
Text
} from '@fluentui/react-components';
import { AddCircle20Regular, DataUsageSettings20Regular, Delete20Regular, Save20Regular } from '@fluentui/react-icons';
import React, { FC } from 'react';
import React, { FC, useEffect, useRef } from 'react';
import { Section } from '../components/Section';
import { Labeled } from '../components/Labeled';
import { ToolTipButton } from '../components/ToolTipButton';
@@ -13,13 +26,14 @@ import { Page } from '../components/Page';
import { useNavigate } from 'react-router';
import { RunButton } from '../components/RunButton';
import { updateConfig } from '../apis';
import { ConvertModel, FileExists, GetPyError } from '../../wailsjs/go/backend_golang/App';
import { getStrategy } from '../utils';
import { ConvertModel, ConvertSafetensors, FileExists, GetPyError } from '../../wailsjs/go/backend_golang/App';
import { checkDependencies, getStrategy } from '../utils';
import { useTranslation } from 'react-i18next';
import { WindowShow } from '../../wailsjs/runtime/runtime';
import strategyImg from '../assets/images/strategy.jpg';
import strategyZhImg from '../assets/images/strategy_zh.jpg';
import { ResetConfigsButton } from '../components/ResetConfigsButton';
import { useMediaQuery } from 'usehooks-ts';
export type ApiParameters = {
apiPort: number
@@ -30,7 +44,7 @@ export type ApiParameters = {
frequencyPenalty: number;
}
export type Device = 'CPU' | 'CUDA' | 'MPS' | 'Custom';
export type Device = 'CPU' | 'CUDA' | 'CUDA-Beta' | 'WebGPU' | 'MPS' | 'Custom';
export type Precision = 'fp16' | 'int8' | 'fp32';
export type ModelParameters = {
@@ -42,6 +56,8 @@ export type ModelParameters = {
maxStoredLayers: number;
useCustomCuda?: boolean;
customStrategy?: string;
useCustomTokenizer?: boolean;
customTokenizer?: string;
}
export type ModelConfig = {
@@ -56,9 +72,16 @@ export const Configs: FC = observer(() => {
const [selectedIndex, setSelectedIndex] = React.useState(commonStore.currentModelConfigIndex);
const [selectedConfig, setSelectedConfig] = React.useState(commonStore.modelConfigs[selectedIndex]);
const [displayStrategyImg, setDisplayStrategyImg] = React.useState(false);
const advancedHeaderRef = useRef<HTMLDivElement>(null);
const mq = useMediaQuery('(min-width: 640px)');
const navigate = useNavigate();
const port = selectedConfig.apiParameters.apiPort;
useEffect(() => {
if (advancedHeaderRef.current)
(advancedHeaderRef.current.firstElementChild as HTMLElement).style.padding = '0';
}, []);
const updateSelectedIndex = (newIndex: number) => {
setSelectedIndex(newIndex);
setSelectedConfig(commonStore.modelConfigs[newIndex]);
@@ -128,7 +151,8 @@ export const Configs: FC = observer(() => {
setSelectedIndex(0);
setSelectedConfig(commonStore.modelConfigs[0]);
}} />
<ToolTipButton desc={t('Save Config')} icon={<Save20Regular />} onClick={onClickSave} />
<ToolTipButton desc={mq ? '' : t('Save Config')} icon={<Save20Regular />} text={mq ? t('Save Config') : null}
onClick={onClickSave} />
</div>
<div className="flex items-center gap-4">
<Label>{t('Config Name')}</Label>
@@ -237,40 +261,84 @@ export const Configs: FC = observer(() => {
}} />
</div>
} />
<ToolTipButton text={t('Convert')}
desc={t('Convert model with these configs. Using a converted model will greatly improve the loading speed, but model parameters of the converted model cannot be modified.')}
onClick={async () => {
if (commonStore.platform == 'darwin') {
toast(t('MacOS is not yet supported for performing this operation, please do it manually.'), { type: 'info' });
return;
} else if (commonStore.platform == 'linux') {
toast(t('Linux is not yet supported for performing this operation, please do it manually.'), { type: 'info' });
return;
}
const modelPath = `${commonStore.settings.customModelsPath}/${selectedConfig.modelParameters.modelName}`;
if (await FileExists(modelPath)) {
const strategy = getStrategy(selectedConfig);
const newModelPath = modelPath + '-' + strategy.replace(/[:> *+]/g, '-');
toast(t('Start Converting'), { autoClose: 1000, type: 'info' });
ConvertModel(commonStore.settings.customPythonPath, modelPath, strategy, newModelPath).then(async () => {
if (!await FileExists(newModelPath + '.pth')) {
toast(t('Convert Failed') + ' - ' + await GetPyError(), { type: 'error' });
} else {
toast(`${t('Convert Success')} - ${newModelPath}`, { type: 'success' });
{
selectedConfig.modelParameters.device !== 'WebGPU' ?
<ToolTipButton text={t('Convert')}
desc={t('Convert model with these configs. Using a converted model will greatly improve the loading speed, but model parameters of the converted model cannot be modified.')}
onClick={async () => {
if (commonStore.platform === 'darwin') {
toast(t('MacOS is not yet supported for performing this operation, please do it manually.') + ' (backend-python/convert_model.py)', { type: 'info' });
return;
} else if (commonStore.platform === 'linux') {
toast(t('Linux is not yet supported for performing this operation, please do it manually.') + ' (backend-python/convert_model.py)', { type: 'info' });
return;
}
}).catch(e => {
const errMsg = e.message || e;
if (errMsg.includes('path contains space'))
toast(`${t('Convert Failed')} - ${t('File Path Cannot Contain Space')}`, { type: 'error' });
else
toast(`${t('Convert Failed')} - ${e.message || e}`, { type: 'error' });
});
setTimeout(WindowShow, 1000);
} else {
toast(`${t('Model Not Found')} - ${modelPath}`, { type: 'error' });
}
}} />
const ok = await checkDependencies(navigate);
if (!ok)
return;
const modelPath = `${commonStore.settings.customModelsPath}/${selectedConfig.modelParameters.modelName}`;
if (await FileExists(modelPath)) {
const strategy = getStrategy(selectedConfig);
const newModelPath = modelPath + '-' + strategy.replace(/[:> *+]/g, '-');
toast(t('Start Converting'), { autoClose: 1000, type: 'info' });
ConvertModel(commonStore.settings.customPythonPath, modelPath, strategy, newModelPath).then(async () => {
if (!await FileExists(newModelPath + '.pth')) {
toast(t('Convert Failed') + ' - ' + await GetPyError(), { type: 'error' });
} else {
toast(`${t('Convert Success')} - ${newModelPath}`, { type: 'success' });
}
}).catch(e => {
const errMsg = e.message || e;
if (errMsg.includes('path contains space'))
toast(`${t('Convert Failed')} - ${t('File Path Cannot Contain Space')}`, { type: 'error' });
else
toast(`${t('Convert Failed')} - ${e.message || e}`, { type: 'error' });
});
setTimeout(WindowShow, 1000);
} else {
toast(`${t('Model Not Found')} - ${modelPath}`, { type: 'error' });
}
}} /> :
<ToolTipButton text={t('Convert To Safe Tensors Format')}
desc=""
onClick={async () => {
if (commonStore.platform === 'darwin') {
toast(t('MacOS is not yet supported for performing this operation, please do it manually.') + ' (backend-python/convert_safetensors.py)', { type: 'info' });
return;
} else if (commonStore.platform === 'linux') {
toast(t('Linux is not yet supported for performing this operation, please do it manually.') + ' (backend-python/convert_safetensors.py)', { type: 'info' });
return;
}
const ok = await checkDependencies(navigate);
if (!ok)
return;
const modelPath = `${commonStore.settings.customModelsPath}/${selectedConfig.modelParameters.modelName}`;
if (await FileExists(modelPath)) {
toast(t('Start Converting'), { autoClose: 1000, type: 'info' });
const newModelPath = modelPath.replace(/\.pth$/, '.st');
ConvertSafetensors(commonStore.settings.customPythonPath, modelPath, newModelPath).then(async () => {
if (!await FileExists(newModelPath)) {
toast(t('Convert Failed') + ' - ' + await GetPyError(), { type: 'error' });
} else {
toast(`${t('Convert Success')} - ${newModelPath}`, { type: 'success' });
}
}).catch(e => {
const errMsg = e.message || e;
if (errMsg.includes('path contains space'))
toast(`${t('Convert Failed')} - ${t('File Path Cannot Contain Space')}`, { type: 'error' });
else
toast(`${t('Convert Failed')} - ${e.message || e}`, { type: 'error' });
});
setTimeout(WindowShow, 1000);
} else {
toast(`${t('Model Not Found')} - ${modelPath}`, { type: 'error' });
}
}} />
}
<Labeled label={t('Strategy')} content={
<Dropdown style={{ minWidth: 0 }} className="grow" value={t(selectedConfig.modelParameters.device)!}
selectedOptions={[selectedConfig.modelParameters.device]}
@@ -284,11 +352,13 @@ export const Configs: FC = observer(() => {
<Option value="CPU">CPU</Option>
{commonStore.platform === 'darwin' && <Option value="MPS">MPS</Option>}
<Option value="CUDA">CUDA</Option>
<Option value="CUDA-Beta">{t('CUDA (Beta, Faster)')!}</Option>
<Option value="WebGPU">WebGPU</Option>
<Option value="Custom">{t('Custom')!}</Option>
</Dropdown>
} />
{
selectedConfig.modelParameters.device != 'Custom' && <Labeled label={t('Precision')}
selectedConfig.modelParameters.device !== 'Custom' && <Labeled label={t('Precision')}
desc={t('int8 uses less VRAM, but has slightly lower quality. fp16 has higher quality, and fp32 has the best quality.')}
content={
<Dropdown style={{ minWidth: 0 }} className="grow"
@@ -303,17 +373,17 @@ export const Configs: FC = observer(() => {
}}>
<Option>fp16</Option>
<Option>int8</Option>
<Option>fp32</Option>
{selectedConfig.modelParameters.device !== 'WebGPU' && <Option>fp32</Option>}
</Dropdown>
} />
}
{
selectedConfig.modelParameters.device == 'CUDA' &&
selectedConfig.modelParameters.device.includes('CUDA') &&
<Labeled label={t('Current Strategy')}
content={<Text> {getStrategy(selectedConfig)} </Text>} />
}
{
selectedConfig.modelParameters.device == 'CUDA' &&
selectedConfig.modelParameters.device.includes('CUDA') &&
<Labeled label={t('Stored Layers')}
desc={t('Number of the neural network layers loaded into VRAM, the more you load, the faster the speed, but it consumes more VRAM. (If your VRAM is not enough, it will fail to load)')}
content={
@@ -326,9 +396,7 @@ export const Configs: FC = observer(() => {
}} />
} />
}
{
selectedConfig.modelParameters.device == 'CUDA' && <div />
}
{selectedConfig.modelParameters.device.includes('CUDA') && <div />}
{
displayStrategyImg &&
<img style={{ width: '80vh', height: 'auto', zIndex: 100 }}
@@ -336,13 +404,13 @@ export const Configs: FC = observer(() => {
src={commonStore.settings.language === 'zh' ? strategyZhImg : strategyImg} />
}
{
selectedConfig.modelParameters.device == 'Custom' &&
selectedConfig.modelParameters.device === 'Custom' &&
<Labeled label="Strategy"
onMouseEnter={() => setDisplayStrategyImg(true)}
onMouseLeave={() => setDisplayStrategyImg(false)}
content={
<Input className="grow"
placeholder={commonStore.platform != 'darwin' ? 'cuda:0 fp16 *20 -> cuda:1 fp16' : 'mps fp32'}
placeholder={commonStore.platform !== 'darwin' ? 'cuda:0 fp16 *20 -> cuda:1 fp16' : 'mps fp32'}
value={selectedConfig.modelParameters.customStrategy}
onChange={(e, data) => {
setSelectedConfigModelParams({
@@ -351,9 +419,9 @@ export const Configs: FC = observer(() => {
}} />
} />
}
{selectedConfig.modelParameters.device == 'Custom' && <div />}
{selectedConfig.modelParameters.device === 'Custom' && <div />}
{
selectedConfig.modelParameters.device != 'CPU' && selectedConfig.modelParameters.device != 'MPS' &&
(selectedConfig.modelParameters.device.includes('CUDA') || selectedConfig.modelParameters.device === 'Custom') &&
<Labeled label={t('Use Custom CUDA kernel to Accelerate')}
desc={t('Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues. If it fails to start, please turn off this option.')}
content={
@@ -365,6 +433,40 @@ export const Configs: FC = observer(() => {
}} />
} />
}
{selectedConfig.modelParameters.device !== 'WebGPU' &&
<Accordion className="sm:col-span-2" collapsible
openItems={!commonStore.modelParamsCollapsed && 'advanced'}
onToggle={(e, data) => {
if (data.value === 'advanced')
commonStore.setModelParamsCollapsed(!commonStore.modelParamsCollapsed);
}}>
<AccordionItem value="advanced">
<AccordionHeader ref={advancedHeaderRef} size="small">{t('Advanced')}</AccordionHeader>
<AccordionPanel>
<div className="flex flex-col">
<div className="flex grow">
<Checkbox className="select-none"
size="large" label={t('Use Custom Tokenizer')}
checked={selectedConfig.modelParameters.useCustomTokenizer}
onChange={(_, data) => {
setSelectedConfigModelParams({
useCustomTokenizer: data.checked as boolean
});
}} />
<Input className="grow"
placeholder={t('Tokenizer Path (e.g. backend-python/rwkv_pip/20B_tokenizer.json)')!}
value={selectedConfig.modelParameters.customTokenizer}
onChange={(e, data) => {
setSelectedConfigModelParams({
customTokenizer: data.value
});
}} />
</div>
</div>
</AccordionPanel>
</AccordionItem>
</Accordion>
}
</div>
}
/>


@@ -1,10 +1,10 @@
import React, { FC } from 'react';
import React, { FC, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { Page } from '../components/Page';
import { observer } from 'mobx-react-lite';
import commonStore from '../stores/commonStore';
import { Divider, Field, ProgressBar } from '@fluentui/react-components';
import { bytesToGb, bytesToKb, bytesToMb } from '../utils';
import { bytesToGb, bytesToKb, bytesToMb, refreshLocalModels } from '../utils';
import { ToolTipButton } from '../components/ToolTipButton';
import { Folder20Regular, Pause20Regular, Play20Regular } from '@fluentui/react-icons';
import { AddToDownloadList, OpenFileFolder, PauseDownload } from '../../wailsjs/go/backend_golang/App';
@@ -23,6 +23,12 @@ export type DownloadStatus = {
export const Downloads: FC = observer(() => {
const { t } = useTranslation();
const finishedModelsLen = commonStore.downloadList.filter((status) => status.done && status.name.endsWith('.pth')).length;
useEffect(() => {
if (finishedModelsLen > 0)
refreshLocalModels({ models: commonStore.modelSourceList }, false);
console.log('finishedModelsLen:', finishedModelsLen);
}, [finishedModelsLen]);
let displayList = commonStore.downloadList.slice();
const downloadListNames = displayList.map(s => s.name);


@@ -56,6 +56,9 @@ export type Preset = {
stop: string,
injectStart: string,
injectEnd: string,
presystem?: boolean,
userName?: string,
assistantName?: string
}
export const defaultPreset: Preset = {
@@ -250,14 +253,41 @@ export const ChatPresetEditor: FC<{
}} />
<Button onClick={() => {
setEditingMessages(!editingMessages);
}}>{!editingMessages ? t('Edit Messages') : t('Go Back')}</Button>
}}>{!editingMessages ? t('Edit Character Settings') : t('Go Back')}</Button>
</div>
} />
{
editingMessages ?
<MessagesEditor /> :
<div className="flex flex-col gap-1">
<Labeled flex spaceBetween label={t('Insert default system prompt at the beginning')}
content={
<Switch checked={editingPreset.presystem === undefined ? true : editingPreset.presystem}
onChange={(e, data) => {
setEditingPreset({
presystem: data.checked
});
}} />
} />
<Labeled flex breakline label={t('User Name')}
content={
<Input placeholder="User" value={editingPreset.userName} onChange={(e, data) => {
setEditingPreset({
userName: data.value
});
}} />
} />
<Labeled flex breakline label={t('Assistant Name')}
content={
<Input placeholder="Assistant" value={editingPreset.assistantName} onChange={(e, data) => {
setEditingPreset({
assistantName: data.value
});
}} />
} />
<MessagesEditor />
</div> :
<div className="flex flex-col gap-1 p-2 overflow-x-hidden overflow-y-auto">
<Labeled flex breakline label={t('Description')}
<Labeled flex breakline label={`${t('Description')} (${t('Preview Only')})`}
content={
<Input value={editingPreset.desc} onChange={(e, data) => {
setEditingPreset({


@@ -126,7 +126,7 @@ export const Settings: FC = observer(() => {
} />
}
{
commonStore.settings.language === 'zh' && commonStore.platform != 'linux' &&
commonStore.settings.language === 'zh' && commonStore.platform !== 'linux' &&
<Labeled label={t('Use Tsinghua Pip Mirrors')} flex spaceBetween content={
<Switch checked={commonStore.settings.cnMirror}
onChange={(e, data) => {


@@ -154,7 +154,7 @@ const showError = (e: any) => {
};
const errorsMap = Object.entries({
'python3 ./finetune/lora/train.py': 'Memory is not enough, try to increase the virtual memory or use a smaller base model.',
'python3 ./finetune/lora/train.py': 'Memory is not enough, try to increase the virtual memory (Swap of WSL) or use a smaller base model.',
'cuda out of memory': 'VRAM is not enough',
'valueerror: high <= 0': 'Training data is not enough, reduce context length or add more data for training',
'+= \'+ptx\'': 'You are using WSL 1 for training, please upgrade to WSL 2. e.g. Run "wsl --set-version Ubuntu-22.04 2"',
@@ -219,7 +219,7 @@ const Terminal: FC = observer(() => {
WslStart().then(() => {
addWslMessage('WSL> ' + input);
setInput('');
WslCommand(input).catch(showError);
WslCommand(input).then(WindowShow).catch(showError);
}).catch(showError);
}
};


@@ -2,11 +2,12 @@ import commonStore, { Platform } from './stores/commonStore';
import { GetPlatform, ListDirFiles, ReadJson } from '../wailsjs/go/backend_golang/App';
import { Cache, checkUpdate, downloadProgramFiles, LocalConfig, refreshLocalModels, refreshModels } from './utils';
import { getStatus } from './apis';
import { EventsOn } from '../wailsjs/runtime';
import { EventsOn, WindowSetTitle } from '../wailsjs/runtime';
import manifest from '../../manifest.json';
import { defaultModelConfigs, defaultModelConfigsMac } from './pages/defaultConfigs';
import { Preset } from './pages/PresetsManager/PresetsButton';
import { wslHandler } from './pages/Train';
import { t } from 'i18next';
export async function startup() {
downloadProgramFiles();
@@ -23,6 +24,8 @@ export async function startup() {
initPresets();
initHardwareMonitor();
await GetPlatform().then(p => commonStore.setPlatform(p as Platform));
await initConfig();
@@ -70,7 +73,7 @@ async function initConfig() {
configData.currentModelConfigIndex >= 0 && configData.currentModelConfigIndex < configData.modelConfigs.length)
commonStore.setCurrentConfigIndex(configData.currentModelConfigIndex, false);
}).catch(() => {
commonStore.setModelConfigs(commonStore.platform != 'darwin' ? defaultModelConfigs : defaultModelConfigsMac, true);
commonStore.setModelConfigs(commonStore.platform !== 'darwin' ? defaultModelConfigs : defaultModelConfigsMac, true);
});
}
@@ -117,3 +120,20 @@ async function initLocalModelsNotify() {
refreshLocalModels({ models: commonStore.modelSourceList }, false); //TODO fix bug that only add models
});
}
type monitorData = {
usedMemory: number;
totalMemory: number;
gpuUsage: number;
gpuPower: number;
usedVram: number;
totalVram: number;
}
async function initHardwareMonitor() {
EventsOn('monitor', (data: string) => {
const results: monitorData = JSON.parse(data);
if (results)
WindowSetTitle(`RWKV-Runner (${t('RAM')}: ${results.usedMemory.toFixed(1)}/${results.totalMemory.toFixed(1)} GB, ${t('VRAM')}: ${(results.usedVram / 1024).toFixed(1)}/${(results.totalVram / 1024).toFixed(1)} GB, ${t('GPU Usage')}: ${results.gpuUsage}%)`);
});
}


@@ -74,6 +74,7 @@ class CommonStore {
// configs
currentModelConfigIndex: number = 0;
modelConfigs: ModelConfig[] = [];
modelParamsCollapsed: boolean = true;
// models
modelSourceManifestList: string = 'https://cdn.jsdelivr.net/gh/josstorer/RWKV-Runner@master/manifest.json;';
modelSourceList: ModelSourceItem[] = [];
@@ -167,7 +168,7 @@ class CommonStore {
createModelConfig = (config: ModelConfig = defaultModelConfigs[0], saveConfig: boolean = true) => {
if (config.name === defaultModelConfigs[0].name) {
// deep copy
config = JSON.parse(JSON.stringify(commonStore.platform != 'darwin' ? defaultModelConfigs[0] : defaultModelConfigsMac[0]));
config = JSON.parse(JSON.stringify(commonStore.platform !== 'darwin' ? defaultModelConfigs[0] : defaultModelConfigsMac[0]));
config.name = new Date().toLocaleString();
}
this.modelConfigs.push(config);
@@ -259,6 +260,10 @@ class CommonStore {
this.advancedCollapsed = value;
}
setModelParamsCollapsed(value: boolean) {
this.modelParamsCollapsed = value;
}
setLastUnfinishedModelDownloads(value: DownloadStatus[]) {
this.lastUnfinishedModelDownloads = value;
}


@@ -1,6 +1,5 @@
import {
AddToDownloadList,
CopyFile,
DeleteFile,
DepCheck,
InstallPyDep,
@@ -57,6 +56,8 @@ export async function refreshBuiltInModels(readCache: boolean = false) {
return cache;
}
const modelSuffix = ['.pth', '.st', '.safetensors'];
export async function refreshLocalModels(cache: {
models: ModelSourceItem[]
}, filter: boolean = true, initUnfinishedModels: boolean = false) {
@@ -65,7 +66,7 @@ export async function refreshLocalModels(cache: {
await ListDirFiles(commonStore.settings.customModelsPath).then((data) => {
cache.models.push(...data.flatMap(d => {
if (!d.isDir && d.name.endsWith('.pth'))
if (!d.isDir && modelSuffix.some((ext => d.name.endsWith(ext))))
return [{
name: d.name,
size: d.size,
@@ -146,7 +147,7 @@ export async function refreshRemoteModels(cache: { models: ModelSourceItem[] })
.catch(() => {
});
cache.models = cache.models.filter((model, index, self) => {
return model.name.endsWith('.pth')
return modelSuffix.some((ext => model.name.endsWith(ext)))
&& index === self.findIndex(
m => m.name === model.name || (m.SHA256 && m.SHA256 === model.SHA256 && m.size === model.size));
});
@@ -176,9 +177,13 @@ export const getStrategy = (modelConfig: ModelConfig | undefined = undefined) =>
strategy += 'cpu ';
strategy += params.precision === 'int8' ? 'fp32i8' : 'fp32';
break;
case 'WebGPU':
strategy += params.precision === 'int8' ? 'fp16i8' : 'fp16';
break;
case 'CUDA':
case 'CUDA-Beta':
if (avoidOverflow)
strategy = 'cuda fp32 *1 -> ';
strategy = params.useCustomCuda ? 'cuda fp16 *1 -> ' : 'cuda fp32 *1 -> ';
strategy += 'cuda ';
strategy += params.precision === 'fp16' ? 'fp16' : params.precision === 'int8' ? 'fp16i8' : 'fp32';
if (params.storedLayers < params.maxStoredLayers)
@@ -239,7 +244,7 @@ export function downloadProgramFiles() {
manifest.programFiles.forEach(({ url, path }) => {
if (path)
ReadFileInfo(path).then(info => {
if (info.size == 0 && url)
if (info.size === 0 && url)
AddToDownloadList(path, url.replace('@master', '@v' + manifest.version));
}).catch(() => {
if (url)
@@ -372,7 +377,7 @@ export const checkDependencies = async (navigate: NavigateFunction) => {
});
} else {
toast(depErrorMsg, { type: 'info', position: 'bottom-left' });
if (commonStore.platform != 'linux')
if (commonStore.platform !== 'linux')
toastWithButton(t('Python dependencies are incomplete, would you like to install them?'), t('Install'), () => {
InstallPyDep(commonStore.settings.customPythonPath, commonStore.settings.cnMirror).catch((e) => {
const errMsg = e.message || e;
@@ -396,8 +401,6 @@ export const checkDependencies = async (navigate: NavigateFunction) => {
return false;
}
commonStore.setDepComplete(true);
if (commonStore.platform === 'windows')
CopyFile('./backend-python/wkv_cuda_utils/wkv_cuda_model.py', './py310/Lib/site-packages/rwkv/model.py');
}
return true;
};
@@ -422,12 +425,16 @@ export function toastWithButton(text: string, buttonText: string, onClickButton:
return id;
}
export function getSupportedCustomCudaFile() {
export function getSupportedCustomCudaFile(isBeta: boolean) {
if ([' 10', ' 16', ' 20', ' 30', 'MX', 'Tesla P', 'Quadro P', 'NVIDIA P', 'TITAN X', 'TITAN RTX', 'RTX A',
'Quadro RTX 4000', 'Quadro RTX 5000', 'Tesla T4', 'NVIDIA A10', 'NVIDIA A40'].some(v => commonStore.status.device_name.includes(v)))
return './backend-python/wkv_cuda_utils/wkv_cuda10_30.pyd';
return isBeta ?
'./backend-python/wkv_cuda_utils/beta/wkv_cuda10_30.pyd' :
'./backend-python/wkv_cuda_utils/wkv_cuda10_30.pyd';
else if ([' 40', 'RTX 5000 Ada', 'RTX 6000 Ada', 'RTX TITAN Ada', 'NVIDIA L40'].some(v => commonStore.status.device_name.includes(v)))
return './backend-python/wkv_cuda_utils/wkv_cuda40.pyd';
return isBeta ?
'./backend-python/wkv_cuda_utils/beta/wkv_cuda40.pyd' :
'./backend-python/wkv_cuda_utils/wkv_cuda40.pyd';
else
return '';
}


@@ -10,6 +10,8 @@ export function ConvertData(arg1:string,arg2:string,arg3:string,arg4:string):Pro
export function ConvertModel(arg1:string,arg2:string,arg3:string,arg4:string):Promise<string>;
export function ConvertSafetensors(arg1:string,arg2:string,arg3:string):Promise<string>;
export function CopyFile(arg1:string,arg2:string):Promise<void>;
export function DeleteFile(arg1:string):Promise<void>;
@@ -46,7 +48,9 @@ export function RestartApp():Promise<void>;
export function SaveJson(arg1:string,arg2:any):Promise<void>;
-export function StartServer(arg1:string,arg2:number,arg3:string):Promise<string>;
+export function StartServer(arg1:string,arg2:number,arg3:string,arg4:boolean):Promise<string>;
+export function StartWebGPUServer(arg1:number,arg2:string):Promise<string>;
export function UpdateApp(arg1:string):Promise<boolean>;


@@ -18,6 +18,10 @@ export function ConvertModel(arg1, arg2, arg3, arg4) {
return window['go']['backend_golang']['App']['ConvertModel'](arg1, arg2, arg3, arg4);
}
export function ConvertSafetensors(arg1, arg2, arg3) {
return window['go']['backend_golang']['App']['ConvertSafetensors'](arg1, arg2, arg3);
}
export function CopyFile(arg1, arg2) {
return window['go']['backend_golang']['App']['CopyFile'](arg1, arg2);
}
@@ -90,8 +94,12 @@ export function SaveJson(arg1, arg2) {
return window['go']['backend_golang']['App']['SaveJson'](arg1, arg2);
}
-export function StartServer(arg1, arg2, arg3) {
-  return window['go']['backend_golang']['App']['StartServer'](arg1, arg2, arg3);
+export function StartServer(arg1, arg2, arg3, arg4) {
+  return window['go']['backend_golang']['App']['StartServer'](arg1, arg2, arg3, arg4);
}
+export function StartWebGPUServer(arg1, arg2) {
+  return window['go']['backend_golang']['App']['StartWebGPUServer'](arg1, arg2);
+}
export function UpdateApp(arg1) {
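The regenerated bindings reflect two backend changes: `StartServer` gains a fourth boolean argument, and a separate `StartWebGPUServer` entry point backs the new webgpu mode. A hedged usage sketch; the argument meanings below are inferred from the types, not stated in the diff:

```ts
// Hypothetical import path and argument meanings; only the arities and types
// are confirmed by the .d.ts above.
import { StartServer, StartWebGPUServer } from './wailsjs/go/backend_golang/App';

async function launchBackend(useWebGPU: boolean): Promise<string> {
  if (useWebGPU)
    return StartWebGPUServer(8000, '127.0.0.1');                // assumed (port, host)
  return StartServer('py310/python', 8000, '127.0.0.1', false); // assumed (pythonPath, port, host, webui)
}
```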

go.mod

@@ -4,15 +4,16 @@ go 1.20
require (
github.com/cavaliergopher/grab/v3 v3.0.1
github.com/fsnotify/fsnotify v1.6.0
github.com/minio/selfupdate v0.6.0
github.com/nyaosorg/go-windows-su v0.2.1
github.com/ubuntu/gowsl v0.0.0-20230615094051-94945650cc1e
-github.com/wailsapp/wails/v2 v2.5.1
+github.com/wailsapp/wails/v2 v2.6.0
)
require (
aead.dev/minisign v0.2.0 // indirect
github.com/bep/debounce v1.2.1 // indirect
-github.com/fsnotify/fsnotify v1.6.0
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/jchv/go-winloader v0.0.0-20210711035445-715c2860da7e // indirect
@@ -22,8 +23,7 @@ require (
github.com/leaanthony/gosod v1.0.3 // indirect
github.com/leaanthony/slicer v1.6.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
-github.com/mattn/go-isatty v0.0.18 // indirect
-github.com/nyaosorg/go-windows-su v0.2.1
+github.com/mattn/go-isatty v0.0.19 // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
@@ -33,9 +33,10 @@ require (
github.com/ubuntu/decorate v0.0.0-20230125165522-2d5b0a9bb117 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
github.com/wailsapp/go-webview2 v1.0.1 // indirect
github.com/wailsapp/mimetype v1.4.1 // indirect
golang.org/x/crypto v0.9.0 // indirect
-golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sys v0.9.0 // indirect
golang.org/x/text v0.9.0 // indirect

go.sum

@@ -36,8 +36,8 @@ github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
-github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/minio/selfupdate v0.6.0 h1:i76PgT0K5xO9+hjzKcacQtO7+MjJ4JKA8Ak8XQ9DDwU=
github.com/minio/selfupdate v0.6.0/go.mod h1:bO02GTIPCMQFTEvE5h4DjYB58bCoZ35XLeBf0buTDdM=
github.com/nyaosorg/go-windows-su v0.2.1 h1:5V0XavLyjOqPUp7psxxCvBISaneU4XmFPSMlejSl5sc=
@@ -69,17 +69,19 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/wailsapp/go-webview2 v1.0.1 h1:dEJIeEApW/MhO2tTMISZBFZPuW7kwrFA1NtgFB1z1II=
github.com/wailsapp/go-webview2 v1.0.1/go.mod h1:Uk2BePfCRzttBBjFrBmqKGJd41P6QIHeV9kTgIeOZNo=
github.com/wailsapp/mimetype v1.4.1 h1:pQN9ycO7uo4vsUUuPeHEYoUkLVkaRntMnHJxVwYhwHs=
github.com/wailsapp/mimetype v1.4.1/go.mod h1:9aV5k31bBOv5z6u+QP8TltzvNGJPmNJD4XlAL3U+j3o=
-github.com/wailsapp/wails/v2 v2.5.1 h1:mfG+2kWqQXYOwdgI43HEILjOZDXbk5woPYI3jP2b+js=
-github.com/wailsapp/wails/v2 v2.5.1/go.mod h1:jbOZbcr/zm79PxXxAjP8UoVlDd9wLW3uDs+isIthDfs=
+github.com/wailsapp/wails/v2 v2.6.0 h1:EyH0zR/EO6dDiqNy8qU5spaXDfkluiq77xrkabPYD4c=
+github.com/wailsapp/wails/v2 v2.6.0/go.mod h1:WBG9KKWuw0FKfoepBrr/vRlyTmHaMibWesK3yz6nNiM=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
-golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
-golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=

main.go

@@ -49,6 +49,9 @@ var cyacInfo embed.FS
//go:embed backend-python
var py embed.FS
//go:embed backend-rust
var webgpu embed.FS
//go:embed finetune
var finetune embed.FS
@@ -58,14 +61,19 @@ var midi embed.FS
//go:embed assets/sound-font
var midiAssets embed.FS
//go:embed components
var components embed.FS
func main() {
if buildInfo, ok := debug.ReadBuildInfo(); !ok || strings.Contains(buildInfo.String(), "-ldflags") {
backend.CopyEmbed(cyac)
backend.CopyEmbed(cyacInfo)
backend.CopyEmbed(py)
backend.CopyEmbed(webgpu)
backend.CopyEmbed(finetune)
backend.CopyEmbed(midi)
backend.CopyEmbed(midiAssets)
backend.CopyEmbed(components)
}
// Create an instance of the app structure
@@ -90,6 +98,7 @@ func main() {
Height: 680,
MinWidth: 375,
MinHeight: 640,
EnableDefaultContextMenu: true,
Windows: &windows.Options{
ZoomFactor: zoomFactor,
IsZoomControlEnabled: true,
@@ -98,7 +107,8 @@ func main() {
Assets: assets,
Handler: NewFileLoader(),
},
-OnStartup: app.OnStartup,
+OnStartup:        app.OnStartup,
+OnBeforeClose:    app.OnBeforeClose,
Bind: []any{
app,
},


@@ -1,5 +1,5 @@
{
"version": "1.4.2",
"version": "1.4.7",
"introduction": {
"en": "RWKV is an open-source, commercially usable large language model with high flexibility and great potential for development.\n### About This Tool\nThis tool aims to lower the barrier of entry for using large language models, making it accessible to everyone. It provides fully automated dependency and model management. You simply need to click and run, following the instructions, to deploy a local large language model. The tool itself is very compact and only requires a single executable file for one-click deployment.\nAdditionally, this tool offers an interface that is fully compatible with the OpenAI API. This means you can use any ChatGPT client as a client for RWKV, enabling capability expansion beyond just chat functionality.\n### Preset Configuration Rules at the Bottom\nThis tool comes with a series of preset configurations to reduce complexity. The naming rules for each configuration represent the following in order: device - required VRAM/memory - model size - model language.\nFor example, \"GPU-8G-3B-EN\" indicates that this configuration is for a graphics card with 8GB of VRAM, a model size of 3 billion parameters, and it uses an English language model.\nLarger model sizes have higher performance and VRAM requirements. Among configurations with the same model size, those with higher VRAM usage will have faster runtime.\nFor example, if you have 12GB of VRAM but running the \"GPU-12G-7B-EN\" configuration is slow, you can downgrade to \"GPU-8G-3B-EN\" for a significant speed improvement.\n### About RWKV\nRWKV is an RNN with Transformer-level LLM performance, which can also be directly trained like a GPT transformer (parallelizable). And it's 100% attention-free. You only need the hidden state at position t to compute the state at position t+1. You can use the \"GPT\" mode to quickly compute the hidden state for the \"RNN\" mode.<br/>So it's combining the best of RNN and transformer - great performance, fast inference, saves VRAM, fast training, \"infinite\" ctx_len, and free sentence embedding (using the final hidden state).",
"zh": "RWKV是一个开源且允许商用的大语言模型灵活性很高且极具发展潜力。\n### 关于本工具\n本工具旨在降低大语言模型的使用门槛做到人人可用本工具提供了全自动化的依赖和模型管理你只需要直接点击运行跟随引导即可完成本地大语言模型的部署工具本身体积极小只需要一个exe即可完成一键部署。\n此外本工具提供了与OpenAI API完全兼容的接口这意味着你可以把任意ChatGPT客户端用作RWKV的客户端实现能力拓展而不局限于聊天。\n### 底部的预设配置规则\n本工具内置了一系列预设配置以降低使用难度每个配置名的规则依次代表着设备-所需显存/内存-模型规模-模型语言。\n例如GPU-8G-3B-CN表示该配置用于显卡需要8G显存模型规模为30亿参数使用的是中文模型。\n模型规模越大性能要求越高显存要求也越高而同样模型规模的配置中显存占用越高的运行速度越快。\n例如当你有12G显存但运行GPU-12G-7B-CN配置速度比较慢可降级成GPU-8G-3B-CN将会大幅提速。\n### 关于RWKV\nRWKV是具有Transformer级别LLM性能的RNN也可以像GPT Transformer一样直接进行训练可并行化。而且它是100% attention-free的。你只需在位置t处获得隐藏状态即可计算位置t + 1处的状态。你可以使用“GPT”模式快速计算用于“RNN”模式的隐藏状态。\n因此它将RNN和Transformer的优点结合起来 - 高性能、快速推理、节省显存、快速训练、“无限”上下文长度以及免费的语句嵌入(使用最终隐藏状态)。"
@@ -301,6 +301,58 @@
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-7B-v1-20230626-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-7B-v1-20230626-ctx4096.pth"
},
{
"name": "RWKV-claude-4-World-7B-20230805-ctx65k.pth",
"desc": {
"en": "Global Languages 7B v1 Ctx65k Claude Like",
"zh": "全球语言 7B v1 65k上下文 Claude功能",
"ja": "グローバル言語 7B v1 65kコンテキスト Claude機能"
},
"size": 15035391533,
"SHA256": "8cd25f8a1ab58965993cc47b3b2f99585836eed008a2e44526c258189ea751a6",
"lastUpdated": "2023-08-05T08:52:20",
"url": "https://huggingface.co/xiaol/RWKV-claude-4-World-7B-65k/blob/main/RWKV-claude-4-World-7B-20230805-ctx65k.pth",
"downloadUrl": "https://huggingface.co/xiaol/RWKV-claude-4-World-7B-65k/resolve/main/RWKV-claude-4-World-7B-20230805-ctx65k.pth"
},
{
"name": "RWKV-toolformer-translation-japanese-chinese-english-7B-World-20230815-ctx128k.pth",
"desc": {
"en": "Global Languages 7B v1 Ctx128k Toolformer",
"zh": "全球语言 7B v1 128k上下文 Toolformer",
"ja": "グローバル言語 7B v1 128kコンテキスト Toolformer"
},
"size": 15035391533,
"SHA256": "648a3b21055bdab77021ce278da80fbada8dcaae0b3d41d1eca9aa194c1fd25f",
"lastUpdated": "2023-08-15T07:18:23",
"url": "https://huggingface.co/xiaol/RWKV-toolformer-translation-japanese-chinese-english-7B-World-128k/blob/main/RWKV-toolformer-translation-japanese-chinese-english-7B-World-20230815-ctx128k.pth",
"downloadUrl": "https://huggingface.co/xiaol/RWKV-toolformer-translation-japanese-chinese-english-7B-World-128k/resolve/main/RWKV-toolformer-translation-japanese-chinese-english-7B-World-20230815-ctx128k.pth"
},
{
"name": "RWKV-code-4-World-7B-20230820-ctx32k.pth",
"desc": {
"en": "Global Languages 7B v1 Ctx32k Code Ability",
"zh": "全球语言 7B v1 32k上下文 代码能力",
"ja": "グローバル言語 7B v1 32kコンテキスト コード能力"
},
"size": 15035391533,
"SHA256": "19666620437ae3a5fb06e16a52729d67e449fca155fab3d5861ffe9ecf247404",
"lastUpdated": "2023-08-20T05:00:17",
"url": "https://huggingface.co/xiaol/RWKV-Code-7B-world-32k/blob/main/RWKV-code-4-World-7B-20230820-ctx32k.pth",
"downloadUrl": "https://huggingface.co/xiaol/RWKV-Code-7B-world-32k/resolve/main/RWKV-code-4-World-7B-20230820-ctx32k.pth"
},
{
"name": "wizard-rwkv-4-world-ctx32k.pth",
"desc": {
"en": "Global Languages 7B v1 Ctx32k Wikipedia",
"zh": "全球语言 7B v1 32k上下文 维基百科",
"ja": "グローバル言語 7B v1 32kコンテキスト ウィキペディア"
},
"size": 15035391538,
"SHA256": "c5d991f315a1676d4bed93dd91f803b1376096e7a4af5bf72b339d055f53bac7",
"lastUpdated": "2023-07-29T03:21:47",
"url": "https://huggingface.co/xiaol/wizard-rwkv-world-7B-ctx32k/blob/main/wizard-rwkv-4-world-ctx32k.pth",
"downloadUrl": "https://huggingface.co/xiaol/wizard-rwkv-world-7B-ctx32k/resolve/main/wizard-rwkv-4-world-ctx32k.pth"
},
{
"name": "RWKV-4-World-CHNtuned-7B-v1-20230709-ctx4096.pth",
"desc": {
@@ -327,6 +379,45 @@
"url": "https://huggingface.co/xiaol/readflow-rwkv-4-world-ctx32k/blob/main/Readflow-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k.pth",
"downloadUrl": "https://huggingface.co/xiaol/readflow-rwkv-4-world-ctx32k/resolve/main/Readflow-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k.pth"
},
{
"name": "novel-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k.pth",
"desc": {
"en": "Global Languages 7B v1 Enhanced Chinese Ctx32k Novel Outline Ability",
"zh": "全球语言 7B v1 中文增强 32k上下文 小说大纲扩写",
"ja": "グローバル言語 7B v1 中国語強化 32kコンテキスト 小説のあらすじを書く"
},
"size": 15035391538,
"SHA256": "0fe2415ce61af52a8c38c071b475c01b4c9f8a4f2b4aaed6181f0334f3faf7f4",
"lastUpdated": "2023-07-28T13:30:59",
"url": "https://huggingface.co/xiaol/ruotangwx-rwkv-7b-novel-32k/blob/main/novel-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k.pth",
"downloadUrl": "https://huggingface.co/xiaol/ruotangwx-rwkv-7b-novel-32k/resolve/main/novel-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k.pth"
},
{
"name": "chatgal-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k-1000.pth",
"desc": {
"en": "Global Languages 7B v1 Enhanced Chinese Ctx32k GalGame 1000",
"zh": "全球语言 7B v1 中文增强 32k上下文 GalGame 1000",
"ja": "グローバル言語 7B v1 中国語強化 32kコンテキスト GalGame 1000"
},
"size": 15035391543,
"SHA256": "aaed29cfd1bddee47c48f564aa800eb001f62fd03290d772647d5678e40d66e8",
"lastUpdated": "2023-07-21T08:59:18",
"url": "https://huggingface.co/xiaol/chatgal-rwkv-7b-world-32k/blob/main/chatgal-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k-1000.pth",
"downloadUrl": "https://huggingface.co/xiaol/chatgal-rwkv-7b-world-32k/resolve/main/chatgal-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k-1000.pth"
},
{
"name": "chatgal-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k-500.pth",
"desc": {
"en": "Global Languages 7B v1 Enhanced Chinese Ctx32k GalGame 500",
"zh": "全球语言 7B v1 中文增强 32k上下文 GalGame 500",
"ja": "グローバル言語 7B v1 中国語強化 32kコンテキスト GalGame 500"
},
"size": 15035391538,
"SHA256": "b5d347d5dedb4f398ec31489ab87b75b1dee772ae7d0a34c26635cf5d95c8794",
"lastUpdated": "2023-07-21T07:31:05",
"url": "https://huggingface.co/xiaol/chatgal-rwkv-7b-world-32k/blob/main/chatgal-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k-500.pth",
"downloadUrl": "https://huggingface.co/xiaol/chatgal-rwkv-7b-world-32k/resolve/main/chatgal-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k-500.pth"
},
{
"name": "RWKV-4-World-JPNtuned-7B-v1-20230718-ctx4096.pth",
"desc": {
@@ -340,6 +431,19 @@
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-JPNtuned-7B-v1-20230718-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-JPNtuned-7B-v1-20230718-ctx4096.pth"
},
{
"name": "RWKV-novel-4-World-7B-20230810-ctx128k.pth",
"desc": {
"en": "Global Languages Writer 7B v1 Ctx128k",
"zh": "全球语言写作 7B v1 128k上下文",
"ja": "グローバル言語ライター 7B v1 128kコンテキスト"
},
"size": 15035391533,
"SHA256": "5e429c49e4cab2f29a93f87a80635422c8710d70e5b1d962c078e47d957389c8",
"lastUpdated": "2023-08-10T06:30:32",
"url": "https://huggingface.co/xiaol/rwkv-7B-world-novel-128k/blob/main/RWKV-novel-4-World-7B-20230810-ctx128k.pth",
"downloadUrl": "https://huggingface.co/xiaol/rwkv-7B-world-novel-128k/resolve/main/RWKV-novel-4-World-7B-20230810-ctx128k.pth"
},
{
"name": "RWKV-4-Novel-7B-v1-ChnEng-ChnPro-20230410-ctx4096.pth",
"desc": {


@@ -2,6 +2,7 @@
- ^backend-python/wkv_cuda_utils/
- ^backend-python/get-pip\.py
- ^backend-python/convert_model\.py
+- ^backend-python/convert_safetensors\.py
- ^backend-python/utils/midi\.py
- ^build/
- ^finetune/lora/