Compare commits

29 commits

| Author | SHA1 | Date |
|---|---|---|
|  | 1b83bf261a |  |
|  | 2a7d22dab1 |  |
|  | f7494b0cfb |  |
|  | 9ca91d59ec |  |
|  | 11feaa6e68 |  |
|  | 18d4b2304e |  |
|  | 2f45e9c33a |  |
|  | f7df10cb66 |  |
|  | 46e9a2f5b2 |  |
|  | 69b8d2e0a1 |  |
|  | 0ddd2e9fea |  |
|  | 01c95f5bc4 |  |
|  | e0bf44d82f |  |
|  | f328e84ea7 |  |
|  | c81f5015a1 |  |
|  | e2b086e2f7 |  |
|  | da632565d5 |  |
|  | 556b667cc0 |  |
|  | 82c9825da8 |  |
|  | 26b30f0dbe |  |
|  | be3b69c65c |  |
|  | 07cab6949e |  |
|  | 18d58ce124 |  |
|  | b8f8837a8f |  |
|  | 0c796c8cfc |  |
|  | b14fbc29b7 |  |
|  | 6e29f97881 |  |
|  | a164939161 |  |
|  | 09ab11ef01 |  |
.gitattributes (vendored, 1 change)

```diff
@@ -3,6 +3,7 @@ backend-python/wkv_cuda_utils/** linguist-vendored
 backend-python/get-pip.py linguist-vendored
 backend-python/convert_model.py linguist-vendored
 backend-python/convert_safetensors.py linguist-vendored
+backend-python/convert_pytorch_to_ggml.py linguist-vendored
 backend-python/utils/midi.py linguist-vendored
 build/** linguist-vendored
 finetune/lora/** linguist-vendored
```
.github/workflows/release.yml (vendored, 9 changes)

```diff
@@ -65,7 +65,10 @@ jobs:
           Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../libs" -Destination "py310/libs" -Recurse
           ./py310/python -m pip install cyac==1.9
           go install github.com/wailsapp/wails/v2/cmd/wails@latest
+          del ./backend-python/rwkv_pip/cpp/librwkv.dylib
+          del ./backend-python/rwkv_pip/cpp/librwkv.so
           (Get-Content -Path ./backend-golang/app.go) -replace "//go:custom_build windows ", "" | Set-Content -Path ./backend-golang/app.go
+          (Get-Content -Path ./backend-golang/utils.go) -replace "//go:custom_build windows ", "" | Set-Content -Path ./backend-golang/utils.go
           make
           Rename-Item -Path "build/bin/RWKV-Runner.exe" -NewName "RWKV-Runner_windows_x64.exe"

@@ -93,6 +96,9 @@
           rm ./backend-python/rwkv_pip/rwkv6.pyd
           rm ./backend-python/rwkv_pip/beta/wkv_cuda.pyd
           rm ./backend-python/get-pip.py
+          rm ./backend-python/rwkv_pip/cpp/librwkv.dylib
+          rm ./backend-python/rwkv_pip/cpp/rwkv.dll
+          rm ./backend-python/rwkv_pip/webgpu/web_rwkv_py.cp310-win_amd64.pyd
           make
           mv build/bin/RWKV-Runner build/bin/RWKV-Runner_linux_x64

@@ -117,6 +123,9 @@
           rm ./backend-python/rwkv_pip/rwkv6.pyd
           rm ./backend-python/rwkv_pip/beta/wkv_cuda.pyd
           rm ./backend-python/get-pip.py
+          rm ./backend-python/rwkv_pip/cpp/rwkv.dll
+          rm ./backend-python/rwkv_pip/cpp/librwkv.so
+          rm ./backend-python/rwkv_pip/webgpu/web_rwkv_py.cp310-win_amd64.pyd
           make
           cp build/darwin/Readme_Install.txt build/bin/Readme_Install.txt
           cp build/bin/RWKV-Runner.app/Contents/MacOS/RWKV-Runner build/bin/RWKV-Runner_darwin_universal
```
.gitignore (vendored, 1 change)

```diff
@@ -8,6 +8,7 @@ __pycache__
 *.st
 *.safetensors
 *.bin
+*.mid
 /config.json
 /cache.json
 /presets.json
```
```diff
@@ -1,10 +1,7 @@
 ## Changes

 - allow importing midi file
 - add midi tracks to webUI
 - improve current instrument display
 - fix generation instrumentType
 - chore
 - update midi_filter_config.json
 - Composition Option: Only Auto Play Generated Content

 ## Install
```
```diff
@@ -73,7 +73,7 @@ English | [简体中文](README_ZH.md) | [日本語](README_JA.md)
 - Easy-to-understand and operate parameter configuration, along with various operation guidance prompts.
 - Built-in model conversion tool.
 - Built-in download management and remote model inspection.
-- Built-in one-click LoRA Finetune.
+- Built-in one-click LoRA Finetune. (Windows Only)
 - Can also be used as an OpenAI ChatGPT and GPT-Playground client. (Fill in the API URL and API Key in Settings page)
 - Multilingual localization.
 - Theme switching.
```
```diff
@@ -70,7 +70,7 @@
 - Easy-to-understand and easy-to-operate parameter configuration, along with guidance prompts for the various operations
 - Built-in model conversion tool
 - Built-in download management and remote model inspection
-- Built-in one-click LoRA fine-tuning
+- Built-in one-click LoRA fine-tuning (Windows only)
 - Can also be used as an OpenAI ChatGPT and GPT Playground client (enter the `API URL` and `API Key` on the Settings page)
 - Multilingual localization
```
```diff
@@ -68,7 +68,7 @@ An API-compatible interface, which means every ChatGPT client is an RWKV client.
 - Easy-to-understand and operate parameter configuration, with various operation guidance prompts
 - Built-in model conversion tool
 - Built-in download management and remote model inspection
-- Built-in one-click LoRA finetuning
+- Built-in one-click LoRA finetuning (Windows only)
 - Can also be used as an OpenAI ChatGPT and GPT Playground client (fill in the API URL and API Key in Settings)
 - Multilingual localization
 - Theme switching
```
```diff
@@ -14,6 +14,13 @@ import (
 	wruntime "github.com/wailsapp/wails/v2/pkg/runtime"
 )

+func (a *App) SaveFile(path string, savedContent []byte) error {
+	if err := os.WriteFile(a.exDir+path, savedContent, 0644); err != nil {
+		return err
+	}
+	return nil
+}
+
 func (a *App) SaveJson(fileName string, jsonData any) error {
 	text, err := json.MarshalIndent(jsonData, "", " ")
 	if err != nil {
@@ -195,3 +202,12 @@ func (a *App) OpenFileFolder(path string, relative bool) error {
 	}
 	return errors.New("unsupported OS")
 }
+
+func (a *App) StartFile(path string) error {
+	cmd, err := CmdHelper(true, path)
+	if err != nil {
+		return err
+	}
+	err = cmd.Start()
+	return err
+}
```
```diff
@@ -10,7 +10,7 @@ import (
 	"strings"
 )

-func (a *App) StartServer(python string, port int, host string, webui bool, rwkvBeta bool) (string, error) {
+func (a *App) StartServer(python string, port int, host string, webui bool, rwkvBeta bool, rwkvcpp bool, webgpu bool) (string, error) {
 	var err error
 	if python == "" {
 		python, err = GetPython()
@@ -25,6 +25,12 @@ func (a *App) StartServer(python string, port int, host string, webui bool, rwkv
 	if rwkvBeta {
 		args = append(args, "--rwkv-beta")
 	}
+	if rwkvcpp {
+		args = append(args, "--rwkv.cpp")
+	}
+	if webgpu {
+		args = append(args, "--webgpu")
+	}
 	args = append(args, "--port", strconv.Itoa(port), "--host", host)
 	return Cmd(args...)
 }
```
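
The two new booleans are forwarded to the Python backend as `--rwkv.cpp` and `--webgpu`; their parser definitions appear in the `get_args` hunk further below. As a rough sketch, assuming the backend entry point is `./backend-python/main.py` (the argument prefix is not shown in this hunk), a WebGPU launch would build a vector like:

```python
# Hypothetical argument vector produced by
# StartServer(python, 8000, "127.0.0.1", webui=False, rwkvBeta=False,
#             rwkvcpp=False, webgpu=True); the script path is an assumption.
args = [
    "python", "./backend-python/main.py",
    "--webgpu",
    "--port", "8000",
    "--host", "127.0.0.1",
]
```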
```diff
@@ -52,6 +58,32 @@ func (a *App) ConvertSafetensors(modelPath string, outPath string) (string, error)
 	return Cmd(args...)
 }

+func (a *App) ConvertSafetensorsWithPython(python string, modelPath string, outPath string) (string, error) {
+	var err error
+	if python == "" {
+		python, err = GetPython()
+	}
+	if err != nil {
+		return "", err
+	}
+	return Cmd(python, "./backend-python/convert_safetensors.py", "--input", modelPath, "--output", outPath)
+}
+
+func (a *App) ConvertGGML(python string, modelPath string, outPath string, Q51 bool) (string, error) {
+	var err error
+	if python == "" {
+		python, err = GetPython()
+	}
+	if err != nil {
+		return "", err
+	}
+	dataType := "FP16"
+	if Q51 {
+		dataType = "Q5_1"
+	}
+	return Cmd(python, "./backend-python/convert_pytorch_to_ggml.py", modelPath, outPath, dataType)
+}
+
 func (a *App) ConvertData(python string, input string, outputPrefix string, vocab string) (string, error) {
 	var err error
 	if python == "" {
```
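
`ConvertGGML` simply shells out to the new converter script, so the same conversion can be reproduced by hand. A minimal sketch (model paths are illustrative):

```python
import subprocess

# Convert a PyTorch checkpoint to a Q5_1 rwkv.cpp file, mirroring
# Cmd(python, "./backend-python/convert_pytorch_to_ggml.py", modelPath, outPath, dataType).
subprocess.run(
    [
        "python",
        "./backend-python/convert_pytorch_to_ggml.py",
        "models/RWKV-4-Pile-169M.pth",      # illustrative input path
        "models/rwkv.cpp-169M-Q5_1.bin",    # illustrative output path
        "Q5_1",
    ],
    check=True,
)
```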
```diff
@@ -15,33 +15,50 @@ import (
 	"runtime"
 	"strconv"
 	"strings"
+	"syscall"
 )

+func CmdHelper(hideWindow bool, args ...string) (*exec.Cmd, error) {
+	if runtime.GOOS != "windows" {
+		return nil, errors.New("unsupported OS")
+	}
+	filename := "./cmd-helper.bat"
+	_, err := os.Stat(filename)
+	if err != nil {
+		if err := os.WriteFile(filename, []byte("start %*"), 0644); err != nil {
+			return nil, err
+		}
+	}
+	cmdHelper, err := filepath.Abs(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	if strings.Contains(cmdHelper, " ") {
+		for _, arg := range args {
+			if strings.Contains(arg, " ") {
+				return nil, errors.New("path contains space") // golang bug https://github.com/golang/go/issues/17149#issuecomment-473976818
+			}
+		}
+	}
+	cmd := exec.Command(cmdHelper, args...)
+	cmd.SysProcAttr = &syscall.SysProcAttr{}
+	//go:custom_build windows cmd.SysProcAttr.HideWindow = hideWindow
+	return cmd, nil
+}
+
 func Cmd(args ...string) (string, error) {
 	switch platform := runtime.GOOS; platform {
 	case "windows":
-		if err := os.WriteFile("./cmd-helper.bat", []byte("start %*"), 0644); err != nil {
-			return "", err
-		}
-		cmdHelper, err := filepath.Abs("./cmd-helper")
+		cmd, err := CmdHelper(true, args...)
 		if err != nil {
 			return "", err
 		}
-
-		if strings.Contains(cmdHelper, " ") {
-			for _, arg := range args {
-				if strings.Contains(arg, " ") {
-					return "", errors.New("path contains space") // golang bug https://github.com/golang/go/issues/17149#issuecomment-473976818
-				}
-			}
-		}
-
-		cmd := exec.Command(cmdHelper, args...)
-		out, err := cmd.CombinedOutput()
+		_, err = cmd.CombinedOutput()
 		if err != nil {
 			return "", err
 		}
-		return string(out), nil
+		return "", nil
 	case "darwin":
 		ex, err := os.Executable()
 		if err != nil {
```
backend-python/convert_pytorch_to_ggml.py (vendored, new file, 169 lines)

```python
# Converts an RWKV model checkpoint in PyTorch format to an rwkv.cpp compatible file.
# Usage: python convert_pytorch_to_ggml.py C:\RWKV-4-Pile-169M-20220807-8023.pth C:\rwkv.cpp-169M-FP16.bin FP16
# Get model checkpoints from https://huggingface.co/BlinkDL
# See FILE_FORMAT.md for the documentation on the file format.

import argparse
import struct
import torch
from typing import Dict


def parse_args():
    parser = argparse.ArgumentParser(
        description="Convert an RWKV model checkpoint in PyTorch format to an rwkv.cpp compatible file"
    )
    parser.add_argument("src_path", help="Path to PyTorch checkpoint file")
    parser.add_argument(
        "dest_path", help="Path to rwkv.cpp checkpoint file, will be overwritten"
    )
    parser.add_argument(
        "data_type",
        help="Data type, FP16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0",
        type=str,
        choices=[
            "FP16",
            "Q4_0",
            "Q4_1",
            "Q5_0",
            "Q5_1",
            "Q8_0",
        ],
        default="FP16",
    )
    return parser.parse_args()


def get_layer_count(state_dict: Dict[str, torch.Tensor]) -> int:
    n_layer: int = 0

    while f"blocks.{n_layer}.ln1.weight" in state_dict:
        n_layer += 1

    assert n_layer > 0

    return n_layer


def write_state_dict(
    state_dict: Dict[str, torch.Tensor], dest_path: str, data_type: str
) -> None:
    emb_weight: torch.Tensor = state_dict["emb.weight"]

    n_layer: int = get_layer_count(state_dict)
    n_vocab: int = emb_weight.shape[0]
    n_embed: int = emb_weight.shape[1]

    is_v5_1_or_2: bool = "blocks.0.att.ln_x.weight" in state_dict
    is_v5_2: bool = "blocks.0.att.gate.weight" in state_dict

    if is_v5_2:
        print("Detected RWKV v5.2")
    elif is_v5_1_or_2:
        print("Detected RWKV v5.1")
    else:
        print("Detected RWKV v4")

    with open(dest_path, "wb") as out_file:
        is_FP16: bool = data_type == "FP16" or data_type == "float16"

        out_file.write(
            struct.pack(
                # Disable padding with '='
                "=iiiiii",
                # Magic: 'ggmf' in hex
                0x67676D66,
                101,
                n_vocab,
                n_embed,
                n_layer,
                1 if is_FP16 else 0,
            )
        )

        for k in state_dict.keys():
            tensor: torch.Tensor = state_dict[k].float()

            if ".time_" in k:
                tensor = tensor.squeeze()

            if is_v5_1_or_2:
                if ".time_decay" in k:
                    if is_v5_2:
                        tensor = torch.exp(-torch.exp(tensor)).unsqueeze(-1)
                    else:
                        tensor = torch.exp(-torch.exp(tensor)).reshape(-1, 1, 1)

                if ".time_first" in k:
                    tensor = torch.exp(tensor).reshape(-1, 1, 1)

                if ".time_faaaa" in k:
                    tensor = tensor.unsqueeze(-1)
            else:
                if ".time_decay" in k:
                    tensor = -torch.exp(tensor)

            # Keep 1-dim vectors and small matrices in FP32
            if is_FP16 and len(tensor.shape) > 1 and ".time_" not in k:
                tensor = tensor.half()

            shape = tensor.shape

            print(f"Writing {k}, shape {shape}, type {tensor.dtype}")

            k_encoded: bytes = k.encode("utf-8")

            out_file.write(
                struct.pack(
                    "=iii",
                    len(shape),
                    len(k_encoded),
                    1 if tensor.dtype == torch.float16 else 0,
                )
            )

            # Dimension order is reversed here:
            # * PyTorch shape is (x rows, y columns)
            # * ggml shape is (y elements in a row, x elements in a column)
            # Both shapes represent the same tensor.
            for dim in reversed(tensor.shape):
                out_file.write(struct.pack("=i", dim))

            out_file.write(k_encoded)

            tensor.numpy().tofile(out_file)


def main() -> None:
    args = parse_args()

    print(f"Reading {args.src_path}")

    state_dict: Dict[str, torch.Tensor] = torch.load(args.src_path, map_location="cpu")

    temp_output: str = args.dest_path
    if args.data_type.startswith("Q"):
        import re

        temp_output = re.sub(r"Q[4,5,8]_[0,1]", "fp16", temp_output)
    write_state_dict(state_dict, temp_output, "FP16")
    if args.data_type.startswith("Q"):
        import sys
        import os

        sys.path.append(os.path.dirname(os.path.realpath(__file__)))
        from rwkv_pip.cpp import rwkv_cpp_shared_library

        library = rwkv_cpp_shared_library.load_rwkv_shared_library()
        library.rwkv_quantize_model_file(temp_output, args.dest_path, args.data_type)

    print("Done")


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        print(e)
        with open("error.txt", "w") as f:
            f.write(str(e))
```
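
The converter writes a fixed 24-byte header before the tensors. A small sketch of reading it back, useful for sanity-checking an output file (the path is illustrative):

```python
import struct

# Field order follows the struct.pack("=iiiiii", ...) call in write_state_dict.
with open("models/rwkv.cpp-169M-FP16.bin", "rb") as f:  # illustrative path
    magic, version, n_vocab, n_embed, n_layer, is_fp16 = struct.unpack(
        "=iiiiii", f.read(24)
    )

assert magic == 0x67676D66  # 'ggmf'
print(version, n_vocab, n_embed, n_layer, "FP16" if is_fp16 else "FP32")
```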
backend-python/convert_safetensors.py (vendored, 27 changes)

```diff
@@ -30,6 +30,33 @@ def convert_file(pt_filename: str, sf_filename: str, rename={}, transpose_names=
     if "state_dict" in loaded:
         loaded = loaded["state_dict"]

+    kk = list(loaded.keys())
+    version = 4
+    for x in kk:
+        if "ln_x" in x:
+            version = max(5, version)
+        if "gate.weight" in x:
+            version = max(5.1, version)
+        if int(version) == 5 and "att.time_decay" in x:
+            if len(loaded[x].shape) > 1:
+                if loaded[x].shape[1] > 1:
+                    version = max(5.2, version)
+        if "time_maa" in x:
+            version = max(6, version)
+
+    if version == 5.1 and "midi" in pt_filename.lower():
+        import numpy as np
+
+        np.set_printoptions(precision=4, suppress=True, linewidth=200)
+        kk = list(loaded.keys())
+        _, n_emb = loaded["emb.weight"].shape
+        for k in kk:
+            if "time_decay" in k or "time_faaaa" in k:
+                # print(k, mm[k].shape)
+                loaded[k] = (
+                    loaded[k].unsqueeze(1).repeat(1, n_emb // loaded[k].shape[0])
+                )
+
     loaded = {k: v.clone().half() for k, v in loaded.items()}
     # for k, v in loaded.items():
     #     print(f'{k}\t{v.shape}\t{v.dtype}')
```
```diff
@@ -32,6 +32,16 @@ def get_args(args: Union[Sequence[str], None] = None):
         action="store_true",
         help="whether to use rwkv-beta (default: False)",
     )
+    group.add_argument(
+        "--rwkv.cpp",
+        action="store_true",
+        help="whether to use rwkv.cpp (default: False)",
+    )
+    group.add_argument(
+        "--webgpu",
+        action="store_true",
+        help="whether to use webgpu (default: False)",
+    )
     args = parser.parse_args(args)

     return args
```
```diff
@@ -8,7 +8,6 @@ import base64
 from fastapi import APIRouter, Request, status, HTTPException
 from sse_starlette.sse import EventSourceResponse
 from pydantic import BaseModel, Field
-import numpy as np
 import tiktoken
 from utils.rwkv import *
 from utils.log import quick_log
@@ -396,6 +395,8 @@ class EmbeddingsBody(BaseModel):


 def embedding_base64(embedding: List[float]) -> str:
+    import numpy as np
+
     return base64.b64encode(np.array(embedding).astype(np.float32)).decode("utf-8")
```
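
For reference, the base64 embedding encoding round-trips cleanly; a tiny check (values are illustrative):

```python
import base64
import numpy as np

s = base64.b64encode(np.array([0.1, 0.2]).astype(np.float32)).decode("utf-8")
back = np.frombuffer(base64.b64decode(s), dtype=np.float32)  # ~[0.1, 0.2]
```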
```diff
@@ -49,19 +49,13 @@ def switch_model(body: SwitchModelBody, response: Response, request: Request):
     if body.model == "":
         return "success"

-    STRATEGY_REGEX = r"^(?:(?:^|->) *(?:cuda(?::[\d]+)?|cpu|mps|dml) (?:fp(?:16|32)|bf16)(?:i8|i4|i3)?(?: \*[\d]+\+?)? *)+$"
-    if not re.match(STRATEGY_REGEX, body.strategy):
-        raise HTTPException(
-            status.HTTP_400_BAD_REQUEST,
-            "Invalid strategy. Please read https://pypi.org/project/rwkv/",
-        )
     devices = set(
         [
             x.strip().split(" ")[0].replace("cuda:0", "cuda")
             for x in body.strategy.split("->")
         ]
     )
-    print(f"Devices: {devices}")
+    print(f"Strategy Devices: {devices}")
     # if len(devices) > 1:
     #     state_cache.disable_state_cache()
     # else:
@@ -80,6 +74,10 @@ def switch_model(body: SwitchModelBody, response: Response, request: Request):
         )
     except Exception as e:
         print(e)
+        import traceback
+
+        print(traceback.format_exc())
+
         quick_log(request, body, f"Exception: {e}")
         global_var.set(global_var.Model_Status, global_var.ModelStatus.Offline)
         raise HTTPException(
```
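
The removed validation can still be exercised standalone; a quick sketch of what the old regex accepted (the example strategy strings are illustrative):

```python
import re

STRATEGY_REGEX = r"^(?:(?:^|->) *(?:cuda(?::[\d]+)?|cpu|mps|dml) (?:fp(?:16|32)|bf16)(?:i8|i4|i3)?(?: \*[\d]+\+?)? *)+$"

assert re.match(STRATEGY_REGEX, "cuda fp16")
assert re.match(STRATEGY_REGEX, "cuda fp16 *20 -> cpu fp32")
assert not re.match(STRATEGY_REGEX, "tpu fp16")  # unknown device is rejected
```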
```diff
@@ -37,10 +37,14 @@ def text_to_midi(body: TextToMidiBody):
 async def midi_to_text(file_data: UploadFile):
     vocab_config = "backend-python/utils/midi_vocab_config.json"
     cfg = VocabConfig.from_json(vocab_config)
+    filter_config = "backend-python/utils/midi_filter_config.json"
+    filter_cfg = FilterConfig.from_json(filter_config)
     mid = mido.MidiFile(file=file_data.file)
-    text = convert_midi_to_str(cfg, mid)
+    output_list = convert_midi_to_str(cfg, filter_cfg, mid)
+    if len(output_list) == 0:
+        raise HTTPException(status.HTTP_400_BAD_REQUEST, "bad midi file")

-    return {"text": text}
+    return {"text": output_list[0]}


 class TxtToMidiBody(BaseModel):
```
```diff
@@ -87,13 +87,34 @@ def add_state(body: AddStateBody):
         raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")

     import torch
+    import numpy as np

     try:
+        devices: List[torch.device] = []
+        state: Union[Any, None] = None
+
+        if body.state is not None:
+            if type(body.state) == list or type(body.state) == np.ndarray:
+                devices = [
+                    (
+                        tensor.device
+                        if hasattr(tensor, "device")
+                        else torch.device("cpu")
+                    )
+                    for tensor in body.state
+                ]
+                state = (
+                    [tensor.cpu() for tensor in body.state]
+                    if hasattr(body.state[0], "device")
+                    else copy.deepcopy(body.state)
+                )
+            else:
+                pass  # WebGPU
+
         id: int = trie.insert(body.prompt)
-        devices: List[torch.device] = [tensor.device for tensor in body.state]
         dtrie[id] = {
             "tokens": copy.deepcopy(body.tokens),
-            "state": [tensor.cpu() for tensor in body.state],
+            "state": state,
             "logits": copy.deepcopy(body.logits),
             "devices": devices,
         }
@@ -169,6 +190,7 @@ def longest_prefix_state(body: LongestPrefixStateBody, request: Request):
         raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")

     import torch
+    import numpy as np

     id = -1
     try:
@@ -180,12 +202,16 @@ def longest_prefix_state(body: LongestPrefixStateBody, request: Request):
         v = dtrie[id]
         devices: List[torch.device] = v["devices"]
         prompt: str = trie[id]
+        state: Union[Any, None] = v["state"]
+
+        if state is not None and type(state) == list and hasattr(state[0], "device"):
+            state = [tensor.to(devices[i]) for i, tensor in enumerate(state)]
+
         quick_log(request, body, "Hit:\n" + prompt)
         return {
             "prompt": prompt,
             "tokens": v["tokens"],
-            "state": [tensor.to(devices[i]) for i, tensor in enumerate(v["state"])],
+            "state": state,
             "logits": v["logits"],
         }
     else:
```
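
A compact restatement of the cache round-trip above, assuming a torch-tensor state (the values are stand-ins):

```python
import torch

state = [torch.zeros(4)]                    # stand-in for a model state
devices = [t.device for t in state]         # remembered in add_state
cached = [t.cpu() for t in state]           # stored in the trie entry
restored = [t.to(devices[i]) for i, t in enumerate(cached)]  # longest_prefix_state
```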
backend-python/rwkv_pip/cpp/librwkv.dylib (vendored, new binary file, not shown)

backend-python/rwkv_pip/cpp/librwkv.so (vendored, new binary file, not shown)
backend-python/rwkv_pip/cpp/model.py (vendored, new file, 14 lines)

```python
from typing import Any, List, Union
from . import rwkv_cpp_model
from . import rwkv_cpp_shared_library


class RWKV:
    def __init__(self, model_path: str, strategy=None):
        self.library = rwkv_cpp_shared_library.load_rwkv_shared_library()
        self.model = rwkv_cpp_model.RWKVModel(self.library, model_path)
        self.w = {}  # fake weight
        self.w["emb.weight"] = [0] * self.model.n_vocab

    def forward(self, tokens: List[int], state: Union[Any, None] = None):
        return self.model.eval_sequence_in_chunks(tokens, state, use_numpy=True)
```
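
A minimal usage sketch of this wrapper (the model path and token ids are illustrative; `forward` returns the `(logits, state)` pair produced by `eval_sequence_in_chunks`):

```python
from rwkv_pip.cpp.model import RWKV

model = RWKV("models/rwkv.cpp-169M-Q5_1.bin")      # illustrative path
logits, state = model.forward([510, 4342], None)   # prime with a prompt
logits, state = model.forward([2], state)          # feed one more token
```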
backend-python/rwkv_pip/cpp/rwkv.dll (vendored, new binary file, not shown)
backend-python/rwkv_pip/cpp/rwkv_cpp_model.py (vendored, new file, 369 lines)

```python
import os
import multiprocessing

# Pre-import PyTorch, if available.
# This fixes "OSError: [WinError 127] The specified procedure could not be found".
try:
    import torch
except ModuleNotFoundError:
    pass

# I'm sure this is not strictly correct, but let's keep this crutch for now.
try:
    import rwkv_cpp_shared_library
except ModuleNotFoundError:
    from . import rwkv_cpp_shared_library

from typing import TypeVar, Optional, Tuple, List

# A value of this type is either a numpy's ndarray or a PyTorch's Tensor.
NumpyArrayOrPyTorchTensor: TypeVar = TypeVar('NumpyArrayOrPyTorchTensor')


class RWKVModel:
    """
    An RWKV model managed by rwkv.cpp library.
    """

    def __init__(
            self,
            shared_library: rwkv_cpp_shared_library.RWKVSharedLibrary,
            model_path: str,
            thread_count: int = max(1, multiprocessing.cpu_count() // 2),
            gpu_layer_count: int = 0,
            **kwargs
    ) -> None:
        """
        Loads the model and prepares it for inference.
        In case of any error, this method will throw an exception.

        Parameters
        ----------
        shared_library : RWKVSharedLibrary
            rwkv.cpp shared library.
        model_path : str
            Path to RWKV model file in ggml format.
        thread_count : int
            Thread count to use. If not set, defaults to CPU count / 2.
        gpu_layer_count : int
            Count of layers to offload onto the GPU, must be >= 0.
            See documentation of `gpu_offload_layers` for details about layer offloading.
        """

        if 'gpu_layers_count' in kwargs:
            gpu_layer_count = kwargs['gpu_layers_count']

        assert os.path.isfile(model_path), f'{model_path} is not a file'
        assert thread_count > 0, 'Thread count must be > 0'
        assert gpu_layer_count >= 0, 'GPU layer count must be >= 0'

        self._library: rwkv_cpp_shared_library.RWKVSharedLibrary = shared_library

        self._ctx: rwkv_cpp_shared_library.RWKVContext = self._library.rwkv_init_from_file(model_path, thread_count)

        if gpu_layer_count > 0:
            self.gpu_offload_layers(gpu_layer_count)

        self._state_buffer_element_count: int = self._library.rwkv_get_state_buffer_element_count(self._ctx)
        self._logits_buffer_element_count: int = self._library.rwkv_get_logits_buffer_element_count(self._ctx)

        self._valid: bool = True

    def gpu_offload_layers(self, layer_count: int) -> bool:
        """
        Offloads specified count of model layers onto the GPU. Offloaded layers are evaluated using cuBLAS or CLBlast.
        For the purposes of this function, model head (unembedding matrix) is treated as an additional layer:
        - pass `model.n_layer` to offload all layers except model head
        - pass `model.n_layer + 1` to offload all layers, including model head

        Returns true if at least one layer was offloaded.
        If rwkv.cpp was compiled without cuBLAS and CLBlast support, this function is a no-op and always returns false.

        Parameters
        ----------
        layer_count : int
            Count of layers to offload onto the GPU, must be >= 0.
        """

        assert layer_count >= 0, 'Layer count must be >= 0'

        return self._library.rwkv_gpu_offload_layers(self._ctx, layer_count)

    @property
    def n_vocab(self) -> int:
        return self._library.rwkv_get_n_vocab(self._ctx)

    @property
    def n_embed(self) -> int:
        return self._library.rwkv_get_n_embed(self._ctx)

    @property
    def n_layer(self) -> int:
        return self._library.rwkv_get_n_layer(self._ctx)

    def eval(
            self,
            token: int,
            state_in: Optional[NumpyArrayOrPyTorchTensor],
            state_out: Optional[NumpyArrayOrPyTorchTensor] = None,
            logits_out: Optional[NumpyArrayOrPyTorchTensor] = None,
            use_numpy: bool = False
    ) -> Tuple[NumpyArrayOrPyTorchTensor, NumpyArrayOrPyTorchTensor]:
        """
        Evaluates the model for a single token.
        In case of any error, this method will throw an exception.

        Parameters
        ----------
        token : int
            Index of next token to be seen by the model. Must be in range 0 <= token < n_vocab.
        state_in : Optional[NumpyArrayOrTorchTensor]
            State from previous call of this method. If this is a first pass, set it to None.
        state_out : Optional[NumpyArrayOrTorchTensor]
            Optional output tensor for state. If provided, must be of type float32, contiguous and of shape (state_buffer_element_count).
        logits_out : Optional[NumpyArrayOrTorchTensor]
            Optional output tensor for logits. If provided, must be of type float32, contiguous and of shape (logits_buffer_element_count).
        use_numpy : bool
            If set to True, numpy's ndarrays will be created instead of PyTorch's Tensors.
            This parameter is ignored if any tensor parameter is not None; in such case,
            type of returned tensors will match the type of received tensors.

        Returns
        -------
        logits, state
            Logits vector of shape (n_vocab); state for the next step.
        """

        assert self._valid, 'Model was freed'

        use_numpy = self._detect_numpy_usage([state_in, state_out, logits_out], use_numpy)

        if state_in is not None:
            self._validate_tensor(state_in, 'state_in', self._state_buffer_element_count)

            state_in_ptr = self._get_data_ptr(state_in)
        else:
            state_in_ptr = 0

        if state_out is not None:
            self._validate_tensor(state_out, 'state_out', self._state_buffer_element_count)
        else:
            state_out = self._zeros_float32(self._state_buffer_element_count, use_numpy)

        if logits_out is not None:
            self._validate_tensor(logits_out, 'logits_out', self._logits_buffer_element_count)
        else:
            logits_out = self._zeros_float32(self._logits_buffer_element_count, use_numpy)

        self._library.rwkv_eval(
            self._ctx,
            token,
            state_in_ptr,
            self._get_data_ptr(state_out),
            self._get_data_ptr(logits_out)
        )

        return logits_out, state_out

    def eval_sequence(
            self,
            tokens: List[int],
            state_in: Optional[NumpyArrayOrPyTorchTensor],
            state_out: Optional[NumpyArrayOrPyTorchTensor] = None,
            logits_out: Optional[NumpyArrayOrPyTorchTensor] = None,
            use_numpy: bool = False
    ) -> Tuple[NumpyArrayOrPyTorchTensor, NumpyArrayOrPyTorchTensor]:
        """
        Evaluates the model for a sequence of tokens.

        NOTE ON GGML NODE LIMIT

        ggml has a hard-coded limit on max amount of nodes in a computation graph. The sequence graph is built in a way that quickly exceeds
        this limit when using large models and/or large sequence lengths.
        Fortunately, rwkv.cpp's fork of ggml has an increased limit which was tested to work for sequence lengths up to 64 for 14B models.

        If you get `GGML_ASSERT: ...\\ggml.c:16941: cgraph->n_nodes < GGML_MAX_NODES`, this means you've exceeded the limit.
        To get rid of the assertion failure, reduce the model size and/or sequence length.

        In case of any error, this method will throw an exception.

        Parameters
        ----------
        tokens : List[int]
            Indices of the next tokens to be seen by the model. Must be in range 0 <= token < n_vocab.
        state_in : Optional[NumpyArrayOrTorchTensor]
            State from previous call of this method. If this is a first pass, set it to None.
        state_out : Optional[NumpyArrayOrTorchTensor]
            Optional output tensor for state. If provided, must be of type float32, contiguous and of shape (state_buffer_element_count).
        logits_out : Optional[NumpyArrayOrTorchTensor]
            Optional output tensor for logits. If provided, must be of type float32, contiguous and of shape (logits_buffer_element_count).
        use_numpy : bool
            If set to True, numpy's ndarrays will be created instead of PyTorch's Tensors.
            This parameter is ignored if any tensor parameter is not None; in such case,
            type of returned tensors will match the type of received tensors.

        Returns
        -------
        logits, state
            Logits vector of shape (n_vocab); state for the next step.
        """

        assert self._valid, 'Model was freed'

        use_numpy = self._detect_numpy_usage([state_in, state_out, logits_out], use_numpy)

        if state_in is not None:
            self._validate_tensor(state_in, 'state_in', self._state_buffer_element_count)

            state_in_ptr = self._get_data_ptr(state_in)
        else:
            state_in_ptr = 0

        if state_out is not None:
            self._validate_tensor(state_out, 'state_out', self._state_buffer_element_count)
        else:
            state_out = self._zeros_float32(self._state_buffer_element_count, use_numpy)

        if logits_out is not None:
            self._validate_tensor(logits_out, 'logits_out', self._logits_buffer_element_count)
        else:
            logits_out = self._zeros_float32(self._logits_buffer_element_count, use_numpy)

        self._library.rwkv_eval_sequence(
            self._ctx,
            tokens,
            state_in_ptr,
            self._get_data_ptr(state_out),
            self._get_data_ptr(logits_out)
        )

        return logits_out, state_out

    def eval_sequence_in_chunks(
            self,
            tokens: List[int],
            state_in: Optional[NumpyArrayOrPyTorchTensor],
            state_out: Optional[NumpyArrayOrPyTorchTensor] = None,
            logits_out: Optional[NumpyArrayOrPyTorchTensor] = None,
            chunk_size: int = 16,
            use_numpy: bool = False
    ) -> Tuple[NumpyArrayOrPyTorchTensor, NumpyArrayOrPyTorchTensor]:
        """
        Evaluates the model for a sequence of tokens using `eval_sequence`, splitting a potentially long sequence into fixed-length chunks.
        This function is useful for processing complete prompts and user input in chat & role-playing use-cases.
        It is recommended to use this function instead of `eval_sequence` to avoid mistakes and get maximum performance.

        Chunking allows processing sequences of thousands of tokens, while not reaching the ggml's node limit and not consuming too much memory.
        A reasonable and recommended value of chunk size is 16. If you want maximum performance, try different chunk sizes in range [2..64]
        and choose one that works the best in your use case.

        In case of any error, this method will throw an exception.

        Parameters
        ----------
        tokens : List[int]
            Indices of the next tokens to be seen by the model. Must be in range 0 <= token < n_vocab.
        chunk_size : int
            Size of each chunk in tokens, must be positive.
        state_in : Optional[NumpyArrayOrTorchTensor]
            State from previous call of this method. If this is a first pass, set it to None.
        state_out : Optional[NumpyArrayOrTorchTensor]
            Optional output tensor for state. If provided, must be of type float32, contiguous and of shape (state_buffer_element_count).
        logits_out : Optional[NumpyArrayOrTorchTensor]
            Optional output tensor for logits. If provided, must be of type float32, contiguous and of shape (logits_buffer_element_count).
        use_numpy : bool
            If set to True, numpy's ndarrays will be created instead of PyTorch's Tensors.
            This parameter is ignored if any tensor parameter is not None; in such case,
            type of returned tensors will match the type of received tensors.

        Returns
        -------
        logits, state
            Logits vector of shape (n_vocab); state for the next step.
        """

        assert self._valid, 'Model was freed'

        use_numpy = self._detect_numpy_usage([state_in, state_out, logits_out], use_numpy)

        if state_in is not None:
            self._validate_tensor(state_in, 'state_in', self._state_buffer_element_count)

            state_in_ptr = self._get_data_ptr(state_in)
        else:
            state_in_ptr = 0

        if state_out is not None:
            self._validate_tensor(state_out, 'state_out', self._state_buffer_element_count)
        else:
            state_out = self._zeros_float32(self._state_buffer_element_count, use_numpy)

        if logits_out is not None:
            self._validate_tensor(logits_out, 'logits_out', self._logits_buffer_element_count)
        else:
            logits_out = self._zeros_float32(self._logits_buffer_element_count, use_numpy)

        self._library.rwkv_eval_sequence_in_chunks(
            self._ctx,
            tokens,
            chunk_size,
            state_in_ptr,
            self._get_data_ptr(state_out),
            self._get_data_ptr(logits_out)
        )

        return logits_out, state_out

    def free(self) -> None:
        """
        Frees all allocated resources.
        In case of any error, this method will throw an exception.
        The object must not be used anymore after calling this method.
        """

        assert self._valid, 'Already freed'

        self._valid = False

        self._library.rwkv_free(self._ctx)

    def __del__(self) -> None:
        # Free the context on GC in case user forgot to call free() explicitly.
        if hasattr(self, '_valid') and self._valid:
            self.free()

    def _is_pytorch_tensor(self, tensor: NumpyArrayOrPyTorchTensor) -> bool:
        return hasattr(tensor, '__module__') and tensor.__module__ == 'torch'

    def _detect_numpy_usage(self, tensors: List[Optional[NumpyArrayOrPyTorchTensor]], use_numpy_by_default: bool) -> bool:
        for tensor in tensors:
            if tensor is not None:
                return False if self._is_pytorch_tensor(tensor) else True

        return use_numpy_by_default

    def _validate_tensor(self, tensor: NumpyArrayOrPyTorchTensor, name: str, size: int) -> None:
        if self._is_pytorch_tensor(tensor):
            tensor: torch.Tensor = tensor
            assert tensor.device == torch.device('cpu'), f'{name} is not on CPU'
            assert tensor.dtype == torch.float32, f'{name} is not of type float32'
            assert tensor.shape == (size,), f'{name} has invalid shape {tensor.shape}, expected ({size})'
            assert tensor.is_contiguous(), f'{name} is not contiguous'
        else:
            import numpy as np
            tensor: np.ndarray = tensor
            assert tensor.dtype == np.float32, f'{name} is not of type float32'
            assert tensor.shape == (size,), f'{name} has invalid shape {tensor.shape}, expected ({size})'
            assert tensor.data.contiguous, f'{name} is not contiguous'

    def _get_data_ptr(self, tensor: NumpyArrayOrPyTorchTensor):
        if self._is_pytorch_tensor(tensor):
            return tensor.data_ptr()
        else:
            return tensor.ctypes.data

    def _zeros_float32(self, element_count: int, use_numpy: bool) -> NumpyArrayOrPyTorchTensor:
        if use_numpy:
            import numpy as np
            return np.zeros(element_count, dtype=np.float32)
        else:
            return torch.zeros(element_count, dtype=torch.float32, device='cpu')
```
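
A short greedy-decoding sketch against `RWKVModel` directly, using the numpy path (the model path and token ids are illustrative):

```python
import numpy as np
from rwkv_pip.cpp import rwkv_cpp_model, rwkv_cpp_shared_library

library = rwkv_cpp_shared_library.load_rwkv_shared_library()
model = rwkv_cpp_model.RWKVModel(library, "models/model.bin", thread_count=4)  # illustrative path

prompt_tokens = [510, 4342]  # illustrative token ids
logits, state = model.eval_sequence_in_chunks(prompt_tokens, None, use_numpy=True)

for _ in range(16):
    token = int(np.argmax(logits))  # greedy pick
    logits, state = model.eval(token, state, use_numpy=True)

model.free()
```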
backend-python/rwkv_pip/cpp/rwkv_cpp_shared_library.py (vendored, new file, 444 lines)

```python
import os
import sys
import ctypes
import pathlib
import platform
from typing import Optional, List, Tuple, Callable

QUANTIZED_FORMAT_NAMES: Tuple[str, str, str, str, str] = (
    'Q4_0',
    'Q4_1',
    'Q5_0',
    'Q5_1',
    'Q8_0'
)

P_FLOAT = ctypes.POINTER(ctypes.c_float)
P_INT = ctypes.POINTER(ctypes.c_int32)


class RWKVContext:

    def __init__(self, ptr: ctypes.pointer) -> None:
        self.ptr: ctypes.pointer = ptr


class RWKVSharedLibrary:
    """
    Python wrapper around rwkv.cpp shared library.
    """

    def __init__(self, shared_library_path: str) -> None:
        """
        Loads the shared library from specified file.
        In case of any error, this method will throw an exception.

        Parameters
        ----------
        shared_library_path : str
            Path to rwkv.cpp shared library. On Windows, it would look like 'rwkv.dll'. On UNIX, 'rwkv.so'.
        """
        # When Python is greater than 3.8, we need to reprocess the custom dll
        # according to the documentation to prevent loading failure errors.
        # https://docs.python.org/3/whatsnew/3.8.html#ctypes
        if platform.system().lower() == 'windows':
            self.library = ctypes.CDLL(shared_library_path, winmode=0)
        else:
            self.library = ctypes.cdll.LoadLibrary(shared_library_path)

        self.library.rwkv_init_from_file.argtypes = [ctypes.c_char_p, ctypes.c_uint32]
        self.library.rwkv_init_from_file.restype = ctypes.c_void_p

        self.library.rwkv_gpu_offload_layers.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
        self.library.rwkv_gpu_offload_layers.restype = ctypes.c_bool

        self.library.rwkv_eval.argtypes = [
            ctypes.c_void_p,  # ctx
            ctypes.c_int32,   # token
            P_FLOAT,          # state_in
            P_FLOAT,          # state_out
            P_FLOAT           # logits_out
        ]
        self.library.rwkv_eval.restype = ctypes.c_bool

        self.library.rwkv_eval_sequence.argtypes = [
            ctypes.c_void_p,  # ctx
            P_INT,            # tokens
            ctypes.c_size_t,  # token count
            P_FLOAT,          # state_in
            P_FLOAT,          # state_out
            P_FLOAT           # logits_out
        ]
        self.library.rwkv_eval_sequence.restype = ctypes.c_bool

        self.library.rwkv_eval_sequence_in_chunks.argtypes = [
            ctypes.c_void_p,  # ctx
            P_INT,            # tokens
            ctypes.c_size_t,  # token count
            ctypes.c_size_t,  # chunk size
            P_FLOAT,          # state_in
            P_FLOAT,          # state_out
            P_FLOAT           # logits_out
        ]
        self.library.rwkv_eval_sequence_in_chunks.restype = ctypes.c_bool

        self.library.rwkv_get_n_vocab.argtypes = [ctypes.c_void_p]
        self.library.rwkv_get_n_vocab.restype = ctypes.c_size_t

        self.library.rwkv_get_n_embed.argtypes = [ctypes.c_void_p]
        self.library.rwkv_get_n_embed.restype = ctypes.c_size_t

        self.library.rwkv_get_n_layer.argtypes = [ctypes.c_void_p]
        self.library.rwkv_get_n_layer.restype = ctypes.c_size_t

        self.library.rwkv_get_state_buffer_element_count.argtypes = [ctypes.c_void_p]
        self.library.rwkv_get_state_buffer_element_count.restype = ctypes.c_uint32

        self.library.rwkv_get_logits_buffer_element_count.argtypes = [ctypes.c_void_p]
        self.library.rwkv_get_logits_buffer_element_count.restype = ctypes.c_uint32

        self.library.rwkv_free.argtypes = [ctypes.c_void_p]
        self.library.rwkv_free.restype = None

        self.library.rwkv_quantize_model_file.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
        self.library.rwkv_quantize_model_file.restype = ctypes.c_bool

        self.library.rwkv_get_system_info_string.argtypes = []
        self.library.rwkv_get_system_info_string.restype = ctypes.c_char_p

        self.nullptr = ctypes.cast(0, ctypes.c_void_p)

    def rwkv_init_from_file(self, model_file_path: str, thread_count: int) -> RWKVContext:
        """
        Loads the model from a file and prepares it for inference.
        Throws an exception in case of any error. Error messages would be printed to stderr.

        Parameters
        ----------
        model_file_path : str
            Path to model file in ggml format.
        thread_count : int
            Count of threads to use, must be positive.
        """

        ptr = self.library.rwkv_init_from_file(model_file_path.encode('utf-8'), ctypes.c_uint32(thread_count))

        assert ptr is not None, 'rwkv_init_from_file failed, check stderr'

        return RWKVContext(ptr)

    def rwkv_gpu_offload_layers(self, ctx: RWKVContext, layer_count: int) -> bool:
        """
        Offloads specified count of model layers onto the GPU. Offloaded layers are evaluated using cuBLAS or CLBlast.
        For the purposes of this function, model head (unembedding matrix) is treated as an additional layer:
        - pass `rwkv_get_n_layer(ctx)` to offload all layers except model head
        - pass `rwkv_get_n_layer(ctx) + 1` to offload all layers, including model head
        Returns true if at least one layer was offloaded.
        If rwkv.cpp was compiled without cuBLAS and CLBlast support, this function is a no-op and always returns false.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        layer_count : int
            Count of layers to offload onto the GPU, must be >= 0.
        """

        assert layer_count >= 0, 'Layer count must be >= 0'

        return self.library.rwkv_gpu_offload_layers(ctx.ptr, ctypes.c_uint32(layer_count))

    def rwkv_eval(
            self,
            ctx: RWKVContext,
            token: int,
            state_in_address: Optional[int],
            state_out_address: int,
            logits_out_address: int
    ) -> None:
        """
        Evaluates the model for a single token.
        Throws an exception in case of any error. Error messages would be printed to stderr.
        Not thread-safe. For parallel inference, call rwkv_clone_context to create one rwkv_context for each thread.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        token : int
            Next token index, in range 0 <= token < n_vocab.
        state_in_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count; or None, if this is a first pass.
        state_out_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count. This buffer will be written to.
        logits_out_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_logits_buffer_element_count. This buffer will be written to.
        """

        assert self.library.rwkv_eval(
            ctx.ptr,
            ctypes.c_int32(token),
            ctypes.cast(0 if state_in_address is None else state_in_address, P_FLOAT),
            ctypes.cast(state_out_address, P_FLOAT),
            ctypes.cast(logits_out_address, P_FLOAT)
        ), 'rwkv_eval failed, check stderr'

    def rwkv_eval_sequence(
            self,
            ctx: RWKVContext,
            tokens: List[int],
            state_in_address: Optional[int],
            state_out_address: int,
            logits_out_address: int
    ) -> None:
        """
        Evaluates the model for a sequence of tokens.
        Uses a faster algorithm than `rwkv_eval` if you do not need the state and logits for every token. Best used with sequence lengths of 64 or so.
        Has to build a computation graph on the first call for a given sequence, but will use this cached graph for subsequent calls of the same sequence length.

        NOTE ON GGML NODE LIMIT

        ggml has a hard-coded limit on max amount of nodes in a computation graph. The sequence graph is built in a way that quickly exceeds
        this limit when using large models and/or large sequence lengths.
        Fortunately, rwkv.cpp's fork of ggml has an increased limit which was tested to work for sequence lengths up to 64 for 14B models.

        If you get `GGML_ASSERT: ...\\ggml.c:16941: cgraph->n_nodes < GGML_MAX_NODES`, this means you've exceeded the limit.
        To get rid of the assertion failure, reduce the model size and/or sequence length.

        Not thread-safe. For parallel inference, call `rwkv_clone_context` to create one rwkv_context for each thread.
        Throws an exception in case of any error. Error messages would be printed to stderr.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        tokens : List[int]
            Next token indices, in range 0 <= token < n_vocab.
        state_in_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count; or None, if this is a first pass.
        state_out_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count. This buffer will be written to.
        logits_out_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_logits_buffer_element_count. This buffer will be written to.
        """

        assert self.library.rwkv_eval_sequence(
            ctx.ptr,
            ctypes.cast((ctypes.c_int32 * len(tokens))(*tokens), P_INT),
            ctypes.c_size_t(len(tokens)),
            ctypes.cast(0 if state_in_address is None else state_in_address, P_FLOAT),
            ctypes.cast(state_out_address, P_FLOAT),
            ctypes.cast(logits_out_address, P_FLOAT)
        ), 'rwkv_eval_sequence failed, check stderr'

    def rwkv_eval_sequence_in_chunks(
            self,
            ctx: RWKVContext,
            tokens: List[int],
            chunk_size: int,
            state_in_address: Optional[int],
            state_out_address: int,
            logits_out_address: int
    ) -> None:
        """
        Evaluates the model for a sequence of tokens using `rwkv_eval_sequence`, splitting a potentially long sequence into fixed-length chunks.
        This function is useful for processing complete prompts and user input in chat & role-playing use-cases.
        It is recommended to use this function instead of `rwkv_eval_sequence` to avoid mistakes and get maximum performance.

        Chunking allows processing sequences of thousands of tokens, while not reaching the ggml's node limit and not consuming too much memory.
        A reasonable and recommended value of chunk size is 16. If you want maximum performance, try different chunk sizes in range [2..64]
        and choose one that works the best in your use case.

        Not thread-safe. For parallel inference, call `rwkv_clone_context` to create one rwkv_context for each thread.
        Throws an exception in case of any error. Error messages would be printed to stderr.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        tokens : List[int]
            Next token indices, in range 0 <= token < n_vocab.
        chunk_size : int
            Size of each chunk in tokens, must be positive.
        state_in_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count; or None, if this is a first pass.
        state_out_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count. This buffer will be written to.
        logits_out_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_logits_buffer_element_count. This buffer will be written to.
        """

        assert self.library.rwkv_eval_sequence_in_chunks(
            ctx.ptr,
            ctypes.cast((ctypes.c_int32 * len(tokens))(*tokens), P_INT),
            ctypes.c_size_t(len(tokens)),
            ctypes.c_size_t(chunk_size),
            ctypes.cast(0 if state_in_address is None else state_in_address, P_FLOAT),
            ctypes.cast(state_out_address, P_FLOAT),
            ctypes.cast(logits_out_address, P_FLOAT)
        ), 'rwkv_eval_sequence_in_chunks failed, check stderr'

    def rwkv_get_n_vocab(self, ctx: RWKVContext) -> int:
        """
        Returns the number of tokens in the given model's vocabulary.
        Useful for telling 20B_tokenizer models (n_vocab = 50277) apart from World models (n_vocab = 65536).

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        """

        return self.library.rwkv_get_n_vocab(ctx.ptr)

    def rwkv_get_n_embed(self, ctx: RWKVContext) -> int:
        """
        Returns the number of elements in the given model's embedding.
        Useful for reading individual fields of a model's hidden state.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        """

        return self.library.rwkv_get_n_embed(ctx.ptr)

    def rwkv_get_n_layer(self, ctx: RWKVContext) -> int:
        """
        Returns the number of layers in the given model.
        A layer is a pair of RWKV and FFN operations, stacked multiple times throughout the model.
        Embedding matrix and model head (unembedding matrix) are NOT counted in `n_layer`.
        Useful for always offloading the entire model to GPU.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        """

        return self.library.rwkv_get_n_layer(ctx.ptr)

    def rwkv_get_state_buffer_element_count(self, ctx: RWKVContext) -> int:
        """
        Returns count of FP32 elements in state buffer.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        """

        return self.library.rwkv_get_state_buffer_element_count(ctx.ptr)

    def rwkv_get_logits_buffer_element_count(self, ctx: RWKVContext) -> int:
        """
        Returns count of FP32 elements in logits buffer.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        """

        return self.library.rwkv_get_logits_buffer_element_count(ctx.ptr)

    def rwkv_free(self, ctx: RWKVContext) -> None:
        """
        Frees all allocated memory and the context.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        """

        self.library.rwkv_free(ctx.ptr)

        ctx.ptr = self.nullptr

    def rwkv_quantize_model_file(self, model_file_path_in: str, model_file_path_out: str, format_name: str) -> None:
        """
        Quantizes FP32 or FP16 model to one of INT4 formats.
        Throws an exception in case of any error. Error messages would be printed to stderr.

        Parameters
        ----------
        model_file_path_in : str
            Path to model file in ggml format, must be either FP32 or FP16.
        model_file_path_out : str
            Quantized model will be written here.
        format_name : str
            One of QUANTIZED_FORMAT_NAMES.
        """

        assert format_name in QUANTIZED_FORMAT_NAMES, f'Unknown format name {format_name}, use one of {QUANTIZED_FORMAT_NAMES}'

        assert self.library.rwkv_quantize_model_file(
            model_file_path_in.encode('utf-8'),
            model_file_path_out.encode('utf-8'),
            format_name.encode('utf-8')
        ), 'rwkv_quantize_model_file failed, check stderr'

    def rwkv_get_system_info_string(self) -> str:
        """
        Returns system information string.
        """

        return self.library.rwkv_get_system_info_string().decode('utf-8')


def load_rwkv_shared_library() -> RWKVSharedLibrary:
    """
    Attempts to find rwkv.cpp shared library and load it.
    To specify exact path to the library, create an instance of RWKVSharedLibrary explicitly.
    """

    file_name: str

    if 'win32' in sys.platform or 'cygwin' in sys.platform:
        file_name = 'rwkv.dll'
    elif 'darwin' in sys.platform:
        file_name = 'librwkv.dylib'
    else:
        file_name = 'librwkv.so'

    # Possible sub-paths to the library relative to the repo dir.
    child_paths: List[Callable[[pathlib.Path], pathlib.Path]] = [
        # No lookup for Debug config here.
        # I assume that if a user wants to debug the library,
        # they will be able to find the library and set the exact path explicitly.
        lambda p: p / 'backend-python' / 'rwkv_pip' / 'cpp' / file_name,
        lambda p: p / 'bin' / 'Release' / file_name,
        lambda p: p / 'bin' / file_name,
        # Some people prefer to build in the "build" subdirectory.
        lambda p: p / 'build' / 'bin' / 'Release' / file_name,
        lambda p: p / 'build' / 'bin' / file_name,
        lambda p: p / 'build' / file_name,
        # Fallback.
        lambda p: p / file_name
    ]

    working_dir: pathlib.Path = pathlib.Path(os.path.abspath(os.getcwd()))

    parent_paths: List[pathlib.Path] = [
        # Possible repo dirs relative to the working dir.
        # ./python/rwkv_cpp
        working_dir.parent.parent,
        # ./python
        working_dir.parent,
        # .
        working_dir,
        # Repo dir relative to this Python file.
        pathlib.Path(os.path.abspath(__file__)).parent.parent.parent
    ]

    for parent_path in parent_paths:
        for child_path in child_paths:
            full_path: pathlib.Path = child_path(parent_path)

            if os.path.isfile(full_path):
                return RWKVSharedLibrary(str(full_path))

    assert False, (f'Failed to find {file_name} automatically; '
                   f'you need to find the library and create RWKVSharedLibrary specifying the path to it')
```
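
Loading and quantization can be driven from this wrapper in a few lines; a sketch with illustrative file names:

```python
from rwkv_pip.cpp import rwkv_cpp_shared_library

library = rwkv_cpp_shared_library.load_rwkv_shared_library()
print(library.rwkv_get_system_info_string())

# Quantize an FP16 ggml file to Q5_1 (paths are illustrative).
library.rwkv_quantize_model_file("model-fp16.bin", "model-Q5_1.bin", "Q5_1")
```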
18
backend-python/rwkv_pip/utils.py
vendored
18
backend-python/rwkv_pip/utils.py
vendored
@@ -78,12 +78,24 @@ class PIPELINE:
     def decode(self, x):
         return self.tokenizer.decode(x)

+    def np_softmax(self, x: np.ndarray, axis: int):
+        x -= x.max(axis=axis, keepdims=True)
+        e: np.ndarray = np.exp(x)
+        return e / e.sum(axis=axis, keepdims=True)
+
     def sample_logits(self, logits, temperature=1.0, top_p=0.85, top_k=0):
-        probs = F.softmax(logits.float(), dim=-1)
+        if type(logits) == list:
+            logits = np.array(logits)
+        np_logits = type(logits) == np.ndarray
+        if np_logits:
+            probs = self.np_softmax(logits, axis=-1)
+        else:
+            probs = F.softmax(logits.float(), dim=-1)
         top_k = int(top_k)
         # 'privateuseone' is the type of custom devices like `torch_directml.device()`
-        if probs.device.type in ["cpu", "privateuseone"]:
-            probs = probs.cpu().numpy()
+        if np_logits or probs.device.type in ["cpu", "privateuseone"]:
+            if not np_logits:
+                probs = probs.cpu().numpy()
             sorted_ids = np.argsort(probs)
             sorted_probs = probs[sorted_ids][::-1]
             cumulative_probs = np.cumsum(sorted_probs)
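The numpy branch added here lets `sample_logits` run entirely without torch when logits arrive as a list or ndarray (as the rwkv.cpp and WebGPU backends deliver them). For clarity, a self-contained sketch of the same top-p (nucleus) filtering on a numpy array, with illustrative values:

```python
import numpy as np

def sample_top_p(logits: np.ndarray, temperature: float = 1.0, top_p: float = 0.85) -> int:
    # Numerically stable softmax, as in np_softmax above.
    logits = logits - logits.max()
    probs = np.exp(logits) / np.exp(logits).sum()
    # Sort descending; keep the smallest prefix whose mass reaches top_p.
    sorted_probs = np.sort(probs)[::-1]
    cutoff = sorted_probs[int(np.argmax(np.cumsum(sorted_probs) >= top_p))]
    probs[probs < cutoff] = 0.0
    if temperature != 1.0:
        probs = probs ** (1.0 / temperature)
    probs /= probs.sum()
    return int(np.random.choice(len(probs), p=probs))

print(sample_top_p(np.array([2.0, 1.0, 0.1, -1.0])))
```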
26
backend-python/rwkv_pip/webgpu/model.py
vendored
Normal file
@@ -0,0 +1,26 @@
from typing import Any, List, Union

try:
    import web_rwkv_py as wrp
except ModuleNotFoundError:
    try:
        from . import web_rwkv_py as wrp
    except ImportError:
        raise ModuleNotFoundError(
            "web_rwkv_py not found, install it from https://github.com/cryscan/web-rwkv-py"
        )


class RWKV:
    def __init__(self, model_path: str, strategy: str = None):
        self.model = wrp.v5.Model(
            model_path,
            turbo=False,
            quant=32 if "i8" in strategy else None,
            quant_nf4=26 if "i4" in strategy else None,
        )
        self.w = {}  # fake weight
        self.w["emb.weight"] = [0] * wrp.peek_info(model_path).num_vocab

    def forward(self, tokens: List[int], state: Union[Any, None] = None):
        return wrp.v5.run_one(self.model, tokens, state)
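A hypothetical instantiation of this wrapper (the `.st` path is a placeholder; the strategy string only matters insofar as it contains 'i8' or 'i4'):

```python
# Hypothetical usage of the web_rwkv_py wrapper above.
model = RWKV('models/rwkv-5-world.st', strategy='fp16i8')  # 'i8' turns on int8 quant
state = None
# Assuming wrp.v5.run_one returns (logits, state), as the forward() wrapper implies.
logits, state = model.forward([510, 444], state)  # token ids are placeholders
```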
BIN
backend-python/rwkv_pip/webgpu/web_rwkv_py.cp310-win_amd64.pyd
vendored
Normal file
Binary file not shown.
71
backend-python/utils/midi.py
vendored
@@ -52,6 +52,8 @@ class VocabConfig:
     bin_name_to_program_name: Dict[str, str]
     # Mapping from program number to instrument name.
     instrument_names: Dict[str, str]
+    # Manual override for velocity bins. Each element is the max velocity value for that bin by index.
+    velocity_bins_override: Optional[List[int]] = None

     def __post_init__(self):
         self.validate()

@@ -116,6 +118,12 @@ class VocabConfig:
             raise ValueError("velocity_bins must be at least 2")
         if len(self.bin_instrument_names) > 16:
             raise ValueError("bin_instruments must have at most 16 values")
+        if self.velocity_bins_override:
+            print("VocabConfig is using velocity_bins_override. Ignoring velocity_exp.")
+            if len(self.velocity_bins_override) != self.velocity_bins:
+                raise ValueError(
+                    "velocity_bins_override must have same length as velocity_bins"
+                )
         if (
             self.ch10_instrument_bin_name
             and self.ch10_instrument_bin_name not in self.bin_instrument_names

@@ -156,6 +164,11 @@ class VocabUtils:

     def velocity_to_bin(self, velocity: float) -> int:
         velocity = max(0, min(velocity, self.cfg.velocity_events - 1))
+        if self.cfg.velocity_bins_override:
+            for i, v in enumerate(self.cfg.velocity_bins_override):
+                if velocity <= v:
+                    return i
+            return 0
         binsize = self.cfg.velocity_events / (self.cfg.velocity_bins - 1)
         if self.cfg.velocity_exp == 1.0:
             return ceil(velocity / binsize)

@@ -176,6 +189,8 @@ class VocabUtils:
         )

     def bin_to_velocity(self, bin: int) -> int:
+        if self.cfg.velocity_bins_override:
+            return self.cfg.velocity_bins_override[bin]
         binsize = self.cfg.velocity_events / (self.cfg.velocity_bins - 1)
         if self.cfg.velocity_exp == 1.0:
             return max(0, ceil(bin * binsize - 1))

@@ -358,13 +373,32 @@ class AugmentConfig:
         )


+@dataclass
+class FilterConfig:
+    # Whether to filter out MIDI files with duplicate MD5 hashes.
+    deduplicate_md5: bool
+    # Minimum time delay between notes in a file before splitting into multiple documents.
+    piece_split_delay: float
+    # Minimum length of a piece in milliseconds.
+    min_piece_length: float
+
+    @classmethod
+    def from_json(cls, path: str):
+        with open(path, "r") as f:
+            config = json.load(f)
+        return cls(**config)
+
+
 def mix_volume(velocity: int, volume: int, expression: int) -> float:
     return velocity * (volume / 127.0) * (expression / 127.0)


 def convert_midi_to_str(
-    cfg: VocabConfig, mid: mido.MidiFile, augment: AugmentValues = None
-) -> str:
+    cfg: VocabConfig,
+    filter_cfg: FilterConfig,
+    mid: mido.MidiFile,
+    augment: AugmentValues = None,
+) -> List[str]:
     utils = VocabUtils(cfg)
     if augment is None:
         augment = AugmentValues.default()

@@ -390,7 +424,9 @@ def convert_midi_to_str(
     }  # {channel: {(note, program) -> True}}
     started_flag = False

+    output_list = []
     output = ["<start>"]
+    output_length_ms = 0.0
     token_data_buffer: List[
         Tuple[int, int, int, float]
     ] = []  # need to sort notes between wait tokens

@@ -432,16 +468,33 @@ def convert_midi_to_str(
         token_data_buffer = []

     def consume_note_program_data(prog: int, chan: int, note: int, vel: float):
-        nonlocal output, started_flag, delta_time_ms, cfg, utils, token_data_buffer
+        nonlocal output, output_length_ms, started_flag, delta_time_ms, cfg, utils, token_data_buffer
         is_token_valid = (
             utils.prog_data_to_token_data(prog, chan, note, vel) is not None
         )
         if not is_token_valid:
             return
+        if delta_time_ms > filter_cfg.piece_split_delay * 1000.0:
+            # check if any notes are still held
+            silent = True
+            for channel in channel_notes.keys():
+                if len(channel_notes[channel]) > 0:
+                    silent = False
+                    break
+            if silent:
+                flush_token_data_buffer()
+                output.append("<end>")
+                if output_length_ms > filter_cfg.min_piece_length * 1000.0:
+                    output_list.append(" ".join(output))
+                output = ["<start>"]
+                output_length_ms = 0.0
+                started_flag = False
         if started_flag:
             wait_tokens = utils.data_to_wait_tokens(delta_time_ms)
             if len(wait_tokens) > 0:
                 flush_token_data_buffer()
+                output_length_ms += delta_time_ms
                 output += wait_tokens
         delta_time_ms = 0.0
         token_data_buffer.append((prog, chan, note, vel * augment.velocity_mod_factor))

@@ -510,7 +563,9 @@ def convert_midi_to_str(

     flush_token_data_buffer()
     output.append("<end>")
-    return " ".join(output)
+    if output_length_ms > filter_cfg.min_piece_length * 1000.0:
+        output_list.append(" ".join(output))
+    return output_list


 def generate_program_change_messages(cfg: VocabConfig):

@@ -633,10 +688,10 @@ def token_to_midi_message(
         if utils.cfg.decode_fix_repeated_notes:
             if (channel, note) in state.active_notes:
                 del state.active_notes[(channel, note)]
-            yield mido.Message(
-                "note_off", note=note, time=ticks, channel=channel
-            ), state
-            ticks = 0
+                yield mido.Message(
+                    "note_off", note=note, time=ticks, channel=channel
+                ), state
+                ticks = 0
         state.active_notes[(channel, note)] = state.total_time
         yield mido.Message(
             "note_on", note=note, velocity=velocity, time=ticks, channel=channel
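To make the override concrete, here is a small standalone sketch of the two bin mappings added above (bin boundaries are illustrative, not taken from any shipped config):

```python
# Illustrative velocity binning with an override table.
velocity_bins_override = [16, 48, 96, 127]  # max velocity per bin, by index

def velocity_to_bin(velocity: float) -> int:
    for i, v in enumerate(velocity_bins_override):
        if velocity <= v:
            return i
    return 0

def bin_to_velocity(bin: int) -> int:
    return velocity_bins_override[bin]

assert velocity_to_bin(50) == 2   # 50 <= 96, so bin 2
assert bin_to_velocity(2) == 96   # decoding yields the bin's max velocity
```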
5
backend-python/utils/midi_filter_config.json
Normal file
@@ -0,0 +1,5 @@
{
    "deduplicate_md5": true,
    "piece_split_delay": 10000,
    "min_piece_length": 0
}
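Putting the new pieces together, a sketch of how a caller might drive the new splitting API; the MIDI filename is a placeholder, and the VocabConfig loader call is an assumption (check how the rest of midi.py constructs it):

```python
import mido

# Hypothetical driver for the new convert_midi_to_str signature.
cfg = VocabConfig.from_json("backend-python/utils/midi_vocab_config.json")  # assumed loader
filter_cfg = FilterConfig.from_json("backend-python/utils/midi_filter_config.json")

mid = mido.MidiFile("song.mid")  # placeholder file
pieces = convert_midi_to_str(cfg, filter_cfg, mid)  # now returns List[str], one per piece
for i, piece in enumerate(pieces):
    print(i, piece[:40], "...")
```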
@@ -8,7 +8,6 @@ from typing import Dict, Iterable, List, Tuple, Union, Type
 from utils.log import quick_log
 from fastapi import HTTPException
 from pydantic import BaseModel, Field
-import numpy as np
 from routes import state_cache
 import global_var

@@ -68,6 +67,8 @@ class AbstractRWKV(ABC):
         pass

     def get_embedding(self, input: str, fast_mode: bool) -> Tuple[List[float], int]:
+        import numpy as np
+
         if fast_mode:
             embedding, token_len = self.__fast_embedding(
                 self.fix_tokens(self.pipeline.encode(input)), None

@@ -222,6 +223,8 @@ class AbstractRWKV(ABC):
     def generate(
         self, prompt: str, stop: Union[str, List[str], None] = None
     ) -> Iterable[Tuple[str, str, int, int]]:
+        import numpy as np
+
         quick_log(None, None, "Generation Prompt:\n" + prompt)
         cache = None
         delta_prompt = prompt

@@ -231,7 +234,7 @@ class AbstractRWKV(ABC):
             )
         except HTTPException:
             pass
-        if cache is None or cache["prompt"] == "":
+        if cache is None or cache["prompt"] == "" or cache["state"] is None:
             self.model_state = None
             self.model_tokens = []
         else:

@@ -510,15 +513,28 @@ def get_tokenizer(tokenizer_len: int):

 def RWKV(model: str, strategy: str, tokenizer: Union[str, None]) -> AbstractRWKV:
     rwkv_beta = global_var.get(global_var.Args).rwkv_beta
+    rwkv_cpp = getattr(global_var.get(global_var.Args), "rwkv.cpp")
+    webgpu = global_var.get(global_var.Args).webgpu

     if "midi" in model.lower() or "abc" in model.lower():
         os.environ["RWKV_RESCALE_LAYER"] = "999"

     # dynamic import to make RWKV_CUDA_ON work
     if rwkv_beta:
+        print("Using rwkv-beta")
         from rwkv_pip.beta.model import (
             RWKV as Model,
         )
+    elif rwkv_cpp:
+        print("Using rwkv.cpp, strategy is ignored")
+        from rwkv_pip.cpp.model import (
+            RWKV as Model,
+        )
+    elif webgpu:
+        print("Using webgpu")
+        from rwkv_pip.webgpu.model import (
+            RWKV as Model,
+        )
     else:
         from rwkv_pip.model import (
             RWKV as Model,
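One detail worth noting in the dispatch above: the launch flag is registered as `rwkv.cpp`, so the attribute name contains a dot and cannot be read with ordinary attribute syntax; `getattr` is the only way in. A minimal illustration with a stand-in args object:

```python
from types import SimpleNamespace

# Stand-in for the parsed Args object; the real flag name contains a dot.
args = SimpleNamespace(**{"rwkv.cpp": True, "webgpu": False})

# args.rwkv.cpp would look up an attribute named "rwkv" and fail; getattr works:
use_rwkv_cpp = getattr(args, "rwkv.cpp")
print(use_rwkv_cpp)  # True
```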
5
build/darwin/Readme_Install.txt
vendored
@@ -1,3 +1,8 @@
+Client Download URL:
+客户端下载地址:
+クライアントのダウンロードURL:
+https://github.com/josStorer/RWKV-Runner/releases/latest/download/RWKV-Runner_macos_universal.zip
+
 For Mac and Linux users, please manually install Python 3.10 (usually the latest systems come with it built-in). You can specify the Python interpreter to use in Settings. (which python3)
 对于Mac和Linux用户,请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器. (which python3)
 MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。 (which python3)
5
build/linux/Readme_Install.txt
vendored
@@ -1,3 +1,8 @@
+Client Download URL:
+客户端下载地址:
+クライアントのダウンロードURL:
+https://github.com/josStorer/RWKV-Runner/releases/latest/download/RWKV-Runner_linux_x64
+
 For Mac and Linux users, please manually install Python 3.10 (usually the latest systems come with it built-in). You can specify the Python interpreter to use in Settings.
 对于Mac和Linux用户,请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器.
 MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。
5
build/windows/Readme_Install.txt
vendored
@@ -1,3 +1,8 @@
+Client Download URL:
+客户端下载地址:
+クライアントのダウンロードURL:
+https://github.com/josStorer/RWKV-Runner/releases/latest/download/RWKV-Runner_windows_x64.exe
+
 Please execute this program in an empty directory. All related dependencies will be placed in this directory.
 请将本程序放在一个空目录内执行, 所有相关依赖均会放置于此目录.
 このプログラムを空のディレクトリで実行してください。関連するすべての依存関係は、このディレクトリに配置されます。
@@ -128,7 +128,7 @@
   "Chinese Kongfu": "中国武術",
   "Allow external access to the API (service must be restarted)": "APIへの外部アクセスを許可する (サービスを再起動する必要があります)",
   "Custom": "カスタム",
-  "CUDA (Beta, Faster)": "CUDA (ベータ、高速)",
+  "CUDA (Beta, Faster)": "CUDA (Beta, 高速)",
   "Reset All Configs": "すべての設定をリセット",
   "Cancel": "キャンセル",
   "Confirm": "確認",

@@ -313,5 +313,13 @@
   "Music": "音楽",
   "Other": "その他",
   "Import MIDI": "MIDIをインポート",
-  "Current Instrument": "現在の楽器"
+  "Current Instrument": "現在の楽器",
+  "Please convert model to GGML format first": "モデルをGGML形式に変換してください",
+  "Convert To GGML Format": "GGML形式に変換",
+  "CPU (rwkv.cpp, Faster)": "CPU (rwkv.cpp, 高速)",
+  "Core API URL": "コアAPI URL",
+  "Override core API URL(/chat/completions and /completions). If you don't know what this is, leave it blank.": "コアAPI URLを上書きします(/chat/completions と /completions)。何であるかわからない場合は空白のままにしてください。",
+  "Play With External Player": "外部プレーヤーで再生",
+  "Please change Strategy to CPU (rwkv.cpp) to use ggml format": "StrategyをCPU (rwkv.cpp)に変更して、ggml形式を使用してください",
+  "Only Auto Play Generated Content": "生成されたコンテンツのみ自動再生"
 }
@@ -313,5 +313,13 @@
   "Music": "音乐",
   "Other": "其他",
   "Import MIDI": "导入MIDI",
-  "Current Instrument": "当前乐器"
+  "Current Instrument": "当前乐器",
+  "Please convert model to GGML format first": "请先将模型转换为GGML格式",
+  "Convert To GGML Format": "转换为GGML格式",
+  "CPU (rwkv.cpp, Faster)": "CPU (rwkv.cpp, 更快)",
+  "Play With External Player": "使用外部播放器播放",
+  "Core API URL": "核心 API URL",
+  "Override core API URL(/chat/completions and /completions). If you don't know what this is, leave it blank.": "覆盖核心的 API URL (/chat/completions 和 /completions)。如果你不知道这是什么,请留空",
+  "Please change Strategy to CPU (rwkv.cpp) to use ggml format": "请将Strategy改为CPU (rwkv.cpp)以使用ggml格式",
+  "Only Auto Play Generated Content": "仅自动播放新生成的内容"
 }
@@ -17,7 +17,8 @@ import { ToolTipButton } from './ToolTipButton';
 import { Play16Regular, Stop16Regular } from '@fluentui/react-icons';
 import { useNavigate } from 'react-router';
 import { WindowShow } from '../../wailsjs/runtime';
-import { convertToSt } from '../utils/convert-to-st';
+import { convertToGGML, convertToSt } from '../utils/convert-model';
+import { Precision } from '../types/configs';

 const mainButtonText = {
   [ModelStatus.Offline]: 'Run',

@@ -47,6 +48,8 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean

     const modelConfig = commonStore.getCurrentModelConfig();
     const webgpu = modelConfig.modelParameters.device === 'WebGPU';
+    const webgpuPython = modelConfig.modelParameters.device === 'WebGPU (Python)';
+    const cpp = modelConfig.modelParameters.device === 'CPU (rwkv.cpp)';
     let modelName = '';
     let modelPath = '';
     if (modelConfig && modelConfig.modelParameters) {

@@ -75,7 +78,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
       });
     };

-    if (webgpu) {
+    if (webgpu || webgpuPython) {
       if (!['.st', '.safetensors'].some(ext => modelPath.endsWith(ext))) {
         const stModelPath = modelPath.replace(/\.pth$/, '.st');
         if (await FileExists(stModelPath)) {

@@ -90,7 +93,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
           return;
         } else {
           toastWithButton(t('Please convert model to safe tensors format first'), t('Convert'), () => {
-            convertToSt(modelConfig);
+            convertToSt(modelConfig, navigate);
           });
           commonStore.setStatus({ status: ModelStatus.Offline });
           return;

@@ -98,7 +101,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
       }
     }

-    if (!webgpu) {
+    if (!webgpu && !webgpuPython) {
       if (['.st', '.safetensors'].some(ext => modelPath.endsWith(ext))) {
         toast(t('Please change Strategy to WebGPU to use safetensors format'), { type: 'error' });
         commonStore.setStatus({ status: ModelStatus.Offline });

@@ -112,6 +115,38 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
       return;
     }

+    if (cpp) {
+      if (!['.bin'].some(ext => modelPath.endsWith(ext))) {
+        const precision: Precision = modelConfig.modelParameters.precision === 'Q5_1' ? 'Q5_1' : 'fp16';
+        const ggmlModelPath = modelPath.replace(/\.pth$/, `-${precision}.bin`);
+        if (await FileExists(ggmlModelPath)) {
+          modelPath = ggmlModelPath;
+        } else if (!await FileExists(modelPath)) {
+          showDownloadPrompt(t('Model file not found'), modelName);
+          commonStore.setStatus({ status: ModelStatus.Offline });
+          return;
+        } else if (!currentModelSource?.isComplete) {
+          showDownloadPrompt(t('Model file download is not complete'), modelName);
+          commonStore.setStatus({ status: ModelStatus.Offline });
+          return;
+        } else {
+          toastWithButton(t('Please convert model to GGML format first'), t('Convert'), () => {
+            convertToGGML(modelConfig, navigate);
+          });
+          commonStore.setStatus({ status: ModelStatus.Offline });
+          return;
+        }
+      }
+    }
+
+    if (!cpp) {
+      if (['.bin'].some(ext => modelPath.endsWith(ext))) {
+        toast(t('Please change Strategy to CPU (rwkv.cpp) to use ggml format'), { type: 'error' });
+        commonStore.setStatus({ status: ModelStatus.Offline });
+        return;
+      }
+    }
+
     if (!await FileExists(modelPath)) {
       showDownloadPrompt(t('Model file not found'), modelName);
       commonStore.setStatus({ status: ModelStatus.Offline });

@@ -142,7 +177,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
       const isUsingCudaBeta = modelConfig.modelParameters.device === 'CUDA-Beta';

       startServer(commonStore.settings.customPythonPath, port, commonStore.settings.host !== '127.0.0.1' ? '0.0.0.0' : '127.0.0.1',
-        !!modelConfig.enableWebUI, isUsingCudaBeta
+        !!modelConfig.enableWebUI, isUsingCudaBeta, cpp, webgpuPython
       ).catch((e) => {
         const errMsg = e.message || e;
         if (errMsg.includes('path contains space'))

@@ -182,7 +217,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean

       const strategy = getStrategy(modelConfig);
       let customCudaFile = '';
-      if ((modelConfig.modelParameters.device.includes('CUDA') || modelConfig.modelParameters.device === 'Custom')
+      if ((modelConfig.modelParameters.device.startsWith('CUDA') || modelConfig.modelParameters.device === 'Custom')
         && modelConfig.modelParameters.useCustomCuda
         && !strategy.split('->').some(s => ['cuda', 'fp32'].every(v => s.includes(v)))) {
         if (commonStore.platform === 'windows') {

@@ -230,7 +265,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
     navigate({ pathname: '/' + buttonName.toLowerCase() });
   };

-  if ((modelConfig.modelParameters.device === 'CUDA' || modelConfig.modelParameters.device === 'CUDA-Beta') &&
+  if (modelConfig.modelParameters.device.startsWith('CUDA') &&
     modelConfig.modelParameters.storedLayers < modelConfig.modelParameters.maxStoredLayers &&
     commonStore.monitorData && commonStore.monitorData.totalVram !== 0 &&
     (commonStore.monitorData.usedVram / commonStore.monitorData.totalVram) < 0.9)
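The file-resolution rule added above is easy to restate: each strategy expects a specific on-disk format derived from the base `.pth` name, and the run flow either finds that file or offers the matching conversion. A language-neutral restatement in Python (names are illustrative, not the app's actual code):

```python
# Sketch of the format-resolution rule used by the Run button.
def expected_model_path(base_pth: str, device: str, precision: str) -> str:
    if device.startswith('WebGPU'):
        return base_pth.replace('.pth', '.st')        # safetensors
    if device == 'CPU (rwkv.cpp)':
        p = 'Q5_1' if precision == 'Q5_1' else 'fp16'
        return base_pth.replace('.pth', f'-{p}.bin')  # ggml
    return base_pth                                   # torch checkpoint

print(expected_model_path('rwkv-4.pth', 'CPU (rwkv.cpp)', 'Q5_1'))  # rwkv-4-Q5_1.bin
```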
@@ -436,7 +436,7 @@ const ChatPanel: FC = observer(() => {
     const chatSseController = new AbortController();
     chatSseControllers[answerId] = chatSseController;
     fetchEventSource( // https://api.openai.com/v1/chat/completions || http://127.0.0.1:${port}/v1/chat/completions
-      getServerRoot(port) + '/v1/chat/completions',
+      getServerRoot(port, true) + '/v1/chat/completions',
       {
         method: 'POST',
         headers: {
@@ -82,7 +82,7 @@ const CompletionPanel: FC = observer(() => {
     let answer = '';
     completionSseController = new AbortController();
     fetchEventSource( // https://api.openai.com/v1/completions || http://127.0.0.1:${port}/v1/completions
-      getServerRoot(port) + '/v1/completions',
+      getServerRoot(port, true) + '/v1/completions',
       {
         method: 'POST',
         headers: {
@@ -21,7 +21,9 @@ import {
   FileExists,
   OpenFileFolder,
   OpenMidiPort,
-  OpenSaveFileDialogBytes
+  OpenSaveFileDialogBytes,
+  SaveFile,
+  StartFile
 } from '../../wailsjs/go/backend_golang/App';
 import { getServerRoot, getSoundFont, toastWithButton } from '../utils';
 import { CompositionParams } from '../types/composition';

@@ -98,6 +100,31 @@ const CompositionPanel: FC = observer(() => {
     }
   }, []);

+  const externalPlayListener = () => {
+    const params = commonStore.compositionParams;
+    const saveAndPlay = async (midi: ArrayBuffer, path: string) => {
+      await SaveFile(path, Array.from(new Uint8Array(midi)));
+      StartFile(path);
+    };
+    if (params.externalPlay) {
+      if (params.midi) {
+        setTimeout(() => {
+          playerRef.current?.stop();
+        });
+        saveAndPlay(params.midi, './midi/last.mid').catch((e: string) => {
+          if (e.includes('being used'))
+            saveAndPlay(params.midi!, './midi/last-2.mid');
+        });
+      }
+    }
+  };
+  useEffect(() => {
+    playerRef.current?.addEventListener('start', externalPlayListener);
+    return () => {
+      playerRef.current?.removeEventListener('start', externalPlayListener);
+    };
+  }, [params.externalPlay]);
+
   useEffect(() => {
     if (!(commonStore.activeMidiDeviceIndex in commonStore.midiPorts)) {
       commonStore.setActiveMidiDeviceIndex(-1);

@@ -123,9 +150,16 @@ const CompositionPanel: FC = observer(() => {
       });
       updateNs(ns);
       if (autoPlay) {
-        setTimeout(() => {
-          playerRef.current?.start();
-        });
+        if (commonStore.compositionParams.externalPlay)
+          externalPlayListener();
+        else {
+          if (commonStore.compositionParams.playOnlyGeneratedContent && playerRef.current) {
+            playerRef.current.currentTime = Math.max(commonStore.compositionParams.generationStartTime - 1, 0);
+          }
+          setTimeout(() => {
+            playerRef.current?.start();
+          });
+        }
       }
     });
   });

@@ -143,7 +177,7 @@ const CompositionPanel: FC = observer(() => {
     let answer = '';
     compositionSseController = new AbortController();
     fetchEventSource( // https://api.openai.com/v1/completions || http://127.0.0.1:${port}/v1/completions
-      getServerRoot(port) + '/v1/completions',
+      getServerRoot(port, true) + '/v1/completions',
       {
         method: 'POST',
         headers: {

@@ -268,12 +302,30 @@ const CompositionPanel: FC = observer(() => {
             setSoundFont();
           }} />
       }
+      {
+        commonStore.platform === 'windows' &&
+        <Checkbox className="select-none"
+          size="large" label={t('Play With External Player')} checked={params.externalPlay}
+          onChange={async (_, data) => {
+            setParams({
+              externalPlay: data.checked as boolean
+            });
+          }} />
+      }
       <Checkbox className="select-none"
         size="large" label={t('Auto Play At The End')} checked={params.autoPlay} onChange={(_, data) => {
         setParams({
           autoPlay: data.checked as boolean
         });
       }} />
+      <Checkbox className="select-none"
+        size="large" label={t('Only Auto Play Generated Content')} checked={params.playOnlyGeneratedContent}
+        onChange={async (_, data) => {
+          setParams({
+            autoPlay: data.checked as boolean || commonStore.compositionParams.autoPlay,
+            playOnlyGeneratedContent: data.checked as boolean
+          });
+        }} />
       <Labeled flex breakline label={t('MIDI Input')}
         desc={t('Select the MIDI input device to be used.')}
         content={

@@ -319,6 +371,9 @@ const CompositionPanel: FC = observer(() => {
           contentText={t('Are you sure you want to reset this page? It cannot be undone.')}
           onConfirm={() => {
             commonStore.setCompositionSubmittedPrompt(defaultCompositionPrompt);
+            setParams({
+              generationStartTime: 0
+            });
             setPrompt(defaultCompositionPrompt);
           }} />
         <Button className="grow" appearance="primary" onClick={() => {

@@ -328,6 +383,9 @@ const CompositionPanel: FC = observer(() => {
             generateNs(params.autoPlay);
           } else {
             commonStore.setCompositionGenerating(true);
+            setParams({
+              generationStartTime: playerRef.current ? playerRef.current.duration : 0
+            });
             onSubmit(params.prompt);
           }
         }}>{!commonStore.compositionGenerating ? t('Generate') : t('Stop')}</Button>
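The 'Play With External Player' path boils down to: stop the embedded player, dump the generated MIDI bytes under ./midi/, then hand the file to the OS shell — which is why the checkbox is Windows-only, where StartFile maps to the default .mid handler. A rough Python equivalent of saveAndPlay:

```python
import os
import pathlib

def save_and_play(midi_bytes: bytes, path: str = "./midi/last.mid") -> None:
    # Write the raw MIDI bytes, then open with the OS-registered player.
    p = pathlib.Path(path)
    p.parent.mkdir(parents=True, exist_ok=True)
    p.write_bytes(midi_bytes)
    os.startfile(path)  # Windows-only, like the Wails StartFile binding
```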
@@ -27,18 +27,19 @@ import { Page } from '../components/Page';
 import { useNavigate } from 'react-router';
 import { RunButton } from '../components/RunButton';
 import { updateConfig } from '../apis';
-import { ConvertModel, FileExists, GetPyError } from '../../wailsjs/go/backend_golang/App';
-import { checkDependencies, getStrategy } from '../utils';
+import { getStrategy } from '../utils';
 import { useTranslation } from 'react-i18next';
 import { WindowShow } from '../../wailsjs/runtime';
 import strategyImg from '../assets/images/strategy.jpg';
 import strategyZhImg from '../assets/images/strategy_zh.jpg';
 import { ResetConfigsButton } from '../components/ResetConfigsButton';
 import { useMediaQuery } from 'usehooks-ts';
 import { ApiParameters, Device, ModelParameters, Precision } from '../types/configs';
-import { convertToSt } from '../utils/convert-to-st';
+import { convertModel, convertToGGML, convertToSt } from '../utils/convert-model';

-const ConfigSelector: FC<{ selectedIndex: number, updateSelectedIndex: (i: number) => void }> = observer(({ selectedIndex, updateSelectedIndex }) => {
+const ConfigSelector: FC<{
+  selectedIndex: number,
+  updateSelectedIndex: (i: number) => void
+}> = observer(({ selectedIndex, updateSelectedIndex }) => {
   return (
     <Dropdown style={{ minWidth: 0 }} className="grow" value={commonStore.modelConfigs[selectedIndex].name}
       selectedOptions={[selectedIndex.toString()]}
@@ -245,48 +246,17 @@ const Configs: FC = observer(() => {
             </div>
           } />
         {
-          selectedConfig.modelParameters.device !== 'WebGPU' ?
-            <ToolTipButton text={t('Convert')}
-              desc={t('Convert model with these configs. Using a converted model will greatly improve the loading speed, but model parameters of the converted model cannot be modified.')}
-              onClick={async () => {
-                if (commonStore.platform === 'darwin') {
-                  toast(t('MacOS is not yet supported for performing this operation, please do it manually.') + ' (backend-python/convert_model.py)', { type: 'info' });
-                  return;
-                } else if (commonStore.platform === 'linux') {
-                  toast(t('Linux is not yet supported for performing this operation, please do it manually.') + ' (backend-python/convert_model.py)', { type: 'info' });
-                  return;
-                }
-
-                const ok = await checkDependencies(navigate);
-                if (!ok)
-                  return;
-
-                const modelPath = `${commonStore.settings.customModelsPath}/${selectedConfig.modelParameters.modelName}`;
-                if (await FileExists(modelPath)) {
-                  const strategy = getStrategy(selectedConfig);
-                  const newModelPath = modelPath + '-' + strategy.replace(/[:> *+]/g, '-');
-                  toast(t('Start Converting'), { autoClose: 1000, type: 'info' });
-                  ConvertModel(commonStore.settings.customPythonPath, modelPath, strategy, newModelPath).then(async () => {
-                    if (!await FileExists(newModelPath + '.pth')) {
-                      toast(t('Convert Failed') + ' - ' + await GetPyError(), { type: 'error' });
-                    } else {
-                      toast(`${t('Convert Success')} - ${newModelPath}`, { type: 'success' });
-                    }
-                  }).catch(e => {
-                    const errMsg = e.message || e;
-                    if (errMsg.includes('path contains space'))
-                      toast(`${t('Convert Failed')} - ${t('File Path Cannot Contain Space')}`, { type: 'error' });
-                    else
-                      toast(`${t('Convert Failed')} - ${e.message || e}`, { type: 'error' });
-                  });
-                  setTimeout(WindowShow, 1000);
-                } else {
-                  toast(`${t('Model Not Found')} - ${modelPath}`, { type: 'error' });
-                }
-              }} /> :
-            <ToolTipButton text={t('Convert To Safe Tensors Format')}
+          !selectedConfig.modelParameters.device.startsWith('WebGPU') ?
+            (selectedConfig.modelParameters.device !== 'CPU (rwkv.cpp)' ?
+              <ToolTipButton text={t('Convert')}
+                desc={t('Convert model with these configs. Using a converted model will greatly improve the loading speed, but model parameters of the converted model cannot be modified.')}
+                onClick={() => convertModel(selectedConfig, navigate)} /> :
+              <ToolTipButton text={t('Convert To GGML Format')}
+                desc=""
+                onClick={() => convertToGGML(selectedConfig, navigate)} />)
+            : <ToolTipButton text={t('Convert To Safe Tensors Format')}
               desc=""
-              onClick={() => convertToSt(selectedConfig)} />
+              onClick={() => convertToSt(selectedConfig, navigate)} />
         }
         <Labeled label={t('Strategy')} content={
           <Dropdown style={{ minWidth: 0 }} className="grow" value={t(selectedConfig.modelParameters.device)!}
@@ -299,10 +269,12 @@ const Configs: FC = observer(() => {
             }
           }}>
             <Option value="CPU">CPU</Option>
+            <Option value="CPU (rwkv.cpp)">{t('CPU (rwkv.cpp, Faster)')!}</Option>
             {commonStore.platform === 'darwin' && <Option value="MPS">MPS</Option>}
             <Option value="CUDA">CUDA</Option>
             <Option value="CUDA-Beta">{t('CUDA (Beta, Faster)')!}</Option>
             <Option value="WebGPU">WebGPU</Option>
+            <Option value="WebGPU (Python)">WebGPU (Python)</Option>
             <Option value="Custom">{t('Custom')!}</Option>
           </Dropdown>
         } />

@@ -310,7 +282,8 @@ const Configs: FC = observer(() => {
           selectedConfig.modelParameters.device !== 'Custom' && <Labeled label={t('Precision')}
            desc={t('int8 uses less VRAM, but has slightly lower quality. fp16 has higher quality.')}
            content={
-              <Dropdown style={{ minWidth: 0 }} className="grow"
+              <Dropdown
+                style={{ minWidth: 0 }} className="grow"
                value={selectedConfig.modelParameters.precision}
                selectedOptions={[selectedConfig.modelParameters.precision]}
                onOptionSelect={(_, data) => {

@@ -322,19 +295,21 @@ const Configs: FC = observer(() => {
                }}>
                {selectedConfig.modelParameters.device !== 'CPU' && selectedConfig.modelParameters.device !== 'MPS' &&
                  <Option>fp16</Option>}
-                <Option>int8</Option>
-                {selectedConfig.modelParameters.device === 'WebGPU' && <Option>nf4</Option>}
-                {selectedConfig.modelParameters.device !== 'WebGPU' && <Option>fp32</Option>}
+                {selectedConfig.modelParameters.device !== 'CPU (rwkv.cpp)' && <Option>int8</Option>}
+                {selectedConfig.modelParameters.device.startsWith('WebGPU') && <Option>nf4</Option>}
+                {selectedConfig.modelParameters.device !== 'CPU (rwkv.cpp)' && !selectedConfig.modelParameters.device.startsWith('WebGPU') &&
+                  <Option>fp32</Option>}
+                {selectedConfig.modelParameters.device === 'CPU (rwkv.cpp)' && <Option>Q5_1</Option>}
              </Dropdown>
            } />
         }
         {
-          selectedConfig.modelParameters.device.includes('CUDA') &&
+          selectedConfig.modelParameters.device.startsWith('CUDA') &&
           <Labeled label={t('Current Strategy')}
             content={<Text> {getStrategy(selectedConfig)} </Text>} />
         }
         {
-          selectedConfig.modelParameters.device.includes('CUDA') &&
+          selectedConfig.modelParameters.device.startsWith('CUDA') &&
           <Labeled label={t('Stored Layers')}
             desc={t('Number of the neural network layers loaded into VRAM, the more you load, the faster the speed, but it consumes more VRAM. (If your VRAM is not enough, it will fail to load)')}
             content={

@@ -347,7 +322,7 @@ const Configs: FC = observer(() => {
               }} />
           } />
         }
-        {selectedConfig.modelParameters.device.includes('CUDA') && <div />}
+        {selectedConfig.modelParameters.device.startsWith('CUDA') && <div />}
         {
           displayStrategyImg &&
           <img style={{ width: '80vh', height: 'auto', zIndex: 100 }}

@@ -372,7 +347,7 @@ const Configs: FC = observer(() => {
         }
         {selectedConfig.modelParameters.device === 'Custom' && <div />}
         {
-          (selectedConfig.modelParameters.device.includes('CUDA') || selectedConfig.modelParameters.device === 'Custom') &&
+          (selectedConfig.modelParameters.device.startsWith('CUDA') || selectedConfig.modelParameters.device === 'Custom') &&
           <Labeled label={t('Use Custom CUDA kernel to Accelerate')}
             desc={t('Enabling this option can greatly improve inference speed and save some VRAM, but there may be compatibility issues (output garbled). If it fails to start, please turn off this option, or try to upgrade your gpu driver.')}
             content={

@@ -421,6 +396,7 @@ const Configs: FC = observer(() => {
               </div>
             }
           />
+          {mq && <div style={{ minHeight: '30px' }} />}
         </div>
         <div className="flex flex-row-reverse sm:fixed bottom-2 right-2">
           <div className="flex gap-2">
@@ -186,6 +186,16 @@ export const AdvancedGeneralSettings: FC = observer(() => {
           </Dropdown>
         </div>
       } />
+    <Labeled label={t('Core API URL')}
+      desc={t('Override core API URL(/chat/completions and /completions). If you don\'t know what this is, leave it blank.')}
+      content={
+        <Input style={{ minWidth: 0 }} className="grow" value={commonStore.settings.coreApiUrl}
+          onChange={(e, data) => {
+            commonStore.setSettings({
+              coreApiUrl: data.value
+            });
+          }} />
+      } />
   </div>;
 });
@@ -94,8 +94,11 @@ class CommonStore {
     topP: 0.8,
     autoPlay: true,
     useLocalSoundFont: false,
+    externalPlay: false,
     midi: null,
-    ns: null
+    ns: null,
+    generationStartTime: 0,
+    playOnlyGeneratedContent: true
   };
   compositionGenerating: boolean = false;
   compositionSubmittedPrompt: string = defaultCompositionPrompt;

@@ -174,7 +177,8 @@ class CommonStore {
     apiUrl: '',
     apiKey: '',
     apiChatModelName: 'rwkv',
-    apiCompletionModelName: 'rwkv'
+    apiCompletionModelName: 'rwkv',
+    coreApiUrl: ''
   };
   // about
   about: AboutContent = manifest.about;
@@ -9,8 +9,11 @@ export type CompositionParams = {
   topP: number,
   autoPlay: boolean,
   useLocalSoundFont: boolean,
+  externalPlay: boolean,
   midi: ArrayBuffer | null,
-  ns: NoteSequence | null
+  ns: NoteSequence | null,
+  generationStartTime: number,
+  playOnlyGeneratedContent: boolean,
 }
 export type Track = {
   id: string;
|
||||
presencePenalty: number;
|
||||
frequencyPenalty: number;
|
||||
}
|
||||
export type Device = 'CPU' | 'CUDA' | 'CUDA-Beta' | 'WebGPU' | 'MPS' | 'Custom';
|
||||
export type Precision = 'fp16' | 'int8' | 'fp32' | 'nf4';
|
||||
export type Device = 'CPU' | 'CPU (rwkv.cpp)' | 'CUDA' | 'CUDA-Beta' | 'WebGPU' | 'WebGPU (Python)' | 'MPS' | 'Custom';
|
||||
export type Precision = 'fp16' | 'int8' | 'fp32' | 'nf4' | 'Q5_1';
|
||||
export type ModelParameters = {
|
||||
// different models can not have the same name
|
||||
modelName: string;
|
||||
|
||||
@@ -19,4 +19,5 @@ export type SettingsType = {
   apiKey: string
   apiChatModelName: string
   apiCompletionModelName: string
+  coreApiUrl: string
 }
118
frontend/src/utils/convert-model.ts
Normal file
@@ -0,0 +1,118 @@
import { toast } from 'react-toastify';
import commonStore from '../stores/commonStore';
import { t } from 'i18next';
import {
  ConvertGGML,
  ConvertModel,
  ConvertSafetensors,
  ConvertSafetensorsWithPython,
  FileExists,
  GetPyError
} from '../../wailsjs/go/backend_golang/App';
import { WindowShow } from '../../wailsjs/runtime';
import { ModelConfig, Precision } from '../types/configs';
import { checkDependencies, getStrategy } from './index';
import { NavigateFunction } from 'react-router';

export const convertModel = async (selectedConfig: ModelConfig, navigate: NavigateFunction) => {
  if (commonStore.platform === 'darwin') {
    toast(t('MacOS is not yet supported for performing this operation, please do it manually.') + ' (backend-python/convert_model.py)', { type: 'info' });
    return;
  } else if (commonStore.platform === 'linux') {
    toast(t('Linux is not yet supported for performing this operation, please do it manually.') + ' (backend-python/convert_model.py)', { type: 'info' });
    return;
  }

  const ok = await checkDependencies(navigate);
  if (!ok)
    return;

  const modelPath = `${commonStore.settings.customModelsPath}/${selectedConfig.modelParameters.modelName}`;
  if (await FileExists(modelPath)) {
    const strategy = getStrategy(selectedConfig);
    const newModelPath = modelPath + '-' + strategy.replace(/[:> *+]/g, '-');
    toast(t('Start Converting'), { autoClose: 2000, type: 'info' });
    ConvertModel(commonStore.settings.customPythonPath, modelPath, strategy, newModelPath).then(async () => {
      if (!await FileExists(newModelPath + '.pth')) {
        toast(t('Convert Failed') + ' - ' + await GetPyError(), { type: 'error' });
      } else {
        toast(`${t('Convert Success')} - ${newModelPath}`, { type: 'success' });
      }
    }).catch(e => {
      const errMsg = e.message || e;
      if (errMsg.includes('path contains space'))
        toast(`${t('Convert Failed')} - ${t('File Path Cannot Contain Space')}`, { type: 'error' });
      else
        toast(`${t('Convert Failed')} - ${e.message || e}`, { type: 'error' });
    });
    setTimeout(WindowShow, 1000);
  } else {
    toast(`${t('Model Not Found')} - ${modelPath}`, { type: 'error' });
  }
};


export const convertToSt = async (selectedConfig: ModelConfig, navigate: NavigateFunction) => {
  const webgpuPython = selectedConfig.modelParameters.device === 'WebGPU (Python)';
  if (webgpuPython) {
    const ok = await checkDependencies(navigate);
    if (!ok)
      return;
  }

  const modelPath = `${commonStore.settings.customModelsPath}/${selectedConfig.modelParameters.modelName}`;
  if (await FileExists(modelPath)) {
    toast(t('Start Converting'), { autoClose: 2000, type: 'info' });
    const newModelPath = modelPath.replace(/\.pth$/, '.st');
    const convert = webgpuPython ?
      (input: string, output: string) => ConvertSafetensorsWithPython(commonStore.settings.customPythonPath, input, output)
      : ConvertSafetensors;
    convert(modelPath, newModelPath).then(async () => {
      if (!await FileExists(newModelPath)) {
        if (commonStore.platform === 'windows' || commonStore.platform === 'linux')
          toast(t('Convert Failed') + ' - ' + await GetPyError(), { type: 'error' });
      } else {
        toast(`${t('Convert Success')} - ${newModelPath}`, { type: 'success' });
      }
    }).catch(e => {
      const errMsg = e.message || e;
      if (errMsg.includes('path contains space'))
        toast(`${t('Convert Failed')} - ${t('File Path Cannot Contain Space')}`, { type: 'error' });
      else
        toast(`${t('Convert Failed')} - ${e.message || e}`, { type: 'error' });
    });
    setTimeout(WindowShow, 1000);
  } else {
    toast(`${t('Model Not Found')} - ${modelPath}`, { type: 'error' });
  }
};

export const convertToGGML = async (selectedConfig: ModelConfig, navigate: NavigateFunction) => {
  const ok = await checkDependencies(navigate);
  if (!ok)
    return;

  const modelPath = `${commonStore.settings.customModelsPath}/${selectedConfig.modelParameters.modelName}`;
  if (await FileExists(modelPath)) {
    toast(t('Start Converting'), { autoClose: 2000, type: 'info' });
    const precision: Precision = selectedConfig.modelParameters.precision === 'Q5_1' ? 'Q5_1' : 'fp16';
    const newModelPath = modelPath.replace(/\.pth$/, `-${precision}.bin`);
    ConvertGGML(commonStore.settings.customPythonPath, modelPath, newModelPath, precision === 'Q5_1').then(async () => {
      if (!await FileExists(newModelPath)) {
        if (commonStore.platform === 'windows' || commonStore.platform === 'linux')
          toast(t('Convert Failed') + ' - ' + await GetPyError(), { type: 'error' });
      } else {
        toast(`${t('Convert Success')} - ${newModelPath}`, { type: 'success' });
      }
    }).catch(e => {
      const errMsg = e.message || e;
      if (errMsg.includes('path contains space'))
        toast(`${t('Convert Failed')} - ${t('File Path Cannot Contain Space')}`, { type: 'error' });
      else
        toast(`${t('Convert Failed')} - ${e.message || e}`, { type: 'error' });
    });
    setTimeout(WindowShow, 1000);
  } else {
    toast(`${t('Model Not Found')} - ${modelPath}`, { type: 'error' });
  }
};
@@ -1,31 +0,0 @@
import { toast } from 'react-toastify';
import commonStore from '../stores/commonStore';
import { t } from 'i18next';
import { ConvertSafetensors, FileExists, GetPyError } from '../../wailsjs/go/backend_golang/App';
import { WindowShow } from '../../wailsjs/runtime';
import { ModelConfig } from '../types/configs';

export const convertToSt = async (selectedConfig: ModelConfig) => {
  const modelPath = `${commonStore.settings.customModelsPath}/${selectedConfig.modelParameters.modelName}`;
  if (await FileExists(modelPath)) {
    toast(t('Start Converting'), { autoClose: 2000, type: 'info' });
    const newModelPath = modelPath.replace(/\.pth$/, '.st');
    ConvertSafetensors(modelPath, newModelPath).then(async () => {
      if (!await FileExists(newModelPath)) {
        if (commonStore.platform === 'windows' || commonStore.platform === 'linux')
          toast(t('Convert Failed') + ' - ' + await GetPyError(), { type: 'error' });
      } else {
        toast(`${t('Convert Success')} - ${newModelPath}`, { type: 'success' });
      }
    }).catch(e => {
      const errMsg = e.message || e;
      if (errMsg.includes('path contains space'))
        toast(`${t('Convert Failed')} - ${t('File Path Cannot Contain Space')}`, { type: 'error' });
      else
        toast(`${t('Convert Failed')} - ${e.message || e}`, { type: 'error' });
    });
    setTimeout(WindowShow, 1000);
  } else {
    toast(`${t('Model Not Found')} - ${modelPath}`, { type: 'error' });
  }
};
@@ -51,11 +51,11 @@ export async function refreshBuiltInModels(readCache: boolean = false) {
     await ReadJson('cache.json').then((cacheData: Cache) => {
       if (cacheData.models)
         cache.models = cacheData.models;
-      else cache.models = manifest.models;
+      else cache.models = manifest.models.slice();
     }).catch(() => {
-      cache.models = manifest.models;
+      cache.models = manifest.models.slice();
     });
-  else cache.models = manifest.models;
+  else cache.models = manifest.models.slice();

   commonStore.setModelSourceList(cache.models);
   await saveCache().catch(() => {

@@ -63,7 +63,7 @@ export async function refreshBuiltInModels(readCache: boolean = false) {
   return cache;
 }

-const modelSuffix = ['.pth', '.st', '.safetensors'];
+const modelSuffix = ['.pth', '.st', '.safetensors', '.bin'];

 export async function refreshLocalModels(cache: {
   models: ModelSourceItem[]

@@ -192,6 +192,7 @@ export const getStrategy = (modelConfig: ModelConfig | undefined = undefined) =>
       strategy += params.precision === 'int8' ? 'fp32i8' : 'fp32';
       break;
     case 'WebGPU':
+    case 'WebGPU (Python)':
       strategy += params.precision === 'nf4' ? 'fp16i4' : params.precision === 'int8' ? 'fp16i8' : 'fp16';
       break;
     case 'CUDA':

@@ -303,7 +304,11 @@ export function bytesToReadable(size: number) {
   else return bytesToGb(size) + ' GB';
 }

-export function getServerRoot(defaultLocalPort: number) {
+export function getServerRoot(defaultLocalPort: number, isCore: boolean = false) {
+  const coreCustomApiUrl = commonStore.settings.coreApiUrl.trim().replace(/\/$/, '');
+  if (isCore && coreCustomApiUrl)
+    return coreCustomApiUrl;
+
   const defaultRoot = `http://127.0.0.1:${defaultLocalPort}`;
   if (commonStore.status.status !== ModelStatus.Offline)
     return defaultRoot;
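The new isCore flag gives the user-supplied Core API URL precedence, but only for the two completion endpoints; every other request keeps targeting the local server. The same precedence sketched in Python (simplified — the TS version also falls back by model status):

```python
def get_server_root(default_local_port: int, is_core: bool = False,
                    core_api_url: str = "") -> str:
    # A configured core URL wins, but only for /chat/completions and /completions.
    core = core_api_url.strip().rstrip("/")
    if is_core and core:
        return core
    return f"http://127.0.0.1:{default_local_port}"

print(get_server_root(8000, is_core=True, core_api_url="https://api.example.com/"))
```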
10
frontend/wailsjs/go/backend_golang/App.d.ts
generated
vendored
@@ -10,10 +10,14 @@ export function ContinueDownload(arg1:string):Promise<void>;

 export function ConvertData(arg1:string,arg2:string,arg3:string,arg4:string):Promise<string>;

+export function ConvertGGML(arg1:string,arg2:string,arg3:string,arg4:boolean):Promise<string>;
+
 export function ConvertModel(arg1:string,arg2:string,arg3:string,arg4:string):Promise<string>;

 export function ConvertSafetensors(arg1:string,arg2:string):Promise<string>;

+export function ConvertSafetensorsWithPython(arg1:string,arg2:string,arg3:string):Promise<string>;
+
 export function CopyFile(arg1:string,arg2:string):Promise<void>;

 export function DeleteFile(arg1:string):Promise<void>;

@@ -56,9 +60,13 @@ export function ReadJson(arg1:string):Promise<any>;

 export function RestartApp():Promise<void>;

+export function SaveFile(arg1:string,arg2:Array<number>):Promise<void>;
+
 export function SaveJson(arg1:string,arg2:any):Promise<void>;

-export function StartServer(arg1:string,arg2:number,arg3:string,arg4:boolean,arg5:boolean):Promise<string>;
+export function StartFile(arg1:string):Promise<void>;
+
+export function StartServer(arg1:string,arg2:number,arg3:string,arg4:boolean,arg5:boolean,arg6:boolean,arg7:boolean):Promise<string>;

 export function StartWebGPUServer(arg1:number,arg2:string):Promise<string>;
20
frontend/wailsjs/go/backend_golang/App.js
generated
@@ -18,6 +18,10 @@ export function ConvertData(arg1, arg2, arg3, arg4) {
 	return window['go']['backend_golang']['App']['ConvertData'](arg1, arg2, arg3, arg4);
 }

+export function ConvertGGML(arg1, arg2, arg3, arg4) {
+	return window['go']['backend_golang']['App']['ConvertGGML'](arg1, arg2, arg3, arg4);
+}
+
 export function ConvertModel(arg1, arg2, arg3, arg4) {
 	return window['go']['backend_golang']['App']['ConvertModel'](arg1, arg2, arg3, arg4);
 }

@@ -26,6 +30,10 @@ export function ConvertSafetensors(arg1, arg2) {
 	return window['go']['backend_golang']['App']['ConvertSafetensors'](arg1, arg2);
 }

+export function ConvertSafetensorsWithPython(arg1, arg2, arg3) {
+	return window['go']['backend_golang']['App']['ConvertSafetensorsWithPython'](arg1, arg2, arg3);
+}
+
 export function CopyFile(arg1, arg2) {
 	return window['go']['backend_golang']['App']['CopyFile'](arg1, arg2);
 }

@@ -110,12 +118,20 @@ export function RestartApp() {
 	return window['go']['backend_golang']['App']['RestartApp']();
 }

+export function SaveFile(arg1, arg2) {
+	return window['go']['backend_golang']['App']['SaveFile'](arg1, arg2);
+}
+
 export function SaveJson(arg1, arg2) {
 	return window['go']['backend_golang']['App']['SaveJson'](arg1, arg2);
 }

-export function StartServer(arg1, arg2, arg3, arg4, arg5) {
-	return window['go']['backend_golang']['App']['StartServer'](arg1, arg2, arg3, arg4, arg5);
+export function StartFile(arg1) {
+	return window['go']['backend_golang']['App']['StartFile'](arg1);
+}
+
+export function StartServer(arg1, arg2, arg3, arg4, arg5, arg6, arg7) {
+	return window['go']['backend_golang']['App']['StartServer'](arg1, arg2, arg3, arg4, arg5, arg6, arg7);
 }

 export function StartWebGPUServer(arg1, arg2) {
14
main.go
@@ -11,6 +11,7 @@ import (
 	backend "rwkv-runner/backend-golang"

 	"github.com/wailsapp/wails/v2"
+	wailsLogger "github.com/wailsapp/wails/v2/pkg/logger"
 	"github.com/wailsapp/wails/v2/pkg/options"
 	"github.com/wailsapp/wails/v2/pkg/options/assetserver"
 	"github.com/wailsapp/wails/v2/pkg/options/windows"

@@ -66,7 +67,10 @@ var midiAssets embed.FS
 var components embed.FS

 func main() {
+	dev := true
+	if buildInfo, ok := debug.ReadBuildInfo(); !ok || strings.Contains(buildInfo.String(), "-ldflags") {
+		dev = false
+	}
 	backend.CopyEmbed(assets)
 	os.RemoveAll("./py310/Lib/site-packages/cyac-1.7.dist-info")
 	backend.CopyEmbed(cyac)

@@ -94,11 +98,18 @@ func main() {
 		app.HasConfigData = false
 	}

+	var logger wailsLogger.Logger
+	if dev {
+		logger = wailsLogger.NewDefaultLogger()
+	} else {
+		logger = wailsLogger.NewFileLogger("crash.log")
+	}
+
 	// Create application with options
 	err = wails.Run(&options.App{
 		Title:                    "RWKV-Runner",
 		Width:                    1024,
-		Height:                   680,
+		Height:                   700,
 		MinWidth:                 375,
 		MinHeight:                640,
 		EnableDefaultContextMenu: true,

@@ -115,6 +126,7 @@ func main() {
 		Bind: []any{
 			app,
 		},
+		Logger: logger,
 	})

 	if err != nil {
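The intent of the new dev probe is "packaged release build or `wails dev` run?"; release builds are linked with -ldflags, which is what the build-info string test detects, and the logger choice (console vs crash.log) follows from it. The same idea in Python, for comparison (PyInstaller sets sys.frozen on packaged apps):

```python
import sys

# Analogous dev/release probe for a Python app packaged with PyInstaller.
dev = not getattr(sys, "frozen", False)
print("default logger" if dev else "file logger (crash.log)")
```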
@@ -1,5 +1,5 @@
 {
-  "version": "1.5.9",
+  "version": "1.6.4",
   "introduction": {
"en": "RWKV is an open-source, commercially usable large language model with high flexibility and great potential for development.\n### About This Tool\nThis tool aims to lower the barrier of entry for using large language models, making it accessible to everyone. It provides fully automated dependency and model management. You simply need to click and run, following the instructions, to deploy a local large language model. The tool itself is very compact and only requires a single executable file for one-click deployment.\nAdditionally, this tool offers an interface that is fully compatible with the OpenAI API. This means you can use any ChatGPT client as a client for RWKV, enabling capability expansion beyond just chat functionality.\n### Preset Configuration Rules at the Bottom\nThis tool comes with a series of preset configurations to reduce complexity. The naming rules for each configuration represent the following in order: device - required VRAM/memory - model size - model language.\nFor example, \"GPU-8G-3B-EN\" indicates that this configuration is for a graphics card with 8GB of VRAM, a model size of 3 billion parameters, and it uses an English language model.\nLarger model sizes have higher performance and VRAM requirements. Among configurations with the same model size, those with higher VRAM usage will have faster runtime.\nFor example, if you have 12GB of VRAM but running the \"GPU-12G-7B-EN\" configuration is slow, you can downgrade to \"GPU-8G-3B-EN\" for a significant speed improvement.\n### About RWKV\nRWKV is an RNN with Transformer-level LLM performance, which can also be directly trained like a GPT transformer (parallelizable). And it's 100% attention-free. You only need the hidden state at position t to compute the state at position t+1. You can use the \"GPT\" mode to quickly compute the hidden state for the \"RNN\" mode.<br/>So it's combining the best of RNN and transformer - great performance, fast inference, saves VRAM, fast training, \"infinite\" ctx_len, and free sentence embedding (using the final hidden state).",
"zh": "RWKV是一个开源且允许商用的大语言模型,灵活性很高且极具发展潜力。\n### 关于本工具\n本工具旨在降低大语言模型的使用门槛,做到人人可用,本工具提供了全自动化的依赖和模型管理,你只需要直接点击运行,跟随引导,即可完成本地大语言模型的部署,工具本身体积极小,只需要一个exe即可完成一键部署。\n此外,本工具提供了与OpenAI API完全兼容的接口,这意味着你可以把任意ChatGPT客户端用作RWKV的客户端,实现能力拓展,而不局限于聊天。\n### 底部的预设配置规则\n本工具内置了一系列预设配置,以降低使用难度,每个配置名的规则,依次代表着:设备-所需显存/内存-模型规模-模型语言。\n例如,GPU-8G-3B-CN,表示该配置用于显卡,需要8G显存,模型规模为30亿参数,使用的是中文模型。\n模型规模越大,性能要求越高,显存要求也越高,而同样模型规模的配置中,显存占用越高的,运行速度越快。\n例如当你有12G显存,但运行GPU-12G-7B-CN配置速度比较慢,可降级成GPU-8G-3B-CN,将会大幅提速。\n### 关于RWKV\nRWKV是具有Transformer级别LLM性能的RNN,也可以像GPT Transformer一样直接进行训练(可并行化)。而且它是100% attention-free的。你只需在位置t处获得隐藏状态即可计算位置t + 1处的状态。你可以使用“GPT”模式快速计算用于“RNN”模式的隐藏状态。\n因此,它将RNN和Transformer的优点结合起来 - 高性能、快速推理、节省显存、快速训练、“无限”上下文长度以及免费的语句嵌入(使用最终隐藏状态)。"
@@ -3,6 +3,7 @@
 - ^backend-python/get-pip\.py
 - ^backend-python/convert_model\.py
 - ^backend-python/convert_safetensors\.py
+- ^backend-python/convert_pytorch_to_ggml\.py linguist-vendored
 - ^backend-python/utils/midi\.py
 - ^build/
 - ^finetune/lora/