Compare commits

...

24 Commits

Author SHA1 Message Date
josc146
f8b764ef8f release v1.4.6 2023-09-16 13:05:34 +08:00
josc146
fcfaa5944e frontend feature adaptation for api params (user_name, assistant_name, presystem) 2023-09-16 13:02:06 +08:00
josc146
f89e89c1c9 chore 2023-09-16 12:23:16 +08:00
josc146
a25965530c custom tokenizer (#77) 2023-09-16 00:34:11 +08:00
josc146
971124d0d7 upgrade to wails@v2.6.0 (EnableDefaultContextMenu: true) 2023-09-16 00:29:45 +08:00
josc146
d7dcc90008 chore 2023-09-15 16:31:14 +08:00
josc146
df969fcfc6 upgrade cuda-beta 2023-09-15 16:30:11 +08:00
josc146
c4042bbfd8 improve ui desc 2023-09-15 16:26:32 +08:00
josc146
4112200b4c revert(2d5456): refresh local models when download complete (for macOS) 2023-09-15 16:25:04 +08:00
Ikko Eltociear Ashimine
3f9a54e36f Update README_JA.md
add translation.
2023-09-13 16:11:43 +08:00
github-actions[bot]
3ed4456135 release v1.4.5 2023-08-27 15:57:18 +00:00
josc146
e0df9ae47b release v1.4.5 2023-08-27 23:56:37 +08:00
josc146
87b2c3ed7d fix build 2023-08-27 23:56:30 +08:00
josc146
50ff7ef6bc always use requirements.txt 2023-08-27 23:52:52 +08:00
josc146
c7a580ca8a update manifest 2023-08-27 23:16:56 +08:00
josc146
eaae7624a7 add HardwareMonitor (Windows Only) 2023-08-27 22:53:18 +08:00
josc146
fcd59de6fb correct Preset UI description 2023-08-27 21:37:32 +08:00
josc146
1bbe127209 fix webgpu_server file permissions of linux and macos 2023-08-27 21:22:26 +08:00
josc146
b868adc058 chore 2023-08-27 21:21:34 +08:00
josc146
a24b78e8c3 python-backend: extra ChatCompletionBody params (raw, presystem);
add default_stop when stop is null
2023-08-27 21:21:11 +08:00
josc146
c8025f1cff allow message content to be empty 2023-08-27 21:02:54 +08:00
josc146
fe0860dbf0 fix lora finetune max_epochs (#170) 2023-08-24 22:49:57 +08:00
josc146
02d5d641d1 chore 2023-08-24 22:48:54 +08:00
github-actions[bot]
a057bb6c5b release v1.4.4 2023-08-16 15:33:53 +00:00
35 changed files with 1041 additions and 201 deletions

View File

@@ -57,6 +57,8 @@ jobs:
with:
args: install upx
- run: |
Start-BitsTransfer https://github.com/josStorer/LibreHardwareMonitor.Console/releases/download/v0.1.0/LibreHardwareMonitor.Console.zip ./LibreHardwareMonitor.Console.zip
Expand-Archive ./LibreHardwareMonitor.Console.zip -DestinationPath ./components/LibreHardwareMonitor.Console
Start-BitsTransfer https://www.python.org/ftp/python/3.10.11/python-3.10.11-embed-amd64.zip ./python-3.10.11-embed-amd64.zip
Expand-Archive ./python-3.10.11-embed-amd64.zip -DestinationPath ./py310
$content=Get-Content "./py310/python310._pth"; $content | ForEach-Object {if ($_.ReadCount -eq 3) {"Lib\\site-packages"} else {$_}} | Set-Content ./py310/python310._pth
@@ -71,6 +73,7 @@ jobs:
mv ./target/release/ai00_server.exe ../backend-rust/webgpu_server.exe
cd ..
go install github.com/wailsapp/wails/v2/cmd/wails@latest
(Get-Content -Path ./backend-golang/app.go) -replace "//go:custom_build windows ", "" | Set-Content -Path ./backend-golang/app.go
make
Rename-Item -Path "build/bin/RWKV-Runner.exe" -NewName "RWKV-Runner_windows_x64.exe"

.gitignore vendored
View File

@@ -26,3 +26,4 @@ __pycache__
train_log.txt
finetune/json2binidx_tool/data
/wsl.state
/components

View File

@@ -1,10 +1,12 @@
## Changes
- webgpu support (AMD, Intel, Nvidia, Apple)
- add rwkv-cuda-beta support (faster)
- add misc API (`/models` and `/dashboard/billing/credit_grants`)
- allow multiple systems
- allow completions input to be null
- frontend adaptation for api params (user_name, assistant_name, presystem)
- custom tokenizer (#77)
- enable right-click context menu
- upgrade cuda-beta
- revert(2d5456): refresh local models when download complete (for macOS)
- improve ui desc
- chore
## Install
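The two misc endpoints called out above follow OpenAI's response shapes, so existing OpenAI clients can point at the local server. A minimal sketch (assuming the default local address `http://127.0.0.1:8000` that appears elsewhere in these docs):

```python
# Minimal sketch: exercise the misc API endpoints added in this release.
# Assumes a local RWKV-Runner backend at http://127.0.0.1:8000 (default).
import requests

base = "http://127.0.0.1:8000"

# OpenAI-compatible model listing
print(requests.get(f"{base}/models").json())

# Dashboard credit-grants stub, used by some ChatGPT-style clients
print(requests.get(f"{base}/dashboard/billing/credit_grants").json())
```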

View File

@@ -91,8 +91,8 @@ body.json:
## 埋め込み API の例
Note: v1.4.0 has improved the quality of embeddings API. The generated results are not compatible
with previous versions. If you are using embeddings API to generate knowledge bases or similar, please regenerate.
注意: v1.4.0 では、埋め込み API の品質が向上しました。生成される結果は、以前のバージョンとは互換性がありません。
もし、embeddings API を使って知識ベースなどを生成している場合は、再生成してください。
LangChain を使用している場合は、`OpenAIEmbeddings(openai_api_base="http://127.0.0.1:8000", openai_api_key="sk-")`
を使用してください

View File

@@ -1,6 +1,7 @@
package backend_golang
import (
"bufio"
"context"
"errors"
"net/http"
@@ -8,6 +9,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
"syscall"
"github.com/fsnotify/fsnotify"
"github.com/minio/selfupdate"
@@ -41,6 +43,7 @@ func (a *App) OnStartup(ctx context.Context) {
a.cmdPrefix = "cd " + a.exDir + " && "
}
os.Chmod("./backend-rust/webgpu_server", 0777)
os.Mkdir(a.exDir+"models", os.ModePerm)
os.Mkdir(a.exDir+"lora-models", os.ModePerm)
os.Mkdir(a.exDir+"finetune/json2binidx_tool/data", os.ModePerm)
@@ -50,7 +53,18 @@ func (a *App) OnStartup(ctx context.Context) {
}
a.downloadLoop()
a.watchFs()
a.monitorHardware()
}
func (a *App) OnBeforeClose(ctx context.Context) bool {
if monitor != nil {
monitor.Process.Kill()
}
return false
}
func (a *App) watchFs() {
watcher, err := fsnotify.NewWatcher()
if err == nil {
watcher.Add("./lora-models")
@@ -62,7 +76,7 @@ func (a *App) OnStartup(ctx context.Context) {
if !ok {
return
}
wruntime.EventsEmit(ctx, "fsnotify", event.Name)
wruntime.EventsEmit(a.ctx, "fsnotify", event.Name)
case _, ok := <-watcher.Errors:
if !ok {
return
@@ -73,6 +87,37 @@ func (a *App) OnStartup(ctx context.Context) {
}
}
var monitor *exec.Cmd
func (a *App) monitorHardware() {
if runtime.GOOS != "windows" {
return
}
monitor = exec.Command("./components/LibreHardwareMonitor.Console/LibreHardwareMonitor.Console.exe")
stdout, err := monitor.StdoutPipe()
if err != nil {
monitor = nil
return
}
go func() {
reader := bufio.NewReader(stdout)
for {
line, _, err := reader.ReadLine()
if err != nil {
wruntime.EventsEmit(a.ctx, "monitorerr", err.Error())
break
}
wruntime.EventsEmit(a.ctx, "monitor", string(line))
}
}()
monitor.SysProcAttr = &syscall.SysProcAttr{}
//go:custom_build windows monitor.SysProcAttr.HideWindow = true
monitor.Start()
}
func (a *App) UpdateApp(url string) (broken bool, err error) {
resp, err := http.Get(url)
if err != nil {

View File

@@ -155,7 +155,6 @@ func (a *App) InstallPyDep(python string, cnMirror bool) (string, error) {
"exit"
if !cnMirror {
installScript = strings.Replace(installScript, " -i https://pypi.tuna.tsinghua.edu.cn/simple", "", -1)
installScript = strings.Replace(installScript, "requirements.txt", "requirements_versions.txt", -1)
}
err = os.WriteFile("./install-py-dep.bat", []byte(installScript), 0644)
if err != nil {

Binary file not shown.

View File

@@ -25,32 +25,44 @@ class Role(Enum):
class Message(BaseModel):
role: Role
content: str = Field(min_length=1)
content: str = Field(min_length=0)
raw: bool = Field(False, description="Whether to treat content as raw text")
default_stop = [
"\n\nUser",
"\n\nQuestion",
"\n\nQ",
"\n\nHuman",
"\n\nBob",
]
class ChatCompletionBody(ModelConfigBody):
messages: Union[List[Message], None]
model: str = "rwkv"
model: Union[str, None] = "rwkv"
stream: bool = False
stop: Union[str, List[str], None] = [
"\n\nUser",
"\n\nQuestion",
"\n\nQ",
"\n\nHuman",
"\n\nBob",
]
user_name: Union[str, None] = None
assistant_name: Union[str, None] = None
stop: Union[str, List[str], None] = default_stop
user_name: Union[str, None] = Field(None, description="Internal user name")
assistant_name: Union[str, None] = Field(
None, description="Internal assistant name"
)
presystem: bool = Field(
True, description="Whether to insert default system prompt at the beginning"
)
class Config:
schema_extra = {
"example": {
"messages": [{"role": Role.User.value, "content": "hello"}],
"messages": [
{"role": Role.User.value, "content": "hello", "raw": False}
],
"model": "rwkv",
"stream": False,
"stop": None,
"user_name": None,
"assistant_name": None,
"presystem": True,
"max_tokens": 1000,
"temperature": 1.2,
"top_p": 0.5,
@@ -62,7 +74,7 @@ class ChatCompletionBody(ModelConfigBody):
class CompletionBody(ModelConfigBody):
prompt: Union[str, List[str], None]
model: str = "rwkv"
model: Union[str, None] = "rwkv"
stream: bool = False
stop: Union[str, List[str], None] = None
@@ -233,10 +245,6 @@ async def chat_completions(body: ChatCompletionBody, request: Request):
if body.messages is None or body.messages == []:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "messages not found")
basic_system: str = ""
if body.messages[0].role == Role.System:
basic_system = body.messages[0].content
interface = model.interface
user = model.user if body.user_name is None else body.user_name
bot = model.bot if body.assistant_name is None else body.assistant_name
@@ -244,46 +252,54 @@ async def chat_completions(body: ChatCompletionBody, request: Request):
is_raven = model.rwkv_type == RWKVType.Raven
completion_text: str = ""
if basic_system == "":
completion_text = (
f"""
basic_system: Union[str, None] = None
if body.presystem:
if body.messages[0].role == Role.System:
basic_system = body.messages[0].content
if basic_system is None:
completion_text = (
f"""
The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. \
{bot} is very intelligent, creative and friendly. \
{bot} is unlikely to disagree with {user}, and {bot} doesn't like to ask {user} questions. \
{bot} likes to tell {user} a lot about herself and her opinions. \
{bot} usually gives {user} kind, helpful and informative advices.\n
"""
if is_raven
else (
f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
)
elif basic_system != "":
completion_text = (
(
f"The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. "
if is_raven
else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
else (
f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
)
else:
if not body.messages[0].raw:
basic_system = (
basic_system.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\n\n", "\n")
.replace("\n", " ")
.strip()
)
completion_text = (
(
f"The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. "
if is_raven
else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
)
+ basic_system.replace("You are", f"{bot} is" if is_raven else "I am")
.replace("you are", f"{bot} is" if is_raven else "I am")
.replace("You're", f"{bot} is" if is_raven else "I'm")
.replace("you're", f"{bot} is" if is_raven else "I'm")
.replace("You", f"{bot}" if is_raven else "I")
.replace("you", f"{bot}" if is_raven else "I")
.replace("Your", f"{bot}'s" if is_raven else "My")
.replace("your", f"{bot}'s" if is_raven else "my")
.replace("", f"{bot}" if is_raven else "")
+ "\n\n"
)
+ basic_system.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\n\n", "\n")
.replace("\n", " ")
.strip()
.replace("You are", f"{bot} is" if is_raven else "I am")
.replace("you are", f"{bot} is" if is_raven else "I am")
.replace("You're", f"{bot} is" if is_raven else "I'm")
.replace("you're", f"{bot} is" if is_raven else "I'm")
.replace("You", f"{bot}" if is_raven else "I")
.replace("you", f"{bot}" if is_raven else "I")
.replace("Your", f"{bot}'s" if is_raven else "My")
.replace("your", f"{bot}'s" if is_raven else "my")
.replace("", f"{bot}" if is_raven else "")
+ "\n\n"
)
for message in body.messages[(0 if basic_system == "" else 1) :]:
for message in body.messages[(0 if basic_system is None else 1) :]:
append_message: str = ""
if message.role == Role.User:
append_message = f"{user}{interface} " + message.content
@@ -291,20 +307,23 @@ The following is a coherent verbose detailed conversation between a girl named {
append_message = f"{bot}{interface} " + message.content
elif message.role == Role.System:
append_message = message.content
completion_text += (
append_message.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\n\n", "\n")
.strip()
+ "\n\n"
)
if not message.raw:
append_message = (
append_message.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\n\n", "\n")
.strip()
)
completion_text += append_message + "\n\n"
completion_text += f"{bot}{interface}"
if type(body.stop) == str:
body.stop = [body.stop, f"\n\n{user}", f"\n\n{bot}"]
else:
elif type(body.stop) == list:
body.stop.append(f"\n\n{user}")
body.stop.append(f"\n\n{bot}")
elif body.stop is None:
body.stop = default_stop
if body.stream:
return EventSourceResponse(
@@ -349,7 +368,7 @@ async def completions(body: CompletionBody, request: Request):
class EmbeddingsBody(BaseModel):
input: Union[str, List[str], List[List[int]], None]
model: str = "rwkv"
model: Union[str, None] = "rwkv"
encoding_format: str = None
fast_mode: bool = False
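Putting the new `ChatCompletionBody` fields together: a minimal sketch, assuming the OpenAI-compatible `/chat/completions` route on the default local address. The field values mirror the `schema_extra` example above.

```python
# Minimal sketch: call chat completions with the new request fields.
# Route name and address are assumptions (OpenAI-compatible local server).
import requests

body = {
    "messages": [{"role": "user", "content": "hello", "raw": False}],
    "model": "rwkv",
    "stream": False,
    "stop": None,            # server falls back to default_stop when null
    "user_name": None,       # override the internal user name
    "assistant_name": None,  # override the internal assistant name
    "presystem": True,       # insert the default system prompt up front
    "max_tokens": 1000,
    "temperature": 1.2,
    "top_p": 0.5,
}
resp = requests.post("http://127.0.0.1:8000/chat/completions", json=body)
print(resp.json())
```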

View File

@@ -29,6 +29,7 @@ def get_tokens_path(model_path: str):
class SwitchModelBody(BaseModel):
model: str
strategy: str
tokenizer: Union[str, None] = None
customCuda: bool = False
class Config:
@@ -36,6 +37,7 @@ class SwitchModelBody(BaseModel):
"example": {
"model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
"strategy": "cuda fp16",
"tokenizer": None,
"customCuda": False,
}
}
@@ -65,19 +67,24 @@ def switch_model(body: SwitchModelBody, response: Response, request: Request):
os.environ["RWKV_CUDA_ON"] = "1" if body.customCuda else "0"
global_var.set(global_var.Model_Status, global_var.ModelStatus.Loading)
tokenizer = (
get_tokens_path(body.model)
if body.tokenizer is None or body.tokenizer == ""
else body.tokenizer
)
try:
global_var.set(
global_var.Model,
TextRWKV(
model=body.model,
strategy=body.strategy,
tokens_path=get_tokens_path(body.model),
tokens_path=tokenizer,
)
if "midi" not in body.model.lower()
else MusicRWKV(
model=body.model,
strategy=body.strategy,
tokens_path=get_tokens_path(body.model),
tokens_path=tokenizer,
),
)
except Exception as e:
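A minimal sketch of the new custom-tokenizer switch, mirroring the `schema_extra` example above. The `/switch-model` route name is inferred from the `switch_model` handler and should be treated as an assumption; the tokenizer path is the example used in the UI strings below.

```python
# Minimal sketch: load a model with an explicit tokenizer via the new
# SwitchModelBody.tokenizer field. None or "" keeps auto-detection.
import requests

body = {
    "model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
    "strategy": "cuda fp16",
    "tokenizer": "backend-python/rwkv_pip/20B_tokenizer.json",
    "customCuda": False,
}
resp = requests.post("http://127.0.0.1:8000/switch-model", json=body)
print(resp.status_code)
```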

View File

@@ -96,7 +96,7 @@ def add_state(body: AddStateBody):
quick_log(
None,
None,
f"New Trie Id: {id}\nTrie Len: {len(trie)}\nTrie Buff Size: {trie.buff_size()}\nDtrie Buff Size Of Id: {_get_a_dtrie_buff_size(dtrie[id])}",
f"New Trie Id: {id}\nTrie Len: {len(trie)}\nTrie Buff Size: {trie.buff_size()}\nDtrie Buff Size Of Id: {__get_a_dtrie_buff_size(dtrie[id])}",
)
return "success"
except Exception as e:
@@ -124,7 +124,7 @@ class LongestPrefixStateBody(BaseModel):
prompt: str
def _get_a_dtrie_buff_size(dtrie_v):
def __get_a_dtrie_buff_size(dtrie_v):
# print(sys.getsizeof(dtrie_v["tokens"][0])) # str
# print(sys.getsizeof(dtrie_v["tokens"][0]) * len(dtrie_v["tokens"]))
# print(dtrie_v["state"][0][0].element_size())

View File

@@ -88,7 +88,7 @@ struct Mix {
using torch::Tensor;
void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
void gemm_fp16_cublas_tensor(Tensor a, Tensor b, Tensor c);
Tensor att_one(Tensor x, Tensor ln_w, Tensor ln_b, Tensor sx, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw,
@@ -105,9 +105,9 @@ Tensor att_one(Tensor x, Tensor ln_w, Tensor ln_b, Tensor sx, Tensor k_mix,
data_ptr<half>(vx), data_ptr<half>(rx)},
x.numel());
gemm_fp16_cublas(kx, kw, k);
gemm_fp16_cublas(vx, vw, v);
gemm_fp16_cublas(rx, rw, r);
gemm_fp16_cublas_tensor(kx, kw, k);
gemm_fp16_cublas_tensor(vx, vw, v);
gemm_fp16_cublas_tensor(rx, rw, r);
at::sigmoid_(r);
element_wise(WkvForwardOne{data_ptr<float>(t_first), data_ptr<float>(k),
@@ -118,7 +118,7 @@ Tensor att_one(Tensor x, Tensor ln_w, Tensor ln_b, Tensor sx, Tensor k_mix,
data_ptr<half>(r)},
x.numel());
gemm_fp16_cublas(r, ow, x_plus_out);
gemm_fp16_cublas_tensor(r, ow, x_plus_out);
x_plus_out += x;
return xx;
}

View File

@@ -0,0 +1,109 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "element_wise.h"
#include "util.h"
// Equivalent Python code:
// s1 = t_first * a + s
// s2 = a + t_decay * s
struct Fused1 {
const float *t_first;
const float *t_decay;
const float *a;
const float *s;
const int32_t inner_size;
/* out */ float *s1;
/* out */ float *s2;
__device__ void operator()(int i) const {
const int j = i / inner_size;
s1[i] = t_first[j] * a[i] + s[i];
s2[i] = a[i] + t_decay[j] * s[i];
}
};
/*
Equivalent Python code:
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
*/
struct Mix {
const half *xx;
const half *sx;
const half *k_mix;
const half *v_mix;
const half *r_mix;
/* out */ half *kx;
/* out */ half *vx;
/* out */ half *rx;
__device__ void operator()(int i) const {
half xx_ = xx[i];
half sx_ = sx[i];
half k_mix_ = k_mix[i];
half v_mix_ = v_mix[i];
half r_mix_ = r_mix[i];
kx[i] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
vx[i] = __hadd(__hmul(xx_, v_mix_),
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
rx[i] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
};
using torch::Tensor;
void gemm_fp16_cublas_tensor(Tensor a, Tensor b, Tensor c);
Tensor att_one_v5(Tensor x, Tensor sx, Tensor s, Tensor ln_w, Tensor ln_b,
Tensor lx_w, Tensor lx_b, Tensor k_mix, Tensor v_mix,
Tensor r_mix, Tensor kw,
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx,
Tensor rw,
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
/* imm */ Tensor k, Tensor t_decay, /* imm */ Tensor v,
/* imm */ Tensor r, /* imm */ Tensor s1,
/* out */ Tensor x_plus_out, /* out */ Tensor s2) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
element_wise(Mix{data_ptr<half>(xx), data_ptr<half>(sx),
data_ptr<half>(k_mix), data_ptr<half>(v_mix),
data_ptr<half>(r_mix), data_ptr<half>(kx),
data_ptr<half>(vx), data_ptr<half>(rx)},
x.numel());
int H = t_decay.size(0);
int S = x.size(-1) / H;
gemm_fp16_cublas_tensor(rx, rw, r);
r = at::reshape(r, {H, 1, S});
gemm_fp16_cublas_tensor(kx, kw, k);
k = at::reshape(k, {H, S, 1});
gemm_fp16_cublas_tensor(vx, vw, v);
v = at::reshape(v, {H, 1, S});
{
Tensor a = at::matmul(k, v);
// s1 = t_first * a + s
// s2 = a + t_decay * s
element_wise(Fused1{data_ptr<float>(t_first), data_ptr<float>(t_decay),
data_ptr<float>(a), data_ptr<float>(s),
static_cast<int32_t>(a.size(1) * a.size(2)),
data_ptr<float>(s1), data_ptr<float>(s2)},
a.numel());
}
Tensor out = at::matmul(r, s1);
out = at::flatten(out);
out = at::squeeze(at::group_norm(at::unsqueeze(out, 0), H, lx_w, lx_b), 0);
out = at::_cast_Half(out);
gemm_fp16_cublas_tensor(out, ow, x_plus_out);
x_plus_out += x;
return xx;
}

View File

@@ -8,7 +8,6 @@
using torch::Tensor;
void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
void gemm_fp16_cublas(const void *a, const void *b, void *c, int m,
int n, int k, bool output_fp32);

View File

@@ -70,11 +70,59 @@ void gemm_fp16_cublas(const void *a, const void *b, void *c, int ori_m,
cuda_c_data_type, cublas_ldc, compute_type, algo));
}
void gemm_fp16_cublas(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
// comptiable with rwkv one mode, 1-D tensor * 2-D tensor
const int m = a.dense_dim() == 1 ? 1 : a.size(0);
const int n = b.size(1);
const int k = b.size(0);
gemm_fp16_cublas(a.data_ptr(), b.data_ptr(), c.data_ptr(), m, n, k,
c.dtype() == torch::kFloat32);
/*
NOTE: blas gemm is column-major by default, but we need row-major output.
The data of row-major, transposed matrix is exactly the same as the
column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
*/
void gemm_fp16_cublas_tensor(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
if (a.sizes().size() == 1) {
assert(b.sizes().size() == 2);
a = at::unsqueeze(a, 0);
}
const auto cuda_data_type = CUDA_R_16F;
const auto cuda_c_data_type =
c.dtype() == torch::kFloat32 ? CUDA_R_32F : CUDA_R_16F;
const auto compute_type = CUDA_R_32F;
const float sp_alpha = 1.f;
// swap a and b, and use CUBLAS_OP_N. see the notes above
std::swap(a, b);
const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
// m = (B^T).size(0) = B.size(1), and = A.size(1) after swap,
// negative axis is used because of the existence of batch matmul.
const int m = a.size(-1);
const int k = a.size(-2);
const int n = b.size(-2);
const int cublas_lda = m;
const int cublas_ldb = k;
const int cublas_ldc = m;
cublasHandle_t cublas_handle = get_cublas_handle();
#if CUDA_VERSION >= 11000
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
#endif
const float sp_beta = 0.f;
if (a.sizes().size() == 2 && b.sizes().size() == 2) {
CUBLAS_CHECK(cublasGemmEx(
cublas_handle, cublas_trans_a, cublas_trans_b, m, n, k, &sp_alpha,
a.data_ptr(), cuda_data_type, cublas_lda, b.data_ptr(), cuda_data_type,
cublas_ldb, &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc,
compute_type, algo));
} else {
// batch matmul
assert(a.sizes().size() == 3 && b.sizes().size() == 3);
const long long int cublas_stride_a = m * k;
const long long int cublas_stride_b = k * n;
const long long int cublas_stride_c = m * n;
CUBLAS_CHECK(cublasGemmStridedBatchedEx(
cublas_handle, cublas_trans_a, cublas_trans_b, m,
n, k, &sp_alpha, a.data_ptr(), cuda_data_type, cublas_lda,
cublas_stride_a, b.data_ptr(), cuda_data_type, cublas_ldb, cublas_stride_b,
&sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc, cublas_stride_c,
a.size(0), compute_type, algo));
}
}
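The note above rests on a standard identity: a row-major buffer read as column-major is the transpose, and C = A*B implies C^T = B^T * A^T, so swapping the operands lets a column-major GEMM emit row-major output with no copies. A quick NumPy check of that identity (illustration only, not part of the diff):

```python
# C = A @ B  <=>  C.T = B.T @ A.T: this is why gemm_fp16_cublas_tensor
# swaps a and b yet still uses CUBLAS_OP_N on row-major buffers.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((3, 4)).astype(np.float32)
B = rng.standard_normal((4, 5)).astype(np.float32)

C = A @ B
assert np.allclose(C.T, B.T @ A.T)

# Reading A's row-major bytes with column-major strides yields A.T:
A_as_colmajor = np.frombuffer(A.tobytes(), dtype=np.float32)
A_as_colmajor = A_as_colmajor.reshape(A.T.shape, order="F")
assert np.allclose(A_as_colmajor, A.T)
print("identity holds")
```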

View File

@@ -118,7 +118,9 @@ void mm8_one(int64_t N, int64_t M,
using torch::Tensor;
void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
#ifndef DISABLE_CUBLAS_GEMM
void gemm_fp16_cublas_tensor(Tensor a, Tensor b, Tensor c);
#endif
Tensor att_one(Tensor x, Tensor ln_w, Tensor ln_b, Tensor sx, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw,
@@ -134,6 +136,16 @@ Tensor att_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor ow, Tensor t_first, Tensor pp, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor buf, /* out */ Tensor x_plus_out);
Tensor att_one_v5(Tensor x, Tensor sx, Tensor s, Tensor ln_w, Tensor ln_b,
Tensor lx_w, Tensor lx_b, Tensor k_mix, Tensor v_mix,
Tensor r_mix, Tensor kw,
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx,
Tensor rw,
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
/* imm */ Tensor k, Tensor t_decay, /* imm */ Tensor v,
/* imm */ Tensor r, /* imm */ Tensor s1,
/* out */ Tensor x_plus_out, /* out */ Tensor s2);
Tensor ffn_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
@@ -148,8 +160,9 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("wkv_forward", &wkv_forward, "wkv forward");
m.def("mm8_seq", &mm8_seq, "mm8 seq");
m.def("mm8_one", &mm8_one, "mm8 one");
m.def("gemm_fp16_cublas", &gemm_fp16_cublas, "gemv fp16 cublas");
m.def("gemm_fp16_cublas", &gemm_fp16_cublas_tensor, "gemv fp16 cublas");
m.def("att_one", &att_one, "att one");
m.def("att_one_v5", &att_one_v5, "att one v5");
m.def("att_seq", &att_seq, "att seq");
m.def("ffn_seq", &ffn_seq, "ffn seq");
m.def("ffn_one", &ffn_one, "ffn one");
@@ -159,8 +172,9 @@ TORCH_LIBRARY(rwkv, m) {
m.def("wkv_forward", wkv_forward);
m.def("mm8_seq", mm8_seq);
m.def("mm8_one", mm8_one);
m.def("gemm_fp16_cublas", gemm_fp16_cublas);
m.def("gemm_fp16_cublas", gemm_fp16_cublas_tensor);
m.def("att_one", att_one);
m.def("att_one_v5", &att_one_v5);
m.def("att_seq", att_seq);
m.def("ffn_seq", ffn_seq);
m.def("ffn_one", ffn_one);

View File

@@ -3,7 +3,7 @@
########################################################################################################
from typing import Optional
import types, gc, os, time, re
import types, gc, os, time, re, platform
import torch
from torch.nn import functional as F
@@ -91,6 +91,7 @@ if os.environ.get("RWKV_CUDA_ON") == "1":
f"{current_path}/cuda/att_one.cu",
f"{current_path}/cuda/att_seq.cu",
f"{current_path}/cuda/ffn.cu",
f"{current_path}/cuda/att_one_v5.cu",
],
verbose=True,
extra_cuda_cflags=[
@@ -149,26 +150,40 @@ if os.environ.get("RWKV_CUDA_ON") == "1":
torch.ops.rwkv.mm8_one(N, M, x, w, mx, rx, my, ry, y)
return y.to(dtype=x.dtype)
else:
os.environ["RWKV_CUDA_ON"] = "0"
if os.environ.get("RWKV_CUDA_ON") == "1":
@MyStatic
def gemm(a, b, output_dtype: Optional[torch.dtype] = None):
if output_dtype is None:
output_dtype = a.dtype
if a.dtype == b.dtype == torch.float16 and a.device.type == "cuda":
assert len(b.shape) == 2
if len(a.shape) == 1:
assert len(b.shape) == 2
c = torch.empty((b.shape[-1],), dtype=output_dtype, device=a.device)
a = a.unsqueeze(0)
else:
c = torch.empty(
(a.shape[0], b.shape[-1]), dtype=output_dtype, device=a.device
)
assert len(a.shape) == len(b.shape)
assert len(a.shape) == 2 or len(a.shape) == 3
# torch.empty((*a.shape[:-1], b.shape[-1])) doesn't work with jit
if len(a.shape) == 2:
c = torch.empty(
(a.shape[0], b.shape[-1]), dtype=output_dtype, device=a.device
)
else:
c = torch.empty(
(a.shape[0], a.shape[1], b.shape[-1]),
dtype=output_dtype,
device=a.device,
)
torch.ops.rwkv.gemm_fp16_cublas(a, b, c)
return c
else:
return (a @ b).to(output_dtype)
else:
os.environ["RWKV_CUDA_ON"] = "0"
def gemm(a, b, output_dtype: Optional[torch.dtype] = None):
if output_dtype is None:
@@ -217,7 +232,7 @@ class RWKV(MyModule):
) # load model to CPU first
# it is supported to load a pure meta-tensor state dict (e.g. for quick testing)
for k, v in self.w.items():
if v.is_meta:
if isinstance(v, torch.Tensor) and v.is_meta:
# torch.zeros_like(v, device='cpu') doesn't produce an all-zero tensor
# if v is a meta tensor
self.w[k] = torch.zeros(v.shape, dtype=v.dtype, device="cpu")
@@ -247,9 +262,14 @@ class RWKV(MyModule):
args.n_embd = w["emb.weight"].shape[1]
args.n_layer = 0
keys = list(w.keys())
self.version = 4
for x in keys:
layer_id = int(x.split(".")[1]) if ("blocks." in x) else 0
args.n_layer = max(args.n_layer, layer_id + 1)
if "ln_x" in x:
self.version = 5
if self.version == 5 and "att.time_decay" in x:
args.n_head = w[x].shape[0]
####################### Compute strategy
@@ -352,6 +372,20 @@ class RWKV(MyModule):
del w["blocks.0.ln0.bias"]
print_need_newline = False
REAL_TIME_FIRST = False
for x in list(w.keys()):
if ".time_faaaa" in x:
REAL_TIME_FIRST = True
if REAL_TIME_FIRST:
w = {
k.replace(".time_faaaa", ".time_first")
if ".time_faaaa" in k
else k: v
for k, v in w.items()
}
self.w = w
keys = list(w.keys())
for x in keys:
w[x].requires_grad = False
@@ -382,8 +416,19 @@ class RWKV(MyModule):
w[x] = w[x].t()
if ".time_decay" in x: # need fp32 for this
w[x] = -torch.exp(w[x].float())
if self.version == 4:
w[x] = -torch.exp(w[x].float())
elif self.version == 5:
w[x] = torch.exp(-torch.exp(w[x].float())).reshape(-1, 1, 1)
elif ".time_first" in x: # need fp32 for this
if self.version == 4:
w[x] = w[x].float()
elif self.version == 5:
if REAL_TIME_FIRST:
w[x] = w[x].float().reshape(-1, 1, 1)
else:
w[x] = torch.exp(w[x].float()).reshape(-1, 1, 1)
elif ".ln_x" in x: # need fp32 for group_norm
w[x] = w[x].float()
else:
if (len(w[x].shape) == 2) and ("emb" not in x):
@@ -931,6 +976,147 @@ class RWKV(MyModule):
########################################################################################################
@MyFunction
def att_one_v5(
self,
x,
sx,
s,
ln_w,
ln_b,
lx_w,
lx_b,
k_mix,
v_mix,
r_mix,
t_decay,
t_first,
kw,
vw,
rw,
ow,
kmx,
krx,
kmy,
kry,
vmx,
vrx,
vmy,
vry,
rmx,
rrx,
rmy,
rry,
omx,
orx,
omy,
ory,
):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
H = t_decay.shape[0]
S = x.shape[-1] // H
r = gemm(rx, rw, output_dtype=torch.float32).view(H, 1, S)
k = gemm(kx, kw, output_dtype=torch.float32).view(H, S, 1)
v = gemm(vx, vw, output_dtype=torch.float32).view(H, 1, S)
a = gemm(k, v)
out = r @ (t_first * a + s)
s = a + t_decay * s
out = out.flatten()
out = F.group_norm(
out.unsqueeze(0), num_groups=H, weight=lx_w, bias=lx_b
).squeeze(0)
out = out.to(dtype=x.dtype)
out = gemm(out, ow)
return x + out, xx, s
@MyFunction
def att_seq_v5(
self,
x,
sx,
s,
ln_w,
ln_b,
lx_w,
lx_b,
k_mix,
v_mix,
r_mix,
t_decay,
t_first,
kw,
vw,
rw,
ow,
kmx,
krx,
kmy,
kry,
vmx,
vrx,
vmy,
vry,
rmx,
rrx,
rmy,
rry,
omx,
orx,
omy,
ory,
):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1, :]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
H = t_decay.shape[0]
S = x.shape[-1] // H
T = x.shape[0]
w = t_decay.reshape(-1, 1)
u = t_first.reshape(-1, 1)
ws = w.pow(T).reshape(H, 1, 1)
ind = torch.arange(T - 1, -1, -1, device=w.device).unsqueeze(0).repeat(H, 1)
w = w.repeat(1, T).pow(ind)
wk = w.reshape(H, 1, T)
wb = wk.transpose(-2, -1).flip(1)
w = torch.cat([w[:, 1:], u], dim=1)
w = F.pad(w, (0, T))
w = torch.tile(w, [T])
w = w[:, :-T].reshape(-1, T, 2 * T - 1)
w = w[:, :, T - 1 :].reshape(H, T, T)
r = gemm(rx, rw, output_dtype=torch.float32).view(T, H, S).transpose(0, 1)
k = (
gemm(kx, kw, output_dtype=torch.float32)
.view(T, H, S)
.transpose(0, 1)
.transpose(-2, -1)
)
v = gemm(vx, vw, output_dtype=torch.float32).view(T, H, S).transpose(0, 1)
out = ((r @ k) * w) @ v + (r @ s) * wb
s = ws * s + (k * wk) @ v
out = out.transpose(0, 1).contiguous().reshape(T, H * S)
out = F.group_norm(out, num_groups=H, weight=lx_w, bias=lx_b)
out = out.to(dtype=x.dtype)
out = gemm(out, ow)
return x + out, xx[-1, :], s
########################################################################################################
if os.environ["RWKV_CUDA_ON"] == "1":
@MyFunction
@@ -1140,7 +1326,7 @@ class RWKV(MyModule):
xx = torch.ops.rwkv.ffn_seq(
x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, buf, x_plus_out
)
return x_plus_out, xx[-1:]
return x_plus_out, xx[-1, :]
@MyFunction
def cuda_att_one_fp16(
@@ -1220,6 +1406,86 @@ class RWKV(MyModule):
)
return x_plus_out_t, xx, t1_t, t2_t, p_t
@MyFunction
def cuda_att_one_v5_fp16(
self,
x,
sx,
s,
ln_w,
ln_b,
lx_w,
lx_b,
k_mix,
v_mix,
r_mix,
t_decay,
t_first,
kw,
vw,
rw,
ow,
kmx,
krx,
kmy,
kry,
vmx,
vrx,
vmy,
vry,
rmx,
rrx,
rmy,
rry,
omx,
orx,
omy,
ory,
):
kx = torch.empty_like(x)
vx = torch.empty_like(x)
rx = torch.empty_like(x)
H = t_decay.shape[0]
S = x.shape[-1] // H
r = torch.empty((H * S,), dtype=torch.float32, device=x.device)
k = torch.empty((H * S,), dtype=torch.float32, device=x.device)
v = torch.empty((H * S,), dtype=torch.float32, device=x.device)
s1 = torch.empty((H, S, S), dtype=torch.float32, device=x.device)
s2 = torch.empty((H, S, S), dtype=torch.float32, device=x.device)
x_plus_out = torch.empty_like(x)
xx = torch.ops.rwkv.att_one_v5(
x,
sx,
s,
ln_w,
ln_b,
lx_w,
lx_b,
k_mix,
v_mix,
r_mix,
kw,
kx,
vw,
vx,
rw,
rx,
ow,
t_first,
k,
t_decay,
v,
r,
s1,
x_plus_out,
s2,
)
return x_plus_out, xx, s2
@MyFunction
def cuda_ffn_one_fp16(
self,
@@ -1265,34 +1531,63 @@ class RWKV(MyModule):
args = self.args
if state == None:
state = [None] * args.n_layer * 5
for i in range(
args.n_layer
): # state: 0=att_xx 1=att_aa 2=att_bb 3=att_pp 4=ffn_xx
dd = self.strategy[i]
dev = dd.device
atype = dd.atype
state[i * 5 + 0] = torch.zeros(
args.n_embd, dtype=atype, requires_grad=False, device=dev
).contiguous()
state[i * 5 + 1] = torch.zeros(
args.n_embd, dtype=torch.float, requires_grad=False, device=dev
).contiguous()
state[i * 5 + 2] = torch.zeros(
args.n_embd, dtype=torch.float, requires_grad=False, device=dev
).contiguous()
state[i * 5 + 3] = (
torch.zeros(
if self.version == 4:
state = [None] * args.n_layer * 5
for i in range(
args.n_layer
): # state: 0=att_xx 1=att_aa 2=att_bb 3=att_pp 4=ffn_xx
dd = self.strategy[i]
dev = dd.device
atype = dd.atype
state[i * 5 + 0] = torch.zeros(
args.n_embd, dtype=atype, requires_grad=False, device=dev
).contiguous()
state[i * 5 + 1] = torch.zeros(
args.n_embd,
dtype=torch.float,
requires_grad=False,
device=dev,
).contiguous()
- 1e30
)
state[i * 5 + 4] = torch.zeros(
args.n_embd, dtype=atype, requires_grad=False, device=dev
).contiguous()
state[i * 5 + 2] = torch.zeros(
args.n_embd,
dtype=torch.float,
requires_grad=False,
device=dev,
).contiguous()
state[i * 5 + 3] = (
torch.zeros(
args.n_embd,
dtype=torch.float,
requires_grad=False,
device=dev,
).contiguous()
- 1e30
)
state[i * 5 + 4] = torch.zeros(
args.n_embd, dtype=atype, requires_grad=False, device=dev
).contiguous()
elif self.version == 5:
state = [None] * args.n_layer * 3
for i in range(args.n_layer): # state: 0=att_xx 1=att_kv 2=ffn_xx
dd = self.strategy[i]
dev = dd.device
atype = dd.atype
state[i * 3 + 0] = torch.zeros(
args.n_embd, dtype=atype, requires_grad=False, device=dev
).contiguous()
state[i * 3 + 1] = torch.zeros(
(
args.n_head,
args.n_embd // args.n_head,
args.n_embd // args.n_head,
),
dtype=torch.float,
requires_grad=False,
device=dev,
).contiguous()
state[i * 3 + 2] = torch.zeros(
args.n_embd, dtype=atype, requires_grad=False, device=dev
).contiguous()
seq_mode = len(tokens) > 1
@@ -1317,9 +1612,13 @@ class RWKV(MyModule):
ATT = self.cuda_att_seq_i8
else:
ATT = self.cuda_att_seq_naive
if self.version == 5:
ATT = self.att_seq_v5
else:
ATT = self.att_one if wtype != torch.uint8 else self.att_one_i8
FFN = self.ffn_one if wtype != torch.uint8 else self.ffn_one_i8
if self.version == 5:
ATT = self.att_one_v5
if (
"cuda" in str(dev)
and os.environ["RWKV_CUDA_ON"] == "1"
@@ -1327,6 +1626,8 @@ class RWKV(MyModule):
):
ATT = self.cuda_att_one_fp16
FFN = self.cuda_ffn_one_fp16
if self.version == 5:
ATT = self.cuda_att_one_v5_fp16
x = x.to(dtype=atype, device=dev)
@@ -1355,46 +1656,82 @@ class RWKV(MyModule):
orx = w[f"{att}output.weight_rx"] if wtype == torch.uint8 else x
omy = w[f"{att}output.weight_my"] if wtype == torch.uint8 else x
ory = w[f"{att}output.weight_ry"] if wtype == torch.uint8 else x
(
x,
state[i * 5 + 0],
state[i * 5 + 1],
state[i * 5 + 2],
state[i * 5 + 3],
) = ATT(
x,
state[i * 5 + 0],
state[i * 5 + 1],
state[i * 5 + 2],
state[i * 5 + 3],
w[f"{bbb}ln1.weight"],
w[f"{bbb}ln1.bias"],
w[f"{att}time_mix_k"],
w[f"{att}time_mix_v"],
w[f"{att}time_mix_r"],
w[f"{att}time_decay"],
w[f"{att}time_first"],
kw,
vw,
rw,
ow,
kmx,
krx,
kmy,
kry,
vmx,
vrx,
vmy,
vry,
rmx,
rrx,
rmy,
rry,
omx,
orx,
omy,
ory,
)
if self.version == 4:
(
x,
state[i * 5 + 0],
state[i * 5 + 1],
state[i * 5 + 2],
state[i * 5 + 3],
) = ATT(
x,
state[i * 5 + 0],
state[i * 5 + 1],
state[i * 5 + 2],
state[i * 5 + 3],
w[f"{bbb}ln1.weight"],
w[f"{bbb}ln1.bias"],
w[f"{att}time_mix_k"],
w[f"{att}time_mix_v"],
w[f"{att}time_mix_r"],
w[f"{att}time_decay"],
w[f"{att}time_first"],
kw,
vw,
rw,
ow,
kmx,
krx,
kmy,
kry,
vmx,
vrx,
vmy,
vry,
rmx,
rrx,
rmy,
rry,
omx,
orx,
omy,
ory,
)
elif self.version == 5:
x, state[i * 3 + 0], state[i * 3 + 1] = ATT(
x,
state[i * 3 + 0],
state[i * 3 + 1],
w[f"{bbb}ln1.weight"],
w[f"{bbb}ln1.bias"],
w[f"{att}ln_x.weight"],
w[f"{att}ln_x.bias"],
w[f"{att}time_mix_k"],
w[f"{att}time_mix_v"],
w[f"{att}time_mix_r"],
w[f"{att}time_decay"],
w[f"{att}time_first"],
kw,
vw,
rw,
ow,
kmx,
krx,
kmy,
kry,
vmx,
vrx,
vmy,
vry,
rmx,
rrx,
rmy,
rry,
omx,
orx,
omy,
ory,
)
if dd.stream:
del kw, vw, rw, ow
@@ -1417,9 +1754,13 @@ class RWKV(MyModule):
rrx = w[f"{ffn}receptance.weight_rx"] if wtype == torch.uint8 else x
rmy = w[f"{ffn}receptance.weight_my"] if wtype == torch.uint8 else x
rry = w[f"{ffn}receptance.weight_ry"] if wtype == torch.uint8 else x
x, state[i * 5 + 4] = FFN(
if self.version == 4:
offset = i * 5 + 4
elif self.version == 5:
offset = i * 3 + 2
x, state[offset] = FFN(
x,
state[i * 5 + 4],
state[offset],
w[f"{bbb}ln2.weight"],
w[f"{bbb}ln2.bias"],
w[f"{ffn}time_mix_k"],

View File

@@ -1,6 +1,6 @@
For Mac and Linux users, please manually install Python 3.10 (usually the latest systems come with it built-in). You can specify the Python interpreter to use in Settings.
对于Mac和Linux用户请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器.
MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。
For Mac and Linux users, please manually install Python 3.10 (usually the latest systems come with it built-in). You can specify the Python interpreter to use in Settings. (which python3)
对于Mac和Linux用户请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器. (which python3)
MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。 (which python3)
Please execute this program in an empty directory. All related dependencies will be placed in this directory.
请将本程序放在一个空目录内执行, 所有相关依赖均会放置于此目录.

View File

@@ -1,3 +1,5 @@
echo $@
if [[ ${cnMirror} == 1 ]]; then
export PIP_INDEX_URL="https://pypi.tuna.tsinghua.edu.cn/simple"
if grep -q "mirrors.aliyun.com" /etc/apt/sources.list; then

View File

@@ -184,7 +184,7 @@ if __name__ == "__main__":
args.num_sanity_val_steps = 0
args.check_val_every_n_epoch = int(1e20)
args.log_every_n_steps = int(1e20)
args.max_epochs = -1 # continue forever
args.max_epochs = args.epoch_count # continue forever
args.betas = (args.beta1, args.beta2)
args.real_bsz = int(args.num_nodes) * int(args.devices) * args.micro_bsz
os.environ["RWKV_T_MAX"] = str(args.ctx_len)
@@ -373,7 +373,7 @@ if __name__ == "__main__":
for param in module.parameters():
param.requires_grad = True
elif enable_time_finetune and any(
n.startswith("time") for n, _ in module.named_parameters()
n.startswith("time") for n, _ in module.named_parameters()
):
for pname, param in module.named_parameters():
if pname.startswith("time"):
@@ -381,7 +381,7 @@ if __name__ == "__main__":
param.requires_grad = True
if (
len(args.load_model) == 0 or args.my_pile_stage == 1
len(args.load_model) == 0 or args.my_pile_stage == 1
): # shall we build the initial weights?
init_weight_name = f"{args.proj_dir}/rwkv-init.pth"
generate_init_weight(model, init_weight_name) # save initial weights
@@ -423,8 +423,8 @@ if __name__ == "__main__":
)
if (
args.lr_init > 1e-4
or trainer.world_size * args.micro_bsz * trainer.accumulate_grad_batches < 8
args.lr_init > 1e-4
or trainer.world_size * args.micro_bsz * trainer.accumulate_grad_batches < 8
):
if "I_KNOW_WHAT_IM_DOING" in os.environ:
if trainer.global_rank == 0:
@@ -459,10 +459,10 @@ if __name__ == "__main__":
if "deepspeed" in args.strategy:
trainer.strategy.config["zero_optimization"]["allgather_bucket_size"] = (
args.ds_bucket_mb * 1000 * 1000
args.ds_bucket_mb * 1000 * 1000
)
trainer.strategy.config["zero_optimization"]["reduce_bucket_size"] = (
args.ds_bucket_mb * 1000 * 1000
args.ds_bucket_mb * 1000 * 1000
)
# must set shuffle=False, persistent_workers=False (because worker is in another thread)

View File

@@ -178,7 +178,7 @@
"Failed to import. Please copy a preset to the clipboard.": "インポートに失敗しました。プリセットをクリップボードにコピーしてください。",
"Clipboard is empty.": "クリップボードが空です。",
"Successfully copied to clipboard.": "クリップボードにコピーしました。",
"Edit Messages": "メッセージの編集",
"Edit Character Settings": "キャラクター設定を編集",
"Go Back": "戻る",
"Description": "説明",
"Avatar Url": "アバターURL",
@@ -226,7 +226,7 @@
"Please select a LoRA model": "LoRAモデルを選択してください",
"You are using sample data for training. For formal training, please make sure to create your own jsonl file.": "トレーニングにはサンプルデータを使用しています。正式なトレーニングのためには、自身でjsonlファイルを作成してください。",
"WSL is not running, please retry. If it keeps happening, it means you may be using an outdated version of WSL, run \"wsl --update\" to update.": "WSLが実行されていません、もう一度試してください。これが続く場合、古いバージョンのWSLを使用している可能性があります。\"wsl --update\"を実行して更新してください。",
"Memory is not enough, try to increase the virtual memory or use a smaller base model.": "メモリが不足しています、仮想メモリを増やすか小さなベースモデルを使用してみてください。",
"Memory is not enough, try to increase the virtual memory (Swap of WSL) or use a smaller base model.": "メモリが不足しています、仮想メモリ (WSL Swap) を増やすか小さなベースモデルを使用してみてください。",
"VRAM is not enough": "ビデオRAMが不足しています",
"Training data is not enough, reduce context length or add more data for training": "トレーニングデータが不足しています、コンテキストの長さを減らすか、トレーニング用のデータをさらに追加してください",
"You are using WSL 1 for training, please upgrade to WSL 2. e.g. Run \"wsl --set-version Ubuntu-22.04 2\"": "トレーニングにWSL 1を使用しています、WSL 2にアップグレードしてください。例:\"wsl --set-version Ubuntu-22.04 2\"を実行する",
@@ -244,5 +244,14 @@
"Failed to load local sound font, please check if the files exist - assets/sound-font": "ローカルサウンドフォントの読み込みに失敗しました、ファイルが存在するか確認してください - assets/sound-font",
"Please convert model to safe tensors format first": "モデルを安全なテンソル形式に変換してください",
"Convert To Safe Tensors Format": "安全なテンソル形式に変換",
"Please change Strategy to WebGPU to use safetensors format": "StrategyをWebGPUに変更して、安全なテンソル形式を使用してください"
"Please change Strategy to WebGPU to use safetensors format": "StrategyをWebGPUに変更して、安全なテンソル形式を使用してください",
"Preview Only": "プレビューのみ",
"RAM": "RAM",
"VRAM": "VRAM",
"GPU Usage": "GPU使用率",
"Use Custom Tokenizer": "カスタムトークナイザーを使用する",
"Tokenizer Path (e.g. backend-python/rwkv_pip/20B_tokenizer.json)": "トークナイザーパス (例: backend-python/rwkv_pip/20B_tokenizer.json)",
"User Name": "ユーザー名",
"Assistant Name": "アシスタント名",
"Insert default system prompt at the beginning": "最初にデフォルトのシステムプロンプトを挿入"
}

View File

@@ -178,7 +178,7 @@
"Failed to import. Please copy a preset to the clipboard.": "导入失败。请复制一个预设到剪贴板",
"Clipboard is empty.": "剪贴板没有内容",
"Successfully copied to clipboard.": "成功复制到剪贴板",
"Edit Messages": "编辑对话",
"Edit Character Settings": "编辑人设",
"Go Back": "返回",
"Description": "描述",
"Avatar Url": "头像图片地址",
@@ -226,7 +226,7 @@
"Please select a LoRA model": "请选择一个LoRA模型",
"You are using sample data for training. For formal training, please make sure to create your own jsonl file.": "你正在使用示例数据训练对于正式训练场合请务必创建你自己的jsonl训练数据",
"WSL is not running, please retry. If it keeps happening, it means you may be using an outdated version of WSL, run \"wsl --update\" to update.": "WSL没有运行请重试。如果一直出现此错误意味着你可能正在使用旧版本的WSL请在cmd执行\"wsl --update\"以更新",
"Memory is not enough, try to increase the virtual memory or use a smaller base model.": "内存不足,尝试增加虚拟内存,或使用一个更小规模的基底模型",
"Memory is not enough, try to increase the virtual memory (Swap of WSL) or use a smaller base model.": "内存不足,尝试增加虚拟内存(WSL Swap),或使用一个更小规模的基底模型",
"VRAM is not enough": "显存不足",
"Training data is not enough, reduce context length or add more data for training": "训练数据不足,请减小上下文长度或增加训练数据",
"You are using WSL 1 for training, please upgrade to WSL 2. e.g. Run \"wsl --set-version Ubuntu-22.04 2\"": "你正在使用WSL 1进行训练请升级到WSL 2。例如运行\"wsl --set-version Ubuntu-22.04 2\"",
@@ -244,5 +244,14 @@
"Failed to load local sound font, please check if the files exist - assets/sound-font": "加载本地音色资源失败,请检查文件是否存在 - assets/sound-font",
"Please convert model to safe tensors format first": "请先将模型转换为Safetensors格式",
"Convert To Safe Tensors Format": "转换为Safetensors格式",
"Please change Strategy to WebGPU to use safetensors format": "请将Strategy改为WebGPU以使用safetensors格式"
"Please change Strategy to WebGPU to use safetensors format": "请将Strategy改为WebGPU以使用safetensors格式",
"Preview Only": "仅预览",
"RAM": "内存",
"VRAM": "显存",
"GPU Usage": "GPU占用",
"Use Custom Tokenizer": "使用自定义Tokenizer",
"Tokenizer Path (e.g. backend-python/rwkv_pip/20B_tokenizer.json)": "Tokenizer路径 (例如: backend-python/rwkv_pip/20B_tokenizer.json)",
"User Name": "用户名称",
"Assistant Name": "AI名称",
"Insert default system prompt at the beginning": "在开头自动插入默认系统提示"
}

View File

@@ -186,6 +186,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
switchModel({
model: modelPath,
strategy: strategy,
tokenizer: modelConfig.modelParameters.useCustomTokenizer ? modelConfig.modelParameters.customTokenizer : undefined,
customCuda: customCudaFile !== ''
}).then(async (r) => {
if (r.ok) {

View File

@@ -312,7 +312,10 @@ const ChatPanel: FC = observer(() => {
stream: true,
model: commonStore.settings.apiChatModelName, // 'gpt-3.5-turbo'
temperature: apiParams.temperature,
top_p: apiParams.topP
top_p: apiParams.topP,
user_name: commonStore.activePreset?.userName,
assistant_name: commonStore.activePreset?.assistantName,
presystem: commonStore.activePreset?.presystem
}),
signal: chatSseController?.signal,
onmessage(e) {

View File

@@ -1,6 +1,19 @@
import { Dropdown, Input, Label, Option, Select, Switch, Text } from '@fluentui/react-components';
import {
Accordion,
AccordionHeader,
AccordionItem,
AccordionPanel,
Checkbox,
Dropdown,
Input,
Label,
Option,
Select,
Switch,
Text
} from '@fluentui/react-components';
import { AddCircle20Regular, DataUsageSettings20Regular, Delete20Regular, Save20Regular } from '@fluentui/react-icons';
import React, { FC } from 'react';
import React, { FC, useEffect, useRef } from 'react';
import { Section } from '../components/Section';
import { Labeled } from '../components/Labeled';
import { ToolTipButton } from '../components/ToolTipButton';
@@ -43,6 +56,8 @@ export type ModelParameters = {
maxStoredLayers: number;
useCustomCuda?: boolean;
customStrategy?: string;
useCustomTokenizer?: boolean;
customTokenizer?: string;
}
export type ModelConfig = {
@@ -57,10 +72,16 @@ export const Configs: FC = observer(() => {
const [selectedIndex, setSelectedIndex] = React.useState(commonStore.currentModelConfigIndex);
const [selectedConfig, setSelectedConfig] = React.useState(commonStore.modelConfigs[selectedIndex]);
const [displayStrategyImg, setDisplayStrategyImg] = React.useState(false);
const advancedHeaderRef = useRef<HTMLDivElement>(null);
const mq = useMediaQuery('(min-width: 640px)');
const navigate = useNavigate();
const port = selectedConfig.apiParameters.apiPort;
useEffect(() => {
if (advancedHeaderRef.current)
(advancedHeaderRef.current.firstElementChild as HTMLElement).style.padding = '0';
}, []);
const updateSelectedIndex = (newIndex: number) => {
setSelectedIndex(newIndex);
setSelectedConfig(commonStore.modelConfigs[newIndex]);
@@ -130,7 +151,7 @@ export const Configs: FC = observer(() => {
setSelectedIndex(0);
setSelectedConfig(commonStore.modelConfigs[0]);
}} />
<ToolTipButton desc={mq ? '' : t('Save Config')} icon={<Save20Regular />} text={mq ? t('Save Config') : ''}
<ToolTipButton desc={mq ? '' : t('Save Config')} icon={<Save20Regular />} text={mq ? t('Save Config') : null}
onClick={onClickSave} />
</div>
<div className="flex items-center gap-4">
@@ -412,6 +433,40 @@ export const Configs: FC = observer(() => {
}} />
} />
}
{selectedConfig.modelParameters.device !== 'WebGPU' &&
<Accordion className="sm:col-span-2" collapsible
openItems={!commonStore.modelParamsCollapsed && 'advanced'}
onToggle={(e, data) => {
if (data.value === 'advanced')
commonStore.setModelParamsCollapsed(!commonStore.modelParamsCollapsed);
}}>
<AccordionItem value="advanced">
<AccordionHeader ref={advancedHeaderRef} size="small">{t('Advanced')}</AccordionHeader>
<AccordionPanel>
<div className="flex flex-col">
<div className="flex grow">
<Checkbox className="select-none"
size="large" label={t('Use Custom Tokenizer')}
checked={selectedConfig.modelParameters.useCustomTokenizer}
onChange={(_, data) => {
setSelectedConfigModelParams({
useCustomTokenizer: data.checked as boolean
});
}} />
<Input className="grow"
placeholder={t('Tokenizer Path (e.g. backend-python/rwkv_pip/20B_tokenizer.json)')!}
value={selectedConfig.modelParameters.customTokenizer}
onChange={(e, data) => {
setSelectedConfigModelParams({
customTokenizer: data.value
});
}} />
</div>
</div>
</AccordionPanel>
</AccordionItem>
</Accordion>
}
</div>
}
/>

View File

@@ -1,10 +1,10 @@
import React, { FC } from 'react';
import React, { FC, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { Page } from '../components/Page';
import { observer } from 'mobx-react-lite';
import commonStore from '../stores/commonStore';
import { Divider, Field, ProgressBar } from '@fluentui/react-components';
import { bytesToGb, bytesToKb, bytesToMb } from '../utils';
import { bytesToGb, bytesToKb, bytesToMb, refreshLocalModels } from '../utils';
import { ToolTipButton } from '../components/ToolTipButton';
import { Folder20Regular, Pause20Regular, Play20Regular } from '@fluentui/react-icons';
import { AddToDownloadList, OpenFileFolder, PauseDownload } from '../../wailsjs/go/backend_golang/App';
@@ -23,6 +23,12 @@ export type DownloadStatus = {
export const Downloads: FC = observer(() => {
const { t } = useTranslation();
const finishedModelsLen = commonStore.downloadList.filter((status) => status.done && status.name.endsWith('.pth')).length;
useEffect(() => {
if (finishedModelsLen > 0)
refreshLocalModels({ models: commonStore.modelSourceList }, false);
console.log('finishedModelsLen:', finishedModelsLen);
}, [finishedModelsLen]);
let displayList = commonStore.downloadList.slice();
const downloadListNames = displayList.map(s => s.name);

View File

@@ -56,6 +56,9 @@ export type Preset = {
stop: string,
injectStart: string,
injectEnd: string,
presystem?: boolean,
userName?: string,
assistantName?: string
}
export const defaultPreset: Preset = {
@@ -250,14 +253,41 @@ export const ChatPresetEditor: FC<{
}} />
<Button onClick={() => {
setEditingMessages(!editingMessages);
}}>{!editingMessages ? t('Edit Messages') : t('Go Back')}</Button>
}}>{!editingMessages ? t('Edit Character Settings') : t('Go Back')}</Button>
</div>
} />
{
editingMessages ?
<MessagesEditor /> :
<div className="flex flex-col gap-1">
<Labeled flex spaceBetween label={t('Insert default system prompt at the beginning')}
content={
<Switch checked={editingPreset.presystem === undefined ? true : editingPreset.presystem}
onChange={(e, data) => {
setEditingPreset({
presystem: data.checked
});
}} />
} />
<Labeled flex breakline label={t('User Name')}
content={
<Input placeholder="User" value={editingPreset.userName} onChange={(e, data) => {
setEditingPreset({
userName: data.value
});
}} />
} />
<Labeled flex breakline label={t('Assistant Name')}
content={
<Input placeholder="Assistant" value={editingPreset.assistantName} onChange={(e, data) => {
setEditingPreset({
assistantName: data.value
});
}} />
} />
<MessagesEditor />
</div> :
<div className="flex flex-col gap-1 p-2 overflow-x-hidden overflow-y-auto">
<Labeled flex breakline label={t('Description')}
<Labeled flex breakline label={`${t('Description')} (${t('Preview Only')})`}
content={
<Input value={editingPreset.desc} onChange={(e, data) => {
setEditingPreset({

View File

@@ -154,7 +154,7 @@ const showError = (e: any) => {
};
const errorsMap = Object.entries({
'python3 ./finetune/lora/train.py': 'Memory is not enough, try to increase the virtual memory or use a smaller base model.',
'python3 ./finetune/lora/train.py': 'Memory is not enough, try to increase the virtual memory (Swap of WSL) or use a smaller base model.',
'cuda out of memory': 'VRAM is not enough',
'valueerror: high <= 0': 'Training data is not enough, reduce context length or add more data for training',
'+= \'+ptx\'': 'You are using WSL 1 for training, please upgrade to WSL 2. e.g. Run "wsl --set-version Ubuntu-22.04 2"',
@@ -219,7 +219,7 @@ const Terminal: FC = observer(() => {
WslStart().then(() => {
addWslMessage('WSL> ' + input);
setInput('');
WslCommand(input).catch(showError);
WslCommand(input).then(WindowShow).catch(showError);
}).catch(showError);
}
};

View File

@@ -2,11 +2,12 @@ import commonStore, { Platform } from './stores/commonStore';
import { GetPlatform, ListDirFiles, ReadJson } from '../wailsjs/go/backend_golang/App';
import { Cache, checkUpdate, downloadProgramFiles, LocalConfig, refreshLocalModels, refreshModels } from './utils';
import { getStatus } from './apis';
import { EventsOn } from '../wailsjs/runtime';
import { EventsOn, WindowSetTitle } from '../wailsjs/runtime';
import manifest from '../../manifest.json';
import { defaultModelConfigs, defaultModelConfigsMac } from './pages/defaultConfigs';
import { Preset } from './pages/PresetsManager/PresetsButton';
import { wslHandler } from './pages/Train';
import { t } from 'i18next';
export async function startup() {
downloadProgramFiles();
@@ -23,6 +24,8 @@ export async function startup() {
initPresets();
initHardwareMonitor();
await GetPlatform().then(p => commonStore.setPlatform(p as Platform));
await initConfig();
@@ -117,3 +120,20 @@ async function initLocalModelsNotify() {
refreshLocalModels({ models: commonStore.modelSourceList }, false); //TODO fix bug that only add models
});
}
type monitorData = {
usedMemory: number;
totalMemory: number;
gpuUsage: number;
gpuPower: number;
usedVram: number;
totalVram: number;
}
async function initHardwareMonitor() {
EventsOn('monitor', (data: string) => {
const results: monitorData = JSON.parse(data);
if (results)
WindowSetTitle(`RWKV-Runner (${t('RAM')}: ${results.usedMemory.toFixed(1)}/${results.totalMemory.toFixed(1)} GB, ${t('VRAM')}: ${(results.usedVram / 1024).toFixed(1)}/${(results.totalVram / 1024).toFixed(1)} GB, ${t('GPU Usage')}: ${results.gpuUsage}%)`);
});
}

View File

@@ -74,6 +74,7 @@ class CommonStore {
// configs
currentModelConfigIndex: number = 0;
modelConfigs: ModelConfig[] = [];
modelParamsCollapsed: boolean = true;
// models
modelSourceManifestList: string = 'https://cdn.jsdelivr.net/gh/josstorer/RWKV-Runner@master/manifest.json;';
modelSourceList: ModelSourceItem[] = [];
@@ -259,6 +260,10 @@ class CommonStore {
this.advancedCollapsed = value;
}
setModelParamsCollapsed(value: boolean) {
this.modelParamsCollapsed = value;
}
setLastUnfinishedModelDownloads(value: DownloadStatus[]) {
this.lastUnfinishedModelDownloads = value;
}

go.mod
View File

@@ -4,15 +4,16 @@ go 1.20
require (
github.com/cavaliergopher/grab/v3 v3.0.1
github.com/fsnotify/fsnotify v1.6.0
github.com/minio/selfupdate v0.6.0
github.com/nyaosorg/go-windows-su v0.2.1
github.com/ubuntu/gowsl v0.0.0-20230615094051-94945650cc1e
github.com/wailsapp/wails/v2 v2.5.1
github.com/wailsapp/wails/v2 v2.6.0
)
require (
aead.dev/minisign v0.2.0 // indirect
github.com/bep/debounce v1.2.1 // indirect
github.com/fsnotify/fsnotify v1.6.0
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/jchv/go-winloader v0.0.0-20210711035445-715c2860da7e // indirect
@@ -22,8 +23,7 @@ require (
github.com/leaanthony/gosod v1.0.3 // indirect
github.com/leaanthony/slicer v1.6.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/nyaosorg/go-windows-su v0.2.1
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
@@ -33,9 +33,10 @@ require (
github.com/ubuntu/decorate v0.0.0-20230125165522-2d5b0a9bb117 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
github.com/wailsapp/go-webview2 v1.0.1 // indirect
github.com/wailsapp/mimetype v1.4.1 // indirect
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sys v0.9.0 // indirect
golang.org/x/text v0.9.0 // indirect

go.sum
View File

@@ -36,8 +36,8 @@ github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/minio/selfupdate v0.6.0 h1:i76PgT0K5xO9+hjzKcacQtO7+MjJ4JKA8Ak8XQ9DDwU=
github.com/minio/selfupdate v0.6.0/go.mod h1:bO02GTIPCMQFTEvE5h4DjYB58bCoZ35XLeBf0buTDdM=
github.com/nyaosorg/go-windows-su v0.2.1 h1:5V0XavLyjOqPUp7psxxCvBISaneU4XmFPSMlejSl5sc=
@@ -69,17 +69,19 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/wailsapp/go-webview2 v1.0.1 h1:dEJIeEApW/MhO2tTMISZBFZPuW7kwrFA1NtgFB1z1II=
github.com/wailsapp/go-webview2 v1.0.1/go.mod h1:Uk2BePfCRzttBBjFrBmqKGJd41P6QIHeV9kTgIeOZNo=
github.com/wailsapp/mimetype v1.4.1 h1:pQN9ycO7uo4vsUUuPeHEYoUkLVkaRntMnHJxVwYhwHs=
github.com/wailsapp/mimetype v1.4.1/go.mod h1:9aV5k31bBOv5z6u+QP8TltzvNGJPmNJD4XlAL3U+j3o=
github.com/wailsapp/wails/v2 v2.5.1 h1:mfG+2kWqQXYOwdgI43HEILjOZDXbk5woPYI3jP2b+js=
github.com/wailsapp/wails/v2 v2.5.1/go.mod h1:jbOZbcr/zm79PxXxAjP8UoVlDd9wLW3uDs+isIthDfs=
github.com/wailsapp/wails/v2 v2.6.0 h1:EyH0zR/EO6dDiqNy8qU5spaXDfkluiq77xrkabPYD4c=
github.com/wailsapp/wails/v2 v2.6.0/go.mod h1:WBG9KKWuw0FKfoepBrr/vRlyTmHaMibWesK3yz6nNiM=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=

main.go
View File

@@ -61,6 +61,9 @@ var midi embed.FS
//go:embed assets/sound-font
var midiAssets embed.FS

//go:embed components
var components embed.FS

func main() {
	if buildInfo, ok := debug.ReadBuildInfo(); !ok || strings.Contains(buildInfo.String(), "-ldflags") {
		backend.CopyEmbed(cyac)
@@ -70,6 +73,7 @@ func main() {
		backend.CopyEmbed(finetune)
		backend.CopyEmbed(midi)
		backend.CopyEmbed(midiAssets)
		backend.CopyEmbed(components)
	}

	// Create an instance of the app structure
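The `components` directory is new here: the CI workflow downloads the LibreHardwareMonitor console into `./components`, the `//go:embed components` directive above bakes it into the binary, and `backend.CopyEmbed(components)` writes it back out at startup. The helper itself is not part of this diff; below is a minimal sketch of what such a routine could look like, assuming it simply walks the embedded FS and mirrors every file into the working directory (the real implementation in backend-golang may differ):

```go
package backend

import (
	"embed"
	"io/fs"
	"os"
)

// CopyEmbed-style sketch: walk an embed.FS and write each entry to disk
// so embedded tools (e.g. the hardware monitor console) can be launched
// as ordinary files. Hypothetical; not the code from this repository.
func CopyEmbed(efs embed.FS) error {
	return fs.WalkDir(efs, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			// embed paths use forward slashes; os.MkdirAll accepts them on all platforms
			return os.MkdirAll(path, 0o755)
		}
		data, err := efs.ReadFile(path)
		if err != nil {
			return err
		}
		return os.WriteFile(path, data, 0o644)
	})
}
```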
@@ -94,6 +98,7 @@ func main() {
		Height:                   680,
		MinWidth:                 375,
		MinHeight:                640,
		EnableDefaultContextMenu: true,
		Windows: &windows.Options{
			ZoomFactor:           zoomFactor,
			IsZoomControlEnabled: true,
@@ -102,7 +107,8 @@ func main() {
			Assets:  assets,
			Handler: NewFileLoader(),
		},
		OnStartup:     app.OnStartup,
		OnBeforeClose: app.OnBeforeClose,
		Bind: []any{
			app,
		},
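Two behavioral changes land in this options struct: `EnableDefaultContextMenu: true` (available as of the wails v2.6.0 bump in go.mod above) re-enables the webview's default right-click menu, and the new `OnBeforeClose` callback lets the app intercept window close. In Wails v2 the hook returns true to cancel the close. A minimal sketch of such a hook, assuming it is used for cleanup; `stopBackend` and the bare `App` type are stand-ins, not code from this diff:

```go
package main

import "context"

type App struct{} // stand-in for the real app structure

func (a *App) stopBackend() {
	// hypothetical cleanup: stop the python backend, hardware monitor, etc.
}

// OnBeforeClose matches the Wails v2 hook signature: returning true
// cancels the close, returning false lets the window shut down.
func (a *App) OnBeforeClose(ctx context.Context) (prevent bool) {
	a.stopBackend()
	return false
}
```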

manifest.json
View File

@@ -1,5 +1,5 @@
{
"version": "1.4.3",
"version": "1.4.5",
"introduction": {
"en": "RWKV is an open-source, commercially usable large language model with high flexibility and great potential for development.\n### About This Tool\nThis tool aims to lower the barrier of entry for using large language models, making it accessible to everyone. It provides fully automated dependency and model management. You simply need to click and run, following the instructions, to deploy a local large language model. The tool itself is very compact and only requires a single executable file for one-click deployment.\nAdditionally, this tool offers an interface that is fully compatible with the OpenAI API. This means you can use any ChatGPT client as a client for RWKV, enabling capability expansion beyond just chat functionality.\n### Preset Configuration Rules at the Bottom\nThis tool comes with a series of preset configurations to reduce complexity. The naming rules for each configuration represent the following in order: device - required VRAM/memory - model size - model language.\nFor example, \"GPU-8G-3B-EN\" indicates that this configuration is for a graphics card with 8GB of VRAM, a model size of 3 billion parameters, and it uses an English language model.\nLarger model sizes have higher performance and VRAM requirements. Among configurations with the same model size, those with higher VRAM usage will have faster runtime.\nFor example, if you have 12GB of VRAM but running the \"GPU-12G-7B-EN\" configuration is slow, you can downgrade to \"GPU-8G-3B-EN\" for a significant speed improvement.\n### About RWKV\nRWKV is an RNN with Transformer-level LLM performance, which can also be directly trained like a GPT transformer (parallelizable). And it's 100% attention-free. You only need the hidden state at position t to compute the state at position t+1. You can use the \"GPT\" mode to quickly compute the hidden state for the \"RNN\" mode.<br/>So it's combining the best of RNN and transformer - great performance, fast inference, saves VRAM, fast training, \"infinite\" ctx_len, and free sentence embedding (using the final hidden state).",
"zh": "RWKV是一个开源且允许商用的大语言模型灵活性很高且极具发展潜力。\n### 关于本工具\n本工具旨在降低大语言模型的使用门槛做到人人可用本工具提供了全自动化的依赖和模型管理你只需要直接点击运行跟随引导即可完成本地大语言模型的部署工具本身体积极小只需要一个exe即可完成一键部署。\n此外本工具提供了与OpenAI API完全兼容的接口这意味着你可以把任意ChatGPT客户端用作RWKV的客户端实现能力拓展而不局限于聊天。\n### 底部的预设配置规则\n本工具内置了一系列预设配置以降低使用难度每个配置名的规则依次代表着设备-所需显存/内存-模型规模-模型语言。\n例如GPU-8G-3B-CN表示该配置用于显卡需要8G显存模型规模为30亿参数使用的是中文模型。\n模型规模越大性能要求越高显存要求也越高而同样模型规模的配置中显存占用越高的运行速度越快。\n例如当你有12G显存但运行GPU-12G-7B-CN配置速度比较慢可降级成GPU-8G-3B-CN将会大幅提速。\n### 关于RWKV\nRWKV是具有Transformer级别LLM性能的RNN也可以像GPT Transformer一样直接进行训练可并行化。而且它是100% attention-free的。你只需在位置t处获得隐藏状态即可计算位置t + 1处的状态。你可以使用“GPT”模式快速计算用于“RNN”模式的隐藏状态。\n因此它将RNN和Transformer的优点结合起来 - 高性能、快速推理、节省显存、快速训练、“无限”上下文长度以及免费的语句嵌入(使用最终隐藏状态)。"
@@ -301,6 +301,58 @@
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-7B-v1-20230626-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-7B-v1-20230626-ctx4096.pth"
},
{
"name": "RWKV-claude-4-World-7B-20230805-ctx65k.pth",
"desc": {
"en": "Global Languages 7B v1 Ctx65k Claude Like",
"zh": "全球语言 7B v1 65k上下文 Claude功能",
"ja": "グローバル言語 7B v1 65kコンテキスト Claude機能"
},
"size": 15035391533,
"SHA256": "8cd25f8a1ab58965993cc47b3b2f99585836eed008a2e44526c258189ea751a6",
"lastUpdated": "2023-08-05T08:52:20",
"url": "https://huggingface.co/xiaol/RWKV-claude-4-World-7B-65k/blob/main/RWKV-claude-4-World-7B-20230805-ctx65k.pth",
"downloadUrl": "https://huggingface.co/xiaol/RWKV-claude-4-World-7B-65k/resolve/main/RWKV-claude-4-World-7B-20230805-ctx65k.pth"
},
{
"name": "RWKV-toolformer-translation-japanese-chinese-english-7B-World-20230815-ctx128k.pth",
"desc": {
"en": "Global Languages 7B v1 Ctx128k Toolformer",
"zh": "全球语言 7B v1 128k上下文 Toolformer",
"ja": "グローバル言語 7B v1 128kコンテキスト Toolformer"
},
"size": 15035391533,
"SHA256": "648a3b21055bdab77021ce278da80fbada8dcaae0b3d41d1eca9aa194c1fd25f",
"lastUpdated": "2023-08-15T07:18:23",
"url": "https://huggingface.co/xiaol/RWKV-toolformer-translation-japanese-chinese-english-7B-World-128k/blob/main/RWKV-toolformer-translation-japanese-chinese-english-7B-World-20230815-ctx128k.pth",
"downloadUrl": "https://huggingface.co/xiaol/RWKV-toolformer-translation-japanese-chinese-english-7B-World-128k/resolve/main/RWKV-toolformer-translation-japanese-chinese-english-7B-World-20230815-ctx128k.pth"
},
{
"name": "RWKV-code-4-World-7B-20230820-ctx32k.pth",
"desc": {
"en": "Global Languages 7B v1 Ctx32k Code Ability",
"zh": "全球语言 7B v1 32k上下文 代码能力",
"ja": "グローバル言語 7B v1 32kコンテキスト コード能力"
},
"size": 15035391533,
"SHA256": "19666620437ae3a5fb06e16a52729d67e449fca155fab3d5861ffe9ecf247404",
"lastUpdated": "2023-08-20T05:00:17",
"url": "https://huggingface.co/xiaol/RWKV-Code-7B-world-32k/blob/main/RWKV-code-4-World-7B-20230820-ctx32k.pth",
"downloadUrl": "https://huggingface.co/xiaol/RWKV-Code-7B-world-32k/resolve/main/RWKV-code-4-World-7B-20230820-ctx32k.pth"
},
{
"name": "wizard-rwkv-4-world-ctx32k.pth",
"desc": {
"en": "Global Languages 7B v1 Ctx32k Wikipedia",
"zh": "全球语言 7B v1 32k上下文 维基百科",
"ja": "グローバル言語 7B v1 32kコンテキスト ウィキペディア"
},
"size": 15035391538,
"SHA256": "c5d991f315a1676d4bed93dd91f803b1376096e7a4af5bf72b339d055f53bac7",
"lastUpdated": "2023-07-29T03:21:47",
"url": "https://huggingface.co/xiaol/wizard-rwkv-world-7B-ctx32k/blob/main/wizard-rwkv-4-world-ctx32k.pth",
"downloadUrl": "https://huggingface.co/xiaol/wizard-rwkv-world-7B-ctx32k/resolve/main/wizard-rwkv-4-world-ctx32k.pth"
},
{
"name": "RWKV-4-World-CHNtuned-7B-v1-20230709-ctx4096.pth",
"desc": {
@@ -327,6 +379,45 @@
"url": "https://huggingface.co/xiaol/readflow-rwkv-4-world-ctx32k/blob/main/Readflow-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k.pth",
"downloadUrl": "https://huggingface.co/xiaol/readflow-rwkv-4-world-ctx32k/resolve/main/Readflow-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k.pth"
},
{
"name": "novel-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k.pth",
"desc": {
"en": "Global Languages 7B v1 Enhanced Chinese Ctx32k Novel Outline Ability",
"zh": "全球语言 7B v1 中文增强 32k上下文 小说大纲扩写",
"ja": "グローバル言語 7B v1 中国語強化 32kコンテキスト 小説のあらすじを書く"
},
"size": 15035391538,
"SHA256": "0fe2415ce61af52a8c38c071b475c01b4c9f8a4f2b4aaed6181f0334f3faf7f4",
"lastUpdated": "2023-07-28T13:30:59",
"url": "https://huggingface.co/xiaol/ruotangwx-rwkv-7b-novel-32k/blob/main/novel-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k.pth",
"downloadUrl": "https://huggingface.co/xiaol/ruotangwx-rwkv-7b-novel-32k/resolve/main/novel-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k.pth"
},
{
"name": "chatgal-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k-1000.pth",
"desc": {
"en": "Global Languages 7B v1 Enhanced Chinese Ctx32k GalGame 1000",
"zh": "全球语言 7B v1 中文增强 32k上下文 GalGame 1000",
"ja": "グローバル言語 7B v1 中国語強化 32kコンテキスト GalGame 1000"
},
"size": 15035391543,
"SHA256": "aaed29cfd1bddee47c48f564aa800eb001f62fd03290d772647d5678e40d66e8",
"lastUpdated": "2023-07-21T08:59:18",
"url": "https://huggingface.co/xiaol/chatgal-rwkv-7b-world-32k/blob/main/chatgal-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k-1000.pth",
"downloadUrl": "https://huggingface.co/xiaol/chatgal-rwkv-7b-world-32k/resolve/main/chatgal-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k-1000.pth"
},
{
"name": "chatgal-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k-500.pth",
"desc": {
"en": "Global Languages 7B v1 Enhanced Chinese Ctx32k GalGame 500",
"zh": "全球语言 7B v1 中文增强 32k上下文 GalGame 500",
"ja": "グローバル言語 7B v1 中国語強化 32kコンテキスト GalGame 500"
},
"size": 15035391538,
"SHA256": "b5d347d5dedb4f398ec31489ab87b75b1dee772ae7d0a34c26635cf5d95c8794",
"lastUpdated": "2023-07-21T07:31:05",
"url": "https://huggingface.co/xiaol/chatgal-rwkv-7b-world-32k/blob/main/chatgal-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k-500.pth",
"downloadUrl": "https://huggingface.co/xiaol/chatgal-rwkv-7b-world-32k/resolve/main/chatgal-RWKV-4-World-CHNtuned-7B-v1-20230709-ctx32k-500.pth"
},
{
"name": "RWKV-4-World-JPNtuned-7B-v1-20230718-ctx4096.pth",
"desc": {
@@ -340,6 +431,19 @@
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-JPNtuned-7B-v1-20230718-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-JPNtuned-7B-v1-20230718-ctx4096.pth"
},
{
"name": "RWKV-novel-4-World-7B-20230810-ctx128k.pth",
"desc": {
"en": "Global Languages Writer 7B v1 Ctx128k",
"zh": "全球语言写作 7B v1 128k上下文",
"ja": "グローバル言語ライター 7B v1 128kコンテキスト"
},
"size": 15035391533,
"SHA256": "5e429c49e4cab2f29a93f87a80635422c8710d70e5b1d962c078e47d957389c8",
"lastUpdated": "2023-08-10T06:30:32",
"url": "https://huggingface.co/xiaol/rwkv-7B-world-novel-128k/blob/main/RWKV-novel-4-World-7B-20230810-ctx128k.pth",
"downloadUrl": "https://huggingface.co/xiaol/rwkv-7B-world-novel-128k/resolve/main/RWKV-novel-4-World-7B-20230810-ctx128k.pth"
},
{
"name": "RWKV-4-Novel-7B-v1-ChnEng-ChnPro-20230410-ctx4096.pth",
"desc": {