Compare commits


32 Commits

Author | SHA1 | Message | Date
josc146 | 70f2271b94 | release v1.2.8 | 2023-06-21 23:12:17 +08:00
josc146 | 15cd689741 | adjust MoreUtilsButton | 2023-06-21 23:11:22 +08:00
josc146 | 82a68593bb | exact avoidOverflow | 2023-06-21 23:08:34 +08:00
github-actions[bot] | 21910af96a | release v1.2.7 | 2023-06-21 14:09:27 +00:00
josc146 | 412a0fe135 | release v1.2.7 | 2023-06-21 22:08:57 +08:00
josc146 | cf0972ba52 | avoid overflow | 2023-06-21 22:02:42 +08:00
josc146 | 3fe9ef4546 | chore | 2023-06-21 22:00:29 +08:00
josc146 | 4cd5a56070 | add more chat utils (retry, edit, delete) | 2023-06-21 21:20:21 +08:00
josc146 | 35a7437714 | chore | 2023-06-21 17:13:04 +08:00
josc146 | 131a7ddf4a | fix the prompt cache that contains potential error | 2023-06-21 16:07:16 +08:00
josc146 | 1465908574 | update SupportedCustomCuda | 2023-06-21 13:48:09 +08:00
josc146 | 3eb10f08bb | rename 100+ Languages to Global Languages | 2023-06-21 12:44:49 +08:00
josc146 | b20990d380 | when precision is fp32, disable customCuda | 2023-06-21 12:14:11 +08:00
josc146 | 1a5bf4a95e | improve InstallPyDep for non-english path | 2023-06-21 12:08:04 +08:00
github-actions[bot] | 3d123524e7 | release v1.2.6 | 2023-06-20 16:59:55 +00:00
josc146 | 25a41e51b3 | release v1.2.6 | 2023-06-21 00:46:57 +08:00
josc146 | f998ff239a | add chat and completion error messages | 2023-06-21 00:26:50 +08:00
josc146 | bae9ae6551 | allow custom api url, key, model | 2023-06-20 23:24:51 +08:00
josc146 | 285e8b1577 | add DPI Scaling setting | 2023-06-20 22:22:14 +08:00
josc146 | ce915cdf6a | chore | 2023-06-20 22:18:45 +08:00
github-actions[bot] | 84317a03e8 | release v1.2.5 | 2023-06-20 09:02:57 +00:00
josc146 | ac1fa09604 | release v1.2.5 | 2023-06-20 17:02:28 +08:00
josc146 | 43bc08648d | update manifest | 2023-06-20 16:07:52 +08:00
josc146 | e93c77394d | add usage | 2023-06-20 15:55:52 +08:00
josc146 | 4b2509e643 | chore | 2023-06-20 15:34:34 +08:00
josc146 | 14fbb437ff | embeddings api example | 2023-06-20 00:30:49 +08:00
josc146 | 8963543159 | embeddings api compatible with openai api and langchain(sdk) | 2023-06-19 22:51:06 +08:00
josc146 | 377f71b16b | type | 2023-06-19 22:32:02 +08:00
josc146 | d32351c130 | exact model name | 2023-06-19 22:30:49 +08:00
josc146 | 967be6f88f | refactor completions api | 2023-06-18 20:16:52 +08:00
josc146 | fcdda71b46 | typo | 2023-06-17 19:32:47 +08:00
github-actions[bot] | 138251932c | release v1.2.4 | 2023-06-15 16:37:43 +00:00
30 changed files with 1140 additions and 459 deletions


@@ -77,6 +77,7 @@ jobs:
with:
go-version: '1.20.5'
- run: |
sudo apt-get update
sudo apt-get install upx
sudo apt-get install build-essential libgtk-3-dev libwebkit2gtk-4.0-dev
go install github.com/wailsapp/wails/v2/cmd/wails@latest

.gitignore

@@ -13,6 +13,7 @@ __pycache__
/py310
*.zip
/cmd-helper.bat
/install-py-dep.bat
/backend-python/wkv_cuda
*.exe
*.old


@@ -1,11 +1,7 @@
## Changes
- improve api docs
- improve error messages
- fix the state cache crash caused by bad prompts
- clear confirm for chat page
- save conversation button
- chore
- exact avoidOverflow
- adjust MoreUtilsButton
## Install


@@ -87,6 +87,45 @@ body.json:
}
```
## Embeddings API Example
If you are using langchain, just use `OpenAIEmbeddings(openai_api_base="http://127.0.0.1:8000", openai_api_key="sk-")`
```python
import numpy as np
import requests


def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))


values = [
    "I am a girl",
    "我是个女孩",
    "私は女の子です",
    "广东人爱吃福建人",
    "我是个人类",
    "I am a human",
    "that dog is so cute",
    "私はねこむすめです、にゃん♪",
    "宇宙级特大事件!号外号外!"
]

embeddings = []
for v in values:
    r = requests.post("http://127.0.0.1:8000/embeddings", json={"input": v})
    embedding = r.json()["data"][0]["embedding"]
    embeddings.append(embedding)

compared_embedding = embeddings[0]
embeddings_cos_sim = [cosine_similarity(compared_embedding, e) for e in embeddings]
for i in np.argsort(embeddings_cos_sim)[::-1]:
    print(f"{embeddings_cos_sim[i]:.10f} - {values[i]}")
```
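For the LangChain route mentioned above, a minimal sketch might look like the following. It is illustrative rather than part of this diff, and assumes a langchain release from around this time where `OpenAIEmbeddings` is importable from `langchain.embeddings` and accepts the `openai_api_base` / `openai_api_key` arguments quoted above; newer versions may move the import path.
```python
# Illustrative LangChain usage against the local RWKV-Runner embeddings API.
# Assumes langchain (and its openai dependency) is installed and the backend is
# running on http://127.0.0.1:8000; the key is a placeholder the local server ignores.
from langchain.embeddings import OpenAIEmbeddings

embeddings = OpenAIEmbeddings(
    openai_api_base="http://127.0.0.1:8000",
    openai_api_key="sk-",
)

query_vector = embeddings.embed_query("I am a girl")
doc_vectors = embeddings.embed_documents(["我是个女孩", "I am a human"])
print(len(query_vector), len(doc_vectors))
```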
## Todo
- [ ] Model training functionality


@@ -46,7 +46,7 @@ API兼容的接口这意味着一切ChatGPT客户端都是RWKV客户端。
</div>
#### 注意 目前RWKV中文模型质量一般推荐使用英文模型或World(100+ 语言)体验实际RWKV能力
#### 注意 目前RWKV中文模型质量一般推荐使用英文模型或World(全球语言)体验实际RWKV能力
#### 预设配置已经开启自定义CUDA算子加速速度更快且显存消耗更少。如果你遇到可能的兼容性问题前往配置页面关闭`使用自定义CUDA算子加速`
@@ -87,6 +87,45 @@ body.json:
}
```
## Embeddings API 示例
如果你在用langchain, 直接使用 `OpenAIEmbeddings(openai_api_base="http://127.0.0.1:8000", openai_api_key="sk-")`
```python
import numpy as np
import requests


def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))


values = [
    "I am a girl",
    "我是个女孩",
    "私は女の子です",
    "广东人爱吃福建人",
    "我是个人类",
    "I am a human",
    "that dog is so cute",
    "私はねこむすめです、にゃん♪",
    "宇宙级特大事件!号外号外!"
]

embeddings = []
for v in values:
    r = requests.post("http://127.0.0.1:8000/embeddings", json={"input": v})
    embedding = r.json()["data"][0]["embedding"]
    embeddings.append(embedding)

compared_embedding = embeddings[0]
embeddings_cos_sim = [cosine_similarity(compared_embedding, e) for e in embeddings]
for i in np.argsort(embeddings_cos_sim)[::-1]:
    print(f"{embeddings_cos_sim[i]:.10f} - {values[i]}")
```
## Todo
- [ ] 模型训练功能


@@ -2,6 +2,7 @@ package backend_golang
import (
"context"
"errors"
"net/http"
"os"
"os/exec"
@@ -14,9 +15,11 @@ import (
// App struct
type App struct {
ctx context.Context
exDir string
cmdPrefix string
ctx context.Context
HasConfigData bool
ConfigData map[string]any
exDir string
cmdPrefix string
}
// NewApp creates a new App application struct
@@ -64,6 +67,19 @@ func (a *App) UpdateApp(url string) (broken bool, err error) {
return false, nil
}
func (a *App) RestartApp() error {
if runtime.GOOS == "windows" {
name, err := os.Executable()
if err != nil {
return err
}
exec.Command(name, os.Args[1:]...).Start()
wruntime.Quit(a.ctx)
return nil
}
return errors.New("unsupported OS")
}
func (a *App) GetPlatform() string {
return runtime.GOOS
}


@@ -4,7 +4,6 @@ import (
"errors"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
@@ -52,11 +51,7 @@ func (a *App) InstallPyDep(python string, cnMirror bool) (string, error) {
if python == "" {
python, err = GetPython()
if runtime.GOOS == "windows" {
python, err = filepath.Abs(python)
if err != nil {
return "", err
}
python = `"` + python + `"`
python = `"%CD%/` + python + `"`
}
}
if err != nil {


@@ -1,4 +1,6 @@
import tiktoken
import GPUtil
import torch
import rwkv
import fastapi

Binary file not shown.


@@ -2,10 +2,13 @@ import asyncio
import json
from threading import Lock
from typing import List
import base64
from fastapi import APIRouter, Request, status, HTTPException
from sse_starlette.sse import EventSourceResponse
from pydantic import BaseModel
import numpy as np
import tiktoken
from utils.rwkv import *
from utils.log import quick_log
import global_var
@@ -40,11 +43,171 @@ class ChatCompletionBody(ModelConfigBody):
}
class CompletionBody(ModelConfigBody):
prompt: str
model: str = "rwkv"
stream: bool = False
stop: str = None
class Config:
schema_extra = {
"example": {
"prompt": "The following is an epic science fiction masterpiece that is immortalized, "
+ "with delicate descriptions and grand depictions of interstellar civilization wars.\nChapter 1.\n",
"model": "rwkv",
"stream": False,
"stop": None,
"max_tokens": 100,
"temperature": 1.2,
"top_p": 0.5,
"presence_penalty": 0.4,
"frequency_penalty": 0.4,
}
}
completion_lock = Lock()
requests_num = 0
async def eval_rwkv(
model: RWKV,
request: Request,
body: ModelConfigBody,
prompt: str,
stream: bool,
stop: str,
chat_mode: bool,
):
global requests_num
requests_num = requests_num + 1
quick_log(request, None, "Start Waiting. RequestsNum: " + str(requests_num))
while completion_lock.locked():
if await request.is_disconnected():
requests_num = requests_num - 1
print(f"{request.client} Stop Waiting (Lock)")
quick_log(
request,
None,
"Stop Waiting (Lock). RequestsNum: " + str(requests_num),
)
return
await asyncio.sleep(0.1)
else:
completion_lock.acquire()
if await request.is_disconnected():
completion_lock.release()
requests_num = requests_num - 1
print(f"{request.client} Stop Waiting (Lock)")
quick_log(
request,
None,
"Stop Waiting (Lock). RequestsNum: " + str(requests_num),
)
return
set_rwkv_config(model, global_var.get(global_var.Model_Config))
set_rwkv_config(model, body)
response, prompt_tokens, completion_tokens = "", 0, 0
for response, delta, prompt_tokens, completion_tokens in model.generate(
prompt,
stop=stop,
):
if await request.is_disconnected():
break
if stream:
yield json.dumps(
{
"object": "chat.completion.chunk"
if chat_mode
else "text_completion",
"response": response,
"model": model.name,
"choices": [
{
"delta": {"content": delta},
"index": 0,
"finish_reason": None,
}
if chat_mode
else {
"text": delta,
"index": 0,
"finish_reason": None,
}
],
}
)
# torch_gc()
requests_num = requests_num - 1
completion_lock.release()
if await request.is_disconnected():
print(f"{request.client} Stop Waiting")
quick_log(
request,
body,
response + "\nStop Waiting. RequestsNum: " + str(requests_num),
)
return
quick_log(
request,
body,
response + "\nFinished. RequestsNum: " + str(requests_num),
)
if stream:
yield json.dumps(
{
"object": "chat.completion.chunk"
if chat_mode
else "text_completion",
"response": response,
"model": model.name,
"choices": [
{
"delta": {},
"index": 0,
"finish_reason": "stop",
}
if chat_mode
else {
"text": "",
"index": 0,
"finish_reason": "stop",
}
],
}
)
yield "[DONE]"
else:
yield {
"object": "chat.completion" if chat_mode else "text_completion",
"response": response,
"model": model.name,
"usage": {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": prompt_tokens + completion_tokens,
},
"choices": [
{
"message": {
"role": "assistant",
"content": response,
},
"index": 0,
"finish_reason": "stop",
}
if chat_mode
else {
"text": response,
"index": 0,
"finish_reason": "stop",
}
],
}
@router.post("/v1/chat/completions")
@router.post("/chat/completions")
async def chat_completions(body: ChatCompletionBody, request: Request):
@@ -77,7 +240,8 @@ The following is a coherent verbose detailed conversation between a girl named {
{bot} usually gives {user} kind, helpful and informative advices.\n
"""
if user == "Bob"
else f"{user}{interface} hi\n\n{bot}{interface} Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
for message in body.messages:
if message.role == "system":
@@ -123,156 +287,20 @@ The following is a coherent verbose detailed conversation between a girl named {
)
completion_text += f"{bot}{interface}"
async def eval_rwkv():
global requests_num
requests_num = requests_num + 1
quick_log(request, None, "Start Waiting. RequestsNum: " + str(requests_num))
while completion_lock.locked():
if await request.is_disconnected():
requests_num = requests_num - 1
print(f"{request.client} Stop Waiting (Lock)")
quick_log(
request,
None,
"Stop Waiting (Lock). RequestsNum: " + str(requests_num),
)
return
await asyncio.sleep(0.1)
else:
completion_lock.acquire()
if await request.is_disconnected():
completion_lock.release()
requests_num = requests_num - 1
print(f"{request.client} Stop Waiting (Lock)")
quick_log(
request,
None,
"Stop Waiting (Lock). RequestsNum: " + str(requests_num),
)
return
set_rwkv_config(model, global_var.get(global_var.Model_Config))
set_rwkv_config(model, body)
if body.stream:
response = ""
for response, delta in model.generate(
completion_text,
stop=f"\n\n{user}" if body.stop is None else body.stop,
):
if await request.is_disconnected():
break
yield json.dumps(
{
"response": response,
"model": "rwkv",
"choices": [
{
"delta": {"content": delta},
"index": 0,
"finish_reason": None,
}
],
}
)
# torch_gc()
requests_num = requests_num - 1
completion_lock.release()
if await request.is_disconnected():
print(f"{request.client} Stop Waiting")
quick_log(
request,
body,
response + "\nStop Waiting. RequestsNum: " + str(requests_num),
)
return
quick_log(
request,
body,
response + "\nFinished. RequestsNum: " + str(requests_num),
)
yield json.dumps(
{
"response": response,
"model": "rwkv",
"choices": [
{
"delta": {},
"index": 0,
"finish_reason": "stop",
}
],
}
)
yield "[DONE]"
else:
response = ""
for response, delta in model.generate(
completion_text,
stop=f"\n\n{user}" if body.stop is None else body.stop,
):
if await request.is_disconnected():
break
# torch_gc()
requests_num = requests_num - 1
completion_lock.release()
if await request.is_disconnected():
print(f"{request.client} Stop Waiting")
quick_log(
request,
body,
response + "\nStop Waiting. RequestsNum: " + str(requests_num),
)
return
quick_log(
request,
body,
response + "\nFinished. RequestsNum: " + str(requests_num),
)
yield {
"response": response,
"model": "rwkv",
"choices": [
{
"message": {
"role": "assistant",
"content": response,
},
"index": 0,
"finish_reason": "stop",
}
],
}
stop = f"\n\n{user}" if body.stop is None else body.stop
if body.stream:
return EventSourceResponse(eval_rwkv())
return EventSourceResponse(
eval_rwkv(model, request, body, completion_text, body.stream, stop, True)
)
else:
try:
return await eval_rwkv().__anext__()
return await eval_rwkv(
model, request, body, completion_text, body.stream, stop, True
).__anext__()
except StopAsyncIteration:
return None
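A note on the client side: when `stream` is true, the handler above wraps `eval_rwkv` in an `EventSourceResponse`, so each SSE `data:` line carries one of the JSON chunks yielded above and the stream ends with a `[DONE]` sentinel. A rough consumer sketch, not part of this diff and assuming the default local port 8000:
```python
# Minimal SSE consumer for the streaming /chat/completions endpoint above.
import json
import requests

with requests.post(
    "http://127.0.0.1:8000/chat/completions",
    json={"messages": [{"role": "user", "content": "Hello"}], "stream": True},
    stream=True,
) as r:
    for line in r.iter_lines():
        if not line or not line.startswith(b"data:"):
            continue
        payload = line[len(b"data:"):].strip()
        if payload == b"[DONE]":
            break
        chunk = json.loads(payload)
        # incremental text arrives in choices[0].delta.content; the final chunk has an empty delta
        print(chunk["choices"][0]["delta"].get("content", ""), end="", flush=True)
```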
class CompletionBody(ModelConfigBody):
prompt: str
model: str = "rwkv"
stream: bool = False
stop: str = None
class Config:
schema_extra = {
"example": {
"prompt": "The following is an epic science fiction masterpiece that is immortalized, with delicate descriptions and grand depictions of interstellar civilization wars.\nChapter 1.\n",
"model": "rwkv",
"stream": False,
"stop": None,
"max_tokens": 100,
"temperature": 1.2,
"top_p": 0.5,
"presence_penalty": 0.4,
"frequency_penalty": 0.4,
}
}
@router.post("/v1/completions")
@router.post("/completions")
async def completions(body: CompletionBody, request: Request):
@@ -283,120 +311,142 @@ async def completions(body: CompletionBody, request: Request):
if body.prompt is None or body.prompt == "":
raise HTTPException(status.HTTP_400_BAD_REQUEST, "prompt not found")
async def eval_rwkv():
global requests_num
requests_num = requests_num + 1
quick_log(request, None, "Start Waiting. RequestsNum: " + str(requests_num))
while completion_lock.locked():
if await request.is_disconnected():
requests_num = requests_num - 1
print(f"{request.client} Stop Waiting (Lock)")
quick_log(
request,
None,
"Stop Waiting (Lock). RequestsNum: " + str(requests_num),
)
return
await asyncio.sleep(0.1)
else:
completion_lock.acquire()
if await request.is_disconnected():
completion_lock.release()
requests_num = requests_num - 1
print(f"{request.client} Stop Waiting (Lock)")
quick_log(
request,
None,
"Stop Waiting (Lock). RequestsNum: " + str(requests_num),
)
return
set_rwkv_config(model, global_var.get(global_var.Model_Config))
set_rwkv_config(model, body)
if body.stream:
response = ""
for response, delta in model.generate(body.prompt, stop=body.stop):
if await request.is_disconnected():
break
yield json.dumps(
{
"response": response,
"model": "rwkv",
"choices": [
{
"text": delta,
"index": 0,
"finish_reason": None,
}
],
}
)
# torch_gc()
requests_num = requests_num - 1
completion_lock.release()
if await request.is_disconnected():
print(f"{request.client} Stop Waiting")
quick_log(
request,
body,
response + "\nStop Waiting. RequestsNum: " + str(requests_num),
)
return
quick_log(
request,
body,
response + "\nFinished. RequestsNum: " + str(requests_num),
)
yield json.dumps(
{
"response": response,
"model": "rwkv",
"choices": [
{
"text": "",
"index": 0,
"finish_reason": "stop",
}
],
}
)
yield "[DONE]"
else:
response = ""
for response, delta in model.generate(body.prompt, stop=body.stop):
if await request.is_disconnected():
break
# torch_gc()
requests_num = requests_num - 1
completion_lock.release()
if await request.is_disconnected():
print(f"{request.client} Stop Waiting")
quick_log(
request,
body,
response + "\nStop Waiting. RequestsNum: " + str(requests_num),
)
return
quick_log(
request,
body,
response + "\nFinished. RequestsNum: " + str(requests_num),
)
yield {
"response": response,
"model": "rwkv",
"choices": [
{
"text": response,
"index": 0,
"finish_reason": "stop",
}
],
}
if body.stream:
return EventSourceResponse(eval_rwkv())
return EventSourceResponse(
eval_rwkv(model, request, body, body.prompt, body.stream, body.stop, False)
)
else:
try:
return await eval_rwkv().__anext__()
return await eval_rwkv(
model, request, body, body.prompt, body.stream, body.stop, False
).__anext__()
except StopAsyncIteration:
return None
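For completeness, a non-streaming call against the refactored completions endpoint might look like this; the fields mirror the `CompletionBody` example above, and the response shape is the non-streaming dictionary yielded by `eval_rwkv`. A sketch, not part of this diff, assuming the default local port 8000:
```python
# Illustrative non-streaming request to the completions endpoint refactored above.
import requests

r = requests.post(
    "http://127.0.0.1:8000/v1/completions",
    json={
        "prompt": "The following is an epic science fiction masterpiece.\nChapter 1.\n",
        "stream": False,
        "max_tokens": 100,
        "temperature": 1.2,
        "top_p": 0.5,
    },
)
data = r.json()
print(data["choices"][0]["text"])
print(data["usage"])  # prompt_tokens / completion_tokens / total_tokens
```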
class EmbeddingsBody(BaseModel):
input: str | List[str] | List[List[int]]
model: str = "rwkv"
encoding_format: str = None
fast_mode: bool = False
class Config:
schema_extra = {
"example": {
"input": "a big apple",
"model": "rwkv",
"encoding_format": None,
"fast_mode": False,
}
}
def embedding_base64(embedding: List[float]) -> str:
return base64.b64encode(np.array(embedding).astype(np.float32)).decode("utf-8")
@router.post("/v1/embeddings")
@router.post("/embeddings")
@router.post("/v1/engines/text-embedding-ada-002/embeddings")
@router.post("/engines/text-embedding-ada-002/embeddings")
async def embeddings(body: EmbeddingsBody, request: Request):
model: RWKV = global_var.get(global_var.Model)
if model is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "model not loaded")
if body.input is None or body.input == "" or body.input == [] or body.input == [[]]:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "input not found")
global requests_num
requests_num = requests_num + 1
quick_log(request, None, "Start Waiting. RequestsNum: " + str(requests_num))
while completion_lock.locked():
if await request.is_disconnected():
requests_num = requests_num - 1
print(f"{request.client} Stop Waiting (Lock)")
quick_log(
request,
None,
"Stop Waiting (Lock). RequestsNum: " + str(requests_num),
)
return
await asyncio.sleep(0.1)
else:
completion_lock.acquire()
if await request.is_disconnected():
completion_lock.release()
requests_num = requests_num - 1
print(f"{request.client} Stop Waiting (Lock)")
quick_log(
request,
None,
"Stop Waiting (Lock). RequestsNum: " + str(requests_num),
)
return
base64_format = False
if body.encoding_format == "base64":
base64_format = True
embeddings = []
prompt_tokens = 0
if type(body.input) == list:
if type(body.input[0]) == list:
encoding = tiktoken.model.encoding_for_model("text-embedding-ada-002")
for i in range(len(body.input)):
if await request.is_disconnected():
break
input = encoding.decode(body.input[i])
embedding, token_len = model.get_embedding(input, body.fast_mode)
prompt_tokens = prompt_tokens + token_len
if base64_format:
embedding = embedding_base64(embedding)
embeddings.append(embedding)
else:
for i in range(len(body.input)):
if await request.is_disconnected():
break
embedding, token_len = model.get_embedding(
body.input[i], body.fast_mode
)
prompt_tokens = prompt_tokens + token_len
if base64_format:
embedding = embedding_base64(embedding)
embeddings.append(embedding)
else:
embedding, prompt_tokens = model.get_embedding(body.input, body.fast_mode)
if base64_format:
embedding = embedding_base64(embedding)
embeddings.append(embedding)
requests_num = requests_num - 1
completion_lock.release()
if await request.is_disconnected():
print(f"{request.client} Stop Waiting")
quick_log(
request,
None,
"Stop Waiting. RequestsNum: " + str(requests_num),
)
return
quick_log(
request,
None,
"Finished. RequestsNum: " + str(requests_num),
)
ret_data = [
{
"object": "embedding",
"index": i,
"embedding": embedding,
}
for i, embedding in enumerate(embeddings)
]
return {
"object": "list",
"data": ret_data,
"model": model.name,
"usage": {"prompt_tokens": prompt_tokens, "total_tokens": prompt_tokens},
}
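A note on `encoding_format`: with `"base64"`, each embedding is returned as a base64 string of packed float32 values (see `embedding_base64` above), so the client has to unpack it. A decoding sketch, not part of this diff and assuming the default local port 8000:
```python
# Request a base64-encoded embedding from the endpoint above and unpack it with numpy.
import base64

import numpy as np
import requests

r = requests.post(
    "http://127.0.0.1:8000/v1/embeddings",
    json={"input": "a big apple", "encoding_format": "base64"},
)
b64 = r.json()["data"][0]["embedding"]
vector = np.frombuffer(base64.b64decode(b64), dtype=np.float32)
print(vector.shape, float(np.linalg.norm(vector)))
```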


@@ -32,7 +32,7 @@ class SwitchModelBody(BaseModel):
class Config:
schema_extra = {
"example": {
"model": "models/RWKV-4-World-3B-v1-OnlyForTest_80%_trained-20230612-ctx4096.pth",
"model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
"strategy": "cuda fp16",
"customCuda": False,
}


@@ -48,8 +48,8 @@ def add_state(body: AddStateBody):
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")
try:
id = trie.insert(body.prompt)
device = body.state[0].device
id: int = trie.insert(body.prompt)
device: torch.device = body.state[0].device
dtrie[id] = {
"tokens": copy.deepcopy(body.tokens),
"state": [tensor.cpu() for tensor in body.state]
@@ -110,7 +110,7 @@ def _get_a_dtrie_buff_size(dtrie_v):
# print(dtrie_v["logits"][0].element_size())
# print(dtrie_v["logits"].nelement())
# print(dtrie_v["logits"][0].element_size() * dtrie_v["logits"].nelement())
return 54 * len(dtrie_v["tokens"]) + 491520 + 262144 + 28
return 54 * len(dtrie_v["tokens"]) + 491520 + 262144 + 28 # TODO
@router.post("/longest-prefix-state")
@@ -127,8 +127,9 @@ def longest_prefix_state(body: LongestPrefixStateBody, request: Request):
pass
if id != -1:
v = dtrie[id]
device = v["device"]
prompt = trie[id]
device: torch.device = v["device"]
prompt: str = trie[id]
quick_log(request, body, "Hit:\n" + prompt)
return {
"prompt": prompt,
@@ -137,7 +138,7 @@ def longest_prefix_state(body: LongestPrefixStateBody, request: Request):
if device != torch.device("cpu")
else v["state"],
"logits": v["logits"],
"device": device,
"device": device.type,
}
else:
return {


@@ -1,10 +1,12 @@
import os
import pathlib
import copy
from typing import Dict, List
from typing import Dict, List, Tuple
from utils.log import quick_log
from fastapi import HTTPException
from pydantic import BaseModel, Field
import torch
import numpy as np
from rwkv_pip.utils import PIPELINE
from routes import state_cache
@@ -21,6 +23,8 @@ class RWKV:
def __init__(self, model: str, strategy: str, tokens_path: str) -> None:
from rwkv.model import RWKV as Model # dynamic import to make RWKV_CUDA_ON work
filename, _ = os.path.splitext(os.path.basename(model))
self.name = filename
self.model = Model(model, strategy)
self.pipeline = PIPELINE(self.model, tokens_path)
self.model_state = None
@@ -64,9 +68,10 @@ The following is a coherent verbose detailed conversation between a girl named {
{bot} usually gives {user} kind, helpful and informative advices.\n
"""
if self.user == "Bob"
else f"{user}{interface} hi\n\n{bot}{interface} Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
logits = self.run_rnn(self.fix_tokens(self.pipeline.encode(preset_system)))
logits, _ = self.run_rnn(self.fix_tokens(self.pipeline.encode(preset_system)))
try:
state_cache.add_state(
state_cache.AddStateBody(
@@ -87,6 +92,7 @@ The following is a coherent verbose detailed conversation between a girl named {
def run_rnn(self, _tokens: List[str], newline_adj: int = 0):
tokens = [int(x) for x in _tokens]
token_len = len(tokens)
self.model_tokens += tokens
while len(tokens) > 0:
@@ -99,7 +105,157 @@ The following is a coherent verbose detailed conversation between a girl named {
if self.model_tokens[-1] in self.AVOID_REPEAT_TOKENS:
out[self.model_tokens[-1]] = -999999999
return out
return out, token_len
def get_embedding(self, input: str, fast_mode: bool) -> Tuple[List[float], int]:
if fast_mode:
embedding, token_len = self.fast_embedding(
self.fix_tokens(self.pipeline.encode(input)), None
)
else:
self.model_state = None
self.model_tokens = []
_, token_len = self.run_rnn(self.fix_tokens(self.pipeline.encode(input)))
embedding = self.model_state[-5].tolist()
embedding = (embedding / np.linalg.norm(embedding)).tolist()
return embedding, token_len
def fast_embedding(self, tokens: List[str], state):
tokens = [int(x) for x in tokens]
token_len = len(tokens)
self = self.model
with torch.no_grad():
w = self.w
args = self.args
if state == None:
state = [None] * args.n_layer * 5
for i in range(
args.n_layer
): # state: 0=att_xx 1=att_aa 2=att_bb 3=att_pp 4=ffn_xx
dd = self.strategy[i]
dev = dd.device
atype = dd.atype
state[i * 5 + 0] = torch.zeros(
args.n_embd, dtype=atype, requires_grad=False, device=dev
).contiguous()
state[i * 5 + 1] = torch.zeros(
args.n_embd, dtype=torch.float, requires_grad=False, device=dev
).contiguous()
state[i * 5 + 2] = torch.zeros(
args.n_embd, dtype=torch.float, requires_grad=False, device=dev
).contiguous()
state[i * 5 + 3] = (
torch.zeros(
args.n_embd,
dtype=torch.float,
requires_grad=False,
device=dev,
).contiguous()
- 1e30
)
state[i * 5 + 4] = torch.zeros(
args.n_embd, dtype=atype, requires_grad=False, device=dev
).contiguous()
break
seq_mode = len(tokens) > 1
x = w["emb.weight"][tokens if seq_mode else tokens[0]]
for i in range(args.n_layer):
bbb = f"blocks.{i}."
att = f"blocks.{i}.att."
ffn = f"blocks.{i}.ffn."
dd = self.strategy[i]
dev = dd.device
atype = dd.atype
wtype = dd.wtype
if seq_mode:
if "cuda" in str(dev) and os.environ["RWKV_CUDA_ON"] == "1":
ATT = (
self.cuda_att_seq
if wtype != torch.uint8
else self.cuda_att_seq_i8
)
else:
ATT = self.att_seq if wtype != torch.uint8 else self.att_seq_i8
FFN = self.ffn_seq if wtype != torch.uint8 else self.ffn_seq_i8
else:
ATT = self.att_one if wtype != torch.uint8 else self.att_one_i8
FFN = self.ffn_one if wtype != torch.uint8 else self.ffn_one_i8
x = x.to(dtype=atype, device=dev)
kw = w[f"{att}key.weight"]
vw = w[f"{att}value.weight"]
rw = w[f"{att}receptance.weight"]
ow = w[f"{att}output.weight"]
if dd.stream:
kw = kw.to(device=dev, non_blocking=True)
vw = vw.to(device=dev, non_blocking=True)
rw = rw.to(device=dev, non_blocking=True)
ow = ow.to(device=dev, non_blocking=True)
kmx = w[f"{att}key.weight_mx"] if wtype == torch.uint8 else x
krx = w[f"{att}key.weight_rx"] if wtype == torch.uint8 else x
kmy = w[f"{att}key.weight_my"] if wtype == torch.uint8 else x
kry = w[f"{att}key.weight_ry"] if wtype == torch.uint8 else x
vmx = w[f"{att}value.weight_mx"] if wtype == torch.uint8 else x
vrx = w[f"{att}value.weight_rx"] if wtype == torch.uint8 else x
vmy = w[f"{att}value.weight_my"] if wtype == torch.uint8 else x
vry = w[f"{att}value.weight_ry"] if wtype == torch.uint8 else x
rmx = w[f"{att}receptance.weight_mx"] if wtype == torch.uint8 else x
rrx = w[f"{att}receptance.weight_rx"] if wtype == torch.uint8 else x
rmy = w[f"{att}receptance.weight_my"] if wtype == torch.uint8 else x
rry = w[f"{att}receptance.weight_ry"] if wtype == torch.uint8 else x
omx = w[f"{att}output.weight_mx"] if wtype == torch.uint8 else x
orx = w[f"{att}output.weight_rx"] if wtype == torch.uint8 else x
omy = w[f"{att}output.weight_my"] if wtype == torch.uint8 else x
ory = w[f"{att}output.weight_ry"] if wtype == torch.uint8 else x
(
x,
state[i * 5 + 0],
state[i * 5 + 1],
state[i * 5 + 2],
state[i * 5 + 3],
) = ATT(
x,
state[i * 5 + 0],
state[i * 5 + 1],
state[i * 5 + 2],
state[i * 5 + 3],
w[f"{bbb}ln1.weight"],
w[f"{bbb}ln1.bias"],
w[f"{att}time_mix_k"],
w[f"{att}time_mix_v"],
w[f"{att}time_mix_r"],
w[f"{att}time_decay"],
w[f"{att}time_first"],
kw,
vw,
rw,
ow,
kmx,
krx,
kmy,
kry,
vmx,
vrx,
vmy,
vry,
rmx,
rrx,
rmy,
rry,
omx,
orx,
omy,
ory,
)
return state[0].tolist(), token_len
def generate(self, prompt: str, stop: str = None):
quick_log(None, None, "Generation Prompt:\n" + prompt)
@@ -120,8 +276,11 @@ The following is a coherent verbose detailed conversation between a girl named {
self.model_tokens = copy.deepcopy(cache["tokens"])
logits = copy.deepcopy(cache["logits"])
prompt_token_len = 0
if delta_prompt != "":
logits = self.run_rnn(self.fix_tokens(self.pipeline.encode(delta_prompt)))
logits, prompt_token_len = self.run_rnn(
self.fix_tokens(self.pipeline.encode(delta_prompt))
)
try:
state_cache.add_state(
state_cache.AddStateBody(
@@ -139,6 +298,7 @@ The following is a coherent verbose detailed conversation between a girl named {
occurrence: Dict = {}
completion_token_len = 0
response = ""
for i in range(self.max_tokens_per_generation):
for n in occurrence:
@@ -151,20 +311,20 @@ The following is a coherent verbose detailed conversation between a girl named {
)
if token == END_OF_TEXT:
yield response, ""
yield response, "", prompt_token_len, completion_token_len
break
if token not in occurrence:
occurrence[token] = 1
else:
occurrence[token] += 1
logits = self.run_rnn([token])
logits, _ = self.run_rnn([token])
completion_token_len = completion_token_len + 1
delta: str = self.pipeline.decode(self.model_tokens[out_last:])
if "\ufffd" not in delta: # avoid utf-8 display issues
response += delta
if stop is not None:
if stop in response:
response = response.split(stop)[0]
try:
state_cache.add_state(
state_cache.AddStateBody(
@@ -176,7 +336,8 @@ The following is a coherent verbose detailed conversation between a girl named {
)
except HTTPException:
pass
yield response, ""
response = response.split(stop)[0]
yield response, "", prompt_token_len, completion_token_len
break
out_last = begin + i + 1
if i == self.max_tokens_per_generation - 1:
@@ -191,7 +352,7 @@ The following is a coherent verbose detailed conversation between a girl named {
)
except HTTPException:
pass
yield response, delta
yield response, delta, prompt_token_len, completion_token_len
class ModelConfigBody(BaseModel):


@@ -70,7 +70,7 @@
"Type your message here": "在此输入消息",
"Copy": "复制",
"Read Aloud": "朗读",
"Hello! I'm RWKV, an open-source and commercially available large language model.": "你好! 我是RWKV, 一个开源可商用的大语言模型.",
"Hello! I'm RWKV, an open-source and commercially usable large language model.": "你好! 我是RWKV, 一个开源可商用的大语言模型.",
"This tool's API is compatible with OpenAI API. It can be used with any ChatGPT tool you like. Go to the settings of some ChatGPT tool, replace the 'https://api.openai.com' part in the API address with '": "本工具的API与OpenAI API兼容. 因此可以配合任意你喜欢的ChatGPT工具使用. 打开某个ChatGPT工具的设置, 将API地址中的'https://api.openai.com'部分替换为'",
"New Version Available": "新版本可用",
"Update": "更新",
@@ -148,5 +148,14 @@
"Are you sure you want to clear the conversation? It cannot be undone.": "你确定要清空对话吗?这无法撤销",
"Save": "保存",
"Conversation Saved": "对话已保存",
"Open": "打开"
"Open": "打开",
"DPI Scaling": "显示缩放",
"Restart the app to apply DPI Scaling.": "重启应用以使显示缩放生效",
"Restart": "重启",
"API Chat Model Name": "API聊天模型名",
"API Completion Model Name": "API补全模型名",
"Localhost": "本地",
"Retry": "重试",
"Delete": "删除",
"Edit": "编辑"
}


@@ -4,7 +4,7 @@ import { useTranslation } from 'react-i18next';
import { ClipboardSetText } from '../../wailsjs/runtime';
import { ToolTipButton } from './ToolTipButton';
export const CopyButton: FC<{ content: string }> = ({ content }) => {
export const CopyButton: FC<{ content: string, showDelay?: number, }> = ({ content, showDelay = 0 }) => {
const { t } = useTranslation();
const [copied, setCopied] = useState(false);
@@ -19,7 +19,8 @@ export const CopyButton: FC<{ content: string }> = ({ content }) => {
};
return (
<ToolTipButton desc={t('Copy')} size="small" appearance="subtle" icon={copied ? <CheckIcon /> : <CopyIcon />}
<ToolTipButton desc={t('Copy')} size="small" appearance="subtle" showDelay={showDelay}
icon={copied ? <CheckIcon /> : <CopyIcon />}
onClick={onClick} />
);
};


@@ -7,13 +7,28 @@ import { observer } from 'mobx-react-lite';
const synth = window.speechSynthesis;
export const ReadButton: FC<{ content: string }> = observer(({ content }) => {
export const ReadButton: FC<{
content: string,
inSpeaking?: boolean,
showDelay?: number,
setSpeakingOuter?: (speaking: boolean) => void
}> = observer(({
content,
inSpeaking = false,
showDelay = 0,
setSpeakingOuter
}) => {
const { t } = useTranslation();
const [speaking, setSpeaking] = useState(false);
const [speaking, setSpeaking] = useState(inSpeaking);
let lang: string = commonStore.settings.language;
if (lang === 'dev')
lang = 'en';
const setSpeakingInner = (speaking: boolean) => {
setSpeakingOuter?.(speaking);
setSpeaking(speaking);
};
const startSpeak = () => {
synth.cancel();
@@ -31,22 +46,22 @@ export const ReadButton: FC<{ content: string }> = observer(({ content }) => {
Object.assign(utterance, {
rate: 1,
volume: 1,
onend: () => setSpeaking(false),
onerror: () => setSpeaking(false),
onend: () => setSpeakingInner(false),
onerror: () => setSpeakingInner(false),
voice: voice
});
synth.speak(utterance);
setSpeaking(true);
setSpeakingInner(true);
};
const stopSpeak = () => {
synth.cancel();
setSpeaking(false);
setSpeakingInner(false);
};
return (
<ToolTipButton desc={t('Read Aloud')} size="small" appearance="subtle"
<ToolTipButton desc={t('Read Aloud')} size="small" appearance="subtle" showDelay={showDelay}
icon={speaking ? <MuteIcon /> : <UnmuteIcon />}
onClick={speaking ? stopSpeak : startSpeak} />
);


@@ -167,9 +167,10 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
frequency_penalty: modelConfig.apiParameters.frequencyPenalty
});
const strategy = getStrategy(modelConfig);
let customCudaFile = '';
if ((modelConfig.modelParameters.device === 'CUDA' || modelConfig.modelParameters.device === 'Custom')
&& modelConfig.modelParameters.useCustomCuda) {
&& modelConfig.modelParameters.useCustomCuda && !strategy.includes('fp32')) {
if (commonStore.platform === 'windows') {
customCudaFile = getSupportedCustomCudaFile();
if (customCudaFile) {
@@ -194,7 +195,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
switchModel({
model: modelPath,
strategy: getStrategy(modelConfig),
strategy: strategy,
customCuda: customCudaFile !== ''
}).then(async (r) => {
if (r.ok) {


@@ -11,6 +11,7 @@ export const ToolTipButton: FC<{
appearance?: 'secondary' | 'primary' | 'outline' | 'subtle' | 'transparent';
disabled?: boolean,
onClick?: MouseEventHandler
showDelay?: number,
}> = ({
text,
desc,
@@ -20,10 +21,11 @@ export const ToolTipButton: FC<{
shape,
appearance,
disabled,
onClick
onClick,
showDelay = 0
}) => {
return (
<Tooltip content={desc} showDelay={0} hideDelay={0} relationship="label">
<Tooltip content={desc} showDelay={showDelay} hideDelay={0} relationship="label">
<Button className={className} disabled={disabled} icon={icon} onClick={onClick} size={size} shape={shape}
appearance={appearance}>{text}</Button>
</Tooltip>


@@ -1,12 +1,13 @@
import React, { FC, useEffect, useRef } from 'react';
import React, { FC, useCallback, useEffect, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { Avatar, PresenceBadge, Textarea } from '@fluentui/react-components';
import { Avatar, Button, Menu, MenuPopover, MenuTrigger, PresenceBadge, Textarea } from '@fluentui/react-components';
import commonStore, { ModelStatus } from '../stores/commonStore';
import { observer } from 'mobx-react-lite';
import { v4 as uuid } from 'uuid';
import classnames from 'classnames';
import { fetchEventSource } from '@microsoft/fetch-event-source';
import { ConversationPair, getConversationPairs, Record } from '../utils/get-conversation-pairs';
import { KebabHorizontalIcon, PencilIcon, SyncIcon, TrashIcon } from '@primer/octicons-react';
import { ConversationPair } from '../utils/get-conversation-pairs';
import logo from '../assets/images/logo.jpg';
import MarkdownRender from '../components/MarkdownRender';
import { ToolTipButton } from '../components/ToolTipButton';
@@ -22,6 +23,8 @@ import { toastWithButton } from '../utils';
export const userName = 'M E';
export const botName = 'A I';
export const welcomeUuid = 'welcome';
export enum MessageType {
Normal,
Error
@@ -48,6 +51,126 @@ export type Conversation = {
let chatSseController: AbortController | null = null;
const MoreUtilsButton: FC<{ uuid: string, setEditing: (editing: boolean) => void }> = observer(({
uuid,
setEditing
}) => {
const { t } = useTranslation();
const [speaking, setSpeaking] = useState(false);
const messageItem = commonStore.conversation[uuid];
return <Menu>
<MenuTrigger disableButtonEnhancement>
<Button icon={<KebabHorizontalIcon />} size="small" appearance="subtle" />
</MenuTrigger>
<MenuPopover style={{ minWidth: 0 }}>
<CopyButton content={messageItem.content} showDelay={500} />
<ReadButton content={messageItem.content} inSpeaking={speaking} showDelay={500} setSpeakingOuter={setSpeaking} />
<ToolTipButton desc={t('Edit')} icon={<PencilIcon />} showDelay={500} size="small" appearance="subtle"
onClick={() => {
setEditing(true);
}} />
<ToolTipButton desc={t('Delete')} icon={<TrashIcon />} showDelay={500} size="small" appearance="subtle"
onClick={() => {
commonStore.conversationOrder.splice(commonStore.conversationOrder.indexOf(uuid), 1);
delete commonStore.conversation[uuid];
}} />
</MenuPopover>
</Menu>;
});
const ChatMessageItem: FC<{
uuid: string, onSubmit: (message: string | null, answerId: string | null,
startUuid: string | null, endUuid: string | null, includeEndUuid: boolean) => void
}> = observer(({ uuid, onSubmit }) => {
const { t } = useTranslation();
const [editing, setEditing] = useState(false);
const textareaRef = useRef<HTMLTextAreaElement>(null);
const messageItem = commonStore.conversation[uuid];
console.log(uuid);
const setEditingInner = (editing: boolean) => {
setEditing(editing);
if (editing) {
setTimeout(() => {
const textarea = textareaRef.current;
if (textarea) {
textarea.focus();
textarea.selectionStart = textarea.value.length;
textarea.selectionEnd = textarea.value.length;
textarea.style.height = textarea.scrollHeight + 'px';
}
});
}
};
return <div
className={classnames(
'flex gap-2 mb-2 overflow-hidden',
messageItem.side === 'left' ? 'flex-row' : 'flex-row-reverse'
)}
onMouseEnter={() => {
const utils = document.getElementById('utils-' + uuid);
if (utils) utils.classList.remove('invisible');
}}
onMouseLeave={() => {
const utils = document.getElementById('utils-' + uuid);
if (utils) utils.classList.add('invisible');
}}
>
<Avatar
color={messageItem.color}
name={messageItem.sender}
image={messageItem.avatarImg ? { src: messageItem.avatarImg } : undefined}
/>
<div
className={classnames(
'flex p-2 rounded-lg overflow-hidden',
editing ? 'grow' : '',
messageItem.side === 'left' ? 'bg-gray-200' : 'bg-blue-500',
messageItem.side === 'left' ? 'text-gray-600' : 'text-white'
)}
>
{!editing ?
<MarkdownRender>{messageItem.content}</MarkdownRender> :
<Textarea ref={textareaRef}
className="grow"
style={{ minWidth: 0 }}
value={messageItem.content}
onChange={(e) => {
messageItem.content = e.target.value;
}}
onBlur={() => {
setEditingInner(false);
}} />}
</div>
<div className="flex flex-col gap-1 items-start">
<div className="grow" />
{(messageItem.type === MessageType.Error || !messageItem.done) &&
<PresenceBadge size="extra-small" status={
messageItem.type === MessageType.Error ? 'busy' : 'away'
} />
}
<div className="flex invisible" id={'utils-' + uuid}>
{
messageItem.sender === botName && uuid !== welcomeUuid &&
<ToolTipButton desc={t('Retry')} size="small" appearance="subtle"
icon={<SyncIcon />} onClick={() => {
onSubmit(null, uuid, null, uuid, false);
}} />
}
<ToolTipButton desc={t('Edit')} icon={<PencilIcon />} size="small" appearance="subtle"
onClick={() => {
setEditingInner(true);
}} />
<MoreUtilsButton uuid={uuid} setEditing={setEditingInner} />
</div>
</div>
</div>;
});
const ChatPanel: FC = observer(() => {
const { t } = useTranslation();
const bodyRef = useRef<HTMLDivElement>(null);
@@ -71,15 +194,15 @@ const ChatPanel: FC = observer(() => {
useEffect(() => {
if (commonStore.conversationOrder.length === 0) {
commonStore.setConversationOrder(['welcome']);
commonStore.setConversationOrder([welcomeUuid]);
commonStore.setConversation({
'welcome': {
[welcomeUuid]: {
sender: botName,
type: MessageType.Normal,
color: 'colorful',
avatarImg: logo,
time: new Date().toISOString(),
content: t('Hello! I\'m RWKV, an open-source and commercially available large language model.'),
content: t('Hello! I\'m RWKV, an open-source and commercially usable large language model.'),
side: 'left',
done: true
}
@@ -96,7 +219,7 @@ const ChatPanel: FC = observer(() => {
e.stopPropagation();
if (e.type === 'click' || (e.keyCode === 13 && !e.shiftKey)) {
e.preventDefault();
if (commonStore.status.status === ModelStatus.Offline) {
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl) {
toast(t('Please click the button in the top right corner to start the model'), { type: 'warning' });
return;
}
@@ -106,38 +229,48 @@ const ChatPanel: FC = observer(() => {
}
};
const onSubmit = (message: string) => {
const newId = uuid();
commonStore.conversation[newId] = {
sender: userName,
type: MessageType.Normal,
color: 'brand',
time: new Date().toISOString(),
content: message,
side: 'right',
done: true
};
commonStore.setConversation(commonStore.conversation);
commonStore.conversationOrder.push(newId);
commonStore.setConversationOrder(commonStore.conversationOrder);
// if message is not null, create a user message;
// if answerId is not null, override the answer with new response;
// if startUuid is null, start generating api body messages from first message;
// if endUuid is null, generate api body messages until last message;
const onSubmit = useCallback((message: string | null = null, answerId: string | null = null,
startUuid: string | null = null, endUuid: string | null = null, includeEndUuid: boolean = false) => {
if (message) {
const newId = uuid();
commonStore.conversation[newId] = {
sender: userName,
type: MessageType.Normal,
color: 'brand',
time: new Date().toISOString(),
content: message,
side: 'right',
done: true
};
commonStore.setConversation(commonStore.conversation);
commonStore.conversationOrder.push(newId);
commonStore.setConversationOrder(commonStore.conversationOrder);
}
const records: Record[] = [];
commonStore.conversationOrder.forEach((uuid, index) => {
let startIndex = startUuid ? commonStore.conversationOrder.indexOf(startUuid) : 0;
let endIndex = endUuid ? (commonStore.conversationOrder.indexOf(endUuid) + (includeEndUuid ? 1 : 0)) : commonStore.conversationOrder.length;
let targetRange = commonStore.conversationOrder.slice(startIndex, endIndex);
const messages: ConversationPair[] = [];
targetRange.forEach((uuid, index) => {
if (uuid === welcomeUuid)
return;
const messageItem = commonStore.conversation[uuid];
if (messageItem.done && messageItem.type === MessageType.Normal && messageItem.sender === botName) {
if (index > 0) {
const questionId = commonStore.conversationOrder[index - 1];
const question = commonStore.conversation[questionId];
if (question.done && question.type === MessageType.Normal && question.sender === userName) {
records.push({ question: question.content, answer: messageItem.content });
}
}
if (messageItem.done && messageItem.type === MessageType.Normal && messageItem.sender === userName) {
messages.push({ role: 'user', content: messageItem.content });
} else if (messageItem.done && messageItem.type === MessageType.Normal && messageItem.sender === botName) {
messages.push({ role: 'assistant', content: messageItem.content });
}
});
const messages = getConversationPairs(records, false);
(messages as ConversationPair[]).push({ role: 'user', content: message });
const answerId = uuid();
if (answerId === null) {
answerId = uuid();
commonStore.conversationOrder.push(answerId);
}
commonStore.conversation[answerId] = {
sender: botName,
type: MessageType.Normal,
@@ -149,30 +282,31 @@ const ChatPanel: FC = observer(() => {
done: false
};
commonStore.setConversation(commonStore.conversation);
commonStore.conversationOrder.push(answerId);
commonStore.setConversationOrder(commonStore.conversationOrder);
setTimeout(scrollToBottom);
let answer = '';
chatSseController = new AbortController();
fetchEventSource(`http://127.0.0.1:${port}/chat/completions`, // https://api.openai.com/v1/chat/completions || http://127.0.0.1:${port}/chat/completions
fetchEventSource( // https://api.openai.com/v1/chat/completions || http://127.0.0.1:${port}/chat/completions
commonStore.settings.apiUrl ?
commonStore.settings.apiUrl + '/v1/chat/completions' :
`http://127.0.0.1:${port}/chat/completions`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer sk-`
Authorization: `Bearer ${commonStore.settings.apiKey}`
},
body: JSON.stringify({
messages,
stream: true,
model: 'gpt-3.5-turbo'
model: commonStore.settings.apiChatModelName // 'gpt-3.5-turbo'
}),
signal: chatSseController?.signal,
onmessage(e) {
console.log('sse message', e);
scrollToBottom();
if (e.data === '[DONE]') {
commonStore.conversation[answerId].done = true;
commonStore.conversation[answerId].content = commonStore.conversation[answerId].content.trim();
commonStore.conversation[answerId!].done = true;
commonStore.conversation[answerId!].content = commonStore.conversation[answerId!].content.trim();
commonStore.setConversation(commonStore.conversation);
commonStore.setConversationOrder([...commonStore.conversationOrder]);
return;
@@ -186,72 +320,42 @@ const ChatPanel: FC = observer(() => {
}
if (data.choices && Array.isArray(data.choices) && data.choices.length > 0) {
answer += data.choices[0]?.delta?.content || '';
commonStore.conversation[answerId].content = answer;
commonStore.conversation[answerId!].content = answer;
commonStore.setConversation(commonStore.conversation);
commonStore.setConversationOrder([...commonStore.conversationOrder]);
}
},
async onopen(response) {
if (response.status !== 200) {
commonStore.conversation[answerId!].content += '\n[ERROR]\n```\n' + response.statusText + '\n' + (await response.text()) + '\n```';
commonStore.setConversation(commonStore.conversation);
commonStore.setConversationOrder([...commonStore.conversationOrder]);
setTimeout(scrollToBottom);
}
},
onclose() {
console.log('Connection closed');
},
onerror(err) {
commonStore.conversation[answerId].type = MessageType.Error;
commonStore.conversation[answerId].done = true;
commonStore.conversation[answerId!].type = MessageType.Error;
commonStore.conversation[answerId!].done = true;
err = err.message || err;
if (err && !err.includes('ReadableStreamDefaultReader'))
commonStore.conversation[answerId!].content += '\n[ERROR]\n```\n' + err + '\n```';
commonStore.setConversation(commonStore.conversation);
commonStore.setConversationOrder([...commonStore.conversationOrder]);
setTimeout(scrollToBottom);
throw err;
}
});
};
}, []);
return (
<div className="flex flex-col w-full grow gap-4 pt-4 overflow-hidden">
<div ref={bodyRef} className="grow overflow-y-scroll overflow-x-hidden pr-2">
{commonStore.conversationOrder.map((uuid, index) => {
const messageItem = commonStore.conversation[uuid];
return <div
key={uuid}
className={classnames(
'flex gap-2 mb-2 overflow-hidden',
messageItem.side === 'left' ? 'flex-row' : 'flex-row-reverse'
)}
onMouseEnter={() => {
const utils = document.getElementById('utils-' + uuid);
if (utils) utils.classList.remove('invisible');
}}
onMouseLeave={() => {
const utils = document.getElementById('utils-' + uuid);
if (utils) utils.classList.add('invisible');
}}
>
<Avatar
color={messageItem.color}
name={messageItem.sender}
image={messageItem.avatarImg ? { src: messageItem.avatarImg } : undefined}
/>
<div
className={classnames(
'p-2 rounded-lg overflow-hidden',
messageItem.side === 'left' ? 'bg-gray-200' : 'bg-blue-500',
messageItem.side === 'left' ? 'text-gray-600' : 'text-white'
)}
>
<MarkdownRender>{messageItem.content}</MarkdownRender>
</div>
<div className="flex flex-col gap-1 items-start">
<div className="grow" />
{(messageItem.type === MessageType.Error || !messageItem.done) &&
<PresenceBadge size="extra-small" status={
messageItem.type === MessageType.Error ? 'busy' : 'away'
} />
}
<div className="flex invisible" id={'utils-' + uuid}>
<ReadButton content={messageItem.content} />
<CopyButton content={messageItem.content} />
</div>
</div>
</div>;
})}
{commonStore.conversationOrder.map(uuid =>
<ChatMessageItem key={uuid} uuid={uuid} onSubmit={onSubmit} />
)}
</div>
<div className="flex items-end gap-2">
<DialogButton tooltip={t('Clear')}


@@ -180,7 +180,7 @@ const CompletionPanel: FC = observer(() => {
};
const onSubmit = (prompt: string) => {
if (commonStore.status.status === ModelStatus.Offline) {
if (commonStore.status.status === ModelStatus.Offline && !commonStore.settings.apiUrl) {
toast(t('Please click the button in the top right corner to start the model'), { type: 'warning' });
commonStore.setCompletionGenerating(false);
return;
@@ -190,17 +190,20 @@ const CompletionPanel: FC = observer(() => {
let answer = '';
completionSseController = new AbortController();
fetchEventSource(`http://127.0.0.1:${port}/completions`, // https://api.openai.com/v1/completions || http://127.0.0.1:${port}/completions
fetchEventSource( // https://api.openai.com/v1/completions || http://127.0.0.1:${port}/completions
commonStore.settings.apiUrl ?
commonStore.settings.apiUrl + '/v1/completions' :
`http://127.0.0.1:${port}/completions`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer sk-`
Authorization: `Bearer ${commonStore.settings.apiKey}`
},
body: JSON.stringify({
prompt,
stream: true,
model: 'text-davinci-003',
model: commonStore.settings.apiCompletionModelName, // 'text-davinci-003'
max_tokens: params.maxResponseToken,
temperature: params.temperature,
top_p: params.topP,
@@ -210,7 +213,6 @@ const CompletionPanel: FC = observer(() => {
}),
signal: completionSseController?.signal,
onmessage(e) {
console.log('sse message', e);
scrollToBottom();
if (e.data === '[DONE]') {
commonStore.setCompletionGenerating(false);
@@ -228,10 +230,22 @@ const CompletionPanel: FC = observer(() => {
setPrompt(prompt + answer.trim() + params.injectEnd.replaceAll('\\n', '\n'));
}
},
async onopen(response) {
if (response.status !== 200) {
toast(response.statusText + '\n' + (await response.text()), {
type: 'error'
});
}
},
onclose() {
console.log('Connection closed');
},
onerror(err) {
err = err.message || err;
if (err && !err.includes('ReadableStreamDefaultReader'))
toast(err, {
type: 'error'
});
commonStore.setCompletionGenerating(false);
throw err;
}
@@ -260,7 +274,7 @@ const CompletionPanel: FC = observer(() => {
<Option key={preset.name} value={preset.name}>{t(preset.name)!}</Option>)
}
</Dropdown>
<div className="flex flex-col gap-1 overflow-x-hidden overflow-y-auto">
<div className="flex flex-col gap-1 overflow-x-hidden overflow-y-auto p-1">
<Labeled flex breakline label={t('Max Response Token')}
desc={t('By default, the maximum number of tokens that can be answered in a single response, it can be changed by the user by specifying API parameters.')}
content={
@@ -297,7 +311,7 @@ const CompletionPanel: FC = observer(() => {
<Labeled flex breakline label={t('Presence Penalty')}
desc={t('Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.')}
content={
<ValuedSlider value={params.presencePenalty} min={-2} max={2}
<ValuedSlider value={params.presencePenalty} min={0} max={2}
step={0.1} input
onChange={(e, data) => {
setParams({
@@ -308,7 +322,7 @@ const CompletionPanel: FC = observer(() => {
<Labeled flex breakline label={t('Frequency Penalty')}
desc={t('Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.')}
content={
<ValuedSlider value={params.frequencyPenalty} min={-2} max={2}
<ValuedSlider value={params.frequencyPenalty} min={0} max={2}
step={0.1} input
onChange={(e, data) => {
setParams({


@@ -14,7 +14,8 @@ import { Labeled } from '../components/Labeled';
import commonStore from '../stores/commonStore';
import { observer } from 'mobx-react-lite';
import { useTranslation } from 'react-i18next';
import { checkUpdate } from '../utils';
import { checkUpdate, toastWithButton } from '../utils';
import { RestartApp } from '../../wailsjs/go/backend_golang/App';
export const Languages = {
dev: 'English', // i18n default
@@ -30,8 +31,13 @@ export type SettingsType = {
giteeUpdatesSource: boolean
cnMirror: boolean
host: string
dpiScaling: number
customModelsPath: string
customPythonPath: string
apiUrl: string
apiKey: string
apiChatModelName: string
apiCompletionModelName: string
}
export const Settings: FC = observer(() => {
@@ -45,7 +51,7 @@ export const Settings: FC = observer(() => {
return (
<Page title={t('Settings')} content={
<div className="flex flex-col gap-2 overflow-hidden">
<div className="flex flex-col gap-2 overflow-y-auto overflow-x-hidden p-1">
<Labeled label={t('Language')} flex spaceBetween content={
<Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
value={Languages[commonStore.settings.language]}
@@ -56,7 +62,6 @@ export const Settings: FC = observer(() => {
commonStore.setSettings({
language: lang
});
i18n.changeLanguage(lang);
}
}}>
{
@@ -65,6 +70,31 @@ export const Settings: FC = observer(() => {
}
</Dropdown>
} />
{
commonStore.platform === 'windows' &&
<Labeled label={t('DPI Scaling')} flex spaceBetween content={
<Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
value={commonStore.settings.dpiScaling + '%'}
selectedOptions={[commonStore.settings.dpiScaling.toString()]}
onOptionSelect={(_, data) => {
if (data.optionValue) {
commonStore.setSettings({
dpiScaling: Number(data.optionValue)
});
toastWithButton(t('Restart the app to apply DPI Scaling.'), t('Restart'), () => {
RestartApp();
}, {
autoClose: 5000
});
}
}}>
{
Array.from({ length: 7 }, (_, i) => (i + 2) * 25).map((v, i) =>
<Option key={i} value={v.toString()}>{v + '%'}</Option>)
}
</Dropdown>
} />
}
<Labeled label={t('Dark Mode')} flex spaceBetween content={
<Switch checked={commonStore.settings.darkMode}
onChange={(e, data) => {
@@ -113,8 +143,11 @@ export const Settings: FC = observer(() => {
});
}} />
} />
<Accordion collapsible>
<AccordionItem value="1">
<Accordion collapsible openItems={!commonStore.advancedCollapsed && 'advanced'} onToggle={(e, data) => {
if (data.value === 'advanced')
commonStore.setAdvancedCollapsed(!commonStore.advancedCollapsed);
}}>
<AccordionItem value="advanced">
<AccordionHeader ref={advancedHeaderRef} size="large">{t('Advanced')}</AccordionHeader>
<AccordionPanel>
<div className="flex flex-col gap-2 overflow-hidden">
@@ -138,6 +171,102 @@ export const Settings: FC = observer(() => {
});
}} />
} />
<Labeled label={'API URL'}
content={
<div className="flex gap-2">
<Input style={{ minWidth: 0 }} className="grow" value={commonStore.settings.apiUrl}
onChange={(e, data) => {
commonStore.setSettings({
apiUrl: data.value
});
}} />
<Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
value="..." selectedOptions={[]} expandIcon={null}
onOptionSelect={(_, data) => {
commonStore.setSettings({
apiUrl: data.optionValue
});
if (data.optionText === 'OpenAI') {
if (commonStore.settings.apiChatModelName === 'rwkv')
commonStore.setSettings({
apiChatModelName: 'gpt-3.5-turbo'
});
if (commonStore.settings.apiCompletionModelName === 'rwkv')
commonStore.setSettings({
apiCompletionModelName: 'text-davinci-003'
});
}
}}>
<Option value="">{t('Localhost')!}</Option>
<Option value="https://api.openai.com">OpenAI</Option>
</Dropdown>
</div>
} />
<Labeled label={'API Key'}
content={
<Input className="grow" placeholder="sk-" value={commonStore.settings.apiKey}
onChange={(e, data) => {
commonStore.setSettings({
apiKey: data.value
});
}} />
} />
<Labeled label={t('API Chat Model Name')}
content={
<div className="flex gap-2">
<Input style={{ minWidth: 0 }} className="grow" placeholder="rwkv"
value={commonStore.settings.apiChatModelName}
onChange={(e, data) => {
commonStore.setSettings({
apiChatModelName: data.value
});
}} />
<Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
value="..." selectedOptions={[]} expandIcon={null}
onOptionSelect={(_, data) => {
if (data.optionValue) {
commonStore.setSettings({
apiChatModelName: data.optionValue
});
}
}}>
{
['rwkv', 'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-32k-0613', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613']
.map((v, i) =>
<Option key={i} value={v}>{v}</Option>
)
}
</Dropdown>
</div>
} />
<Labeled label={t('API Completion Model Name')}
content={
<div className="flex gap-2">
<Input style={{ minWidth: 0 }} className="grow" placeholder="rwkv"
value={commonStore.settings.apiCompletionModelName}
onChange={(e, data) => {
commonStore.setSettings({
apiCompletionModelName: data.value
});
}} />
<Dropdown style={{ minWidth: 0 }} listbox={{ style: { minWidth: 0 } }}
value="..." selectedOptions={[]} expandIcon={null}
onOptionSelect={(_, data) => {
if (data.optionValue) {
commonStore.setSettings({
apiCompletionModelName: data.optionValue
});
}
}}>
{
['rwkv', 'text-davinci-003', 'text-davinci-002', 'text-curie-001', 'text-babbage-001', 'text-ada-001']
.map((v, i) =>
<Option key={i} value={v}>{v}</Option>
)
}
</Dropdown>
</div>
} />
</div>
</AccordionPanel>
</AccordionItem>
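The four settings added above (API URL, API Key, and the chat/completion model names) let the client target either the bundled local server or the official OpenAI endpoint; picking "OpenAI" from the URL dropdown also swaps the default 'rwkv' model names for their OpenAI equivalents. As a rough illustration only — the helper name, default base URL, and request path below are assumptions, not the app's actual request code — an OpenAI-compatible chat request could be assembled from these settings like this:

```typescript
// Illustrative sketch: combining the new apiUrl / apiKey / apiChatModelName
// settings into an OpenAI-compatible chat request. Base URL, path and helper
// name are assumptions, not taken from the app's source.
interface ApiSettings {
  apiUrl: string;           // '' selects the bundled local server
  apiKey: string;           // the 'sk-' placeholder is enough for the local backend
  apiChatModelName: string; // 'rwkv' locally, e.g. 'gpt-3.5-turbo' against OpenAI
}

function buildChatRequest(settings: ApiSettings, userMessage: string): Request {
  const base = settings.apiUrl || 'http://127.0.0.1:8000'; // assumed local default
  return new Request(`${base}/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${settings.apiKey}`
    },
    body: JSON.stringify({
      model: settings.apiChatModelName,
      messages: [{ role: 'user', content: userMessage }]
    })
  });
}
```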

View File

@@ -88,7 +88,7 @@ export const defaultModelConfigsMac: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-3B-v1-OnlyForTest_80%_trained-20230612-ctx4096.pth',
modelName: 'RWKV-4-World-3B-v1-20230619-ctx4096.pth',
device: 'MPS',
precision: 'fp32',
storedLayers: 41,
@@ -145,7 +145,7 @@ export const defaultModelConfigsMac: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_75%_trained-20230615-ctx4096.pth',
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_84%_trained-20230618-ctx4096.pth',
device: 'MPS',
precision: 'fp32',
storedLayers: 41,
@@ -200,7 +200,7 @@ export const defaultModelConfigsMac: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-3B-v1-OnlyForTest_80%_trained-20230612-ctx4096.pth',
modelName: 'RWKV-4-World-3B-v1-20230619-ctx4096.pth',
device: 'CPU',
precision: 'fp32',
storedLayers: 41,
@@ -254,7 +254,7 @@ export const defaultModelConfigsMac: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_75%_trained-20230615-ctx4096.pth',
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_84%_trained-20230618-ctx4096.pth',
device: 'CPU',
precision: 'fp32',
storedLayers: 41,
@@ -311,7 +311,7 @@ export const defaultModelConfigs: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-3B-v1-OnlyForTest_80%_trained-20230612-ctx4096.pth',
modelName: 'RWKV-4-World-3B-v1-20230619-ctx4096.pth',
device: 'CUDA',
precision: 'int8',
storedLayers: 6,
@@ -422,7 +422,7 @@ export const defaultModelConfigs: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-3B-v1-OnlyForTest_80%_trained-20230612-ctx4096.pth',
modelName: 'RWKV-4-World-3B-v1-20230619-ctx4096.pth',
device: 'CUDA',
precision: 'int8',
storedLayers: 24,
@@ -479,7 +479,7 @@ export const defaultModelConfigs: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_75%_trained-20230615-ctx4096.pth',
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_84%_trained-20230618-ctx4096.pth',
device: 'CUDA',
precision: 'int8',
storedLayers: 8,
@@ -555,7 +555,7 @@ export const defaultModelConfigs: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-3B-v1-OnlyForTest_80%_trained-20230612-ctx4096.pth',
modelName: 'RWKV-4-World-3B-v1-20230619-ctx4096.pth',
device: 'CUDA',
precision: 'int8',
storedLayers: 41,
@@ -612,7 +612,7 @@ export const defaultModelConfigs: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_75%_trained-20230615-ctx4096.pth',
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_84%_trained-20230618-ctx4096.pth',
device: 'CUDA',
precision: 'int8',
storedLayers: 18,
@@ -687,7 +687,7 @@ export const defaultModelConfigs: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-3B-v1-OnlyForTest_80%_trained-20230612-ctx4096.pth',
modelName: 'RWKV-4-World-3B-v1-20230619-ctx4096.pth',
device: 'CUDA',
precision: 'fp16',
storedLayers: 41,
@@ -744,7 +744,7 @@ export const defaultModelConfigs: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_75%_trained-20230615-ctx4096.pth',
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_84%_trained-20230618-ctx4096.pth',
device: 'CUDA',
precision: 'int8',
storedLayers: 27,
@@ -801,7 +801,7 @@ export const defaultModelConfigs: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_75%_trained-20230615-ctx4096.pth',
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_84%_trained-20230618-ctx4096.pth',
device: 'CUDA',
precision: 'int8',
storedLayers: 41,
@@ -877,7 +877,7 @@ export const defaultModelConfigs: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_75%_trained-20230615-ctx4096.pth',
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_84%_trained-20230618-ctx4096.pth',
device: 'CUDA',
precision: 'fp16',
storedLayers: 41,
@@ -1027,7 +1027,7 @@ export const defaultModelConfigs: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-3B-v1-OnlyForTest_80%_trained-20230612-ctx4096.pth',
modelName: 'RWKV-4-World-3B-v1-20230619-ctx4096.pth',
device: 'CPU',
precision: 'fp32',
storedLayers: 41,
@@ -1081,7 +1081,7 @@ export const defaultModelConfigs: ModelConfig[] = [
frequencyPenalty: 0.4
},
modelParameters: {
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_75%_trained-20230615-ctx4096.pth',
modelName: 'RWKV-4-World-7B-v1-OnlyForTest_84%_trained-20230618-ctx4096.pth',
device: 'CPU',
precision: 'fp32',
storedLayers: 41,

View File

@@ -56,6 +56,7 @@ class CommonStore {
// downloads
downloadList: DownloadStatus[] = [];
// settings
advancedCollapsed: boolean = true;
settings: SettingsType = {
language: getUserLanguage(),
darkMode: !isSystemLightMode(),
@@ -63,8 +64,13 @@ class CommonStore {
giteeUpdatesSource: getUserLanguage() === 'zh',
cnMirror: getUserLanguage() === 'zh',
host: '127.0.0.1',
dpiScaling: 100,
customModelsPath: './models',
customPythonPath: ''
customPythonPath: '',
apiUrl: '',
apiKey: 'sk-',
apiChatModelName: 'rwkv',
apiCompletionModelName: 'rwkv'
};
// about
about: AboutContent = manifest.about;
@@ -187,6 +193,10 @@ class CommonStore {
setCurrentInput(value: string) {
this.currentInput = value;
}
setAdvancedCollapsed(value: boolean) {
this.advancedCollapsed = value;
}
}
export default new CommonStore();

View File

@@ -126,19 +126,28 @@ export const getStrategy = (modelConfig: ModelConfig | undefined = undefined) =>
let params: ModelParameters;
if (modelConfig) params = modelConfig.modelParameters;
else params = commonStore.getCurrentModelConfig().modelParameters;
const modelName = params.modelName.toLowerCase();
const avoidOverflow = params.precision !== 'fp32' && modelName.includes('world') && (modelName.includes('0.1b') || modelName.includes('0.4b') ||
modelName.includes('1.5b') || modelName.includes('1b5'));
let strategy = '';
switch (params.device) {
case 'CPU':
if (avoidOverflow)
strategy = 'cpu fp32 *1 -> ';
strategy += 'cpu ';
strategy += params.precision === 'int8' ? 'fp32i8' : 'fp32';
break;
case 'CUDA':
if (avoidOverflow)
strategy = 'cuda fp32 *1 -> ';
strategy += 'cuda ';
strategy += params.precision === 'fp16' ? 'fp16' : params.precision === 'int8' ? 'fp16i8' : 'fp32';
if (params.storedLayers < params.maxStoredLayers)
strategy += ` *${params.storedLayers}+`;
break;
case 'MPS':
if (avoidOverflow)
strategy = 'mps fp32 *1 -> ';
strategy += 'mps ';
strategy += params.precision === 'fp16' ? 'fp16' : params.precision === 'int8' ? 'fp16i8' : 'fp32';
break;
@@ -318,9 +327,10 @@ export function toastWithButton(text: string, buttonText: string, onClickButton:
}
export function getSupportedCustomCudaFile() {
if ([' 10', ' 16', ' 20', ' 30', 'MX', 'Tesla P', 'Quadro P', 'NVIDIA P', 'TITAN X', 'TITAN RTX', 'RTX A'].some(v => commonStore.status.device_name.includes(v)))
if ([' 10', ' 16', ' 20', ' 30', 'MX', 'Tesla P', 'Quadro P', 'NVIDIA P', 'TITAN X', 'TITAN RTX', 'RTX A',
'Quadro RTX 4000', 'Quadro RTX 5000', 'Tesla T4', 'NVIDIA A10', 'NVIDIA A40'].some(v => commonStore.status.device_name.includes(v)))
return './backend-python/wkv_cuda_utils/wkv_cuda10_30.pyd';
else if ([' 40', 'RTX TITAN Ada'].some(v => commonStore.status.device_name.includes(v)))
else if ([' 40', 'RTX 5000 Ada', 'RTX 6000 Ada', 'RTX TITAN Ada', 'NVIDIA L40'].some(v => commonStore.status.device_name.includes(v)))
return './backend-python/wkv_cuda_utils/wkv_cuda40.pyd';
else
return '';
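The avoidOverflow change above prepends a single fp32 first stage for small World models (0.1B, 0.4B, 1.5B/1B5) whenever the selected precision is not fp32. A few hand-worked examples of the strategy strings the updated getStrategy should now emit — the model names and layer counts are illustrative, and the expected values are derived from the diff rather than from running the app:

```typescript
// Hand-derived expectations for the updated getStrategy (illustrative only).
type StrategyExample = { device: string; precision: string; model: string; expected: string };

const examples: StrategyExample[] = [
  // Small World model + int8 on CUDA, 24 of 41 layers kept on the GPU:
  { device: 'CUDA', precision: 'int8', model: 'RWKV-4-World-1.5B', expected: 'cuda fp32 *1 -> cuda fp16i8 *24+' },
  // 3B is not in the small-model list, so no fp32 prefix is added:
  { device: 'CUDA', precision: 'fp16', model: 'RWKV-4-World-3B', expected: 'cuda fp16' },
  // CPU int8 with a 0.4B World model:
  { device: 'CPU', precision: 'int8', model: 'RWKV-4-World-0.4B', expected: 'cpu fp32 *1 -> cpu fp32i8' },
  // fp32 never triggers avoidOverflow, even for small World models:
  { device: 'MPS', precision: 'fp32', model: 'RWKV-4-World-1.5B', expected: 'mps fp32' }
];
```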

View File

@@ -34,6 +34,8 @@ export function ReadFileInfo(arg1:string):Promise<backend_golang.FileInfo>;
export function ReadJson(arg1:string):Promise<any>;
export function RestartApp():Promise<void>;
export function SaveJson(arg1:string,arg2:any):Promise<void>;
export function StartServer(arg1:string,arg2:number,arg3:string):Promise<string>;

View File

@@ -66,6 +66,10 @@ export function ReadJson(arg1) {
return window['go']['backend_golang']['App']['ReadJson'](arg1);
}
export function RestartApp() {
return window['go']['backend_golang']['App']['RestartApp']();
}
export function SaveJson(arg1, arg2) {
return window['go']['backend_golang']['App']['SaveJson'](arg1, arg2);
}

main.go (19 lines changed)
View File

@@ -11,6 +11,7 @@ import (
"github.com/wailsapp/wails/v2"
"github.com/wailsapp/wails/v2/pkg/options"
"github.com/wailsapp/wails/v2/pkg/options/assetserver"
"github.com/wailsapp/wails/v2/pkg/options/windows"
)
//go:embed all:frontend/dist
@@ -36,13 +37,29 @@ func main() {
// Create an instance of the app structure
app := backend.NewApp()
var zoomFactor float64 = 1.0
data, err := app.ReadJson("config.json")
if err == nil {
app.HasConfigData = true
app.ConfigData = data.(map[string]any)
if dpiScaling, ok := app.ConfigData["settings"].(map[string]any)["dpiScaling"]; ok {
zoomFactor = dpiScaling.(float64) / 100
}
} else {
app.HasConfigData = false
}
// Create application with options
err := wails.Run(&options.App{
err = wails.Run(&options.App{
Title: "RWKV-Runner",
Width: 1024,
Height: 680,
MinWidth: 375,
MinHeight: 640,
Windows: &windows.Options{
ZoomFactor: zoomFactor,
IsZoomControlEnabled: true,
},
AssetServer: &assetserver.Options{
Assets: assets,
},
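With this change, main.go reads config.json before the Wails window is created and converts the saved dpiScaling percentage into the window's ZoomFactor. The values line up with the Settings dropdown, which offers 50% through 200% in 25% steps:

```typescript
// Sketch of the DPI Scaling -> ZoomFactor mapping.
// The Settings dropdown builds its options with the same expression;
// main.go divides the saved percentage by 100 before passing it to Wails.
const dpiOptions = Array.from({ length: 7 }, (_, i) => (i + 2) * 25); // [50, 75, 100, 125, 150, 175, 200]
const zoomFactors = dpiOptions.map(p => p / 100);                     // [0.5, 0.75, 1, 1.25, 1.5, 1.75, 2]
```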

View File

@@ -1,5 +1,5 @@
{
"version": "1.2.3",
"version": "1.2.7",
"introduction": {
"en": "RWKV is an open-source, commercially usable large language model with high flexibility and great potential for development.\n### About This Tool\nThis tool aims to lower the barrier of entry for using large language models, making it accessible to everyone. It provides fully automated dependency and model management. You simply need to click and run, following the instructions, to deploy a local large language model. The tool itself is very compact and only requires a single executable file for one-click deployment.\nAdditionally, this tool offers an interface that is fully compatible with the OpenAI API. This means you can use any ChatGPT client as a client for RWKV, enabling capability expansion beyond just chat functionality.\n### Preset Configuration Rules at the Bottom\nThis tool comes with a series of preset configurations to reduce complexity. The naming rules for each configuration represent the following in order: device - required VRAM/memory - model size - model language.\nFor example, \"GPU-8G-3B-EN\" indicates that this configuration is for a graphics card with 8GB of VRAM, a model size of 3 billion parameters, and it uses an English language model.\nLarger model sizes have higher performance and VRAM requirements. Among configurations with the same model size, those with higher VRAM usage will have faster runtime.\nFor example, if you have 12GB of VRAM but running the \"GPU-12G-7B-EN\" configuration is slow, you can downgrade to \"GPU-8G-3B-EN\" for a significant speed improvement.\n### About RWKV\nRWKV is an RNN with Transformer-level LLM performance, which can also be directly trained like a GPT transformer (parallelizable). And it's 100% attention-free. You only need the hidden state at position t to compute the state at position t+1. You can use the \"GPT\" mode to quickly compute the hidden state for the \"RNN\" mode.<br/>So it's combining the best of RNN and transformer - great performance, fast inference, saves VRAM, fast training, \"infinite\" ctx_len, and free sentence embedding (using the final hidden state).",
"zh": "RWKV是一个开源且允许商用的大语言模型灵活性很高且极具发展潜力。\n### 关于本工具\n本工具旨在降低大语言模型的使用门槛做到人人可用本工具提供了全自动化的依赖和模型管理你只需要直接点击运行跟随引导即可完成本地大语言模型的部署工具本身体积极小只需要一个exe即可完成一键部署。\n此外本工具提供了与OpenAI API完全兼容的接口这意味着你可以把任意ChatGPT客户端用作RWKV的客户端实现能力拓展而不局限于聊天。\n### 底部的预设配置规则\n本工具内置了一系列预设配置以降低使用难度每个配置名的规则依次代表着设备-所需显存/内存-模型规模-模型语言。\n例如GPU-8G-3B-CN表示该配置用于显卡需要8G显存模型规模为30亿参数使用的是中文模型。\n模型规模越大性能要求越高显存要求也越高而同样模型规模的配置中显存占用越高的运行速度越快。\n例如当你有12G显存但运行GPU-12G-7B-CN配置速度比较慢可降级成GPU-8G-3B-CN将会大幅提速。\n### 关于RWKV\nRWKV是具有Transformer级别LLM性能的RNN也可以像GPT Transformer一样直接进行训练可并行化。而且它是100% attention-free的。你只需在位置t处获得隐藏状态即可计算位置t + 1处的状态。你可以使用“GPT”模式快速计算用于“RNN”模式的隐藏状态。\n因此它将RNN和Transformer的优点结合起来 - 高性能、快速推理、节省显存、快速训练、“无限”上下文长度以及免费的语句嵌入(使用最终隐藏状态)。"
@@ -15,11 +15,23 @@
}
],
"models": [
{
"name": "RWKV-4-World-CHNtuned-0.1B-v1-20230617-ctx4096.pth",
"desc": {
"en": "Global Languages 0.1B v1 Enhanced Chinese",
"zh": "全球语言 0.1B v1 中文增强"
},
"size": 385594610,
"SHA256": "a3888f9958d378ee6d4976ae1c02edb698f4382e426086febafb4a69417b9080",
"lastUpdated": "2023-06-17T18:35:26",
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-CHNtuned-0.1B-v1-20230617-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-CHNtuned-0.1B-v1-20230617-ctx4096.pth"
},
{
"name": "RWKV-4-World-0.1B-v1-20230520-ctx4096.pth",
"desc": {
"en": "100+ Languages 0.1B v1",
"zh": "100+ 语言 0.1B v1"
"en": "Global Languages 0.1B v1",
"zh": "全球语言 0.1B v1"
},
"size": 385594610,
"SHA256": "a10ef99df2a8f8a6801edf4fc92a9c49bedd63dcb900d3e5667a2136b3d671e7",
@@ -27,11 +39,23 @@
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-0.1B-v1-20230520-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-0.1B-v1-20230520-ctx4096.pth"
},
{
"name": "RWKV-4-World-CHNtuned-0.4B-v1-20230618-ctx4096.pth",
"desc": {
"en": "Global Languages 0.4B v1 Enhanced Chinese",
"zh": "全球语言 0.4B v1 中文增强"
},
"size": 923362866,
"SHA256": "dbd5302cbee596bbc900f97eb10b2af3001a7f2c7e4d8643bf8683b2cdbdd324",
"lastUpdated": "2023-06-18T10:46:50",
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-CHNtuned-0.4B-v1-20230618-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-CHNtuned-0.4B-v1-20230618-ctx4096.pth"
},
{
"name": "RWKV-4-World-0.4B-v1-20230529-ctx4096.pth",
"desc": {
"en": "100+ Languages 0.4B v1",
"zh": "100+ 语言 0.4B v1"
"en": "Global Languages 0.4B v1",
"zh": "全球语言 0.4B v1"
},
"size": 923362866,
"SHA256": "4b4a2733cf5e5dc97dd62106f391d99895d16b11c5ccd10c89f28c52067a4919",
@@ -39,11 +63,23 @@
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-0.4B-v1-20230529-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-0.4B-v1-20230529-ctx4096.pth"
},
{
"name": "RWKV-4-World-CHNtuned-1.5B-v1-20230620-ctx4096.pth",
"desc": {
"en": "Global Languages 1.5B v1 Enhanced Chinese",
"zh": "全球语言 1.5B v1 中文增强"
},
"size": 3155281586,
"SHA256": "9f31f2ed5fe52dcf2d50208eb2efd764b9674dba2adb1baeff61997b4390a26b",
"lastUpdated": "2023-06-20T06:35:37",
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-CHNtuned-1.5B-v1-20230620-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-CHNtuned-1.5B-v1-20230620-ctx4096.pth"
},
{
"name": "RWKV-4-World-1.5B-v1-OnlyForTest_57%_trained-20230529-ctx4096.pth",
"desc": {
"en": "100+ Languages 1.5B v1 Test",
"zh": "100+ 语言 1.5B v1 测试"
"en": "Global Languages 1.5B v1 Test",
"zh": "全球语言 1.5B v1 测试"
},
"size": 3155281581,
"SHA256": "ac36770931776c5aa179690918c9a3b0b5f4ebe3301ea3574a7e182209778788",
@@ -55,8 +91,8 @@
{
"name": "RWKV-4-World-1.5B-v1-OnlyForTest_81%_trained-20230603-ctx4096.pth",
"desc": {
"en": "100+ Languages 1.5B v1 Test",
"zh": "100+ 语言 1.5B v1 测试"
"en": "Global Languages 1.5B v1 Test",
"zh": "全球语言 1.5B v1 测试"
},
"size": 3155281581,
"SHA256": "044fb10daa71f4c012493ac8ef455c8c3301095b5f009dae58f0f6382a53e23c",
@@ -68,8 +104,8 @@
{
"name": "RWKV-4-World-1.5B-v1-20230607-ctx4096.pth",
"desc": {
"en": "100+ Languages 1.5B v1",
"zh": "100+ 语言 1.5B v1"
"en": "Global Languages 1.5B v1",
"zh": "全球语言 1.5B v1"
},
"size": 3155281586,
"SHA256": "05bad4ab0ce41250064153d5352587b83215a82eb50134489675129bd4ad1087",
@@ -81,8 +117,8 @@
{
"name": "RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth",
"desc": {
"en": "100+ Languages 1.5B v1 fixed",
"zh": "100+ 语言 1.5B v1 修复"
"en": "Global Languages 1.5B v1 fixed",
"zh": "全球语言 1.5B v1 修复"
},
"size": 3155281586,
"SHA256": "71f0c3229f9227cbcb8ae5fee6461197129a57e26366c4d23a49058417b046c9",
@@ -93,8 +129,8 @@
{
"name": "RWKV-4-World-3B-v1-OnlyForTest_35%_trained-20230529-ctx4096.pth",
"desc": {
"en": "100+ Languages 3B v1 Test",
"zh": "100+ 语言 3B v1 测试"
"en": "Global Languages 3B v1 Test",
"zh": "全球语言 3B v1 测试"
},
"size": 6125597613,
"SHA256": "e4ee6e91a80d56de43bc79841f3a8be3b7b215d7d9788f79c467b9b1f7f03cb8",
@@ -106,8 +142,8 @@
{
"name": "RWKV-4-World-3B-v1-OnlyForTest_52%_trained-20230603-ctx4096.pth",
"desc": {
"en": "100+ Languages 3B v1 Test",
"zh": "100+ 语言 3B v1 测试"
"en": "Global Languages 3B v1 Test",
"zh": "全球语言 3B v1 测试"
},
"size": 6125597613,
"SHA256": "aad3671078a0c686368add4f4b695a76c2ba1ddd505a64c0949bb003beeee9a3",
@@ -119,8 +155,8 @@
{
"name": "RWKV-4-World-3B-v1-OnlyForTest_64%_trained-20230607-ctx4096.pth",
"desc": {
"en": "100+ Languages 3B v1 Test",
"zh": "100+ 语言 3B v1 测试"
"en": "Global Languages 3B v1 Test",
"zh": "全球语言 3B v1 测试"
},
"size": 6125597613,
"SHA256": "49e8675e09e0786ca12a554442c37b9e809ed93e9211af937cd149968a6b81e9",
@@ -132,20 +168,33 @@
{
"name": "RWKV-4-World-3B-v1-OnlyForTest_80%_trained-20230612-ctx4096.pth",
"desc": {
"en": "100+ Languages 3B v1 Test",
"zh": "100+ 语言 3B v1 测试"
"en": "Global Languages 3B v1 Test",
"zh": "全球语言 3B v1 测试"
},
"size": 6125597613,
"SHA256": "3bb10caf3017871435d83f39facc8a729fd774020390153470f004eb3ef645bd",
"lastUpdated": "2023-06-12T06:31:32",
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-3B-v1-OnlyForTest_80%25_trained-20230612-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-3B-v1-OnlyForTest_80%25_trained-20230612-ctx4096.pth"
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-3B-v1-OnlyForTest_80%25_trained-20230612-ctx4096.pth",
"hide": true
},
{
"name": "RWKV-4-World-3B-v1-20230619-ctx4096.pth",
"desc": {
"en": "Global Languages 3B v1",
"zh": "全球语言 3B v1"
},
"size": 6125597618,
"SHA256": "1b227af317fa25b6939ab3c7cd321226ca48b8fe4bbbd2df3db669f1482c54ba",
"lastUpdated": "2023-06-20T03:00:51",
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-3B-v1-20230619-ctx4096.pth"
},
{
"name": "RWKV-4-World-7B-v1-OnlyForTest_30%_trained-20230529-ctx4096.pth",
"desc": {
"en": "100+ Languages 7B v1 Test",
"zh": "100+ 语言 7B v1 测试"
"en": "Global Languages 7B v1 Test",
"zh": "全球语言 7B v1 测试"
},
"size": 15035393581,
"SHA256": "05f91562b2ae8b025226e40b3fb536d6f8eb3c142ac899c0808ee1c9dc189ec4",
@@ -157,8 +206,8 @@
{
"name": "RWKV-4-World-7B-v1-OnlyForTest_40%_trained-20230601-ctx4096.pth",
"desc": {
"en": "100+ Languages 7B v1 Test",
"zh": "100+ 语言 7B v1 测试"
"en": "Global Languages 7B v1 Test",
"zh": "全球语言 7B v1 测试"
},
"size": 15035393581,
"SHA256": "63c060c472e45b6c3af2baaaee448ffd95f9b46e3cc6e1ef70ce7ecb1d01bcfa",
@@ -170,8 +219,8 @@
{
"name": "RWKV-4-World-7B-v1-OnlyForTest_52%_trained-20230606-ctx4096.pth",
"desc": {
"en": "100+ Languages 7B v1 Test",
"zh": "100+ 语言 7B v1 测试"
"en": "Global Languages 7B v1 Test",
"zh": "全球语言 7B v1 测试"
},
"size": 15035393581,
"SHA256": "636405626eadbab230e1a7dc2855bb6244e09b5850547dda7103f650b4849de7",
@@ -183,8 +232,8 @@
{
"name": "RWKV-4-World-7B-v1-OnlyForTest_64%_trained-20230610-ctx4096.pth",
"desc": {
"en": "100+ Languages 7B v1 Test",
"zh": "100+ 语言 7B v1 测试"
"en": "Global Languages 7B v1 Test",
"zh": "全球语言 7B v1 测试"
},
"size": 15035393581,
"SHA256": "8039be276f555318a5b2e9ad82b9d70001c12bd2e3e668048615fc7b09d5d9a4",
@@ -196,14 +245,27 @@
{
"name": "RWKV-4-World-7B-v1-OnlyForTest_75%_trained-20230615-ctx4096.pth",
"desc": {
"en": "100+ Languages 7B v1 Test",
"zh": "100+ 语言 7B v1 测试"
"en": "Global Languages 7B v1 Test",
"zh": "全球语言 7B v1 测试"
},
"size": 15035393581,
"SHA256": "a5f4246a18698a350a49988de7a8a01cbd765f8d11ee6427cabb93bf659f2d0d",
"lastUpdated": "2023-06-15T15:09:11",
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-7B-v1-OnlyForTest_75%25_trained-20230615-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-7B-v1-OnlyForTest_75%25_trained-20230615-ctx4096.pth"
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-7B-v1-OnlyForTest_75%25_trained-20230615-ctx4096.pth",
"hide": true
},
{
"name": "RWKV-4-World-7B-v1-OnlyForTest_84%_trained-20230618-ctx4096.pth",
"desc": {
"en": "Global Languages 7B v1 Test",
"zh": "全球语言 7B v1 测试"
},
"size": 15035393581,
"SHA256": "dfb56e8ba32907cb47df83c8d702e7f350d9ad50a59b71b031da4681637588b3",
"lastUpdated": "2023-06-19T01:28:17",
"url": "https://huggingface.co/BlinkDL/rwkv-4-world/blob/main/RWKV-4-World-7B-v1-OnlyForTest_84%25_trained-20230618-ctx4096.pth",
"downloadUrl": "https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-7B-v1-OnlyForTest_84%25_trained-20230618-ctx4096.pth"
},
{
"name": "RWKV-4-Novel-7B-v1-ChnEng-ChnPro-20230410-ctx4096.pth",