Compare commits


504 Commits

Author SHA1 Message Date
josc146
ac34edec7f release v1.6.0 2023-12-10 23:46:25 +08:00
josc146
6dd8ffa037 bump to wails v2.7.1 2023-12-10 23:43:40 +08:00
josc146
eaed3f40a2 improve current instrument display 2023-12-10 23:37:23 +08:00
josc146
e48f39375e add midi tracks to webUI 2023-12-10 23:08:44 +08:00
josc146
9b7b651ef9 feat: import midi file 2023-12-10 22:38:31 +08:00
josc146
b5623cb9c2 fix generation instrumentType 2023-12-10 22:32:06 +08:00
josc146
144d12b463 chore 2023-12-10 21:13:36 +08:00
josc146
fa452f5518 bump to wails v2.7.0 2023-12-09 14:56:48 +08:00
josc146
a159d21d45 Update README_JA.md 2023-12-09 13:09:53 +08:00
josc146
3a00bbf44d update readme 2023-12-09 12:56:15 +08:00
github-actions[bot]
9f5e94fa8f release v1.5.9 2023-12-08 11:22:21 +00:00
josc146
87e1daa733 release v1.5.9 2023-12-08 19:22:01 +08:00
josc146
f5900179e0 model tags classifier 2023-12-08 18:17:53 +08:00
josc146
51e162970e always reset to activePreset 2023-12-08 17:10:23 +08:00
josc146
0b339ad0f6 improve ConfigSelector performance of Configs page 2023-12-08 16:36:15 +08:00
josc146
60693d6a29 improve presets interaction 2023-12-08 15:36:53 +08:00
josc146
eea53a6e9e add available tag for model downloaded configs 2023-12-08 15:34:45 +08:00
josc146
8a19181a38 chore 2023-12-08 15:30:46 +08:00
josc146
94d835c7ae better customCuda condition 2023-12-08 15:30:05 +08:00
josc146
d9e25ad69f better state cache 2023-12-08 15:28:33 +08:00
josc146
75244fbd8b disable hashed assets 2023-12-08 11:22:31 +08:00
josc146
5ce84edc3d add web-rwkv-converter (Safetensors Convert no longer depends on Python) 2023-12-07 23:26:39 +08:00
josc146
1c683087f4 update ci webgpu components 2023-12-07 23:04:56 +08:00
josc146
85a3b39cbc fix webWails undefined functions 2023-12-06 23:19:56 +08:00
josc146
cc6c24f0c3 add python-3.10.11-embed-amd64.zip cnMirror and chore 2023-12-06 23:19:22 +08:00
josc146
c733b6419c for devices that gpu is not supported, use cpu to merge lora 2023-12-06 23:17:13 +08:00
josc146
c853c5b60b chore 2023-12-06 23:09:39 +08:00
josc146
053a08f5b7 update convert_safetensors.py 2023-12-06 23:08:40 +08:00
josc146
f7227cd1c1 update ci webgpu components 2023-12-06 23:08:20 +08:00
josc146
861e245062 RWKV_RESCALE_LAYER 999 for music model 2023-12-04 17:51:21 +08:00
josc146
8f0fc7db56 update README_ZH.md 2023-11-30 22:07:16 +08:00
josc146
3dd06fa70e update README_ZH.md 2023-11-30 21:49:31 +08:00
josc146
86a855e7bc fix damaged logo 2023-11-30 21:48:14 +08:00
github-actions[bot]
b3110d4ad8 release v1.5.8 2023-11-30 05:04:31 +00:00
josc146
602004ad34 release v1.5.8 2023-11-30 13:04:02 +08:00
josc146
a8b4f0bb7e lora finetune version check 2023-11-30 13:01:38 +08:00
josc146
24cc8be085 add high loss warning 2023-11-30 12:40:16 +08:00
josc146
a96d7aef8d display mainInstrument of track 2023-11-30 12:36:03 +08:00
josc146
cbe299583b improve details of MIDI Input 2023-11-30 11:57:52 +08:00
josc146
68c70a362b darkmode of midi tracks 2023-11-30 11:56:45 +08:00
josc146
a78c346371 fix NoteOff ElapsedTime of MIDI Tracks 2023-11-30 11:55:10 +08:00
github-actions[bot]
102763b94d release v1.5.7 2023-11-29 15:01:26 +00:00
josc146
ad65765ba8 release v1.5.7 2023-11-29 22:59:47 +08:00
josc146
d04fd7cb87 fix lib 2023-11-29 22:59:42 +08:00
github-actions[bot]
b398cbb591 release v1.5.6 2023-11-29 13:22:21 +00:00
josc146
19b97e985c release v1.5.6 2023-11-29 21:21:50 +08:00
josc146
93bf74a320 fix NoteOff 2023-11-29 21:21:42 +08:00
josc146
7daae23bbb update defaultConfigs 2023-11-29 21:21:29 +08:00
josc146
0d0a3f15cc chore 2023-11-29 21:21:14 +08:00
github-actions[bot]
04fbb38861 release v1.5.5 2023-11-29 11:32:40 +00:00
josc146
d666c6032b release v1.5.5 2023-11-29 19:31:56 +08:00
josc146
93e8660d69 add instruments i18n 2023-11-29 19:31:52 +08:00
josc146
e687cf02bb try to use local soundfont by default 2023-11-29 19:17:19 +08:00
josc146
e858f1477a update locales 2023-11-29 19:10:01 +08:00
josc146
a2062ae9cc feat: save MIDI tracks to generation area; playing tracks and audio preview are still under development 2023-11-29 19:04:41 +08:00
josc146
34112c79c7 fix autoPlayed midi cannot be stopped 2023-11-29 15:28:43 +08:00
josc146
b625b8a6d1 MIDI Recording and details improvement 2023-11-29 14:05:58 +08:00
josc146
14a13d5768 basic MIDI Input Audio Tracks 2023-11-28 15:34:06 +08:00
josc146
7ce464ecda improve details 2023-11-26 22:54:59 +08:00
github-actions[bot]
2c1f89383f release v1.5.4 2023-11-24 11:22:42 +00:00
josc146
e666c50f77 release v1.5.4 2023-11-24 19:22:07 +08:00
josc146
1b441752b0 chore 2023-11-24 19:21:58 +08:00
josc146
e01897b24d improve launch flow of webgpu mode 2023-11-24 19:21:14 +08:00
josc146
6146d910b4 improve launch flow of webgpu mode 2023-11-24 18:36:44 +08:00
josc146
0063c171f3 upgrade to rwkv 0.8.22 (rwkv6 support) 2023-11-24 17:55:16 +08:00
josc146
bea3c29c1c update defaultConfigs 2023-11-24 17:13:22 +08:00
josc146
5f543c2545 update manifest 2023-11-24 16:35:21 +08:00
josc146
177b2c54d9 allow reading attachments even if the model is offline 2023-11-24 16:25:21 +08:00
josc146
645e8e2f44 chore 2023-11-24 15:58:53 +08:00
josc146
f2d0dda2ff allow safetensors converter on macOS 2023-11-21 22:32:25 +08:00
josc146
3a449e7b46 fix fs watcher of macOS 2023-11-21 22:30:42 +08:00
github-actions[bot]
18d2ecb7a7 release v1.5.3 2023-11-20 16:22:32 +00:00
josc146
bb3a93b419 release v1.5.3 2023-11-21 00:21:09 +08:00
josc146
1334f0e5ba chore 2023-11-21 00:20:54 +08:00
josc146
8781416cfb add hf-mirror for cn users 2023-11-21 00:04:23 +08:00
josc146
a9819139b8 add sidePanel for Chat page 2023-11-20 23:47:39 +08:00
josc146
66e43c9d9b display lastModelName at the top (WorkHeader) 2023-11-20 23:27:44 +08:00
josc146
41e5bd5eb8 change ValuedSlider's step to 100 2023-11-20 23:25:39 +08:00
josc146
48fef0235b add webgpu nf4 2023-11-20 21:10:10 +08:00
josc146
d435436525 improve finetune error 2023-11-20 20:39:00 +08:00
josc146
cd7a9896dc improve styles 2023-11-20 20:16:55 +08:00
josc146
bbcc6b07b6 improve precision description 2023-11-20 20:13:30 +08:00
josc146
646bcd81c0 fix webgpu permission for macos 2023-11-20 20:12:20 +08:00
josc146
dbf0dccc9d add tokenizer(/switch-model) to /docs 2023-11-20 20:11:45 +08:00
josc146
437de2be20 improve lazy loading ui 2023-11-18 13:59:37 +08:00
josc146
f739c61197 fix a finetune bug 2023-11-17 22:37:21 +08:00
josc146
01d3c89ea4 add rwkv API URL Option; update OpenAI models Option 2023-11-17 22:16:49 +08:00
josc146
d18218f21a use local API when it's working, even if a custom API URL is provided 2023-11-17 21:53:29 +08:00
josc146
c8470e77fd fix state_cache of deploy mode 2023-11-17 21:32:11 +08:00
josc146
9ede7d7c6d strict default_stop 2023-11-17 21:18:52 +08:00
josc146
a59c4436c8 macos: change default webgpu backend to aarch64-apple-darwin 2023-11-17 21:16:08 +08:00
josc146
068be2bfc4 update setup comments 2023-11-17 20:47:33 +08:00
josc146
94a5dc4fb7 update setup.sh comments 2023-11-14 17:38:24 +08:00
github-actions[bot]
9f288de951 release v1.5.2 2023-11-09 14:11:40 +00:00
josc146
3d5c3dcd31 release v1.5.2 2023-11-09 22:11:05 +08:00
josc146
0a4876a564 improve user guide 2023-11-09 22:07:01 +08:00
josc146
4f0558ae34 add client upgrade progress 2023-11-09 21:38:02 +08:00
josc146
f03c9cf25f improve mobile view 2023-11-09 12:21:01 +08:00
josc146
07797537d1 add RWKV-Runner WebUI to Server-Deploy-Examples 2023-11-09 00:21:02 +08:00
github-actions[bot]
0c3a50cb07 release v1.5.1 2023-11-08 15:41:53 +00:00
josc146
c7dcff52a1 release v1.5.1 2023-11-08 23:41:17 +08:00
josc146
c6ef32958e when client webUI enabled, set server into deployment mode 2023-11-08 23:31:13 +08:00
josc146
7235e1067b add deployment mode. If /switch-model with deploy: true, will disable /switch-model, /exit and other dangerous APIs (state cache APIs, part of midi APIs) 2023-11-08 23:29:42 +08:00
josc146
0594290b92 disable WebUI Option of WebGPU Mode (webgpu not supported yet) 2023-11-08 23:05:59 +08:00
josc146
d249a4c29a print error.txt 2023-11-08 22:57:38 +08:00
josc146
02ba37fab4 improve api url getter 2023-11-08 22:25:41 +08:00
josc146
b5a6f8a425 set deepspeed to 0.11.2 to avoid finetune error 2023-11-08 22:20:11 +08:00
josc146
1ad86d737c chore 2023-11-08 22:18:49 +08:00
josc146
cfa3669f6f fix /docs default api params (Pydantic v2) 2023-11-07 22:53:11 +08:00
josc146
26d4c9f0ed chore 2023-11-07 22:28:13 +08:00
josc146
3ddcf9f62e add webui entry 2023-11-07 22:24:06 +08:00
josc146
e734fce64f create webui assets 2023-11-07 22:23:26 +08:00
josc146
150beb578c chore 2023-11-07 22:23:00 +08:00
josc146
db6fbe8366 add python webui server 2023-11-07 22:22:29 +08:00
josc146
46f52923c3 improve webui 2023-11-07 22:21:41 +08:00
josc146
893be5cf43 webui build 2023-11-07 19:27:21 +08:00
github-actions[bot]
384e4ce4d0 release v1.5.0 2023-11-05 13:10:50 +00:00
josc146
b8712e0b89 release v1.5.0 2023-11-05 21:10:21 +08:00
josc146
37dda4333d chat attachment is now related to single message 2023-11-05 21:05:06 +08:00
josc146
64826b9af7 fix log encoding error 2023-11-05 21:00:31 +08:00
josc146
47b0c35441 update ngrok_connect 2023-11-04 20:22:28 +08:00
josc146
1dcda47013 improve startup process 2023-11-04 20:21:55 +08:00
josc146
1f81a1e5a8 upgrade to rwkv 0.8.20 2023-11-03 23:27:14 +08:00
josc146
35e92d2aef chore 2023-11-03 23:22:52 +08:00
josc146
0d99e5549e port occupied detection 2023-11-03 21:18:42 +08:00
josc146
fed1594ddc fix stop button status of Chat page 2023-10-30 21:09:23 +08:00
josc146
14b90bb36b improve dml mode performance (20% faster, https://github.com/BlinkDL/ChatRWKV/pull/181) 2023-10-30 20:24:57 +08:00
josc146
f86b7f1f08 python38 compatibility 2023-10-29 14:11:11 +08:00
josc146
54355d5a7a improve the compatibility between frontend presets and chatgpt api 2023-10-28 23:06:19 +08:00
josc146
ff7306349a improve memory usage of state cache 2023-10-28 23:04:49 +08:00
github-actions[bot]
77df56cddc release v1.4.9 2023-10-27 06:04:00 +00:00
josc146
97ae139de5 release v1.4.9 2023-10-27 14:03:28 +08:00
josc146
afd15ef2c5 base64 preset support 2023-10-27 13:35:29 +08:00
josc146
6c73eae9f6 edited chat message now is marked as Normal 2023-10-27 13:11:12 +08:00
josc146
7078f47f72 allow avatarImg to be local absolute path 2023-10-27 12:53:20 +08:00
josc146
d43954cc88 improve message interruption and retry for Chat page 2023-10-27 12:13:05 +08:00
josc146
c87de93498 allow conversation with some document (.pdf, .txt) 2023-10-27 11:36:29 +08:00
josc146
810843a5ab update manifest.json 2023-10-27 00:48:37 +08:00
josc146
f7cbd2c803 update manifest.json 2023-10-26 18:04:06 +08:00
josc146
faf1852012 update stop strategy 2023-10-26 17:47:40 +08:00
josc146
43cfab5d4b change default World series prefix to User/Assistant 2023-10-26 16:58:53 +08:00
josc146
627a20936d RWKVType now no longer relies on the file name 2023-10-26 16:55:33 +08:00
josc146
1d7f19ffaf update sample.jsonl 2023-10-26 14:08:16 +08:00
josc146
d80565d780 mark rwkv raven series as old model 2023-10-26 13:32:59 +08:00
josc146
d7ba88953d chore 2023-10-25 22:53:14 +08:00
josc146
30e1c3171e update kernel (CUDA Compute Capability 5.3) 2023-10-25 22:53:14 +08:00
josc146
1f058b16ac update kernel (CUDA Compute Capability 6.1, Previously 7.5) 2023-10-25 22:53:13 +08:00
josc146
4a192f4057 upgrade to webgpu 0.2.2 (https://github.com/josStorer/ai00_rwkv_server) 2023-10-25 21:02:44 +08:00
josc146
0331bf47f7 upgrade rwkv 0.8.16 (DirectML support; rwkv 5.2 no longer needs to ensure custom cuda kernel enabled) 2023-10-25 17:56:18 +08:00
josc146
2acdaa96b2 chore 2023-10-25 17:51:59 +08:00
josc146
1d200d53ab fix beta linux kernel 2023-10-25 17:51:13 +08:00
josc146
df9e1f408e add /file-to-text api 2023-10-25 17:14:33 +08:00
josc146
4a18696686 add pip --no-warn-script-location 2023-10-25 17:08:50 +08:00
josc146
46b3b285f5 upgrade packages 2023-10-25 17:07:40 +08:00
josc146
1d6aeab9dc fix the make command on Linux and macOS, no longer need manual operations on the wsl.go file. (#158, #173, #207) 2023-10-25 16:12:34 +08:00
josc146
ab110ba30b chore 2023-10-24 23:41:18 +08:00
josc146
2f0fa4ee56 update readme 2023-10-24 21:11:55 +08:00
josc146
0005816c1d fix linux kernel (partial revert 68228a45) 2023-10-05 00:08:18 +08:00
josc146
f70672e5a0 update .gitignore 2023-10-05 00:08:02 +08:00
github-actions[bot]
ee057071a5 release v1.4.8 2023-10-03 07:05:41 +00:00
josc146
4f26404002 release v1.4.8 2023-10-03 15:05:13 +08:00
josc146
df7652856a completion page: add format content button 2023-10-03 14:54:36 +08:00
josc146
de755463e3 improve overflow 2023-10-03 14:27:44 +08:00
josc146
2fe98d9a2c add rwkv5 cuda kernel error prompt 2023-10-03 14:25:31 +08:00
josc146
2e42039607 chore 2023-10-03 14:04:46 +08:00
josc146
71abd357a4 update startup 2023-10-03 13:50:58 +08:00
josc146
68228a4552 rwkv5 pre-compiled kernel (for windows) 2023-10-03 13:39:07 +08:00
josc146
79851433f8 upgrade rwkv pip (0.8.13) 2023-10-03 13:33:55 +08:00
github-actions[bot]
bd4de12e05 release v1.4.7 2023-09-18 15:04:47 +00:00
josc146
c0aa6aaba9 release v1.4.7 2023-09-18 23:03:54 +08:00
josc146
d7abe5f0d1 add pre-compiled beta cuda kernel (rwkv-beta==0.8.5, 40%+ faster for fp16) (thanks to #180, pre-compiled kernel of RTX 40 Series will be included later) 2023-09-18 23:02:49 +08:00
josc146
5e5e1e9651 custom tokenizer .txt support 2023-09-18 17:20:55 +08:00
github-actions[bot]
f8388a0527 release v1.4.6 2023-09-16 05:06:08 +00:00
josc146
f8b764ef8f release v1.4.6 2023-09-16 13:05:34 +08:00
josc146
fcfaa5944e frontend feature adaptation for api params (user_name, assistant_name, presystem) 2023-09-16 13:02:06 +08:00
josc146
f89e89c1c9 chore 2023-09-16 12:23:16 +08:00
josc146
a25965530c custom tokenizer (#77) 2023-09-16 00:34:11 +08:00
josc146
971124d0d7 upgrade to wails@v2.6.0 (EnableDefaultContextMenu: true) 2023-09-16 00:29:45 +08:00
josc146
d7dcc90008 chore 2023-09-15 16:31:14 +08:00
josc146
df969fcfc6 upgrade cuda-beta 2023-09-15 16:30:11 +08:00
josc146
c4042bbfd8 improve ui desc 2023-09-15 16:26:32 +08:00
josc146
4112200b4c revert(2d5456): refresh local models when download complete (for macOS) 2023-09-15 16:25:04 +08:00
Ikko Eltociear Ashimine
3f9a54e36f Update README_JA.md
add translation.
2023-09-13 16:11:43 +08:00
github-actions[bot]
3ed4456135 release v1.4.5 2023-08-27 15:57:18 +00:00
josc146
e0df9ae47b release v1.4.5 2023-08-27 23:56:37 +08:00
josc146
87b2c3ed7d fix build 2023-08-27 23:56:30 +08:00
josc146
50ff7ef6bc always use requirements.txt 2023-08-27 23:52:52 +08:00
josc146
c7a580ca8a update manifest 2023-08-27 23:16:56 +08:00
josc146
eaae7624a7 add HardwareMonitor (Windows Only) 2023-08-27 22:53:18 +08:00
josc146
fcd59de6fb correct Preset UI description 2023-08-27 21:37:32 +08:00
josc146
1bbe127209 fix webgpu_server file permissions of linux and macos 2023-08-27 21:22:26 +08:00
josc146
b868adc058 chore 2023-08-27 21:21:34 +08:00
josc146
a24b78e8c3 python-backend: extra ChatCompletionBody params (raw, presystem);
add default_stop when stop is null
2023-08-27 21:21:11 +08:00
josc146
c8025f1cff allow message content to be empty 2023-08-27 21:02:54 +08:00
josc146
fe0860dbf0 fix lora finetune max_epochs (#170) 2023-08-24 22:49:57 +08:00
josc146
02d5d641d1 chore 2023-08-24 22:48:54 +08:00
github-actions[bot]
a057bb6c5b release v1.4.4 2023-08-16 15:33:53 +00:00
josc146
c9e4ae7fa1 release v1.4.4 2023-08-16 23:33:22 +08:00
josc146
79a97b2bc4 webgpu release support 2023-08-16 23:31:04 +08:00
josc146
ef53951a16 webgpu support 2023-08-16 23:07:58 +08:00
josc146
74f1a1c033 chore 2023-08-16 21:11:58 +08:00
josc146
ce986cfc6d chore 2023-08-16 12:50:22 +08:00
josc146
61cea2a784 add misc API (/models and /dashboard/billing/credit_grants) 2023-08-14 23:37:55 +08:00
josc146
8a13bd3c1e add rwkv-cuda-beta support (faster) 2023-08-14 22:07:15 +08:00
josc146
da68926e9c chore (AddStateBody class) 2023-08-13 21:27:29 +08:00
josc146
e0b7453883 allow multiple systems 2023-08-04 22:27:55 +08:00
josc146
91e2828a95 allow completions input to be null 2023-08-04 22:22:59 +08:00
github-actions[bot]
bcf6409536 release v1.4.3 2023-07-31 14:51:01 +00:00
josc146
d7d4f87620 release v1.4.3 2023-07-31 22:50:29 +08:00
josc146
b3e35a4cdd allow custom user_name and assistant_name (/chat/completions API) 2023-07-31 22:48:54 +08:00
josc146
8764c37b03 RWKVType 2023-07-31 22:46:13 +08:00
josc146
d12a173f39 global penalty 2023-07-31 22:02:28 +08:00
josc146
64fa939c19 japanese UI chore 2023-07-29 21:44:33 +08:00
josc146
9c8e7b2f08 japanese UI 2023-07-29 21:19:45 +08:00
josc146
abfd668523 update defaultConfigs 2023-07-29 19:41:54 +08:00
github-actions[bot]
ebacf383f5 release v1.4.2 2023-07-29 11:34:18 +00:00
josc146
eb25dc6bcb release v1.4.2 2023-07-29 19:33:52 +08:00
josc146
aecacde819 remove response field of completions api 2023-07-29 19:20:43 +08:00
josc146
3ef22239eb improve default ChatCompletion stop 2023-07-29 19:19:38 +08:00
josc146
719090cc8c improve python backend startup speed 2023-07-29 19:18:01 +08:00
josc146
dbb8374d89 update defaultConfigs 2023-07-29 19:16:44 +08:00
github-actions[bot]
4d875a8c00 release v1.4.1 2023-07-28 14:16:37 +00:00
josc146
30b6d66a2d release v1.4.1 2023-07-28 22:14:53 +08:00
josc146
9d89b6f4db fix params 2023-07-28 22:13:19 +08:00
josc146
d2928e54f7 fix failed to build cyac 2023-07-28 21:40:17 +08:00
josc146
49ba5c97f7 update readme 2023-07-28 13:13:14 +08:00
github-actions[bot]
4054fac359 release v1.4.0 2023-07-28 05:06:42 +00:00
josc146
dfae1d9645 release v1.4.0 2023-07-28 13:05:55 +08:00
josc146
0f16a0dd1b remove LoraFinetunePrecision fp32 2023-07-28 12:53:41 +08:00
josc146
cb05a8a2ae update manifest 2023-07-28 12:50:39 +08:00
josc146
a51385173c add CPU-120M-Music config 2023-07-28 12:45:31 +08:00
josc146
4e18222a35 improve RunButton prompt 2023-07-28 12:45:13 +08:00
josc146
daabcf58a0 add Composition Page (RWKV-Music) 2023-07-28 12:30:05 +08:00
josc146
d0fd480bd6 chore 2023-07-26 22:24:26 +08:00
josc146
1df345b5eb improve embeddings API results 2023-07-25 20:30:43 +08:00
josc146
77868c798b chore 2023-07-25 16:37:06 +08:00
josc146
f56748a941 improve python backend startup speed 2023-07-25 16:14:29 +08:00
josc146
29c5b1d804 add midi api 2023-07-25 16:11:17 +08:00
josc146
34095a6c36 support for stop array 2023-07-25 16:10:22 +08:00
josc146
05b9b42b56 add support for MIDI RWKV 2023-07-25 16:09:31 +08:00
josc146
211ae342af improve sse fetch 2023-07-25 15:59:37 +08:00
josc146
5ae683e915 update presets 2023-07-25 15:53:25 +08:00
josc146
dc59fb39c7 update readme 2023-07-18 14:21:09 +08:00
josc146
49960774ee update readme 2023-07-18 14:16:50 +08:00
github-actions[bot]
b718452618 release v1.3.9 2023-07-17 05:05:17 +00:00
josc146
15ae312b37 release v1.3.9 2023-07-17 13:03:32 +08:00
josc146
6938b5b20e change chinese translation of completion 2023-07-17 13:03:11 +08:00
josc146
9b3b06ab04 fix input with array type (#96, #107) 2023-07-17 12:59:45 +08:00
josc146
e2a7c93753 fix always show Convert Failed when converting model 2023-07-16 16:54:18 +08:00
github-actions[bot]
34349aee0b release v1.3.8 2023-07-15 14:29:14 +00:00
josc146
8e79370e95 release v1.3.8 2023-07-15 22:28:49 +08:00
josc146
652c35322b save conversation as txt (originally in md) 2023-07-15 22:12:59 +08:00
josc146
e2fc57ac24 training: fix data EOL format 2023-07-11 12:19:39 +08:00
josc146
994fc7c828 fix cross-device state cache exception 2023-07-11 11:20:12 +08:00
josc146
b9a960d984 update readme 2023-07-10 23:06:19 +08:00
josc146
3baf260f4d update readme 2023-07-10 22:59:22 +08:00
github-actions[bot]
d037ded146 release v1.3.7 2023-07-10 13:50:05 +00:00
josc146
622287f3da release v1.3.7 2023-07-10 21:49:33 +08:00
josc146
5d12bf74f6 update presets 2023-07-10 21:43:58 +08:00
josc146
c88f9321f5 update manifest 2023-07-10 20:49:31 +08:00
josc146
f9f1d5c9fc improve /completions api compatibility 2023-07-10 20:45:08 +08:00
josc146
0edec68376 improve training data path compatibility 2023-07-10 20:44:09 +08:00
josc146
ee63dc25f4 update readme 2023-07-09 13:56:36 +08:00
josc146
fee8fe73f2 fix loss parser 2023-07-09 13:33:06 +08:00
github-actions[bot]
1689f9e7e7 release v1.3.6 2023-07-09 04:41:11 +00:00
josc146
a1ed0cb2e9 release v1.3.6 2023-07-09 12:40:42 +08:00
josc146
5ee5fa7e6e fix load_state_dict crash 2023-07-09 12:33:29 +08:00
josc146
d8c70453ec format 2023-07-09 12:32:50 +08:00
josc146
e930eb5967 extra vc check 2023-07-09 12:18:51 +08:00
josc146
aec6ad636a chore 2023-07-09 12:10:14 +08:00
josc146
750c91bd3e update logo 2023-07-09 11:59:23 +08:00
josc146
fcc3886db1 improve error messages for training 2023-07-09 11:39:44 +08:00
josc146
22afc98be5 fix loss parser 2023-07-09 11:32:05 +08:00
josc146
5b1a9448e6 fix jsonl data when using directory as training data 2023-07-09 11:31:07 +08:00
github-actions[bot]
07d89e3eeb release v1.3.5 2023-07-07 13:58:33 +00:00
josc146
96e97d9c1e release v1.3.5 2023-07-07 21:58:08 +08:00
josc146
bcb125e168 support using directory as training data 2023-07-07 21:57:01 +08:00
josc146
6fbb86667c improve python script error messages 2023-07-07 20:16:35 +08:00
josc146
2d545604f4 refresh local models in real-time (#98) 2023-07-07 20:14:55 +08:00
josc146
7210a7481e improve finetune guide 2023-07-07 19:10:31 +08:00
josc146
55210c89e2 improve wsl dependencies installation 2023-07-07 18:57:51 +08:00
josc146
c725d11dd9 fix loss parser 2023-07-07 13:56:08 +08:00
josc146
ba2a6bd06c update Related Repositories 2023-07-07 13:54:57 +08:00
josc146
57b80c6ed0 fix build for macos and linux 2023-07-07 13:54:07 +08:00
josc146
115c59d5e1 chore 2023-07-07 13:53:39 +08:00
github-actions[bot]
543ff468b7 release v1.3.4 2023-07-03 14:32:06 +00:00
josc146
96ae47989e release v1.3.4 2023-07-03 22:31:37 +08:00
josc146
368932a610 improve finetune compatibility 2023-07-03 22:28:01 +08:00
josc146
f2cd531fcb fix build for macos and linux 2023-07-03 22:22:55 +08:00
josc146
511652b71c improve finetune compatibility 2023-07-03 22:19:20 +08:00
github-actions[bot]
525fb132d6 release v1.3.3 2023-07-03 13:40:51 +00:00
josc146
5acb1fd958 release v1.3.3 2023-07-03 21:40:22 +08:00
josc146
76761ee453 improve lora finetune process (need to be refactored) 2023-07-03 21:40:16 +08:00
github-actions[bot]
134b2884e6 release v1.3.2 2023-07-03 09:43:01 +00:00
josc146
261e7c8916 release v1.3.2 2023-07-03 17:42:28 +08:00
josc146
987854fe49 lora finetune (need to be refactored) 2023-07-03 17:41:47 +08:00
josc146
c54d10795f chore 2023-07-03 16:42:11 +08:00
github-actions[bot]
b7d9ab0845 release v1.3.1 2023-07-01 11:35:45 +00:00
josc146
176800444a release v1.3.1 2023-07-01 19:35:20 +08:00
josc146
00c13cfc3f improve compatibility for linux 2023-07-01 19:32:58 +08:00
Ikko Eltociear Ashimine
620e0228ed Add Japanese README (#100)
* Add Japanese README

* minor fix
2023-06-30 12:37:45 +08:00
josc146
87ca694b0b chore 2023-06-29 20:14:52 +08:00
josc146
417389c5f6 improve for python3.8 3.9 2023-06-29 20:12:11 +08:00
github-actions[bot]
fa9f62b42c release v1.3.0 2023-06-28 13:26:51 +00:00
josc146
2c4e9f69eb release v1.3.0 2023-06-28 21:26:23 +08:00
josc146
119204368d update manifest 2023-06-28 20:57:09 +08:00
josc146
87a86042d2 chore 2023-06-28 20:49:41 +08:00
josc146
32c386799d Change chat saving format 2023-06-28 20:48:22 +08:00
josc146
b56a55e81d Completion Regenerate Button 2023-06-28 20:46:21 +08:00
josc146
2fe7a23049 chore 2023-06-28 19:40:55 +08:00
josc146
9ed3547738 rwkv pip 0.8.0 2023-06-28 19:36:15 +08:00
github-actions[bot]
a0522594da release v1.2.9 2023-06-24 16:12:53 +00:00
josc146
1cac147df4 release v1.2.9 2023-06-25 00:12:20 +08:00
josc146
db67f30082 feat: chat presets (experimental) 2023-06-25 00:07:14 +08:00
josc146
08cf09416a chore 2023-06-24 23:57:49 +08:00
josc146
7f2f4f15c1 improve error messages 2023-06-23 16:32:05 +08:00
josc146
97f6af595e display models that have not been fully downloaded in Downloads page, even if the program is restarted 2023-06-23 16:03:57 +08:00
josc146
447f4572b1 improve error messages 2023-06-23 13:55:45 +08:00
github-actions[bot]
5c9b4a4c05 release v1.2.8 2023-06-21 15:12:45 +00:00
josc146
70f2271b94 release v1.2.8 2023-06-21 23:12:17 +08:00
josc146
15cd689741 adjust MoreUtilsButton 2023-06-21 23:11:22 +08:00
josc146
82a68593bb exact avoidOverflow 2023-06-21 23:08:34 +08:00
github-actions[bot]
21910af96a release v1.2.7 2023-06-21 14:09:27 +00:00
josc146
412a0fe135 release v1.2.7 2023-06-21 22:08:57 +08:00
josc146
cf0972ba52 avoid overflow 2023-06-21 22:02:42 +08:00
josc146
3fe9ef4546 chore 2023-06-21 22:00:29 +08:00
josc146
4cd5a56070 add more chat utils (retry, edit, delete) 2023-06-21 21:20:21 +08:00
josc146
35a7437714 chore 2023-06-21 17:13:04 +08:00
josc146
131a7ddf4a fix the prompt cache that contains potential error 2023-06-21 16:07:16 +08:00
josc146
1465908574 update SupportedCustomCuda 2023-06-21 13:48:09 +08:00
josc146
3eb10f08bb rename 100+ Languages to Global Languages 2023-06-21 12:44:49 +08:00
josc146
b20990d380 when precision is fp32, disable customCuda 2023-06-21 12:14:11 +08:00
josc146
1a5bf4a95e improve InstallPyDep for non-english path 2023-06-21 12:08:04 +08:00
github-actions[bot]
3d123524e7 release v1.2.6 2023-06-20 16:59:55 +00:00
josc146
25a41e51b3 release v1.2.6 2023-06-21 00:46:57 +08:00
josc146
f998ff239a add chat and completion error messages 2023-06-21 00:26:50 +08:00
josc146
bae9ae6551 allow custom api url, key, model 2023-06-20 23:24:51 +08:00
josc146
285e8b1577 add DPI Scaling setting 2023-06-20 22:22:14 +08:00
josc146
ce915cdf6a chore 2023-06-20 22:18:45 +08:00
github-actions[bot]
84317a03e8 release v1.2.5 2023-06-20 09:02:57 +00:00
josc146
ac1fa09604 release v1.2.5 2023-06-20 17:02:28 +08:00
josc146
43bc08648d update manifest 2023-06-20 16:07:52 +08:00
josc146
e93c77394d add usage 2023-06-20 15:55:52 +08:00
josc146
4b2509e643 chore 2023-06-20 15:34:34 +08:00
josc146
14fbb437ff embeddings api example 2023-06-20 00:30:49 +08:00
josc146
8963543159 embeddings api compatible with openai api and langchain(sdk) 2023-06-19 22:51:06 +08:00
josc146
377f71b16b type 2023-06-19 22:32:02 +08:00
josc146
d32351c130 exact model name 2023-06-19 22:30:49 +08:00
josc146
967be6f88f refactor completions api 2023-06-18 20:16:52 +08:00
josc146
fcdda71b46 typo 2023-06-17 19:32:47 +08:00
github-actions[bot]
138251932c release v1.2.4 2023-06-15 16:37:43 +00:00
josc146
4d1a2396e3 release v1.2.4 2023-06-16 00:36:33 +08:00
josc146
b06e292989 improve error messages 2023-06-16 00:35:39 +08:00
josc146
b1d5b84dd6 RWKV-4-World-7B-v1-OnlyForTest_75%_trained-20230615-ctx4096.pth 2023-06-16 00:15:58 +08:00
josc146
2beddab114 save conversation button 2023-06-16 00:12:13 +08:00
josc146
7f85a08508 clear confirm for chat page 2023-06-15 22:55:38 +08:00
josc146
721653a812 fix the state cache crash caused by bad prompts 2023-06-15 22:37:00 +08:00
josc146
d99488f22f improve error messages 2023-06-15 21:57:54 +08:00
josc146
21c3009945 improve api docs 2023-06-15 21:52:22 +08:00
josc146
3f77762fda GPU-2G-3B-World 2023-06-15 00:07:09 +08:00
github-actions[bot]
9590d93c34 release v1.2.3 2023-06-14 14:52:53 +00:00
josc146
e0e846a191 release v1.2.3 2023-06-14 22:52:22 +08:00
josc146
e9cc9b0798 add additional startup condition 2023-06-14 22:50:20 +08:00
josc146
51c5696bb9 improve python dependencies installation 2023-06-14 22:21:17 +08:00
josc146
64f0610ed7 improve OpenFileFolder 2023-06-14 21:11:08 +08:00
josc146
1591430742 reset confirm for completion page 2023-06-14 20:45:52 +08:00
josc146
17c690dfb1 remember current chat input 2023-06-14 20:26:04 +08:00
josc146
4b640f884b global sse AbortController 2023-06-14 20:06:19 +08:00
github-actions[bot]
8976764ee5 release v1.2.2 2023-06-13 15:22:04 +00:00
josc146
47db663fcd release v1.2.2 2023-06-13 23:21:39 +08:00
josc146
366e67bb6e improve built-in user guides 2023-06-13 23:18:04 +08:00
josc146
b52bae6e17 update Instruction template 2023-06-13 23:15:21 +08:00
josc146
714b8834c7 chore 2023-06-13 22:47:17 +08:00
josc146
631704d04d update models and configs 2023-06-13 22:46:41 +08:00
josc146
5896593951 max_trie_len 2023-06-12 15:22:17 +08:00
josc146
8431b5d24f log Generation Prompt 2023-06-12 13:41:51 +08:00
josc146
bbd1ac1484 allow unloading model with switch-model 2023-06-12 12:34:03 +08:00
josc146
5990567a79 avoid misoperations of state_cache 2023-06-12 12:32:50 +08:00
josc146
fa0fcc2c89 add support for python3.8 3.9 2023-06-12 12:09:23 +08:00
github-actions[bot]
face4c97e8 release v1.2.1 2023-06-09 13:17:30 +00:00
josc146
c0ad99673b release v1.2.1 2023-06-09 21:16:37 +08:00
josc146
510683c57e remove enableHighPrecisionForLastLayer 2023-06-09 20:49:45 +08:00
josc146
cea1d8b4d1 add logs for state cache and switch-model 2023-06-09 20:46:19 +08:00
josc146
b7c34b0d42 improve update process for macOS and Linux 2023-06-09 20:38:19 +08:00
josc146
a95fbbbd78 CI 2023-06-09 20:37:05 +08:00
josc146
d1560674b3 update readme 2023-06-09 12:08:09 +08:00
josc146
4fdfbd2f82 update Readme_Install.txt 2023-06-08 17:11:11 +08:00
josc146
635767408f fix UnboundLocalError: local variable 'response' referenced before assignment 2023-06-08 13:30:34 +08:00
josc146
39a7eee8ea update readme 2023-06-08 00:12:54 +08:00
josc146
6ec6044901 deploy example for linux 2023-06-08 00:07:08 +08:00
josc146
4760a552d4 deploy example for windows 2023-06-07 22:20:35 +08:00
josc146
6294327273 update InstallPyDep for better macOS support 2023-06-07 20:38:19 +08:00
josc146
260f51955a update manifest.json 2023-06-07 19:45:53 +08:00
josc146
29ea886576 update manifest.json 2023-06-07 16:49:34 +08:00
josc146
dae3f72d04 chore 2023-06-07 16:49:31 +08:00
josc146
796338a32f Update Readme_Install.txt 2023-06-07 14:03:25 +08:00
josc146
66621e4ceb update readme 2023-06-07 00:11:39 +08:00
josc146
a6f5b520c3 update readme 2023-06-06 23:57:28 +08:00
josc146
c23c644fbc update readme 2023-06-06 23:50:52 +08:00
josc146
cb85c0938d release v1.2.0 2023-06-06 22:43:30 +08:00
josc146
88a5d11e15 add macOS MPS configs 2023-06-06 22:42:38 +08:00
josc146
1ecb0b444b update Readme_Install.txt 2023-06-06 22:42:31 +08:00
josc146
72d601370d improve macOS and Linux user guides 2023-06-06 22:12:26 +08:00
josc146
4814b88172 chore 2023-06-06 21:52:38 +08:00
josc146
cfad67a922 upload Readme_Install.txt for all platforms 2023-06-06 21:47:03 +08:00
josc146
c28f5604ab macOS chore 2023-06-06 20:49:31 +08:00
josc146
5853b8ca8d release v1.1.9 2023-06-06 00:13:35 +08:00
josc146
ebfc0ce672 add ResetConfigsButton to Home Page 2023-06-06 00:12:58 +08:00
josc146
e62fcd152a Improved cross-platform interaction 2023-06-05 23:11:22 +08:00
josc146
9bd9b9ecbd add requirements_without_cyac.txt 2023-06-05 22:58:56 +08:00
josc146
17faa9c5b8 dev config 2023-06-05 22:57:01 +08:00
josc146
4cd445bf77 OpenFileFolder for linux 2023-06-05 22:55:06 +08:00
josc146
4fb35845b0 improve the built-in download function, enhance the logic robustness and reliability in adverse network environments 2023-06-05 22:54:36 +08:00
josc146
f373f1caa8 release v1.1.8 2023-06-04 11:53:50 +08:00
josc146
4e75531651 fix the crash issue caused by temperature being 0 2023-06-04 11:53:33 +08:00
josc146
539c538d65 update manifest.json 2023-06-03 22:14:11 +08:00
josc146
d90186db33 update logo 2023-06-03 21:35:20 +08:00
josc146
a2d8729ae3 release v1.1.7 2023-06-03 20:34:51 +08:00
josc146
edc6ac7297 chore 2023-06-03 20:34:33 +08:00
josc146
e89e23621c update readme 2023-06-03 20:28:21 +08:00
josc146
6b9ec4c6fa add strategy guides 2023-06-03 20:18:57 +08:00
josc146
ced0966ffc display current strategy 2023-06-03 19:38:24 +08:00
josc146
966b912013 improve logs 2023-06-03 19:28:37 +08:00
josc146
dc71054e61 improve logs 2023-06-03 17:36:50 +08:00
josc146
408f3c1a4d release v1.1.6 2023-06-03 17:15:11 +08:00
josc146
38b775c937 add logs 2023-06-03 17:12:59 +08:00
josc146
f2ec1067bf MX, Tesla P, Quadro P, NVIDIA P, TITAN X, RTX A series, TITAN RTX and RTX TITAN Ada 2023-06-03 12:46:56 +08:00
josc146
b01584c49e chore 2023-06-03 00:10:31 +08:00
josc146
01e56382a3 release v1.1.5 2023-06-02 23:53:37 +08:00
josc146
391c067250 add Instruction to Completion Presets 2023-06-02 23:53:25 +08:00
josc146
5b98b5c0a7 update manifest.json 2023-06-02 23:45:37 +08:00
josc146
5bde0abb8d reminder to use administrator privileges 2023-06-02 23:42:13 +08:00
josc146
1036852924 add path contains space prompt and chore 2023-06-02 23:35:33 +08:00
josc146
2b10ccd507 add vc++ installation guide 2023-06-02 23:32:47 +08:00
josc146
e1df1bfc3f chore 2023-06-02 22:20:57 +08:00
josc146
b41a2e7039 move state cache to memory (todo: state cache db) 2023-06-02 21:33:57 +08:00
josc146
b63370928d macOS 2023-06-01 16:54:21 +08:00
josc146
06a125b8d7 release v1.1.4 2023-05-31 16:27:52 +08:00
josc146
b03b00419b fix Cmd and CopyFile 2023-05-31 16:27:43 +08:00
josc146
2f5a7d2d51 fix_tokens 2023-05-31 16:07:09 +08:00
josc146
e318331909 release v1.1.3 2023-05-31 15:46:49 +08:00
josc146
796e5f22c0 custom python path 2023-05-31 15:45:26 +08:00
josc146
b49968c145 custom models path 2023-05-31 15:21:47 +08:00
josc146
cf16e54463 fix_tokens 2023-05-31 14:55:13 +08:00
josc146
7bc8da2e29 add button to reset all configs 2023-05-31 14:19:19 +08:00
josc146
26174d4c10 chore 2023-05-31 14:14:25 +08:00
josc146
3c18ce34c7 cn 7b v12 2023-05-31 12:53:05 +08:00
josc146
c8b2bb53ef improve system for rwkv-4-world 2023-05-31 12:46:06 +08:00
josc146
9f5d15a7d5 custom strategy mode 2023-05-31 12:26:10 +08:00
josc146
8291c50058 safe ModelConfigBody 2023-05-30 23:13:27 +08:00
josc146
1f3f6cf9a8 chore 2023-05-30 22:58:52 +08:00
josc146
83905c65c7 tesla P104 P106 kernel 2023-05-30 22:46:53 +08:00
josc146
9af2b1d208 update readme 2023-05-30 14:40:33 +08:00
josc146
67c9381b82 upload .gitattributes 2023-05-30 13:17:45 +08:00
josc146
4bdfa2d54c update readme 2023-05-30 11:52:38 +08:00
josc146
9945338458 chore 2023-05-30 11:52:33 +08:00
josc146
d93157bde4 don't release embedded files in development mode 2023-05-30 11:04:11 +08:00
josc146
4db9b13803 upload vendor.yml 2023-05-30 10:35:24 +08:00
josc146
69ab273706 release v1.1.2 2023-05-29 23:27:56 +08:00
josc146
d4ce828e99 fix downloads page 2023-05-29 23:27:44 +08:00
josc146
8838b60e97 release v1.1.1 2023-05-29 22:56:55 +08:00
josc146
cbb249725d update models source and hide old models 2023-05-29 22:50:46 +08:00
josc146
53b6a5ffe0 allow system to be placed anywhere 2023-05-29 22:26:22 +08:00
josc146
11b743aa53 RWKV-4-Raven-3B-v12-Eng49%-Chn49%-Jpn1%-Other1%-20230527-ctx4096.pth 2023-05-29 22:15:45 +08:00
josc146
a8c936a885 force display the window after started or updated 2023-05-29 21:51:33 +08:00
josc146
7fb27b927c remove introduction and about in cache 2023-05-29 21:41:44 +08:00
josc146
50bee24e8c external access to the API Switch 2023-05-29 21:34:24 +08:00
josc146
a196ce6da8 setPlatform 2023-05-29 21:20:19 +08:00
josc146
154827f367 improve checkUpdate 2023-05-29 21:15:40 +08:00
josc146
deb9e030cb GetPlatform 2023-05-29 21:15:11 +08:00
josc146
da033ab096 chore 2023-05-29 20:51:20 +08:00
josc146
0fc429f5a3 chore 2023-05-29 20:37:00 +08:00
josc146
142e30622e send response even token is END_OF_TEXT 2023-05-29 20:17:29 +08:00
josc146
81d050d596 download dependencies when file size is zero 2023-05-29 20:16:14 +08:00
josc146
5e698a8312 support for tesla P40 custom cuda kernel 2023-05-29 20:15:37 +08:00
josc146
55bb33bcbb embed all core dependencies 2023-05-29 20:14:42 +08:00
josc146
b4efce15f4 reduce size 2023-05-29 20:13:06 +08:00
josc146
3694ac5015 release v1.1.0 2023-05-29 09:39:49 +08:00
josc146
6fc5a335fb embed dependencies 2023-05-29 09:39:16 +08:00
josc146
d66c30698c release v1.0.9 2023-05-29 00:25:22 +08:00
josc146
fecdf238c1 feat: preload preset_system 2023-05-29 00:08:13 +08:00
josc146
3e11128c9d feat: use model state cache to achieve 5x - 50x faster preparation time for generation 2023-05-28 23:52:38 +08:00
josc146
822f2d729c fix: sha256 check for model deduplication 2023-05-28 23:45:11 +08:00
josc146
a16c85b07d fix: the configs page now always displays the currently selected non-local model so that other models can be selected properly 2023-05-28 23:44:21 +08:00
josc146
4e678eff6f update about 2023-05-28 17:24:49 +08:00
josc146
94971bb666 support for rwkv-4-world 2023-05-28 12:53:14 +08:00
josc146
b7fb8ed898 improve api concurrency performance 2023-05-27 15:18:12 +08:00
josc146
2ca8f5eba9 experimental macOS/Linux support 2023-05-27 14:40:59 +08:00
josc146
2431ff68e6 update readme 2023-05-27 00:38:39 +08:00
josc146
06e21badc0 Update README.md 2023-05-26 13:54:45 +08:00
Pedro Cabral
52c3b7e9bf Add RWKV-4 World 0.1B (#25)
* Add RWKV-4 World 0.1B

* Update manifest.json

---------

Co-authored-by: josc146 <josStorer@outlook.com>
2023-05-26 12:32:29 +08:00
josc146
bd490b4fac update readme 2023-05-25 21:06:05 +08:00
josc146
48b09c4310 release v1.0.8 2023-05-25 20:59:22 +08:00
josc146
ffa90d89d1 update manifest.json 2023-05-25 20:59:03 +08:00
josc146
e0781be9a9 update presets 2023-05-25 20:54:54 +08:00
josc146
33b21a0f5c update home page 2023-05-25 20:40:50 +08:00
josc146
bf5ac7efef update presets 2023-05-25 20:36:32 +08:00
josc146
06622b79aa update rwkv_generate 2023-05-25 20:34:42 +08:00
josc146
537f11cbf1 update defaultModelConfigs 2023-05-25 11:46:38 +08:00
josc146
c6500c6b3a update readme 2023-05-25 10:02:29 +08:00
josc146
6f629dbc55 fix startup status detect 2023-05-25 00:51:45 +08:00
josc146
5729d9fc62 release v1.0.7 2023-05-25 00:22:26 +08:00
josc146
bb8af451f6 fix cuda40 kernel 2023-05-25 00:22:09 +08:00
josc146
ed330566e3 fix 2023-05-24 23:17:08 +08:00
177 changed files with 211076 additions and 3043 deletions

.gitattributes (vendored, new file, 10 changed lines)

@@ -0,0 +1,10 @@
backend-python/rwkv_pip/** linguist-vendored
backend-python/wkv_cuda_utils/** linguist-vendored
backend-python/get-pip.py linguist-vendored
backend-python/convert_model.py linguist-vendored
backend-python/convert_safetensors.py linguist-vendored
backend-python/utils/midi.py linguist-vendored
build/** linguist-vendored
finetune/lora/** linguist-vendored
finetune/json2binidx_tool/** linguist-vendored
frontend/wailsjs/** linguist-generated

.github/workflows/release.yml (vendored, new file, 132 changed lines)

@@ -0,0 +1,132 @@
name: release

on:
  push:
    tags:
      - "v*"

permissions:
  contents: write

env:
  GH_TOKEN: ${{ github.token }}

jobs:
  create-draft:
    runs-on: ubuntu-22.04
    steps:
      - run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
      - uses: actions/checkout@v3
        with:
          ref: master
      - uses: jossef/action-set-json-field@v2.1
        with:
          file: manifest.json
          field: version
          value: ${{ env.VERSION }}
      - continue-on-error: true
        run: |
          git config --global user.email "github-actions[bot]@users.noreply.github.com"
          git config --global user.name "github-actions[bot]"
          git commit -am "release ${{github.ref_name}}"
          git push
      - run: |
          gh release create ${{github.ref_name}} -d -F CURRENT_CHANGE.md -t ${{github.ref_name}}

  windows:
    runs-on: windows-2022
    needs: create-draft
    steps:
      - uses: actions/checkout@v3
        with:
          ref: master
      - uses: actions/setup-go@v4
        with:
          go-version: '1.20.5'
      - uses: actions/setup-python@v4
        id: cp310
        with:
          python-version: '3.10'
      - uses: crazy-max/ghaction-chocolatey@v2
        with:
          args: install upx
      - run: |
          Start-BitsTransfer https://github.com/josStorer/ai00_rwkv_server/releases/latest/download/webgpu_server_windows_x86_64.exe ./backend-rust/webgpu_server.exe
          Start-BitsTransfer https://github.com/josStorer/web-rwkv-converter/releases/latest/download/web-rwkv-converter_windows_x86_64.exe ./backend-rust/web-rwkv-converter.exe
          Start-BitsTransfer https://github.com/josStorer/LibreHardwareMonitor.Console/releases/latest/download/LibreHardwareMonitor.Console.zip ./LibreHardwareMonitor.Console.zip
          Expand-Archive ./LibreHardwareMonitor.Console.zip -DestinationPath ./components/LibreHardwareMonitor.Console
          Start-BitsTransfer https://www.python.org/ftp/python/3.10.11/python-3.10.11-embed-amd64.zip ./python-3.10.11-embed-amd64.zip
          Expand-Archive ./python-3.10.11-embed-amd64.zip -DestinationPath ./py310
          $content=Get-Content "./py310/python310._pth"; $content | ForEach-Object {if ($_.ReadCount -eq 3) {"Lib\\site-packages"} else {$_}} | Set-Content ./py310/python310._pth
          ./py310/python ./backend-python/get-pip.py
          ./py310/python -m pip install Cython==3.0.4
          Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../include" -Destination "py310/include" -Recurse
          Copy-Item -Path "${{ steps.cp310.outputs.python-path }}/../libs" -Destination "py310/libs" -Recurse
          ./py310/python -m pip install cyac==1.9
          go install github.com/wailsapp/wails/v2/cmd/wails@latest
          (Get-Content -Path ./backend-golang/app.go) -replace "//go:custom_build windows ", "" | Set-Content -Path ./backend-golang/app.go
          make
          Rename-Item -Path "build/bin/RWKV-Runner.exe" -NewName "RWKV-Runner_windows_x64.exe"
      - run: gh release upload ${{github.ref_name}} build/bin/RWKV-Runner_windows_x64.exe

  linux:
    runs-on: ubuntu-20.04
    needs: create-draft
    steps:
      - uses: actions/checkout@v3
        with:
          ref: master
      - uses: actions/setup-go@v4
        with:
          go-version: '1.20.5'
      - run: |
          wget https://github.com/josStorer/ai00_rwkv_server/releases/latest/download/webgpu_server_linux_x86_64 -O ./backend-rust/webgpu_server
          wget https://github.com/josStorer/web-rwkv-converter/releases/latest/download/web-rwkv-converter_linux_x86_64 -O ./backend-rust/web-rwkv-converter
          sudo apt-get update
          sudo apt-get install upx
          sudo apt-get install build-essential libgtk-3-dev libwebkit2gtk-4.0-dev libasound2-dev
          go install github.com/wailsapp/wails/v2/cmd/wails@latest
          rm ./backend-python/rwkv_pip/wkv_cuda.pyd
          rm ./backend-python/rwkv_pip/rwkv5.pyd
          rm ./backend-python/rwkv_pip/rwkv6.pyd
          rm ./backend-python/rwkv_pip/beta/wkv_cuda.pyd
          rm ./backend-python/get-pip.py
          make
          mv build/bin/RWKV-Runner build/bin/RWKV-Runner_linux_x64
      - run: gh release upload ${{github.ref_name}} build/bin/RWKV-Runner_linux_x64

  macos:
    runs-on: macos-13
    needs: create-draft
    steps:
      - uses: actions/checkout@v3
        with:
          ref: master
      - uses: actions/setup-go@v4
        with:
          go-version: '1.20.5'
      - run: |
          wget https://github.com/josStorer/ai00_rwkv_server/releases/latest/download/webgpu_server_darwin_aarch64 -O ./backend-rust/webgpu_server
          wget https://github.com/josStorer/web-rwkv-converter/releases/latest/download/web-rwkv-converter_darwin_aarch64 -O ./backend-rust/web-rwkv-converter
          go install github.com/wailsapp/wails/v2/cmd/wails@latest
          rm ./backend-python/rwkv_pip/wkv_cuda.pyd
          rm ./backend-python/rwkv_pip/rwkv5.pyd
          rm ./backend-python/rwkv_pip/rwkv6.pyd
          rm ./backend-python/rwkv_pip/beta/wkv_cuda.pyd
          rm ./backend-python/get-pip.py
          make
          cp build/darwin/Readme_Install.txt build/bin/Readme_Install.txt
          cp build/bin/RWKV-Runner.app/Contents/MacOS/RWKV-Runner build/bin/RWKV-Runner_darwin_universal
          cd build/bin && zip -r RWKV-Runner_macos_universal.zip RWKV-Runner.app Readme_Install.txt
      - run: gh release upload ${{github.ref_name}} build/bin/RWKV-Runner_macos_universal.zip build/bin/RWKV-Runner_darwin_universal

  publish-release:
    runs-on: ubuntu-22.04
    needs: [ windows, linux, macos ]
    steps:
      - uses: actions/checkout@v3
      - run: gh release edit ${{github.ref_name}} --draft=false

.gitignore (vendored, 13 changed lines)

@@ -5,15 +5,26 @@ __pycache__
.idea
.vs
*.pth
*.st
*.safetensors
*.bin
/config.json
/cache.json
/presets.json
/frontend/stats.html
/frontend/package.json.md5
/backend-python/get-pip.py
/py310
*.zip
/cmd-helper.bat
/install-py-dep.bat
/backend-python/wkv_cuda
/backend-python/rwkv*
*.exe
*.old
.DS_Store
*.log.*
*.log
train_log.txt
finetune/json2binidx_tool/data
/wsl.state
/components

.vscode/launch.json (vendored, 19 changed lines)

@@ -10,9 +10,24 @@
"name": "Python",
"type": "python",
"request": "launch",
"program": "./backend-python/main.py",
"program": "${workspaceFolder}/backend-python/main.py",
"console": "integratedTerminal",
"justMyCode": false,
"justMyCode": false
},
{
"name": "Golang",
"type": "go",
"request": "launch",
"mode": "exec",
"program": "${workspaceFolder}/build/bin/testwails.exe",
"console": "integratedTerminal",
"preLaunchTask": "build dev"
},
{
"name": "Frontend",
"type": "node-terminal",
"request": "launch",
"command": "wails dev -browser"
}
]
}

.vscode/tasks.json (vendored, new file, 40 changed lines)

@@ -0,0 +1,40 @@
{
  "version": "2.0.0",
  "tasks": [
    {
      "label": "build dev",
      "type": "shell",
      "options": {
        "cwd": "${workspaceFolder}",
        "env": {
          "CGO_ENABLED": "1"
        }
      },
      "osx": {
        "options": {
          "env": {
            "CGO_CFLAGS": "-mmacosx-version-min=10.13",
            "CGO_LDFLAGS": "-framework UniformTypeIdentifiers -mmacosx-version-min=10.13"
          }
        }
      },
      "windows": {
        "options": {
          "env": {
            "CGO_ENABLED": "0"
          }
        }
      },
      "command": "go",
      "args": [
        "build",
        "-tags",
        "dev",
        "-gcflags",
        "all=-N -l",
        "-o",
        "build/bin/testwails.exe"
      ]
    }
  ]
}

CURRENT_CHANGE.md (new file, 14 changed lines)

@@ -0,0 +1,14 @@
## Changes
- allow importing midi file
- add midi tracks to webUI
- improve current instrument display
- fix generation instrumentType
- chore
## Install
- Windows: https://github.com/josStorer/RWKV-Runner/blob/master/build/windows/Readme_Install.txt
- MacOS: https://github.com/josStorer/RWKV-Runner/blob/master/build/darwin/Readme_Install.txt
- Linux: https://github.com/josStorer/RWKV-Runner/blob/master/build/linux/Readme_Install.txt
- Server-Deploy-Examples: https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples

Makefile (modified)

@@ -1,16 +1,33 @@
ifeq ($(OS), Windows_NT)
build: build-windows
-else
+else ifeq ($(shell uname -s), Darwin)
+build: build-macos
+else
build: build-linux
endif

build-windows:
	@echo ---- build for windows
-	wails build -upx -ldflags "-s -w"
+	wails build -upx -ldflags '-s -w -extldflags "-static"' -platform windows/amd64

+build-macos:
+	@echo ---- build for macos
+	wails build -ldflags '-s -w' -platform darwin/universal

build-linux:
	@echo ---- build for linux
	wails build -upx -ldflags '-s -w' -platform linux/amd64

build-web:
	@echo ---- build for web
	cd frontend && npm run build

dev:
	wails dev

dev-web:
	cd frontend && npm run dev

preview:
	cd frontend && npm run preview

README.md (221 changed lines)

@@ -13,9 +13,15 @@ compatible with the OpenAI API, which means that every ChatGPT client is an RWKV
[![license][license-image]][license-url]
[![release][release-image]][release-url]
-English | [简体中文](README_ZH.md)
+English | [简体中文](README_ZH.md) | [日本語](README_JA.md)
-[Preview](#Preview) | [Download][download-url]
+### Install
+[![Windows][Windows-image]][Windows-url]
+[![MacOS][MacOS-image]][MacOS-url]
+[![Linux][Linux-image]][Linux-url]
+[FAQs](https://github.com/josStorer/RWKV-Runner/wiki/FAQs) | [Preview](#Preview) | [Download][download-url] | [Simple Deploy Example](#Simple-Deploy-Example) | [Server Deploy Examples](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples) | [MIDI Hardware Input](#MIDI-Input)
[license-image]: http://img.shields.io/badge/license-MIT-blue.svg
@@ -25,67 +31,230 @@ English | [简体中文](README_ZH.md)
[release-url]: https://github.com/josStorer/RWKV-Runner/releases/latest
-[download-url]: https://github.com/josStorer/RWKV-Runner/releases/download/v1.0.2/RWKV-Runner_windows_x64.exe
+[download-url]: https://github.com/josStorer/RWKV-Runner/releases
+[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
+[Windows-url]: https://github.com/josStorer/RWKV-Runner/blob/master/build/windows/Readme_Install.txt
+[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
+[MacOS-url]: https://github.com/josStorer/RWKV-Runner/blob/master/build/darwin/Readme_Install.txt
+[Linux-image]: https://img.shields.io/badge/-Linux-black?logo=linux
+[Linux-url]: https://github.com/josStorer/RWKV-Runner/blob/master/build/linux/Readme_Install.txt
</div>
-#### Default configs do not enable custom CUDA kernel acceleration, but I strongly recommend that you enable it and run with int8 precision, which is much faster and consumes much less VRAM. Go to the Configs page and turn on `Use Custom CUDA kernel to Accelerate`.
#### Tip: You can deploy [backend-python](./backend-python/) on a server and use this program as a client only. Fill in your server address in the Settings `API URL`.
+#### Default configs has enabled custom CUDA kernel acceleration, which is much faster and consumes much less VRAM. If you encounter possible compatibility issues (output garbled), go to the Configs page and turn off `Use Custom CUDA kernel to Accelerate`, or try to upgrade your gpu driver.
+#### If Windows Defender claims this is a virus, you can try downloading [v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip) and letting it update automatically to the latest version, or add it to the trusted list (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`).
+#### For different tasks, adjusting API parameters can achieve better results. For example, for translation tasks, you can try setting Temperature to 1 and Top_P to 0.3.
## Features
-- RWKV model management and one-click startup
-- Fully compatible with the OpenAI API, making every ChatGPT client an RWKV client. After starting the model,
+- RWKV model management and one-click startup.
+- Front-end and back-end separation, if you don't want to use the client, also allows for separately deploying the
+  front-end service, or the back-end inference service, or the back-end inference service with a WebUI.
+  [Simple Deploy Example](#Simple-Deploy-Example) | [Server Deploy Examples](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples)
+- Compatible with the OpenAI API, making every ChatGPT client an RWKV client. After starting the model,
  open http://127.0.0.1:8000/docs to view more details.
-- Automatic dependency installation, requiring only a lightweight executable program
-- User-friendly chat interaction interface included
-- Easy-to-understand and operate parameter configuration
-- Built-in model conversion tool
-- Built-in download management and remote model inspection
-- Multilingual localization
-- Theme switching
-- Automatic updates
+- Automatic dependency installation, requiring only a lightweight executable program.
+- Pre-set multi-level VRAM configs, works well on almost all computers. In Configs page, switch Strategy to WebGPU, it
+  can also run on AMD, Intel, and other graphics cards.
+- User-friendly chat, completion, and composition interaction interface included. Also supports chat presets, attachment
+  uploads, MIDI hardware input, and track editing.
+  [Preview](#Preview) | [MIDI Hardware Input](#MIDI-Input)
+- Built-in WebUI option, one-click start of Web service, sharing your hardware resources.
+- Easy-to-understand and operate parameter configuration, along with various operation guidance prompts.
+- Built-in model conversion tool.
+- Built-in download management and remote model inspection.
+- Built-in one-click LoRA Finetune.
+- Can also be used as an OpenAI ChatGPT and GPT-Playground client. (Fill in the API URL and API Key in Settings page)
+- Multilingual localization.
+- Theme switching.
+- Automatic updates.
-## Todo
+## Simple Deploy Example
-- [ ] Model training functionality
-- [x] CUDA operator int8 acceleration
-- [ ] macOS support
-- [ ] Linux support
```bash
git clone https://github.com/josStorer/RWKV-Runner
# Then
cd RWKV-Runner
python ./backend-python/main.py #The backend inference service has been started, request /switch-model API to load the model, refer to the API documentation: http://127.0.0.1:8000/docs
# Or
cd RWKV-Runner/frontend
npm ci
npm run build #Compile the frontend
cd ..
python ./backend-python/webui_server.py #Start the frontend service separately
# Or
python ./backend-python/main.py --webui #Start the frontend and backend service at the same time
# Help Info
python ./backend-python/main.py -h
```
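Note that starting `main.py` alone does not load a model; one is loaded afterwards through the `/switch-model` API. A minimal sketch of that call in Python — the request fields and the model file name below are illustrative assumptions, and the authoritative schema is in the generated API docs at http://127.0.0.1:8000/docs:

```python
import requests

# Ask the running backend to load a model. The file name is hypothetical;
# use the path of a model you have actually downloaded into ./models.
r = requests.post(
    "http://127.0.0.1:8000/switch-model",
    json={
        "model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
        "strategy": "cpu fp32",  # e.g. "cuda fp16" on an NVIDIA GPU
    },
)
r.raise_for_status()
```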
## API Concurrency Stress Testing
```bash
ab -p body.json -T application/json -c 20 -n 100 -l http://127.0.0.1:8000/chat/completions
```
body.json:
```json
{
  "messages": [
    {
      "role": "user",
      "content": "Hello"
    }
  ]
}
```
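Before scaling up with `ab`, it can help to confirm that a single request succeeds. A minimal sketch using `requests`, assuming the backend listens on the default port 8000 with a model already loaded; the response is parsed using the OpenAI chat-completion shape that the server emulates:

```python
import requests

# One /chat/completions request with the same body used for the ab test above.
r = requests.post(
    "http://127.0.0.1:8000/chat/completions",
    json={"messages": [{"role": "user", "content": "Hello"}]},
)
r.raise_for_status()
print(r.json()["choices"][0]["message"]["content"])
```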
## Embeddings API Example
Note: v1.4.0 has improved the quality of embeddings API. The generated results are not compatible
with previous versions. If you are using embeddings API to generate knowledge bases or similar, please regenerate.
If you are using langchain, just use `OpenAIEmbeddings(openai_api_base="http://127.0.0.1:8000", openai_api_key="sk-")`
```python
import numpy as np
import requests


def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))


values = [
    "I am a girl",
    "我是个女孩",
    "私は女の子です",
    "广东人爱吃福建人",
    "我是个人类",
    "I am a human",
    "that dog is so cute",
    "私はねこむすめです、にゃん♪",
    "宇宙级特大事件!号外号外!"
]

embeddings = []
for v in values:
    r = requests.post("http://127.0.0.1:8000/embeddings", json={"input": v})
    embedding = r.json()["data"][0]["embedding"]
    embeddings.append(embedding)

compared_embedding = embeddings[0]
embeddings_cos_sim = [cosine_similarity(compared_embedding, e) for e in embeddings]
for i in np.argsort(embeddings_cos_sim)[::-1]:
    print(f"{embeddings_cos_sim[i]:.10f} - {values[i]}")
```
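As a sketch of the langchain route mentioned above (assuming a classic `langchain` version whose `OpenAIEmbeddings` still accepts these constructor arguments):

```python
from langchain.embeddings import OpenAIEmbeddings

# Point the OpenAI-compatible embeddings client at the local backend.
embeddings = OpenAIEmbeddings(
    openai_api_base="http://127.0.0.1:8000",
    openai_api_key="sk-",
)
vector = embeddings.embed_query("I am a girl")  # a plain list of floats
```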
## MIDI Input
Tip: You can download https://github.com/josStorer/sgm_plus and unzip it to the program's `assets/sound-font` directory
to use it as an offline sound source. Please note that if you are compiling the program from source code, do not place
it in the source code directory.
### USB MIDI Connection
- USB MIDI devices are plug-and-play, and you can select your input device in the Composition page
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/13bb92c3-4504-482d-ab82-026ac6c31095)
### Mac MIDI Bluetooth Connection
- For Mac users who want to use Bluetooth input,
please install [Bluetooth MIDI Connect](https://apps.apple.com/us/app/bluetooth-midi-connect/id1108321791), then click
the tray icon to connect after launching,
afterwards, you can select your input device in the Composition page.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c079a109-1e3d-45c1-bbf5-eed85da1550e)
### Windows MIDI Bluetooth Connection
- Windows seems to have implemented Bluetooth MIDI support only for UWP (Universal Windows Platform) apps. Therefore, it
requires multiple steps to establish a connection. We need to create a local virtual MIDI device and then launch a UWP
application. Through this UWP application, we will redirect Bluetooth MIDI input to the virtual MIDI device, and then
this software will listen to the input from the virtual MIDI device.
- So, first, you need to
download [loopMIDI](https://www.tobias-erichsen.de/wp-content/uploads/2020/01/loopMIDISetup_1_0_16_27.zip)
to create a virtual MIDI device. Click the plus sign in the bottom left corner to create the device.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b75998ff-115c-4ddd-b97c-deeb5c106255)
- Next, you need to download [Bluetooth LE Explorer](https://apps.microsoft.com/detail/9N0ZTKF1QD98) to discover and
connect to Bluetooth MIDI devices. Click "Start" to search for devices, and then click "Pair" to bind the MIDI device.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c142c3ea-a973-4531-9807-4c385d640a2b)
- Finally, you need to install [MIDIberry](https://apps.microsoft.com/detail/9N39720H2M05),
This UWP application can redirect Bluetooth MIDI input to the virtual MIDI device. After launching it, double-click
your actual Bluetooth MIDI device name in the input field, and in the output field, double-click the virtual MIDI
device name we created earlier.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/5ad6a1d9-4f68-4d95-ae17-4296107d1669)
- Now, you can select the virtual MIDI device as the input in the Composition page. Bluetooth LE Explorer no longer
needs to run, and you can also close the loopMIDI window, it will run automatically in the background. Just keep
MIDIberry open.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/1c371821-c7b7-4c18-8e42-9e315efbe427)
## Related Repositories:
- RWKV-4-World: https://huggingface.co/BlinkDL/rwkv-4-world/tree/main
- RWKV-4-Raven: https://huggingface.co/BlinkDL/rwkv-4-raven/tree/main
- ChatRWKV: https://github.com/BlinkDL/ChatRWKV
- RWKV-LM: https://github.com/BlinkDL/RWKV-LM
- RWKV-LM-LoRA: https://github.com/Blealtan/RWKV-LM-LoRA
- MIDI-LLM-tokenizer: https://github.com/briansemrau/MIDI-LLM-tokenizer
## Preview
### Homepage
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/60efbb65-29e3-4346-a597-5bdcd099251c)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c9b9cdd0-63f9-4319-9f74-5bf5d7df5a67)
### Chat
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/6cde9c45-51bb-4dee-b1fe-746862448520)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/80009872-528f-4932-aeb2-f724fa892e7c)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/e98c9038-3323-47b0-8edb-d639fafd37b2)
### Completion
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/52f47f92-d21d-4cd7-b04e-d6f9af937a97)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/bf49de8e-3b89-4543-b1ef-7cd4b19a1836)
### Composition
Tip: You can download https://github.com/josStorer/sgm_plus and unzip it to the program's `assets/sound-font` directory
to use it as an offline sound source. Please note that if you are compiling the program from source code, do not place
it in the source code directory.
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/e8ad908d-3fd2-4e92-bcdb-96815cb836ee)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b2ce4761-9e75-477e-a182-d0255fb8ac76)
### Configuration
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/93270a68-9d6d-4247-b6a3-e543c65a876b)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/f41060dc-5517-44af-bb3f-8ef71720016d)
### Model Management
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/6f96fdd3-fdf5-4b78-af80-2afbd1ad173b)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b1581147-a6ce-4493-8010-e33c0ddeca0a)
### Download Management
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/6982e7ee-bace-4a88-bb47-92379185bf9d)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c8153cf9-c8cb-4618-8268-60c82a5be539)
### LoRA Finetune
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/4715045a-683e-4d2a-9b0e-090c7a5df63f)
### Settings
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b3b2ab46-344c-4f04-b066-1503f776eeb9)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/1067e635-8c07-4217-86a8-e48a5fcbb075)

README_JA.md Normal file

@@ -0,0 +1,259 @@
<p align="center">
<img src="https://github.com/josStorer/RWKV-Runner/assets/13366013/d24834b0-265d-45f5-93c0-fac1e19562af">
</p>
<h1 align="center">RWKV Runner</h1>
<div align="center">
This project aims to eliminate the barriers to using large language models by automating everything. All you need is a
lightweight executable program of just a few megabytes. In addition, this project provides an interface compatible with
the OpenAI API, which means that every ChatGPT client is an RWKV client.
[![license][license-image]][license-url]
[![release][release-image]][release-url]
[English](README.md) | [简体中文](README_ZH.md) | 日本語
### Installation
[![Windows][Windows-image]][Windows-url]
[![MacOS][MacOS-image]][MacOS-url]
[![Linux][Linux-image]][Linux-url]
[FAQs](https://github.com/josStorer/RWKV-Runner/wiki/FAQs) | [Preview](#Preview) | [Download][download-url] | [Simple Deploy Example](#Simple-Deploy-Example) | [Server Deploy Examples](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples) | [MIDI Hardware Input](#MIDI-Input)
[license-image]: http://img.shields.io/badge/license-MIT-blue.svg
[license-url]: https://github.com/josStorer/RWKV-Runner/blob/master/LICENSE
[release-image]: https://img.shields.io/github/release/josStorer/RWKV-Runner.svg
[release-url]: https://github.com/josStorer/RWKV-Runner/releases/latest
[download-url]: https://github.com/josStorer/RWKV-Runner/releases
[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
[Windows-url]: https://github.com/josStorer/RWKV-Runner/blob/master/build/windows/Readme_Install.txt
[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
[MacOS-url]: https://github.com/josStorer/RWKV-Runner/blob/master/build/darwin/Readme_Install.txt
[Linux-image]: https://img.shields.io/badge/-Linux-black?logo=linux
[Linux-url]: https://github.com/josStorer/RWKV-Runner/blob/master/build/linux/Readme_Install.txt
</div>
#### Tip: You can deploy [backend-python](./backend-python/) on a server and use this program as a client only. Fill in your server address in the `API URL` setting.
#### The default configuration enables custom CUDA kernel acceleration. If you run into possible compatibility issues (garbled output), go to the Configs page and turn off `Use Custom CUDA kernel to Accelerate`, or try upgrading your GPU driver.
#### If Windows Defender claims this is a virus, you can try downloading [v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip) and letting it auto-update to the latest version, or add it to the trusted list (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`).
#### For different tasks, adjusting API parameters can produce better results. For example, for translation tasks, try setting Temperature to 1 and Top_P to 0.3.
## Features
- RWKV model management and one-click startup
- Front-end and back-end separation: even if you don't use the client, it allows separately deploying the front-end service, the back-end inference service, or the back-end inference service with a WebUI.
[Simple Deploy Example](#Simple-Deploy-Example) | [Server Deploy Examples](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples)
- Compatible with the OpenAI API, which makes every ChatGPT client an RWKV client. After starting the model, open
http://127.0.0.1:8000/docs to view more details.
- Fully automatic dependency installation; all you need is a lightweight executable program
- Pre-set multi-level VRAM configurations that work on almost any computer. Switching the Strategy to WebGPU on the Configs page also lets it run on AMD, Intel, and other graphics cards
- Includes user-friendly chat, completion, and composition interfaces, with support for chat presets, attachment uploads, MIDI hardware input, and track editing.
[Preview](#Preview) | [MIDI Hardware Input](#MIDI-Input)
- Built-in WebUI option: start a web service with one click and share your hardware resources
- Easy-to-understand parameter configuration, along with various operation guidance prompts
- Built-in model conversion tool
- Built-in download management and remote model inspection
- Built-in one-click LoRA finetuning
- Can also be used as a client for OpenAI ChatGPT and GPT Playground (fill in the `API URL` and `API Key` on the Settings page)
- Multilingual localization
- Theme switching
- Automatic updates
## Simple Deploy Example
```bash
git clone https://github.com/josStorer/RWKV-Runner
# Then
cd RWKV-Runner
python ./backend-python/main.py #The backend inference service has started; call the /switch-model API to load a model. See the API docs: http://127.0.0.1:8000/docs (a client-side sketch follows this block)
# Or
cd RWKV-Runner/frontend
npm ci
npm run build #Compile the frontend
cd ..
python ./backend-python/webui_server.py #Start the frontend service separately
# Or
python ./backend-python/main.py --webui #Start the frontend and backend services at the same time
# Help Info
python ./backend-python/main.py -h
```
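A minimal client-side sketch of the flow above (assumptions: the default port 8000, a model file under `models/`, and illustrative `/switch-model` body fields; check http://127.0.0.1:8000/docs for the exact schema):
```python
import requests

BASE = "http://127.0.0.1:8000"

# Load a model first. The field names below are assumptions; see /docs for the exact schema.
requests.post(f"{BASE}/switch-model", json={
    "model": "models/your-model.pth",  # hypothetical local path
    "strategy": "cpu fp32",
})

# Then call the OpenAI-compatible chat endpoint.
r = requests.post(f"{BASE}/chat/completions", json={
    "messages": [{"role": "user", "content": "Hello"}],
})
print(r.json())
```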
## API Concurrency Stress Test
```bash
ab -p body.json -T application/json -c 20 -n 100 -l http://127.0.0.1:8000/chat/completions
```
body.json:
```json
{
"messages": [
{
"role": "user",
"content": "Hello"
}
]
}
```
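If `ab` is not available, a rough equivalent sketch in Python with a thread pool, mirroring the `-c 20 -n 100` flags above:
```python
from concurrent.futures import ThreadPoolExecutor

import requests

body = {"messages": [{"role": "user", "content": "Hello"}]}

def hit(_):
    # One POST per request, same payload as body.json above
    return requests.post("http://127.0.0.1:8000/chat/completions", json=body).status_code

with ThreadPoolExecutor(max_workers=20) as pool:  # -c 20
    codes = list(pool.map(hit, range(100)))       # -n 100

print({code: codes.count(code) for code in set(codes)})
```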
## Embeddings API Example
Note: v1.4.0 improved the quality of the embeddings API. The generated results are not compatible with previous
versions. If you are using the embeddings API to generate knowledge bases or similar, please regenerate them.
If you are using LangChain, use `OpenAIEmbeddings(openai_api_base="http://127.0.0.1:8000", openai_api_key="sk-")`
(a minimal usage sketch follows the example below).
```python
import numpy as np
import requests
def cosine_similarity(a, b):
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
values = [
"I am a girl",
"我是个女孩",
"私は女の子です",
"广东人爱吃福建人",
"我是个人类",
"I am a human",
"that dog is so cute",
"私はねこむすめです、にゃん♪",
"宇宙级特大事件!号外号外!"
]
embeddings = []
for v in values:
r = requests.post("http://127.0.0.1:8000/embeddings", json={"input": v})
embedding = r.json()["data"][0]["embedding"]
embeddings.append(embedding)
compared_embedding = embeddings[0]
embeddings_cos_sim = [cosine_similarity(compared_embedding, e) for e in embeddings]
for i in np.argsort(embeddings_cos_sim)[::-1]:
print(f"{embeddings_cos_sim[i]:.10f} - {values[i]}")
```
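The LangChain route mentioned above, as a minimal sketch (assuming the classic `langchain` package; the import path varies across LangChain versions, while the base URL and dummy key come straight from the note above):
```python
from langchain.embeddings import OpenAIEmbeddings  # import path may differ in newer LangChain versions

embeddings = OpenAIEmbeddings(openai_api_base="http://127.0.0.1:8000", openai_api_key="sk-")
vector = embeddings.embed_query("I am a girl")
print(len(vector))  # dimensionality of the returned embedding
```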
## MIDI Input
Tip: You can download https://github.com/josStorer/sgm_plus and unzip it to the program's `assets/sound-font` directory
to use it as an offline sound source. Please note that if you are compiling the program from source code, do not place
it in the source code directory.
### USB MIDI Connection
- USB MIDI devices are plug-and-play, and you can select your input device in the Composition page
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/13bb92c3-4504-482d-ab82-026ac6c31095)
### Mac MIDI Bluetooth Connection
- For Mac users who want to use Bluetooth input,
please install [Bluetooth MIDI Connect](https://apps.apple.com/us/app/bluetooth-midi-connect/id1108321791), then click
the tray icon to connect after launching it. Afterwards, you can select your input device on the Composition page.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c079a109-1e3d-45c1-bbf5-eed85da1550e)
### Windows MIDI Bluetooth Connection
- Windows seems to have implemented Bluetooth MIDI support only for UWP (Universal Windows Platform) apps. Therefore, it
requires multiple steps to establish a connection. We need to create a local virtual MIDI device and then launch a UWP
application. Through this UWP application, we will redirect Bluetooth MIDI input to the virtual MIDI device, and then
this software will listen to the input from the virtual MIDI device.
- So, first, you need to
download [loopMIDI](https://www.tobias-erichsen.de/wp-content/uploads/2020/01/loopMIDISetup_1_0_16_27.zip)
to create a virtual MIDI device. Click the plus sign in the bottom left corner to create the device.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b75998ff-115c-4ddd-b97c-deeb5c106255)
- Next, you need to download [Bluetooth LE Explorer](https://apps.microsoft.com/detail/9N0ZTKF1QD98) to discover and
connect to Bluetooth MIDI devices. Click "Start" to search for devices, and then click "Pair" to bind the MIDI device.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c142c3ea-a973-4531-9807-4c385d640a2b)
- Finally, you need to install [MIDIberry](https://apps.microsoft.com/detail/9N39720H2M05). This UWP application
redirects Bluetooth MIDI input to the virtual MIDI device. After launching it, double-click your actual Bluetooth MIDI
device name in the input field, and in the output field, double-click the virtual MIDI device name we created earlier.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/5ad6a1d9-4f68-4d95-ae17-4296107d1669)
- Now, you can select the virtual MIDI device as the input on the Composition page. Bluetooth LE Explorer no longer
needs to run, and you can also close the loopMIDI window; it will keep running in the background. Just keep
MIDIberry open.
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/1c371821-c7b7-4c18-8e42-9e315efbe427)
## Related Repositories:
- RWKV-4-World: https://huggingface.co/BlinkDL/rwkv-4-world/tree/main
- RWKV-4-Raven: https://huggingface.co/BlinkDL/rwkv-4-raven/tree/main
- ChatRWKV: https://github.com/BlinkDL/ChatRWKV
- RWKV-LM: https://github.com/BlinkDL/RWKV-LM
- RWKV-LM-LoRA: https://github.com/Blealtan/RWKV-LM-LoRA
- MIDI-LLM-tokenizer: https://github.com/briansemrau/MIDI-LLM-tokenizer
## Preview
### Homepage
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c9b9cdd0-63f9-4319-9f74-5bf5d7df5a67)
### Chat
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/80009872-528f-4932-aeb2-f724fa892e7c)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/e98c9038-3323-47b0-8edb-d639fafd37b2)
### Completion
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/bf49de8e-3b89-4543-b1ef-7cd4b19a1836)
### Composition
Tip: You can download https://github.com/josStorer/sgm_plus and unzip it to the program's `assets/sound-font` directory
to use it as an offline sound source. Please note that if you are compiling the program from source code, do not place
it in the source code directory.
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/e8ad908d-3fd2-4e92-bcdb-96815cb836ee)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b2ce4761-9e75-477e-a182-d0255fb8ac76)
### Configuration
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/f41060dc-5517-44af-bb3f-8ef71720016d)
### Model Management
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b1581147-a6ce-4493-8010-e33c0ddeca0a)
### Download Management
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c8153cf9-c8cb-4618-8268-60c82a5be539)
### LoRA Finetune
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/4715045a-683e-4d2a-9b0e-090c7a5df63f)
### Settings
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/1067e635-8c07-4217-86a8-e48a5fcbb075)


@@ -12,9 +12,15 @@ API-compatible interface, which means every ChatGPT client is an RWKV client.
[![license][license-image]][license-url]
[![release][release-image]][release-url]
[English](README.md) | 简体中文
[English](README.md) | 简体中文 | [日本語](README_JA.md)
[Video Demo](https://www.bilibili.com/video/BV1hM4y1v76R) | [Preview](#Preview) | [Download][download-url]
### Installation
[![Windows][Windows-image]][Windows-url]
[![MacOS][MacOS-image]][MacOS-url]
[![Linux][Linux-image]][Linux-url]
[Video Demo](https://www.bilibili.com/video/BV1hM4y1v76R) | [Troubleshooting](https://www.bilibili.com/read/cv23921171) | [Preview](#Preview) | [Download][download-url] | [Prepackaged Bundle](https://pan.baidu.com/s/1zdzZ_a0uM3gDqi6pXIZVAA?pwd=1111) | [Simple Deploy Example](#Simple-Deploy-Example) | [Server Deploy Examples](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples) | [MIDI Hardware Input](#MIDI-Input)
[license-image]: http://img.shields.io/badge/license-MIT-blue.svg
@@ -24,68 +30,216 @@ API-compatible interface, which means every ChatGPT client is an RWKV client.
[release-url]: https://github.com/josStorer/RWKV-Runner/releases/latest
[download-url]: https://github.com/josStorer/RWKV-Runner/releases/download/v1.0.2/RWKV-Runner_windows_x64.exe
[download-url]: https://github.com/josStorer/RWKV-Runner/releases
[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
[Windows-url]: https://github.com/josStorer/RWKV-Runner/blob/master/build/windows/Readme_Install.txt
[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
[MacOS-url]: https://github.com/josStorer/RWKV-Runner/blob/master/build/darwin/Readme_Install.txt
[Linux-image]: https://img.shields.io/badge/-Linux-black?logo=linux
[Linux-url]: https://github.com/josStorer/RWKV-Runner/blob/master/build/linux/Readme_Install.txt
</div>
#### Note: The quality of current RWKV Chinese models is mediocre; English models are recommended to experience RWKV's real capability
#### Tip: You can deploy [backend-python](./backend-python/) on a server and use this program only as a client; fill in your server address in the `API URL` setting
#### The preset configs do not enable custom CUDA kernel acceleration, but I strongly recommend enabling it and running with int8 quantization: it is much faster and consumes far less VRAM. Go to the Configs page and turn on `Use Custom CUDA kernel to Accelerate`
#### The preset configs already enable custom CUDA kernel acceleration, which is faster and consumes less VRAM. If you encounter possible compatibility issues (garbled output), go to the Configs page and turn off `Use Custom CUDA kernel to Accelerate`, or update your GPU driver
#### If Windows Defender claims this is a virus, you can try downloading [v1.3.7_win.zip](https://github.com/josStorer/RWKV-Runner/releases/download/v1.3.7/RWKV-Runner_win.zip) and letting it auto-update to the latest version, or add it to the trusted list (`Windows Security` -> `Virus & threat protection` -> `Manage settings` -> `Exclusions` -> `Add or remove exclusions` -> `Add an exclusion` -> `Folder` -> `RWKV-Runner`)
#### For different tasks, adjusting API parameters can produce better results. For example, for translation tasks, try setting Temperature to 1 and Top_P to 0.3
## Features
- RWKV model management and one-click startup
- Fully compatible with the OpenAI API; every ChatGPT client is an RWKV client. After starting the model, open http://127.0.0.1:8000/docs for details
- Front-end and back-end separation: if you don't want to use the client, it also allows separately deploying the front-end service, the back-end inference service, or the back-end inference service with a WebUI.
[Simple Deploy Example](#Simple-Deploy-Example) | [Server Deploy Examples](https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples)
- Compatible with the OpenAI API; every ChatGPT client is an RWKV client. After starting the model, open http://127.0.0.1:8000/docs to view the API documentation
- Fully automatic dependency installation; all you need is a lightweight executable program
- Comes with a user-friendly chat interface
- Easy-to-understand and easy-to-operate parameter configuration
- Pre-set multi-level VRAM configs that work well on almost any computer. Switching Strategy to WebGPU on the Configs page also allows running on AMD, Intel, and other graphics cards
- Comes with user-friendly chat, completion, and composition interfaces, with support for chat presets, attachment uploads, MIDI hardware input, and track editing.
[Preview](#Preview) | [MIDI Hardware Input](#MIDI-Input)
- Built-in WebUI option: start a web service with one click and share your hardware resources
- Easy-to-understand and easy-to-operate parameter configuration, with various operation guidance prompts
- Built-in model conversion tool
- Built-in download management and remote model inspection
- Built-in one-click LoRA finetuning
- Can also be used as a client for OpenAI ChatGPT and GPT Playground (fill in the API URL and API Key in Settings)
- Multilingual localization
- Theme switching
- Automatic updates
## Todo
## Simple Deploy Example
- [ ] Model training feature
- [x] CUDA kernel int8 acceleration
- [ ] macOS support
- [ ] Linux support
```bash
git clone https://github.com/josStorer/RWKV-Runner
# Then
cd RWKV-Runner
python ./backend-python/main.py #The backend inference service has started; call the /switch-model API to load a model. See the API docs: http://127.0.0.1:8000/docs
# Or
cd RWKV-Runner/frontend
npm ci
npm run build #Compile the frontend
cd ..
python ./backend-python/webui_server.py #Start the frontend service separately
# Or
python ./backend-python/main.py --webui #Start the frontend and backend services at the same time
# Help Info
python ./backend-python/main.py -h
```
## API Concurrency Stress Test
```bash
ab -p body.json -T application/json -c 20 -n 100 -l http://127.0.0.1:8000/chat/completions
```
body.json:
```json
{
"messages": [
{
"role": "user",
"content": "Hello"
}
]
}
```
## Embeddings API Example
Note: v1.4.0 improved the quality of the embeddings API; the generated results are not compatible with previous versions. If you are using this API to generate knowledge bases or similar, please regenerate them.
If you are using LangChain, just use `OpenAIEmbeddings(openai_api_base="http://127.0.0.1:8000", openai_api_key="sk-")`
```python
import numpy as np
import requests
def cosine_similarity(a, b):
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
values = [
"I am a girl",
"我是个女孩",
"私は女の子です",
"广东人爱吃福建人",
"我是个人类",
"I am a human",
"that dog is so cute",
"私はねこむすめです、にゃん♪",
"宇宙级特大事件!号外号外!"
]
embeddings = []
for v in values:
r = requests.post("http://127.0.0.1:8000/embeddings", json={"input": v})
embedding = r.json()["data"][0]["embedding"]
embeddings.append(embedding)
compared_embedding = embeddings[0]
embeddings_cos_sim = [cosine_similarity(compared_embedding, e) for e in embeddings]
for i in np.argsort(embeddings_cos_sim)[::-1]:
print(f"{embeddings_cos_sim[i]:.10f} - {values[i]}")
```
## MIDI Input
Tip: You can download https://github.com/josStorer/sgm_plus and unzip it to the program's `assets/sound-font` directory
to use it as an offline sound source. Note that if you are compiling the program from source code, do not place it in the source code directory.
### USB MIDI Connection
- USB MIDI devices are plug-and-play, and you can select your input device on the Composition page
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/a448c34a-56d8-46eb-8dc2-dd11e8e0c4ce)
### Mac MIDI Bluetooth Connection
- For Mac users who want to use Bluetooth input,
please install [Bluetooth MIDI Connect](https://apps.apple.com/us/app/bluetooth-midi-connect/id1108321791), click the tray icon to connect after launching it,
and then select your input device on the Composition page
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c079a109-1e3d-45c1-bbf5-eed85da1550e)
### Windows MIDI Bluetooth Connection
- Windows seems to have implemented Bluetooth MIDI support only for UWP apps, so connecting takes several steps: we need to create a local virtual MIDI device, then launch a UWP application
that redirects Bluetooth MIDI input to the virtual MIDI device, and this software then listens to the virtual MIDI device's input
- So first, you need to download [loopMIDI](https://www.tobias-erichsen.de/wp-content/uploads/2020/01/loopMIDISetup_1_0_16_27.zip)
to create a virtual MIDI device. Click the plus sign in the bottom-left corner to create the device
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/b75998ff-115c-4ddd-b97c-deeb5c106255)
- Then, you need to download [Bluetooth LE Explorer](https://apps.microsoft.com/detail/9N0ZTKF1QD98) to discover and connect Bluetooth MIDI devices.
Click Start to search for devices, then click Pair to bind the MIDI device
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/c142c3ea-a973-4531-9807-4c385d640a2b)
- Finally, you need to install [MIDIberry](https://apps.microsoft.com/detail/9N39720H2M05), a UWP application that redirects Bluetooth MIDI input to the virtual MIDI device.
After launching it, double-click your actual Bluetooth MIDI device name in the input field, and double-click the virtual MIDI device name we created earlier in the output field
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/5ad6a1d9-4f68-4d95-ae17-4296107d1669)
- Now you can select the virtual MIDI device as the input on the Composition page. Bluetooth LE Explorer no longer needs to run, and you can also close the loopMIDI window; it will keep running in the background.
Just keep MIDIberry open
- ![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/6460c355-884e-4b28-a2eb-8ab7a2e3a01a)
## Related Repositories:
- RWKV-4-World: https://huggingface.co/BlinkDL/rwkv-4-world/tree/main
- RWKV-4-Raven: https://huggingface.co/BlinkDL/rwkv-4-raven/tree/main
- ChatRWKV: https://github.com/BlinkDL/ChatRWKV
- RWKV-LM: https://github.com/BlinkDL/RWKV-LM
- RWKV-LM-LoRA: https://github.com/Blealtan/RWKV-LM-LoRA
- MIDI-LLM-tokenizer: https://github.com/briansemrau/MIDI-LLM-tokenizer
## Preview
### Homepage
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/9d25380a-a17b-443f-b823-86c754ebebf0)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/cd82674e-3ee3-4175-bd9c-a11d45437327)
### Chat
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/0e66d5fa-f34a-409f-9cd4-d880815733f3)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/9570e73b-dca2-4316-9e92-09961f3c48c4)
### Completion
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/54bb0e2b-cdc4-4ea0-8d16-9beaf57c232c)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/d4178ee9-a188-4878-9777-25c916872c29)
### Completion
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/69f9ba7a-2fe8-4a5e-94cb-aa655aa409e2)
### Composition
Tip: You can download https://github.com/josStorer/sgm_plus and unzip it to the program's `assets/sound-font` directory
to use it as an offline sound source. Note that if you are compiling the program from source code, do not place it in the source code directory.
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/95b34893-80c2-4706-87f9-bc141032ed4b)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/3cb31ca8-d708-42f1-8768-1605fb0b2174)
### Configuration
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/ad9921fc-7248-40a3-9e18-03445b86e4bf)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/0f4d4f21-8abe-4f4d-8c4f-cd7d5607f20e)
### Model Management
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/7c36f15f-3e77-49cd-a16d-99a29f870bdf)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/871f2d2a-7e41-4be7-9b32-be1b3e00dc3e)
### Download Management
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/32fde30b-11dd-43b9-9667-ad6975be2106)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/cc076038-2a91-4d36-bd39-266020e8ea87)
### LoRA Finetune
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/31939b8f-9546-4f44-b434-295b492ec625)
### Settings
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/e8a0f746-9da7-48e3-b3fc-e1453ac50de2)
![image](https://github.com/josStorer/RWKV-Runner/assets/13366013/9652d7cc-ac33-4587-a8fb-03e5a6f5ea77)

Binary file not shown.


@@ -0,0 +1,116 @@
# https://github.com/magenta/magenta-js/issues/164
import json
import os
import urllib.request
def get_pitches_array(min_pitch, max_pitch):
return list(range(min_pitch, max_pitch + 1))
base_url = 'https://storage.googleapis.com/magentadata/js/soundfonts'
soundfont_path = 'sgm_plus'
soundfont_json_url = f"{base_url}/{soundfont_path}/soundfont.json"
# Download soundfont.json
soundfont_json = ""
if not os.path.exists('soundfont.json'):
try:
with urllib.request.urlopen(soundfont_json_url) as response:
soundfont_json = response.read()
# Save soundfont.json
with open('soundfont.json', 'wb') as file:
file.write(soundfont_json)
    except Exception:
print("Failed to download soundfont.json")
else:
# If file exists, get it from the file system
with open('soundfont.json', 'rb') as file:
soundfont_json = file.read()
# Parse soundfont.json (None if the download failed and nothing was read)
soundfont_data = json.loads(soundfont_json) if soundfont_json else None
if soundfont_data is not None:
# Iterate over each instrument
for instrument_id, instrument_name in soundfont_data['instruments'].items():
if not os.path.isdir(instrument_name):
# Create instrument directory if it doesn't exist
os.makedirs(instrument_name)
instrument_json = ""
instrument_path = f"{soundfont_path}/{instrument_name}"
if not os.path.exists(f"{instrument_name}/instrument.json"):
# Download instrument.json
instrument_json_url = f"{base_url}/{instrument_path}/instrument.json"
try:
with urllib.request.urlopen(instrument_json_url) as response:
instrument_json = response.read()
# Save instrument.json
with open(f"{instrument_name}/instrument.json", 'wb') as file:
file.write(instrument_json)
            except Exception:
print(f"Failed to download {instrument_name}/instrument.json")
else:
# If file exists, get it from the file system
with open(f"{instrument_name}/instrument.json", 'rb') as file:
instrument_json = file.read()
        # Parse instrument.json (None if the download failed and nothing was read)
        instrument_data = json.loads(instrument_json) if instrument_json else None
if instrument_data is not None:
# Iterate over each pitch and velocity
for velocity in instrument_data['velocities']:
pitches = get_pitches_array(instrument_data['minPitch'], instrument_data['maxPitch'])
for pitch in pitches:
# Create the file name
file_name = f'p{pitch}_v{velocity}.mp3'
# Check if the file already exists
if os.path.exists(f"{instrument_name}/{file_name}"):
pass
#print(f"Skipping {instrument_name}/{file_name} - File already exists")
else:
# Download pitch/velocity file
file_url = f"{base_url}/{instrument_path}/{file_name}"
try:
with urllib.request.urlopen(file_url) as response:
file_contents = response.read()
# Save pitch/velocity file
with open(f"{instrument_name}/{file_name}", 'wb') as file:
file.write(file_contents)
print(f"Downloaded {instrument_name}/{file_name}")
                        except Exception:
print(f"Failed to download {instrument_name}/{file_name}")
else:
print(f"Failed to parse instrument.json for {instrument_name}")
else:
print('Failed to parse soundfont.json')


@@ -0,0 +1,134 @@
{
"name": "sgm_plus",
"instruments": {
"0": "acoustic_grand_piano",
"1": "bright_acoustic_piano",
"2": "electric_grand_piano",
"3": "honkytonk_piano",
"4": "electric_piano_1",
"5": "electric_piano_2",
"6": "harpsichord",
"7": "clavichord",
"8": "celesta",
"9": "glockenspiel",
"10": "music_box",
"11": "vibraphone",
"12": "marimba",
"13": "xylophone",
"14": "tubular_bells",
"15": "dulcimer",
"16": "drawbar_organ",
"17": "percussive_organ",
"18": "rock_organ",
"19": "church_organ",
"20": "reed_organ",
"21": "accordion",
"22": "harmonica",
"23": "tango_accordion",
"24": "acoustic_guitar_nylon",
"25": "acoustic_guitar_steel",
"26": "electric_guitar_jazz",
"27": "electric_guitar_clean",
"28": "electric_guitar_muted",
"29": "overdriven_guitar",
"30": "distortion_guitar",
"31": "guitar_harmonics",
"32": "acoustic_bass",
"33": "electric_bass_finger",
"34": "electric_bass_pick",
"35": "fretless_bass",
"36": "slap_bass_1",
"37": "slap_bass_2",
"38": "synth_bass_1",
"39": "synth_bass_2",
"40": "violin",
"41": "viola",
"42": "cello",
"43": "contrabass",
"44": "tremolo_strings",
"45": "pizzicato_strings",
"46": "orchestral_harp",
"47": "timpani",
"48": "string_ensemble_1",
"49": "string_ensemble_2",
"50": "synthstrings_1",
"51": "synthstrings_2",
"52": "choir_aahs",
"53": "voice_oohs",
"54": "synth_voice",
"55": "orchestra_hit",
"56": "trumpet",
"57": "trombone",
"58": "tuba",
"59": "muted_trumpet",
"60": "french_horn",
"61": "brass_section",
"62": "synthbrass_1",
"63": "synthbrass_2",
"64": "soprano_sax",
"65": "alto_sax",
"66": "tenor_sax",
"67": "baritone_sax",
"68": "oboe",
"69": "english_horn",
"70": "bassoon",
"71": "clarinet",
"72": "piccolo",
"73": "flute",
"74": "recorder",
"75": "pan_flute",
"76": "blown_bottle",
"77": "shakuhachi",
"78": "whistle",
"79": "ocarina",
"80": "lead_1_square",
"81": "lead_2_sawtooth",
"82": "lead_3_calliope",
"83": "lead_4_chiff",
"84": "lead_5_charang",
"85": "lead_6_voice",
"86": "lead_7_fifths",
"87": "lead_8_bass_lead",
"88": "pad_1_new_age",
"89": "pad_2_warm",
"90": "pad_3_polysynth",
"91": "pad_4_choir",
"92": "pad_5_bowed",
"93": "pad_6_metallic",
"94": "pad_7_halo",
"95": "pad_8_sweep",
"96": "fx_1_rain",
"97": "fx_2_soundtrack",
"98": "fx_3_crystal",
"99": "fx_4_atmosphere",
"100": "fx_5_brightness",
"101": "fx_6_goblins",
"102": "fx_7_echoes",
"103": "fx_8_scifi",
"104": "sitar",
"105": "banjo",
"106": "shamisen",
"107": "koto",
"108": "kalimba",
"109": "bag_pipe",
"110": "fiddle",
"111": "shanai",
"112": "tinkle_bell",
"113": "agogo",
"114": "steel_drums",
"115": "woodblock",
"116": "taiko_drum",
"117": "melodic_tom",
"118": "synth_drum",
"119": "reverse_cymbal",
"120": "guitar_fret_noise",
"121": "breath_noise",
"122": "seashore",
"123": "bird_tweet",
"124": "telephone_ring",
"125": "helicopter",
"126": "applause",
"127": "gunshot",
"drums": "percussion"
}
}
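Given this program-number-to-directory mapping and the `p{pitch}_v{velocity}.mp3` naming used by the downloader script above, resolving a sample path is a one-liner; a small sketch (assuming `soundfont.json` sits in the working directory, as the script leaves it):
```python
import json

with open("soundfont.json") as f:
    instruments = json.load(f)["instruments"]

def sample_path(program: int, pitch: int, velocity: int = 100) -> str:
    # e.g. sample_path(0, 60) -> "acoustic_grand_piano/p60_v100.mp3"
    return f"{instruments[str(program)]}/p{pitch}_v{velocity}.mp3"

print(sample_path(0, 60))
```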

assets/soundfont_builder.rb Normal file

@@ -0,0 +1,469 @@
#!/usr/bin/env ruby
#
# JavaScript Soundfont Builder for MIDI.js
# Author: 0xFE <mohit@muthanna.com>
# edited by Valentijn Nieman <valentijnnieman@gmail.com>
#
# Requires:
#
# FluidSynth
# Lame
# Ruby Gems: midilib parallel
#
# $ brew install fluidsynth lame (on OSX)
# $ gem install midilib parallel
#
# You'll need to download a GM soundbank to generate audio.
#
# Usage:
#
# 1) Install the above dependencies.
# 2) Edit BUILD_DIR, SOUNDFONT, and INSTRUMENTS as required.
# 3) Run without any argument.
require 'base64'
require 'digest/sha1'
require 'etc'
require 'fileutils'
require 'midilib'
require 'parallel'
require 'zlib'
require 'json'
include FileUtils
BUILD_DIR = "./sound-font" # Output path
SOUNDFONT = "./default_sound_font.sf2" # Soundfont file path
# This script will generate MIDI.js-compatible instrument JS files for
# all instruments in the below array. Add or remove as necessary.
INSTRUMENTS = [
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
100,
101,
102,
103,
104,
105,
106,
107,
108,
109,
110,
111,
112,
113,
114,
115,
116,
117,
118,
119,
120,
121,
122,
123,
124,
125,
126,
127
]
# It was found that midilib uses names that are incompatible with MIDI.js
# For example, midilib uses "SynthBrass 1" -> https://github.com/jimm/midilib/blob/6c8e481ae72cd9f00a38eb3700ddfca6b549f153/lib/midilib/consts.rb#L280
# and the MIDI association uses "SynthBrass 1" -> https://www.midi.org/specifications-old/item/gm-level-1-sound-set
# but MIDI.js calls this "Synth Brass 1" -> https://github.com/mudcube/MIDI.js/blob/a8a84257afa70721ae462448048a87301fc1554a/js/midi/gm.js#L44
# there are others like "Bag pipe" vs "Bagpipe", etc.
# here, we use the MIDI.js definitions because that is how most users will interact with the generated soundfonts.
MIDIJS_PATCH_NAMES = [
"Acoustic Grand Piano",
"Bright Acoustic Piano",
"Electric Grand Piano",
"Honky-tonk Piano",
"Electric Piano 1",
"Electric Piano 2",
"Harpsichord",
"Clavinet",
"Celesta",
"Glockenspiel",
"Music Box",
"Vibraphone",
"Marimba",
"Xylophone",
"Tubular Bells",
"Dulcimer",
"Drawbar Organ",
"Percussive Organ",
"Rock Organ",
"Church Organ",
"Reed Organ",
"Accordion",
"Harmonica",
"Tango Accordion",
"Acoustic Guitar (nylon)",
"Acoustic Guitar (steel)",
"Electric Guitar (jazz)",
"Electric Guitar (clean)",
"Electric Guitar (muted)",
"Overdriven Guitar",
"Distortion Guitar",
"Guitar Harmonics",
"Acoustic Bass",
"Electric Bass (finger)",
"Electric Bass (pick)",
"Fretless Bass",
"Slap Bass 1",
"Slap Bass 2",
"Synth Bass 1",
"Synth Bass 2",
"Violin",
"Viola",
"Cello",
"Contrabass",
"Tremolo Strings",
"Pizzicato Strings",
"Orchestral Harp",
"Timpani",
"String Ensemble 1",
"String Ensemble 2",
"Synth Strings 1",
"Synth Strings 2",
"Choir Aahs",
"Voice Oohs",
"Synth Choir",
"Orchestra Hit",
"Trumpet",
"Trombone",
"Tuba",
"Muted Trumpet",
"French Horn",
"Brass Section",
"Synth Brass 1",
"Synth Brass 2",
"Soprano Sax",
"Alto Sax",
"Tenor Sax",
"Baritone Sax",
"Oboe",
"English Horn",
"Bassoon",
"Clarinet",
"Piccolo",
"Flute",
"Recorder",
"Pan Flute",
"Blown Bottle",
"Shakuhachi",
"Whistle",
"Ocarina",
"Lead 1 (square)",
"Lead 2 (sawtooth)",
"Lead 3 (calliope)",
"Lead 4 (chiff)",
"Lead 5 (charang)",
"Lead 6 (voice)",
"Lead 7 (fifths)",
"Lead 8 (bass + lead)",
"Pad 1 (new age)",
"Pad 2 (warm)",
"Pad 3 (polysynth)",
"Pad 4 (choir)",
"Pad 5 (bowed)",
"Pad 6 (metallic)",
"Pad 7 (halo)",
"Pad 8 (sweep)",
"FX 1 (rain)",
"FX 2 (soundtrack)",
"FX 3 (crystal)",
"FX 4 (atmosphere)",
"FX 5 (brightness)",
"FX 6 (goblins)",
"FX 7 (echoes)",
"FX 8 (sci-fi)",
"Sitar",
"Banjo",
"Shamisen",
"Koto",
"Kalimba",
"Bagpipe",
"Fiddle",
"Shanai",
"Tinkle Bell",
"Agogo",
"Steel Drums",
"Woodblock",
"Taiko Drum",
"Melodic Tom",
"Synth Drum",
"Reverse Cymbal",
"Guitar Fret Noise",
"Breath Noise",
"Seashore",
"Bird Tweet",
"Telephone Ring",
"Helicopter",
"Applause",
"Gunshot"
]
# The encoders and tools are expected in your PATH. You can supply alternate
# paths by changing the constants below.
LAME = "lame" # `which lame`.chomp
FLUIDSYNTH = "fluidsynth" # `which fluidsynth`.chomp
puts "Building the following instruments using font: " + SOUNDFONT
# Display instrument names.
INSTRUMENTS.each do |i|
puts " #{i}: " + MIDIJS_PATCH_NAMES[i]
end
puts
puts "Using MP3 encoder: " + LAME
puts "Using FluidSynth encoder: " + FLUIDSYNTH
puts
puts "Sending output to: " + BUILD_DIR
puts
raise "Can't find soundfont: #{SOUNDFONT}" unless File.exist? SOUNDFONT
raise "Can't find 'lame' command" if LAME.empty?
raise "Can't find 'fluidsynth' command" if FLUIDSYNTH.empty?
raise "Output directory does not exist: #{BUILD_DIR}" unless File.exist?(BUILD_DIR)
puts "Hit return to begin."
$stdin.readline
NOTES = {
"C" => 0,
"Db" => 1,
"D" => 2,
"Eb" => 3,
"E" => 4,
"F" => 5,
"Gb" => 6,
"G" => 7,
"Ab" => 8,
"A" => 9,
"Bb" => 10,
"B" => 11
}
MIDI_C0 = 12
VELOCITY = 100
DURATION = Integer(3000)
TEMP_FILE = "#{BUILD_DIR}/%s%stemp.midi"
FLUIDSYNTH_RAW = "%s.wav"
def deflate(string, level)
z = Zlib::Deflate.new(level)
dst = z.deflate(string, Zlib::FINISH)
z.close
dst
end
def note_to_int(note, octave)
value = NOTES[note]
increment = MIDI_C0 * octave
return value + increment
end
def int_to_note(value)
raise "Bad Value" if value < MIDI_C0
reverse_notes = NOTES.invert
value -= MIDI_C0
octave = value / 12
note = value % 12
return { key: reverse_notes[note],
octave: octave }
end
# Run a quick table validation
MIDI_C0.upto(100) do |x|
note = int_to_note x
#raise "Broken table" unless note_to_int(note[:key], note[:octave]) == x
end
def generate_midi(program, note_value, file)
include MIDI
seq = Sequence.new()
track = Track.new(seq)
seq.tracks << track
track.events << ProgramChange.new(0, Integer(program))
track.events << NoteOn.new(0, note_value, VELOCITY, 0) # channel, note, velocity, delta
track.events << NoteOff.new(0, note_value, VELOCITY, DURATION)
File.open(file, 'wb') { | file | seq.write(file) }
end
def run_command(cmd)
puts "Running: " + cmd
`#{cmd}`
end
def midi_to_audio(source, target)
run_command "#{FLUIDSYNTH} -C no -R no -g 0.5 -F #{target} #{SOUNDFONT} #{source}"
run_command "#{LAME} -v -b 8 -B 64 #{target}"
rm target
end
def open_js_file(instrument_key, type)
js_file = File.open("#{BUILD_DIR}/#{instrument_key}-#{type}.js", "w")
js_file.write(
"""
if (typeof(MIDI) === 'undefined') var MIDI = {};
if (typeof(MIDI.Soundfont) === 'undefined') MIDI.Soundfont = {};
MIDI.Soundfont.#{instrument_key} = {
""")
return js_file
end
def close_js_file(file)
file.write("\n}\n")
file.close
end
def base64js(note, file, type)
output = '"' + note + '": '
output += '"' + "data:audio/#{type};base64,"
output += Base64.strict_encode64(File.read(file)) + '"'
return output
end
def generate_audio(program)
instrument = MIDIJS_PATCH_NAMES[program]
instrument_key = instrument.downcase.gsub(/[^a-z0-9 ]/, "").gsub(/[ ]/, "_")
puts "Generating audio for: " + instrument + "(#{instrument_key})"
mkdir_p "#{BUILD_DIR}/#{instrument_key}"
note_to_int("A", 0).upto(note_to_int("C", 8)) do |note_value|
output_name = "p#{note_value}_v#{VELOCITY}"
output_path_prefix = BUILD_DIR + "/#{instrument_key}" + output_name
puts "Generating: #{output_name}"
temp_file_specific = TEMP_FILE % [output_name, instrument_key]
generate_midi(program, note_value, temp_file_specific)
midi_to_audio(temp_file_specific, output_path_prefix + ".wav")
mv output_path_prefix + ".mp3", "#{BUILD_DIR}/#{instrument_key}/#{output_name}.mp3"
rm temp_file_specific
end
tempHash = {
"name" => instrument_key,
"minPitch" => 0,
"maxPitch" => 127,
"durationSeconds" => 3.0,
"releaseSeconds" => 1.0,
"percussive": false,
"velocities": [100]
}
File.open("#{BUILD_DIR}/#{instrument_key}/instrument.json", "w") do |f|
f.write(tempHash.to_json)
end
end
Parallel.each(INSTRUMENTS, :in_processes=>Etc.nprocessors){|i| generate_audio(i)}
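For reference, the core conversion step above (FluidSynth renders a MIDI file to WAV, LAME encodes it to MP3, and the intermediate WAV is removed) is easy to reproduce outside Ruby; a minimal sketch assuming `fluidsynth` and `lame` are on PATH and a soundfont file exists at the same path the Ruby script uses:
```python
import os
import subprocess

SOUNDFONT = "./default_sound_font.sf2"  # same assumption as the Ruby script

def midi_to_mp3(midi_path: str, wav_path: str) -> None:
    # Render MIDI to WAV with FluidSynth (flags mirror the Ruby script)
    subprocess.run(["fluidsynth", "-C", "no", "-R", "no", "-g", "0.5",
                    "-F", wav_path, SOUNDFONT, midi_path], check=True)
    # Encode WAV to MP3 with LAME, then drop the intermediate WAV
    subprocess.run(["lame", "-v", "-b", "8", "-B", "64", wav_path], check=True)
    os.remove(wav_path)

midi_to_mp3("note.midi", "note.wav")  # produces note.mp3 next to the WAV
```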


@@ -1,19 +1,30 @@
package backend_golang
import (
"bufio"
"context"
"errors"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"syscall"
"time"
"github.com/fsnotify/fsnotify"
"github.com/minio/selfupdate"
wruntime "github.com/wailsapp/wails/v2/pkg/runtime"
)
// App struct
type App struct {
ctx context.Context
ctx context.Context
HasConfigData bool
ConfigData map[string]any
exDir string
cmdPrefix string
}
// NewApp creates a new App application struct
@@ -25,8 +36,103 @@ func NewApp() *App {
// so we can call the runtime methods
func (a *App) OnStartup(ctx context.Context) {
a.ctx = ctx
a.exDir = ""
a.cmdPrefix = ""
if runtime.GOOS == "darwin" {
ex, _ := os.Executable()
a.exDir = filepath.Dir(ex) + "/../../../"
a.cmdPrefix = "cd " + a.exDir + " && "
}
os.Chmod(a.exDir+"backend-rust/webgpu_server", 0777)
os.Chmod(a.exDir+"backend-rust/web-rwkv-converter", 0777)
os.Mkdir(a.exDir+"models", os.ModePerm)
os.Mkdir(a.exDir+"lora-models", os.ModePerm)
os.Mkdir(a.exDir+"finetune/json2binidx_tool/data", os.ModePerm)
f, err := os.Create(a.exDir + "lora-models/train_log.txt")
if err == nil {
f.Close()
}
a.downloadLoop()
a.midiLoop()
a.watchFs()
a.monitorHardware()
}
func (a *App) OnBeforeClose(ctx context.Context) bool {
if monitor != nil {
monitor.Process.Kill()
}
return false
}
func (a *App) watchFs() {
watcher, err := fsnotify.NewWatcher()
if err == nil {
watcher.Add(a.exDir + "./lora-models")
watcher.Add(a.exDir + "./models")
go func() {
for {
select {
case event, ok := <-watcher.Events:
if !ok {
return
}
wruntime.EventsEmit(a.ctx, "fsnotify", event.Name)
case _, ok := <-watcher.Errors:
if !ok {
return
}
}
}
}()
}
}
var monitor *exec.Cmd
func (a *App) monitorHardware() {
if runtime.GOOS != "windows" {
return
}
monitor = exec.Command("./components/LibreHardwareMonitor.Console/LibreHardwareMonitor.Console.exe")
stdout, err := monitor.StdoutPipe()
if err != nil {
monitor = nil
return
}
go func() {
reader := bufio.NewReader(stdout)
for {
line, _, err := reader.ReadLine()
if err != nil {
wruntime.EventsEmit(a.ctx, "monitorerr", err.Error())
break
}
wruntime.EventsEmit(a.ctx, "monitor", string(line))
}
}()
monitor.SysProcAttr = &syscall.SysProcAttr{}
//go:custom_build windows monitor.SysProcAttr.HideWindow = true
monitor.Start()
}
type ProgressReader struct {
reader io.Reader
total int64
err error
}
func (pr *ProgressReader) Read(p []byte) (n int, err error) {
n, err = pr.reader.Read(p)
pr.err = err
pr.total += int64(n)
return
}
func (a *App) UpdateApp(url string) (broken bool, err error) {
@@ -35,22 +141,61 @@ func (a *App) UpdateApp(url string) (broken bool, err error) {
return false, err
}
defer resp.Body.Close()
err = selfupdate.Apply(resp.Body, selfupdate.Options{})
pr := &ProgressReader{reader: resp.Body}
ticker := time.NewTicker(250 * time.Millisecond)
defer ticker.Stop()
go func() {
for {
<-ticker.C
wruntime.EventsEmit(a.ctx, "updateApp", &DownloadStatus{
Name: filepath.Base(url),
Path: "",
Url: url,
Transferred: pr.total,
Size: resp.ContentLength,
Speed: 0,
Progress: 100 * (float64(pr.total) / float64(resp.ContentLength)),
Downloading: pr.err == nil && pr.total < resp.ContentLength,
Done: pr.total == resp.ContentLength,
})
if pr.err != nil || pr.total == resp.ContentLength {
break
}
}
}()
err = selfupdate.Apply(pr, selfupdate.Options{})
if err != nil {
if rerr := selfupdate.RollbackError(err); rerr != nil {
return true, rerr
}
return false, err
}
name, err := os.Executable()
if err != nil {
return false, err
if runtime.GOOS == "windows" {
name, err := os.Executable()
if err != nil {
return false, err
}
exec.Command(name, os.Args[1:]...).Start()
wruntime.Quit(a.ctx)
}
exec.Command(name, os.Args[1:]...).Start()
wruntime.Quit(a.ctx)
return false, nil
}
func (a *App) RestartApp() error {
if runtime.GOOS == "windows" {
name, err := os.Executable()
if err != nil {
return err
}
exec.Command(name, os.Args[1:]...).Start()
wruntime.Quit(a.ctx)
return nil
}
return errors.New("unsupported OS")
}
func (a *App) GetPlatform() string {
return runtime.GOOS
}


@@ -1,6 +1,7 @@
package backend_golang
import (
"context"
"path/filepath"
"time"
@@ -9,7 +10,7 @@ import (
)
func (a *App) DownloadFile(path string, url string) error {
_, err := grab.Get(path, url)
_, err := grab.Get(a.exDir+path, url)
if err != nil {
return err
}
@@ -18,6 +19,7 @@ func (a *App) DownloadFile(path string, url string) error {
type DownloadStatus struct {
resp *grab.Response
cancel context.CancelFunc
Name string `json:"name"`
Path string `json:"path"`
Url string `json:"url"`
@@ -29,11 +31,11 @@ type DownloadStatus struct {
Done bool `json:"done"`
}
var downloadList []DownloadStatus
var downloadList []*DownloadStatus
func existsInDownloadList(url string) bool {
func existsInDownloadList(path string, url string) bool {
for _, ds := range downloadList {
if ds.Url == url {
if ds.Path == path || ds.Url == url {
return true
}
}
@@ -41,49 +43,58 @@ func existsInDownloadList(url string) bool {
}
func (a *App) PauseDownload(url string) {
for i, ds := range downloadList {
for _, ds := range downloadList {
if ds.Url == url {
if ds.resp != nil {
ds.resp.Cancel()
}
downloadList[i] = DownloadStatus{
resp: ds.resp,
Name: ds.Name,
Path: ds.Path,
Url: ds.Url,
Downloading: false,
if ds.cancel != nil {
ds.cancel()
}
ds.resp = nil
ds.Downloading = false
ds.Speed = 0
break
}
}
}
func (a *App) ContinueDownload(url string) {
for i, ds := range downloadList {
for _, ds := range downloadList {
if ds.Url == url {
client := grab.NewClient()
req, _ := grab.NewRequest(ds.Path, ds.Url)
resp := client.Do(req)
if !ds.Downloading && ds.resp == nil && !ds.Done {
ds.Downloading = true
downloadList[i] = DownloadStatus{
resp: resp,
Name: ds.Name,
Path: ds.Path,
Url: ds.Url,
Downloading: true,
req, err := grab.NewRequest(ds.Path, ds.Url)
if err != nil {
ds.Downloading = false
break
}
// if PauseDownload() is called before the request finished, ds.Downloading will be false
// if the user keeps clicking pause and resume, it may result in multiple requests being successfully downloaded at the same time
// so we have to create a context and cancel it when PauseDownload() is called
ctx, cancel := context.WithCancel(context.Background())
ds.cancel = cancel
req = req.WithContext(ctx)
resp := grab.DefaultClient.Do(req)
if resp != nil && resp.HTTPResponse != nil &&
resp.HTTPResponse.StatusCode >= 200 && resp.HTTPResponse.StatusCode < 300 {
ds.resp = resp
} else {
ds.Downloading = false
}
}
break
}
}
}
func (a *App) AddToDownloadList(path string, url string) {
if !existsInDownloadList(url) {
downloadList = append(downloadList, DownloadStatus{
if !existsInDownloadList(a.exDir+path, url) {
downloadList = append(downloadList, &DownloadStatus{
resp: nil,
Name: filepath.Base(path),
Path: path,
Path: a.exDir + path,
Url: url,
Downloading: true,
Downloading: false,
})
a.ContinueDownload(url)
} else {
@@ -96,32 +107,17 @@ func (a *App) downloadLoop() {
go func() {
for {
<-ticker.C
for i, ds := range downloadList {
transferred := int64(0)
size := int64(0)
speed := float64(0)
progress := float64(0)
downloading := ds.Downloading
done := false
for _, ds := range downloadList {
if ds.resp != nil {
transferred = ds.resp.BytesComplete()
size = ds.resp.Size()
speed = ds.resp.BytesPerSecond()
progress = 100 * ds.resp.Progress()
downloading = !ds.resp.IsComplete()
done = ds.resp.Progress() == 1
}
downloadList[i] = DownloadStatus{
resp: ds.resp,
Name: ds.Name,
Path: ds.Path,
Url: ds.Url,
Transferred: transferred,
Size: size,
Speed: speed,
Progress: progress,
Downloading: downloading,
Done: done,
ds.Transferred = ds.resp.BytesComplete()
ds.Size = ds.resp.Size()
ds.Speed = ds.resp.BytesPerSecond()
ds.Progress = 100 * ds.resp.Progress()
ds.Downloading = !ds.resp.IsComplete()
ds.Done = ds.resp.Progress() == 1
if !ds.Downloading {
ds.resp = nil
}
}
}
runtime.EventsEmit(a.ctx, "downloadList", downloadList)


@@ -2,13 +2,16 @@ package backend_golang
import (
"encoding/json"
"fmt"
"errors"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"time"
wruntime "github.com/wailsapp/wails/v2/pkg/runtime"
)
func (a *App) SaveJson(fileName string, jsonData any) error {
@@ -17,14 +20,14 @@ func (a *App) SaveJson(fileName string, jsonData any) error {
return err
}
if err := os.WriteFile(fileName, text, 0644); err != nil {
if err := os.WriteFile(a.exDir+fileName, text, 0644); err != nil {
return err
}
return nil
}
func (a *App) ReadJson(fileName string) (any, error) {
file, err := os.ReadFile(fileName)
file, err := os.ReadFile(a.exDir + fileName)
if err != nil {
return nil, err
}
@@ -39,7 +42,7 @@ func (a *App) ReadJson(fileName string) (any, error) {
}
func (a *App) FileExists(fileName string) bool {
_, err := os.Stat(fileName)
_, err := os.Stat(a.exDir + fileName)
return err == nil
}
@@ -50,12 +53,12 @@ type FileInfo struct {
ModTime string `json:"modTime"`
}
func (a *App) ReadFileInfo(fileName string) (FileInfo, error) {
info, err := os.Stat(fileName)
func (a *App) ReadFileInfo(fileName string) (*FileInfo, error) {
info, err := os.Stat(a.exDir + fileName)
if err != nil {
return FileInfo{}, err
return nil, err
}
return FileInfo{
return &FileInfo{
Name: info.Name(),
Size: info.Size(),
IsDir: info.IsDir(),
@@ -64,7 +67,7 @@ func (a *App) ReadFileInfo(fileName string) (FileInfo, error) {
}
func (a *App) ListDirFiles(dirPath string) ([]FileInfo, error) {
files, err := os.ReadDir(dirPath)
files, err := os.ReadDir(a.exDir + dirPath)
if err != nil {
return nil, err
}
@@ -86,7 +89,7 @@ func (a *App) ListDirFiles(dirPath string) ([]FileInfo, error) {
}
func (a *App) DeleteFile(path string) error {
err := os.Remove(path)
err := os.Remove(a.exDir + path)
if err != nil {
return err
}
@@ -94,13 +97,18 @@ func (a *App) DeleteFile(path string) error {
}
func (a *App) CopyFile(src string, dst string) error {
sourceFile, err := os.Open(src)
sourceFile, err := os.Open(a.exDir + src)
if err != nil {
return err
}
defer sourceFile.Close()
destFile, err := os.Create(dst)
err = os.MkdirAll(a.exDir+dst[:strings.LastIndex(dst, "/")], 0755)
if err != nil {
return err
}
destFile, err := os.Create(a.exDir + dst)
if err != nil {
return err
}
@@ -113,8 +121,52 @@ func (a *App) CopyFile(src string, dst string) error {
return nil
}
func (a *App) OpenFileFolder(path string) error {
absPath, err := filepath.Abs(path)
func (a *App) OpenSaveFileDialog(filterPattern string, defaultFileName string, savedContent string) (string, error) {
return a.OpenSaveFileDialogBytes(filterPattern, defaultFileName, []byte(savedContent))
}
func (a *App) OpenSaveFileDialogBytes(filterPattern string, defaultFileName string, savedContent []byte) (string, error) {
path, err := wruntime.SaveFileDialog(a.ctx, wruntime.SaveDialogOptions{
DefaultFilename: defaultFileName,
Filters: []wruntime.FileFilter{{
Pattern: filterPattern,
}},
CanCreateDirectories: true,
})
if err != nil {
return "", err
}
if path == "" {
return "", nil
}
if err := os.WriteFile(path, savedContent, 0644); err != nil {
return "", err
}
return path, nil
}
// Only return the path of the selected file, because communication between frontend and backend is slow. Use AssetServer Handler to read the file.
func (a *App) OpenOpenFileDialog(filterPattern string) (string, error) {
path, err := wruntime.OpenFileDialog(a.ctx, wruntime.OpenDialogOptions{
Filters: []wruntime.FileFilter{{Pattern: filterPattern}},
})
if err != nil {
return "", err
}
if path == "" {
return "", nil
}
return path, nil
}
func (a *App) OpenFileFolder(path string, relative bool) error {
var absPath string
var err error
if relative {
absPath, err = filepath.Abs(a.exDir + path)
} else {
absPath, err = filepath.Abs(path)
}
if err != nil {
return err
}
@@ -125,10 +177,21 @@ func (a *App) OpenFileFolder(path string) error {
if err != nil {
return err
}
return nil
case "darwin":
fmt.Println("Running on macOS")
cmd := exec.Command("open", "-R", absPath)
err := cmd.Run()
if err != nil {
return err
}
return nil
case "linux":
fmt.Println("Running on Linux")
cmd := exec.Command("xdg-open", absPath)
err := cmd.Run()
if err != nil {
return err
}
return nil
}
return nil
return errors.New("unsupported OS")
}

backend-golang/midi.go Normal file

@@ -0,0 +1,170 @@
package backend_golang
import (
"errors"
"fmt"
"time"
"github.com/mattrtaylor/go-rtmidi"
"github.com/wailsapp/wails/v2/pkg/runtime"
)
type Port struct {
Name string `json:"name"`
}
type MIDIMessage struct {
MessageType string `json:"messageType"`
Channel int `json:"channel"`
Note int `json:"note"`
Velocity int `json:"velocity"`
Control int `json:"control"`
Value int `json:"value"`
}
var ports []Port
var input rtmidi.MIDIIn
var out rtmidi.MIDIOut
var activeIndex int = -1
var lastNoteTime time.Time
func (a *App) midiLoop() {
var err error
input, err = rtmidi.NewMIDIInDefault()
if err != nil {
runtime.EventsEmit(a.ctx, "midiError", err.Error())
return
}
out, err = rtmidi.NewMIDIOutDefault()
if err != nil {
runtime.EventsEmit(a.ctx, "midiError", err.Error())
}
err = out.OpenPort(0, "")
if err != nil {
runtime.EventsEmit(a.ctx, "midiError", err.Error())
}
ticker := time.NewTicker(500 * time.Millisecond)
go func() {
for {
<-ticker.C
count, err := input.PortCount()
if err != nil {
continue
}
ports = make([]Port, count)
for i := 0; i < count; i++ {
name, err := input.PortName(i)
if err == nil {
ports[i].Name = name
}
}
runtime.EventsEmit(a.ctx, "midiPorts", &ports)
}
}()
}
func (a *App) OpenMidiPort(index int) error {
if input == nil {
return errors.New("failed to initialize MIDI input")
}
if activeIndex == index {
return nil
}
input.Destroy()
var err error
input, err = rtmidi.NewMIDIInDefault()
if err != nil {
return err
}
err = input.SetCallback(func(msg rtmidi.MIDIIn, bytes []byte, t float64) {
// https://www.midi.org/specifications-old/item/table-1-summary-of-midi-message
// https://www.rfc-editor.org/rfc/rfc6295.html
//
// msgType channel
// 1001 0000
//
msgType := bytes[0] >> 4
channel := bytes[0] & 0x0f
switch msgType {
case 0x8:
elapsed := time.Since(lastNoteTime)
lastNoteTime = time.Now()
runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
MessageType: "ElapsedTime",
Value: int(elapsed.Milliseconds()),
})
note := bytes[1]
runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
MessageType: "NoteOff",
Channel: int(channel),
Note: int(note),
})
case 0x9:
elapsed := time.Since(lastNoteTime)
lastNoteTime = time.Now()
runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
MessageType: "ElapsedTime",
Value: int(elapsed.Milliseconds()),
})
note := bytes[1]
velocity := bytes[2]
runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
MessageType: "NoteOn",
Channel: int(channel),
Note: int(note),
Velocity: int(velocity),
})
case 0xb:
// control 12 => K1 knob, control 13 => K2 knob
control := bytes[1]
value := bytes[2]
runtime.EventsEmit(a.ctx, "midiMessage", &MIDIMessage{
MessageType: "ControlChange",
Channel: int(channel),
Control: int(control),
Value: int(value),
})
default:
fmt.Printf("Unknown midi message: %v\n", bytes)
}
})
if err != nil {
return err
}
err = input.OpenPort(index, "")
if err != nil {
return err
}
activeIndex = index
lastNoteTime = time.Now()
return nil
}
func (a *App) CloseMidiPort() error {
if input == nil {
return errors.New("failed to initialize MIDI input")
}
if activeIndex == -1 {
return nil
}
activeIndex = -1
input.Destroy()
var err error
input, err = rtmidi.NewMIDIInDefault()
if err != nil {
return err
}
return nil
}
func (a *App) PlayNote(msg MIDIMessage) error {
if out == nil {
return errors.New("failed to initialize MIDI output")
}
channelByte := byte(msg.Channel)
if msg.MessageType == "NoteOn" {
out.SendMessage([]byte{0x90 | channelByte, byte(msg.Note), byte(msg.Velocity)})
} else if msg.MessageType == "NoteOff" {
out.SendMessage([]byte{0x80 | channelByte, byte(msg.Note), byte(msg.Velocity)})
}
return nil
}
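As a side note on the callback logic above: a MIDI status byte packs the message type into the high nibble and the channel into the low nibble, which is all the bit arithmetic the Go code does. The same split in a few lines of Python, with example byte values:
```python
def parse_status(status: int) -> tuple[int, int]:
    """Split a MIDI status byte into (message type, channel)."""
    return status >> 4, status & 0x0F

# 0x93 = NoteOn (0x9) on channel 3; 0x85 = NoteOff (0x8) on channel 5; 0xB0 = ControlChange on channel 0
for status in (0x93, 0x85, 0xB0):
    msg_type, channel = parse_status(status)
    print(f"type={msg_type:#x} channel={channel}")
```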


@@ -1,60 +1,176 @@
package backend_golang
import (
"encoding/json"
"errors"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
)
func (a *App) StartServer(port int, host string) (string, error) {
python, err := GetPython()
func (a *App) StartServer(python string, port int, host string, webui bool, rwkvBeta bool) (string, error) {
var err error
if python == "" {
python, err = GetPython()
}
if err != nil {
return "", err
}
return Cmd(python, "./backend-python/main.py", strconv.Itoa(port), host)
args := []string{python, "./backend-python/main.py"}
if webui {
args = append(args, "--webui")
}
if rwkvBeta {
args = append(args, "--rwkv-beta")
}
args = append(args, "--port", strconv.Itoa(port), "--host", host)
return Cmd(args...)
}
func (a *App) ConvertModel(modelPath string, strategy string, outPath string) (string, error) {
python, err := GetPython()
func (a *App) StartWebGPUServer(port int, host string) (string, error) {
args := []string{"./backend-rust/webgpu_server"}
args = append(args, "--port", strconv.Itoa(port), "--ip", host)
return Cmd(args...)
}
func (a *App) ConvertModel(python string, modelPath string, strategy string, outPath string) (string, error) {
var err error
if python == "" {
python, err = GetPython()
}
if err != nil {
return "", err
}
return Cmd(python, "./backend-python/convert_model.py", "--in", modelPath, "--out", outPath, "--strategy", strategy)
}
func (a *App) DepCheck() error {
python, err := GetPython()
func (a *App) ConvertSafetensors(modelPath string, outPath string) (string, error) {
args := []string{"./backend-rust/web-rwkv-converter"}
args = append(args, "--input", modelPath, "--output", outPath)
return Cmd(args...)
}
func (a *App) ConvertData(python string, input string, outputPrefix string, vocab string) (string, error) {
var err error
if python == "" {
python, err = GetPython()
}
if err != nil {
return "", err
}
tokenizerType := "HFTokenizer"
if strings.Contains(vocab, "rwkv_vocab_v20230424") {
tokenizerType = "RWKVTokenizer"
}
input = strings.TrimSuffix(input, "/")
if fi, err := os.Stat(input); err == nil && fi.IsDir() {
files, err := os.ReadDir(input)
if err != nil {
return "", err
}
jsonlFile, err := os.Create(outputPrefix + ".jsonl")
if err != nil {
return "", err
}
defer jsonlFile.Close()
for _, file := range files {
if file.IsDir() || !strings.HasSuffix(file.Name(), ".txt") {
continue
}
textContent, err := os.ReadFile(input + "/" + file.Name())
if err != nil {
return "", err
}
textJson, err := json.Marshal(map[string]string{"text": strings.ReplaceAll(strings.ReplaceAll(string(textContent), "\r\n", "\n"), "\r", "\n")})
if err != nil {
return "", err
}
if _, err := jsonlFile.WriteString(string(textJson) + "\n"); err != nil {
return "", err
}
}
input = outputPrefix + ".jsonl"
} else if err != nil {
return "", err
}
return Cmd(python, "./finetune/json2binidx_tool/tools/preprocess_data.py", "--input", input, "--output-prefix", outputPrefix, "--vocab", vocab,
"--tokenizer-type", tokenizerType, "--dataset-impl", "mmap", "--append-eod")
}
func (a *App) MergeLora(python string, useGpu bool, loraAlpha int, baseModel string, loraPath string, outputPath string) (string, error) {
var err error
if python == "" {
python, err = GetPython()
}
if err != nil {
return "", err
}
args := []string{python, "./finetune/lora/merge_lora.py"}
if useGpu {
args = append(args, "--use-gpu")
}
args = append(args, strconv.Itoa(loraAlpha), baseModel, loraPath, outputPath)
return Cmd(args...)
}
func (a *App) DepCheck(python string) error {
var err error
if python == "" {
python, err = GetPython()
}
if err != nil {
return err
}
out, err := exec.Command(python, "./backend-python/dep_check.py").CombinedOutput()
out, err := exec.Command(python, a.exDir+"./backend-python/dep_check.py").CombinedOutput()
if err != nil {
return errors.New("DepCheck Error: " + string(out))
}
return nil
}
func (a *App) InstallPyDep(cnMirror bool) (string, error) {
python, err := GetPython()
func (a *App) InstallPyDep(python string, cnMirror bool) (string, error) {
var err error
if python == "" {
python, err = GetPython()
if runtime.GOOS == "windows" {
python = `"%CD%/` + python + `"`
}
}
if err != nil {
return "", err
}
if runtime.GOOS == "windows" {
ChangeFileLine("./py310/python310._pth", 3, "Lib\\site-packages")
installScript := python + " ./backend-python/get-pip.py -i https://pypi.tuna.tsinghua.edu.cn/simple --no-warn-script-location\n" +
python + " -m pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117 --no-warn-script-location\n" +
python + " -m pip install -r ./backend-python/requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple --no-warn-script-location\n" +
"exit"
if !cnMirror {
installScript = strings.Replace(installScript, " -i https://pypi.tuna.tsinghua.edu.cn/simple", "", -1)
}
err = os.WriteFile("./install-py-dep.bat", []byte(installScript), 0644)
if err != nil {
return "", err
}
return Cmd("install-py-dep.bat")
}
if cnMirror {
_, err = Cmd(python, "./backend-python/get-pip.py", "-i", "https://pypi.tuna.tsinghua.edu.cn/simple")
return Cmd(python, "-m", "pip", "install", "-r", "./backend-python/requirements_without_cyac.txt", "-i", "https://pypi.tuna.tsinghua.edu.cn/simple")
} else {
_, err = Cmd(python, "./backend-python/get-pip.py")
}
if err != nil {
return "", err
}
ChangeFileLine("./py310/python310._pth", 3, "Lib\\site-packages")
_, err = Cmd(python, "-m", "pip", "install", "torch==1.13.1", "torchvision==0.14.1", "torchaudio==0.13.1", "--index-url", "https://download.pytorch.org/whl/cu117")
if err != nil {
return "", err
}
if cnMirror {
return Cmd(python, "-m", "pip", "install", "-r", "./backend-python/requirements.txt", "-i", "https://pypi.tuna.tsinghua.edu.cn/simple")
} else {
return Cmd(python, "-m", "pip", "install", "-r", "./backend-python/requirements_versions.txt")
return Cmd(python, "-m", "pip", "install", "-r", "./backend-python/requirements_without_cyac.txt")
}
}
func (a *App) GetPyError() string {
content, err := os.ReadFile("./error.txt")
if err != nil {
return ""
}
return string(content)
}


@@ -3,32 +3,106 @@ package backend_golang
import (
"archive/zip"
"bufio"
"embed"
"errors"
"fmt"
"io"
"io/fs"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
)
func Cmd(args ...string) (string, error) {
_, err := os.Stat("cmd-helper.bat")
if err != nil {
switch platform := runtime.GOOS; platform {
case "windows":
if err := os.WriteFile("./cmd-helper.bat", []byte("start %*"), 0644); err != nil {
return "", err
}
cmdHelper, err := filepath.Abs("./cmd-helper")
if err != nil {
return "", err
}
if strings.Contains(cmdHelper, " ") {
for _, arg := range args {
if strings.Contains(arg, " ") {
return "", errors.New("path contains space") // golang bug https://github.com/golang/go/issues/17149#issuecomment-473976818
}
}
}
cmd := exec.Command(cmdHelper, args...)
out, err := cmd.CombinedOutput()
if err != nil {
return "", err
}
return string(out), nil
case "darwin":
ex, err := os.Executable()
if err != nil {
return "", err
}
exDir := filepath.Dir(ex) + "/../../../"
cmd := exec.Command("osascript", "-e", `tell application "Terminal" to do script "`+"cd "+exDir+" && "+strings.Join(args, " ")+`"`)
err = cmd.Start()
if err != nil {
return "", err
}
cmd.Wait()
return "", nil
case "linux":
cmd := exec.Command(args[0], args[1:]...)
err := cmd.Start()
if err != nil {
return "", err
}
cmd.Wait()
return "", nil
}
cmdHelper, err := filepath.Abs("./cmd-helper")
if err != nil {
return "", err
return "", errors.New("unsupported OS")
}
func CopyEmbed(efs embed.FS) error {
prefix := ""
if runtime.GOOS == "darwin" {
ex, err := os.Executable()
if err != nil {
return err
}
prefix = filepath.Dir(ex) + "/../../../"
}
cmd := exec.Command(cmdHelper, args...)
out, err := cmd.CombinedOutput()
if err != nil {
return "", err
}
return string(out), nil
err := fs.WalkDir(efs, ".", func(path string, d fs.DirEntry, err error) error {
if d.IsDir() {
return nil
}
if err != nil {
return err
}
content, err := efs.ReadFile(path)
if err != nil {
return err
}
path = prefix + path
err = os.MkdirAll(path[:strings.LastIndex(path, "/")], 0755)
if err != nil {
return err
}
err = os.WriteFile(path, content, 0644)
if err != nil {
return err
}
return nil
})
return err
}
func GetPython() (string, error) {
@@ -134,3 +208,12 @@ func Unzip(source, destination string) error {
}
return nil
}
func (a *App) IsPortAvailable(port int) bool {
l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%s", strconv.Itoa(port)))
if err != nil {
return false
}
defer l.Close()
return true
}

View File

@@ -0,0 +1,31 @@
//go:build darwin || linux
package backend_golang
import (
"errors"
)
func (a *App) WslStart() error {
return errors.New("wsl not supported")
}
func (a *App) WslCommand(command string) error {
return errors.New("wsl not supported")
}
func (a *App) WslStop() error {
return errors.New("wsl not supported")
}
func (a *App) WslIsEnabled() error {
return errors.New("wsl not supported")
}
func (a *App) WslEnable(forceMode bool) error {
return errors.New("wsl not supported")
}
func (a *App) WslInstallUbuntu() error {
return errors.New("wsl not supported")
}

View File

@@ -0,0 +1,181 @@
//go:build windows
package backend_golang
import (
"bufio"
"context"
"errors"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
su "github.com/nyaosorg/go-windows-su"
wsl "github.com/ubuntu/gowsl"
wruntime "github.com/wailsapp/wails/v2/pkg/runtime"
)
var distro *wsl.Distro
var stdin io.WriteCloser
var cmd *exec.Cmd
func isWslRunning() (bool, error) {
if distro == nil {
return false, nil
}
state, err := distro.State()
if err != nil {
return false, err
}
if state != wsl.Running {
distro = nil
return false, nil
}
return true, nil
}
func (a *App) WslStart() error {
running, err := isWslRunning()
if err != nil {
return err
}
if running {
return nil
}
distros, err := wsl.RegisteredDistros(context.Background())
if err != nil {
return err
}
for _, d := range distros {
if strings.Contains(d.Name(), "Ubuntu") {
distro = &d
break
}
}
if distro == nil {
return errors.New("ubuntu not found")
}
cmd = exec.Command("wsl", "-d", distro.Name(), "-u", "root")
stdin, err = cmd.StdinPipe()
if err != nil {
return err
}
stdout, err := cmd.StdoutPipe()
cmd.Stderr = cmd.Stdout
if err != nil {
// stdin.Close()
stdin = nil
return err
}
go func() {
reader := bufio.NewReader(stdout)
for {
if stdin == nil {
break
}
line, _, err := reader.ReadLine()
if err != nil {
wruntime.EventsEmit(a.ctx, "wslerr", err.Error())
break
}
wruntime.EventsEmit(a.ctx, "wsl", string(line))
}
// stdout.Close()
}()
if err := cmd.Start(); err != nil {
return err
}
return nil
}
func (a *App) WslCommand(command string) error {
running, err := isWslRunning()
if err != nil {
return err
}
if !running {
return errors.New("wsl not running")
}
_, err = stdin.Write([]byte(command + "\n"))
if err != nil {
return err
}
return nil
}
func (a *App) WslStop() error {
running, err := isWslRunning()
if err != nil {
return err
}
if !running {
return errors.New("wsl not running")
}
if cmd != nil {
err = cmd.Process.Kill()
cmd = nil
}
// stdin.Close()
stdin = nil
distro = nil
if err != nil {
return err
}
return nil
}
func (a *App) WslIsEnabled() error {
ex, err := os.Executable()
if err != nil {
return err
}
exDir := filepath.Dir(ex)
data, err := os.ReadFile(exDir + "/wsl.state")
if err == nil {
if strings.Contains(string(data), "Enabled") {
return nil
}
}
cmd := `-Command (Get-WindowsOptionalFeature -Online -FeatureName Microsoft-Windows-Subsystem-Linux).State | Out-File -Encoding utf8 -FilePath ` + exDir + "/wsl.state"
_, err = su.ShellExecute(su.RUNAS, "powershell", cmd, exDir)
if err != nil {
return err
}
time.Sleep(2 * time.Second)
data, err = os.ReadFile(exDir + "/wsl.state")
if err != nil {
return err
}
if strings.Contains(string(data), "Enabled") {
return nil
} else {
return errors.New("wsl is not enabled")
}
}
func (a *App) WslEnable(forceMode bool) error {
cmd := `/online /enable-feature /featurename:Microsoft-Windows-Subsystem-Linux`
_, err := su.ShellExecute(su.RUNAS, "dism", cmd, `C:\`)
if err != nil {
return err
}
if forceMode {
os.WriteFile("./wsl.state", []byte("Enabled"), 0644)
}
return nil
}
func (a *App) WslInstallUbuntu() error {
_, err := Cmd("ms-windows-store://pdp/?ProductId=9PN20MSR04DW")
return err
}

View File

@@ -219,13 +219,18 @@ def get_args():
return p.parse_args()
args = get_args()
if not args.quiet:
print(f"** {args}")
try:
args = get_args()
if not args.quiet:
print(f"** {args}")
RWKV(
getattr(args, "in"),
args.strategy,
verbose=not args.quiet,
convert_and_save_and_exit=args.out,
)
RWKV(
getattr(args, "in"),
args.strategy,
verbose=not args.quiet,
convert_and_save_and_exit=args.out,
)
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))

82 backend-python/convert_safetensors.py vendored Normal file
View File

@@ -0,0 +1,82 @@
import json
import os
import sys
import copy
import torch
from safetensors.torch import load_file, save_file
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, help="Path to input pth model")
parser.add_argument(
"--output",
type=str,
default="./converted.st",
help="Path to output safetensors model",
)
args = parser.parse_args()
def rename_key(rename, name):
for k, v in rename.items():
if k in name:
name = name.replace(k, v)
return name
def convert_file(pt_filename: str, sf_filename: str, rename={}, transpose_names=[]):
loaded = torch.load(pt_filename, map_location="cpu")
if "state_dict" in loaded:
loaded = loaded["state_dict"]
loaded = {k: v.clone().half() for k, v in loaded.items()}
# for k, v in loaded.items():
# print(f'{k}\t{v.shape}\t{v.dtype}')
loaded = {rename_key(rename, k).lower(): v.contiguous() for k, v in loaded.items()}
# For tensors to be contiguous
for k, v in loaded.items():
for transpose_name in transpose_names:
if transpose_name in k:
loaded[k] = v.transpose(0, 1)
loaded = {k: v.clone().half().contiguous() for k, v in loaded.items()}
for k, v in loaded.items():
print(f"{k}\t{v.shape}\t{v.dtype}")
dirname = os.path.dirname(sf_filename)
os.makedirs(dirname, exist_ok=True)
save_file(loaded, sf_filename, metadata={"format": "pt"})
reloaded = load_file(sf_filename)
for k in loaded:
pt_tensor = loaded[k]
sf_tensor = reloaded[k]
if not torch.equal(pt_tensor, sf_tensor):
raise RuntimeError(f"The output tensors do not match for key {k}")
if __name__ == "__main__":
try:
convert_file(
args.input,
args.output,
rename={
"time_faaaa": "time_first",
"time_maa": "time_mix",
"lora_A": "lora.0",
"lora_B": "lora.1",
},
transpose_names=[
"time_mix_w1",
"time_mix_w2",
"time_decay_w1",
"time_decay_w2",
],
)
print(f"Saved to {args.output}")
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))
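For reference, the converter is a standalone script; a minimal sketch of invoking it from Python, assuming an existing .pth checkpoint (paths here are placeholders, not values from this repo):

# Sketch: invoking convert_safetensors.py as a subprocess; paths are placeholders.
import subprocess

result = subprocess.run(
    [
        "python", "./backend-python/convert_safetensors.py",
        "--input", "./models/model.pth",   # placeholder input checkpoint
        "--output", "./models/model.st",   # safetensors output path
    ],
    capture_output=True,
    text=True,
)
# On failure the script also writes error.txt, which GetPyError surfaces.
print(result.stdout or result.stderr)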

View File

@@ -1,7 +1,19 @@
import multipart
import fitz
import safetensors
import midi2audio
import mido
import lm_dataformat
import ftfy
import tqdm
import tiktoken
import GPUtil
import torch
import rwkv
import langchain
import numpy
import tokenizers
import fastapi
import uvicorn
import sse_starlette

32321 backend-python/get-pip.py vendored Normal file

File diff suppressed because it is too large

View File

@@ -1,8 +1,10 @@
from enum import Enum, auto
Args = "args"
Model = "model"
Model_Status = "model_status"
Model_Config = "model_config"
Deploy_Mode = "deploy_mode"
class ModelStatus(Enum):
@@ -15,6 +17,7 @@ def init():
global GLOBALS
GLOBALS = {}
set(Model_Status, ModelStatus.Offline)
set(Deploy_Mode, False)
def set(key, value):

View File

@@ -1,21 +1,72 @@
import time
start_time = time.time()
import argparse
from typing import Union, Sequence
def get_args(args: Union[Sequence[str], None] = None):
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title="server arguments")
group.add_argument(
"--port",
type=int,
default=8000,
help="port to run the server on (default: 8000)",
)
group.add_argument(
"--host",
type=str,
default="127.0.0.1",
help="host to run the server on (default: 127.0.0.1)",
)
group = parser.add_argument_group(title="mode arguments")
group.add_argument(
"--webui",
action="store_true",
help="whether to enable WebUI (default: False)",
)
group.add_argument(
"--rwkv-beta",
action="store_true",
help="whether to use rwkv-beta (default: False)",
)
args = parser.parse_args(args)
return args
if __name__ == "__main__":
args = get_args()
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import psutil
from fastapi import FastAPI
from contextlib import asynccontextmanager
from fastapi import Depends, FastAPI, status
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from utils.rwkv import *
from utils.torch import *
from utils.ngrok import *
from routes import completion, config
from utils.log import log_middleware
from routes import completion, config, state_cache, midi, misc, file_process
import global_var
app = FastAPI()
@asynccontextmanager
async def lifespan(app: FastAPI):
init()
yield
app = FastAPI(lifespan=lifespan, dependencies=[Depends(log_middleware)])
app.add_middleware(
CORSMiddleware,
@@ -27,25 +78,17 @@ app.add_middleware(
app.include_router(completion.router)
app.include_router(config.router)
app.include_router(midi.router)
app.include_router(file_process.router)
app.include_router(misc.router)
app.include_router(state_cache.router)
@app.on_event("startup")
def init():
global_var.init()
set_torch()
if os.environ.get("ngrok_token") is not None:
ngrok_connect()
@app.get("/")
def read_root():
return {"Hello": "World!", "pid": os.getpid()}
@app.post("/exit")
@app.post("/exit", tags=["Root"])
def exit():
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
parent_pid = os.getpid()
parent = psutil.Process(parent_pid)
for child in parent.children(recursive=True):
@@ -53,20 +96,39 @@ def exit():
parent.kill()
def debug():
model = RWKV(
model="../models/RWKV-4-Raven-7B-v11-Eng49%-Chn49%-Jpn1%-Other1%-20230430-ctx8192.pth",
strategy="cuda fp16",
tokens_path="20B_tokenizer.json",
try:
if (
"RWKV_RUNNER_PARAMS" in os.environ
and "--webui" in os.environ["RWKV_RUNNER_PARAMS"].split(" ")
) or args.webui:
from webui_server import webui_server
app.mount("/", webui_server)
except NameError:
pass
@app.get("/", tags=["Root"])
def read_root():
return {"Hello": "World!"}
def init():
global_var.init()
cmd_params = os.environ["RWKV_RUNNER_PARAMS"]
global_var.set(
global_var.Args, get_args(cmd_params.split(" ") if cmd_params else None)
)
d = model.tokenizer.decode([])
print(d)
state_cache.init()
set_torch()
if os.environ.get("ngrok_token") is not None:
ngrok_connect()
if __name__ == "__main__":
uvicorn.run(
"main:app",
port=8000 if len(sys.argv) < 2 else int(sys.argv[1]),
host="127.0.0.1" if len(sys.argv) < 3 else sys.argv[2],
)
# debug()
os.environ["RWKV_RUNNER_PARAMS"] = " ".join(sys.argv[1:])
print("--- %s seconds ---" % (time.time() - start_time))
uvicorn.run("main:app", port=args.port, host=args.host, workers=1)
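The __main__ block above forwards the CLI arguments through the RWKV_RUNNER_PARAMS environment variable so that init() can re-parse them inside the uvicorn worker. A minimal sketch of an equivalent programmatic launch (port and host are example values):

# Sketch: launching the server the way the __main__ block does.
import os
import uvicorn

os.environ["RWKV_RUNNER_PARAMS"] = "--port 8000 --webui"  # re-parsed by init()
uvicorn.run("main:app", port=8000, host="127.0.0.1", workers=1)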

Binary file not shown.

View File

@@ -0,0 +1,23 @@
torch
torchvision
torchaudio
rwkv==0.8.22
langchain==0.0.322
fastapi==0.104.0
uvicorn==0.23.2
sse-starlette==1.6.5
pydantic==2.4.2
psutil==5.9.6
gputil==1.4.0
tiktoken==0.5.1
ftfy==6.1.1
lm-dataformat==0.0.20
numpy==1.24.4
tokenizers==0.14.1
tqdm==4.66.1
midi2audio==0.1.1
mido==1.3.0
safetensors==0.4.0
PyMuPDF==1.23.5
python-multipart==0.0.6
Cython==3.0.4

View File

@@ -1,209 +1,174 @@
import asyncio
import json
from threading import Lock
from typing import List
from typing import List, Union
from enum import Enum
import base64
from fastapi import APIRouter, Request, status, HTTPException
from sse_starlette.sse import EventSourceResponse
from pydantic import BaseModel
from pydantic import BaseModel, Field
import numpy as np
import tiktoken
from utils.rwkv import *
from utils.log import quick_log
import global_var
router = APIRouter()
interface = ":"
user = "Bob"
bot = "Alice"
class Role(Enum):
User = "user"
Assistant = "assistant"
System = "system"
class Message(BaseModel):
role: str
content: str
role: Role
content: str = Field(min_length=0)
raw: bool = Field(False, description="Whether to treat content as raw text")
default_stop = [
"\n\nUser",
"\n\nQuestion",
"\n\nQ",
"\n\nHuman",
"\n\nBob",
"\n\nAssistant",
"\n\nAnswer",
"\n\nA",
"\n\nBot",
"\n\nAlice",
]
class ChatCompletionBody(ModelConfigBody):
messages: List[Message]
model: str = "rwkv"
messages: Union[List[Message], None]
model: Union[str, None] = "rwkv"
stream: bool = False
stop: str = None
stop: Union[str, List[str], None] = default_stop
user_name: Union[str, None] = Field(
None, description="Internal user name", min_length=1
)
assistant_name: Union[str, None] = Field(
None, description="Internal assistant name", min_length=1
)
presystem: bool = Field(
True, description="Whether to insert default system prompt at the beginning"
)
model_config = {
"json_schema_extra": {
"example": {
"messages": [
{"role": Role.User.value, "content": "hello", "raw": False}
],
"model": "rwkv",
"stream": False,
"stop": None,
"user_name": None,
"assistant_name": None,
"presystem": True,
"max_tokens": 1000,
"temperature": 1.2,
"top_p": 0.5,
"presence_penalty": 0.4,
"frequency_penalty": 0.4,
}
}
}
class CompletionBody(ModelConfigBody):
prompt: Union[str, List[str], None]
model: Union[str, None] = "rwkv"
stream: bool = False
stop: Union[str, List[str], None] = None
model_config = {
"json_schema_extra": {
"example": {
"prompt": "The following is an epic science fiction masterpiece that is immortalized, "
+ "with delicate descriptions and grand depictions of interstellar civilization wars.\nChapter 1.\n",
"model": "rwkv",
"stream": False,
"stop": None,
"max_tokens": 100,
"temperature": 1.2,
"top_p": 0.5,
"presence_penalty": 0.4,
"frequency_penalty": 0.4,
}
}
}
completion_lock = Lock()
requests_num = 0
@router.post("/v1/chat/completions")
@router.post("/chat/completions")
async def chat_completions(body: ChatCompletionBody, request: Request):
model: RWKV = global_var.get(global_var.Model)
if model is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "model not loaded")
question = body.messages[-1]
if question.role == "user":
question = question.content
async def eval_rwkv(
model: AbstractRWKV,
request: Request,
body: ModelConfigBody,
prompt: str,
stream: bool,
stop: Union[str, List[str], None],
chat_mode: bool,
):
global requests_num
requests_num = requests_num + 1
quick_log(request, None, "Start Waiting. RequestsNum: " + str(requests_num))
while completion_lock.locked():
if await request.is_disconnected():
requests_num = requests_num - 1
print(f"{request.client} Stop Waiting (Lock)")
quick_log(
request,
None,
"Stop Waiting (Lock). RequestsNum: " + str(requests_num),
)
return
await asyncio.sleep(0.1)
else:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "no question found")
completion_text = f"""
The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. \
{bot} is very intelligent, creative and friendly. \
{bot} is unlikely to disagree with {user}, and {bot} doesn't like to ask {user} questions. \
{bot} likes to tell {user} a lot about herself and her opinions. \
{bot} usually gives {user} kind, helpful and informative advices.\n
"""
for message in body.messages:
if message.role == "system":
completion_text = (
f"The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. "
+ message.content.replace("\\n", "\n")
.replace("\r\n", "\n")
.replace("\n\n", "\n")
.replace("\n", " ")
.strip()
.replace("You are", f"{bot} is")
.replace("you are", f"{bot} is")
.replace("You're", f"{bot} is")
.replace("you're", f"{bot} is")
.replace("You", f"{bot}")
.replace("you", f"{bot}")
.replace("Your", f"{bot}'s")
.replace("your", f"{bot}'s")
.replace("", f"{bot}")
+ "\n\n"
)
elif message.role == "user":
completion_text += (
f"{user}{interface} "
+ message.content.replace("\\n", "\n")
.replace("\r\n", "\n")
.replace("\n\n", "\n")
.strip()
+ "\n\n"
)
elif message.role == "assistant":
completion_text += (
f"{bot}{interface} "
+ message.content.replace("\\n", "\n")
.replace("\r\n", "\n")
.replace("\n\n", "\n")
.strip()
+ "\n\n"
)
completion_text += f"{bot}{interface}"
async def eval_rwkv():
while completion_lock.locked():
await asyncio.sleep(0.1)
else:
completion_lock.acquire()
with completion_lock:
if await request.is_disconnected():
requests_num = requests_num - 1
print(f"{request.client} Stop Waiting (Lock)")
quick_log(
request,
None,
"Stop Waiting (Lock). RequestsNum: " + str(requests_num),
)
return
set_rwkv_config(model, global_var.get(global_var.Model_Config))
set_rwkv_config(model, body)
if body.stream:
for response, delta in rwkv_generate(
model,
completion_text,
stop=f"\n\n{user}" if body.stop is None else body.stop,
):
if await request.is_disconnected():
break
response, prompt_tokens, completion_tokens = "", 0, 0
for response, delta, prompt_tokens, completion_tokens in model.generate(
prompt,
stop=stop,
):
if await request.is_disconnected():
break
if stream:
yield json.dumps(
{
"response": response,
"model": "rwkv",
"object": "chat.completion.chunk"
if chat_mode
else "text_completion",
# "response": response,
"model": model.name,
"choices": [
{
"delta": {"content": delta},
"index": 0,
"finish_reason": None,
}
],
}
)
# torch_gc()
completion_lock.release()
if await request.is_disconnected():
return
yield json.dumps(
{
"response": response,
"model": "rwkv",
"choices": [
{
"delta": {},
"index": 0,
"finish_reason": "stop",
}
],
}
)
yield "[DONE]"
else:
response = None
for response, delta in rwkv_generate(
model,
completion_text,
stop=f"\n\n{user}" if body.stop is None else body.stop,
):
if await request.is_disconnected():
break
# torch_gc()
completion_lock.release()
if await request.is_disconnected():
return
yield {
"response": response,
"model": "rwkv",
"choices": [
{
"message": {
"role": "assistant",
"content": response,
},
"index": 0,
"finish_reason": "stop",
}
],
}
if body.stream:
return EventSourceResponse(eval_rwkv())
else:
return await eval_rwkv().__anext__()
class CompletionBody(ModelConfigBody):
prompt: str
model: str = "rwkv"
stream: bool = False
stop: str = None
@router.post("/v1/completions")
@router.post("/completions")
async def completions(body: CompletionBody, request: Request):
model: RWKV = global_var.get(global_var.Model)
if model is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "model not loaded")
async def eval_rwkv():
while completion_lock.locked():
await asyncio.sleep(0.1)
else:
completion_lock.acquire()
set_rwkv_config(model, global_var.get(global_var.Model_Config))
set_rwkv_config(model, body)
if body.stream:
for response, delta in rwkv_generate(
model, body.prompt, stop=body.stop
):
if await request.is_disconnected():
break
yield json.dumps(
{
"response": response,
"model": "rwkv",
"choices": [
{
if chat_mode
else {
"text": delta,
"index": 0,
"finish_reason": None,
@@ -211,16 +176,37 @@ async def completions(body: CompletionBody, request: Request):
],
}
)
# torch_gc()
completion_lock.release()
if await request.is_disconnected():
return
# torch_gc()
requests_num = requests_num - 1
if await request.is_disconnected():
print(f"{request.client} Stop Waiting")
quick_log(
request,
body,
response + "\nStop Waiting. RequestsNum: " + str(requests_num),
)
return
quick_log(
request,
body,
response + "\nFinished. RequestsNum: " + str(requests_num),
)
if stream:
yield json.dumps(
{
"response": response,
"model": "rwkv",
"object": "chat.completion.chunk"
if chat_mode
else "text_completion",
# "response": response,
"model": model.name,
"choices": [
{
"delta": {},
"index": 0,
"finish_reason": "stop",
}
if chat_mode
else {
"text": "",
"index": 0,
"finish_reason": "stop",
@@ -230,21 +216,26 @@ async def completions(body: CompletionBody, request: Request):
)
yield "[DONE]"
else:
response = None
for response, delta in rwkv_generate(
model, body.prompt, stop=body.stop
):
if await request.is_disconnected():
break
# torch_gc()
completion_lock.release()
if await request.is_disconnected():
return
yield {
"response": response,
"model": "rwkv",
"object": "chat.completion" if chat_mode else "text_completion",
# "response": response,
"model": model.name,
"usage": {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": prompt_tokens + completion_tokens,
},
"choices": [
{
"message": {
"role": Role.Assistant.value,
"content": response,
},
"index": 0,
"finish_reason": "stop",
}
if chat_mode
else {
"text": response,
"index": 0,
"finish_reason": "stop",
@@ -252,7 +243,271 @@ async def completions(body: CompletionBody, request: Request):
],
}
@router.post("/v1/chat/completions", tags=["Completions"])
@router.post("/chat/completions", tags=["Completions"])
async def chat_completions(body: ChatCompletionBody, request: Request):
model: TextRWKV = global_var.get(global_var.Model)
if model is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "model not loaded")
if body.messages is None or body.messages == []:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "messages not found")
interface = model.interface
user = model.user if body.user_name is None else body.user_name
bot = model.bot if body.assistant_name is None else body.assistant_name
is_raven = model.rwkv_type == RWKVType.Raven
completion_text: str = ""
basic_system: Union[str, None] = None
if body.presystem:
if body.messages[0].role == Role.System:
basic_system = body.messages[0].content
if basic_system is None:
completion_text = (
f"""
The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. \
{bot} is very intelligent, creative and friendly. \
{bot} is unlikely to disagree with {user}, and {bot} doesn't like to ask {user} questions. \
{bot} likes to tell {user} a lot about herself and her opinions. \
{bot} usually gives {user} kind, helpful and informative advices.\n
"""
if is_raven
else (
f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
)
else:
if not body.messages[0].raw:
basic_system = (
basic_system.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\n\n", "\n")
.replace("\n", " ")
.strip()
)
completion_text = (
(
f"The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. "
if is_raven
else f"{user}{interface} hi\n\n{bot}{interface} Hi. "
)
+ basic_system.replace("You are", f"{bot} is" if is_raven else "I am")
.replace("you are", f"{bot} is" if is_raven else "I am")
.replace("You're", f"{bot} is" if is_raven else "I'm")
.replace("you're", f"{bot} is" if is_raven else "I'm")
.replace("You", f"{bot}" if is_raven else "I")
.replace("you", f"{bot}" if is_raven else "I")
.replace("Your", f"{bot}'s" if is_raven else "My")
.replace("your", f"{bot}'s" if is_raven else "my")
.replace("", f"{bot}" if is_raven else "")
+ "\n\n"
)
for message in body.messages[(0 if basic_system is None else 1) :]:
append_message: str = ""
if message.role == Role.User:
append_message = f"{user}{interface} " + message.content
elif message.role == Role.Assistant:
append_message = f"{bot}{interface} " + message.content
elif message.role == Role.System:
append_message = message.content
if not message.raw:
append_message = (
append_message.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\n\n", "\n")
.strip()
)
completion_text += append_message + "\n\n"
completion_text += f"{bot}{interface}"
user_code = model.pipeline.decode([model.pipeline.encode(user)[0]])
bot_code = model.pipeline.decode([model.pipeline.encode(bot)[0]])
if type(body.stop) == str:
body.stop = [body.stop, f"\n\n{user_code}", f"\n\n{bot_code}"]
elif type(body.stop) == list:
body.stop.append(f"\n\n{user_code}")
body.stop.append(f"\n\n{bot_code}")
elif body.stop is None:
body.stop = default_stop
if body.stream:
return EventSourceResponse(eval_rwkv())
return EventSourceResponse(
eval_rwkv(
model, request, body, completion_text, body.stream, body.stop, True
)
)
else:
return await eval_rwkv().__anext__()
try:
return await eval_rwkv(
model, request, body, completion_text, body.stream, body.stop, True
).__anext__()
except StopAsyncIteration:
return None
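Given the request schema shown earlier, a minimal client sketch for the non-streaming path (the base URL assumes a local server on the default port; requests is an external dependency):

# Sketch: calling /v1/chat/completions with the documented example body.
import requests

resp = requests.post(
    "http://127.0.0.1:8000/v1/chat/completions",
    json={
        "messages": [{"role": "user", "content": "hello", "raw": False}],
        "model": "rwkv",
        "stream": False,
        "max_tokens": 1000,
        "temperature": 1.2,
        "top_p": 0.5,
    },
)
# Non-streaming responses carry the reply under choices[0].message.content.
print(resp.json()["choices"][0]["message"]["content"])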
@router.post("/v1/completions", tags=["Completions"])
@router.post("/completions", tags=["Completions"])
async def completions(body: CompletionBody, request: Request):
model: AbstractRWKV = global_var.get(global_var.Model)
if model is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "model not loaded")
if body.prompt is None or body.prompt == "" or body.prompt == []:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "prompt not found")
if type(body.prompt) == list:
body.prompt = body.prompt[0] # TODO: support multiple prompts
if body.stream:
return EventSourceResponse(
eval_rwkv(model, request, body, body.prompt, body.stream, body.stop, False)
)
else:
try:
return await eval_rwkv(
model, request, body, body.prompt, body.stream, body.stop, False
).__anext__()
except StopAsyncIteration:
return None
class EmbeddingsBody(BaseModel):
input: Union[str, List[str], List[List[int]], None]
model: Union[str, None] = "rwkv"
encoding_format: str = None
fast_mode: bool = False
model_config = {
"json_schema_extra": {
"example": {
"input": "a big apple",
"model": "rwkv",
"encoding_format": None,
"fast_mode": False,
}
}
}
def embedding_base64(embedding: List[float]) -> str:
return base64.b64encode(np.array(embedding).astype(np.float32)).decode("utf-8")
@router.post("/v1/embeddings", tags=["Embeddings"])
@router.post("/embeddings", tags=["Embeddings"])
@router.post("/v1/engines/text-embedding-ada-002/embeddings", tags=["Embeddings"])
@router.post("/engines/text-embedding-ada-002/embeddings", tags=["Embeddings"])
async def embeddings(body: EmbeddingsBody, request: Request):
model: AbstractRWKV = global_var.get(global_var.Model)
if model is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "model not loaded")
if body.input is None or body.input == "" or body.input == [] or body.input == [[]]:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "input not found")
global requests_num
requests_num = requests_num + 1
quick_log(request, None, "Start Waiting. RequestsNum: " + str(requests_num))
while completion_lock.locked():
if await request.is_disconnected():
requests_num = requests_num - 1
print(f"{request.client} Stop Waiting (Lock)")
quick_log(
request,
None,
"Stop Waiting (Lock). RequestsNum: " + str(requests_num),
)
return
await asyncio.sleep(0.1)
else:
with completion_lock:
if await request.is_disconnected():
requests_num = requests_num - 1
print(f"{request.client} Stop Waiting (Lock)")
quick_log(
request,
None,
"Stop Waiting (Lock). RequestsNum: " + str(requests_num),
)
return
base64_format = False
if body.encoding_format == "base64":
base64_format = True
embeddings = []
prompt_tokens = 0
if type(body.input) == list:
if type(body.input[0]) == list:
encoding = tiktoken.model.encoding_for_model(
"text-embedding-ada-002"
)
for i in range(len(body.input)):
if await request.is_disconnected():
break
input = encoding.decode(body.input[i])
embedding, token_len = model.get_embedding(
input, body.fast_mode
)
prompt_tokens = prompt_tokens + token_len
if base64_format:
embedding = embedding_base64(embedding)
embeddings.append(embedding)
else:
for i in range(len(body.input)):
if await request.is_disconnected():
break
embedding, token_len = model.get_embedding(
body.input[i], body.fast_mode
)
prompt_tokens = prompt_tokens + token_len
if base64_format:
embedding = embedding_base64(embedding)
embeddings.append(embedding)
else:
embedding, prompt_tokens = model.get_embedding(
body.input, body.fast_mode
)
if base64_format:
embedding = embedding_base64(embedding)
embeddings.append(embedding)
requests_num = requests_num - 1
if await request.is_disconnected():
print(f"{request.client} Stop Waiting")
quick_log(
request,
None,
"Stop Waiting. RequestsNum: " + str(requests_num),
)
return
quick_log(
request,
None,
"Finished. RequestsNum: " + str(requests_num),
)
ret_data = [
{
"object": "embedding",
"index": i,
"embedding": embedding,
}
for i, embedding in enumerate(embeddings)
]
return {
"object": "list",
"data": ret_data,
"model": model.name,
"usage": {
"prompt_tokens": prompt_tokens,
"total_tokens": prompt_tokens,
},
}
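Because embedding_base64 packs the vector as base64-encoded float32 bytes, a client that asks for encoding_format "base64" has to reverse that packing. A minimal sketch (the local URL is an assumption):

# Sketch: requesting a base64 embedding and decoding it back to a vector.
import base64
import numpy as np
import requests

resp = requests.post(
    "http://127.0.0.1:8000/v1/embeddings",
    json={"input": "a big apple", "encoding_format": "base64"},
)
item = resp.json()["data"][0]
vec = np.frombuffer(base64.b64decode(item["embedding"]), dtype=np.float32)
print(vec.shape)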

View File

@@ -1,12 +1,11 @@
import pathlib
from utils.log import quick_log
from fastapi import APIRouter, HTTPException, Response, status
from fastapi import APIRouter, HTTPException, Request, Response, status as Status
from pydantic import BaseModel
from langchain.llms import RWKV
from utils.rwkv import *
from utils.torch import *
import global_var
import GPUtil
router = APIRouter()
@@ -14,36 +13,81 @@ router = APIRouter()
class SwitchModelBody(BaseModel):
model: str
strategy: str
tokenizer: Union[str, None] = None
customCuda: bool = False
deploy: bool = Field(
False,
description="Deploy mode. If success, will disable /switch-model, /exit and other dangerous APIs (state cache APIs, part of midi APIs)",
)
model_config = {
"json_schema_extra": {
"example": {
"model": "models/RWKV-4-World-3B-v1-20230619-ctx4096.pth",
"strategy": "cuda fp16",
"tokenizer": "",
"customCuda": False,
"deploy": False,
}
}
}
@router.post("/switch-model")
def switch_model(body: SwitchModelBody, response: Response):
@router.post("/switch-model", tags=["Configs"])
def switch_model(body: SwitchModelBody, response: Response, request: Request):
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(Status.HTTP_403_FORBIDDEN)
if global_var.get(global_var.Model_Status) is global_var.ModelStatus.Loading:
response.status_code = status.HTTP_304_NOT_MODIFIED
response.status_code = Status.HTTP_304_NOT_MODIFIED
return
global_var.set(global_var.Model_Status, global_var.ModelStatus.Offline)
global_var.set(global_var.Model, None)
torch_gc()
if body.model == "":
return "success"
STRATEGY_REGEX = r"^(?:(?:^|->) *(?:cuda(?::[\d]+)?|cpu|mps|dml) (?:fp(?:16|32)|bf16)(?:i8|i4|i3)?(?: \*[\d]+\+?)? *)+$"
if not re.match(STRATEGY_REGEX, body.strategy):
raise HTTPException(
Status.HTTP_400_BAD_REQUEST,
"Invalid strategy. Please read https://pypi.org/project/rwkv/",
)
devices = set(
[
x.strip().split(" ")[0].replace("cuda:0", "cuda")
for x in body.strategy.split("->")
]
)
print(f"Devices: {devices}")
# if len(devices) > 1:
# state_cache.disable_state_cache()
# else:
try:
state_cache.enable_state_cache()
except HTTPException:
pass
os.environ["RWKV_CUDA_ON"] = "1" if body.customCuda else "0"
global_var.set(global_var.Model_Status, global_var.ModelStatus.Loading)
try:
global_var.set(
global_var.Model,
RWKV(
model=body.model,
strategy=body.strategy,
tokens_path=f"{pathlib.Path(__file__).parent.parent.resolve()}/20B_tokenizer.json",
),
RWKV(model=body.model, strategy=body.strategy, tokenizer=body.tokenizer),
)
except Exception as e:
print(e)
quick_log(request, body, f"Exception: {e}")
global_var.set(global_var.Model_Status, global_var.ModelStatus.Offline)
raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR, "failed to load")
raise HTTPException(
Status.HTTP_500_INTERNAL_SERVER_ERROR, f"failed to load: {e}"
)
if body.deploy:
global_var.set(global_var.Deploy_Mode, True)
if global_var.get(global_var.Model_Config) is None:
global_var.set(
global_var.Model_Config, get_rwkv_config(global_var.get(global_var.Model))
@@ -53,7 +97,7 @@ def switch_model(body: SwitchModelBody, response: Response):
return "success"
@router.post("/update-config")
@router.post("/update-config", tags=["Configs"])
def update_config(body: ModelConfigBody):
"""
Will not update the model config immediately, but set it when a completion is called, to avoid modifications during generation
@@ -65,8 +109,10 @@ def update_config(body: ModelConfigBody):
return "success"
@router.get("/status")
@router.get("/status", tags=["Configs"])
def status():
import GPUtil
gpus = GPUtil.getGPUs()
if len(gpus) == 0:
device_name = "CPU"

View File

@@ -0,0 +1,79 @@
import os
from fastapi import (
APIRouter,
HTTPException,
status,
Depends,
File,
UploadFile,
)
from pydantic import BaseModel
from typing import Iterator
router = APIRouter()
class FileToTextParams(BaseModel):
file_name: str
file_encoding: str = "utf-8"
@router.post("/file-to-text", tags=["File Process"])
async def file_to_text(
params: FileToTextParams = Depends(), file_data: UploadFile = File(...)
):
from langchain.schema import Document
from langchain.document_loaders.blob_loaders import Blob
# from langchain
def parse_text(blob: Blob) -> Iterator[Document]:
yield Document(page_content=blob.as_string(), metadata={"source": blob.source})
# from langchain
def parse_pdf(blob: Blob) -> Iterator[Document]:
import fitz
with blob.as_bytes_io() as stream:
doc = fitz.Document(stream=stream)
yield from [
Document(
page_content=page.get_text(),
metadata=dict(
{
"source": blob.source,
"file_path": blob.source,
"page": page.number,
"total_pages": len(doc),
},
**{
k: doc.metadata[k]
for k in doc.metadata
if type(doc.metadata[k]) in [str, int]
},
),
)
for page in doc
]
file_parsers = {".txt": parse_text, ".pdf": parse_pdf}
file_name = file_data.filename or params.file_name
file_ext = os.path.splitext(file_name)[-1]
if file_ext not in file_parsers:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "file type not supported")
try:
pages: Iterator[Document] = file_parsers[file_ext](
Blob.from_data(
await file_data.read(),
encoding=params.file_encoding,
path=file_name,
)
)
pages = list(pages)
except Exception as e:
raise HTTPException(status.HTTP_400_BAD_REQUEST, f"{e}")
return {"pages": pages}

View File

@@ -0,0 +1,155 @@
import io
import global_var
from fastapi import APIRouter, HTTPException, UploadFile, status
from starlette.responses import StreamingResponse
from pydantic import BaseModel
from utils.midi import *
from midi2audio import FluidSynth
router = APIRouter()
class TextToMidiBody(BaseModel):
text: str
model_config = {
"json_schema_extra": {
"example": {
"text": "p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:2d:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:2d:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:1f:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:26:a g:39:a g:39:a g:3e:a g:3e:a g:42:a g:42:a pi:39:a pi:3e:a pi:42:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0",
}
}
}
@router.post("/text-to-midi", tags=["MIDI"])
def text_to_midi(body: TextToMidiBody):
vocab_config = "backend-python/utils/midi_vocab_config.json"
cfg = VocabConfig.from_json(vocab_config)
mid = convert_str_to_midi(cfg, body.text.strip())
mid_data = io.BytesIO()
mid.save(None, mid_data)
mid_data.seek(0)
return StreamingResponse(mid_data, media_type="audio/midi")
@router.post("/midi-to-text", tags=["MIDI"])
async def midi_to_text(file_data: UploadFile):
vocab_config = "backend-python/utils/midi_vocab_config.json"
cfg = VocabConfig.from_json(vocab_config)
mid = mido.MidiFile(file=file_data.file)
text = convert_midi_to_str(cfg, mid)
return {"text": text}
class TxtToMidiBody(BaseModel):
txt_path: str
midi_path: str
model_config = {
"json_schema_extra": {
"example": {
"txt_path": "midi/sample.txt",
"midi_path": "midi/sample.mid",
}
}
}
@router.post("/txt-to-midi", tags=["MIDI"])
def txt_to_midi(body: TxtToMidiBody):
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
if not body.midi_path.startswith("midi/"):
raise HTTPException(status.HTTP_400_BAD_REQUEST, "bad output path")
vocab_config = "backend-python/utils/midi_vocab_config.json"
cfg = VocabConfig.from_json(vocab_config)
with open(body.txt_path, "r") as f:
text = f.read()
text = text.strip()
mid = convert_str_to_midi(cfg, text)
mid.save(body.midi_path)
return "success"
class MidiToWavBody(BaseModel):
midi_path: str
wav_path: str
sound_font_path: str = "assets/default_sound_font.sf2"
model_config = {
"json_schema_extra": {
"example": {
"midi_path": "midi/sample.mid",
"wav_path": "midi/sample.wav",
"sound_font_path": "assets/default_sound_font.sf2",
}
}
}
@router.post("/midi-to-wav", tags=["MIDI"])
def midi_to_wav(body: MidiToWavBody):
"""
Install fluidsynth first, see more: https://github.com/FluidSynth/fluidsynth/wiki/Download#distributions
"""
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
if not body.wav_path.startswith("midi/"):
raise HTTPException(status.HTTP_400_BAD_REQUEST, "bad output path")
fs = FluidSynth(body.sound_font_path)
fs.midi_to_audio(body.midi_path, body.wav_path)
return "success"
class TextToWavBody(BaseModel):
text: str
wav_name: str
sound_font_path: str = "assets/default_sound_font.sf2"
model_config = {
"json_schema_extra": {
"example": {
"text": "p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:2e:a p:3b:a p:45:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2e:0 p:3b:0 p:45:0 b:26:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:26:a g:3e:a g:3e:a g:42:a g:42:a g:45:a g:45:a pi:3e:a pi:42:a pi:45:a t14 p:2a:0 p:3b:0 p:45:0 b:26:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:2d:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:2d:0 g:3e:0 g:3e:0 g:42:0 g:42:0 g:45:0 g:45:0 pi:3e:0 pi:42:0 pi:45:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2a:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a g:39:a g:39:a g:3d:a g:3d:a g:40:a g:40:a pi:39:a pi:3d:a pi:40:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 t2 p:26:a p:2e:a p:31:a p:39:a p:3b:a p:45:a b:21:a t14 p:26:0 p:2e:0 p:31:0 p:39:0 p:3b:0 p:45:0 b:21:0 g:39:0 g:39:0 g:3d:0 g:3d:0 g:40:0 g:40:0 pi:39:0 pi:3d:0 pi:40:0 t2 p:24:a p:2a:a p:31:a p:39:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:24:0 p:2a:0 p:31:0 p:39:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:2e:a p:3b:a p:45:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2e:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:26:a p:2a:a p:3b:a p:45:a t14 p:26:0 p:2a:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a b:1f:a g:3b:a g:3b:a g:3e:a g:3e:a g:43:a g:43:a pi:3b:a pi:3e:a pi:43:a t14 p:2a:0 p:3b:0 p:45:0 b:1f:0 t2 p:24:a p:2a:a p:3b:a p:45:a b:1f:a t14 p:24:0 p:2a:0 p:3b:0 p:45:0 b:1f:0 g:3b:0 g:3b:0 g:3e:0 g:3e:0 g:43:0 g:43:0 pi:3b:0 pi:3e:0 pi:43:0 t2 p:24:a p:2e:a p:3b:a p:45:a b:26:a g:39:a g:39:a g:3e:a g:3e:a g:42:a g:42:a pi:39:a pi:3e:a pi:42:a t14 p:24:0 p:2e:0 p:3b:0 p:45:0 t2 p:2a:a p:3b:a p:45:a t14 p:2a:0 p:3b:0",
"wav_name": "sample",
"sound_font_path": "assets/default_sound_font.sf2",
}
}
}
@router.post("/text-to-wav", tags=["MIDI"])
def text_to_wav(body: TextToWavBody):
"""
Install fluidsynth first, see more: https://github.com/FluidSynth/fluidsynth/wiki/Download#distributions
"""
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
text = body.text.strip()
if not text.startswith("<start>"):
text = "<start> " + text
if not text.endswith("<end>"):
text = text + " <end>"
txt_path = f"midi/{body.wav_name}.txt"
midi_path = f"midi/{body.wav_name}.mid"
wav_path = f"midi/{body.wav_name}.wav"
with open(txt_path, "w") as f:
f.write(text)
txt_to_midi(TxtToMidiBody(txt_path=txt_path, midi_path=midi_path))
midi_to_wav(
MidiToWavBody(
midi_path=midi_path, wav_path=wav_path, sound_font_path=body.sound_font_path
)
)
return "success"

View File

@@ -0,0 +1,131 @@
from fastapi import APIRouter, HTTPException, status
from utils.rwkv import AbstractRWKV
import global_var
router = APIRouter()
@router.get("/dashboard/billing/credit_grants", tags=["MISC"])
def credit_grants():
return {
"object": "credit_summary",
"total_granted": 10000,
"total_used": 0,
"total_available": 10000,
"grants": {
"object": "list",
"data": [
{
"object": "credit_grant",
"grant_amount": 10000,
"used_amount": 0,
"effective_at": 1672531200,
"expires_at": 33229440000,
}
],
},
}
fake_models = [
{
"id": "gpt-3.5-turbo",
"object": "model",
"created": 1677610602,
"owned_by": "openai",
"permission": [
{
"id": "modelperm-zy5TOjnE2zVaicIcKO9bQDgX",
"object": "model_permission",
"created": 1690864883,
"allow_create_engine": False,
"allow_sampling": True,
"allow_logprobs": True,
"allow_search_indices": False,
"allow_view": True,
"allow_fine_tuning": False,
"organization": "*",
"group": None,
"is_blocking": False,
}
],
"root": "gpt-3.5-turbo",
"parent": None,
},
{
"id": "text-davinci-003",
"object": "model",
"created": 1669599635,
"owned_by": "openai-internal",
"permission": [
{
"id": "modelperm-a6niqBmW2JaGmo0fDO7FEt1n",
"object": "model_permission",
"created": 1690930172,
"allow_create_engine": False,
"allow_sampling": True,
"allow_logprobs": True,
"allow_search_indices": False,
"allow_view": True,
"allow_fine_tuning": False,
"organization": "*",
"group": None,
"is_blocking": False,
}
],
"root": "text-davinci-003",
"parent": None,
},
]
@router.get("/v1/models", tags=["MISC"])
@router.get("/models", tags=["MISC"])
def models():
model: AbstractRWKV = global_var.get(global_var.Model)
model_name = model.name if model else "rwkv"
return {
"object": "list",
"data": [
{
"id": model_name,
"object": "model",
"owned_by": "rwkv",
"root": model_name,
"parent": None,
},
*fake_models,
],
}
@router.get("/v1/models/{model_id}", tags=["MISC"])
@router.get("/models/{model_id}", tags=["MISC"])
def model(model_id: str):
for fake_model in fake_models:
if fake_model["id"] == model_id:
return fake_model
if "rwkv" in model_id.lower():
model: AbstractRWKV = global_var.get(global_var.Model)
model_name = model.name if model else "rwkv"
return {
"id": model_name,
"object": "model",
"owned_by": "rwkv",
"root": model_name,
"parent": None,
}
raise HTTPException(
status.HTTP_404_NOT_FOUND,
{
"error": {
"message": f"The model '{model_id}' does not exist",
"type": "invalid_request_error",
"param": "model",
"code": "model_not_found",
}
},
)
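The models listing always reports the currently loaded RWKV model first, followed by the fake OpenAI entries, which keeps OpenAI-compatible clients happy. A minimal sketch of enumerating them (the local URL is an assumption):

# Sketch: enumerating model ids from /v1/models.
import requests

for m in requests.get("http://127.0.0.1:8000/v1/models").json()["data"]:
    print(m["id"])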

View File

@@ -0,0 +1,207 @@
from typing import Any, Dict, List, Union
from utils.log import quick_log
from fastapi import APIRouter, HTTPException, Request, Response, status
from pydantic import BaseModel
import gc
import copy
import global_var
router = APIRouter()
trie = None
dtrie: Dict = {}
max_trie_len = 300
loop_start_id = 1 # to prevent preloaded prompts from being deleted
loop_del_trie_id = loop_start_id
def init():
global trie
try:
import cyac
# import mmap
# import os
#
# if os.path.exists("state_cache.trie"):
# with open("state_cache.trie", "r") as bf:
# buff_object = mmap.mmap(bf.fileno(), 0, access=mmap.ACCESS_READ)
# trie = cyac.Trie.from_buff(buff_object, copy=False)
# else:
trie = cyac.Trie()
except ModuleNotFoundError:
print("cyac not found")
@router.post("/disable-state-cache", tags=["State Cache"])
def disable_state_cache():
global trie, dtrie
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
trie = None
dtrie = {}
gc.collect()
print("state cache disabled")
return "success"
@router.post("/enable-state-cache", tags=["State Cache"])
def enable_state_cache():
global trie, dtrie
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
try:
import cyac
trie = cyac.Trie()
dtrie = {}
gc.collect()
print("state cache enabled")
return "success"
except ModuleNotFoundError:
print("state cache disabled")
raise HTTPException(status.HTTP_400_BAD_REQUEST, "cyac not found")
class AddStateBody(BaseModel):
prompt: str
tokens: List[Union[str, int]]
state: Any
logits: Any
# @router.post("/add-state", tags=["State Cache"])
def add_state(body: AddStateBody):
global trie, dtrie, loop_del_trie_id
# if global_var.get(global_var.Deploy_Mode) is True:
# raise HTTPException(status.HTTP_403_FORBIDDEN)
if trie is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")
import torch
try:
id: int = trie.insert(body.prompt)
devices: List[torch.device] = [tensor.device for tensor in body.state]
dtrie[id] = {
"tokens": copy.deepcopy(body.tokens),
"state": [tensor.cpu() for tensor in body.state],
"logits": copy.deepcopy(body.logits),
"devices": devices,
}
if len(trie) >= max_trie_len:
del_prompt = trie[loop_del_trie_id]
trie.remove(del_prompt)
dtrie[loop_del_trie_id] = None
loop_del_trie_id = loop_del_trie_id + 1
if loop_del_trie_id >= max_trie_len:
loop_del_trie_id = loop_start_id
quick_log(
None,
None,
f"New Trie Id: {id}\nTrie Len: {len(trie)}\nTrie Buff Size: {trie.buff_size()}\nDtrie Buff Size Of Id: {__get_a_dtrie_buff_size(dtrie[id])}",
)
return "success"
except Exception as e:
raise HTTPException(
status.HTTP_400_BAD_REQUEST, f"insert failed, bad prompt.\n{e}"
)
@router.post("/reset-state", tags=["State Cache"])
def reset_state():
global trie, dtrie
if global_var.get(global_var.Deploy_Mode) is True:
raise HTTPException(status.HTTP_403_FORBIDDEN)
if trie is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")
import cyac
trie = cyac.Trie()
dtrie = {}
gc.collect()
return "success"
class LongestPrefixStateBody(BaseModel):
prompt: str
def __get_a_dtrie_buff_size(dtrie_v):
# print(sys.getsizeof(dtrie_v["tokens"][0])) # str
# print(sys.getsizeof(dtrie_v["tokens"][0]) * len(dtrie_v["tokens"]))
# print(dtrie_v["state"][0][0].element_size())
# print(dtrie_v["state"][0].nelement())
# print(len(dtrie_v["state"]))
# print(
# len(dtrie_v["state"])
# * dtrie_v["state"][0].nelement()
# * dtrie_v["state"][0][0].element_size()
# )
# print(dtrie_v["logits"][0].element_size())
# print(dtrie_v["logits"].nelement())
# print(dtrie_v["logits"][0].element_size() * dtrie_v["logits"].nelement())
return 54 * len(dtrie_v["tokens"]) + 491520 + 262144 + 28 # TODO
# @router.post("/longest-prefix-state", tags=["State Cache"])
def longest_prefix_state(body: LongestPrefixStateBody, request: Request):
global trie
# if global_var.get(global_var.Deploy_Mode) is True:
# raise HTTPException(status.HTTP_403_FORBIDDEN)
if trie is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")
import torch
id = -1
try:
for id, len in trie.prefix(body.prompt):
pass
except:
pass
if id != -1:
v = dtrie[id]
devices: List[torch.device] = v["devices"]
prompt: str = trie[id]
quick_log(request, body, "Hit:\n" + prompt)
return {
"prompt": prompt,
"tokens": v["tokens"],
"state": [tensor.to(devices[i]) for i, tensor in enumerate(v["state"])],
"logits": v["logits"],
}
else:
return {"prompt": "", "tokens": [], "state": None, "logits": None}
# @router.post("/save-state", tags=["State Cache"])
def save_state():
global trie
# if global_var.get(global_var.Deploy_Mode) is True:
# raise HTTPException(status.HTTP_403_FORBIDDEN)
if trie is None:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "trie not loaded")
# trie.save("state_cache.trie")
return "not implemented"

View File

@@ -0,0 +1,124 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "element_wise.h"
#include "util.h"
// Equivalent Python code:
// ww = t_first + k
// p = torch.maximum(pp, ww)
// e1 = torch.exp(pp - p)
// e2 = torch.exp(ww - p)
// wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
// ww = t_decay + pp
// p = torch.maximum(ww, k)
// e1 = torch.exp(ww - p)
// e2 = torch.exp(k - p)
// t1 = e1 * aa + e2 * v
// t2 = e1 * bb + e2
// r = r * wkv
// return t1, t2, p, r
struct WkvForwardOne {
const float *t_first;
const float *k;
const float *pp;
const float *aa;
const float *bb;
const float *t_decay;
const float *v;
/* out */ float *t1;
/* out */ float *t2;
/* out */ float *p;
/* in & out */ half *r;
__device__ void operator()(int i) const {
float ww = t_first[i] + k[i];
float pp_ = pp[i];
float p_ = (pp_ > ww) ? pp_ : ww;
float e1 = expf(pp_ - p_);
float e2 = expf(ww - p_);
float aa_ = aa[i];
float bb_ = bb[i];
float v_ = v[i];
r[i] = __hmul(r[i], __float2half(((e1 * aa_ + e2 * v_) / (e1 * bb_ + e2))));
ww = t_decay[i] + pp_;
float k_ = k[i];
p_ = (ww > k_) ? ww : k_;
e1 = expf(ww - p_);
e2 = expf(k_ - p_);
t1[i] = e1 * aa_ + e2 * v_;
t2[i] = e1 * bb_ + e2;
p[i] = p_;
}
};
/*
Equivalent Python code:
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
*/
struct Mix {
const half *xx;
const half *sx;
const half *k_mix;
const half *v_mix;
const half *r_mix;
/* out */ half *kx;
/* out */ half *vx;
/* out */ half *rx;
__device__ void operator()(int i) const {
half xx_ = xx[i];
half sx_ = sx[i];
half k_mix_ = k_mix[i];
half v_mix_ = v_mix[i];
half r_mix_ = r_mix[i];
kx[i] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
vx[i] = __hadd(__hmul(xx_, v_mix_),
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
rx[i] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
};
using torch::Tensor;
void gemm_fp16_cublas_tensor(Tensor a, Tensor b, Tensor c);
Tensor att_one(Tensor x, Tensor ln_w, Tensor ln_b, Tensor sx, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw,
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx, Tensor rw,
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
/* imm */ Tensor k, Tensor pp, Tensor ww, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor v, /* in & out */ Tensor r,
/* out */ Tensor x_plus_out, /* out */ Tensor t1,
/* out */ Tensor t2, /* out */ Tensor p) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
element_wise(Mix{data_ptr<half>(xx), data_ptr<half>(sx),
data_ptr<half>(k_mix), data_ptr<half>(v_mix),
data_ptr<half>(r_mix), data_ptr<half>(kx),
data_ptr<half>(vx), data_ptr<half>(rx)},
x.numel());
gemm_fp16_cublas_tensor(kx, kw, k);
gemm_fp16_cublas_tensor(vx, vw, v);
gemm_fp16_cublas_tensor(rx, rw, r);
at::sigmoid_(r);
element_wise(WkvForwardOne{data_ptr<float>(t_first), data_ptr<float>(k),
data_ptr<float>(pp), data_ptr<float>(aa),
data_ptr<float>(bb), data_ptr<float>(t_decay),
data_ptr<float>(v), data_ptr<float>(t1),
data_ptr<float>(t2), data_ptr<float>(p),
data_ptr<half>(r)},
x.numel());
gemm_fp16_cublas_tensor(r, ow, x_plus_out);
x_plus_out += x;
return xx;
}
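The comment at the top of this file gives the Python equivalent of WkvForwardOne; for clarity, here is that recurrence transcribed as a runnable PyTorch function (all tensors assumed to be 1-D of length C, with r in fp16):

# Sketch: the single-token WKV update fused by WkvForwardOne, transcribed
# from the "Equivalent Python code" comment above.
import torch

def wkv_forward_one(t_first, k, pp, aa, bb, t_decay, v, r):
    ww = t_first + k
    p = torch.maximum(pp, ww)
    e1 = torch.exp(pp - p)
    e2 = torch.exp(ww - p)
    wkv = (e1 * aa + e2 * v) / (e1 * bb + e2)
    r = r * wkv.to(r.dtype)       # r is updated in place by the kernel
    ww = t_decay + pp
    p = torch.maximum(ww, k)
    e1 = torch.exp(ww - p)
    e2 = torch.exp(k - p)
    t1 = e1 * aa + e2 * v
    t2 = e1 * bb + e2
    return t1, t2, p, r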

View File

@@ -0,0 +1,109 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "element_wise.h"
#include "util.h"
// Equivalent Python code:
// s1 = t_first * a + s
// s2 = a + t_decay * s
struct Fused1 {
const float *t_first;
const float *t_decay;
const float *a;
const float *s;
const int32_t inner_size;
/* out */ float *s1;
/* out */ float *s2;
__device__ void operator()(int i) const {
const int j = i / inner_size;
s1[i] = t_first[j] * a[i] + s[i];
s2[i] = a[i] + t_decay[j] * s[i];
}
};
/*
Equivalent Python code:
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
*/
struct Mix {
const half *xx;
const half *sx;
const half *k_mix;
const half *v_mix;
const half *r_mix;
/* out */ half *kx;
/* out */ half *vx;
/* out */ half *rx;
__device__ void operator()(int i) const {
half xx_ = xx[i];
half sx_ = sx[i];
half k_mix_ = k_mix[i];
half v_mix_ = v_mix[i];
half r_mix_ = r_mix[i];
kx[i] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
vx[i] = __hadd(__hmul(xx_, v_mix_),
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
rx[i] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
};
using torch::Tensor;
void gemm_fp16_cublas_tensor(Tensor a, Tensor b, Tensor c);
Tensor att_one_v5(Tensor x, Tensor sx, Tensor s, Tensor ln_w, Tensor ln_b,
Tensor lx_w, Tensor lx_b, Tensor k_mix, Tensor v_mix,
Tensor r_mix, Tensor kw,
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx,
Tensor rw,
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
/* imm */ Tensor k, Tensor t_decay, /* imm */ Tensor v,
/* imm */ Tensor r, /* imm */ Tensor s1,
/* out */ Tensor x_plus_out, /* out */ Tensor s2) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
element_wise(Mix{data_ptr<half>(xx), data_ptr<half>(sx),
data_ptr<half>(k_mix), data_ptr<half>(v_mix),
data_ptr<half>(r_mix), data_ptr<half>(kx),
data_ptr<half>(vx), data_ptr<half>(rx)},
x.numel());
int H = t_decay.size(0);
int S = x.size(-1) / H;
gemm_fp16_cublas_tensor(rx, rw, r);
r = at::reshape(r, {H, 1, S});
gemm_fp16_cublas_tensor(kx, kw, k);
k = at::reshape(k, {H, S, 1});
gemm_fp16_cublas_tensor(vx, vw, v);
v = at::reshape(v, {H, 1, S});
{
Tensor a = at::matmul(k, v);
// s1 = t_first * a + s
// s2 = a + t_decay * s
element_wise(Fused1{data_ptr<float>(t_first), data_ptr<float>(t_decay),
data_ptr<float>(a), data_ptr<float>(s),
static_cast<int32_t>(a.size(1) * a.size(2)),
data_ptr<float>(s1), data_ptr<float>(s2)},
a.numel());
}
Tensor out = at::matmul(r, s1);
out = at::flatten(out);
out = at::squeeze(at::group_norm(at::unsqueeze(out, 0), H, lx_w, lx_b), 0);
out = at::_cast_Half(out);
gemm_fp16_cublas_tensor(out, ow, x_plus_out);
x_plus_out += x;
return xx;
}
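
The shape bookkeeping in att_one_v5 reduces to a per-head outer-product state update; a hedged PyTorch restatement (names introduced here; shapes follow the reshapes above, with Fused1 broadcasting t_first and t_decay per head):

import torch

def att_one_v5_reference(r, k, v, s, t_first, t_decay, lx_w, lx_b):
    # r: (H, 1, S), k: (H, S, 1), v: (H, 1, S), s: (H, S, S) recurrent state
    H = s.shape[0]
    a = k @ v                            # per-head outer product, (H, S, S)
    s1 = t_first.view(H, 1, 1) * a + s   # Fused1 output s1
    s2 = a + t_decay.view(H, 1, 1) * s   # Fused1 output s2 (next state)
    out = (r @ s1).flatten()             # (C,) with C = H * S
    out = torch.group_norm(out.unsqueeze(0), H, lx_w, lx_b).squeeze(0)
    return out, s2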


@@ -0,0 +1,178 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "util.h"
#include "element_wise.h"
using torch::Tensor;
void gemm_fp16_cublas(const void *a, const void *b, void *c, int m,
int n, int k, bool output_fp32);
// based on `kernel_wkv_forward`, fusing more operations
__global__ void kernel_wkv_forward_new(
const int B, const int T, const int C, const float *__restrict__ const _w,
const float *__restrict__ const _u, const float *__restrict__ const _k,
const float *__restrict__ const _v, const half *__restrict__ const r,
half *__restrict__ const _y, float *__restrict__ const _aa,
float *__restrict__ const _bb, float *__restrict__ const _pp) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
const int _state_offset = _b * C + _c;
float u = _u[_c];
float w = _w[_c];
const float *__restrict__ const k = _k + _offset;
const float *__restrict__ const v = _v + _offset;
half *__restrict__ const y = _y + _offset;
float aa = _aa[_state_offset];
float bb = _bb[_state_offset];
float pp = _pp[_state_offset];
for (int i = 0; i < T; i++) {
const int ii = i * C;
const float kk = k[ii];
const float vv = v[ii];
float ww = u + kk;
float p = max(pp, ww);
float e1 = exp(pp - p);
float e2 = exp(ww - p);
y[ii] = __float2half((e1 * aa + e2 * vv) / (e1 * bb + e2));
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
_aa[_state_offset] = aa;
_bb[_state_offset] = bb;
_pp[_state_offset] = pp;
}
void cuda_wkv_forward_new(int B, int T, int C, float *w, float *u, float *k,
float *v, half *r, half *y, float *aa, float *bb,
float *pp) {
dim3 threadsPerBlock(min(C, 32));
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_wkv_forward_new<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, r,
y, aa, bb, pp);
}
__global__ void _att_mix(const half *xx, const half *sx, const half *k_mix,
const half *v_mix, const half *r_mix,
const int outer_size, const int inner_size, half *kx,
half *vx, half *rx) {
for (int idx2 = blockIdx.x * blockDim.x + threadIdx.x; idx2 < inner_size;
idx2 += blockDim.x * gridDim.x) {
half k_mix_ = k_mix[idx2];
half v_mix_ = v_mix[idx2];
half r_mix_ = r_mix[idx2];
for (int row = 0; row < outer_size; ++row) {
int idx1 = row * inner_size + idx2;
half xx_ = xx[idx1];
half sx_ = sx[idx1];
kx[idx1] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
vx[idx1] = __hadd(__hmul(xx_, v_mix_),
__hmul(sx_, __hsub(__float2half(1), v_mix_)));
rx[idx1] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
}
}
void att_mix(const half *xx, const half *sx, const half *k_mix,
const half *v_mix, const half *r_mix, const int outer_size,
const int inner_size, half *kx, half *vx, half *rx) {
// 256 is good enough on most GPUs
const int32_t BLOCK_SIZE = 256;
assert(inner_size % BLOCK_SIZE == 0);
_att_mix<<<inner_size / BLOCK_SIZE, BLOCK_SIZE>>>(
xx, sx, k_mix, v_mix, r_mix, outer_size, inner_size, kx, vx, rx);
}
struct InplaceSigmoid {
__device__ __forceinline__ void operator()(int i) const {
ptr[i] = __float2half(1.0 / (1.0 + exp(-__half2float(ptr[i]))));
}
half *ptr;
};
struct InplaceMul {
__device__ __forceinline__ void operator()(int i) const {
y[i] = __hmul(x[i], y[i]);
}
half *y;
half *x;
};
/*
Equivalent Python code:
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
k = gemm(kx, kw, output_dtype=torch.float32)
v = gemm(vx, vw, output_dtype=torch.float32)
T = x.shape[0]
for t in range(T):
kk = k[t]
vv = v[t]
ww = t_first + kk
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, kk)
e1 = torch.exp(ww - p)
e2 = torch.exp(kk - p)
aa = e1 * aa + e2 * vv
bb = e1 * bb + e2
pp = p
out = gemm(r * sx, ow)
return x + out, xx[-1,:], aa, bb, pp
*/
Tensor att_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
Tensor ow, Tensor t_first, Tensor pp, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor buf, /* out */ Tensor x_plus_out) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
sx = at::cat({sx.unsqueeze(0), xx.slice(0, 0, -1)}, 0);
char* buf_ptr = (char*)buf.data_ptr();
half* kx = (half*)buf_ptr;
half* vx = kx + x.numel();
half* rx = vx + x.numel();
half* wkv_y = rx + x.numel();
att_mix(data_ptr<half>(xx), data_ptr<half>(sx), data_ptr<half>(k_mix),
data_ptr<half>(v_mix), data_ptr<half>(r_mix), xx.size(0), xx.size(1),
kx, vx, rx);
float* k = reinterpret_cast<float*>(wkv_y + x.numel());
float* v = k + x.size(0) * kw.size(1);
half* r = reinterpret_cast<half*>(v + x.size(0) * vw.size(1));
gemm_fp16_cublas(kx, kw.data_ptr(), k, x.size(0), kw.size(1), kw.size(0), true);
gemm_fp16_cublas(vx, vw.data_ptr(), v, x.size(0), vw.size(1), vw.size(0), true);
gemm_fp16_cublas(rx, rw.data_ptr(), r, x.size(0), rw.size(1), rw.size(0), false);
element_wise(InplaceSigmoid{r}, x.size(0) * rw.size(1));
cuda_wkv_forward_new(1, x.size(0), x.size(1), data_ptr<float>(t_decay),
data_ptr<float>(t_first), k, v, r,
wkv_y, data_ptr<float>(aa),
data_ptr<float>(bb), data_ptr<float>(pp));
element_wise(InplaceMul{wkv_y, r}, x.numel());
gemm_fp16_cublas(wkv_y, ow.data_ptr(), x_plus_out.data_ptr(), x.size(0), ow.size(1), ow.size(0), false);
x_plus_out += x;
return xx;
}
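
Note how att_seq carves its scratch tensor: four half buffers of T*C elements (kx, vx, rx, wkv_y), fp32 k and v produced by the output_fp32=true gemms, then a half r. A sketch of the required size, using a hypothetical helper not present in the source:

def att_seq_buf_bytes(T, C, kw_out, vw_out, rw_out):
    n_half = 4 * T * C + T * rw_out    # kx, vx, rx, wkv_y, then r
    n_float = T * kw_out + T * vw_out  # k and v are produced in fp32
    return 2 * n_half + 4 * n_float    # sizeof(half) = 2, sizeof(float) = 4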


@@ -0,0 +1,21 @@
#include <cassert>
#include <cstddef>
#include <cstdint>
template <typename Func> __global__ void _element_wise(Func func, int n) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
i += blockDim.x * gridDim.x) {
func(i);
}
}
// NOTE: packed data type (e.g. float4) is overkill for current sizes
// (4096 in 7B model and 768 in 0.1B model),
// and is not faster than the plain float version.
template <typename Func>
void element_wise(Func func, int n) {
// 256 is good enough on most GPUs
const int32_t BLOCK_SIZE = 256;
assert(n % BLOCK_SIZE == 0);
_element_wise<<<n / BLOCK_SIZE, BLOCK_SIZE>>>(func, n);
}

backend-python/rwkv_pip/beta/cuda/ffn.cu vendored Normal file (+165)

@@ -0,0 +1,165 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "element_wise.h"
#include "util.h"
using torch::Tensor;
void gemm_fp16_cublas(const void *a, const void *b, void *c, int ori_m,
int ori_n, int ori_k, bool output_fp32);
__global__ void _ffn_seq_mix(const half *xx, const half *sx, const half *k_mix,
const half *r_mix, const int outer_size,
const int inner_size, half *kx, half *rx) {
for (int idx2 = blockIdx.x * blockDim.x + threadIdx.x; idx2 < inner_size;
idx2 += blockDim.x * gridDim.x) {
half k_mix_ = k_mix[idx2];
half r_mix_ = r_mix[idx2];
for (int row = 0; row < outer_size; ++row) {
int idx1 = row * inner_size + idx2;
half xx_ = xx[idx1];
half sx_ = sx[idx1];
kx[idx1] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
rx[idx1] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
}
}
void ffn_seq_mix(const half *xx, const half *sx, const half *k_mix,
const half *r_mix, const int outer_size, const int inner_size,
half *kx, half *rx) {
// 256 is good enough on most GPUs
const int32_t BLOCK_SIZE = 256;
assert(inner_size % BLOCK_SIZE == 0);
_ffn_seq_mix<<<inner_size / BLOCK_SIZE, BLOCK_SIZE>>>(
xx, sx, k_mix, r_mix, outer_size, inner_size, kx, rx);
}
struct InplaceSigmoid {
__device__ __forceinline__ void operator()(int i) const {
ptr[i] = __float2half(1.0 / (1.0 + exp(-__half2float(ptr[i]))));
}
half *ptr;
};
struct InplaceReLUAndSquare {
__device__ __forceinline__ void operator()(int i) const {
// __hmax is not defined in older CUDA versions
if (__hgt(ptr[i], __float2half(0))) {
ptr[i] = __hmul(ptr[i], ptr[i]);
} else {
ptr[i] = __float2half(0);
}
}
half *ptr;
};
struct InplaceFma {
__device__ __forceinline__ void operator()(int i) const {
a[i] = __hfma(a[i], b[i], c[i]);
}
half *a;
const half *b;
const half *c;
};
/*
Equivalent Python code:
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
vx = torch.square(torch.relu(gemm(kx, kw)))
out = r * gemm(vx, vw)
return x + out, xx[-1,:]
*/
Tensor ffn_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
/* out */ Tensor x_plus_out) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
sx = at::cat({sx.unsqueeze(0), xx.slice(0, 0, -1)}, 0);
char *buf_ptr = (char *)buf.data_ptr();
half *kx = (half *)buf_ptr;
half *rx = kx + x.numel();
half *vx = rx + x.numel();
half *r = vx + x.size(0) * kw.size(1);
ffn_seq_mix(data_ptr<half>(xx), data_ptr<half>(sx), data_ptr<half>(k_mix),
data_ptr<half>(r_mix), xx.size(0), xx.size(1), kx, rx);
gemm_fp16_cublas(rx, rw.data_ptr(), r, x.size(0), rw.size(1), x.size(1),
false);
element_wise(InplaceSigmoid{r}, x.size(0) * rw.size(1));
gemm_fp16_cublas(kx, kw.data_ptr(), vx, x.size(0), kw.size(1), x.size(1),
false);
element_wise(InplaceReLUAndSquare{vx}, x.size(0) * kw.size(1));
gemm_fp16_cublas(vx, vw.data_ptr(), x_plus_out.data_ptr(), x.size(0),
vw.size(1), vw.size(0), false);
element_wise(InplaceFma{data_ptr<half>(x_plus_out), r, data_ptr<half>(x)},
x_plus_out.numel());
return xx;
}
struct FfnOneMix {
__device__ __forceinline__ void operator()(int idx) {
half k_mix_ = k_mix[idx];
half r_mix_ = r_mix[idx];
half xx_ = xx[idx];
half sx_ = sx[idx];
kx[idx] = __hadd(__hmul(xx_, k_mix_),
__hmul(sx_, __hsub(__float2half(1), k_mix_)));
rx[idx] = __hadd(__hmul(xx_, r_mix_),
__hmul(sx_, __hsub(__float2half(1), r_mix_)));
}
half *k_mix;
half *r_mix;
half *xx;
half *sx;
half *kx;
half *rx;
};
/*
Equivalent Python code:
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(gemm(rx, rw))
vx = torch.square(torch.relu(gemm(kx, kw)))
out = r * gemm(vx, vw)
return x + out, xx
*/
Tensor ffn_one(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
/* out */ Tensor x_plus_out) {
Tensor xx = at::layer_norm(x, {x.size(-1)}, ln_w, ln_b);
char *buf_ptr = (char *)buf.data_ptr();
half *kx = (half *)buf_ptr;
half *rx = kx + x.numel();
half *vx = rx + x.numel();
half *r = vx + x.size(0) * kw.size(1);
element_wise(FfnOneMix{data_ptr<half>(k_mix), data_ptr<half>(r_mix),
data_ptr<half>(xx), data_ptr<half>(sx), kx, rx},
x.numel());
// vector * matrix, so m = 1
gemm_fp16_cublas(rx, rw.data_ptr(), r, 1, rw.size(1), rw.size(0), false);
element_wise(InplaceSigmoid{r}, rw.size(1));
gemm_fp16_cublas(kx, kw.data_ptr(), vx, 1, kw.size(1), kw.size(0), false);
element_wise(InplaceReLUAndSquare{vx}, kw.size(1));
gemm_fp16_cublas(vx, vw.data_ptr(), x_plus_out.data_ptr(), 1, vw.size(1),
vw.size(0), false);
element_wise(InplaceFma{data_ptr<half>(x_plus_out), r, data_ptr<half>(x)},
x_plus_out.numel());
return xx;
}
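
Mapping the fused functors in this file back to dense ops gives the following PyTorch restatement of ffn_one/ffn_seq (a hedged sketch consolidating the embedded equivalent-Python comments; @ denotes the row-major gemm):

import torch

def ffn_reference(x, xx, sx, k_mix, r_mix, kw, vw, rw):
    kx = xx * k_mix + sx * (1 - k_mix)      # FfnOneMix / _ffn_seq_mix
    rx = xx * r_mix + sx * (1 - r_mix)
    r = torch.sigmoid(rx @ rw)              # InplaceSigmoid
    vx = torch.square(torch.relu(kx @ kw))  # InplaceReLUAndSquare
    return (vx @ vw) * r + x                # InplaceFma: a = a * b + c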


@@ -0,0 +1,128 @@
#include <cublas_v2.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#define CUBLAS_CHECK(condition) \
for (cublasStatus_t _cublas_check_status = (condition); \
_cublas_check_status != CUBLAS_STATUS_SUCCESS;) \
throw std::runtime_error("cuBLAS error " + \
std::to_string(_cublas_check_status) + " at " + \
std::to_string(__LINE__));
#define CUDA_CHECK(condition) \
for (cudaError_t _cuda_check_status = (condition); \
_cuda_check_status != cudaSuccess;) \
throw std::runtime_error( \
"CUDA error " + std::string(cudaGetErrorString(_cuda_check_status)) + \
" at " + std::to_string(__LINE__));
cublasHandle_t get_cublas_handle() {
static cublasHandle_t cublas_handle = []() {
cublasHandle_t handle = nullptr;
CUBLAS_CHECK(cublasCreate(&handle));
#if CUDA_VERSION < 11000
CUBLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
#else
CUBLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // CUDA_VERSION < 11000
return handle;
}();
return cublas_handle;
}
/*
NOTE: blas gemm is column-major by default, but we need row-major output.
The data of row-major, transposed matrix is exactly the same as the
column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
*/
void gemm_fp16_cublas(const void *a, const void *b, void *c, int ori_m,
int ori_n, int ori_k, bool output_fp32) {
const auto cuda_data_type = CUDA_R_16F;
const auto cuda_c_data_type = output_fp32 ? CUDA_R_32F : CUDA_R_16F;
const auto compute_type = CUDA_R_32F;
const float sp_alpha = 1.f;
// use CUBLAS_OP_N. see the notes above
const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
// m = (B^T).size(0) = B.size(1) = ori_n;
const int cublas_m = ori_n;
const int cublas_k = ori_k;
// compatible with rwkv "one" mode, where a 1-D tensor multiplies a 2-D tensor
// const int n = a.dense_dim() == 1 ? 1 : a.size(0);
const int cublas_n = ori_m;
const int cublas_lda = cublas_m;
const int cublas_ldb = cublas_k;
const int cublas_ldc = cublas_m;
cublasHandle_t cublas_handle = get_cublas_handle();
#if CUDA_VERSION >= 11000
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
#endif
const float sp_beta = 0.f;
CUBLAS_CHECK(cublasGemmEx(
cublas_handle, cublas_trans_a, cublas_trans_b, cublas_m, cublas_n,
cublas_k, &sp_alpha, b, cuda_data_type, cublas_lda,
a, cuda_data_type, cublas_ldb, &sp_beta, c,
cuda_c_data_type, cublas_ldc, compute_type, algo));
}
/*
NOTE: blas gemm is column-major by default, but we need row-major output.
The data of row-major, transposed matrix is exactly the same as the
column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
*/
void gemm_fp16_cublas_tensor(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
if (a.sizes().size() == 1) {
assert(b.sizes().size() == 2);
a = at::unsqueeze(a, 0);
}
const auto cuda_data_type = CUDA_R_16F;
const auto cuda_c_data_type =
c.dtype() == torch::kFloat32 ? CUDA_R_32F : CUDA_R_16F;
const auto compute_type = CUDA_R_32F;
const float sp_alpha = 1.f;
// swap a and b, and use CUBLAS_OP_N. see the notes above
std::swap(a, b);
const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
// m = (B^T).size(0) = B.size(1), and = A.size(1) after swap,
// negative axis is used because of the existence of batch matmul.
const int m = a.size(-1);
const int k = a.size(-2);
const int n = b.size(-2);
const int cublas_lda = m;
const int cublas_ldb = k;
const int cublas_ldc = m;
cublasHandle_t cublas_handle = get_cublas_handle();
#if CUDA_VERSION >= 11000
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
#endif
const float sp_beta = 0.f;
if (a.sizes().size() == 2 && b.sizes().size() == 2) {
CUBLAS_CHECK(cublasGemmEx(
cublas_handle, cublas_trans_a, cublas_trans_b, m, n, k, &sp_alpha,
a.data_ptr(), cuda_data_type, cublas_lda, b.data_ptr(), cuda_data_type,
cublas_ldb, &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc,
compute_type, algo));
} else {
// batch matmul
assert(a.sizes().size() == 3 && b.sizes().size() == 3);
const long long int cublas_stride_a = m * k;
const long long int cublas_stride_b = k * n;
const long long int cublas_stride_c = m * n;
CUBLAS_CHECK(cublasGemmStridedBatchedEx(
cublas_handle, cublas_trans_a, cublas_trans_b, m,
n, k, &sp_alpha, a.data_ptr(), cuda_data_type, cublas_lda,
cublas_stride_a, b.data_ptr(), cuda_data_type, cublas_ldb, cublas_stride_b,
&sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc, cublas_stride_c,
a.size(0), compute_type, algo));
}
}
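
The row-major/column-major swap described in the NOTE above can be checked numerically; a small NumPy demonstration (illustrative, not part of the source):

import numpy as np

m, k, n = 3, 4, 5
a = np.random.rand(m, k).astype(np.float32)   # row-major A
b = np.random.rand(k, n).astype(np.float32)   # row-major B
# With the operands swapped, the column-major GEMM computes C^T = B^T @ A^T.
ct = b.T @ a.T                                # shape (n, m)
# Its column-major bytes are exactly the row-major bytes of C = A @ B:
c_view = np.frombuffer(ct.tobytes(order="F"), dtype=np.float32).reshape(m, n)
assert np.allclose(c_view, a @ b)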


@@ -0,0 +1,246 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#define MIN_VALUE (-1e38)
typedef at::Half fp16;
__half *cast(fp16 *ptr) {
return reinterpret_cast<__half *>(ptr);
}
template <typename F>
__global__ void kernel_wkv_forward(const int B, const int T, const int C,
const float *__restrict__ const _w, const float *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
F *__restrict__ const _y, float *__restrict__ const _aa, float *__restrict__ const _bb, float *__restrict__ const _pp) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
const int _state_offset = _b * C + _c;
float u = _u[_c];
float w = _w[_c];
const F *__restrict__ const k = _k + _offset;
const F *__restrict__ const v = _v + _offset;
F *__restrict__ const y = _y + _offset;
float aa = _aa[_state_offset];
float bb = _bb[_state_offset];
float pp = _pp[_state_offset];
for (int i = 0; i < T; i++) {
const int ii = i * C;
const float kk = float(k[ii]);
const float vv = float(v[ii]);
float ww = u + kk;
float p = max(pp, ww);
float e1 = exp(pp - p);
float e2 = exp(ww - p);
y[ii] = F((e1 * aa + e2 * vv) / (e1 * bb + e2));
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
_aa[_state_offset] = aa;
_bb[_state_offset] = bb;
_pp[_state_offset] = pp;
}
template <typename F>
void cuda_wkv_forward(int B, int T, int C, float *w, float *u, F *k, F *v, F *y, float *aa, float *bb, float *pp) {
dim3 threadsPerBlock( min(C, 32) );
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_wkv_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, aa, bb, pp);
}
template void cuda_wkv_forward<fp16>(
int B, int T, int C,
float *w, float *u, fp16 *k, fp16 *v, fp16 *y,
float *aa, float *bb, float *pp);
template void cuda_wkv_forward<float>(
int B, int T, int C,
float *w, float *u, float *k, float *v, float *y,
float *aa, float *bb, float *pp);
__global__ void kernel_mm_seq_fp32i8(
const int B, const int N, const int M,
const float *__restrict__ const x, const int x_stride,
const uint8_t *__restrict__ const w, const int w_stride,
const float *__restrict__ const mx,
const float *__restrict__ const rx,
const float *__restrict__ const my,
const float *__restrict__ const ry,
float *__restrict__ const y, const int y_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int k = blockIdx.y * blockDim.y + threadIdx.y;
if (i < B && k < M) {
float y_local = 0;
for (int j = 0; j < N; ++j) {
y_local += x[i * x_stride + j] * (
(float(w[j * w_stride + k]) + 0.5f)
* rx[k] * ry[j] + mx[k] + my[j]
);
}
y[i * y_stride + k] = y_local;
}
}
template <typename F>
void cuda_mm8_seq(int B, int N, int M,
F *x, int x_stride,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
F *y, int y_stride);
template <>
void cuda_mm8_seq<float>(int B, int N, int M,
float *x, int x_stride,
uint8_t *w, int w_stride,
float *mx, float *rx,
float *my, float *ry,
float *y, int y_stride) {
dim3 blockSize(1, 128);
dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_seq_fp32i8<<<gridSize, blockSize>>>(
B, N, M, x, x_stride, w, w_stride,
mx, rx, my, ry, y, y_stride);
}
__global__ void kernel_mm_seq_fp16i8(
const int B, const int N, const int M,
const __half *__restrict__ const x, const int x_stride,
const uint8_t *__restrict__ const w, const int w_stride,
const __half *__restrict__ const mx,
const __half *__restrict__ const rx,
const __half *__restrict__ const my,
const __half *__restrict__ const ry,
__half *__restrict__ const y, const int y_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int k = blockIdx.y * blockDim.y + threadIdx.y;
if (i < B && k < M) {
float y_local = 0;
for (int j = 0; j < N; ++j) {
y_local += __half2float(x[i * x_stride + j]) * (
(float(w[j * w_stride + k]) + 0.5f)
* __half2float(rx[k]) * __half2float(ry[j])
+ __half2float(mx[k]) + __half2float(my[j])
);
}
y[i * y_stride + k] = __float2half(y_local);
}
}
template <>
void cuda_mm8_seq<fp16>(int B, int N, int M,
fp16 *x, int x_stride,
uint8_t *w, int w_stride,
fp16 *mx, fp16 *rx,
fp16 *my, fp16 *ry,
fp16 *y, int y_stride) {
dim3 blockSize(1, 128);
dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_seq_fp16i8<<<gridSize, blockSize>>>(
B, N, M, cast(x), x_stride, w, w_stride,
cast(mx), cast(rx), cast(my), cast(ry), cast(y), y_stride);
}
#define MM8_ONE_JSPLIT 24
#define MM8_ONE_TILE 1024
__global__ void kernel_mm_one_fp32i8(
const int N, const int M,
const float *__restrict__ const x,
const uint8_t *__restrict__ const w, const int w_stride,
const float *__restrict__ const mx,
const float *__restrict__ const rx,
const float *__restrict__ const my,
const float *__restrict__ const ry,
float *__restrict__ const y) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
if (k < M) {
float y_local = 0;
for (int j = j0; j < j1; ++j) {
y_local += x[j] * (
(float(w[j * w_stride + k]) + 0.5f)
* rx[k] * ry[j] + mx[k] + my[j]
);
}
atomicAdd(&y[k], y_local);
}
}
template <typename F>
void cuda_mm8_one(int N, int M,
F *x,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
float *y);
template <>
void cuda_mm8_one<float>(int N, int M,
float *x,
uint8_t *w, int w_stride,
float *mx, float *rx,
float *my, float *ry,
float *y) {
dim3 blockSize(1, MM8_ONE_TILE);
dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_one_fp32i8<<<gridSize, blockSize>>>(
N, M, x, w, w_stride,
mx, rx, my, ry, y);
}
__global__ void kernel_mm_one_fp16i8(
const int N, const int M,
const __half *__restrict__ const x,
const uint8_t *__restrict__ const w, const int w_stride,
const __half *__restrict__ const mx,
const __half *__restrict__ const rx,
const __half *__restrict__ const my,
const __half *__restrict__ const ry,
float *__restrict__ const y) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
if (k < M) {
float y_local = 0;
for (int j = j0; j < j1; ++j) {
y_local += __half2float(x[j]) * (
(float(w[j * w_stride + k]) + 0.5f)
* __half2float(rx[k]) * __half2float(ry[j])
+ __half2float(mx[k]) + __half2float(my[j])
);
}
atomicAdd(&y[k], y_local);
}
}
template <>
void cuda_mm8_one<fp16>(int N, int M,
fp16 *x,
uint8_t *w, int w_stride,
fp16 *mx, fp16 *rx,
fp16 *my, fp16 *ry,
float *y) {
dim3 blockSize(1, MM8_ONE_TILE);
dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_one_fp16i8<<<gridSize, blockSize>>>(
N, M, cast(x), w, w_stride,
cast(mx), cast(rx), cast(my), cast(ry), y);
}
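
Both families of mm8 kernels above dequantize on the fly with the same affine scheme; a NumPy reference (hedged sketch; array names mirror the kernel parameters, and the +0.5 recenters the rounded uint8 values):

import numpy as np

def mm8_reference(x, w, mx, rx, my, ry):
    # x: (B, N) activations; w: (N, M) uint8 quantized weights
    # rx, mx: per-output-column scale/offset; ry, my: per-input-row scale/offset
    w_deq = (w.astype(np.float32) + 0.5) * rx[None, :] * ry[:, None] \
            + mx[None, :] + my[:, None]
    return x @ w_deq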


@@ -0,0 +1,7 @@
#include "ATen/ATen.h"
#include <cuda_fp16.h>
template <typename T> T *data_ptr(torch::Tensor x) { return x.data_ptr<T>(); }
template <> inline half *data_ptr(torch::Tensor x) {
return reinterpret_cast<half *>(x.data_ptr<at::Half>());
}


@@ -0,0 +1,181 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <iostream>
#include <c10/cuda/CUDAGuard.h>
typedef at::Half fp16;
template <typename F>
void cuda_wkv_forward(int B, int T, int C,
float *w, float *u, F *k, F *v, F *y,
float *aa, float *bb, float *pp);
template <typename F>
void cuda_mm8_seq(int B, int N, int M,
F *x, int x_stride,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
F *y, int y_stride);
template <typename F>
void cuda_mm8_one(int N, int M,
F *x,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
float *y);
void wkv_forward(int64_t B, int64_t T, int64_t C,
torch::Tensor &w, torch::Tensor &u,
torch::Tensor &k, torch::Tensor &v, torch::Tensor &y,
torch::Tensor &aa, torch::Tensor &bb, torch::Tensor &pp) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (k.scalar_type()) {
case c10::ScalarType::Half:
cuda_wkv_forward(B, T, C,
w.data_ptr<float>(), u.data_ptr<float>(),
k.data_ptr<fp16>(), v.data_ptr<fp16>(), y.data_ptr<fp16>(),
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
break;
case c10::ScalarType::Float:
cuda_wkv_forward(B, T, C,
w.data_ptr<float>(), u.data_ptr<float>(),
k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(),
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
void mm8_seq(int64_t B, int64_t N, int64_t M,
torch::Tensor &x, torch::Tensor &w,
torch::Tensor &mx, torch::Tensor &rx,
torch::Tensor &my, torch::Tensor &ry,
torch::Tensor &y) {
assert(x.stride(1) == 1);
assert(w.stride(1) == 1);
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
assert(my.stride(0) == 1 && ry.stride(0) == 1);
assert(y.stride(1) == 1);
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (x.scalar_type()) {
case c10::ScalarType::Half:
cuda_mm8_seq(
B, N, M,
x.data_ptr<fp16>(), x.stride(0),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
y.data_ptr<fp16>(), y.stride(0));
break;
case c10::ScalarType::Float:
cuda_mm8_seq(
B, N, M,
x.data_ptr<float>(), x.stride(0),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<float>(), rx.data_ptr<float>(),
my.data_ptr<float>(), ry.data_ptr<float>(),
y.data_ptr<float>(), y.stride(0));
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
void mm8_one(int64_t N, int64_t M,
torch::Tensor &x, torch::Tensor &w,
torch::Tensor &mx, torch::Tensor &rx,
torch::Tensor &my, torch::Tensor &ry,
torch::Tensor &y) {
assert(x.stride(0) == 1);
assert(w.stride(1) == 1);
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
assert(my.stride(0) == 1 && ry.stride(0) == 1);
assert(y.stride(0) == 1);
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (x.scalar_type()) {
case c10::ScalarType::Half:
cuda_mm8_one(
N, M,
x.data_ptr<fp16>(),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
y.data_ptr<float>());
break;
case c10::ScalarType::Float:
cuda_mm8_one(
N, M,
x.data_ptr<float>(),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<float>(), rx.data_ptr<float>(),
my.data_ptr<float>(), ry.data_ptr<float>(),
y.data_ptr<float>());
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
using torch::Tensor;
#ifndef DISABLE_CUBLAS_GEMM
void gemm_fp16_cublas_tensor(Tensor a, Tensor b, Tensor c);
#endif
Tensor att_one(Tensor x, Tensor ln_w, Tensor ln_b, Tensor sx, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw,
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx, Tensor rw,
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
/* imm */ Tensor k, Tensor pp, Tensor ww, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor v, /* in & out */ Tensor r,
/* out */ Tensor x_plus_out, /* out */ Tensor t1,
/* out */ Tensor t2, /* out */ Tensor p);
Tensor att_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor v_mix, Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
Tensor ow, Tensor t_first, Tensor pp, Tensor aa, Tensor bb,
Tensor t_decay, /* imm */ Tensor buf, /* out */ Tensor x_plus_out);
Tensor att_one_v5(Tensor x, Tensor sx, Tensor s, Tensor ln_w, Tensor ln_b,
Tensor lx_w, Tensor lx_b, Tensor k_mix, Tensor v_mix,
Tensor r_mix, Tensor kw,
/* imm */ Tensor kx, Tensor vw, /* imm */ Tensor vx,
Tensor rw,
/* imm */ Tensor rx, Tensor ow, Tensor t_first,
/* imm */ Tensor k, Tensor t_decay, /* imm */ Tensor v,
/* imm */ Tensor r, /* imm */ Tensor s1,
/* out */ Tensor x_plus_out, /* out */ Tensor s2);
Tensor ffn_seq(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
/* out */ Tensor x_plus_out);
Tensor ffn_one(Tensor x, Tensor sx, Tensor ln_w, Tensor ln_b, Tensor k_mix,
Tensor r_mix, Tensor kw, Tensor vw, Tensor rw,
/* imm */ Tensor buf,
/* out */ Tensor x_plus_out);
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("wkv_forward", &wkv_forward, "wkv forward");
m.def("mm8_seq", &mm8_seq, "mm8 seq");
m.def("mm8_one", &mm8_one, "mm8 one");
m.def("gemm_fp16_cublas", &gemm_fp16_cublas_tensor, "gemv fp16 cublas");
m.def("att_one", &att_one, "att one");
m.def("att_one_v5", &att_one_v5, "att one v5");
m.def("att_seq", &att_seq, "att seq");
m.def("ffn_seq", &ffn_seq, "ffn seq");
m.def("ffn_one", &ffn_one, "ffn one");
}
TORCH_LIBRARY(rwkv, m) {
m.def("wkv_forward", wkv_forward);
m.def("mm8_seq", mm8_seq);
m.def("mm8_one", mm8_one);
m.def("gemm_fp16_cublas", gemm_fp16_cublas_tensor);
m.def("att_one", att_one);
m.def("att_one_v5", &att_one_v5);
m.def("att_seq", att_seq);
m.def("ffn_seq", ffn_seq);
m.def("ffn_one", ffn_one);
}
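
One way to build and call these bindings from Python is torch.utils.cpp_extension; a hedged sketch (the source list and flags are illustrative, not taken from the repository's actual loader):

from torch.utils.cpp_extension import load

rwkv_ext = load(
    name="rwkv",
    sources=["wrapper.cpp", "operators.cu", "gemm_fp16_cublas.cpp",
             "att.cu", "att_one_v5.cu", "att_seq.cu", "ffn.cu"],
    extra_cuda_cflags=["-O3"],
    verbose=True,
)
# the PYBIND11_MODULE entries then appear as plain Python functions, e.g.
# rwkv_ext.wkv_forward(B, T, C, w, u, k, v, y, aa, bb, pp)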

backend-python/rwkv_pip/beta/model.py vendored Normal file (+1821)

File diff suppressed because it is too large



@@ -0,0 +1,75 @@
#include <cublas_v2.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAContext.h>
#define CUBLAS_CHECK(condition) \
for (cublasStatus_t _cublas_check_status = (condition); \
_cublas_check_status != CUBLAS_STATUS_SUCCESS;) \
throw std::runtime_error("cuBLAS error " + \
std::to_string(_cublas_check_status) + " at " + \
std::to_string(__LINE__));
#define CUDA_CHECK(condition) \
for (cudaError_t _cuda_check_status = (condition); \
_cuda_check_status != cudaSuccess;) \
throw std::runtime_error( \
"CUDA error " + std::string(cudaGetErrorString(_cuda_check_status)) + \
" at " + std::to_string(__LINE__));
/*
NOTE: blas gemm is column-major by default, but we need row-major output.
The data of row-major, transposed matrix is exactly the same as the
column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
*/
void gemm_fp16_cublas(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(a));
const auto cuda_data_type = CUDA_R_16F;
const auto cuda_c_data_type =
c.dtype() == torch::kFloat32 ? CUDA_R_32F : CUDA_R_16F;
const auto compute_type = CUDA_R_32F;
const float sp_alpha = 1.f;
// swap a and b, and use CUBLAS_OP_N. see the notes above
std::swap(a, b);
const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
// m = (B^T).size(0) = B.size(1), and = A.size(1) after swap,
// negative axis is used because of the existence of batch matmul.
const int m = a.size(-1);
const int k = a.size(-2);
const int n = b.size(-2);
const int cublas_lda = m;
const int cublas_ldb = k;
const int cublas_ldc = m;
cublasHandle_t cublas_handle = at::cuda::getCurrentCUDABlasHandle();
#if CUDA_VERSION >= 11000
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
#endif
const float sp_beta = 0.f;
if (a.sizes().size() == 2 && b.sizes().size() == 2) {
CUBLAS_CHECK(cublasGemmEx(
cublas_handle, cublas_trans_a, cublas_trans_b, m, n, k, &sp_alpha,
a.data_ptr(), cuda_data_type, cublas_lda, b.data_ptr(), cuda_data_type,
cublas_ldb, &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc,
compute_type, algo));
} else {
// batch matmul
assert(a.sizes().size() == 3 && b.sizes().size() == 3);
const long long int cublas_stride_a = m * k;
const long long int cublas_stride_b = k * n;
const long long int cublas_stride_c = m * n;
CUBLAS_CHECK(cublasGemmStridedBatchedEx(
cublas_handle, cublas_trans_a, cublas_trans_b, m,
n, k, &sp_alpha, a.data_ptr(), cuda_data_type, cublas_lda,
cublas_stride_a, b.data_ptr(), cuda_data_type, cublas_ldb, cublas_stride_b,
&sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc, cublas_stride_c,
a.size(0), compute_type, algo));
}
}


@@ -0,0 +1,246 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
#include <cuda_fp16.h>
#define MIN_VALUE (-1e38)
typedef at::Half fp16;
__half *cast(fp16 *ptr) {
return reinterpret_cast<__half *>(ptr);
}
template <typename F>
__global__ void kernel_wkv_forward(const int B, const int T, const int C,
const float *__restrict__ const _w, const float *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
F *__restrict__ const _y, float *__restrict__ const _aa, float *__restrict__ const _bb, float *__restrict__ const _pp) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
const int _state_offset = _b * C + _c;
float u = _u[_c];
float w = _w[_c];
const F *__restrict__ const k = _k + _offset;
const F *__restrict__ const v = _v + _offset;
F *__restrict__ const y = _y + _offset;
float aa = _aa[_state_offset];
float bb = _bb[_state_offset];
float pp = _pp[_state_offset];
for (int i = 0; i < T; i++) {
const int ii = i * C;
const float kk = float(k[ii]);
const float vv = float(v[ii]);
float ww = u + kk;
float p = max(pp, ww);
float e1 = exp(pp - p);
float e2 = exp(ww - p);
y[ii] = F((e1 * aa + e2 * vv) / (e1 * bb + e2));
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
_aa[_state_offset] = aa;
_bb[_state_offset] = bb;
_pp[_state_offset] = pp;
}
template <typename F>
void cuda_wkv_forward(int B, int T, int C, float *w, float *u, F *k, F *v, F *y, float *aa, float *bb, float *pp) {
dim3 threadsPerBlock( min(C, 32) );
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_wkv_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, aa, bb, pp);
}
template void cuda_wkv_forward<fp16>(
int B, int T, int C,
float *w, float *u, fp16 *k, fp16 *v, fp16 *y,
float *aa, float *bb, float *pp);
template void cuda_wkv_forward<float>(
int B, int T, int C,
float *w, float *u, float *k, float *v, float *y,
float *aa, float *bb, float *pp);
__global__ void kernel_mm_seq_fp32i8(
const int B, const int N, const int M,
const float *__restrict__ const x, const int x_stride,
const uint8_t *__restrict__ const w, const int w_stride,
const float *__restrict__ const mx,
const float *__restrict__ const rx,
const float *__restrict__ const my,
const float *__restrict__ const ry,
float *__restrict__ const y, const int y_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int k = blockIdx.y * blockDim.y + threadIdx.y;
if (i < B && k < M) {
float y_local = 0;
for (int j = 0; j < N; ++j) {
y_local += x[i * x_stride + j] * (
(float(w[j * w_stride + k]) + 0.5f)
* rx[k] * ry[j] + mx[k] + my[j]
);
}
y[i * y_stride + k] = y_local;
}
}
template <typename F>
void cuda_mm8_seq(int B, int N, int M,
F *x, int x_stride,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
F *y, int y_stride);
template <>
void cuda_mm8_seq<float>(int B, int N, int M,
float *x, int x_stride,
uint8_t *w, int w_stride,
float *mx, float *rx,
float *my, float *ry,
float *y, int y_stride) {
dim3 blockSize(1, 128);
dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_seq_fp32i8<<<gridSize, blockSize>>>(
B, N, M, x, x_stride, w, w_stride,
mx, rx, my, ry, y, y_stride);
}
__global__ void kernel_mm_seq_fp16i8(
const int B, const int N, const int M,
const __half *__restrict__ const x, const int x_stride,
const uint8_t *__restrict__ const w, const int w_stride,
const __half *__restrict__ const mx,
const __half *__restrict__ const rx,
const __half *__restrict__ const my,
const __half *__restrict__ const ry,
__half *__restrict__ const y, const int y_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int k = blockIdx.y * blockDim.y + threadIdx.y;
if (i < B && k < M) {
float y_local = 0;
for (int j = 0; j < N; ++j) {
y_local += __half2float(x[i * x_stride + j]) * (
(float(w[j * w_stride + k]) + 0.5f)
* __half2float(rx[k]) * __half2float(ry[j])
+ __half2float(mx[k]) + __half2float(my[j])
);
}
y[i * y_stride + k] = __float2half(y_local);
}
}
template <>
void cuda_mm8_seq<fp16>(int B, int N, int M,
fp16 *x, int x_stride,
uint8_t *w, int w_stride,
fp16 *mx, fp16 *rx,
fp16 *my, fp16 *ry,
fp16 *y, int y_stride) {
dim3 blockSize(1, 128);
dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_seq_fp16i8<<<gridSize, blockSize>>>(
B, N, M, cast(x), x_stride, w, w_stride,
cast(mx), cast(rx), cast(my), cast(ry), cast(y), y_stride);
}
#define MM8_ONE_JSPLIT 24
#define MM8_ONE_TILE 1024
__global__ void kernel_mm_one_fp32i8(
const int N, const int M,
const float *__restrict__ const x,
const uint8_t *__restrict__ const w, const int w_stride,
const float *__restrict__ const mx,
const float *__restrict__ const rx,
const float *__restrict__ const my,
const float *__restrict__ const ry,
float *__restrict__ const y) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
if (k < M) {
float y_local = 0;
for (int j = j0; j < j1; ++j) {
y_local += x[j] * (
(float(w[j * w_stride + k]) + 0.5f)
* rx[k] * ry[j] + mx[k] + my[j]
);
}
atomicAdd(&y[k], y_local);
}
}
template <typename F>
void cuda_mm8_one(int N, int M,
F *x,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
float *y);
template <>
void cuda_mm8_one<float>(int N, int M,
float *x,
uint8_t *w, int w_stride,
float *mx, float *rx,
float *my, float *ry,
float *y) {
dim3 blockSize(1, MM8_ONE_TILE);
dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_one_fp32i8<<<gridSize, blockSize>>>(
N, M, x, w, w_stride,
mx, rx, my, ry, y);
}
__global__ void kernel_mm_one_fp16i8(
const int N, const int M,
const __half *__restrict__ const x,
const uint8_t *__restrict__ const w, const int w_stride,
const __half *__restrict__ const mx,
const __half *__restrict__ const rx,
const __half *__restrict__ const my,
const __half *__restrict__ const ry,
float *__restrict__ const y) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
if (k < M) {
float y_local = 0;
for (int j = j0; j < j1; ++j) {
y_local += __half2float(x[j]) * (
(float(w[j * w_stride + k]) + 0.5f)
* __half2float(rx[k]) * __half2float(ry[j])
+ __half2float(mx[k]) + __half2float(my[j])
);
}
atomicAdd(&y[k], y_local);
}
}
template <>
void cuda_mm8_one<fp16>(int N, int M,
fp16 *x,
uint8_t *w, int w_stride,
fp16 *mx, fp16 *rx,
fp16 *my, fp16 *ry,
float *y) {
dim3 blockSize(1, MM8_ONE_TILE);
dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
kernel_mm_one_fp16i8<<<gridSize, blockSize>>>(
N, M, cast(x), w, w_stride,
cast(mx), cast(rx), cast(my), cast(ry), y);
}

backend-python/rwkv_pip/cuda/rwkv5.cu vendored Normal file (+88)

@@ -0,0 +1,88 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;
template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C, const int H, float *__restrict__ _state,
const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
F *__restrict__ const _y)
{
const int b = blockIdx.x / H;
const int h = blockIdx.x % H;
const int i = threadIdx.x;
_w += h*_N_;
_u += h*_N_;
_state += h*_N_*_N_ + i*_N_; // wrong if B > 1 !!!
__shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
float state[_N_];
#pragma unroll
for (int j = 0; j < _N_; j++)
state[j] = _state[j];
__syncthreads();
u[i] = float(_u[i]);
w[i] = _w[i];
__syncthreads();
for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
{
__syncthreads();
r[i] = float(_r[t]);
k[i] = float(_k[t]);
__syncthreads();
const float v = float(_v[t]);
float y = 0;
#pragma unroll
for (int j = 0; j < _N_; j+=4)
{
const float4& r_ = (float4&)(r[j]);
const float4& k_ = (float4&)(k[j]);
const float4& w_ = (float4&)(w[j]);
const float4& u_ = (float4&)(u[j]);
float4& s = (float4&)(state[j]);
float4 x;
x.x = k_.x * v;
x.y = k_.y * v;
x.z = k_.z * v;
x.w = k_.w * v;
y += r_.x * (u_.x * x.x + s.x);
y += r_.y * (u_.y * x.y + s.y);
y += r_.z * (u_.z * x.z + s.z);
y += r_.w * (u_.w * x.w + s.w);
s.x = s.x * w_.x + x.x;
s.y = s.y * w_.y + x.y;
s.z = s.z * w_.z + x.z;
s.w = s.w * w_.w + x.w;
}
_y[t] = F(y);
}
#pragma unroll
for (int j = 0; j < _N_; j++)
_state[j] = state[j];
}
void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
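
Per head, this v5 kernel implements a linear-attention recurrence over an N x N state (N = _N_, the head size). A hedged PyTorch restatement for a single head, batch size 1:

import torch

def rwkv5_head_reference(r, k, v, w, u, S):
    # r, k, v: (T, N); w, u: (N,); S: (N, N) state
    T, N = r.shape
    y = torch.empty(T, N)
    for t in range(T):
        kv = v[t].unsqueeze(1) * k[t].unsqueeze(0)  # kv[i, j] = v_i * k_j
        y[t] = ((u.unsqueeze(0) * kv + S) * r[t].unsqueeze(0)).sum(dim=1)
        S = S * w.unsqueeze(0) + kv                 # decay, then inject kv
    return y, S

The kernel keeps one row of S per thread in registers and vectorizes the inner loop with float4; note its own warning that the _state indexing is wrong for B > 1.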


@@ -0,0 +1,34 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <c10/cuda/CUDAGuard.h>
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;
void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y);
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y);
void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), w.data_ptr<float>(), u.data_ptr<fp16>(), y.data_ptr<fp16>());
}
void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), w.data_ptr<float>(), u.data_ptr<fp32>(), y.data_ptr<fp32>());
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward_bf16", &forward_bf16, "rwkv5 forward_bf16");
m.def("forward_fp16", &forward_fp16, "rwkv5 forward_fp16");
m.def("forward_fp32", &forward_fp32, "rwkv5 forward_fp32");
}
TORCH_LIBRARY(rwkv5, m) {
m.def("forward_bf16", forward_bf16);
m.def("forward_fp16", forward_fp16);
m.def("forward_fp32", forward_fp32);
}

backend-python/rwkv_pip/cuda/rwkv6.cu vendored Normal file (+87)

@@ -0,0 +1,87 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;
template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C, const int H, float *__restrict__ _state,
const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
F *__restrict__ const _y)
{
const int b = blockIdx.x / H;
const int h = blockIdx.x % H;
const int i = threadIdx.x;
_u += h*_N_;
_state += h*_N_*_N_ + i*_N_; // wrong if B > 1 !!!
__shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
float state[_N_];
#pragma unroll
for (int j = 0; j < _N_; j++)
state[j] = _state[j];
__syncthreads();
u[i] = float(_u[i]);
__syncthreads();
for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
{
__syncthreads();
w[i] = _w[t];
r[i] = float(_r[t]);
k[i] = float(_k[t]);
__syncthreads();
const float v = float(_v[t]);
float y = 0;
#pragma unroll
for (int j = 0; j < _N_; j+=4)
{
const float4& r_ = (float4&)(r[j]);
const float4& k_ = (float4&)(k[j]);
const float4& w_ = (float4&)(w[j]);
const float4& u_ = (float4&)(u[j]);
float4& s = (float4&)(state[j]);
float4 x;
x.x = k_.x * v;
x.y = k_.y * v;
x.z = k_.z * v;
x.w = k_.w * v;
y += r_.x * (u_.x * x.x + s.x);
y += r_.y * (u_.y * x.y + s.y);
y += r_.z * (u_.z * x.z + s.z);
y += r_.w * (u_.w * x.w + s.w);
s.x = s.x * w_.x + x.x;
s.y = s.y * w_.y + x.y;
s.z = s.z * w_.z + x.z;
s.w = s.w * w_.w + x.w;
}
_y[t] = F(y);
}
#pragma unroll
for (int j = 0; j < _N_; j++)
_state[j] = state[j];
}
void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y)
{
assert(H*_N_ == C);
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
}
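
rwkv6.cu is nearly identical to rwkv5.cu except that the decay is per token: w[i] = _w[t] is reloaded inside the time loop rather than once per head before it. In the sketch given after rwkv5.cu, this corresponds to w having shape (T, N):

import torch

def rwkv6_head_reference(r, k, v, w, u, S):
    # same recurrence as the rwkv5 sketch, but w: (T, N) -- data-dependent decay
    T, N = r.shape
    y = torch.empty(T, N)
    for t in range(T):
        kv = v[t].unsqueeze(1) * k[t].unsqueeze(0)
        y[t] = ((u.unsqueeze(0) * kv + S) * r[t].unsqueeze(0)).sum(dim=1)
        S = S * w[t].unsqueeze(0) + kv
    return y, S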


@@ -0,0 +1,34 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <c10/cuda/CUDAGuard.h>
typedef at::BFloat16 bf16;
typedef at::Half fp16;
typedef float fp32;
void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y);
void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y);
void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), w.data_ptr<float>(), u.data_ptr<fp16>(), y.data_ptr<fp16>());
}
void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), w.data_ptr<float>(), u.data_ptr<fp32>(), y.data_ptr<fp32>());
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward_bf16", &forward_bf16, "rwkv6 forward_bf16");
m.def("forward_fp16", &forward_fp16, "rwkv6 forward_fp16");
m.def("forward_fp32", &forward_fp32, "rwkv6 forward_fp32");
}
TORCH_LIBRARY(rwkv6, m) {
m.def("forward_bf16", forward_bf16);
m.def("forward_fp16", forward_fp16);
m.def("forward_fp32", forward_fp32);
}

backend-python/rwkv_pip/cuda/wrapper.cpp vendored Normal file (+141)

@@ -0,0 +1,141 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
#include <iostream>
#include <c10/cuda/CUDAGuard.h>
typedef at::Half fp16;
template <typename F>
void cuda_wkv_forward(int B, int T, int C,
float *w, float *u, F *k, F *v, F *y,
float *aa, float *bb, float *pp);
template <typename F>
void cuda_mm8_seq(int B, int N, int M,
F *x, int x_stride,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
F *y, int y_stride);
template <typename F>
void cuda_mm8_one(int N, int M,
F *x,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
float *y);
void wkv_forward(int64_t B, int64_t T, int64_t C,
torch::Tensor &w, torch::Tensor &u,
torch::Tensor &k, torch::Tensor &v, torch::Tensor &y,
torch::Tensor &aa, torch::Tensor &bb, torch::Tensor &pp) {
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (k.scalar_type()) {
case c10::ScalarType::Half:
cuda_wkv_forward(B, T, C,
w.data_ptr<float>(), u.data_ptr<float>(),
k.data_ptr<fp16>(), v.data_ptr<fp16>(), y.data_ptr<fp16>(),
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
break;
case c10::ScalarType::Float:
cuda_wkv_forward(B, T, C,
w.data_ptr<float>(), u.data_ptr<float>(),
k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(),
aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
void mm8_seq(int64_t B, int64_t N, int64_t M,
torch::Tensor &x, torch::Tensor &w,
torch::Tensor &mx, torch::Tensor &rx,
torch::Tensor &my, torch::Tensor &ry,
torch::Tensor &y) {
assert(x.stride(1) == 1);
assert(w.stride(1) == 1);
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
assert(my.stride(0) == 1 && ry.stride(0) == 1);
assert(y.stride(1) == 1);
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (x.scalar_type()) {
case c10::ScalarType::Half:
cuda_mm8_seq(
B, N, M,
x.data_ptr<fp16>(), x.stride(0),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
y.data_ptr<fp16>(), y.stride(0));
break;
case c10::ScalarType::Float:
cuda_mm8_seq(
B, N, M,
x.data_ptr<float>(), x.stride(0),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<float>(), rx.data_ptr<float>(),
my.data_ptr<float>(), ry.data_ptr<float>(),
y.data_ptr<float>(), y.stride(0));
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
void mm8_one(int64_t N, int64_t M,
torch::Tensor &x, torch::Tensor &w,
torch::Tensor &mx, torch::Tensor &rx,
torch::Tensor &my, torch::Tensor &ry,
torch::Tensor &y) {
assert(x.stride(0) == 1);
assert(w.stride(1) == 1);
assert(mx.stride(0) == 1 && rx.stride(0) == 1);
assert(my.stride(0) == 1 && ry.stride(0) == 1);
assert(y.stride(0) == 1);
const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
switch (x.scalar_type()) {
case c10::ScalarType::Half:
cuda_mm8_one(
N, M,
x.data_ptr<fp16>(),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
y.data_ptr<float>());
break;
case c10::ScalarType::Float:
cuda_mm8_one(
N, M,
x.data_ptr<float>(),
w.data_ptr<uint8_t>(), w.stride(0),
mx.data_ptr<float>(), rx.data_ptr<float>(),
my.data_ptr<float>(), ry.data_ptr<float>(),
y.data_ptr<float>());
break;
default:
assert(false && "Only FP16 and FP32 are currently supported");
}
}
using torch::Tensor;
#ifndef DISABLE_CUBLAS_GEMM
void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
#endif
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("wkv_forward", &wkv_forward, "wkv forward");
m.def("mm8_seq", &mm8_seq, "mm8 seq");
m.def("mm8_one", &mm8_one, "mm8 one");
#ifndef DISABLE_CUBLAS_GEMM
m.def("gemm_fp16_cublas", &gemm_fp16_cublas, "gemv fp16 cublas");
#endif
}
TORCH_LIBRARY(rwkv, m) {
m.def("wkv_forward", wkv_forward);
m.def("mm8_seq", mm8_seq);
m.def("mm8_one", mm8_one);
#ifndef DISABLE_CUBLAS_GEMM
m.def("gemm_fp16_cublas", gemm_fp16_cublas);
#endif
}

backend-python/rwkv_pip/model.py vendored Normal file (+2480)

File diff suppressed because it is too large

backend-python/rwkv_pip/rwkv5.pyd vendored Normal file (binary, not shown)

backend-python/rwkv_pip/rwkv6.pyd vendored Normal file (binary, not shown)


@@ -0,0 +1,106 @@
########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################
class TRIE:
__slots__ = tuple("ch,to,values,front".split(","))
to: list
values: set
def __init__(self, front=None, ch=None):
self.ch = ch
self.to = [None for ch in range(256)]
self.values = set()
self.front = front
def __repr__(self):
fr = self
ret = []
while fr is not None:
if fr.ch is not None:
ret.append(fr.ch)
fr = fr.front
return "<TRIE %s %s>" % (ret[::-1], self.values)
def add(self, key: bytes, idx: int = 0, val=None):
if idx == len(key):
if val is None:
val = key
self.values.add(val)
return self
ch = key[idx]
if self.to[ch] is None:
self.to[ch] = TRIE(front=self, ch=ch)
return self.to[ch].add(key, idx=idx + 1, val=val)
def find_longest(self, key: bytes, idx: int = 0):
u: TRIE = self
ch: int = key[idx]
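# NOTE: assumes some prefix of key[idx:] is always in the vocab (e.g. every
# single byte is a token); otherwise `ret` below would be unbound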
while u.to[ch] is not None:
u = u.to[ch]
idx += 1
if u.values:
ret = idx, u, u.values
if idx == len(key):
break
ch = key[idx]
return ret
class TRIE_TOKENIZER:
def __init__(self, file_name):
self.idx2token = {}
sorted = [] # must be already sorted
with open(file_name, "r", encoding="utf-8") as f:
lines = f.readlines()
for l in lines:
idx = int(l[: l.index(" ")])
x = eval(l[l.index(" ") : l.rindex(" ")])
x = x.encode("utf-8") if isinstance(x, str) else x
assert isinstance(x, bytes)
assert len(x) == int(l[l.rindex(" ") :])
sorted += [x]
self.idx2token[idx] = x
self.token2idx = {}
for k, v in self.idx2token.items():
self.token2idx[v] = int(k)
self.root = TRIE()
for t, i in self.token2idx.items():
_ = self.root.add(t, val=(t, i))
def encodeBytes(self, src: bytes):
idx: int = 0
tokens = []
while idx < len(src):
_idx: int = idx
idx, _, values = self.root.find_longest(src, idx)
assert idx != _idx
_, token = next(iter(values))
tokens.append(token)
return tokens
def decodeBytes(self, tokens):
return b"".join(map(lambda i: self.idx2token[i], tokens))
def encode(self, src):
return self.encodeBytes(src.encode("utf-8"))
def decode(self, tokens):
try:
return self.decodeBytes(tokens).decode("utf-8")
except:
return "\ufffd" # bad utf-8
def printTokens(self, tokens):
for i in tokens:
s = self.idx2token[i]
try:
s = s.decode("utf-8")
except:
pass
print(f"{repr(s)}{i}", end=" ")
print()
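
A minimal usage sketch for the trie tokenizer (illustrative; the vocab file name matches the one utils.py loads below):

tokenizer = TRIE_TOKENIZER("rwkv_vocab_v20230424.txt")
ids = tokenizer.encode("Hello, RWKV!")
assert tokenizer.decode(ids) == "Hello, RWKV!"
tokenizer.printTokens(ids)  # prints each token next to its id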

File diff suppressed because it is too large

backend-python/rwkv_pip/tokenizer-midi.json vendored Normal file (+20144)

File diff suppressed because it is too large

backend-python/rwkv_pip/utils.py vendored Normal file (+154)

@@ -0,0 +1,154 @@
########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################
import os, sys
import numpy as np
import torch
from torch.nn import functional as F
class PIPELINE_ARGS:
def __init__(
self,
temperature=1.0,
top_p=0.85,
top_k=0,
alpha_frequency=0.2,
alpha_presence=0.2,
alpha_decay=0.996,
token_ban=[],
token_stop=[],
chunk_len=256,
):
self.temperature = temperature
self.top_p = top_p
self.top_k = top_k
self.alpha_frequency = alpha_frequency # Frequency Penalty (as in GPT-3)
self.alpha_presence = alpha_presence # Presence Penalty (as in GPT-3)
self.alpha_decay = alpha_decay # gradually decay the penalty
self.token_ban = token_ban # ban the generation of some tokens
self.token_stop = token_stop # stop generation whenever you see any token here
self.chunk_len = (
chunk_len # split input into chunks to save VRAM (shorter -> slower)
)
class PIPELINE:
def __init__(self, model, WORD_NAME: str):
self.model = model
if WORD_NAME == "cl100k_base":
import tiktoken
self.tokenizer = tiktoken.get_encoding(WORD_NAME)
elif WORD_NAME == "rwkv_vocab_v20230424":
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from rwkv_tokenizer import TRIE_TOKENIZER
self.tokenizer = TRIE_TOKENIZER(
os.path.dirname(os.path.abspath(__file__)) + "/rwkv_vocab_v20230424.txt"
)
else:
if WORD_NAME.endswith(".txt"):
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from rwkv_tokenizer import TRIE_TOKENIZER
self.tokenizer = TRIE_TOKENIZER(WORD_NAME)
else:
from tokenizers import Tokenizer
self.tokenizer = Tokenizer.from_file(WORD_NAME)
def refine_context(self, context):
context = context.strip().split("\n")
for c in range(len(context)):
context[c] = context[c].strip().strip("\u3000").strip("\r")
context = list(filter(lambda c: c != "", context))
context = "\n" + ("\n".join(context)).strip()
if context == "":
context = "\n"
return context
def encode(self, x):
if "Tokenizer" in str(type(self.tokenizer)):
return self.tokenizer.encode(x).ids
else:
return self.tokenizer.encode(x)
def decode(self, x):
return self.tokenizer.decode(x)
def sample_logits(self, logits, temperature=1.0, top_p=0.85, top_k=0):
probs = F.softmax(logits.float(), dim=-1)
top_k = int(top_k)
# 'privateuseone' is the type of custom devices like `torch_directml.device()`
if probs.device.type in ["cpu", "privateuseone"]:
probs = probs.cpu().numpy()
sorted_ids = np.argsort(probs)
sorted_probs = probs[sorted_ids][::-1]
cumulative_probs = np.cumsum(sorted_probs)
cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
probs[probs < cutoff] = 0
if top_k < len(probs) and top_k > 0:
probs[sorted_ids[:-top_k]] = 0
if temperature != 1.0:
probs = probs ** (1.0 / temperature)
probs = probs / np.sum(probs)
out = np.random.choice(a=len(probs), p=probs)
return int(out)
else:
sorted_ids = torch.argsort(probs)
sorted_probs = probs[sorted_ids]
sorted_probs = torch.flip(sorted_probs, dims=(0,))
cumulative_probs = torch.cumsum(sorted_probs, dim=-1).cpu().numpy()
cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
probs[probs < cutoff] = 0
if top_k < len(probs) and top_k > 0:
probs[sorted_ids[:-top_k]] = 0
if temperature != 1.0:
probs = probs ** (1.0 / temperature)
out = torch.multinomial(probs, num_samples=1)[0]
return int(out)
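# Worked example with illustrative numbers (not from the source): given probs
# [0.5, 0.3, 0.15, 0.05] and top_p=0.85, the sorted cumulative sums are
# 0.5, 0.8, 0.95, 1.0, so the cutoff is the probability at which the cumulative
# sum first reaches 0.85 (here 0.15); only 0.05 falls below it and is zeroed
# before renormalizing and sampling. top_k additionally zeroes all but the
# k most probable entries, and temperature reshapes the surviving mass.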
def generate(
self, ctx, token_count=100, args=PIPELINE_ARGS(), callback=None, state=None
):
all_tokens = []
out_last = 0
out_str = ""
occurrence = {}
for i in range(token_count):
# forward & adjust prob.
tokens = self.encode(ctx) if i == 0 else [token]
while len(tokens) > 0:
out, state = self.model.forward(tokens[: args.chunk_len], state)
tokens = tokens[args.chunk_len :]
for n in args.token_ban:
out[n] = -float("inf")
for n in occurrence:
out[n] -= args.alpha_presence + occurrence[n] * args.alpha_frequency
# sampler
token = self.sample_logits(
out, temperature=args.temperature, top_p=args.top_p, top_k=args.top_k
)
if token in args.token_stop:
break
all_tokens += [token]
for xxx in occurrence:
occurrence[xxx] *= args.alpha_decay
if token not in occurrence:
occurrence[token] = 1
else:
occurrence[token] += 1
# print(occurrence) # debug
# output
tmp = self.decode(all_tokens[out_last:])
if "\ufffd" not in tmp: # is valid utf-8 string?
if callback:
callback(tmp)
out_str += tmp
out_last = i + 1
return out_str
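A short generation sketch using the pipeline above; `model` stands for an already-loaded RWKV model object and is an assumption here:

pipeline = PIPELINE(model, "rwkv_vocab_v20230424")  # model is assumed preloaded
args = PIPELINE_ARGS(temperature=1.0, top_p=0.85, token_stop=[0])
text = pipeline.generate("Hello", token_count=50, args=args,
                         callback=lambda s: print(s, end=""))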

backend-python/rwkv_pip/wkv_cuda.pyd vendored Normal file

Binary file not shown.

backend-python/utils/log.py (49 lines)

@@ -0,0 +1,49 @@
import json
import logging
import logging.handlers  # needed for RotatingFileHandler below
from typing import Any
from fastapi import Request
from pydantic import BaseModel
from enum import Enum
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(levelname)s\n%(message)s")
fh = logging.handlers.RotatingFileHandler(
"api.log", mode="a", maxBytes=3 * 1024 * 1024, backupCount=3, encoding="utf-8"
)
fh.setFormatter(formatter)
logger.addHandler(fh)
class ClsEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, BaseModel):
return obj.dict()
if isinstance(obj, Enum):
return obj.value
return super().default(obj)
def quick_log(request: Request, body: Any, response: str):
try:
logger.info(
f"Client: {request.client if request else ''}\nUrl: {request.url if request else ''}\n"
+ (
f"Body: {json.dumps(body.__dict__, ensure_ascii=False, cls=ClsEncoder)}\n"
if body
else ""
)
+ (f"Data:\n{response}\n" if response else "")
)
except Exception as e:
logger.info(f"Error quick_log request:\n{e}")
async def log_middleware(request: Request):
try:
logger.info(
f"Client: {request.client}\nUrl: {request.url}\nBody: {await request.body()}\n"
)
except Exception as e:
logger.info(f"Error log_middleware request:\n{e}")

backend-python/utils/midi.py vendored Normal file (685 lines)

@@ -0,0 +1,685 @@
# https://github.com/briansemrau/MIDI-LLM-tokenizer
# MIT License
# Copyright (c) 2023 Brian Semrau
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import random
from dataclasses import dataclass
from functools import lru_cache
from math import ceil, floor, log
from typing import Dict, Iterator, List, Optional, Tuple
import mido
@dataclass
class VocabConfig:
# Number of note events. Should be 128.
note_events: int
# Number of wait events. Configurable, must evenly divide max_wait_time.
wait_events: int
# Max wait time in milliseconds to be represented by a single token.
max_wait_time: int
# Number of velocity events. Should be 128 (MIDI velocity values range from 0 to 127).
velocity_events: int
# Number of bins to quantize velocity into. Should evenly divide velocity_events.
velocity_bins: int
# Exponential scaling factor for velocity bin sizes. 1.0 = linear scaling.
velocity_exp: float
# Whether to sort tokens by instrument, note. This should improve data reducibility.
do_token_sorting: bool
# Whether tokens should be represented as combined instrument/note/velocity tokens, or separate tokens for each.
unrolled_tokens: bool
# If non-zero, notes held for this many seconds will be automatically released during str->midi decoding.
decode_end_held_note_delay: float
# If true, repeated notes will be automatically released before playing again during str->midi decoding.
decode_fix_repeated_notes: bool
# List of instrument names to use for binning. Must have at most 16 values.
bin_instrument_names: List[str]
# Indicates which bin name represents percussion instruments on MIDI channel 10.
ch10_instrument_bin_name: str
# Mapping from instrument name to bin name.
program_name_to_bin_name: Dict[str, str]
# Mapping from bin name to program name.
bin_name_to_program_name: Dict[str, str]
# Mapping from program number to instrument name.
instrument_names: Dict[str, str]
def __post_init__(self):
self.validate()
self._instrument_names_str_to_int = {
name: int(i) for i, name in self.instrument_names.items()
}
self._instrument_names_int_to_str = {
int(i): name for i, name in self.instrument_names.items()
}
self._bin_str_to_int = {
name: int(i) for i, name in enumerate(self.bin_instrument_names)
}
self._bin_int_to_instrument_int = [
self._instrument_names_str_to_int[self.bin_name_to_program_name[name]]
if name != self.ch10_instrument_bin_name
else 0
for name in self.bin_instrument_names
]
self._instrument_int_to_bin_int = [
self._bin_str_to_int[self.program_name_to_bin_name[instr]]
if self.program_name_to_bin_name[instr] != ""
else -1
for instr in self.program_name_to_bin_name.keys()
]
self._ch10_bin_int = (
self._bin_str_to_int[self.ch10_instrument_bin_name]
if self.ch10_instrument_bin_name
else -1
)
self.short_instr_bin_names = []
for instr in self.bin_instrument_names:
i = min(1, len(instr))
while instr[:i] in self.short_instr_bin_names:
i += 1
self.short_instr_bin_names.append(instr[:i])
self._short_instrument_names_str_to_int = {
name: int(i) for i, name in enumerate(self.short_instr_bin_names)
}
range_excluding_ch10 = [
(i if i < 9 else i + 1) for i in range(len(self.bin_instrument_names))
]
bins_excluding_ch10 = [
n for n in self.bin_instrument_names if n != self.ch10_instrument_bin_name
]
self.bin_channel_map = {
bin: channel
for channel, bin in zip(range_excluding_ch10, bins_excluding_ch10)
}
if self.ch10_instrument_bin_name:
self.bin_channel_map[self.ch10_instrument_bin_name] = 9
def validate(self):
if self.max_wait_time % self.wait_events != 0:
raise ValueError("max_wait_time must be exactly divisible by wait_events")
if self.velocity_bins < 2:
raise ValueError("velocity_bins must be at least 2")
if len(self.bin_instrument_names) > 16:
raise ValueError("bin_instruments must have at most 16 values")
if (
self.ch10_instrument_bin_name
and self.ch10_instrument_bin_name not in self.bin_instrument_names
):
raise ValueError("ch10_instrument_bin_name must be in bin_instruments")
if self.velocity_exp <= 0:
raise ValueError("velocity_exp must be greater than 0")
@classmethod
def from_json(cls, path: str):
with open(path, "r") as f:
config = json.load(f)
return cls(**config)
class VocabUtils:
def __init__(self, cfg: VocabConfig) -> None:
self.cfg = cfg
@lru_cache(maxsize=128)
def format_wait_token(self, wait: int) -> str:
return f"t{wait}"
@lru_cache(maxsize=128)
def format_note_token(
self, instrument_bin: int, note: int, velocity_bin: int
) -> str:
return f"{self.cfg.short_instr_bin_names[instrument_bin]}:{note:x}:{velocity_bin:x}"
def format_unrolled_note(self, note: int) -> str:
return f"n{note:x}"
def format_unrolled_velocity(self, velocity_bin: int) -> str:
return f"v{velocity_bin:x}"
def format_unrolled_instrument_bin(self, instrument_bin: int) -> str:
return f"i{self.cfg.short_instr_bin_names[instrument_bin]}"
def velocity_to_bin(self, velocity: float) -> int:
velocity = max(0, min(velocity, self.cfg.velocity_events - 1))
binsize = self.cfg.velocity_events / (self.cfg.velocity_bins - 1)
if self.cfg.velocity_exp == 1.0:
return ceil(velocity / binsize)
else:
return ceil(
(
self.cfg.velocity_events
* (
(
self.cfg.velocity_exp
** (velocity / self.cfg.velocity_events)
- 1.0
)
/ (self.cfg.velocity_exp - 1.0)
)
)
/ binsize
)
def bin_to_velocity(self, bin: int) -> int:
binsize = self.cfg.velocity_events / (self.cfg.velocity_bins - 1)
if self.cfg.velocity_exp == 1.0:
return max(0, ceil(bin * binsize - 1))
else:
return max(
0,
ceil(
self.cfg.velocity_events
* log(
((self.cfg.velocity_exp - 1) * binsize * bin)
/ self.cfg.velocity_events
+ 1,
self.cfg.velocity_exp,
)
- 1
),
)
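# Worked example (illustrative): in the linear case (velocity_exp == 1.0) with
# velocity_events=128 and velocity_bins=12, binsize = 128/11 ~ 11.64, so
# velocity_to_bin(64) = ceil(64/11.64) = 6 and bin_to_velocity(6) = ceil(6*11.64 - 1) = 69.
# The round trip is lossy by design; velocity_exp != 1.0 only warps the bin edges.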
def delta_to_wait_ids(self, delta_ms: float) -> Iterator[int]:
def roundi(f: float):
return ceil(f - 0.5)
max_wait_ms = self.cfg.max_wait_time
div = max_wait_ms / self.cfg.wait_events
# if delta_ms // max_wait_ms > 512: # arbitrary limit to avoid excessive time_shifts
# raise ValueError("delta_time is too large")
if delta_ms > max_wait_ms * 10:
delta_ms = max_wait_ms * 10 # truncate time
for _ in range(floor(delta_ms / max_wait_ms)):
yield roundi(max_wait_ms / div)
leftover_time_shift = roundi((delta_ms % max_wait_ms) / div)
if leftover_time_shift > 0:
yield leftover_time_shift
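# Worked example (illustrative): with max_wait_time=1000 and wait_events=125,
# div = 8 ms per wait id, so delta_ms=2300 emits two full waits of
# roundi(1000/8) = 125 plus a leftover roundi(300/8) = 37, i.e. ids [125, 125, 37].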
def prog_data_to_token_data(
self, program: int, channel: int, note: int, velocity: float
) -> Optional[Tuple[int, int, int]]:
if channel == 9:
if self.cfg._ch10_bin_int == -1:
return None
return self.cfg._ch10_bin_int, note, self.velocity_to_bin(velocity)
instrument_bin = self.cfg._instrument_int_to_bin_int[program]
if instrument_bin != -1:
return instrument_bin, note, self.velocity_to_bin(velocity)
return None
def prog_data_list_to_token_data_list(
self, data: List[Tuple[int, int, int, float]]
) -> Iterator[Tuple[int, int, int]]:
for d in data:
token_data = self.prog_data_to_token_data(*d)
if token_data is not None:
yield token_data
def sort_token_data(
self, data: List[Tuple[int, int, int]]
) -> List[Tuple[int, int, int]]:
# ensure order is preserved for tokens with the same instrument, note
data = [(i, n, v, x) for x, (i, n, v) in enumerate(data)]
data.sort(key=lambda x: (x[0] != self.cfg._ch10_bin_int, x[0], x[1], x[3]))
return [(i, n, v) for i, n, v, _ in data]
def data_to_wait_tokens(self, delta_ms: float) -> List[str]:
if delta_ms == 0.0:
return []
return [self.format_wait_token(i) for i in self.delta_to_wait_ids(delta_ms)]
def wait_token_to_delta(self, token: str) -> float:
return self.cfg.max_wait_time / self.cfg.wait_events * int(token[1:])
def note_token_to_data(self, token: str) -> Tuple[int, int, int]:
instr_str, note_str, velocity_str = token.strip().split(":")
instr_bin = self.cfg._short_instrument_names_str_to_int[instr_str]
note = int(note_str, base=16)
velocity = self.bin_to_velocity(int(velocity_str, base=16))
return instr_bin, note, velocity
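# Worked example (illustrative): if an instrument bin's short name is "p",
# format_note_token(bin, 60, 10) yields "p:3c:a" (note and velocity bin in hex),
# and note_token_to_data("p:3c:a") recovers (bin, 60, bin_to_velocity(10)).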
@dataclass
class AugmentValues:
instrument_bin_remap: Dict[int, int]
velocity_mod_factor: float
transpose_semitones: int
time_stretch_factor: float
@classmethod
def default(cls) -> "AugmentValues":
return cls(
instrument_bin_remap={},
velocity_mod_factor=1.0,
transpose_semitones=0,
time_stretch_factor=1.0,
)
@dataclass
class AugmentConfig:
# The number of times to augment each MIDI file. The dataset size will be multiplied by this number.
augment_data_factor: int
# A list of instrument names to randomly swap with each other.
instrument_mixups: List[List[str]]
# A list of percentages to change the note velocity by. 0.0 = no change. 0 is included by default.
velocity_mod_pct: List[float]
# A list of semitones to transpose by. 0 is included by default.
transpose_semitones: List[int]
# A list of percentages to stretch the tempo by. 0.0 = no stretch. 0 is included by default.
time_stretch_pct: List[float]
# Random seed to use for reproducibility.
seed: int
cfg: VocabConfig
def __post_init__(self):
self.validate()
if len(self.velocity_mod_pct) == 0:
self.velocity_mod_pct = [0.0]
if len(self.transpose_semitones) == 0:
self.transpose_semitones = [0]
if len(self.time_stretch_pct) == 0:
self.time_stretch_pct = [0.0]
self._instrument_mixups_int = [
[self.cfg._bin_str_to_int[i] for i in l if i in self.cfg._bin_str_to_int]
for l in self.instrument_mixups
]
self._instrument_mixups_int = [
l for l in self._instrument_mixups_int if len(l) > 0
] # remove empty lists
self._instrument_pool_assignments = {}
self._mixup_pools = []
for pool_i, mixup_list in enumerate(self._instrument_mixups_int):
pool = set()
for i in mixup_list:
pool.add(i)
self._instrument_pool_assignments[i] = pool_i
self._mixup_pools.append(pool)
def validate(self):
if self.augment_data_factor < 1:
raise ValueError("augment_data_factor must be at least 1")
used_instruments = set()
for mixup_list in self.instrument_mixups:
for n in mixup_list:
if n in used_instruments:
raise ValueError(f"Duplicate instrument name: {n}")
used_instruments.add(n)
@classmethod
def from_json(cls, path: str, cfg: VocabConfig):
with open(path, "r") as f:
config = json.load(f)
config["cfg"] = cfg
if "seed" not in config:
config["seed"] = random.randint(0, 2**32 - 1)
return cls(**config)
def get_augment_values(self, filename: str) -> Iterator[AugmentValues]:
# first yield default values
yield AugmentValues.default()
rng = random.Random(self.seed + hash(filename))
for _ in range(int(self.augment_data_factor - 1)):
# randomize order for each pool
randomized_pools = [list(pool) for pool in self._mixup_pools]
for pool in randomized_pools:
rng.shuffle(pool)
# distribute reassignments
instrument_bin_remap = {}
for i, pool in enumerate(randomized_pools):
for j, instrument in enumerate(pool):
instrument_bin_remap[instrument] = randomized_pools[i - 1][j]
yield AugmentValues(
instrument_bin_remap=instrument_bin_remap,
velocity_mod_factor=1.0 + rng.choice(self.velocity_mod_pct),
transpose_semitones=rng.choice(self.transpose_semitones),
time_stretch_factor=1.0 + rng.choice(self.time_stretch_pct),
)
def mix_volume(velocity: int, volume: int, expression: int) -> float:
return velocity * (volume / 127.0) * (expression / 127.0)
def convert_midi_to_str(
cfg: VocabConfig, mid: mido.MidiFile, augment: AugmentValues = None
) -> str:
utils = VocabUtils(cfg)
if augment is None:
augment = AugmentValues.default()
# filter out unknown meta messages before merge (https://github.com/mido/mido/pull/286)
for i in range(len(mid.tracks)):
mid.tracks[i] = [msg for msg in mid.tracks[i] if msg.type != "unknown_meta"]
if len(mid.tracks) > 1:
mid.tracks = [mido.merge_tracks(mid.tracks)]
delta_time_ms = 0.0
tempo = 500000
channel_program = {i: 0 for i in range(16)}
channel_volume = {i: 127 for i in range(16)}
channel_expression = {
i: 127 for i in range(16)
} # unlikely to be useful. expression usually modifies an already played note.
channel_notes = {i: {} for i in range(16)}
channel_pedal_on = {i: False for i in range(16)}
channel_pedal_events = {
i: {} for i in range(16)
} # {channel: {(note, program) -> True}}
started_flag = False
output = ["<start>"]
token_data_buffer: List[
Tuple[int, int, int, float]
] = [] # need to sort notes between wait tokens
def flush_token_data_buffer():
nonlocal token_data_buffer, output, cfg, utils, augment
token_data = [
x for x in utils.prog_data_list_to_token_data_list(token_data_buffer)
]
if augment.instrument_bin_remap or augment.transpose_semitones:
# TODO put transpose in a real function
raw_transpose = (
lambda bin, n: n + augment.transpose_semitones
if bin != cfg._ch10_bin_int
else n
)
octave_shift_if_oob = (
lambda n: n + 12 if n < 0 else n - 12 if n >= cfg.note_events else n
)
# TODO handle ranges beyond 12
# octave_shift_if_oob = lambda n: 0 if n < 0 else (n - cfg.note_events) % 12 + cfg.note_events if n >= cfg.note_events else n
transpose = lambda bin, n: octave_shift_if_oob(raw_transpose(bin, n))
token_data = [
(augment.instrument_bin_remap.get(i, i), transpose(i, n), v)
for i, n, v in token_data
]
if cfg.do_token_sorting:
token_data = utils.sort_token_data(token_data)
if cfg.unrolled_tokens:
for t in token_data:
output += [
utils.format_unrolled_instrument_bin(t[0]),
utils.format_unrolled_note(t[1]),
utils.format_unrolled_velocity(t[2]),
]
else:
output += [utils.format_note_token(*t) for t in token_data]
token_data_buffer = []
def consume_note_program_data(prog: int, chan: int, note: int, vel: float):
nonlocal output, started_flag, delta_time_ms, cfg, utils, token_data_buffer
is_token_valid = (
utils.prog_data_to_token_data(prog, chan, note, vel) is not None
)
if not is_token_valid:
return
if started_flag:
wait_tokens = utils.data_to_wait_tokens(delta_time_ms)
if len(wait_tokens) > 0:
flush_token_data_buffer()
output += wait_tokens
delta_time_ms = 0.0
token_data_buffer.append((prog, chan, note, vel * augment.velocity_mod_factor))
started_flag = True
for msg in mid.tracks[0]:
time_ms = mido.tick2second(msg.time, mid.ticks_per_beat, tempo) * 1000.0
delta_time_ms += time_ms
t = msg.type
if msg.is_meta:
if t == "set_tempo":
tempo = msg.tempo * augment.time_stretch_factor
continue
def handle_note_off(ch, prog, n):
if channel_pedal_on[ch]:
channel_pedal_events[ch][(n, prog)] = True
else:
consume_note_program_data(prog, ch, n, 0)
if n in channel_notes[ch]:
del channel_notes[ch][n]
if t == "program_change":
channel_program[msg.channel] = msg.program
elif t == "note_on":
if msg.velocity == 0:
handle_note_off(msg.channel, channel_program[msg.channel], msg.note)
else:
if (msg.note, channel_program[msg.channel]) in channel_pedal_events[
msg.channel
]:
del channel_pedal_events[msg.channel][
(msg.note, channel_program[msg.channel])
]
consume_note_program_data(
channel_program[msg.channel],
msg.channel,
msg.note,
mix_volume(
msg.velocity,
channel_volume[msg.channel],
channel_expression[msg.channel],
),
)
channel_notes[msg.channel][msg.note] = True
elif t == "note_off":
handle_note_off(msg.channel, channel_program[msg.channel], msg.note)
elif t == "control_change":
if msg.control == 7 or msg.control == 39: # volume
channel_volume[msg.channel] = msg.value
elif msg.control == 11: # expression
channel_expression[msg.channel] = msg.value
elif msg.control == 64: # sustain pedal
channel_pedal_on[msg.channel] = msg.value >= 64
if not channel_pedal_on[msg.channel]:
for note, program in channel_pedal_events[msg.channel]:
handle_note_off(msg.channel, program, note)
channel_pedal_events[msg.channel] = {}
elif msg.control == 123: # all notes off
for channel in channel_notes.keys():
for note in list(channel_notes[channel]).copy():
handle_note_off(channel, channel_program[channel], note)
else:
pass
flush_token_data_buffer()
output.append("<end>")
return " ".join(output)
def generate_program_change_messages(cfg: VocabConfig):
for bin_name, channel in cfg.bin_channel_map.items():
if channel == 9:
continue
program = cfg._instrument_names_str_to_int[
cfg.bin_name_to_program_name[bin_name]
]
yield mido.Message("program_change", program=program, time=0, channel=channel)
yield mido.Message("program_change", program=0, time=0, channel=9)
@dataclass
class DecodeState:
total_time: float # milliseconds
delta_accum: float # milliseconds
current_bin: int
current_note: int
active_notes: Dict[Tuple[int, int], float] # { (channel, note): time started, ... }
def token_to_midi_message(
utils: VocabUtils, token: str, state: DecodeState, end_token_pause: float = 3.0
) -> Iterator[Tuple[Optional[mido.Message], DecodeState]]:
if state is None:
state = DecodeState(
total_time=0.0,
delta_accum=0.0,
current_bin=utils.cfg._short_instrument_names_str_to_int[
utils.cfg.short_instr_bin_names[0]
],
current_note=0,
active_notes={},
)
token = token.strip()
if not token:
yield None, state
return
if token == "<end>":
d = end_token_pause * 1000.0
state.delta_accum += d
state.total_time += d
if utils.cfg.decode_end_held_note_delay != 0.0:
# end held notes
for (channel, note), start_time in list(state.active_notes.items()).copy():
ticks = int(mido.second2tick(state.delta_accum / 1000.0, 480, 500000))
state.delta_accum = 0.0
del state.active_notes[(channel, note)]
yield mido.Message(
"note_off", note=note, time=ticks, channel=channel
), state
yield None, state
return
if token.startswith("<"):
yield None, state
return
if utils.cfg.unrolled_tokens:
if token[0] == "t":
d = utils.wait_token_to_delta(token)
state.delta_accum += d
state.total_time += d
elif token[0] == "n":
state.current_note = int(token[1:], base=16)
elif token[0] == "i":
state.current_bin = utils.cfg._short_instrument_names_str_to_int[token[1:]]
elif token[0] == "v":
current_velocity = utils.bin_to_velocity(int(token[1:], base=16))
channel = utils.cfg.bin_channel_map[
utils.cfg.bin_instrument_names[state.current_bin]
]
ticks = int(mido.second2tick(state.delta_accum / 1000.0, 480, 500000))
state.delta_accum = 0.0
if current_velocity > 0:
yield mido.Message(
"note_on",
note=state.current_note,
velocity=current_velocity,
time=ticks,
channel=channel,
), state
else:
yield mido.Message(
"note_off",
note=state.current_note,
velocity=0,
time=ticks,
channel=channel,
), state
else:
if token[0] == "t" and token[1].isdigit(): # wait token
d = utils.wait_token_to_delta(token)
state.delta_accum += d
state.total_time += d
if utils.cfg.decode_end_held_note_delay != 0.0:
# remove notes that have been held for too long
for (channel, note), start_time in list(
state.active_notes.items()
).copy():
if (
state.total_time - start_time
> utils.cfg.decode_end_held_note_delay * 1000.0
):
ticks = int(
mido.second2tick(state.delta_accum / 1000.0, 480, 500000)
)
state.delta_accum = 0.0
del state.active_notes[(channel, note)]
yield mido.Message(
"note_off", note=note, time=ticks, channel=channel
), state
return
else: # note token
bin, note, velocity = utils.note_token_to_data(token)
channel = utils.cfg.bin_channel_map[utils.cfg.bin_instrument_names[bin]]
ticks = int(mido.second2tick(state.delta_accum / 1000.0, 480, 500000))
state.delta_accum = 0.0
if velocity > 0:
if utils.cfg.decode_fix_repeated_notes:
if (channel, note) in state.active_notes:
del state.active_notes[(channel, note)]
yield mido.Message(
"note_off", note=note, time=ticks, channel=channel
), state
ticks = 0
state.active_notes[(channel, note)] = state.total_time
yield mido.Message(
"note_on", note=note, velocity=velocity, time=ticks, channel=channel
), state
return
else:
if (channel, note) in state.active_notes:
del state.active_notes[(channel, note)]
yield mido.Message(
"note_off", note=note, time=ticks, channel=channel
), state
return
yield None, state
def str_to_midi_messages(utils: VocabUtils, data: str) -> Iterator[mido.Message]:
state = None
for token in data.split(" "):
for msg, new_state in token_to_midi_message(utils, token, state):
state = new_state
if msg is not None:
yield msg
def convert_str_to_midi(
cfg: VocabConfig, data: str, meta_text: str = "Generated by MIDI-LLM-tokenizer"
) -> mido.MidiFile:
utils = VocabUtils(cfg)
mid = mido.MidiFile()
track = mido.MidiTrack()
mid.tracks.append(track)
tempo = 500000
if meta_text:
track.append(mido.MetaMessage("text", text=meta_text, time=0))
track.append(mido.MetaMessage("set_tempo", tempo=tempo, time=0))
for msg in generate_program_change_messages(cfg):
track.append(msg)
# data = data.replace("<start>", "").replace("<end>", "").replace("<pad>", "").strip()
for msg in str_to_midi_messages(utils, data):
track.append(msg)
track.append(mido.MetaMessage("end_of_track", time=0))
return mid
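A hedged round-trip sketch for the two converters above; the paths and token string are illustrative assumptions, not taken from the repository:

cfg = VocabConfig.from_json("vocab_config.json")  # hypothetical config path
tokens = convert_midi_to_str(cfg, mido.MidiFile("song.mid"))  # MIDI -> token text
mid = convert_str_to_midi(cfg, "p:3c:a t125 p:3c:0")  # note on, wait, note off
mid.save("out.mid")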


@@ -0,0 +1,303 @@
{
"note_events": 128,
"wait_events": 125,
"max_wait_time": 1000,
"velocity_events": 128,
"velocity_bins": 12,
"velocity_exp": 0.5,
"do_token_sorting": true,
"unrolled_tokens": false,
"decode_end_held_note_delay": 5.0,
"decode_fix_repeated_notes": true,
"bin_instrument_names": [
"percussion",
"drum",
"tuba",
"marimba",
"bass",
"guitar",
"violin",
"trumpet",
"piano",
"sax",
"flute",
"lead",
"pad"
],
"ch10_instrument_bin_name": "percussion",
"program_name_to_bin_name": {
"Acoustic Grand Piano": "piano",
"Bright Acoustic Piano": "piano",
"Electric Grand Piano": "piano",
"Honky-tonk Piano": "piano",
"Electric Piano 1 (Rhodes Piano)": "piano",
"Electric Piano 2 (Chorused Piano)": "piano",
"Harpsichord": "piano",
"Clavinet": "piano",
"Celesta": "marimba",
"Glockenspiel": "marimba",
"Music Box": "marimba",
"Vibraphone": "marimba",
"Marimba": "marimba",
"Xylophone": "marimba",
"Tubular Bells": "marimba",
"Dulcimer (Santur)": "marimba",
"Drawbar Organ (Hammond)": "marimba",
"Percussive Organ": "piano",
"Rock Organ": "piano",
"Church Organ": "piano",
"Reed Organ": "piano",
"Accordion (French)": "piano",
"Harmonica": "piano",
"Tango Accordion (Band neon)": "piano",
"Acoustic Guitar (nylon)": "guitar",
"Acoustic Guitar (steel)": "guitar",
"Electric Guitar (jazz)": "guitar",
"Electric Guitar (clean)": "guitar",
"Electric Guitar (muted)": "guitar",
"Overdriven Guitar": "guitar",
"Distortion Guitar": "guitar",
"Guitar harmonics": "guitar",
"Acoustic Bass": "bass",
"Electric Bass (fingered)": "bass",
"Electric Bass (picked)": "bass",
"Fretless Bass": "bass",
"Slap Bass 1": "bass",
"Slap Bass 2": "bass",
"Synth Bass 1": "bass",
"Synth Bass 2": "bass",
"Violin": "violin",
"Viola": "violin",
"Cello": "bass",
"Contrabass": "bass",
"Tremolo Strings": "violin",
"Pizzicato Strings": "violin",
"Orchestral Harp": "violin",
"Timpani": "drum",
"String Ensemble 1 (strings)": "violin",
"String Ensemble 2 (slow strings)": "violin",
"SynthStrings 1": "violin",
"SynthStrings 2": "violin",
"Choir Aahs": "violin",
"Voice Oohs": "violin",
"Synth Voice": "violin",
"Orchestra Hit": "",
"Trumpet": "trumpet",
"Trombone": "tuba",
"Tuba": "tuba",
"Muted Trumpet": "trumpet",
"French Horn": "trumpet",
"Brass Section": "trumpet",
"SynthBrass 1": "trumpet",
"SynthBrass 2": "trumpet",
"Soprano Sax": "sax",
"Alto Sax": "sax",
"Tenor Sax": "sax",
"Baritone Sax": "sax",
"Oboe": "sax",
"English Horn": "trumpet",
"Bassoon": "sax",
"Clarinet": "sax",
"Piccolo": "flute",
"Flute": "flute",
"Recorder": "flute",
"Pan Flute": "flute",
"Blown Bottle": "flute",
"Shakuhachi": "flute",
"Whistle": "flute",
"Ocarina": "flute",
"Lead 1 (square wave)": "lead",
"Lead 2 (sawtooth wave)": "lead",
"Lead 3 (calliope)": "lead",
"Lead 4 (chiffer)": "lead",
"Lead 5 (charang)": "lead",
"Lead 6 (voice solo)": "violin",
"Lead 7 (fifths)": "lead",
"Lead 8 (bass + lead)": "lead",
"Pad 1 (new age Fantasia)": "pad",
"Pad 2 (warm)": "pad",
"Pad 3 (polysynth)": "pad",
"Pad 4 (choir space voice)": "violin",
"Pad 5 (bowed glass)": "pad",
"Pad 6 (metallic pro)": "pad",
"Pad 7 (halo)": "pad",
"Pad 8 (sweep)": "pad",
"FX 1 (rain)": "",
"FX 2 (soundtrack)": "",
"FX 3 (crystal)": "",
"FX 4 (atmosphere)": "",
"FX 5 (brightness)": "",
"FX 6 (goblins)": "",
"FX 7 (echoes, drops)": "",
"FX 8 (sci-fi, star theme)": "",
"Sitar": "guitar",
"Banjo": "guitar",
"Shamisen": "guitar",
"Koto": "guitar",
"Kalimba": "guitar",
"Bag pipe": "sax",
"Fiddle": "violin",
"Shanai": "sax",
"Tinkle Bell": "marimba",
"Agogo": "marimba",
"Steel Drums": "marimba",
"Woodblock": "marimba",
"Taiko Drum": "drum",
"Melodic Tom": "drum",
"Synth Drum": "drum",
"Reverse Cymbal": "",
"Guitar Fret Noise": "",
"Breath Noise": "",
"Seashore": "",
"Bird Tweet": "",
"Telephone Ring": "",
"Helicopter": "",
"Applause": "",
"Gunshot": ""
},
"bin_name_to_program_name": {
"piano": "Acoustic Grand Piano",
"marimba": "Marimba",
"drum": "Synth Drum",
"guitar": "Acoustic Guitar (steel)",
"bass": "Acoustic Bass",
"violin": "Violin",
"percussion": "",
"trumpet": "Trumpet",
"tuba": "Tuba",
"sax": "Tenor Sax",
"flute": "Flute",
"lead": "Lead 1 (square wave)",
"pad": "Pad 1 (new age Fantasia)"
},
"instrument_names": {
"0": "Acoustic Grand Piano",
"1": "Bright Acoustic Piano",
"2": "Electric Grand Piano",
"3": "Honky-tonk Piano",
"4": "Electric Piano 1 (Rhodes Piano)",
"5": "Electric Piano 2 (Chorused Piano)",
"6": "Harpsichord",
"7": "Clavinet",
"8": "Celesta",
"9": "Glockenspiel",
"10": "Music Box",
"11": "Vibraphone",
"12": "Marimba",
"13": "Xylophone",
"14": "Tubular Bells",
"15": "Dulcimer (Santur)",
"16": "Drawbar Organ (Hammond)",
"17": "Percussive Organ",
"18": "Rock Organ",
"19": "Church Organ",
"20": "Reed Organ",
"21": "Accordion (French)",
"22": "Harmonica",
"23": "Tango Accordion (Band neon)",
"24": "Acoustic Guitar (nylon)",
"25": "Acoustic Guitar (steel)",
"26": "Electric Guitar (jazz)",
"27": "Electric Guitar (clean)",
"28": "Electric Guitar (muted)",
"29": "Overdriven Guitar",
"30": "Distortion Guitar",
"31": "Guitar harmonics",
"32": "Acoustic Bass",
"33": "Electric Bass (fingered)",
"34": "Electric Bass (picked)",
"35": "Fretless Bass",
"36": "Slap Bass 1",
"37": "Slap Bass 2",
"38": "Synth Bass 1",
"39": "Synth Bass 2",
"40": "Violin",
"41": "Viola",
"42": "Cello",
"43": "Contrabass",
"44": "Tremolo Strings",
"45": "Pizzicato Strings",
"46": "Orchestral Harp",
"47": "Timpani",
"48": "String Ensemble 1 (strings)",
"49": "String Ensemble 2 (slow strings)",
"50": "SynthStrings 1",
"51": "SynthStrings 2",
"52": "Choir Aahs",
"53": "Voice Oohs",
"54": "Synth Voice",
"55": "Orchestra Hit",
"56": "Trumpet",
"57": "Trombone",
"58": "Tuba",
"59": "Muted Trumpet",
"60": "French Horn",
"61": "Brass Section",
"62": "SynthBrass 1",
"63": "SynthBrass 2",
"64": "Soprano Sax",
"65": "Alto Sax",
"66": "Tenor Sax",
"67": "Baritone Sax",
"68": "Oboe",
"69": "English Horn",
"70": "Bassoon",
"71": "Clarinet",
"72": "Piccolo",
"73": "Flute",
"74": "Recorder",
"75": "Pan Flute",
"76": "Blown Bottle",
"77": "Shakuhachi",
"78": "Whistle",
"79": "Ocarina",
"80": "Lead 1 (square wave)",
"81": "Lead 2 (sawtooth wave)",
"82": "Lead 3 (calliope)",
"83": "Lead 4 (chiffer)",
"84": "Lead 5 (charang)",
"85": "Lead 6 (voice solo)",
"86": "Lead 7 (fifths)",
"87": "Lead 8 (bass + lead)",
"88": "Pad 1 (new age Fantasia)",
"89": "Pad 2 (warm)",
"90": "Pad 3 (polysynth)",
"91": "Pad 4 (choir space voice)",
"92": "Pad 5 (bowed glass)",
"93": "Pad 6 (metallic pro)",
"94": "Pad 7 (halo)",
"95": "Pad 8 (sweep)",
"96": "FX 1 (rain)",
"97": "FX 2 (soundtrack)",
"98": "FX 3 (crystal)",
"99": "FX 4 (atmosphere)",
"100": "FX 5 (brightness)",
"101": "FX 6 (goblins)",
"102": "FX 7 (echoes, drops)",
"103": "FX 8 (sci-fi, star theme)",
"104": "Sitar",
"105": "Banjo",
"106": "Shamisen",
"107": "Koto",
"108": "Kalimba",
"109": "Bag pipe",
"110": "Fiddle",
"111": "Shanai",
"112": "Tinkle Bell",
"113": "Agogo",
"114": "Steel Drums",
"115": "Woodblock",
"116": "Taiko Drum",
"117": "Melodic Tom",
"118": "Synth Drum",
"119": "Reverse Cymbal",
"120": "Guitar Fret Noise",
"121": "Breath Noise",
"122": "Seashore",
"123": "Bird Tweet",
"124": "Telephone Ring",
"125": "Helicopter",
"126": "Applause",
"127": "Gunshot"
}
}
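This JSON maps directly onto the VocabConfig dataclass in backend-python/utils/midi.py; a loading sketch (the filename is an assumption):

cfg = VocabConfig.from_json("vocab_config.json")
utils = VocabUtils(cfg)
print(cfg.bin_channel_map)  # 13 instrument bins spread over MIDI channels, percussion pinned to channel 9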

backend-python/utils/ngrok.py

@@ -1,11 +1,13 @@
 import os
-import sys
+import global_var
 def ngrok_connect():
     from pyngrok import ngrok, conf
-    conf.set_default(conf.PyngrokConfig(ngrok_path="./ngrok"))
+    conf.set_default(
+        conf.PyngrokConfig(ngrok_path="./ngrok.exe" if os.name == "nt" else "./ngrok")
+    )
     ngrok.set_auth_token(os.environ["ngrok_token"])
-    http_tunnel = ngrok.connect(8000 if len(sys.argv) == 1 else int(sys.argv[1]))
-    print(http_tunnel.public_url)
+    http_tunnel = ngrok.connect(global_var.get(global_var.Args).port)
+    print(f"ngrok url: {http_tunnel.public_url}")

backend-python/utils/rwkv.py

@@ -1,32 +1,589 @@
+from abc import ABC, abstractmethod
+from enum import Enum, auto
 import os
 import pathlib
-from typing import Dict
-from langchain.llms import RWKV
-from pydantic import BaseModel
+import copy
+import re
+from typing import Dict, Iterable, List, Tuple, Union, Type
+from utils.log import quick_log
+from fastapi import HTTPException
+from pydantic import BaseModel, Field
+import numpy as np
+from routes import state_cache
+import global_var
END_OF_TEXT = 0
END_OF_LINE_DOUBLE = 535
os.environ["TORCH_EXTENSIONS_DIR"] = f"{pathlib.Path(__file__).parent.parent.resolve()}"
class RWKVType(Enum):
NoneType = auto()
Raven = auto()
World = auto()
Music = auto()
class AbstractRWKV(ABC):
def __init__(self, model, pipeline):
self.name = "rwkv"
self.model = model
self.pipeline = pipeline
self.model_state = None
self.model_tokens = []
self.rwkv_type: RWKVType = RWKVType.NoneType
self.tokenizer_len = len(model.w["emb.weight"])
self.max_tokens_per_generation = 500
self.temperature = 1
self.top_p = 0.3
self.top_k = 0
self.penalty_alpha_presence = 0
self.penalty_alpha_frequency = 1
@abstractmethod
def adjust_occurrence(self, occurrence: Dict, token: int):
pass
@abstractmethod
def adjust_forward_logits(self, logits: List[float], occurrence: Dict, i: int):
pass
# Model only saw '\n\n' as [187, 187] before, but the tokenizer outputs [535] for it at the end
@abstractmethod
def fix_tokens(self, tokens) -> List[int]:
pass
@abstractmethod
def run_rnn(
self, _tokens: List[str], newline_adj: int = 0
) -> Tuple[List[float], int]:
pass
@abstractmethod
def delta_postprocess(self, delta: str) -> str:
pass
def get_embedding(self, input: str, fast_mode: bool) -> Tuple[List[float], int]:
if fast_mode:
embedding, token_len = self.__fast_embedding(
self.fix_tokens(self.pipeline.encode(input)), None
)
else:
self.model_state = None
self.model_tokens = []
_, token_len = self.run_rnn(self.fix_tokens(self.pipeline.encode(input)))
embedding = self.model_state[-11].tolist()
embedding = (embedding / np.linalg.norm(embedding)).tolist()
return embedding, token_len
def __fast_embedding(self, tokens: List[str], state):
import torch
tokens = [int(x) for x in tokens]
token_len = len(tokens)
self = self.model
with torch.no_grad():
w = self.w
args = self.args
if state == None:
state = [None] * args.n_layer * 5
for i in range(
args.n_layer
): # state: 0=att_xx 1=att_aa 2=att_bb 3=att_pp 4=ffn_xx
dd = self.strategy[i]
dev = dd.device
atype = dd.atype
state[i * 5 + 0] = torch.zeros(
args.n_embd, dtype=atype, requires_grad=False, device=dev
).contiguous()
state[i * 5 + 1] = torch.zeros(
args.n_embd, dtype=torch.float, requires_grad=False, device=dev
).contiguous()
state[i * 5 + 2] = torch.zeros(
args.n_embd, dtype=torch.float, requires_grad=False, device=dev
).contiguous()
state[i * 5 + 3] = (
torch.zeros(
args.n_embd,
dtype=torch.float,
requires_grad=False,
device=dev,
).contiguous()
- 1e30
)
state[i * 5 + 4] = torch.zeros(
args.n_embd, dtype=atype, requires_grad=False, device=dev
).contiguous()
break
seq_mode = len(tokens) > 1
x = w["emb.weight"][tokens if seq_mode else tokens[0]]
for i in range(args.n_layer):
bbb = f"blocks.{i}."
att = f"blocks.{i}.att."
ffn = f"blocks.{i}.ffn."
dd = self.strategy[i]
dev = dd.device
atype = dd.atype
wtype = dd.wtype
if seq_mode:
if "cuda" in str(dev) and os.environ["RWKV_CUDA_ON"] == "1":
ATT = (
self.cuda_att_seq
if wtype != torch.uint8
else self.cuda_att_seq_i8
)
else:
ATT = self.att_seq if wtype != torch.uint8 else self.att_seq_i8
FFN = self.ffn_seq if wtype != torch.uint8 else self.ffn_seq_i8
else:
ATT = self.att_one if wtype != torch.uint8 else self.att_one_i8
FFN = self.ffn_one if wtype != torch.uint8 else self.ffn_one_i8
x = x.to(dtype=atype, device=dev)
kw = w[f"{att}key.weight"]
vw = w[f"{att}value.weight"]
rw = w[f"{att}receptance.weight"]
ow = w[f"{att}output.weight"]
if dd.stream:
kw = kw.to(device=dev, non_blocking=True)
vw = vw.to(device=dev, non_blocking=True)
rw = rw.to(device=dev, non_blocking=True)
ow = ow.to(device=dev, non_blocking=True)
kmx = w[f"{att}key.weight_mx"] if wtype == torch.uint8 else x
krx = w[f"{att}key.weight_rx"] if wtype == torch.uint8 else x
kmy = w[f"{att}key.weight_my"] if wtype == torch.uint8 else x
kry = w[f"{att}key.weight_ry"] if wtype == torch.uint8 else x
vmx = w[f"{att}value.weight_mx"] if wtype == torch.uint8 else x
vrx = w[f"{att}value.weight_rx"] if wtype == torch.uint8 else x
vmy = w[f"{att}value.weight_my"] if wtype == torch.uint8 else x
vry = w[f"{att}value.weight_ry"] if wtype == torch.uint8 else x
rmx = w[f"{att}receptance.weight_mx"] if wtype == torch.uint8 else x
rrx = w[f"{att}receptance.weight_rx"] if wtype == torch.uint8 else x
rmy = w[f"{att}receptance.weight_my"] if wtype == torch.uint8 else x
rry = w[f"{att}receptance.weight_ry"] if wtype == torch.uint8 else x
omx = w[f"{att}output.weight_mx"] if wtype == torch.uint8 else x
orx = w[f"{att}output.weight_rx"] if wtype == torch.uint8 else x
omy = w[f"{att}output.weight_my"] if wtype == torch.uint8 else x
ory = w[f"{att}output.weight_ry"] if wtype == torch.uint8 else x
(
x,
state[i * 5 + 0],
state[i * 5 + 1],
state[i * 5 + 2],
state[i * 5 + 3],
) = ATT(
x,
state[i * 5 + 0],
state[i * 5 + 1],
state[i * 5 + 2],
state[i * 5 + 3],
w[f"{bbb}ln1.weight"],
w[f"{bbb}ln1.bias"],
w[f"{att}time_mix_k"],
w[f"{att}time_mix_v"],
w[f"{att}time_mix_r"],
w[f"{att}time_decay"],
w[f"{att}time_first"],
kw,
vw,
rw,
ow,
kmx,
krx,
kmy,
kry,
vmx,
vrx,
vmy,
vry,
rmx,
rrx,
rmy,
rry,
omx,
orx,
omy,
ory,
)
return state[0].tolist(), token_len
def generate(
self, prompt: str, stop: Union[str, List[str], None] = None
) -> Iterable[Tuple[str, str, int, int]]:
quick_log(None, None, "Generation Prompt:\n" + prompt)
cache = None
delta_prompt = prompt
try:
cache = state_cache.longest_prefix_state(
state_cache.LongestPrefixStateBody(prompt=prompt), None
)
except HTTPException:
pass
if cache is None or cache["prompt"] == "":
self.model_state = None
self.model_tokens = []
else:
delta_prompt = prompt[len(cache["prompt"]) :]
self.model_state = copy.deepcopy(cache["state"])
self.model_tokens = copy.deepcopy(cache["tokens"])
logits = copy.deepcopy(cache["logits"])
prompt_token_len = 0
if delta_prompt != "":
logits, prompt_token_len = self.run_rnn(
self.fix_tokens(self.pipeline.encode(delta_prompt))
)
try:
state_cache.add_state(
state_cache.AddStateBody(
prompt=prompt,
tokens=self.model_tokens,
state=self.model_state,
logits=logits,
)
)
except HTTPException:
pass
begin = len(self.model_tokens)
out_last = begin
occurrence: Dict = {}
completion_token_len = 0
response = ""
for i in range(self.max_tokens_per_generation):
self.adjust_forward_logits(logits, occurrence, i)
token = self.pipeline.sample_logits(
logits, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k
)
if token == END_OF_TEXT:
yield response, "", prompt_token_len, completion_token_len
break
self.adjust_occurrence(occurrence, token)
logits, _ = self.run_rnn([token])
completion_token_len = completion_token_len + 1
delta: str = self.delta_postprocess(
self.pipeline.decode(self.model_tokens[out_last:])
)
if "\ufffd" not in delta: # avoid utf-8 display issues
response += delta
if stop is not None:
if type(stop) == str:
if stop in response:
try:
state_cache.add_state(
state_cache.AddStateBody(
prompt=prompt + response,
tokens=self.model_tokens,
state=self.model_state,
logits=logits,
)
)
except HTTPException:
pass
response = response.split(stop)[0]
yield response, "", prompt_token_len, completion_token_len
break
elif type(stop) == list:
stop_exist_regex = "|".join(stop)
matched = re.search(stop_exist_regex, response)
if matched:
try:
state_cache.add_state(
state_cache.AddStateBody(
prompt=prompt + response,
tokens=self.model_tokens,
state=self.model_state,
logits=logits,
)
)
except HTTPException:
pass
response = response.split(matched.group())[0]
yield response, "", prompt_token_len, completion_token_len
break
out_last = begin + i + 1
if i == self.max_tokens_per_generation - 1:
try:
state_cache.add_state(
state_cache.AddStateBody(
prompt=prompt + response,
tokens=self.model_tokens,
state=self.model_state,
logits=logits,
)
)
except HTTPException:
pass
yield response, delta, prompt_token_len, completion_token_len
class TextRWKV(AbstractRWKV):
def __init__(self, model, pipeline) -> None:
super().__init__(model, pipeline)
self.CHUNK_LEN = 256
self.max_tokens_per_generation = 500
self.temperature = 1
self.top_p = 0.3
self.top_k = 0
self.penalty_alpha_presence = 0
self.penalty_alpha_frequency = 1
self.interface = ":"
if self.tokenizer_len < 65536:
self.rwkv_type = RWKVType.Raven
self.user = "Bob"
self.bot = "Alice"
self.END_OF_LINE = 187
else:
self.rwkv_type = RWKVType.World
self.user = "User"
self.bot = "Assistant"
self.END_OF_LINE = 11
self.AVOID_REPEAT_TOKENS = []
AVOID_REPEAT = ""
for i in AVOID_REPEAT:
dd = self.pipeline.encode(i)
assert len(dd) == 1
self.AVOID_REPEAT_TOKENS += dd
self.__preload()
def adjust_occurrence(self, occurrence: Dict, token: int):
for xxx in occurrence:
occurrence[xxx] *= 0.996
if token not in occurrence:
occurrence[token] = 1
else:
occurrence[token] += 1
def adjust_forward_logits(self, logits: List[float], occurrence: Dict, i: int):
for n in occurrence:
logits[n] -= (
self.penalty_alpha_presence
+ occurrence[n] * self.penalty_alpha_frequency
)
if i == 0:
for token in self.model_tokens:
token = int(token)
for xxx in occurrence:
occurrence[xxx] *= 0.996
if token not in occurrence:
occurrence[token] = 1
else:
occurrence[token] += 1
# Model only saw '\n\n' as [187, 187] before, but the tokenizer outputs [535] for it at the end
def fix_tokens(self, tokens) -> List[int]:
if self.rwkv_type == RWKVType.World:
return tokens
if len(tokens) > 0 and tokens[-1] == END_OF_LINE_DOUBLE:
tokens = tokens[:-1] + [self.END_OF_LINE, self.END_OF_LINE]
return tokens
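# Worked example: for a Raven model, a prompt ending in "\n\n" encodes to
# [..., 535]; fix_tokens rewrites the tail to [..., 187, 187] so the model
# sees the same token pair it saw during training.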
def run_rnn(
self, _tokens: List[str], newline_adj: int = 0
) -> Tuple[List[float], int]:
tokens = [int(x) for x in _tokens]
token_len = len(tokens)
self.model_tokens += tokens
while len(tokens) > 0:
out, self.model_state = self.model.forward(
tokens[: self.CHUNK_LEN], self.model_state
)
tokens = tokens[self.CHUNK_LEN :]
out[self.END_OF_LINE] += newline_adj # adjust \n probability
if self.model_tokens[-1] in self.AVOID_REPEAT_TOKENS:
out[self.model_tokens[-1]] = -999999999
return out, token_len
def delta_postprocess(self, delta: str) -> str:
return delta
def __preload(self):
interface = self.interface
user = self.user
bot = self.bot
preset_system = (
f"""
The following is a coherent verbose detailed conversation between a girl named {bot} and her friend {user}. \
{bot} is very intelligent, creative and friendly. \
{bot} is unlikely to disagree with {user}, and {bot} doesn't like to ask {user} questions. \
{bot} likes to tell {user} a lot about herself and her opinions. \
{bot} usually gives {user} kind, helpful and informative advices.\n
"""
if self.rwkv_type == RWKVType.Raven
else (
f"{user}{interface} hi\n\n{bot}{interface} Hi. "
+ "I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n\n"
)
)
logits, _ = self.run_rnn(self.fix_tokens(self.pipeline.encode(preset_system)))
try:
state_cache.add_state(
state_cache.AddStateBody(
prompt=preset_system,
tokens=self.model_tokens,
state=self.model_state,
logits=logits,
)
)
except HTTPException:
pass
class MusicRWKV(AbstractRWKV):
def __init__(self, model, pipeline):
super().__init__(model, pipeline)
self.max_tokens_per_generation = 500
self.temperature = 1
self.top_p = 0.8
self.top_k = 8
self.rwkv_type = RWKVType.Music
def adjust_occurrence(self, occurrence: Dict, token: int):
for n in occurrence:
occurrence[n] *= 0.997 #### decay repetition penalty
if token >= 128 or token == 127:
occurrence[token] = 1 + (occurrence[token] if token in occurrence else 0)
else:
occurrence[token] = 0.3 + (occurrence[token] if token in occurrence else 0)
def adjust_forward_logits(self, logits: List[float], occurrence: Dict, i: int):
for n in occurrence:
logits[n] -= 0 + occurrence[n] * 0.5
logits[0] += (i - 2000) / 500 # try not to be too short or too long
logits[127] -= 1 # avoid "t125"
def fix_tokens(self, tokens) -> List[int]:
return tokens
def run_rnn(
self, _tokens: List[str], newline_adj: int = 0
) -> Tuple[List[float], int]:
tokens = [int(x) for x in _tokens]
token_len = len(tokens)
self.model_tokens += tokens
out, self.model_state = self.model.forward(tokens, self.model_state)
return out, token_len
def delta_postprocess(self, delta: str) -> str:
return " " + delta
def get_tokenizer(tokenizer_len: int):
tokenizer_dir = f"{pathlib.Path(__file__).parent.parent.resolve()}/rwkv_pip/"
if tokenizer_len < 50277:
return tokenizer_dir + "tokenizer-midi.json"
elif tokenizer_len < 65536:
return tokenizer_dir + "20B_tokenizer.json"
else:
return "rwkv_vocab_v20230424"
def RWKV(model: str, strategy: str, tokenizer: Union[str, None]) -> AbstractRWKV:
rwkv_beta = global_var.get(global_var.Args).rwkv_beta
if "midi" in model.lower() or "abc" in model.lower():
os.environ["RWKV_RESCALE_LAYER"] = "999"
# dynamic import to make RWKV_CUDA_ON work
if rwkv_beta:
from rwkv_pip.beta.model import (
RWKV as Model,
)
else:
from rwkv_pip.model import (
RWKV as Model,
)
from rwkv_pip.utils import PIPELINE
filename, _ = os.path.splitext(os.path.basename(model))
model = Model(model, strategy)
if not tokenizer:
tokenizer = get_tokenizer(len(model.w["emb.weight"]))
pipeline = PIPELINE(model, tokenizer)
rwkv_map: dict[str, Type[AbstractRWKV]] = {
"20B_tokenizer": TextRWKV,
"rwkv_vocab_v20230424": TextRWKV,
"tokenizer-midi": MusicRWKV,
}
tokenizer_name = os.path.splitext(os.path.basename(tokenizer))[0]
rwkv: AbstractRWKV
if tokenizer_name in rwkv_map:
rwkv = rwkv_map[tokenizer_name](model, pipeline)
else:
rwkv = TextRWKV(model, pipeline)
rwkv.name = filename
return rwkv
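# Hedged usage sketch (the model path and strategy string are assumptions):
#   rwkv = RWKV("models/some-rwkv-world-model.pth", "cuda fp16", tokenizer=None)
#   for response, delta, prompt_len, completion_len in rwkv.generate("Hello"):
#       print(delta, end="")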
 class ModelConfigBody(BaseModel):
-    max_tokens: int = None
-    temperature: float = None
-    top_p: float = None
-    presence_penalty: float = None
-    frequency_penalty: float = None
+    max_tokens: int = Field(default=None, gt=0, le=102400)
+    temperature: float = Field(default=None, ge=0, le=2)
+    top_p: float = Field(default=None, ge=0, le=1)
+    presence_penalty: float = Field(default=None, ge=-2, le=2)
+    frequency_penalty: float = Field(default=None, ge=-2, le=2)
+    model_config = {
+        "json_schema_extra": {
+            "example": {
+                "max_tokens": 1000,
+                "temperature": 1.2,
+                "top_p": 0.5,
+                "presence_penalty": 0.4,
+                "frequency_penalty": 0.4,
+            }
+        }
+    }
-def set_rwkv_config(model: RWKV, body: ModelConfigBody):
-    if body.max_tokens:
+def set_rwkv_config(model: AbstractRWKV, body: ModelConfigBody):
+    if body.max_tokens is not None:
         model.max_tokens_per_generation = body.max_tokens
-    if body.temperature:
-        model.temperature = body.temperature
-    if body.top_p:
+    if body.temperature is not None:
+        if body.temperature < 0.1:
+            model.temperature = 0.1
+        else:
+            model.temperature = body.temperature
+    if body.top_p is not None:
         model.top_p = body.top_p
-    if body.presence_penalty:
+    if body.presence_penalty is not None:
         model.penalty_alpha_presence = body.presence_penalty
-    if body.frequency_penalty:
+    if body.frequency_penalty is not None:
         model.penalty_alpha_frequency = body.frequency_penalty
-def get_rwkv_config(model: RWKV) -> ModelConfigBody:
+def get_rwkv_config(model: AbstractRWKV) -> ModelConfigBody:
return ModelConfigBody(
max_tokens=model.max_tokens_per_generation,
temperature=model.temperature,
@@ -34,49 +591,3 @@ def get_rwkv_config(model: RWKV) -> ModelConfigBody:
presence_penalty=model.penalty_alpha_presence,
frequency_penalty=model.penalty_alpha_frequency,
)
os.environ["TORCH_EXTENSIONS_DIR"] = f"{pathlib.Path(__file__).parent.parent.resolve()}"
def rwkv_generate(model: RWKV, prompt: str, stop: str = None):
model.model_state = None
model.model_tokens = []
logits = model.run_rnn(model.tokenizer.encode(prompt).ids)
begin = len(model.model_tokens)
out_last = begin
occurrence: Dict = {}
response = ""
for i in range(model.max_tokens_per_generation):
for n in occurrence:
logits[n] -= (
model.penalty_alpha_presence
+ occurrence[n] * model.penalty_alpha_frequency
)
token = model.pipeline.sample_logits(
logits, temperature=model.temperature, top_p=model.top_p
)
END_OF_TEXT = 0
if token == END_OF_TEXT:
break
if token not in occurrence:
occurrence[token] = 1
else:
occurrence[token] += 1
logits = model.run_rnn([token])
delta: str = model.tokenizer.decode(model.model_tokens[out_last:])
if "\ufffd" not in delta: # avoid utf-8 display issues
response += delta
if stop is not None:
if stop in response:
response = response.split(stop)[0]
yield response, ""
break
yield response, delta
out_last = begin + i + 1
if i >= model.max_tokens_per_generation - 100:
break

backend-python/webui_server.py (14 lines)

@@ -0,0 +1,14 @@
from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.staticfiles import StaticFiles
import uvicorn
webui_server = FastAPI()
webui_server.add_middleware(GZipMiddleware, minimum_size=1000)
webui_server.mount(
"/", StaticFiles(directory="frontend/dist", html=True), name="static"
)
if __name__ == "__main__":
uvicorn.run("webui_server:webui_server")

Binary file not shown.

Binary file not shown.


@@ -1,734 +0,0 @@
########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################
import types, gc, os, time, re
import torch
from torch.nn import functional as F
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cuda.matmul.allow_tf32 = True
current_path = os.path.dirname(os.path.abspath(__file__))
# https://zhuanlan.zhihu.com/p/612879065
def LoadPreCompileLibrary(file):
import importlib
import os
import torch
# load the custom_op_library and register the custom ops
lib_dir = os.path.dirname(__file__)
if os.name == "nt":
# Register the main torchvision library location on the default DLL path
import ctypes
import sys
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
prev_error_mode = kernel32.SetErrorMode(0x0001)
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
if sys.version_info >= (3, 8):
os.add_dll_directory(lib_dir)
elif with_load_library_flags:
res = kernel32.AddDllDirectory(lib_dir)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
raise ValueError(err)
kernel32.SetErrorMode(prev_error_mode)
loader_details = (
importlib.machinery.ExtensionFileLoader,
importlib.machinery.EXTENSION_SUFFIXES,
)
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
ext_specs = extfinder.find_spec(file)
if ext_specs is None:
return False
try:
torch.ops.load_library(ext_specs.origin)
except OSError as exc:
return False
return True
########################################################################################################
if os.environ.get('RWKV_JIT_ON') != '0':
os.environ["RWKV_JIT_ON"] = '1'
MyModule = torch.jit.ScriptModule
MyFunction = torch.jit.script_method
MyStatic = torch.jit.script
else:
MyModule = torch.nn.Module
def __nop(ob):
return ob
MyFunction = __nop
MyStatic = __nop
if os.environ.get('RWKV_CUDA_ON') == '1':
if LoadPreCompileLibrary('wkv_cuda') is False:
from torch.utils.cpp_extension import load
load(
name=f"wkv_cuda",
sources=[f"{current_path}/cuda/wrapper.cpp", f"{current_path}/cuda/operators.cu"],
verbose=True,
extra_cuda_cflags=["-t 4", "-std=c++17", "--use_fast_math", "-O3", "--extra-device-vectorization"],
is_python_module=False)
@MyStatic
def cuda_wkv(T: int, C: int, w, u, k, v, aa, bb, pp):
assert 1 * C % min(C, 32) == 0
assert k.dtype == v.dtype == torch.float16 or k.dtype == v.dtype == torch.float32
assert w.dtype == u.dtype == aa.dtype == bb.dtype == pp.dtype == torch.float32
w = w.contiguous()
u = u.contiguous()
k = k.contiguous()
v = v.contiguous()
y = torch.empty((T, C), device=w.device, memory_format=torch.contiguous_format, dtype=k.dtype)
torch.ops.rwkv.wkv_forward(1, T, C, w, u, k, v, y, aa, bb, pp)
return y, aa, bb, pp
@MyStatic
def cuda_mm8_seq(B: int, N: int, M: int, x, w, mx, rx, my, ry):
assert x.dtype == mx.dtype == rx.dtype == my.dtype == ry.dtype
assert x.dtype == torch.float32 or x.dtype == torch.float16
assert w.dtype == torch.uint8
assert x.shape == [B, N]
assert w.shape == [N, M]
assert rx.shape == mx.shape == [M]
assert ry.shape == my.shape == [N, 1]
y = torch.empty((B, M), device=w.device, dtype=x.dtype)
torch.ops.rwkv.mm8_seq(B, N, M, x, w, mx, rx, my, ry, y)
return y
@MyStatic
def cuda_mm8_one(N: int, M: int, x, w, mx, rx, my, ry):
assert x.dtype == mx.dtype == rx.dtype == my.dtype == ry.dtype
assert x.dtype == torch.float32 or x.dtype == torch.float16
assert w.dtype == torch.uint8
assert x.shape == [N]
assert w.shape == [N, M]
assert rx.shape == mx.shape == [M]
assert ry.shape == my.shape == [N, 1]
y = torch.zeros((M,), device=w.device, dtype=torch.float32)
torch.ops.rwkv.mm8_one(N, M, x, w, mx, rx, my, ry, y)
return y.to(dtype=x.dtype)
else:
os.environ["RWKV_CUDA_ON"] = '0'
########################################################################################################
class RWKV(MyModule):
def __init__(self, model, strategy, verbose = True, convert_and_save_and_exit = None):
super().__init__()
if verbose:
prxxx = lambda *args, **kwargs: print(*args, **kwargs)
else:
prxxx = lambda *args, **kwargs: None
STRATEGY_REGEX = r"^(?:(?:^|->) *(?:cuda(?::[\d]+)?|cpu|mps) (?:fp(?:16|32)|bf16)(?:i8|i4|i3)?(?: \*[\d]+\+?)? *)+$"
if not re.match(STRATEGY_REGEX, strategy):
raise ValueError("Invalid strategy. Please read https://pypi.org/project/rwkv/")
strategy = ('->'.join([x.strip() for x in strategy.split('->')])).replace('->', ' -> ')
self.args = types.SimpleNamespace()
args = self.args
args.MODEL_NAME = model
args.strategy_string = strategy
# Rescale for fp16 mode: set x = x/2 every X layer (to avoid fp16 overflow)
self.RESCALE_LAYER = 6 if 'fp16' in strategy else 0
prxxx(f'RWKV_JIT_ON {os.environ["RWKV_JIT_ON"]} RWKV_CUDA_ON {os.environ["RWKV_CUDA_ON"]} RESCALE_LAYER {self.RESCALE_LAYER}\n')
args.MODEL_NAME = args.MODEL_NAME.strip()
if not args.MODEL_NAME.endswith('.pth'):
args.MODEL_NAME += '.pth'
prxxx(f'Loading {args.MODEL_NAME} ...')
with torch.no_grad():
self.w = torch.load(args.MODEL_NAME, map_location='cpu') # load model to CPU first
gc.collect()
w = self.w
ALREADY_CONVERTED = False
if '_strategy' in w:
ALREADY_CONVERTED = True
assert convert_and_save_and_exit == None # you should only convert a raw model
prxxx(f"Converted model: strategy {w['_strategy']}, version {w['_version']}\n")
assert w['_strategy'] == args.strategy_string # if you are using a new strategy, re-convert the model
assert float(w['_version']) >= 0.7 # sometimes you should re-convert using latest convert_model.py
assert w['_rescale_layer'] == self.RESCALE_LAYER
del w['_strategy']
del w['_version']
del w['_rescale_layer']
args.n_embd = w['emb.weight'].shape[1]
args.n_layer = 0
keys = list(w.keys())
for x in keys:
layer_id = int(x.split('.')[1]) if ('blocks.' in x) else 0
args.n_layer = max(args.n_layer, layer_id+1)
####################### Compute strategy
s = [x.strip().split(' ') for x in strategy.split('->')]
plan = [0] * len(s)
stream_i = -1
stream_count = 0
to_allocate = args.n_layer + 1
allocated = 0
free_slots = 0
for i in range(len(s)):
si = s[i]
si1 = si[1]
if si1.startswith('fp32'): si[1] = [torch.float]
elif si1.startswith('fp16'): si[1] = [torch.float16]
elif si1.startswith('bf16'): si[1] = [torch.bfloat16]
if si1.endswith('i8'): si[1] += [torch.uint8]
else: si[1] += [si[1][0]]
if len(si) > 2:
ss = si[2]
assert ss.startswith('*')
if ss.endswith('+'):
plan[i] = int(ss[1:-1])
stream_i = i
else:
plan[i] = int(ss[1:])
allocated += plan[i]
if allocated >= to_allocate:
plan[i] += to_allocate - allocated
break
else:
free_slots += 1
if stream_i < 0:
if free_slots > 0 and to_allocate > allocated:
for i in range(len(s)):
if plan[i] == 0:
plan[i] = (to_allocate - allocated) // free_slots
allocated += plan[i]
free_slots -= 1
if to_allocate > allocated:
plan[len(s)-1] += to_allocate - allocated
else:
if to_allocate > allocated:
stream_count = to_allocate - allocated
plan[stream_i] += stream_count
prxxx(f'Strategy: (total {args.n_layer}+1={args.n_layer+1} layers)')
for i in range(len(s)):
ss = s[i]
if i != stream_i:
prxxx(f'* {ss[0]} {str(ss[1]).replace("torch.","")}, store {plan[i]} layers')
else:
prxxx(f'* {ss[0]} {str(ss[1]).replace("torch.","")}, store {plan[i]-stream_count} layers, stream {stream_count} layers')
plan[i] += (0 if i == 0 else plan[i-1])
self.strategy = [None] * (args.n_layer + 1)
strategy = self.strategy
for n in range(args.n_layer + 1):
for i in range(len(s)):
if n < plan[i]:
strategy[n] = types.SimpleNamespace()
strategy[n].device = s[i][0]
strategy[n].atype = s[i][1][0]
strategy[n].wtype = s[i][1][1]
strategy[n].stream = False
if i == stream_i and n >= (plan[i] - stream_count):
strategy[n].stream = True
break
prxxx(f"{n}-{strategy[n].device}-{str(strategy[n].atype).replace('torch.','')}-{str(strategy[n].wtype).replace('torch.','')}{'-stream' if strategy[n].stream else ''}",end=' ')
prxxx()
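        # Worked example (hypothetical): n_layer = 24 with 'cuda fp16 *10 -> cpu fp32'
        # gives to_allocate = 25 (n_layer + 1 for the head) and plan = [10, 15]:
        # layers 0-9 on cuda fp16, layers 10-23 plus the head on cpu fp32.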
####################### Load weights to self.w
if not ALREADY_CONVERTED:
try: # precompute embedding
w['emb.weight'] = F.layer_norm(w['emb.weight'], (args.n_embd,), weight=w['blocks.0.ln0.weight'], bias=w['blocks.0.ln0.bias'])
except:
w['emb.weight'] = F.layer_norm(w['emb.weight'].float(), (args.n_embd,), weight=w['blocks.0.ln0.weight'].float(), bias=w['blocks.0.ln0.bias'].float())
del w['blocks.0.ln0.weight']
del w['blocks.0.ln0.bias']
print_need_newline = False
keys = list(w.keys())
for x in keys:
w[x].requires_grad = False
layer_id = int(x.split('.')[1]) if ('blocks.' in x) else 0
if ('ln_out.' in x) or ('head.' in x):
layer_id = args.n_layer
dd = strategy[layer_id]
DEVICE = dd.device
ATYPE = dd.atype
WTYPE = dd.wtype
if not ALREADY_CONVERTED:
if self.RESCALE_LAYER > 0:
if 'att.output.weight' in x:
w[x] = w[x] / (2 ** int(layer_id // self.RESCALE_LAYER))
if 'ffn.value.weight' in x:
w[x] = w[x] / (2 ** int(layer_id // self.RESCALE_LAYER))
if '.time_' in x:
w[x] = w[x].squeeze()
if 'key.weight' in x or 'value.weight' in x or 'receptance.weight' in x or 'output.weight' in x or 'head.weight' in x:
w[x] = w[x].t()
if '.time_decay' in x: # need fp32 for this
w[x] = -torch.exp(w[x].float())
elif '.time_first' in x: # need fp32 for this
w[x] = w[x].float()
else:
if (len(w[x].shape) == 2) and ('emb' not in x):
if WTYPE != torch.uint8:
w[x] = w[x].to(dtype=WTYPE)
else:
w[x] = w[x].float()
if w[x].shape[0] > w[x].shape[1]:
w[x+'_my'] = torch.amin(w[x], dim=1).unsqueeze(1)
w[x] = w[x] - w[x+'_my']
w[x+'_mx'] = torch.amin(w[x], dim=0)
w[x] = w[x] - w[x+'_mx']
w[x+'_rx'] = torch.amax(w[x], dim=0)
w[x] = w[x] / w[x+'_rx']
w[x+'_ry'] = torch.amax(w[x], dim=1).unsqueeze(1)
w[x] = w[x] / w[x+'_ry']
else:
w[x+'_mx'] = torch.amin(w[x], dim=0)
w[x] = w[x] - w[x+'_mx']
w[x+'_my'] = torch.amin(w[x], dim=1).unsqueeze(1)
w[x] = w[x] - w[x+'_my']
w[x+'_rx'] = torch.amax(w[x], dim=0)
w[x] = w[x] / w[x+'_rx']
w[x+'_ry'] = torch.amax(w[x], dim=1).unsqueeze(1)
w[x] = w[x] / w[x+'_ry']
w[x] = torch.clip(torch.floor(w[x] * 256), min=0, max=255).to(dtype=torch.uint8)
w[x+'_mx'] = w[x+'_mx'].to(dtype=ATYPE).contiguous()
w[x+'_rx'] = (w[x+'_rx'] / 16).to(dtype=ATYPE).contiguous()
w[x+'_my'] = w[x+'_my'].to(dtype=ATYPE).contiguous()
w[x+'_ry'] = (w[x+'_ry'] / 16).to(dtype=ATYPE).contiguous()
else:
w[x] = w[x].to(dtype=ATYPE)
if convert_and_save_and_exit == None:
if 'emb.' in x:
w[x] = w[x].contiguous()
elif (dd.stream) and (x.endswith('key.weight') or x.endswith('value.weight') or x.endswith('receptance.weight') or x.endswith('output.weight')):
try:
w[x] = w[x].contiguous().pin_memory() # if you see "CUDA error: out of memory" here, that's out of CPU RAM, not VRAM. Get more RAM :)
except:
print('Note: You are running out of RAM. Get more CPU RAM. Now this will run much slower.')
elif DEVICE != 'cpu':
w[x] = w[x].to(device=DEVICE).contiguous()
if (dd.stream) or (DEVICE != 'cpu'):
try:
w[x+'_mx'] = w[x+'_mx'].to(device=DEVICE).contiguous()
w[x+'_rx'] = w[x+'_rx'].to(device=DEVICE).contiguous()
w[x+'_my'] = w[x+'_my'].to(device=DEVICE).contiguous()
w[x+'_ry'] = w[x+'_ry'].to(device=DEVICE).contiguous()
except:
pass
if 'ffn.value.weight' in x:
gc.collect()
if 'cuda' in args.strategy_string:
torch.cuda.empty_cache()
shape = [i for i in w[x].shape if i != 1]
if len(shape) > 1:
shape = f" {str(shape[0]).rjust(5)} {str(shape[1]).rjust(5)}"
else:
shape = f" {str(shape[0]).rjust(5)} "
if layer_id == 0 or layer_id >= args.n_layer-1:
if print_need_newline:
prxxx('\n', end = '')
print_need_newline = False
dt = str(w[x].dtype).replace('torch.', '')
dt = dt.replace('float32', 'f32').replace('bfloat16', 'bf16').replace('float16', 'f16').replace('uint8', 'i8')
prxxx(x.ljust(32), dt.rjust(4), str(w[x].device).rjust(8), shape, ' (pinned)' if w[x].is_pinned() else '')
else:
print_need_newline = True
prxxx('.', end = '', flush = True)
if convert_and_save_and_exit:
w['_strategy'] = args.strategy_string
w['_rescale_layer'] = self.RESCALE_LAYER
w['_version'] = '0.7'
if not convert_and_save_and_exit.endswith('.pth'):
convert_and_save_and_exit += '.pth'
prxxx(f'Saving to {convert_and_save_and_exit}...')
torch.save(w, convert_and_save_and_exit)
prxxx(f'Converted and saved. Now this will exit.')
exit(0)
gc.collect()
if 'cuda' in args.strategy_string:
torch.cuda.empty_cache()
@MyFunction
def torch_mm8_seq(self, x, w, mx, rx, my, ry):
return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)
@MyFunction
def torch_mm8_one(self, x, w, mx, rx, my, ry):
return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)
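    # torch_mm8_* undo the uint8 quantization performed at load time: w stores
    # floor(normalized_value * 256), so (w + 0.5) recenters each bin; the stored
    # rx and ry each carry an extra 1/16 factor (1/256 combined) on top of the
    # per-column/per-row ranges, and mx/my add back the subtracted minima.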
if os.environ.get('RWKV_CUDA_ON') == '1':
@MyFunction
def mm8_seq(self, x, w, mx, rx, my, ry):
if w.device.type == 'cuda' and x.dtype == torch.float16:
B, N, M = x.shape[0], w.shape[0], w.shape[1]
return cuda_mm8_seq(B, N, M, x, w, mx, rx, my, ry)
else:
return self.torch_mm8_seq(x, w, mx, rx, my, ry)
@MyFunction
def mm8_one(self, x, w, mx, rx, my, ry):
if w.device.type == 'cuda':
N, M = w.shape[0], w.shape[1]
return cuda_mm8_one(N, M, x, w, mx, rx, my, ry)
else:
return self.torch_mm8_one(x, w, mx, rx, my, ry)
else:
@MyFunction
def mm8_seq(self, x, w, mx, rx, my, ry):
return self.torch_mm8_seq(x, w, mx, rx, my, ry)
@MyFunction
def mm8_one(self, x, w, mx, rx, my, ry):
return self.torch_mm8_one(x, w, mx, rx, my, ry)
########################################################################################################
@MyFunction
def ffn_one(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(rx @ rw)
vx = torch.square(torch.relu(kx @ kw))
out = r * (vx @ vw)
return x + out, xx
@MyFunction
def ffn_one_i8(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_one(rx, rw, rmx, rrx, rmy, rry))
vx = torch.square(torch.relu(self.mm8_one(kx, kw, kmx, krx, kmy, kry)))
out = r * (self.mm8_one(vx, vw, vmx, vrx, vmy, vry))
return x + out, xx
########################################################################################################
@MyFunction
def ffn_seq(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(rx @ rw)
vx = torch.square(torch.relu(kx @ kw))
out = r * (vx @ vw)
return x + out, xx[-1,:]
@MyFunction
def ffn_seq_i8(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
vx = torch.square(torch.relu(self.mm8_seq(kx, kw, kmx, krx, kmy, kry)))
out = r * (self.mm8_seq(vx, vw, vmx, vrx, vmy, vry))
return x + out, xx[-1,:]
########################################################################################################
@MyFunction
def att_one(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(rx @ rw)
k = (kx @ kw).float()
v = (vx @ vw).float()
ww = t_first + k
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, k)
e1 = torch.exp(ww - p)
e2 = torch.exp(k - p)
out = (r * wkv) @ ow
return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p
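    # Numerical-stability note: aa and bb hold the wkv numerator and denominator
    # scaled by exp(-pp), where pp tracks the largest exponent seen so far, so
    # every exp() above has a non-positive argument and cannot overflow in fp32.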
@MyFunction
def att_one_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_one(rx, rw, rmx, rrx, rmy, rry))
k = (self.mm8_one(kx, kw, kmx, krx, kmy, kry)).float()
v = (self.mm8_one(vx, vw, vmx, vrx, vmy, vry)).float()
ww = t_first + k
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, k)
e1 = torch.exp(ww - p)
e2 = torch.exp(k - p)
out = self.mm8_one(r * wkv, ow, omx, orx, omy, ory)
return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p
########################################################################################################
@MyFunction
def att_seq(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(rx @ rw)
k = (kx @ kw).float()
v = (vx @ vw).float()
T = x.shape[0]
for t in range(T):
kk = k[t]
vv = v[t]
ww = t_first + kk
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, kk)
e1 = torch.exp(ww - p)
e2 = torch.exp(kk - p)
aa = e1 * aa + e2 * vv
bb = e1 * bb + e2
pp = p
out = (r * sx) @ ow
return x + out, xx[-1,:], aa, bb, pp
@MyFunction
def att_seq_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
k = self.mm8_seq(kx, kw, kmx, krx, kmy, kry).float()
v = self.mm8_seq(vx, vw, vmx, vrx, vmy, vry).float()
T = x.shape[0]
for t in range(T):
kk = k[t]
vv = v[t]
ww = t_first + kk
p = torch.maximum(pp, ww)
e1 = torch.exp(pp - p)
e2 = torch.exp(ww - p)
sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
ww = t_decay + pp
p = torch.maximum(ww, kk)
e1 = torch.exp(ww - p)
e2 = torch.exp(kk - p)
aa = e1 * aa + e2 * vv
bb = e1 * bb + e2
pp = p
out = self.mm8_seq(r * sx, ow, omx, orx, omy, ory)
return x + out, xx[-1,:], aa, bb, pp
########################################################################################################
if os.environ["RWKV_CUDA_ON"] == '1':
@MyFunction
def cuda_att_seq(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
T, C = x.size()
xx = F.layer_norm(x, (C,), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(rx @ rw)
k = kx @ kw
v = vx @ vw
y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)
out = (r * y) @ ow
return x + out, xx[-1,:], aa, bb, pp
@MyFunction
def cuda_att_seq_i8(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
T, C = x.size()
xx = F.layer_norm(x, (C,), weight=ln_w, bias=ln_b)
sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
kx = xx * k_mix + sx * (1 - k_mix)
vx = xx * v_mix + sx * (1 - v_mix)
rx = xx * r_mix + sx * (1 - r_mix)
r = torch.sigmoid(self.mm8_seq(rx, rw, rmx, rrx, rmy, rry))
k = self.mm8_seq(kx, kw, kmx, krx, kmy, kry)
v = self.mm8_seq(vx, vw, vmx, vrx, vmy, vry)
y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)
out = self.mm8_seq(r * y, ow, omx, orx, omy, ory)
return x + out, xx[-1,:], aa, bb, pp
########################################################################################################
def forward(self, tokens, state, full_output=False):
with torch.no_grad():
w = self.w
args = self.args
if state == None:
state = [None] * args.n_layer * 5
for i in range(args.n_layer): # state: 0=att_xx 1=att_aa 2=att_bb 3=att_pp 4=ffn_xx
dd = self.strategy[i]
dev = dd.device
atype = dd.atype
state[i*5+0] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
state[i*5+1] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous()
state[i*5+2] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous()
state[i*5+3] = torch.zeros(args.n_embd, dtype=torch.float, requires_grad=False, device=dev).contiguous() - 1e30
state[i*5+4] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
seq_mode = len(tokens) > 1
x = w['emb.weight'][tokens if seq_mode else tokens[0]]
for i in range(args.n_layer):
bbb = f'blocks.{i}.'
att = f'blocks.{i}.att.'
ffn = f'blocks.{i}.ffn.'
dd = self.strategy[i]
dev = dd.device
atype = dd.atype
wtype = dd.wtype
if seq_mode:
if 'cuda' in str(dev) and os.environ["RWKV_CUDA_ON"] == '1':
ATT = self.cuda_att_seq if wtype != torch.uint8 else self.cuda_att_seq_i8
else:
ATT = self.att_seq if wtype != torch.uint8 else self.att_seq_i8
FFN = self.ffn_seq if wtype != torch.uint8 else self.ffn_seq_i8
else:
ATT = self.att_one if wtype != torch.uint8 else self.att_one_i8
FFN = self.ffn_one if wtype != torch.uint8 else self.ffn_one_i8
x = x.to(dtype=atype, device=dev)
kw = w[f'{att}key.weight']
vw = w[f'{att}value.weight']
rw = w[f'{att}receptance.weight']
ow = w[f'{att}output.weight']
if dd.stream:
kw = kw.to(device=dev, non_blocking=True)
vw = vw.to(device=dev, non_blocking=True)
rw = rw.to(device=dev, non_blocking=True)
ow = ow.to(device=dev, non_blocking=True)
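                    # Streamed layers keep their big matrices pinned in CPU RAM
                    # and copy them to the GPU just-in-time; they are deleted
                    # again right after the ATT call below.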
kmx = w[f'{att}key.weight_mx'] if wtype == torch.uint8 else x
krx = w[f'{att}key.weight_rx'] if wtype == torch.uint8 else x
kmy = w[f'{att}key.weight_my'] if wtype == torch.uint8 else x
kry = w[f'{att}key.weight_ry'] if wtype == torch.uint8 else x
vmx = w[f'{att}value.weight_mx'] if wtype == torch.uint8 else x
vrx = w[f'{att}value.weight_rx'] if wtype == torch.uint8 else x
vmy = w[f'{att}value.weight_my'] if wtype == torch.uint8 else x
vry = w[f'{att}value.weight_ry'] if wtype == torch.uint8 else x
rmx = w[f'{att}receptance.weight_mx'] if wtype == torch.uint8 else x
rrx = w[f'{att}receptance.weight_rx'] if wtype == torch.uint8 else x
rmy = w[f'{att}receptance.weight_my'] if wtype == torch.uint8 else x
rry = w[f'{att}receptance.weight_ry'] if wtype == torch.uint8 else x
omx = w[f'{att}output.weight_mx'] if wtype == torch.uint8 else x
orx = w[f'{att}output.weight_rx'] if wtype == torch.uint8 else x
omy = w[f'{att}output.weight_my'] if wtype == torch.uint8 else x
ory = w[f'{att}output.weight_ry'] if wtype == torch.uint8 else x
x, state[i*5+0], state[i*5+1], state[i*5+2], state[i*5+3] = ATT(
x, state[i*5+0], state[i*5+1], state[i*5+2], state[i*5+3],
w[f'{bbb}ln1.weight'], w[f'{bbb}ln1.bias'],
w[f'{att}time_mix_k'], w[f'{att}time_mix_v'], w[f'{att}time_mix_r'],
w[f'{att}time_decay'], w[f'{att}time_first'],
kw, vw, rw, ow,
kmx, krx, kmy, kry,
vmx, vrx, vmy, vry,
rmx, rrx, rmy, rry,
omx, orx, omy, ory,
)
if dd.stream:
del kw, vw, rw, ow
kw = w[f'{ffn}key.weight']
vw = w[f'{ffn}value.weight']
rw = w[f'{ffn}receptance.weight']
if dd.stream:
kw = kw.to(device=dev, non_blocking=True)
vw = vw.to(device=dev, non_blocking=True)
rw = rw.to(device=dev, non_blocking=True)
kmx = w[f'{ffn}key.weight_mx'] if wtype == torch.uint8 else x
krx = w[f'{ffn}key.weight_rx'] if wtype == torch.uint8 else x
kmy = w[f'{ffn}key.weight_my'] if wtype == torch.uint8 else x
kry = w[f'{ffn}key.weight_ry'] if wtype == torch.uint8 else x
vmx = w[f'{ffn}value.weight_mx'] if wtype == torch.uint8 else x
vrx = w[f'{ffn}value.weight_rx'] if wtype == torch.uint8 else x
vmy = w[f'{ffn}value.weight_my'] if wtype == torch.uint8 else x
vry = w[f'{ffn}value.weight_ry'] if wtype == torch.uint8 else x
rmx = w[f'{ffn}receptance.weight_mx'] if wtype == torch.uint8 else x
rrx = w[f'{ffn}receptance.weight_rx'] if wtype == torch.uint8 else x
rmy = w[f'{ffn}receptance.weight_my'] if wtype == torch.uint8 else x
rry = w[f'{ffn}receptance.weight_ry'] if wtype == torch.uint8 else x
x, state[i*5+4] = FFN(
x, state[i*5+4],
w[f'{bbb}ln2.weight'], w[f'{bbb}ln2.bias'],
w[f'{ffn}time_mix_k'], w[f'{ffn}time_mix_r'],
kw, vw, rw,
kmx, krx, kmy, kry,
vmx, vrx, vmy, vry,
rmx, rrx, rmy, rry,
)
if dd.stream:
del kw, vw, rw
if self.RESCALE_LAYER > 0:
if (i+1) % self.RESCALE_LAYER == 0:
x = x / 2
dd = self.strategy[args.n_layer]
x = x[-1,:] if (seq_mode and (not full_output)) else x
x = x.to(dtype=dd.atype, device=dd.device)
x = F.layer_norm(x, (args.n_embd,), weight=w['ln_out.weight'], bias=w['ln_out.bias'])
if w['head.weight'].dtype != torch.uint8:
x = x @ w['head.weight']
else:
if seq_mode and full_output:
x = self.mm8_seq(x, w['head.weight'], w['head.weight_mx'], w['head.weight_rx'], w['head.weight_my'], w['head.weight_ry'])
else:
x = self.mm8_one(x, w['head.weight'], w['head.weight_mx'], w['head.weight_rx'], w['head.weight_my'], w['head.weight_ry'])
return x.float(), state
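A minimal usage sketch for the class above (a sketch only, assuming the usual rwkv pip-package layout; the checkpoint path and token ids here are hypothetical):

from rwkv.model import RWKV

model = RWKV(model='./models/RWKV-4-World-0.1B-v1-20230520-ctx4096', strategy='cpu fp32')
logits, state = model.forward([187, 510, 1563], None)  # prefill a short prompt
logits, state = model.forward([310], state)            # then decode one token at a time
print(logits.shape)  # 1-D logits for the last position unless full_output=True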

File diff suppressed because it is too large.

build/appicon.png (binary, not shown): 102 KiB → 83 KiB


@@ -8,7 +8,7 @@
<key>CFBundleExecutable</key>
<string>{{.Name}}</string>
<key>CFBundleIdentifier</key>
-<string>com.wails.{{.Name}}</string>
+<string>dev.josStorer.RWKV-Runner</string>
<key>CFBundleVersion</key>
<string>{{.Info.ProductVersion}}</string>
<key>CFBundleGetInfoString</key>


@@ -8,7 +8,7 @@
<key>CFBundleExecutable</key>
<string>{{.Name}}</string>
<key>CFBundleIdentifier</key>
-<string>com.wails.{{.Name}}</string>
+<string>dev.josStorer.RWKV-Runner</string>
<key>CFBundleVersion</key>
<string>{{.Info.ProductVersion}}</string>
<key>CFBundleGetInfoString</key>

build/darwin/Readme_Install.txt (new file)

@@ -0,0 +1,13 @@
For Mac and Linux users: please install Python 3.10 manually (recent systems usually ship with it). You can specify which Python interpreter to use in Settings (see: which python3).
对于Mac和Linux用户请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器. (which python3)
MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。 (which python3)
Please execute this program in an empty directory. All related dependencies will be placed in this directory.
请将本程序放在一个空目录内执行, 所有相关依赖均会放置于此目录.
このプログラムを空のディレクトリで実行してください。関連するすべての依存関係は、このディレクトリに配置されます。
Please execute the following command in the terminal to remove this app's permission restrictions; the program will then work properly:
请在终端执行以下命令解除本app的权限限制, 然后本程序才可以正常工作:
このアプリの権限制限を解除するために、ターミナルで以下のコマンドを実行してください。その後、このプログラムは正常に動作するようになります:
sudo xattr -r -d com.apple.quarantine ./RWKV-Runner.app

build/darwin/entitlements.plist (new file)

@@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>com.apple.security.app-sandbox</key>
<false/>
<key>com.apple.security.network.client</key>
<true/>
<key>com.apple.security.network.server</key>
<true/>
<key>com.apple.security.files.user-selected.read-write</key>
<true/>
<key>com.apple.security.files.downloads.read-write</key>
<true/>
</dict>
</plist>

build/darwin/gon-sign.json (new file)

@@ -0,0 +1,17 @@
{
"source": [
"./build/bin/RWKV-Runner_darwin.app"
],
"bundle_id": "dev.josStorer.RWKV-Runner",
"apple_id": {
"username": "joshua1466587594@outlook.com",
"password": ""
},
"sign": {
"application_identity": "D00A983569B4EAA2A008B963254F385F42A493FD",
"entitlements_file": "./build/darwin/entitlements.plist"
},
"zip": {
"output_path": "./build/bin/RWKV-Runner_darwin.archive.zip"
}
}

build/linux/Readme_Install.txt (new file)

@@ -0,0 +1,19 @@
For Mac and Linux users: please install Python 3.10 manually (recent systems usually ship with it). You can specify which Python interpreter to use in Settings.
对于Mac和Linux用户请手动安装 Python3.10 (通常最新的系统已经内置了). 你可以在设置中指定使用的Python解释器.
MacおよびLinuxのユーザーの方は、Python3.10を手動でインストールしてください(通常、最新のシステムには既に組み込まれています)。 設定メニューで使用するPythonインタプリタを指定することができます。
Please execute this program in an empty directory. All related dependencies will be placed in this directory.
请将本程序放在一个空目录内执行, 所有相关依赖均会放置于此目录.
このプログラムを空のディレクトリで実行してください。関連するすべての依存関係は、このディレクトリに配置されます。
On Linux systems, this program cannot open a terminal to install dependencies automatically, so you must run the following commands manually before it can be used:
在Linux系统下, 本程序无法调用终端自动安装依赖, 你必须手动执行以下命令进行安装, 之后方可正常使用:
Linuxシステムでは、このプログラムはターミナルを自動的に呼び出して依存関係をインストールすることができません。以下のコマンドを手動で実行する必要があります。それが完了した後に、正常に使用することができます:
sudo apt install python3-dev
chmod +x ./RWKV-Runner
./RWKV-Runner
cd backend-python
pip3 install -r requirements.txt # or pip3 install -r requirements_without_cyac.txt
# See More: https://github.com/josStorer/RWKV-Runner/tree/master/deploy-examples

build/windows/Readme_Install.txt (new file)

@@ -0,0 +1,3 @@
Please execute this program in an empty directory. All related dependencies will be placed in this directory.
请将本程序放在一个空目录内执行, 所有相关依赖均会放置于此目录.
このプログラムを空のディレクトリで実行してください。関連するすべての依存関係は、このディレクトリに配置されます。

build/windows/icon.ico (binary, not shown): 147 KiB → 175 KiB


@@ -0,0 +1,24 @@
: install git python3.10 yarn by yourself
: change model and strategy according to your hardware
mkdir RWKV-Next-Web
cd RWKV-Next-Web
git clone https://github.com/josStorer/RWKV-Runner --depth=1
python -m pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117
python -m pip install -r RWKV-Runner/backend-python/requirements.txt
start python ./RWKV-Runner/backend-python/main.py
powershell -Command "(Test-Path ./RWKV-Runner/models) -or (mkdir RWKV-Runner/models)"
powershell -Command "Import-Module BitsTransfer"
powershell -Command "(Test-Path ./RWKV-Runner/models/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth) -or (Start-BitsTransfer https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth ./RWKV-Runner/models/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth)"
powershell -Command "Invoke-WebRequest http://127.0.0.1:8000/switch-model -Method POST -ContentType 'application/json' -Body '{\"model\":\"./RWKV-Runner/models/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth\",\"strategy\":\"cuda fp32 *20+\"}'"
git clone https://github.com/Yidadaa/ChatGPT-Next-Web --depth=1
cd ChatGPT-Next-Web
call yarn install
call yarn build
set PROXY_URL=""
set BASE_URL=http://127.0.0.1:8000
start "C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe" "http://127.0.0.1:3000"
yarn start


@@ -0,0 +1,27 @@
# install git python3.10 yarn by yourself
# change model and strategy according to your hardware
sudo apt install python3-dev
mkdir RWKV-Next-Web
cd RWKV-Next-Web
git clone https://github.com/josStorer/RWKV-Runner --depth=1
python3 -m pip install torch torchvision torchaudio
python3 -m pip install -r RWKV-Runner/backend-python/requirements.txt
python3 ./RWKV-Runner/backend-python/main.py > log.txt & # this is only an example; use screen or a similar tool to run it in the background
if [ ! -d RWKV-Runner/models ]; then
mkdir RWKV-Runner/models
fi
wget -N https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-0.1B-v1-20230520-ctx4096.pth -P RWKV-Runner/models/
git clone https://github.com/Yidadaa/ChatGPT-Next-Web --depth=1
cd ChatGPT-Next-Web
yarn install
yarn build
export PROXY_URL=""
export BASE_URL=http://127.0.0.1:8000
yarn start & # this is only an example; use screen or a similar tool to run it in the background
curl http://127.0.0.1:8000/switch-model -X POST -H "Content-Type: application/json" -d '{"model":"./RWKV-Runner/models/RWKV-4-World-0.1B-v1-20230520-ctx4096.pth","strategy":"cpu fp32"}'


@@ -0,0 +1,19 @@
: install git python3.10 npm by yourself
: change model and strategy according to your hardware
git clone https://github.com/josStorer/RWKV-Runner --depth=1
python -m pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cu117
python -m pip install -r RWKV-Runner/backend-python/requirements.txt
cd RWKV-Runner/frontend
call npm ci
call npm run build
cd ..
: optional: set ngrok_token=YOUR_NGROK_TOKEN
start python ./backend-python/main.py --webui
start "C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe" "http://127.0.0.1:8000"
powershell -Command "(Test-Path ./models) -or (mkdir models)"
powershell -Command "Import-Module BitsTransfer"
powershell -Command "(Test-Path ./models/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth) -or (Start-BitsTransfer https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth ./models/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth)"
powershell -Command "Invoke-WebRequest http://127.0.0.1:8000/switch-model -Method POST -ContentType 'application/json' -Body '{\"model\":\"./models/RWKV-4-World-1.5B-v1-fixed-20230612-ctx4096.pth\",\"strategy\":\"cuda fp32 *20+\",\"deploy\":\"true\"}'"


@@ -0,0 +1,22 @@
# install git python3.10 npm by yourself
# change model and strategy according to your hardware
sudo apt install python3-dev
git clone https://github.com/josStorer/RWKV-Runner --depth=1
python3 -m pip install torch torchvision torchaudio
python3 -m pip install -r RWKV-Runner/backend-python/requirements.txt
cd RWKV-Runner/frontend
npm ci
npm run build
cd ..
# optional: export ngrok_token=YOUR_NGROK_TOKEN
python3 ./backend-python/main.py --webui > log.txt & # this is only an example; use screen or a similar tool to run it in the background
if [ ! -d models ]; then
mkdir models
fi
wget -N https://huggingface.co/BlinkDL/rwkv-4-world/resolve/main/RWKV-4-World-0.1B-v1-20230520-ctx4096.pth -P models/
curl http://127.0.0.1:8000/switch-model -X POST -H "Content-Type: application/json" -d '{"model":"./models/RWKV-4-World-0.1B-v1-20230520-ctx4096.pth","strategy":"cpu fp32","deploy":"true"}'


@@ -0,0 +1,5 @@
{"text": "The following is an epic science fiction masterpiece that is immortalized, with delicate descriptions and grand depictions of interstellar civilization wars.\nChapter 1.\nAs I sit down to write here amidst the shadows of vine-leaves under the blue sky of southern Italy, it comes to me with a certain quality of astonishment that my participation in these amazing adventures of Mr. Cavor was, after all, the outcome of the purest accident. It might have been any one. I fell into these things at a time when I thought myself removed from the slightest possibility of disturbing experiences. I had gone to Lympne because I had imagined it the most uneventful place in the world. “Here, at any rate,” said I, “I shall find peace and a chance to work!”"}
{"text": "Translate the following into Chinese.\n\nEnglish: What rooms do you have available?\nChinese: 你们有哪些房间可以提供"}
{"text": "User: Hello.\n\nAssistant: I'm here, meow~.\n\nUser: Can you tell some jokes?\n\nAssistant: Of course, master. What kind of jokes would you like to hear?"}
{"text": "Instruction: Write a story using the following information\n\nInput: A man named Alex chops a tree down\n\nResponse: Once upon a time, there was a man named Alex who lived in the heart of the forest. He had always been fascinated by trees and spent most of his days exploring the forest and learning about its many wonders. One day, while wandering through the woods, he stumbled upon an old oak tree that stood tall and proud in the middle of a clearing."}
{"text": "def get_args(args: Union[Sequence[str], None] = None):\n parser = argparse.ArgumentParser()\n group = parser.add_argument_group(title=\"server arguments\")\n group.add_argument(\n \"--port\",\n type=int,\n default=8000,\n help=\"port to run the server on (default: 8000)\",\n )\n group.add_argument(\n \"--host\",\n type=str,\n default=\"127.0.0.1\",\n help=\"host to run the server on (default: 127.0.0.1)\",\n )"}


@@ -0,0 +1,57 @@
import torch
import sys
import time
import os
import threading
import gc
def file_cleaner(file):
    # Returns a polling loop that advises the kernel to drop already-read pages
    # of this file from the page cache (POSIX-only), so streaming a huge
    # checkpoint does not evict everything else from RAM.
    last_pos = 0

    def cleaner():
        nonlocal last_pos
        while True:
            time.sleep(0.1)
            pos = file.tell()
            if pos > last_pos:
                os.posix_fadvise(
                    file.fileno(), last_pos, pos - last_pos, os.POSIX_FADV_DONTNEED
                )
                last_pos = pos

    return cleaner
expected_max_version = float(sys.argv[2]) if len(sys.argv) > 2 else 100
model_file = open(sys.argv[1], "rb")
cleaner = file_cleaner(model_file)
cleaner_thread = threading.Thread(target=cleaner, daemon=True)
cleaner_thread.start()
w = torch.load(model_file, map_location="cpu")
gc.collect()
n_embd = w["emb.weight"].shape[1]
n_layer = 0
keys = list(w.keys())
version = 4  # assume RWKV-4 unless newer-architecture keys are found below
for x in keys:
    layer_id = int(x.split(".")[1]) if ("blocks." in x) else 0
    n_layer = max(n_layer, layer_id + 1)
    if "ln_x" in x:  # attention GroupNorm: introduced in RWKV-5
        version = max(5, version)
    if "gate.weight" in x:  # output gating: introduced in RWKV-5.1
        version = max(5.1, version)
    if int(version) == 5 and "att.time_decay" in x:
        if len(w[x].shape) > 1:
            if w[x].shape[1] > 1:  # multi-dimensional decay: RWKV-5.2
                version = max(5.2, version)
    if "time_maa" in x:  # data-dependent token shift: RWKV-6
        version = max(6, version)
if version <= expected_max_version:
print(f"--n_layer {n_layer} --n_embd {n_embd}", end="")
else:
raise Exception(f"RWKV{version} is not supported")


@@ -0,0 +1,58 @@
echo $@
if [[ ${cnMirror} == 1 ]]; then
export PIP_INDEX_URL="https://pypi.tuna.tsinghua.edu.cn/simple"
if grep -q "mirrors.aliyun.com" /etc/apt/sources.list; then
echo "apt cnMirror already set"
else
sudo sed -i 's/http:\/\/archive.ubuntu.com\/ubuntu\//http:\/\/mirrors.aliyun.com\/ubuntu\//g' /etc/apt/sources.list
sudo apt update
fi
fi
if dpkg -s "gcc" >/dev/null 2>&1; then
echo "gcc installed"
else
sudo apt -y install gcc
fi
if dpkg -s "python3-pip" >/dev/null 2>&1; then
echo "pip installed"
else
sudo apt -y install python3-pip
fi
if dpkg -s "ninja-build" >/dev/null 2>&1; then
echo "ninja installed"
else
sudo apt -y install ninja-build
fi
if dpkg -s "cuda" >/dev/null 2>&1 && dpkg -s "cuda" | grep Version | awk '{print $2}' | grep -q "12"; then
echo "cuda 12 installed"
else
wget -N https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin
sudo mv cuda-wsl-ubuntu.pin /etc/apt/preferences.d/cuda-repository-pin-600
wget -N https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda-repo-wsl-ubuntu-12-2-local_12.2.0-1_amd64.deb
sudo dpkg -i cuda-repo-wsl-ubuntu-12-2-local_12.2.0-1_amd64.deb
sudo cp /var/cuda-repo-wsl-ubuntu-12-2-local/cuda-*-keyring.gpg /usr/share/keyrings/
sudo apt-get update
sudo apt-get -y install cuda
fi
if python3 -c "import pkg_resources; pkg_resources.require(open('./finetune/requirements.txt',mode='r'))" &>/dev/null; then
echo "requirements satisfied"
else
python3 -m pip install -r ./finetune/requirements.txt
fi
echo "loading $loadModel"
modelInfo=$(python3 ./finetune/get_layer_and_embd.py $loadModel 4)
echo $modelInfo
if [[ $modelInfo =~ "--n_layer" ]]; then
python3 ./finetune/lora/train.py $modelInfo $@ --proj_dir lora-models --data_type binidx --lora \
--lora_parts=att,ffn,time,ln --strategy deepspeed_stage_2 --accelerator gpu
else
echo "modelInfo is invalid"
exit 1
fi


@@ -0,0 +1,597 @@
# Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# copied from fairseq/fairseq/data/indexed_dataset.py
# Removed IndexedRawTextDataset since it relied on Fairseq dictionary
# other slight modifications to remove fairseq dependencies
# Added document index to index file and made it accessible.
# An empty sentence no longer separates documents.
import os
import shutil
import struct
from functools import lru_cache
from itertools import accumulate
import numpy as np
import torch
def __best_fitting_dtype(vocab_size=None):
if vocab_size is not None and vocab_size < 65500:
return np.uint16
else:
return np.int32
def infer_dataset_impl(path):
if IndexedDataset.exists(path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return "cached"
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return "mmap"
else:
return None
else:
print(f"Dataset does not exist: {path}")
print(
"Path should be a basename that both .idx and .bin can be appended to get full filenames."
)
return None
def make_builder(out_file, impl, vocab_size=None):
if impl == "mmap":
return MMapIndexedDatasetBuilder(
out_file, dtype=__best_fitting_dtype(vocab_size)
)
else:
return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, skip_warmup=False):
if not IndexedDataset.exists(path):
print(f"Dataset does not exist: {path}")
print(
"Path should be a basename that both .idx and .bin can be appended to get full filenames."
)
return None
if impl == "infer":
impl = infer_dataset_impl(path)
if impl == "lazy" and IndexedDataset.exists(path):
return IndexedDataset(path)
elif impl == "cached" and IndexedDataset.exists(path):
return IndexedCachedDataset(path)
elif impl == "mmap" and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path, skip_warmup)
print(f"Unknown dataset implementation: {impl}")
return None
def dataset_exists(path, impl):
if impl == "mmap":
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: np.float32,
7: np.float64,
8: np.uint16,
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + ".idx"
def data_file_path(prefix_path):
return prefix_path + ".bin"
def create_doc_idx(sizes):
doc_idx = [0]
for i, s in enumerate(sizes):
if s == 0:
doc_idx.append(i + 1)
return doc_idx
class IndexedDataset(torch.utils.data.Dataset):
"""Loader for IndexedDataset"""
_HDR_MAGIC = b"TNTIDX\x00\x00"
def __init__(self, path):
super().__init__()
self.path = path
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = f.read(8)
assert struct.unpack("<Q", version) == (1,)
code, self.element_size = struct.unpack("<QQ", f.read(16))
self.dtype = dtypes[code]
self._len, self.s = struct.unpack("<QQ", f.read(16))
self.doc_count = struct.unpack("<Q", f.read(8))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
self.doc_idx = read_longs(f, self.doc_count)
def read_data(self, path):
self.data_file = open(data_file_path(path), "rb", buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError("index out of range")
def __del__(self):
if self.data_file:
self.data_file.close()
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if not self.data_file:
self.read_data(self.path)
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
return a
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
sizes = self.sizes[self.dim_offsets[start] : self.dim_offsets[stop]]
size = sum(sizes)
a = np.empty(size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[start] * self.element_size)
self.data_file.readinto(a)
offsets = list(accumulate(sizes))
sents = np.split(a, offsets[:-1])
return sents
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return os.path.exists(index_file_path(path)) and os.path.exists(
data_file_path(path)
)
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path):
super().__init__(path)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx : ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, self.cache[ptx : ptx + a.size])
return a
elif isinstance(idx, slice):
# Hack just to make this work; can optimize later if necessary
sents = []
for i in range(*idx.indices(len(self))):
sents.append(self[i])
return sents
class IndexedDatasetBuilder(object):
element_sizes = {
np.uint8: 1,
np.int8: 1,
np.int16: 2,
np.int32: 4,
np.int64: 8,
np.float32: 4,
np.float64: 8,
}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, "wb")
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
self.doc_idx = [0]
def add_item(self, np_array):
assert isinstance(np_array, np.ndarray) and np_array.dtype == self.dtype
bytes = self.out_file.write(np_array)
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in np_array.shape:
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(np_array.shape))
def end_document(self):
self.doc_idx.append(len(self.sizes))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
begin = self.data_offsets[-1]
for offset in index.data_offsets[1:]:
self.data_offsets.append(begin + offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
with open(data_file_path(another_file), "rb") as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, "wb")
index.write(b"TNTIDX\x00\x00")
index.write(struct.pack("<Q", 1))
index.write(struct.pack("<QQ", code(self.dtype), self.element_size))
index.write(struct.pack("<QQ", len(self.data_offsets) - 1, len(self.sizes)))
index.write(struct.pack("<Q", len(self.doc_idx)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
write_longs(index, self.doc_idx)
index.close()
def _warmup_mmap_file(path):
with open(path, "rb") as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b"MMIDIDX\x00\x00"
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, "wb")
# Write magic string so we can check the file format when opening it again.
self._file.write(cls._HDR_MAGIC)
# Write version number
# Little endian unsigned 64 Bit integer
self._file.write(struct.pack("<Q", 1))
# Little endian unsigned 8 Bit integer
self._file.write(struct.pack("<B", code(dtype)))
return self
@staticmethod
def _get_pointers(sizes):
pointers = np.zeros(len(sizes), dtype=np.int64)
sizes = np.array(sizes, dtype=np.int64)
np.cumsum(sizes[:-1], out=pointers[1:])
pointers = pointers * dtype().itemsize
return pointers
def write(self, sizes, doc_idx):
pointers = self._get_pointers(sizes)
# Little endian unsigned 64 Bit integer
self._file.write(struct.pack("<Q", len(sizes)))
# Little endian unsigned 64 Bit integer
self._file.write(struct.pack("<Q", len(doc_idx)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order="C"))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order="C"))
del pointers
doc_idx = np.array(doc_idx, dtype=np.int64)
self._file.write(doc_idx.tobytes(order="C"))
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path, skip_warmup=False):
with open(path, "rb") as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
# Little endian unsigned 64 Bit integer
version = struct.unpack("<Q", stream.read(8))
assert (1,) == version
# Little endian unsigned 8 Bit integer
(dtype_code,) = struct.unpack("<B", stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack("<Q", stream.read(8))[0]
self._doc_count = struct.unpack("<Q", stream.read(8))[0]
offset = stream.tell()
if not skip_warmup:
print(" warming up index mmap file...")
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
print(" reading sizes...")
self._sizes = np.frombuffer(
self._bin_buffer, dtype=np.int32, count=self._len, offset=offset
)
print(" reading pointers...")
self._pointers = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._len,
offset=offset + self._sizes.nbytes,
)
print(" reading document index...")
self._doc_idx = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._doc_count,
offset=offset + self._sizes.nbytes + self._pointers.nbytes,
)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@property
def doc_idx(self):
return self._doc_idx
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path, skip_warmup=False):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path, skip_warmup)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path, skip_warmup):
self._path = path
self._index = self.Index(index_file_path(self._path), skip_warmup)
if not skip_warmup:
print(" warming up data mmap file...")
_warmup_mmap_file(data_file_path(self._path))
print(" creating numpy buffer of mmap...")
self._bin_buffer_mmap = np.memmap(
data_file_path(self._path), mode="r", order="C"
)
print(" creating memory view of numpy buffer...")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
ptr, size = self._index[idx]
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
)
return np_array
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
ptr = self._index._pointers[start]
sizes = self._index._sizes[idx]
offsets = list(accumulate(sizes))
total_size = sum(sizes)
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr
)
sents = np.split(np_array, offsets[:-1])
return sents
def get(self, idx, offset=0, length=None):
"""Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
ptr, size = self._index[idx]
if length is None:
length = size - offset
ptr += offset * np.dtype(self._index.dtype).itemsize
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr
)
return np_array
@property
def sizes(self):
return self._index.sizes
@property
def doc_idx(self):
return self._index.doc_idx
def get_doc_idx(self):
return self._index._doc_idx
def set_doc_idx(self, doc_idx_):
self._index._doc_idx = doc_idx_
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return os.path.exists(index_file_path(path)) and os.path.exists(
data_file_path(path)
)
class MMapIndexedDatasetBuilder(object):
def __init__(self, out_file, dtype=np.int64):
self._data_file = open(out_file, "wb")
self._dtype = dtype
self._sizes = []
self._doc_idx = [0]
@property
def dtype(self):
return self._dtype
def add_item(self, np_array):
assert isinstance(np_array, np.ndarray) and np_array.dtype == self.dtype
self._data_file.write(np_array.tobytes(order="C"))
self._sizes.append(np_array.size)
def end_document(self):
self._doc_idx.append(len(self._sizes))
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
for size in index.sizes:
self._sizes.append(size)
# Concatenate data
with open(data_file_path(another_file), "rb") as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes, self._doc_idx)
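A small round-trip sketch for the mmap variant above (a sketch only; assumes this module is importable as indexed_dataset, and the file names are hypothetical):

import numpy as np
import indexed_dataset

builder = indexed_dataset.make_builder("demo.bin", impl="mmap", vocab_size=50277)
builder.add_item(np.array([1, 2, 3], dtype=builder.dtype))  # one tokenized sentence
builder.end_document()                                      # mark the document boundary
builder.finalize("demo.idx")                                # write the index file

ds = indexed_dataset.MMapIndexedDataset("demo")  # basename; .bin/.idx are appended
print(ds[0])  # -> [1 2 3]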


@@ -0,0 +1,251 @@
# Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processing data for pretraining."""
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import argparse
import multiprocessing
import lm_dataformat as lmd
import numpy as np
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)
import time
import tqdm
import ftfy
from tokenizer import build_tokenizer
import indexed_dataset
from threading import Semaphore
class Encoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
# Use Encoder class as a container for global data
Encoder.tokenizer = build_tokenizer(self.args)
def encode(self, text):
if self.args.ftfy:
text = ftfy.fix_text(text)
ids = {}
for key in self.args.jsonl_keys:
doc_ids = []
text_ids = Encoder.tokenizer.tokenize(text)
if len(text_ids) > 0:
doc_ids.append(text_ids)
if self.args.append_eod:
doc_ids[-1].append(Encoder.tokenizer.eod)
ids[key] = doc_ids
return ids, len(text)
def get_args():
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title="input data")
group.add_argument(
"--input",
type=str,
required=True,
help="Path to input jsonl files or lmd archive(s) - if using multiple archives, put them in a comma separated "
"list",
)
group.add_argument(
"--jsonl-keys",
nargs="+",
default=["text"],
help="space-separated list of keys to extract from jsonl. Default: ['text']",
)
group.add_argument(
"--num-docs",
default=None,
help="Optional: Number of documents in the input data (if known) for an accurate progress bar.",
type=int,
)
group = parser.add_argument_group(title="tokenizer")
group.add_argument(
"--tokenizer-type",
type=str,
required=True,
choices=[
"HFGPT2Tokenizer",
"HFTokenizer",
"GPT2BPETokenizer",
"CharLevelTokenizer",
"TiktokenTokenizer",
"RWKVTokenizer",
],
help="What type of tokenizer to use.",
)
group.add_argument(
"--vocab-file", type=str, default=None, help="Path to the vocab file"
)
group.add_argument(
"--merge-file",
type=str,
default=None,
help="Path to the BPE merge file (if necessary).",
)
group.add_argument(
"--append-eod",
action="store_true",
help="Append an <eod> token to the end of a document.",
)
group.add_argument("--ftfy", action="store_true", help="Use ftfy to clean text")
group = parser.add_argument_group(title="output data")
group.add_argument(
"--output-prefix",
type=str,
required=True,
help="Path to binary output file without suffix",
)
group.add_argument(
"--dataset-impl",
type=str,
default="mmap",
choices=["lazy", "cached", "mmap"],
help="Dataset implementation to use. Default: mmap",
)
group = parser.add_argument_group(title="runtime")
group.add_argument(
"--workers", type=int, default=1, help="Number of worker processes to launch"
)
group.add_argument(
"--log-interval",
type=int,
default=100,
help="Interval between progress updates",
)
args = parser.parse_args()
args.keep_empty = False
# some default/dummy values for the tokenizer
args.rank = 0
args.make_vocab_size_divisible_by = 128
args.model_parallel_size = 1
return args
def yield_from_files(fnames: list, semaphore):
"""
Iterator over input documents using lm_dataformat. Should be able to handle jsons / texts /
other compressed formats. Also filters out empty documents.
:param fnames: list of filenames
"""
def yielder(fname, semaphore):
for f in filter(lambda x: x, lmd.Reader(fname).stream_data()):
semaphore.acquire()
yield f
for fname in fnames:
semaphore.acquire()
yield from yielder(fname, semaphore)
def main():
args = get_args()
encoder = Encoder(args)
tokenizer = build_tokenizer(args)
print(f"Vocab size: {tokenizer.vocab_size}")
print(f"Output prefix: {args.output_prefix}")
# build a semaphore object to stop `yield_from_files` from getting ahead of encoder.encode and
# hence building up memory
semaphore = Semaphore(10000 + args.workers)
# use multiprocessing to iterate over input documents
fin = yield_from_files(args.input.split(","), semaphore)
if args.workers > 1:
pool = multiprocessing.Pool(args.workers, initializer=encoder.initializer)
encoded_docs = pool.imap(encoder.encode, fin, chunksize=25)
else:
encoder.initializer()
encoded_docs = (encoder.encode(doc) for doc in fin)
# make a dataset builder for each key in args.jsonl_keys
# each key will output to a different file beginning with args.output_prefix
output_bin_files = {}
output_idx_files = {}
builders = {}
for key in args.jsonl_keys:
output_bin_files[key] = "{}_{}_{}.bin".format(
args.output_prefix, key, "document"
)
output_idx_files[key] = "{}_{}_{}.idx".format(
args.output_prefix, key, "document"
)
builders[key] = indexed_dataset.make_builder(
output_bin_files[key],
impl=args.dataset_impl,
vocab_size=tokenizer.vocab_size,
)
# actually do tokenization
proc_start = time.time()
total_bytes_processed = 0
pbar = tqdm.tqdm()
for i, (doc, bytes_processed) in enumerate(encoded_docs, start=1):
total_bytes_processed += bytes_processed
# release semaphore so `yield_from_files` can add another file to the buffer
semaphore.release()
# add each tokenized document / sentence
for key, sentences in doc.items():
for sentence in sentences:
builders[key].add_item(np.array(sentence, dtype=builders[key].dtype))
# separate with eos token
builders[key].end_document()
# log progress
if i % args.log_interval == 0:
current = time.time()
elapsed = current - proc_start
mbs = total_bytes_processed / elapsed / 1024 / 1024
pbar.set_description(
f"Processed {i}{'' if args.num_docs is None else '/' + str(args.num_docs)} documents ({i / elapsed:0.2f} docs/s, {mbs:0.2f} MB/s)."
)
if i != 0:
pbar.update(args.log_interval)
# save output file
for key in args.jsonl_keys:
builders[key].finalize(output_idx_files[key])
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))
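A hypothetical invocation of the script above (the input path and vocab file name are assumptions): python3 preprocess_data.py --input sample.jsonl --output-prefix data/sample --tokenizer-type RWKVTokenizer --vocab-file rwkv_vocab_v20230424.txt --append-eod. This writes data/sample_text_document.bin/.idx, the binidx pair that the finetune script above passes via --data_type binidx.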


@@ -0,0 +1,232 @@
########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
# Source: https://github.com/BlinkDL/ChatRWKV/blob/main/tokenizer/rwkv_tokenizer.py
########################################################################################################
import os, sys, time, random
print('''
#######################################################################################################################
This tokenizer is not used in any RWKV models yet. I plan to use it for future multilingual RWKV models.
Benefits:
* Good support of most languages, from European to CJK to Arabic and Hindi and more.
* Clean vocab. Good for code too. Vocab size = 65525 (use 0 for <|endoftext|>).
* Good at numbers: the numerical tokens are '0'~'9', '10'~'99', ' 0'~' 9', ' 10'~' 99'.
* Very easy tokenization:
** The input text must be in UTF-8.
** Greedy encoding: always pick the longest (in bytes) token (with the highest id) that matches your UTF-8 bytes.
* The tokenization result is surprisingly good, because the vocab respects word boundaries and UTF-8 boundaries.
For 10x faster speed:
mypyc rwkv_tokenizer.py
python3 -c "import rwkv_tokenizer"
#######################################################################################################################
''')
########################################################################################################
# Tokenizer #1 (reference, naive, slow)
########################################################################################################
class RWKV_TOKENIZER():
    table = None  # list[list[list[bytes]]] - table[s0][s1]: tokens (>= 2 bytes) starting with bytes s0, s1
    good = None   # list[set[int]] - good[s0]: valid second bytes after s0
    wlen = None   # list[int] - wlen[s0]: longest token length starting with byte s0
    def __init__(self, file_name):
        self.vocab_size = 65525
        self.idx2token = {}
        sorted_tokens = []  # the vocab file must already be sorted by token id
        with open(file_name, "r", encoding="utf-8") as f:
            lines = f.readlines()
        for l in lines:
            # each line looks like: <id> <token repr> <byte length>
            idx = int(l[:l.index(' ')])
            x = eval(l[l.index(' '):l.rindex(' ')])
            x = x.encode("utf-8") if isinstance(x, str) else x
            assert isinstance(x, bytes)
            assert len(x) == int(l[l.rindex(' '):])
            sorted_tokens += [x]
            self.idx2token[idx] = x
        self.token2idx = {}
        for k, v in self.idx2token.items():
            self.token2idx[v] = int(k)
        # precompute tables for fast matching (see the class attribute comments)
        self.table = [[[] for j in range(256)] for i in range(256)]
        self.good = [set() for i in range(256)]
        self.wlen = [0 for i in range(256)]
        for i in reversed(range(len(sorted_tokens))):  # reverse order - match longer tokens first
            s = sorted_tokens[i]
            if len(s) >= 2:
                s0 = int(s[0])
                s1 = int(s[1])
                self.table[s0][s1] += [s]
                self.wlen[s0] = max(self.wlen[s0], len(s))
                self.good[s0].add(s1)
    def encodeBytes(self, src: bytes):
        src_len: int = len(src)
        tokens = []
        i: int = 0
        while i < src_len:
            s: bytes = src[i : i + 1]  # fall back to the single-byte token
            if i < src_len - 1:
                s1: int = int(src[i + 1])
                s0: int = int(src[i])
                if s1 in self.good[s0]:
                    sss: bytes = src[i : i + self.wlen[s0]]
                    try:
                        # candidates are stored longest-first, so the first match is the greedy one
                        s = next(filter(sss.startswith, self.table[s0][s1]))
                    except StopIteration:
                        pass
            tokens.append(self.token2idx[s])
            i += len(s)
        return tokens
def decodeBytes(self, tokens):
return b''.join(map(lambda i: self.idx2token[i], tokens))
def encode(self, src: str):
return self.encodeBytes(src.encode("utf-8"))
def decode(self, tokens):
return self.decodeBytes(tokens).decode('utf-8')
def token_to_id(self, token):
return self.token2idx[token]
def get_vocab_size(self):
return self.vocab_size
def get_vocab(self):
return self.idx2token
def printTokens(self, tokens):
for i in tokens:
s = self.idx2token[i]
            try:
                s = s.decode('utf-8')
            except UnicodeDecodeError:
                pass  # partial UTF-8 sequence; print the raw bytes
print(f'{repr(s)}{i}', end=' ')
# print(repr(s), i)
print()
########################################################################################################
# Tokenizer #2 (trie, faster) https://github.com/TkskKurumi/ChatRWKV-TRIE-Tokenizer
########################################################################################################
class TRIE:
    __slots__ = tuple("ch,to,values,front".split(","))
    to: list
    values: set

    def __init__(self, front=None, ch=None):
        self.ch = ch
        self.to = [None for ch in range(256)]  # one child slot per byte value
        self.values = set()
        self.front = front

    def __repr__(self):
        fr = self
        ret = []
        while fr is not None:
            if fr.ch is not None:
                ret.append(fr.ch)
            fr = fr.front
        return "<TRIE %s %s>" % (ret[::-1], self.values)
    def add(self, key: bytes, idx: int = 0, val=None):
        if idx == len(key):
            if val is None:
                val = key
            self.values.add(val)
            return self
        ch = key[idx]
        if self.to[ch] is None:
            self.to[ch] = TRIE(front=self, ch=ch)
        return self.to[ch].add(key, idx=idx + 1, val=val)

    def find_longest(self, key: bytes, idx: int = 0):
        # walk as far as the bytes keep matching, remembering the deepest node
        # that holds a value; assumes every single byte is in the vocab, so at
        # least one match always exists
        u: TRIE = self
        ch: int = key[idx]
        ret = None
        while u.to[ch] is not None:
            u = u.to[ch]
            idx += 1
            if u.values:
                ret = idx, u, u.values
            if idx == len(key):
                break
            ch = key[idx]
        return ret
class TRIE_TOKENIZER():
    def __init__(self, file_name):
        self.vocab_size = 65525
        self.idx2token = {}
        with open(file_name, "r", encoding="utf-8") as f:
            lines = f.readlines()
        for l in lines:
            idx = int(l[:l.index(' ')])
            x = eval(l[l.index(' '):l.rindex(' ')])
            x = x.encode("utf-8") if isinstance(x, str) else x
            assert isinstance(x, bytes)
            assert len(x) == int(l[l.rindex(' '):])
            self.idx2token[idx] = x
        self.token2idx = {}
        for k, v in self.idx2token.items():
            self.token2idx[v] = int(k)
        # build the trie; each terminal node stores the (token bytes, token id) pair
        self.root = TRIE()
        for t, i in self.token2idx.items():
            _ = self.root.add(t, val=(t, i))
    def encodeBytes(self, src: bytes):
        idx: int = 0
        tokens = []
        while idx < len(src):
            _idx: int = idx
            idx, _, values = self.root.find_longest(src, idx)
            assert idx != _idx  # always advances: every single byte is in the vocab
            _, token = next(iter(values))
            tokens.append(token)
        return tokens
def decodeBytes(self, tokens):
return b''.join(map(lambda i: self.idx2token[i], tokens))
def encode(self, src):
return self.encodeBytes(src.encode("utf-8"))
def decode(self, tokens):
return self.decodeBytes(tokens).decode('utf-8')
def get_vocab_size(self):
return self.vocab_size
def get_vocab(self):
return self.idx2token
def printTokens(self, tokens):
for i in tokens:
s = self.idx2token[i]
            try:
                s = s.decode('utf-8')
            except UnicodeDecodeError:
                pass  # partial UTF-8 sequence; print the raw bytes
print(f'{repr(s)}{i}', end=' ')
print()
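
A quick round-trip sanity check of the two implementations (a sketch; the vocab filename follows the ChatRWKV convention and is assumed here):

slow = RWKV_TOKENIZER("rwkv_vocab_v20230424.txt")
fast = TRIE_TOKENIZER("rwkv_vocab_v20230424.txt")
text = "Hello world! 你好,世界!"
assert slow.encode(text) == fast.encode(text)   # both implement the same greedy match
assert fast.decode(fast.encode(text)) == text   # lossless round trip
fast.printTokens(fast.encode(text))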

@@ -0,0 +1,205 @@
# Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron tokenizers."""
from abc import ABC
from abc import abstractmethod
from tokenizers import Tokenizer
from rwkv_tokenizer import RWKV_TOKENIZER, TRIE_TOKENIZER
from typing import List, Union
def build_tokenizer(args):
"""Initialize tokenizer."""
if args.rank == 0:
print("> building {} tokenizer ...".format(args.tokenizer_type), flush=True)
# Select and instantiate the tokenizer.
if args.tokenizer_type.lower() == "HFTokenizer".lower():
assert args.vocab_file is not None
tokenizer = HFTokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "RWKVTokenizer".lower():
assert args.vocab_file is not None
tokenizer = RWKVTokenizer(args.vocab_file)
else:
raise NotImplementedError(
"{} tokenizer is not " "implemented.".format(args.tokenizer_type)
)
# Add vocab size.
args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args)
return tokenizer
def _vocab_size_with_padding(orig_vocab_size, args):
"""Pad vocab size so it is divisible by model parallel size and
still having GPU friendly size."""
after = orig_vocab_size
multiple = args.make_vocab_size_divisible_by * args.model_parallel_size
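    # e.g. with make_vocab_size_divisible_by=128 and model_parallel_size=1,
    # an original vocab of 65525 is padded up to 65536 (11 dummy tokens)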
while (after % multiple) != 0:
after += 1
if args.rank == 0:
print(
" > padded vocab (size: {}) with {} dummy tokens "
"(new size: {})".format(orig_vocab_size, after - orig_vocab_size, after),
flush=True,
)
return after
class AbstractTokenizer(ABC):
"""Abstract class for tokenizer."""
def __init__(self, name):
self.name = name
super().__init__()
@property
@abstractmethod
def vocab_size(self):
pass
@property
@abstractmethod
def vocab(self):
"""Dictionary from vocab text token to id token."""
pass
@property
@abstractmethod
def inv_vocab(self):
"""Dictionary from vocab id token to text token."""
pass
@abstractmethod
def tokenize(self, text):
pass
def detokenize(self, token_ids):
raise NotImplementedError(
"detokenizer is not implemented for {} " "tokenizer".format(self.name)
)
@property
def cls(self):
raise NotImplementedError(
"CLS is not provided for {} " "tokenizer".format(self.name)
)
@property
def sep(self):
raise NotImplementedError(
"SEP is not provided for {} " "tokenizer".format(self.name)
)
@property
def pad(self):
raise NotImplementedError(
"PAD is not provided for {} " "tokenizer".format(self.name)
)
@property
def eod(self):
raise NotImplementedError(
"EOD is not provided for {} " "tokenizer".format(self.name)
)
@property
def mask(self):
raise NotImplementedError(
"MASK is not provided for {} " "tokenizer".format(self.name)
)
class HFTokenizer(AbstractTokenizer):
"""Designed to Integrate HF's Tokenizer library."""
def __init__(self, vocab_file):
name = "HFTokenizer"
super().__init__(name)
self.tokenizer = Tokenizer.from_file(vocab_file)
self.eod_id = self.tokenizer.token_to_id("<|endoftext|>")
self.pad_id = self.tokenizer.token_to_id("<|padding|>")
@property
def vocab_size(self):
return self.tokenizer.get_vocab_size()
@property
def vocab(self):
return self.tokenizer.get_vocab()
    @property
    def inv_vocab(self):
        # note: returns the Tokenizer's decoder object, not an id -> token dict
        return self.tokenizer.decoder
def tokenize(self, text: str):
return self.tokenizer.encode(text).ids
def tokenize_batch(self, text_batch: Union[List[str], str]):
return self.tokenizer.encode_batch(text_batch)
def detokenize(self, token_ids):
return self.tokenizer.decode(token_ids)
@property
def eod(self):
return self.eod_id
class RWKVTokenizer(AbstractTokenizer):
"""RWKV Worlds Tokenizer."""
def __init__(self, vocab_file='rwkv_vocab_v20230424.txt'):
name = "RWKVTokenizer"
super().__init__(name)
self.tokenizer = TRIE_TOKENIZER(vocab_file)
self.eod_id = 0 # self.tokenizer.token_to_id("<|endoftext|>")
# self.pad_id = self.tokenizer.token_to_id("<|padding|>")
@property
def vocab_size(self):
return self.tokenizer.get_vocab_size()
@property
def vocab(self):
return self.tokenizer.get_vocab()
@property
def inv_vocab(self):
return self.tokenizer.decode
def tokenize(self, text: str):
return self.tokenizer.encode(text)
    def tokenize_batch(self, text_batch: Union[List[str], str]):
        # TRIE_TOKENIZER has no encode_batch, so encode each text individually
        if isinstance(text_batch, str):
            text_batch = [text_batch]
        return [self.tokenizer.encode(text) for text in text_batch]
def detokenize(self, token_ids):
return self.tokenizer.decode(token_ids)
@property
def eod(self):
return self.eod_id
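
For orientation, a minimal sketch of driving `build_tokenizer` (the Namespace stands in for the real argument parser; the vocab path is an assumption):

import argparse

args = argparse.Namespace(
    rank=0,
    tokenizer_type="RWKVTokenizer",
    vocab_file="rwkv_vocab_v20230424.txt",  # assumed path to the World vocab
    make_vocab_size_divisible_by=128,
    model_parallel_size=1,
)
tokenizer = build_tokenizer(args)  # also sets args.padded_vocab_size
ids = tokenizer.tokenize("Hello world")
print(ids, tokenizer.detokenize(ids))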

finetune/lora/cuda/wkv_cuda.cu vendored Normal file

@@ -0,0 +1,133 @@
#include <stdio.h>
#include <assert.h>
#define MIN_VALUE (-1e38)
template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C,
const F *__restrict__ const _w, const F *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
F *__restrict__ const _y) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
F u = _u[_c];
F w = _w[_c];
const F *__restrict__ const k = _k + _offset;
const F *__restrict__ const v = _v + _offset;
F *__restrict__ const y = _y + _offset;
// aa and bb are running sums divided by exp(pp) (to avoid overflow)
F aa = 0, bb = 0, pp = MIN_VALUE;
for (int i = 0; i < T; i++) {
const int ii = i * C;
const F kk = k[ii];
const F vv = v[ii];
F ww = u + kk;
F p = max(pp, ww);
F e1 = exp(pp - p);
F e2 = exp(ww - p);
y[ii] = (e1 * aa + e2 * vv) / (e1 * bb + e2);
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
}
template <typename F>
__global__ void kernel_backward(const int B, const int T, const int C,
const F *__restrict__ const _w, const F *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
const F *__restrict__ const _y, const F *__restrict__ const _gy,
F *__restrict__ const _gw, F *__restrict__ const _gu, F *__restrict__ const _gk, F *__restrict__ const _gv) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
F u = _u[_c];
F w = _w[_c];
const F *__restrict__ const k = _k + _offset;
const F *__restrict__ const v = _v + _offset;
const F *__restrict__ const y = _y + _offset;
const F *__restrict__ const gy = _gy + _offset;
F *__restrict__ const gk = _gk + _offset;
F *__restrict__ const gv = _gv + _offset;
    F q[Tmax], r[Tmax]; // Tmax comes from a -DTmax=... compile flag and bounds the sequence length T
F gw = 0, gu = 0, aa = 0, bb = 0, ga = 0, gb = 0, pp = MIN_VALUE;
for (int i = 0; i < T; i++) {
const int ii = i * C;
const F kk = k[ii];
const F vv = v[ii];
const F yy = y[ii];
F ww = u + kk;
F p = max(pp, ww);
F e1 = exp(pp - p);
F e2 = exp(ww - p);
const F qq = gy[ii] / (e1 * bb + e2);
gw += (ga - gb * yy) * e1 * qq;
gu += (vv - yy) * e2 * qq;
q[i] = qq;
r[i] = ww - p;
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
ga = e1 * (aa + ga);
gb = e1 * (bb + gb);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
const int _offsetBC = _b * C + _c;
_gw[_offsetBC] = gw * _w[_c]; // multiply by w because of w -> -exp(w) in python forward()
_gu[_offsetBC] = gu;
aa = 0, bb = 0, pp = MIN_VALUE;
for (int i = T - 1; i >= 0; i--) {
const int ii = i * C;
const F kk = k[ii];
const F vv = v[ii];
const F yy = y[ii];
const F qq = q[i];
const F rr = r[i];
F e1 = qq * exp(rr);
F e2 = exp(kk + pp);
gk[ii] = e1 * (vv - yy) + e2 * (aa * vv + bb);
gv[ii] = e1 + e2 * aa;
const F ww = w + pp;
const F www = rr - u - kk;
const F p = max(ww, www);
e1 = exp(ww - p);
e2 = qq * exp(www - p);
aa = e1 * aa + e2;
bb = e1 * bb - e2 * yy;
pp = p;
}
}
void cuda_forward(int B, int T, int C, float *w, float *u, float *k, float *v, float *y) {
dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y);
}
void cuda_backward(int B, int T, int C, float *w, float *u, float *k, float *v, float *y, float *gy, float *gw, float *gu, float *gk, float *gv) {
dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_backward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, gy, gw, gu, gk, gv);
}
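
For readers who prefer Python to CUDA, a numerically equivalent sketch of the forward recurrence for a single (batch, channel) slice; aa, bb, pp mirror the kernel's running state:

import numpy as np

def wkv_forward_ref(w, u, k, v):
    # w, u: scalars for one channel; k, v: length-T arrays.
    # aa and bb are running weighted sums, both divided by exp(pp)
    # to avoid overflow, exactly as in kernel_forward above.
    T = len(k)
    y = np.empty(T)
    aa, bb, pp = 0.0, 0.0, -1e38
    for i in range(T):
        ww = u + k[i]
        p = max(pp, ww)
        e1 = np.exp(pp - p)
        e2 = np.exp(ww - p)
        y[i] = (e1 * aa + e2 * v[i]) / (e1 * bb + e2)
        ww = w + pp
        p = max(ww, k[i])
        e1 = np.exp(ww - p)
        e2 = np.exp(k[i] - p)
        aa = e1 * aa + e2 * v[i]
        bb = e1 * bb + e2
        pp = p
    return y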

finetune/lora/cuda/wkv_cuda_bf16.cu vendored Normal file

@@ -0,0 +1,132 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
#define MIN_VALUE (-1e38)
typedef at::BFloat16 bf16;
__global__ void kernel_forward(const int B, const int T, const int C,
const float *__restrict__ const _w, const bf16 *__restrict__ const _u, const bf16 *__restrict__ const _k, const bf16 *__restrict__ const _v,
bf16 *__restrict__ const _y) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
float u = float(_u[_c]);
float w = _w[_c];
const bf16 *__restrict__ const k = _k + _offset;
const bf16 *__restrict__ const v = _v + _offset;
bf16 *__restrict__ const y = _y + _offset;
// aa and bb are running sums divided by exp(pp) (to avoid overflow)
float aa = 0, bb = 0, pp = MIN_VALUE;
for (int i = 0; i < T; i++) {
const int ii = i * C;
const float kk = float(k[ii]);
const float vv = float(v[ii]);
float ww = u + kk;
float p = max(pp, ww);
float e1 = exp(pp - p);
float e2 = exp(ww - p);
y[ii] = bf16((e1 * aa + e2 * vv) / (e1 * bb + e2));
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
}
__global__ void kernel_backward(const int B, const int T, const int C,
const float *__restrict__ const _w, const bf16 *__restrict__ const _u, const bf16 *__restrict__ const _k, const bf16 *__restrict__ const _v,
const bf16 *__restrict__ const _y, const bf16 *__restrict__ const _gy,
bf16 *__restrict__ const _gw, bf16 *__restrict__ const _gu, bf16 *__restrict__ const _gk, bf16 *__restrict__ const _gv) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int _b = idx / C;
const int _c = idx % C;
const int _offset = _b * T * C + _c;
float u = float(_u[_c]);
float w = _w[_c];
const bf16 *__restrict__ const k = _k + _offset;
const bf16 *__restrict__ const v = _v + _offset;
const bf16 *__restrict__ const y = _y + _offset;
const bf16 *__restrict__ const gy = _gy + _offset;
bf16 *__restrict__ const gk = _gk + _offset;
bf16 *__restrict__ const gv = _gv + _offset;
    float q[Tmax], r[Tmax]; // Tmax comes from a -DTmax=... compile flag and bounds the sequence length T
float gw = 0, gu = 0, aa = 0, bb = 0, ga = 0, gb = 0, pp = MIN_VALUE;
for (int i = 0; i < T; i++) {
const int ii = i * C;
const float kk = float(k[ii]);
const float vv = float(v[ii]);
const float yy = float(y[ii]);
float ww = u + kk;
float p = max(pp, ww);
float e1 = exp(pp - p);
float e2 = exp(ww - p);
const float qq = float(gy[ii]) / (e1 * bb + e2);
gw += (ga - gb * yy) * e1 * qq;
gu += (vv - yy) * e2 * qq;
q[i] = qq;
r[i] = ww - p;
ww = w + pp;
p = max(ww, kk);
e1 = exp(ww - p);
e2 = exp(kk - p);
ga = e1 * (aa + ga);
gb = e1 * (bb + gb);
aa = e1 * aa + e2 * vv;
bb = e1 * bb + e2;
pp = p;
}
const int _offsetBC = _b * C + _c;
_gw[_offsetBC] = bf16(gw * _w[_c]); // multiply by w because of w -> -exp(w) in python forward()
_gu[_offsetBC] = bf16(gu);
aa = 0, bb = 0, pp = MIN_VALUE;
for (int i = T - 1; i >= 0; i--) {
const int ii = i * C;
const float kk = float(k[ii]);
const float vv = float(v[ii]);
const float yy = float(y[ii]);
const float qq = q[i];
const float rr = r[i];
float e1 = qq * exp(rr);
float e2 = exp(kk + pp);
gk[ii] = bf16(e1 * (vv - yy) + e2 * (aa * vv + bb));
gv[ii] = bf16(e1 + e2 * aa);
const float ww = w + pp;
const float www = rr - u - kk;
const float p = max(ww, www);
e1 = exp(ww - p);
e2 = qq * exp(www - p);
aa = e1 * aa + e2;
bb = e1 * bb - e2 * yy;
pp = p;
}
}
void cuda_forward(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y) {
dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y);
}
void cuda_backward(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, bf16 *gy, bf16 *gw, bf16 *gu, bf16 *gk, bf16 *gv) {
dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
assert(B * C % threadsPerBlock.x == 0);
dim3 numBlocks(B * C / threadsPerBlock.x);
kernel_backward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, gy, gw, gu, gk, gv);
}

finetune/lora/cuda/wkv_op.cpp vendored Normal file

@@ -0,0 +1,21 @@
#include <torch/extension.h>
void cuda_forward(int B, int T, int C, float *w, float *u, float *k, float *v, float *y);
void cuda_backward(int B, int T, int C, float *w, float *u, float *k, float *v, float *y, float *gy, float *gw, float *gu, float *gk, float *gv);
void forward(int64_t B, int64_t T, int64_t C, torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y) {
cuda_forward(B, T, C, w.data_ptr<float>(), u.data_ptr<float>(), k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>());
}
void backward(int64_t B, int64_t T, int64_t C, torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &gy, torch::Tensor &gw, torch::Tensor &gu, torch::Tensor &gk, torch::Tensor &gv) {
cuda_backward(B, T, C, w.data_ptr<float>(), u.data_ptr<float>(), k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(), gy.data_ptr<float>(), gw.data_ptr<float>(), gu.data_ptr<float>(), gk.data_ptr<float>(), gv.data_ptr<float>());
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &forward, "wkv forward");
m.def("backward", &backward, "wkv backward");
}
TORCH_LIBRARY(wkv, m) {
m.def("forward", forward);
m.def("backward", backward);
}
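
These bindings are built as a just-in-time torch extension. A sketch of a typical loading call (flag values follow the comments in the .cu files; T_MAX is an assumed training context length):

from torch.utils.cpp_extension import load

T_MAX = 1024  # assumption; must cover the longest sequence, sizes the q[Tmax]/r[Tmax] buffers
wkv_cuda = load(
    name="wkv",
    sources=["finetune/lora/cuda/wkv_op.cpp", "finetune/lora/cuda/wkv_cuda.cu"],
    verbose=True,
    extra_cuda_cflags=["-O3", "--use_fast_math", "--maxrregcount=60", f"-DTmax={T_MAX}"],
)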

finetune/lora/cuda/wkv_op_bf16.cpp vendored Normal file

@@ -0,0 +1,25 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;
void cuda_forward(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y);
void cuda_backward(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, bf16 *gy, bf16 *gw, bf16 *gu, bf16 *gk, bf16 *gv);
void forward(int64_t B, int64_t T, int64_t C, torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y) {
cuda_forward(B, T, C, w.data_ptr<float>(), u.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void backward(int64_t B, int64_t T, int64_t C, torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y,
torch::Tensor &gy, torch::Tensor &gw, torch::Tensor &gu, torch::Tensor &gk, torch::Tensor &gv) {
cuda_backward(B, T, C, w.data_ptr<float>(), u.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), y.data_ptr<bf16>(),
gy.data_ptr<bf16>(), gw.data_ptr<bf16>(), gu.data_ptr<bf16>(), gk.data_ptr<bf16>(), gv.data_ptr<bf16>());
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &forward, "wkv forward");
m.def("backward", &backward, "wkv backward");
}
TORCH_LIBRARY(wkv, m) {
m.def("forward", forward);
m.def("backward", backward);
}

finetune/lora/merge_lora.py vendored Normal file

@@ -0,0 +1,69 @@
from collections import OrderedDict
import os
import sys
from typing import Dict
import typing
import torch
try:
    if "-h" in sys.argv or "--help" in sys.argv:
        print(
            f"Usage: python3 {sys.argv[0]} [--use-gpu] <lora_alpha> <base_model.pth> <lora_checkpoint.pth> <output.pth>"
        )
        sys.exit(0)  # exit after printing usage; the positional parsing below cannot handle "-h"
    if sys.argv[1] == "--use-gpu":
device = "cuda"
lora_alpha, base_model, lora, output = (
float(sys.argv[2]),
sys.argv[3],
sys.argv[4],
sys.argv[5],
)
else:
device = "cpu"
lora_alpha, base_model, lora, output = (
float(sys.argv[1]),
sys.argv[2],
sys.argv[3],
sys.argv[4],
)
with torch.no_grad():
w: Dict[str, torch.Tensor] = torch.load(base_model, map_location="cpu")
# merge LoRA-only slim checkpoint into the main weights
w_lora: Dict[str, torch.Tensor] = torch.load(lora, map_location="cpu")
for k in w_lora.keys():
w[k] = w_lora[k]
output_w: typing.OrderedDict[str, torch.Tensor] = OrderedDict()
# merge LoRA weights
keys = list(w.keys())
for k in keys:
if k.endswith(".weight"):
prefix = k[: -len(".weight")]
lora_A = prefix + ".lora_A"
lora_B = prefix + ".lora_B"
if lora_A in keys:
assert lora_B in keys
print(f"merging {lora_A} and {lora_B} into {k}")
assert w[lora_B].shape[1] == w[lora_A].shape[0]
lora_r = w[lora_B].shape[1]
w[k] = w[k].to(device=device)
w[lora_A] = w[lora_A].to(device=device)
w[lora_B] = w[lora_B].to(device=device)
                    w[k] += w[lora_B] @ w[lora_A] * (lora_alpha / lora_r)  # W' = W + (alpha / r) * B @ A
output_w[k] = w[k].to(device="cpu", copy=True)
del w[k]
del w[lora_A]
del w[lora_B]
continue
if "lora" not in k:
print(f"retaining {k}")
output_w[k] = w[k].clone()
del w[k]
torch.save(output_w, output)
except Exception as e:
print(e)
with open("error.txt", "w") as f:
f.write(str(e))
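
A typical invocation (paths are hypothetical; lora_alpha must match the value used during LoRA training, here 32):

python3 merge_lora.py --use-gpu 32 base_model.pth lora_checkpoint.pth merged_output.pth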

finetune/lora/src/__init__.py vendored Normal file (empty)

Some files were not shown because too many files have changed in this diff.