custom strategy mode

josc146 2023-05-31 12:26:10 +08:00
parent 8291c50058
commit 9f5d15a7d5
4 changed files with 100 additions and 62 deletions

View File

@@ -124,5 +124,6 @@
"There is currently a game of Werewolf with six players, including a Seer (who can check identities at night), two Werewolves (who can choose someone to kill at night), a Bodyguard (who can choose someone to protect at night), two Villagers (with no special abilities), and a game host. Bob will play as Player 1, Alice will play as Players 2-6 and the game host, and they will begin playing together. Every night, the host will ask Bob for his action and simulate the actions of the other players. During the day, the host will oversee the voting process and ask Bob for his vote. \n\nAlice: Next, I will act as the game host and assign everyone their roles, including randomly assigning yours. Then, I will simulate the actions of Players 2-6 and let you know what happens each day. Based on your assigned role, you can tell me your actions and I will let you know the corresponding results each day.\n\nBob: Okay, I understand. Let's begin. Please assign me a role. Am I the Seer, Werewolf, Villager, or Bodyguard?\n\nAlice: You are the Seer. Now that night has fallen, please choose a player to check his identity.\n\nBob: Tonight, I want to check Player 2 and find out his role.": "现在有一场六人狼人杀游戏,包括一名预言家(可以在夜晚查验身份),两名狼人(可以在夜晚选择杀人),一名守卫(可以在夜晚选择要守护的人),两名平民(无技能)一名主持人以下内容中Bob将扮演其中的1号玩家Alice来扮演2-6号玩家以及主持人并开始与Bob进行游戏主持人每晚都会询问Bob的行动并模拟其他人的行动在白天则要主持投票并同样询问Bob投票对象公布投票结果。\n\nAlice: 接下来我将首先作为主持人进行角色分配并给你赋予随机的角色之后我将模拟2-6号玩家进行行动告知你每天的动态根据你被分配的角色你可以回复我你做的行动我会告诉你每天对应的结果\n\nBob: 好的,我明白了,那么开始吧。请先给我一个角色身份。我是预言家,狼人,平民,守卫中的哪一个呢?\n\nAlice: 你的身份是预言家。现在夜晚降临,请选择你要查验的玩家。\n\nBob: 今晚我要验2号玩家他是什么身份",
"Writer, Translator, Role-playing": "写作,翻译,角色扮演",
"Chinese Kongfu": "情境冒险",
"Allow external access to the API (service must be restarted)": "允许外部访问API (必须重启服务)"
"Allow external access to the API (service must be restarted)": "允许外部访问API (必须重启服务)",
"Custom": "自定义"
}

View File

@@ -136,7 +136,7 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
});
let customCudaFile = '';
- if (modelConfig.modelParameters.useCustomCuda) {
+ if (modelConfig.modelParameters.device != 'CPU' && modelConfig.modelParameters.useCustomCuda) {
customCudaFile = getSupportedCustomCudaFile();
if (customCudaFile) {
FileExists('./py310/Lib/site-packages/rwkv/model.py').then((exist) => {

View File

@@ -28,7 +28,7 @@ export type ApiParameters = {
frequencyPenalty: number;
}
- export type Device = 'CPU' | 'CUDA';
+ export type Device = 'CPU' | 'CUDA' | 'Custom';
export type Precision = 'fp16' | 'int8' | 'fp32';
export type ModelParameters = {
@@ -40,6 +40,7 @@ export type ModelParameters = {
maxStoredLayers: number;
enableHighPrecisionForLastLayer: boolean;
useCustomCuda?: boolean;
+ customStrategy?: string;
}
export type ModelConfig = {
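For reference, a minimal sketch (not part of this commit) of how the new optional field pairs with the widened Device type; the concrete values are hypothetical:

// Illustration only: with device set to 'Custom', the raw strategy string
// is carried in the new customStrategy field (other fields omitted).
const customParams: Partial<ModelParameters> = {
  device: 'Custom',
  customStrategy: 'cuda:0 fp16 *20 -> cuda:1 fp16',
};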
@@ -806,20 +807,22 @@ export const Configs: FC = observer(() => {
}
}} />
<Labeled label={t('Device')} content={
- <Dropdown style={{ minWidth: 0 }} className="grow" value={selectedConfig.modelParameters.device}
+ <Dropdown style={{ minWidth: 0 }} className="grow" value={t(selectedConfig.modelParameters.device)!}
selectedOptions={[selectedConfig.modelParameters.device]}
onOptionSelect={(_, data) => {
- if (data.optionText) {
+ if (data.optionValue) {
setSelectedConfigModelParams({
- device: data.optionText as Device
+ device: data.optionValue as Device
});
}
}}>
- <Option>CPU</Option>
- <Option>CUDA</Option>
+ <Option value="CPU">CPU</Option>
+ <Option value="CUDA">CUDA</Option>
+ <Option value="Custom">{t('Custom')!}</Option>
</Dropdown>
} />
- <Labeled label={t('Precision')}
+ {
+ selectedConfig.modelParameters.device != 'Custom' && <Labeled label={t('Precision')}
desc={t('int8 uses less VRAM, but has slightly lower quality. fp16 has higher quality, and fp32 has the best quality.')}
content={
<Dropdown style={{ minWidth: 0 }} className="grow"
@@ -837,8 +840,10 @@ export const Configs: FC = observer(() => {
<Option>fp32</Option>
</Dropdown>
} />
- <div />
- <Labeled label={t('Stored Layers')}
+ }
+ {selectedConfig.modelParameters.device == 'CUDA' && <div />}
+ {
+ selectedConfig.modelParameters.device == 'CUDA' && <Labeled label={t('Stored Layers')}
desc={t('Number of the neural network layers loaded into VRAM, the more you load, the faster the speed, but it consumes more VRAM.')}
content={
<ValuedSlider value={selectedConfig.modelParameters.storedLayers} min={0}
@@ -849,6 +854,9 @@ export const Configs: FC = observer(() => {
});
}} />
} />
+ }
+ {
+ selectedConfig.modelParameters.device == 'CUDA' &&
<Labeled label={t('Enable High Precision For Last Layer')}
desc={t('Whether to use CPU to calculate the last output layer of the neural network with FP32 precision to obtain better quality.')}
content={
@@ -859,6 +867,23 @@ export const Configs: FC = observer(() => {
});
}} />
} />
+ }
+ {
+ selectedConfig.modelParameters.device == 'Custom' &&
+ <Labeled label="Strategy" desc="https://github.com/BlinkDL/ChatRWKV/blob/main/ChatRWKV-strategy.png"
+ content={
+ <Input className="grow" placeholder="cuda:0 fp16 *20 -> cuda:1 fp16"
+ value={selectedConfig.modelParameters.customStrategy}
+ onChange={(e, data) => {
+ setSelectedConfigModelParams({
+ customStrategy: data.value
+ });
+ }} />
+ } />
+ }
+ {selectedConfig.modelParameters.device == 'Custom' && <div />}
+ {
+ selectedConfig.modelParameters.device != 'CPU' &&
<Labeled label={t('Use Custom CUDA kernel to Accelerate')}
desc={t('Enabling this option can greatly improve inference speed, but there may be compatibility issues. If it fails to start, please turn off this option.')}
content={
@@ -869,6 +894,7 @@ export const Configs: FC = observer(() => {
});
}} />
} />
+ }
</div>
}
/>
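The new 'Custom' device option exposes a raw strategy string through the Strategy input above. As an illustration, a few strings a user might enter, assuming the syntax from the linked ChatRWKV strategy reference (the layer counts and device indices are made up):

// Hypothetical examples of customStrategy values:
const exampleStrategies: string[] = [
  'cpu fp32',                        // run entirely on CPU in fp32
  'cuda fp16',                       // run entirely on GPU 0 in fp16
  'cuda fp16 *20 -> cpu fp32',       // first 20 layers on GPU 0, remaining layers on CPU
  'cuda:0 fp16 *20 -> cuda:1 fp16',  // split across two GPUs, as in the Input placeholder
];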

View File

@@ -127,12 +127,23 @@ export const getStrategy = (modelConfig: ModelConfig | undefined = undefined) =>
if (modelConfig) params = modelConfig.modelParameters;
else params = commonStore.getCurrentModelConfig().modelParameters;
let strategy = '';
- strategy += (params.device === 'CPU' ? 'cpu' : 'cuda') + ' ';
- strategy += params.device === 'CPU' ? 'fp32' : (params.precision === 'fp16' ? 'fp16' : params.precision === 'int8' ? 'fp16i8' : 'fp32');
+ switch (params.device) {
+ case 'CPU':
+ strategy += 'cpu ';
+ strategy += params.precision === 'int8' ? 'fp32i8' : 'fp32';
+ break;
+ case 'CUDA':
+ strategy += 'cuda ';
+ strategy += params.precision === 'fp16' ? 'fp16' : params.precision === 'int8' ? 'fp16i8' : 'fp32';
+ if (params.storedLayers < params.maxStoredLayers)
+ strategy += ` *${params.storedLayers}+`;
+ if (params.enableHighPrecisionForLastLayer)
+ strategy += ' -> cpu fp32 *1';
+ break;
+ case 'Custom':
+ strategy = params.customStrategy || '';
+ break;
+ }
return strategy;
};
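To make the mapping concrete, a short sketch of the strategy strings the switch above should produce; the configurations are hypothetical examples, not taken from the repository:

// Expected getStrategy() outputs for a few hypothetical configurations:
const expectedStrategies: Array<[string, string]> = [
  ['CPU, int8',                                 'cpu fp32i8'],
  ['CPU, fp16 or fp32',                         'cpu fp32'],
  ['CUDA, fp16, 20 of 41 layers in VRAM',       'cuda fp16 *20+'],
  ['CUDA, int8, high precision for last layer', 'cuda fp16i8 -> cpu fp32 *1'],
  ['Custom',                                    'cuda:0 fp16 *20 -> cuda:1 fp16'], // whatever customStrategy holds
];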