update Labeled desc
parent 1573c09db0
commit 4f8e35ce62
@@ -75,5 +75,14 @@
   "New Version Available": "新版本可用",
   "Update": "更新",
   "Please click the button in the top right corner to start the model": "请点击右上角的按钮启动模型",
-  "Update Error, Please restart this program": "更新出错, 请重启本程序"
+  "Update Error, Please restart this program": "更新出错, 请重启本程序",
+  "Open the following URL with your browser to view the API documentation": "使用浏览器打开以下地址查看API文档",
+  "By default, the maximum number of tokens that can be answered in a single response, it can be changed by the user by specifying API parameters.": "默认情况下, 单个回复最多回答的token数目, 用户可以通过自行指定API参数改变这个值",
+  "Sampling temperature, the higher the stronger the randomness and creativity, while the lower, the more focused and deterministic it will be.": "采样温度, 越大随机性越强, 更具创造力, 越小则越保守稳定",
+  "Consider the results of the top n% probability mass, 0.1 considers the top 10%, with higher quality but more conservative, 1 considers all results, with lower quality but more diverse.": "考虑前 n% 概率质量的结果, 0.1 考虑前 10%, 质量更高, 但更保守, 1 考虑所有质量结果, 质量降低, 但更多样",
+  "Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.": "存在惩罚. 正值根据新token在至今的文本中是否出现过, 来对其进行惩罚, 从而增加了模型涉及新话题的可能性",
+  "Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.": "频率惩罚. 正值根据新token在至今的文本中出现的频率/次数, 来对其进行惩罚, 从而减少模型原封不动地重复相同句子的可能性",
+  "int8 uses less VRAM, and is faster, but has slightly lower quality. fp16 has higher quality, and fp32 has the best quality.": "int8占用显存更低, 速度更快, 但质量略微下降. fp16质量更好, fp32质量最好",
+  "Number of the neural network layers loaded into VRAM, the more you load, the faster the speed, but it consumes more VRAM.": "载入显存的神经网络层数, 载入越多, 速度越快, 但显存消耗越大",
+  "Whether to use CPU to calculate the last output layer of the neural network with FP32 precision to obtain better quality.": "是否使用cpu以fp32精度计算神经网络的最后一层输出层, 以获得更好的质量"
 }
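
Note: the strings added above use the full English sentence as the i18n key, which is the same literal later passed to t() in the component hunks below. A minimal sketch of how such keys resolve, assuming react-i18next (useTranslation appears in the Configs hunk) with namespace and key separators disabled so that sentences containing ':' and '.' work as flat keys; this is not the project's actual bootstrap file:

// Minimal sketch under stated assumptions, not the project's real i18n setup.
// nsSeparator/keySeparator must be disabled because these keys contain ':' and '.'.
import i18n from 'i18next';
import { initReactI18next } from 'react-i18next';

i18n.use(initReactI18next).init({
  lng: 'zh',
  nsSeparator: false,
  keySeparator: false,
  resources: {
    zh: {
      translation: {
        'Update': '更新',
        'New Version Available': '新版本可用'
        // ...plus the desc strings added in this commit
      }
    }
  }
});

// When a key has no translation, t() falls back to the key itself, so the
// English sentence doubles as the default UI text.
console.log(i18n.t('Update'));                  // -> '更新'
console.log(i18n.t('Some untranslated text'));  // -> 'Some untranslated text'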
@@ -3,7 +3,7 @@ import {Label, Tooltip} from '@fluentui/react-components';
 import classnames from 'classnames';
 
 export const Labeled: FC<{
-  label: string; desc?: string, content: ReactElement, flex?: boolean, spaceBetween?: boolean
+  label: string; desc?: string | null, content: ReactElement, flex?: boolean, spaceBetween?: boolean
 }> = ({
   label,
   desc,
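
The widened desc type lets callers pass null to explicitly suppress the description. The component body is outside this hunk; below is a hypothetical sketch of how Labeled presumably branches on desc, using only the Fluent UI pieces imported in the hunk header — an illustration, not the author's actual implementation:

// Hypothetical reconstruction for illustration only; the real body is not in this diff.
import React, { FC, ReactElement } from 'react';
import { Label, Tooltip } from '@fluentui/react-components';

const LabeledSketch: FC<{
  label: string; desc?: string | null, content: ReactElement
}> = ({ label, desc, content }) => (
  <div className="flex items-center gap-2">
    {desc
      // relationship="description" associates the tooltip text with the label
      ? <Tooltip content={desc} showDelay={0} relationship="description">
          <Label>{label}</Label>
        </Tooltip>
      : <Label>{label}</Label>}
    {content}
  </div>
);

export default LabeledSketch;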
@@ -22,8 +22,8 @@ export const Configs: FC = observer(() => {
   const {t} = useTranslation();
   const [selectedIndex, setSelectedIndex] = React.useState(commonStore.currentModelConfigIndex);
   const [selectedConfig, setSelectedConfig] = React.useState(commonStore.modelConfigs[selectedIndex]);
 
   const navigate = useNavigate();
-
+  const port = selectedConfig.apiParameters.apiPort;
   const updateSelectedIndex = (newIndex: number) => {
     setSelectedIndex(newIndex);
@@ -104,55 +104,72 @@ export const Configs: FC = observer(() => {
 desc={t('Hover your mouse over the text to view a detailed description. Settings marked with * will take effect immediately after being saved.')}
 content={
   <div className="grid grid-cols-1 sm:grid-cols-2 gap-2">
-    <Labeled label={t('API Port')} desc={`127.0.0.1:${selectedConfig.apiParameters.apiPort}`} content={
-      <NumberInput value={selectedConfig.apiParameters.apiPort} min={1} max={65535} step={1}
-        onChange={(e, data) => {
-          setSelectedConfigApiParams({
-            apiPort: data.value
-          });
-        }}/>
-    }/>
-    <Labeled label={t('Max Response Token *')} content={
-      <ValuedSlider value={selectedConfig.apiParameters.maxResponseToken} min={100} max={8100} step={400}
-        input
-        onChange={(e, data) => {
-          setSelectedConfigApiParams({
-            maxResponseToken: data.value
-          });
-        }}/>
-    }/>
-    <Labeled label={t('Temperature *')} content={
-      <ValuedSlider value={selectedConfig.apiParameters.temperature} min={0} max={2} step={0.1} input
-        onChange={(e, data) => {
-          setSelectedConfigApiParams({
-            temperature: data.value
-          });
-        }}/>
-    }/>
-    <Labeled label={t('Top_P *')} content={
-      <ValuedSlider value={selectedConfig.apiParameters.topP} min={0} max={1} step={0.1} input
-        onChange={(e, data) => {
-          setSelectedConfigApiParams({
-            topP: data.value
-          });
-        }}/>
-    }/>
-    <Labeled label={t('Presence Penalty *')} content={
-      <ValuedSlider value={selectedConfig.apiParameters.presencePenalty} min={-2} max={2} step={0.1} input
-        onChange={(e, data) => {
-          setSelectedConfigApiParams({
-            presencePenalty: data.value
-          });
-        }}/>
-    }/>
-    <Labeled label={t('Frequency Penalty *')} content={
-      <ValuedSlider value={selectedConfig.apiParameters.frequencyPenalty} min={-2} max={2} step={0.1} input
-        onChange={(e, data) => {
-          setSelectedConfigApiParams({
-            frequencyPenalty: data.value
-          });
-        }}/>
-    }/>
+    <Labeled label={t('API Port')}
+      desc={t('Open the following URL with your browser to view the API documentation') + `: http://127.0.0.1:${port}/docs. ` +
+        t('This tool’s API is compatible with OpenAI API. It can be used with any ChatGPT tool you like. Go to the settings of some ChatGPT tool, replace the \'https://api.openai.com\' part in the API address with \'') + `http://127.0.0.1:${port}` + '\'.'}
+      content={
+        <NumberInput value={port} min={1} max={65535} step={1}
+          onChange={(e, data) => {
+            setSelectedConfigApiParams({
+              apiPort: data.value
+            });
+          }}/>
+      }/>
+    <Labeled label={t('Max Response Token *')}
+      desc={t('By default, the maximum number of tokens that can be answered in a single response, it can be changed by the user by specifying API parameters.')}
+      content={
+        <ValuedSlider value={selectedConfig.apiParameters.maxResponseToken} min={100} max={8100}
+          step={400}
+          input
+          onChange={(e, data) => {
+            setSelectedConfigApiParams({
+              maxResponseToken: data.value
+            });
+          }}/>
+      }/>
+    <Labeled label={t('Temperature *')}
+      desc={t('Sampling temperature, the higher the stronger the randomness and creativity, while the lower, the more focused and deterministic it will be.')}
+      content={
+        <ValuedSlider value={selectedConfig.apiParameters.temperature} min={0} max={2} step={0.1}
+          input
+          onChange={(e, data) => {
+            setSelectedConfigApiParams({
+              temperature: data.value
+            });
+          }}/>
+      }/>
+    <Labeled label={t('Top_P *')}
+      desc={t('Consider the results of the top n% probability mass, 0.1 considers the top 10%, with higher quality but more conservative, 1 considers all results, with lower quality but more diverse.')}
+      content={
+        <ValuedSlider value={selectedConfig.apiParameters.topP} min={0} max={1} step={0.1} input
+          onChange={(e, data) => {
+            setSelectedConfigApiParams({
+              topP: data.value
+            });
+          }}/>
+      }/>
+    <Labeled label={t('Presence Penalty *')}
+      desc={t('Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.')}
+      content={
+        <ValuedSlider value={selectedConfig.apiParameters.presencePenalty} min={-2} max={2}
+          step={0.1} input
+          onChange={(e, data) => {
+            setSelectedConfigApiParams({
+              presencePenalty: data.value
+            });
+          }}/>
+      }/>
+    <Labeled label={t('Frequency Penalty *')}
+      desc={t('Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.')}
+      content={
+        <ValuedSlider value={selectedConfig.apiParameters.frequencyPenalty} min={-2} max={2}
+          step={0.1} input
+          onChange={(e, data) => {
+            setSelectedConfigApiParams({
+              frequencyPenalty: data.value
+            });
+          }}/>
+      }/>
   </div>
 }
 />
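
The new API Port desc doubles as a mini how-to: any OpenAI-style client can target the local server by swapping the base URL. A hedged sketch of such a request follows; the /v1/chat/completions route and the payload field names are assumptions carried over from the public OpenAI API that the hint text claims compatibility with, and 8000 is only an example port value:

// Assumption-laden sketch: route and fields follow the OpenAI API convention
// referenced by the desc above; 8000 is a placeholder for the configured port.
async function askLocalModel(): Promise<void> {
  const port = 8000;
  const resp = await fetch(`http://127.0.0.1:${port}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      messages: [{ role: 'user', content: 'Hello' }],
      max_tokens: 1000,   // overrides the "Max Response Token" default per request
      temperature: 1.2,   // higher = more random and creative
      top_p: 0.5,         // keep only the top 50% of probability mass
      // OpenAI-style penalties, roughly:
      // logit[j] -= presence * (count[j] > 0 ? 1 : 0) + frequency * count[j]
      presence_penalty: 0.4,
      frequency_penalty: 0.4
    })
  });
  const data = await resp.json();
  console.log(data.choices[0].message.content);
}

askLocalModel();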
@@ -208,38 +225,45 @@ export const Configs: FC = observer(() => {
       <Option>CUDA</Option>
     </Dropdown>
   }/>
-  <Labeled label={t('Precision')} content={
-    <Dropdown style={{minWidth: 0}} className="grow" value={selectedConfig.modelParameters.precision}
-      selectedOptions={[selectedConfig.modelParameters.precision]}
-      onOptionSelect={(_, data) => {
-        if (data.optionText) {
-          setSelectedConfigModelParams({
-            precision: data.optionText as Precision
-          });
-        }
-      }}>
-      <Option>fp16</Option>
-      <Option>int8</Option>
-      <Option>fp32</Option>
-    </Dropdown>
-  }/>
-  <Labeled label={t('Stored Layers')} content={
-    <ValuedSlider value={selectedConfig.modelParameters.storedLayers} min={0}
-      max={selectedConfig.modelParameters.maxStoredLayers} step={1} input
-      onChange={(e, data) => {
-        setSelectedConfigModelParams({
-          storedLayers: data.value
-        });
-      }}/>
-  }/>
-  <Labeled label={t('Enable High Precision For Last Layer')} content={
-    <Switch checked={selectedConfig.modelParameters.enableHighPrecisionForLastLayer}
-      onChange={(e, data) => {
-        setSelectedConfigModelParams({
-          enableHighPrecisionForLastLayer: data.checked
-        });
-      }}/>
-  }/>
+  <Labeled label={t('Precision')}
+    desc={t('int8 uses less VRAM, and is faster, but has slightly lower quality. fp16 has higher quality, and fp32 has the best quality.')}
+    content={
+      <Dropdown style={{minWidth: 0}} className="grow"
+        value={selectedConfig.modelParameters.precision}
+        selectedOptions={[selectedConfig.modelParameters.precision]}
+        onOptionSelect={(_, data) => {
+          if (data.optionText) {
+            setSelectedConfigModelParams({
+              precision: data.optionText as Precision
+            });
+          }
+        }}>
+        <Option>fp16</Option>
+        <Option>int8</Option>
+        <Option>fp32</Option>
+      </Dropdown>
+    }/>
+  <Labeled label={t('Stored Layers')}
+    desc={t('Number of the neural network layers loaded into VRAM, the more you load, the faster the speed, but it consumes more VRAM.')}
+    content={
+      <ValuedSlider value={selectedConfig.modelParameters.storedLayers} min={0}
+        max={selectedConfig.modelParameters.maxStoredLayers} step={1} input
+        onChange={(e, data) => {
+          setSelectedConfigModelParams({
+            storedLayers: data.value
+          });
+        }}/>
+    }/>
+  <Labeled label={t('Enable High Precision For Last Layer')}
+    desc={t('Whether to use CPU to calculate the last output layer of the neural network with FP32 precision to obtain better quality.')}
+    content={
+      <Switch checked={selectedConfig.modelParameters.enableHighPrecisionForLastLayer}
+        onChange={(e, data) => {
+          setSelectedConfigModelParams({
+            enableHighPrecisionForLastLayer: data.checked
+          });
+        }}/>
+    }/>
   </div>
 }
 />
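For reference, the modelParameters fields touched in this hunk, collected into one type. This is inferred from the diff alone: the field names and the fp16/int8/fp32 options are visible above, but the project's full interface is not shown here, so treat it as a sketch:

// Inferred from the identifiers in this diff; the real type may have more fields.
type Precision = 'fp16' | 'int8' | 'fp32';

interface ModelParametersSketch {
  precision: Precision;                      // int8: least VRAM, fastest; fp32: best quality
  storedLayers: number;                      // layers resident in VRAM; more = faster, more VRAM
  maxStoredLayers: number;                   // upper bound of the Stored Layers slider
  enableHighPrecisionForLastLayer: boolean;  // compute the output layer on CPU in fp32
}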