improve error messages
parent 97f6af595e
commit 7f2f4f15c1
@@ -159,8 +159,9 @@
 "Delete": "删除",
 "Edit": "编辑",
 "Memory is not enough, try to increase the virtual memory or use a smaller model.": "内存不足,尝试增加虚拟内存,或使用一个更小规模的模型",
-"Bad pytorch version, please reinstall pytorch with cuda.": "错误的Pytorch版本,请重新安装CUDA版本的Pytorch",
+"Bad PyTorch version, please reinstall PyTorch with cuda.": "错误的PyTorch版本,请重新安装CUDA版本的Pytorch",
 "The model file is corrupted, please download again.": "模型文件损坏,请重新下载",
 "Found no NVIDIA driver, please install the latest driver.": "没有找到NVIDIA驱动,请安装最新驱动",
-"VRAM is not enough, please reduce stored layers or use a lower precision in Configs page.": "显存不足,请在配置页面减少载入显存层数,或使用更低的精度"
+"VRAM is not enough, please reduce stored layers or use a lower precision in Configs page.": "显存不足,请在配置页面减少载入显存层数,或使用更低的精度",
+"Failed to enable custom CUDA kernel, ninja is required to load C++ extensions. You may be using the CPU version of PyTorch, please reinstall PyTorch with CUDA. Or if you are using a custom Python interpreter, you must compile the CUDA kernel by yourself or disable Custom CUDA kernel acceleration.": "自定义CUDA算子开启失败,需要安装Ninja来读取C++扩展。你可能正在使用CPU版本的PyTorch,请重新安装CUDA版本的PyTorch。如果你正在使用自定义Python解释器,你必须自己编译CUDA算子或禁用自定义CUDA算子加速"
 }
@@ -210,10 +210,11 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
 const error = await r.text();
 const errorsMap = {
 'not enough memory': 'Memory is not enough, try to increase the virtual memory or use a smaller model.',
-'not compiled with CUDA': 'Bad pytorch version, please reinstall pytorch with cuda.',
+'not compiled with CUDA': 'Bad PyTorch version, please reinstall PyTorch with cuda.',
 'invalid header or archive is corrupted': 'The model file is corrupted, please download again.',
 'no NVIDIA driver': 'Found no NVIDIA driver, please install the latest driver.',
-'CUDA out of memory': 'VRAM is not enough, please reduce stored layers or use a lower precision in Configs page.'
+'CUDA out of memory': 'VRAM is not enough, please reduce stored layers or use a lower precision in Configs page.',
+'Ninja is required to load C++ extensions': 'Failed to enable custom CUDA kernel, ninja is required to load C++ extensions. You may be using the CPU version of PyTorch, please reinstall PyTorch with CUDA. Or if you are using a custom Python interpreter, you must compile the CUDA kernel by yourself or disable Custom CUDA kernel acceleration.'
 };
 const matchedError = Object.entries(errorsMap).find(([key, _]) => error.includes(key));
 const message = matchedError ? t(matchedError[1]) : error;
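
For reference, the lookup in the RunButton hunk above is a plain substring match: the first errorsMap key found inside the backend error text selects the friendlier, translatable message, and unrecognized errors fall through unchanged. Below is a minimal standalone TypeScript sketch of that behavior; the helper name toUserMessage is hypothetical, and the t parameter is only a stand-in for the app's real i18next translator.

// Sketch of the substring-to-message mapping from the diff above.
const errorsMap: Record<string, string> = {
  'not compiled with CUDA': 'Bad PyTorch version, please reinstall PyTorch with cuda.',
  'Ninja is required to load C++ extensions': 'Failed to enable custom CUDA kernel, ninja is required to load C++ extensions. You may be using the CPU version of PyTorch, please reinstall PyTorch with CUDA. Or if you are using a custom Python interpreter, you must compile the CUDA kernel by yourself or disable Custom CUDA kernel acceleration.'
};

// `t` would normally come from react-i18next; an identity fallback keeps this runnable.
const toUserMessage = (error: string, t: (s: string) => string = s => s): string => {
  const matchedError = Object.entries(errorsMap).find(([key]) => error.includes(key));
  return matchedError ? t(matchedError[1]) : error;
};

// A raw backend error containing the ninja key maps to the new, localizable message.
console.log(toUserMessage('Ninja is required to load C++ extensions'));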