From 7f2f4f15c1fd26d6b443e1b880c207a9ebaa5161 Mon Sep 17 00:00:00 2001
From: josc146
Date: Fri, 23 Jun 2023 16:32:05 +0800
Subject: [PATCH] improve error messages

---
 frontend/src/_locales/zh-hans/main.json | 5 +++--
 frontend/src/components/RunButton.tsx   | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/frontend/src/_locales/zh-hans/main.json b/frontend/src/_locales/zh-hans/main.json
index 1a06e95..3128354 100644
--- a/frontend/src/_locales/zh-hans/main.json
+++ b/frontend/src/_locales/zh-hans/main.json
@@ -159,8 +159,9 @@
   "Delete": "删除",
   "Edit": "编辑",
   "Memory is not enough, try to increase the virtual memory or use a smaller model.": "内存不足,尝试增加虚拟内存,或使用一个更小规模的模型",
-  "Bad pytorch version, please reinstall pytorch with cuda.": "错误的Pytorch版本,请重新安装CUDA版本的Pytorch",
+  "Bad PyTorch version, please reinstall PyTorch with cuda.": "错误的PyTorch版本,请重新安装CUDA版本的PyTorch",
   "The model file is corrupted, please download again.": "模型文件损坏,请重新下载",
   "Found no NVIDIA driver, please install the latest driver.": "没有找到NVIDIA驱动,请安装最新驱动",
-  "VRAM is not enough, please reduce stored layers or use a lower precision in Configs page.": "显存不足,请在配置页面减少载入显存层数,或使用更低的精度"
+  "VRAM is not enough, please reduce stored layers or use a lower precision in Configs page.": "显存不足,请在配置页面减少载入显存层数,或使用更低的精度",
+  "Failed to enable custom CUDA kernel, ninja is required to load C++ extensions. You may be using the CPU version of PyTorch, please reinstall PyTorch with CUDA. Or if you are using a custom Python interpreter, you must compile the CUDA kernel by yourself or disable Custom CUDA kernel acceleration.": "自定义CUDA算子开启失败,需要安装Ninja来读取C++扩展。你可能正在使用CPU版本的PyTorch,请重新安装CUDA版本的PyTorch。如果你正在使用自定义Python解释器,你必须自己编译CUDA算子或禁用自定义CUDA算子加速"
 }
\ No newline at end of file
diff --git a/frontend/src/components/RunButton.tsx b/frontend/src/components/RunButton.tsx
index 8e0cd80..0adb57a 100644
--- a/frontend/src/components/RunButton.tsx
+++ b/frontend/src/components/RunButton.tsx
@@ -210,10 +210,11 @@ export const RunButton: FC<{ onClickRun?: MouseEventHandler, iconMode?: boolean
         const error = await r.text();
         const errorsMap = {
           'not enough memory': 'Memory is not enough, try to increase the virtual memory or use a smaller model.',
-          'not compiled with CUDA': 'Bad pytorch version, please reinstall pytorch with cuda.',
+          'not compiled with CUDA': 'Bad PyTorch version, please reinstall PyTorch with cuda.',
           'invalid header or archive is corrupted': 'The model file is corrupted, please download again.',
           'no NVIDIA driver': 'Found no NVIDIA driver, please install the latest driver.',
-          'CUDA out of memory': 'VRAM is not enough, please reduce stored layers or use a lower precision in Configs page.'
+          'CUDA out of memory': 'VRAM is not enough, please reduce stored layers or use a lower precision in Configs page.',
+          'Ninja is required to load C++ extensions': 'Failed to enable custom CUDA kernel, ninja is required to load C++ extensions. You may be using the CPU version of PyTorch, please reinstall PyTorch with CUDA. Or if you are using a custom Python interpreter, you must compile the CUDA kernel by yourself or disable Custom CUDA kernel acceleration.'
         };
         const matchedError = Object.entries(errorsMap).find(([key, _]) => error.includes(key));
         const message = matchedError ? t(matchedError[1]) : error;
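
Note for review: below is a minimal standalone TypeScript sketch of the substring-matching fallback this patch extends. Each key of errorsMap is checked against the raw backend error text, the first hit is passed through the component's i18n translator t, and an unmatched error is surfaced verbatim. The translator is stubbed as an identity function and toUserMessage is a hypothetical wrapper name used only for illustration; neither reflects the app's actual wiring.

// Standalone sketch of the error mapping changed in RunButton.tsx.
// Assumption: the i18n translator t is stubbed as identity so this runs by itself.
const t = (s: string): string => s;

const errorsMap: Record<string, string> = {
  'not enough memory': 'Memory is not enough, try to increase the virtual memory or use a smaller model.',
  'not compiled with CUDA': 'Bad PyTorch version, please reinstall PyTorch with cuda.',
  'invalid header or archive is corrupted': 'The model file is corrupted, please download again.',
  'no NVIDIA driver': 'Found no NVIDIA driver, please install the latest driver.',
  'CUDA out of memory': 'VRAM is not enough, please reduce stored layers or use a lower precision in Configs page.',
  'Ninja is required to load C++ extensions': 'Failed to enable custom CUDA kernel, ninja is required to load C++ extensions. You may be using the CPU version of PyTorch, please reinstall PyTorch with CUDA. Or if you are using a custom Python interpreter, you must compile the CUDA kernel by yourself or disable Custom CUDA kernel acceleration.'
};

// Hypothetical helper: the first map key found as a substring of the backend
// error wins; an unmatched error falls through unchanged, as in the patch.
function toUserMessage(error: string): string {
  const matched = Object.entries(errorsMap).find(([key]) => error.includes(key));
  return matched ? t(matched[1]) : error;
}

// Example: the new Ninja case maps to the custom-CUDA-kernel message.
console.log(toUserMessage('RuntimeError: Ninja is required to load C++ extensions'));

Because matching is substring-based, the new 'Ninja is required to load C++ extensions' key catches the PyTorch cpp_extension failure regardless of the surrounding traceback text, while any unknown error still reaches the user unmodified.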