From 748b682fddb052a9c23fa73669944ce9ad3a6ab1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=88=98=E6=82=A6?=
Date: Sat, 23 Mar 2024 20:17:17 +0800
Subject: [PATCH] =?UTF-8?q?=E8=80=81=E6=98=BE=E5=8D=A1=E5=88=A4=E6=96=AD?=
 =?UTF-8?q?=E5=8D=8A=E7=B2=BE=E5=BA=A6=E9=97=AE=E9=A2=98?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

当前逻辑对于老显卡不友好,比如NVIDIA T500,即使修改配置文件,is_half变量也会被反复赋值为True,现在改为自动判断当前设备是否支持半精度
---
 GPT_SoVITS/inference_webui.py | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py
index 199948c6b..5c22a5b82 100644
--- a/GPT_SoVITS/inference_webui.py
+++ b/GPT_SoVITS/inference_webui.py
@@ -29,7 +29,24 @@ is_share = eval(is_share)
 if "_CUDA_VISIBLE_DEVICES" in os.environ:
     os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
 
-is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
+# is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
+# Probe FP16 support empirically instead of trusting the env var
+fp16_tensor = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float16)
+
+# Try running an FP16 operation on the GPU
+try:
+    # Move the tensor onto the GPU (raises when no usable CUDA device exists)
+    fp16_tensor = fp16_tensor.cuda()
+    # Make a true FP32 copy (torch.tensor(fp16_tensor) would keep float16)
+    fp32_tensor = fp16_tensor.float()
+    # Run a simple mixed-precision operation, e.g. addition
+    result = fp16_tensor + fp32_tensor
+    is_half = True
+    print("FP16 is supported on this device.")
+except Exception as e:
+    # Also covers the AssertionError raised by CPU-only torch builds on .cuda()
+    print(f"FP16 is not supported on this device. Error: {e}")
+    is_half = False
 gpt_path = os.environ.get("gpt_path", None)
 sovits_path = os.environ.get("sovits_path", None)
 cnhubert_base_path = os.environ.get("cnhubert_base_path", None)