Description
The error log is:
Traceback (most recent call last):
File "I:\AITool\TSS_AI\index-tts.venv\lib\site-packages\gradio\queueing.py", line 667, in process_events
response = await route_utils.call_process_api(
File "I:\AITool\TSS_AI\index-tts.venv\lib\site-packages\gradio\route_utils.py", line 349, in call_process_api
output = await app.get_blocks().process_api(
File "I:\AITool\TSS_AI\index-tts.venv\lib\site-packages\gradio\blocks.py", line 2274, in process_api
result = await self.call_function(
File "I:\AITool\TSS_AI\index-tts.venv\lib\site-packages\gradio\blocks.py", line 1781, in call_function
prediction = await anyio.to_thread.run_sync( # type: ignore
File "I:\AITool\TSS_AI\index-tts.venv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "I:\AITool\TSS_AI\index-tts.venv\lib\site-packages\anyio_backends_asyncio.py", line 2476, in run_sync_in_worker_thread
return await future
File "I:\AITool\TSS_AI\index-tts.venv\lib\site-packages\anyio_backends_asyncio.py", line 967, in run
result = context.run(func, *args)
File "I:\AITool\TSS_AI\index-tts.venv\lib\site-packages\gradio\utils.py", line 915, in wrapper
response = f(*args, **kwargs)
File "I:\AITool\TSS_AI\index-tts\webui.py", line 148, in gen_single
output = tts.infer(spk_audio_prompt=prompt, text=text,
File "I:\AITool\TSS_AI\index-tts\indextts\infer_v2.py", line 558, in infer
wav = torch.cat(wavs, dim=1)
RuntimeError: torch.cat(): expected a non-empty list of Tensors
Emo control mode:0,vec:None
start inference...
(The same traceback is printed for every subsequent generation attempt.)
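For context, `torch.cat` raises exactly this RuntimeError whenever it is given an empty list, so the failure at `infer_v2.py` line 558 means the `wavs` list held no generated audio segments by the time concatenation ran (i.e. every segment failed or nothing was produced for the input text). The sketch below only reproduces that failure mode and shows a defensive check; it is not the project's actual code or fix, and `concat_segments` is a hypothetical helper name.

```python
import torch

# Reproduce the failure mode: torch.cat() refuses an empty list,
# so if no segment produced audio, the call raises this RuntimeError.
wavs = []  # hypothetical: no generated segments
try:
    wav = torch.cat(wavs, dim=1)
except RuntimeError as e:
    print(e)  # torch.cat(): expected a non-empty list of Tensors

# Illustrative defensive pattern: fail early with a clearer message
# instead of the opaque torch.cat error.
def concat_segments(wavs):
    if not wavs:
        raise ValueError(
            "No audio segments were generated; check that the input text "
            "and speaker prompt were actually processed."
        )
    return torch.cat(wavs, dim=1)
```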