Skip to content

linux voxcpm2的webui关于torch.compile的问题 #201

@hotdogarea

Description

@hotdogarea

linux 环境下,开启 torch.compile=True 时,启动 webui 推理会报错;关闭 compile 可以正常运行,但在 3080Ti 下 RTF 在一点几

  • To create a public link, set share=True in launch().
    Traceback (most recent call last):
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/gradio/queueing.py", line 785, in process_events
    response = await route_utils.call_process_api(
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/gradio/route_utils.py", line 358, in call_process_api
    output = await app.get_blocks().process_api(
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/gradio/blocks.py", line 2172, in process_api
    result = await self.call_function(
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/gradio/blocks.py", line 1634, in call_function
    prediction = await anyio.to_thread.run_sync( # type: ignore
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/anyio/to_thread.py", line 63, in run_sync
    return await get_async_backend().run_sync_in_worker_thread(
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2502, in run_sync_in_worker_thread
    return await future
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 986, in run
    result = context.run(func, *args)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/gradio/utils.py", line 1059, in wrapper
    response = f(*args, **kwargs)
    File "/root/VoxCPM/appV2.py", line 616, in _generate
    sr, wav_np = demo.generate_tts_audio(
    File "/root/VoxCPM/appV2.py", line 436, in generate_tts_audio
    return self._generate_tts_audio_main_thread(
    File "/root/VoxCPM/appV2.py", line 587, in _generate_tts_audio_main_thread
    raise error_queue.get()
    File "/root/VoxCPM/appV2.py", line 570, in inference_worker
    result = self._generate_tts_audio_worker_thread(
    File "/root/VoxCPM/appV2.py", line 502, in _generate_tts_audio_worker_thread
    wav = current_model.generate(**generate_kwargs)
    File "/root/VoxCPM/src/voxcpm/core.py", line 155, in generate
    return next(self._generate(*args, streaming=False, **kwargs))
    File "/root/VoxCPM/src/voxcpm/core.py", line 276, in _generate
    for wav, _, _ in generate_result:
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 36, in generator_context
    response = gen.send(None)
    File "/root/VoxCPM/src/voxcpm/model/voxcpm2.py", line 927, in _generate_with_prompt_cache
    latent_pred, pred_audio_feat = next(inference_result)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 36, in generator_context
    response = gen.send(None)
    File "/root/VoxCPM/src/voxcpm/model/voxcpm2.py", line 992, in _inference
    feat_embed = self.feat_encoder(feat) # [b, t, h_feat]
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
    return forward_call(*args, **kwargs)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 465, in _fn
    return fn(*args, **kwargs)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
    return forward_call(*args, **kwargs)
    File "/root/VoxCPM/src/voxcpm/modules/locenc/local_encoder.py", line 17, in forward
    def forward(self, x):
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 632, in _fn
    return fn(*args, **kwargs)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py", line 1100, in forward
    return compiled_fn(full_args)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py", line 321, in runtime_wrapper
    all_outs = call_func_at_runtime_with_args(
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/utils.py", line 124, in call_func_at_runtime_with_args
    out = normalize_as_list(f(args))
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py", line 667, in inner_fn
    outs = compiled_fn(args)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_functorch/_aot_autograd/runtime_wrappers.py", line 488, in wrapper
    return compiled_fn(runtime_args)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_inductor/codecache.py", line 1478, in call
    return self.current_callable(inputs)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_inductor/compile_fx.py", line 1008, in run
    return compiled_fn(new_inputs)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_inductor/cudagraph_trees.py", line 398, in deferred_cudagraphify
    fn, out = cudagraphify(model, inputs, new_static_input_idxs, *args, **kwargs)
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_inductor/cudagraph_trees.py", line 420, in cudagraphify
    manager = get_container(device_index).get_tree_manager()
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_inductor/cudagraph_trees.py", line 341, in get_container
    container_dict = get_obj(local, "tree_manager_containers")
    File "/root/miniconda3/envs/voxcpm/lib/python3.10/site-packages/torch/_inductor/cudagraph_trees.py", line 336, in get_obj
    assert torch._C._is_key_in_tls(attr_name)
    AssertionError

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions