You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
我想用mini数据集测试下maptr的效果,想熟悉下跑通的流程,但是遇到了一些问题
运行命令是:python -m torch.distributed.launch --nproc_per_node=1 --master_port 12345 tools/test.py
在for i, data in enumerate(data_loader):报错 EOFError: Ran out of input TypeError: cannot pickle 'dict_keys' object
NOTE: Redirects are currently not supported in Windows or MacOs.
D:\anaconda\envs\bev\lib\site-packages\torch\distributed\launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated
and will be removed in future. Use torch.distributed.run.
Note that --use_env is set by default in torch.distributed.run.
If your script expects --local_rank argument to be set, please
change it to read from os.environ['LOCAL_RANK'] instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for
further instructions
warnings.warn(
projects.mmdet3d_plugin
E:\code/MapTR\projects\mmdet3d_plugin\bevformer\modules\custom_base_transformer_layer.py:94: UserWarning: The arguments feedforward_channels in BaseTransformerLayer has been deprecated, now you should set feedforward_channels and other FFN related arguments to a dict named ffn_cfgs.
warnings.warn(
E:\code/MapTR\projects\mmdet3d_plugin\bevformer\modules\custom_base_transformer_layer.py:94: UserWarning: The arguments ffn_dropout in BaseTransformerLayer has been deprecated, now you should set ffn_drop and other FFN related arguments to a dict named ffn_cfgs.
warnings.warn(
E:\code/MapTR\projects\mmdet3d_plugin\bevformer\modules\custom_base_transformer_layer.py:94: UserWarning: The arguments ffn_num_fcs in BaseTransformerLayer has been deprecated, now you should set num_fcs and other FFN related arguments to a dict named ffn_cfgs.
warnings.warn(
D:\anaconda\envs\bev\lib\site-packages\mmcv\cnn\bricks\transformer.py:341: UserWarning: The arguments feedforward_channels in BaseTransformerLayer has been deprecated, now you should set feedforward_channels and other FFN related arguments to a dict named ffn_cfgs.
warnings.warn(
D:\anaconda\envs\bev\lib\site-packages\mmcv\cnn\bricks\transformer.py:341: UserWarning: The arguments ffn_dropout in BaseTransformerLayer has been deprecated, now you should set ffn_drop and other FFN
related arguments to a dict named ffn_cfgs.
warnings.warn(
D:\anaconda\envs\bev\lib\site-packages\mmcv\cnn\bricks\transformer.py:341: UserWarning: The arguments ffn_num_fcs in BaseTransformerLayer has been deprecated, now you should set num_fcs and other FFN related arguments to a dict named ffn_cfgs.
warnings.warn(
D:\anaconda\envs\bev\lib\site-packages\mmcv\cnn\bricks\transformer.py:92: UserWarning: The arguments dropout in MultiheadAttention has been deprecated, now you can separately set attn_drop(float), proj_drop(float), and dropout_layer(dict)
warnings.warn('The arguments dropout in MultiheadAttention '
load checkpoint from local path: E:/code/MapTR/ckpts/maptr_nano_r18_110e.pth
The model and loaded state dict do not match exactly
unexpected key in source state_dict: pts_bbox_head.transformer.encoder.layers.0.attentions.1.attention.grid_offsets
[ ] 0/81, elapsed: 0s, ETA:Traceback (most recent call last):
File "tools/test.py", line 269, in
main()
File "tools/test.py", line 240, in main
Traceback (most recent call last):
File "", line 1, in
outputs = custom_multi_gpu_test(model, data_loader, args.tmpdir,
File "D:\anaconda\envs\bev\lib\multiprocessing\spawn.py", line 116, in spawn_main
File "E:\code/MapTR\projects\mmdet3d_plugin\bevformer\apis\test.py", line 70, in custom_multi_gpu_test
exitcode = _main(fd, parent_sentinel)
File "D:\anaconda\envs\bev\lib\multiprocessing\spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
for i, data in enumerate(data_loader):
File "D:\anaconda\envs\bev\lib\site-packages\torch\utils\data\dataloader.py", line 359, in iter
return self._get_iterator()
File "D:\anaconda\envs\bev\lib\site-packages\torch\utils\data\dataloader.py", line 305, in _get_iterator
return _MultiProcessingDataLoaderIter(self)
File "D:\anaconda\envs\bev\lib\site-packages\torch\utils\data\dataloader.py", line 918, in init
w.start()
File "D:\anaconda\envs\bev\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "D:\anaconda\envs\bev\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "D:\anaconda\envs\bev\lib\multiprocessing\context.py", line 327, in _Popen
return Popen(process_obj)
File "D:\anaconda\envs\bev\lib\multiprocessing\popen_spawn_win32.py", line 93, in init
reduction.dump(process_obj, to_child)
File "D:\anaconda\envs\bev\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle 'dict_keys' object
ERROR:torch.distributed.elastic.multiprocessing.api:failed (exitcode: 1) local_rank: 0 (pid: 25492) of binary: D:\anaconda\envs\bev\python.exe
Traceback (most recent call last):
File "D:\anaconda\envs\bev\lib\runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "D:\anaconda\envs\bev\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "D:\anaconda\envs\bev\lib\site-packages\torch\distributed\launch.py", line 193, in
main()
File "D:\anaconda\envs\bev\lib\site-packages\torch\distributed\launch.py", line 189, in main
launch(args)
File "D:\anaconda\envs\bev\lib\site-packages\torch\distributed\launch.py", line 174, in launch
run(args)
File "D:\anaconda\envs\bev\lib\site-packages\torch\distributed\run.py", line 689, in run
elastic_launch(
File "D:\anaconda\envs\bev\lib\site-packages\torch\distributed\launcher\api.py", line 116, in call
return launch_agent(self._config, self._entrypoint, list(args))
File "D:\anaconda\envs\bev\lib\site-packages\torch\distributed\launcher\api.py", line 244, in launch_agent
raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
我想用mini数据集测试下maptr的效果,想熟悉下跑通的流程,但是遇到了一些问题
运行命令是:python -m torch.distributed.launch --nproc_per_node=1 --master_port 12345 tools/test.py
在for i, data in enumerate(data_loader):报错 EOFError: Ran out of input TypeError: cannot pickle 'dict_keys' object
NOTE: Redirects are currently not supported in Windows or MacOs.
D:\anaconda\envs\bev\lib\site-packages\torch\distributed\launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated
and will be removed in future. Use torch.distributed.run.
Note that --use_env is set by default in torch.distributed.run.
If your script expects --local_rank argument to be set, please change it to read from os.environ['LOCAL_RANK'] instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions
warnings.warn(
projects.mmdet3d_plugin
E:\code/MapTR\projects\mmdet3d_plugin\bevformer\modules\custom_base_transformer_layer.py:94: UserWarning: The arguments feedforward_channels in BaseTransformerLayer has been deprecated, now you should set feedforward_channels and other FFN related arguments to a dict named ffn_cfgs.
warnings.warn(
E:\code/MapTR\projects\mmdet3d_plugin\bevformer\modules\custom_base_transformer_layer.py:94: UserWarning: The arguments ffn_dropout in BaseTransformerLayer has been deprecated, now you should set ffn_drop and other FFN related arguments to a dict named ffn_cfgs.
warnings.warn(
E:\code/MapTR\projects\mmdet3d_plugin\bevformer\modules\custom_base_transformer_layer.py:94: UserWarning: The arguments ffn_num_fcs in BaseTransformerLayer has been deprecated, now you should set num_fcs and other FFN related arguments to a dict named ffn_cfgs.
warnings.warn(
D:\anaconda\envs\bev\lib\site-packages\mmcv\cnn\bricks\transformer.py:341: UserWarning: The arguments feedforward_channels in BaseTransformerLayer has been deprecated, now you should set feedforward_channels and other FFN related arguments to a dict named ffn_cfgs.
warnings.warn(
D:\anaconda\envs\bev\lib\site-packages\mmcv\cnn\bricks\transformer.py:341: UserWarning: The arguments ffn_dropout in BaseTransformerLayer has been deprecated, now you should set ffn_drop and other FFN related arguments to a dict named ffn_cfgs.
warnings.warn(
D:\anaconda\envs\bev\lib\site-packages\mmcv\cnn\bricks\transformer.py:341: UserWarning: The arguments ffn_num_fcs in BaseTransformerLayer has been deprecated, now you should set num_fcs and other FFN related arguments to a dict named ffn_cfgs.
warnings.warn(
D:\anaconda\envs\bev\lib\site-packages\mmcv\cnn\bricks\transformer.py:92: UserWarning: The arguments dropout in MultiheadAttention has been deprecated, now you can separately set attn_drop(float), proj_drop(float), and dropout_layer(dict)
warnings.warn('The arguments dropout in MultiheadAttention '
load checkpoint from local path: E:/code/MapTR/ckpts/maptr_nano_r18_110e.pth
The model and loaded state dict do not match exactly
unexpected key in source state_dict: pts_bbox_head.transformer.encoder.layers.0.attentions.1.attention.grid_offsets
[ ] 0/81, elapsed: 0s, ETA:Traceback (most recent call last):
File "tools/test.py", line 269, in
main()
File "tools/test.py", line 240, in main
Traceback (most recent call last):
File "", line 1, in
outputs = custom_multi_gpu_test(model, data_loader, args.tmpdir,
File "D:\anaconda\envs\bev\lib\multiprocessing\spawn.py", line 116, in spawn_main
File "E:\code/MapTR\projects\mmdet3d_plugin\bevformer\apis\test.py", line 70, in custom_multi_gpu_test
exitcode = _main(fd, parent_sentinel)
File "D:\anaconda\envs\bev\lib\multiprocessing\spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
for i, data in enumerate(data_loader):
File "D:\anaconda\envs\bev\lib\site-packages\torch\utils\data\dataloader.py", line 359, in iter
return self._get_iterator()
File "D:\anaconda\envs\bev\lib\site-packages\torch\utils\data\dataloader.py", line 305, in _get_iterator
return _MultiProcessingDataLoaderIter(self)
File "D:\anaconda\envs\bev\lib\site-packages\torch\utils\data\dataloader.py", line 918, in init
w.start()
File "D:\anaconda\envs\bev\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "D:\anaconda\envs\bev\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "D:\anaconda\envs\bev\lib\multiprocessing\context.py", line 327, in _Popen
return Popen(process_obj)
File "D:\anaconda\envs\bev\lib\multiprocessing\popen_spawn_win32.py", line 93, in init
reduction.dump(process_obj, to_child)
File "D:\anaconda\envs\bev\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle 'dict_keys' object
ERROR:torch.distributed.elastic.multiprocessing.api:failed (exitcode: 1) local_rank: 0 (pid: 25492) of binary: D:\anaconda\envs\bev\python.exe
Traceback (most recent call last):
File "D:\anaconda\envs\bev\lib\runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "D:\anaconda\envs\bev\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "D:\anaconda\envs\bev\lib\site-packages\torch\distributed\launch.py", line 193, in
main()
File "D:\anaconda\envs\bev\lib\site-packages\torch\distributed\launch.py", line 189, in main
launch(args)
File "D:\anaconda\envs\bev\lib\site-packages\torch\distributed\launch.py", line 174, in launch
run(args)
File "D:\anaconda\envs\bev\lib\site-packages\torch\distributed\run.py", line 689, in run
elastic_launch(
File "D:\anaconda\envs\bev\lib\site-packages\torch\distributed\launcher\api.py", line 116, in call
return launch_agent(self._config, self._entrypoint, list(args))
File "D:\anaconda\envs\bev\lib\site-packages\torch\distributed\launcher\api.py", line 244, in launch_agent
raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
=======================================
Root Cause:
[0]:
time: 2024-03-14_14:21:03
rank: 0 (local_rank: 0)
exitcode: 1 (pid: 25492)
error_file: <N/A>
msg: "Process failed with exitcode 1"
Other Failures:
<NO_OTHER_FAILURES>
The text was updated successfully, but these errors were encountered: