You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I was using the command: python tools/test.py ./configs/recognition/swin/swin_small_patch244_window877_kinetics400_1k.py ./swin_small_patch244_window877_kinetics400_1k.pth --eval top_k_accuracy to run inference when the error below occurred. Someone said that I need to decrease the batch size, but I couldn't find the corresponding parameter in the config.
ETA:Traceback (most recent call last):
File "/root/obelisk/Collection/Video-Swin-Transformer-master/tools/test.py", line 364, in
main()
File "/root/obelisk/Collection/Video-Swin-Transformer-master/tools/test.py", line 349, in main
outputs = inference_pytorch(args, cfg, distributed, data_loader)
File "/root/obelisk/Collection/Video-Swin-Transformer-master/tools/test.py", line 160, in inference_pytorch
outputs = single_gpu_test(model, data_loader)
File "/root/anaconda3/lib/python3.9/site-packages/mmcv/engine/test.py", line 33, in single_gpu_test
result = model(return_loss=False, **data)
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/root/anaconda3/lib/python3.9/site-packages/mmcv/parallel/data_parallel.py", line 50, in forward
return super().forward(*inputs, **kwargs)
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/parallel/data_parallel.py", line 166, in forward
return self.module(*inputs[0], **kwargs[0])
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/root/obelisk/Collection/Video-Swin-Transformer-master/mmaction/models/recognizers/base.py", line 258, in forward
return self.forward_test(imgs, **kwargs)
File "/root/obelisk/Collection/Video-Swin-Transformer-master/mmaction/models/recognizers/recognizer3d.py", line 90, in forward_test
return self._do_test(imgs).cpu().numpy()
File "/root/obelisk/Collection/Video-Swin-Transformer-master/mmaction/models/recognizers/recognizer3d.py", line 47, in _do_test
x = self.extract_feat(batch_imgs)
File "/root/anaconda3/lib/python3.9/site-packages/mmcv/runner/fp16_utils.py", line 98, in new_func
return old_func(*args, **kwargs)
File "/root/obelisk/Collection/Video-Swin-Transformer-master/mmaction/models/recognizers/base.py", line 157, in extract_feat
x = self.backbone(imgs)
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/root/obelisk/Collection/Video-Swin-Transformer-master/mmaction/models/backbones/swin_transformer.py", line 652, in forward
x = self.patch_embed(x)
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/root/obelisk/Collection/Video-Swin-Transformer-master/mmaction/models/backbones/swin_transformer.py", line 449, in forward
x = self.proj(x) # B C D Wh Ww
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/conv.py", line 590, in forward
return self._conv_forward(input, self.weight, self.bias)
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/conv.py", line 585, in _conv_forward
return F.conv3d(
RuntimeError: Unable to find a valid cuDNN algorithm to run convolution
The text was updated successfully, but these errors were encountered:
I was using the command: python tools/test.py ./configs/recognition/swin/swin_small_patch244_window877_kinetics400_1k.py ./swin_small_patch244_window877_kinetics400_1k.pth --eval top_k_accuracy to run inference when the error below occurred. Someone said that I need to decrease the batch size, but I couldn't find the corresponding parameter in the config.
ETA:Traceback (most recent call last):
File "/root/obelisk/Collection/Video-Swin-Transformer-master/tools/test.py", line 364, in
main()
File "/root/obelisk/Collection/Video-Swin-Transformer-master/tools/test.py", line 349, in main
outputs = inference_pytorch(args, cfg, distributed, data_loader)
File "/root/obelisk/Collection/Video-Swin-Transformer-master/tools/test.py", line 160, in inference_pytorch
outputs = single_gpu_test(model, data_loader)
File "/root/anaconda3/lib/python3.9/site-packages/mmcv/engine/test.py", line 33, in single_gpu_test
result = model(return_loss=False, **data)
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/root/anaconda3/lib/python3.9/site-packages/mmcv/parallel/data_parallel.py", line 50, in forward
return super().forward(*inputs, **kwargs)
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/parallel/data_parallel.py", line 166, in forward
return self.module(*inputs[0], **kwargs[0])
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/root/obelisk/Collection/Video-Swin-Transformer-master/mmaction/models/recognizers/base.py", line 258, in forward
return self.forward_test(imgs, **kwargs)
File "/root/obelisk/Collection/Video-Swin-Transformer-master/mmaction/models/recognizers/recognizer3d.py", line 90, in forward_test
return self._do_test(imgs).cpu().numpy()
File "/root/obelisk/Collection/Video-Swin-Transformer-master/mmaction/models/recognizers/recognizer3d.py", line 47, in _do_test
x = self.extract_feat(batch_imgs)
File "/root/anaconda3/lib/python3.9/site-packages/mmcv/runner/fp16_utils.py", line 98, in new_func
return old_func(*args, **kwargs)
File "/root/obelisk/Collection/Video-Swin-Transformer-master/mmaction/models/recognizers/base.py", line 157, in extract_feat
x = self.backbone(imgs)
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/root/obelisk/Collection/Video-Swin-Transformer-master/mmaction/models/backbones/swin_transformer.py", line 652, in forward
x = self.patch_embed(x)
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/root/obelisk/Collection/Video-Swin-Transformer-master/mmaction/models/backbones/swin_transformer.py", line 449, in forward
x = self.proj(x) # B C D Wh Ww
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/conv.py", line 590, in forward
return self._conv_forward(input, self.weight, self.bias)
File "/root/anaconda3/lib/python3.9/site-packages/torch/nn/modules/conv.py", line 585, in _conv_forward
return F.conv3d(
RuntimeError: Unable to find a valid cuDNN algorithm to run convolution
The text was updated successfully, but these errors were encountered: