You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Issue:
It triggers a mismatched-device error.
I believe it's a problem with the vision tower:
The images and the causal language model are both on device 1 (cuda:1), but the vision tower model is on a different device (cuda:0).
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cuda:1! (when checking argument for argument weight in method wrapper_CUDA__cudnn_convolution)
Log:
Traceback (most recent call last):
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/planner/.vscode-server/extensions/ms-python.debugpy-2024.6.0/bundled/libs/debugpy/adapter/../../debugpy/launcher/../../debugpy/__main__.py", line 39, in <module>
cli.main()
File "/home/planner/.vscode-server/extensions/ms-python.debugpy-2024.6.0/bundled/libs/debugpy/adapter/../../debugpy/launcher/../../debugpy/../debugpy/server/cli.py", line 430, in main
run()
File "/home/planner/.vscode-server/extensions/ms-python.debugpy-2024.6.0/bundled/libs/debugpy/adapter/../../debugpy/launcher/../../debugpy/../debugpy/server/cli.py", line 284, in run_file
runpy.run_path(target, run_name="__main__")
File "/home/planner/.vscode-server/extensions/ms-python.debugpy-2024.6.0/bundled/libs/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py", line 321, in run_path
return _run_module_code(code, init_globals, run_name,
File "/home/planner/.vscode-server/extensions/ms-python.debugpy-2024.6.0/bundled/libs/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py", line 135, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "/home/planner/.vscode-server/extensions/ms-python.debugpy-2024.6.0/bundled/libs/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py", line 124, in _run_code
exec(code, run_globals)
File "/public/yzc/completely_new/first_load_llava.py", line 273, in <module>
output=model(
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/public/yzc/completely_new/llava/model/language_model/llava_llama.py", line 81, in forward
) = self.prepare_inputs_labels_for_multimodal(
File "/public/yzc/completely_new/llava/model/llava_arch.py", line 205, in prepare_inputs_labels_for_multimodal
image_features = self.encode_images(images)
File "/public/yzc/completely_new/llava/model/llava_arch.py", line 144, in encode_images
image_features = encoder(images)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/public/yzc/completely_new/llava/model/multimodal_encoder/clip_encoder.py", line 54, in forward
image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/accelerate/hooks.py", line 165, in new_forward
output = old_forward(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/transformers/models/clip/modeling_clip.py", line 917, in forward
return self.vision_model(
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/transformers/models/clip/modeling_clip.py", line 841, in forward
hidden_states = self.embeddings(pixel_values)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/accelerate/hooks.py", line 165, in new_forward
output = old_forward(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/transformers/models/clip/modeling_clip.py", line 182, in forward
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/accelerate/hooks.py", line 165, in new_forward
output = old_forward(*args, **kwargs)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/conv.py", line 460, in forward
return self._conv_forward(input, self.weight, self.bias)
File "/home/planner/anaconda3/envs/habitat_latest/lib/python3.9/site-packages/torch/nn/modules/conv.py", line 456, in _conv_forward
return F.conv2d(input, weight, bias, self.stride,
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cuda:1! (when checking argument for argument weight in method wrapper_CUDA__cudnn_convolution)
The text was updated successfully, but these errors were encountered:
Describe the issue
Run demo python with args like:
Issue:
It triggers a mismatched-device error.
I believe it's a problem with the
vision tower
. The images and the causal language model are both on device 1 (cuda:1), but the vision tower model is on a different device (cuda:0).
Log:
The text was updated successfully, but these errors were encountered: