We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
안녕하세요 좋은 모듈 만들어주셔서 감사합니다. 제가 cuda 환경에서 모델을 불러와서
pprint(goemotions(text))
를 실행하면 아래처럼 에러가 뜨네요. multilabel_pipeline 에서 device를 -1에서 0으로도 바꿔보고,
import torch device = torch.device('cuda:0') model.to(device)
위처럼 추가 설정도 해보았는데 동일한 에러가 떠서 여기에 여쭤보게 됐습니다. 어떻게 고칠 수 있을까요?
RuntimeError Traceback (most recent call last) in ----> 1 pprint(goemotions(df['sentences'][0]))
~/ProjComment/IMDB/GoEmotions-pytorch/multilabel_pipeline.py in call(self, *args, **kwargs) 37 38 def call(self, *args, **kwargs): ---> 39 outputs = super().call(*args, **kwargs) 40 scores = 1 / (1 + np.exp(-outputs)) # Sigmoid 41 results = []
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/transformers/pipelines.py in call(self, *args, **kwargs) 472 def call(self, *args, **kwargs): 473 inputs = self._parse_and_tokenize(*args, **kwargs) --> 474 return self._forward(inputs) 475 476 def _forward(self, inputs, return_tensors=False):
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/transformers/pipelines.py in _forward(self, inputs, return_tensors) 491 with torch.no_grad(): 492 inputs = self.ensure_tensor_on_device(**inputs) --> 493 predictions = self.model(**inputs)[0].cpu() 494 495 if return_tensors:
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/torch/nn/modules/module.py in call(self, *input, **kwargs) 530 result = self._slow_forward(*input, **kwargs) 531 else: --> 532 result = self.forward(*input, **kwargs) 533 for hook in self._forward_hooks.values(): 534 hook_result = hook(self, input, result)
~/ProjComment/IMDB/GoEmotions-pytorch/model.py in forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, labels) 25 labels=None, 26 ): ---> 27 outputs = self.bert( 28 input_ids, 29 attention_mask=attention_mask,
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/transformers/modeling_bert.py in forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask) 724 head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) 725 --> 726 embedding_output = self.embeddings( 727 input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds 728 )
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/transformers/modeling_bert.py in forward(self, input_ids, token_type_ids, position_ids, inputs_embeds) 172 173 if inputs_embeds is None: --> 174 inputs_embeds = self.word_embeddings(input_ids) 175 position_embeddings = self.position_embeddings(position_ids) 176 token_type_embeddings = self.token_type_embeddings(token_type_ids)
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/torch/nn/modules/sparse.py in forward(self, input) 110 111 def forward(self, input): --> 112 return F.embedding( 113 input, self.weight, self.padding_idx, self.max_norm, 114 self.norm_type, self.scale_grad_by_freq, self.sparse)
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse) 1482 # remove once script supports set_grad_enabled 1483 no_grad_embedding_renorm(weight, input, max_norm, norm_type) -> 1484 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse) 1485 1486
RuntimeError: Expected object of device type cuda but got device type cpu for argument #3 'index' in call to _th_index_select
The text was updated successfully, but these errors were encountered:
No branches or pull requests
안녕하세요 좋은 모듈 만들어주셔서 감사합니다.
제가 cuda 환경에서 모델을 불러와서
pprint(goemotions(text))
를 실행하면 아래처럼 에러가 뜨네요.
multilabel_pipeline 에서 device를 -1에서 0으로도 바꿔보고,
import torch
device = torch.device('cuda:0')
model.to(device)
위처럼 추가 설정도 해보았는데 동일한 에러가 떠서 여기에 여쭤보게 됐습니다.
어떻게 고칠 수 있을까요?
감사합니다.
RuntimeError Traceback (most recent call last)
in
----> 1 pprint(goemotions(df['sentences'][0]))
~/ProjComment/IMDB/GoEmotions-pytorch/multilabel_pipeline.py in call(self, *args, **kwargs)
37
38 def call(self, *args, **kwargs):
---> 39 outputs = super().call(*args, **kwargs)
40 scores = 1 / (1 + np.exp(-outputs)) # Sigmoid
41 results = []
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/transformers/pipelines.py in call(self, *args, **kwargs)
472 def call(self, *args, **kwargs):
473 inputs = self._parse_and_tokenize(*args, **kwargs)
--> 474 return self._forward(inputs)
475
476 def _forward(self, inputs, return_tensors=False):
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/transformers/pipelines.py in _forward(self, inputs, return_tensors)
491 with torch.no_grad():
492 inputs = self.ensure_tensor_on_device(**inputs)
--> 493 predictions = self.model(**inputs)[0].cpu()
494
495 if return_tensors:
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/torch/nn/modules/module.py in call(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
~/ProjComment/IMDB/GoEmotions-pytorch/model.py in forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, labels)
25 labels=None,
26 ):
---> 27 outputs = self.bert(
28 input_ids,
29 attention_mask=attention_mask,
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/torch/nn/modules/module.py in call(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/transformers/modeling_bert.py in forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask)
724 head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
725
--> 726 embedding_output = self.embeddings(
727 input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
728 )
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/torch/nn/modules/module.py in call(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/transformers/modeling_bert.py in forward(self, input_ids, token_type_ids, position_ids, inputs_embeds)
172
173 if inputs_embeds is None:
--> 174 inputs_embeds = self.word_embeddings(input_ids)
175 position_embeddings = self.position_embeddings(position_ids)
176 token_type_embeddings = self.token_type_embeddings(token_type_ids)
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/torch/nn/modules/module.py in call(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/torch/nn/modules/sparse.py in forward(self, input)
110
111 def forward(self, input):
--> 112 return F.embedding(
113 input, self.weight, self.padding_idx, self.max_norm,
114 self.norm_type, self.scale_grad_by_freq, self.sparse)
/home/ubuntu/anaconda3/envs/GoEmotions-pytorch/lib/python3.8/site-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
1482 # remove once script supports set_grad_enabled
1483 no_grad_embedding_renorm(weight, input, max_norm, norm_type)
-> 1484 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
1485
1486
RuntimeError: Expected object of device type cuda but got device type cpu for argument #3 'index' in call to _th_index_select
The text was updated successfully, but these errors were encountered: