use_gpu=True Error #22

Closed · Mario-RC opened this issue Jul 5, 2022 · 2 comments

Mario-RC commented Jul 5, 2022

In Google Colab.

INSTALLED:
!pip install -qqq git+https://github.com/PrithivirajDamodaran/Parrot_Paraphraser.git

MY CODE:

from parrot import Parrot
import torch  # needed for the seeding helper below

def random_state(seed):
    # Seed the CPU and (if available) GPU RNGs for reproducible paraphrases
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

random_state(1234)

# Load the paraphraser on the GPU
parrot_gpu = Parrot(model_tag="prithivida/parrot_paraphraser_on_T5", use_gpu=True)

phrases = ['i drive a ford pickup truck.', 'i am very conservative.', 'my family lives down the street from me.',
           'i go to church every sunday.', 'i have three guns and love hunting.']

para_phrases_gpu = parrot_gpu.augment(input_phrase=phrases[0], use_gpu=True, max_return_phrases = 10)

ERROR:


RuntimeError Traceback (most recent call last)
<ipython-input-…> in <module>()
----> 1 para_phrases_gpu = parrot_gpu.augment(input_phrase=phrases[0], use_gpu=True, max_return_phrases = 10)

/usr/local/lib/python3.7/dist-packages/parrot/parrot.py in augment(self, input_phrase, use_gpu, diversity_ranker, do_diverse, max_return_phrases, max_length, adequacy_threshold, fluency_threshold)
128
129
--> 130 adequacy_filtered_phrases = self.adequacy_score.filter(input_phrase, paraphrases, adequacy_threshold, device )
131 if len(adequacy_filtered_phrases) > 0 :
132 fluency_filtered_phrases = self.fluency_score.filter(adequacy_filtered_phrases, fluency_threshold, device )

/usr/local/lib/python3.7/dist-packages/parrot/filters.py in filter(self, input_phrase, para_phrases, adequacy_threshold, device)
13 x = self.tokenizer(input_phrase, para_phrase, return_tensors='pt', max_length=128, truncation=True)
14 self.adequacy_model = self.adequacy_model.to(device)
---> 15 logits = self.adequacy_model(**x).logits
16 probs = logits.softmax(dim=1)
17 prob_label_is_true = probs[:,1]

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []

/usr/local/lib/python3.7/dist-packages/transformers/models/roberta/modeling_roberta.py in forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, labels, output_attentions, output_hidden_states, return_dict)
1213 output_attentions=output_attentions,
1214 output_hidden_states=output_hidden_states,
-> 1215 return_dict=return_dict,
1216 )
1217 sequence_output = outputs[0]

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []

/usr/local/lib/python3.7/dist-packages/transformers/models/roberta/modeling_roberta.py in forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask, past_key_values, use_cache, output_attentions, output_hidden_states, return_dict)
844 token_type_ids=token_type_ids,
845 inputs_embeds=inputs_embeds,
--> 846 past_key_values_length=past_key_values_length,
847 )
848 encoder_outputs = self.encoder(

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []

/usr/local/lib/python3.7/dist-packages/transformers/models/roberta/modeling_roberta.py in forward(self, input_ids, token_type_ids, position_ids, inputs_embeds, past_key_values_length)
126
127 if inputs_embeds is None:
--> 128 inputs_embeds = self.word_embeddings(input_ids)
129 token_type_embeddings = self.token_type_embeddings(token_type_ids)
130

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/sparse.py in forward(self, input)
158 return F.embedding(
159 input, self.weight, self.padding_idx, self.max_norm,
--> 160 self.norm_type, self.scale_grad_by_freq, self.sparse)
161
162 def extra_repr(self) -> str:

/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
2181 # remove once script supports set_grad_enabled
2182 no_grad_embedding_renorm(weight, input, max_norm, norm_type)
-> 2183 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
2184
2185

RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu! (when checking argument for argument index in method wrapper__index_select)
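
For context, the traceback shows the adequacy filter moving the RoBERTa model to cuda:0 while the tokenized inputs stay on the CPU, which is exactly what this RuntimeError complains about. Below is a minimal, self-contained sketch of that pattern and the one-line fix; the roberta-base checkpoint and variable names are placeholders for illustration, not Parrot's actual internals.

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Placeholder checkpoint; Parrot's adequacy filter uses its own fine-tuned model.
tokenizer = AutoTokenizer.from_pretrained("roberta-base")
model = AutoModelForSequenceClassification.from_pretrained("roberta-base").to(device)

x = tokenizer("i drive a ford pickup truck.", "i own a ford truck.",
              return_tensors="pt", max_length=128, truncation=True)

# Without this line the input ids remain on the CPU while the model sits on
# cuda:0, and torch.embedding raises the "Expected all tensors to be on the
# same device" error shown above.
x = {name: tensor.to(device) for name, tensor in x.items()}

with torch.no_grad():
    logits = model(**x).logits
probs = logits.softmax(dim=1)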

Mario-RC changed the title from "GPU model" to "use_gpu=True Error" on Jul 5, 2022
PrithivirajDamodaran (Owner) commented:

Try now
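
Presumably the fix landed in the repository itself, so anyone hitting this in Colab would need to reinstall from GitHub and restart the runtime to pick it up; the command below just mirrors the install line above and is an assumption about how the patch was delivered, not something stated in the thread.

# Force a fresh install from the repo so the patched package replaces the
# previously installed version, then restart the Colab runtime.
!pip install -qqq --upgrade --force-reinstall git+https://github.com/PrithivirajDamodaran/Parrot_Paraphraser.git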

Mario-RC (Author) commented Jul 5, 2022

Now it works perfectly, thank you very much for the quick fix!

Mario-RC closed this as completed on Jul 5, 2022