Traceback (most recent call last):
File "train.py", line 357, in
train()
File "train.py", line 353, in train
trainer.train()
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/transformers/trainer.py", line 1885, in train
return inner_training_loop(
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/transformers/trainer.py", line 2216, in _inner_training_loop
tr_loss_step = self.training_step(model, inputs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/transformers/trainer.py", line 3216, in training_step
loss = self.compute_loss(model, inputs)
File "train.py", line 288, in compute_loss
logits = model(
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/accelerate/utils/operations.py", line 822, in forward
return model_forward(*args, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/accelerate/utils/operations.py", line 810, in call
return convert_to_fp32(self.model_forward(*args, **kwargs))
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/torch/amp/autocast_mode.py", line 14, in decorate_autocast
return func(*args, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/peft/peft_model.py", line 1395, in forward
return self.base_model(
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/peft/tuners/tuners_utils.py", line 179, in forward
return self.model.forward(*args, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/accelerate/hooks.py", line 166, in new_forward
output = module._old_forward(*args, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/transformers/models/llama/modeling_llama.py", line 1205, in forward
outputs = self.model(
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/transformers/models/llama/modeling_llama.py", line 1000, in forward
layer_outputs = decoder_layer(
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/accelerate/hooks.py", line 166, in new_forward
output = module._old_forward(*args, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/transformers/models/llama/modeling_llama.py", line 749, in forward
hidden_states, self_attn_weights, present_key_value = self.self_attn(
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/accelerate/hooks.py", line 166, in new_forward
output = module._old_forward(*args, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/transformers/models/llama/modeling_llama.py", line 351, in forward
query_states = self.q_proj(hidden_states)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/peft/tuners/lora/layer.py", line 540, in forward
result = result + lora_B(lora_A(dropout(x))) * scaling
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/torch/nn/modules/dropout.py", line 59, in forward
return F.dropout(input, self.p, self.training, self.inplace)
File "/home/ymr/anaconda3/envs/alignment/lib/python3.8/site-packages/torch/nn/functional.py", line 1252, in dropout
return _VF.dropout_(input, p, training) if inplace else _VF.dropout(input, p, training)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB (GPU 0; 23.65 GiB total capacity; 22.31 GiB already allocated; 28.56 MiB free; 22.37 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
Hello, I hit the error above when training with 2× RTX 4090 GPUs. Could you tell me the device requirements for this project? Looking forward to your reply.
Hello, could you share a solution to the problem above? Any help would be appreciated.
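For reference, the error message itself suggests setting max_split_size_mb via PYTORCH_CUDA_ALLOC_CONF, and LoRA runs that overflow a 24 GiB card usually also benefit from gradient checkpointing and a smaller per-device batch size. Below is a minimal sketch of those mitigations; it assumes train.py builds a standard transformers.TrainingArguments, and the option values shown are illustrative, not the repo's actual configuration.

import os

# Reduce allocator fragmentation, as suggested by the OOM message.
# Must be set before CUDA is initialized (i.e. before the first torch.cuda call).
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./output",            # hypothetical output path
    per_device_train_batch_size=1,    # smaller micro-batch per GPU
    gradient_accumulation_steps=16,   # keep the effective batch size
    gradient_checkpointing=True,      # trade compute for activation memory
    fp16=True,                        # mixed precision, matching the autocast in the trace
)

Whether these knobs are exposed by the repo's train.py is an assumption; if not, the equivalent changes would need to be made where the script constructs its trainer.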