While training a summarization model with fairseq, I encountered the following error during the validation step. How can I solve this problem?
```
-- Process 1 terminated with the following error:
Traceback (most recent call last):
  File "/home/deep/.conda/envs/torch3.7/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 69, in _wrap
    fn(i, *args)
  File "/home/deep/.conda/envs/torch3.7/lib/python3.7/site-packages/fairseq/distributed/utils.py", line 328, in distributed_main
    main(cfg, **kwargs)
  File "/home/deep/PRGEN/train.py", line 181, in main
    valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
  File "/home/deep/.conda/envs/torch3.7/lib/python3.7/contextlib.py", line 74, in inner
    return func(*args, **kwds)
  File "/home/deep/PRGEN/train.py", line 314, in train
    cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch,
  File "/home/deep/PRGEN/train.py", line 404, in validate_and_save
    valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)
  File "/home/deep/PRGEN/train.py", line 476, in validate
    trainer.valid_step(sample)
  File "/home/deep/.conda/envs/torch3.7/lib/python3.7/contextlib.py", line 74, in inner
    return func(*args, **kwds)
  File "/home/deep/PRGEN/trainer.py", line 1037, in valid_step
    sample, self.model, self.criterion, **extra_kwargs
  File "/home/deep/PRGEN/src/task/faithful_summary_task.py", line 282, in valid_step
    metrics = self._inference_with_bleu(self.sequence_generator, sample, model)
  File "/home/deep/PRGEN/src/task/faithful_summary_task.py", line 332, in _inference_with_bleu
    gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
  File "/home/deep/.conda/envs/torch3.7/lib/python3.7/site-packages/fairseq/tasks/fairseq_task.py", line 541, in inference_step
    models, sample, prefix_tokens=prefix_tokens, constraints=constraints
  File "/home/deep/.conda/envs/torch3.7/lib/python3.7/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File "/home/deep/.conda/envs/torch3.7/lib/python3.7/site-packages/fairseq/sequence_generator.py", line 204, in generate
    return self._generate(sample, **kwargs)
  File "/home/deep/.conda/envs/torch3.7/lib/python3.7/site-packages/fairseq/sequence_generator.py", line 470, in _generate
    assert step < max_len, f"{step} < {max_len}"
AssertionError: 60 < 60
```
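
For reference, here is my understanding of where the "60" comes from, paraphrased from the fairseq 0.10.x `SequenceGenerator._generate` (my installed version may differ). The generator caps decoding at `max_len = min(max_len_a * src_len + max_len_b, model.max_decoder_positions() - 1)`, runs the search loop for one extra step in which EOS is forced, and the assertion at line 470 fires when some beams still fail to finalize at that cap (e.g. if their EOS scores end up at `-inf`). A minimal sketch of the cap, with hypothetical numbers:

```python
def generator_max_len(src_len: int,
                      max_len_a: float = 0.0,
                      max_len_b: int = 200,
                      max_decoder_positions: int = 1024) -> int:
    # Paraphrase of the length cap computed in SequenceGenerator._generate
    # (fairseq 0.10.x); the exact expression may differ in other versions.
    return min(int(max_len_a * src_len + max_len_b),
               max_decoder_positions - 1)


# Hypothetical settings that would yield a cap of 60, matching the
# "60 < 60" in the error; the real values come from the generation
# args the task uses to build its sequence_generator.
print(generator_max_len(src_len=400, max_len_a=0.0, max_len_b=60))  # -> 60
```

Since my task calls `_inference_with_bleu` the same way fairseq's built-in translation task does, I assume the generator settings come from a JSON generation-args string (for the translation task that would be `--eval-bleu-args`, e.g. `'{"max_len_b": 200}'`). Is raising the length cap there the right fix, or does the assertion point at something else, such as NaN/`-inf` scores from the model early in training?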