Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

The argument "--cpu" is not supported #6

Open
Beau-xu opened this issue Nov 5, 2021 · 0 comments
Open

The argument "--cpu" is not supported #6

Beau-xu opened this issue Nov 5, 2021 · 0 comments

Comments

@Beau-xu
Copy link

Beau-xu commented Nov 5, 2021

I want to use a device without a GPU to translate the test datasets.
But it seems that a GPU is necessary, even though I used the argument "--cpu".
My command is:

$ python3 fairseq_cli/generate.py ${data_dir} --path ${checkpoint_path}/checkpoint_best.pt --user-dir glat_plugins \
>     --task translation_lev_modified --remove-bpe --max-sentences 20 --source-lang ${src} --target-lang ${tgt} \
>     --quiet --iter-decode-max-iter 0 --iter-decode-eos-penalty 0 --iter-decode-with-beam 1 --gen-subset test --save-dir transcheck \
>     --cpu

and it responds:

fairseq plugins loaded...
2021-11-05 20:48:08 | INFO | fairseq_cli.generate | {'_name': None, 'common': {'_name': None, 'no_progress_bar': False, 'log_interval': 100, 'log_format': None, 'tensorboard_logdir': None, 'wandb_project': None, 'azureml_logging': False, 'seed': 1, 'cpu': True, 'tpu': False, 'bf16': False, 'memory_efficient_bf16': False, 'fp16': False, 'memory_efficient_fp16': False, 'fp16_no_flatten_grads': False, 'fp16_init_scale': 128, 'fp16_scale_window': None, 'fp16_scale_tolerance': 0.0, 'min_loss_scale': 0.0001, 'threshold_loss_scale': None, 'user_dir': 'glat_plugins', 'empty_cache_freq': 0, 'all_gather_list_size': 16384, 'model_parallel_size': 1, 'quantization_config_path': None, 'profile': False, 'reset_logging': False, 'suppress_crashes': False}, 'common_eval': {'_name': None, 'path': 'smallmodel/checkpoint_best.pt', 'post_process': 'subword_nmt', 'quiet': True, 'model_overrides': '{}', 'results_path': None}, 'distributed_training': {'_name': None, 'distributed_world_size': 1, 'distributed_rank': 0, 'distributed_backend': 'nccl', 'distributed_init_method': None, 'distributed_port': -1, 'device_id': 0, 'distributed_no_spawn': False, 'ddp_backend': 'pytorch_ddp', 'bucket_cap_mb': 25, 'fix_batches_to_gpus': False, 'find_unused_parameters': False, 'fast_stat_sync': False, 'heartbeat_timeout': -1, 'broadcast_buffers': False, 'slowmo_momentum': None, 'slowmo_algorithm': 'LocalSGD', 'localsgd_frequency': 3, 'nprocs_per_node': 1, 'pipeline_model_parallel': False, 'pipeline_balance': None, 'pipeline_devices': None, 'pipeline_chunks': 0, 'pipeline_encoder_balance': None, 'pipeline_encoder_devices': None, 'pipeline_decoder_balance': None, 'pipeline_decoder_devices': None, 'pipeline_checkpoint': 'never', 'zero_sharding': 'none', 'tpu': False}, 'dataset': {'_name': None, 'num_workers': 1, 'skip_invalid_size_inputs_valid_test': False, 'max_tokens': None, 'batch_size': 20, 'required_batch_size_multiple': 8, 'required_seq_len_multiple': 1, 'dataset_impl': None, 'data_buffer_size': 10, 
'train_subset': 'train', 'valid_subset': 'valid', 'validate_interval': 1, 'validate_interval_updates': 0, 'validate_after_updates': 0, 'fixed_validation_seed': None, 'disable_validation': False, 'max_tokens_valid': None, 'batch_size_valid': 20, 'curriculum': 0, 'gen_subset': 'test', 'num_shards': 1, 'shard_id': 0}, 'optimization': {'_name': None, 'max_epoch': 0, 'max_update': 0, 'stop_time_hours': 0.0, 'clip_norm': 0.0, 'sentence_avg': False, 'update_freq': [1], 'lr': [0.25], 'stop_min_lr': -1.0, 'use_bmuf': False}, 'checkpoint': {'_name': None, 'save_dir': 'transcheck', 'restore_file': 'checkpoint_last.pt', 'finetune_from_model': None, 'reset_dataloader': False, 'reset_lr_scheduler': False, 'reset_meters': False, 'reset_optimizer': False, 'optimizer_overrides': '{}', 'save_interval': 1, 'save_interval_updates': 0, 'keep_interval_updates': -1, 'keep_last_epochs': -1, 'keep_best_checkpoints': -1, 'no_save': False, 'no_epoch_checkpoints': False, 'no_last_checkpoints': False, 'no_save_optimizer_state': False, 'best_checkpoint_metric': 'loss', 'maximize_best_checkpoint_metric': False, 'patience': -1, 'checkpoint_suffix': '', 'checkpoint_shard_count': 1, 'load_checkpoint_on_all_dp_ranks': False, 'model_parallel_size': 1, 'distributed_rank': 0}, 'bmuf': {'_name': None, 'block_lr': 1.0, 'block_momentum': 0.875, 'global_sync_iter': 50, 'warmup_iterations': 500, 'use_nbm': False, 'average_sync': False, 'distributed_world_size': 1}, 'generation': {'_name': None, 'beam': 5, 'nbest': 1, 'max_len_a': 0.0, 'max_len_b': 200, 'min_len': 1, 'match_source_len': False, 'unnormalized': False, 'no_early_stop': False, 'no_beamable_mm': False, 'lenpen': 1.0, 'unkpen': 0.0, 'replace_unk': None, 'sacrebleu': False, 'score_reference': False, 'prefix_size': 0, 'no_repeat_ngram_size': 0, 'sampling': False, 'sampling_topk': -1, 'sampling_topp': -1.0, 'constraints': None, 'temperature': 1.0, 'diverse_beam_groups': -1, 'diverse_beam_strength': 0.5, 'diversity_rate': -1.0, 'print_alignment': 
None, 'print_step': False, 'lm_path': None, 'lm_weight': 0.0, 'iter_decode_eos_penalty': 0.0, 'iter_decode_max_iter': 0, 'iter_decode_force_max_iter': False, 'iter_decode_with_beam': 1, 'iter_decode_with_external_reranker': False, 'retain_iter_history': False, 'retain_dropout': False, 'retain_dropout_modules': None, 'decoding_format': None, 'no_seed_provided': False}, 'eval_lm': {'_name': None, 'output_word_probs': False, 'output_word_stats': False, 'context_window': 0, 'softmax_batch': 9223372036854775807}, 'interactive': {'_name': None, 'buffer_size': 0, 'input': '-'}, 'model': None, 'task': {'_name': 'translation_lev_modified', 'data': 'cantonese-mandarin/pre-processed', 'source_lang': 'can', 'target_lang': 'man', 'load_alignments': False, 'left_pad_source': False, 'left_pad_target': False, 'max_source_positions': 1024, 'max_target_positions': 1024, 'upsample_primary': -1, 'truncate_source': False, 'num_batch_buckets': 0, 'train_subset': 'train', 'dataset_impl': None, 'required_seq_len_multiple': 1, 'eval_bleu': False, 'eval_bleu_args': '{}', 'eval_bleu_detok': 'space', 'eval_bleu_detok_args': '{}', 'eval_tokenized_bleu': False, 'eval_bleu_remove_bpe': None, 'eval_bleu_print_samples': False, 'noise': 'random_delete', 'start_p': 0.5, 'minus_p': 0.2, 'total_up': 300000}, 'criterion': {'_name': 'cross_entropy', 'sentence_avg': True}, 'optimizer': None, 'lr_scheduler': {'_name': 'fixed', 'force_anneal': None, 'lr_shrink': 0.1, 'warmup_updates': 0, 'lr': [0.25]}, 'scoring': {'_name': 'bleu', 'pad': 1, 'eos': 2, 'unk': 3}, 'bpe': None, 'tokenizer': None}
2021-11-05 20:48:08 | INFO | fairseq.tasks.translation | [can] dictionary: 10168 types
2021-11-05 20:48:08 | INFO | fairseq.tasks.translation | [man] dictionary: 10168 types
2021-11-05 20:48:08 | INFO | fairseq_cli.generate | loading model(s) from smallmodel/checkpoint_best.pt
2021-11-05 20:48:10 | INFO | fairseq.data.data_utils | loaded 1,000 examples from: cantonese-mandarin/pre-processed/test.can-man.can
2021-11-05 20:48:10 | INFO | fairseq.data.data_utils | loaded 1,000 examples from: cantonese-mandarin/pre-processed/test.can-man.man
2021-11-05 20:48:10 | INFO | fairseq.tasks.translation | cantonese-mandarin/pre-processed test can-man 1000 examples
Traceback (most recent call last):                                                                                                                                         
  File "fairseq_cli/generate.py", line 408, in <module>
    cli_main()
  File "fairseq_cli/generate.py", line 404, in cli_main
    main(args)
  File "fairseq_cli/generate.py", line 49, in main
    return _main(cfg, sys.stdout)
  File "fairseq_cli/generate.py", line 206, in _main
    constraints=constraints,
  File "/data/home/db72687/Documents/glat/fairseq/tasks/fairseq_task.py", line 501, in inference_step
    models, sample, prefix_tokens=prefix_tokens, constraints=constraints
  File "/data/home/db72687/anaconda3/envs/glat/lib/python3.7/site-packages/torch/autograd/grad_mode.py", line 15, in decorate_context
    return func(*args, **kwargs)
  File "/data/home/db72687/Documents/glat/fairseq/iterative_refinement_generator.py", line 212, in generate
    prev_decoder_out, encoder_out, **decoder_options
  File "/data/home/db72687/Documents/glat/fairseq/models/nat/nonautoregressive_transformer.py", line 138, in forward_decoder
    step=step,
  File "/data/home/db72687/anaconda3/envs/glat/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
    result = self.forward(*input, **kwargs)
  File "/data/home/db72687/Documents/glat/fairseq/models/nat/fairseq_nat_model.py", line 46, in wrapper
    self, normalize=normalize, encoder_out=encoder_out, *args, **kwargs
  File "/data/home/db72687/Documents/glat/fairseq/models/nat/nonautoregressive_transformer.py", line 239, in forward
    embedding_copy=(step == 0) & self.src_embedding_copy,
  File "/data/home/db72687/Documents/glat/fairseq/models/nat/nonautoregressive_transformer.py", line 301, in extract_features
    x = cat_x.index_select(dim=0, index=torch.arange(bsz * seq_len).cuda() * 2 +
  File "/data/home/db72687/anaconda3/envs/glat/lib/python3.7/site-packages/torch/cuda/__init__.py", line 149, in _lazy_init
    _check_driver()
  File "/data/home/db72687/anaconda3/envs/glat/lib/python3.7/site-packages/torch/cuda/__init__.py", line 54, in _check_driver
    http://www.nvidia.com/Download/index.aspx""")
AssertionError: 
Found no NVIDIA driver on your system. Please check that you
have an NVIDIA GPU and installed a driver from
http://www.nvidia.com/Download/index.aspx
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant