inputs_ids cast to fp16 in deeperspeed bug #44

Open
mayank31398 opened this issue May 5, 2023 · 0 comments
mayank31398 commented May 5, 2023

{
  "pipe-parallel-size": 1,
  "model-parallel-size": 1,

  "num-layers": 16,
  "hidden-size": 2048,
  "num-attention-heads": 8,
  "seq-length": 2048,
  "max-position-embeddings": 2048,
  "pos-emb": "rotary",
  "rotary-pct": 0.25,
  "no-weight-tying": true,
  "gpt-j-residual": true,
  "output-layer-parallelism": "column",
  
  "scaled-upper-triang-masked-softmax-fusion": false,
  "bias-gelu-fusion": false,

  "init_method": "small_init",
  "output_layer_init_method": "wang_init",

  "optimizer": {
    "type": "Adam",
    "params": {
      "lr": 0.00025,
      "betas": [0.9, 0.95],
      "eps": 1.0e-8
    }
  },
  "min_lr": 0.000025,

  "zero_optimization": {
    "stage": 0,
    "allgather_partitions": true,
    "allgather_bucket_size": 500000000,
    "overlap_comm": true,
    "reduce_scatter": true,
    "reduce_bucket_size": 500000000,
    "contiguous_gradients": true,
    "cpu_offload": false
  }, 

  "fp16": {
    "enabled": true,
    "type": "bfloat16",
    "auto_cast": true,
    "loss_scale": 0,
    "loss_scale_window": 1000,
    "initial_scale_power": 12,
    "hysteresis": 2,
    "min_loss_scale": 1
  }, 

  "fp32_allreduce": true,

  "train_micro_batch_size_per_gpu": 4,
  "gradient-accumulation-steps": 4,

  "data-path": "data/debug_text_document",
  "data-impl": "mmap",
  "num_workers": 1,

  "checkpoint-activations": true,
  "checkpoint-num-layers": 1,
  "partition-activations": true,
  "synchronize-each-layer": true,

  "gradient_clipping": 1.0,
  "weight-decay": 0.1,
  "hidden-dropout": 0,
  "attention-dropout": 0,

  "train-iters": 143000,
  "lr-decay-iters": 143000,
  "distributed-backend": "nccl",
  "lr-decay-style": "cosine",
  "warmup": 0.01,
  "checkpoint-factor": 1000,
  "extra-save-iters": [0,1,2,4,8,16,32,64,128,256,512],
  "eval-interval": 143000,
  "eval-iters": 10,

  "log-interval": 10,
  "steps_per_print": 10,
  "wall_clock_breakdown": true,

  "tokenizer-type": "HFGPT2Tokenizer"
}
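The relevant part seems to be the fp16 block: with half precision enabled (and auto_cast on), the tensors fed into the pipeline apparently get cast wholesale, including the integer token ids. Below is a minimal sketch in plain PyTorch of why a blanket cast breaks things; the shapes and tensor names are illustrative assumptions, not DeeperSpeed code.

# A minimal sketch (plain PyTorch, no DeepSpeed) of why a blanket fp16 cast over
# the pipeline inputs is a problem: the token ids are integer tensors and have to
# stay integer for the embedding lookup. Shapes below are illustrative only.
import torch

input_ids = torch.randint(0, 50304, (2, 8))        # int64 token ids
position_ids = torch.arange(8).expand(2, -1)       # int64
attention_mask = torch.ones(2, 1, 8, 8)            # float32

inputs = (input_ids, position_ids, attention_mask)

# Casting every element of the tuple, as an over-eager auto-cast would:
bad = tuple(t.half() for t in inputs)
print([t.dtype for t in bad])    # all torch.float16, including the token ids

# Casting only the floating-point tensors keeps the ids usable:
good = tuple(t.half() if torch.is_floating_point(t) else t for t in inputs)
print([t.dtype for t in good])   # [torch.int64, torch.int64, torch.float16]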

I tried this config, but I see the following error:

Traceback (most recent call last):
  File "train.py", line 27, in <module>
    pretrain(neox_args=neox_args)
  File "/dccstor/mayankgpfs/scratch/gpt-neox/megatron/training.py", line 226, in pretrain
    iteration = train(
  File "/dccstor/mayankgpfs/scratch/gpt-neox/megatron/training.py", line 782, in train
    loss_dict, skipped_iter = train_step(
  File "/dccstor/mayankgpfs/scratch/gpt-neox/megatron/training.py", line 688, in train_step
    reduced_loss = train_step_pipe(
  File "/dccstor/mayankgpfs/scratch/gpt-neox/megatron/training.py", line 738, in train_step_pipe
    loss = model.train_batch(data_iter=data_iterator)
  File "/dccstor/mayankgpfs/scratch/DeeperSpeed/deepspeed/runtime/pipe/engine.py", line 346, in train_batch
    self._exec_schedule(sched)
  File "/dccstor/mayankgpfs/scratch/DeeperSpeed/deepspeed/runtime/pipe/engine.py", line 1376, in _exec_schedule
    self._exec_instr(**cmd.kwargs)
  File "/dccstor/mayankgpfs/scratch/DeeperSpeed/deepspeed/runtime/pipe/engine.py", line 658, in _exec_forward_pass
    outputs = super().forward(inputs)
  File "/dccstor/mayankgpfs/scratch/DeeperSpeed/deepspeed/utils/nvtx.py", line 11, in wrapped_fn
    ret_val = func(*args, **kwargs)
  File "/dccstor/mayankgpfs/scratch/DeeperSpeed/deepspeed/runtime/engine.py", line 1842, in forward
    loss = self.module(*inputs, **kwargs)
  File "/dccstor/mayankgpfs/conda/envs/laion/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/dccstor/mayankgpfs/scratch/DeeperSpeed/deepspeed/runtime/pipe/module.py", line 364, in forward
    x = exec_range_func(start_idx, end_idx)(*x)
  File "/dccstor/mayankgpfs/scratch/DeeperSpeed/deepspeed/runtime/pipe/module.py", line 337, in exec_func
    inputs = layer(inputs)
  File "/dccstor/mayankgpfs/conda/envs/laion/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/dccstor/mayankgpfs/scratch/gpt-neox/megatron/model/word_embeddings.py", line 181, in forward
    embeddings = super().forward(input_ids, position_ids)
  File "/dccstor/mayankgpfs/scratch/gpt-neox/megatron/model/word_embeddings.py", line 136, in forward
    words_embeddings = self.word_embeddings(input_ids)
  File "/dccstor/mayankgpfs/conda/envs/laion/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/dccstor/mayankgpfs/scratch/gpt-neox/megatron/mpu/layers.py", line 196, in forward
    output_parallel = F.embedding(
  File "/dccstor/mayankgpfs/conda/envs/laion/lib/python3.8/site-packages/torch/nn/functional.py", line 2210, in embedding
    return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
RuntimeError: Expected tensor for argument #1 'indices' to have one of the following scalar types: Long, Int; but got torch.cuda.HalfTensor instead (while checking arguments for embedding)
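For reference, the failure reproduces outside the training loop: F.embedding rejects non-integer indices, which is exactly what the traceback shows once the ids arrive as torch.cuda.HalfTensor. The snippet below is a standalone sketch with a tiny made-up vocab; the safe_embedding_lookup helper is my own illustration, not gpt-neox or DeeperSpeed code, and casting the ids back to long only masks the problem.

# Standalone reproduction of the RuntimeError above, plus a defensive cast that
# only masks it. Vocab/hidden sizes are kept small and illustrative; the
# `safe_embedding_lookup` helper is an assumption, not gpt-neox/DeeperSpeed code.
import torch
import torch.nn.functional as F

weight = torch.randn(2048, 16, dtype=torch.float16)    # [vocab, hidden] embedding table
input_ids = torch.randint(0, 2048, (4, 8))              # int64 token ids

F.embedding(input_ids, weight)                           # fine

try:
    F.embedding(input_ids.half(), weight)                # ids cast to fp16, as in the traceback
except RuntimeError as e:
    print(e)  # Expected tensor for argument #1 'indices' to have one of the following scalar types ...

def safe_embedding_lookup(ids, table):
    # Restore an integer dtype before the lookup if an upstream cast hit the ids.
    # This silences the error but does not fix the cast itself: with a realistic
    # vocab size, token ids above 2048 already lose precision in fp16, so the
    # real fix is to stop the engine from casting integer inputs at all.
    if ids.dtype not in (torch.int32, torch.int64):
        ids = ids.long()
    return F.embedding(ids, table)

safe_embedding_lookup(input_ids.half(), weight)          # no longer raises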
mayank31398 self-assigned this May 5, 2023