Skip to content

Commit

Permalink
Merge branch 'jasonwan/nemo_peft_patch' into 'main'
Browse files — browse the repository at this point in the history
Support MCore PEFT adapters in NeMo

See merge request ADLR/megatron-lm!735
  • Loading branch information
jaredcasper committed Aug 21, 2023
2 parents 3e63c45 + 49f65b7 commit f24fac4
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions megatron/core/model_parallel_config.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.

from dataclasses import dataclass
from typing import Callable
from typing import Callable, Optional

import torch

Expand Down Expand Up @@ -113,7 +113,7 @@ class ModelParallelConfig:
# Model parallelism
tensor_model_parallel_size: int = 1
pipeline_model_parallel_size: int = 1
virtual_pipeline_model_parallel_size: int = None
virtual_pipeline_model_parallel_size: Optional[int] = None
sequence_parallel: bool = False

# Initialization
Expand All @@ -136,7 +136,7 @@ class ModelParallelConfig:
enable_autocast: bool = False
autocast_dtype: torch.dtype = None
variable_seq_lengths: bool = False
num_microbatches_with_partial_activation_checkpoints: int = None
num_microbatches_with_partial_activation_checkpoints: Optional[int] = None
overlap_p2p_comm: bool = False
batch_p2p_comm: bool = True
batch_p2p_sync: bool = True
Expand Down

0 comments on commit f24fac4

Please sign in to comment.