From b25b8e0347a5e1f2e97077941b2facf56235c84b Mon Sep 17 00:00:00 2001 From: Max Luebbering Date: Wed, 19 Jun 2024 17:40:24 +0200 Subject: [PATCH 01/18] refactor: block_size in datasets is now sequence_length +1 --- config_files/training/config_lorem_ipsum.yaml | 2 +- examples/getting_started/example_config.yaml | 4 ++-- examples/library_usage/config_lorem_ipsum.yaml | 2 +- src/modalities/config/config.py | 4 ++-- src/modalities/dataloader/dataset_factory.py | 14 ++++++++------ src/modalities/models/gpt2/gpt2_model.py | 3 +-- .../yaml_configs/skipped_dataloader.yaml | 2 +- tests/end2end_tests/gpt2_train_num_steps_8.yaml | 4 ++-- .../end2end_tests/gpt2_warm_start_from_step_4.yaml | 2 +- tests/test_yaml_configs/config_lorem_ipsum.yaml | 2 +- 10 files changed, 20 insertions(+), 19 deletions(-) diff --git a/config_files/training/config_lorem_ipsum.yaml b/config_files/training/config_lorem_ipsum.yaml index f7db8a968..4f0e99dd4 100644 --- a/config_files/training/config_lorem_ipsum.yaml +++ b/config_files/training/config_lorem_ipsum.yaml @@ -32,7 +32,7 @@ train_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: ./data/lorem_ipsum.pbin - block_size: ${settings.training.sequence_length} + model_sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} train_dataloader: diff --git a/examples/getting_started/example_config.yaml b/examples/getting_started/example_config.yaml index c6160af87..e21b7a869 100644 --- a/examples/getting_started/example_config.yaml +++ b/examples/getting_started/example_config.yaml @@ -34,7 +34,7 @@ train_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: ./data/mem_map/redpajama_v2_samples_512_train.pbin - block_size: ${settings.training.sequence_length} + model_sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} train_dataloader: @@ -73,7 +73,7 @@ val_dataset: variant_key: 
packed_mem_map_dataset_continuous config: raw_data_path: ./data/mem_map/redpajama_v2_samples_512_test.pbin - block_size: ${settings.training.sequence_length} + model_sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} val_dataloader: diff --git a/examples/library_usage/config_lorem_ipsum.yaml b/examples/library_usage/config_lorem_ipsum.yaml index 8e4b2a745..d56fe1879 100644 --- a/examples/library_usage/config_lorem_ipsum.yaml +++ b/examples/library_usage/config_lorem_ipsum.yaml @@ -38,7 +38,7 @@ train_dataset: config: raw_data_path: ../../data/lorem_ipsum.jsonl index_path: ../../data/lorem_ipsum.idx - block_size: ${settings.training.sequence_length} + model_sequence_length: ${settings.training.sequence_length} jq_pattern: ".text" sample_key: ${settings.referencing_keys.sample_key} tokenizer: diff --git a/src/modalities/config/config.py b/src/modalities/config/config.py index 52c748144..41b35a072 100644 --- a/src/modalities/config/config.py +++ b/src/modalities/config/config.py @@ -251,7 +251,7 @@ class DistributedSamplerConfig(BaseModel): class MemMapDatasetConfig(BaseModel): raw_data_path: FilePath index_path: Optional[FilePath] = None - block_size: Annotated[int, Field(strict=True, gt=0)] + model_sequence_length: Annotated[int, Field(strict=True, gt=0)] tokenizer: PydanticTokenizerIFType jq_pattern: str sample_key: str @@ -259,7 +259,7 @@ class MemMapDatasetConfig(BaseModel): class PackedMemMapDatasetContinuousConfig(BaseModel): raw_data_path: Path - block_size: Annotated[int, Field(strict=True, gt=0)] + model_sequence_length: Annotated[int, Field(strict=True, gt=0)] sample_key: str diff --git a/src/modalities/dataloader/dataset_factory.py b/src/modalities/dataloader/dataset_factory.py index 1d31e27cf..bce751478 100644 --- a/src/modalities/dataloader/dataset_factory.py +++ b/src/modalities/dataloader/dataset_factory.py @@ -40,7 +40,7 @@ def get_dummy_dataset(num_samples: int, sample_definition: 
Tuple[DummySampleConf @staticmethod def get_mem_map_dataset( raw_data_path: Path, - block_size: int, + model_sequence_length: int, tokenizer: PreTrainedTokenizer, sample_key: str, index_path: Optional[Path] = None, @@ -48,7 +48,7 @@ def get_mem_map_dataset( ) -> MemMapDataset: dataset = MemMapDataset( raw_data_path=raw_data_path, - block_size=block_size, + block_size=model_sequence_length + 1, tokenizer=tokenizer, sample_key=sample_key, index_path=index_path, @@ -58,18 +58,20 @@ def get_mem_map_dataset( @staticmethod def get_packed_mem_map_dataset_continuous( - raw_data_path: Path, block_size: int, sample_key: str + raw_data_path: Path, model_sequence_length: int, sample_key: str ) -> PackedMemMapDatasetContinuous: dataset = PackedMemMapDatasetContinuous( - raw_data_path=raw_data_path, block_size=block_size, sample_key=sample_key + raw_data_path=raw_data_path, block_size=model_sequence_length + 1, sample_key=sample_key ) return dataset @staticmethod def get_packed_mem_map_dataset_megatron( - raw_data_path: Path, block_size: int, sample_key: str + raw_data_path: Path, model_sequence_length: int, sample_key: str ) -> PackedMemMapDatasetMegatron: - dataset = PackedMemMapDatasetMegatron(raw_data_path=raw_data_path, block_size=block_size, sample_key=sample_key) + dataset = PackedMemMapDatasetMegatron( + raw_data_path=raw_data_path, block_size=model_sequence_length + 1, sample_key=sample_key + ) return dataset @staticmethod diff --git a/src/modalities/models/gpt2/gpt2_model.py b/src/modalities/models/gpt2/gpt2_model.py index 1fcd440de..dbf827281 100644 --- a/src/modalities/models/gpt2/gpt2_model.py +++ b/src/modalities/models/gpt2/gpt2_model.py @@ -452,8 +452,7 @@ def __init__( # TODO: dependency injection if poe_type is PositionTypes.ABSOLUTE: - # source and target sequences are block_size -1 long, since target is the source shifted by 1 - wpe = nn.Embedding(num_embeddings=block_size - 1, embedding_dim=n_embd) + wpe = nn.Embedding(num_embeddings=block_size, 
embedding_dim=n_embd) elif poe_type is PositionTypes.NOPE: # Using a pre-trained layer, requires to define a separate FSDP unit for the frozen layer c.f. # https://github.com/huggingface/accelerate/issues/807 diff --git a/tests/dataloader/yaml_configs/skipped_dataloader.yaml b/tests/dataloader/yaml_configs/skipped_dataloader.yaml index adcfab690..726c1d088 100644 --- a/tests/dataloader/yaml_configs/skipped_dataloader.yaml +++ b/tests/dataloader/yaml_configs/skipped_dataloader.yaml @@ -22,7 +22,7 @@ train_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: ./data/lorem_ipsum.pbin - block_size: ${settings.training.sequence_length} + model_sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} skip_num_batches: diff --git a/tests/end2end_tests/gpt2_train_num_steps_8.yaml b/tests/end2end_tests/gpt2_train_num_steps_8.yaml index 79bc59737..2ce6703a8 100644 --- a/tests/end2end_tests/gpt2_train_num_steps_8.yaml +++ b/tests/end2end_tests/gpt2_train_num_steps_8.yaml @@ -31,7 +31,7 @@ train_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: lorem_ipsum.pbin - block_size: ${settings.training.sequence_length} + model_sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} train_dataloader: @@ -125,7 +125,7 @@ model: config: sample_key: ${settings.referencing_keys.sample_key} poe_type: NOPE - block_size: ${settings.training.sequence_length} + model_sequence_length: ${settings.training.sequence_length} prediction_key: ${loss_fn.config.prediction_key} vocab_size: 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: 2 diff --git a/tests/end2end_tests/gpt2_warm_start_from_step_4.yaml b/tests/end2end_tests/gpt2_warm_start_from_step_4.yaml index 56594b602..78503b6b2 100644 --- a/tests/end2end_tests/gpt2_warm_start_from_step_4.yaml +++ b/tests/end2end_tests/gpt2_warm_start_from_step_4.yaml 
@@ -31,7 +31,7 @@ train_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: lorem_ipsum.pbin - block_size: ${settings.training.sequence_length} + model_sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} train_dataloader: diff --git a/tests/test_yaml_configs/config_lorem_ipsum.yaml b/tests/test_yaml_configs/config_lorem_ipsum.yaml index 4b3b255dc..5b8781bcd 100644 --- a/tests/test_yaml_configs/config_lorem_ipsum.yaml +++ b/tests/test_yaml_configs/config_lorem_ipsum.yaml @@ -32,7 +32,7 @@ train_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: ./data/lorem_ipsum.pbin - block_size: ${settings.training.sequence_length} + model_sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} train_dataloader: From c36bc20f7e245d625dcc657b77d39347769352a1 Mon Sep 17 00:00:00 2001 From: Max Luebbering Date: Wed, 19 Jun 2024 17:52:24 +0200 Subject: [PATCH 02/18] fix: failing end2end tests due to sequence_length / block_size changes --- tests/end2end_tests/gpt2_train_num_steps_8.yaml | 2 +- tests/test_optimizer_factory.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/end2end_tests/gpt2_train_num_steps_8.yaml b/tests/end2end_tests/gpt2_train_num_steps_8.yaml index 2ce6703a8..654a852dc 100644 --- a/tests/end2end_tests/gpt2_train_num_steps_8.yaml +++ b/tests/end2end_tests/gpt2_train_num_steps_8.yaml @@ -125,7 +125,7 @@ model: config: sample_key: ${settings.referencing_keys.sample_key} poe_type: NOPE - model_sequence_length: ${settings.training.sequence_length} + block_size: ${settings.training.sequence_length} prediction_key: ${loss_fn.config.prediction_key} vocab_size: 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: 2 diff --git a/tests/test_optimizer_factory.py b/tests/test_optimizer_factory.py index 7b2d159ab..840003d34 100644 --- 
a/tests/test_optimizer_factory.py +++ b/tests/test_optimizer_factory.py @@ -72,7 +72,7 @@ def _load_coca() -> FSDP: # number of parameters for each optimizer group GPT2_LINEAR = 66130944 -GPT2_EMBEDDING = 768 * (50304 + 2048 - 1) # n_embd * (vocab_size + block_size - 1) +GPT2_EMBEDDING = 768 * (50304 + 2048) # n_embd * (vocab_size + sequence_length) GPT2_LAYERNORM = 768 * 50 # n_embd * num_layer_norms COCA_ALL = 184502784 From bfad9fc6c036557e3ae81b3e9349e71d12067ef6 Mon Sep 17 00:00:00 2001 From: Max Luebbering Date: Wed, 19 Jun 2024 18:57:25 +0200 Subject: [PATCH 03/18] refactor: replaced context_size and block_size with sequence_length when applicable --- .../text_generation_config_torch.yaml | 2 +- .../training/config_example_coca.yaml | 4 ++-- .../config_example_openGPTx_dataset.yaml | 2 +- .../config_gpt2_small_overfitting_de.yaml | 2 +- ...gpt2_small_overfitting_de_abs_pos_emb.yaml | 2 +- ...onfig_gpt2_small_redpajama_DE_1048576.yaml | 2 +- config_files/training/config_lorem_ipsum.yaml | 6 ++--- examples/getting_started/example_config.yaml | 4 ++-- .../example_text_generation_config.yaml | 2 +- .../library_usage/config_lorem_ipsum.yaml | 2 +- src/modalities/models/gpt2/gpt2_model.py | 24 ++++++++++--------- src/modalities/utils/number_conversion.py | 22 ++++++++--------- tests/checkpointing/gpt2_config.yaml | 2 +- .../test_checkpoint_execution_functions.py | 2 +- .../test_fsdp_to_disc_checkpointing.py | 10 ++++---- .../yaml_configs/skipped_dataloader.yaml | 2 +- .../end2end_tests/gpt2_train_num_steps_8.yaml | 8 +++---- .../gpt2_warm_start_from_step_4.yaml | 8 +++---- .../test_yaml_configs/config_lorem_ipsum.yaml | 6 ++--- .../gpt2_config_optimizer.yaml | 2 +- tests/utils/test_number_conversion.py | 19 ++++++++------- 21 files changed, 68 insertions(+), 65 deletions(-) diff --git a/config_files/text_generation/text_generation_config_torch.yaml b/config_files/text_generation/text_generation_config_torch.yaml index a639586f6..8f462c5cb 100644 --- 
a/config_files/text_generation/text_generation_config_torch.yaml +++ b/config_files/text_generation/text_generation_config_torch.yaml @@ -44,7 +44,7 @@ raw_model: config: sample_key: ${settings.referencing_keys.sample_key} poe_type: ABSOLUTE - block_size: ${settings.context_length} + sequence_length: ${settings.context_length} prediction_key: ${settings.referencing_keys.prediction_key} vocab_size: 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: 12 diff --git a/config_files/training/config_example_coca.yaml b/config_files/training/config_example_coca.yaml index dc854ebcd..19c4591d7 100644 --- a/config_files/training/config_example_coca.yaml +++ b/config_files/training/config_example_coca.yaml @@ -150,7 +150,7 @@ checkpoint_saving: config: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} loss_fn: component_key: loss variant_key: clm_cross_entropy_loss @@ -269,7 +269,7 @@ batch_progress_subscriber: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} global_num_tokens: ${settings.training.global_num_seen_tokens} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} gradient_acc_steps: ${settings.training.gradient_acc_steps} train_dataloader: instance_key: train_dataloader diff --git a/config_files/training/config_example_openGPTx_dataset.yaml b/config_files/training/config_example_openGPTx_dataset.yaml index 85114fe47..226d625d1 100644 --- a/config_files/training/config_example_openGPTx_dataset.yaml +++ b/config_files/training/config_example_openGPTx_dataset.yaml @@ -142,7 +142,7 @@ model: config: sample_key: ${data.sample_key} prediction_key: "logits" - block_size: ${data.sequence_len} + sequence_length: ${data.sequence_len} 
vocab_size: 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: 12 n_head_q: 12 diff --git a/config_files/training/config_gpt2_small_overfitting_de.yaml b/config_files/training/config_gpt2_small_overfitting_de.yaml index 5e8c3baea..0209d2981 100644 --- a/config_files/training/config_gpt2_small_overfitting_de.yaml +++ b/config_files/training/config_gpt2_small_overfitting_de.yaml @@ -245,7 +245,7 @@ batch_progress_subscriber: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} global_num_tokens: ${settings.training.global_num_seen_tokens} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} train_dataloader: instance_key: train_dataloader pass_type: BY_REFERENCE diff --git a/config_files/training/config_gpt2_small_overfitting_de_abs_pos_emb.yaml b/config_files/training/config_gpt2_small_overfitting_de_abs_pos_emb.yaml index d349cad72..cc9e8d5bd 100644 --- a/config_files/training/config_gpt2_small_overfitting_de_abs_pos_emb.yaml +++ b/config_files/training/config_gpt2_small_overfitting_de_abs_pos_emb.yaml @@ -245,7 +245,7 @@ batch_progress_subscriber: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} global_num_tokens: ${settings.training.global_num_seen_tokens} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} train_dataloader: instance_key: train_dataloader pass_type: BY_REFERENCE diff --git a/config_files/training/config_gpt2_small_redpajama_DE_1048576.yaml b/config_files/training/config_gpt2_small_redpajama_DE_1048576.yaml index c3e4d9393..f8bb019c1 100644 --- a/config_files/training/config_gpt2_small_redpajama_DE_1048576.yaml +++ b/config_files/training/config_gpt2_small_redpajama_DE_1048576.yaml @@ -246,7 +246,7 @@ batch_progress_subscriber: num_ranks: 
${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} global_num_tokens: ${settings.training.global_num_seen_tokens} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} train_dataloader: instance_key: train_dataloader pass_type: BY_REFERENCE diff --git a/config_files/training/config_lorem_ipsum.yaml b/config_files/training/config_lorem_ipsum.yaml index 4f0e99dd4..afd1c8729 100644 --- a/config_files/training/config_lorem_ipsum.yaml +++ b/config_files/training/config_lorem_ipsum.yaml @@ -156,7 +156,7 @@ checkpoint_saving: config: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} # resolving class types via different enums sucks... loss_fn: @@ -184,7 +184,7 @@ model: config: sample_key: ${settings.referencing_keys.sample_key} poe_type: NOPE - block_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} prediction_key: ${loss_fn.config.prediction_key} vocab_size: 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: 2 @@ -278,7 +278,7 @@ batch_progress_subscriber: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} global_num_tokens: ${settings.training.global_num_seen_tokens} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} gradient_acc_steps: ${settings.training.gradient_acc_steps} train_dataloader: instance_key: train_dataloader diff --git a/examples/getting_started/example_config.yaml b/examples/getting_started/example_config.yaml index e21b7a869..185a83f50 100644 --- a/examples/getting_started/example_config.yaml +++ b/examples/getting_started/example_config.yaml @@ -154,7 
+154,7 @@ model: config: sample_key: ${settings.referencing_keys.sample_key} poe_type: NOPE - block_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} prediction_key: ${loss_fn.config.prediction_key} vocab_size: 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: 2 @@ -234,7 +234,7 @@ batch_progress_subscriber: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} global_num_tokens: ${settings.training.global_num_seen_tokens} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} train_dataloader: instance_key: train_dataloader pass_type: BY_REFERENCE diff --git a/examples/getting_started/example_text_generation_config.yaml b/examples/getting_started/example_text_generation_config.yaml index f992e26ff..c8e1775f1 100644 --- a/examples/getting_started/example_text_generation_config.yaml +++ b/examples/getting_started/example_text_generation_config.yaml @@ -44,7 +44,7 @@ raw_model: config: sample_key: ${settings.referencing_keys.sample_key} poe_type: NOPE - block_size: ${settings.context_length} + sequence_length: ${settings.context_length} prediction_key: ${settings.referencing_keys.prediction_key} vocab_size: 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: 2 diff --git a/examples/library_usage/config_lorem_ipsum.yaml b/examples/library_usage/config_lorem_ipsum.yaml index d56fe1879..38a9aaed3 100644 --- a/examples/library_usage/config_lorem_ipsum.yaml +++ b/examples/library_usage/config_lorem_ipsum.yaml @@ -190,7 +190,7 @@ model: config: sample_key: "input_ids" # TODO reference this prediction_key: "logits" # TODO reference this - block_size: 256 # TODO reference this (same as sequence length) + sequence_length: 256 # TODO reference this (same as sequence length) vocab_size: 50304 # GPT-2 vocab_size of 50257, padded 
up to nearest multiple of 64 for efficiency n_layer: 2 n_head_q: 4 diff --git a/src/modalities/models/gpt2/gpt2_model.py b/src/modalities/models/gpt2/gpt2_model.py index dbf827281..f8ffe6b40 100644 --- a/src/modalities/models/gpt2/gpt2_model.py +++ b/src/modalities/models/gpt2/gpt2_model.py @@ -148,7 +148,7 @@ class GPT2LLMConfig(BaseModel): sample_key: str prediction_key: str poe_type: PositionTypes - block_size: Annotated[int, Field(strict=True, ge=1)] + sequence_length: Annotated[int, Field(strict=True, ge=1)] vocab_size: Annotated[ int, Field(strict=True, ge=1) ] # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency @@ -251,15 +251,15 @@ def projection(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch def execute_qkv_transforms( q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, qkv_transforms: nn.ModuleList, n_head_q: int ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - batch_size, block_size, embedding_dim = q.size() + batch_size, sequence_length, embedding_dim = q.size() # hidden dimension of single head # Note, that number of heads does not change the overall parameters of the networks # to scale up the network we either have to increase the embedding_dim or the number of layers n_head_dim = embedding_dim // n_head_q - q = q.view(batch_size, block_size, n_head_q, n_head_dim).transpose(1, 2).contiguous() # (B, nh_q, T, hd) - k = k.view(batch_size, block_size, -1, n_head_dim).transpose(1, 2).contiguous() # (B, nh_kv, T, hd) - v = v.view(batch_size, block_size, -1, n_head_dim).transpose(1, 2).contiguous() # (B, nh_kv, T, hd) + q = q.view(batch_size, sequence_length, n_head_q, n_head_dim).transpose(1, 2).contiguous() # (B, nh_q, T, hd) + k = k.view(batch_size, sequence_length, -1, n_head_dim).transpose(1, 2).contiguous() # (B, nh_kv, T, hd) + v = v.view(batch_size, sequence_length, -1, n_head_dim).transpose(1, 2).contiguous() # (B, nh_kv, T, hd) for transform in qkv_transforms: q, k, v = transform(q, k, v) @@ 
-419,7 +419,7 @@ def __init__( sample_key: str, prediction_key: str, poe_type: PositionTypes, - block_size: int, + sequence_length: int, vocab_size: int, n_layer: int, n_head_q: int, @@ -444,19 +444,19 @@ def __init__( super().__init__(weight_decay_groups=weight_decay_groups) self.sample_key = sample_key self.prediction_key = prediction_key - self.block_size = block_size + self.sequence_length = sequence_length self.poe_type = poe_type assert vocab_size is not None - assert block_size is not None + assert sequence_length is not None # TODO: dependency injection if poe_type is PositionTypes.ABSOLUTE: - wpe = nn.Embedding(num_embeddings=block_size, embedding_dim=n_embd) + wpe = nn.Embedding(num_embeddings=sequence_length, embedding_dim=n_embd) elif poe_type is PositionTypes.NOPE: # Using a pre-trained layer, requires to define a separate FSDP unit for the frozen layer c.f. # https://github.com/huggingface/accelerate/issues/807 - # wpe = nn.Embedding.from_pretrained(torch.zeros(block_size, n_embd)) + # wpe = nn.Embedding.from_pretrained(torch.zeros(sequence_length, n_embd)) wpe = nn.Identity() else: raise TypeError(f"{poe_type} not supported") @@ -518,7 +518,9 @@ def forward_impl(self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tenso input_ids = inputs[self.sample_key] device = input_ids.device b, t = input_ids.size() # batch size, sequence length - assert t <= self.block_size, f"Cannot forward sequence of length {t}, block size is only {self.block_size}" + assert ( + t <= self.sequence_length + ), f"Cannot forward sequence of length {t}, block size is only {self.sequence_length}" # forward the GPT model itself tok_emb = self.transformer.wte(input_ids) # token embeddings of shape (b, t, n_embd) diff --git a/src/modalities/utils/number_conversion.py b/src/modalities/utils/number_conversion.py index d52dfde07..71979a9a4 100644 --- a/src/modalities/utils/number_conversion.py +++ b/src/modalities/utils/number_conversion.py @@ -11,7 +11,7 @@ class 
LocalNumBatchesFromNumSamplesConfig(BaseModel): class LocalNumBatchesFromNumTokensConfig(BaseModel): num_ranks: Annotated[int, Field(strict=True, gt=0)] global_num_tokens: Annotated[int, Field(strict=True, ge=0)] - context_size: Annotated[int, Field(strict=True, gt=0)] + sequence_length: Annotated[int, Field(strict=True, gt=0)] class NumStepsFromNumSamplesConfig(BaseModel): @@ -24,13 +24,13 @@ class NumStepsFromNumTokensConfig(BaseModel): num_ranks: Annotated[int, Field(strict=True, gt=0)] local_micro_batch_size: Annotated[int, Field(strict=True, gt=0)] global_num_tokens: Annotated[int, Field(strict=True, ge=0)] - context_size: Annotated[int, Field(strict=True, gt=0)] + sequence_length: Annotated[int, Field(strict=True, gt=0)] class NumTokensFromNumStepsConfig(BaseModel): num_ranks: Annotated[int, Field(strict=True, gt=0)] local_micro_batch_size: Annotated[int, Field(strict=True, gt=0)] - context_size: Annotated[int, Field(strict=True, gt=0)] + sequence_length: Annotated[int, Field(strict=True, gt=0)] class NumberConversion: @@ -51,7 +51,7 @@ def get_local_num_batches_from_num_samples(num_ranks: int, global_num_samples: i return global_num_samples // num_ranks @staticmethod - def get_local_num_batches_from_num_tokens(num_ranks: int, global_num_tokens: int, context_size: int) -> int: + def get_local_num_batches_from_num_tokens(num_ranks: int, global_num_tokens: int, sequence_length: int) -> int: """Calculates the number of local batches for each rank, given the global number of tokens and number of ranks. 
This helper function is primarily used to calculate the number of batches to @@ -60,12 +60,12 @@ def get_local_num_batches_from_num_tokens(num_ranks: int, global_num_tokens: int Args: num_ranks (int): _description_ global_num_tokens (int): _description_ - context_size (int): _description_ + sequence_length (int): _description_ Returns: int: _description_ """ - global_num_samples = global_num_tokens // context_size + global_num_samples = global_num_tokens // sequence_length return NumberConversion.get_local_num_batches_from_num_samples( num_ranks=num_ranks, global_num_samples=global_num_samples ) @@ -87,7 +87,7 @@ def get_num_steps_from_num_samples(num_ranks: int, local_micro_batch_size: int, @staticmethod def get_num_steps_from_num_tokens( - num_ranks: int, local_micro_batch_size: int, global_num_tokens: int, context_size: int + num_ranks: int, local_micro_batch_size: int, global_num_tokens: int, sequence_length: int ) -> int: """Calculates the number of steps given the global number of tokens, local micro batch size and number of ranks. 
@@ -98,18 +98,18 @@ def get_num_steps_from_num_tokens( num_ranks (int): _description_ local_micro_batch_size (int): _description_ global_num_tokens (int): _description_ - context_size (int): _description_ + sequence_length (int): _description_ Returns: int: _description_ """ - global_num_samples = global_num_tokens // context_size + global_num_samples = global_num_tokens // sequence_length return NumberConversion.get_num_steps_from_num_samples( num_ranks=num_ranks, local_micro_batch_size=local_micro_batch_size, global_num_samples=global_num_samples ) @staticmethod def get_num_tokens_from_num_steps_callable( - num_ranks: int, local_micro_batch_size: int, context_size: int + num_ranks: int, local_micro_batch_size: int, sequence_length: int ) -> Callable[[int], int]: - return lambda num_steps_done: num_steps_done * num_ranks * local_micro_batch_size * context_size + return lambda num_steps_done: num_steps_done * num_ranks * local_micro_batch_size * sequence_length diff --git a/tests/checkpointing/gpt2_config.yaml b/tests/checkpointing/gpt2_config.yaml index a53bee89a..dcaaee820 100644 --- a/tests/checkpointing/gpt2_config.yaml +++ b/tests/checkpointing/gpt2_config.yaml @@ -7,7 +7,7 @@ model: sample_key: "input_ids" # TODO reference this poe_type: NOPE prediction_key: "logits" # TODO reference this - block_size: 256 # TODO reference this (same as sequence length) + sequence_length: 256 # TODO reference this (same as sequence length) vocab_size: 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: 2 n_head_q: 4 diff --git a/tests/checkpointing/test_checkpoint_execution_functions.py b/tests/checkpointing/test_checkpoint_execution_functions.py index 378e05264..d8095806e 100644 --- a/tests/checkpointing/test_checkpoint_execution_functions.py +++ b/tests/checkpointing/test_checkpoint_execution_functions.py @@ -45,7 +45,7 @@ def test_delete_checkpoint(tmpdir): model_path = directory / experiment_id / 
f"eid_{experiment_id}-model-num_steps_101-num_tokens_4848.bin" model_path.write_text(CONTENT) get_num_tokens_from_num_steps_callable = NumberConversion.get_num_tokens_from_num_steps_callable( - num_ranks=2, local_micro_batch_size=4, context_size=6 + num_ranks=2, local_micro_batch_size=4, sequence_length=6 ) checkpoint_saving = FSDPCheckpointSaving( checkpoint_path=directory, diff --git a/tests/checkpointing/test_fsdp_to_disc_checkpointing.py b/tests/checkpointing/test_fsdp_to_disc_checkpointing.py index 700267873..c044d44c9 100644 --- a/tests/checkpointing/test_fsdp_to_disc_checkpointing.py +++ b/tests/checkpointing/test_fsdp_to_disc_checkpointing.py @@ -111,9 +111,9 @@ def _clone_parameters(fsdp_wrapped_model): def _generate_batch(gpt2_model_config: Dict): # prepare input and targets data = torch.randint( - 0, - gpt2_model_config["model"]["config"]["vocab_size"], - (8, gpt2_model_config["model"]["config"]["block_size"] + 1), + 0, # lowest token_id + gpt2_model_config["model"]["config"]["vocab_size"], # highest token_id, i.e, vocab_size + (8, gpt2_model_config["model"]["config"]["sequence_length"] + 1), # (batch_size, sequence_length + 1) ).cuda() batch_input_ids_dict = {gpt2_model_config["model"]["config"]["sample_key"]: data[:, :-1]} batch_target_ids = data[:, 1:] @@ -198,9 +198,9 @@ def test_save_checkpoint_after_backward_pass( experiment_id = "0" num_train_steps_done = 1 - context_size = gpt2_model_config_dict["model"]["config"]["block_size"] + sequence_length = gpt2_model_config_dict["model"]["config"]["sequence_length"] get_num_tokens_from_num_steps_callable = NumberConversion.get_num_tokens_from_num_steps_callable( - num_ranks=2, local_micro_batch_size=4, context_size=context_size + num_ranks=2, local_micro_batch_size=4, sequence_length=sequence_length ) checkpoint_saving = FSDPCheckpointSaving( checkpoint_path=temporary_checkpoint_folder_path, diff --git a/tests/dataloader/yaml_configs/skipped_dataloader.yaml 
b/tests/dataloader/yaml_configs/skipped_dataloader.yaml index 726c1d088..40f229abc 100644 --- a/tests/dataloader/yaml_configs/skipped_dataloader.yaml +++ b/tests/dataloader/yaml_configs/skipped_dataloader.yaml @@ -31,7 +31,7 @@ skip_num_batches: config: num_ranks: ${settings.cuda_env.world_size} global_num_tokens: ${settings.training.global_num_seen_tokens} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} train_dataloader: component_key: data_loader diff --git a/tests/end2end_tests/gpt2_train_num_steps_8.yaml b/tests/end2end_tests/gpt2_train_num_steps_8.yaml index 654a852dc..2b55a73bc 100644 --- a/tests/end2end_tests/gpt2_train_num_steps_8.yaml +++ b/tests/end2end_tests/gpt2_train_num_steps_8.yaml @@ -48,7 +48,7 @@ train_dataloader: config: num_ranks: ${settings.cuda_env.world_size} global_num_tokens: ${settings.training.global_num_seen_tokens} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} dataset: instance_key: train_dataset pass_type: BY_REFERENCE @@ -96,7 +96,7 @@ checkpoint_saving: config: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} # resolving class types via different enums sucks... 
@@ -125,7 +125,7 @@ model: config: sample_key: ${settings.referencing_keys.sample_key} poe_type: NOPE - block_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} prediction_key: ${loss_fn.config.prediction_key} vocab_size: 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: 2 @@ -213,7 +213,7 @@ batch_progress_subscriber: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} global_num_tokens: ${settings.training.global_num_seen_tokens} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} gradient_acc_steps: ${settings.training.gradient_acc_steps} train_dataloader: instance_key: train_dataloader diff --git a/tests/end2end_tests/gpt2_warm_start_from_step_4.yaml b/tests/end2end_tests/gpt2_warm_start_from_step_4.yaml index 78503b6b2..bfcf78c52 100644 --- a/tests/end2end_tests/gpt2_warm_start_from_step_4.yaml +++ b/tests/end2end_tests/gpt2_warm_start_from_step_4.yaml @@ -48,7 +48,7 @@ train_dataloader: config: num_ranks: ${settings.cuda_env.world_size} global_num_tokens: ${settings.training.global_num_seen_tokens} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} dataset: instance_key: train_dataset pass_type: BY_REFERENCE @@ -105,7 +105,7 @@ checkpoint_saving: config: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} # resolving class types via different enums sucks... 
loss_fn: @@ -133,7 +133,7 @@ model: config: sample_key: ${settings.referencing_keys.sample_key} poe_type: NOPE - block_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} prediction_key: ${loss_fn.config.prediction_key} vocab_size: 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: 2 @@ -236,7 +236,7 @@ batch_progress_subscriber: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} global_num_tokens: ${settings.training.global_num_seen_tokens} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} gradient_acc_steps: ${settings.training.gradient_acc_steps} train_dataloader: instance_key: train_dataloader diff --git a/tests/test_yaml_configs/config_lorem_ipsum.yaml b/tests/test_yaml_configs/config_lorem_ipsum.yaml index 5b8781bcd..f324da51b 100644 --- a/tests/test_yaml_configs/config_lorem_ipsum.yaml +++ b/tests/test_yaml_configs/config_lorem_ipsum.yaml @@ -156,7 +156,7 @@ checkpoint_saving: config: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} # resolving class types via different enums sucks... 
loss_fn: @@ -184,7 +184,7 @@ model: config: sample_key: ${settings.referencing_keys.sample_key} poe_type: NOPE - block_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} prediction_key: ${loss_fn.config.prediction_key} vocab_size: 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: 2 @@ -277,7 +277,7 @@ batch_progress_subscriber: num_ranks: ${settings.cuda_env.world_size} local_micro_batch_size: ${settings.training.local_train_micro_batch_size} global_num_tokens: ${settings.training.global_num_seen_tokens} - context_size: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} gradient_acc_steps: ${settings.training.gradient_acc_steps} train_dataloader: instance_key: train_dataloader diff --git a/tests/test_yaml_configs/gpt2_config_optimizer.yaml b/tests/test_yaml_configs/gpt2_config_optimizer.yaml index ffd838348..7cb01f4a9 100644 --- a/tests/test_yaml_configs/gpt2_config_optimizer.yaml +++ b/tests/test_yaml_configs/gpt2_config_optimizer.yaml @@ -5,7 +5,7 @@ model: sample_key: "input_ids" poe_type: ABSOLUTE prediction_key: "logits" - block_size: 2048 + sequence_length: 2048 vocab_size: 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: 12 n_head_q: 12 diff --git a/tests/utils/test_number_conversion.py b/tests/utils/test_number_conversion.py index 0a961bba1..0b3ce1ada 100644 --- a/tests/utils/test_number_conversion.py +++ b/tests/utils/test_number_conversion.py @@ -12,14 +12,15 @@ def test_get_local_num_batches_from_num_samples(num_ranks: int, global_num_sampl @pytest.mark.parametrize( - "num_ranks,global_num_tokens,context_size,expected", + "num_ranks,global_num_tokens,sequence_length,expected", [(2, 10, 2, 2), (2, 11, 2, 2), (2, 12, 2, 3)], ) def test_get_local_num_batches_from_num_tokens( - num_ranks: int, global_num_tokens: int, context_size: int, expected: int + num_ranks: int, 
global_num_tokens: int, sequence_length: int, expected: int ): assert ( - NumberConversion.get_local_num_batches_from_num_tokens(num_ranks, global_num_tokens, context_size) == expected + NumberConversion.get_local_num_batches_from_num_tokens(num_ranks, global_num_tokens, sequence_length) + == expected ) @@ -37,32 +38,32 @@ def test_get_num_steps_from_num_samples( @pytest.mark.parametrize( - "num_ranks,local_micro_batch_size,global_num_tokens,context_size,expected", + "num_ranks,local_micro_batch_size,global_num_tokens,sequence_length,expected", [(2, 2, 20, 2, 2), (2, 2, 21, 2, 2), (2, 2, 22, 2, 2), (2, 2, 24, 2, 3)], ) def test_get_num_steps_from_num_tokens( - num_ranks: int, local_micro_batch_size: int, global_num_tokens: int, context_size: int, expected: int + num_ranks: int, local_micro_batch_size: int, global_num_tokens: int, sequence_length: int, expected: int ): assert ( NumberConversion.get_num_steps_from_num_tokens( - num_ranks, local_micro_batch_size, global_num_tokens, context_size + num_ranks, local_micro_batch_size, global_num_tokens, sequence_length ) == expected ) @pytest.mark.parametrize( - "num_ranks,local_micro_batch_size,context_size,num_steps_done,expected", + "num_ranks,local_micro_batch_size,sequence_length,num_steps_done,expected", [ (2, 2, 2, 2, 16), (2, 2, 2, 3, 24), ], ) def test_get_num_tokens_from_num_steps_callable( - num_ranks: int, local_micro_batch_size: int, context_size: int, num_steps_done: int, expected: int + num_ranks: int, local_micro_batch_size: int, sequence_length: int, num_steps_done: int, expected: int ): assert ( - NumberConversion.get_num_tokens_from_num_steps_callable(num_ranks, local_micro_batch_size, context_size)( + NumberConversion.get_num_tokens_from_num_steps_callable(num_ranks, local_micro_batch_size, sequence_length)( num_steps_done ) == expected From 2ded83d5a7dfe0f2a4da865dfb79302a532312d3 Mon Sep 17 00:00:00 2001 From: Max Luebbering Date: Thu, 20 Jun 2024 13:37:17 +0200 Subject: [PATCH 04/18] refactor: the 
last token from a block (i.e., last target token) is used as the first token (i.e., first input token) of the subsequent block --- src/modalities/config/config.py | 4 +- src/modalities/dataloader/dataset.py | 17 +++- tests/dataloader/test_packed_dataset.py | 100 ++++++++++++++++++++---- 3 files changed, 102 insertions(+), 19 deletions(-) diff --git a/src/modalities/config/config.py b/src/modalities/config/config.py index 41b35a072..601adcaaf 100644 --- a/src/modalities/config/config.py +++ b/src/modalities/config/config.py @@ -259,13 +259,13 @@ class MemMapDatasetConfig(BaseModel): class PackedMemMapDatasetContinuousConfig(BaseModel): raw_data_path: Path - model_sequence_length: Annotated[int, Field(strict=True, gt=0)] + model_sequence_length: Annotated[int, Field(strict=True, gt=1)] sample_key: str class PackedMemMapDatasetMegatronConfig(BaseModel): raw_data_path: Path - block_size: Annotated[int, Field(strict=True, gt=0)] + block_size: Annotated[int, Field(strict=True, gt=1)] sample_key: str diff --git a/src/modalities/dataloader/dataset.py b/src/modalities/dataloader/dataset.py index 1d9518a34..90de616cd 100644 --- a/src/modalities/dataloader/dataset.py +++ b/src/modalities/dataloader/dataset.py @@ -173,8 +173,21 @@ class PackedMemMapDatasetContinuous(PackedMemMapDatasetBase): def _generate_packing_index(self) -> List[Tuple[int, int]]: # get number of total tokens in file total_tokens = self._embedded_stream_data.data_len // self._token_size_in_bytes - num_samples = total_tokens // self.block_size - return [(i * self.block_size * self._token_size_in_bytes, self.block_size) for i in range(num_samples)] + if total_tokens < self.block_size: + raise ValueError( + f"Block size ({self.block_size}) is larger than the " + f"total number of tokens in the dataset ({total_tokens})."
+ ) + if self.block_size < 2: + raise ValueError("Block size must be at least 2.") + # Given a fixed number of samples we can compute the total number of tokens as + # num_tokens = block_size + (block_size-1) * (num_samples-1) + # as the first sample always needs block_size many tokens and the following samples + # each need block_size-1 many tokens (since we can reuse the last target token as the first input token + # of the subsequent sample). + num_samples = (total_tokens - self.block_size) // (self.block_size - 1) + 1 + # given num_samples we calculate the starting index and length of each sample as tuple. + return [((i * self.block_size - i) * self._token_size_in_bytes, self.block_size) for i in range(num_samples)] class PackedMemMapDatasetMegatron(PackedMemMapDatasetBase): diff --git a/tests/dataloader/test_packed_dataset.py b/tests/dataloader/test_packed_dataset.py index 7a9f791ad..ffc26f7a5 100644 --- a/tests/dataloader/test_packed_dataset.py +++ b/tests/dataloader/test_packed_dataset.py @@ -17,17 +17,60 @@ def test_packed_megatron_dataset_loading(dummy_packed_data_path, block_size, exp @pytest.mark.parametrize( "block_size, expected_length, expected_output", [ - (1, 20, [[i] for i in range(20)]), - (2, 10, [[2 * i, 2 * i + 1] for i in range(10)]), - (3, 6, [[3 * i, 3 * i + 1, 3 * i + 2] for i in range(6)]), - (10, 2, [list(range(10)), list(range(10, 20))]), - (6, 3, [list(range(i * 6, i * 6 + 6)) for i in range(3)]), + ( + 2, + 19, + [ + [0, 1], + [1, 2], + [2, 3], + [3, 4], + [4, 5], + [5, 6], + [6, 7], + [7, 8], + [8, 9], + [9, 10], + [10, 11], + [11, 12], + [12, 13], + [13, 14], + [14, 15], + [15, 16], + [16, 17], + [17, 18], + [18, 19], + ], + ), + ( + 3, + 9, + [ + [0, 1, 2], + [2, 3, 4], + [4, 5, 6], + [6, 7, 8], + [8, 9, 10], + [10, 11, 12], + [12, 13, 14], + [14, 15, 16], + [16, 17, 18], + ], + ), + (10, 2, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [9, 10, 11, 12, 13, 14, 15, 16, 17, 18]]), + (6, 3, [[0, 1, 2, 3, 4, 5], [5, 6, 7, 8, 9, 10], [10, 11, 12, 13, 
14, 15]]), (20, 1, [list(range(20))]), - (25, 0, []), + (21, 0, ValueError), + (1, 0, ValueError), ], ) def test_packed_continuous_dataset_loading(dummy_packed_data_path, block_size, expected_length, expected_output): - ds = PackedMemMapDatasetContinuous(dummy_packed_data_path, block_size, sample_key="input_ids") + try: + ds = PackedMemMapDatasetContinuous(dummy_packed_data_path, block_size, sample_key="input_ids") + except ValueError: + assert expected_output == ValueError + return + assert len(ds) == expected_length retrieved_input_ids = [list(packed_samples["input_ids"]) for packed_samples in ds] assert retrieved_input_ids == expected_output @@ -71,12 +114,25 @@ def test_create_packed_dataset(indexed_dummy_data_path_long, wrapped_gpt2_tokeni eod_token_id = wrapped_gpt2_tokenizer.get_token_id("<|endoftext|>") # we flatten the list of tokenized documents and add the eod token at the end of each document jsonl_tokenized_flat = [token_id for doc in jsonl_tokenized for token_id in doc + [eod_token_id]] - # we make sure that the length of the flattened tokenized jsonl file is a multiple of the block size - # as the packed dataset also cuts off partially packed samples at the end. 
- jsonl_tokenized_flat = jsonl_tokenized_flat[: len(jsonl_tokenized_flat) // block_size * block_size] - # flatten the tokens from the packed dataset - packed_dataset_tokens_flat = [j for i in iter(packed_dataset) for j in i["input_ids"].tolist()] + # we calculate the number of samples in the jsonl file given the block size + # the formula takes into account that from the second sample onwards the + # last token (i.e., last target token) is reused as the first input token from the next sample + num_samples = (len(jsonl_tokenized_flat) - block_size) // (block_size - 1) + 1 + # the first sample has a length of block_size and the subsequent one of block_size-1 + num_tokens = block_size + (block_size - 1) * (num_samples - 1) + jsonl_tokenized_flat = jsonl_tokenized_flat[:num_tokens] + + # flatten the tokens from the packed dataset to reproduce the tokenized jsonl file + packed_dataset_tokens_flat = [] + for block_id, block in enumerate(iter(packed_dataset)): + if block_id > 0: + # we remove the first token from each block as it is a + # reused token from the previous block + tokens = block["input_ids"].tolist()[1:] + packed_dataset_tokens_flat += tokens + else: + packed_dataset_tokens_flat += block["input_ids"].tolist() # compare the flattened tokens from the packed dataset with the manually tokenized jsonl file assert packed_dataset_tokens_flat == jsonl_tokenized_flat @@ -111,9 +167,23 @@ def test_join_packed_datasets(dummy_packed_data_path, tmpdir): original_datasets = [ PackedMemMapDatasetContinuous(p, block_size=2, sample_key="whatever") for p in packed_data_clones ] - assert [v for batch in loaded_dataset for v in batch["whatever"]] == [ - v for ds in original_datasets for batch in ds for v in batch["whatever"] - ] + + original_datasets_concatenated = [] + for ds_id, ds in enumerate(original_datasets): + for batch_id, batch in enumerate(ds): + if ds_id > 0 and batch_id == 0: + # we add the batch that was missing from the transition from one dataset to the next
+ # NOTE: this test only works with block_size=2! + original_datasets_concatenated += [ + original_datasets_concatenated[-1], + batch["whatever"].flatten().tolist()[0], + ] + + original_datasets_concatenated += batch["whatever"].flatten().tolist() + + print([batch["whatever"].tolist()]) + loaded_dataset_flattened = [v for batch in loaded_dataset for v in batch["whatever"]] + assert loaded_dataset_flattened == original_datasets_concatenated @pytest.mark.parametrize("token_size_in_bytes", [1, 2, 4]) From 4b88bfdcc0a336e69640d88e53bde06aec9756e0 Mon Sep 17 00:00:00 2001 From: Max Luebbering Date: Thu, 20 Jun 2024 13:53:12 +0200 Subject: [PATCH 05/18] refactor: renamed all model_sequence_length with sequence_length --- config_files/training/config_lorem_ipsum.yaml | 2 +- examples/getting_started/example_config.yaml | 4 ++-- examples/library_usage/config_lorem_ipsum.yaml | 2 +- src/modalities/config/config.py | 4 ++-- src/modalities/dataloader/dataset_factory.py | 12 ++++++------ .../dataloader/yaml_configs/skipped_dataloader.yaml | 2 +- tests/end2end_tests/gpt2_train_num_steps_8.yaml | 2 +- tests/end2end_tests/gpt2_warm_start_from_step_4.yaml | 2 +- tests/test_yaml_configs/config_lorem_ipsum.yaml | 2 +- 9 files changed, 16 insertions(+), 16 deletions(-) diff --git a/config_files/training/config_lorem_ipsum.yaml b/config_files/training/config_lorem_ipsum.yaml index afd1c8729..62834a6f4 100644 --- a/config_files/training/config_lorem_ipsum.yaml +++ b/config_files/training/config_lorem_ipsum.yaml @@ -32,7 +32,7 @@ train_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: ./data/lorem_ipsum.pbin - model_sequence_length: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} train_dataloader: diff --git a/examples/getting_started/example_config.yaml b/examples/getting_started/example_config.yaml index 185a83f50..8629b1430 100644 --- 
a/examples/getting_started/example_config.yaml +++ b/examples/getting_started/example_config.yaml @@ -34,7 +34,7 @@ train_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: ./data/mem_map/redpajama_v2_samples_512_train.pbin - model_sequence_length: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} train_dataloader: @@ -73,7 +73,7 @@ val_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: ./data/mem_map/redpajama_v2_samples_512_test.pbin - model_sequence_length: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} val_dataloader: diff --git a/examples/library_usage/config_lorem_ipsum.yaml b/examples/library_usage/config_lorem_ipsum.yaml index 38a9aaed3..e702316c9 100644 --- a/examples/library_usage/config_lorem_ipsum.yaml +++ b/examples/library_usage/config_lorem_ipsum.yaml @@ -38,7 +38,7 @@ train_dataset: config: raw_data_path: ../../data/lorem_ipsum.jsonl index_path: ../../data/lorem_ipsum.idx - model_sequence_length: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} jq_pattern: ".text" sample_key: ${settings.referencing_keys.sample_key} tokenizer: diff --git a/src/modalities/config/config.py b/src/modalities/config/config.py index 601adcaaf..d3181e1c4 100644 --- a/src/modalities/config/config.py +++ b/src/modalities/config/config.py @@ -251,7 +251,7 @@ class DistributedSamplerConfig(BaseModel): class MemMapDatasetConfig(BaseModel): raw_data_path: FilePath index_path: Optional[FilePath] = None - model_sequence_length: Annotated[int, Field(strict=True, gt=0)] + sequence_length: Annotated[int, Field(strict=True, gt=0)] tokenizer: PydanticTokenizerIFType jq_pattern: str sample_key: str @@ -259,7 +259,7 @@ class MemMapDatasetConfig(BaseModel): class 
PackedMemMapDatasetContinuousConfig(BaseModel): raw_data_path: Path - model_sequence_length: Annotated[int, Field(strict=True, gt=1)] + sequence_length: Annotated[int, Field(strict=True, gt=1)] sample_key: str diff --git a/src/modalities/dataloader/dataset_factory.py b/src/modalities/dataloader/dataset_factory.py index bce751478..990ffb227 100644 --- a/src/modalities/dataloader/dataset_factory.py +++ b/src/modalities/dataloader/dataset_factory.py @@ -40,7 +40,7 @@ def get_dummy_dataset(num_samples: int, sample_definition: Tuple[DummySampleConf @staticmethod def get_mem_map_dataset( raw_data_path: Path, - model_sequence_length: int, + sequence_length: int, tokenizer: PreTrainedTokenizer, sample_key: str, index_path: Optional[Path] = None, @@ -48,7 +48,7 @@ def get_mem_map_dataset( ) -> MemMapDataset: dataset = MemMapDataset( raw_data_path=raw_data_path, - block_size=model_sequence_length + 1, + block_size=sequence_length + 1, tokenizer=tokenizer, sample_key=sample_key, index_path=index_path, @@ -58,19 +58,19 @@ def get_mem_map_dataset( @staticmethod def get_packed_mem_map_dataset_continuous( - raw_data_path: Path, model_sequence_length: int, sample_key: str + raw_data_path: Path, sequence_length: int, sample_key: str ) -> PackedMemMapDatasetContinuous: dataset = PackedMemMapDatasetContinuous( - raw_data_path=raw_data_path, block_size=model_sequence_length + 1, sample_key=sample_key + raw_data_path=raw_data_path, block_size=sequence_length + 1, sample_key=sample_key ) return dataset @staticmethod def get_packed_mem_map_dataset_megatron( - raw_data_path: Path, model_sequence_length: int, sample_key: str + raw_data_path: Path, sequence_length: int, sample_key: str ) -> PackedMemMapDatasetMegatron: dataset = PackedMemMapDatasetMegatron( - raw_data_path=raw_data_path, block_size=model_sequence_length + 1, sample_key=sample_key + raw_data_path=raw_data_path, block_size=sequence_length + 1, sample_key=sample_key ) return dataset diff --git 
a/tests/dataloader/yaml_configs/skipped_dataloader.yaml b/tests/dataloader/yaml_configs/skipped_dataloader.yaml index 40f229abc..02d28ae51 100644 --- a/tests/dataloader/yaml_configs/skipped_dataloader.yaml +++ b/tests/dataloader/yaml_configs/skipped_dataloader.yaml @@ -22,7 +22,7 @@ train_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: ./data/lorem_ipsum.pbin - model_sequence_length: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} skip_num_batches: diff --git a/tests/end2end_tests/gpt2_train_num_steps_8.yaml b/tests/end2end_tests/gpt2_train_num_steps_8.yaml index 2b55a73bc..97e8362b7 100644 --- a/tests/end2end_tests/gpt2_train_num_steps_8.yaml +++ b/tests/end2end_tests/gpt2_train_num_steps_8.yaml @@ -31,7 +31,7 @@ train_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: lorem_ipsum.pbin - model_sequence_length: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} train_dataloader: diff --git a/tests/end2end_tests/gpt2_warm_start_from_step_4.yaml b/tests/end2end_tests/gpt2_warm_start_from_step_4.yaml index bfcf78c52..4f72a89c8 100644 --- a/tests/end2end_tests/gpt2_warm_start_from_step_4.yaml +++ b/tests/end2end_tests/gpt2_warm_start_from_step_4.yaml @@ -31,7 +31,7 @@ train_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: lorem_ipsum.pbin - model_sequence_length: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} train_dataloader: diff --git a/tests/test_yaml_configs/config_lorem_ipsum.yaml b/tests/test_yaml_configs/config_lorem_ipsum.yaml index f324da51b..a77dbc28c 100644 --- a/tests/test_yaml_configs/config_lorem_ipsum.yaml +++ b/tests/test_yaml_configs/config_lorem_ipsum.yaml @@ -32,7 +32,7 @@ 
train_dataset: variant_key: packed_mem_map_dataset_continuous config: raw_data_path: ./data/lorem_ipsum.pbin - model_sequence_length: ${settings.training.sequence_length} + sequence_length: ${settings.training.sequence_length} sample_key: ${settings.referencing_keys.sample_key} train_dataloader: From 1772f4774230e19c990dca7abf4adc90d95aecb6 Mon Sep 17 00:00:00 2001 From: Max Luebbering Date: Tue, 25 Jun 2024 14:58:53 +0200 Subject: [PATCH 06/18] fix: we use the correct byte-based indexation now --- .../dataloader/create_packed_data.py | 12 +++-- src/modalities/dataloader/dataset.py | 44 +++++++++++++------ 2 files changed, 39 insertions(+), 17 deletions(-) diff --git a/src/modalities/dataloader/create_packed_data.py b/src/modalities/dataloader/create_packed_data.py index e31572034..80c9dfc47 100644 --- a/src/modalities/dataloader/create_packed_data.py +++ b/src/modalities/dataloader/create_packed_data.py @@ -161,7 +161,10 @@ def _write_batch( EmbeddedStreamData.TOKEN_SIZE_DESCRIPTOR_LENGTH_IN_BYTES, byteorder="little" ) ) - curr_offset = EmbeddedStreamData.HEADER_SIZE_IN_BYTES + # The offset only applies to the data section, not the header + # When we load the file, we additionally add the header size + # to the offset + curr_offset = 0 # write data section (tokens) pbar = tqdm(total=len(self._reader), desc="Processed batches") @@ -229,8 +232,7 @@ def _process_thread(self, process_id: int): ) def _update_data_length_in_pre_allocated_header(self, dst_path: Path, index_list: List[Tuple[int, int]]): - start_of_index_in_bytes = index_list[-1][0] + index_list[-1][1] - length_of_byte_encoded_data_section = start_of_index_in_bytes - EmbeddedStreamData.HEADER_SIZE_IN_BYTES + length_of_byte_encoded_data_section = index_list[-1][0] + index_list[-1][1] data_section_length_in_bytes = length_of_byte_encoded_data_section.to_bytes( EmbeddedStreamData.DATA_SECTION_LENGTH_IN_BYTES, byteorder="little" ) @@ -277,7 +279,9 @@ def __init__(self, data_path: Path): # get index
f.seek(self.HEADER_SIZE_IN_BYTES + self.data_len) pkl_encoded_index = f.read() - self.index_base = pickle.loads(pkl_encoded_index) + # contains the start offset and length of each segment + # as byte positions in the data section + self.index_base: List[Tuple[int, int]] = pickle.loads(pkl_encoded_index) # initialize memmapped data section self.data = np.memmap(self._data_path, mode="r", offset=self.HEADER_SIZE_IN_BYTES, shape=(self.data_len,)) diff --git a/src/modalities/dataloader/dataset.py b/src/modalities/dataloader/dataset.py index 90de616cd..53f0b2617 100644 --- a/src/modalities/dataloader/dataset.py +++ b/src/modalities/dataloader/dataset.py @@ -18,9 +18,8 @@ class Dataset(TorchdataSet): - def __init__(self, raw_data_path: Path, block_size: int, sample_key: str): + def __init__(self, raw_data_path: Path, sample_key: str): self.raw_data_path = raw_data_path - self.block_size = block_size self.sample_key = sample_key def _check_if_inbounds(self, idx: int): @@ -51,7 +50,7 @@ def __init__(self, num_samples: int, sample_definition: Tuple[DummySampleConfig] :param sample_definition: A list of tuples defining the dataset output. Each touple contains the sample key, shape and data type. """ - super().__init__(raw_data_path=None, block_size=None, sample_key=None) + super().__init__(raw_data_path=None, sample_key=None) self.num_samples = num_samples self.sample_definition = sample_definition @@ -78,7 +77,6 @@ class MemMapDataset(Dataset): def __init__( self, raw_data_path: Path, - block_size: int, tokenizer: TokenizerWrapper, sample_key: str, index_path: Optional[Path] = None, @@ -88,7 +86,6 @@ def __init__( Pytorch Dataset with mmap support. :param raw_data_path: Path to a jsonl file, which holds text data - :param block_size: alias for max sequence length. The amount of tokens the model can handle. :param tokenizer: PretrainedTokenizer required to tokenize text data on the fly. :param jq_pattern: jq-pattern applied on every jsonl-entry. 
Results are afterwards tokenized and packed :param index_path: Path to an index file, which indicates the start character/byte position @@ -99,7 +96,7 @@ def __init__( TODO: If this setting should support multi-modal features using separately encoded inputs, this needs to get replaced with a list of sample keys! """ - super().__init__(raw_data_path=raw_data_path, block_size=block_size, sample_key=sample_key) + super().__init__(raw_data_path=raw_data_path, sample_key=sample_key) self.reader = LargeFileLinesReader(self.raw_data_path, index_path=index_path) self.jq_filter = jq.compile(jq_pattern) @@ -124,7 +121,7 @@ class PackedMemMapDatasetBase(Dataset): } type_converter_for_torch = {1: np.uint8, 2: np.int32, 4: np.int64} - def __init__(self, raw_data_path: Path, block_size: int, sample_key: str): + def __init__(self, raw_data_path: Path, sample_key: str): """ Base class for packed memmapped datasets. The underlying dataset file has the structure: | header | data | index | @@ -134,12 +131,11 @@ def __init__(self, raw_data_path: Path, block_size: int, sample_key: str): :param raw_data_path: Path to a packed binary file (*.pbin). Use `modalities data pack_encoded_data` to create one based on a jsonl-file. - :param block_size: alias for max sequence length. The amount of tokens the model can handle. :param sample_key: model-specific parameter to indicate where in the BatchEncoding the input_token_ids are. TODO: If this setting should support multi-modal features using separately encoded inputs, this needs to get replaced with a list of sample keys! 
""" - super().__init__(raw_data_path=raw_data_path, block_size=block_size, sample_key=sample_key) + super().__init__(raw_data_path=raw_data_path, sample_key=sample_key) self._embedded_stream_data = EmbeddedStreamData(raw_data_path) self._token_size_in_bytes = self._embedded_stream_data.token_size_in_bytes try: @@ -153,16 +149,27 @@ def __init__(self, raw_data_path: Path, block_size: int, sample_key: str): self._index = self._generate_packing_index() def _generate_packing_index(self) -> List[Tuple[int, int]]: - raise NotImplementedError + # index is a tuple of offset and length in bytes + return self._embedded_stream_data.index_base def __len__(self) -> int: return len(self._index) def __getitem__(self, idx: int) -> BatchEncoding: self._check_if_inbounds(idx) - offset, length = self._index[idx] + # offset and length in bytes + offset, length_in_bytes = self._index[idx] + if length_in_bytes % self._token_size_in_bytes != 0: + raise ValueError( + f"Length of the sample in bytes is not a multiple of {self._token_size_in_bytes}." 
+ f" Offset in bytes: {offset}, Length in bytes: {length_in_bytes}" + ) + # numpy frombuffer takes the memmap object as the buffer + # and indexes the data section with the given offset (in bytes) + # and a count in elements of type self._token_dtype_on_disk + num_tokens = length_in_bytes // self._token_size_in_bytes tokens = np.frombuffer( - self._embedded_stream_data.data, dtype=self._token_dtype_on_disk, count=length, offset=offset + buffer=self._embedded_stream_data.data, dtype=self._token_dtype_on_disk, count=num_tokens, offset=offset ) # torch can't convert most uint-formats, therefore we infer regular int types tokens = tokens.astype(self._token_dtype_in_ram) @@ -170,6 +177,10 @@ def __getitem__(self, idx: int) -> BatchEncoding: class PackedMemMapDatasetContinuous(PackedMemMapDatasetBase): + def __init__(self, raw_data_path: Path, sample_key: str, block_size: int): + self.block_size = block_size + super().__init__(raw_data_path=raw_data_path, sample_key=sample_key) + def _generate_packing_index(self) -> List[Tuple[int, int]]: # get number of total tokens in file total_tokens = self._embedded_stream_data.data_len // self._token_size_in_bytes @@ -187,10 +198,17 @@ def _generate_packing_index(self) -> List[Tuple[int, int]]: # of the subsequent sample). num_samples = (total_tokens - self.block_size) // (self.block_size - 1) + 1 # given num_samples we calculate the starting index and length of each sample as tuple.
- return [((i * self.block_size - i) * self._token_size_in_bytes, self.block_size) for i in range(num_samples)] + return [ + ((i * self.block_size - i) * self._token_size_in_bytes, self.block_size * self._token_size_in_bytes) + for i in range(num_samples) + ] class PackedMemMapDatasetMegatron(PackedMemMapDatasetBase): + def __init__(self, raw_data_path: Path, sample_key: str, block_size: int): + self.block_size = block_size + super().__init__(raw_data_path=raw_data_path, sample_key=sample_key) + def _generate_packing_index(self) -> List[Tuple[int, int]]: index = [] curr_offset = self.HEADER_SIZE_IN_BYTES From 3c6bfbc01506126a1170d537de9a062c8e5eb3cf Mon Sep 17 00:00:00 2001 From: Max Luebbering Date: Tue, 25 Jun 2024 14:59:35 +0200 Subject: [PATCH 07/18] test: added test test_original_samples_in_packed_dataset for testing the indexation of the original samples --- tests/dataloader/test_packed_dataset.py | 45 +++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/tests/dataloader/test_packed_dataset.py b/tests/dataloader/test_packed_dataset.py index ffc26f7a5..9c9882027 100644 --- a/tests/dataloader/test_packed_dataset.py +++ b/tests/dataloader/test_packed_dataset.py @@ -4,13 +4,19 @@ import pytest from modalities.dataloader.create_packed_data import EmbeddedStreamData, PackedDataGenerator, join_embedded_stream_data -from modalities.dataloader.dataset import PackedMemMapDatasetContinuous, PackedMemMapDatasetMegatron +from modalities.dataloader.dataset import ( + PackedMemMapDatasetBase, + PackedMemMapDatasetContinuous, + PackedMemMapDatasetMegatron, +) from modalities.models.gpt2.collator import GPT2LLMCollateFn @pytest.mark.parametrize("block_size, expected_length", [(1, 4), (2, 3), (3, 3), (10, 2), (6, 2), (20, 1), (25, 0)]) def test_packed_megatron_dataset_loading(dummy_packed_data_path, block_size, expected_length): - ds = PackedMemMapDatasetMegatron(dummy_packed_data_path, block_size, sample_key="input_ids") + ds = 
PackedMemMapDatasetMegatron( + raw_data_path=dummy_packed_data_path, block_size=block_size, sample_key="input_ids" + ) assert len(ds) == expected_length @@ -66,7 +72,9 @@ def test_packed_megatron_dataset_loading(dummy_packed_data_path, block_size, exp ) def test_packed_continuous_dataset_loading(dummy_packed_data_path, block_size, expected_length, expected_output): try: - ds = PackedMemMapDatasetContinuous(dummy_packed_data_path, block_size, sample_key="input_ids") + ds = PackedMemMapDatasetContinuous( + raw_data_path=dummy_packed_data_path, block_size=block_size, sample_key="input_ids" + ) except ValueError: assert expected_output == ValueError return @@ -202,3 +210,34 @@ def test_conversion_tokens_represented_as_unsigned_ints(tmpdir, token_size_in_by collator = GPT2LLMCollateFn(sample_key=sample_key, target_key="abc") for batch in zip(ds, ds): collator(list(batch)) + + +def test_original_samples_in_packed_dataset(indexed_dummy_data_path_long, wrapped_gpt2_tokenizer): + # In this test, we create a packed dataset from a long jsonl file + # and iterate over the packed dataset to check if the tokenization is correct. 
+ # We do so by manually tokenizing the jsonl file and comparing the tokenized + # output with the packed dataset + packed_generator = PackedDataGenerator( + src_path=indexed_dummy_data_path_long.raw_data_path, + tokenizer=wrapped_gpt2_tokenizer, + number_of_processes=5, + eod_token="<|endoftext|>", + index_path=indexed_dummy_data_path_long.index_path, + jq_pattern=".text", + processing_batch_size=5, + raw_samples_queue_size=3, + processed_samples_queue_size=3, + ) + default_packed_dataset_path = packed_generator._default_destination_path() + assert not default_packed_dataset_path.is_file() + packed_generator.run() + packed_dataset = PackedMemMapDatasetBase(default_packed_dataset_path, sample_key="input_ids") + # read in the raw jsonl files for manual tokenization + with open(indexed_dummy_data_path_long.raw_data_path) as f: + jsonl_list = [json.loads(line)["text"] for line in f] + + eod_token_id = wrapped_gpt2_tokenizer.get_token_id("<|endoftext|>") + jsonl_tokenized = [wrapped_gpt2_tokenizer.tokenize(v) + [eod_token_id] for v in jsonl_list] + + for sample, original_sample in zip(packed_dataset, jsonl_tokenized): + assert sample["input_ids"].tolist() == original_sample From 3a45e52e7cd2955c4b1f3de46795cf9ab3b3b527 Mon Sep 17 00:00:00 2001 From: Max Luebbering Date: Tue, 25 Jun 2024 15:00:34 +0200 Subject: [PATCH 08/18] chore: updated getting started documentation regarding the byte-based indexation --- examples/getting_started/README.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/examples/getting_started/README.md b/examples/getting_started/README.md index 04bfee821..8b74dcd8d 100644 --- a/examples/getting_started/README.md +++ b/examples/getting_started/README.md @@ -130,10 +130,11 @@ contains the concatenated token ids for all documents. 
Index segment: ============== -The index contains a tuple for each document with the format (byte_offset, segment_length), -where the byte_offset specifies the byte position in the data segment for the start of the document and segment_length. -Therfore, the index segment would look like [(8, 100), (108, 302), (410, 803), ...]. The first sample starts at byte position 8 and -has a length of 100 bytes. The second sample therefore starts at byte position 108 and has a length of 284 bytes and so on. +The index contains a tuple for each document with the format (byte_offset, segment_byte_length), +where the byte_offset specifies the byte position in the data segment for the start of the document and segment_length +specifies the byte length of the document. +Therfore, the index segment would look like [(0, 100), (100, 302), (410, 803), ...]. The first sample starts at byte position 0 and +has a length of 100 bytes. The second sample therefore starts at byte position 100 and has a length of 202 bytes and so on. ``` We have implemented different packing strategies on top of the file format, each making sure that a batch is completely filled up with documents without any trailing padding in the sequences. 
From 0f284927004124ad2af6068949dafe7ef1079a88 Mon Sep 17 00:00:00 2001 From: Max Luebbering Date: Tue, 25 Jun 2024 15:04:15 +0200 Subject: [PATCH 09/18] fix: fixed index in dummy_packed_data_path of conftest --- tests/conftest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index 8dfc430f9..5c8ba9e5c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -37,7 +37,8 @@ def dummy_packed_data_path(tmpdir) -> Path: data += (len(tokens) * token_size_in_bytes).to_bytes(header_size_in_bytes, byteorder="little") data += token_size_in_bytes.to_bytes(4, byteorder="little") data += b"".join([t.to_bytes(token_size_in_bytes, byteorder="little") for t in tokens]) - index = [(4, 24), (28, 40), (68, 12), (80, 4)] # [(index,len), ...] -> in 4 bytes #lengths: 6,10,3,1 + # NOTE: so far none of the implemented pytests use this index though! + index = [(0, 24), (24, 40), (64, 12), (76, 4)] # [(index,len), ...] -> in 4 bytes #lengths: 6,10,3,1 data += pickle.dumps(index) dummy_packed_data_path = Path(tmpdir, "dummy.pbin") dummy_packed_data_path.write_bytes(data) From 378c59c4f68c5b4355763ef893c75a57e460f45f Mon Sep 17 00:00:00 2001 From: Max Luebbering Date: Tue, 25 Jun 2024 15:14:10 +0200 Subject: [PATCH 10/18] chore: updated readme inaccuracy --- examples/getting_started/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/getting_started/README.md b/examples/getting_started/README.md index 8b74dcd8d..a91e79c48 100644 --- a/examples/getting_started/README.md +++ b/examples/getting_started/README.md @@ -133,8 +133,8 @@ Index segment: The index contains a tuple for each document with the format (byte_offset, segment_byte_length), where the byte_offset specifies the byte position in the data segment for the start of the document and segment_length specifies the byte length of the document. -Therfore, the index segment would look like [(0, 100), (100, 302), (410, 803), ...]. 
The first sample starts at byte position 0 and -has a length of 100 bytes. The second sample therefore starts at byte position 100 and has a length of 202 bytes and so on. +Therefore, the index segment would look like [(0, 100), (100, 302), (402, 803), ...]. The first sample starts at byte position 0 and +has a length of 100 bytes. The second sample therefore starts at byte position 100 and has a length of 302 bytes and so on. ``` We have implemented different packing strategies on top of the file format, each making sure that a batch is completely filled up with documents without any trailing padding in the sequences. From cc7af6a58144ce2c6f2f111e0d56a7eec1cbdae4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Max=20L=C3=BCbbering?= <2804731+le1nux@users.noreply.github.com> Date: Tue, 25 Jun 2024 15:52:08 +0200 Subject: [PATCH 11/18] Update src/modalities/config/config.py Co-authored-by: Felix Stollenwerk --- src/modalities/config/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/modalities/config/config.py b/src/modalities/config/config.py index d3181e1c4..9a431cbd1 100644 --- a/src/modalities/config/config.py +++ b/src/modalities/config/config.py @@ -259,7 +259,7 @@ class MemMapDatasetConfig(BaseModel): class PackedMemMapDatasetContinuousConfig(BaseModel): raw_data_path: Path - sequence_length: Annotated[int, Field(strict=True, gt=1)] + sequence_length: Annotated[int, Field(strict=True, gt=0)] sample_key: str From e845c1066d17656457a468b0c580474a5bc7f05a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Max=20L=C3=BCbbering?= <2804731+le1nux@users.noreply.github.com> Date: Tue, 25 Jun 2024 15:52:36 +0200 Subject: [PATCH 12/18] Update src/modalities/models/gpt2/gpt2_model.py Co-authored-by: Felix Stollenwerk --- src/modalities/models/gpt2/gpt2_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/modalities/models/gpt2/gpt2_model.py b/src/modalities/models/gpt2/gpt2_model.py index f8ffe6b40..2bfab7589 100644 ---
a/src/modalities/models/gpt2/gpt2_model.py +++ b/src/modalities/models/gpt2/gpt2_model.py @@ -517,7 +517,7 @@ def _init_weights(self, module: nn.Module, weight_init: WeightInitializationConf def forward_impl(self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: input_ids = inputs[self.sample_key] device = input_ids.device - b, t = input_ids.size() # batch size, sequence length + _, t = input_ids.size() # batch size, sequence length assert ( t <= self.sequence_length ), f"Cannot forward sequence of length {t}, block size is only {self.sequence_length}" From a9f416622ceb2d1c0136c76b39f424fda96de11a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Max=20L=C3=BCbbering?= <2804731+le1nux@users.noreply.github.com> Date: Tue, 25 Jun 2024 15:52:54 +0200 Subject: [PATCH 13/18] Update src/modalities/models/gpt2/gpt2_model.py Co-authored-by: Felix Stollenwerk --- src/modalities/models/gpt2/gpt2_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/modalities/models/gpt2/gpt2_model.py b/src/modalities/models/gpt2/gpt2_model.py index 2bfab7589..649681580 100644 --- a/src/modalities/models/gpt2/gpt2_model.py +++ b/src/modalities/models/gpt2/gpt2_model.py @@ -520,7 +520,7 @@ def forward_impl(self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tenso _, t = input_ids.size() # batch size, sequence length assert ( t <= self.sequence_length - ), f"Cannot forward sequence of length {t}, block size is only {self.sequence_length}" + ), f"Cannot forward sequence of length {t}, the model's maximum input sequence length is only {self.sequence_length}" # forward the GPT model itself tok_emb = self.transformer.wte(input_ids) # token embeddings of shape (b, t, n_embd) From 7185f7e81a7a3f798d5f9da61eb485b67314460e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Max=20L=C3=BCbbering?= <2804731+le1nux@users.noreply.github.com> Date: Tue, 25 Jun 2024 15:58:43 +0200 Subject: [PATCH 14/18] Update tests/checkpointing/test_fsdp_to_disc_checkpointing.py Co-authored-by: 
Felix Stollenwerk --- tests/checkpointing/test_fsdp_to_disc_checkpointing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/checkpointing/test_fsdp_to_disc_checkpointing.py b/tests/checkpointing/test_fsdp_to_disc_checkpointing.py index c044d44c9..563c4f424 100644 --- a/tests/checkpointing/test_fsdp_to_disc_checkpointing.py +++ b/tests/checkpointing/test_fsdp_to_disc_checkpointing.py @@ -112,7 +112,7 @@ def _generate_batch(gpt2_model_config: Dict): # prepare input and targets data = torch.randint( 0, # lowest token_id - gpt2_model_config["model"]["config"]["vocab_size"], # highest token_id, i.e, vocab_size + gpt2_model_config["model"]["config"]["vocab_size"], # highest token_id + 1, i.e. vocab_size (8, gpt2_model_config["model"]["config"]["sequence_length"] + 1), # (batch_size, sequence_length + 1) ).cuda() batch_input_ids_dict = {gpt2_model_config["model"]["config"]["sample_key"]: data[:, :-1]} From 138fa85d63ad30cb242335ebc0d4d73bdb4b2ff1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Max=20L=C3=BCbbering?= <2804731+le1nux@users.noreply.github.com> Date: Fri, 28 Jun 2024 10:24:03 +0200 Subject: [PATCH 15/18] Update src/modalities/dataloader/create_packed_data.py Co-authored-by: Felix Stollenwerk --- src/modalities/dataloader/create_packed_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/modalities/dataloader/create_packed_data.py b/src/modalities/dataloader/create_packed_data.py index 80c9dfc47..0687c88f1 100644 --- a/src/modalities/dataloader/create_packed_data.py +++ b/src/modalities/dataloader/create_packed_data.py @@ -162,7 +162,7 @@ def _write_batch( ) ) # The offset only applies to the data section, not the header - # When we load the file, we addtionally add the header size + # When we load the file, we add the header size to the offset # to the offset curr_offset = 0 From 455c26a96e1b19973d2398619519e8398d98fe3c Mon Sep 17 00:00:00 2001 From: Max Luebbering Date: Fri, 28 Jun 2024 10:30:31 +0200 Subject: 
[PATCH 16/18] chore: renamed offset to offset_in_bytes for consistency --- src/modalities/dataloader/dataset.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/modalities/dataloader/dataset.py b/src/modalities/dataloader/dataset.py index 53f0b2617..1730971f4 100644 --- a/src/modalities/dataloader/dataset.py +++ b/src/modalities/dataloader/dataset.py @@ -158,18 +158,21 @@ def __len__(self) -> int: def __getitem__(self, idx: int) -> BatchEncoding: self._check_if_inbounds(idx) # offset and length in bytes - offset, length_in_bytes = self._index[idx] + offset_in_bytes, length_in_bytes = self._index[idx] if length_in_bytes % self._token_size_in_bytes != 0: raise ValueError( f"Length of the sample in bytes is not a multiple of {self._token_size_in_bytes}." - f"Offset in bytes: {offset}, Length in bytes: {length_in_bytes}" + f"Offset in bytes: {offset_in_bytes}, Length in bytes: {length_in_bytes}" ) # numpy frombuffer takes the memmap object as the buffer # and indices the data section with the given offset (in bytes) # and length in indices of type self._token_dtype_on_disk num_tokens = length_in_bytes // self._token_size_in_bytes tokens = np.frombuffer( - buffer=self._embedded_stream_data.data, dtype=self._token_dtype_on_disk, count=num_tokens, offset=offset + buffer=self._embedded_stream_data.data, + dtype=self._token_dtype_on_disk, + count=num_tokens, + offset=offset_in_bytes, ) # torch can't convert most uint-formats, therefore we infer regular int types tokens = tokens.astype(self._token_dtype_in_ram) From 969f11ab8f033dcfc8aa3d4993910cb5e3793c44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Max=20L=C3=BCbbering?= <2804731+le1nux@users.noreply.github.com> Date: Fri, 28 Jun 2024 10:32:32 +0200 Subject: [PATCH 17/18] Update src/modalities/dataloader/create_packed_data.py Co-authored-by: Felix Stollenwerk --- src/modalities/dataloader/create_packed_data.py | 1 - 1 file changed, 1 deletion(-) diff --git 
a/src/modalities/dataloader/create_packed_data.py b/src/modalities/dataloader/create_packed_data.py index 0687c88f1..d71c5a3b3 100644 --- a/src/modalities/dataloader/create_packed_data.py +++ b/src/modalities/dataloader/create_packed_data.py @@ -163,7 +163,6 @@ def _write_batch( ) # The offset only applies to the data section, not the header # When we load the file, we add the header size to the offset - # to the offset curr_offset = 0 # write data section (tokens) From a8a0a1dbc6b1794f3e87cf6de3128b2b705b6610 Mon Sep 17 00:00:00 2001 From: mali-git Date: Sat, 29 Jun 2024 16:37:51 +0200 Subject: [PATCH 18/18] chore: add comments --- src/modalities/dataloader/create_index.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/modalities/dataloader/create_index.py b/src/modalities/dataloader/create_index.py index 1fc0d4d9b..656b69416 100644 --- a/src/modalities/dataloader/create_index.py +++ b/src/modalities/dataloader/create_index.py @@ -25,7 +25,9 @@ def __init__(self, src_file: Path, chunksize: int = 4096, drop_faulty_entries: b self.chunksize = chunksize self.drop_faulty_entries = drop_faulty_entries with self.src_file.open(mode="r") as fin: + # Move the cursor to the end of the file fin.seek(0, os.SEEK_END) + # Get number of characters in the file self._total_num_chars = fin.tell() self.num_chunks = self._total_num_chars // self.chunksize self._queue_of_raw_lines = queue.Queue()