[WIP] Simplified preparation of pretraining datasets #1057

Draft · wants to merge 4 commits into main
60 changes: 0 additions & 60 deletions litgpt/data/prepare_slimpajama.py

This file was deleted.

78 changes: 0 additions & 78 deletions litgpt/data/prepare_starcoder.py

This file was deleted.

99 changes: 87 additions & 12 deletions litgpt/data/tinyllama.py
@@ -1,7 +1,11 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
import json
import os
import time
import traceback
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from typing import Union, Optional, Generator

from torch.utils.data import DataLoader

@@ -25,29 +29,48 @@ class TinyLlama(DataModule):
    num_workers: int = 8
    """How many DataLoader processes to use for loading."""

    tokenizer: Optional[Tokenizer] = field(init=False, repr=False, default=None)
    batch_size: int = field(init=False, repr=False, default=1)
    seq_length: int = field(init=False, repr=False, default=2048)

    def __post_init__(self):
        # Could be a remote path (s3://) or a local path
        self.slimpajama_train = str(self.data_path).rstrip("/") + "/slimpajama/train"
        self.slimpajama_val = str(self.data_path).rstrip("/") + "/slimpajama/val"
        self.starcoder_train = str(self.data_path).rstrip("/") + "/starcoder"
        self.slimpajama_train = os.path.join(str(self.data_path), "slimpajama", "train")
        self.slimpajama_val = os.path.join(str(self.data_path), "slimpajama", "val")
        self.starcoder_train = os.path.join(str(self.data_path), "starcoder")

    def connect(
        self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: Optional[int] = None
    ) -> None:
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.seq_length = max_seq_length + 1  # Increase by one because we need the next token as well
        if max_seq_length:
            self.seq_length = max_seq_length + 1  # Increase by one because we need the next token as well

    def prepare_data(self) -> None:
        for path in (self.slimpajama_train, self.slimpajama_val, self.starcoder_train):
            if not path.startswith("s3://") and not Path(path).is_dir():
                raise FileNotFoundError(
                    "The data path for TinyLlama is expected to be the directory containing these subdirectories:"
                    f" `slimpajama/train`, `slimpajama/val`, `starcoder`. The directory {path} does not exist."
                    " Set it via `--data.data_path=...`"
                )
        # for path in (self.slimpajama_train, self.slimpajama_val, self.starcoder_train):
        #     if not path.startswith("s3://") and not Path(path).is_dir():
        #         raise FileNotFoundError(
        #             "The data path for TinyLlama is expected to be the directory containing these subdirectories:"
        #             f" `slimpajama/train`, `slimpajama/val`, `starcoder`. The directory {path} does not exist."
        #             " Set it via `--data.data_path=...`"
        #         )

        prepare_slimpajama(
            input_dir=os.path.join(self.data_path, "slimpajama-raw/train"),
            output_dir=self.slimpajama_train,
            tokenizer=self.tokenizer,
        )
        prepare_slimpajama(
            input_dir=os.path.join(self.data_path, "slimpajama-raw/validation"),
            output_dir=self.slimpajama_val,
            tokenizer=self.tokenizer,
        )
        prepare_starcoder(
            input_dir=os.path.join(self.data_path, "starcoderdata-raw"),
            output_dir=self.starcoder_train,
            tokenizer=self.tokenizer,
        )

    def train_dataloader(self) -> DataLoader:
        from litdata.streaming import CombinedStreamingDataset, StreamingDataLoader, StreamingDataset, TokensLoader
@@ -89,3 +112,55 @@ def val_dataloader(self) -> DataLoader:
            val_dataset, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True
        )
        return val_dataloader


def prepare_slimpajama(input_dir: str, output_dir: str, tokenizer: Tokenizer) -> None:
    import zstandard as zstd
    from litdata import optimize

    def process(filepath: str) -> Generator:
        with zstd.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
            for row in f:
                data = json.loads(row)  # parse each line once and reuse it below
                if data["meta"]["redpajama_set_name"] == "RedPajamaGithub":
                    continue  # exclude the GitHub data since it overlaps with starcoder
                yield tokenizer.encode(data["text"], bos=False, eos=True)

    optimize(
        fn=process,
        inputs=[str(file) for file in Path(input_dir).rglob("*.zst")],
        output_dir=output_dir,
        chunk_bytes="100MB",  # TODO: find a good value, chunk_size = (2049 * 16384),
        num_workers=os.cpu_count(),
        num_downloaders=1,
        fast_dev_run=False,
    )


def prepare_starcoder(input_dir: str, output_dir: str, tokenizer: Tokenizer) -> None:
    import pyarrow.parquet as pq
    from litdata import optimize

    def process(filepath: str) -> Generator:
        try:
            parquet_file = pq.ParquetFile(filepath)
            # Reduce RAM usage
            for batch in parquet_file.iter_batches(batch_size=8192, columns=["content"]):
                for text in batch.to_pandas()["content"]:
                    yield tokenizer.encode(text, bos=False, eos=True)
        except Exception:  # don't let a single corrupt file abort the whole run
            print(traceback.format_exc())
            print(f"Error reading {filepath}")
            return
        parquet_file.close()

    optimize(
        fn=process,
        inputs=[str(file) for file in Path(input_dir).rglob("*.parquet")],
        output_dir=output_dir,
        chunk_bytes="100MB",  # TODO: find a good value, chunk_size = (2049 * 8192),
        num_workers=os.cpu_count(),
        num_downloaders=1,
        fast_dev_run=False,
    )
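
For reference, a minimal sketch of how the directories written by `optimize` can be read back with litdata's streaming classes, matching the imports used in `train_dataloader` above. The path and the 2049-token block size (a 2048-token context plus the next-token target) are illustrative, not part of this diff:

```python
from litdata.streaming import StreamingDataLoader, StreamingDataset, TokensLoader

# Illustrative read-back of the optimized SlimPajama training split.
dataset = StreamingDataset(
    input_dir="data/slimpajama/train",
    item_loader=TokensLoader(block_size=2049),
    shuffle=True,
    drop_last=True,
)
dataloader = StreamingDataLoader(dataset, batch_size=4, num_workers=2, drop_last=True)
batch = next(iter(dataloader))  # expected shape: (4, 2049)
```
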
28 changes: 28 additions & 0 deletions litgpt/scripts/prepare.py
@@ -0,0 +1,28 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
from pathlib import Path
from typing import Optional

from lightning_utilities import is_overridden
from litgpt import Tokenizer
from litgpt.data import LitDataModule
from litgpt.utils import CLI


def prepare(
    data: LitDataModule,
    tokenizer_dir: Optional[Path],
    max_seq_length: Optional[int] = None,
) -> None:
    if not is_overridden("prepare_data", data, LitDataModule):
        raise ValueError(
            f"The {type(data).__name__} data module does not support preparing the data in advance."
        )

    tokenizer = Tokenizer(tokenizer_dir)
    data.connect(tokenizer=tokenizer, batch_size=1, max_seq_length=max_seq_length)
    data.prepare_data()


if __name__ == "__main__":
    CLI(prepare)
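
For illustration, a rough programmatic equivalent of what this entry point does, assuming `TinyLlama` is exported from `litgpt.data` and accepts the `data_path` argument used in the tutorial below; the paths are placeholders:

```python
from pathlib import Path

from litgpt.data import TinyLlama
from litgpt.scripts.prepare import prepare

# Sketch: prepare the TinyLlama data mix without going through the CLI.
# Assumes data/ contains the raw slimpajama-raw/ and starcoderdata-raw/ dumps
# and that the Llama 2 tokenizer files were downloaded beforehand.
prepare(
    data=TinyLlama(data_path=Path("data")),
    tokenizer_dir=Path("checkpoints/meta-llama/Llama-2-7b-hf"),
    max_seq_length=2048,
)
```
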
34 changes: 6 additions & 28 deletions tutorials/pretrain_tinyllama.md
@@ -52,7 +52,7 @@ In order to start pretraining litgpt on it, you need to read, tokenize, and writ
First, install additional dependencies for preprocessing:

```bash
pip install '.[all]'
pip install 'litgpt[all]'
```

You will need to have the tokenizer config available:
@@ -64,38 +64,16 @@ litgpt download \
--tokenizer_only true
```

Then, run the preprocessing script for each dataset and split.
You will require **1.1 TB** of disk space for Starcoder and **2.5** TB of space for the SlimPajama dataset.

**Starcoder:**

```bash
python litgpt/data/prepare_starcoder.py \
--input_dir data/starcoderdata-raw \
--output_dir data/starcoder \
--tokenizer_path checkpoints/meta-llama/Llama-2-7b-hf
```

**SlimPajama:**
Then, run the preprocessing command, pointing it at the directory where the raw data was downloaded.
You will require an additional **1.1 TB** of disk space for Starcoder and **2.5 TB** for the SlimPajama dataset.

```bash
python litgpt/data/prepare_slimpajama.py \
--input_dir data/slimpajama-raw/validation \
--output_dir data/slimpajama/val \
--tokenizer_path checkpoints/meta-llama/Llama-2-7b-hf

python litgpt/data/prepare_slimpajama.py \
--input_dir data/slimpajama-raw/test \
--output_dir data/slimpajama/test \
--tokenizer_path checkpoints/meta-llama/Llama-2-7b-hf

python litgpt/data/prepare_slimpajama.py \
--input_dir data/slimpajama-raw/train \
--output_dir data/slimpajama/train \
litgpt prepare \
--data TinyLlama \
--data.data_path data \
--tokenizer_path checkpoints/meta-llama/Llama-2-7b-hf
```

If you want to run on a small slice of the datasets first, pass the flag `--fast_dev_run=true` to the command above.
The above assumes you are using the same tokenizer as Llama/TinyLlama, but any trained [SentencePiece](https://github.com/google/sentencepiece) tokenizer with a 32,000-token vocabulary will work.
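
As a quick sanity check before launching the expensive preprocessing run, a small sketch using litgpt's `Tokenizer` wrapper; it assumes the `vocab_size` property alongside the `encode` call used in this PR, and the checkpoint path is a placeholder:

```python
from pathlib import Path

from litgpt import Tokenizer

# Verify the tokenizer loads and produces token ids with the expected vocabulary size.
tokenizer = Tokenizer(Path("checkpoints/meta-llama/Llama-2-7b-hf"))
print(tokenizer.vocab_size)  # expected: 32000 for Llama/TinyLlama-style tokenizers
print(tokenizer.encode("Hello, world!", bos=False, eos=True))  # 1-D tensor of token ids
```
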

 