@@ -1,6 +1,5 @@
from functools import partial
import numpy as np
import torch
from peft import LoraConfig

import twinkle
@@ -12,6 +11,7 @@

logger = get_logger()
MODEL_ID = 'ms://Qwen/Qwen2.5-7B-Instruct'
DATASETS='ms://swift/self-cognition'

device_group = [
DeviceGroup(
@@ -30,79 +30,70 @@
)

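# Initialize the twinkle runtime with 4 processes per node over the declared device groups and mesh.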
twinkle.initialize(
mode="ray",
mode="local",
nproc_per_node=4,
groups=device_group,
global_device_mesh=device_mesh,
lazy_collect=False,
)

def eval(model):
dataloader = DataLoader(
dataset=partial(create_dataset, data_slice=range(100)),
batch_size=4,
device_mesh=device_mesh,
)
for _, batch in enumerate(dataloader):
model.forward_only(inputs=batch, adapter_name="default")
model.calculate_loss(adapter_name="default")
return model.calculate_metric(is_training=False, adapter_name="default")


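# Build and encode the training dataset: load the self-cognition data, apply the chat template
# for MODEL_ID, and run SelfCognitionProcessor (which presumably fills in the model name and
# author strings) before batched encoding.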
def create_dataset(data_slice=None):
dataset = Dataset(
dataset_meta=DatasetMeta("ms://swift/self-cognition", data_slice=data_slice)
# Respect an explicit data_slice (e.g. the eval subset); otherwise default to the first 500 samples.
dataset_meta=DatasetMeta(DATASETS, data_slice=data_slice if data_slice is not None else range(500))
)
dataset.set_template(
"Template",
model_id=MODEL_ID,
truncation_strategy="left",
max_length=64,
model_id=MODEL_ID
)
dataset.map(SelfCognitionProcessor("twinkle模型", "twinkle团队"))
dataset.encode(batched=True)
return dataset


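# Evaluation loop: run forward-only passes over a small slice of the dataset, accumulate the
# loss, and return the aggregated non-training metrics.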
def eval(model: TransformersModel):
dataloader = DataLoader(
dataset=partial(create_dataset, data_slice=range(20)),
batch_size=4,
drop_last=True,
device_mesh=device_mesh,
remote_group="default",
)
for step, batch in enumerate(dataloader):
model.forward_only(inputs=batch, adapter_name="default")
model.calculate_loss(adapter_name="default")
metrics = model.calculate_metric(is_training=False, adapter_name="default")
return metrics()


def train():
dataloader = DataLoader(
dataset=partial(create_dataset, data_slice=None),
batch_size=4,
batch_size=8,
device_mesh=device_mesh,
remote_group="default",
)

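# Wrap the base model with the native FSDP strategy, sharded across the device mesh.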
model = TransformersModel(
model_id=MODEL_ID,
device_mesh=device_mesh,
strategy="native_fsdp",
remote_group="default",
)

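# Attach a LoRA adapter over all linear layers, then give it its own AdamW optimizer
# and cosine-warmup learning-rate schedule.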
lora_config = LoraConfig(target_modules="all-linear")
model.add_adapter_to_model("default", lora_config, gradient_accumulation_steps=1)
model.set_optimizer("AdamW", lr=1e-4, adapter_name="default")
model.set_lr_scheduler(
scheduler_cls="CosineWarmupScheduler",
num_warmup_steps=5,
num_training_steps=len(dataloader),
adapter_name="default",
)

logger.info(model.get_train_configs(adapter_name="default"))
logger.info(f"Total steps: {len(dataloader)}")


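# Training loop: forward/backward per batch, clip gradients and step the optimizer;
# periodically log training metrics and save checkpoints.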
loss_metric = 99.0
for step, batch in enumerate(dataloader):
if isinstance(batch, list) and len(batch) == 0:
continue
output = model.forward_backward(inputs=batch, adapter_name="default")
loss_value = output() if callable(output) else output
logger.info(f"step {step}, loss: {loss_value}")
model.forward_backward(inputs=batch, adapter_name="default")
model.clip_grad_and_step(adapter_name="default")
if step % 50 == 0 and step > 0:
metrics = eval(model)
logger.info(f"Current is step {step} of {len(dataloader)}, metric: {metrics}")
metrics["step"] = step
if loss_metric > metrics["loss"]:
model.save(f"checkpoint-{step}")
loss_metric = metrics["loss"]
if step % 20 == 0:
metric = model.calculate_metric(is_training=True, adapter_name="default")
logger.info(f"Current is step {step} of {len(dataloader)}, metric: {metric}")
model.save("last-checkpoint", interval=1)


if __name__ == "__main__":
train()
train()
10 changes: 10 additions & 0 deletions cookbook/transformers/sp_fsdp_dense.sh
@@ -0,0 +1,10 @@
#!/bin/bash
# To enable sequence parallelism, please set ulysses_size > 1
# device_mesh = DeviceMesh(
# device_type="cuda",
# mesh=np.arange(4).reshape(2, 2),
# mesh_dim_names=("dp", "fsdp"),
# ulysses_size=2,
# )
#
CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --nproc_per_node=4 sp_fsdp_dense.py