4 changes: 2 additions & 2 deletions docs/source/Instruction/GRPO.md
@@ -43,7 +43,7 @@ swift rlhf \
--num_train_epochs 1 \
--per_device_train_batch_size 2 \
--per_device_eval_batch_size 2 \
--learning_rate 2e-5 \
--learning_rate 2e-6 \
--gradient_accumulation_steps 8 \
--save_total_limit 2 \
--logging_steps 5 \
@@ -69,7 +69,7 @@ swift rlhf \
--num_train_epochs 1 \
--per_device_train_batch_size 2 \
--per_device_eval_batch_size 2 \
--learning_rate 2e-5 \
--learning_rate 2e-6 \
--gradient_accumulation_steps 8 \
--save_total_limit 2 \
--logging_steps 5 \
4 changes: 2 additions & 2 deletions docs/source_en/Instruction/GRPO.md
@@ -43,7 +43,7 @@ swift rlhf \
--num_train_epochs 1 \
--per_device_train_batch_size 2 \
--per_device_eval_batch_size 2 \
--learning_rate 2e-5 \
--learning_rate 2e-6 \
--gradient_accumulation_steps 16 \
--save_total_limit 2 \
--logging_steps 5 \
@@ -69,7 +69,7 @@ swift rlhf \
--num_train_epochs 1 \
--per_device_train_batch_size 2 \
--per_device_eval_batch_size 2 \
--learning_rate 2e-5 \
--learning_rate 2e-6 \
--gradient_accumulation_steps 16 \
--save_total_limit 2 \
--logging_steps 5 \
3 changes: 1 addition & 2 deletions examples/train/grpo/grpo.py
@@ -50,7 +50,6 @@ def run(self):
# dataset
dataset = ['AI-MO/NuminaMath-TIR'] # dataset_id or dataset_path
data_seed = 42
max_new_tokens = 512
split_dataset_ratio = 0.01 # Split validation set
num_proc = 4 # The number of processes for data loading.

@@ -80,7 +79,7 @@ def run(self):
reward_funcs=reward_funcs,
split_dataset_ratio=split_dataset_ratio,
output_dir=output_dir,
learning_rate=2e-5,
learning_rate=2e-6,
gradient_checkpointing=True,
weight_decay=0.1,
lr_scheduler_type='cosine',
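For context on the example change above: grpo.py keeps lr_scheduler_type='cosine' and only lowers the peak learning rate to 2e-6. A minimal sketch of what that cosine decay looks like, assuming no warmup; the function name and step counts below are illustrative, not part of ms-swift:

import math

def cosine_lr(step: int, total_steps: int, peak_lr: float = 2e-6, min_lr: float = 0.0) -> float:
    # Cosine anneal from peak_lr down to min_lr over total_steps.
    progress = step / max(1, total_steps)
    return min_lr + 0.5 * (peak_lr - min_lr) * (1 + math.cos(math.pi * progress))

print(cosine_lr(0, 1000))     # 2e-06 at the start
print(cosine_lr(500, 1000))   # 1e-06 halfway through
print(cosine_lr(1000, 1000))  # ~0.0 at the end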
2 changes: 1 addition & 1 deletion examples/train/grpo/grpo.sh
@@ -15,7 +15,7 @@ swift rlhf \
--num_train_epochs 1 \
--per_device_train_batch_size 2 \
--per_device_eval_batch_size 2 \
--learning_rate 2e-5 \
--learning_rate 2e-6 \
--gradient_accumulation_steps 8 \
--save_total_limit 2 \
--logging_steps 5 \
7 changes: 3 additions & 4 deletions swift/trainers/rlhf_trainer/grpo_trainer.py
@@ -2,7 +2,7 @@
# Part of the implementation is borrowed from huggingface/trl.
import inspect
from collections import defaultdict
from typing import Any, Dict, List, Optional, Union
from typing import Any, Callable, Dict, List, Optional, Union
from unittest.mock import patch

import torch
@@ -33,8 +33,7 @@ def __init__(self,
model: Optional[Union[PreTrainedModel, nn.Module]] = None,
ref_model: Optional[Union[PreTrainedModel, nn.Module]] = None,
reward_model: Optional[Union[PreTrainedModel, nn.Module]] = None,
reward_funcs: List[str, callable] = None,
reward_funcs: Optional[List[Union[str, Callable]]] = None,
*_args,
**kwargs):

args = kwargs['args']
@@ -239,7 +238,7 @@ def _prepare_inputs(self, inputs) -> Dict[str, Union[torch.Tensor, Any]]:
if isinstance(reward_func, nn.Module): # Module instead of PretrainedModel for compat with compiled models
reward_func_name = reward_func.config._name_or_path.split('/')[-1]
else:
if isinstance(reward_func, callable):
if callable(reward_func):
reward_func_name = reward_func.__name__ # function
else:
reward_func_name = reward_func.__class__.__name__ # object
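The two trainer changes above fix genuine bugs: List[str, callable] is not a valid typing annotation (typing.List accepts a single type parameter), and isinstance(reward_func, callable) raises TypeError because callable is a built-in function, not a type. A minimal sketch of the corrected name-resolution branch, assuming an illustrative reward function that is not part of ms-swift:

from typing import Callable, Union

def accuracy_reward(completions, **kwargs):
    # Illustrative reward function: score 1.0 for every completion.
    return [1.0 for _ in completions]

def reward_func_name(reward_func: Union[str, Callable]) -> str:
    # Mirrors the corrected check: callable(x) is the right test, since
    # isinstance(x, callable) raises TypeError at runtime.
    if callable(reward_func):
        return reward_func.__name__        # plain function
    return reward_func.__class__.__name__  # any other object

print(reward_func_name(accuracy_reward))  # accuracy_reward
print(reward_func_name('accuracy'))       # str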