Merged
14 changes: 8 additions & 6 deletions tinker_cookbook/rl/metric_util.py
@@ -11,6 +11,7 @@
 from tinker_cookbook.rl.types import EnvGroupBuilder, RLDataset, TrajectoryGroup
 from tinker_cookbook.utils.misc_utils import all_same, dict_mean
 from tinker_cookbook.utils import logtree
+from tinker_cookbook.completers import TokenCompleter


 def _compute_by_group_metrics(trajectory_groups_P: List[TrajectoryGroup], good_thresh: float = 0.5):
@@ -107,17 +108,15 @@ def __init__(
         self,
         dataset: RLDataset,
         max_tokens: int,
-        name: str | None = None,
+        name: str = "test",
         num_groups_to_log: int = 4,
     ):
         self.env_group_builders_P = dataset_to_env_group_builders(dataset)
         self.max_tokens = max_tokens
         self.name = name
         self.num_groups_to_log = num_groups_to_log

-    async def __call__(self, sampling_client: tinker.SamplingClient) -> dict[str, float]:
-        policy = TinkerTokenCompleter(sampling_client, max_tokens=self.max_tokens)
-
+    async def eval_token_completer(self, policy: TokenCompleter) -> dict[str, float]:
         async def run_group_rollout(builder, i):
             enable_logging = i < self.num_groups_to_log
             with logtree.optional_enable_logging(enable=enable_logging):
@@ -129,6 +128,9 @@ async def run_group_rollout(builder, i):
         taglist_P = [builder.logging_tags() for builder in self.env_group_builders_P]
         metrics = compute_trajectory_metrics(trajectory_groups_P, taglist_P)

-        if self.name is not None:
-            metrics = {f"{self.name}/{k}": v for k, v in metrics.items()}
+        metrics = {f"{self.name}/{k}": v for k, v in metrics.items()}
         return metrics
+
+    async def __call__(self, sampling_client: tinker.SamplingClient) -> dict[str, float]:
+        policy = TinkerTokenCompleter(sampling_client, max_tokens=self.max_tokens)
+        return await self.eval_token_completer(policy)
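Factoring the rollout-and-metrics logic into eval_token_completer decouples it from tinker.SamplingClient: any TokenCompleter can now be evaluated directly, while __call__ keeps its old signature and simply wraps the client. A minimal sketch of the two entry points; the evaluator's class name is not visible in this hunk, so `evaluator` below stands in for an already-constructed instance, and `my_completer` and `sampling_client` are assumed objects:

    async def demo(evaluator, my_completer, sampling_client):
        # New entry point: drive the rollouts with any TokenCompleter,
        # e.g. a mock policy in a unit test or a non-Tinker completer.
        metrics = await evaluator.eval_token_completer(my_completer)

        # Old entry point: __call__ builds a TinkerTokenCompleter from the
        # sampling client and delegates to eval_token_completer.
        metrics = await evaluator(sampling_client)

        # Either way, every key now comes back prefixed unconditionally
        # (self.name defaults to "test" and can no longer be None).
        return metrics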
2 changes: 1 addition & 1 deletion tinker_cookbook/rl/train.py
@@ -273,7 +273,7 @@ async def run_single_evaluation(evaluator, cfg, i_batch, sampling_client):
         scope_name=f"Running evaluation {ev_name} {i_batch}",
     ):
         eval_metrics = await evaluator(sampling_client)
-        return {f"test/{k}": v for k, v in eval_metrics.items()}
+        return eval_metrics


 @scope
10 changes: 6 additions & 4 deletions tinker_cookbook/supervised/nll_evaluator.py
@@ -7,7 +7,8 @@


 class NLLEvaluator(TrainingClientEvaluator):
-    def __init__(self, data: list[tinker.Datum]):
+    def __init__(self, data: list[tinker.Datum], name: str = "test"):
+        self.name = name
         self.data = data

     async def __call__(self, training_client: tinker.TrainingClient) -> dict[str, float]:

Review thread on `def __init__`:

Collaborator: actually wouldn't it be better to make the name default to "test", and remove the prefixing of "test" from the training loop?

Contributor (author): yeah that's better -- I thought the original code meant to always want "test/" to be the prefix of the log name but that's indeed a bit cumbersome.
@@ -16,9 +17,10 @@ async def __call__(self, training_client: tinker.TrainingClient) -> dict[str, float]:
         logprobs = [x["logprobs"] for x in result.loss_fn_outputs]
         weights = [datum.loss_fn_inputs["weights"] for datum in self.data]
         nll = compute_mean_nll(logprobs, weights)
-        return {"nll": nll}
+        key = f"{self.name}/nll"
+        return {key: nll}

     @classmethod
-    def from_dataset(cls, dataset: SupervisedDataset) -> "NLLEvaluator":
+    def from_dataset(cls, dataset: SupervisedDataset, name: str = "test") -> "NLLEvaluator":
         all_data = list(itertools.chain(*[dataset.get_batch(i) for i in range(len(dataset))]))
-        return cls(all_data)
+        return cls(all_data, name=name)
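With name threaded through both the constructor and from_dataset, each evaluator namespaces its own metrics, which is what makes dropping the loop-level "test/" prefix safe. A short usage sketch, where `val_dataset` and `training_client` stand in for a real SupervisedDataset and tinker.TrainingClient:

    from tinker_cookbook.supervised.nll_evaluator import NLLEvaluator

    async def eval_nll(val_dataset, training_client):
        # Default keeps the old log names: metrics land under "test/".
        test_eval = NLLEvaluator.from_dataset(val_dataset)
        # A custom name separates splits without touching the training loop.
        val_eval = NLLEvaluator.from_dataset(val_dataset, name="val")

        test_metrics = await test_eval(training_client)  # -> {"test/nll": ...}
        val_metrics = await val_eval(training_client)    # -> {"val/nll": ...}
        return {**test_metrics, **val_metrics}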
2 changes: 1 addition & 1 deletion tinker_cookbook/supervised/train.py
@@ -120,7 +120,7 @@ async def run_evals(
             raise ValueError(f"Unknown evaluator type: {type(evaluator)}")

         # Add test/ prefix to all metrics
-        metrics.update({f"test/{k}": v for k, v in eval_metrics.items()})
+        metrics.update(eval_metrics)

     return metrics
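The supervised loop mirrors the RL change: run_evals no longer rewrites keys, so whatever namespace the evaluator chose passes through to logging. A tiny before/after sketch of the key flow, with made-up values for illustration:

    # Old behavior: the evaluator returned {"nll": 2.31} and run_evals
    # rewrote it to {"test/nll": 2.31}. New behavior: the evaluator emits
    # the namespaced key itself and the loop merges it untouched.
    metrics = {}
    eval_metrics = {"val/nll": 2.31}  # e.g. from NLLEvaluator(..., name="val")
    metrics.update(eval_metrics)      # no rewriting in the loop
    assert "val/nll" in metrics       # custom names are never double-prefixed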