benchmarks/benchmark_experiment.py (2 changes: 1 addition & 1 deletion)

@@ -25,7 +25,7 @@ def list_experiment_configs(self):
         "test": ["eval", "train"],
     }
 
-    # Apply command line chocies.
+    # Apply command line choices.
     if self._args.accelerator:
       config_choices["accelerator"] = list(set(self._args.accelerator))
     if self._args.xla:
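Note: the hunk above only fixes a comment typo, but the code around it is a common narrow-by-CLI pattern worth spelling out. A minimal standalone sketch (the flag names and the accelerator/xla choice lists are assumptions for illustration; only the "test" entry is taken from the hunk):

import argparse

# Full default search space; "test" mirrors the hunk above, the other
# two dimensions are assumed values for illustration.
config_choices = {
    "accelerator": ["cpu", "cuda"],
    "xla": [None, "PJRT"],
    "test": ["eval", "train"],
}

parser = argparse.ArgumentParser()
parser.add_argument("--accelerator", action="append")
parser.add_argument("--xla", action="append")
args = parser.parse_args(["--accelerator", "cuda", "--accelerator", "cuda"])

# Apply command line choices: narrow a dimension only if the user set
# it on the command line; set() dedupes repeated flags.
if args.accelerator:
  config_choices["accelerator"] = list(set(args.accelerator))
if args.xla:
  config_choices["xla"] = list(set(args.xla))

print(config_choices["accelerator"])  # ['cuda']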
benchmarks/experiment_runner.py (17 changes: 10 additions & 7 deletions)

@@ -113,7 +113,8 @@ def generate_and_run_all_configs(self):
         if not self.model_loader.is_compatible(benchmark_model,
                                                benchmark_experiment):
           logger.warning("SKIP incompatible model and experiment configs.")
-          self._save_results(experiment_cfg, model_cfg, {"error": "SKIP"})
+          self._save_results(benchmark_experiment.to_dict(),
+                             benchmark_model.to_dict(), {"error": "SKIP"})
           continue
 
         # Compose child process environment.
@@ -157,17 +158,21 @@ def generate_and_run_all_configs(self):
         except subprocess.TimeoutExpired as e:
           self._fwd_captured_stdout_stderr(e.stdout, e.stderr)
           logger.error("TIMEOUT")
-          self._save_results(experiment_cfg, model_cfg, {"error": str(e)})
+          self._save_results(benchmark_experiment.to_dict(),
+                             benchmark_model.to_dict(), {"error": str(e)})
         except subprocess.CalledProcessError as e:
           self._fwd_captured_stdout_stderr(e.stdout, e.stderr)
           logger.error("ERROR in subprocess")
-          self._save_results(experiment_cfg, model_cfg, {"error": e.stderr})
+          self._save_results(benchmark_experiment.to_dict(),
+                             benchmark_model.to_dict(), {"error": e.stderr})
         except subprocess.SubprocessError as e:
           logger.error("ERROR when launching child process")
-          self._save_results(experiment_cfg, model_cfg, {"error": str(e)})
+          self._save_results(benchmark_experiment.to_dict(),
+                             benchmark_model.to_dict(), {"error": str(e)})
         except ValueError as e:
           logger.error(f"ERROR {e}")
-          self._save_results(experiment_cfg, model_cfg, {"error": str(e)})
+          self._save_results(benchmark_experiment.to_dict(),
+                             benchmark_model.to_dict(), {"error": str(e)})
 
   # TODO: Use `_unique_basename` instead.
   def _get_config_fingerprint(self, experiment_config: OrderedDict,
@@ -210,8 +215,6 @@ def run_single_config(self):
           accumulated_metrics[k] = []
         accumulated_metrics[k].append(v)
 
-    # TODO: Use `experiment_config` and `model_config` when env vars are no
-    # longer included.
     self._save_results(benchmark_experiment.to_dict(),
                        benchmark_model.to_dict(), accumulated_metrics)
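Note: taken together, the experiment_runner.py hunks converge on one pattern: every `_save_results` call on the error paths of `generate_and_run_all_configs` now records the resolved `benchmark_experiment` / `benchmark_model` state via `to_dict()`, instead of the raw `experiment_cfg` / `model_cfg` loop variables, matching what `run_single_config` already does. A minimal sketch of that pattern (the class shape and the `save_results` body here are assumptions for illustration, not the repo's actual definitions):

from collections import OrderedDict


class BenchmarkExperiment:
  """Assumed shape, for illustration only."""

  def __init__(self, accelerator, xla, test):
    self.accelerator = accelerator
    self.xla = xla
    self.test = test

  def to_dict(self):
    # Serialize the fields the runner actually resolved; these can
    # differ from the raw config dict used to launch the run.
    return OrderedDict(
        accelerator=self.accelerator, xla=self.xla, test=self.test)


def save_results(experiment_dict, model_dict, metrics):
  # Stand-in for self._save_results: record one result row keyed by
  # the resolved experiment and model dicts.
  print({"experiment": experiment_dict, "model": model_dict, **metrics})


experiment = BenchmarkExperiment("cuda", "PJRT", "eval")
# On a failure path, the error string is saved under the same keys as
# a successful run would use.
save_results(experiment.to_dict(), {"model_name": "dummy"},
             {"error": "SKIP"})

Saving the resolved dicts rather than the raw configs keeps failure records ("SKIP", timeouts, subprocess errors) in the same schema as successful runs, so downstream result tooling sees one uniform format.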