Skip to content

Commit

Permalink
Merge pull request #178 from kiudee/176_plot_elo
Browse files Browse the repository at this point in the history
Add a plot which shows the Elo estimate of the optima over time
  • Loading branch information
kiudee committed Feb 11, 2022
2 parents f5845fb + 2820487 commit 78ae62c
Show file tree
Hide file tree
Showing 5 changed files with 139 additions and 7 deletions.
2 changes: 2 additions & 0 deletions HISTORY.rst
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@ Local tuner

- Add a plot which shows the optima predicted by the tuner across the
iterations (#172). This can be useful to gauge convergence.
- Add a plot which shows the estimated Elo (+ confidence interval) of the
predicted optima (#176).
- Tuner saves optima and their Elo performance (including standard deviation)
to disk now (#171).

Expand Down
3 changes: 2 additions & 1 deletion tune/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
reduce_ranges,
run_match,
)
from tune.plots import partial_dependence, plot_objective, plot_optima
from tune.plots import partial_dependence, plot_objective, plot_optima, plot_performance
from tune.priors import roundflat
from tune.utils import TimeControl, TimeControlBag, expected_ucb, parse_timecontrol

Expand All @@ -31,6 +31,7 @@
"partial_dependence",
"plot_objective",
"plot_optima",
"plot_performance",
"prob_to_elo",
"reduce_ranges",
"roundflat",
Expand Down
1 change: 1 addition & 0 deletions tune/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -427,6 +427,7 @@ def local( # noqa: C901
optimizer=opt,
result_object=result_object,
iterations=np.array(performance)[:, 0],
elos=np.array(performance)[:, 1:],
optima=np.array(optima),
plot_path=settings.get("plot_path", plot_path),
parameter_names=list(param_ranges.keys()),
Expand Down
22 changes: 17 additions & 5 deletions tune/local.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
from skopt.space import Categorical, Dimension, Integer, Real, Space
from skopt.utils import normalize_dimensions

from tune.plots import plot_objective, plot_objective_1d, plot_optima
from tune.plots import plot_objective, plot_objective_1d, plot_optima, plot_performance
from tune.summary import confidence_intervals
from tune.utils import TimeControl, confidence_to_mult, expected_ucb

Expand Down Expand Up @@ -519,6 +519,7 @@ def plot_results(
optimizer: Optimizer,
result_object: OptimizeResult,
iterations: np.ndarray,
elos: np.ndarray,
optima: np.ndarray,
plot_path: str,
parameter_names: Sequence[str],
Expand All @@ -534,6 +535,8 @@ def plot_results(
Result object containing the data and the last fitted model.
iterations : np.ndarray
Array containing the iterations at which optima were collected.
elos : np.ndarray, shape=(n_iterations, 2)
Array containing the estimated Elo of the optima and the standard error.
optima : np.ndarray
Array containing the predicted optimal parameters.
plot_path : str
Expand Down Expand Up @@ -570,21 +573,30 @@ def plot_results(
fig.patch.set_facecolor(dark_gray)
plot_objective(result_object, dimensions=parameter_names, fig=fig, ax=ax)
plotpath = pathlib.Path(plot_path)
plotpath.mkdir(parents=True, exist_ok=True)
full_plotpath = plotpath / f"{timestr}-{len(optimizer.Xi)}.png"
for subdir in ["landscapes", "elo", "optima"]:
(plotpath / subdir).mkdir(parents=True, exist_ok=True)
full_plotpath = plotpath / f"landscapes/landscape-{timestr}-{len(optimizer.Xi)}.png"
dpi = 150 if optimizer.space.n_dims == 1 else 300
plt.savefig(full_plotpath, dpi=dpi, facecolor=dark_gray, **save_params)
logger.info(f"Saving a plot to {full_plotpath}.")
plt.close(fig)

# Now plot the history of optima:
# Plot the history of optima:
fig, ax = plot_optima(
iterations=iterations,
optima=optima,
space=optimizer.space,
parameter_names=parameter_names,
)
full_plotpath = plotpath / f"optima-{timestr}-{len(optimizer.Xi)}.png"
full_plotpath = plotpath / f"optima/optima-{timestr}-{len(optimizer.Xi)}.png"
fig.savefig(full_plotpath, dpi=150, facecolor=dark_gray)
plt.close(fig)

# Plot the predicted Elo performance of the optima:
fig, ax = plot_performance(
performance=np.hstack([iterations[:, None], elos]), confidence=confidence
)
full_plotpath = plotpath / f"elo/elo-{timestr}-{len(optimizer.Xi)}.png"
fig.savefig(full_plotpath, dpi=150, facecolor=dark_gray)
plt.close(fig)

Expand Down
118 changes: 117 additions & 1 deletion tune/plots.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,13 @@

from tune.utils import confidence_to_mult, expected_ucb

__all__ = ["partial_dependence", "plot_objective", "plot_objective_1d", "plot_optima"]
__all__ = [
"partial_dependence",
"plot_objective",
"plot_objective_1d",
"plot_optima",
"plot_performance",
]


def _evenly_sample(dim, n_points):
Expand Down Expand Up @@ -613,5 +619,115 @@ def plot_optima(

if parameter_names is not None:
a.set_ylabel(parameter_names[i])
return fig, ax


def plot_performance(
    performance: np.ndarray,
    confidence: float = 0.9,
    plot_width: float = 8,
    aspect_ratio: float = 0.7,
    fig: Optional[Figure] = None,
    ax: Optional[Axes] = None,
    colors: Optional[Sequence[Union[tuple, str]]] = None,
) -> Tuple[Figure, Axes]:
    """Plot the estimated Elo of the optima predicted by the tuning algorithm.

    Parameters
    ----------
    performance : np.ndarray, shape=(n_iterations, 3)
        Array whose three columns are the iteration numbers, the estimated Elo
        of the predicted optimum, and the estimated standard error of that Elo.
    confidence : float, optional (default=0.9)
        The confidence level of the interval to shade around the estimated
        Elo. The interval half-width is the corresponding normal-quantile
        multiple of the standard error.
    plot_width : float, optional (default=8)
        The width of the plot in inches.
    aspect_ratio : float, optional (default=0.7)
        The aspect ratio of the plot. The default is 0.7, which means that the
        height of the plot will be 70% of the width.
    fig : Figure, optional
        The figure to plot on. If not provided, a new figure in the style of
        chess-tuning-tools will be created.
    ax : Axes, optional
        The axes to plot on. Must be passed whenever ``fig`` is passed;
        otherwise new axes are created together with the new figure.
    colors : Sequence[Union[tuple, str]], optional
        The colors to use for the plots. If not provided, the color scheme
        'Set3' of matplotlib will be used.

    Returns
    -------
    Figure
        The figure containing the plot.
    Axes
        The axes the Elo history was drawn on.

    Raises
    ------
    ValueError
        if a fig, but no ax is passed
    """
    # Columns of `performance`: iteration number, Elo estimate, standard error.
    iterations, elo, elo_std = performance.T
    if colors is None:
        colors = plt.cm.get_cmap("Set3").colors
    if fig is None:
        # No figure given: create one in the dark chess-tuning-tools style.
        plt.style.use("dark_background")
        figsize = (plot_width, aspect_ratio * plot_width)
        fig, ax = plt.subplots(figsize=figsize)

        # Margins are specified in inches and converted to figure fractions
        # by dividing by the corresponding figure dimension:
        margin_left = 0.8
        margin_right = 0.1
        margin_bottom = 0.7
        margin_top = 0.3
        plt.subplots_adjust(
            left=margin_left / figsize[0],
            right=1 - margin_right / figsize[0],
            bottom=margin_bottom / figsize[1],
            top=1 - margin_top / figsize[1],
        )
        ax.set_facecolor("#36393f")
        ax.grid(which="major", color="#ffffff", alpha=0.1)
        fig.patch.set_facecolor("#36393f")
        ax.set_title("Elo of the predicted best parameters over time")
    elif ax is None:
        # A figure without axes is ambiguous — we do not know where to draw.
        raise ValueError("Axes must be specified if a figure is provided.")

    # Central Elo estimate as a line over the iterations:
    ax.plot(
        iterations,
        elo,
        color=colors[0],
        zorder=10,
        linewidth=1.3,
        label="Predicted Elo",
    )
    # Shade the confidence band: ± (normal quantile for `confidence`) * stderr.
    confidence_mult = confidence_to_mult(confidence)
    ax.fill_between(
        iterations,
        elo - confidence_mult * elo_std,
        elo + confidence_mult * elo_std,
        color=colors[0],
        linewidth=0,
        zorder=9,
        alpha=0.25,
        label=f"{confidence:.0%} confidence interval",
    )
    # Horizontal reference line at the most recent Elo estimate:
    ax.axhline(
        y=elo[-1],
        linestyle="--",
        zorder=8,
        color=colors[0],
        label="Last prediction",
        linewidth=1,
        alpha=0.3,
    )
    # Legend below the axes to keep the plot area unobstructed:
    ax.legend(loc="upper center", frameon=False, bbox_to_anchor=(0.5, -0.08), ncol=3)
    ax.set_xlabel("Iteration")
    ax.set_ylabel("Elo")
    ax.set_xlim(min(iterations), max(iterations))

    return fig, ax

0 comments on commit 78ae62c

Please sign in to comment.