ENH: add universal displays for debugging iteration
jeffgortmaker committed May 21, 2022
1 parent 6f40d9b commit d18a9a1
Showing 4 changed files with 187 additions and 66 deletions.
19 changes: 13 additions & 6 deletions pyblp/configurations/iteration.py
@@ -11,7 +11,8 @@

# define contraction function types
ContractionResults = Tuple[Array, Optional[Array], Optional[Array]]
ContractionFunction = Callable[[Array], ContractionResults]
ContractionFunction = Callable[[Array, int, int], ContractionResults]
ContractionWrapper = Callable[[Array], ContractionResults]


class Iteration(StringRepresentation):
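
For orientation, a minimal sketch of a callable matching the new ContractionFunction signature (the toy function below is illustrative, not from this commit): market code now builds contractions that receive the running iteration and evaluation counts alongside the current point, and they still return the usual (values, weights, jacobian) tuple.

import numpy as np
from typing import Optional, Tuple

def toy_contraction(
        x: np.ndarray, iterations: int, evaluations: int
) -> Tuple[np.ndarray, Optional[np.ndarray], Optional[np.ndarray]]:
    """Toy contraction x -> 0.5 * x + 1 that also sees the progress counters."""
    values = 0.5 * x + 1.0
    weights = None                    # no custom norm weights
    jacobian = 0.5 * np.eye(x.size)   # analytic Jacobian, used only if requested
    return values, weights, jacobian
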
@@ -115,6 +116,10 @@ class Iteration(StringRepresentation):
compute_jacobian : `bool, optional`
Whether to compute an analytic Jacobian during iteration. By default, analytic Jacobians are not computed, and
if a ``method`` is selected that supports analytic Jacobians, they will by default be numerically approximated.
universal_display : `bool, optional`
Whether to format iteration progress such that the display looks the same for all routines. By default, the
universal display is not used and no iteration progress is displayed. Setting this to ``True`` can be helpful
for debugging iteration issues, such as iteration that stalls above the configured termination tolerance.
Examples
--------
@@ -136,9 +141,10 @@ class Iteration(StringRepresentation):
_description: str
_method_options: Options
_compute_jacobian: bool
_universal_display: bool

def __init__(self, method: Union[str, Callable], method_options: Optional[Options] = None,
compute_jacobian: bool = False) -> None:
compute_jacobian: bool = False, universal_display: bool = False) -> None:
"""Validate the method and configure default options."""
simple_methods = {
'simple': (functools.partial(simple_iterator), "no acceleration"),
@@ -173,6 +179,7 @@ def __init__(self, method: Union[str, Callable], method_options: Optional[Option

# initialize class attributes
self._compute_jacobian = compute_jacobian
self._universal_display = universal_display

# options are by default empty
if method_options is None:
@@ -263,7 +270,7 @@ def contraction_wrapper(raw_values: Any) -> ContractionResults:
if not isinstance(raw_values, np.ndarray):
raw_values = np.asarray(raw_values)
values = raw_values.reshape(initial.shape).astype(initial.dtype, copy=False)
values, weights, jacobian = contraction(values)
values, weights, jacobian = contraction(values, iterations, evaluations)
return (
values.astype(raw_values.dtype, copy=False).reshape(raw_values.shape),
None if weights is None else weights.astype(raw_values.dtype, copy=False).reshape(raw_values.shape),
@@ -294,7 +301,7 @@ def return_iterator(initial: Array, *_: Any, **__: Any) -> Tuple[Array, bool]:


def scipy_iterator(
initial: Array, contraction: ContractionFunction, iteration_callback: Callable[[], None], method: str,
initial: Array, contraction: ContractionWrapper, iteration_callback: Callable[[], None], method: str,
compute_jacobian: bool, **scipy_options: Any) -> Tuple[Array, bool]:
"""Apply a SciPy root finding method."""

@@ -350,7 +357,7 @@ def contraction_wrapper(x: Array) -> Union[Tuple[Array, Array], Array]:


def simple_iterator(
initial: Array, contraction: ContractionFunction, iteration_callback: Callable[[], None], max_evaluations: int,
initial: Array, contraction: ContractionWrapper, iteration_callback: Callable[[], None], max_evaluations: int,
atol: float, rtol: float, norm: Callable[[Array], float]) -> Tuple[Array, bool]:
"""Apply simple fixed point iteration with no acceleration."""
x = initial
@@ -378,7 +385,7 @@


def squarem_iterator(
initial: Array, contraction: ContractionFunction, iteration_callback: Callable[[], None], max_evaluations: int,
initial: Array, contraction: ContractionWrapper, iteration_callback: Callable[[], None], max_evaluations: int,
atol: float, rtol: float, norm: Callable[[Array], float], scheme: int, step_min: float, step_max: float,
step_factor: float) -> Tuple[Array, bool]:
"""Apply the SQUAREM acceleration method for fixed point iteration."""
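As a usage sketch of the new option (the solve call below is assumed from pyblp's documented API rather than shown in this diff), the flag is passed when constructing the iteration configuration:

from pyblp import Iteration

# SQUAREM-accelerated fixed point with a tight tolerance; universal_display=True prints a
# per-evaluation progress row (market, counts, max norm) instead of remaining silent.
iteration = Iteration('squarem', {'atol': 1e-14}, universal_display=True)

# For example: problem.solve(sigma=initial_sigma, iteration=iteration)  # assumed call site
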
210 changes: 158 additions & 52 deletions pyblp/markets/market.py
@@ -12,7 +12,9 @@
from ..parameters import BetaParameter, GammaParameter, NonlinearCoefficient, Parameter, Parameters, RhoParameter
from ..primitives import Container
from ..utilities.algebra import approximately_invert, approximately_solve
from ..utilities.basics import Array, Bounds, RecArray, Error, Groups, SolverStats, update_matrices
from ..utilities.basics import (
Array, Bounds, RecArray, Error, Groups, SolverStats, format_number, format_table, output, update_matrices
)


class Market(Container):
@@ -460,6 +462,54 @@ def compute_delta(

return delta, clipped_shares, SolverStats(), errors

# add padding around the universal display
if iteration._universal_display:
output("")

# keep track of the best max norm
smallest_max_norm = np.inf

def universal_display(x0: Array, x: Array, iterations: int, evaluations: int) -> None:
"""Format and output a universal display of iteration progress. The first iteration will include the
progress table header.
"""
if not iteration._universal_display:
return

# construct the leftmost part of the table that always shows up
header = [
("", "Market"),
("Contraction", "Iterations"),
("Contraction", "Evaluations"),
]
values = [
str(self.t),
str(iterations),
str(evaluations),
]

# add a count of any clipped shares
nonlocal clipped_shares
if np.isfinite(shares_bounds).any():
header.append(("Clipped", "Shares"))
values.append(str(clipped_shares.sum()))

# add information about the max norm
nonlocal smallest_max_norm
header.extend([("Delta" if 'linear' in fp_type else "Exp Delta", "Max Norm"), ("Max Norm", "Improvement")])
max_norm = np.max(np.abs(x - x0))
values.append(format_number(max_norm))
improvement = smallest_max_norm - max_norm
if np.isfinite(improvement) and improvement > 0:
values.append(format_number(smallest_max_norm - max_norm))
else:
values.append(" " * len(format_number(improvement)))
if improvement > 0:
smallest_max_norm = max_norm

# format and output the table
output(format_table(header, values, include_border=False, include_header=evaluations == 1))

# solve for delta with a linear fixed point
if 'linear' in fp_type:
log_shares = np.log(self.products.shares)
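
To make the display bookkeeping above easier to follow, here is a standalone sketch of the "Max Norm Improvement" column's behavior (format_number is stubbed and all names are hypothetical): the column stays blank on the first evaluation, when the previous best is still infinite, and whenever an evaluation fails to improve on the best max norm seen so far.

import numpy as np

def fmt(value: float) -> str:
    """Stub standing in for pyblp's format_number."""
    return f"{value:+.1E}"

smallest_max_norm = np.inf
for max_norm in [2.0, 1.5, 1.7, 0.9]:            # pretend per-evaluation max norms
    improvement = smallest_max_norm - max_norm
    if np.isfinite(improvement) and improvement > 0:
        column = fmt(improvement)                # printable improvement
    else:
        column = " " * len(fmt(improvement))     # blank cell of matching width
    if improvement > 0:                          # infinity also counts as an improvement
        smallest_max_norm = max_norm
    print(fmt(max_norm), column)
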
@@ -493,12 +543,13 @@ def clip_shares(shares: Array) -> None:

# define the linear contraction
if self.H == 0:
def contraction(x: Array) -> ContractionResults:
def contraction(x: Array, iterations: int, evaluations: int) -> ContractionResults:
"""Compute the next linear delta and optionally its Jacobian."""
probabilities = compute_probabilities(x)[0]
shares = probabilities @ self.agents.weights
clip_shares(shares)
x = x + log_shares - np.log(shares)
x0, x = x, x + log_shares - np.log(shares)
universal_display(x0, x, iterations, evaluations)
if not iteration._compute_jacobian:
return x, None, None
weighted_probabilities = self.agents.weights * probabilities.T
@@ -508,12 +559,13 @@ def contraction(x: Array) -> ContractionResults:
dampener = 1 - self.rho
rho_membership = self.rho * self.get_membership_matrix()

def contraction(x: Array) -> ContractionResults:
def contraction(x: Array, iterations: int, evaluations: int) -> ContractionResults:
"""Compute the next linear delta and optionally its Jacobian under nesting."""
probabilities, conditionals = compute_probabilities(x)
shares = probabilities @ self.agents.weights
clip_shares(shares)
x = x + (log_shares - np.log(shares)) * dampener
x0, x = x, x + (log_shares - np.log(shares)) * dampener
universal_display(x0, x, iterations, evaluations)
if not iteration._compute_jacobian:
return x, None, None
weighted_probabilities = self.agents.weights * probabilities.T
@@ -524,54 +576,60 @@ def contraction(x: Array) -> ContractionResults:

# solve the linear fixed point problem
delta, stats = iteration._iterate(initial_delta, contraction)
return delta, clipped_shares, stats, errors

# solve for delta with a nonlinear fixed point
assert 'nonlinear' in fp_type and self.epsilon_scale == 1
if 'safe' in fp_type:
utility_reduction = np.clip(self.mu.max(axis=0, keepdims=True), 0, None)
exp_mu = np.exp(self.mu - utility_reduction)
compute_probabilities = functools.partial(
self.compute_probabilities, mu=exp_mu, utility_reduction=utility_reduction, linear=False
)
else:
exp_mu = np.exp(self.mu)
compute_probabilities = functools.partial(self.compute_probabilities, mu=exp_mu, linear=False)
# solve for delta with a nonlinear fixed point
assert 'nonlinear' in fp_type and self.epsilon_scale == 1
if 'safe' in fp_type:
utility_reduction = np.clip(self.mu.max(axis=0, keepdims=True), 0, None)
exp_mu = np.exp(self.mu - utility_reduction)
compute_probabilities = functools.partial(
self.compute_probabilities, mu=exp_mu, utility_reduction=utility_reduction, linear=False
)
else:
exp_mu = np.exp(self.mu)
compute_probabilities = functools.partial(self.compute_probabilities, mu=exp_mu, linear=False)

# define the nonlinear contraction
if self.H == 0:
def contraction(x: Array, iterations: int, evaluations: int) -> ContractionResults:
"""Compute the next exponentiated delta and optionally its Jacobian."""
probability_ratios = compute_probabilities(x, numerator=exp_mu)[0]
share_ratios = probability_ratios @ self.agents.weights
x0, x = x, self.products.shares / share_ratios
universal_display(x0, x, iterations, evaluations)
if not iteration._compute_jacobian:
return x, None, None
shares = x0 * share_ratios
probabilities = x0 * probability_ratios
weighted_probabilities = self.agents.weights * probabilities.T
jacobian = x / x0.T * (probabilities @ weighted_probabilities) / shares
return x, None, jacobian
else:
dampener = 1 - self.rho
rho_membership = self.rho * self.get_membership_matrix()

def contraction(x: Array, iterations: int, evaluations: int) -> ContractionResults:
"""Compute the next exponentiated delta and optionally its Jacobian under nesting."""
probabilities, conditionals = compute_probabilities(x)
shares = probabilities @ self.agents.weights
x0, x = x, x * (self.products.shares / shares)**dampener
universal_display(x0, x, iterations, evaluations)
if not iteration._compute_jacobian:
return x, None, None
weighted_probabilities = self.agents.weights * probabilities.T
probabilities_part = dampener * (probabilities @ weighted_probabilities)
conditionals_part = rho_membership * (conditionals @ weighted_probabilities)
jacobian = x / x0.T * (probabilities_part + conditionals_part) / shares
return x, None, jacobian

# solve the nonlinear fixed point problem
exp_delta, stats = iteration._iterate(np.exp(initial_delta), contraction)
delta = np.log(exp_delta)

# add padding around the universal display
if iteration._universal_display:
output("")

# define the nonlinear contraction
if self.H == 0:
def contraction(x: Array) -> ContractionResults:
"""Compute the next exponentiated delta and optionally its Jacobian."""
probability_ratios = compute_probabilities(x, numerator=exp_mu)[0]
share_ratios = probability_ratios @ self.agents.weights
x0, x = x, self.products.shares / share_ratios
if not iteration._compute_jacobian:
return x, None, None
shares = x0 * share_ratios
probabilities = x0 * probability_ratios
weighted_probabilities = self.agents.weights * probabilities.T
jacobian = x / x0.T * (probabilities @ weighted_probabilities) / shares
return x, None, jacobian
else:
dampener = 1 - self.rho
rho_membership = self.rho * self.get_membership_matrix()

def contraction(x: Array) -> ContractionResults:
"""Compute the next exponentiated delta and optionally its Jacobian under nesting."""
probabilities, conditionals = compute_probabilities(x)
shares = probabilities @ self.agents.weights
x0, x = x, x * (self.products.shares / shares) ** dampener
if not iteration._compute_jacobian:
return x, None, None
weighted_probabilities = self.agents.weights * probabilities.T
probabilities_part = dampener * (probabilities @ weighted_probabilities)
conditionals_part = rho_membership * (conditionals @ weighted_probabilities)
jacobian = x / x0.T * (probabilities_part + conditionals_part) / shares
return x, None, jacobian

# solve the nonlinear fixed point problem
exp_delta, stats = iteration._iterate(np.exp(initial_delta), contraction)
delta = np.log(exp_delta)
return delta, clipped_shares, stats, errors

def compute_capital_lamda_gamma(
Expand Down Expand Up @@ -657,7 +715,49 @@ def compute_equilibrium_prices(
second_derivatives = self.compute_utility_derivatives('prices', order=2)
get_second_derivatives = lambda _: second_derivatives

def contraction(x: Array) -> ContractionResults:
# add padding around the universal display
if iteration._universal_display:
output("")

# keep track of the best max norm
smallest_max_norm = np.inf

def universal_display(x0: Array, x: Array, iterations: int, evaluations: int) -> None:
"""Format and output a universal display of iteration progress. The first iteration will include the
progress table header.
"""
if not iteration._universal_display:
return

# construct the leftmost part of the table that always shows up
header = [
("", "Market"),
("Contraction", "Iterations"),
("Contraction", "Evaluations"),
]
values = [
str(self.t),
str(iterations),
str(evaluations),
]

# add information about the max norm
nonlocal smallest_max_norm
header.extend([("Prices", "Max Norm"), ("Max Norm", "Improvement")])
max_norm = np.max(np.abs(x - x0))
values.append(format_number(max_norm))
improvement = smallest_max_norm - max_norm
if np.isfinite(improvement) and improvement > 0:
values.append(format_number(smallest_max_norm - max_norm))
else:
values.append(" " * len(format_number(improvement)))
if improvement > 0:
smallest_max_norm = max_norm

# format and output the table
output(format_table(header, values, include_border=False, include_header=evaluations == 1))

def contraction(x: Array, iterations: int, evaluations: int) -> ContractionResults:
"""Compute the next equilibrium prices."""

# update probabilities and shares
@@ -688,6 +788,7 @@ def contraction(x: Array) -> ContractionResults:
weights = np.abs(capital_lamda_diagonal)

# update prices
universal_display(x, updated_x, iterations, evaluations)
if not iteration._compute_jacobian:
return updated_x, weights, None

@@ -725,6 +826,11 @@ def contraction(x: Array) -> ContractionResults:

# solve the fixed point problem
prices, stats = iteration._iterate(prices, contraction)

# add padding around the universal display
if iteration._universal_display:
output("")

return prices, stats

def compute_shares(self, prices: Optional[Array] = None) -> Array:
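The equilibrium price fixed point reuses the same display pattern with a "Prices" max norm column and no clipped-shares count. A hedged usage sketch (the compute_prices call and its arguments are assumed from pyblp's API, not part of this diff):

from pyblp import Iteration

# verbose simple fixed point for counterfactual price computation, mirroring the test configuration below
price_iteration = Iteration('simple', {'atol': 1e-12}, universal_display=True)

# For example: results.compute_prices(firm_ids=merged_firm_ids, iteration=price_iteration)  # assumed call site
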
6 changes: 3 additions & 3 deletions tests/test_blp.py
@@ -30,7 +30,7 @@
pytest.param({'fp_type': 'safe_nonlinear'}, id="nonlinear fixed point"),
pytest.param({'fp_type': 'nonlinear'}, id="non-safe nonlinear fixed point"),
pytest.param(
{'iteration': Iteration('hybr', {'xtol': 1e-12}, compute_jacobian=True)},
{'iteration': Iteration('hybr', {'xtol': 1e-12}, compute_jacobian=True, universal_display=True)},
id="linear Newton fixed point"
),
pytest.param(
@@ -207,7 +207,7 @@ def test_result_serialization(simulated_problem: SimulatedProblemFixture) -> Non
originals = [
Formulation('x + y', absorb='C(z)', absorb_method='lsmr', absorb_options={'tol': 1e-10}),
Integration('halton', size=10, specification_options={'seed': 0, 'scramble': True}),
Iteration('lm', method_options={'max_evaluations': 100}, compute_jacobian=True),
Iteration('lm', method_options={'max_evaluations': 100}, compute_jacobian=True, universal_display=True),
Optimization('nelder-mead', method_options={'xatol': 1e-5}, compute_gradient=False, universal_display=False),
problem,
simulation,
@@ -555,7 +555,7 @@ def test_costs(simulated_problem: SimulatedProblemFixture) -> None:

@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('iteration', [
pytest.param(Iteration('simple', {'atol': 1e-12}), id="simple"),
pytest.param(Iteration('simple', {'atol': 1e-12}, universal_display=True), id="simple"),
pytest.param(Iteration('hybr', {'xtol': 1e-12, 'ftol': 0}, compute_jacobian=True), id="Powell/LM"),
])
def test_prices(simulated_problem: SimulatedProblemFixture, iteration: Iteration) -> None:
