ENH: add labels for theta
jeffgortmaker committed Aug 23, 2022
1 parent 828ad22 commit 84ac65b
Showing 3 changed files with 37 additions and 18 deletions.
21 changes: 18 additions & 3 deletions pyblp/parameters.py
@@ -143,6 +143,7 @@ class Parameters(object):
     rho_labels: List[str]
     beta_labels: List[str]
     gamma_labels: List[str]
+    theta_labels: List[str]
     rc_types: List[str]
     sigma: Array
     sigma_squared: Array
@@ -281,6 +282,9 @@ def __init__(
         # identify whether there are any bounds
         self.any_bounds = np.isfinite(self.compress_bounds()).any()

+        # define labels for theta
+        self.theta_labels = self.compress_labels()
+
     @staticmethod
     def initialize_matrix(
             name: str, condition_name: str, values: Optional[Any], shapes: Sequence[Tuple[int, int]],
@@ -507,7 +511,7 @@ def compress(self) -> Array:
             (PiParameter, self.pi),
             (RhoParameter, self.rho),
             (BetaParameter, self.beta),
-            (GammaParameter, self.gamma)
+            (GammaParameter, self.gamma),
         ]
         return np.r_[[v[p.location] for t, v in items for p in self.unfixed if isinstance(p, t)]]

@@ -518,10 +522,21 @@ def compress_bounds(self) -> List[Tuple[float, float]]:
             (PiParameter, self.pi_bounds),
             (RhoParameter, self.rho_bounds),
             (BetaParameter, self.beta_bounds),
-            (GammaParameter, self.gamma_bounds)
+            (GammaParameter, self.gamma_bounds),
         ]
         return [(l[p.location], u[p.location]) for t, (l, u) in items for p in self.unfixed if isinstance(p, t)]

+    def compress_labels(self) -> List[str]:
+        """Compress labels into a list of labels for theta."""
+        items = [
+            (SigmaParameter, np.array([[f'{k1} x {k2}' for k2 in self.sigma_labels] for k1 in self.sigma_labels])),
+            (PiParameter, np.array([[f'{k1} x {k2}' for k2 in self.pi_labels] for k1 in self.sigma_labels])),
+            (RhoParameter, np.c_[np.array(self.rho_labels)]),
+            (BetaParameter, np.c_[np.array(self.beta_labels)]),
+            (GammaParameter, np.c_[np.array(self.gamma_labels)]),
+        ]
+        return [v[p.location] for t, v in items for p in self.unfixed if isinstance(p, t)]
+
     def expand(self, theta_like: Array, nullify: bool = False) -> Tuple[Array, Array, Array, Array, Array]:
         """Recover matrices of the same size as parameter matrices from a vector of the same size as theta. By default,
         fill elements corresponding to fixed parameters with their fixed values. Always fill concentrated out parameters
@@ -537,7 +552,7 @@ def expand(self, theta_like: Array, nullify: bool = False) -> Tuple[Array, Array
             (PiParameter, pi_like),
             (RhoParameter, rho_like),
             (BetaParameter, beta_like),
-            (GammaParameter, gamma_like)
+            (GammaParameter, gamma_like),
         ]

         # fill values of unfixed parameters
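To see what the new compress_labels produces, here is a small standalone sketch that is not part of the commit (the label lists below are hypothetical) of the label matrices it builds before selecting the entries that correspond to unfixed parameters:

    import numpy as np

    sigma_labels = ['1', 'prices']   # hypothetical X2 labels
    pi_labels = ['income']           # hypothetical demographic labels

    # labels for elements of sigma pair two X2 labels
    sigma_label_matrix = np.array([[f'{k1} x {k2}' for k2 in sigma_labels] for k1 in sigma_labels])

    # labels for elements of pi pair an X2 label (rows) with a demographic label (columns)
    pi_label_matrix = np.array([[f'{k1} x {k2}' for k2 in pi_labels] for k1 in sigma_labels])

    print(sigma_label_matrix[1, 1])  # 'prices x prices'
    print(pi_label_matrix[1, 0])     # 'prices x income'

The rho, beta, and gamma labels pass through as column vectors, so only the element at an unfixed parameter's location ends up in the compressed list.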
22 changes: 11 additions & 11 deletions pyblp/results/economy_results.py
@@ -1319,11 +1319,11 @@ def compute_micro_scores(
         Returns
         -------
         `list`
-            Scores :math:`\mathscr{S}_n`. The list is in the same order as :attr:`ProblemResults.theta`. Each element
-            of the list is an array of scores for the corresponding parameter. The array is in the same order as
-            observations appear in the ``micro_data``. Note that it is possible for parameters in
-            :attr:`ProblemResults.theta` to mechanically have zero scores, for example if they are on a constant
-            demographic.
+            Scores :math:`\mathscr{S}_n`. The list is in the same order as :attr:`ProblemResults.theta` (also see
+            :attr:`ProblemResults.theta_labels`). Each element of the list is an array of scores for the corresponding
+            parameter. The array is in the same order as observations appear in the ``micro_data``. Note that it is
+            possible for parameters in :attr:`ProblemResults.theta` to mechanically have zero scores, for example if
+            they are on a constant demographic.

             Taking the mean of a parameter's scores delivers the observed ``value`` for an optimal
             :class:`MicroMoment` that matches the score for that parameter.
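To illustrate the documented ordering, a hedged sketch (assuming `results` is a solved ProblemResults and that `dataset` and `micro_data` are already configured; the exact call arguments are assumptions for illustration) pairs the returned scores with the new theta_labels and takes means to get observed values for optimal micro moments:

    # hedged sketch: arguments to compute_micro_scores are assumed for illustration
    scores = results.compute_micro_scores(dataset, micro_data)

    # the list of score arrays lines up element-by-element with results.theta_labels
    observed_values = {
        label: scores_n.mean()  # mean over micro observations
        for label, scores_n in zip(results.theta_labels, scores)
    }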
@@ -1408,12 +1408,12 @@ def compute_agent_scores(
         Returns
         -------
         `list`
-            Scores :math:`\mathscr{S}_n`. The list is in the same order as :attr:`ProblemResults.theta`. Each element
-            of the list is a mapping from market IDs supported by the ``dataset`` to an array of scores for the
-            corresponding parameter and market. The array's dimensions correspond to the dimensions of the weights
-            returned by ``compute_weights`` passed to ``dataset``. Note that it is possible for parameters in
-            :attr:`ProblemResults.theta` to mechanically have zero scores, for example if they are on a constant
-            demographic.
+            Scores :math:`\mathscr{S}_n`. The list is in the same order as :attr:`ProblemResults.theta` (also see
+            :attr:`ProblemResults.theta_labels`). Each element of the list is a mapping from market IDs supported by the
+            ``dataset`` to an array of scores for the corresponding parameter and market. The array's dimensions
+            correspond to the dimensions of the weights returned by ``compute_weights`` passed to ``dataset``. Note that
+            it is possible for parameters in :attr:`ProblemResults.theta` to mechanically have zero scores, for example
+            if they are on a constant demographic.

             To build an optimal :class:`MicroMoment` that matches the score for a parameter, ``compute_values``
             in its single :class:`MicroPart` should select the array corresponding to that parameter and the requested
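A similarly hedged sketch for compute_agent_scores: the `dataset` argument, the 'prices x income' label, and the market ID 'market0' are all assumptions for illustration, but the list again lines up with theta_labels, so a label can be used to pick out one parameter's market-level scores when building the optimal micro moment's compute_values:

    # hedged sketch: arguments and keys below are hypothetical
    agent_scores = results.compute_agent_scores(dataset)

    # map each theta label to that parameter's market-by-market score arrays
    scores_by_label = dict(zip(results.theta_labels, agent_scores))

    # scores for a single parameter in a single market
    pi_scores_in_market = scores_by_label['prices x income']['market0']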
12 changes: 8 additions & 4 deletions pyblp/results/problem_results.py
@@ -151,6 +151,8 @@ class ProblemResults(EconomyResults):
         Variable labels for :math:`\beta`, which are derived from the formulation for :math:`X_1`.
     gamma_labels : `list of str`
         Variable labels for :math:`\gamma`, which are derived from the formulation for :math:`X_3`.
+    theta_labels : `list of str`
+        Variable labels for :math:`\theta`, which are derived from the above labels.
     delta : `ndarray`
         Estimated mean utility, :math:`\delta(\hat{\theta})`.
     clipped_shares : `ndarray`
@@ -278,6 +280,7 @@ class ProblemResults(EconomyResults):
     rho_labels: List[str]
     beta_labels: List[str]
     gamma_labels: List[str]
+    theta_labels: List[str]
     delta: Array
     clipped_shares: Array
     tilde_costs: Array
@@ -433,6 +436,7 @@ def __init__(
         self.rho_labels = self._parameters.rho_labels
         self.beta_labels = self._parameters.beta_labels
         self.gamma_labels = self._parameters.gamma_labels
+        self.theta_labels = self._parameters.theta_labels

         # ignore computational errors when updating the weighting matrix and computing covariances
         with np.errstate(all='ignore'):
@@ -652,10 +656,10 @@ def to_dict(
                 'cumulative_contraction_evaluations', 'parameters', 'parameter_covariances', 'theta', 'sigma',
                 'sigma_squared', 'pi', 'rho', 'beta', 'gamma', 'sigma_se', 'sigma_squared_se', 'pi_se', 'rho_se',
                 'beta_se', 'gamma_se', 'sigma_bounds', 'pi_bounds', 'rho_bounds', 'beta_bounds', 'gamma_bounds',
-                'sigma_labels', 'pi_labels', 'rho_labels', 'beta_labels', 'gamma_labels', 'delta', 'tilde_costs',
-                'clipped_shares', 'clipped_costs', 'xi', 'omega', 'xi_fe', 'omega_fe', 'micro', 'micro_values',
-                'micro_covariances', 'moments', 'moments_jacobian', 'simulation_covariances', 'objective',
-                'xi_by_theta_jacobian', 'omega_by_theta_jacobian', 'micro_by_theta_jacobian', 'gradient',
+                'sigma_labels', 'pi_labels', 'rho_labels', 'beta_labels', 'gamma_labels', 'theta_labels', 'delta',
+                'tilde_costs', 'clipped_shares', 'clipped_costs', 'xi', 'omega', 'xi_fe', 'omega_fe', 'micro',
+                'micro_values', 'micro_covariances', 'moments', 'moments_jacobian', 'simulation_covariances',
+                'objective', 'xi_by_theta_jacobian', 'omega_by_theta_jacobian', 'micro_by_theta_jacobian', 'gradient',
                 'projected_gradient', 'projected_gradient_norm', 'hessian', 'reduced_hessian',
                 'reduced_hessian_eigenvalues', 'W', 'updated_W'
             )) -> dict:
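End to end, the new attribute makes it easy to read theta element by element. A hedged usage sketch, assuming `results` is a pyblp.ProblemResults from a solved problem:

    # theta_labels is ordered like theta, so the two can be zipped directly
    for label, estimate in zip(results.theta_labels, results.theta.flatten()):
        print(f'{label}: {estimate:+.4f}')

    # the labels are also included in the default output of to_dict
    print(results.to_dict()['theta_labels'])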
