Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

use MarkovProcess in consumption saving models, fixes #639 #929

Merged
merged 3 commits into from
Jan 21, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Documentation/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ Release Data: TBD
* calcExpectations method for taking the expectation of a distribution over a function [#884](https://github.com/econ-ark/HARK/pull/884) [#897](https://github.com/econ-ark/HARK/pull/897)
* Centralizes the definition of value, marginal value, and marginal marginal value functions that use inverse-space
interpolation for problems with CRRA utility. See [#888](https://github.com/econ-ark/HARK/pull/888).
* MarkovProcess class [#902](https://github.com/econ-ark/HARK/pull/902)
* MarkovProcess class used in ConsMarkovModel, ConsRepAgentModel, ConsAggShockModel [#902](https://github.com/econ-ark/HARK/pull/902) [#929](https://github.com/econ-ark/HARK/pull/929)
* Adds SSA life tables and methods to extract survival probabilities from them [#906](https://github.com/econ-ark/HARK/pull/906).
* Fix the return fields of `dcegm/calcCrossPoints` [#909](https://github.com/econ-ark/HARK/pull/909).
* Corrects location of constructor documentation to class string for Sphinx rendering [#908](https://github.com/econ-ark/HARK/pull/908)
Expand Down
14 changes: 8 additions & 6 deletions HARK/ConsumptionSaving/ConsAggShockModel.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
makeGridExpMult,
)
from HARK.distribution import (
MarkovProcess,
MeanOneLogNormal,
Uniform,
combineIndepDstns,
Expand Down Expand Up @@ -2547,7 +2548,6 @@ def makeMrkvHist(self):

# Initialize the Markov history and set up transitions
MrkvNow_hist = np.zeros(self.act_T_orig, dtype=int)
cutoffs = np.cumsum(self.MrkvArray, axis=1)
loops = 0
go = True
MrkvNow = self.sow_init["MrkvNow"]
Expand All @@ -2557,9 +2557,10 @@ def makeMrkvHist(self):
# Add histories until each state has been visited at least state_T_min times
while go:
draws = Uniform(seed=loops).draw(N=self.act_T_orig)
for s in range(draws.size): # Add act_T_orig more periods
markov_process = MarkovProcess(self.MrkvArray,seed=loops)
for s in range(self.act_T_orig): # Add act_T_orig more periods
MrkvNow_hist[t] = MrkvNow
MrkvNow = np.searchsorted(cutoffs[MrkvNow, :], draws[s])
MrkvNow = markov_process.draw(MrkvNow)
t += 1

# Calculate the empirical distribution
Expand Down Expand Up @@ -2905,15 +2906,16 @@ def makeMrkvHist(self):
"""
# Initialize the Markov history and set up transitions
MrkvNow_hist = np.zeros(self.act_T, dtype=int)
cutoffs = np.cumsum(self.MrkvArray, axis=1)
MrkvNow = self.MrkvNow_init
t = 0

# Add histories until each state has been visited at least state_T_min times
draws = Uniform(seed=0).draw(N=self.act_T)
for s in range(draws.size): # Add act_T_orig more periods

markov_process = MarkovProcess(self.MrkvArray, seed= 0)
for s in range(self.act_T): # Add act_T_orig more periods
MrkvNow_hist[t] = MrkvNow
MrkvNow = np.searchsorted(cutoffs[MrkvNow, :], draws[s])
MrkvNow = markov_process.draw(MrkvNow)
t += 1

# Store the result as attribute of self
Expand Down
25 changes: 6 additions & 19 deletions HARK/ConsumptionSaving/ConsMarkovModel.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

from HARK.distribution import (
DiscreteDistribution,
MarkovProcess,
Uniform,
calcExpectation
)
Expand Down Expand Up @@ -1057,15 +1058,6 @@ def getMarkovStates(self):
-------
None
"""
# Draw random numbers that will be used to determine the next Markov state
if self.global_markov:
base_draws = np.ones(self.AgentCount) * Uniform(
seed=self.RNG.randint(0, 2 ** 31 - 1)
).draw(1)
else:
base_draws = Uniform(seed=self.RNG.randint(0, 2 ** 31 - 1)).draw(
self.AgentCount
)
dont_change = (
self.t_age == 0
) # Don't change Markov state for those who were just born (unless global_markov)
Expand All @@ -1076,20 +1068,15 @@ def getMarkovStates(self):
J = self.MrkvArray[0].shape[0]
MrkvPrev = self.shocks["MrkvNow"]
MrkvNow = np.zeros(self.AgentCount, dtype=int)
MrkvBoolArray = np.zeros((J, self.AgentCount))

for j in range(J):
MrkvBoolArray[j, :] = MrkvPrev == j

# Draw new Markov states for each agent
for t in range(self.T_cycle):
Cutoffs = np.cumsum(self.MrkvArray[t], axis=1)
markov_process = MarkovProcess(
self.MrkvArray[t],
seed=self.RNG.randint(0, 2 ** 31 - 1)
)
right_age = self.t_cycle == t
for j in range(J):
these = np.logical_and(right_age, MrkvBoolArray[j, :])
MrkvNow[these] = np.searchsorted(
Cutoffs[j, :], base_draws[these]
).astype(int)
MrkvNow[right_age] = markov_process.draw(MrkvPrev[right_age])
if not self.global_markov:
MrkvNow[dont_change] = MrkvPrev[dont_change]

Expand Down
13 changes: 7 additions & 6 deletions HARK/ConsumptionSaving/ConsRepAgentModel.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from builtins import range
import numpy as np
from HARK.interpolation import LinearInterp, MargValueFuncCRRA
from HARK.distribution import Uniform
from HARK.distribution import (MarkovProcess, Uniform)
from HARK.ConsumptionSaving.ConsIndShockModel import (
IndShockConsumerType,
ConsumerSolution,
Expand Down Expand Up @@ -349,12 +349,13 @@ def getShocks(self):
-------
None
"""
cutoffs = np.cumsum(self.MrkvArray[self.MrkvNow, :])
MrkvDraw = Uniform(seed=self.RNG.randint(0, 2 ** 31 - 1)).draw(N=1)
self.MrkvNow = np.searchsorted(cutoffs, MrkvDraw)
self.MrkvNow = MarkovProcess(
self.MrkvArray,
seed=self.RNG.randint(0, 2 ** 31 - 1)
).draw(self.MrkvNow)

t = self.t_cycle[0]
i = self.MrkvNow[0]
i = self.MrkvNow
IncShkDstnNow = self.IncShkDstn[t - 1][i] # set current income distribution
PermGroFacNow = self.PermGroFac[t - 1][i] # and permanent growth factor
# Get random draws of income shocks from the discrete distribution
Expand All @@ -379,7 +380,7 @@ def getControls(self):
None
"""
t = self.t_cycle[0]
i = self.MrkvNow[0]
i = self.MrkvNow
self.controls["cNrmNow"] = self.solution[t].cFunc[i](self.mNrmNow)


Expand Down