diff --git a/lectures/_static/lecture_specific/coleman_policy_iter/solve_time_iter.py b/lectures/_static/lecture_specific/coleman_policy_iter/solve_time_iter.py
index 009170e67..1440489c2 100644
--- a/lectures/_static/lecture_specific/coleman_policy_iter/solve_time_iter.py
+++ b/lectures/_static/lecture_specific/coleman_policy_iter/solve_time_iter.py
@@ -17,10 +17,9 @@ def solve_model_time_iter(model,    # Class with model information
             print(f"Error at iteration {i} is {error}.")
         σ = σ_new

-    if i == max_iter:
+    if error > tol:
         print("Failed to converge!")
-
-    if verbose and i < max_iter:
+    elif verbose:
         print(f"\nConverged in {i} iterations.")

     return σ_new
diff --git a/lectures/_static/lecture_specific/optgrowth/solve_model.py b/lectures/_static/lecture_specific/optgrowth/solve_model.py
index 947d78056..06333effc 100644
--- a/lectures/_static/lecture_specific/optgrowth/solve_model.py
+++ b/lectures/_static/lecture_specific/optgrowth/solve_model.py
@@ -21,10 +21,9 @@ def solve_model(og,
             print(f"Error at iteration {i} is {error}.")
         v = v_new

-    if i == max_iter:
+    if error > tol:
         print("Failed to converge!")
-
-    if verbose and i < max_iter:
+    elif verbose:
         print(f"\nConverged in {i} iterations.")

     return v_greedy, v_new
diff --git a/lectures/cake_eating_numerical.md b/lectures/cake_eating_numerical.md
index c98227e7b..8244e7b16 100644
--- a/lectures/cake_eating_numerical.md
+++ b/lectures/cake_eating_numerical.md
@@ -88,11 +88,11 @@ The basic idea is:

 1. Take an arbitary intial guess of $v$.
 1. Obtain an update $w$ defined by
-    
+
     $$
     w(x) = \max_{0\leq c \leq x} \{u(c) + \beta v(x-c)\}
     $$
-    
+
 1. Stop if $w$ is approximately equal to $v$, otherwise set
    $v=w$ and go back to step 2.
@@ -299,10 +299,9 @@ def compute_value_function(ce,

         v = v_new

-    if i == max_iter:
+    if error > tol:
         print("Failed to converge!")
-
-    if verbose and i < max_iter:
+    elif verbose:
         print(f"\nConverged in {i} iterations.")

     return v_new
@@ -657,10 +656,9 @@ def iterate_euler_equation(ce,

         σ = σ_new

-    if i == max_iter:
+    if error > tol:
         print("Failed to converge!")
-
-    if verbose and i < max_iter:
+    elif verbose:
         print(f"\nConverged in {i} iterations.")

     return σ
@@ -685,4 +683,4 @@ plt.show()
 ```

 ```{solution-end}
-```
\ No newline at end of file
+```
diff --git a/lectures/career.md b/lectures/career.md
index 79613518a..f7a5174d8 100644
--- a/lectures/career.md
+++ b/lectures/career.md
@@ -300,12 +300,11 @@ def solve_model(cw,
             print(f"Error at iteration {i} is {error}.")
         v = v_new

-    if i == max_iter and error > tol:
+    if error > tol:
         print("Failed to converge!")
-    else:
-        if verbose:
-            print(f"\nConverged in {i} iterations.")
+    elif verbose:
+        print(f"\nConverged in {i} iterations.")

     return v_new
@@ -545,4 +544,4 @@ has become more concentrated around the mean, making high-paying jobs
 less realistic.

 ```{solution-end}
-```
\ No newline at end of file
+```
diff --git a/lectures/ifp_advanced.md b/lectures/ifp_advanced.md
index 986ee104f..da118b4c3 100644
--- a/lectures/ifp_advanced.md
+++ b/lectures/ifp_advanced.md
@@ -494,10 +494,9 @@ def solve_model_time_iter(model,    # Class with model information
             print(f"Error at iteration {i} is {error}.")
         a_vec, σ_vec = np.copy(a_new), np.copy(σ_new)

-    if i == max_iter:
+    if error > tol:
         print("Failed to converge!")
-
-    if verbose and i < max_iter:
+    elif verbose:
         print(f"\nConverged in {i} iterations.")

     return a_new, σ_new
diff --git a/lectures/jv.md b/lectures/jv.md
index b53db141b..8c87951b4 100644
--- a/lectures/jv.md
+++ b/lectures/jv.md
@@ -362,10 +362,9 @@ def solve_model(jv,
             print(f"Error at iteration {i} is {error}.")
         v = v_new

-    if i == max_iter:
+    if error > tol:
         print("Failed to converge!")
-
-    if verbose and i < max_iter:
+    elif verbose:
         print(f"\nConverged in {i} iterations.")

     return v_new
@@ -569,4 +568,4 @@ This seems reasonable and helps us confirm that our dynamic programming
 solutions are probably correct.

 ```{solution-end}
-```
\ No newline at end of file
+```
diff --git a/lectures/mccall_correlated.md b/lectures/mccall_correlated.md
index 125cfeef7..3064f7cbf 100644
--- a/lectures/mccall_correlated.md
+++ b/lectures/mccall_correlated.md
@@ -281,10 +281,9 @@ def compute_fixed_point(js,
             print(f"Error at iteration {i} is {error}.")
         f_in[:] = f_out

-    if i == max_iter:
+    if error > tol:
         print("Failed to converge!")
-
-    if verbose and i < max_iter:
+    elif verbose:
         print(f"\nConverged in {i} iterations.")

     return f_out
@@ -453,4 +452,4 @@ plt.show()
 The figure shows that more patient individuals tend to wait longer before accepting an offer.

 ```{solution-end}
-```
\ No newline at end of file
+```
diff --git a/lectures/mccall_model.md b/lectures/mccall_model.md
index 159344d0a..a13eef52f 100644
--- a/lectures/mccall_model.md
+++ b/lectures/mccall_model.md
@@ -91,7 +91,7 @@ economists to inject randomness into their models.)

 In this lecture, we adopt the following simple environment:

-* $\{s_t\}$ is IID, with $q(s)$ being the probability of observing state $s$ in $\mathbb{S}$ at each point in time, 
+* $\{s_t\}$ is IID, with $q(s)$ being the probability of observing state $s$ in $\mathbb{S}$ at each point in time,
 * the agent observes $s_t$ at the start of $t$ and hence knows $w_t = w(s_t)$,
 * the set $\mathbb S$ is finite.
@@ -120,7 +120,7 @@ The variable $y_t$ is income, equal to
 * unemployment compensation $c$ when unemployed

 The worker knows that $\{s_t\}$ is IID with common
-distribution $q$ and uses knowledge when he or she computes mathematical expectations of various random variables that are functions of 
+distribution $q$ and uses knowledge when he or she computes mathematical expectations of various random variables that are functions of
 $s_t$.
@@ -134,7 +134,7 @@ To decide optimally in the face of this trade-off, we use dynamic programming.

 Dynamic programming can be thought of as a two-step procedure that

-1. first assigns values to "states" 
+1. first assigns values to "states"
 1. then deduces optimal actions given those values

 We'll go through these steps in turn.
@@ -160,7 +160,7 @@ Let $v^*(s)$ be the optimal value of the problem when $s \in \mathbb{S}$ for a


-Thus, the function $v^*(s)$ is the maximum value of objective 
+Thus, the function $v^*(s)$ is the maximum value of objective
 {eq}`objective` for a previously unemployed worker who has offer $w(s)$ in hand
 and has yet to choose whether to accept it.

 Notice that $v^*(s)$ is part of the **solution** of the problem, so it isn't obvious that it is a good idea to start working on the problem by focusing on $v^*(s)$.
@@ -168,8 +168,8 @@ Notice that $v^*(s)$ is part of the **solution** of the problem, so it isn't obv
 There is a chicken and egg problem: we don't know how to compute $v^*(s)$
 because we don't yet know what decisions are optimal and what aren't!

-But it turns out to be a really good idea by asking what properties the optimal value function $v^*(s)$ must have in order it 
-to qualify as an optimal value function. 
+But it turns out to be a really good idea by asking what properties the optimal value function $v^*(s)$ must have in order it
+to qualify as an optimal value function.

 Think of $v^*$ as a function that assigns to each possible state
 $s$ the maximal expected discounted income stream that can be obtained with that offer in
@@ -192,7 +192,7 @@ for every possible $s$ in $\mathbb S$.

 Notice how the function $v^*(s)$ appears on both the right and left sides of equation {eq}`odu_pv` -- that is why it is called a **functional equation**, i.e., an equation that restricts a **function**.

-This important equation is a version of a **Bellman equation**, an equation that is 
+This important equation is a version of a **Bellman equation**, an equation that is
 ubiquitous in economic dynamics and other fields involving planning over time.

 The intuition behind it is as follows:
@@ -218,7 +218,7 @@ Once we have this function in hand we can figure out how behave optimally (i.e.

 All we have to do is select the maximal choice on the r.h.s. of {eq}`odu_pv`.

-The optimal action in state $s$ can be thought of as a part of a **policy** that maps a 
+The optimal action in state $s$ can be thought of as a part of a **policy** that maps a
 state into an action.

 Given *any* $s$, we can read off the corresponding best choice (accept or
@@ -351,7 +351,7 @@ Moreover, it's immediate from the definition of $T$ that this fixed point is
 $v^*$.

 A second implication of the Banach contraction mapping theorem is that
-$\{ T^k v \}$ converges to the fixed point $v^*$ regardless of the initial 
+$\{ T^k v \}$ converges to the fixed point $v^*$ regardless of the initial
 $v \in \mathbb R^n$.

 ### Implementation
@@ -386,7 +386,7 @@ We are going to use Numba to accelerate our code.

 * See, in particular, the discussion of `@jitclass` in [our lecture on Numba](https://python-programming.quantecon.org/numba.html).

-The following helps Numba by providing some information about types 
+The following helps Numba by providing some information about types

 ```{code-cell} python3
 mccall_data = [
@@ -490,15 +490,15 @@ def compute_reservation_wage(mcm,
     n = len(w)
     v = w / (1 - β)          # initial guess
     v_next = np.empty_like(v)
-    i = 0
+    j = 0
     error = tol + 1
-    while i < max_iter and error > tol:
+    while j < max_iter and error > tol:

         for i in range(n):
             v_next[i] = np.max(mcm.state_action_values(i, v))

         error = np.max(np.abs(v_next - v))
-        i += 1
+        j += 1

         v[:] = v_next          # copy contents into v
diff --git a/lectures/navy_captain.md b/lectures/navy_captain.md
index 92078cc81..0149d50c1 100644
--- a/lectures/navy_captain.md
+++ b/lectures/navy_captain.md
@@ -538,7 +538,7 @@ def solve_model(wf, tol=1e-4, max_iter=1000):
         i += 1
         h = h_new

-    if i == max_iter:
+    if error > tol:
         print("Failed to converge!")

     return h_new
@@ -621,7 +621,7 @@ conditioning on knowing for sure that nature has selected $f_{0}$,
 in the first case, or $f_{1}$, in the second case.

 1. under $f_{0}$,
-    
+
 $$
 V^{0}\left(\pi\right)=\begin{cases}
 0 & \text{if }\alpha\leq\pi,\\
@@ -629,9 +629,9 @@ in the first case, or $f_{1}$, in the second case.
 \bar L_{1} & \text{if }\pi<\beta.
 \end{cases}
 $$
-    
+
 1. under $f_{1}$
-    
+
 $$
 V^{1}\left(\pi\right)=\begin{cases}
 \bar L_{0} & \text{if }\alpha\leq\pi,\\
@@ -639,7 +639,7 @@ in the first case, or $f_{1}$, in the second case.
 0 & \text{if }\pi<\beta.
 \end{cases}
 $$
-    
+
 where
 $\pi^{\prime}=\frac{\pi f_{0}\left(z^{\prime}\right)}{\pi f_{0}\left(z^{\prime}\right)+\left(1-\pi\right)f_{1}\left(z^{\prime}\right)}$.
@@ -1118,4 +1118,3 @@ plt.title('Uncond. distribution of log likelihood ratio at frequentist t')

 plt.show()
 ```
-
diff --git a/lectures/odu.md b/lectures/odu.md
index ec9d5db03..c9b4b175c 100644
--- a/lectures/odu.md
+++ b/lectures/odu.md
@@ -149,10 +149,10 @@ $$

-The worker's time $t$ subjective belief about the the distribution of $W_t$ is 
+The worker's time $t$ subjective belief about the the distribution of $W_t$ is

 $$
-\pi_t f + (1 - \pi_t) g, 
+\pi_t f + (1 - \pi_t) g,
 $$

 where $\pi_t$ updates via
@@ -427,10 +427,9 @@ def solve_model(sp,
             print(f"Error at iteration {i} is {error}.")
         v = v_new

-    if i == max_iter:
+    if error > tol:
         print("Failed to converge!")
-
-    if verbose and i < max_iter:
+    elif verbose:
         print(f"\nConverged in {i} iterations.")

@@ -731,10 +730,9 @@ def solve_wbar(sp,
             print(f"Error at iteration {i} is {error}.")
         w = w_new

-    if i == max_iter:
+    if error > tol:
         print("Failed to converge!")
-
-    if verbose and i < max_iter:
+    elif verbose:
         print(f"\nConverged in {i} iterations.")

     return w_new
@@ -1178,4 +1176,3 @@ after having acquired less information about the wage distribution.
 ```{code-cell} python3
 job_search_example(1, 1, 3, 1.2, c=0.1)
 ```
-
diff --git a/lectures/wald_friedman.md b/lectures/wald_friedman.md
index b72952bfa..f99186d89 100644
--- a/lectures/wald_friedman.md
+++ b/lectures/wald_friedman.md
@@ -526,7 +526,7 @@ def solve_model(wf, tol=1e-4, max_iter=1000):
         i += 1
         h = h_new

-    if i == max_iter:
+    if error > tol:
         print("Failed to converge!")

     return h_new
@@ -902,11 +902,11 @@ Wald summarizes Neyman and Pearson's setup as follows:

 > Neyman and Pearson show that a region consisting of all samples
 > $(z_1, z_2, \ldots, z_n)$ which satisfy the inequality
-> 
+>
 > $$ \frac{ f_1(z_1) \cdots f_1(z_n)}{f_0(z_1) \cdots f_0(z_n)} \geq k $$
-> 
+>
 > is a most powerful critical region for testing the hypothesis
 > $H_0$ against the alternative hypothesis $H_1$. The term
 > $k$ on the right side is a constant chosen so that the region
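Every solver touched above receives the same fix: the failure branch now tests `error > tol` directly instead of `i == max_iter`, so a run that converges on exactly the last permitted iteration is no longer reported as a failure, and the success message becomes the `elif` branch. The sketch below is a minimal, self-contained illustration of the post-change pattern; the function name `solve_fixed_point` and the operator `T` are invented for this example and do not appear in the lectures.

```python
import numpy as np

def solve_fixed_point(T, v_init, tol=1e-4, max_iter=1000,
                      verbose=True, print_skip=25):
    """Iterate v <- T(v) until successive iterates differ by less than tol."""
    v = v_init
    i = 0
    error = tol + 1

    while i < max_iter and error > tol:
        v_new = T(v)
        error = np.max(np.abs(v - v_new))
        i += 1
        if verbose and i % print_skip == 0:
            print(f"Error at iteration {i} is {error}.")
        v = v_new

    # Judge success by the error itself, not by whether the counter
    # happened to reach max_iter: the pre-change test misfired when
    # convergence occurred on exactly the final permitted iteration.
    if error > tol:
        print("Failed to converge!")
    elif verbose:
        print(f"\nConverged in {i} iterations.")

    return v

# The map T(v) = 0.5 v + 1 is a contraction with fixed point 2.
v_star = solve_fixed_point(lambda v: 0.5 * v + 1, np.zeros(3))
```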
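The `compute_reservation_wage` hunk in `lectures/mccall_model.md` is a different kind of fix: the outer loop counter `i` was overwritten by the inner `for i in range(n)` loop on every pass, so the `while i < max_iter` guard never tracked the number of value-function updates; renaming the counter to `j` removes the shadowing. Below is a minimal sketch of the corrected structure with illustrative names only (`iterate_componentwise` and `update_row` are not taken from the lecture).

```python
import numpy as np

def iterate_componentwise(update_row, v_init, tol=1e-4, max_iter=1000):
    """Update each component of v in an inner loop while counting outer passes."""
    v = np.asarray(v_init, dtype=float)
    v_next = np.empty_like(v)
    n = len(v)

    j = 0                      # outer pass counter, distinct from the inner index
    error = tol + 1
    while j < max_iter and error > tol:
        for i in range(n):     # inner index i no longer clobbers the counter
            v_next[i] = update_row(i, v)
        error = np.max(np.abs(v_next - v))
        j += 1
        v[:] = v_next          # copy contents into v
    return v, j

# Componentwise contraction v[i] <- 0.5 * v[i] + i has fixed point v[i] = 2 * i.
v_star, passes = iterate_componentwise(lambda i, v: 0.5 * v[i] + i, np.zeros(4))
```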