diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 95cac53af..3a1462850 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,4 @@ -name: Build Project [myst] +name: Build Project [using jupyter-book] on: [push] jobs: tests: @@ -15,21 +15,24 @@ jobs: python-version: 3.8 environment-file: environment.yml activate-environment: qe-lectures - - name: Install sphinxcontrib-tomyst - shell: bash -l {0} - run: | - git clone https://github.com/QuantEcon/sphinxcontrib-tomyst - cd sphinxcontrib-tomyst && python setup.py install - name: Install quantecon-book-theme shell: bash -l {0} run: | git clone https://github.com/QuantEcon/quantecon-book-theme cd quantecon-book-theme python setup.py install + cd ../ && rm -rf quantecon-book-theme + - name: Install sphinx-multitoc-numbering + shell: bash -l {0} + run: | + git clone https://github.com/executablebooks/sphinx-multitoc-numbering.git + cd sphinx-multitoc-numbering + python setup.py install + cd ../ && rm -rf sphinx-multitoc-numbering - name: Install Dependencies shell: bash -l {0} run: | - pip install myst-nb + pip install jupyter-book - name: Display Conda Environment Versions shell: bash -l {0} run: conda list @@ -39,8 +42,7 @@ jobs: - name: Build HTML shell: bash -l {0} run: | - pwd - make html + jb build lectures --path-output ./ - name: Preview Deploy to Netlify uses: nwtgck/actions-netlify@v1.1 with: diff --git a/Makefile b/Makefile deleted file mode 100644 index 2812a7dec..000000000 --- a/Makefile +++ /dev/null @@ -1,88 +0,0 @@ -SHELL := bash -# -# Makefile for Sphinx Extension Test Cases -# - -# You can set these variables from the command line. 
-SPHINXOPTS = -c "./" -P -SPHINXBUILD = python -msphinx -SPHINXPROJ = lecture-python-programming -SOURCEDIR = source/rst -BUILDDIR = _build -BUILDWEBSITE = _build/website -BUILDCOVERAGE = _build/coverage -BUILDPDF = _build/pdf -PORT = 8890 -FILES = -THEMEPATH = theme/minimal -TEMPLATEPATH = $(THEMEPATH)/templates - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(FILES) $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Install requiremenets for building lectures. -setup: - pip install -r requirements.txt - -preview: -ifeq (,$(filter $(target),website Website)) - cd $(BUILDWEBSITE)/jupyter_html && python -m http.server $(PORT) -else -ifdef lecture - cd $(BUILDDIR)/jupyter/ && jupyter notebook --port $(PORT) --port-retries=0 $(basename $(lecture)).ipynb -else - cd $(BUILDDIR)/jupyter/ && jupyter notebook --port $(PORT) --port-retries=0 -endif -endif - -clean-coverage: - rm -rf $(BUILDCOVERAGE) - -clean-website: - rm -rf $(BUILDWEBSITE) - -clean-pdf: - rm -rf $(BUILDDIR)/jupyterpdf - -coverage: -ifneq ($(strip $(parallel)),) - @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDCOVERAGE)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_make_coverage=1 -D jupyter_execute_notebooks=1 -D jupyter_ignore_skip_test=0 -D jupyter_theme_path="$(THEMEPATH)" -D jupyter_template_path="$(TEMPLATEPATH)" -D jupyter_template_coverage_file_path="error_report_template.html" -D jupyter_number_workers=$(parallel) -else - @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDCOVERAGE)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_make_coverage=1 -D jupyter_execute_notebooks=1 -D jupyter_ignore_skip_test=0 -D jupyter_theme_path="$(THEMEPATH)" -D jupyter_template_path="$(TEMPLATEPATH)" -D jupyter_template_coverage_file_path="error_report_template.html" -endif - -website: - echo "Theme: $(THEMEPATH)" -ifneq ($(strip $(parallel)),) - @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDWEBSITE)" $(FILES) $(SPHINXOPTS) $(O) -D 
jupyter_make_site=1 -D jupyter_generate_html=1 -D jupyter_download_nb=1 -D jupyter_execute_notebooks=1 -D jupyter_target_html=1 -D jupyter_download_nb_image_urlpath="https://s3-ap-southeast-2.amazonaws.com/python-programming.quantecon.org/_static/" -D jupyter_images_markdown=0 -D jupyter_theme_path="$(THEMEPATH)" -D jupyter_template_path="$(TEMPLATEPATH)" -D jupyter_html_template="html.tpl" -D jupyter_download_nb_urlpath="https://python-programming.quantecon.org/" -D jupyter_coverage_dir=$(BUILDCOVERAGE) -D jupyter_number_workers=$(parallel) - -else - @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDWEBSITE)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_make_site=1 -D jupyter_generate_html=1 -D jupyter_download_nb=1 -D jupyter_execute_notebooks=1 -D jupyter_target_html=1 -D jupyter_download_nb_image_urlpath="https://s3-ap-southeast-2.amazonaws.com/python-programming.quantecon.org/_static/" -D jupyter_images_markdown=0 -D jupyter_theme_path="$(THEMEPATH)" -D jupyter_template_path="$(TEMPLATEPATH)" -D jupyter_html_template="html.tpl" -D jupyter_download_nb_urlpath="https://python-programming.quantecon.org/" -D jupyter_coverage_dir=$(BUILDCOVERAGE) -endif - -pdf: -ifneq ($(strip $(parallel)),) - @$(SPHINXBUILD) -M jupyterpdf "$(SOURCEDIR)" "$(BUILDDIR)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_latex_template="latex.tpl" -D jupyter_theme_path="$(THEMEPATH)" -D jupyter_template_path="$(TEMPLATEPATH)" -D jupyter_latex_template_book="latex_book.tpl" -D jupyter_images_markdown=1 -D jupyter_execute_notebooks=1 -D jupyter_pdf_book=1 -D jupyter_target_pdf=1 -D jupyter_number_workers=$(parallel) - -else - @$(SPHINXBUILD) -M jupyterpdf "$(SOURCEDIR)" "$(BUILDDIR)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_theme_path="$(THEMEPATH)" -D jupyter_template_path="$(TEMPLATEPATH)" -D jupyter_latex_template="latex.tpl" -D jupyter_latex_template_book="latex_book.tpl" -D jupyter_images_markdown=1 -D jupyter_execute_notebooks=1 -D jupyter_pdf_book=1 -D jupyter_target_pdf=1 -endif - -constructor-pdf: 
-ifneq ($(strip $(parallel)),) - @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDPDF)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_images_markdown=1 -D jupyter_execute_notebooks=1 -D jupyter_number_workers=$(parallel) - -else - @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDPDF)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_images_markdown=1 -D jupyter_execute_notebooks=1 -endif - -notebooks: - make jupyter - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_allow_html_only=1 diff --git a/conf.py b/conf.py deleted file mode 100644 index 698812814..000000000 --- a/conf.py +++ /dev/null @@ -1,371 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# -# QuantEcon.lectures-python documentation build configuration file, created by -# sphinx-quickstart on Mon Feb 13 14:28:35 2017. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import nbformat -import datetime - -now = datetime.datetime.now() - - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = '1.5' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. 
-extensions = [ - 'sphinx.ext.mathjax', - 'sphinxcontrib.bibtex', - 'IPython.sphinxext.ipython_console_highlighting', - # Custom Sphinx Extensions - 'myst_nb' -] - -# Retired Extensions but may be useful in Future - - # 'matplotlib.sphinxext.plot_directive', - # 'matplotlib.sphinxext.only_directives', - # 'sphinxcontrib.tikz', - # 'sphinx.ext.graphviz', - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' -master_pdf_doc = 'index' - -# General information about the project. -project = 'QuantEcon.lectures-python3' -copyright = '2020, Thomas J. Sargent and John Stachurski' -author = 'Thomas J. Sargent and John Stachurski' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '%s-%s-%s' % (now.year, now.strftime("%b"), now.day) -# The full version, including alpha/beta/rc tags. -release = version - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. 
-exclude_patterns = ['_build', '_static'] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - -# Add rst prolog -rst_prolog = """ -.. highlight:: python3 -""" - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'quantecon_book_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -html_theme_path = ['_themes'] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -html_title = "Quantitative Economics with Python" - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = { -# 'index': ['py_layout.html'], -# } - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). 
-#html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' -#html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -#html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'QuantEconlectures-python3doc' - -# Tikz HTML configuration for rendering images -tikz_latex_preamble = r""" - \usetikzlibrary{arrows} - \usetikzlibrary{calc} - \usetikzlibrary{intersections} - \usetikzlibrary{decorations} - \usetikzlibrary{decorations.pathreplacing} -""" - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -'pointsize': '11pt', - -# Additional stuff for the LaTeX preamble. 
-'preamble': r""" -\usepackage{amsmath, amssymb} -\usepackage{mathrsfs} - -\usepackage{tikz} -\usetikzlibrary{arrows} -\usetikzlibrary{calc} -\usetikzlibrary{intersections} -\usetikzlibrary{decorations} -\usepackage{pgf} -\usepackage{pgfplots} - - -\usepackage{bbm} -\newcommand{\RR}{\mathbbm R} -\newcommand{\NN}{\mathbbm N} -\newcommand{\PP}{\mathbbm P} -\newcommand{\EE}{\mathbbm E \,} -\newcommand{\XX}{\mathbbm X} -\newcommand{\ZZ}{\mathbbm Z} -\newcommand{\QQ}{\mathbbm Q} - -\newcommand{\fF}{\mathcal F} -\newcommand{\dD}{\mathcal D} -\newcommand{\lL}{\mathcal L} -\newcommand{\gG}{\mathcal G} -\newcommand{\hH}{\mathcal H} -\newcommand{\nN}{\mathcal N} -\newcommand{\pP}{\mathcal P} - -\DeclareMathOperator{\trace}{trace} -\DeclareMathOperator{\Var}{Var} -\DeclareMathOperator{\Span}{span} -\DeclareMathOperator{\proj}{proj} -\DeclareMathOperator{\col}{col} -\DeclareMathOperator*{\argmax}{arg\,max} -\DeclareMathOperator*{\argmin}{arg\,min} - -\usepackage{makeidx} -\makeindex -""", - -# Latex figure (float) alignment (Could use 'H' to force the placement of figures) -'figure_align': 'H',#'htbp', - -#Add Frontmatter before TOC -'tableofcontents' : r"""\newpage -\thispagestyle{empty} -\chapter*{Preface} -\large -This \textbf{pdf} presents a series of lectures on quantitative economic -modeling, designed and written by \href{http://www.tomsargent.com/}{Thomas J. Sargent} and \href{http://johnstachurski.net}{John Stachurski}. -The primary programming languages are \href{https://www.python.org}{Python} and \href{http://julialang.org/}{Julia}. -You can send feedback to the authors via contact@quantecon.org. 
- -\vspace{5em} - -\begin{leftbar} -\textbf{Note: You are currently viewing an automatically generated -pdf version of our online lectures,} which are located at - -\vspace{2em} - -\begin{center} - \texttt{https://lectures.quantecon.org} -\end{center} - -\vspace{2em} - -Please visit the website for more information on the aims and scope of the -lectures and the two language options (Julia or Python). - -\vspace{1em} - -Due to automatic generation of this pdf, \textbf{presentation quality is likely -to be lower than that of the website}. - -\end{leftbar} - -\normalsize - -\sphinxtableofcontents -""" -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_pdf_doc, 'QuantEconlectures-python3.tex', 'QuantEcon.lectures-python3 PDF', - 'Thomas J. Sargent and John Stachurski', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. 
-#latex_domain_indices = True - -# ------------------ -# Linkcheck Options -# ------------------ - -linkcheck_ignore = [r'https:\/\/github\.com\/.*?#.*', #Anchors on Github seem to create issues with linkchecker - r'https://economics.mit.edu/faculty/acemoglu/data/ajr2001', - r'https://economics.mit.edu/files/4123'] - -linkcheck_timeout = 30 - diff --git a/lectures/_config.yml b/lectures/_config.yml new file mode 100644 index 000000000..78b5f9397 --- /dev/null +++ b/lectures/_config.yml @@ -0,0 +1,14 @@ +title: Quantitative Economics with Python +author: Thomas J. Sargent & John Stachurski +logo: _static/qe-logo-large.png + +execute: + execute_notebooks: "cache" + timeout: 60 +# run_in_temp: true + +sphinx: + extra_extensions: [sphinx_multitoc_numbering] + config: + html_theme: quantecon_book_theme + html_static_path: ['_static'] \ No newline at end of file diff --git a/source/rst/_static/includes/header.raw b/lectures/_static/includes/header.raw similarity index 100% rename from source/rst/_static/includes/header.raw rename to lectures/_static/includes/header.raw diff --git a/source/rst/_static/includes/lecture_howto_py.raw b/lectures/_static/includes/lecture_howto_py.raw similarity index 100% rename from source/rst/_static/includes/lecture_howto_py.raw rename to lectures/_static/includes/lecture_howto_py.raw diff --git a/source/rst/_static/lecture_specific/aiyagari/aiyagari_obit.pdf b/lectures/_static/lecture_specific/aiyagari/aiyagari_obit.pdf similarity index 100% rename from source/rst/_static/lecture_specific/aiyagari/aiyagari_obit.pdf rename to lectures/_static/lecture_specific/aiyagari/aiyagari_obit.pdf diff --git a/source/rst/_static/lecture_specific/arellano/arellano_bond_prices.png b/lectures/_static/lecture_specific/arellano/arellano_bond_prices.png similarity index 100% rename from source/rst/_static/lecture_specific/arellano/arellano_bond_prices.png rename to lectures/_static/lecture_specific/arellano/arellano_bond_prices.png diff --git 
a/source/rst/_static/lecture_specific/arellano/arellano_bond_prices_2.png b/lectures/_static/lecture_specific/arellano/arellano_bond_prices_2.png similarity index 100% rename from source/rst/_static/lecture_specific/arellano/arellano_bond_prices_2.png rename to lectures/_static/lecture_specific/arellano/arellano_bond_prices_2.png diff --git a/source/rst/_static/lecture_specific/arellano/arellano_default_probs.png b/lectures/_static/lecture_specific/arellano/arellano_default_probs.png similarity index 100% rename from source/rst/_static/lecture_specific/arellano/arellano_default_probs.png rename to lectures/_static/lecture_specific/arellano/arellano_default_probs.png diff --git a/source/rst/_static/lecture_specific/arellano/arellano_time_series.png b/lectures/_static/lecture_specific/arellano/arellano_time_series.png similarity index 100% rename from source/rst/_static/lecture_specific/arellano/arellano_time_series.png rename to lectures/_static/lecture_specific/arellano/arellano_time_series.png diff --git a/source/rst/_static/lecture_specific/arellano/arellano_value_funcs.png b/lectures/_static/lecture_specific/arellano/arellano_value_funcs.png similarity index 100% rename from source/rst/_static/lecture_specific/arellano/arellano_value_funcs.png rename to lectures/_static/lecture_specific/arellano/arellano_value_funcs.png diff --git a/source/rst/_static/lecture_specific/cake_eating_numerical/analytical.py b/lectures/_static/lecture_specific/cake_eating_numerical/analytical.py similarity index 100% rename from source/rst/_static/lecture_specific/cake_eating_numerical/analytical.py rename to lectures/_static/lecture_specific/cake_eating_numerical/analytical.py diff --git a/source/rst/_static/lecture_specific/career/career_solutions_ex1_py.png b/lectures/_static/lecture_specific/career/career_solutions_ex1_py.png similarity index 100% rename from source/rst/_static/lecture_specific/career/career_solutions_ex1_py.png rename to 
lectures/_static/lecture_specific/career/career_solutions_ex1_py.png diff --git a/source/rst/_static/lecture_specific/coleman_policy_iter/solve_time_iter.py b/lectures/_static/lecture_specific/coleman_policy_iter/solve_time_iter.py similarity index 100% rename from source/rst/_static/lecture_specific/coleman_policy_iter/solve_time_iter.py rename to lectures/_static/lecture_specific/coleman_policy_iter/solve_time_iter.py diff --git a/source/rst/_static/lecture_specific/finite_markov/hamilton_graph.png b/lectures/_static/lecture_specific/finite_markov/hamilton_graph.png similarity index 100% rename from source/rst/_static/lecture_specific/finite_markov/hamilton_graph.png rename to lectures/_static/lecture_specific/finite_markov/hamilton_graph.png diff --git a/source/rst/_static/lecture_specific/finite_markov/mc_aperiodicity1.gv b/lectures/_static/lecture_specific/finite_markov/mc_aperiodicity1.gv similarity index 100% rename from source/rst/_static/lecture_specific/finite_markov/mc_aperiodicity1.gv rename to lectures/_static/lecture_specific/finite_markov/mc_aperiodicity1.gv diff --git a/source/rst/_static/lecture_specific/finite_markov/mc_aperiodicity1.png b/lectures/_static/lecture_specific/finite_markov/mc_aperiodicity1.png similarity index 100% rename from source/rst/_static/lecture_specific/finite_markov/mc_aperiodicity1.png rename to lectures/_static/lecture_specific/finite_markov/mc_aperiodicity1.png diff --git a/source/rst/_static/lecture_specific/finite_markov/mc_aperiodicity2.gv b/lectures/_static/lecture_specific/finite_markov/mc_aperiodicity2.gv similarity index 100% rename from source/rst/_static/lecture_specific/finite_markov/mc_aperiodicity2.gv rename to lectures/_static/lecture_specific/finite_markov/mc_aperiodicity2.gv diff --git a/source/rst/_static/lecture_specific/finite_markov/mc_aperiodicity2.png b/lectures/_static/lecture_specific/finite_markov/mc_aperiodicity2.png similarity index 100% rename from 
source/rst/_static/lecture_specific/finite_markov/mc_aperiodicity2.png rename to lectures/_static/lecture_specific/finite_markov/mc_aperiodicity2.png diff --git a/source/rst/_static/lecture_specific/finite_markov/mc_ex1_plot.png b/lectures/_static/lecture_specific/finite_markov/mc_ex1_plot.png similarity index 100% rename from source/rst/_static/lecture_specific/finite_markov/mc_ex1_plot.png rename to lectures/_static/lecture_specific/finite_markov/mc_ex1_plot.png diff --git a/source/rst/_static/lecture_specific/finite_markov/mc_irreducibility1.gv b/lectures/_static/lecture_specific/finite_markov/mc_irreducibility1.gv similarity index 100% rename from source/rst/_static/lecture_specific/finite_markov/mc_irreducibility1.gv rename to lectures/_static/lecture_specific/finite_markov/mc_irreducibility1.gv diff --git a/source/rst/_static/lecture_specific/finite_markov/mc_irreducibility1.png b/lectures/_static/lecture_specific/finite_markov/mc_irreducibility1.png similarity index 100% rename from source/rst/_static/lecture_specific/finite_markov/mc_irreducibility1.png rename to lectures/_static/lecture_specific/finite_markov/mc_irreducibility1.png diff --git a/source/rst/_static/lecture_specific/finite_markov/mc_irreducibility2.gv b/lectures/_static/lecture_specific/finite_markov/mc_irreducibility2.gv similarity index 100% rename from source/rst/_static/lecture_specific/finite_markov/mc_irreducibility2.gv rename to lectures/_static/lecture_specific/finite_markov/mc_irreducibility2.gv diff --git a/source/rst/_static/lecture_specific/finite_markov/mc_irreducibility2.png b/lectures/_static/lecture_specific/finite_markov/mc_irreducibility2.png similarity index 100% rename from source/rst/_static/lecture_specific/finite_markov/mc_irreducibility2.png rename to lectures/_static/lecture_specific/finite_markov/mc_irreducibility2.png diff --git a/source/rst/_static/lecture_specific/finite_markov/web_graph.png b/lectures/_static/lecture_specific/finite_markov/web_graph.png 
similarity index 100% rename from source/rst/_static/lecture_specific/finite_markov/web_graph.png rename to lectures/_static/lecture_specific/finite_markov/web_graph.png diff --git a/source/rst/_static/lecture_specific/finite_markov/web_graph_data.txt b/lectures/_static/lecture_specific/finite_markov/web_graph_data.txt similarity index 100% rename from source/rst/_static/lecture_specific/finite_markov/web_graph_data.txt rename to lectures/_static/lecture_specific/finite_markov/web_graph_data.txt diff --git a/source/rst/_static/lecture_specific/heavy_tails/light_heavy_fig1.png b/lectures/_static/lecture_specific/heavy_tails/light_heavy_fig1.png similarity index 100% rename from source/rst/_static/lecture_specific/heavy_tails/light_heavy_fig1.png rename to lectures/_static/lecture_specific/heavy_tails/light_heavy_fig1.png diff --git a/source/rst/_static/lecture_specific/heavy_tails/rank_size_fig1.png b/lectures/_static/lecture_specific/heavy_tails/rank_size_fig1.png similarity index 100% rename from source/rst/_static/lecture_specific/heavy_tails/rank_size_fig1.png rename to lectures/_static/lecture_specific/heavy_tails/rank_size_fig1.png diff --git a/source/rst/_static/lecture_specific/ifp/ifp_agg_savings.png b/lectures/_static/lecture_specific/ifp/ifp_agg_savings.png similarity index 100% rename from source/rst/_static/lecture_specific/ifp/ifp_agg_savings.png rename to lectures/_static/lecture_specific/ifp/ifp_agg_savings.png diff --git a/source/rst/_static/lecture_specific/ifp/ifp_histogram.png b/lectures/_static/lecture_specific/ifp/ifp_histogram.png similarity index 100% rename from source/rst/_static/lecture_specific/ifp/ifp_histogram.png rename to lectures/_static/lecture_specific/ifp/ifp_histogram.png diff --git a/source/rst/_static/lecture_specific/ifp/ifp_policies.png b/lectures/_static/lecture_specific/ifp/ifp_policies.png similarity index 100% rename from source/rst/_static/lecture_specific/ifp/ifp_policies.png rename to 
lectures/_static/lecture_specific/ifp/ifp_policies.png diff --git a/source/rst/_static/lecture_specific/ifp/pi2.pdf b/lectures/_static/lecture_specific/ifp/pi2.pdf similarity index 100% rename from source/rst/_static/lecture_specific/ifp/pi2.pdf rename to lectures/_static/lecture_specific/ifp/pi2.pdf diff --git a/source/rst/_static/lecture_specific/kalman/kalman_ex3.png b/lectures/_static/lecture_specific/kalman/kalman_ex3.png similarity index 100% rename from source/rst/_static/lecture_specific/kalman/kalman_ex3.png rename to lectures/_static/lecture_specific/kalman/kalman_ex3.png diff --git a/source/rst/_static/lecture_specific/kalman/kl_ex1_fig.png b/lectures/_static/lecture_specific/kalman/kl_ex1_fig.png similarity index 100% rename from source/rst/_static/lecture_specific/kalman/kl_ex1_fig.png rename to lectures/_static/lecture_specific/kalman/kl_ex1_fig.png diff --git a/source/rst/_static/lecture_specific/kalman/kl_ex2_fig.png b/lectures/_static/lecture_specific/kalman/kl_ex2_fig.png similarity index 100% rename from source/rst/_static/lecture_specific/kalman/kl_ex2_fig.png rename to lectures/_static/lecture_specific/kalman/kl_ex2_fig.png diff --git a/source/rst/_static/lecture_specific/lake_model/lake_distribution_wages.png b/lectures/_static/lecture_specific/lake_model/lake_distribution_wages.png similarity index 100% rename from source/rst/_static/lecture_specific/lake_model/lake_distribution_wages.png rename to lectures/_static/lecture_specific/lake_model/lake_distribution_wages.png diff --git a/source/rst/_static/lecture_specific/linear_algebra/course_notes.pdf b/lectures/_static/lecture_specific/linear_algebra/course_notes.pdf similarity index 100% rename from source/rst/_static/lecture_specific/linear_algebra/course_notes.pdf rename to lectures/_static/lecture_specific/linear_algebra/course_notes.pdf diff --git a/source/rst/_static/lecture_specific/linear_models/covariance_stationary.png 
b/lectures/_static/lecture_specific/linear_models/covariance_stationary.png similarity index 100% rename from source/rst/_static/lecture_specific/linear_models/covariance_stationary.png rename to lectures/_static/lecture_specific/linear_models/covariance_stationary.png diff --git a/source/rst/_static/lecture_specific/linear_models/ensemble_mean.png b/lectures/_static/lecture_specific/linear_models/ensemble_mean.png similarity index 100% rename from source/rst/_static/lecture_specific/linear_models/ensemble_mean.png rename to lectures/_static/lecture_specific/linear_models/ensemble_mean.png diff --git a/source/rst/_static/lecture_specific/linear_models/iteration_notes.pdf b/lectures/_static/lecture_specific/linear_models/iteration_notes.pdf similarity index 100% rename from source/rst/_static/lecture_specific/linear_models/iteration_notes.pdf rename to lectures/_static/lecture_specific/linear_models/iteration_notes.pdf diff --git a/source/rst/_static/lecture_specific/linear_models/paths_and_stationarity.png b/lectures/_static/lecture_specific/linear_models/paths_and_stationarity.png similarity index 100% rename from source/rst/_static/lecture_specific/linear_models/paths_and_stationarity.png rename to lectures/_static/lecture_specific/linear_models/paths_and_stationarity.png diff --git a/source/rst/_static/lecture_specific/linear_models/solution_lss_ex1.png b/lectures/_static/lecture_specific/linear_models/solution_lss_ex1.png similarity index 100% rename from source/rst/_static/lecture_specific/linear_models/solution_lss_ex1.png rename to lectures/_static/lecture_specific/linear_models/solution_lss_ex1.png diff --git a/source/rst/_static/lecture_specific/linear_models/solution_lss_ex2.png b/lectures/_static/lecture_specific/linear_models/solution_lss_ex2.png similarity index 100% rename from source/rst/_static/lecture_specific/linear_models/solution_lss_ex2.png rename to lectures/_static/lecture_specific/linear_models/solution_lss_ex2.png diff --git 
a/source/rst/_static/lecture_specific/linear_models/tsh.png b/lectures/_static/lecture_specific/linear_models/tsh.png similarity index 100% rename from source/rst/_static/lecture_specific/linear_models/tsh.png rename to lectures/_static/lecture_specific/linear_models/tsh.png diff --git a/source/rst/_static/lecture_specific/linear_models/tsh0.png b/lectures/_static/lecture_specific/linear_models/tsh0.png similarity index 100% rename from source/rst/_static/lecture_specific/linear_models/tsh0.png rename to lectures/_static/lecture_specific/linear_models/tsh0.png diff --git a/source/rst/_static/lecture_specific/linear_models/tsh_hg.png b/lectures/_static/lecture_specific/linear_models/tsh_hg.png similarity index 100% rename from source/rst/_static/lecture_specific/linear_models/tsh_hg.png rename to lectures/_static/lecture_specific/linear_models/tsh_hg.png diff --git a/source/rst/_static/lecture_specific/lqcontrol/solution_lqc_ex1.png b/lectures/_static/lecture_specific/lqcontrol/solution_lqc_ex1.png similarity index 100% rename from source/rst/_static/lecture_specific/lqcontrol/solution_lqc_ex1.png rename to lectures/_static/lecture_specific/lqcontrol/solution_lqc_ex1.png diff --git a/source/rst/_static/lecture_specific/lqcontrol/solution_lqc_ex2.png b/lectures/_static/lecture_specific/lqcontrol/solution_lqc_ex2.png similarity index 100% rename from source/rst/_static/lecture_specific/lqcontrol/solution_lqc_ex2.png rename to lectures/_static/lecture_specific/lqcontrol/solution_lqc_ex2.png diff --git a/source/rst/_static/lecture_specific/lqcontrol/solution_lqc_ex3_g1.png b/lectures/_static/lecture_specific/lqcontrol/solution_lqc_ex3_g1.png similarity index 100% rename from source/rst/_static/lecture_specific/lqcontrol/solution_lqc_ex3_g1.png rename to lectures/_static/lecture_specific/lqcontrol/solution_lqc_ex3_g1.png diff --git a/source/rst/_static/lecture_specific/lqcontrol/solution_lqc_ex3_g10.png 
b/lectures/_static/lecture_specific/lqcontrol/solution_lqc_ex3_g10.png similarity index 100% rename from source/rst/_static/lecture_specific/lqcontrol/solution_lqc_ex3_g10.png rename to lectures/_static/lecture_specific/lqcontrol/solution_lqc_ex3_g10.png diff --git a/source/rst/_static/lecture_specific/lqcontrol/solution_lqc_ex3_g50.png b/lectures/_static/lecture_specific/lqcontrol/solution_lqc_ex3_g50.png similarity index 100% rename from source/rst/_static/lecture_specific/lqcontrol/solution_lqc_ex3_g50.png rename to lectures/_static/lecture_specific/lqcontrol/solution_lqc_ex3_g50.png diff --git a/source/rst/_static/lecture_specific/markov_perf/duopoly_mpe.py b/lectures/_static/lecture_specific/markov_perf/duopoly_mpe.py similarity index 100% rename from source/rst/_static/lecture_specific/markov_perf/duopoly_mpe.py rename to lectures/_static/lecture_specific/markov_perf/duopoly_mpe.py diff --git a/source/rst/_static/lecture_specific/markov_perf/judd_fig1.png b/lectures/_static/lecture_specific/markov_perf/judd_fig1.png similarity index 100% rename from source/rst/_static/lecture_specific/markov_perf/judd_fig1.png rename to lectures/_static/lecture_specific/markov_perf/judd_fig1.png diff --git a/source/rst/_static/lecture_specific/markov_perf/judd_fig2.png b/lectures/_static/lecture_specific/markov_perf/judd_fig2.png similarity index 100% rename from source/rst/_static/lecture_specific/markov_perf/judd_fig2.png rename to lectures/_static/lecture_specific/markov_perf/judd_fig2.png diff --git a/source/rst/_static/lecture_specific/markov_perf/mpe_vs_monopolist.png b/lectures/_static/lecture_specific/markov_perf/mpe_vs_monopolist.png similarity index 100% rename from source/rst/_static/lecture_specific/markov_perf/mpe_vs_monopolist.png rename to lectures/_static/lecture_specific/markov_perf/mpe_vs_monopolist.png diff --git a/source/rst/_static/lecture_specific/mccall/mccall_resw_alpha.py b/lectures/_static/lecture_specific/mccall/mccall_resw_alpha.py similarity index 
100% rename from source/rst/_static/lecture_specific/mccall/mccall_resw_alpha.py rename to lectures/_static/lecture_specific/mccall/mccall_resw_alpha.py diff --git a/source/rst/_static/lecture_specific/mccall/mccall_resw_beta.py b/lectures/_static/lecture_specific/mccall/mccall_resw_beta.py similarity index 100% rename from source/rst/_static/lecture_specific/mccall/mccall_resw_beta.py rename to lectures/_static/lecture_specific/mccall/mccall_resw_beta.py diff --git a/source/rst/_static/lecture_specific/mccall/mccall_resw_c.py b/lectures/_static/lecture_specific/mccall/mccall_resw_c.py similarity index 100% rename from source/rst/_static/lecture_specific/mccall/mccall_resw_c.py rename to lectures/_static/lecture_specific/mccall/mccall_resw_c.py diff --git a/source/rst/_static/lecture_specific/mccall/mccall_resw_gamma.py b/lectures/_static/lecture_specific/mccall/mccall_resw_gamma.py similarity index 100% rename from source/rst/_static/lecture_specific/mccall/mccall_resw_gamma.py rename to lectures/_static/lecture_specific/mccall/mccall_resw_gamma.py diff --git a/source/rst/_static/lecture_specific/mccall/mccall_vf_plot1.py b/lectures/_static/lecture_specific/mccall/mccall_vf_plot1.py similarity index 100% rename from source/rst/_static/lecture_specific/mccall/mccall_vf_plot1.py rename to lectures/_static/lecture_specific/mccall/mccall_vf_plot1.py diff --git a/source/rst/_static/lecture_specific/mccall_model_with_separation/mccall_resw_alpha.png b/lectures/_static/lecture_specific/mccall_model_with_separation/mccall_resw_alpha.png similarity index 100% rename from source/rst/_static/lecture_specific/mccall_model_with_separation/mccall_resw_alpha.png rename to lectures/_static/lecture_specific/mccall_model_with_separation/mccall_resw_alpha.png diff --git a/source/rst/_static/lecture_specific/mccall_model_with_separation/mccall_resw_beta.png b/lectures/_static/lecture_specific/mccall_model_with_separation/mccall_resw_beta.png similarity index 100% rename from 
source/rst/_static/lecture_specific/mccall_model_with_separation/mccall_resw_beta.png rename to lectures/_static/lecture_specific/mccall_model_with_separation/mccall_resw_beta.png diff --git a/source/rst/_static/lecture_specific/mccall_model_with_separation/mccall_resw_c.png b/lectures/_static/lecture_specific/mccall_model_with_separation/mccall_resw_c.png similarity index 100% rename from source/rst/_static/lecture_specific/mccall_model_with_separation/mccall_resw_c.png rename to lectures/_static/lecture_specific/mccall_model_with_separation/mccall_resw_c.png diff --git a/source/rst/_static/lecture_specific/mle/fp.dta b/lectures/_static/lecture_specific/mle/fp.dta similarity index 100% rename from source/rst/_static/lecture_specific/mle/fp.dta rename to lectures/_static/lecture_specific/mle/fp.dta diff --git a/source/rst/_static/lecture_specific/odu/odu.py b/lectures/_static/lecture_specific/odu/odu.py similarity index 100% rename from source/rst/_static/lecture_specific/odu/odu.py rename to lectures/_static/lecture_specific/odu/odu.py diff --git a/source/rst/_static/lecture_specific/ols/maketable1.dta b/lectures/_static/lecture_specific/ols/maketable1.dta similarity index 100% rename from source/rst/_static/lecture_specific/ols/maketable1.dta rename to lectures/_static/lecture_specific/ols/maketable1.dta diff --git a/source/rst/_static/lecture_specific/ols/maketable2.dta b/lectures/_static/lecture_specific/ols/maketable2.dta similarity index 100% rename from source/rst/_static/lecture_specific/ols/maketable2.dta rename to lectures/_static/lecture_specific/ols/maketable2.dta diff --git a/source/rst/_static/lecture_specific/ols/maketable4.dta b/lectures/_static/lecture_specific/ols/maketable4.dta similarity index 100% rename from source/rst/_static/lecture_specific/ols/maketable4.dta rename to lectures/_static/lecture_specific/ols/maketable4.dta diff --git a/source/rst/_static/lecture_specific/optgrowth/3ndp.pdf 
b/lectures/_static/lecture_specific/optgrowth/3ndp.pdf similarity index 100% rename from source/rst/_static/lecture_specific/optgrowth/3ndp.pdf rename to lectures/_static/lecture_specific/optgrowth/3ndp.pdf diff --git a/source/rst/_static/lecture_specific/optgrowth/bellman_operator.py b/lectures/_static/lecture_specific/optgrowth/bellman_operator.py similarity index 100% rename from source/rst/_static/lecture_specific/optgrowth/bellman_operator.py rename to lectures/_static/lecture_specific/optgrowth/bellman_operator.py diff --git a/source/rst/_static/lecture_specific/optgrowth/cd_analytical.py b/lectures/_static/lecture_specific/optgrowth/cd_analytical.py similarity index 100% rename from source/rst/_static/lecture_specific/optgrowth/cd_analytical.py rename to lectures/_static/lecture_specific/optgrowth/cd_analytical.py diff --git a/source/rst/_static/lecture_specific/optgrowth/solution_og_ex2.png b/lectures/_static/lecture_specific/optgrowth/solution_og_ex2.png similarity index 100% rename from source/rst/_static/lecture_specific/optgrowth/solution_og_ex2.png rename to lectures/_static/lecture_specific/optgrowth/solution_og_ex2.png diff --git a/source/rst/_static/lecture_specific/optgrowth/solve_model.py b/lectures/_static/lecture_specific/optgrowth/solve_model.py similarity index 100% rename from source/rst/_static/lecture_specific/optgrowth/solve_model.py rename to lectures/_static/lecture_specific/optgrowth/solve_model.py diff --git a/source/rst/_static/lecture_specific/optgrowth_fast/ogm.py b/lectures/_static/lecture_specific/optgrowth_fast/ogm.py similarity index 100% rename from source/rst/_static/lecture_specific/optgrowth_fast/ogm.py rename to lectures/_static/lecture_specific/optgrowth_fast/ogm.py diff --git a/source/rst/_static/lecture_specific/optgrowth_fast/ogm_crra.py b/lectures/_static/lecture_specific/optgrowth_fast/ogm_crra.py similarity index 100% rename from source/rst/_static/lecture_specific/optgrowth_fast/ogm_crra.py rename to 
lectures/_static/lecture_specific/optgrowth_fast/ogm_crra.py diff --git a/source/rst/_static/lecture_specific/pandas_panel/countries.csv b/lectures/_static/lecture_specific/pandas_panel/countries.csv similarity index 100% rename from source/rst/_static/lecture_specific/pandas_panel/countries.csv rename to lectures/_static/lecture_specific/pandas_panel/countries.csv diff --git a/source/rst/_static/lecture_specific/pandas_panel/employ.csv b/lectures/_static/lecture_specific/pandas_panel/employ.csv similarity index 100% rename from source/rst/_static/lecture_specific/pandas_panel/employ.csv rename to lectures/_static/lecture_specific/pandas_panel/employ.csv diff --git a/source/rst/_static/lecture_specific/pandas_panel/realwage.csv b/lectures/_static/lecture_specific/pandas_panel/realwage.csv similarity index 100% rename from source/rst/_static/lecture_specific/pandas_panel/realwage.csv rename to lectures/_static/lecture_specific/pandas_panel/realwage.csv diff --git a/source/rst/_static/lecture_specific/pandas_panel/venn_diag.png b/lectures/_static/lecture_specific/pandas_panel/venn_diag.png similarity index 100% rename from source/rst/_static/lecture_specific/pandas_panel/venn_diag.png rename to lectures/_static/lecture_specific/pandas_panel/venn_diag.png diff --git a/source/rst/_static/lecture_specific/perm_income/perm_inc_ir.py b/lectures/_static/lecture_specific/perm_income/perm_inc_ir.py similarity index 100% rename from source/rst/_static/lecture_specific/perm_income/perm_inc_ir.py rename to lectures/_static/lecture_specific/perm_income/perm_inc_ir.py diff --git a/source/rst/_static/lecture_specific/schelling/schelling_fig1.png b/lectures/_static/lecture_specific/schelling/schelling_fig1.png similarity index 100% rename from source/rst/_static/lecture_specific/schelling/schelling_fig1.png rename to lectures/_static/lecture_specific/schelling/schelling_fig1.png diff --git a/source/rst/_static/lecture_specific/schelling/schelling_fig2.png 
b/lectures/_static/lecture_specific/schelling/schelling_fig2.png similarity index 100% rename from source/rst/_static/lecture_specific/schelling/schelling_fig2.png rename to lectures/_static/lecture_specific/schelling/schelling_fig2.png diff --git a/source/rst/_static/lecture_specific/schelling/schelling_fig3.png b/lectures/_static/lecture_specific/schelling/schelling_fig3.png similarity index 100% rename from source/rst/_static/lecture_specific/schelling/schelling_fig3.png rename to lectures/_static/lecture_specific/schelling/schelling_fig3.png diff --git a/source/rst/_static/lecture_specific/schelling/schelling_fig4.png b/lectures/_static/lecture_specific/schelling/schelling_fig4.png similarity index 100% rename from source/rst/_static/lecture_specific/schelling/schelling_fig4.png rename to lectures/_static/lecture_specific/schelling/schelling_fig4.png diff --git a/source/rst/_static/lecture_specific/short_path/Graph-networkx2.ipynb b/lectures/_static/lecture_specific/short_path/Graph-networkx2.ipynb similarity index 100% rename from source/rst/_static/lecture_specific/short_path/Graph-networkx2.ipynb rename to lectures/_static/lecture_specific/short_path/Graph-networkx2.ipynb diff --git a/source/rst/_static/lecture_specific/short_path/graph.png b/lectures/_static/lecture_specific/short_path/graph.png similarity index 100% rename from source/rst/_static/lecture_specific/short_path/graph.png rename to lectures/_static/lecture_specific/short_path/graph.png diff --git a/source/rst/_static/lecture_specific/short_path/graph2.png b/lectures/_static/lecture_specific/short_path/graph2.png similarity index 100% rename from source/rst/_static/lecture_specific/short_path/graph2.png rename to lectures/_static/lecture_specific/short_path/graph2.png diff --git a/source/rst/_static/lecture_specific/short_path/graph3.png b/lectures/_static/lecture_specific/short_path/graph3.png similarity index 100% rename from source/rst/_static/lecture_specific/short_path/graph3.png rename to 
lectures/_static/lecture_specific/short_path/graph3.png diff --git a/source/rst/_static/lecture_specific/short_path/graph4.png b/lectures/_static/lecture_specific/short_path/graph4.png similarity index 100% rename from source/rst/_static/lecture_specific/short_path/graph4.png rename to lectures/_static/lecture_specific/short_path/graph4.png diff --git a/source/rst/_static/lecture_specific/troubleshooting/launch.png b/lectures/_static/lecture_specific/troubleshooting/launch.png similarity index 100% rename from source/rst/_static/lecture_specific/troubleshooting/launch.png rename to lectures/_static/lecture_specific/troubleshooting/launch.png diff --git a/source/rst/_static/lecture_specific/uncertainty_traps/uncertainty_traps_45.png b/lectures/_static/lecture_specific/uncertainty_traps/uncertainty_traps_45.png similarity index 100% rename from source/rst/_static/lecture_specific/uncertainty_traps/uncertainty_traps_45.png rename to lectures/_static/lecture_specific/uncertainty_traps/uncertainty_traps_45.png diff --git a/source/rst/_static/lecture_specific/uncertainty_traps/uncertainty_traps_mu.png b/lectures/_static/lecture_specific/uncertainty_traps/uncertainty_traps_mu.png similarity index 100% rename from source/rst/_static/lecture_specific/uncertainty_traps/uncertainty_traps_mu.png rename to lectures/_static/lecture_specific/uncertainty_traps/uncertainty_traps_mu.png diff --git a/source/rst/_static/lecture_specific/uncertainty_traps/uncertainty_traps_sim.png b/lectures/_static/lecture_specific/uncertainty_traps/uncertainty_traps_sim.png similarity index 100% rename from source/rst/_static/lecture_specific/uncertainty_traps/uncertainty_traps_sim.png rename to lectures/_static/lecture_specific/uncertainty_traps/uncertainty_traps_sim.png diff --git a/source/rst/_static/lecture_specific/wald_friedman/wald_class.py b/lectures/_static/lecture_specific/wald_friedman/wald_class.py similarity index 100% rename from 
source/rst/_static/lecture_specific/wald_friedman/wald_class.py rename to lectures/_static/lecture_specific/wald_friedman/wald_class.py diff --git a/source/rst/_static/lecture_specific/wald_friedman/wald_dec_rule.png b/lectures/_static/lecture_specific/wald_friedman/wald_dec_rule.png similarity index 100% rename from source/rst/_static/lecture_specific/wald_friedman/wald_dec_rule.png rename to lectures/_static/lecture_specific/wald_friedman/wald_dec_rule.png diff --git a/source/rst/_static/lecture_specific/wald_friedman/wald_dec_rule.tex b/lectures/_static/lecture_specific/wald_friedman/wald_dec_rule.tex similarity index 100% rename from source/rst/_static/lecture_specific/wald_friedman/wald_dec_rule.tex rename to lectures/_static/lecture_specific/wald_friedman/wald_dec_rule.tex diff --git a/source/rst/_static/lecture_specific/wald_friedman/wf_first_pass.py b/lectures/_static/lecture_specific/wald_friedman/wf_first_pass.py similarity index 100% rename from source/rst/_static/lecture_specific/wald_friedman/wf_first_pass.py rename to lectures/_static/lecture_specific/wald_friedman/wf_first_pass.py diff --git a/source/rst/_static/lecture_specific/wealth_dynamics/htop_again.png b/lectures/_static/lecture_specific/wealth_dynamics/htop_again.png similarity index 100% rename from source/rst/_static/lecture_specific/wealth_dynamics/htop_again.png rename to lectures/_static/lecture_specific/wealth_dynamics/htop_again.png diff --git a/source/rst/_static/qe-logo-large.png b/lectures/_static/qe-logo-large.png similarity index 100% rename from source/rst/_static/qe-logo-large.png rename to lectures/_static/qe-logo-large.png diff --git a/source/rst/_static/quant-econ.bib b/lectures/_static/quant-econ.bib similarity index 100% rename from source/rst/_static/quant-econ.bib rename to lectures/_static/quant-econ.bib diff --git a/lectures/_toc.yml b/lectures/_toc.yml new file mode 100644 index 000000000..7ac0c6cab --- /dev/null +++ b/lectures/_toc.yml @@ -0,0 +1,93 @@ +- file: intro 
+ numbered: true + +- part: Tools and Techniques + chapters: + - file: geom_series + - file: multi_hyper + - file: sir_model + - file: linear_algebra + - file: complex_and_trig + - file: lln_clt + - file: heavy_tails + - file: multivariate_normal + - file: time_series_with_matrices + +- part: Introduction to Dynamics + chapters: + - file: scalar_dynam + - file: ar1_processes + - file: finite_markov + - file: inventory_dynamics + - file: linear_models + - file: samuelson + - file: kesten_processes + - file: wealth_dynamics + - file: kalman + - file: short_path + - file: cass_koopmans_1 + - file: cass_koopmans_2 + +- part: Search + chapters: + - file: mccall_model + - file: mccall_model_with_separation + - file: mccall_fitted_vfi + - file: mccall_correlated + - file: career + - file: jv + +- part: Consumption, Savings and Growth + chapters: + - file: cake_eating_problem + - file: cake_eating_numerical + - file: optgrowth + - file: optgrowth_fast + - file: coleman_policy_iter + - file: egm_policy_iter + - file: ifp + - file: ifp_advanced + +- part: Information + chapters: + - file: odu + - file: likelihood_ratio_process + # - file: wald_friedman + - file: exchangeable + - file: likelihood_bayes + - file: navy_captain + +- part: LQ Control + chapters: + - file: lqcontrol + - file: perm_income + - file: perm_income_cons + - file: lq_inventories + +- part: Multiple Agent Models + chapters: + - file: schelling + - file: lake_model + - file: rational_expectations + - file: re_with_feedback + - file: markov_perf + - file: uncertainty_traps + - file: aiyagari + +- part: Asset Pricing and Finance + chapters: + - file: markov_asset + - file: harrison_kreps + +- part: Data and Empirics + chapters: + - file: pandas_panel + - file: ols + - file: mle + +# Placeholder for other pages + +- part: Other + chapters: + - file: troubleshooting + - file: zreferences \ No newline at end of file diff --git a/source/rst/about_lectures.md b/lectures/about_lectures.md similarity index 100% 
rename from source/rst/about_lectures.md rename to lectures/about_lectures.md diff --git a/source/rst/aiyagari.md b/lectures/aiyagari.md similarity index 100% rename from source/rst/aiyagari.md rename to lectures/aiyagari.md diff --git a/source/rst/ar1_processes.md b/lectures/ar1_processes.md similarity index 100% rename from source/rst/ar1_processes.md rename to lectures/ar1_processes.md diff --git a/source/rst/cake_eating_numerical.md b/lectures/cake_eating_numerical.md similarity index 99% rename from source/rst/cake_eating_numerical.md rename to lectures/cake_eating_numerical.md index c08c3a0f4..632ba0943 100644 --- a/source/rst/cake_eating_numerical.md +++ b/lectures/cake_eating_numerical.md @@ -98,15 +98,13 @@ This is a form of **successive approximation**, and was discussed in our {doc}`l The basic idea is: -$$ -$$ - 1. Take an arbitary intial guess of $v$. 1. Obtain an update $w$ defined by - > w(x) = \max_{0\leq c \leq x} \{u(c) + \beta v(x-c)\} - > - > + $$ + w(x) = \max_{0\leq c \leq x} \{u(c) + \beta v(x-c)\} + $$ + 1. Stop if $w$ is approximately equal to $v$, otherwise set $v=w$ and go back to step 2. 
diff --git a/source/rst/cake_eating_problem.md b/lectures/cake_eating_problem.md similarity index 100% rename from source/rst/cake_eating_problem.md rename to lectures/cake_eating_problem.md diff --git a/source/rst/career.md b/lectures/career.md similarity index 99% rename from source/rst/career.md rename to lectures/career.md index f35608615..e2b235179 100644 --- a/source/rst/career.md +++ b/lectures/career.md @@ -73,8 +73,8 @@ In what follows we distinguish between a career and a job, where For workers, wages can be decomposed into the contribution of job and career * $w_t = \theta_t + \epsilon_t$, where - * $> \theta_t$> is the contribution of career at time $> t$ - * $> \epsilon_t$> is the contribution of the job at time $> t$ + * $\theta_t$ is the contribution of career at time $t$ + * $\epsilon_t$ is the contribution of the job at time $t$ At the start of time $t$, a worker has the following options diff --git a/source/rst/cass_koopmans_1.md b/lectures/cass_koopmans_1.md similarity index 100% rename from source/rst/cass_koopmans_1.md rename to lectures/cass_koopmans_1.md diff --git a/source/rst/cass_koopmans_2.md b/lectures/cass_koopmans_2.md similarity index 97% rename from source/rst/cass_koopmans_2.md rename to lectures/cass_koopmans_2.md index cfaf999f7..ac0b30663 100644 --- a/source/rst/cass_koopmans_2.md +++ b/lectures/cass_koopmans_2.md @@ -49,22 +49,19 @@ The present lecture uses additional ideas including problem and the Hicks-Arrow prices. - A **Big** $K$ **, little** $k$ trick widely used in macroeconomic dynamics. - -* > We shall encounter this trick in [> this lecture](https://lectures.quantecon.org/py/rational_expectations.html#)> - > and also in [> this lecture](https://lectures.quantecon.org/py/dyn_stack.html#)> . - + * We shall encounter this trick in [this lecture](https://lectures.quantecon.org/py/rational_expectations.html#) + and also in [this lecture](https://lectures.quantecon.org/py/dyn_stack.html#). 
- A non-stochastic version of a theory of the **term structure of interest rates**. - An intimate connection between the cases for the optimality of two competing visions of good ways to organize an economy, namely: - -* **> socialism**> in which a central planner commands the - > allocation of resources, and -* **> capitalism**> (also known as **> a market economy**> ) in - > which competitive equilibrium **> prices**> induce individual - > consumers and producers to choose a socially optimal allocation - > as an unintended consequence of their selfish - > decisions + * **socialism** in which a central planner commands the + allocation of resources, and + * **capitalism** (also known as **a market economy**) in + which competitive equilibrium **prices** induce individual + consumers and producers to choose a socially optimal allocation + as an unintended consequence of their selfish + decisions Let's start with some standard imports: @@ -581,7 +578,7 @@ which is {eq}`constraint3`. Combining {eq}`cond4` and {eq}`eq-price`, we get: $$ -- \beta^{T+1} \mu_{T+1} \leq 0 +-\beta^{T+1} \mu_{T+1} \leq 0 $$ Dividing both sides by $\beta^{T+1}$ gives diff --git a/source/rst/coleman_policy_iter.md b/lectures/coleman_policy_iter.md similarity index 100% rename from source/rst/coleman_policy_iter.md rename to lectures/coleman_policy_iter.md diff --git a/source/rst/complex_and_trig.md b/lectures/complex_and_trig.md similarity index 100% rename from source/rst/complex_and_trig.md rename to lectures/complex_and_trig.md diff --git a/source/rst/egm_policy_iter.md b/lectures/egm_policy_iter.md similarity index 100% rename from source/rst/egm_policy_iter.md rename to lectures/egm_policy_iter.md diff --git a/source/rst/exchangeable.md b/lectures/exchangeable.md similarity index 97% rename from source/rst/exchangeable.md rename to lectures/exchangeable.md index 788ff59ca..5c9825d52 100644 --- a/source/rst/exchangeable.md +++ b/lectures/exchangeable.md @@ -181,12 +181,12 @@ $G$ with 
probability $1 - \tilde \pi$. Thus, we assume that the decision maker -- **> knows**> both $> F$> and $> G$ -- **> doesnt't know**> which of these two distributions that nature has drawn -- > summarizing his ignorance by acting as if or **> thinking**> that nature chose distribution $> F$> with probability $> \tilde \pi \in (0,1)$> and distribution - > $> G$> with probability $> 1 - \tilde \pi$ -- > at date $> t \geq 0$> has observed the partial history $> w_t, w_{t-1}, \ldots, w_0$> of draws from the appropriate joint - > density of the partial history +- **knows** both $F$ and $G$ +- **doesnt't know** which of these two distributions that nature has drawn +- summarizing his ignorance by acting as if or **thinking** that nature chose distribution $F$ with probability $\tilde \pi \in (0,1)$ and distribution + $G$ with probability $1 - \tilde \pi$ +- at date $t \geq 0$ has observed the partial history $w_t, w_{t-1}, \ldots, w_0$ of draws from the appropriate joint + density of the partial history But what do we mean by the *appropriate joint distribution*? @@ -616,7 +616,7 @@ periods when the sequence is truly IID draws from $G$. Again, we set the initial π_paths_G = simulate(a=3, b=1.2, T=T, N=1000) ``` -In the above graph we observe that now most paths $\pi_t \rightarrow 0$. +In the above graph we observe that now most paths $\pi_t \rightarrow 0$. 
### Rates of convergence diff --git a/source/rst/finite_markov.md b/lectures/finite_markov.md similarity index 100% rename from source/rst/finite_markov.md rename to lectures/finite_markov.md diff --git a/source/rst/geom_series.md b/lectures/geom_series.md similarity index 100% rename from source/rst/geom_series.md rename to lectures/geom_series.md diff --git a/source/rst/harrison_kreps.md b/lectures/harrison_kreps.md similarity index 99% rename from source/rst/harrison_kreps.md rename to lectures/harrison_kreps.md index 670b33f64..163d3ead7 100644 --- a/source/rst/harrison_kreps.md +++ b/lectures/harrison_kreps.md @@ -70,7 +70,7 @@ Economists differ in how they define a *bubble*. The Harrison-Kreps model illustrates the following notion of a bubble that attracts many economists: -*> A component of an asset price can be interpreted as a bubble when all investors agree that the current price of the asset exceeds what they believe the asset's underlying dividend stream justifies*> . +> *A component of an asset price can be interpreted as a bubble when all investors agree that the current price of the asset exceeds what they believe the asset's underlying dividend stream justifies*. 
## Structure of the Model diff --git a/source/rst/heavy_tails.md b/lectures/heavy_tails.md similarity index 100% rename from source/rst/heavy_tails.md rename to lectures/heavy_tails.md diff --git a/source/rst/ifp.md b/lectures/ifp.md similarity index 100% rename from source/rst/ifp.md rename to lectures/ifp.md diff --git a/source/rst/ifp_advanced.md b/lectures/ifp_advanced.md similarity index 100% rename from source/rst/ifp_advanced.md rename to lectures/ifp_advanced.md diff --git a/lectures/intro.md b/lectures/intro.md new file mode 100644 index 000000000..163a77669 --- /dev/null +++ b/lectures/intro.md @@ -0,0 +1,21 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst +kernelspec: + display_name: Python 3 + language: python + name: python3 +--- + +# Quantitative Economics with Python + +This website presents a set of lectures on quantitative economic modeling, designed and written by +[Thomas J. Sargent](http://www.tomsargent.com/) and [John Stachurski](http://johnstachurski.net/). + +For an overview of the series, see [this page](https://quantecon.org/python-lectures/) + +```{tableofcontents} +``` + diff --git a/source/rst/inventory_dynamics.md b/lectures/inventory_dynamics.md similarity index 100% rename from source/rst/inventory_dynamics.md rename to lectures/inventory_dynamics.md diff --git a/source/rst/jv.md b/lectures/jv.md similarity index 100% rename from source/rst/jv.md rename to lectures/jv.md diff --git a/source/rst/kalman.md b/lectures/kalman.md similarity index 96% rename from source/rst/kalman.md rename to lectures/kalman.md index e82612484..53e7ae7cd 100644 --- a/source/rst/kalman.md +++ b/lectures/kalman.md @@ -83,7 +83,7 @@ One way to summarize our knowledge is a point prediction $\hat x$ * But what if the President wants to know the probability that the missile is currently over the Sea of Japan? 
* Then it is better to summarize our initial beliefs with a bivariate probability density $p$ - * $> \int_E p(x)dx$> indicates the probability that we attach to the missile being in region $> E$> . + * $\int_E p(x)dx$ indicates the probability that we attach to the missile being in region $E$. The density $p$ is called our *prior* for the random variable $x$. @@ -507,8 +507,8 @@ In this case, for any initial choice of $\Sigma_0$ that is both non-negative and The class `Kalman` from the [QuantEcon.py](http://quantecon.org/quantecon-py) package implements the Kalman filter * Instance data consists of: - * > the moments $> (\hat x_t, \Sigma_t)$> of the current prior. - * > An instance of the [> LinearStateSpace](https://github.com/QuantEcon/QuantEcon.py/blob/master/quantecon/lss.py)> class from [> QuantEcon.py](http://quantecon.org/quantecon-py)> . + * the moments $(\hat x_t, \Sigma_t)$ of the current prior. + * An instance of the [LinearStateSpace](https://github.com/QuantEcon/QuantEcon.py/blob/master/quantecon/lss.py) class from [QuantEcon.py](http://quantecon.org/quantecon-py). The latter represents a linear state space model of the form @@ -530,10 +530,10 @@ $$ * The class `Kalman` from the [QuantEcon.py](http://quantecon.org/quantecon-py) package has a number of methods, some that we will wait to use until we study more advanced applications in subsequent lectures. 
* Methods pertinent for this lecture are: - * `> prior_to_filtered`> , which updates $> (\hat x_t, \Sigma_t)$> to $> (\hat x_t^F, \Sigma_t^F)$ - * `> filtered_to_forecast`> , which updates the filtering distribution to the predictive distribution -- which becomes the new prior $> (\hat x_{t+1}, \Sigma_{t+1})$ - * `> update`> , which combines the last two methods - * > a `> stationary_values`> , which computes the solution to {eq}`kalman_dare`> and the corresponding (stationary) Kalman gain + * `prior_to_filtered`, which updates $(\hat x_t, \Sigma_t)$ to $(\hat x_t^F, \Sigma_t^F)$ + * `filtered_to_forecast`, which updates the filtering distribution to the predictive distribution -- which becomes the new prior $(\hat x_{t+1}, \Sigma_{t+1})$ + * `update`, which combines the last two methods + * a `stationary_values`, which computes the solution to {eq}`kalman_dare` and the corresponding (stationary) Kalman gain You can view the program [on GitHub](https://github.com/QuantEcon/QuantEcon.py/blob/master/quantecon/kalman.py). diff --git a/source/rst/kesten_processes.md b/lectures/kesten_processes.md similarity index 100% rename from source/rst/kesten_processes.md rename to lectures/kesten_processes.md diff --git a/source/rst/lake_model.md b/lectures/lake_model.md similarity index 100% rename from source/rst/lake_model.md rename to lectures/lake_model.md diff --git a/source/rst/likelihood_bayes.md b/lectures/likelihood_bayes.md similarity index 100% rename from source/rst/likelihood_bayes.md rename to lectures/likelihood_bayes.md diff --git a/source/rst/likelihood_ratio_process.md b/lectures/likelihood_ratio_process.md similarity index 96% rename from source/rst/likelihood_ratio_process.md rename to lectures/likelihood_ratio_process.md index ed14739a2..2823b073c 100644 --- a/source/rst/likelihood_ratio_process.md +++ b/lectures/likelihood_ratio_process.md @@ -40,10 +40,10 @@ We'll use a setting described in {doc}`this lecture `. 
Among the things that we'll learn about are -* > A peculiar property of likelihood ratio processes -* > How a likelihood ratio process is the key ingredient in frequentist hypothesis testing -* > How a **> receiver operator characteristic curve**> summarizes information about a false alarm probability and power in frequentist hypothesis testing -* > How during World War II the United States Navy devised a decision rule that Captain Garret L. Schyler challenged and asked Milton Friedman to justify to him, a topic to be studied in {doc}`this lecture ` +* A peculiar property of likelihood ratio processes +* How a likelihood ratio process is the key ingredient in frequentist hypothesis testing +* How a **receiver operator characteristic curve** summarizes information about a false alarm probability and power in frequentist hypothesis testing +* How during World War II the United States Navy devised a decision rule that Captain Garret L. Schyler challenged and asked Milton Friedman to justify to him, a topic to be studied in {doc}`this lecture ` ## Likelihood Ratio Process diff --git a/source/rst/linear_algebra.md b/lectures/linear_algebra.md similarity index 100% rename from source/rst/linear_algebra.md rename to lectures/linear_algebra.md diff --git a/source/rst/linear_models.md b/lectures/linear_models.md similarity index 98% rename from source/rst/linear_models.md rename to lectures/linear_models.md index 1e8b9f348..10ee2c08d 100644 --- a/source/rst/linear_models.md +++ b/lectures/linear_models.md @@ -53,15 +53,15 @@ Its many applications include: * representing dynamics of higher-order linear systems * predicting the position of a system $j$ steps into the future * predicting a geometric sum of future values of a variable like - * > non-financial income - * > dividends on a stock - * > the money supply - * > a government deficit or surplus, etc. + * non-financial income + * dividends on a stock + * the money supply + * a government deficit or surplus, etc. 
* key ingredient of useful models - * > Friedman's permanent income model of consumption smoothing. - * > Barro's model of smoothing total tax collections. - * > Rational expectations version of Cagan's model of hyperinflation. - * > Sargent and Wallace's "unpleasant monetarist arithmetic," etc. + * Friedman's permanent income model of consumption smoothing. + * Barro's model of smoothing total tax collections. + * Rational expectations version of Cagan's model of hyperinflation. + * Sargent and Wallace's "unpleasant monetarist arithmetic," etc. Let's start with some imports: diff --git a/source/rst/lln_clt.md b/lectures/lln_clt.md similarity index 100% rename from source/rst/lln_clt.md rename to lectures/lln_clt.md diff --git a/source/rst/lq_inventories.md b/lectures/lq_inventories.md similarity index 100% rename from source/rst/lq_inventories.md rename to lectures/lq_inventories.md diff --git a/source/rst/lqcontrol.md b/lectures/lqcontrol.md similarity index 98% rename from source/rst/lqcontrol.md rename to lectures/lqcontrol.md index 603fa8d94..1e0e7cd5a 100644 --- a/source/rst/lqcontrol.md +++ b/lectures/lqcontrol.md @@ -563,16 +563,16 @@ In the module, the various updating, simulation and fixed point methods are wrapped in a class called `LQ`, which includes * Instance data: - * > The required parameters $> Q, R, A, B$> and optional parameters > C, β, T, R_f, N> specifying a given LQ model - * > set $> T$> and $> R_f$> to `> None`> in the infinite horizon case - * > set `> C = None`> (or zero) in the deterministic case + * The required parameters $Q, R, A, B$ and optional parameters C, β, T, R_f, N specifying a given LQ model + * set $T$ and $R_f$ to `None` in the infinite horizon case + * set `C = None` (or zero) in the deterministic case * the value function and policy data - * $> d_t, P_t, F_t$> in the finite horizon case - * $> d, P, F$> in the infinite horizon case + * $d_t, P_t, F_t$ in the finite horizon case + * $d, P, F$ in the infinite horizon case * 
Methods: - * `> update_values`> --- shifts $> d_t, P_t, F_t$> to their $> t-1$> values via {eq}`lq_pr`> , {eq}`lq_dd`> and {eq}`lq_oc` - * `> stationary_values`> --- computes $> P, d, F$> in the infinite horizon case - * `> compute_sequence`> ---- simulates the dynamics of $> x_t, u_t, w_t$> given $> x_0$> and assuming standard normal shocks + * `update_values` --- shifts $d_t, P_t, F_t$ to their $t-1$ values via {eq}`lq_pr`, {eq}`lq_dd` and {eq}`lq_oc` + * `stationary_values` --- computes $P, d, F$ in the infinite horizon case + * `compute_sequence` ---- simulates the dynamics of $x_t, u_t, w_t$ given $x_0$ and assuming standard normal shocks (lq_mfpa)= ### An Application diff --git a/source/rst/markov_asset.md b/lectures/markov_asset.md similarity index 100% rename from source/rst/markov_asset.md rename to lectures/markov_asset.md diff --git a/source/rst/markov_perf.md b/lectures/markov_perf.md similarity index 99% rename from source/rst/markov_perf.md rename to lectures/markov_perf.md index c0c5021a5..784ed27d7 100644 --- a/source/rst/markov_perf.md +++ b/lectures/markov_perf.md @@ -149,7 +149,7 @@ The adjective "Markov" denotes that the equilibrium decision rules depend only o "Perfect" means complete, in the sense that the equilibrium is constructed by backward induction and hence builds in optimizing behavior for each firm at all possible future states. -* > These include many states that will not be reached when we iterate forward on the pair of equilibrium strategies $> f_i$> starting from a given initial state. +* These include many states that will not be reached when we iterate forward on the pair of equilibrium strategies $f_i$ starting from a given initial state. 
### Computation diff --git a/source/rst/mccall_correlated.md b/lectures/mccall_correlated.md similarity index 100% rename from source/rst/mccall_correlated.md rename to lectures/mccall_correlated.md diff --git a/source/rst/mccall_fitted_vfi.md b/lectures/mccall_fitted_vfi.md similarity index 100% rename from source/rst/mccall_fitted_vfi.md rename to lectures/mccall_fitted_vfi.md diff --git a/source/rst/mccall_model.md b/lectures/mccall_model.md similarity index 100% rename from source/rst/mccall_model.md rename to lectures/mccall_model.md diff --git a/source/rst/mccall_model_with_separation.md b/lectures/mccall_model_with_separation.md similarity index 100% rename from source/rst/mccall_model_with_separation.md rename to lectures/mccall_model_with_separation.md diff --git a/source/rst/mle.md b/lectures/mle.md similarity index 99% rename from source/rst/mle.md rename to lectures/mle.md index 492f7e582..09ec9caae 100644 --- a/source/rst/mle.md +++ b/lectures/mle.md @@ -417,20 +417,20 @@ To use the algorithm, we take an initial guess at the maximum value, $\beta_0$ (the OLS parameter estimates might be a reasonable guess), then -$$ -$$ - -$$ -$$ - 1. Use the updating rule to iterate the algorithm - \boldsymbol{\beta}_{(k+1)} = \boldsymbol{\beta}_{(k)} - H^{-1}(\boldsymbol{\beta}_{(k)})G(\boldsymbol{\beta}_{(k)})where: + $$ + \boldsymbol{\beta}_{(k+1)} = \boldsymbol{\beta}_{(k)} - H^{-1}(\boldsymbol{\beta}_{(k)})G(\boldsymbol{\beta}_{(k)}) + $$ + where: + $$ \begin{aligned} G(\boldsymbol{\beta}_{(k)}) = \frac{d \log \mathcal{L(\boldsymbol{\beta}_{(k)})}}{d \boldsymbol{\beta}_{(k)}} \\ H(\boldsymbol{\beta}_{(k)}) = \frac{d^2 \log \mathcal{L(\boldsymbol{\beta}_{(k)})}}{d \boldsymbol{\beta}_{(k)}d \boldsymbol{\beta}'_{(k)}} \end{aligned} + $$ + 1. 
Check whether $\boldsymbol{\beta}_{(k+1)} - \boldsymbol{\beta}_{(k)} < tol$ - If true, then stop iterating and set $\hat{\boldsymbol{\beta}} = \boldsymbol{\beta}_{(k+1)}$ diff --git a/source/rst/multi_hyper.md b/lectures/multi_hyper.md similarity index 96% rename from source/rst/multi_hyper.md rename to lectures/multi_hyper.md index d8220ebad..161804f3f 100644 --- a/source/rst/multi_hyper.md +++ b/lectures/multi_hyper.md @@ -445,16 +445,16 @@ Note the substantial differences between hypergeometric distribution and the app The off-diagonal graphs plot the empirical joint distribution of $k_i$ and $k_j$ for each pair $(i, j)$. -> The darker the blue, the more data points are contained in the corresponding cell. (Note that $> k_i$> is on the x-axis and $> k_j$> is on the y-axis). +The darker the blue, the more data points are contained in the corresponding cell. (Note that $k_i$ is on the x-axis and $k_j$ is on the y-axis). -> The contour maps plot the bivariate Gaussian density function of $> \left(k_i, k_j\right)$> with the population mean and covariance given by slices of $> \mu$> and $> \Sigma$> that we computed above. +The contour maps plot the bivariate Gaussian density function of $\left(k_i, k_j\right)$ with the population mean and covariance given by slices of $\mu$ and $\Sigma$ that we computed above. Let's also test the normality for each $k_i$ using `scipy.stats.normaltest` that implements D’Agostino and Pearson's test that combines skew and kurtosis to form an omnibus test of normality. The null hypothesis is that the sample follows normal distribution. -> `normaltest`> returns an array of p-values associated with tests for each $> k_i$> sample. +> `normaltest` returns an array of p-values associated with tests for each $k_i$ sample. 
```{code-cell} python3 test_multihyper = normaltest(sample) diff --git a/source/rst/multivariate_normal.md b/lectures/multivariate_normal.md similarity index 100% rename from source/rst/multivariate_normal.md rename to lectures/multivariate_normal.md diff --git a/source/rst/navy_captain.md b/lectures/navy_captain.md similarity index 100% rename from source/rst/navy_captain.md rename to lectures/navy_captain.md diff --git a/source/rst/odu.md b/lectures/odu.md similarity index 100% rename from source/rst/odu.md rename to lectures/odu.md diff --git a/source/rst/ols.md b/lectures/ols.md similarity index 100% rename from source/rst/ols.md rename to lectures/ols.md diff --git a/source/rst/optgrowth.md b/lectures/optgrowth.md similarity index 99% rename from source/rst/optgrowth.md rename to lectures/optgrowth.md index 6a15e1d05..e7f8415e7 100644 --- a/source/rst/optgrowth.md +++ b/lectures/optgrowth.md @@ -285,7 +285,7 @@ The term $\int v(f(y - c) z) \phi(dz)$ can be understood as the expected next pe As shown in [EDTC](http://johnstachurski.net/edtc.html), theorem 10.1.11 and a range of other texts -*> The value function*> $> v^*$> *> satisfies the Bellman equation* +> *The value function* $v^*$ *satisfies the Bellman equation* In other words, {eq}`fpb30` holds when $v=v^*$. @@ -324,7 +324,7 @@ function. In our setting, we have the following key result -* > A feasible consumption policy is optimal if and only if it is $> v^*$> -greedy. +* A feasible consumption policy is optimal if and only if it is $v^*$-greedy. The intuition is similar to the intuition for the Bellman equation, which was provided after {eq}`fpb30`. 
diff --git a/source/rst/optgrowth_fast.md b/lectures/optgrowth_fast.md similarity index 100% rename from source/rst/optgrowth_fast.md rename to lectures/optgrowth_fast.md diff --git a/source/rst/pandas_panel.md b/lectures/pandas_panel.md similarity index 100% rename from source/rst/pandas_panel.md rename to lectures/pandas_panel.md diff --git a/source/rst/perm_income.md b/lectures/perm_income.md similarity index 99% rename from source/rst/perm_income.md rename to lectures/perm_income.md index abdabc4f7..9cd60dc40 100644 --- a/source/rst/perm_income.md +++ b/lectures/perm_income.md @@ -600,10 +600,10 @@ Using these formulas together with {eq}`sprob15ab` and substituting into {eq}` Representation {eq}`sprob16abcd` makes clear that * The state can be taken as $(c_t, z_t)$. - * > The endogenous part is $> c_t$> and the exogenous part is $> z_t$> . - * > Debt $> b_t$> has disappeared as a component of the state because it is encoded in $> c_t$> . + * The endogenous part is $c_t$ and the exogenous part is $z_t$. + * Debt $b_t$ has disappeared as a component of the state because it is encoded in $c_t$. * Consumption is a random walk with innovation $(1-\beta) U (I-\beta A)^{-1} C w_{t+1}$. - * > This is a more explicit representation of the martingale result in {eq}`sprob5`> . + * This is a more explicit representation of the martingale result in {eq}`sprob5`. (coint_pi)= ### Cointegration diff --git a/source/rst/perm_income_cons.md b/lectures/perm_income_cons.md similarity index 99% rename from source/rst/perm_income_cons.md rename to lectures/perm_income_cons.md index fe7ee70dc..6a49d7a57 100644 --- a/source/rst/perm_income_cons.md +++ b/lectures/perm_income_cons.md @@ -478,9 +478,9 @@ The examples differ only in the initial states with which we endow the consumer All other parameter values are kept the same in the two examples - In the first example, all consumers begin with zero nonfinancial income and zero debt. - * > The consumers are thus *> ex-ante*> identical. 
+ * The consumers are thus *ex-ante* identical. - In the second example, while all begin with zero debt, we draw their initial income levels from the invariant distribution of financial income. - * > Consumers are *> ex-ante*> heterogeneous. + * Consumers are *ex-ante* heterogeneous. In the first example, consumers' nonfinancial income paths display pronounced transients early in the sample diff --git a/source/rst/rational_expectations.md b/lectures/rational_expectations.md similarity index 99% rename from source/rst/rational_expectations.md rename to lectures/rational_expectations.md index d0ba32274..aaab8c898 100644 --- a/source/rst/rational_expectations.md +++ b/lectures/rational_expectations.md @@ -575,7 +575,7 @@ Recall the planner's problem {ref}`described above ` 1. Formulate the planner's problem as an LQ problem. 1. Solve it using the same parameter values in exercise 1 - * $> a_0= 100, a_1= 0.05, \beta = 0.95, \gamma=10$ + * $a_0= 100, a_1= 0.05, \beta = 0.95, \gamma=10$ 1. Represent the solution in the form $Y_{t+1} = \kappa_0 + \kappa_1 Y_t$. 1. Compare your answer with the results from exercise 2. diff --git a/source/rst/re_with_feedback.md b/lectures/re_with_feedback.md similarity index 83% rename from source/rst/re_with_feedback.md rename to lectures/re_with_feedback.md index 54c71035e..44efd08a4 100644 --- a/source/rst/re_with_feedback.md +++ b/lectures/re_with_feedback.md @@ -54,12 +54,12 @@ the price level to the money supply. Cagan did not use a rational expectations version of his model, but Sargent {cite}`Sargent77hyper` did. 
-We study this model because it is intrinsically interesting and also because it has a mathematical structure that -also appears in virtually all linear rational expectations model, namely, that a key endogenous variable equals +We study a rational expectations version of this model because it is intrinsically interesting and because it +has a mathematical structure that +appears in virtually all linear rational expectations model, namely, that a key endogenous variable equals a mathematical expectation of a geometric sum of future values of another variable. -In a rational expectations version of Cagan's model, the endogenous variable is the price level or rate of inflation and -the other variable is the money supply or the rate of change in the money supply. +The model determines the price level or rate of inflation as a function of the money supply or the rate of change in the money supply. In this lecture, we'll encounter: @@ -67,36 +67,35 @@ In this lecture, we'll encounter: * a way of solving an expectational difference equation by mapping it into a vector first-order difference equation and appropriately manipulating an eigen decomposition of the transition matrix in order to impose stability * a way to use a Big $K$, little $k$ argument to allow apparent feedback from endogenous to exogenous variables within a rational expectations equilibrium * a use of eigenvector decompositions of matrices that allowed Blanchard and Khan (1981) and Whiteman (1983) to solve a class of linear rational expectations models +* how to use **SymPy** to get analytical formulas for some key objects comprising a rational expectations equilibrium -Cagan's model with rational expectations -is formulated as an **expectational difference equation** whose solution is a rational expectations equilibrium. +We formulate a version of Cagan's model under rational expectations +as an **expectational difference equation** whose solution is a rational expectations equilibrium. 
We'll start this lecture with a quick review of deterministic (i.e., non-random) first-order and second-order linear difference equations. ## Linear difference equations -In this quick review of linear difference equations, we'll use the *backward shift* or *lag* operator $L$ +We'll use the *backward shift* or *lag* operator $L$. The lag operator $L$ maps a sequence $\{x_t\}_{t=0}^\infty$ into the sequence $\{x_{t-1}\}_{t=0}^\infty$ -We'll can use $L$ in linear difference equations by using the equality +We'll deploy $L$ by using the equality $L x_t \equiv x_{t-1}$ in algebraic expressions. Further, the inverse $L^{-1}$ of the lag operator is the *forward shift* operator. -In linear difference equations, we'll often use the equaltiy $L^{-1} x_t \equiv x_{t+1}$ in the the algebra -below. +We'll often use the equality $L^{-1} x_t \equiv x_{t+1}$ below. -The algebra of lag and forward shift operators often simplifies formulas for linear difference equations and their -solutions. +The algebra of lag and forward shift operators can simplify representing and solving linear difference equations. ### First order We want to solve a linear first-order scalar difference equation. -First, let $|\lambda | < 1$, and let +Let $|\lambda | < 1$ and let $\{u_t\}_{t=-\infty}^\infty$ be a bounded sequence of scalar real numbers. @@ -167,14 +166,14 @@ To verify that this is a solution, check the consequences of operating on both sides of equation {eq}`equn_5` by $(1 -\lambda L)$ and compare to equation {eq}`equn_1`. -Solution {eq}`equn_2` exists for $|\lambda | < 1$ because -the distributed lag in $u$ converges. +For any bounded $\{u_t\}$ sequence, solution {eq}`equn_2` exists for $|\lambda | < 1$ because +the **distributed lag** in $u$ converges. Solution {eq}`equn_5` exists when $|\lambda| > 1$ because the **distributed lead** in $u$ converges. When $|\lambda | > 1$, the distributed lag in $u$ in {eq}`equn_2` may -diverge, so that a solution of this form does not exist. 
+diverge, in which case a solution of this form does not exist. The distributed lead in $u$ in {eq}`equn_5` need not converge when $|\lambda| < 1$. @@ -210,17 +209,21 @@ y_{t+1} = \lambda_1 y_t - \lambda_2^{-1} \sum_{j=0}^\infty \lambda_2^{-j} u_{t+j ``` Thus, we obtained equation {eq}`equn_7` by -solving stable roots (in this case $\lambda_1$) **backward**, and -unstable roots (in this case $\lambda_2$) **forward**. +solving a stable root (in this case $\lambda_1$) **backward**, and an +unstable root (in this case $\lambda_2$) **forward**. Equation {eq}`equn_7` has a form that we shall encounter often. -$\lambda_1 y_t$ is called the **feedback part** and -$-{\frac{\lambda_2^{-1}}{1 - \lambda_2^{-1}L^{-1}}} u_{t+1}$ is -called the **feedforward part** of the solution. +* $\lambda_1 y_t$ is called the **feedback part** +* $-{\frac{\lambda_2^{-1}}{1 - \lambda_2^{-1}L^{-1}}} u_{t+1}$ is called the **feedforward part** ## Illustration: Cagan's Model +Now let's use linear difference equations to represent and solve Sargent's {cite}`Sargent77hyper` rational expectations version of +Cagan’s model {cite}`Cagan` that connects the price level to the public's anticipations of future money supplies. + +Cagan did not use a rational expectations version of his model, but Sargent {cite}`Sargent77hyper` + Let - $m_t^d$ be the log of the demand for money @@ -250,6 +253,9 @@ p_t = (1 -\lambda) m_t + \lambda p_{t+1} where $\lambda \equiv \frac{\beta}{1+\beta} \in (0,1)$. +(We note that the characteristic polynomial if $1 - \lambda^{-1} z^{-1} = 0$ so that the zero of the +characteristic polynomial in this case is $\lambda \in (0,1)$ which here is **inside** the unit circle.) + Solving the first order difference equation {eq}`equation_1` forward gives ```{math} @@ -271,6 +277,8 @@ that is indexed by the real number $c \in {\bf R}$. Because we want to focus on stable solutions, we set $c=0$. 
+Equation {eq}`equation_1a` attributes **perfect foresight** about the money supply sequence to the holders of real balances. + We begin by assuming that the log of the money supply is **exogenous** in the sense that it is an autonomous process that does not feed back on the log of the price level. @@ -294,7 +302,7 @@ absolute values, and $G$ is a $1 \times n$ selector matrix. Variables appearing in the vector $x_t$ contain information that might help predict future values of the money supply. -We’ll take an example in which $x_t$ includes only $m_t$, +We’ll start with an example in which $x_t$ includes only $m_t$, possibly lagged values of $m$, and a constant. An example of such an $\{m_t\}$ process that fits info state space @@ -307,7 +315,9 @@ $$ where the zeros of the characteristic polynomial $(1 - \rho_1 z - \rho_2 z^2)$ are strictly greater than $1$ -in modulus +in modulus. + +(Please see {doc}`this ` QuantEcon lecture for more about characteristic polynomials and their role in solving linear difference equations.) We seek a stable or non-explosive solution of the difference equation {eq}`equation_1` that obeys the system comprised of {eq}`equation_1`-{eq}`equation_3`. @@ -315,7 +325,7 @@ obeys the system comprised of {eq}`equation_1`-{eq}`equation_3`. By stable or non-explosive, we mean that neither $m_t$ nor $p_t$ diverges as $t \rightarrow + \infty$. -This means that we are shutting down the term $c \lambda^{-t}$ in equation {eq}`equation_1a` above by setting $c=0$ +This requirees that we shut down the term $c \lambda^{-t}$ in equation {eq}`equation_1a` above by setting $c=0$ The solution we are after is @@ -378,8 +388,9 @@ A = np.array([[1, 0, 0], G = np.array([[0, 1, 0]]) ``` -The matrix $A$ has one eigenvalue equal to unity that is -associated with the $A_{11}$ component that captures a +The matrix $A$ has one eigenvalue equal to unity. + +It is associated with the $A_{11}$ component that captures a constant component of the state $x_t$. 
We can verify that the two eigenvalues of $A$ not associated with @@ -395,7 +406,7 @@ print(eigvals) (abs(eigvals) <= 1).all() ``` -Now let’s compute $F$ in formulas {eq}`equation_4` and {eq}`equation_5` +Now let’s compute $F$ in formulas {eq}`equation_4` and {eq}`equation_5`. ```{code-cell} python3 # compute the solution, i.e. forumula (3) @@ -443,7 +454,7 @@ plt.show() In the above graph, why is the log of the price level always less than the log of the money supply? -The answer is because +Because - according to equation {eq}`equation_2`, $p_t$ is a geometric weighted average of current and future values of $m_t$, and @@ -511,19 +522,19 @@ $$ F = (1-\lambda) (1 -\lambda \rho)^{-1} . $$ -and the log the log price level satisfies +so that the log the log price level satisfies $$ p_t = F m_t . $$ Please keep these formulas in mind as we investigate an alternative -route to and interpretation of the formula for $F$. +route to and interpretation of our formula for $F$. ## Another perspective Above, we imposed stability or non-explosiveness on the solution of the key difference equation {eq}`equation_1` -in Cagan's model by solving the unstable root $\lambda^{-1}$ forward. +in Cagan's model by solving the unstable root of the characteristic polynomial forward. To shed light on the mechanics involved in imposing stability on a solution of a potentially unstable system of linear difference equations @@ -558,7 +569,7 @@ $\lambda^{-1} > 1$. Because an eigenvalue of $H$ exceeds unity, if we iterate on equation {eq}`equation_9` starting from an arbitrary initial vector -$y_0 = \begin{bmatrix} m_0 \\ p_0 \end{bmatrix}$, we discover that +$y_0 = \begin{bmatrix} m_0 \\ p_0 \end{bmatrix}$ with $m_0 >0, p_0 >0$, we discover that in general absolute values of both components of $y_t$ diverge toward $+\infty$ as $t \rightarrow + \infty$. @@ -571,7 +582,7 @@ H = Q \Lambda Q^{-1} . 
$$ Here $\Lambda$ is a diagonal matrix of eigenvalues of $H$ -and $Q$ is a matrix whose columns are eigenvectors of the +and $Q$ is a matrix whose columns are eigenvectors associated with the corresponding eigenvalues. Note that @@ -590,7 +601,7 @@ For almost all initial vectors $y_0$, the presence of the eigenvalue $\lambda^{-1} > 1$ causes both components of $y_t$ to diverge in absolute value to $+\infty$. -To explore this outcome in more detail, we use the following +To explore this outcome in more detail, we can use the following transformation $$ @@ -609,7 +620,7 @@ Staring at this equation indicates that unless ```{math} :label: equation_11 -y^*_0 = \begin{bmatrix} y^*_{1,0} \cr 0 \end{bmatrix} , +y^*_0 = \begin{bmatrix} y^*_{1,0} \cr 0 \end{bmatrix} ``` the path of $y^*_t$ and therefore the paths of both components of @@ -629,12 +640,11 @@ $$ But note that since $y_0 = \begin{bmatrix} m_0 \cr p_0 \end{bmatrix}$ and $m_0$ -is given to us an an initial condition, it has to be $p_0$ that -does all the adjusting to satisfy this equation. +is given to us an an initial condition, $p_0$ has to do all the adjusting to satisfy this equation. Sometimes this situation is described by saying that while $m_0$ is truly a **state** variable, $p_0$ is a **jump** variable that -is free to adjust at $t=0$ in order to satisfy the equation. +must adjust at $t=0$ in order to satisfy the equation. Thus, in a nutshell the unique value of the vector $y_0$ for which the paths of $y_t$ do not diverge must have second component @@ -661,7 +671,7 @@ Q^{21} m_0 + Q^{22} p_0 = 0 where $Q^{ij}$ denotes the $(i,j)$ component of $Q^{-1}$. -Solving this equation for $p_0$ we find +Solving this equation for $p_0$, we find ```{math} :label: equation_13 @@ -669,7 +679,7 @@ Solving this equation for $p_0$ we find p_0 = - (Q^{22})^{-1} Q^{21} m_0. 
``` -This is the unique **stabilizing value** of $p_0$ as a function of +This is the unique **stabilizing value** of $p_0$ expressed as a function of $m_0$. ### Refining the formula @@ -706,7 +716,7 @@ So we can write p_0 = Q_{21} Q_{11}^{-1} m_0 . ``` -It can be verified that this formula replicates itself over time so that +It can be verified that this formula replicates itself over time in the sense that ```{math} :label: equation_15 @@ -729,7 +739,7 @@ Notice that if we set $A=\rho$ and $G=1$ in our earlier formula for $p_t$ we get $$ -p_t = G (I - \lambda A)^{-1} m_t = (1-\lambda) (1 - \lambda \rho)^{-1} m_t +p_t = G (I - \lambda A)^{-1} m_t = (1-\lambda) (1 - \lambda \rho)^{-1} m_t , $$ a formula that is equivalent with @@ -747,7 +757,7 @@ $$ ### Some remarks about feedback We have expressed {eq}`equation_8` in what superficially appears to be a form in -which $y_{t+1}$ feeds back on $y_t$. even though what we +which $y_{t+1}$ feeds back on $y_t$, even though what we actually want to represent is that the component $p_t$ feeds **forward** on $p_{t+1}$, and through it, on future $m_{t+j}$, $j = 0, 1, 2, \ldots$. @@ -767,9 +777,8 @@ level. ## Log money supply feeds back on log price level -The same pattern of eigenvalues splitting around unity, with one being -below unity and another greater than unity, sometimes continues to -prevail when there is *feedback* from the log price level to the log +An arrangement of eigenvalues that split around unity, with one being +below unity and another being greater than unity, sometimes prevails when there is *feedback* from the log price level to the log money supply. Let the feedback rule be @@ -780,20 +789,18 @@ Let the feedback rule be m_{t+1} = \rho m_t + \delta p_t ``` -where $\rho \in (0,1)$ as before and where we shall now allow +where $\rho \in (0,1)$ and where we shall now allow $\delta \neq 0$. 
-However, -$\delta$ cannot be too large if things are to fit together as we -wish to deliver a stable system for some initial value $p_0$ that we want to determine uniquely. -. +**Warning:** If things are to fit together as we +wish to deliver a stable system for some initial value $p_0$ that we want to determine uniquely, $\delta$ cannot be too large. The forward-looking equation {eq}`equation_1` continues to describe equality between the demand and supply of money. We assume that equations {eq}`equation_1` and {eq}`equation_16` govern $y_t \equiv \begin{bmatrix} m_t \cr p_t \end{bmatrix}$ for -$t \geq 0$ +$t \geq 0$. The transition matrix $H$ in the law of motion @@ -811,12 +818,12 @@ We take $m_0$ as a given intial condition and as before seek an initial value $p_0$ that stabilizes the system in the sense that $y_t$ converges as $t \rightarrow + \infty$. -Our approach is identical with that followed above and is based on an +Our approach is identical with the one followed above and is based on an eigenvalue decomposition in which, cross our fingers, one eigenvalue exceeds unity and the other is less than unity in absolute value. When $\delta \neq 0$ as we now assume, the eigenvalues of -$H$ are no longer $\rho \in (0,1)$ and +$H$ will no longer be $\rho \in (0,1)$ and $\lambda^{-1} > 1$ We’ll just calculate them and apply the same algorithm that we used @@ -888,7 +895,9 @@ H_eigvals(δ=0.2) We want to study systems in which one eigenvalue exceeds unity in modulus while the other is less than unity in modulus, so we avoid -values of $\delta$ that are too large +values of $\delta$ that are too. + +That is, we want to avoid too much positive feedback from $p_t$ to $m_{t+1}$. 
```{code-cell} python3 def magic_p0(m0, ρ=.9, λ=.5, δ=0): @@ -954,18 +963,18 @@ magic_p0(1, δ=0.2) ## Big $P$, little $p$ interpretation -It is helpful to view our solutions with feedback from the price level or inflation to money or the rate of money -creation in terms of the Big $K$, little $k$ idea discussed in {doc}`Rational Expectations Models ` +It is helpful to view our solutions of difference equations having feedback from the price level or inflation to money or the rate of money +creation in terms of the Big $K$, little $k$ idea discussed in {doc}`Rational Expectations Models `. This will help us sort out what is taken as given by the decision makers who use the difference equation {eq}`equation_2` to determine $p_t$ as a function of their forecasts of future values of $m_t$. Let's write the stabilizing solution that we have computed using the eigenvector decomposition of $H$ as -$P_t = F^* m_t$ where +$P_t = F^* m_t$, where $$ -F^* = Q_{21} Q_{11}^{-1} +F^* = Q_{21} Q_{11}^{-1} . $$ Then from $P_{t+1} = F^* m_{t+1}$ and $m_{t+1} = \rho m_t + \delta P_t$ we can deduce the recursion $P_{t+1} = F^* \rho m_t + F^* \delta P_t$ and create the stacked system @@ -983,7 +992,7 @@ $$ where $x_t = \begin{bmatrix} m_t \cr P_t \end{bmatrix}$. -Then apply formula {eq}`equation_5` for $F$ to deduce that +Apply formula {eq}`equation_5` for $F$ to deduce that $$ p_t = F \begin{bmatrix} m_t \cr P_t \end{bmatrix} = F \begin{bmatrix} m_t \cr F^* m_t \end{bmatrix} @@ -995,13 +1004,13 @@ $$ p_t = \begin{bmatrix} F_1 & F_2 \end{bmatrix} \begin{bmatrix} m_t \cr F^* m_t \end{bmatrix} = F_1 m_t + F_2 F^* m_t $$ -so that we expect to have +so that we can anticipate that $$ F^* = F_1 + F_2 F^* $$ -We verify this equality in the next block of Python code that implements the following +We shall verify this equality in the next block of Python code that implements the following computations. 1. 
For the system with $\delta\neq 0$ so that there is feedback, @@ -1015,7 +1024,7 @@ computations. $\begin{bmatrix} F_1 & F_2 \end{bmatrix} \equiv F$ from equation {eq}`equation_5` above. 1. We compute $F_1 + F_2 F^*$ and compare it - with $F^*$ and verify equality. + with $F^*$ and check for the anticipated equality. ```{code-cell} python3 # set parameters @@ -1052,15 +1061,15 @@ Compare $F^*$ with $F_1 + F_2 F^*$ F_check[0] + F_check[1] * F_star, F_star ``` -## Fun with Sympy code +## Fun with SymPy code -This section is a small gift for readers who have made it this far. +This section is a gift for readers who have made it this far. -It puts Sympy to work on our model. +It puts SymPy to work on our model. -Thus, we use Sympy to compute some of the key objects comprising the eigenvector decomposition of $H$. +Thus, we use Sympy to compute some key objects comprising the eigenvector decomposition of $H$. -$H$ with nonzero $\delta$. +We start by generating an $H$ with nonzero $\delta$. ```{code-cell} python3 λ, δ, ρ = symbols('λ, δ, ρ') @@ -1082,7 +1091,7 @@ H1.eigenvals() H1.eigenvects() ``` -$H$ with $\delta$ being zero. +Now let's compute $H$ when $\delta$ is zero. ```{code-cell} python3 H2 = Matrix([[ρ,0], [- (1 - λ) / λ, λ ** -1]]) @@ -1100,16 +1109,16 @@ H2.eigenvals() H2.eigenvects() ``` -Below we do induce sympy to do the following fun things for us analytically: +Below we do induce SymPy to do the following fun things for us analytically: 1. We compute the matrix $Q$ whose first column is the eigenvector associated with $\rho$. and whose second column is the eigenvector associated with $\lambda^{-1}$. -1. We use sympy to compute the inverse $Q^{-1}$ of $Q$ +1. We use SymPy to compute the inverse $Q^{-1}$ of $Q$ (both in symbols). -1. We use sympy to compute $Q_{21} Q_{11}^{-1}$ (in symbols). +1. We use SymPy to compute $Q_{21} Q_{11}^{-1}$ (in symbols). 1. 
Where $Q^{ij}$ denotes the $(i,j)$ component of - $Q^{-1}$, weighted use sympy to compute + $Q^{-1}$, we use SymPy to compute $- (Q^{22})^{-1} Q^{21}$ (again in symbols) ```{code-cell} python3 diff --git a/source/rst/samuelson.md b/lectures/samuelson.md similarity index 100% rename from source/rst/samuelson.md rename to lectures/samuelson.md diff --git a/source/rst/scalar_dynam.md b/lectures/scalar_dynam.md similarity index 100% rename from source/rst/scalar_dynam.md rename to lectures/scalar_dynam.md diff --git a/source/rst/schelling.md b/lectures/schelling.md similarity index 100% rename from source/rst/schelling.md rename to lectures/schelling.md diff --git a/source/rst/short_path.md b/lectures/short_path.md similarity index 100% rename from source/rst/short_path.md rename to lectures/short_path.md diff --git a/source/rst/sir_model.md b/lectures/sir_model.md similarity index 100% rename from source/rst/sir_model.md rename to lectures/sir_model.md diff --git a/source/rst/time_series_with_matrices.md b/lectures/time_series_with_matrices.md similarity index 100% rename from source/rst/time_series_with_matrices.md rename to lectures/time_series_with_matrices.md diff --git a/source/rst/troubleshooting.md b/lectures/troubleshooting.md similarity index 100% rename from source/rst/troubleshooting.md rename to lectures/troubleshooting.md diff --git a/source/rst/uncertainty_traps.md b/lectures/uncertainty_traps.md similarity index 100% rename from source/rst/uncertainty_traps.md rename to lectures/uncertainty_traps.md diff --git a/source/rst/wealth_dynamics.md b/lectures/wealth_dynamics.md similarity index 100% rename from source/rst/wealth_dynamics.md rename to lectures/wealth_dynamics.md diff --git a/source/rst/zreferences.md b/lectures/zreferences.md similarity index 100% rename from source/rst/zreferences.md rename to lectures/zreferences.md diff --git a/source/rst/404.md b/source/rst/404.md deleted file mode 100644 index ee91c9f2d..000000000 --- a/source/rst/404.md 
+++ /dev/null @@ -1,30 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -(404)= -# Page Not Found - -```{raw} html - -``` - -We couldn’t find the page you were looking for. - -Please check the URL or try a link below: - -* [Home](/) -* [QuantEcon](https://quantecon.org/) -* [Quantitative Economics with Python](https://python.quantecon.org/) -* [Quantitative Economics with Julia](https://julia.quantecon.org/) -* [QuantEcon DataScience](https://datascience.quantecon.org/) -* [Forum](http://discourse.quantecon.org/) -* [Contact us](mailto:contact@quantecon.org) - diff --git a/source/rst/index.md b/source/rst/index.md deleted file mode 100644 index 38100851b..000000000 --- a/source/rst/index.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -(index)= -# Quantitative Economics with Python - -```{toctree} -:hidden: - -index_toc -``` - -```{raw} html -
-

Quantitative Economics with Python

-
-
-
-

This website presents a set of lectures on quantitative economic modeling, designed and written by Thomas J. Sargent and John Stachurski.

-

Last compiled:
- View source | - View commits | See all contributors

-
- -
- -
- -``` - diff --git a/source/rst/index_asset_pricing.md b/source/rst/index_asset_pricing.md deleted file mode 100644 index 9a83e6316..000000000 --- a/source/rst/index_asset_pricing.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -(asset_pricing)= -```{raw} html - -``` - -# Asset Pricing and Finance - -```{only} html -## Lectures -``` - -```{toctree} -:maxdepth: 2 - -markov_asset -harrison_kreps -``` - diff --git a/source/rst/index_data_and_empirics.md b/source/rst/index_data_and_empirics.md deleted file mode 100644 index 27e87c58b..000000000 --- a/source/rst/index_data_and_empirics.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -```{raw} html - -``` - -# Data and Empirics - -This part of the course provides a set of lectures focused on Data and -Empirics using Python - -```{only} html -## Lectures -``` - -```{toctree} -:maxdepth: 2 - -pandas_panel -ols -mle -``` - diff --git a/source/rst/index_information.md b/source/rst/index_information.md deleted file mode 100644 index 4227087da..000000000 --- a/source/rst/index_information.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -```{raw} html - -``` - -# Information - -This section of the course contains foundational models for dynamic economic -modeling. Most are single agent problems that take the activities of other -agents as given. Later we will look at full equilibrium problems. 
- -```{only} html -## Lectures -``` - -```{toctree} -:maxdepth: 2 - -odu -likelihood_ratio_process -wald_friedman -exchangeable -likelihood_bayes -navy_captain -``` - diff --git a/source/rst/index_intro_dynam.md b/source/rst/index_intro_dynam.md deleted file mode 100644 index f516dca51..000000000 --- a/source/rst/index_intro_dynam.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -```{raw} html - -``` - -# Introduction to Dynamics - -This section of the course contains foundational models for dynamic economic -modeling. Most are single agent problems that take the activities of other -agents as given. Later we will look at full equilibrium problems. - -```{only} html -## Lectures -``` - -```{toctree} -:maxdepth: 2 - -scalar_dynam -ar1_processes -finite_markov -inventory_dynamics -linear_models -samuelson -kesten_processes -wealth_dynamics -kalman -short_path -cass_koopmans_1 -cass_koopmans_2 -``` - diff --git a/source/rst/index_lq_control.md b/source/rst/index_lq_control.md deleted file mode 100644 index e975822b8..000000000 --- a/source/rst/index_lq_control.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -(lq_dynamic_programming)= -```{raw} html - -``` - -# LQ Control - -```{only} html -## Lectures -``` - -```{toctree} -:maxdepth: 2 - -lqcontrol -perm_income -perm_income_cons -lq_inventories -``` - diff --git a/source/rst/index_multi_agent_models.md b/source/rst/index_multi_agent_models.md deleted file mode 100644 index 30af84c08..000000000 --- a/source/rst/index_multi_agent_models.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - 
-(multi_agent_models)= -```{raw} html - -``` - -# Multiple Agent Models - -These lectures look at important economic models that also illustrate common -equilibrium concepts. - -```{only} html -## Lectures -``` - -```{toctree} -:maxdepth: 2 - -schelling -lake_model -rational_expectations -re_with_feedback -markov_perf -uncertainty_traps -aiyagari -``` - diff --git a/source/rst/index_savings_growth.md b/source/rst/index_savings_growth.md deleted file mode 100644 index c858037bb..000000000 --- a/source/rst/index_savings_growth.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -```{raw} html - -``` - -# Consumption, Savings and Growth - -This section of the course contains foundational models for dynamic economic -modeling. Most are single agent problems that take the activities of other -agents as given. Later we will look at full equilibrium problems. - -```{only} html -## Lectures -``` - -```{toctree} -:maxdepth: 2 - -cake_eating_problem -cake_eating_numerical -optgrowth -optgrowth_fast -coleman_policy_iter -egm_policy_iter -ifp -ifp_advanced -``` - diff --git a/source/rst/index_search.md b/source/rst/index_search.md deleted file mode 100644 index e4fa0a506..000000000 --- a/source/rst/index_search.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -```{raw} html - -``` - -# Search - -This section of the course contains foundational models for dynamic economic -modeling. Most are single agent problems that take the activities of other -agents as given. Later we will look at full equilibrium problems. 
- -```{only} html -## Lectures -``` - -```{toctree} -:maxdepth: 2 - -mccall_model -mccall_model_with_separation -mccall_fitted_vfi -mccall_correlated -career -jv -``` - diff --git a/source/rst/index_toc.md b/source/rst/index_toc.md deleted file mode 100644 index 8f580ebf6..000000000 --- a/source/rst/index_toc.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -(toc)= -```{raw} html -

Powered by NumFOCUS logo

-``` - -```{only} html -# Table of Contents -``` - -```{toctree} ---- -maxdepth: 2 -titlesonly: ---- - -about_lectures -index_tools_and_techniques -index_intro_dynam -index_search -index_savings_growth -index_information -index_lq_control -index_multi_agent_models -index_asset_pricing -index_data_and_empirics -zreferences -``` - -```{toctree} -:hidden: - -404 -search -status -troubleshooting -``` - -```{image} http://assets.quantecon.org/img/banner.png ---- -align: center -scale: 30 ---- -``` - -```{only} latex -Acknowledgements: These lectures have benefitted greatly from comments and -suggestion from our colleagues, students and friends. Special thanks go to -Anmol Bhandari, Long Bui, Jeong-Hun Choi, Chase Coleman, David Evans, Shunsuke Hori, -Chenghan Hou, Doc-Jin Jang, Spencer Lyon, Qingyin Ma, Akira Matsushita, -Matthew McKay, Tomohito Okabe, Alex Olssen, Nathan Palmer and Yixiao Zhou. -``` - diff --git a/source/rst/index_tools_and_techniques.md b/source/rst/index_tools_and_techniques.md deleted file mode 100644 index c07f88982..000000000 --- a/source/rst/index_tools_and_techniques.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -(tools_and_techniques)= -```{raw} html - -``` - -# Tools and Techniques - -This section of the course contains foundational mathematical and statistical -tools and techniques - -```{only} html -## Lectures -``` - -```{toctree} -:maxdepth: 2 - -geom_series -multi_hyper -sir_model -linear_algebra -complex_and_trig -lln_clt -heavy_tails -multivariate_normal -time_series_with_matrices -``` - diff --git a/source/rst/search.md b/source/rst/search.md deleted file mode 100644 index 2fd5a536a..000000000 --- a/source/rst/search.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: 
python3 ---- - -(search)= -# Search - -```{raw} html - - -
-``` - diff --git a/source/rst/status.md b/source/rst/status.md deleted file mode 100644 index 0e6b56711..000000000 --- a/source/rst/status.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -(status)= -# Lecture Status - -```{raw} html - -

The badges below show which lectures are currently passing their execution test (i.e., executing without errors).

-

The lecture code checker was last run: N/A

-
-
-
-

The code checker is run on a t2.small Amazon EC2 instance. This is an instance with a single CPU and 2 GiB of Memory.

-

You should achieve faster run times on many common laptops and desktops.

-``` - diff --git a/source/rst/wald_friedman.md b/source/rst/wald_friedman.md deleted file mode 100644 index f19ef9ea2..000000000 --- a/source/rst/wald_friedman.md +++ /dev/null @@ -1,943 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Python 3 - language: python - name: python3 ---- - -(wald_friedman)= -```{raw} html - -``` - -# {index}`A Problem that Stumped Milton Friedman ` - -(and that Abraham Wald solved by inventing sequential analysis) - -```{index} single: Models; Sequential analysis -``` - -```{contents} Contents -:depth: 2 -``` - -In addition to what's in Anaconda, this lecture will need the following libraries: - -```{code-cell} ipython ---- -tags: [hide-output] ---- -!conda install -y quantecon -!pip install interpolation -``` - -## Overview - -This lecture describes a statistical decision problem encountered by Milton -Friedman and W. Allen Wallis during World War II when they were analysts at -the U.S. Government's Statistical Research Group at Columbia University. - -This problem led Abraham Wald {cite}`Wald47` to formulate **sequential analysis**, -an approach to statistical decision problems intimately related to dynamic programming. - -In this lecture, we apply dynamic programming algorithms to Friedman and Wallis and Wald's problem. 
- -Key ideas in play will be: - -- Bayes' Law -- Dynamic programming -- Type I and type II statistical errors - - a type I error occurs when you reject a null hypothesis that is true - - a type II error is when you accept a null hypothesis that is false -- Abraham Wald's **sequential probability ratio test** -- The **power** of a statistical test -- The **critical region** of a statistical test -- A **uniformly most powerful test** - -We'll begin with some imports: - -```{code-cell} ipython -import numpy as np -import matplotlib.pyplot as plt -from numba import jit, prange, jitclass, float64, int64 -from interpolation import interp -from math import gamma -``` - -This lecture uses ideas studied in {doc}`this lecture `, {doc}`this lecture `. -and {doc}`this lecture `. - -## Origin of the Problem - -On pages 137-139 of his 1998 book *Two Lucky People* with Rose Friedman {cite}`Friedman98`, -Milton Friedman described a problem presented to him and Allen Wallis -during World War II, when they worked at the US Government's -Statistical Research Group at Columbia University. - -Let's listen to Milton Friedman tell us what happened - -> In order to understand the story, it is necessary to have an idea of a -> simple statistical problem, and of the standard procedure for dealing -> with it. The actual problem out of which sequential analysis grew will -> serve. The Navy has two alternative designs (say A and B) for a -> projectile. It wants to determine which is superior. To do so it -> undertakes a series of paired firings. On each round, it assigns the -> value 1 or 0 to A accordingly as its performance is superior or inferior -> to that of B and conversely 0 or 1 to B. The Navy asks the statistician -> how to conduct the test and how to analyze the results. 
- -> The standard statistical answer was to specify a number of firings (say -> 1,000) and a pair of percentages (e.g., 53% and 47%) and tell the client -> that if A receives a 1 in more than 53% of the firings, it can be -> regarded as superior; if it receives a 1 in fewer than 47%, B can be -> regarded as superior; if the percentage is between 47% and 53%, neither -> can be so regarded. - -> When Allen Wallis was discussing such a problem with (Navy) Captain -> Garret L. Schyler, the captain objected that such a test, to quote from -> Allen's account, may prove wasteful. If a wise and seasoned ordnance -> officer like Schyler were on the premises, he would see after the first -> few thousand or even few hundred [rounds] that the experiment need not -> be completed either because the new method is obviously inferior or -> because it is obviously superior beyond what was hoped for -> $> \ldots$> . - -Friedman and Wallis struggled with the problem but, after realizing that -they were not able to solve it, described the problem to Abraham Wald. - -That started Wald on the path that led him to *Sequential Analysis* {cite}`Wald47`. - -We'll formulate the problem using dynamic programming. - -## A Dynamic Programming Approach - -The following presentation of the problem closely follows Dmitri -Berskekas's treatment in **Dynamic Programming and Stochastic Control** {cite}`Bertekas75`. - -A decision-maker observes a sequence of draws of a random variable $z$. - -He (or she) wants to know which of two probability distributions $f_0$ or $f_1$ governs $z$. - -Conditional on knowing that successive observations are drawn from distribution $f_0$, the sequence of -random variables is independently and identically distributed (IID). - -Conditional on knowing that successive observations are drawn from distribution $f_1$, the sequence of -random variables is also independently and identically distributed (IID). 
- -But the observer does not know which of the two distributions generated the sequence. - -For reasons explained [Exchangeability and Bayesian Updating](https://python.quantecon.org/exchangeable.html), this means that the sequence is not -IID and that the observer has something to learn, even though he knows both $f_0$ and $f_1$. - -After a number of draws, also to be determined, he makes a decision about -which of the distributions is generating the draws he observes. - -He starts with prior - -$$ -\pi_{-1} = -\mathbb P \{ f = f_0 \mid \textrm{ no observations} \} \in (0, 1) -$$ - -After observing $k+1$ observations $z_k, z_{k-1}, \ldots, z_0$, he updates this value to - -$$ -\pi_k = \mathbb P \{ f = f_0 \mid z_k, z_{k-1}, \ldots, z_0 \} -$$ - -which is calculated recursively by applying Bayes' law: - -$$ -\pi_{k+1} = \frac{ \pi_k f_0(z_{k+1})}{ \pi_k f_0(z_{k+1}) + (1-\pi_k) f_1 (z_{k+1}) }, -\quad k = -1, 0, 1, \ldots -$$ - -After observing $z_k, z_{k-1}, \ldots, z_0$, the decision-maker believes -that $z_{k+1}$ has probability distribution - -$$ -f_{{\pi}_k} (v) = \pi_k f_0(v) + (1-\pi_k) f_1 (v) -$$ - -This is a mixture of distributions $f_0$ and $f_1$, with the weight -on $f_0$ being the posterior probability that $f = f_0$ [^f1]. - -To help illustrate this kind of distribution, let's inspect some mixtures of beta distributions. - -The density of a beta probability distribution with parameters $a$ and $b$ is - -$$ -f(z; a, b) = \frac{\Gamma(a+b) z^{a-1} (1-z)^{b-1}}{\Gamma(a) \Gamma(b)} -\quad \text{where} \quad -\Gamma(t) := \int_{0}^{\infty} x^{t-1} e^{-x} dx -$$ - -The next figure shows two beta distributions in the top panel. 
- -The bottom panel presents mixtures of these distributions, with various mixing probabilities $\pi_k$ - -```{code-cell} python3 -@jit(nopython=True) -def p(x, a, b): - r = gamma(a + b) / (gamma(a) * gamma(b)) - return r * x**(a-1) * (1 - x)**(b-1) - -f0 = lambda x: p(x, 1, 1) -f1 = lambda x: p(x, 9, 9) -grid = np.linspace(0, 1, 50) - -fig, axes = plt.subplots(2, figsize=(10, 8)) - -axes[0].set_title("Original Distributions") -axes[0].plot(grid, f0(grid), lw=2, label="$f_0$") -axes[0].plot(grid, f1(grid), lw=2, label="$f_1$") - -axes[1].set_title("Mixtures") -for π in 0.25, 0.5, 0.75: - y = π * f0(grid) + (1 - π) * f1(grid) - axes[1].plot(y, lw=2, label=f"$\pi_k$ = {π}") - -for ax in axes: - ax.legend() - ax.set(xlabel="$z$ values", ylabel="probability of $z_k$") - -plt.tight_layout() -plt.show() -``` - -### Losses and Costs - -After observing $z_k, z_{k-1}, \ldots, z_0$, the decision-maker -chooses among three distinct actions: - -- He decides that $f = f_0$ and draws no more $z$'s -- He decides that $f = f_1$ and draws no more $z$'s -- He postpones deciding now and instead chooses to draw a - $z_{k+1}$ - -Associated with these three actions, the decision-maker can suffer three -kinds of losses: - -- A loss $L_0$ if he decides $f = f_0$ when actually - $f=f_1$ -- A loss $L_1$ if he decides $f = f_1$ when actually - $f=f_0$ -- A cost $c$ if he postpones deciding and chooses instead to draw - another $z$ - -### Digression on Type I and Type II Errors - -If we regard $f=f_0$ as a null hypothesis and $f=f_1$ as an alternative hypothesis, -then $L_1$ and $L_0$ are losses associated with two types of statistical errors - -- a type I error is an incorrect rejection of a true null hypothesis (a "false positive") -- a type II error is a failure to reject a false null hypothesis (a "false negative") - -So when we treat $f=f_0$ as the null hypothesis - -- We can think of $L_1$ as the loss associated with a type I - error. 
-- We can think of $L_0$ as the loss associated with a type II - error. - -### Intuition - -Let's try to guess what an optimal decision rule might look like before we go further. - -Suppose at some given point in time that $\pi$ is close to 1. - -Then our prior beliefs and the evidence so far point strongly to $f = f_0$. - -If, on the other hand, $\pi$ is close to 0, then $f = f_1$ is strongly favored. - -Finally, if $\pi$ is in the middle of the interval $[0, 1]$, then we have little information in either direction. - -This reasoning suggests a decision rule such as the one shown in the figure - -```{figure} /_static/lecture_specific/wald_friedman/wald_dec_rule.png - -``` - -As we'll see, this is indeed the correct form of the decision rule. - -The key problem is to determine the threshold values $\alpha, \beta$, -which will depend on the parameters listed above. - -You might like to pause at this point and try to predict the impact of a -parameter such as $c$ or $L_0$ on $\alpha$ or $\beta$. - -### A Bellman Equation - -Let $J(\pi)$ be the total loss for a decision-maker with current belief $\pi$ who chooses optimally. - -With some thought, you will agree that $J$ should satisfy the Bellman equation - -```{math} -:label: new1 - -J(\pi) = - \min - \left\{ - (1-\pi) L_0, \; \pi L_1, \; - c + \mathbb E [ J (\pi') ] - \right\} -``` - -where $\pi'$ is the random variable defined by - -$$ -\pi' = \kappa(z', \pi) = \frac{ \pi f_0(z')}{ \pi f_0(z') + (1-\pi) f_1 (z') } -$$ - -when $\pi$ is fixed and $z'$ is drawn from the current best guess, which is the distribution $f$ defined by - -$$ -f_{\pi}(v) = \pi f_0(v) + (1-\pi) f_1 (v) -$$ - -In the Bellman equation, minimization is over three actions: - -1. Accept the hypothesis that $f = f_0$ -1. Accept the hypothesis that $f = f_1$ -1. 
Postpone deciding and draw again - -We can represent the Bellman equation as - -```{math} -:label: optdec - -J(\pi) = -\min \left\{ (1-\pi) L_0, \; \pi L_1, \; h(\pi) \right\} -``` - -where $\pi \in [0,1]$ and - -- $(1-\pi) L_0$ is the expected loss associated with accepting - $f_0$ (i.e., the cost of making a type II error). -- $\pi L_1$ is the expected loss associated with accepting - $f_1$ (i.e., the cost of making a type I error). -- $h(\pi) := c + \mathbb E [J(\pi')]$ the continuation value; i.e., - the expected cost associated with drawing one more $z$. - -The optimal decision rule is characterized by two numbers $\alpha, \beta \in (0,1) \times (0,1)$ that satisfy - -$$ -(1- \pi) L_0 < \min \{ \pi L_1, c + \mathbb E [J(\pi')] \} \textrm { if } \pi \geq \alpha -$$ - -and - -$$ -\pi L_1 < \min \{ (1-\pi) L_0, c + \mathbb E [J(\pi')] \} \textrm { if } \pi \leq \beta -$$ - -The optimal decision rule is then - -$$ -\begin{aligned} -\textrm { accept } f=f_0 \textrm{ if } \pi \geq \alpha \\ -\textrm { accept } f=f_1 \textrm{ if } \pi \leq \beta \\ -\textrm { draw another } z \textrm{ if } \beta \leq \pi \leq \alpha -\end{aligned} -$$ - -Our aim is to compute the value function $J$, and from it the associated cutoffs $\alpha$ -and $\beta$. - -To make our computations simpler, using {eq}`optdec`, we can write the continuation value $h(\pi)$ as - -```{math} -:label: optdec2 - -\begin{aligned} -h(\pi) &= c + \mathbb E [J(\pi')] \\ -&= c + \mathbb E_{\pi'} \min \{ (1 - \pi') L_0, \pi' L_1, h(\pi') \} \\ -&= c + \int \min \{ (1 - \kappa(z', \pi) ) L_0, \kappa(z', \pi) L_1, h(\kappa(z', \pi) ) \} f_\pi (z') dz' -\end{aligned} -``` - -The equality - -```{math} -:label: funceq - -h(\pi) = -c + \int \min \{ (1 - \kappa(z', \pi) ) L_0, \kappa(z', \pi) L_1, h(\kappa(z', \pi) ) \} f_\pi (z') dz' -``` - -can be understood as a functional equation, where $h$ is the unknown. 
- -Using the functional equation, {eq}`funceq`, for the continuation value, we can back out -optimal choices using the RHS of {eq}`optdec`. - -This functional equation can be solved by taking an initial guess and iterating -to find the fixed point. - -In other words, we iterate with an operator $Q$, where - -$$ -Q h(\pi) = -c + \int \min \{ (1 - \kappa(z', \pi) ) L_0, \kappa(z', \pi) L_1, h(\kappa(z', \pi) ) \} f_\pi (z') dz' -$$ - -## Implementation - -First, we will construct a `jitclass` to store the parameters of the model - -```{code-cell} python3 -wf_data = [('a0', float64), # Parameters of beta distributions - ('b0', float64), - ('a1', float64), - ('b1', float64), - ('c', float64), # Cost of another draw - ('π_grid_size', int64), - ('L0', float64), # Cost of selecting f0 when f1 is true - ('L1', float64), # Cost of selecting f1 when f0 is true - ('π_grid', float64[:]), - ('mc_size', int64), - ('z0', float64[:]), - ('z1', float64[:])] -``` - -```{code-cell} python3 -@jitclass(wf_data) -class WaldFriedman: - - def __init__(self, - c=1.25, - a0=1, - b0=1, - a1=3, - b1=1.2, - L0=25, - L1=25, - π_grid_size=200, - mc_size=1000): - - self.a0, self.b0 = a0, b0 - self.a1, self.b1 = a1, b1 - self.c, self.π_grid_size = c, π_grid_size - self.L0, self.L1 = L0, L1 - self.π_grid = np.linspace(0, 1, π_grid_size) - self.mc_size = mc_size - - self.z0 = np.random.beta(a0, b0, mc_size) - self.z1 = np.random.beta(a1, b1, mc_size) - - def f0(self, x): - - return p(x, self.a0, self.b0) - - def f1(self, x): - - return p(x, self.a1, self.b1) - - def f0_rvs(self): - return np.random.beta(self.a0, self.b0) - - def f1_rvs(self): - return np.random.beta(self.a1, self.b1) - - def κ(self, z, π): - """ - Updates π using Bayes' rule and the current observation z - """ - - f0, f1 = self.f0, self.f1 - - π_f0, π_f1 = π * f0(z), (1 - π) * f1(z) - π_new = π_f0 / (π_f0 + π_f1) - - return π_new -``` - -As in the {doc}`optimal growth lecture `, to approximate a continuous value function - -* We 
iterate at a finite grid of possible values of $\pi$. -* When we evaluate $\mathbb E[J(\pi')]$ between grid points, we use linear interpolation. - -We define the operator function `Q` below. - -```{code-cell} python3 -@jit(nopython=True, parallel=True) -def Q(h, wf): - - c, π_grid = wf.c, wf.π_grid - L0, L1 = wf.L0, wf.L1 - z0, z1 = wf.z0, wf.z1 - mc_size = wf.mc_size - - κ = wf.κ - - h_new = np.empty_like(π_grid) - h_func = lambda p: interp(π_grid, h, p) - - for i in prange(len(π_grid)): - π = π_grid[i] - - # Find the expected value of J by integrating over z - integral_f0, integral_f1 = 0, 0 - for m in range(mc_size): - π_0 = κ(z0[m], π) # Draw z from f0 and update π - integral_f0 += min((1 - π_0) * L0, π_0 * L1, h_func(π_0)) - - π_1 = κ(z1[m], π) # Draw z from f1 and update π - integral_f1 += min((1 - π_1) * L0, π_1 * L1, h_func(π_1)) - - integral = (π * integral_f0 + (1 - π) * integral_f1) / mc_size - - h_new[i] = c + integral - - return h_new -``` - -To solve the model, we will iterate using `Q` to find the fixed point - -```{code-cell} python3 -@jit(nopython=True) -def solve_model(wf, tol=1e-4, max_iter=1000): - """ - Compute the continuation value function - - * wf is an instance of WaldFriedman - """ - - # Set up loop - h = np.zeros(len(wf.π_grid)) - i = 0 - error = tol + 1 - - while i < max_iter and error > tol: - h_new = Q(h, wf) - error = np.max(np.abs(h - h_new)) - i += 1 - h = h_new - - if i == max_iter: - print("Failed to converge!") - - return h_new -``` - -## Analysis - -Let's inspect the model's solutions. 
- -We will be using the default parameterization with distributions like so - -```{code-cell} python3 -wf = WaldFriedman() - -fig, ax = plt.subplots(figsize=(10, 6)) -ax.plot(wf.f0(wf.π_grid), label="$f_0$") -ax.plot(wf.f1(wf.π_grid), label="$f_1$") -ax.set(ylabel="probability of $z_k$", xlabel="$k$", title="Distributions") -ax.legend() - -plt.show() -``` - -### Value Function - -To solve the model, we will call our `solve_model` function - -```{code-cell} python3 -h_star = solve_model(wf) # Solve the model -``` - -We will also set up a function to compute the cutoffs $\alpha$ and $\beta$ -and plot these on our value function plot - -```{code-cell} python3 -@jit(nopython=True) -def find_cutoff_rule(wf, h): - - """ - This function takes a continuation value function and returns the - corresponding cutoffs of where you transition between continuing and - choosing a specific model - """ - - π_grid = wf.π_grid - L0, L1 = wf.L0, wf.L1 - - # Evaluate cost at all points on grid for choosing a model - payoff_f0 = (1 - π_grid) * L0 - payoff_f1 = π_grid * L1 - - # The cutoff points can be found by differencing these costs with - # The Bellman equation (J is always less than or equal to p_c_i) - β = π_grid[np.searchsorted( - payoff_f1 - np.minimum(h, payoff_f0), - 1e-10) - - 1] - α = π_grid[np.searchsorted( - np.minimum(h, payoff_f1) - payoff_f0, - 1e-10) - - 1] - - return (β, α) - -β, α = find_cutoff_rule(wf, h_star) -cost_L0 = (1 - wf.π_grid) * wf.L0 -cost_L1 = wf.π_grid * wf.L1 - -fig, ax = plt.subplots(figsize=(10, 6)) - -ax.plot(wf.π_grid, h_star, label='continuation value') -ax.plot(wf.π_grid, cost_L1, label='choose f1') -ax.plot(wf.π_grid, cost_L0, label='choose f0') -ax.plot(wf.π_grid, - np.amin(np.column_stack([h_star, cost_L0, cost_L1]),axis=1), - lw=15, alpha=0.1, color='b', label='minimum cost') - -ax.annotate(r"$\beta$", xy=(β + 0.01, 0.5), fontsize=14) -ax.annotate(r"$\alpha$", xy=(α + 0.01, 0.5), fontsize=14) - -plt.vlines(β, 0, β * wf.L0, linestyle="--") 
-plt.vlines(α, 0, (1 - α) * wf.L1, linestyle="--") - -ax.set(xlim=(0, 1), ylim=(0, 0.5 * max(wf.L0, wf.L1)), ylabel="cost", - xlabel="$\pi$", title="Value function") - -plt.legend(borderpad=1.1) -plt.show() -``` - -The value function equals $\pi L_1$ for $\pi \leq \beta$, and $(1-\pi )L_0$ for $\pi -\geq \alpha$. - -The slopes of the two linear pieces of the value function are determined by $L_1$ -and $- L_0$. - -The value function is smooth in the interior region, where the posterior -probability assigned to $f_0$ is in the indecisive region $\pi \in (\beta, \alpha)$. - -The decision-maker continues to sample until the probability that he attaches to -model $f_0$ falls below $\beta$ or above $\alpha$. - -### Simulations - -The next figure shows the outcomes of 500 simulations of the decision process. - -On the left is a histogram of the stopping times, which equal the number of draws of $z_k$ required to make a decision. - -The average number of draws is around 6.6. - -On the right is the fraction of correct decisions at the stopping time. 
- -In this case, the decision-maker is correct 80% of the time - -```{code-cell} python3 -def simulate(wf, true_dist, h_star, π_0=0.5): - - """ - This function takes an initial condition and simulates until it - stops (when a decision is made) - """ - - f0, f1 = wf.f0, wf.f1 - f0_rvs, f1_rvs = wf.f0_rvs, wf.f1_rvs - π_grid = wf.π_grid - κ = wf.κ - - if true_dist == "f0": - f, f_rvs = wf.f0, wf.f0_rvs - elif true_dist == "f1": - f, f_rvs = wf.f1, wf.f1_rvs - - # Find cutoffs - β, α = find_cutoff_rule(wf, h_star) - - # Initialize a couple of useful variables - decision_made = False - π = π_0 - t = 0 - - while decision_made is False: - # Maybe should specify which distribution is correct one so that - # the draws come from the "right" distribution - z = f_rvs() - t = t + 1 - π = κ(z, π) - if π < β: - decision_made = True - decision = 1 - elif π > α: - decision_made = True - decision = 0 - - if true_dist == "f0": - if decision == 0: - correct = True - else: - correct = False - - elif true_dist == "f1": - if decision == 1: - correct = True - else: - correct = False - - return correct, π, t - -def stopping_dist(wf, h_star, ndraws=250, true_dist="f0"): - - """ - Simulates repeatedly to get distributions of time needed to make a - decision and how often they are correct - """ - - tdist = np.empty(ndraws, int) - cdist = np.empty(ndraws, bool) - - for i in range(ndraws): - correct, π, t = simulate(wf, true_dist, h_star) - tdist[i] = t - cdist[i] = correct - - return cdist, tdist - -def simulation_plot(wf): - h_star = solve_model(wf) - ndraws = 500 - cdist, tdist = stopping_dist(wf, h_star, ndraws) - - fig, ax = plt.subplots(1, 2, figsize=(16, 5)) - - ax[0].hist(tdist, bins=np.max(tdist)) - ax[0].set_title(f"Stopping times over {ndraws} replications") - ax[0].set(xlabel="time", ylabel="number of stops") - ax[0].annotate(f"mean = {np.mean(tdist)}", xy=(max(tdist) / 2, - max(np.histogram(tdist, bins=max(tdist))[0]) / 2)) - - ax[1].hist(cdist.astype(int), bins=2) - 
ax[1].set_title(f"Correct decisions over {ndraws} replications") - ax[1].annotate(f"% correct = {np.mean(cdist)}", - xy=(0.05, ndraws / 2)) - - plt.show() - -simulation_plot(wf) -``` - -### Comparative Statics - -Now let's consider the following exercise. - -We double the cost of drawing an additional observation. - -Before you look, think about what will happen: - -- Will the decision-maker be correct more or less often? -- Will he make decisions sooner or later? - -```{code-cell} python3 -wf = WaldFriedman(c=2.5) -simulation_plot(wf) -``` - -Increased cost per draw has induced the decision-maker to take less draws before deciding. - -Because he decides with less, the percentage of time he is correct drops. - -This leads to him having a higher expected loss when he puts equal weight on both models. - -### A Notebook Implementation - -To facilitate comparative statics, we provide -a [Jupyter notebook](https://nbviewer.jupyter.org/github/QuantEcon/lecture-python-advanced.notebooks/blob/master/wald_friedman.ipynb) that -generates the same plots, but with sliders. - -With these sliders, you can adjust parameters and immediately observe - -* effects on the smoothness of the value function in the indecisive middle range - as we increase the number of grid points in the piecewise linear approximation. -* effects of different settings for the cost parameters $L_0, L_1, c$, the - parameters of two beta distributions $f_0$ and $f_1$, and the number - of points and linear functions $m$ to use in the piece-wise continuous approximation to the value function. -* various simulations from $f_0$ and associated distributions of waiting times to making a decision. -* associated histograms of correct and incorrect decisions. - -## Comparison with Neyman-Pearson Formulation - -For several reasons, it is useful to describe the theory underlying the test -that Navy Captain G. S. 
Schuyler had been told to use and that led him
-to approach Milton Friedman and Allan Wallis to convey his conjecture
-that superior practical procedures existed.
-
-Evidently, the Navy had told
-Captain Schuyler to use what it knew to be a state-of-the-art
-Neyman-Pearson test.
-
-We'll rely on Abraham Wald's {cite}`Wald47` elegant summary of Neyman-Pearson theory.
-
-For our purposes, watch for these features of the setup:
-
-- the assumption of a *fixed* sample size $n$
-- the application of laws of large numbers, conditioned on alternative
-  probability models, to interpret the probabilities $\alpha$ and
-  $\beta$ defined in the Neyman-Pearson theory
-
-Recall that in the sequential analytic formulation above, that
-
-- The sample size $n$ is not fixed but rather an object to be
-  chosen; technically $n$ is a random variable.
-- The parameters $\beta$ and $\alpha$ characterize cut-off
-  rules used to determine $n$ as a random variable.
-- Laws of large numbers make no appearances in the sequential
-  construction.
-
-In chapter 1 of **Sequential Analysis** {cite}`Wald47` Abraham Wald summarizes the
-Neyman-Pearson approach to hypothesis testing.
-
-Wald frames the problem as making a decision about a probability
-distribution that is partially known.
-
-(You have to assume that *something* is already known in order to state a well-posed
-problem -- usually, *something* means *a lot*)
-
-By limiting what is unknown, Wald uses the following simple structure
-to illustrate the main ideas:
-
-- A decision-maker wants to decide which of two distributions
-  $f_0$, $f_1$ govern an IID random variable $z$.
-- The null hypothesis $H_0$ is the statement that $f_0$
-  governs the data.
-- The alternative hypothesis $H_1$ is the statement that
-  $f_1$ governs the data. 
-
-- The problem is to devise and analyze a test of hypothesis
-  $H_0$ against the alternative hypothesis $H_1$ on the
-  basis of a sample of a fixed number $n$ independent
-  observations $z_1, z_2, \ldots, z_n$ of the random variable
-  $z$.
-
-To quote Abraham Wald,
-
-> A test procedure leading to the acceptance or rejection of the [null]
-> hypothesis in question is simply a rule specifying, for each possible
-> sample of size $n$, whether the [null] hypothesis should be accepted
-> or rejected on the basis of the sample. This may also be expressed as
-> follows: A test procedure is simply a subdivision of the totality of
-> all possible samples of size $n$ into two mutually exclusive
-> parts, say part 1 and part 2, together with the application of the
-> rule that the [null] hypothesis be accepted if the observed sample is
-> contained in part 2. Part 1 is also called the critical region. Since
-> part 2 is the totality of all samples of size $n$ which are not
-> included in part 1, part 2 is uniquely determined by part 1. Thus,
-> choosing a test procedure is equivalent to determining a critical
-> region.
-
-Let's listen to Wald longer:
-
-> As a basis for choosing among critical regions the following
-> considerations have been advanced by Neyman and Pearson: In accepting
-> or rejecting $H_0$ we may commit errors of two kinds. We commit
-> an error of the first kind if we reject $H_0$ when it is true;
-> we commit an error of the second kind if we accept $H_0$ when
-> $H_1$ is true. After a particular critical region $W$ has
-> been chosen, the probability of committing an error of the first
-> kind, as well as the probability of committing an error of the second
-> kind is uniquely determined. The probability of committing an error
-> of the first kind is equal to the probability, determined by the
-> assumption that $H_0$ is true, that the observed sample will be
-> included in the critical region $W$. 
The probability of
-> committing an error of the second kind is equal to the probability,
-> determined on the assumption that $H_1$ is true, that the
-> probability will fall outside the critical region $W$. For any
-> given critical region $W$ we shall denote the probability of an
-> error of the first kind by $\alpha$ and the probability of an
-> error of the second kind by $\beta$.
-
-Let's listen carefully to how Wald applies law of large numbers to
-interpret $\alpha$ and $\beta$:
-
-> The probabilities $\alpha$ and $\beta$ have the
-> following important practical interpretation: Suppose that we draw a
-> large number of samples of size $n$. Let $M$ be the
-> number of such samples drawn. Suppose that for each of these
-> $M$ samples we reject $H_0$ if the sample is included in
-> $W$ and accept $H_0$ if the sample lies outside
-> $W$. In this way we make $M$ statements of rejection or
-> acceptance. Some of these statements will in general be wrong. If
-> $H_0$ is true and if $M$ is large, the probability is
-> nearly $1$ (i.e., it is practically certain) that the
-> proportion of wrong statements (i.e., the number of wrong statements
-> divided by $M$) will be approximately $\alpha$. If
-> $H_1$ is true, the probability is nearly $1$ that the
-> proportion of wrong statements will be approximately $\beta$.
-> Thus, we can say that in the long run [ here Wald applies law of
-> large numbers by driving $M \rightarrow \infty$ (our comment,
-> not Wald's) ] the proportion of wrong statements will be
-> $\alpha$ if $H_0$ is true and $\beta$ if
-> $H_1$ is true.
-
-The quantity $\alpha$ is called the *size* of the critical region,
-and the quantity $1-\beta$ is called the *power* of the critical
-region.
-
-Wald notes that
-
-> one critical region $W$ is more desirable than another if it
-> has smaller values of $\alpha$ and $\beta$. 
Although
-> either $\alpha$ or $\beta$ can be made arbitrarily small
-> by a proper choice of the critical region $W$, it is not possible
-> to make both $\alpha$ and $\beta$ arbitrarily small for a
-> fixed value of $n$, i.e., a fixed sample size.
-
-Wald summarizes Neyman and Pearson's setup as follows:
-
-> Neyman and Pearson show that a region consisting of all samples
-> $(z_1, z_2, \ldots, z_n)$ which satisfy the inequality
-
-$$
-\frac{ f_1(z_1) \cdots f_1(z_n)}{f_0(z_1) \cdots f_0(z_n)} \geq k
-$$
-
-> is a most powerful critical region for testing the hypothesis
-> $H_0$ against the alternative hypothesis $H_1$. The term
-> $k$ on the right side is a constant chosen so that the region
-> will have the required size $\alpha$.
-
-Wald goes on to discuss Neyman and Pearson's concept of *uniformly most
-powerful* test.
-
-Here is how Wald introduces the notion of a sequential test
-
-> A rule is given for making one of the following three decisions at any stage of
-> the experiment (at the $m$th trial for each integral value of $m$): (1) to
-> accept the hypothesis $H$, (2) to reject the hypothesis $H$, (3) to
-> continue the experiment by making an additional observation. Thus, such
-> a test procedure is carried out sequentially. On the basis of the first
-> observation, one of the aforementioned decision is made. If the first or
-> second decision is made, the process is terminated. If the third
-> decision is made, a second trial is performed. Again, on the basis of
-> the first two observations, one of the three decision is made. If the
-> third decision is made, a third trial is performed, and so on. The
-> process is continued until either the first or the second decisions is
-> made. The number $n$ of observations required by such a test procedure is
-> a random variable, since the value of $n$ depends on the outcome of the
-> observations. 
-
-[^f1]: The decision maker acts as if he believes that the sequence of random variables
-$[z_{0}, z_{1}, \ldots]$ is *exchangeable*. See [Exchangeability and Bayesian Updating](https://python.quantecon.org/exchangeable.html) and
-{cite}`Kreps88` chapter 11, for discussions of exchangeability.
-
-## Sequels
-
-We'll dig deeper into some of the ideas used here in the following lectures:
-
-* {doc}`this lecture ` discusses the key concept of **exchangeability** that rationalizes statistical learning
-* {doc}`this lecture ` describes **likelihood ratio processes** and their role in frequentist and Bayesian statistical theories
-* {doc}`this lecture ` discusses the role of likelihood ratio processes in **Bayesian learning**
-* {doc}`this lecture ` returns to the subject of this lecture and studies whether the Captain's hunch that the (frequentist) decision rule
-  that the Navy had ordered him to use can be expected to be better or worse than the sequential rule that Abraham Wald designed