
Numpyrize backward induction. (#145)
tobiasraabe committed Apr 2, 2019
1 parent ea796ba commit 9f2d1cb
Showing 47 changed files with 1,422 additions and 1,408 deletions.
96 changes: 52 additions & 44 deletions development/documentation/forensics/upgraded/run.py
@@ -5,50 +5,58 @@
 
 import shutil
 import os
+from pathlib import Path
 
-# Compiler options. Note that the original codes are not robust enough to
-# execute in debug mode.
-DEBUG_OPTIONS = (
-    " -O2 -Wall -Wline-truncation -Wcharacter-truncation "
-    " -Wsurprising -Waliasing -Wimplicit-interface -Wunused-parameter "
-    " -fwhole-file -fcheck=all -fbacktrace -g -fmax-errors=1 "
-    " -ffpe-trap=invalid,zero"
-)
-
-PRODUCTION_OPTIONS = " -O3"
-
-# I rely on production options for this script, as I also run the estimation
-# below.
-OPTIONS = PRODUCTION_OPTIONS
-
-# Some strings that show up repeatedly in compiler command.
-MODULES = "imsl_replacements.f90"
-LAPACK = "-L/usr/lib/lapack -llapack"
-
-# Copy required initialization files from the original codes.
-for fname in ["seed.txt", "in1.txt"]:
-    shutil.copy("../original/" + fname, ".")
-
-# Compiling and calling executable for simulation.
-cmd = " gfortran " + OPTIONS + " -o dp3asim " + MODULES + " dp3asim.f90 " + LAPACK
-os.system(cmd + "; ./dp3asim")
-
-# Compiling and calling executable for assessment of interpolation.
-for fname in ["dpsim1", "dpsim4d"]:
-    cmd = (
-        " gfortran "
-        + OPTIONS
-        + " -o "
-        + fname
-        + " "
-        + MODULES
-        + " "
-        + fname
-        + ".f90 "
-        + LAPACK
-    )
-    os.system(cmd + "; ./" + fname)
-
-# Compiling and calling executable for estimation.
-cmd = " gfortran " + OPTIONS + " -o dpml4a " + MODULES + " dpml4a.f90 " + LAPACK
-os.system(cmd + "; ./dpml4a")
+
+def main():
+    # Compiler options. Note that the original codes are not robust enough to
+    # execute in debug mode.
+    DEBUG_OPTIONS = (
+        " -O2 -Wall -Wline-truncation -Wcharacter-truncation "
+        " -Wsurprising -Waliasing -Wimplicit-interface -Wunused-parameter "
+        " -fwhole-file -fcheck=all -fbacktrace -g -fmax-errors=1 "
+        " -ffpe-trap=invalid,zero"
+    )
+
+    PRODUCTION_OPTIONS = " -O3"
+
+    # I rely on production options for this script, as I also run the estimation
+    # below.
+    OPTIONS = PRODUCTION_OPTIONS
+
+    # Some strings that show up repeatedly in compiler command.
+    MODULES = "imsl_replacements.f90"
+    LAPACK = "-L/usr/lib/lapack -llapack"
+
+    # Copy required initialization files from the original codes.
+    path = Path("..", "original")
+    for fname in ["seed.txt", "in1.txt"]:
+        shutil.copy(str(path / fname), ".")
+
+    # Compiling and calling executable for simulation.
+    cmd = " gfortran " + OPTIONS + " -o dp3asim " + MODULES + " dp3asim.f90 " + LAPACK
+    os.system(cmd + "; ./dp3asim")
+
+    # Compiling and calling executable for assessment of interpolation.
+    for fname in ["dpsim1", "dpsim4d"]:
+        cmd = (
+            " gfortran "
+            + OPTIONS
+            + " -o "
+            + fname
+            + " "
+            + MODULES
+            + " "
+            + fname
+            + ".f90 "
+            + LAPACK
+        )
+        os.system(cmd + "; ./" + fname)
+
+    # Compiling and calling executable for estimation.
+    cmd = " gfortran " + OPTIONS + " -o dpml4a " + MODULES + " dpml4a.f90 " + LAPACK
+    os.system(cmd + "; ./dpml4a")
+
+
+if __name__ == '__main__':
+    main()
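As an aside, the refactored script still shells out through os.system with concatenated command strings. A minimal sketch of the same compile-and-run step using subprocess.run instead (an alternative pattern, not part of this commit; OPTIONS, MODULES, and LAPACK mirror the constants in the script above) could look like:

import subprocess

# Constants mirroring the script above; the option strings are split into
# argument lists so no shell quoting is needed.
OPTIONS = ["-O3"]
MODULES = "imsl_replacements.f90"
LAPACK = ["-L/usr/lib/lapack", "-llapack"]


def compile_and_run(executable, source):
    # Compile with gfortran, then execute; check=True raises on any failure
    # instead of silently continuing as os.system would.
    cmd = ["gfortran", *OPTIONS, "-o", executable, MODULES, source, *LAPACK]
    subprocess.run(cmd, check=True)
    subprocess.run(["./" + executable], check=True)


compile_and_run("dp3asim", "dp3asim.f90")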
23 changes: 13 additions & 10 deletions development/documentation/reliability/run_reliability.py
@@ -1,9 +1,8 @@
 #!/usr/bin/env python
-from auxiliary_shared import process_command_line_arguments
-from auxiliary_reliability import run
+from development.modules.auxiliary_shared import process_command_line_arguments
+from development.modules.auxiliary_reliability import run
 
-if __name__ == "__main__":
-
+def main():
     is_debug = process_command_line_arguments(
         "Run reliability exercise for the package"
     )
@@ -12,8 +11,8 @@
     spec_dict = dict()
     spec_dict["fnames"] = ["reliability_short.ini"]
 
-    # The following key-value pairs are the requested updates from the baseline initialization
-    # file.
+    # The following key-value pairs are the requested updates from the baseline
+    # initialization file.
     spec_dict["update"] = dict()
 
     spec_dict["update"]["is_store"] = True
@@ -24,10 +23,10 @@
     spec_dict["update"]["maxfun"] = 1500
     spec_dict["update"]["level"] = 0.05
 
-    # The following key-value pair sets the number of processors for each of the estimations.
-    # This is required as the maximum number of useful cores varies drastically depending on the
-    # model. The requested number of processors is never larger than the one specified as part of
-    # the update dictionary.
+    # The following key-value pair sets the number of processors for each of the
+    # estimations. This is required as the maximum number of useful cores varies
+    # drastically depending on the model. The requested number of processors is never
+    # larger than the one specified as part of the update dictionary.
     spec_dict["procs"] = dict()
     spec_dict["procs"]["ambiguity"] = 1
     spec_dict["procs"]["static"] = 1
@@ -42,3 +41,7 @@
     spec_dict["update"]["maxfun"] = 0
 
     run(spec_dict)
+
+
+if __name__ == '__main__':
+    main()
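The body of process_command_line_arguments is not part of this diff; a plausible argparse-based sketch, assuming the helper only exposes a debug flag and returns it as a boolean, would be:

import argparse


# Hypothetical reconstruction -- the real helper lives in
# development/modules/auxiliary_shared.py and is not shown in this commit.
def process_command_line_arguments(description):
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "--debug",
        action="store_true",
        dest="is_debug",
        help="run a reduced version of the exercise",
    )
    return parser.parse_args().is_debug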
5 changes: 2 additions & 3 deletions development/documentation/scalability/run_scalability.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
-from auxiliary_shared import process_command_line_arguments
-from auxiliary_scalability import run
+from development.modules.auxiliary_shared import process_command_line_arguments
+from development.modules.auxiliary_scalability import run
 
-if __name__ == "__main__":
-
27 changes: 14 additions & 13 deletions development/modules/auxiliary_property.py
@@ -8,6 +8,7 @@
 import numpy as np
 import os
 import shutil
+from pathlib import Path
 
 # RESPY directory. This allows to compile_ the debug version of the FORTRAN
 # program.
@@ -141,24 +142,25 @@ def get_test_dict(test_dir):
     modules as the keys. The corresponding value is a list with all test
     methods inside that module.
     """
-    # Process all candidate modules.
-    current_directory = os.getcwd()
-    os.chdir(test_dir)
-    test_modules = []
-    for test_file in glob.glob("test_*.py"):
-        test_module = test_file.replace(".py", "")
-        test_modules.append(test_module)
-    os.chdir(current_directory)
+    # Get all test modules
+    test_modules = test_dir.glob("*.py")
+    # Add test_dir to path for importing
+    sys.path.append(str(test_dir))
 
     # Given the modules, get all tests methods.
-    test_dict = dict()
+    test_dict = {}
     for test_module in test_modules:
-        test_dict[test_module] = []
-        mod = importlib.import_module(test_module.replace(".py", ""))
+        if test_module.stem == "__init__":
+            continue
+        test_dict[test_module.stem] = []
+        mod = importlib.import_module(test_module.stem)
         candidate_methods = dir(mod.TestClass)
         for candidate_method in candidate_methods:
             if "test_" in candidate_method:
-                test_dict[test_module].append(candidate_method)
+                test_dict[test_module.stem].append(candidate_method)
 
+    # Remove path from PYTHONPATH
+    sys.path.remove(str(test_dir))
+
     # If the PARALLELISM or FORTRAN is not available, we remove the parallel tests.
     if not IS_PARALLELISM_MPI and not IS_PARALLELISM_OMP:
@@ -173,7 +175,6 @@ def get_test_dict(test_dir):
     if not IS_F2PY:
         del test_dict["test_f2py"]
 
-    # Finishing
     return test_dict
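For context, the rewritten get_test_dict swaps the os.chdir/glob.glob dance for pathlib and imports each module by its stem. A self-contained sketch of that discovery pattern (the "tests" directory is a placeholder; TestClass is the convention used above):

import importlib
import sys
from pathlib import Path

test_dir = Path("tests")  # placeholder directory
sys.path.append(str(test_dir))  # make modules importable by bare name

test_dict = {}
for test_module in test_dir.glob("*.py"):
    if test_module.stem == "__init__":
        continue
    # Path("tests/test_unit.py").stem == "test_unit"
    mod = importlib.import_module(test_module.stem)
    test_dict[test_module.stem] = [
        method for method in dir(mod.TestClass) if "test_" in method
    ]

sys.path.remove(str(test_dir))  # restore sys.path to its previous state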
18 changes: 12 additions & 6 deletions development/modules/auxiliary_regression.py
@@ -18,7 +18,7 @@ def get_chunks(l, n):
     """ Yield successive n-sized chunks from l.
     """
     for i in range(0, len(l), n):
-        yield l[i: i + n]
+        yield l[i : i + n]
 
 
 def create_single(idx):
@@ -39,16 +39,22 @@ def create_single(idx):
     version = np.random.choice(["python", "fortran"])
 
     # only choose from constraint optimizers because we always have some bounds
-    if version == 'python':
+    if version == "python":
         optimizer = "SCIPY-LBFGSB"
     else:
         optimizer = "FORTE-BOBYQA"
 
     constr = {
         "program": {"version": version},
-        "preconditioning": {"type": np.random.choice(["identity", "magnitudes"])},
-        "estimation": {"maxfun": int(np.random.choice(range(6), p=[0.5, 0.1, 0.1, 0.1, 0.1, 0.1])),
-                       "optimizer": optimizer},
+        "preconditioning": {
+            "type": np.random.choice(["identity", "magnitudes"])
+        },
+        "estimation": {
+            "maxfun": int(
+                np.random.choice(range(6), p=[0.5, 0.1, 0.1, 0.1, 0.1, 0.1])
+            ),
+            "optimizer": optimizer,
+        },
     }
     constr["flag_estimation"] = True
 
@@ -84,7 +90,7 @@ def check_single(tests, idx):
     attr["num_procs"] = 1
 
     if not IS_FORTRAN:
-        attr['version'] = 'python'
+        attr["version"] = "python"
 
     # In the past we also had the problem that some of the testing machines report
     # selective failures when the regression vault was created on another machine.
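Two details above may deserve a gloss: get_chunks is a plain generator, and the maxfun draw is weighted so that half of the generated regression tests presumably request zero optimization steps. A small usage sketch:

import numpy as np


def get_chunks(l, n):
    """ Yield successive n-sized chunks from l. """
    for i in range(0, len(l), n):
        yield l[i : i + n]


# A list of 7 indices split into chunks of 3.
print(list(get_chunks(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]

# With p=[0.5, 0.1, ...], the draw returns 0 with probability one half and
# each of 1..5 with probability 0.1.
maxfun = int(np.random.choice(range(6), p=[0.5, 0.1, 0.1, 0.1, 0.1, 0.1]))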
46 changes: 25 additions & 21 deletions development/modules/auxiliary_reliability.py
@@ -9,16 +9,17 @@
 from development.modules.auxiliary_shared import send_notification
 from development.modules.auxiliary_shared import cleanup
 
-from config import SPEC_DIR
+from development.modules.config import SPEC_DIR
 
 from respy.scripts.scripts_simulate import scripts_simulate
 from respy.scripts.scripts_compare import scripts_compare
 import respy
 
 
 def run(spec_dict):
-    """ Details of the Monte Carlo exercise can be specified in the code block below. Note that
-    only deviations from the benchmark initialization files need to be addressed.
+    """ Details of the Monte Carlo exercise can be specified in the code block below.
+    Note that only deviations from the benchmark initialization files need to be
+    addressed.
     """
 
     cleanup()
@@ -42,28 +43,29 @@ def run_single(spec_dict, fname):
     os.mkdir(fname.replace(".ini", ""))
     os.chdir(fname.replace(".ini", ""))
 
-    # We first read in the first specification from the initial paper for our baseline and
-    # process only the specified deviations.
+    # We first read in the first specification from the initial paper for our baseline
+    # and process only the specified deviations.
     respy_obj = respy.RespyCls(SPEC_DIR + fname)
     update_class_instance(respy_obj, spec_dict)
 
     respy_obj.write_out()
 
-    # Let us first simulate a baseline sample, store the results for future reference, and start
-    # an estimation from the true values.
+    # Let us first simulate a baseline sample, store the results for future reference,
+    # and start an estimation from the true values.
     x = None
 
     is_risk = spec_dict["update"]["level"] == 0.00
     num_procs = spec_dict["update"]["num_procs"]
 
     for request in ["Truth", "Static", "Risk", "Ambiguity"]:
 
-        # If there is no ambiguity in the dataset, then we can skip the AMBIGUITY estimation.
+        # If there is no ambiguity in the dataset, then we can skip the AMBIGUITY
+        # estimation.
         if is_risk and request == "Ambiguity":
             continue
 
-        # If there is no ambiguity, we will just fit the ambiguity parameter to avoid the
-        # computational costs.
+        # If there is no ambiguity, we will just fit the ambiguity parameter to avoid
+        # the computational costs.
         respy_obj.unlock()
 
         if request == "Truth" and is_risk:
@@ -78,8 +80,8 @@ def run_single(spec_dict, fname):
             # We do only need a subset of the available processors
            respy_obj.attr["num_procs"] = min(num_procs, spec_dict["procs"]["static"])
 
-            # There is no update required, we start with the true parameters from the dynamic
-            # ambiguity model.
+            # There is no update required, we start with the true parameters from the
+            # dynamic ambiguity model.
             respy_obj.attr["optim_paras"]["delta"] = np.array([0.00])
             respy_obj.attr["optim_paras"]["level"] = np.array([0.00])
             respy_obj.attr["optim_paras"]["paras_fixed"][:2] = [True, True]
@@ -93,9 +95,9 @@ def run_single(spec_dict, fname):
             # This is an update with the results from the static estimation.
             respy_obj.update_optim_paras(x)
 
-            # Note that we now start with 0.85, which is in the middle of the parameter bounds.
-            # Manual testing showed that the program is reliable even if we start at 0.00.
-            # However, it does take much more function evaluations.
+            # Note that we now start with 0.85, which is in the middle of the parameter
+            # bounds. Manual testing showed that the program is reliable even if we
+            # start at 0.00. However, it does take much more function evaluations.
             respy_obj.attr["optim_paras"]["delta"] = np.array([0.85])
             respy_obj.attr["optim_paras"]["level"] = np.array([0.00])
             respy_obj.attr["optim_paras"]["paras_fixed"][:2] = [False, True]
@@ -110,9 +112,10 @@ def run_single(spec_dict, fname):
             # This is an update with the results from the dynamic risk estimation.
             respy_obj.update_optim_paras(x)
 
-            # We want to be able to start the ambiguity estimation directly from the risk-only
-            # case. This requires that we adjust the the starting values for the discount factor
-            # manually from zero as it otherwise violates the bounds.
+            # We want to be able to start the ambiguity estimation directly from the
+            # risk-only case. This requires that we adjust the the starting values for
+            # the discount factor manually from zero as it otherwise violates the
+            # bounds.
             if respy_obj.attr["optim_paras"]["delta"] == 0.0:
                 respy_obj.attr["optim_paras"]["delta"] = np.array([0.85])
 
@@ -252,8 +255,8 @@ def get_rmse():
 
 
 def simulate_specification(respy_obj, subdir, update, paras=None):
-    """ Simulate results to assess the estimation performance. Note that we do not update the
-    object that is passed in.
+    """ Simulate results to assess the estimation performance. Note that we do not
+    update the object that is passed in.
     """
     os.mkdir(subdir)
     os.chdir(subdir)
@@ -266,7 +269,8 @@ def simulate_specification(respy_obj, subdir, update, paras=None):
         assert paras is not None
         respy_copy.update_optim_paras(paras)
 
-    # The initialization file is specified to run the actual estimation in the directory above.
+    # The initialization file is specified to run the actual estimation in the directory
+    # above.
     respy_copy.attr["file_est"] = "../" + respy_copy.attr["file_est"]
     respy_copy.write_out()
 
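Schematically, run_single chains the Truth, Static, Risk, and Ambiguity estimations so that each step warm-starts from the previous result. A stripped-down sketch of that control flow (estimate stands in for the actual estimation call and is not a function from this repository):

def run_cascade(respy_obj, is_risk, estimate):
    # x holds the parameter vector of the most recent estimation.
    x = None
    for request in ["Truth", "Static", "Risk", "Ambiguity"]:
        # Without ambiguity in the data, the last step has nothing to fit.
        if is_risk and request == "Ambiguity":
            continue
        if x is not None:
            # Warm start from the parameters of the preceding estimation.
            respy_obj.update_optim_paras(x)
        x = estimate(respy_obj)
    return x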
9 changes: 5 additions & 4 deletions development/modules/auxiliary_robustness.py
@@ -33,10 +33,11 @@ def run_robustness_test(seed, is_investigation):
         copy(join(old_dir, file), join(new_dir, file))
     os.chdir(new_dir)
 
-    # We need to impose some constraints so that the random initialization file does meet the
-    # structure of the empirical dataset. We need to be particularly careful with the
-    # construction of the maximum level of schooling as we need to rule out that anyone in the
-    # estimation sample has a value larger then the specified maximum value.
+    # We need to impose some constraints so that the random initialization file does
+    # meet the structure of the empirical dataset. We need to be particularly careful
+    # with the construction of the maximum level of schooling as we need to rule out
+    # that anyone in the estimation sample has a value larger then the specified maximum
+    # value.
     version = np.random.choice(["python", "fortran"])
     if version == 'python':
         max_periods = 3
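The schooling constraint described in the comment above is simple to state; a hypothetical check (the column name and edu_max are illustrative, not names from this module) might read:

import pandas as pd

# Stand-in estimation sample and candidate maximum for the random
# initialization file; both are illustrative only.
df = pd.DataFrame({"schooling": [10, 12, 16]})
edu_max = 20

# Rule out that anyone in the sample exceeds the specified maximum.
assert df["schooling"].max() <= edu_max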
