feature: Model and Control from yaml files
jmccreight committed Jun 21, 2023
1 parent f22fbf7 commit 916ff97
Showing 28 changed files with 726 additions and 104 deletions.
18 changes: 14 additions & 4 deletions .github/RELEASE.md
@@ -1,16 +1,26 @@
# -----------------------------------------------------------------------------
# Release guide

This document describes release procedures, conventions, and utilities for `pywatershed`.
This document describes release procedures, conventions, and utilities for
`pywatershed`.

## Conventions

- Releases follow the [git flow](https://nvie.com/posts/a-successful-git-branching-model/).
- Releases follow the
[git flow](https://nvie.com/posts/a-successful-git-branching-model/).
- Release numbers follow [semantic version](https://semver.org/) conventions.
- Minor and major releases branch from `develop`. Patches branch from `main`.

## Releasing `pywatershed`

The release procedure is mostly automated. The workflow is defined in `.github/workflows/release.yaml` and triggers when a release or patch branch is pushed to this repo.
The release procedure is mostly automated. The workflow is defined in
`.github/workflows/release.yaml` and triggers when a release or patch branch is
pushed to this repo.

Prior to release:
1. Run the asv benchmark tests without asv and perform an asv regression test.
1. On `develop`, update `what's new.rst` to include the date of the release.


To release a new version:

@@ -43,7 +53,7 @@ To release a new version:
- Update `version.txt` and `pywatershed/version.py` to match the just-released version, with a '+' appended to the version number in `version.txt` to indicate preliminary/development status.
- Draft a PR against `develop` with the updated version files and the updates previously merged to `main`.

5. Merge the PR to `develop`. As above, it is important to *merge* the PR, not squash, to preserve history and keep `develop` and `main` from diverging.
5. Merge the PR to `develop`. As above, it is important to *merge* the PR, not squash, to preserve history and keep `develop` and `main` from diverging.

## Utility scripts

14 changes: 12 additions & 2 deletions asv_benchmarks/benchmarks/__init__.py
@@ -4,7 +4,13 @@

print("sys.version: ", sys.version)

# TODO remove backwards compatiability with pynhm once
# reported slows downs are sorted output
# TODO remove backwards compatibility with <0.2.0 once
# it is released.

try:
# backwards compatibility
import pynhm as pws

_is_pws = False
@@ -13,7 +19,6 @@

_is_pws = True

# this is in asv dev
if not "constants" in pws.__dict__.keys():
del pws
import pywatershed as pws
@@ -22,7 +27,12 @@

# The package is installed without data. The test data are relative to the repo
# used to do the install. So use the asv env var to find that.
asv_conf_dir = pl.Path(os.environ["ASV_CONF_DIR"]).resolve()
try:
asv_conf_dir = pl.Path(os.environ["ASV_CONF_DIR"]).resolve()
except KeyError:
asv_conf_dir = pl.Path(".")


assert asv_conf_dir.exists()
pws_root = asv_conf_dir / "../pywatershed"
test_data_dir = asv_conf_dir / "../test_data"
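Pieced together, the compatibility shim this file now carries looks roughly like the sketch below; the collapsed middle of the hunk is not shown above, so the exception type and the pywatershed fallback are assumptions.

```python
import os
import pathlib as pl

try:
    # backwards compatibility with the old package name
    import pynhm as pws

    _is_pws = False
except ModuleNotFoundError:  # assumption: the hidden part of the hunk imports pywatershed here
    import pywatershed as pws

    _is_pws = True

# The test data live relative to the repo used for the install; asv exports
# ASV_CONF_DIR, and plain (non-asv) runs fall back to the current directory.
try:
    asv_conf_dir = pl.Path(os.environ["ASV_CONF_DIR"]).resolve()
except KeyError:
    asv_conf_dir = pl.Path(".")

test_data_dir = asv_conf_dir / "../test_data"
```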
58 changes: 46 additions & 12 deletions asv_benchmarks/benchmarks/prms.py
@@ -2,14 +2,17 @@
import shutil
from typing import Union, Literal


from . import _is_pws, parameterized, test_data_dir

if _is_pws:
import pywatershed as pws
else:
import pynhm as pws

# TODO remove backwards compatiability with pynhm once
# reported slows downs are sorted output
# TODO remove backwards compatibility with <0.2.0 once
# it is released.

domains = ["hru_1", "drb_2yr", "ucb_2yr"]
outputs = [None, "separate", "together"]
@@ -73,13 +76,25 @@ def setup(self, *args):

self.control_file = test_data_dir / f"{self.domain}/control.test"
self.parameter_file = test_data_dir / f"{self.domain}/myparam.param"
print(f"model_setup_run tag: {self.tag}")

# backwards compatibility pre-pywatershed
if _is_pws:
params = pws.parameters.PrmsParameters.load(self.parameter_file)
self.params = pws.parameters.PrmsParameters.load(
self.parameter_file
)
else:
params = pws.PrmsParameters.load(self.parameter_file)
self.params = pws.PrmsParameters.load(self.parameter_file)

# backwards compatibility
try:
self.control = pws.Control.load(
self.control_file, params=self.params
)
self.ge_v0_2_0 = False
except:
self.control = pws.Control.load(self.control_file)
self.ge_v0_2_0 = True

self.control = pws.Control.load(self.control_file, params=params)
self.control.edit_n_time_steps(n_time_steps)

# setup input_dir with symlinked prms inputs and outputs
@@ -104,13 +119,25 @@ def model_setup_run(
processes: tuple = None,
write_output: Union[bool, Literal["separate", "together"]] = None,
):
model = pws.Model(
*self.processes,
control=self.control,
input_dir=self.tag_input_dir,
budget_type="warn",
calc_method="numba",
)
if self.ge_v0_2_0:
model = pws.Model(
self.processes,
control=self.control,
discretization_dict=None,
parameters=self.params,
input_dir=self.tag_input_dir,
budget_type="warn",
calc_method="numba",
)
else:
model = pws.Model(
*self.processes,
control=self.control,
input_dir=self.tag_input_dir,
budget_type="warn",
calc_method="numba",
)

if write_output is not None:
model.initialize_netcdf(
self.tag_dir, separate_files=(write_output == "separate")
@@ -131,6 +158,13 @@ def time_prms_run(
processes: tuple,
output: Union[None, Literal["separate", "together"]],
):
print(
"\nPRMSModels args: \n",
f"domain: {domain}\n",
f"processes:{processes}\n",
f"output: {output}\n",
)

_ = self.model_setup_run(
domain=domain, processes=processes, write_output=output
)
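Outside the benchmark harness, the new-API branch above boils down to the following sketch; the paths, the process subset, and the input directory are placeholders, and the pre-0.2.0 form is kept only in comments.

```python
import pathlib as pl

import pywatershed as pws

# Placeholders standing in for the benchmark's per-domain fixtures.
domain_dir = pl.Path("../test_data/drb_2yr")
control_file = domain_dir / "control.test"
parameter_file = domain_dir / "myparam.param"
input_dir = domain_dir / "output"  # assumption: directory of forcing/input files
processes = (pws.PRMSCanopy, pws.PRMSChannel)  # assumption: any tested subset works

params = pws.parameters.PrmsParameters.load(parameter_file)

# >= 0.2.0: parameters are passed to Model, and Control.load no longer takes them.
control = pws.Control.load(control_file)
model = pws.Model(
    processes,
    control=control,
    discretization_dict=None,
    parameters=params,
    input_dir=input_dir,
    budget_type="warn",
    calc_method="numba",
)

# pre-0.2.0 (pynhm) equivalent, for reference:
#   control = pws.Control.load(control_file, params=params)
#   model = pws.Model(*processes, control=control, input_dir=input_dir,
#                     budget_type="warn", calc_method="numba")
```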
23 changes: 23 additions & 0 deletions asv_benchmarks/run_benchmarks_wo_asv.py
@@ -0,0 +1,23 @@
from itertools import product
from benchmarks.prms import domains, outputs, model_tests, PRMSModels

# Just running the PRMSModels benchmarks

all_tests = list(
product(
*[
domains,
model_tests.values(),
outputs,
]
)
)


all_tests = all_tests

for args in all_tests:
mm = PRMSModels()
mm.setup(*args)
mm.time_prms_run(*args)
mm.teardown(*args)
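Run from the asv_benchmarks directory (like the script above), a single combination can also be exercised as a quick smoke test; the indices below simply pick the first entries defined in benchmarks/prms.py.

```python
from benchmarks.prms import PRMSModels, domains, model_tests, outputs

# First domain, first process combination, no output files.
args = (domains[0], list(model_tests.values())[0], outputs[0])

mm = PRMSModels()
mm.setup(*args)
mm.time_prms_run(*args)
mm.teardown(*args)
```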
42 changes: 26 additions & 16 deletions autotest/test_model.py
@@ -32,7 +32,7 @@
}


params = ("params_sep", "params_one")
params = ("params_sep", "params_one", "from_yml")


@pytest.fixture(scope="function")
@@ -43,15 +43,12 @@ def control(domain):
@pytest.fixture(scope="function")
def discretization(domain):
dis_hru_file = domain["dir"] / "parameters_dis_hru.nc"
dis_seg_file = domain["dir"] / "parameters_dis_seg.nc"
dis_both_file = domain["dir"] / "parameters_dis_both.nc"
dis_hru = Parameters.from_netcdf(dis_hru_file, encoding=False)
dis_both = Parameters.from_netcdf(dis_both_file, encoding=False)
# PRMSChannel needs both dis where as it should only need dis_seg
# and will when we have exchanges
dis_combined = Parameters.merge(
Parameters.from_netcdf(dis_hru_file, encoding=False),
Parameters.from_netcdf(dis_seg_file, encoding=False),
)
dis = {"dis_hru": dis_hru, "dis_combined": dis_combined}
dis = {"dis_hru": dis_hru, "dis_both": dis_both}

return dis

@@ -73,7 +70,7 @@ def model_args(domain, control, discretization, request):
"parameters": PrmsParameters.load(domain["param_file"]),
}

else:
elif params == "params_sep":
# Constructing this model_dict is the new way
model_dict = discretization
model_dict["control"] = control
@@ -91,7 +88,7 @@ def model_args(domain, control, discretization, request):
proc_param_file = domain["dir"] / f"parameters_{proc_name}.nc"
proc["parameters"] = PrmsParameters.from_netcdf(proc_param_file)
if proc_name_lower == "PRMSChannel".lower():
proc["dis"] = "dis_combined"
proc["dis"] = "dis_both"
else:
proc["dis"] = "dis_hru"

@@ -104,14 +101,24 @@ def model_args(domain, control, discretization, request):
"parameters": None,
}

elif params == "from_yml":
yml_file = domain["dir"] / "nhm_model.yml"
model_dict = Model.model_dict_from_yml(yml_file)

args = {
"process_list_or_model_dict": model_dict,
"control": None,
"discretization_dict": None,
"parameters": None,
}

else:
msg = "invalid parameter value"
raise ValueError(msg)

return args


# @pytest.mark.parametrize(
# "processes",
# test_models.values(),
# ids=test_models.keys(),
# )
def test_model(domain, model_args, tmp_path):
"""Run the full NHM model"""

@@ -130,6 +137,7 @@ def test_model(domain, model_args, tmp_path):
**model_args,
input_dir=input_dir,
budget_type=budget_type,
calc_method="numba",
load_n_time_batches=3,
)

@@ -318,6 +326,8 @@ def test_model(domain, model_args, tmp_path):
for vv, aa in var_ans.items():
if not is_old_style:
pp = pp.lower()
if pp not in model.processes.keys():
pp = pp[4:]
result = model.processes[pp][vv].mean()
reg_ans = aa[domain["domain_name"]]
if not reg_ans:
@@ -338,8 +348,8 @@ def test_model(domain, model_args, tmp_path):
if not all_success:
if fail_prms_compare and fail_regression:
msg = (
"pywatershed results both failed regression test and comparison "
"with prms5.2.1"
"pywatershed results both failed regression test and "
"comparison with prms5.2.1"
)
elif fail_prms_compare:
msg = "pywatershed results failed comparison with prms5.2.1"
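The from_yml path exercised above is the headline feature of this commit: Model.model_dict_from_yml builds a complete model_dict (control, discretizations, parameters, and processes) from a single yaml file. A minimal sketch follows, with a hypothetical domain directory and input_dir; calling run() is an assumption about the public Model API rather than something shown in this diff.

```python
import pathlib as pl

import pywatershed as pws

# Hypothetical domain directory; nhm_model.yml is the file the fixture loads.
domain_dir = pl.Path("../test_data/drb_2yr")
model_dict = pws.Model.model_dict_from_yml(domain_dir / "nhm_model.yml")

model = pws.Model(
    model_dict,                # process_list_or_model_dict
    control=None,              # control, discretizations, and parameters
    discretization_dict=None,  # all come from the yaml-built model_dict
    parameters=None,
    input_dir=domain_dir / "output",  # assumption: where the forcing files live
    budget_type="warn",
    calc_method="numba",
    load_n_time_batches=3,
)
model.run()  # assumption: Model.run() steps the model to completion
```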
2 changes: 1 addition & 1 deletion autotest/test_prms_param_separate.py
@@ -40,7 +40,7 @@ def test_param_sep(domain, params, use_xr, tmp_path):
process_list=nhm_processes,
use_xr=use_xr,
)
assert len(proc_nc_files) == len(nhm_processes) + 2 # 2 dis
assert len(proc_nc_files) == len(nhm_processes) + 3 # hru, seg, both

# check roundtrip: to file and back
for proc_name, proc_file in proc_nc_files.items():
2 changes: 1 addition & 1 deletion pywatershed/atmosphere/PRMSAtmosphere.py
@@ -720,7 +720,7 @@ def calculate_transp_tindex(self):
# transp_on inited to 0 everywhere above

# candidate for worst code lines
if self.params.get_parameters("temp_units")["temp_units"] == 0:
if self.params.parameters["temp_units"] == 0:
transp_tmax_f = self.transp_tmax
else:
transp_tmax_f = (self.transp_tmax * (9.0 / 5.0)) + 32.0
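The one-line change above swaps the heavier subset-then-index accessor for a direct read from the parameters mapping; schematically (the parameter file path is hypothetical):

```python
import pywatershed as pws

params = pws.parameters.PrmsParameters.load("myparam.param")  # hypothetical path

# new accessor: .parameters is a plain mapping of parameter name -> values
temp_units = params.parameters["temp_units"]

# old accessor, replaced in this commit:
#   temp_units = params.get_parameters("temp_units")["temp_units"]
```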