def get_project_root_path():
    """Return the repository root directory (parent of the autotest directory)."""
    return Path(__file__).parent.parent


def get_exe_path(exe_name, forgive=True):
    """Resolve *exe_name* to an absolute executable path.

    Search order:
      1. the system ``PATH`` (via :func:`shutil.which`);
      2. ``<repo-root>/bin``;
      3. the platform subdirectory ``bin/linux``, ``bin/mac`` or ``bin/win``.

    Parameters
    ----------
    exe_name : str
        Bare executable name, without extension (e.g. ``"mf6"``).
    forgive : bool, default True
        If True, return ``None`` when the executable cannot be found;
        otherwise raise :class:`FileNotFoundError`.

    Returns
    -------
    pathlib.Path or None
        Absolute path to the executable, or ``None`` when not found and
        *forgive* is True.
    """
    found = shutil.which(exe_name)
    if found is not None:
        # Return a Path for a consistent return type with the fallback branch.
        return Path(found)

    exe_dir = get_project_root_path() / "bin"
    if not (exe_dir / exe_name).exists():
        system = platform.system().lower()
        if "linux" in system:
            exe_dir = exe_dir / "linux"
        elif "darwin" in system:
            exe_dir = exe_dir / "mac"
        else:
            exe_dir = exe_dir / "win"

    # Use which(path=...) rather than Path.exists(): on Windows the binary on
    # disk is e.g. "mf6.exe", which a bare exists() check on "mf6" would miss;
    # which() also verifies the file is actually executable.
    found = shutil.which(exe_name, path=str(exe_dir))
    if found is None:
        msg = (f"Executable {exe_name} not found in system PATH "
               f"or fallback path: {exe_dir}")
        if not forgive:
            raise FileNotFoundError(msg)
        print(msg + ", returning None")
        return None
    return Path(found)


def full_exe_ref_dict():
    """Return a dict mapping each required executable name to its resolved
    path (or ``None`` when it cannot be located and lookup is forgiving)."""
    exe_names = [
        "mfnwt", "mt3dusgs", "mfusg_gsi", "mf6",
        "pestpp-ies", "pestpp-sen", "pestpp-opt", "pestpp-glm",
        "pestpp-mou", "pestpp-da", "pestpp-sqp", "pestpp-swp",
    ]
    return {name: get_exe_path(name) for name in exe_names}
"pestpp-glm") -ies_exe_path = os.path.join(bin_path, "pestpp-ies") -swp_exe_path = os.path.join(bin_path, "pestpp-swp") - -mf_exe_name = os.path.basename(mf_exe_path) -mf6_exe_name = os.path.basename(mf6_exe_path) +from conftest import full_exe_ref_dict +exepath_dict = full_exe_ref_dict() +ies_exe_path = exepath_dict['pestpp-ies'] +mf_exe_path = exepath_dict['mfnwt'] +mf6_exe_path = exepath_dict['mf6'] +pp_exe_path = exepath_dict['pestpp-glm'] +usg_exe_path = exepath_dict["mfusg_gsi"] def _get_port(): import socket @@ -92,104 +71,8 @@ def setup_tmp(od, tmp_d, sub=None): shutil.copytree(od, new_d) return new_d -# @pytest.fixture -# def freybergmf6_2_pstfrom(tmp_path): -# import numpy as np -# import pandas as pd -# pd.set_option('display.max_rows', 500) -# pd.set_option('display.max_columns', 500) -# pd.set_option('display.width', 1000) -# try: -# import flopy -# except: -# return -# -# org_model_ws = os.path.join('..', 'examples', 'freyberg_mf6') -# tmp_model_ws = setup_tmp(org_model_ws, tmp_path) -# bd = Path.cwd() -# os.chdir(tmp_path) -# try: -# tmp_model_ws = tmp_model_ws.relative_to(tmp_path) -# sim = flopy.mf6.MFSimulation.load(sim_ws=str(tmp_model_ws)) -# m = sim.get_model() -# sim.set_all_data_external(check_data=False) -# sim.write_simulation() -# -# # SETUP pest stuff... 
-# os_utils.run("{0} ".format(mf6_exe_path), cwd=tmp_model_ws) -# template_ws = "new_temp" -# if os.path.exists(template_ws): -# shutil.rmtree(template_ws) -# # sr0 = m.sr -# # sr = pyemu.helpers.SpatialReference.from_namfile( -# # os.path.join(tmp_model_ws, "freyberg6.nam"), -# # delr=m.dis.delr.array, delc=m.dis.delc.array) -# sr = m.modelgrid -# # set up PstFrom object -# pf = PstFrom(original_d=tmp_model_ws, new_d=template_ws, -# remove_existing=True, -# longnames=True, spatial_reference=sr, -# zero_based=False, start_datetime="1-1-2018", -# chunk_len=1) -# yield pf -# except Exception as e: -# os.chdir(bd) -# raise e -# os.chdir(bd) - - -# @pytest.fixture -# def freybergnwt_2_pstfrom(tmp_path): -# import numpy as np -# import pandas as pd -# pd.set_option('display.max_rows', 500) -# pd.set_option('display.max_columns', 500) -# pd.set_option('display.width', 1000) -# try: -# import flopy -# except: -# return -# -# org_model_ws = os.path.join('..', 'examples', 'freyberg_sfr_reaches') -# tmp_model_ws = setup_tmp(org_model_ws, tmp_path) -# bd = Path.cwd() -# os.chdir(tmp_path) -# nam_file = "freyberg.nam" -# try: -# tmp_model_ws = tmp_model_ws.relative_to(tmp_path) -# m = flopy.modflow.Modflow.load(nam_file, model_ws=tmp_model_ws, -# check=False, forgive=False, -# exe_name=mf_exe_path) -# flopy.modflow.ModflowRiv(m, stress_period_data={ -# 0: [[0, 0, 0, m.dis.top.array[0, 0], 1.0, m.dis.botm.array[0, 0, 0]], -# [0, 0, 1, m.dis.top.array[0, 1], 1.0, m.dis.botm.array[0, 0, 1]], -# [0, 0, 1, m.dis.top.array[0, 1], 1.0, m.dis.botm.array[0, 0, 1]]]}) -# -# m.external_path = "." 
-# m.write_input() -# runstr = ("{0} {1}".format(mf_exe_path, m.name + ".nam"), tmp_model_ws) -# print(runstr) -# os_utils.run(*runstr) -# template_ws = "template" -# if os.path.exists(template_ws): -# shutil.rmtree(template_ws) -# sr = pyemu.helpers.SpatialReference.from_namfile( -# os.path.join(m.model_ws, m.namefile), -# delr=m.dis.delr, delc=m.dis.delc) -# # set up PstFrom object -# pf = PstFrom(original_d=tmp_model_ws, new_d=template_ws, -# remove_existing=True, -# longnames=True, spatial_reference=sr, -# zero_based=False, start_datetime="1-1-2018", -# chunk_len=1) -# yield pf -# except Exception as e: -# os.chdir(bd) -# raise e -# os.chdir(bd) - - -def freyberg_test(tmp_path): + +def test_freyberg(tmp_path): import numpy as np import pandas as pd from pyemu import PyemuWarning @@ -383,7 +266,7 @@ def freyberg_test(tmp_path): # add model run command - pf.mod_sys_cmds.append("{0} {1}".format(mf_exe_name, m.name + ".nam")) + pf.mod_sys_cmds.append("{0} {1}".format(mf_exe_path, m.name + ".nam")) print(pf.mult_files) print(pf.org_files) @@ -608,7 +491,7 @@ def test_freyberg_prior_build(tmp_path): # add model run command - pf.mod_sys_cmds.append("{0} {1}".format(mf_exe_name, m.name + ".nam")) + pf.mod_sys_cmds.append("{0} {1}".format(mf_exe_path, m.name + ".nam")) print(pf.mult_files) print(pf.org_files) @@ -1086,7 +969,7 @@ def test_mf6_freyberg(tmp_path): geostruct=rch_temporal_gs) # add model run command - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) # print(pf.mult_files) # print(pf.org_files) @@ -1298,7 +1181,7 @@ def mf6_freyberg_shortnames_test(tmp_path): assert len(pdf) == 4 # add model run command - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) print(pf.mult_files) print(pf.org_files) @@ -1414,7 +1297,7 @@ def mf6_freyberg_da_test(tmp_path): m = sim.get_model("freyberg6") # SETUP pest stuff... 
- os_utils.run("{0} ".format("mf6"), cwd=tmp_model_ws) + os_utils.run("{0} ".format(mf6_exe_path), cwd=tmp_model_ws) template_ws = "new_temp_da" if os.path.exists(template_ws): @@ -1485,7 +1368,7 @@ def mf6_freyberg_da_test(tmp_path): par_type="grid") # add model run command - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) # print(pf.mult_files) # print(pf.org_files) @@ -1585,7 +1468,7 @@ def setup_freyberg_mf6(wd, model="freyberg_mf6", **kwargs): sim.write_simulation() # SETUP pest stuff... - os_utils.run("{0} ".format("mf6"), cwd=tmp_model_ws) + os_utils.run("{0} ".format(mf6_exe_path), cwd=tmp_model_ws) template_ws = Path(wd, "template") if os.path.exists(template_ws): @@ -1613,7 +1496,7 @@ def setup_freyberg_mf6(wd, model="freyberg_mf6", **kwargs): def build_direct(pf): - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) print(pf.mult_files) print(pf.org_files) @@ -2071,7 +1954,7 @@ def mf6_freyberg_direct_test(tmp_path): sim.write_simulation() # SETUP pest stuff... 
- os_utils.run("{0} ".format("mf6"), cwd=tmp_model_ws) + os_utils.run("{0} ".format(mf6_exe_path), cwd=tmp_model_ws) template_ws = Path(tmp_path, "new_temp_direct") if os.path.exists(template_ws): @@ -2249,7 +2132,7 @@ def mf6_freyberg_direct_test(tmp_path): use_rows=[('well2', 21, 15), ('well4', 30, 7)]) assert len(pf.par_dfs[-1]) == 2 * 2 # should be # add model run command - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) print(pf.mult_files) print(pf.org_files) @@ -2497,7 +2380,7 @@ def mf6_freyberg_varying_idomain(tmp_path): geostruct=gr_gs, zone_array=ib[k],ult_lbound=ult_lb,ult_ubound=ult_ub) # add model run command - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) df = pd.read_csv(os.path.join(tmp_model_ws, "heads.csv"), index_col=0) df = pf.add_observations("heads.csv", insfile="heads.csv.ins", index_cols="time", use_cols=list(df.columns.values), prefix="hds", ofile_sep=",") @@ -2617,7 +2500,7 @@ def mf6_freyberg_short_direct_test(tmp_path): sim.write_simulation() # SETUP pest stuff... 
- os_utils.run("{0} ".format("mf6"), cwd=tmp_model_ws) + os_utils.run("{0} ".format(mf6_exe_path), cwd=tmp_model_ws) template_ws = "new_temp_direct" if os.path.exists(template_ws): @@ -2759,7 +2642,7 @@ def mf6_freyberg_short_direct_test(tmp_path): transform="none") # add model run command - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) print(pf.mult_files) print(pf.org_files) @@ -3595,7 +3478,7 @@ def mf6_freyberg_arr_obs_and_headerless_test(tmp_path): arr_dict[arr_file] = np.loadtxt(pf.new_d / arr_file) # add model run command - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) print(pf.mult_files) print(pf.org_files) @@ -3642,7 +3525,7 @@ def mf6_freyberg_arr_obs_and_headerless_test(tmp_path): assert d.sum() == 0 -def mf6_freyberg_pp_locs_test(tmp_path): +def test_mf6_freyberg_pp_locs(tmp_path): import numpy as np import pandas as pd pd.set_option('display.max_rows', 500) @@ -3747,7 +3630,7 @@ def mf6_freyberg_pp_locs_test(tmp_path): # add model run command - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) print(pf.mult_files) print(pf.org_files) @@ -4037,7 +3920,7 @@ def mf6_add_various_obs_test(tmp_path): pf.add_parameters(["freyberg6.npf_k_layer1.txt", "freyberg6.npf_k_layer2.txt", "freyberg6.npf_k_layer3.txt"],par_type='constant') - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) pf.add_py_function( __file__, f"_add_big_obsffile('.', profile=False, nchar={linelen})", @@ -4102,11 +3985,7 @@ def mf6_subdir_test(tmp_path): sim.write_simulation() # SETUP pest stuff... 
- if bin_path == '': - exe = mf6_exe_path # bit of flexibility for local/server run - else: - exe = os.path.join('..', mf6_exe_path) - os_utils.run("{0} ".format(exe), cwd=tmp2_ws) + os_utils.run("{0} ".format(mf6_exe_path), cwd=tmp2_ws) # call generic once so that the output file exists df = generic_function(tmp2_ws) template_ws = "new_temp" @@ -4271,7 +4150,7 @@ def mf6_subdir_test(tmp_path): # # # add model run command pf.pre_py_cmds.append(f"os.chdir('{sd}')") - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) pf.post_py_cmds.insert(0, "os.chdir('..')") # print(pf.mult_files) # print(pf.org_files) @@ -4584,7 +4463,7 @@ def test_vertex_grid(tmp_path): a = [float(i) for i in a.split()] np.savetxt(fname=filename, X=a) # run model after input file change - pyemu.os_utils.run('mf6', cwd=template_ws) + pyemu.os_utils.run(mf6_exe_path, cwd=template_ws) for f in files: layer = int(f.split('_layer')[-1].split('.')[0]) - 1 @@ -4635,7 +4514,7 @@ def test_vertex_grid(tmp_path): index_cols="time", use_cols=list(df.columns.values), prefix="sfr") - pf.mod_sys_cmds.append('mf6') + pf.mod_sys_cmds.append(mf6_exe_path) pst = pf.build_pst() # check_apply(pf) # run once @@ -4837,7 +4716,7 @@ def mf6_freyberg_thresh_test(tmp_path): # SETUP pest stuff... 
- os_utils.run("{0} ".format("mf6"), cwd=tmp_model_ws) + os_utils.run("{0} ".format(mf6_exe_path), cwd=tmp_model_ws) template_ws = Path(tmp_path, "new_temp_thresh") if os.path.exists(template_ws): @@ -4944,7 +4823,7 @@ def mf6_freyberg_thresh_test(tmp_path): num_cat_arrays += 1 # add model run command - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) # print(pf.mult_files) # print(pf.org_files) @@ -5707,7 +5586,7 @@ def mf6_freyberg_ppu_hyperpars_invest(tmp_path): # add model run command - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) print(pf.mult_files) print(pf.org_files) @@ -5806,7 +5685,7 @@ def mf6_freyberg_ppu_hyperpars_thresh_invest(tmp_path): sim.write_simulation() # SETUP pest stuff... - os_utils.run("{0} ".format("mf6"), cwd=tmp_model_ws) + os_utils.run("{0} ".format(mf6_exe_path), cwd=tmp_model_ws) template_ws = Path(tmp_path, "new_temp_thresh") if os.path.exists(template_ws): @@ -5961,7 +5840,7 @@ def mf6_freyberg_ppu_hyperpars_thresh_invest(tmp_path): num_cat_arrays += 1 # add model run command - pf.mod_sys_cmds.append("mf6") + pf.mod_sys_cmds.append(mf6_exe_path) print(pf.mult_files) print(pf.org_files) @@ -6322,7 +6201,7 @@ def invest_vertexpp_setup_speed(): from pathlib import Path from shapely import Polygon import cProfile - sim = flopy.mf6.MFSimulation(sim_name="test", sim_ws="test", exe_name="mf6") + sim = flopy.mf6.MFSimulation(sim_name="test", sim_ws="test", exe_name=mf6_exe_path) gwf = flopy.mf6.ModflowGwf(sim, modelname="test") dis = flopy.mf6.ModflowGwfdis( @@ -6436,7 +6315,7 @@ def draw_consistency_test(tmp_path): bce = pyemu.ParameterEnsemble.from_binary(pst=None,filename=os.path.join(tmp_d,"basecase_pe_enforce.bin"))._df bc.index = bc.index.astype(int) bce.index = bce.index.astype(int) - + sr = pyemu.helpers.SpatialReference.from_namfile( @@ -6462,7 +6341,7 @@ def draw_consistency_test(tmp_path): bearing=45.0 #angle in degrees East of North corresponding to anisotropy ellipse ) - pp_gs = 
pyemu.geostats.GeoStruct(variograms=v_pp, transform='log') + pp_gs = pyemu.geostats.GeoStruct(variograms=v_pp, transform='log') v = pyemu.utils.geostats.ExpVario(a=3000,contribution=1.0) tag = "npf_k" @@ -6481,7 +6360,7 @@ def draw_consistency_test(tmp_path): pargp=f.split('.')[1].replace("_","")+"cn", lower_bound=0.5,upper_bound=2.0, ult_ubound=100, ult_lbound=0.01) - + df_cst = pf.add_parameters(f, par_type="grid", par_name_base=f.split('.')[1].replace("_","")+"gr", @@ -6506,7 +6385,7 @@ def draw_consistency_test(tmp_path): ult_ubound=100, ult_lbound=0.01, pp_options={"pp_space":50} ) # `PstFrom` will generate a uniform grid of pilot points in every 4th row and column - + df_pp = pf.add_parameters(f, zone_array=ib, par_type="pilotpoints", @@ -6517,7 +6396,7 @@ def draw_consistency_test(tmp_path): ult_ubound=100, ult_lbound=0.01, pp_options={"pp_space":20} ) # `PstFrom` will generate a uniform grid of pilot points in every 4th row and column - + pst = pf.build_pst() @@ -6531,7 +6410,7 @@ def draw_consistency_test(tmp_path): # no bs values... 
assert np.nanmax(np.abs(pe.values)) < 100000 assert np.all(~np.isnan(pe.values)) - + pe.to_dense(os.path.join(template_ws,"basecase_pe.bin")) diff = np.abs(pe - bc) @@ -6544,8 +6423,8 @@ def draw_consistency_test(tmp_path): print("pe enforced",diff.values.max()) assert np.all(~np.isnan(diff)) assert diff.values.max() < 1e-6 - - + + if __name__ == "__main__": draw_consistency_test('.') #xsec_pars_as_obs_test(".") @@ -6563,7 +6442,7 @@ def draw_consistency_test(tmp_path): # invest() # test_add_array_parameters_pps_grid() - #freyberg_test(os.path.abspath(".")) + # test_freyberg(os.path.abspath(".")) # freyberg_prior_build_test() #mf6_freyberg_test(os.path.abspath(".")) #$mf6_freyberg_da_test() diff --git a/autotest/utils_tests.py b/autotest/utils_tests.py index be7288c6..14d4528e 100644 --- a/autotest/utils_tests.py +++ b/autotest/utils_tests.py @@ -11,37 +11,15 @@ import pyemu from pst_from_tests import _get_port -ext = '' -local_bins = False # change if wanting to test with local binary exes -if local_bins: - bin_path = os.path.join("..", "..", "bin") - if "linux" in platform.system().lower(): - pass - bin_path = os.path.join(bin_path, "linux") - elif "darwin" in platform.system().lower(): - pass - bin_path = os.path.join(bin_path, "mac") - else: - bin_path = os.path.join(bin_path, "win") - ext = '.exe' -else: - bin_path = '' - if "windows" in platform.system().lower(): - ext = '.exe' - -mf_exe_path = os.path.join(bin_path, "mfnwt") -mt_exe_path = os.path.join(bin_path, "mt3dusgs") -usg_exe_path = os.path.join(bin_path, "mfusg_gsi") -mf6_exe_path = os.path.join(bin_path, "mf6") -pp_exe_path = os.path.join(bin_path, "pestpp-glm") -ies_exe_path = os.path.join(bin_path, "pestpp-ies") -mou_exe_path = os.path.join(bin_path, "pestpp-mou") -swp_exe_path = os.path.join(bin_path, "pestpp-swp") - -mf_exe_name = os.path.basename(mf_exe_path) -mf6_exe_name = os.path.basename(mf6_exe_path) - - +from conftest import full_exe_ref_dict + +exepath_dict = full_exe_ref_dict() 
+ies_exe_path = exepath_dict['pestpp-ies'] +mf_exe_path = exepath_dict['mfnwt'] +mf6_exe_path = exepath_dict['mf6'] +pp_exe_path = exepath_dict['pestpp-glm'] +usg_exe_path = exepath_dict["mfusg_gsi"] +mou_exe_path = exepath_dict["pestpp-mou"] def add_pi_obj_func_test(tmp_path): import os diff --git a/examples/pstfrom_mf6.ipynb b/examples/pstfrom_mf6.ipynb index c043ce20..c2e21f67 100644 --- a/examples/pstfrom_mf6.ipynb +++ b/examples/pstfrom_mf6.ipynb @@ -911,10 +911,10 @@ ] }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "# load the mf6 model with flopy to get the spatial reference\n", "sim = flopy.mf6.MFSimulation.load(sim_ws=tmp_model_ws)\n", @@ -935,13 +935,13 @@ "\n", "# add observations from the sfr observation output file\n", "df = pd.read_csv(os.path.join(tmp_model_ws, \"sfr.csv\"), index_col=0)\n", - "pf.add_observations(\"sfr.csv\", insfile=\"sfr.csv.ins\", index_cols=\"time\", \n", + "pf.add_observations(\"sfr.csv\", insfile=\"sfr.csv.ins\", index_cols=\"time\",\n", " use_cols=list(df.columns.values),\n", " prefix=\"sfr\")\n", "\n", "# add observations for the heads observation output file\n", "df = pd.read_csv(os.path.join(tmp_model_ws, \"heads.csv\"), index_col=0)\n", - "pf.add_observations(\"heads.csv\", insfile=\"heads.csv.ins\", \n", + "pf.add_observations(\"heads.csv\", insfile=\"heads.csv.ins\",\n", " index_cols=\"time\", use_cols=list(df.columns.values),\n", " prefix=\"hds\")\n", "\n", @@ -977,28 +977,28 @@ "for tag,bnd in tags.items():\n", " lb,ub = bnd[0],bnd[1]\n", " # find all array based files that have the tag in the name\n", - " arr_files = [f for f in os.listdir(template_ws) if tag in f \n", + " arr_files = [f for f in os.listdir(template_ws) if tag in f\n", "\t\t\t\t and f.endswith(\".txt\")]\n", "\n", " if len(arr_files) == 0:\n", " print(\"warning: no array files found for \",tag)\n", " continue\n", - " \n", + "\n", " # make sure each 
array file in nrow X ncol dimensions (not wrapped, sigh)\n", " for arr_file in arr_files:\n", " arr = np.loadtxt(os.path.join(template_ws,arr_file)).reshape(ib.shape)\n", " np.savetxt(os.path.join(template_ws,arr_file),arr,fmt=\"%15.6E\")\n", - " \n", + "\n", " # if this is the recharge tag\n", " if \"rch\" in tag:\n", " # add one set of grid-scale parameters for all files\n", - " pf.add_parameters(filenames=arr_files, par_type=\"grid\", \n", - " \t\t\t\t par_name_base=\"rch_gr\",pargp=\"rch_gr\", \n", - " \t\t\t\t zone_array=ib, upper_bound=ub, \n", + " pf.add_parameters(filenames=arr_files, par_type=\"grid\",\n", + " \t\t\t\t par_name_base=\"rch_gr\",pargp=\"rch_gr\",\n", + " \t\t\t\t zone_array=ib, upper_bound=ub,\n", " \t\t\t\t lower_bound=lb,geostruct=rch_gs)\n", "\n", - " # add one constant parameter for each array, and \n", - " # assign it a datetime so we can work out the \n", + " # add one constant parameter for each array, and\n", + " # assign it a datetime so we can work out the\n", " # temporal correlation\n", " for arr_file in arr_files:\n", " kper = int(arr_file.split('.')[1].split('_')[-1]) - 1\n", @@ -1016,40 +1016,40 @@ " pargp=arr_file.split('.')[1]+\"_gr\",zone_array=ib,\n", " upper_bound=ub,lower_bound=lb,\n", " geostruct=grid_gs)\n", - " pf.add_parameters(filenames=arr_file, par_type=\"pilotpoints\", \n", + " pf.add_parameters(filenames=arr_file, par_type=\"pilotpoints\",\n", " par_name_base=arr_file.split('.')[1]+\"_pp\",\n", - " pargp=arr_file.split('.')[1]+\"_pp\", \n", + " pargp=arr_file.split('.')[1]+\"_pp\",\n", " zone_array=ib,upper_bound=ub,lower_bound=lb,\n", " pp_space=int(5 * redis_fac),geostruct=pp_gs)\n", "\n", "\n", "# get all the list-type files associated with the wel package\n", - "list_files = [f for f in os.listdir(tmp_model_ws) if \n", - "\t\t\t \"freyberg6.wel_stress_period_data_\" \n", + "list_files = [f for f in os.listdir(tmp_model_ws) if\n", + "\t\t\t \"freyberg6.wel_stress_period_data_\"\n", " in f and 
f.endswith(\".txt\")]\n", - "# for each wel-package list-type file \n", + "# for each wel-package list-type file\n", "for list_file in list_files:\n", " kper = int(list_file.split(\".\")[1].split('_')[-1]) - 1\n", " # add spatially constant, but temporally correlated parameter\n", " pf.add_parameters(filenames=list_file,par_type=\"constant\",\n", " \t\t\t\t par_name_base=\"twel_mlt_{0}\".format(kper),\n", " pargp=\"twel_mlt\".format(kper),index_cols=[0,1,2],\n", - " use_cols=[3],upper_bound=1.5,lower_bound=0.5, \n", + " use_cols=[3],upper_bound=1.5,lower_bound=0.5,\n", " datetime=dts[kper], geostruct=temporal_gs)\n", "\n", - " # add temporally indep, but spatially correlated grid-scale \n", + " # add temporally indep, but spatially correlated grid-scale\n", " # parameters, one per well\n", - " pf.add_parameters(filenames=list_file, par_type=\"grid\", \n", + " pf.add_parameters(filenames=list_file, par_type=\"grid\",\n", " par_name_base=\"wel_grid_{0}\".format(kper),\n", - " pargp=\"wel_{0}\".format(kper), index_cols=[0, 1, 2], \n", + " pargp=\"wel_{0}\".format(kper), index_cols=[0, 1, 2],\n", " use_cols=[3],upper_bound=1.5, lower_bound=0.5)\n", "\n", - "# add grid-scale parameters for SFR reach conductance. 
\n", - "# Use layer, row, col and reach number in the \n", + "# add grid-scale parameters for SFR reach conductance.\n", + "# Use layer, row, col and reach number in the\n", "# parameter names\n", - "pf.add_parameters(filenames=\"freyberg6.sfr_packagedata.txt\", \n", + "pf.add_parameters(filenames=\"freyberg6.sfr_packagedata.txt\",\n", " par_name_base=\"sfr_rhk\",\n", - " pargp=\"sfr_rhk\", index_cols=[0,1,2,3], \n", + " pargp=\"sfr_rhk\", index_cols=[0,1,2,3],\n", " use_cols=[9], upper_bound=10.,\n", " lower_bound=0.1,\n", " par_type=\"grid\")\n", @@ -1092,18 +1092,11 @@ ] }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], - "source": [] - }, - { "cell_type": "code", - "execution_count": null, - "metadata": {}, "outputs": [], - "source": [] + "execution_count": null, + "source": "" } ], "metadata": { @@ -1122,7 +1115,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.8" + "version": "3.13.5" } }, "nbformat": 4, diff --git a/examples/pstfrom_mf6_ppu.ipynb b/examples/pstfrom_mf6_ppu.ipynb index 22d2837f..d4fd4414 100644 --- a/examples/pstfrom_mf6_ppu.ipynb +++ b/examples/pstfrom_mf6_ppu.ipynb @@ -1,17 +1,16 @@ { "cells": [ { - "cell_type": "markdown", "metadata": {}, - "source": [ - "# Setting up a PEST interface from MODFLOW6 using the `PstFrom` class with `PyPestUtils` for advanced pilot point parameterization" - ] + "cell_type": "markdown", + "source": "# Setting up a PEST interface from MODFLOW6 using the `PstFrom` class with `PyPestUtils` for advanced pilot point parameterization", + "id": "597647f9253af23f" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "import os\n", "import shutil\n", @@ -20,285 +19,293 @@ "import matplotlib.pyplot as plt\n", "import pyemu\n", "import flopy" - ] + ], + "id": "9dd3398b66b0b998" }, { - "cell_type": "code", - "execution_count": null, 
"metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "import sys\n", "sys.path.append(os.path.join(\"..\",\"..\",\"pypestutils\"))" - ] + ], + "id": "b04bf3f138f02ffc" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], - "source": [ - "import pypestutils as ppu" - ] + "execution_count": null, + "source": "import pypestutils as ppu", + "id": "53f26ecf6ecf8e74" }, { - "cell_type": "markdown", "metadata": {}, - "source": [ - "An existing MODFLOW6 model is in the directory `freyberg_mf6`. Lets check it out:" - ] + "cell_type": "markdown", + "source": "An existing MODFLOW6 model is in the directory `freyberg_mf6`. Lets check it out:", + "id": "98769e861ab3f840" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "org_model_ws = os.path.join('freyberg_mf6')\n", "os.listdir(org_model_ws)" - ] + ], + "id": "c007d8b59aa1e92f" }, { - "cell_type": "markdown", "metadata": {}, + "cell_type": "markdown", "source": [ "You can see that all the input array and list data for this model have been written \"externally\" - this is key to using the `PstFrom` class. 
\n", "\n", "Let's quickly viz the model top just to remind us of what we are dealing with:" - ] + ], + "id": "bfcb10c3e3999ab2" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "id_arr = np.loadtxt(os.path.join(org_model_ws,\"freyberg6.dis_idomain_layer3.txt\"))\n", "top_arr = np.loadtxt(os.path.join(org_model_ws,\"freyberg6.dis_top.txt\"))\n", "top_arr[id_arr==0] = np.nan\n", "plt.imshow(top_arr)" - ] + ], + "id": "bc1bfe933fa33b1" }, { - "cell_type": "markdown", "metadata": {}, - "source": [ - "Now let's copy those files to a temporary location just to make sure we don't goof up those original files:" - ] + "cell_type": "markdown", + "source": "Now let's copy those files to a temporary location just to make sure we don't goof up those original files:", + "id": "2d0a09c8caa969c6" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "tmp_model_ws = \"temp_pst_from_ppu\"\n", "if os.path.exists(tmp_model_ws):\n", " shutil.rmtree(tmp_model_ws)\n", "shutil.copytree(org_model_ws,tmp_model_ws)\n", "os.listdir(tmp_model_ws)" - ] + ], + "id": "a074d2977f031532" }, { - "cell_type": "markdown", "metadata": {}, + "cell_type": "markdown", "source": [ "Now we need just a tiny bit of info about the spatial discretization of the model - this is needed to work out separation distances between parameters for build a geostatistical prior covariance matrix later.\n", "\n", "Here we will load the flopy sim and model instance just to help us define some quantities later - flopy is not required to use the `PstFrom` class." 
- ] + ], + "id": "b7448da37fb0d0db" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "sim = flopy.mf6.MFSimulation.load(sim_ws=tmp_model_ws)\n", "m = sim.get_model(\"freyberg6\")\n" - ] + ], + "id": "6d5e3c75d4ccff87" }, { - "cell_type": "markdown", "metadata": {}, - "source": [ - "Here we use the simple `SpatialReference` pyemu implements to help us spatially locate parameters" - ] + "cell_type": "markdown", + "source": "Here we use the simple `SpatialReference` pyemu implements to help us spatially locate parameters", + "id": "791e3a32322ba060" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "sr = pyemu.helpers.SpatialReference.from_namfile(\n", " os.path.join(tmp_model_ws, \"freyberg6.nam\"),\n", " delr=m.dis.delr.array, delc=m.dis.delc.array)\n", "sr" - ] + ], + "id": "3348e2c5449e3065" }, { - "cell_type": "markdown", "metadata": {}, - "source": [ - "Now we can instantiate a `PstFrom` class instance" - ] + "cell_type": "markdown", + "source": "Now we can instantiate a `PstFrom` class instance", + "id": "51fd4fad812b4a45" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "template_ws = \"freyberg6_template\"\n", "pf = pyemu.utils.PstFrom(original_d=tmp_model_ws, new_d=template_ws,\n", " remove_existing=True,\n", " longnames=True, spatial_reference=sr,\n", " zero_based=False,start_datetime=\"1-1-2018\")\n" - ] + ], + "id": "38b193a142016006" }, { - "cell_type": "markdown", "metadata": {}, + "cell_type": "markdown", "source": [ "## Observations\n", "\n", "So now that we have a `PstFrom` instance, but its just an empty container at this point, so we need to add some PEST interface \"observations\" and \"parameters\". Let's start with observations using MODFLOW6 head. 
These are stored in `heads.csv`:" - ] + ], + "id": "4d34cbe82af7d566" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "df = pd.read_csv(os.path.join(tmp_model_ws,\"heads.csv\"),index_col=0)\n", "df" - ] + ], + "id": "12f8b4718e9d6cbd" }, { - "cell_type": "markdown", "metadata": {}, - "source": [ - "The main entry point for adding observations is (surprise) `PstFrom.add_observations()`. This method works on the list-type observation output file. We need to tell it what column is the index column (can be string if there is a header or int if no header) and then what columns contain quantities we want to monitor (e.g. \"observe\") in the control file - in this case we want to monitor all columns except the index column:" - ] + "cell_type": "markdown", + "source": "The main entry point for adding observations is (surprise) `PstFrom.add_observations()`. This method works on the list-type observation output file. We need to tell it what column is the index column (can be string if there is a header or int if no header) and then what columns contain quantities we want to monitor (e.g. \"observe\") in the control file - in this case we want to monitor all columns except the index column:", + "id": "90444d9d7ea6b006" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "hds_df = pf.add_observations(\"heads.csv\",insfile=\"heads.csv.ins\",index_cols=\"time\",\n", " use_cols=list(df.columns.values),prefix=\"hds\",)\n", "hds_df" - ] + ], + "id": "139e2128db6100c" }, { - "cell_type": "markdown", "metadata": {}, - "source": [ - "We can see that it returned a dataframe with lots of useful info: the observation names that were formed (`obsnme`), the values that were read from `heads.csv` (`obsval`) and also some generic weights and group names. 
At this point, no control file has been created, we have simply prepared to add this observations to the control file later. " - ] + "cell_type": "markdown", + "source": "We can see that it returned a dataframe with lots of useful info: the observation names that were formed (`obsnme`), the values that were read from `heads.csv` (`obsval`) and also some generic weights and group names. At this point, no control file has been created, we have simply prepared to add this observations to the control file later. ", + "id": "4935231d1ffd7d8e" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], - "source": [ - "[f for f in os.listdir(template_ws) if f.endswith(\".ins\")]" - ] + "execution_count": null, + "source": "[f for f in os.listdir(template_ws) if f.endswith(\".ins\")]", + "id": "4e4baaae9a812573" }, { - "cell_type": "markdown", "metadata": {}, + "cell_type": "markdown", "source": [ "Nice! We also have a PEST-style instruction file for those obs.\n", "\n", "Now lets do the same for SFR observations:" - ] + ], + "id": "d56c174ee65114a7" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "df = pd.read_csv(os.path.join(tmp_model_ws, \"sfr.csv\"), index_col=0)\n", "sfr_df = pf.add_observations(\"sfr.csv\", insfile=\"sfr.csv.ins\", index_cols=\"time\", use_cols=list(df.columns.values))\n", "sfr_df" - ] + ], + "id": "e72eb48997a89ccd" }, { - "cell_type": "markdown", "metadata": {}, + "cell_type": "markdown", "source": [ "Sweet as! Now that we have some observations, let's add parameters!\n", "\n", "## Pilot points and `PyPestUtils`\n", "\n", "This notebook is mostly meant to demonstrate some advanced pilot point parameterization that is possible with `PyPestUtils`, so we will only focus on HK and VK pilot point parameters. This is just to keep the example short. 
In practice, please please please parameterize boundary conditions too!" - ] + ], + "id": "b8c36e9db08b7a1a" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "v = pyemu.geostats.ExpVario(contribution=1.0,a=5000,bearing=0,anisotropy=5)\n", "pp_gs = pyemu.geostats.GeoStruct(variograms=v, transform='log')" - ] + ], + "id": "e2fce4e4ae545c61" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "pp_gs.plot()\n", "print(\"spatial variogram\")" - ] + ], + "id": "4cf91a232ef26e6e" }, { - "cell_type": "markdown", "metadata": {}, - "source": [ - "Now let's get the idomain array to use as a zone array - this keeps us from setting up parameters in inactive model cells:" - ] + "cell_type": "markdown", + "source": "Now let's get the idomain array to use as a zone array - this keeps us from setting up parameters in inactive model cells:", + "id": "f7de7f611542a0d9" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], - "source": [ - "ib = m.dis.idomain[0].array" - ] + "execution_count": null, + "source": "ib = m.dis.idomain[0].array", + "id": "87d94be55152e1e0" }, { - "cell_type": "markdown", "metadata": {}, - "source": [ - "Find HK files for the upper and lower model layers (assuming model layer 2 is a semi-confining unit)" - ] + "cell_type": "markdown", + "source": "Find HK files for the upper and lower model layers (assuming model layer 2 is a semi-confining unit)", + "id": "8cf040c4ce99d692" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "hk_arr_files = [f for f in os.listdir(tmp_model_ws) if \"npf_k_\" in f and f.endswith(\".txt\") and \"layer2\" not in f]\n", "hk_arr_files" - ] + ], + "id": "459ed4d1c3139dde" }, { - "cell_type": 
"code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "arr_file = \"freyberg6.npf_k_layer1.txt\"\n", "tag = arr_file.split('.')[1].replace(\"_\",\"-\")\n", @@ -310,55 +317,57 @@ "# so we can make easy plots later...\n", "pf.add_observations(arr_file,prefix=tag,\n", " obsgp=tag,zone_array=ib)" - ] + ], + "id": "3292bf885ab36a90" }, { - "cell_type": "markdown", "metadata": {}, - "source": [ - "If you are familiar with how `PstFrom` has worked historically, we handed off the process to solve for the factor file (which requires solving the kriging equations for each active node) to a pure python (well, with pandas and numpy). This was ok for toy models, but hella slow for big ugly models. If you look at the log entries above, you should see that the instead, `PstFrom` successfully handed off the solve to `PyPestUtils`, which is exponentially faster for big models. sweet ez! " - ] + "cell_type": "markdown", + "source": "If you are familiar with how `PstFrom` has worked historically, we handed off the process to solve for the factor file (which requires solving the kriging equations for each active node) to a pure python (well, with pandas and numpy). This was ok for toy models, but hella slow for big ugly models. If you look at the log entries above, you should see that the instead, `PstFrom` successfully handed off the solve to `PyPestUtils`, which is exponentially faster for big models. sweet ez! 
", + "id": "4a892258c71c90ce" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "tpl_files = [f for f in os.listdir(template_ws) if f.endswith(\".tpl\")]\n", "tpl_files" - ] + ], + "id": "d62cca5a36767595" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "with open(os.path.join(template_ws,tpl_files[0]),'r') as f:\n", " for _ in range(2):\n", " print(f.readline().strip())\n", " " - ] + ], + "id": "b8f9f52c986dbe6" }, { - "cell_type": "markdown", "metadata": {}, + "cell_type": "markdown", "source": [ "\n", "So those might look like pretty redic parameter names, but they contain heaps of metadata to help you post process things later..." - ] + ], + "id": "b2491117265b8653" }, { - "cell_type": "markdown", "metadata": {}, - "source": [ - "So those are you standard pilot points for HK in layer 1 - same as it ever was..." - ] + "cell_type": "markdown", + "source": "So those are you standard pilot points for HK in layer 1 - same as it ever was...", + "id": "4740988747c39978" }, { - "cell_type": "markdown", "metadata": {}, + "cell_type": "markdown", "source": [ "## Geostatistical hyper-parameters\n", "\n", @@ -367,25 +376,27 @@ "In `PyPestUtils`, we can supply the pilot-point-to-grid interpolation process with arrays of hyper-parameter values, one array for each variogram property. The result of this hyper parameter mess is referred to as a non-stationary spatial parameterization. 
buckle up...\n", "\n", "First let's define some additional geostatistical structures:" - ] + ], + "id": "572d96a02212dfa6" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "value_v = pyemu.geostats.ExpVario(contribution=1, a=5000, anisotropy=5, bearing=0.0)\n", "value_gs = pyemu.geostats.GeoStruct(variograms=value_v)\n", "bearing_v = pyemu.geostats.ExpVario(contribution=1,a=10000,anisotropy=5,bearing=0.0)\n", "bearing_gs = pyemu.geostats.GeoStruct(variograms=bearing_v)" - ] + ], + "id": "343f86f826327d3f" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "arr_file = \"freyberg6.npf_k_layer3.txt\"\n", "tag = arr_file.split('.')[1].replace(\"_\",\"-\")\n", @@ -396,34 +407,37 @@ " apply_order=2)\n", "pf.add_observations(arr_file,prefix=tag,\n", " obsgp=tag,zone_array=ib)" - ] + ], + "id": "be859202f72172fd" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "hyperpar_files = [f for f in os.listdir(pf.new_d) if tag in f]\n", "hyperpar_files" - ] + ], + "id": "cbfcf6fe35fc9fa0" }, { - "cell_type": "markdown", "metadata": {}, + "cell_type": "markdown", "source": [ "when we supplied the \"prep_hyperpars\" as `True` above, that triggered `PstFrom` to do something different. Instead of solving for the pilot point kriging factors as before, now, we have array-based files for the geostatistical hyper parameters, as well as some additional quantities we need to \"apply\" these hyper parameter at runtime. This is a key difference: When the pilot point variogram is changing for each model run, we need to re-solve for the kriging factors for each model run...\n", "\n", "We snuck in something else too - see that `apply_order` argument? 
That is how we can control what order of files being processed by the run-time multiplier parameter function. Since we are going to parameterize the hyper parameters and there is an implicit order between these hyper parameters and the underlying pilot points, we need to make sure the hyper parameters are processed first. \n", "\n", "Lets setup some hyper parameters for estimation. We will use a constant for the anisotropy ratio, but use pilot points for the bearing:" - ] + ], + "id": "4b16a43536e4689a" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "afile = 'npf-k-layer3.aniso.dat'\n", "tag = afile.split('.')[0].replace(\"_\",\"-\")+\"-aniso\"\n", @@ -440,18 +454,18 @@ " pp_options={\"try_use_ppu\":True},\n", " apply_order=1,geostruct=bearing_gs)\n", "pf.add_observations(bfile, prefix=tag, obsgp=tag) " - ] + ], + "id": "c72bac997d4c0a41" }, { - "cell_type": "markdown", "metadata": {}, - "source": [ - "Notice that the `apply_order` for these hyper pars is 1 so that any processing for these quantities happens before the actual underlying pilot points are processed" - ] + "cell_type": "markdown", + "source": "Notice that the `apply_order` for these hyper pars is 1 so that any processing for these quantities happens before the actual underlying pilot points are processed", + "id": "17a6f9a495cc9976" }, { - "cell_type": "markdown", "metadata": {}, + "cell_type": "markdown", "source": [ "## \"These go to 11\" - amp'ing things up with categorization\n", "\n", @@ -462,13 +476,14 @@ "lets setup non-stationary categorical parameterization for the VK of layer 2 (the semi confining unit). We can conceptualize this as a semi-confining unit that has \"windows\" in it that connects the two aquifers. Where there is not a window, the Vk will be very low, where there is a window, the VK will be much higher. 
Let's also assume the windows in the confining unit where created when a stream eroded thru it, so the shape of these windows will be higher-order (not derived from a standard geostatistical 2-point process), but rather from connected features.\n", "\n", "In what follows, we setup this complex parameterization. We also add lots of aux observations to lets plot and viz the steps in this parameterization process." - ] + ], + "id": "649aa96d83db3844" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "arr_file = \"freyberg6.npf_k33_layer2.txt\"\n", "print(arr_file)\n", @@ -549,51 +564,54 @@ "df = pd.read_csv(threshcsv.replace(\".csv\",\"_results.csv\"),index_col=0)\n", "pf.add_observations(os.path.split(threshcsv)[1].replace(\".csv\",\"_results.csv\"),index_cols=\"threshcat\",use_cols=df.columns.tolist(),prefix=prefix+\"-results_k:{0}\".format(k),\n", " obsgp=prefix+\"-results_k:{0}\".format(k),ofile_sep=\",\")\n" - ] + ], + "id": "42067ca6bd431a2" }, { - "cell_type": "markdown", "metadata": {}, + "cell_type": "markdown", "source": [ "### build the control file, pest interface files, and forward run script\n", "At this point, we have some parameters and some observations, so we can create a control file:" - ] + ], + "id": "324fd56e3a4a8d8d" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "pf.mod_sys_cmds.append(\"mf6\")\n", "pf.pre_py_cmds.insert(0,\"import sys\")\n", "pf.pre_py_cmds.insert(1,\"sys.path.append(os.path.join('..','..','..','pypestutils'))\")\n", "pst = pf.build_pst()" - ] + ], + "id": "e8390ebb7f31243e" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], - "source": [ - "_ = [print(line.rstrip()) for line in open(os.path.join(template_ws,\"forward_run.py\"))]" - ] + "execution_count": null, + "source": 
"_ = [print(line.rstrip()) for line in open(os.path.join(template_ws,\"forward_run.py\"))]", + "id": "27a01a3a59425112" }, { - "cell_type": "markdown", "metadata": {}, + "cell_type": "markdown", "source": [ "## Setting initial parameter bounds and values\n", "\n", "Here is some gory detail regarding defining the hyper parameters for both layer 3 HK and layer 2 VK..." - ] + ], + "id": "15c49fe430cd1d0a" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "#set the initial and bounds for the fill values\n", "par = pst.parameter_data\n", @@ -656,104 +674,108 @@ "par.loc[cat2par, \"parubnd\"] = 1\n", "par.loc[cat2par, \"parlbnd\"] = 1\n", "par.loc[cat2par,\"partrans\"] = \"fixed\"\n" - ] + ], + "id": "7e131502d074f897" }, { - "cell_type": "markdown", "metadata": {}, - "source": [ - "# Generating a prior parameter ensemble, then run and viz a real" - ] + "cell_type": "markdown", + "source": "# Generating a prior parameter ensemble, then run and viz a real", + "id": "c2154e42f31ec8dd" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "np.random.seed(122341)\n", "pe = pf.draw(num_reals=100)" - ] + ], + "id": "f09a17cf6493216f" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], - "source": [ - "pe.to_csv(os.path.join(template_ws,\"prior.csv\"))" - ] + "execution_count": null, + "source": "pe.to_csv(os.path.join(template_ws,\"prior.csv\"))", + "id": "89c5c4280a568acc" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "real = 0\n", "pst_name = \"real_{0}.pst\".format(real)\n", "pst.parameter_data.loc[pst.adj_par_names,\"parval1\"] = pe.loc[real,pst.adj_par_names].values" - ] + ], + "id": "a5bf620e94c93da4" }, { - "cell_type": 
"code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "pst.control_data.noptmax = 0\n", "pst.write(os.path.join(pf.new_d,pst_name))" - ] + ], + "id": "60804fce3b5ab3a6" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], - "source": [ - "pyemu.os_utils.run(\"pestpp-ies {0}\".format(pst_name),cwd=pf.new_d)" - ] + "execution_count": null, + "source": "pyemu.os_utils.run(\"pestpp-ies {0}\".format(pst_name),cwd=pf.new_d)", + "id": "b0724f47a40afc38" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "pst.set_res(os.path.join(pf.new_d,pst_name.replace(\".pst\",\".base.rei\")))\n", "res = pst.res\n", "obs = pst.observation_data\n", "grps = [o for o in obs.obgnme.unique() if o.startswith(\"npf\") and \"result\" not in o and \"aniso\" not in o]\n", "grps" - ] + ], + "id": "e7d925c78449818d" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "gobs = obs.loc[obs.obgnme.isin(grps),:].copy()\n", "gobs[\"i\"] = gobs.i.astype(int)\n", "gobs[\"j\"] = gobs.j.astype(int)\n", "gobs[\"k\"] = gobs.obgnme.apply(lambda x: int(x.split('-')[2].replace(\"layer\",\"\")) - 1)" - ] + ], + "id": "1bffb643bfe5b4b2" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "uk = gobs.k.unique()\n", "uk.sort()" - ] + ], + "id": "6107397c97e67caf" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], + "execution_count": null, "source": [ "for k in uk:\n", " kobs = gobs.loc[gobs.k==k,:]\n", @@ -782,42 +804,25 @@ " plt.tight_layout()\n", " plt.show()\n", " plt.close(fig)" - ] + ], + "id": "55c643ac16ae39b2" }, { - "cell_type": 
"markdown", "metadata": {}, - "source": [ - "Stunning isn't it?! There is clearly a lot subjectivity in the form of defining the prior for the hyper parameters required to use these non-stationary geostats, but they do afford more opportunities to express (stochastic) expert knowledge. To be honest, there was a lot of experimenting with this notebook to get these figures to look this way - playing with variograms and parameter initial values and bounds a lot. You encouraged to do the same! scroll back up, change things, and \"restart kernel and run all\" - this will help build some better intution, promise...." - ] + "cell_type": "markdown", + "source": "Stunning isn't it?! There is clearly a lot subjectivity in the form of defining the prior for the hyper parameters required to use these non-stationary geostats, but they do afford more opportunities to express (stochastic) expert knowledge. To be honest, there was a lot of experimenting with this notebook to get these figures to look this way - playing with variograms and parameter initial values and bounds a lot. You encouraged to do the same! 
scroll back up, change things, and \"restart kernel and run all\" - this will help build some better intution, promise....", + "id": "f61492b41877971d" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "cell_type": "code", "outputs": [], - "source": [] + "execution_count": null, + "source": "", + "id": "f7133475a2328689" } ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, + "metadata": {}, "nbformat": 4, - "nbformat_minor": 4 + "nbformat_minor": 5 } diff --git a/pyemu/utils/os_utils.py b/pyemu/utils/os_utils.py index c8dd23a2..83c1cb8d 100644 --- a/pyemu/utils/os_utils.py +++ b/pyemu/utils/os_utils.py @@ -141,7 +141,9 @@ def run_ossystem(cmd_str, cwd=".", verbose=False): exe_name = exe_name.replace(".exe", "") raw[0] = exe_name cmd_str = "{0} {1} ".format(*raw) - if os.path.exists(exe_name) and not exe_name.startswith("./"): + if (os.path.exists(exe_name) + and not exe_name.startswith("./") + and not exe_name.startswith("/")): cmd_str = "./" + cmd_str except Exception as e: @@ -193,8 +195,11 @@ def run_sp(cmd_str, cwd=".", verbose=True, logfile=False, **kwargs): bwd = os.getcwd() os.chdir(cwd) - - if platform.system() != "Windows" and not shutil.which(cmd_str.split()[0]): + exe_name = cmd_str.split()[0] + if (platform.system() != "Windows" + and not shutil.which(exe_name) + and not exe_name.startswith("./") + and not exe_name.startswith("/")): cmd_str = "./" + cmd_str try: