6 changes: 3 additions & 3 deletions autotest/pst_from_tests.py
@@ -2635,7 +2635,7 @@ def mf6_freyberg_arr_obs_test():
#invest()
# freyberg_test()
#freyberg_prior_build_test()
# mf6_freyberg_test()
mf6_freyberg_test()
#mf6_freyberg_shortnames_test()
#mf6_freyberg_direct_test()
#mf6_freyberg_varying_idomain()
@@ -2644,8 +2644,8 @@ def mf6_freyberg_arr_obs_test():
# tpf = TestPstFrom()
# tpf.setup()
# tpf.test_add_direct_array_parameters()
pstfrom_profile()
mf6_freyberg_arr_obs_test()
#pstfrom_profile()
#mf6_freyberg_arr_obs_test()



14 changes: 13 additions & 1 deletion autotest/pst_tests.py
@@ -945,6 +945,17 @@ def ctrl_data_test():
pst2.write(os.path.join("pst","test2.pst"),version=2)
pst3 = pyemu.Pst(os.path.join("pst", "test2.pst"))

def read_in_tpl_test():
import pyemu
tpl_d = "tpl"
tpl_files = [os.path.join(tpl_d,f) for f in os.listdir(tpl_d) if f.endswith(".tpl")]
df = pyemu.pst_utils.try_read_input_file_with_tpl(os.path.join(tpl_d,"test1.dat.tpl"))
print(df)
assert df.parval1["p1"] == df.parval1["p2"]
assert df.parval1["p3"] == df.parval1["p4"]
assert df.parval1["p5"] == df.parval1["p6"]
assert df.parval1["p5"] == df.parval1["p7"]


if __name__ == "__main__":
process_output_files_test()
@@ -990,5 +1001,6 @@ def ctrl_data_test():
#try_process_ins_test()
#tpl_ins_test()
#process_output_files_test()
#comments_test()
read_in_tpl_test()
comments_test()

25 changes: 22 additions & 3 deletions autotest/pst_tests_2.py
@@ -522,6 +522,7 @@ def pst_from_flopy_geo_draw_test():

def from_flopy_pp_test():
import numpy as np
import pandas as pd
try:
import flopy
except:
@@ -547,6 +548,23 @@ def from_flopy_pp_test():
use_pp_zones=False,
build_prior=False)

b_d = os.getcwd()
os.chdir(new_model_ws)
try:
pyemu.helpers.apply_array_pars()
except Exception as e:
os.chdir(b_d)
raise Exception(str(e))
os.chdir(b_d)


mlt_dir = os.path.join(new_model_ws,"arr_mlt")
for f in os.listdir(mlt_dir):
arr = np.loadtxt(os.path.join(mlt_dir,f))
assert np.all(arr==1)
df = pd.read_csv(os.path.join(new_model_ws, "arr_pars.csv"), index_col=0)
assert np.all(df.pp_fill_value.values == 1)

new_model_ws = "temp_pst_from_flopy"
props = ["upw.ss","upw.hk","upw.vka"]
pp_props = []
@@ -564,6 +582,7 @@ def from_flopy_pp_test():
build_prior=True)



def pst_from_flopy_specsim_draw_test():
import shutil
import numpy as np
@@ -670,10 +689,10 @@ def at_bounds_test():
#pst_from_flopy_specsim_draw_test()
# run_array_pars()
# from_flopy_zone_pars()
# from_flopy_pp_test()
#from_flopy_pp_test()
# from_flopy()

# from_flopy_kl_test()
from_flopy_reachinput()
from_flopy_kl_test()
#from_flopy_reachinput()


4 changes: 4 additions & 0 deletions autotest/tpl/test1.dat
@@ -0,0 +1,4 @@

1 1.2345678 1.2345678 1.2345678 9.87654321
222222222222222222222222222222222222
12.312.312.3
5 changes: 5 additions & 0 deletions autotest/tpl/test1.dat.tpl
@@ -0,0 +1,5 @@
ptf ~

1 ~ p1 ~ ~ p1 ~ ~ p2 ~ ~ p9 ~
1 ~ p3 ~ ~ p4 ~
~p5~~p6~~p7~
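
These two fixtures exercise the new fixed-width template reader added in `pyemu/pst/pst_utils.py` below: each `~ pname ~` pair in `test1.dat.tpl` bounds a character span that is sliced out of the matching line of `test1.dat` and cast to float, so a repeated name (`p1`) and abutting markers with no whitespace (`~p5~~p6~~p7~`) must all resolve to consistent values. A minimal sketch of reading the pair, mirroring `read_in_tpl_test()` above (paths assume the `autotest` working directory):

```python
import os
import pyemu

# read test1.dat through its template; the input file name defaults to the
# template name with ".tpl" stripped
df = pyemu.pst_utils.try_read_input_file_with_tpl(os.path.join("tpl", "test1.dat.tpl"))
print(df)

# the same checks read_in_tpl_test() makes: spans that should agree do agree
assert df.parval1["p1"] == df.parval1["p2"]
assert df.parval1["p5"] == df.parval1["p6"] == df.parval1["p7"]
```
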
5 changes: 5 additions & 0 deletions pyemu/pst/pst_handler.py
@@ -2452,6 +2452,9 @@ def add_parameters(self, template_file, in_file=None, pst_path=None):
# get the parameter names in the template file
parnme = pst_utils.parse_tpl_file(template_file)

parval1 = pst_utils.try_read_input_file_with_tpl(template_file,in_file)


# find "new" parameters that are not already in the control file
new_parnme = [p for p in parnme if p not in self.parameter_data.parnme]

@@ -2472,6 +2475,8 @@ def add_parameters(self, template_file, in_file=None, pst_path=None):
)
new_par_data.loc[new_parnme, "parnme"] = new_parnme
if parval1 is not None:
new_par_data.loc[parval1.parnme,"parval1"] = parval1.parval1
self.parameter_data = self.parameter_data.append(new_par_data)
if in_file is None:
in_file = template_file.replace(".tpl", "")
if pst_path is not None:
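
With these additions, `Pst.add_parameters()` no longer leaves new parameters at the generic default `parval1`: when the companion model input file can be read through the template, the values found there seed `parval1` for the new parameters. A hedged sketch using the fixtures from this PR (run from `autotest`; `generic_pst()` just supplies a minimal control file to extend):

```python
import os
import pyemu

# start from a minimal control file and register the new template/input pair
pst = pyemu.pst_utils.generic_pst(par_names=["dum"], obs_names=["obs1"])
pst.add_parameters(os.path.join("tpl", "test1.dat.tpl"),
                   in_file=os.path.join("tpl", "test1.dat"))

# the parameters parsed from the template should now carry the values read
# from test1.dat rather than the generic defaults
print(pst.parameter_data.loc[["p1", "p5"], "parval1"])
```
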
103 changes: 103 additions & 0 deletions pyemu/pst/pst_utils.py
@@ -719,6 +719,109 @@ def generic_pst(par_names=["par1"], obs_names=["obs1"], addreg=False):
return new_pst


def try_read_input_file_with_tpl(tpl_file,input_file=None):
"""attempt to read parameter values from an input file using a template file
Args:
tpl_file (`str`): path and name of a template file
input_file (`str`,optional): path and name of existing model
input file to process. If `None`, `tpl_file.replace(".tpl","")`
is used. Default is None.

Returns:
`pandas.DataFrame`: a dataframe of parameter name and values
extracted from `input_file`.

Note:
If an exception is raised when reading the input file, the exception
is echoed to the screen and `None` is returned.

Example::

df = pyemu.pst_utils.try_read_input_file_with_tpl("my.tpl","my.input")

"""

if input_file is None:
input_file = tpl_file.replace(".tpl", "")
if not os.path.exists(input_file):
return None
# read the names first to see what we are dealing with
# and also to do some basic error checking
parnames = parse_tpl_file(tpl_file)
try:
df = _read_infile_with_tplfile(tpl_file,input_file)
except Exception as e:
print("error trying to read input file with tpl file:{0}".format(str(e)))
return None
return df

def _read_infile_with_tplfile(tpl_file,input_file):
"""attempt to read parameter values from an input file using a template file,
raising heaps of exceptions.
Args:
tpl_file (`str`): path and name of a template file
input_file (`str`): path and name of existing model

Returns:
`pandas.DataFrame`: a dataframe of parameter name and values
extracted from `input_file`.

Note:
use `try_read_input_file_with_tpl()` instead of this one.

"""

if not os.path.exists(input_file):
raise Exception("input file '{0}' not found".format(input_file))

f_tpl = open(tpl_file,'r')
f_in = open(input_file,'r')

# read the tpl header
_, marker = f_tpl.readline().split()
itpl,iin = 1,0
pnames,pvals = [],[]
pdict = {}
while True:
tpl_line =f_tpl.readline()
if tpl_line == "":
break

in_line = f_in.readline()
if in_line == "":
raise Exception("input file EOF, tpl file line {0}, in file line {1}".format(itpl,iin))

if marker in tpl_line:
idxs = [i for i, ltr in enumerate(tpl_line) if ltr == marker]
if len(idxs) % 2 != 0:
raise Exception("unbalanced markers on tpl line {0}".format(itpl))

for s,e in zip(idxs[0:-1:2],idxs[1::2]):
tpl_str = tpl_line[s:e]
pname = tpl_str.replace(marker,"").strip().lower()
if s > len(in_line):
raise Exception("input file EOL line {0}, tpl line {1}, looking for {2}"\
.format(iin,itpl,tpl_str))
in_str = in_line[s:e]
try:
v = float(in_str)
except Exception as e:
raise Exception("error casting '{0}' to float on in line {1}, tpl line {2} for {3}: {4}".\
format(in_str,iin,itpl,tpl_str,str(e)))

if pname in pdict:
eval = pdict[pname]
if not np.isclose(eval,v,1.0e-6):
raise Exception("different values {0}:{1} for par {2} on in line {3}".format(v,eval,pname,iin))
else:
pnames.append(pname)
pvals.append(v)
pdict[pname] = v
itpl += 1
iin += 1
df = pd.DataFrame({"parnme":pnames,"parval1":pvals},index=pnames)
return df

def try_process_output_file(ins_file, output_file=None):
"""attempt to process a model output file using a PEST-style instruction file

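
The heart of `_read_infile_with_tplfile()` is the marker pairing: every two marker characters on a template line bound one fixed-width field, and the same character span is sliced out of the corresponding input-file line and cast to float. A standalone illustration of just that step (not the library code itself):

```python
def marker_spans(tpl_line, marker="~"):
    """pair up marker positions on a template line into (start, end) column spans"""
    idxs = [i for i, ch in enumerate(tpl_line) if ch == marker]
    if len(idxs) % 2 != 0:
        raise ValueError("unbalanced markers")
    return list(zip(idxs[0::2], idxs[1::2]))

tpl_line = "~p5~~p6~~p7~"
in_line = "12.312.312.3"
for s, e in marker_spans(tpl_line):
    pname = tpl_line[s:e].replace("~", "").strip().lower()
    # each span is read as a fixed-width field from the same columns of the input line
    print(pname, float(in_line[s:e]))
```

All three fields here slice the same pattern of columns and so parse to the same value, which is the property `read_in_tpl_test()` asserts for `p5`, `p6`, and `p7`.
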
14 changes: 11 additions & 3 deletions pyemu/utils/helpers.py
@@ -2343,6 +2343,10 @@ def _pp_prep(self, mlt_df):
pp_file = pp_files.iloc[0]
mlt_df.loc[mlt_df.mlt_file == out_file, "fac_file"] = fac_file
mlt_df.loc[mlt_df.mlt_file == out_file, "pp_file"] = pp_file
mlt_df.loc[mlt_df.mlt_file == out_file, "pp_fill_value"] = 1.0
mlt_df.loc[mlt_df.mlt_file == out_file, "pp_lower_limit"] = 1.0e-10
mlt_df.loc[mlt_df.mlt_file == out_file, "pp_upper_limit"] = 1.0e+10

self.par_dfs[self.pp_suffix] = pp_df

mlt_df.loc[mlt_df.suffix == self.pp_suffix, "tpl_file"] = np.NaN
@@ -2408,6 +2412,9 @@ def _kl_prep(self, mlt_df):
mlt_df.loc[mlt_df.prefix == prefix, "fac_file"] = os.path.split(fac_file)[
-1
]
mlt_df.loc[mlt_df.prefix == prefix, "pp_fill_value"] = 1.0
mlt_df.loc[mlt_df.prefix == prefix, "pp_lower_limit"] = 1.0e-10
mlt_df.loc[mlt_df.prefix == prefix, "pp_upper_limit"] = 1.0e+10

print(kl_mlt_df)
mlt_df.loc[mlt_df.suffix == self.kl_suffix, "tpl_file"] = np.NaN
@@ -3702,10 +3709,11 @@ def apply_array_pars(arr_par="arr_pars.csv", arr_par_file=None, chunk_len=50):

if "pp_file" in df.columns:
print("starting fac2real", datetime.now())
pp_df = df.loc[df.pp_file.notna(), ["pp_file", "fac_file", "mlt_file"]].rename(
columns={"fac_file": "factors_file", "mlt_file": "out_file"}
pp_df = df.loc[df.pp_file.notna(), ["pp_file", "fac_file", "mlt_file",
"pp_fill_value","pp_lower_limit","pp_upper_limit"]].rename(
columns={"fac_file": "factors_file", "mlt_file": "out_file",
"pp_fill_value":"fill_value","pp_lower_limit":"lower_lim","pp_upper_limit":"upper_lim"}
)
pp_df.loc[:, "lower_lim"] = 1.0e-10
# don't need to process all (e.g. if const. mults apply across kper...)
pp_args = pp_df.drop_duplicates().to_dict("records")
num_ppargs = len(pp_args)
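
With these additions, `arr_pars.csv` carries per-multiplier `pp_fill_value`, `pp_lower_limit`, and `pp_upper_limit` columns, and `apply_array_pars()` renames them to the `fill_value`/`lower_lim`/`upper_lim` keywords it forwards for each pilot-point record, rather than hard-wiring `lower_lim=1.0e-10`. A small sketch of that column mapping (the dataframe below is a stand-in, not a real `arr_pars.csv`):

```python
import pandas as pd

# stand-in for the pilot-point rows of arr_pars.csv (file names are placeholders)
df = pd.DataFrame({
    "pp_file": ["hk0pp.dat"],
    "fac_file": ["pp_k0.fac"],
    "mlt_file": ["arr_mlt/mlt.hk0.dat"],
    "pp_fill_value": [1.0],
    "pp_lower_limit": [1.0e-10],
    "pp_upper_limit": [1.0e+10],
})

# the same rename apply_array_pars() now performs before interpolating
pp_df = df.rename(columns={"fac_file": "factors_file", "mlt_file": "out_file",
                           "pp_fill_value": "fill_value",
                           "pp_lower_limit": "lower_lim",
                           "pp_upper_limit": "upper_lim"})
pp_args = pp_df.drop_duplicates().to_dict("records")
print(pp_args[0])  # one keyword-argument record per pilot-point multiplier
```

Each record is then expanded as keyword arguments to the fac2real interpolation call, so the fill value and limits recorded at setup time are the ones used at run time.
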
9 changes: 7 additions & 2 deletions pyemu/utils/pst_from.py
@@ -1075,7 +1075,7 @@ def _process_array_obs(self,out_filename,ins_filename,prefix,ofile_sep,ofile_ski
if zval is not None:
oname += "_zone:{0}".format(zval)
else:
oname = "{0}_{1}_{2}".format(prefix,iline,jr)
oname = "{0}_{1}_{2}".format(prefix,iidx,jr)
if zval is not None:
z_str = "_{0}".format(zval)
if len(oname) + len(z_str) < 20:
@@ -1537,6 +1537,8 @@ def add_parameters(
filenames = [
get_relative_filepath(self.original_d, filename) for filename in filenames
]
if len(filenames) == 0:
self.logger.lraise("add_parameters(): filenames is empty")
if par_style == "direct":
if len(filenames) != 1:
self.logger.lraise(
@@ -1925,7 +1927,7 @@ def add_parameters(
pp_info_dict = {
"pp_data": ok_pp.point_data.loc[:, ["x", "y", "zone"]],
"cov": ok_pp.point_cov_df,
"zn_ar": zone_array,
"zn_ar": zone_array
}
fac_processed = False
for facfile, info in self._pp_facs.items(): # check against
@@ -2004,6 +2006,9 @@ def add_parameters(
assert fac_filename is not None, "missing pilot-point input filename"
mult_dict["fac_file"] = os.path.relpath(fac_filename, self.new_d)
mult_dict["pp_file"] = pp_filename
mult_dict["pp_fill_value"] = 1.0
mult_dict["pp_lower_limit"] = 1.0e-10
mult_dict["pp_upper_limit"] = 1.0e+10
relate_parfiles.append(mult_dict)
relate_pars_df = pd.DataFrame(relate_parfiles)
# store on self for use in pest build etc
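
On the setup side, each pilot-point multiplier that `PstFrom.add_parameters()` records now carries the same three defaults, so the run-time helper above sees a fill value of 1.0 (no effect where factors are missing) and wide limits without any hard-coded values. An abridged sketch of the per-file record appended to `relate_parfiles` (other keys omitted; file names are placeholders):

```python
import os

# abridged pilot-point entry as built in add_parameters(); the three pp_* keys are
# the defaults added in this PR and are consumed later by apply_array_pars()
mult_dict = {
    "mlt_file": os.path.join("mult", "hk0_pp.csv"),  # placeholder multiplier file
    "fac_file": "hk0pp.fac",                         # placeholder factors file
    "pp_file": "hk0pp.dat",                          # placeholder pilot-point file
    "pp_fill_value": 1.0,
    "pp_lower_limit": 1.0e-10,
    "pp_upper_limit": 1.0e+10,
}
print(mult_dict)
```
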