Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 13 additions & 6 deletions autotest/pst_from_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,9 @@ def freyberg_test():
assert all([gp in obsnmes for gp in ['qaquifer', 'qout']])
pf.post_py_cmds.append(
"sfodf.sort_index(1).to_csv('freyberg.sfo.csv', sep=',', index_label='idx')")

zone_array = np.arange(m.nlay*m.nrow*m.ncol)
s = lambda x: "zval_"+str(x)
zone_array = np.array([s(x) for x in zone_array]).reshape(m.nlay,m.nrow,m.ncol)
# pars
pf.add_parameters(filenames="RIV_0000.dat", par_type="grid",
index_cols=[0, 1, 2], use_cols=[3, 5],
Expand All @@ -184,7 +186,11 @@ def freyberg_test():
pf.add_parameters(filenames=["WEL_0000.dat", "WEL_0001.dat"],
par_type="grid", index_cols=[0, 1, 2], use_cols=3,
par_name_base="welflux_grid",
zone_array=m.bas6.ibound.array)
zone_array=zone_array)
pf.add_parameters(filenames="WEL_0000.dat",
par_type="grid", index_cols=[0, 1, 2], use_cols=3,
par_name_base="welflux_grid_direct",
zone_array=zone_array,par_style="direct")
pf.add_parameters(filenames=["WEL_0000.dat"], par_type="constant",
index_cols=[0, 1, 2], use_cols=3,
par_name_base=["flux_const"])
Expand Down Expand Up @@ -213,6 +219,7 @@ def freyberg_test():
# check mult files are in pst input files
csv = os.path.join(template_ws, "mult2model_info.csv")
df = pd.read_csv(csv, index_col=0)
df = df.loc[pd.notna(df.mlt_file),:]
pst_input_files = {str(f) for f in pst.input_files}
mults_not_linked_to_pst = ((set(df.mlt_file.unique()) -
pst_input_files) -
Expand Down Expand Up @@ -2670,10 +2677,10 @@ def mf6_freyberg_arr_obs_and_headerless_test():
#mf6_freyberg_varying_idomain()
#xsec_test()
#mf6_freyberg_short_direct_test()
tpf = TestPstFrom()
tpf.setup()
tpf.test_add_direct_array_parameters()
tpf.add
#tpf = TestPstFrom()
#tpf.setup()
#tpf.test_add_direct_array_parameters()
#tpf.add
#pstfrom_profile()
#mf6_freyberg_arr_obs_and_headerless_test()\

Expand Down
104 changes: 102 additions & 2 deletions autotest/pst_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -957,12 +957,112 @@ def read_in_tpl_test():
assert df.parval1["p5"] == df.parval1["p7"]


def write2_nan_test():
    """Verify that Pst.write() raises when NaNs are present in required data.

    A NaN is injected, in turn, into each control-file data section (prior
    information weights, model output/input ``pest_file`` entries, parameter
    values, parameter-group ``derinc`` values, and observation weights), and
    both the version-1 and version-2 control-file writers are expected to
    fail for every case.
    """
    import numpy as np
    import pyemu
    import os

    def _assert_write_fails(pst):
        # Both control-file formats must reject NaNs in required fields.
        for version in (1, 2):
            try:
                pst.write("test.pst", version=version)
            except Exception:
                pass
            else:
                raise Exception(
                    "should have failed for version {0}".format(version)
                )

    pst_path = os.path.join("pst", "pest.pst")

    # NaN weight in prior information (added via zero-order Tikhonov reg)
    pst = pyemu.Pst(pst_path)
    pyemu.helpers.zero_order_tikhonov(pst)
    # np.nan (not the NumPy-2.0-removed np.NaN alias)
    pst.prior_information.loc[pst.prior_names[0], "weight"] = np.nan
    _assert_write_fails(pst)

    # NaN instruction-file entry in model output data
    pst = pyemu.Pst(pst_path)
    pst.model_output_data.loc[pst.instruction_files[0], "pest_file"] = np.nan
    _assert_write_fails(pst)

    # NaN template-file entry in model input data
    pst = pyemu.Pst(pst_path)
    pst.model_input_data.loc[pst.template_files[0], "pest_file"] = np.nan
    _assert_write_fails(pst)

    # NaN parameter value
    pst = pyemu.Pst(pst_path)
    pst.parameter_data.loc[pst.par_names[0], "parval1"] = np.nan
    _assert_write_fails(pst)

    # NaN derivative increment in parameter groups
    pst = pyemu.Pst(pst_path)
    # NOTE(review): parameter_groups is typically indexed by pargpnme, so
    # ``.pargpnme[0]`` relies on a positional fallback lookup — confirm
    # this selects the intended first group label.
    pst.parameter_groups.loc[pst.parameter_groups.pargpnme[0], "derinc"] = np.nan
    _assert_write_fails(pst)

    # NaN observation weight
    pst = pyemu.Pst(pst_path)
    pst.observation_data.loc[pst.obs_names[0], "weight"] = np.nan
    _assert_write_fails(pst)




if __name__ == "__main__":
process_output_files_test()
write2_nan_test()
#process_output_files_test()
# change_limit_test()
# new_format_test()
# lt_gt_constraint_names_test()
csv_to_ins_test()
#csv_to_ins_test()

# try_process_ins_test()
# write_tables_test()
Expand Down
2 changes: 2 additions & 0 deletions autotest/pst_tests_2.py
Original file line number Diff line number Diff line change
Expand Up @@ -681,6 +681,8 @@ def at_bounds_test():
assert len(lb) == 1
assert len(ub) == 1



if __name__ == "__main__":

#at_bounds_test()
Expand Down
135 changes: 116 additions & 19 deletions pyemu/pst/pst_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -1520,24 +1520,64 @@ def _write_version2(self, new_filename, use_pst_path=True, pst_rel_path="."):
v = ",".join([str(vv) for vv in list(v)])
f_out.write("{0:30} {1}\n".format(k, v))



# parameter groups
name = "pargp_data"
columns = self.pargp_fieldnames
if self.parameter_groups.loc[:, columns].isnull().values.any():
# warnings.warn("WARNING: NaNs in {0} dataframe".format(name))
csv_name = "pst.{0}.nans.csv".format(
name.replace(" ", "_").replace("*", "")
)
self.parameter_groups.to_csv(csv_name)
raise Exception(
"NaNs in {0} dataframe, csv written to {1}".format(name, csv_name)
)
f_out.write("* parameter groups external\n")
pargp_filename = new_filename.lower().replace(".pst", ".pargrp_data.csv")
pargp_filename = new_filename.lower().replace(".pst", ".{0}.csv".format(name))
if pst_path is not None:
pargp_filename = os.path.join(pst_path, os.path.split(pargp_filename)[-1])
self.parameter_groups.to_csv(pargp_filename, index=False)
pargp_filename = os.path.join(pst_rel_path, os.path.split(pargp_filename)[-1])
f_out.write("{0}\n".format(pargp_filename))


# parameter data
name = "par_data"
columns = self.par_fieldnames
if self.parameter_data.loc[:, columns].isnull().values.any():
# warnings.warn("WARNING: NaNs in {0} dataframe".format(name))
csv_name = "pst.{0}.nans.csv".format(
name.replace(" ", "_").replace("*", "")
)
self.parameter_data.to_csv(csv_name)
raise Exception(
"NaNs in {0} dataframe, csv written to {1}".format(name, csv_name)
)
f_out.write("* parameter data external\n")
par_filename = new_filename.lower().replace(".pst", ".par_data.csv")
par_filename = new_filename.lower().replace(".pst", ".{0}.csv".format(name))
if pst_path is not None:
par_filename = os.path.join(pst_path, os.path.split(par_filename)[-1])
self.parameter_data.to_csv(par_filename, index=False)
par_filename = os.path.join(pst_rel_path, os.path.split(par_filename)[-1])
f_out.write("{0}\n".format(par_filename))


# observation data
name = "obs_data"
columns = self.obs_fieldnames
if self.observation_data.loc[:, columns].isnull().values.any():
# warnings.warn("WARNING: NaNs in {0} dataframe".format(name))
csv_name = "pst.{0}.nans.csv".format(
name.replace(" ", "_").replace("*", "")
)
self.observation_data.to_csv(csv_name)
raise Exception(
"NaNs in {0} dataframe, csv written to {1}".format(name, csv_name)
)
f_out.write("* observation data external\n")
obs_filename = new_filename.lower().replace(".pst", ".obs_data.csv")
obs_filename = new_filename.lower().replace(".pst", ".{0}.csv".format(name))
if pst_path is not None:
obs_filename = os.path.join(pst_path, os.path.split(obs_filename)[-1])
self.observation_data.to_csv(obs_filename, index=False)
Expand All @@ -1548,35 +1588,62 @@ def _write_version2(self, new_filename, use_pst_path=True, pst_rel_path="."):
for mc in self.model_command:
f_out.write("{0}\n".format(mc))

# model input
name = "tplfile_data"
columns = self.model_io_fieldnames
if self.model_input_data.loc[:, columns].isnull().values.any():
# warnings.warn("WARNING: NaNs in {0} dataframe".format(name))
csv_name = "pst.{0}.nans.csv".format(
name.replace(" ", "_").replace("*", "")
)
self.model_input_data.to_csv(csv_name)
raise Exception(
"NaNs in {0} dataframe, csv written to {1}".format(name, csv_name)
)
f_out.write("* model input external\n")
io_filename = new_filename.lower().replace(".pst", ".tplfile_data.csv")
io_filename = new_filename.lower().replace(".pst", ".{0}.csv".format(name))
if pst_path is not None:
io_filename = os.path.join(pst_path, os.path.split(io_filename)[-1])
# pfiles = self.template_files
# pfiles.extend(self.instruction_files)
# mfiles = self.input_files
# mfiles.extend(self.output_files)
# io_df = pd.DataFrame({"pest_file": pfiles, "model_file": mfiles})
# io_df.to_csv(io_filename, index=False)
self.model_input_data.to_csv(io_filename, index=False)
io_filename = os.path.join(pst_rel_path, os.path.split(io_filename)[-1])
f_out.write("{0}\n".format(io_filename))

# model output
name = "insfile_data"
columns = self.model_io_fieldnames
if self.model_output_data.loc[:, columns].isnull().values.any():
# warnings.warn("WARNING: NaNs in {0} dataframe".format(name))
csv_name = "pst.{0}.nans.csv".format(
name.replace(" ", "_").replace("*", "")
)
self.model_output_data.to_csv(csv_name)
raise Exception(
"NaNs in {0} dataframe, csv written to {1}".format(name, csv_name)
)
f_out.write("* model output external\n")
io_filename = new_filename.lower().replace(".pst", ".insfile_data.csv")
io_filename = new_filename.lower().replace(".pst", ".{0}.csv".format(name))
if pst_path is not None:
io_filename = os.path.join(pst_path, os.path.split(io_filename)[-1])
# pfiles = self.instruction_files
# mfiles = self.output_files
# io_df = pd.DataFrame({"pest_file": pfiles, "model_file": mfiles})
# io_df.to_csv(io_filename, index=False)
self.model_output_data.to_csv(io_filename, index=False)
io_filename = os.path.join(pst_rel_path, os.path.split(io_filename)[-1])
f_out.write("{0}\n".format(io_filename))


# prior info
if self.prior_information.shape[0] > 0:
name = "pi_data"
columns = self.prior_fieldnames
if self.prior_information.loc[:, columns].isnull().values.any():
# warnings.warn("WARNING: NaNs in {0} dataframe".format(name))
csv_name = "pst.{0}.nans.csv".format(
name.replace(" ", "_").replace("*", "")
)
self.prior_information.to_csv(csv_name)
raise Exception(
"NaNs in {0} dataframe, csv written to {1}".format(name, csv_name)
)
f_out.write("* prior information external\n")
pi_filename = new_filename.lower().replace(".pst", ".pi_data.csv")
pi_filename = new_filename.lower().replace(".pst", ".{0}.csv".format(name))
if pst_path is not None:
pi_filename = os.path.join(pst_path, os.path.split(pi_filename)[-1])
self.prior_information.to_csv(pi_filename, index=False)
Expand Down Expand Up @@ -1704,6 +1771,28 @@ def _write_version1(self, new_filename):
for cline in self.model_command:
f_out.write(cline + "\n")

name = "tplfile_data"
columns = self.model_io_fieldnames
if self.model_input_data.loc[:, columns].isnull().values.any():
# warnings.warn("WARNING: NaNs in {0} dataframe".format(name))
csv_name = "pst.{0}.nans.csv".format(
name.replace(" ", "_").replace("*", "")
)
self.model_input_data.to_csv(csv_name)
raise Exception(
"NaNs in {0} dataframe, csv written to {1}".format(name, csv_name)
)
name = "insfile_data"
columns = self.model_io_fieldnames
if self.model_output_data.loc[:, columns].isnull().values.any():
# warnings.warn("WARNING: NaNs in {0} dataframe".format(name))
csv_name = "pst.{0}.nans.csv".format(
name.replace(" ", "_").replace("*", "")
)
self.model_output_data.to_csv(csv_name)
raise Exception(
"NaNs in {0} dataframe, csv written to {1}".format(name, csv_name)
)
f_out.write("* model input/output\n")
for tplfle, infle in zip(
self.model_input_data.pest_file, self.model_input_data.model_file
Expand All @@ -1715,9 +1804,17 @@ def _write_version1(self, new_filename):
f_out.write("{0} {1}\n".format(insfle, outfle))

if self.nprior > 0:
if self.prior_information.isnull().values.any():
# print("WARNING: NaNs in prior_information dataframe")
warnings.warn("NaNs in prior_information dataframe", PyemuWarning)
name = "pi_data"
columns = self.prior_fieldnames
if self.prior_information.loc[:, columns].isnull().values.any():
# warnings.warn("WARNING: NaNs in {0} dataframe".format(name))
csv_name = "pst.{0}.nans.csv".format(
name.replace(" ", "_").replace("*", "")
)
self.prior_information.to_csv(csv_name)
raise Exception(
"NaNs in {0} dataframe, csv written to {1}".format(name, csv_name)
)
f_out.write("* prior information\n")
# self.prior_information.index = self.prior_information.pop("pilbl")
max_eq_len = self.prior_information.equation.apply(lambda x: len(x)).max()
Expand Down
5 changes: 4 additions & 1 deletion pyemu/pst/pst_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -1163,7 +1163,10 @@ def csv_to_ins_file(
else:
nprefix = prefix
if longnames:
nname = f"{nprefix}_usecol:{clabel}"
if len(nprefix) > 0:
nname = f"{nprefix}_usecol:{clabel}"
else:
nname = f"usecol:{clabel}"
oname = f"{nname}_{rlabel}"
else:
nname = nprefix + clabel.replace(" ", "").replace("_", "")
Expand Down
7 changes: 5 additions & 2 deletions pyemu/utils/pst_from.py
Original file line number Diff line number Diff line change
Expand Up @@ -1189,7 +1189,9 @@ def add_observations(
new_obs = self.add_observations_from_ins(
ins_file=insfile, out_file=self.new_d / filename
)
if prefix is not None:
if obsgp is not None:
new_obs.loc[:, "obgnme"] = obsgp
elif prefix is not None:
new_obs.loc[:, "obgnme"] = prefix
self.logger.log("adding observations from array output file '{0}'".format(filenames))
if rebuild_pst:
Expand Down Expand Up @@ -1439,7 +1441,8 @@ def add_parameters(
sigma_range: not yet implemented # TODO
upper_bound (`float`): PEST parameter upper bound # TODO support different ubound,lbound,transform if multiple use_col
lower_bound (`float`): PEST parameter lower bound
transform (`str`): PEST parameter transformation
transform (`str`): PEST parameter transformation. Must be either "log","none" or "fixed. The "tied" transform
must be used after calling `PstFrom.build_pst()`.
par_name_base (`str`): basename for parameters that are set up
index_cols (`list`-like): if not None, will attempt to parameterize
expecting a tabular-style model input file. `index_cols`
Expand Down