Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 50 additions & 3 deletions autotest/pst_from_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -412,7 +412,7 @@ def freyberg_test(tmp_path):
assert np.isclose(pst.phi, 0.), pst.phi


def freyberg_prior_build_test(tmp_path):
def test_freyberg_prior_build(tmp_path):
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
Expand Down Expand Up @@ -4544,7 +4544,7 @@ def shortname_conversion_test(tmp_path):
assert len(par) == 0, f"{len(par)} pars not found in tplfiles: {par[:100]}..."


def vertex_grid_test(tmp_path):
def test_vertex_grid(tmp_path):
pf, sim = setup_freyberg_mf6(tmp_path, "freyberg_quadtree")
m = sim.get_model()
mg = m.modelgrid
Expand Down Expand Up @@ -6307,6 +6307,52 @@ def test_dup_idxs(tmp_path):
pass


def invest_vertexpp_setup_speed():
    """Ad-hoc profiling driver: time pilot-point parameter setup on a large
    quadtree-refined vertex grid.

    Not an automated test (no ``test_`` prefix): it requires the ``gridgen``
    and ``mf6`` executables and a developer-local gridgen install path, and is
    intended to be run manually from ``__main__``. Prints a cumulative-time
    cProfile report for ``PstFrom.add_parameters`` and returns ``None``.
    """
    # all dependencies are imported locally so the rest of the test module
    # does not require flopy/shapely; numpy included for the same reason
    import numpy as np
    import flopy
    from flopy.utils.gridgen import Gridgen
    from pathlib import Path
    from shapely import Polygon
    import cProfile

    # seed gridgen with a 400x400 single-layer structured model
    sim = flopy.mf6.MFSimulation(sim_name="test", sim_ws="test", exe_name="mf6")
    gwf = flopy.mf6.ModflowGwf(sim, modelname="test")

    dis = flopy.mf6.ModflowGwfdis(
        gwf,
        nlay=1,
        nrow=400,
        ncol=400,
        delr=10,
        delc=10,
        top=1,
        botm=0,
    )
    # NOTE(review): hard-coded developer-local path to the gridgen binary --
    # only runs on a machine with gridgen installed at this location
    g = Gridgen(gwf.modelgrid, model_ws='test',
                exe_name=Path('~', "Projects", 'dev', "gridgen.1.0.02", 'bin',
                              'gridgen').expanduser().as_posix())
    # two levels of refinement inside a central square to force a
    # non-trivial unstructured (vertex) grid
    polys = [Polygon([(1500, 1500), (2500, 1500), (2500, 2500), (1500, 2500)])]
    g.add_refinement_features(polys, "polygon", 2, range(1))
    g.build()
    gridprops_vg = g.get_gridprops_vertexgrid()
    vgrid = flopy.discretization.VertexGrid(**gridprops_vg)
    # vgrid.plot()

    # dummy "model output" array with one value per cell in the layer
    np.savetxt(Path('test', 'testfile.csv'), np.ones(vgrid.shape[-1]))

    pf = PstFrom(original_d='test', new_d='testtmp',
                 spatial_reference=vgrid,
                 remove_existing=True)

    # profile only the pilot-point parameter setup call
    pr = cProfile.Profile()
    pr.enable()
    pf.add_parameters("testfile.csv", par_type="pp",
                      geostruct=pyemu.geostats.GeoStruct(
                          variograms=pyemu.geostats.ExpVario(contribution=1, a=100)),
                      pp_options={"try_use_ppu": True,
                                  "pp_space": 25},
                      par_style="m",
                      transform="log")
    pr.disable()
    pr.print_stats(sort="cumtime")


if __name__ == "__main__":
#add_py_function_test('.')
#mf6_freyberg_pp_locs_test('.')
Expand All @@ -6328,7 +6374,7 @@ def test_dup_idxs(tmp_path):
#$mf6_freyberg_da_test()
#shortname_conversion_test()
#mf6_freyberg_shortnames_test()
mf6_freyberg_direct_test(".")
# mf6_freyberg_direct_test(".")
#freyberg_test()
#mf6_freyberg_thresh_test(".")
#mf6_freyberg_test()
Expand All @@ -6354,6 +6400,7 @@ def test_dup_idxs(tmp_path):
#direct_quickfull_test()
# list_float_int_index_test('.')
#freyberg_test()
invest_vertexpp_setup_speed()



Expand Down
22 changes: 15 additions & 7 deletions pyemu/en.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import warnings
import numpy as np
import pandas as pd
from pathlib import Path

import pyemu
from .pyemu_warnings import PyemuWarning
Expand Down Expand Up @@ -364,7 +365,7 @@ def to_binary(self, filename):
"""write `Ensemble` to a PEST-style binary file

Args:
filename (`str`): file to write
filename (`str` or `Path`): file to write

Returns:
`str`: the filename written (may be modified from input)
Expand All @@ -380,34 +381,41 @@ def to_binary(self, filename):
values are in arithmetic space

"""
from pathlib import Path
if isinstance(filename, Path):
returnpath = True
else:
filename = Path(filename)
returnpath = False
retrans = False
if self.istransformed:
self.back_transform()
retrans = True
if self._df.isnull().values.any():
warnings.warn( "Ensemble.to_binary() warning: NaN in ensemble", PyemuWarning)
# if the nobs or npar is large, save as dense binary
if self._df.shape[1] > 1e6:
if self._df.shape[1] > 1e6 or filename.suffix == ".bin":
# if the filename doesn't have a .bin suffix, warn and add it
if Path(filename).suffix != ".bin":
if filename.suffix != ".bin":
warnings.warn(
"Ensemble.to_binary() warning: large ensemble dimension, will save as "
+ "dense binary format... adding '.bin' suffix", PyemuWarning
)
filename += ".bin"
filename = filename.with_suffix(".bin")
self.to_dense(filename)
else:
pyemu.Matrix.from_dataframe(self._df).to_coo(filename)
if retrans:
self.transform()
return filename
if returnpath:
return filename
else:
return filename.as_posix()

def to_dense(self, filename):
"""write `Ensemble` to a dense-format binary file

Args:
filename (`str`): file to write
filename (`str` or `Path`): file to write

Example::

Expand Down
7 changes: 3 additions & 4 deletions pyemu/mat/mat_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import copy
import struct
import warnings
from pathlib import Path
import numpy as np
import pandas as pd

Expand Down Expand Up @@ -1863,7 +1864,7 @@ def to_coo(self, filename, droptol=None, chunk=None):
the read with `Matrix.from_binary()`.

Args:
filename (`str`): filename to save binary file
filename (`str` or `Path`): filename to save binary file
droptol (`float`): absolute value tolerance to make values
smaller `droptol` than zero. Default is None (no dropping)
chunk (`int`): number of elements to write in a single pass.
Expand Down Expand Up @@ -1983,9 +1984,7 @@ def write_dense(filename, row_names, col_names, data, close=False):
"""
row_names = [str(r) for r in row_names]
col_names = [str(c) for c in col_names]


if isinstance(filename, str):
if isinstance(filename, (str, Path)):
f = open(filename, "wb")
header = np.array(
(0, -len(col_names), -len(col_names)), dtype=Matrix.binary_header_dt
Expand Down
99 changes: 48 additions & 51 deletions pyemu/utils/pp_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,7 @@ def setup_pilotpoints_grid(
if k not in prefix_dict.keys():
continue

pp_df = None
pp_df = []
ib = ibound[par][k]
assert (
ib.shape == xcentergrid.shape
Expand All @@ -209,24 +209,11 @@ def setup_pilotpoints_grid(

parval1 = 1.0

for pp in ppoint_xys:
for x, y in ppoint_xys:
# name from pilot point count
name = "pp_{0:04d}".format(pp_count)
x,y = pp[0], pp[-1]
if pp_df is None:
data = {
"name": name,
"x": x,
"y": y,
"zone": zone, # if use_ibound_zones is False this will always be 1
"parval1": parval1,
"k": k,
}
pp_df = pd.DataFrame(data=data, index=[0], columns=pp_names)
else:
data = [name, x, y, zone, parval1, k,]
pp_df.loc[pp_count, :] = data
pp_count+=1

pp_df.append([name, x, y, zone, parval1, k,])
pp_count += 1
else:
# cycle through rows and cols
# allow to run closer to outside edge rather than leaving a gap
Expand All @@ -246,25 +233,11 @@ def setup_pilotpoints_grid(
if use_ibound_zones:
zone = ib[i, j]
# stick this pilot point into a dataframe container

if pp_df is None:
data = {
"name": name,
"x": x,
"y": y,
"zone": zone, # if use_ibound_zones is False this will always be 1
"parval1": parval1,
"k": k,
"i": i,
"j": j,
}
pp_df = pd.DataFrame(data=data, index=[0], columns=pp_names)
else:
data = [name, x, y, zone, parval1, k, i, j]
pp_df.loc[pp_count, :] = data
pp_df.append([name, x, y, zone, parval1, k, i, j])
pp_count += 1
pp_df = pd.DataFrame(pp_df, columns=pp_names)
# if we found some acceptable locs...
if pp_df is not None:
if not pp_df.empty:
for prefix in prefix_dict[k]:
# if parameter prefix relates to current zone definition
if prefix.startswith(par) or (
Expand Down Expand Up @@ -657,7 +630,8 @@ def get_zoned_ppoints_for_vertexgrid(spacing, zone_array, mg, zone_number=None,

try:
from shapely.ops import unary_union
from shapely.geometry import Polygon, Point
from shapely.geometry import Polygon #, Point, MultiPoint
from shapely import points
except ImportError:
raise ImportError('The `shapely` library was not found. Please make sure it is installed.')

Expand Down Expand Up @@ -690,21 +664,44 @@ def get_zoned_ppoints_for_vertexgrid(spacing, zone_array, mg, zone_number=None,
x = np.linspace(xmin, xmax, nx)
y = np.linspace(ymin, ymax, ny)
xv, yv = np.meshgrid(x, y)
# make grid
grid_points = [Point(x,y) for x,y in list(zip(xv.flatten(), yv.flatten()))]

# get vertices for model grid/zone polygon
verts = [mg.get_cell_vertices(cellid) for cellid in range(mg.ncpl)]
if zone_number != None:
# select zone area
verts = [verts[i] for i in np.where(zone_array==zone_number)[0]]
# dissolve
polygon = unary_union([Polygon(v) for v in verts])
# add buffer
if add_buffer==True:
polygon = polygon.buffer(spacing)
# select ppoint coords within area
ppoints = [(p.x, p.y) for p in grid_points if polygon.covers(p) ]
def _get_ppoints():
# make grid
grid_points = points(list(zip(xv.flatten(), yv.flatten())))

# get vertices for model grid/zone polygon
verts = [mg.get_cell_vertices(cellid) for cellid in range(mg.ncpl)]
if zone_number != None:
# select zone area
verts = [verts[i] for i in np.where(zone_array==zone_number)[0]]
# dissolve
polygon = unary_union([Polygon(v) for v in verts])
# add buffer
if add_buffer==True:
polygon = polygon.buffer(spacing)
# select ppoint coords within area
ppoints = [(p.x, p.y) for p in grid_points[polygon.covers(grid_points)]]
return ppoints
#
# def _get_ppoints_new(): # alternative method searching for speedup (not too successful)
# # make grid
# grid_points = points(list(zip(xv.flatten(), yv.flatten())))
#
# # get vertices for model grid/zone polygon
# verts = [mg.get_cell_vertices(cellid) for cellid in range(mg.ncpl)]
# if zone_number != None:
# # select zone area
# verts = [verts[i] for i in np.where(zone_array == zone_number)[0]]
# # dissolve
# polygon = unary_union([Polygon(v) for v in verts])
# # add buffer
# if add_buffer == True:
# polygon = polygon.buffer(spacing)
# # select ppoint coords within area
# ppoints = [(p.x, p.y) for p in grid_points[polygon.covers(grid_points)]]
# return ppoints

ppoints = _get_ppoints()
# ppoints = _get_ppoints_new()
assert len(ppoints)>0
return ppoints

Expand Down
2 changes: 1 addition & 1 deletion pyemu/utils/pst_from.py
Original file line number Diff line number Diff line change
Expand Up @@ -2460,7 +2460,7 @@ def add_parameters(
pp_df = self._setup_pp_df(**pp_options)
# set par group -- already defined above
pp_df.loc[:, "pargp"] = pargp
self.logger.statement("pilot point 'pargp':{0}".format(",".join(pargp)))
self.logger.statement("pilot point 'pargp': {0}".format(pargp))
self.logger.log("setting up pilot point parameters")

# start working on interp factor calcs
Expand Down
Loading