fix(extraneous blocks and zero length data in a list) (#990)
* fix(extraneous package data): extraneous package data is stored as comments

* fix(loading list data): handle the case where jagged data in a list has size zero

* fix(reading repeating data): variable initialization fix

* fix(test update): update the test to work with the way flopy now treats extra stress periods as comment text
spaulins-usgs committed Sep 3, 2020
1 parent 9983191 commit 44d8b5b
Showing 4 changed files with 135 additions and 40 deletions.
2 changes: 1 addition & 1 deletion autotest/t504_test.py
@@ -255,7 +255,7 @@ def test005_advgw_tidal():

# add a stress period beyond nper
spd = ghb.stress_period_data.get_data()
spd[20] = copy.deepcopy(spd[9])
spd[20] = copy.deepcopy(spd[0])
ghb.stress_period_data.set_data(spd)

# make temp folder to save simulation
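The test change above exercises the user-facing effect of this commit: stress period data keyed beyond nper is no longer written as an extraneous PERIOD block but is preserved as comment text. A minimal sketch of that workflow, assuming a throwaway GHB model (the grid, solver, and sim_ws="temp" below are illustrative, not part of the commit):

```python
import copy
import flopy

# Minimal MODFLOW 6 simulation with 3 stress periods and one GHB cell.
sim = flopy.mf6.MFSimulation(sim_name="demo", sim_ws="temp")
tdis = flopy.mf6.ModflowTdis(sim, nper=3, perioddata=[(1.0, 1, 1.0)] * 3)
ims = flopy.mf6.ModflowIms(sim)
gwf = flopy.mf6.ModflowGwf(sim, modelname="demo")
dis = flopy.mf6.ModflowGwfdis(gwf, nlay=1, nrow=1, ncol=1)
ic = flopy.mf6.ModflowGwfic(gwf)
npf = flopy.mf6.ModflowGwfnpf(gwf)
ghb = flopy.mf6.ModflowGwfghb(
    gwf, stress_period_data={0: [[(0, 0, 0), 1.0, 10.0]]}
)

# Add a stress period beyond nper, mirroring the test change above
# (keys are 0-based, so key 20 corresponds to PERIOD 21 in the file).
spd = ghb.stress_period_data.get_data()
spd[20] = copy.deepcopy(spd[0])
ghb.stress_period_data.set_data(spd)

# With this fix, the out-of-range period is written as comment text in
# demo.ghb instead of producing an extraneous PERIOD block.
sim.write_simulation()
```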
12 changes: 7 additions & 5 deletions flopy/mf6/data/mfdatautil.py
@@ -297,10 +297,12 @@ def __init__(self, comment, path, sim_data, line_number=0):
text to add
"""

def add_text(self, additional_text):
def add_text(self, additional_text, new_line=False):
if additional_text:
if isinstance(self.text, list):
self.text.append(additional_text)
elif new_line:
self.text = "{}{}".format(self.text, additional_text)
else:
self.text = "{} {}".format(self.text, additional_text)

@@ -376,12 +378,12 @@ def write(self, fd, eoln_suffix=True):
def is_empty(self, include_whitespace=True):
if include_whitespace:
if self.text():
return True
return False
return False
return True
else:
if self.text.strip():
return True
return False
return False
return True

"""
Check text to see if it is valid comment text
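The is_empty change above flips the returns so the method now answers "is this comment empty?" rather than the opposite. A minimal sketch of the corrected logic as a standalone function (flopy's version operates on the comment's text attribute):

```python
def is_empty(text, include_whitespace=True):
    # Corrected semantics: True only when there is no (meaningful) text.
    if include_whitespace:
        return not bool(text)
    return not bool(text.strip())

assert is_empty("") is True
assert is_empty("   ", include_whitespace=False) is True
assert is_empty("# a comment") is False
```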
19 changes: 15 additions & 4 deletions flopy/mf6/data/mffileaccess.py
@@ -1502,6 +1502,7 @@ def _load_list_line(
else:
# read variables
var_index = 0
repeat_count = 0
data = ""
for data_item_index, data_item in enumerate(
data_set.data_item_structures
@@ -1552,10 +1553,20 @@
# comment mark found and expecting optional
# data_item, we are done
break
if (
data_index >= arr_line_len
and data_item.optional
):
if data_index >= arr_line_len:
if data_item.optional:
break
else:
unknown_repeats = (
storage.resolve_shape_list(
data_item,
repeat_count,
current_key,
data_line,
)[1]
)
if unknown_repeats:
break
break
more_data_expected = True
unknown_repeats = False
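The _load_list_line changes above let a jagged list row end early when its trailing item repeats an unknown number of times: running out of tokens breaks out of the loop if the item is optional, and otherwise only if the resolved shape reports unknown repeats, so a repeat count of zero is legal. A hedged standalone sketch of that control flow; resolve_shape below is a stand-in and does not have the signature of storage.resolve_shape_list:

```python
def read_row(tokens, items, resolve_shape):
    # Sketch of the corrected flow for one list row. `resolve_shape(item)`
    # is a stand-in returning (size, unknown_repeats).
    row, index = [], 0
    for item in items:
        if index >= len(tokens):
            if item["optional"]:
                break
            # Jagged data: an item with unknown repeats may occur zero
            # times, so an exhausted line is not an error here.
            _, unknown_repeats = resolve_shape(item)
            if unknown_repeats:
                break
            raise ValueError("missing required value: " + item["name"])
        row.append(tokens[index])
        index += 1
    return row

# Example: the trailing "aux" item repeats an unknown number of times
# and is absent from this line, which is now accepted.
items = [{"name": "cellid", "optional": False},
         {"name": "aux", "optional": False}]
print(read_row(["1"], items, lambda item: (None, item["name"] == "aux")))
```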
142 changes: 112 additions & 30 deletions flopy/mf6/mfpackage.py
@@ -75,12 +75,37 @@ def __init__(
if simulation_data is None:
self.comment = comment
self.simulation_data = None
self.path = None
self.path = path
self.comment_path = None
else:
self.connect_to_dict(simulation_data, path, comment)
# TODO: Get data_items from dictionary
self.data_items = []
# build block comment paths
self.blk_trailing_comment_path = ("blk_trailing_comment",)
self.blk_post_comment_path = ("blk_post_comment",)
if isinstance(path, list):
path = tuple(path)
if path is not None:
self.blk_trailing_comment_path = path + (
name,
"blk_trailing_comment",
)
self.blk_post_comment_path = path + (
name,
"blk_post_comment",
)
if self.blk_trailing_comment_path not in simulation_data.mfdata:
simulation_data.mfdata[
self.blk_trailing_comment_path
] = MFComment("", "", simulation_data, 0)
if self.blk_post_comment_path not in simulation_data.mfdata:
simulation_data.mfdata[self.blk_post_comment_path] = MFComment(
"\n", "", simulation_data, 0
)
else:
self.blk_trailing_comment_path = ("blk_trailing_comment",)
self.blk_post_comment_path = ("blk_post_comment",)

def build_header_variables(
self,
@@ -119,7 +144,20 @@ def build_header_variables(
dimensions,
fixed_data,
)

self.add_data_item(new_data, data)

def add_data_item(self, new_data, data):
self.data_items.append(new_data)
while isinstance(data, list):
if len(data) > 0:
data = data[0]
else:
data = None
if not isinstance(data, tuple):
data = (data,)
self.blk_trailing_comment_path += data
self.blk_post_comment_path += data

def is_same_header(self, block_header):
if len(self.variable_strings) > 0:
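The MFBlockHeader changes above move the block comment bookkeeping onto each header: the paths are tuples built from the block path, the header name, and a comment label, and add_data_item later appends the header's data value so each repeating header (for example each stress period) gets its own comment slots in simulation_data.mfdata. A small standalone sketch of that path construction (a plain dict stands in for simulation_data.mfdata; the path and key values are illustrative):

```python
mfdata = {}  # stands in for simulation_data.mfdata

path = ("demo", "ghb_0", "period")   # illustrative block path
name = "period"                      # block header name

blk_trailing_comment_path = path + (name, "blk_trailing_comment")
blk_post_comment_path = path + (name, "blk_post_comment")

# Register empty comment slots once per path (flopy stores MFComment
# objects here; plain strings keep the sketch self-contained).
mfdata.setdefault(blk_trailing_comment_path, "")
mfdata.setdefault(blk_post_comment_path, "\n")

# add_data_item then appends the header value, e.g. the stress period
# number, so repeated PERIOD headers do not share comment entries.
iper = (20,)
blk_trailing_comment_path += iper
blk_post_comment_path += iper
```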
@@ -318,16 +356,6 @@ def __init__(
self.path = path
self.datasets = OrderedDict()
self.datasets_keyword = {}
self.blk_trailing_comment_path = path + ("blk_trailing_comment",)
self.blk_post_comment_path = path + ("blk_post_comment",)
if self.blk_trailing_comment_path not in simulation_data.mfdata:
simulation_data.mfdata[self.blk_trailing_comment_path] = MFComment(
"", "", simulation_data, 0
)
if self.blk_post_comment_path not in simulation_data.mfdata:
simulation_data.mfdata[self.blk_post_comment_path] = MFComment(
"\n", "", simulation_data, 0
)
# initially disable if optional
self.enabled = structure.number_non_optional_data() > 0
self.loaded = False
@@ -581,6 +609,7 @@ def _build_repeating_header(self, header_data):
self.block_headers.append(block_header)
else:
block_header_path = self.path + (len(self.block_headers),)

struct = self.structure
last_header = self.block_headers[-1]
try:
@@ -615,7 +644,10 @@ def _new_dataset(
# stress periods are stored 0 based
initial_val = int(initial_val[0]) - 1
if isinstance(initial_val, list):
initial_val_path = tuple(initial_val)
initial_val = [tuple(initial_val)]
else:
initial_val_path = initial_val
try:
new_data = MFBlock.data_factory(
self._simulation_data,
@@ -636,7 +668,8 @@
' dataset "{}" to block '
'"{}"'.format(dataset_struct.name, self.structure.name),
)
self.block_headers[-1].data_items.append(new_data)
self.block_headers[-1].add_data_item(new_data, initial_val_path)

else:
try:
self.datasets[key] = self.data_factory(
@@ -747,6 +780,7 @@ def load(self, block_header, fd, strict=True):
line = fd_block.readline()
datautil.PyListUtil.reset_delimiter_used()
arr_line = datautil.PyListUtil.split_data_line(line)
post_data_comments = MFComment("", "", self._simulation_data, 0)
while MFComment.is_comment(line, True):
initial_comment.add_text(line)
line = fd_block.readline()
@@ -859,9 +893,6 @@ def load(self, block_header, fd, strict=True):
else:
arr_line = ""
# capture any trailing comments
post_data_comments = MFComment(
"", "", self._simulation_data, 0
)
dataset.post_data_comments = post_data_comments
while arr_line and (
len(next_line[1]) <= 2 or arr_line[0][:3].upper() != "END"
@@ -914,10 +945,6 @@ def load(self, block_header, fd, strict=True):
and result[1][:3].upper() == "END"
):
break

self._simulation_data.mfdata[
self.blk_trailing_comment_path
].text = comments
self.loaded = True
self.is_valid()

@@ -1114,6 +1141,7 @@ def write(self, fd, ext_file_action=ExtFileAction.copy_relative_paths):
if len(repeating_datasets) > 0:
# loop through all block headers
for block_header in self.block_headers:
# write block
self._write_block(fd, block_header, ext_file_action)
else:
# write out block
@@ -1248,7 +1276,9 @@ def _write_block(self, fd, block_header, ext_file_action):
),
)
# write trailing comments
self._simulation_data.mfdata[self.blk_trailing_comment_path].write(fd)
pth = block_header.blk_trailing_comment_path
if pth in self._simulation_data.mfdata:
self._simulation_data.mfdata[pth].write(fd)

if self.external_file_name is not None:
# switch back writing to package file
@@ -1259,7 +1289,9 @@ def _write_block(self, fd, block_header, ext_file_action):
block_header.write_footer(fd)

# write post block comments
self._simulation_data.mfdata[self.blk_post_comment_path].write(fd)
pth = block_header.blk_post_comment_path
if pth in self._simulation_data.mfdata:
self._simulation_data.mfdata[pth].write(fd)

# write extra line if comments are off
if not self._simulation_data.comments_on:
@@ -1699,7 +1731,11 @@ def _get_block_header_info(self, line, path):
)
elif len(arr_clean_line) == 2:
return MFBlockHeader(
arr_clean_line[1], header_variable_strs, header_comment
arr_clean_line[1],
header_variable_strs,
header_comment,
self._simulation_data,
path,
)
else:
# process text after block name
@@ -1715,7 +1751,11 @@
else:
header_variable_strs.append(entry)
return MFBlockHeader(
arr_clean_line[1], header_variable_strs, header_comment
arr_clean_line[1],
header_variable_strs,
header_comment,
self._simulation_data,
path,
)

def _update_size_defs(self):
@@ -2049,11 +2089,9 @@ def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize):
break
else:
found_first_block = True
self.post_block_comments = MFComment(
"", self.path, self._simulation_data
)
skip_block = False
if self.blocks[block_key].loaded:
cur_block = self.blocks[block_key]
if cur_block.loaded:
# Only blocks defined as repeating are allowed to have
# multiple entries
header_name = block_header_info.name
@@ -2074,6 +2112,21 @@ def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize):
)
print(warning_str)
skip_block = True
bhs = cur_block.structure.block_header_structure
bhval = block_header_info.variable_strings
if (
len(bhs) > 0
and len(bhval) > 0
and bhs[0].name == "iper"
):
nper = self._simulation_data.mfdata[
("tdis", "dimensions", "nper")
].get_data()
bhval_int = datautil.DatumUtil.is_int(bhval[0])
if not bhval_int or int(bhval[0]) > nper:
# skip block when block stress period is greater
# than nper
skip_block = True

if not skip_block:
if (
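The new guard above skips a PERIOD block whose header value is not an integer or refers to a stress period greater than nper; the skipped block's text is then kept as comments (see the sketch at the end of this diff). A standalone sketch of the check, assuming a 1-based period number as written in the file:

```python
def should_skip_period_block(iper_text, nper):
    # Skip when the header value is not an integer or exceeds nper.
    try:
        iper = int(iper_text)
    except ValueError:
        return True
    return iper > nper

print(should_skip_period_block("21", 20))   # True  -> kept as comment text
print(should_skip_period_block("3", 20))    # False -> loaded normally
print(should_skip_period_block("abc", 20))  # True  -> kept as comment text
```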
@@ -2082,20 +2135,49 @@
):
print(
" loading block {}...".format(
self.blocks[block_key].structure.name
cur_block.structure.name
)
)
# reset comments
self.post_block_comments = MFComment(
"", self.path, self._simulation_data
)

self.blocks[block_key].load(
cur_block.load(
block_header_info, fd_input_file, strict
)

# write post block comments
self._simulation_data.mfdata[
self.blocks[block_key].blk_post_comment_path
cur_block.block_headers[-1].blk_post_comment_path
] = self.post_block_comments

blocks_read += 1
if blocks_read >= max_blocks:
break
else:
# treat skipped block as if it is all comments
arr_line = datautil.PyListUtil.split_data_line(
clean_line
)
self.post_block_comments.add_text(
"{}".format(line), True
)
while arr_line and (
len(line) <= 2 or arr_line[0][:3].upper() != "END"
):
line = fd_input_file.readline()
arr_line = datautil.PyListUtil.split_data_line(
line.strip()
)
if arr_line:
self.post_block_comments.add_text(
"{}".format(line), True
)
self._simulation_data.mfdata[
cur_block.block_headers[-1].blk_post_comment_path
] = self.post_block_comments

else:
if not (
len(clean_line) == 0
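The final hunk above treats a skipped block as if it were all comments: the header line and every following line through END are replayed into the post-block comment, which is stored under the previous header's blk_post_comment_path so the text survives a write round trip. A hedged standalone sketch of that loop (file handling simplified; not the flopy implementation):

```python
import io

def skip_block_as_comments(fd, header_line):
    # Consume a skipped block (header already read) and keep its lines
    # verbatim as comment text, stopping once the END line is seen.
    comments = [header_line]
    for line in fd:
        comments.append(line)
        if line.split() and line.split()[0][:3].upper() == "END":
            break
    return comments

block = io.StringIO("  1 1 1 1.0 10.0\nEND period 21\nBEGIN period 2\n")
print("".join(skip_block_as_comments(block, "BEGIN period 21\n")))
```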
