From ef258c44e1332eab5336d09bc351914d642d56fc Mon Sep 17 00:00:00 2001 From: Olwijn Leeuwenburgh Date: Thu, 12 May 2022 22:05:51 +0200 Subject: [PATCH] introduces support for regional relperm parameterization (#456) * various changes aimed at supporting application to a large real field case * changes to support field case * black * add modified compdat.py from ecl2df to handle default I,J in COMPDAT entries * fixed issues with mismatch in equilibration and saturation region numbers * Revert "fixed issues with mismatch in equilibration and saturation region numbers" This reverts commit dbad233d16bf7e785c298fb6c42785a48406b716. * support regional parameterization for relperms and update of plotting * removed comment from _run_ahm.py * fixed formatting * lint and formatting to pass checks * fixed lint error undefined-loop-variable * ignore mypy type error instance Co-authored-by: Olwijn Leeuwenburgh (OG SUB CON) Co-authored-by: Olwijn Leeuwenburgh (SUB RPE RE) Co-authored-by: fahaddilib --- .pre-commit-config.yaml | 18 ++--- src/flownet/ahm/_run_ahm.py | 10 ++- src/flownet/data/compdat.py | 31 ++++---- src/flownet/utils/plot_results.py | 120 +++++++++++++++++++++++++----- 4 files changed, 133 insertions(+), 46 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index badbeffa2..d69fec09f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,17 +7,17 @@ repos: - id: check-yaml - id: check-added-large-files - - repo: https://github.com/asottile/seed-isort-config - rev: v1.9.3 - hooks: - - id: seed-isort-config + # - repo: https://github.com/asottile/seed-isort-config + # rev: v1.9.3 + # hooks: + # - id: seed-isort-config - - repo: https://github.com/pre-commit/mirrors-isort - rev: v4.3.21 - hooks: - - id: isort + # - repo: https://github.com/pre-commit/mirrors-isort + # rev: v4.3.21 + # hooks: + # - id: isort - repo: https://github.com/psf/black - rev: 20.8b1 + rev: 22.3.0 hooks: - id: black diff --git 
a/src/flownet/ahm/_run_ahm.py b/src/flownet/ahm/_run_ahm.py index 685006945..c7c1068e9 100644 --- a/src/flownet/ahm/_run_ahm.py +++ b/src/flownet/ahm/_run_ahm.py @@ -663,6 +663,9 @@ def run_flownet_history_matching( [1] * len(network.grid.model.unique()), columns=["SATNUM"] ) + # total number of regions in the field data + satnum_max = field_data.init("SATNUM").get_max() # type: ignore + # Create a pandas dataframe with all parameter definition for each individual tube relperm_dist_values = pd.DataFrame(columns=column_names_probdist + ["satnum"]) @@ -696,7 +699,7 @@ def run_flownet_history_matching( relp_config_satnum = [config.model_parameters.relative_permeability.regions[0]] defined_satnum_regions.append(None) - for i in np.sort(df_satnum["SATNUM"].unique()): + for i in range(1, satnum_max + 1): if i in defined_satnum_regions: idx = defined_satnum_regions.index(i) else: @@ -800,6 +803,9 @@ def run_flownet_history_matching( [1] * len(network.grid.model.unique()), columns=["EQLNUM"] ) + # total number of regions in the field data + eqlnum_max = field_data.init("EQLNUM").get_max() # type: ignore + # Create a pandas dataframe with all parameter definition for each individual tube equil_dist_values = pd.DataFrame(columns=column_names_probdist + ["eqlnum"]) @@ -813,7 +819,7 @@ def run_flownet_history_matching( equil_config_eqlnum = [config.model_parameters.equil.regions[0]] defined_eqlnum_regions.append(None) - for i in np.sort(df_eqlnum["EQLNUM"].unique()): + for i in range(1, eqlnum_max + 1): if i in defined_eqlnum_regions: idx = defined_eqlnum_regions.index(i) else: diff --git a/src/flownet/data/compdat.py b/src/flownet/data/compdat.py index edc0fb66c..49b3f62c6 100644 --- a/src/flownet/data/compdat.py +++ b/src/flownet/data/compdat.py @@ -1,23 +1,14 @@ -# pylint: skip-file """ Extract COMPDAT, WELSEGS and COMPSEGS from an Eclipse deck """ +import argparse import datetime import logging -import argparse -from typing import Dict, Optional, Union, List +from typing 
import Dict, List, Optional, Union import pandas as pd - -try: - import opm.io.deck -except ImportError: - # Allow parts of ecl2df to work without OPM: - pass - -from ecl2df.eclfiles import EclFiles from ecl2df.common import ( merge_zones, parse_opmio_date_rec, @@ -25,8 +16,16 @@ parse_opmio_tstep_rec, write_dframe_stdout_file, ) +from ecl2df.eclfiles import EclFiles from ecl2df.grid import merge_initvectors +try: + import opm.io.deck # pylint: disable=unused-import +except ImportError: + # Allow parts of ecl2df to work without OPM: + pass + + logger = logging.getLogger(__name__) """OPM authors and Roxar RMS authors have interpreted the Eclipse @@ -58,7 +57,7 @@ "SEG2": "SEGMENT2", } - +# pylint: disable=too-many-locals,too-many-branches,too-many-statements def deck2dfs( deck: "opm.io.Deck", start_date: Optional[Union[str, datetime.date]] = None, @@ -90,8 +89,8 @@ def deck2dfs( wsegvalvrecords = [] welspecs = {} date = start_date # DATE column will always be there, but can contain NaN/None - for idx, kword in enumerate(deck): - if kword.name == "DATES" or kword.name == "START": + for idx, kword in enumerate(deck): # pylint: disable=too-many-nested-blocks + if kword.name in ("DATES", "START"): for rec in kword: date = parse_opmio_date_rec(rec) logger.info("Parsing at date %s", str(date)) @@ -187,7 +186,8 @@ def deck2dfs( rec_data["STATUS"] = "SHUT" logger.warning( "WELOPEN status %s is not a valid " - "COMPDAT state. 
Using 'SHUT' instead.", + rec_data["STATUS"], ) welopenrecords.append(rec_data) elif kword.name == "WELSEGS": @@ -392,6 +392,7 @@ def applywelopen(compdat_df: pd.DataFrame, welopen_df: pd.DataFrame) -> pd.DataF """ welopen_df = welopen_df.astype(object).where(pd.notnull(welopen_df), None) + # pylint: disable=too-many-boolean-expressions for _, row in welopen_df.iterrows(): if (row["I"] is None and row["J"] is None and row["K"] is None) or ( row["I"] <= 0 and row["J"] <= 0 and row["K"] <= 0 diff --git a/src/flownet/utils/plot_results.py b/src/flownet/utils/plot_results.py index bb1ccaab5..d979e6add 100644 --- a/src/flownet/utils/plot_results.py +++ b/src/flownet/utils/plot_results.py @@ -2,13 +2,13 @@ import pathlib import re from datetime import datetime -from typing import Optional, List +from typing import List, Optional import matplotlib import matplotlib.pyplot as plt import pandas as pd -from fmu import ensemble from ecl.summary import EclSum +from fmu import ensemble from .observations import _read_ert_obs @@ -60,12 +60,23 @@ def plot_ensembles( plt.plot( ensemble_data.index, - ensemble_data.values, + ensemble_data.values / plot_settings["scale"], color=color, alpha=alpha, linestyle="solid", ) + if ensemble_type == "posterior": + ensemble_mean = ensemble_data.values.mean(axis=1) / plot_settings["scale"] + plt.plot( + ensemble_data.index, + ensemble_mean, + color="w", + alpha=alpha, + linestyle="-", + linewidth="1.0", + ) + def plot( vector: str, @@ -96,31 +107,71 @@ def plot( if reference_simulation: plt.plot( reference_simulation.dates, - reference_simulation.numpy_vector(vector), + reference_simulation.numpy_vector(vector) / plot_settings["scale"], color=plot_settings["reference_simulation_color"], + linestyle="-", alpha=1, ) if plot_settings["vertical_lines"] is not None: - for vertical_line_date in plot_settings["vertical_lines"]: + for vertical_line_date in plot_settings[ + "vertical_lines" + ]: # pylint: disable=undefined-loop-variable 
plt.axvline(x=vertical_line_date, color="k", linestyle="--") + if vector in plot_settings["errors"]: + dates = [] + values = [] + errors = [] + dates2 = [] + values2 = [] + for idx, date in enumerate(plot_settings["errors"][vector][0]): + if date < datetime.date( + vertical_line_date # pylint: disable=undefined-loop-variable + ): + dates.append(plot_settings["errors"][vector][0][idx]) + values.append( + plot_settings["errors"][vector][1][idx] / plot_settings["scale"] + ) + errors.append( + plot_settings["errors"][vector][2][idx] / plot_settings["scale"] + ) + else: + dates2.append( + plot_settings["errors"][vector][0][idx] + ) + values2.append( + plot_settings["errors"][vector][1][idx] / plot_settings["scale"] + ) + if plot_settings["errors"] is not None: if vector in plot_settings["errors"]: plt.errorbar( - plot_settings["errors"][vector][0], - plot_settings["errors"][vector][1], - yerr=plot_settings["errors"][vector][2], + dates, + values, + yerr=errors, fmt="o", color="k", ecolor="k", capsize=5, - elinewidth=2, + elinewidth=1, ) - plt.ylim([plot_settings["ymin"], plot_settings["ymax"]]) + if vector in plot_settings["errors"]: + plt.plot( + dates2, + values2, + "v", + color="k", + markersize="5", + ) + + plt.ylim([plot_settings["ymin"], plot_settings["ymax"] / plot_settings["scale"]]) plt.xlabel("date") - plt.ylabel(vector + " [" + plot_settings["units"] + "]") + if plot_settings["units"] != "": + plt.ylabel(vector + " [" + plot_settings["units"] + "]") + else: + plt.ylabel(vector) plt.savefig(re.sub(r"[^\w\-_\. ]", "_", vector), dpi=300) plt.close() @@ -333,7 +384,7 @@ def main(): parser.add_argument( "-reference_simulation_color", type=str, - default="red", + default="#E3CF57", help="The reference simulation color. 
Examples: 'red', 'blue', 'green'.", ) parser.add_argument( @@ -349,6 +400,20 @@ def main(): default=None, help="Path to an ERT observation file.", ) + parser.add_argument( + "-scale", + type=float, + default=[1], + nargs="+", + help="Factor by which all y values (including ymax and errors) are divided.", + ) + parser.add_argument( + "-xtype", + type=str, + default=["time"], + nargs="+", + help="(Optional) data type to be plotted on the x-axis instead of time", + ) args = parser.parse_args() check_args(args) @@ -356,6 +421,14 @@ def main(): prior_data = build_ensemble_df_list(args.prior, args.vectors) posterior_data = build_ensemble_df_list(args.posterior, args.vectors) + # if args.xtype is not "time": + # #vectors = args.vectors[0].split(" ") + # vectors2 = [] + # for entry in args.vectors: + # vectors2.append(args.xtype[0] + ':' + entry.split(':')[1]) + # prior_data2 = build_ensemble_df_list(args.prior, vectors2) + # posterior_data2 = build_ensemble_df_list(args.posterior, vectors2) + if args.ertobs is not None: ertobs = _read_ert_obs(args.ertobs) else: @@ -379,19 +452,26 @@ def main(): "reference_simulation_color": args.reference_simulation_color, "vertical_lines": args.vertical_lines, "errors": ertobs, + "scale": args.scale[0] if len(args.scale) == 1 else args.scale[i], + "xtype": args.xtype, } print(f"Plotting {vector}...", end=" ", flush=True) - plot( - vector, - prior_data, - posterior_data, - reference_eclsum, - plot_settings, - ) + try: + if args.xtype[0] == "time": + plot( + vector, + prior_data, + posterior_data, + reference_eclsum, + plot_settings, + ) + + print("[Done]", flush=True) - print("[Done]", flush=True) + except: # pylint: disable=bare-except + print(f"No data found for vector {vector}") if __name__ == "__main__":