diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml index 71688375e..599ff0b54 100644 --- a/.github/workflows/codecov.yml +++ b/.github/workflows/codecov.yml @@ -23,16 +23,16 @@ jobs: with: python-version: 3.8 - - name: Install ecl2df + - name: Install res2df run: | pip install pip -U pip install .[tests,ert] - name: Generate coverage report and upload run: | - pytest tests --disable-warnings --cov=ecl2df --cov-report=xml - # Uninstall packages that ecl2df supports not being installed: + pytest tests --disable-warnings --cov=res2df --cov-report=xml + # Uninstall packages that res2df supports not being installed: pip uninstall --yes ert networkx opm # Run tests again in cov-append-mode: - pytest tests --disable-warnings --cov=ecl2df --cov-report=xml --cov-append + pytest tests --disable-warnings --cov=res2df --cov-report=xml --cov-append bash <(curl -s https://codecov.io/bash) diff --git a/.github/workflows/ecl2df.yml b/.github/workflows/res2df.yml similarity index 83% rename from .github/workflows/ecl2df.yml rename to .github/workflows/res2df.yml index 1eaa793e0..bd0fbee9a 100644 --- a/.github/workflows/ecl2df.yml +++ b/.github/workflows/res2df.yml @@ -1,4 +1,4 @@ -name: ecl2df +name: res2df on: push: @@ -15,7 +15,7 @@ env: ERT_SHOW_BACKTRACE: 1 jobs: - ecl2df: + res2df: runs-on: ubuntu-latest strategy: matrix: @@ -40,11 +40,11 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Install ecl2df with dependencies + - name: Install res2df with dependencies run: | pip install --upgrade pip pip install . - python -c "import ecl2df" + python -c "import res2df" - name: Install ert if: matrix.install-ert @@ -55,17 +55,17 @@ jobs: - name: Check code style and typing run: | - black --check ecl2df tests setup.py docs/conf.py - flake8 ecl2df tests - isort --check-only --profile black ecl2df tests - mypy ecl2df + black --check res2df tests setup.py docs/conf.py + flake8 res2df tests + isort --check-only --profile black res2df tests + mypy res2df - name: List all installed packages run: pip freeze - name: Run tests run: | - python -c "import ecl2df" + python -c "import res2df" pytest tests/ - name: Syntax check documentation @@ -81,8 +81,8 @@ jobs: run: | cp -R ./build/sphinx/html ../html - git config --local user.email "ecl2df-github-action" - git config --local user.name "ecl2df-github-action" + git config --local user.email "res2df-github-action" + git config --local user.name "res2df-github-action" git fetch origin gh-pages git checkout --track origin/gh-pages git clean -f -f -d -x @@ -103,7 +103,7 @@ jobs: if: github.event_name == 'release' && matrix.python-version == '3.8' env: TWINE_USERNAME: __token__ - TWINE_PASSWORD: ${{ secrets.ecl2df_pypi_token }} + TWINE_PASSWORD: ${{ secrets.res2df_pypi_token }} run: | python -m pip install --upgrade setuptools wheel twine python setup.py sdist bdist_wheel diff --git a/.gitignore b/.gitignore index 7deb22b05..fe6149219 100644 --- a/.gitignore +++ b/.gitignore @@ -1,13 +1,13 @@ .eggs .cache .coverage -ecl2df.egg-info +res2df.egg-info .tox *.swp *.pyc *~ docs/modules.rst -docs/ecl2df.rst -ecl2df/version.py +docs/res2df.rst +res2df/version.py \#* .\#* diff --git a/.pylintrc b/.pylintrc index ee7791fa6..aafe0859c 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,4 +1,4 @@ -# PYLINT: General settings for ecl2df +# PYLINT: General settings for res2df [GENERAL] disable=R0205, F0010, C0330, E1136, E0401,C0114 diff --git a/README.md b/README.md index 5d265bfad..9a06b3332 100644 --- a/README.md +++ b/README.md @@ -1,30 +1,30 @@ 
-[![Build Status](https://img.shields.io/github/workflow/status/equinor/ecl2df/ecl2df)](https://github.com/equinor/ecl2df/actions?query=workflow%3Aecl2df) -[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/equinor/ecl2df.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/equinor/ecl2df/context:python) -[![Total alerts](https://img.shields.io/lgtm/alerts/g/equinor/ecl2df.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/equinor/ecl2df/alerts/) -[![codecov](https://codecov.io/gh/equinor/ecl2df/branch/master/graph/badge.svg)](https://codecov.io/gh/equinor/ecl2df) +[![Build Status](https://img.shields.io/github/workflow/status/equinor/res2df/res2df)](https://github.com/equinor/res2df/actions?query=workflow%3Ares2df) +[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/equinor/res2df.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/equinor/res2df/context:python) +[![Total alerts](https://img.shields.io/lgtm/alerts/g/equinor/res2df.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/equinor/res2df/alerts/) +[![codecov](https://codecov.io/gh/equinor/res2df/branch/master/graph/badge.svg)](https://codecov.io/gh/equinor/res2df) [![Python 3.8-3.10](https://img.shields.io/badge/python-3.8%20|%203.9%20|%203.10-blue.svg)](https://www.python.org) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://black.readthedocs.io/) [![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) -# ecl2df -ecl2df is a Pandas DataFrame wrapper around libecl and opm.io, which -are used to access binary files outputted by the reservoir simulator -Eclipse, or its input files --- or any other tool outputting to the same -data format. +# res2df +res2df is a Pandas DataFrame wrapper around resdata and opm.io, which +are used to access binary files outputted by reservoir simulators, +or their input files --- or any other tool outputting to the same data format. -The reverse operation, from a Pandas DataFrame to Eclipse include files, -is provided for some of the modules. +The reverse operation, from a Pandas DataFrame to reservoir simulator include files +(commonly given the extensions ".inc", ".grdecl", etc.) is provided for some of the +modules. The package consists of a module pr. datatype, e.g. one module for summary files (.UNSMRY), one for completion data etc. There is a command line frontend for almost all functionality, called -`ecl2csv`, which converts the Eclipse data to DataFrames, and then dumps -the dataframes to files in CSV format, and a similar `csv2ecl` for the +`res2csv`, which converts the reservoir data to DataFrames, and then dumps +the dataframes to files in CSV format, and a similar `csv2res` for the reverse operation.
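A minimal illustrative sketch (not part of the patch) of driving the two renamed frontends from a Python script via subprocess; the deck name MYDECK.DATA and the CSV/include filenames are hypothetical placeholders, and the flags used are the ones shown in the documentation changes later in this patch.

.. code-block:: python

    # Editor's sketch: invoking the res2csv/csv2res command line frontends.
    # "MYDECK.DATA", "output.csv", "satfunc.csv" and "relperm.inc" are
    # placeholders, not files from this repository.
    import subprocess

    # Reservoir data -> CSV, monthly field-level summary vectors:
    subprocess.run(
        ["res2csv", "summary", "--column_keys", "F*", "--time_index", "monthly",
         "--output", "output.csv", "MYDECK.DATA"],
        check=True,
    )

    # The reverse operation, CSV -> include file, via csv2res:
    subprocess.run(
        ["csv2res", "satfunc", "satfunc.csv", "--output", "relperm.inc"],
        check=True,
    )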
-For documentation, see <https://equinor.github.io/ecl2df> +For documentation, see <https://equinor.github.io/res2df> ## License diff --git a/docs/conf.py b/docs/conf.py index 4d3297f8a..e7ba8b232 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -19,14 +19,14 @@ # -- Project information ----------------------------------------------------- -project = "ecl2df" +project = "res2df" author = "Håvard Berland" copyright = f"Equinor 2019-{datetime.datetime.now().year}" # The short X.Y version -import ecl2df # noqa +import res2df # noqa -release = metadata.version("ecl2df") +release = metadata.version("res2df") version = release # -- General configuration --------------------------------------------------- @@ -50,7 +50,7 @@ "sphinxarg.ext", ] -autoapi_modules: dict = {"ecl2df": None} +autoapi_modules: dict = {"res2df": None} autodoc_default_options = {"members": None} @@ -118,7 +118,7 @@ # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = "ecl2dfdoc" +htmlhelp_basename = "res2dfdoc" # -- Options for LaTeX output ------------------------------------------------ @@ -141,14 +141,14 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). -latex_documents = [(master_doc, "ecl2df.tex", "ecl2df Documentation", author, "manual")] +latex_documents = [(master_doc, "res2df.tex", "res2df Documentation", author, "manual")] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "ecl2df", "ecl2df Documentation", [author], 1)] +man_pages = [(master_doc, "res2df", "res2df Documentation", [author], 1)] # -- Options for Texinfo output ---------------------------------------------- @@ -159,10 +159,10 @@ texinfo_documents = [ ( master_doc, - "ecl2df", - "ecl2df Documentation", + "res2df", + "res2df Documentation", author, - "ecl2df", + "res2df", "One line description of project.", "Miscellaneous", ) diff --git a/docs/contribution.rst b/docs/contribution.rst index 0ad8b9bbe..8d13419e2 100644 --- a/docs/contribution.rst +++ b/docs/contribution.rst @@ -1,41 +1,41 @@ ====================== -Contributing to ecl2df +Contributing to res2df ====================== -Contributing to ecl2df is easiest on Linux computers. Windows has not been +Contributing to res2df is easiest on Linux computers. Windows has not been tested, and for Mac you will have to compile OPM yourself. Getting started as a developer ------------------------------ -The first thing to do, is to create a fork of ecl2df to your personal -github account. Go to https://github.com/equinor/ecl2df and click the Fork +The first thing to do is to create a fork of res2df to your personal +github account. Go to https://github.com/equinor/res2df and click the Fork button. Clone your fork to your local computer: .. code-block:: console - git clone git@github.com:<youraccount>/ecl2df - cd ecl2df + git clone git@github.com:<youraccount>/res2df + cd res2df Then add the upstream repository: .. code-block:: console - git remote add upstream git@github.com:equinor/ecl2df + git remote add upstream git@github.com:equinor/res2df This requires a valid login setup with SSH keys for your github account, needed for write access. After cloning, you should make a Python virtual environment in which you install -ecl2df and its dependencies.
If you want to create a new virtual environment for -ecl2df, you can do something like the following: +res2df and its dependencies. If you want to create a new virtual environment for +res2df, you can do something like the following: .. code-block:: console - python3 -m venv venv-ecl2df - source venv-ecl2df/bin/activate + python3 -m venv venv-res2df + source venv-res2df/bin/activate and then run ``pip`` : @@ -43,7 +43,7 @@ and then run ``pip`` : pip install -e .[tests,docs] -to install ecl2df in "edit"-mode together will all dependencies for ecl2df, its +to install res2df in "edit"-mode together with all dependencies for res2df, its test suite and documentation. A good start is to verify that all tests pass after having cloned the @@ -73,13 +73,13 @@ Komodo in order to prepare for the command: NB: For every monthly Komodo release, you might have to remake your komodo-venv. -Using ecl2df without OPM +Using res2df without OPM ------------------------ -OPM is only pip-installable on Linux. To use the non-OPM dependent ecl2df -modules on something else than Linux (but with libecl installed), you should +OPM is only pip-installable on Linux. To use the non-OPM dependent res2df +modules on something other than Linux (but with resdata installed), you should install all the dependencies (except OPM) using ``pip`` (see ``setup.py`` for -list of dependencies), and then install ecl2df with the ``--no-deps`` option +list of dependencies), and then install res2df with the ``--no-deps`` option to ``pip``. After this, the non-OPM dependent modules should work, and others will fail with import errors. @@ -88,7 +88,7 @@ Development workflow If you have a feature or bugfix, a typical procedure is to: -* Consider writing an issue on https://github.com/equinor/ecl2df/issues describing +* Consider writing an issue on https://github.com/equinor/res2df/issues describing what is not working or what is not present. * Make a new git branch for your contribution, from an updated master branch. * Write a test for the feature or a test proving the bug. Verify that ``pytest`` @@ -100,7 +100,7 @@ If you have a feature or bugfix, a typical procedure is to: * Check your code quality with pylint. New code should aim for maximal pylint score. Pylint exceptions should only be used when warranted. * Commit your changes, remember to add any new files. -* Push your branch to your fork on github, and go to github.com/equinor/ecl2df +* Push your branch to your fork on github, and go to github.com/equinor/res2df and make a pull request from your branch. Link your pull request to any relevant issue. * Fix any errors that pop up from automated checks. diff --git a/docs/csv2ecl.rst b/docs/csv2ecl.rst deleted file mode 100644 index 9087a99a7..000000000 --- a/docs/csv2ecl.rst +++ /dev/null @@ -1,22 +0,0 @@ -csv2ecl -======= - -Some of the modules inside ecl2df is able to write Eclipse include files -from dataframes (in the format dumped by ecl2df). This makes it possible -to produce Eclipse input data in any application that can write CSV files, -and use this tool to convert it into Eclipse include files, or it can -facilitate operations/manipulations of an existing deck using any tool -that can work on CSV files, by first running ecl2csv on an input file, -transforming it, and writing back using csv2ecl. - -Mandatory argument for csv2ecl is -always the submodule responsible, a CSV file, and -an ``--output`` option to specify which include file to write to. -If you want output to your terminal, use ``-`` as the output filename.
Unless -you also specify the ``--keywords`` argument with a list of wanted keywords, all -supported keywords for a submodule which is also found in the CSV file provided, -will be dumped to output file. - -.. argparse:: - :ref: ecl2df.csv2ecl.get_parser - :prog: csv2ecl diff --git a/docs/csv2res.rst b/docs/csv2res.rst new file mode 100644 index 000000000..7ad27649a --- /dev/null +++ b/docs/csv2res.rst @@ -0,0 +1,22 @@ +csv2res +======= + +Some of the modules inside res2df are able to write :term:`.DATA files<.DATA file>` +from dataframes (in the format dumped by res2df). This makes it possible +to produce :term:`.DATA files<.DATA file>` in any application that can write CSV files, +and use this tool to convert it into reservoir simulator files, or it can +facilitate operations/manipulations of an existing :term:`deck` using any tool +that can work on CSV files, by first running res2csv on an :term:`include file`, +transforming it, and writing back using csv2res. + +The mandatory arguments for csv2res are +always the responsible submodule, a CSV file, and +an ``--output`` option to specify which include file to write to. +If you want output to your terminal, use ``-`` as the output filename. Unless +you also specify the ``--keywords`` argument with a list of wanted keywords, all +supported keywords for a submodule that are also found in the provided CSV file +will be dumped to an :term:`output file`. + +.. argparse:: + :ref: res2df.csv2res.get_parser + :prog: csv2res diff --git a/docs/ecl2csv.rst b/docs/ecl2csv.rst deleted file mode 100644 index 89f125e46..000000000 --- a/docs/ecl2csv.rst +++ /dev/null @@ -1,13 +0,0 @@ -ecl2csv -======= - -Most of the functionality in ecl2df is exposed to the command line through -the script *ecl2csv*. The first argument to this script is always -the submodule (subcommand) from which you want functionality. Mandatory argument is -always an Eclipse deck or sometimes individual Eclipse include files, and -there is usually an ``--output`` option to specify which file to dump -the CSV to. If you want output to your terminal, use ``-`` as the output filename. - -.. argparse:: - :ref: ecl2df.ecl2csv.get_parser - :prog: ecl2csv diff --git a/docs/glossary.rst b/docs/glossary.rst new file mode 100644 index 000000000..e6ed5b095 --- /dev/null +++ b/docs/glossary.rst @@ -0,0 +1,29 @@ +Glossary +======== + +.. glossary:: + + reservoir simulator + Simulation of reservoir fields comes in many forms, but for the purposes of + res2df we only consider simulators that take a :term:`deck` as input and produce + :term:`output files <output file>` such as `.UNSMRY`. This includes OPM Flow and Eclipse. + + .DATA file + Input provided to reservoir simulators such as Eclipse or OPM Flow. + Often a :term:`.DATA file` includes other :term:`include files <include file>` with the INCLUDE keyword. + + include file + Files that provide inputs to reservoir simulators by using the INCLUDE statement + in :term:`.DATA files <.DATA file>`. By convention, these files often have the extension .INC/.inc + (generally) or .GRDECL/.grdecl (for files included into the grid section). + + deck + Refers to inputs passed to reservoir simulators. It may be a :term:`.DATA file` and the + include files it points to, or it may be one or several include files. + If a deck contains all the information (i.e., keywords) the simulator needs + to run the requested simulation, it is defined as complete. Otherwise it is incomplete. + + output file + When a reservoir simulator runs, several files will be generated.
+ These will have extensions such as .EGRID, .FEGRID, .UNSMRY, .GRID, .INIT, etc. + See the OPM Flow manual Appendix D (https://opm-project.org/wp-content/uploads/2023/06/OPM_Flow_Reference_Manual_2023-04_Rev-0_Reduced.pdf) diff --git a/docs/index.rst b/docs/index.rst index b61ddf06d..187fcaa1a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,9 +1,9 @@ -ecl2df +res2df ====== -ecl2df is a Pandas DataFrame wrapper around libecl and opm.io, which -are used to access binary files outputted by the reservoir simulator -Eclipse, or its input files --- or any other tool outputting to the same +res2df is a Pandas DataFrame wrapper around resdata and opm.io, which +are used to access :term:`binary files outputted by the reservoir simulators <output file>` or +their :term:`input files <.DATA file>` --- or any other tool outputting to the same data format. .. toctree:: @@ -11,18 +11,19 @@ data format. introduction usage - ecl2csv - csv2ecl + res2csv + csv2res installation contribution history + glossary .. toctree:: :hidden: :maxdepth: 10 :caption: Python API - ecl2df/ecl2df + res2df/res2df Indices and tables ================== diff --git a/docs/installation.rst b/docs/installation.rst index b7b6510d0..89a7a09a3 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -1,18 +1,18 @@ Installation ============ -Internally in Equinor, ecl2df is distributed through Komodo and +Internally in Equinor, res2df is distributed through Komodo and nothing is needed besides activating Komodo. See https://fmu-docs.equinor.com/docs/komodo/equinor_komodo_usage.html for Komodo instructions. -On Linux computers outside Equinor, ecl2df should be installed from +On Linux computers outside Equinor, res2df should be installed from https://pypi.org: .. code-block:: console - pip install ecl2df + pip install res2df For MacOS, the OPM dependency is not available from pypi, and OPM must be compiled manually. diff --git a/docs/introduction.rst b/docs/introduction.rst index e5a8aa131..1bcdcf84f 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -1,15 +1,15 @@ Introduction ============ -*ecl2df* is a `Pandas DataFrame <https://pandas.pydata.org/>`_ wrapper -around `libecl <https://github.com/equinor/libecl>`_ and `opm.io +*res2df* is a `Pandas DataFrame <https://pandas.pydata.org/>`_ wrapper +around `resdata <https://github.com/equinor/resdata>`_ and `opm.io <https://github.com/OPM/opm-common>`_, which are used to access -binary files outputted by the reservoir simulator Eclipse, or its -input files --- or any other tool outputting to the same data format, +:term:`binary files outputted by reservoir simulators <output file>` such as Eclipse, or its +:term:`input files <.DATA file>` --- or any other tool outputting to the same data format, f.ex. `flow <https://opm-project.org>`_. Most of the features can be reached from the command line, through the -command line program ``ecl2csv``. Use the command line tool to dump the +command line program ``res2csv``. Use the command line tool to dump the extracted or computed data to a CSV file, and use any other tool to view the CSV data. Examples ^^^^^^^^ ..
code-block:: console - > ecl2csv --help - > ecl2csv summary --help - > ecl2csv summary --column_keys "F*" --time_index monthly --output output.csv MYECLDECK.DATA - > ecl2csv pillars --help - > ecl2csv pillars --rstdates all MYECLDECK.DATA + > res2csv --help + > res2csv summary --help + > res2csv summary --column_keys "F*" --time_index monthly --output output.csv MYDECK.DATA + > res2csv pillars --help + > res2csv pillars --rstdates all MYDECK.DATA If you access the module from within a Python script, for each submodule there is a function called ``df()`` which provides more or less the same -functionality as through ``ecl2csv`` from the command line, but which returns +functionality as through ``res2csv`` from the command line, but which returns a Pandas Dataframe. .. code-block:: python - import ecl2df + import res2df - eclfiles = ecl2df.EclFiles("MYECLDECK.DATA") - smry = ecl2df.summary.df(eclfiles, column_keys="F*", time_index="monthly") - hc_contacts = ecl2df.pillars.df(eclfiles, rstdates="all") + resdatafiles = res2df.ResdataFiles("MYDECK.DATA") + smry = res2df.summary.df(resdatafiles, column_keys="F*", time_index="monthly") + hc_contacts = res2df.pillars.df(resdatafiles, rstdates="all") See the API for more documentation and possibilities for each module. @@ -45,7 +45,7 @@ Short description of each submodule ``summary`` ^^^^^^^^^^^^^^ -Extracts summary data from `.UNSMRY` files, at requested time sampling and +Extracts summary data from :term:`.UNSMRY <output file>` files, at requested time sampling and for requested vectors. More documentation on :doc:`usage/summary`. @@ -53,10 +53,11 @@ More documentation on :doc:`usage/summary`. ``grid`` ^^^^^^^^ -Extracts grid data from `.INIT` and `.EGRID` and `.UNRST` files. Restart file +Extracts grid data from :term:`.INIT <output file>`, :term:`.EGRID <output file>`, +and :term:`.UNRST <output file>` files. Restart files are optional to extract, and dates must be picked (or all). Data is merged into one DataFrame by the `i`, `j` and `k` indices. Bulk cell -volume is included. Cells are indexed as in Eclipse, starting with 1. +volume is included. Cells are indexed starting with 1. More documentation on :doc:`usage/grid`. @@ -94,7 +95,8 @@ More documentation on :doc:`usage/trans`. ``rft`` ^^^^^^^ -Reads the `.RFT` files which are outputted by the simulator when +Reads the `.RFT` files which are outputted by the +:term:`simulator <reservoir simulator>` when the `WRFTPLT` keyword is used, with details along wellbores. For multisegment wells, the well topology is calculated and data @@ -106,8 +108,8 @@ More documentation on :doc:`usage/rft`. ``fipreports`` ^^^^^^^^^^^^^^ -Parses the PRT file from Eclipse looking for region reports (starting -with " ... FIPNUM REPORT REGION". It will extract all the data +Parses the PRT file looking for region reports (starting +with " ... FIPNUM REPORT REGION"). It will extract all the data in the ASCII table in the PRT file and organize it into a dataframe, currently-in-place, outflow to wells, outflows to regions, etc. It also supports custom FIPxxxxx names. @@ -118,8 +120,8 @@ More documentation on :doc:`usage/fipreports`. ``satfunc`` ^^^^^^^^^^^ -Extracts saturation functions (SWOF, SGOF, etc) from the deck and merges -into one DataFrame. Can write back to Eclipse include files. +Extracts saturation functions (SWOF, SGOF, etc) from the :term:`deck` and merges +into one DataFrame. Can write back to :term:`include files <include file>`. More documentation on :doc:`usage/satfunc`. @@ -127,19 +129,19 @@
^^^^^^^^^ Extracts the information in the `EQUIL` table, `RSVD` and `RVVD` in the -input deck. Can write back to Eclipse include files. +:term:`.DATA file`. Can write back to :term:`include files <include file>`. More documentation on :doc:`usage/equil`. ``compdat`` ^^^^^^^^^^^ -Extracts well connection data from the `COMPDAT` keyword in the input deck. +Extracts well connection data from the `COMPDAT` keyword in the :term:`deck`. For multi-segment wells, `WELSEGS` and `COMPSEGS` are also parsed. The data is available as three different dataframes, which can be merged. -It is also possible to parse individual "include" files, not only a -finished working deck. +It is also possible to parse individual :term:`"include files" <include file>`. +These files do not necessarily have to be part of a complete :term:`deck`. More documentation on :doc:`usage/compdat`. @@ -147,7 +149,7 @@ ^^^^^^^^^^^^ Extracts the information from the `GRUPTREE` and `WELSPECS` keyword, at -all timesteps, from the input deck. The tree structure at each relevant +all timesteps, from the :term:`.DATA file`. The tree structure at each relevant date can be returned as a dataframe of the edges, as a nested dictionary or as a `treelib` tree. @@ -156,8 +158,8 @@ More documentation on :doc:`usage/gruptree`. ``pvt`` ^^^^^^^ -Extracts PVT data from an Eclipse deck, from the keywords `PVTO`, `PVDG`, -`DENSITY`, `ROCK` etc. Can write data back to Eclipse include files. +Extracts PVT data from a :term:`.DATA file`, from the keywords `PVTO`, `PVDG`, +`DENSITY`, `ROCK` etc. Can write data back to :term:`include files <include file>`. More documentation on :doc:`usage/pvt`. @@ -169,13 +171,13 @@ associated data in a dataframe format. More documentation on :doc:`usage/wcon`. -``eclfiles`` -^^^^^^^^^^^^ +``resdatafiles`` +^^^^^^^^^^^^^^^^ This is an internal helper module in order to represent finished or -unfinished Eclipse decks and runs. The class EclFiles can cache binary -files that are recently read, and is able to locate the various output -files based on the basename or the `.DATA` filename. +unfinished :term:`.DATA files <.DATA file>` and runs. The class ResdataFiles can cache binary +files that are recently read, and is able to locate the various +:term:`output files <output file>` based on the basename or the `.DATA` filename. Metadata support ---------------- @@ -183,8 +185,8 @@ Metadata support parameters.txt ^^^^^^^^^^^^^^ -Metadata for each Eclipse deck are sometimes added in a text file named -``parameters.txt``, alongside the Eclipse DATA file or one or two directory levels +Metadata for each :term:`.DATA file` are sometimes added in a text file named +``parameters.txt``, alongside the Eclipse .DATA file or one or two directory levels above it. Each line in the text file should contain a string, interpreted as the key, and @@ -202,9 +204,9 @@ have to be merged with pandas.merge(). Zone names ^^^^^^^^^^ -If a text file with zone names are found alongside the Eclipse DATA file, some of the modules -will add that information to rows where appropriate. The zone or layer file should contains -lines like:: +If a text file with zone names is found alongside :term:`.DATA files <.DATA file>`, +some of the modules will add that information to rows where appropriate.
+The zone or layer file should contain lines like:: 'ZoneA' 1-4 'ZoneB' 5-10 diff --git a/docs/res2csv.rst b/docs/res2csv.rst new file mode 100644 index 000000000..a5e26e16c --- /dev/null +++ b/docs/res2csv.rst @@ -0,0 +1,14 @@ +res2csv +======= + +Most of the functionality in res2df is exposed to the command line through +the script *res2csv*. The first argument to this script is always +the submodule (subcommand) from which you want functionality. The mandatory argument is +always a :term:`.DATA file` or sometimes individual +:term:`include files <include file>`, and there is usually an ``--output`` +option to specify which file to dump the CSV to. +If you want output to your terminal, use ``-`` as the output filename. + +.. argparse:: + :ref: res2df.res2csv.get_parser + :prog: res2csv diff --git a/docs/usage/compdat.rst b/docs/usage/compdat.rst index 1c2ff78b0..d9ff0f7a7 100644 --- a/docs/usage/compdat.rst +++ b/docs/usage/compdat.rst @@ -1,19 +1,19 @@ compdat ^^^^^^^ -This module extracts COMPDAT, WELSEGS and COMPSEGS from an Eclipse deck. +This module extracts COMPDAT, WELSEGS and COMPSEGS from a :term:`.DATA file`. Additionally, it will parse WELOPEN statements and emit new COMPDAT statements from the actions in WELOPEN. .. - compdat.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/compdat.csv', index=False) + compdat.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/compdat.csv', index=False) .. code-block:: python - from ecl2df import compdat, EclFiles + from res2df import compdat, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = compdat.df(eclfiles) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = compdat.df(resdatafiles) .. csv-table:: Example COMPDAT table :file: compdat.csv @@ -26,7 +26,7 @@ for each of COMPDAT, and the segmentation keywords. .. warning:: When WELOPEN is in use, the dataframe can differ from Eclipse behaviour in certain circumstances. The dataframe representation from ``compdat`` does not distinguish between a "shut" well and the open-ness of its - connections. So in an Eclipse deck it is possible to shut a well, and then + connections. So in a :term:`.DATA file` it is possible to shut a well, and then reopen it, and get back the original open/shut state of individual connections prior to well shut. The dataframe format will display `all` connections as open if a well is opened with defaulted indices. @@ -39,5 +39,5 @@ be added to the returned data through the option ``--initvectors``: .. code-block:: console - ecl2csv compdat --verbose MYDATADECK.DATA --initvectors FIPNUM PERMX - # (put the DATA file first, if not it will be interpreted as a vector) + res2csv compdat --verbose MYDATADECK.DATA --initvectors FIPNUM PERMX + # (put the .DATA file first, if not it will be interpreted as a vector) diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index d1433a780..fea85f6a0 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -1,25 +1,25 @@ equil ----- -This is the ecl2df module for processing the ``SOLUTION`` section of -the Eclipse input deck. +This is the res2df module for processing the ``SOLUTION`` section of +the :term:`.DATA file`. Supported keywords are ``EQUIL``, ``RSVD``, ``RVVD``, ``PBVD`` and ``PDVD``. Typical usage is ..
code-block:: python - from ecl2df import equil, EclFiles + from res2df import equil, ResdataFiles - dframe = equil.df(EclFiles('MYECLDECK.DATA')) + dframe = equil.df(ResdataFiles('MYDECK.DATA')) Which will provide a dataframe similar to the example below. Note that the column `Z` is used both for datum depth and the depth values in ``RSVD`` tables. The number of columns obtained depends on the input dataset, and should be possible -to link up with the Eclipse documentation. API doc: :func:`ecl2df.equil.df` +to link up with the Eclipse documentation. API doc: :func:`res2df.equil.df` .. - dframe = equil.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) + dframe = equil.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) dframe[['EQLNUM', 'KEYWORD', 'Z', 'PRESSURE', 'OWC', 'GOC', 'RS']]\ .to_csv(index=False)) @@ -51,15 +51,16 @@ one meter for compatibility, which you could do by the statements: dframe.loc[rsvd_rows, "Z"] = dframe.loc[rsvd_rows, "Z"] + 1 -Re-exporting tables to Eclipse include files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Re-exporting tables to include-files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When you are done with the table, you can generate new include files for -Eclipse from your modified data by issuing +When you are done with the table, you can generate new +:term:`include files <include file>` from your modified +data by issuing .. code-block:: python - equil.df2ecl(dframe, filename="solution.inc") + equil.df2res(dframe, filename="solution.inc") -The last step can also be done using the ``csv2ecl`` command line utility +The last step can also be done using the ``csv2res`` command line utility if you dump to CSV from your Python code instead. diff --git a/docs/usage/fipnum.inc b/docs/usage/fipnum.inc index 1e30880bb..2566db1d9 100644 --- a/docs/usage/fipnum.inc +++ b/docs/usage/fipnum.inc @@ -1,5 +1,5 @@ --- Output file printed by ecl2df.grid 0.6.0 --- at 2020-04-23 10:46:22.529558 +-- Output file printed by res2df.grid 0.17.2 +-- at 2023-11-16 9:31:23.318941 FIPNUM 21*2 19*1 20*2 20*1 20*2 20*1 19*2 21*1 19*2 21*1 18*2 diff --git a/docs/usage/fipreports.rst b/docs/usage/fipreports.rst index 905b69894..afe2fee2c 100644 --- a/docs/usage/fipreports.rst +++ b/docs/usage/fipreports.rst @@ -1,7 +1,7 @@ fipreports ---------- -fipreports is a parser for the Eclipse PRT output file, extracting data +fipreports is a parser for the PRT output file, extracting data from these tables: .. literalinclude:: fipreports-example.txt @@ -9,7 +9,7 @@ from these tables: This table found in a PRT file will be parsed to the following dataframe: .. - Generated with ecl2csv fipreports -v --fipname FIPZON fipreports-example.PRT -o fipreports-example.csv + Generated with res2csv fipreports -v --fipname FIPZON fipreports-example.PRT -o fipreports-example.csv Date added manually .. csv-table:: FIPZON table from PRT file :file: fipreports-example.csv :header-rows: 1 In this particular example, ``FIPZON`` was selected explicitly, either using the command line client or the Python API -through an option to the :func:`ecl2df.fipreports.df` function. +through an option to the :func:`res2df.fipreports.df` function. -Using this module is easiest through ``ecl2csv fipreports``. +Using this module is easiest through ``res2csv fipreports``.
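A hedged sketch of the fipreports Python API referred to above; that fipreports.df() accepts a PRT filename directly and takes a fipname argument is the editor's assumption, extrapolated from the res2csv fipreports command line shown in the hunk.

.. code-block:: python

    # Editor's sketch mirroring the command line example above; the exact
    # signature of fipreports.df() is an assumption. CLI equivalent:
    # res2csv fipreports --fipname FIPZON fipreports-example.PRT
    from res2df import fipreports

    dframe = fipreports.df("fipreports-example.PRT", fipname="FIPZON")
    print(dframe.head())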
diff --git a/docs/usage/grid.rst b/docs/usage/grid.rst index 35a4e51f6..929a1bb9d 100644 --- a/docs/usage/grid.rst +++ b/docs/usage/grid.rst @@ -2,23 +2,24 @@ grid ---- The grid module will extract static and dynamic cell properties from -an Eclipse grid (from the binary output files from Eclipse). Each row -in a returned dataframe represents one cell. +a grid +(from the :term:`output files of reservoir simulators <output file>`). +Each row in a returned dataframe represents one cell. Typical usage .. code-block:: python - from ecl2df import grid, EclFiles + from res2df import grid, ResdataFiles - eclfiles = EclFiles('MYDATADECK.DATA') - dframe = grid.df(eclfiles, rstdates='last') + resdatafiles = ResdataFiles('MYDATADECK.DATA') + dframe = grid.df(resdatafiles, rstdates='last') -where the API is documented at :func:`ecl2df.grid.df`. +where the API is documented at :func:`res2df.grid.df`. .. - eclfiles = EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA') - grid.df(eclfiles).sample(10).to_csv('docs/usage/grid.csv', float_format="%.2f", index=False) + resdatafiles = ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA') + grid.df(resdatafiles).sample(10).to_csv('docs/usage/grid.csv', float_format="%.2f", index=False) .. csv-table:: Example grid table :file: grid.csv :header-rows: 1 Alternatively, the same data can be produced as a CSV file using the command lin .. code-block:: console - ecl2csv grid --help # Will display some help text - ecl2csv grid MYDATADECK.DATA --rstdates last --verbose --output grid.csv + res2csv grid --help # Will display some help text + res2csv grid MYDATADECK.DATA --rstdates last --verbose --output grid.csv Select which vectors to include (INIT and/or restart vectors) with the @@ -38,7 +39,7 @@ Select which vectors to include (INIT and/or restart vectors) with the .. code-block:: console - ecl2csv grid --verbose MYDATADECK.DATA --vectors PRESSURE PERMX + res2csv grid --verbose MYDATADECK.DATA --vectors PRESSURE PERMX Example computations on a grid dataframe ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -110,20 +111,20 @@ the whereabouts of the file: .. code-block:: python - from ecl2df import grid, EclFiles, common + from res2df import grid, ResdataFiles, common - eclfiles = EclFiles("'MYDATADECK.DATA") - dframe = grid.df(eclfiles) - # The filename with layers is relative to DATA-file location + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = grid.df(resdatafiles) + # The filename with layers is relative to .DATA file location + # or an absolute path. - subzonemap = ecl2df.common.parse_zonemapfile("subzones.lyr") + subzonemap = common.parse_zonemapfile("subzones.lyr") dframe_with_subzones = common.merge_zones( dframe, subzonemap, zoneheader="SUBZONE", kname="K" ) For more control over merging of zones, check the documentation for -the function :func:`ecl2df.common.merge_zones` and -:meth:`ecl2df.common.parse_zonemapfile` +the function :func:`res2df.common.merge_zones` and +:meth:`res2df.common.parse_zonemapfile` Dynamic data ^^^^^^^^^^^^ @@ -142,34 +143,34 @@ Calculating volumes of dynamic data (pr. some region parameter) can be obtained from that module as a by-product of the pillar computations. -Generating Eclipse include files from grid data -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Generating include files from grid data +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you have loaded grid data into a Pandas frame, some operations are easily performed, scaling porosity, permeability etc. Or remapping some region parameters.
Using the -:func:`ecl2df.grid.df2ecl()` function these manipulated vectors can be written back as -include files to Eclipse. +:func:`res2df.grid.df2res()` function, these manipulated vectors can be written back as +:term:`include files <include file>`. Say you want to change the FIPNUM, and that FIPNUM 6 should be removed, and set it to FIPNUM 5. This can be accomplished using .. code-block:: python - from ecl2df import grid, EclFiles, common + from res2df import grid, ResdataFiles, common - eclfiles = EclFiles("'MYDATADECK.DATA") - dframe = grid.df(eclfiles) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = grid.df(resdatafiles) # Change FIPNUM 6 to FIPNUM 5: rows_to_touch = dframe["FIPNUM"] == 6 dframe.loc[rows_to_touch, "FIPNUM"] = 5 # Write back to new include file, ensure datatype is integer. - grid.df2ecl(dframe, "FIPNUM", dtype=int, filename="fipnum.inc", eclfiles=eclfiles) + grid.df2res(dframe, "FIPNUM", dtype=int, filename="fipnum.inc", resdatafiles=resdatafiles) This will produce the file `fipnum.inc` with the contents: .. literalinclude:: fipnum.inc -It is recommended to supply the ``eclfiles`` object to ``df2ecl``, if not, correct grid +It is recommended to supply the ``resdatafiles`` object to ``df2res``; if not, the correct grid size can not be ensured. diff --git a/docs/usage/gruptree.rst b/docs/usage/gruptree.rst index a33ba774a..5be75f44f 100644 --- a/docs/usage/gruptree.rst +++ b/docs/usage/gruptree.rst @@ -1,13 +1,13 @@ gruptree -------- -Extracts data from the GRUPTREE, GRUPNET and WELSPECS keywords from an Eclipse -deck and presents the production network either as pretty-printed ASCII or in a +Extracts data from the GRUPTREE, GRUPNET and WELSPECS keywords from a :term:`.DATA file` +and presents the production network either as pretty-printed ASCII or in a dataframe-representation. -The GRUPTREE section of your Eclipse deck defines the production network +The GRUPTREE section of your :term:`.DATA file` defines the production network from wells and up to the platform (and possibly also to a field having -many platforms). In the Eclipse deck it be as simple as this:: +many platforms). In the :term:`.DATA file` it can be as simple as this:: START 01 'JAN' 2000 / @@ -35,7 +35,7 @@ available (here also wells from WELSPECS is included): .. code-block:: console - > ecl2csv gruptree --prettyprint MYDATADECK.DATA + > res2csv gruptree --prettyprint MYDATADECK.DATA Date: 2000-01-01 └── NORTHSEA └── AREA └── PLATFORM1 [etc] wait sorry └── NORTHSEA └── AREA └── PLATFORM └── INJEAST └── INJ1 -In your deck, the table will be repeated for every new occurence of the +In your :term:`deck`, the table will be repeated for every new occurrence of the GRUPTREE keyword in the Schedule section. GRUPNET and WELSPECS By default, the module will also pick up information from GRUPNET (typical terminal pressure values for the network nodes) and WELSPECS (well -specifications), so for a full deck, your dataframe will contain more +specifications), so for a full :term:`deck`, your dataframe will contain more information than in the example above.
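A hedged sketch of the gruptree dataframe output described above; gruptree.df() follows the per-module df() convention documented in the introduction, while the DATE column name used for filtering is the editor's assumption.

.. code-block:: python

    # Editor's sketch of the gruptree Python API; the "DATE" column name is
    # an assumption, the rest follows the documented df() convention.
    from res2df import gruptree, ResdataFiles

    resdatafiles = ResdataFiles("MYDATADECK.DATA")
    edges = gruptree.df(resdatafiles)

    # One row per edge in the production network, repeated for every date
    # where the network definition changes:
    print(edges[edges["DATE"] == edges["DATE"].min()])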
-If our deck also contains:: +If our :term:`deck` also contains:: GRUPNET 'FIELD' 90 / diff --git a/docs/usage/nnc.rst b/docs/usage/nnc.rst index ba8205612..641e7d263 100644 --- a/docs/usage/nnc.rst +++ b/docs/usage/nnc.rst @@ -1,7 +1,7 @@ nnc --- -nnc will extract Non-Neighbour-Connections from your Eclipse grid as pairs +nnc will extract Non-Neighbour-Connections from your grid as pairs of *ijk* indices together with their associated transmissibilities. See also the :doc:`trans` module, which can extract all transmissibilities, not only @@ -11,14 +11,14 @@ Note: Eclipse300 will not export TRANNNC data in parallel mode. Run in serial to get this output. .. - nnc.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/nnc.csv', index=False) + nnc.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/nnc.csv', index=False) .. code-block:: python - from ecl2df import nnc, EclFiles + from res2df import nnc, ResdataFiles - eclfiles = EclFiles('MYDATADECK.DATA') - dframe = nnc.df(eclfiles) + resdatafiles = ResdataFiles('MYDATADECK.DATA') + dframe = nnc.df(resdatafiles) .. csv-table:: Example nnc table :file: nnc.csv :header-rows: 1 Alternatively, the same data can be produced as a CSV file using the command lin .. code-block:: console - ecl2csv nnc MYDATADECK.DATA --verbose --output nnc.csv + res2csv nnc MYDATADECK.DATA --verbose --output nnc.csv It is possible to add *xyz* coordinates for each connection (as the average of the xyz for each of the cells involved in a connection pair) as extra columns. If you only want vertical connections, add the option ``--pillars`` or ``-vertical``, -or set ``pillars=True`` if using the Python API (:func:`ecl2df.nnc.df`) +or set ``pillars=True`` if using the Python API (:func:`res2df.nnc.df`) ``EDITNNC`` export ^^^^^^^^^^^^^^^^^^ Data for the ``EDITNNC`` keyword can be dumped, in order to scale the NNC connec using Pandas operations. Select the connections you want to scale by slicing the nnc dataframe (either from the nnc module, or from the trans module), and fill transmissibility multipliers in a new column ``TRANM``, then this can be exported -to an Eclipse include file: +to an :term:`include file`: .. code-block:: python - from ecl2f import nnc, EclFiles + from res2df import nnc, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - nnc_df = nnc.df(eclfiles) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + nnc_df = nnc.df(resdatafiles) nnc_df["TRANM"] = 0.1 # Reduce all NNC transmissibilities - nnc.df2ecl_editnnc(nnc_df, filename="editnnc.inc") + nnc.df2res_editnnc(nnc_df, filename="editnnc.inc") and the contents of the exported file can be: .. - print(nnc.df2ecl_editnnc(nnc.df(eclfiles).head(4).assign(TRANM=0.1))) + print(nnc.df2res_editnnc(nnc.df(resdatafiles).head(4).assign(TRANM=0.1))) .. code-block:: console diff --git a/docs/usage/pillars.rst b/docs/usage/pillars.rst index 5e8ba7936..7ec04f119 100644 --- a/docs/usage/pillars.rst +++ b/docs/usage/pillars.rst @@ -3,8 +3,8 @@ pillars ------- -pillars is a module to compute data on "pillars" in the grid from an -Eclipse simulation, including both static and dynamic data from the grid. +pillars is a module to compute data on "pillars" in the grid from a +simulation, including both static and dynamic data from the grid. Static data ^^^^^^^^^^^ Typical usage is to obtain property statistics, and compute contacts pr. pillar (and optionally pr some region parameter). ..
- from ecl2df import pillars, EclFiles - pillars.df(ecl2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) - pillars.df(ecl2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head().to_csv("pillars-example1.csv"float_format="%.1f", index=False)) + from res2df import pillars, ResdataFiles + pillars.df(res2df.ResdataFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) + pillars.df(res2df.ResdataFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head().to_csv("pillars-example1.csv", float_format="%.1f", index=False) .. csv-table:: Example pillar table :file: pillars-example1.csv :header-rows: 1 @@ -33,7 +33,7 @@ repeated for each region value where it exists. Dynamic data, volumes and fluid contacts ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The API :func:`ecl2df.pillars.df` and command line client allows specifying +The API :func:`res2df.pillars.df` and command line client allow specifying dates if dynamic data should be included through the ``rstdates`` option to the API or the ``--rstdates`` option on the command line. Providing dates as an option will trigger computation of phase volumes ``WATVOL``, ``OILVOL``, and @@ -55,15 +55,15 @@ Gas-water contact is only computed when ``SOIL`` is not present in the simulation (two-phase runs), it will be the deepest cell centre with gas saturation above sgascutoff, among those pillars with at least one cell above ``swatcutoff``. See the API documentation, -:func:`ecl2df.pillars.compute_pillar_contacts`. +:func:`res2df.pillars.compute_pillar_contacts`. -The functionality is also available through the command line tool ``ecl2csv pillars`` +The functionality is also available through the command line tool ``res2csv pillars`` as in the example: .. code-block:: console - ecl2csv pillars --help # This will display some help text - ecl2csv pillars MYDATAFILE.DATA --rstdates all --stackdates + res2csv pillars --help # This will display some help text + res2csv pillars MYDATAFILE.DATA --rstdates all --stackdates It is *strongly* recommended to play with the cutoffs to get the desired result. Also calibrate the computed contacts with the initial contacts, you may see that @@ -80,7 +80,7 @@ using ``--group`` to the command line client, and add optionally a ``--region`` parameter to group over a particular region, typically ``EQLNUM``. The Python API will group over any data that is supplied via the ``region`` -option, check :func:`ecl2df.pillars.df` +option, check :func:`res2df.pillars.df` Stacked version ^^^^^^^^^^^^^^^ By default, dynamic data are added as a set of columns for every date, like in this example: .. - pillars.df(ecl2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), rstdates='all').dropna().head().to_csv('pillars-dyn1-unstacked.csv', float_format="%.1f", index=False) + pillars.df(res2df.ResdataFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), rstdates='all').dropna().head().to_csv('pillars-dyn1-unstacked.csv', float_format="%.1f", index=False) ..
csv-table:: Example pillar table with dynamical data, unstacked :file: pillars-dyn1-unstacked.csv :header-rows: 1 This may be what you want, however it is also possible to have ``DATE`` as a column, -obtained by triggering the stacking option in :func:`ecl2df.pillars.df` or +obtained by triggering the stacking option in :func:`res2df.pillars.df` or ``--stackdates`` on the command line and get data like this: diff --git a/docs/usage/pvt.rst b/docs/usage/pvt.rst index 265ab570c..8774a081b 100644 --- a/docs/usage/pvt.rst +++ b/docs/usage/pvt.rst @@ -1,7 +1,7 @@ pvt --- -Extracts PVT related keyword data from the PROPS section in an Eclipse deck, +Extracts PVT-related keyword data from the PROPS section in a :term:`.DATA file`, typically the keywords ``PVTO``, ``PVDG``, ``DENSITY`` and ``ROCK``. Data from all keywords will be merged into one common dataframe. Example usage: .. code-block:: python - from ecl2df import pvt, EclFiles + from res2df import pvt, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = pvt.df(eclfiles) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = pvt.df(resdatafiles) -Alternatively, we may also read directly from an include file +Alternatively, we may also read directly from an :term:`include file` if we read the contents of the file and supply it as a string: .. code-block:: python @@ -22,7 +22,7 @@ if we read the contents of the file and supply it as a string: dframe = pvt.df(open("pvt.inc").read()) .. - pvt.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).tail(15).to_csv('docs/usage/pvt.csv', index=False) + pvt.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).tail(15).to_csv('docs/usage/pvt.csv', index=False) .. csv-table:: Example PVT table (last 15 rows to show non-Nan data) :file: pvt.csv :header-rows: 1 If your PVT data resides in multiple include files, but you can't import -the entire deck, you have to merge the dataframes in Python like this: +the entire :term:`deck`, you have to merge the dataframes in Python like this: .. code-block:: python @@ -67,21 +67,21 @@ Possibly, different viscosity scaling pr. PVTNUM is needed Density values are easier to scale up or down to whatever is needed. -Re-exporting tables to Eclipse include files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Re-exporting tables to include files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When you are done with the table, you can generate new include files for -Eclipse from your modified data by issuing +When you are done with the table, you can generate new +:term:`include files <include file>` from your modified data by issuing .. code-block:: python - pvt.df2ecl(dframe, filename="pvt.inc") + pvt.df2res(dframe, filename="pvt.inc") -When injecting this produced ``pvt.inc`` into any new Eclipse deck, ensure you +When injecting this produced ``pvt.inc`` into any new :term:`.DATA file`, ensure you check which keywords have been written out, compared to what you gave in to -`ecl2df.pvt` above. Any non-supported keywords will get lost in the import phase -and need to be catered for outside ecl2df. +`res2df.pvt` above. Any non-supported keywords will get lost in the import phase +and need to be catered for outside res2df. -The last step can also be done using the ``csv2ecl`` command line utility +The last step can also be done using the ``csv2res`` command line utility if you dump to CSV from your Python code instead.
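The pvt text above mentions merging dataframes from multiple include files without showing the merge itself; below is a hedged sketch, where the include filenames are hypothetical and the use of pandas.concat is the editor's choice (pvt.df() accepting file contents as a string is documented in the hunk).

.. code-block:: python

    # Editor's sketch of merging PVT data parsed from several include files;
    # the filenames are placeholders and pandas.concat is an assumption.
    import pandas as pd

    from res2df import pvt

    dframes = []
    for filename in ["pvto.inc", "density.inc", "rock.inc"]:
        with open(filename) as f_handle:
            # pvt.df() accepts the file contents as a string, as documented:
            dframes.append(pvt.df(f_handle.read()))
    dframe = pd.concat(dframes, ignore_index=True)

    # Write the merged tables back to one include file:
    pvt.df2res(dframe, filename="pvt.inc")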
diff --git a/docs/usage/rft.rst b/docs/usage/rft.rst index 01b87a618..81a2642e3 100644 --- a/docs/usage/rft.rst +++ b/docs/usage/rft.rst @@ -1,7 +1,7 @@ rft --- -rft will convert the binary RFT files from Eclipse to dataframes or CSV files, +rft will convert the binary RFT files to dataframes or CSV files, facilitating analysis of inflow and pressure for each connection the well has to the reservoir grid. Typical usage is to generate the CSV from the command line: .. code-block:: console - ecl2csv rft MYDATADECK.DATA --verbose --output rft.csv + res2csv rft MYDATADECK.DATA --verbose --output rft.csv It is possible to select a specific well and a date (YYYY-MM-DD). If you enable debug mode through ``--debug``, more information is printed, including an ASCII representation @@ -20,7 +20,7 @@ Eclipse usage ^^^^^^^^^^^^^ In order to get RFT files emitted from Eclipse, you need the ``WRFTPLT`` keyword -in your DATA-file, example: +in your :term:`.DATA file`, example: .. code-block:: console diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index cdb25423a..17143dcc7 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -1,9 +1,9 @@ satfunc ------- -satfunc will extract saturation functions from Eclipse decks or from Eclipse -include files, these are the keywords ``SWOF``, ``SGOF``, ``SGWFN``, ``SWFN``, -``SOF2``, ``SGFN``, ``SOF3`` and ``SLGOF``. +satfunc will extract saturation functions from :term:`.DATA files <.DATA file>` or from +:term:`include files <include file>`; these are the keywords ``SWOF``, ``SGOF``, +``SGWFN``, ``SWFN``, ``SOF2``, ``SGFN``, ``SOF3`` and ``SLGOF``. The data obtained from one invocation of the satfunc module will be put in one dataframe, where data from different keywords are separated by the ``KEYWORD`` column. .. import numpy as np - satfunc.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).iloc[np.r_[0:5, 37:42, -5:0]].to_csv('docs/usage/satfunc.csv', index=False) + satfunc.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).iloc[np.r_[0:5, 37:42, -5:0]].to_csv('docs/usage/satfunc.csv', index=False) .. code-block:: python - from ecl2df import satfunc, EclFiles + from res2df import satfunc, ResdataFiles - eclfiles = EclFiles('MYDATADECK.DATA') - dframe = satfunc.df(eclfiles) + resdatafiles = ResdataFiles('MYDATADECK.DATA') + dframe = satfunc.df(resdatafiles) .. csv-table:: Example satfunc table (only a subset of the rows are shown) :file: satfunc.csv :header-rows: 1 Alternatively, the same data can be produced as a CSV file using the command lin .. code-block:: console - ecl2csv satfunc MYDATADECK.DATA --verbose --output satfunc.csv + res2csv satfunc MYDATADECK.DATA --verbose --output satfunc.csv It is possible to extract keywords one at a time using the ``--keywords`` command line option. -Instead of Eclipse data decks, individual include files may also be parsed, but +Instead of complete :term:`decks <deck>`, individual include files may also be parsed, but only one at a time. -Generating Eclipse include files from dataframes -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Generating include files from dataframes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When a dataframe of saturation function data is loaded into Python, any operation may be applied on the data.
Simple operations would typically be scaling, perhaps @@ -55,33 +55,33 @@ the command # Multiply these rows by 0.5 dframe.loc[rows_to_touch, "KRW"] *= 0.5 -For a dataframe or a CSV file in the format provided by this module, an Eclipse -include file can be generated either with the Python API -:func:`ecl2df.satfunc.df2ecl` function or the command +For a dataframe or a CSV file in the format provided by this module, an +:term:`include file` can be generated either with the Python API +:func:`res2df.satfunc.df2res` function or the command .. code-block:: console - csv2ecl satfunc satfunc.csv --output relperm.inc --keywords SWOF SGOF --verbose + csv2res satfunc satfunc.csv --output relperm.inc --keywords SWOF SGOF --verbose -which should give a file ``relperm.inc`` that can be parsed by Eclipse. The command +which should give a file ``relperm.inc`` that can be parsed by reservoir simulators. The command above will only pick the keywords ``SWOF`` and ``SGOF`` (in the case there are data for more keywords in the dataframe). -There are no automated checks for validity of the dumped include files. +There are no automated checks for validity of the dumped :term:`include file`. Extracting properties pr. SATNUM ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you have an include file prepared (from any source), you might need to +If you have an :term:`include file` prepared (from any source), you might need to determine certain properties like endpoint. If you need to determine for example "SOWCR" - the largest oil saturation for which oil is immobile, because you need to avoid SOWCR + SWCR overshooting 1, you can write code like this .. code-block:: python - from ecl2df import satfunc + from res2df import satfunc - # Read an Eclipse include file directly into a DataFrame + # Read an include file directly into a DataFrame with open("relperm.inc") as f_handle: sat_df = satfunc.df(f_handle.read()) @@ -94,7 +94,7 @@ because you need to avoid SOWCR + SWCR overshooting 1, you can write a code # Apply that function individually on each SATNUM: sat_df.groupby("SATNUM").apply(sowcr) -for an example include file, this could result in +for an example :term:`include file`, this could result in .. code-block:: console @@ -109,13 +109,13 @@ The pyscal library Manipulation of curve shapes or potentially interpolation between curves is hard to do directly on the dataframes. Before doing manipulations of dataframes in -``ecl2df.satfunc``, consider if it is better to implement the manipulations +``res2df.satfunc``, consider if it is better to implement the manipulations through the `pyscal <https://equinor.github.io/pyscal/>`_ library. Pyscal can create curves from parametrizations, and interpolate between curves. -Pyscal can create initialize its relperm objects from Eclipse include files -though the parsing capabilities of ecl2df.satfunc. +Pyscal can initialize its relperm objects from :term:`include files <include file>` +through the parsing capabilities of res2df.satfunc. -The function ``pyscal.pyscallist.df()`` is analogous to ``ecl2df.satfunc.df()`` in -what it produces, and the :func:`ecl2df.satfunc.df2ecl()` can be used on both +The function ``pyscal.pyscallist.df()`` is analogous to ``res2df.satfunc.df()`` in +what it produces, and :func:`res2df.satfunc.df2res()` can be used on both (potentially with some filtering needed).
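A hedged sketch tying together the satfunc steps documented above, from parsing a deck to exporting a modified include file; the KEYWORD, SATNUM and KRW column names are taken from the hunks above, while filtering on SATNUM value 1 (and its integer dtype) is an illustrative assumption.

.. code-block:: python

    # Editor's sketch combining the documented satfunc operations.
    from res2df import satfunc, ResdataFiles

    dframe = satfunc.df(ResdataFiles("MYDATADECK.DATA"))

    # Halve water relative permeability in the SWOF table for SATNUM 1
    # (assumes SATNUM is stored as an integer):
    rows_to_touch = (dframe["KEYWORD"] == "SWOF") & (dframe["SATNUM"] == 1)
    dframe.loc[rows_to_touch, "KRW"] *= 0.5

    # Generate an include file from the modified dataframe:
    satfunc.df2res(dframe, filename="relperm.inc")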
diff --git a/docs/usage/summary.rst b/docs/usage/summary.rst index 0b7c01d06..e9020d535 100644 --- a/docs/usage/summary.rst +++ b/docs/usage/summary.rst @@ -1,18 +1,18 @@ summary ^^^^^^^ -This module extracts summary information from UNSMRY-files into +This module extracts summary information from :term:`UNSMRY-files <output file>` into Pandas Dataframes. .. - summary.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), column_keys="F*PT", time_index='yearly').to_csv("summary.csv") + summary.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), column_keys="F*PT", time_index='yearly').to_csv("summary.csv") .. code-block:: python - from ecl2df import summary, EclFiles + from res2df import summary, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = summary.df(eclfiles, column_keys="F*PT", time_index="yearly") + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = summary.df(resdatafiles, column_keys="F*PT", time_index="yearly") If you don't specify ``column_keys``, all included summary vectors will be retrieved. Default for ``time_index`` is the report datetimes written by Eclipse equivalent to ``time_index="raw"``, other options are *daily*, *weekly*, data. Additional arguments are available, see the -`API documentation `_ +`API documentation `_ for an extensive overview. .. csv-table:: Example summary table diff --git a/docs/usage/trans.rst b/docs/usage/trans.rst index d27c00b4c..83577e70c 100644 --- a/docs/usage/trans.rst +++ b/docs/usage/trans.rst @@ -4,20 +4,20 @@ trans The trans module can extract transmissibilities (neighbour and non-neighbour-connections) from a simulation grid. -Python API: :func:`ecl2df.trans.df` +Python API: :func:`res2df.trans.df` -Applied on an Eclipse deck, the *trans* module will give out a dataframe of neighbour +Applied on a :term:`.DATA file`, the *trans* module will give out a dataframe of neighbour connections .. code-block:: python - from ecl2df import trans, EclFiles + from res2df import trans, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = ecl2df.trans.df(eclfiles) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = trans.df(resdatafiles) .. - ecl2df.trans.df(ecl2df.EclFiles("2_R001_REEK-0.DATA")).sample(7)\ .to_csv("trans1.csv", float_format="%.2f", index=False) + res2df.trans.df(res2df.ResdataFiles("2_R001_REEK-0.DATA")).sample(7)\ .to_csv("trans1.csv", float_format="%.2f", index=False) .. csv-table:: Neighbour transmissibilities, sample rows from an example simulation. :file: trans1.csv :header-rows: 1 The last column ``DIR`` is the direction of the connection in i-j-k space, and can take on the values ``I``, ``J``, and ``K``. The ``TRAN`` column has values from the -``TRANX``, ``TRANY`` or ``TRANZ`` in the Eclipse output files. +``TRANX``, ``TRANY`` or ``TRANZ`` in the output files. You can obtain this dataframe as a CSV file by writing this command on the command line: .. code-block:: console - ecl2csv trans MYDATADECK.DATA --verbose --output trans.csv + res2csv trans MYDATADECK.DATA --verbose --output trans.csv Adding more data for each connection ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -79,12 +79,12 @@ like this. Example: .. code-block:: python - dframe = ecl2df.trans.df(eclfiles, vectors="FIPNUM", boundaryfilter=True, addnnc=True) + dframe = res2df.trans.df(resdatafiles, vectors="FIPNUM", boundaryfilter=True, addnnc=True) which gives the dataframe ..
- ecl2df.trans.df(ecl2df.EclFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", boundaryfilter=True).sample(10).to_csv("trans-boundaries.csv", index=False, float_format="%.2f") + res2df.trans.df(res2df.ResdataFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", boundaryfilter=True).sample(10).to_csv("trans-boundaries.csv", index=False, float_format="%.2f") .. csv-table:: Sample rows from connections where FIPNUM is changing :file: trans-boundaries.csv @@ -105,13 +105,13 @@ over a region interface. This is accomplished by adding the ``group=True`` optio .. code-block:: python - from ecl2df import trans, EclFiles + from res2df import trans, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = ecl2df.trans.df(eclfiles, vectors="FIPNUM", addnnc=True, group=True) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = res2df.trans.df(resdatafiles, vectors="FIPNUM", addnnc=True, group=True) .. - ecl2df.trans.df(ecl2df.EclFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", group=True).to_csv("trans-group.csv", index=False, float_format="%.2f") + res2df.trans.df(res2df.ResdataFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", group=True).to_csv("trans-group.csv", index=False, float_format="%.2f") .. csv-table:: Transmissibilities summed over each FIPNUM interface :file: trans-group.csv @@ -121,5 +121,5 @@ where this last table can also be exported directly from the command line using .. code-block:: console - ecl2csv trans MYDATADECK.DATA --vectors FIPNUM --nnc --group --output fipnuminterfaces.csv + res2csv trans MYDATADECK.DATA --vectors FIPNUM --nnc --group --output fipnuminterfaces.csv diff --git a/docs/usage/wcon.rst b/docs/usage/wcon.rst index 35f8ea4a8..910c62281 100644 --- a/docs/usage/wcon.rst +++ b/docs/usage/wcon.rst @@ -2,16 +2,16 @@ wcon ^^^^ This module extracts information from WCONHIST, WCONINJE, WCONINJH and -WCONPROD from an Eclipse deck. +WCONPROD from a :term:`.DATA file`. .. - wcon.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/wcon.csv', index=False) + wcon.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/wcon.csv', index=False) .. code-block:: python - from ecl2df import wcon, EclFiles + from res2df import wcon, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = wcon.df(eclfiles) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = wcon.df(resdatafiles) .. csv-table:: Example WCON table :file: wcon.csv diff --git a/ecl2df/__init__.py b/res2df/__init__.py similarity index 90% rename from ecl2df/__init__.py rename to res2df/__init__.py index cf264352f..90a6b180e 100644 --- a/ecl2df/__init__.py +++ b/res2df/__init__.py @@ -11,7 +11,7 @@ __version__ = "v0.0.0" from .constants import MAGIC_STDOUT -from .eclfiles import EclFiles +from .resdatafiles import ResdataFiles SUBMODULES: List[str] = [ "compdat", @@ -34,11 +34,11 @@ ] -def getLogger_ecl2csv( - module_name: str = "ecl2df", args_dict: Optional[Dict[str, Union[str, bool]]] = None +def getLogger_res2csv( + module_name: str = "res2df", args_dict: Optional[Dict[str, Union[str, bool]]] = None ) -> logging.Logger: # pylint: disable=invalid-name - """Provide a custom logger for ecl2csv and csv2ecl + """Provide a custom logger for res2csv and csv2res Logging output is by default split by logging levels (split between WARNING and ERROR) to stdout and stderr, each log occurs in only one of the streams. 
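A short sketch of the call pattern the renamed logger factory is designed for; the entry points in this patch pass ``vars(args)``, and the ``verbose`` and ``output`` keys below are assumed option names used only for illustration:

.. code-block:: python

    from res2df import getLogger_res2csv

    # Mimics what a command line entry point would do with vars(args):
    logger = getLogger_res2csv(__name__, {"verbose": True, "output": "wcon.csv"})
    logger.info("Informational messages go to stdout")
    logger.error("Errors go to stderr")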
@@ -91,5 +91,5 @@ def getLogger_ecl2csv( return logger -for submodule in SUBMODULES + ["ecl2csv", "csv2ecl"]: - importlib.import_module("ecl2df." + submodule) +for submodule in SUBMODULES + ["res2csv", "csv2res"]: + importlib.import_module("res2df." + submodule) diff --git a/ecl2df/common.py b/res2df/common.py similarity index 94% rename from ecl2df/common.py rename to res2df/common.py index c9aef1e18..42cb70e39 100644 --- a/ecl2df/common.py +++ b/res2df/common.py @@ -1,4 +1,4 @@ -"""Common functions for ecl2df modules""" +"""Common functions for res2df modules""" import argparse import datetime @@ -26,15 +26,15 @@ # to be included in DeckItem objects. from opm.io.deck import DeckKeyword # noqa except ImportError: - # Allow parts of ecl2df to work without OPM: + # Allow parts of res2df to work without OPM: pass -from ecl2df import __version__ +from res2df import __version__ from .constants import MAGIC_STDOUT # Parse named JSON files, this exposes a dict of dictionary describing the contents -# of supported Eclipse keyword data +# of supported keyword data OPMKEYWORDS: Dict[str, dict] = {} for keyw in [ "BRANPROP", @@ -94,7 +94,7 @@ .splitlines() ) ] -ECLMONTH2NUM = { +MONTH2NUM = { "JAN": 1, "FEB": 2, "MAR": 3, @@ -109,7 +109,7 @@ "NOV": 11, "DEC": 12, } -NUM2ECLMONTH = {num: month for month, num in ECLMONTH2NUM.items()} +NUM2MONTH = {num: month for month, num in MONTH2NUM.items()} logger: logging.Logger = logging.getLogger(__name__) @@ -154,7 +154,7 @@ def write_dframe_stdout_file( def write_inc_stdout_file(string: str, outputfilename: str) -> None: - """Write a string (typically an include file string) to stdout + """Write a string (typically an :term:`include file` string) to stdout or to a named file""" if outputfilename == MAGIC_STDOUT: # Ignore pipe errors when writing to stdout: @@ -165,14 +165,12 @@ def write_inc_stdout_file(string: str, outputfilename: str) -> None: print(f"Wrote to {outputfilename}") -def parse_ecl_month(eclmonth: str) -> int: - """Translate Eclipse month strings to integer months""" - return ECLMONTH2NUM[eclmonth] +def parse_month(rdmonth: str) -> int: + """Translate resdata month strings to integer months""" + return MONTH2NUM[rdmonth] -def datetime_to_eclipsedate( - timestamp: Union[str, datetime.datetime, datetime.date] -) -> str: +def datetime_to_ecldate(timestamp: Union[str, datetime.datetime, datetime.date]) -> str: """Convert a Python timestamp or date to the Eclipse DATE format""" if isinstance(timestamp, str): if list(map(len, timestamp.split(" ")[0].split("-"))) != [4, 2, 2]: @@ -181,20 +179,20 @@ def datetime_to_eclipsedate( timestamp = dateutil.parser.parse(timestamp) # noqa (py36 flake8 bug) if not isinstance(timestamp, (datetime.datetime, datetime.date)): raise TypeError("Require string or datetime") - string = f"{timestamp.day} '{NUM2ECLMONTH[timestamp.month]}' {timestamp.year}" + string = f"{timestamp.day} '{NUM2MONTH[timestamp.month]}' {timestamp.year}" if isinstance(timestamp, datetime.datetime): string += " " + timestamp.strftime("%H:%M:%S") return string.replace("00:00:00", "").strip() -def ecl_keyworddata_to_df( +def keyworddata_to_df( deck, keyword: str, renamer: Optional[Dict[str, Union[str, List[str]]]] = None, recordcountername: Optional[str] = None, emptyrecordcountername: Optional[str] = None, ) -> pd.DataFrame: - """Extract data associated to an Eclipse keyword into a tabular form. + """Extract data associated to a keyword into tabular form. 
Two modes of enumeration of tables in the keyworddata are supported; you will have to find out which one fits your particular keyword. Activate @@ -202,7 +200,7 @@ will be the name of your enumeration, e.g. PVTNUM, EQLNUM or SATNUM. Arguments: - deck: Parsed deck + deck: Parsed :term:`deck` keyword: Name of the keyword for which to extract data. renamer: Mapping of names present in OPM json files for the keyword to desired column names in returned dataframe @@ -277,7 +275,7 @@ def parse_opmio_deckrecord( Args: record: Record to be parsed - keyword: Which Eclipse keyword this belongs to + keyword: Which keyword this belongs to itemlistname: The key in the json dict that describes the items, typically 'items' or 'records' recordindex: For keywords where itemlistname is 'records', this is a @@ -353,7 +351,7 @@ day = record[0].get_int(0) month = record[1].get_str(0) year = record[2].get_int(0) - return datetime.date(year=year, month=parse_ecl_month(month), day=day) + return datetime.date(year=year, month=parse_month(month), day=day) def parse_opmio_tstep_rec(record: "opm.io.DeckRecord") -> List[Union[float, int]]: @@ -450,7 +448,7 @@ def handle_wanted_keywords( not_supported: Set[str] = set(wanted) - set(supported) if not_supported: logger.warning( - "Requested keyword(s) not supported by ecl2df.%s: %s", + "Requested keyword(s) not supported by res2df.%s: %s", modulename, str(not_supported), ) @@ -473,22 +471,23 @@ def fill_reverse_parser( parser: argparse.ArgumentParser, modulename: str, defaultoutputfile: str ): """A standardized submodule parser for the command line utility - to produce Eclipse include files from a CSV file. + to produce :term:`include files <include file>` from a CSV file. Arguments: parser: parser to fill with arguments modulename: Will be included in the help text - defaultoutputfile: Default output filename + defaultoutputfile: Default :term:`output file` name """ parser.add_argument( - "csvfile", help="Name of CSV file with " + modulename + " data on ecl2df format" + "csvfile", + help="Name of CSV file with " + modulename + " data in res2df format", ) parser.add_argument( "-o", "--output", type=str, help=( - "Name of output Eclipse include file file, default " + "Name of output resdata include file, default " + defaultoutputfile + ". " "Use '-' for stdout." @@ -508,7 +507,7 @@ def fill_reverse_parser( return parser -def df2ecl( +def df2res( dataframe: pd.DataFrame, keywords: Optional[Union[str, List[str], List[Optional[str]]]] = None, comments: Optional[Dict[str, str]] = None, @@ -516,16 +515,16 @@ consecutive: Optional[str] = None, filename: Optional[str] = None, ) -> str: - """Generate Eclipse include strings from dataframes in ecl2df format. + """Generate resdata :term:`include file` content from dataframes in res2df format. This function hands over the actual text generation pr. keyword - to functions named df2ecl_<keyword> in the calling module. + to functions named df2res_<keyword> in the calling module. - These functions may again use generic_ecltable() from this module + These functions may again use generic_deck_table() from this module for the actual string construction. Args: - dataframe: Dataframe with Eclipse data on ecl2df format. + dataframe: Dataframe in res2df format. keywords: List of keywords to include.
Will be reduced to the set of keywords available in dataframe and to those supported comments: Dictionary indexed by keyword with comments to be @@ -539,7 +538,7 @@ def df2ecl( to file. Returns: - string that can be used as an include file for Eclipse. + string that can be used as contents of :term:`include file`. """ from_module = inspect.stack()[1] calling_module = inspect.getmodule(from_module[0]) @@ -596,7 +595,7 @@ def df2ecl( return "" string = "" - ecl2df_header = ( + res2df_header = ( "Output file printed by " + calling_module.__name__ # type: ignore + " " @@ -605,13 +604,13 @@ def df2ecl( + " at " + str(datetime.datetime.now()) ) - string += comment_formatter(ecl2df_header) + string += comment_formatter(res2df_header) string += "\n" if "master" in comments: string += comment_formatter(comments["master"]) for keyword in keywords: # Construct the associated function names - function_name = "df2ecl_" + keyword.lower() + function_name = "df2res_" + keyword.lower() function = getattr(calling_module, function_name) if keyword in comments: string += function(dataframe, comments[keyword]) @@ -624,15 +623,14 @@ def df2ecl( return string -def generic_ecltable( +def generic_deck_table( dframe: pd.DataFrame, keyword: str, comment: Optional[str] = None, renamer: Optional[Dict[str, str]] = None, drop_trailing_columns: bool = True, ) -> str: - """Construct a typical Eclipse table for data following - a keyword. Each row (record in Eclipse terms) ends with a slash. + """Construct string contents of a :term:`.DATA file` table. This function will *not* add a final slash after all rows, as this is keyword dependent. Some keywords require it, some keywords @@ -747,7 +745,7 @@ def generic_ecltable( return string + tablestring + "\n" -def runlength_eclcompress(string: str, sep: str = " ") -> str: +def runlength_compress(string: str, sep: str = " ") -> str: """Compress a string of space-separated elements so that 2 2 2 2 2 3 3 4 @@ -844,7 +842,7 @@ def stack_on_colnames( def is_color(input_string: str) -> bool: """Checks if the input string is a valid color. 
That is six-digit hexadecimal, three-digit hexadecimal or - given as an SVG color keyword name + given as an SVG color keyword name """ if input_string.lower() in SVG_COLOR_NAMES: return True diff --git a/ecl2df/compdat.py b/res2df/compdat.py similarity index 95% rename from ecl2df/compdat.py rename to res2df/compdat.py index dace91cd4..446fcc63d 100644 --- a/ecl2df/compdat.py +++ b/res2df/compdat.py @@ -1,4 +1,4 @@ -"""Parser and dataframe generator for the Eclipse keywords: +"""Parser and dataframe generator for the keywords: * COMPDAT * COMPLUMP * COMPSEGS @@ -22,10 +22,10 @@ # pylint: disable=unused-import import opm.io.deck except ImportError: - # Allow parts of ecl2df to work without OPM: + # Allow parts of res2df to work without OPM: pass -from ecl2df import getLogger_ecl2csv +from res2df import getLogger_res2csv from .common import ( get_wells_matching_template, @@ -35,8 +35,8 @@ parse_opmio_tstep_rec, write_dframe_stdout_file, ) -from .eclfiles import EclFiles from .grid import merge_initvectors +from .resdatafiles import ResdataFiles logger = logging.getLogger(__name__) @@ -75,14 +75,13 @@ def deck2dfs( start_date: Optional[Union[str, datetime.date]] = None, unroll: bool = True, ) -> Dict[str, pd.DataFrame]: - """Loop through the deck and pick up information found + """Loop through the :term:`deck` and pick up information found - The loop over the deck is a state machine, as it has to pick up dates and + The loop over the :term:`deck` is a state machine, as it has to pick up dates and potential information from the WELSPECS keyword. Args: - deck: A deck representing the schedule - Does not have to be a full Eclipse deck, an include file is sufficient + deck: A :term:`deck` representing the schedule start_date: The default date to use for events where the DATE or START keyword is not found in advance. Default: None @@ -517,7 +516,7 @@ def unroll_complump(complump_df: pd.DataFrame) -> pd.DataFrame: ) if val_i == 0 or val_j == 0 or val_k1 == 0 or val_k2 == 0: raise ValueError( - f"Defaulted COMPLUMP coordinates are not supported in ecl2df: {row}" + f"Defaulted COMPLUMP coordinates are not supported in res2df: {row}" ) if val_k2 < val_k1: raise ValueError(f"K2 must be equal to or greater than K1: {row}") @@ -830,16 +829,17 @@ def applywelopen( 'OP2' SHUT 66 44 10 / / - This deck would define two wells where OP1 and OP2 have two connected grid cells - each. The first welopen statment acts on the whole well, closing both the well and - the connections. If this statement used STOP instead of SHUT, the connections would - be left open. The second welopen statement acts on a single connection. Here SHUT - and STOP would give the same result. This behavior has been proven to be correct - in the simulator. The Eclipse manual states that 'If items 3 - 7 are all defaulted, - the Open/Shut/Stop command applies to the well, leaving the connections unchanged', - but this has been proven to be wrong. The state of the connection can be tested - by looking at the CPI summary vectors. The connection is SHUT if CPI==0 and OPEN - if CPI>0. + This :term:`deck` would define two wells where OP1 and OP2 have two + connected grid cells each. The first welopen statement acts on the whole + well, closing both the well and the connections. If this statement used STOP + instead of SHUT, the connections would be left open. The second welopen + statement acts on a single connection. Here SHUT and STOP would give the + same result. This behavior has been proven to be correct in the simulator.
+ The Eclipse manual states that 'If items 3 - 7 are all defaulted, the + Open/Shut/Stop command applies to the well, leaving the connections + unchanged', but this has been proven to be wrong. The state of the + connection can be tested by looking at the CPI summary vectors. The + connection is SHUT if CPI==0 and OPEN if CPI>0. WELOPEN can also be used at different dates and changes therefore the state of connections without explicit use of the COMPDAT keyword. This function translates @@ -950,7 +950,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: Arguments: parser: parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of Eclipse DATA file.") + parser.add_argument( + "DATAFILE", help="Name of the .DATA input file for the reservoir simulator" + ) parser.add_argument( "-o", "--output", @@ -970,16 +972,16 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def compdat_main(args): """Entry-point for module, for command line utility""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - compdat_df = df(eclfiles, initvectors=args.initvectors) + resdatafiles = ResdataFiles(args.DATAFILE) + compdat_df = df(resdatafiles, initvectors=args.initvectors) write_dframe_stdout_file(compdat_df, args.output, index=False, caller_logger=logger) def df( - eclfiles: EclFiles, + resdatafiles: ResdataFiles, initvectors: Optional[List[str]] = None, zonemap: Optional[Dict[int, str]] = None, ) -> pd.DataFrame: @@ -992,17 +994,17 @@ def df( Returns: pd.Dataframe with one row pr cell to well connection """ - compdat_df = deck2dfs(eclfiles.get_ecldeck())["COMPDAT"] + compdat_df = deck2dfs(resdatafiles.get_deck())["COMPDAT"] compdat_df = unrolldf(compdat_df) if initvectors: compdat_df = merge_initvectors( - eclfiles, compdat_df, initvectors, ijknames=["I", "J", "K1"] + resdatafiles, compdat_df, initvectors, ijknames=["I", "J", "K1"] ) if zonemap is None: # If no zonemap is submitted, search for zonemap in default location - zonemap = eclfiles.get_zonemap() + zonemap = resdatafiles.get_zonemap() if zonemap: logger.info("Merging zonemap into compdat") diff --git a/ecl2df/config_jobs/CSV2ECL b/res2df/config_jobs/CSV2RES similarity index 79% rename from ecl2df/config_jobs/CSV2ECL rename to res2df/config_jobs/CSV2RES index 65890bc2a..1f3d42955 100644 --- a/ecl2df/config_jobs/CSV2ECL +++ b/res2df/config_jobs/CSV2RES @@ -1,4 +1,4 @@ -EXECUTABLE csv2ecl +EXECUTABLE csv2res ARGLIST "--verbose" "--output" diff --git a/ecl2df/config_jobs/ECL2CSV b/res2df/config_jobs/RES2CSV similarity index 94% rename from ecl2df/config_jobs/ECL2CSV rename to res2df/config_jobs/RES2CSV index 3f879339b..8be172aa1 100644 --- a/ecl2df/config_jobs/ECL2CSV +++ b/res2df/config_jobs/RES2CSV @@ -1,4 +1,4 @@ -EXECUTABLE ecl2csv +EXECUTABLE res2csv DEFAULT "" DEFAULT "" diff --git a/ecl2df/constants.py b/res2df/constants.py similarity index 86% rename from ecl2df/constants.py rename to res2df/constants.py index 545430ed8..75ed52176 100644 --- a/ecl2df/constants.py +++ b/res2df/constants.py @@ -1,4 +1,4 @@ -"""Constants for use in ecl2df.""" +"""Constants for use in res2df.""" # This is a magic filename that means read/write from/to stdout # This makes it impossible to write to a file called "-" on disk diff --git a/ecl2df/csv2ecl.py b/res2df/csv2res.py similarity index 73% rename from ecl2df/csv2ecl.py rename to 
res2df/csv2res.py index 4cab5970b..19867fc22 100644 --- a/ecl2df/csv2ecl.py +++ b/res2df/csv2res.py @@ -1,26 +1,26 @@ #!/usr/bin/env python """ -Convert dataframes (in ecl2df format) to Eclipse include files, +Convert dataframes (in res2df format) to include files, for selected keywords """ import argparse import sys -from ecl2df import __version__, equil, pvt, satfunc, summary, vfp +from res2df import __version__, equil, pvt, satfunc, summary, vfp # String constants in use for generating ERT forward model documentation: -DESCRIPTION: str = """Convert CSV files into Eclipse include files. Uses the command -line utility ``csv2ecl``. Run ``csv2ecl --help`` to see which subcommands are supported. +DESCRIPTION: str = """Convert CSV files into include files. Uses the command +line utility ``csv2res``. Run ``csv2res --help`` to see which subcommands are supported. No options other than the output file is possible when used directly as a forward model. When writing synthetic summary files, the ECLBASE with no filename suffix is expected as the OUTPUT argument.""" CATEGORY: str = "utility.eclipse" EXAMPLES: str = ( "``FORWARD_MODEL " - "CSV2ECL(=equil, =equil.csv, " + "CSV2RES(=equil, =equil.csv, " "=eclipse/include/equil.inc)``" - "CSV2ECL(=summary, =summary-monthly.csv, " + "CSV2RES(=summary, =summary-monthly.csv, " "=eclipse/model/MONTHLYSUMMARY)``" ) @@ -30,8 +30,8 @@ def get_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=( - "csv2ecl (" + __version__ + ") is a command line frontend to ecl2df. " - "Documentation at https://equinor.github.io/ecl2df/ " + "csv2res (" + __version__ + ") is a command line frontend to res2df. " + "Documentation at https://equinor.github.io/res2df/ " ), ) parser.add_argument( @@ -51,8 +51,8 @@ def get_parser() -> argparse.ArgumentParser: summary_parser = subparsers.add_parser( "summary", - help="Write EclSum UNSMRY files", - description=("Write Eclipse UNSMRY files from CSV files."), + help="Write UNSMRY files", + description=("Write UNSMRY files from CSV files."), ) summary.fill_reverse_parser(summary_parser) summary_parser.set_defaults(func=summary.summary_reverse_main) @@ -62,7 +62,7 @@ def get_parser() -> argparse.ArgumentParser: help="Write SOLUTION include files", description=( "Write SOLUTION keywords (EQUIL, RSVD, RVVD) " - "to Eclipse include files from CSV in ecl2df format." + "to include files from CSV in res2df format." ), ) equil.fill_reverse_parser(equil_parser) @@ -71,9 +71,7 @@ def get_parser() -> argparse.ArgumentParser: pvt_parser = subparsers.add_parser( "pvt", help="Write PVT include files", - description=( - "Write Eclipse include files from CSV files on the ecl2df format." - ), + description=("Write include files from CSV files with res2df format."), ) pvt.fill_reverse_parser(pvt_parser) pvt_parser.set_defaults(func=pvt.pvt_reverse_main) @@ -82,8 +80,8 @@ def get_parser() -> argparse.ArgumentParser: "satfunc", help="Write saturation function include files", description=( - "Write saturation function include files from CSV files on " - "the ecl2df format." + "Write saturation function include files from CSV files with " + "res2df format." ), ) satfunc.fill_reverse_parser(satfunc_parser) @@ -93,7 +91,7 @@ def get_parser() -> argparse.ArgumentParser: "vfp", help="Write VFPPROD/VFPINJ include files", description=( - "Write VFPPROD/VFPINJ include files from CSV files on the ecl2df format." 
+ "Write VFPPROD/VFPINJ include files from CSV files with res2df format." ), ) vfp.fill_reverse_parser(vfp_parser) diff --git a/ecl2df/equil.py b/res2df/equil.py similarity index 76% rename from ecl2df/equil.py rename to res2df/equil.py index 2a91e6796..e27141309 100644 --- a/ecl2df/equil.py +++ b/res2df/equil.py @@ -1,5 +1,5 @@ """ -Extract EQUIL from an Eclipse deck as Pandas DataFrame +Extract EQUIL from a :term:`.DATA file` as Pandas DataFrame """ import argparse @@ -9,9 +9,9 @@ import pandas as pd -from ecl2df import common, getLogger_ecl2csv, inferdims +from res2df import common, getLogger_res2csv, inferdims -from .eclfiles import EclFiles +from .resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -72,7 +72,7 @@ def df( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResdataFiles, "opm.libopmcommon_python.Deck"], keywords: Optional[List[str]] = None, ntequl: Optional[int] = None, ) -> pd.DataFrame: @@ -80,20 +80,20 @@ def df( PBVD and PDVD. How each data value in the EQUIL records are to be interpreted - depends on the phase configuration in the deck, which means + depends on the phase configuration in the :term:`deck`, which means that we need more than the EQUIL section alone to determine the dataframe. - If ntequl is not supplied and EQLDIMS is not in the deck, the + If ntequl is not supplied and EQLDIMS is not in the :term:`deck`, the equil data is not well defined in terms of OPM. This means that we have to infer the correct number of EQUIL lines from what gives us successful parsing from OPM. In those cases, the - deck must be supplied as a string, if not, extra EQUIL lines - are possibly already removed by the OPM parser in eclfiles.str2deck(). + :term:`deck` must be supplied as a string, if not, extra EQUIL lines + are possibly already removed by the OPM parser in resdatafiles.str2deck(). Arguments: - deck: Eclipse deck or string with deck. If - not string, EQLDIMS must be present in the deck. + deck: :term:`.DATA file` or string with :term:`deck`. If + not string, EQLDIMS must be present in the :term:`deck`. keywords: Requested keywords for which to extract data. ntequl: If not None, should state the NTEQUL in EQLDIMS. If None and EQLDIMS is not present, it will be inferred. @@ -101,8 +101,8 @@ def df( Return: pd.DataFrame, at least with columns KEYWORD and EQLNUM """ - if isinstance(deck, EclFiles): - deck = deck.get_ecldeck() + if isinstance(deck, ResdataFiles): + deck = deck.get_deck() deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) ntequl = deck["EQLDIMS"][0][inferdims.DIMS_POS["NTEQUL"]].get_int(0) @@ -132,16 +132,16 @@ def df( def rsvd_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None ) -> pd.DataFrame: - """Extract RSVD data from a deck + """Extract RSVD data from a :term:`deck` Args: deck - ntequl: Number of EQLNUM regions in deck. Will - be inferred if not present in deck + ntequl: Number of EQLNUM regions in :term:`deck`. 
Will + be inferred if not present in :term:`deck` """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "RVVD", renamer=RENAMERS["RVVD"], recordcountername="EQLNUM" ) @@ -166,16 +166,16 @@ def pbvd_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None ) -> pd.DataFrame: - """Extract PBVD data from a deck + """Extract PBVD data from a :term:`deck` Args: deck - ntequl: Number of EQLNUM regions in deck. Will - be inferred if not present in deck + ntequl: Number of EQLNUM regions in :term:`deck`. Will + be inferred if not present in :term:`deck` """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "PBVD", renamer=RENAMERS["PBVD"], recordcountername="EQLNUM" ) @@ -183,27 +183,27 @@ def pdvd_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None ) -> pd.DataFrame: - """Extract PDVD data from a deck + """Extract PDVD data from a :term:`deck` Args: deck - ntequl: Number of EQLNUM regions in deck. Will - be inferred if not present in deck + ntequl: Number of EQLNUM regions in :term:`deck`. Will + be inferred if not present in :term:`deck` """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "PDVD", renamer=RENAMERS["PDVD"], recordcountername="EQLNUM" ) def phases_from_deck(deck: Union[str, "opm.libopmcommon_python.Deck"]) -> str: - """Determined the set of phases from a deck, as + """Determine the set of phases from a :term:`deck`, as a string with values "oil-water-gas", "gas-water", "oil-water", or "oil-gas" Args: - deck: A parsed deck or DATA-file as a string + deck: A parsed :term:`deck` or :term:`.DATA file` as a string Returns: String with phase configuration. Empty string if inconclusive. @@ -247,14 +247,14 @@ def phases_from_columns(columns: List[str]) -> str: def equil_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None ) -> pd.DataFrame: - """Extract EQUIL data from a deck + """Extract EQUIL data from a :term:`deck` - If the deck is supplied as a string object, the number + If the :term:`deck` is supplied as a string object, the number of EQLNUM regions will be inferred if needed. Args: deck - ntequl: Number of EQLNUM regions in deck. + ntequl: Number of EQLNUM regions in :term:`deck`.
""" if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) @@ -264,7 +264,7 @@ def equil_fromdeck( raise ValueError(f"Could not determine phase configuration, got '{phases}'") columnrenamer = RENAMERS[phases_from_deck(deck)] - dataframe = common.ecl_keyworddata_to_df( + dataframe = common.keyworddata_to_df( deck, "EQUIL", renamer=columnrenamer, recordcountername="EQLNUM" ) @@ -283,7 +283,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (argparse.ArgumentParser or argparse.subparser): parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of Eclipse DATA file.") + parser.add_argument( + "DATAFILE", help="Name of the .DATA input file for the reservoir simulator" + ) parser.add_argument( "-o", "--output", @@ -305,18 +307,18 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Fill a parser for the operation dataframe -> eclipse include file""" + """Fill a parser for the operation dataframe -> resdata :term:`include file`""" return common.fill_reverse_parser(parser, "EQUIL, RSVD++", "solution.inc") def equil_main(args) -> None: """Read from disk and write CSV back to disk""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - if eclfiles: - deck = eclfiles.get_ecldeck() + resdatafiles = ResdataFiles(args.DATAFILE) + if resdatafiles: + deck = resdatafiles.get_deck() if "EQLDIMS" in deck: # Things are easier when a full deck with (correct) EQLDIMS # is supplied: @@ -342,28 +344,30 @@ def equil_main(args) -> None: def equil_reverse_main(args) -> None: - """Entry-point for module, for command line utility for CSV to Eclipse""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + """Entry-point for module, for command line utility + for CSV to reservoir simulator :term:`include files ` + """ + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) equil_df = pd.read_csv(args.csvfile) logger.info("Parsed %s", args.csvfile) - inc_string = df2ecl(equil_df, keywords=args.keywords) + inc_string = df2res(equil_df, keywords=args.keywords) common.write_inc_stdout_file(inc_string, args.output) -def df2ecl( +def df2res( equil_df: pd.DataFrame, keywords: Optional[List[str]] = None, comments: Optional[Dict[str, str]] = None, withphases: bool = False, filename: Optional[str] = None, ) -> str: - """Generate Eclipse include strings from dataframes with - solution (EQUIL, RSVD++) data. + """Generate string contents of :term:`include files ` + from dataframes with solution (EQUIL, RSVD++) data. Args: - equil_df: Dataframe with data on ecl2df format. + equil_df: Dataframe with res2df format. keywords: List of keywords to include. Must be supported and present in the incoming dataframe. 
comments: Dictionary indexed by keyword with comments to be @@ -381,7 +385,7 @@ def df2ecl( string += ( phases_from_columns(equil_df.columns).upper().replace("-", "\n") + "\n\n" ) - string += common.df2ecl( + string += common.df2res( equil_df, keywords=keywords, comments=comments, @@ -392,8 +396,8 @@ def df2ecl( return string -def df2ecl_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print EQUIL keyword with data +def df2res_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for EQUIL keyword Args: dframe: Containing EQUIL data @@ -418,7 +422,7 @@ def df2ecl_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: phases = phases_from_columns(subset.columns) - return common.generic_ecltable( + return common.generic_deck_table( subset, "EQUIL", renamer=RENAMERS[phases], # type: ignore @@ -427,8 +431,8 @@ def df2ecl_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: ) -def df2ecl_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print RSVD keyword with data +def df2res_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for RSVD keyword This data consists of one table (rs as a function of depth) for each EQLNUM @@ -437,11 +441,11 @@ def df2ecl_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: dframe: Containing RSVD data comment Text that will be included as a comment """ - return _df2ecl_equilfuncs("RSVD", dframe, comment) + return _df2res_equilfuncs("RSVD", dframe, comment) -def df2ecl_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print RVVD keyword with data +def df2res_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for RVVD keyword This data consists of one table (rv as a function of depth) for each EQLNUM @@ -450,11 +454,11 @@ def df2ecl_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: dframe: Containing RVVD data comment: Text that will be included as a comment """ - return _df2ecl_equilfuncs("RVVD", dframe, comment) + return _df2res_equilfuncs("RVVD", dframe, comment) -def df2ecl_pbvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print PBVD keyword with data +def df2res_pbvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for PBVD keyword Bubble-point versus depth @@ -465,11 +469,11 @@ def df2ecl_pbvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: dframe: Containing PBVD data comment: Text that will be included as a comment """ - return _df2ecl_equilfuncs("PBVD", dframe, comment) + return _df2res_equilfuncs("PBVD", dframe, comment) -def df2ecl_pdvd(dframe: pd.DataFrame, comment: Optional[str] = None): - """Print PDVD keyword with data. +def df2res_pdvd(dframe: pd.DataFrame, comment: Optional[str] = None): + """Create string with :term:`include file` contents for PDVD keyword. Dew-point versus depth. 
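A brief usage sketch for the renamed solution-data functions (``MYDATADECK.DATA`` is a placeholder, and the keyword selection is illustrative):

.. code-block:: python

    from res2df import ResdataFiles, equil

    resdatafiles = ResdataFiles("MYDATADECK.DATA")
    equil_df = equil.df(resdatafiles, keywords=["EQUIL", "RSVD"])

    # Generate include file contents for the EQUIL keyword only:
    print(equil.df2res(equil_df, keywords=["EQUIL"]))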
@@ -480,13 +484,13 @@ dframe: Containing PDVD data comment: Text that will be included as a comment """ - return _df2ecl_equilfuncs("PDVD", dframe, comment) + return _df2res_equilfuncs("PDVD", dframe, comment) -def _df2ecl_equilfuncs( +def _df2res_equilfuncs( keyword: str, dframe: pd.DataFrame, comment: Optional[str] = None ) -> str: - """Internal function to be used by df2ecl_<keyword>() functions""" + """Internal function to be used by df2res_<keyword>() functions""" if dframe.empty: return "-- No data!" string = f"{keyword}\n" @@ -500,9 +504,9 @@ else: subset = dframe[dframe["KEYWORD"] == keyword] - def _df2ecl_equilfuncs_eqlnum(dframe: pd.DataFrame) -> str: - """Print one equilibriation function table for a specific - EQLNUM + def _df2res_equilfuncs_eqlnum(dframe: pd.DataFrame) -> str: + """Create string with :term:`include file` contents + for one equilibration function table for a specific EQLNUM Args: dframe (pd.DataFrame): Cropped to only contain data for one EQLNUM @@ -519,5 +523,5 @@ subset = subset.set_index("EQLNUM").sort_index() for eqlnum in subset.index.unique(): string += f"-- EQLNUM: {eqlnum}\n" - string += _df2ecl_equilfuncs_eqlnum(subset[subset.index == eqlnum]) + string += _df2res_equilfuncs_eqlnum(subset[subset.index == eqlnum]) return string + "\n" diff --git a/ecl2df/faults.py b/res2df/faults.py similarity index 79% rename from ecl2df/faults.py rename to res2df/faults.py index c168106fe..f1b1880ac 100644 --- a/ecl2df/faults.py +++ b/res2df/faults.py @@ -10,8 +10,8 @@ import pandas as pd -from ecl2df import EclFiles, getLogger_ecl2csv -from ecl2df.common import parse_opmio_deckrecord, write_dframe_stdout_file +from res2df import ResdataFiles, getLogger_res2csv +from res2df.common import parse_opmio_deckrecord, write_dframe_stdout_file try: # Needed for mypy @@ -30,16 +30,16 @@ ALLOWED_FACES = ["X", "Y", "Z", "I", "J", "K", "X-", "Y-", "Z-", "I-", "J-", "K-"] -def df(deck: Union[EclFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame: - """Produce a dataframe of fault data from a deck + def df(deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame: + """Produce a dataframe of fault data from a :term:`deck` All data for the keyword FAULTS will be returned.
Args: - deck: Eclipse deck + deck: A :term:`deck` """ - if isinstance(deck, EclFiles): - deck = deck.get_ecldeck() + if isinstance(deck, ResdataFiles): + deck = deck.get_deck() # In[91]: list(deck['FAULTS'][0]) # Out[91]: [[u'F1'], [36], [36], [41], [42], [1], [14], [u'I']] @@ -69,7 +69,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: Arguments: parser: argparse.ArgumentParser or argparse.subparser """ - parser.add_argument("DATAFILE", help="Name of Eclipse DATA file.") + parser.add_argument( + "DATAFILE", help="Name of the .DATA input file for the reservoir simulator" + ) parser.add_argument( "-o", "--output", @@ -83,12 +85,12 @@ def faults_main(args) -> None: """Read from disk and write CSV back to disk""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - if eclfiles: - deck = eclfiles.get_ecldeck() + resdatafiles = ResdataFiles(args.DATAFILE) + if resdatafiles: + deck = resdatafiles.get_deck() faults_df = df(deck) write_dframe_stdout_file( faults_df, diff --git a/ecl2df/fipreports.py b/res2df/fipreports.py similarity index 90% rename from ecl2df/fipreports.py rename to res2df/fipreports.py index de94f3329..8f7b2f60c 100644 --- a/ecl2df/fipreports.py +++ b/res2df/fipreports.py @@ -1,5 +1,5 @@ # pylint: disable=c0301 -"""Extract FIP region reports from Eclipse PRT file""" +"""Extract FIP region reports from a PRT file""" import argparse import datetime @@ -10,8 +10,8 @@ import numpy as np import pandas as pd -from ecl2df import EclFiles, getLogger_ecl2csv -from ecl2df.common import parse_ecl_month, write_dframe_stdout_file +from res2df import ResdataFiles, getLogger_res2csv +from res2df.common import parse_month, write_dframe_stdout_file logger = logging.getLogger(__name__) @@ -97,21 +97,21 @@ def float_or_nan(string: str) -> float: ) -def df(prtfile: Union[str, EclFiles], fipname: str = "FIPNUM") -> pd.DataFrame: +def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFrame: """ - Parses a PRT file from Eclipse and finds FIPXXXX REGION REPORT blocks and + Parses a PRT file and finds FIPXXXX REGION REPORT blocks and organizes those numbers into a dataframe Each row in the dataframe represents one parsed line in the PRT file, with DATE and region index added. Args: - prtfile: filename (PRT) or an EclFiles object + prtfile: filename (PRT) or a ResdataFiles object fipname: The name of the report regions, FIPNUM, FIPZON or whatever Max length of the string is 8, the first three characters must be FIP, - and the next 3 characters must be unique for a given Eclipse deck. + and the next 3 characters must be unique for a given :term:`.DATA file`.
""" - if isinstance(prtfile, EclFiles): + if isinstance(prtfile, ResdataFiles): prtfile = prtfile.get_prtfilename() if not fipname.startswith("FIP"): raise ValueError("fipname must start with FIP") @@ -134,7 +134,7 @@ def df(prtfile: Union[str, EclFiles], fipname: str = "FIPNUM") -> pd.DataFrame: ".+" + fipname + r"\s+REPORT\s+REGION\s+(\d+)", re.IGNORECASE ) - # Flag for whether we are supposedly parsing a PRT file made by OPM flow: + # Flag for whether we are supposedly parsing a PRT file made by OPM Flow: opm = False with open(prtfile, encoding="utf-8") as prt_fh: @@ -152,7 +152,7 @@ def df(prtfile: Union[str, EclFiles], fipname: str = "FIPNUM") -> pd.DataFrame: if matcheddate is not None: newdate = datetime.date( year=int(matcheddate.group(3)), - month=parse_ecl_month(matcheddate.group(2).upper()), + month=parse_month(matcheddate.group(2).upper()), day=int(matcheddate.group(1)), ) if newdate != date: @@ -194,7 +194,7 @@ def df(prtfile: Union[str, EclFiles], fipname: str = "FIPNUM") -> pd.DataFrame: def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """Fill parser with command line arguments""" - parser.add_argument("PRTFILE", type=str, help="Eclipse PRT file (or DATA file)") + parser.add_argument("PRTFILE", type=str, help="PRT file (or .DATA file)") parser.add_argument( "--fipname", type=str, @@ -211,12 +211,12 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fipreports_main(args) -> None: """Command line API""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) if args.PRTFILE.endswith(".PRT"): prtfile = args.PRTFILE else: - prtfile = EclFiles(args.PRTFILE).get_prtfilename() + prtfile = ResdataFiles(args.PRTFILE).get_prtfilename() dframe = df(prtfile, args.fipname) write_dframe_stdout_file(dframe, args.output, index=False, caller_logger=logger) diff --git a/ecl2df/grid.py b/res2df/grid.py similarity index 88% rename from ecl2df/grid.py rename to res2df/grid.py index 2af2755ca..0a59fbf96 100644 --- a/ecl2df/grid.py +++ b/res2df/grid.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -Extract grid information from Eclipse output files as Dataframes. +Extract grid information from grid files as Dataframes. Each cell in the grid correspond to one row. 
@@ -24,29 +24,30 @@ import pyarrow.feather from resdata.resfile import ResdataFile -from ecl2df import __version__, common, getLogger_ecl2csv +from res2df import __version__, common, getLogger_res2csv -from .eclfiles import EclFiles +from .resdatafiles import ResdataFiles logger = logging.getLogger(__name__) -def get_available_rst_dates(eclfiles: EclFiles) -> List[datetime.date]: +def get_available_rst_dates(resdatafiles: ResdataFiles) -> List[datetime.date]: """Return a list of datetime objects for the available dates in the RST file""" - report_indices = ResdataFile.file_report_list(eclfiles.get_rstfilename()) + report_indices = ResdataFile.file_report_list(resdatafiles.get_rstfilename()) logger.info( "Restart report indices (count %s): %s", str(len(report_indices)), str(report_indices), ) return [ - eclfiles.get_rstfile().iget_restart_sim_time(index).date() + resdatafiles.get_rstfile().iget_restart_sim_time(index).date() for index in range(0, len(report_indices)) ] def dates2rstindices( - eclfiles: EclFiles, dates: Optional[Union[str, datetime.date, List[datetime.date]]] + resdatafiles: ResdataFiles, + dates: Optional[Union[str, datetime.date, List[datetime.date]]], ) -> Tuple[List[int], List[datetime.date], List[str]]: """Return the restart index/indices for a given datetime or list of datetimes @@ -68,7 +69,7 @@ def dates2rstindices( if not dates: return ([], [], []) - availabledates = get_available_rst_dates(eclfiles) + availabledates = get_available_rst_dates(resdatafiles) supportedmnemonics = ["first", "last", "all"] @@ -141,7 +142,7 @@ def _df2pyarrow(dframe: pd.DataFrame) -> pyarrow.Table: def rst2df( - eclfiles: EclFiles, + resdatafiles: ResdataFiles, date: Union[str, datetime.date, List[datetime.date]], vectors: Optional[Union[str, List[str]]] = None, dateinheaders: bool = False, @@ -155,7 +156,7 @@ def rst2df( when merging with the grid geometry dataframe. Args: - eclfiles: EclFiles object + resdatafiles: ResdataFiles object date: datetime.date or list of datetime.date, must correspond to an existing date. If list, it forces dateinheaders to be True. 
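To illustrate how the date mnemonics handled by dates2rstindices() surface in the public grid API, a sketch (the .DATA filename and the vector list are placeholders):

.. code-block:: python

    from res2df import ResdataFiles, grid

    resdatafiles = ResdataFiles("MYDATADECK.DATA")

    # "first", "last" and "all" are the supported mnemonics; explicit
    # datetime.date objects (or lists of them) are also accepted.
    dframe = grid.df(resdatafiles, vectors=["PORO", "PRESSURE"], rstdates="last")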
@@ -183,16 +184,16 @@ # First task is to determine the restart index to extract # data for: - (rstindices, chosendates, isodates) = dates2rstindices(eclfiles, date) + (rstindices, chosendates, isodates) = dates2rstindices(resdatafiles, date) logger.info("Extracting restart information at dates %s", str(isodates)) # Determine the available restart vectors, we only include # those with correct length, meaning that they are defined # for all active cells: - activecells = eclfiles.get_egrid().getNumActive() + activecells = resdatafiles.get_egrid().getNumActive() rstvectors = [] - for vec in eclfiles.get_rstfile().headers: + for vec in resdatafiles.get_rstfile().headers: if vec[1] == activecells and any( fnmatch.fnmatch(vec[0], key) for key in vectors ): @@ -211,7 +212,7 @@ present_rstvectors = [] for vec in rstvectors: try: - if eclfiles.get_rstfile().iget_named_kw(vec, rstindex): + if resdatafiles.get_rstfile().iget_named_kw(vec, rstindex): present_rstvectors.append(vec) except IndexError: pass @@ -232,7 +233,7 @@ columns=present_rstvectors, data=np.hstack( [ - eclfiles.get_rstfile() + resdatafiles.get_rstfile() .iget_named_kw(vec, rstindex) .numpyView() .reshape(-1, 1) @@ -256,7 +257,7 @@ if dateinheaders or len(rstindices) > 1 and not stackdates: rst_df.columns = [colname + "@" + datestr for colname in rst_df.columns] - # libecl emits a number around -1.0000000200408773e+20 which + # resdata emits a number around -1.0000000200408773e+20 which # should be considered Not-a-number rst_df = rst_df.where(rst_df > -1e20 + 1e13) # some trial and error @@ -279,15 +280,15 @@ def gridgeometry2df( - eclfiles: EclFiles, zonemap: Optional[Dict[int, str]] = None + resdatafiles: ResdataFiles, zonemap: Optional[Dict[int, str]] = None ) -> pd.DataFrame: - """Produce a Pandas Dataframe with Eclipse gridgeometry + """Produce a Pandas Dataframe with grid geometry - Order is significant, and is determined by the order from libecl, and used + Order is significant, and is determined by the order from resdata, and used when merging with other dataframes with cell-data. Args: - eclfiles: object holding the Eclipse output files. + resdatafiles: object holding the :term:`output files <output file>`. zonemap: A zonemap dictionary mapping every K index to a string, which will be put in a column ZONE. If none is provided, a zonemap from a default file will be looked for. Provide an empty
Args: - eclfiles: Object representing the Eclipse output files + resdatafiles: Object representing the :term:`output files <output file>` dframe: Table data to merge with initvectors: Names of INIT vectors to merge in. ijknames: Three strings that determine the I, J and K columns to use @@ -385,7 +386,7 @@ if len(ijknames) != 3: raise ValueError("ijknames must be a list of length 3") assert isinstance(dframe, pd.DataFrame) - assert isinstance(eclfiles, EclFiles) + assert isinstance(resdatafiles, ResdataFiles) if not set(ijknames).issubset(dframe.columns): raise ValueError( @@ -398,12 +399,12 @@ assert isinstance(initvectors, list) logger.info("Merging INIT data %s into dataframe", str(initvectors)) - ijkinit = df(eclfiles, vectors=initvectors)[["I", "J", "K"] + initvectors] + ijkinit = df(resdatafiles, vectors=initvectors)[["I", "J", "K"] + initvectors] return pd.merge(dframe, ijkinit, left_on=ijknames, right_on=["I", "J", "K"]) def init2df( - eclfiles: EclFiles, vectors: Optional[Union[str, List[str]]] = None + resdatafiles: ResdataFiles, vectors: Optional[Union[str, List[str]]] = None ) -> pd.DataFrame: """Extract information from INIT file with cell data @@ -413,7 +414,7 @@ Order is significant, as index is used for merging Args: - eclfiles: Object that can serve the EGRID and INIT files + resdatafiles: Object that can serve the EGRID and INIT files vectors: List of vectors to include, glob-style wildcards supported. """ @@ -422,8 +423,8 @@ if not isinstance(vectors, list): vectors = [vectors] - init = eclfiles.get_initfile() - egrid = eclfiles.get_egrid() + init = resdatafiles.get_initfile() + egrid = resdatafiles.get_egrid() # Build list of vector names to include: usevectors = [] @@ -446,7 +447,7 @@ ] ), ) - # libecl emits a number around -1.0000000200408773e+20 which + # resdata emits a number around -1.0000000200408773e+20 which # should be considered Not-a-number init_df = init_df.where(init_df > -1e20 + 1e13) # some trial and error @@ -470,7 +471,7 @@ def df( - eclfiles: EclFiles, + resdatafiles: ResdataFiles, vectors: Union[str, List[str]] = "*", dropconstants: bool = False, rstdates: Optional[Union[str, datetime.date, List[datetime.date]]] = None, @@ -482,11 +483,11 @@ Grid information (center coordinates x, y, z), cell indices (i, j, k) (indices follow the Eclipse convention starting at 1, not zero as in resdata), properties from INIT, and optionally any time dependent data from Restart files. Args: - eclfiles: Handle to an Eclipse case + resdatafiles: Handle to a simulator case vectors: Vectors to include, wildcards supported. Used to match both INIT vectors and RESTART vectors. @@ -506,12 +507,12 @@ dictionary to avoid looking for the default file, and no ZONE column will be added. """ - gridgeom = gridgeometry2df(eclfiles, zonemap) - initdf = init2df(eclfiles, vectors=vectors) + gridgeom = gridgeometry2df(resdatafiles, zonemap) + initdf = init2df(resdatafiles, vectors=vectors) rst_df = None if rstdates: rst_df = rst2df( - eclfiles, + resdatafiles, rstdates, vectors=vectors, dateinheaders=dateinheaders, @@ -539,7 +540,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of Eclipse DATA file. " + "INIT and EGRID file must lie alongside.", + help="Name of the .DATA input file for the reservoir simulator."
+ + " There must exist .INIT and .EGRID files with the same path and basename.", ) parser.add_argument( "--vectors", @@ -614,17 +616,17 @@ def drop_constant_columns( return dframe.drop(columnstodelete, axis=1) -def df2ecl( +def df2res( grid_df: pd.DataFrame, keywords: Union[str, List[str]], - eclfiles: Optional[EclFiles] = None, + resdatafiles: Optional[ResdataFiles] = None, dtype: Optional[Type] = None, filename: Optional[str] = None, nocomments: bool = False, ) -> str: """ - Write an include file with grid data keyword, like PERMX, PORO, - FIPNUM etc, for the GRID section of the Eclipse deck. + Write a :term:`include file` contents with grid data keyword, like PERMX, PORO, + FIPNUM etc, for the GRID section of the :term:`.DATA file`. Output (returned as string and optionally written to file) will then contain f.ex:: @@ -643,7 +645,7 @@ def df2ecl( The grid can contain both active and inactive cells. keywords: The keyword(s) to export, with one value for every cell. - eclfiles: If provided, the total cell count for the grid + resdatafiles: If provided, the total cell count for the grid will be requested from this object. If not, it will be *guessed* from the maximum number of GLOBAL_INDEX, which can be under-estimated in the corner-case that the last cells are inactive. @@ -670,10 +672,10 @@ def df2ecl( # Figure out the total number of cells for which we need to export data for: global_size = None active_cells = None - if eclfiles is not None: - if eclfiles.get_egrid() is not None: - global_size = eclfiles.get_egrid().get_global_size() - active_cells = eclfiles.get_egrid().getNumActive() + if resdatafiles is not None: + if resdatafiles.get_egrid() is not None: + global_size = resdatafiles.get_egrid().get_global_size() + active_cells = resdatafiles.get_egrid().getNumActive() if "GLOBAL_INDEX" not in grid_df: logger.warning( @@ -695,9 +697,9 @@ def df2ecl( active_cells = len(grid_df[grid_df.index >= 0]) logger.warning("Global grid size estimated to %s", str(global_size)) - ecl2df_header = ( + res2df_header = ( "Output file printed by " - + "ecl2df.grid " + + "res2df.grid " + __version__ + "\n" + " at " @@ -706,7 +708,7 @@ def df2ecl( string = "" if not nocomments: - string += common.comment_formatter(ecl2df_header) + string += common.comment_formatter(res2df_header) string += "\n" # If we have NaNs in the dataframe, we will be more careful (costs memory) @@ -728,14 +730,14 @@ def df2ecl( logger.warning( ( "Mismatch between dumped vector length " - "%d from df2ecl and assumed grid size %d" + "%d from df2res and assumed grid size %d" ), len(vector), global_size, ) logger.warning("Data will be dumped, but may error in simulator") strvector = " ".join([str(x) for x in vector]) - strvector = common.runlength_eclcompress(strvector) + strvector = common.runlength_compress(strvector) string += keyword + "\n" indent = " " * 5 @@ -760,12 +762,12 @@ def df2ecl( def grid_main(args) -> None: """This is the command line API""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) + resdatafiles = ResdataFiles(args.DATAFILE) grid_df = df( - eclfiles, + resdatafiles, vectors=args.vectors, rstdates=args.rstdates, dropconstants=args.dropconstants, diff --git a/ecl2df/gruptree.py b/res2df/gruptree.py similarity index 95% rename from ecl2df/gruptree.py rename to res2df/gruptree.py index 0e95344ac..6dc4552aa 100644 --- a/ecl2df/gruptree.py +++ b/res2df/gruptree.py @@ 
-1,4 +1,4 @@ -"""Extract GRUPTREE information from an Eclipse deck""" +"""Extract GRUPTREE information from a :term:`.DATA file`""" import argparse import collections @@ -19,8 +19,8 @@ except ImportError: pass -from ecl2df import EclFiles, getLogger_ecl2csv -from ecl2df.common import ( +from res2df import ResdataFiles, getLogger_res2csv +from res2df.common import ( parse_opmio_date_rec, parse_opmio_deckrecord, parse_opmio_tstep_rec, @@ -31,11 +31,11 @@ def df( - deck: Union[EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"], startdate: Optional[datetime.date] = None, welspecs: bool = True, ) -> pd.DataFrame: - """Extract all group information from a deck + """Extract all group information from a :term:`deck` and present as a Pandas Dataframe of all edges. Properties for nodes given in GRUPNET/NODEPROP will @@ -52,14 +52,14 @@ def df( previous tree is copied and a new complete tree is added to the dataframe tagged with the new date. - startdate is only relevant when START is not in the deck. + startdate is only relevant when START is not in the :term:`deck`. Args: - deck: opm.io Deck object or EclFiles + deck: opm.io Deck object or ResdataFiles Returns: pd.DataFrame with one row pr edge. Empty dataframe if no - information is found in deck. + information is found in :term:`deck`. """ date: Optional[datetime.date] @@ -68,8 +68,8 @@ def df( else: date = None - if isinstance(deck, EclFiles): - deck = deck.get_ecldeck() + if isinstance(deck, ResdataFiles): + deck = deck.get_deck() edgerecords = [] # list of dict of rows containing an edge. nodedatarecords = [] @@ -392,7 +392,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (argparse.ArgumentParser or argparse.subparser): parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of Eclipse DATA file.") + parser.add_argument( + "DATAFILE", help="Name of the .DATA input file for the reservoir simulator" + ) parser.add_argument( "-o", "--output", @@ -447,14 +449,14 @@ def prettyprint(dframe: pd.DataFrame) -> str: def gruptree_main(args) -> None: """Entry-point for module, for command line utility.""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) if not args.output and not args.prettyprint: print("Nothing to do. 
Set --output or --prettyprint") sys.exit(0) - eclfiles = EclFiles(args.DATAFILE) - dframe = df(eclfiles.get_ecldeck(), startdate=args.startdate) + resdatafiles = ResdataFiles(args.DATAFILE) + dframe = df(resdatafiles.get_deck(), startdate=args.startdate) if args.prettyprint: if "DATE" in dframe: print(prettyprint(dframe)) diff --git a/ecl2df/hook_implementations/__init__.py b/res2df/hook_implementations/__init__.py similarity index 100% rename from ecl2df/hook_implementations/__init__.py rename to res2df/hook_implementations/__init__.py diff --git a/ecl2df/hook_implementations/jobs.py b/res2df/hook_implementations/jobs.py similarity index 85% rename from ecl2df/hook_implementations/jobs.py rename to res2df/hook_implementations/jobs.py index d4f7485e0..7efcf09c2 100644 --- a/ecl2df/hook_implementations/jobs.py +++ b/res2df/hook_implementations/jobs.py @@ -18,7 +18,7 @@ def decorator(func): def _get_jobs_from_directory(directory): - resource_directory = Path(sys.modules["ecl2df"].__file__).parent / directory + resource_directory = Path(sys.modules["res2df"].__file__).parent / directory all_files = [ resource_directory / filename @@ -29,7 +29,7 @@ def _get_jobs_from_directory(directory): @hook_implementation -@plugin_response(plugin_name="ecl2df") +@plugin_response(plugin_name="res2df") def installable_jobs(): return _get_jobs_from_directory("config_jobs") @@ -44,13 +44,13 @@ def _get_module_variable_if_exists(module_name, variable_name, default=""): @hook_implementation -@plugin_response(plugin_name="ecl2df") +@plugin_response(plugin_name="res2df") def job_documentation(job_name): - ecl2df_jobs = set(installable_jobs().data.keys()) - if job_name not in ecl2df_jobs: + res2df_jobs = set(installable_jobs().data.keys()) + if job_name not in res2df_jobs: return None - module_name = f"ecl2df.{job_name.lower()}" + module_name = f"res2df.{job_name.lower()}" description = _get_module_variable_if_exists( module_name=module_name, variable_name="DESCRIPTION" diff --git a/ecl2df/inferdims.py b/res2df/inferdims.py similarity index 83% rename from ecl2df/inferdims.py rename to res2df/inferdims.py index ed76f049c..3b8506b34 100644 --- a/ecl2df/inferdims.py +++ b/res2df/inferdims.py @@ -1,6 +1,6 @@ """ Support module for inferring EQLDIMS and TABDIMS from incomplete -Eclipse 100 decks (typically single include-files) +reservoir simulator decks (typically single include-files) """ import logging @@ -9,10 +9,10 @@ try: import opm.io except ImportError: - # Let parts of ecl2df work without OPM: + # Let parts of res2df work without OPM: pass -from ecl2df import EclFiles +from res2df import ResdataFiles logger = logging.getLogger(__name__) @@ -21,9 +21,9 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: - """Guess the correct dimension count for an incoming deck (string) + """Guess the correct dimension count for an incoming :term:`deck` (string) - The incoming deck must in string form, if not, extra data is most + The incoming :term:`deck` must in string form, if not, extra data is most likely already removed by the opm.io parser. 
TABDIMS or EQLDIMS must not be present @@ -31,7 +31,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: stricter mode, to detect the correct table dimensionality Arguments: - deck: String containing an Eclipse deck or only a few Eclipse keywords + deck: String containing a :term:`.DATA file` or only a few keywords dimkeyword: Either TABDIMS or EQLDIMS dimitem: The element number in TABDIMS/EQLDIMS to modify Returns: @@ -48,7 +48,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: if dimitem not in [0]: raise ValueError("Only item 0 in EQLDIMS can be estimated") - # A less than ecl2df-standard permissive opm.io, when using + # A less than res2df-standard permissive opm.io, when using # this one opm.io will fail if there are extra records # in tables (if NTSFUN in TABDIMS is wrong f.ex): opmioparser_recovery_fail_extra_records = [ @@ -68,7 +68,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: deckstring, dimkeyword, dimitem, dimcountguess, nowarn=True ) try: - EclFiles.str2deck( + ResdataFiles.str2deck( deck_candidate, parsecontext=opm.io.ParseContext( opmioparser_recovery_fail_extra_records @@ -94,15 +94,15 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: def inject_dimcount( deckstr: str, dimkeyword: str, dimitem: int, dimvalue: int, nowarn: bool = False ) -> str: - """Insert a TABDIMS with NTSFUN into a deck + """Insert a TABDIMS with NTSFUN into a :term:`deck` string This is simple string manipulation, not opm.io - deck manipulation (which might be possible to do). + :term:`deck` manipulation (which might be possible to do). This function is to be wrapped by inject_xxxdims_ntxxx() Arguments: - deckstr: A string containing a partial deck (f.ex only + deckstr: A string containing a partial :term:`deck` (f.ex only the SWOF keyword). dimkeyword: Either TABDIMS or EQLDIMS dimitem: Item 0 (NTSFUN) or 1 (NTPVT) of TABDIMS, only 0 for EQLDIMS. @@ -112,7 +112,7 @@ def inject_dimcount( nowarn: By default it will warn if this function is run on a deckstr with TABDIMS/EQLDIMS present. Mute this if True. Returns: - New deck with TABDIMS/EQLDIMS prepended. + New :term:`deck` string with TABDIMS/EQLDIMS prepended. """ assert dimvalue > 0, "dimvalue must be larger than zero" if dimkeyword not in ["TABDIMS", "EQLDIMS"]: @@ -146,19 +146,19 @@ def inject_xxxdims_ntxxx( deck: Union[str, "opm.libopmcommon_python.Deck"], ntxxx_value: Optional[int] = None, ) -> "opm.libopmcommon_python.Deck": - """Ensures TABDIMS/EQLDIMS is present in a deck. + """Ensures TABDIMS/EQLDIMS is present in a :term:`deck`. - If ntxxx_value=None and ntxxx_name not in the deck, ntxxx_name will - be inferred through trial-and-error parsing of the deck, and then injected - into the deck. + If ntxxx_value=None and ntxxx_name not in the :term:`deck`, ntxxx_name will + be inferred through trial-and-error parsing of the :term:`deck`, and then injected + into the :term:`deck`. Args: xxxdims: TABDIMS or EQLDIMS ntxxx_name: NTPVT, NTEQUL or NTSFUN deck: A data :term:`deck`. If ntxxx_name is to be - estimated this *must* be a string and not a fully parsed deck. + estimated this *must* be a string and not a fully parsed :term:`deck`. ntxxx_value: Supply this if ntxxx_name is known, but not present in the - deck, this will override any guessing. If the deck already + deck, this will override any guessing. If the :term:`deck` already contains XXXDIMS, this will be ignored.
Returns: @@ -170,7 +170,7 @@ def inject_xxxdims_ntxxx( if xxxdims in deck and ntxxx_value is None: # Then we have nothing to do, but ensure we parse a potential string to a deck if isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResdataFiles.str2deck(deck) return deck if xxxdims in deck and ntxxx_value is not None: @@ -195,6 +195,6 @@ def inject_xxxdims_ntxxx( str(deck), xxxdims, DIMS_POS[ntxxx_name], ntxxx_estimate, nowarn=True ) # Overwrite the deck object - deck = EclFiles.str2deck(augmented_strdeck) + deck = ResdataFiles.str2deck(augmented_strdeck) return deck diff --git a/ecl2df/nnc.py b/res2df/nnc.py similarity index 83% rename from ecl2df/nnc.py rename to res2df/nnc.py index b201ecac2..9ced3ea6e 100644 --- a/ecl2df/nnc.py +++ b/res2df/nnc.py @@ -1,5 +1,6 @@ """ -Extract non-neighbour connection (NNC) information from Eclipse output files. +Extract non-neighbour connection (NNC) information from reservoir +simulator :term:`output files <output file>`. """ import argparse import datetime @@ -10,13 +11,15 @@ import pandas as pd -from ecl2df import EclFiles, __version__, common, getLogger_ecl2csv, grid -from ecl2df.common import write_dframe_stdout_file +from res2df import ResdataFiles, __version__, common, getLogger_res2csv, grid +from res2df.common import write_dframe_stdout_file logger: logging.Logger = logging.getLogger(__name__) -def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.DataFrame: +def df( + resdatafiles: ResdataFiles, coords: bool = False, pillars: bool = False +) -> pd.DataFrame: """Produce a Pandas Dataframe with NNC information A NNC is a pair of cells that are not next to each other @@ -28,8 +31,8 @@ def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.Da between the two cells) Args: - eclfiles: object that can serve EclFile and EclGrid - on demand + resdatafiles: object that can serve + :term:`reservoir simulator output files <output file>` on demand. coords: Set to True if you want the midpoint of the two connected cells to be computed and added to the columns X, Y and Z. @@ -39,9 +42,9 @@ def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.Da Returns: Empty if no NNC information found. """ - egrid_file = eclfiles.get_egridfile() - egrid_grid = eclfiles.get_egrid() - init_file = eclfiles.get_initfile() + egrid_file = resdatafiles.get_egridfile() + egrid_grid = resdatafiles.get_egrid() + init_file = resdatafiles.get_initfile() if not ("NNC1" in egrid_file and "NNC2" in egrid_file): logger.warning("No NNC data in EGRID") @@ -51,7 +54,7 @@ def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.Da return pd.DataFrame() # Grid indices for first cell in cell pairs, into a vertical - # vector. The indices are "global" in libecl terms, and are + # vector. The indices are "global" in resdata terms, and are # 1-based (FORTRAN).
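The inferdims helpers above can be exercised on a standalone keyword string; a minimal sketch, assuming opm is installed and using a made-up two-table SWOF::

    from res2df import inferdims

    swof = """
    SWOF
      0.1 0.0 1.0 0.0
      0.9 1.0 0.0 0.0 /
      0.2 0.0 1.0 0.0
      0.8 1.0 0.0 0.0 /
    """
    # Trial-and-error parsing; expected to infer NTSFUN=2 here:
    ntsfun = inferdims.guess_dim(swof, "TABDIMS", 0)
    # Or get a parsed deck back with a matching TABDIMS injected:
    deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTSFUN", swof)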
Convert to zero-based before sending to get_ijk() nnc1 = egrid_file["NNC1"][0].numpy_view().reshape(-1, 1) logger.info( @@ -98,11 +101,11 @@ def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.Da if pillars: nncdf = filter_vertical(nncdf) if coords: - nncdf = add_nnc_coords(nncdf, eclfiles) + nncdf = add_nnc_coords(nncdf, resdatafiles) return nncdf -def add_nnc_coords(nncdf: pd.DataFrame, eclfiles: EclFiles) -> pd.DataFrame: +def add_nnc_coords(nncdf: pd.DataFrame, resdatafiles: ResdataFiles) -> pd.DataFrame: """Add columns X, Y and Z for the connection midpoint This extracts x, y and z for (I1, J1, K1) and (I2, J2, K2) @@ -110,12 +113,12 @@ def add_nnc_coords(nncdf: pd.DataFrame, eclfiles: EclFiles) -> pd.DataFrame: Arguments: nncdf: With grid index columns (I1, J1, K1, I2, J2, K2) - eclfiles: Object used to fetch grid data from EGRID. + resdatafiles: Object used to fetch grid data from EGRID. Returns: Incoming dataframe augmented with the columns X, Y and Z. """ - gridgeometry = grid.gridgeometry2df(eclfiles) + gridgeometry = grid.gridgeometry2df(resdatafiles) gnncdf = pd.merge( nncdf, gridgeometry, @@ -175,7 +178,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of Eclipse DATA file. " + "INIT and EGRID file must lie alongside.", + help="Name of the .DATA input file for the reservoir simulator." + + " There must exist .INIT and .EGRID files with the same path and basename.", ) parser.add_argument( "-c", @@ -197,7 +201,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: return parser -def df2ecl_editnnc( +def df2res_editnnc( nnc_df: pd.DataFrame, filename: Optional[str] = None, nocomments: bool = False ) -> str: """Write an EDITNNC keyword @@ -230,12 +234,12 @@ def df2ecl_editnnc( in the produced string/file Returns: - string with the EDITNNC keyword. + :term:`include file` content string with the EDITNNC keyword. 
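A minimal sketch of the renamed nnc API (``MYCASE.DATA`` is hypothetical, and the ``TRANM`` multiplier column is an assumption based on the upstream EDITNNC documentation)::

    from res2df import ResdataFiles, nnc

    resdatafiles = ResdataFiles("MYCASE.DATA")  # .EGRID and .INIT must lie alongside
    nnc_df = nnc.df(resdatafiles, coords=True)  # adds X, Y and Z midpoint columns
    nnc_df["TRANM"] = 0.1  # scale all NNC transmissibilities by 0.1
    print(nnc.df2res_editnnc(nnc_df))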
""" string = "" - ecl2df_header = ( - "Output file printed by ecl2df.nnc" + res2df_header = ( + "Output file printed by res2df.nnc" + " " + __version__ + "\n" @@ -243,7 +247,7 @@ def df2ecl_editnnc( + str(datetime.datetime.now()) ) if not nocomments: - string += common.comment_formatter(ecl2df_header) + string += common.comment_formatter(res2df_header) string += "\n" if "DIR" in nnc_df: @@ -275,12 +279,12 @@ def df2ecl_editnnc( def nnc_main(args) -> None: - """Command line access point from main() or from ecl2csv via subparser""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + """Command line access point from main() or from res2csv via subparser""" + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - nncdf = df(eclfiles, coords=args.coords, pillars=args.pillars) + resdatafiles = ResdataFiles(args.DATAFILE) + nncdf = df(resdatafiles, coords=args.coords, pillars=args.pillars) write_dframe_stdout_file( nncdf, args.output, diff --git a/ecl2df/opmkeywords/BRANPROP b/res2df/opmkeywords/BRANPROP similarity index 100% rename from ecl2df/opmkeywords/BRANPROP rename to res2df/opmkeywords/BRANPROP diff --git a/ecl2df/opmkeywords/COMPDAT b/res2df/opmkeywords/COMPDAT similarity index 100% rename from ecl2df/opmkeywords/COMPDAT rename to res2df/opmkeywords/COMPDAT diff --git a/ecl2df/opmkeywords/COMPLUMP b/res2df/opmkeywords/COMPLUMP similarity index 100% rename from ecl2df/opmkeywords/COMPLUMP rename to res2df/opmkeywords/COMPLUMP diff --git a/ecl2df/opmkeywords/COMPSEGS b/res2df/opmkeywords/COMPSEGS similarity index 100% rename from ecl2df/opmkeywords/COMPSEGS rename to res2df/opmkeywords/COMPSEGS diff --git a/ecl2df/opmkeywords/DENSITY b/res2df/opmkeywords/DENSITY similarity index 100% rename from ecl2df/opmkeywords/DENSITY rename to res2df/opmkeywords/DENSITY diff --git a/ecl2df/opmkeywords/EQLDIMS b/res2df/opmkeywords/EQLDIMS similarity index 100% rename from ecl2df/opmkeywords/EQLDIMS rename to res2df/opmkeywords/EQLDIMS diff --git a/ecl2df/opmkeywords/EQUIL b/res2df/opmkeywords/EQUIL similarity index 100% rename from ecl2df/opmkeywords/EQUIL rename to res2df/opmkeywords/EQUIL diff --git a/ecl2df/opmkeywords/FAULTS b/res2df/opmkeywords/FAULTS similarity index 100% rename from ecl2df/opmkeywords/FAULTS rename to res2df/opmkeywords/FAULTS diff --git a/ecl2df/opmkeywords/GRUPNET b/res2df/opmkeywords/GRUPNET similarity index 100% rename from ecl2df/opmkeywords/GRUPNET rename to res2df/opmkeywords/GRUPNET diff --git a/ecl2df/opmkeywords/GRUPTREE b/res2df/opmkeywords/GRUPTREE similarity index 100% rename from ecl2df/opmkeywords/GRUPTREE rename to res2df/opmkeywords/GRUPTREE diff --git a/ecl2df/opmkeywords/NODEPROP b/res2df/opmkeywords/NODEPROP similarity index 100% rename from ecl2df/opmkeywords/NODEPROP rename to res2df/opmkeywords/NODEPROP diff --git a/ecl2df/opmkeywords/PBVD b/res2df/opmkeywords/PBVD similarity index 100% rename from ecl2df/opmkeywords/PBVD rename to res2df/opmkeywords/PBVD diff --git a/ecl2df/opmkeywords/PDVD b/res2df/opmkeywords/PDVD similarity index 100% rename from ecl2df/opmkeywords/PDVD rename to res2df/opmkeywords/PDVD diff --git a/ecl2df/opmkeywords/PVDG b/res2df/opmkeywords/PVDG similarity index 100% rename from ecl2df/opmkeywords/PVDG rename to res2df/opmkeywords/PVDG diff --git a/ecl2df/opmkeywords/PVDO b/res2df/opmkeywords/PVDO similarity index 100% rename from ecl2df/opmkeywords/PVDO rename to res2df/opmkeywords/PVDO diff --git a/ecl2df/opmkeywords/PVTG 
b/res2df/opmkeywords/PVTG similarity index 100% rename from ecl2df/opmkeywords/PVTG rename to res2df/opmkeywords/PVTG diff --git a/ecl2df/opmkeywords/PVTO b/res2df/opmkeywords/PVTO similarity index 100% rename from ecl2df/opmkeywords/PVTO rename to res2df/opmkeywords/PVTO diff --git a/ecl2df/opmkeywords/PVTW b/res2df/opmkeywords/PVTW similarity index 100% rename from ecl2df/opmkeywords/PVTW rename to res2df/opmkeywords/PVTW diff --git a/ecl2df/opmkeywords/ROCK b/res2df/opmkeywords/ROCK similarity index 100% rename from ecl2df/opmkeywords/ROCK rename to res2df/opmkeywords/ROCK diff --git a/ecl2df/opmkeywords/RSVD b/res2df/opmkeywords/RSVD similarity index 100% rename from ecl2df/opmkeywords/RSVD rename to res2df/opmkeywords/RSVD diff --git a/ecl2df/opmkeywords/RVVD b/res2df/opmkeywords/RVVD similarity index 100% rename from ecl2df/opmkeywords/RVVD rename to res2df/opmkeywords/RVVD diff --git a/ecl2df/opmkeywords/SGFN b/res2df/opmkeywords/SGFN similarity index 100% rename from ecl2df/opmkeywords/SGFN rename to res2df/opmkeywords/SGFN diff --git a/ecl2df/opmkeywords/SGOF b/res2df/opmkeywords/SGOF similarity index 100% rename from ecl2df/opmkeywords/SGOF rename to res2df/opmkeywords/SGOF diff --git a/ecl2df/opmkeywords/SGWFN b/res2df/opmkeywords/SGWFN similarity index 100% rename from ecl2df/opmkeywords/SGWFN rename to res2df/opmkeywords/SGWFN diff --git a/ecl2df/opmkeywords/SLGOF b/res2df/opmkeywords/SLGOF similarity index 100% rename from ecl2df/opmkeywords/SLGOF rename to res2df/opmkeywords/SLGOF diff --git a/ecl2df/opmkeywords/SOF2 b/res2df/opmkeywords/SOF2 similarity index 100% rename from ecl2df/opmkeywords/SOF2 rename to res2df/opmkeywords/SOF2 diff --git a/ecl2df/opmkeywords/SOF3 b/res2df/opmkeywords/SOF3 similarity index 100% rename from ecl2df/opmkeywords/SOF3 rename to res2df/opmkeywords/SOF3 diff --git a/ecl2df/opmkeywords/SWFN b/res2df/opmkeywords/SWFN similarity index 100% rename from ecl2df/opmkeywords/SWFN rename to res2df/opmkeywords/SWFN diff --git a/ecl2df/opmkeywords/SWOF b/res2df/opmkeywords/SWOF similarity index 100% rename from ecl2df/opmkeywords/SWOF rename to res2df/opmkeywords/SWOF diff --git a/ecl2df/opmkeywords/TABDIMS b/res2df/opmkeywords/TABDIMS similarity index 100% rename from ecl2df/opmkeywords/TABDIMS rename to res2df/opmkeywords/TABDIMS diff --git a/ecl2df/opmkeywords/VFPINJ b/res2df/opmkeywords/VFPINJ similarity index 100% rename from ecl2df/opmkeywords/VFPINJ rename to res2df/opmkeywords/VFPINJ diff --git a/ecl2df/opmkeywords/VFPPROD b/res2df/opmkeywords/VFPPROD similarity index 100% rename from ecl2df/opmkeywords/VFPPROD rename to res2df/opmkeywords/VFPPROD diff --git a/ecl2df/opmkeywords/WCONHIST b/res2df/opmkeywords/WCONHIST similarity index 100% rename from ecl2df/opmkeywords/WCONHIST rename to res2df/opmkeywords/WCONHIST diff --git a/ecl2df/opmkeywords/WCONINJE b/res2df/opmkeywords/WCONINJE similarity index 100% rename from ecl2df/opmkeywords/WCONINJE rename to res2df/opmkeywords/WCONINJE diff --git a/ecl2df/opmkeywords/WCONINJH b/res2df/opmkeywords/WCONINJH similarity index 100% rename from ecl2df/opmkeywords/WCONINJH rename to res2df/opmkeywords/WCONINJH diff --git a/ecl2df/opmkeywords/WCONPROD b/res2df/opmkeywords/WCONPROD similarity index 100% rename from ecl2df/opmkeywords/WCONPROD rename to res2df/opmkeywords/WCONPROD diff --git a/ecl2df/opmkeywords/WELOPEN b/res2df/opmkeywords/WELOPEN similarity index 100% rename from ecl2df/opmkeywords/WELOPEN rename to res2df/opmkeywords/WELOPEN diff --git a/ecl2df/opmkeywords/WELSEGS 
b/res2df/opmkeywords/WELSEGS similarity index 100% rename from ecl2df/opmkeywords/WELSEGS rename to res2df/opmkeywords/WELSEGS diff --git a/ecl2df/opmkeywords/WELSPECS b/res2df/opmkeywords/WELSPECS similarity index 100% rename from ecl2df/opmkeywords/WELSPECS rename to res2df/opmkeywords/WELSPECS diff --git a/ecl2df/opmkeywords/WLIST b/res2df/opmkeywords/WLIST similarity index 100% rename from ecl2df/opmkeywords/WLIST rename to res2df/opmkeywords/WLIST diff --git a/ecl2df/opmkeywords/WSEGAICD b/res2df/opmkeywords/WSEGAICD similarity index 100% rename from ecl2df/opmkeywords/WSEGAICD rename to res2df/opmkeywords/WSEGAICD diff --git a/ecl2df/opmkeywords/WSEGSICD b/res2df/opmkeywords/WSEGSICD similarity index 100% rename from ecl2df/opmkeywords/WSEGSICD rename to res2df/opmkeywords/WSEGSICD diff --git a/ecl2df/opmkeywords/WSEGVALV b/res2df/opmkeywords/WSEGVALV similarity index 100% rename from ecl2df/opmkeywords/WSEGVALV rename to res2df/opmkeywords/WSEGVALV diff --git a/ecl2df/opmkeywords/readme b/res2df/opmkeywords/readme similarity index 79% rename from ecl2df/opmkeywords/readme rename to res2df/opmkeywords/readme index fe7237899..fd341e5df 100644 --- a/ecl2df/opmkeywords/readme +++ b/res2df/opmkeywords/readme @@ -1,6 +1,6 @@ This directory contains JSON files downloaded from https://github.com/OPM/opm-common/tree/master/src/opm/parser/eclipse/share/keywords/000_Eclipse100 -When a new keyword is to be supported by ecl2df, add it in +When a new keyword is to be supported by res2df, add it in runmetoupdate.sh AND add it to the list of supported keywords in common.py diff --git a/ecl2df/opmkeywords/runmetoupdate.sh b/res2df/opmkeywords/runmetoupdate.sh similarity index 100% rename from ecl2df/opmkeywords/runmetoupdate.sh rename to res2df/opmkeywords/runmetoupdate.sh diff --git a/ecl2df/parameters.py b/res2df/parameters.py similarity index 91% rename from ecl2df/parameters.py rename to res2df/parameters.py index b97af9c57..706734bf9 100644 --- a/ecl2df/parameters.py +++ b/res2df/parameters.py @@ -1,5 +1,5 @@ """Support module for extra files with key-value information -related to Eclipse runs""" +related to simulator runs""" import json import logging @@ -10,13 +10,13 @@ import pandas as pd import yaml -from ecl2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles logger = logging.getLogger(__name__) def find_parameter_files( - ecldeck_or_eclpath: Union[EclFiles, str, Path], filebase: str = "parameters" + deckpath: Union[ResdataFiles, str, Path], filebase: str = "parameters" ) -> List[Path]: """Locate a default prioritized list of files to try to read as key-value @@ -24,8 +24,8 @@ def find_parameter_files( current dir, one directory up, and two directories up. Args: - ecldeck_or_eclpath: Either an EclFiles object of - an Eclipse output set (only the corresponding path will be used), + deckpath: Either a ResdataFiles object of + a simulator output set (only the corresponding path will be used), or path to a file or directory, that will be used as a starting point for locating parameter files filebase: the base of filenames to look for. 
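The renamed parameters helper works standalone; a minimal sketch with a hypothetical path::

    from res2df.parameters import find_parameter_files

    # Searches next to the .DATA file, then one and two directories up,
    # for files with basename "parameters":
    paramfiles = find_parameter_files("model/MYCASE.DATA")
    print(paramfiles)  # list of pathlib.Path objects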
@@ -35,10 +35,10 @@ def find_parameter_files( """ eclbasepath: Path fname: str - if isinstance(ecldeck_or_eclpath, EclFiles): - eclbasepath = Path(ecldeck_or_eclpath.get_path()) - elif isinstance(ecldeck_or_eclpath, (str, Path)): - eclbasepath = Path(ecldeck_or_eclpath).parent.absolute() + if isinstance(deckpath, ResdataFiles): + eclbasepath = Path(deckpath.get_path()) + elif isinstance(deckpath, (str, Path)): + eclbasepath = Path(deckpath).parent.absolute() else: raise TypeError files_to_lookfor: List[str] = [ diff --git a/ecl2df/pillars.py b/res2df/pillars.py similarity index 95% rename from ecl2df/pillars.py rename to res2df/pillars.py index 5d6bf63f5..76e3d45f9 100644 --- a/ecl2df/pillars.py +++ b/res2df/pillars.py @@ -8,7 +8,7 @@ import dateutil.parser import pandas as pd -from ecl2df import EclFiles, common, getLogger_ecl2csv, grid +from res2df import ResdataFiles, common, getLogger_res2csv, grid logger: logging.Logger = logging.getLogger(__name__) @@ -33,7 +33,7 @@ def df( - eclfiles: EclFiles, + resdatafiles: ResdataFiles, region: Optional[str] = None, rstdates: Optional[Union[str, datetime.date, List[datetime.date]]] = None, soilcutoff: float = 0.2, @@ -83,9 +83,11 @@ def df( if region: vectors.append(region) vectors.extend(["POR*", "PERM*", "SWAT", "SGAS", "1OVERBO", "1OVERBG"]) - grid_df = grid.df(eclfiles, rstdates=rstdates, vectors=vectors, dateinheaders=True) + grid_df = grid.df( + resdatafiles, rstdates=rstdates, vectors=vectors, dateinheaders=True + ) - rstdates_iso = grid.dates2rstindices(eclfiles, rstdates)[2] + rstdates_iso = grid.dates2rstindices(resdatafiles, rstdates)[2] grid_df["PILLAR"] = grid_df["I"].astype(str) + "-" + grid_df["J"].astype(str) logger.info("Computing pillar statistics") @@ -330,12 +332,13 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help=("Name of Eclipse DATA file. " "INIT and EGRID file must lie alongside."), + help="Name of the .DATA input file for the reservoir simulator." + + " There must exist .INIT and .EGRID files with the same path and basename.", ) parser.add_argument( "--region", help=( - "Name of Eclipse region parameter for which to separate the computations. " + "Name of region parameter for which to separate the computations. " "Set to empty string to have no grouping (only by pillar)." ), type=str, @@ -411,13 +414,13 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def pillars_main(args) -> None: """This is the command line API""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) + resdatafiles = ResdataFiles(args.DATAFILE) dframe = df( - eclfiles, + resdatafiles, region=args.region, rstdates=args.rstdates, soilcutoff=args.soilcutoff, diff --git a/ecl2df/pvt.py b/res2df/pvt.py similarity index 81% rename from ecl2df/pvt.py rename to res2df/pvt.py index 3dd0a6101..c38222b0f 100644 --- a/ecl2df/pvt.py +++ b/res2df/pvt.py @@ -1,7 +1,7 @@ """ -Extract the PVT data from an Eclipse (input) deck as Pandas Dataframes +Extract the PVT data from a .DATA file as Pandas Dataframes -Data can be extracted from a full Eclipse deck or from individual files. +Data can be extracted from a complete deck or from individual files. 
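Going back to the pillars hunks above, a minimal sketch of the renamed API (``MYCASE.DATA``, the region choice and the rstdates mnemonic are assumptions)::

    from res2df import ResdataFiles, pillars

    resdatafiles = ResdataFiles("MYCASE.DATA")
    # Pillar statistics grouped by a region parameter, with contacts
    # computed at the last restart date:
    pillar_df = pillars.df(resdatafiles, region="EQLNUM", rstdates="last")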
""" import argparse @@ -11,7 +11,7 @@ import pandas as pd -from ecl2df import EclFiles, common, getLogger_ecl2csv, inferdims +from res2df import ResdataFiles, common, getLogger_res2csv, inferdims try: # Needed for mypy @@ -72,16 +72,16 @@ def pvtw_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract PVTW from a deck + """Extract PVTW from a :term:`deck` Args: deck - ntpvt: Number of PVT regions in deck. Will - be inferred if not present in deck. + ntpvt: Number of PVT regions in :term:`deck`. Will + be inferred if not present in :term:`deck`. """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "PVTW", renamer=RENAMERS["PVTW"], recordcountername="PVTNUM" ) @@ -89,16 +89,16 @@ def pvtw_fromdeck( def density_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract DENSITY from a deck + """Extract DENSITY from a :term:`deck` Args: deck - ntpvt: Number of PVT regions in deck. Will - be inferred if not present in deck. + ntpvt: Number of PVT regions in :term:`deck`. Will + be inferred if not present in :term:`deck`. """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "DENSITY", renamer=RENAMERS["DENSITY"], recordcountername="PVTNUM" ) @@ -106,16 +106,16 @@ def density_fromdeck( def rock_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract ROCK from a deck + """Extract ROCK from a :term:`deck` Args: deck - ntpvt: Number of PVT regions in deck. Will - be inferred if not present in deck. + ntpvt: Number of PVT regions in :term:`deck`. Will + be inferred if not present in :term:`deck`. """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "ROCK", renamer=RENAMERS["ROCK"], recordcountername="PVTNUM" ) @@ -123,16 +123,16 @@ def rock_fromdeck( def pvto_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract PVTO from a deck + """Extract PVTO from a :term:`deck` Args: deck - ntpvt: Number of PVT regions in deck. Will - be inferred if not present in deck. + ntpvt: Number of PVT regions in :term:`deck`. Will + be inferred if not present in :term:`deck`. 
""" if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - pvto_df = common.ecl_keyworddata_to_df( + pvto_df = common.keyworddata_to_df( deck, "PVTO", renamer=RENAMERS["PVTO"], emptyrecordcountername="PVTNUM" ) return pvto_df @@ -141,7 +141,7 @@ def pvto_fromdeck( def pvdo_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract PVDO from a deck + """Extract PVDO from a :term:`deck` Args: deck @@ -150,7 +150,7 @@ def pvdo_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - pvdg_df = common.ecl_keyworddata_to_df( + pvdg_df = common.keyworddata_to_df( deck, "PVDO", renamer=RENAMERS["PVDO"], recordcountername="PVTNUM" ) return pvdg_df @@ -159,16 +159,16 @@ def pvdo_fromdeck( def pvdg_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract PVDG from a deck + """Extract PVDG from a :term:`deck` Args: deck - ntpvt: Number of PVT regions in deck. Will - be inferred if not present in deck. + ntpvt: Number of PVT regions in :term:`deck`. Will + be inferred if not present in :term:`deck`. """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - pvdg_df = common.ecl_keyworddata_to_df( + pvdg_df = common.keyworddata_to_df( deck, "PVDG", renamer=RENAMERS["PVDG"], recordcountername="PVTNUM" ) return pvdg_df @@ -177,16 +177,16 @@ def pvdg_fromdeck( def pvtg_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract PVTG from a deck + """Extract PVTG from a :term:`deck` Args: deck - ntpvt: Number of PVT regions in deck. Will - be inferred if not present in deck. + ntpvt: Number of PVT regions in :term:`deck`. Will + be inferred if not present in :term:`deck`. """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - pvtg_df = common.ecl_keyworddata_to_df( + pvtg_df = common.keyworddata_to_df( deck, "PVTG", renamer=RENAMERS["PVTG"], emptyrecordcountername="PVTNUM" ) return pvtg_df @@ -197,28 +197,28 @@ def df( keywords: Optional[List[str]] = None, ntpvt: Optional[int] = None, ) -> pd.DataFrame: - """Extract all (most) PVT data from a deck. + """Extract all (most) PVT data from a :term:`deck`. - If you want to call this function on Eclipse include files, + If you want to call this function on :term:`include files `, read them in to strings as in this example: > pvt_df = pvt.df(open("pvt.inc").read()) Arguments: - deck: Incoming data deck. Always + deck: Incoming data :term:`deck`. Always supply as a string if you don't know TABDIMS-NTSFUN. keywords: List of keywords for which data is wanted. All data will be merged into one dataframe. - pvtnumcount: Number of PVTNUMs defined in the deck, only - needed if TABDIMS with NTPVT is not found in the deck. + pvtnumcount: Number of PVTNUMs defined in the :term:`deck`, only + needed if TABDIMS with NTPVT is not found in the :term:`deck`. If not supplied (or None) and NTPVT is not defined, it will be attempted inferred. 
Return: pd.DataFrame """ - if isinstance(deck, EclFiles): - deck = deck.get_ecldeck() + if isinstance(deck, ResdataFiles): + deck = deck.get_deck() deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) ntpvt = deck["TABDIMS"][0][inferdims.DIMS_POS["NTPVT"]].get_int(0) @@ -239,14 +239,16 @@ def df( def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Set up sys.argv parsers for parsing Eclipse deck or - include files into dataframes + """Set up sys.argv parsers for parsing :term:`.DATA file` or + :term:`include files <include file>` into dataframes Arguments: parser (ArgumentParser or subparser): parser to fill with arguments """ parser.add_argument( - "DATAFILE", help="Name of Eclipse DATA file or file with PVT keywords." + "DATAFILE", + help="Name of the .DATA input file for the reservoir simulator," + + " or file with PVT keywords.", ) parser.add_argument( "-o", @@ -269,7 +271,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Set up sys.argv parsers for writing Eclipse include files from + """Set up sys.argv parsers for writing :term:`include files <include file>` from dataframes (as CSV files) Arguments: @@ -280,13 +282,13 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar def pvt_main(args) -> None: """Entry-point for module, for command line utility for Eclipse to CSV""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) + resdatafiles = ResdataFiles(args.DATAFILE) logger.info("Parsed %s", args.DATAFILE) - if eclfiles: - deck = eclfiles.get_ecldeck() + if resdatafiles: + deck = resdatafiles.get_deck() if "TABDIMS" in deck: # Things are easier when a full deck with correct TABDIMS # is supplied: @@ -313,26 +315,27 @@ def pvt_main(args) -> None: def pvt_reverse_main(args) -> None: - """Entry-point for module, for command line utility for CSV to Eclipse""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + """Entry-point for module, for command line utility for CSV to simulator + :term:`deck`""" + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) pvt_df = pd.read_csv(args.csvfile) logger.info("Parsed %s", args.csvfile) - inc_string = df2ecl(pvt_df, keywords=args.keywords) + inc_string = df2res(pvt_df, keywords=args.keywords) common.write_inc_stdout_file(inc_string, args.output) -def df2ecl( +def df2res( pvt_df: pd.DataFrame, keywords: Optional[Union[str, List[str]]] = None, comments: Optional[Dict[str, str]] = None, filename: Optional[str] = None, ) -> str: - """Generate Eclipse include strings from PVT dataframes + """Generate resdata :term:`include file` content from PVT dataframes Args: - pvt_df: Dataframe with PVT data on ecl2df format. + pvt_df: Dataframe with PVT data in res2df format. keywords: List of keywords to include. Must be supported and present in the incoming dataframe. comments: Dictionary indexed by keyword with comments to be @@ -341,7 +344,7 @@ def df2ecl( filename: If supplied, the generated text will also be dumped to file.
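The reverse direction mirrors pvt_reverse_main() above; a minimal sketch (``pvt.csv`` is hypothetical)::

    import pandas as pd

    from res2df import pvt

    pvt_df = pd.read_csv("pvt.csv")  # CSV in the res2df PVT layout
    # Returns include file contents, and optionally writes it to pvt.inc:
    inc_string = pvt.df2res(pvt_df, keywords=["DENSITY", "PVTW"], filename="pvt.inc")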
""" - return common.df2ecl( + return common.df2res( pvt_df, keywords, comments, @@ -351,8 +354,8 @@ def df2ecl( ) -def df2ecl_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print ROCK keyword with data +def df2res_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for ROCK keyword Args: dframe (pd.DataFrame): Containing ROCK data @@ -379,8 +382,8 @@ def df2ecl_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: return string + "\n" -def df2ecl_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print DENSITY keyword with data +def df2res_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for DENSITY keyword Args: dframe: Containing DENSITY data @@ -408,8 +411,8 @@ def df2ecl_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: return string + "\n" -def df2ecl_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print PVTW keyword with data +def df2res_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for PVTW keyword PVTW is one line/record with data for a reference pressure for each PVTNUM. @@ -444,8 +447,8 @@ def df2ecl_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: return string + "\n" -def df2ecl_pvtg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print PVTG keyword with data +def df2res_pvtg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for PVTG keyword Args: dframe: Containing PVTG data @@ -472,7 +475,8 @@ def df2ecl_pvtg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: subset = subset.set_index("PVTNUM").sort_index() def _pvtg_pvtnum(dframe): - """Print PVTG-data for a specific PVTNUM""" + """Create string with :term:`include file` contents for + PVTG-data with a specific PVTNUM""" string = "" dframe = dframe.set_index("PRESSURE").sort_index() for p_gas in dframe.index.unique(): @@ -480,7 +484,8 @@ def _pvtg_pvtnum(dframe): return string + "/\n" def _pvtg_pvtnum_pg(dframe): - """Print PVTG-data for a particular gas phase pressure""" + """Create string with :term:`include file` contents for + PVTG-data with a particular gas phase pressure""" string = "" assert len(dframe.index.unique()) == 1 p_gas = dframe.index.values[0] @@ -503,8 +508,8 @@ def _pvtg_pvtnum_pg(dframe): return string + "\n" -def df2ecl_pvdg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print PVDG keyword with data +def df2res_pvdg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for PVDG keyword This data consists of one table (volumefactor and visosity as a function of pressure) pr. PVTNUM. @@ -530,7 +535,8 @@ def df2ecl_pvdg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: subset["PVTNUM"] = 1 def _pvdg_pvtnum(dframe): - """Print PVDG-data for a specific PVTNUM + """Create string with :term:`include file` contents for + PVDG-data with a specific PVTNUM Args: dframe (pd.DataFrame): Cropped to only contain the relevant data. 
@@ -553,8 +559,8 @@ def _pvdg_pvtnum(dframe): return string + "\n" -def df2ecl_pvdo(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print PVDO keyword with data +def df2res_pvdo(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for PVDO keyword Args: dframe: Containing PVDO data @@ -577,7 +583,8 @@ def df2ecl_pvdo(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: subset["PVTNUM"] = 1 def _pvdo_pvtnum(dframe: pd.DataFrame) -> str: - """Print PVDO-data for a specific PVTNUM + """Create string with :term:`include file` contents + for PVDO-data for a specific PVTNUM Args: dframe (pd.DataFrame): Cropped to only contain the relevant data. @@ -600,8 +607,8 @@ def _pvdo_pvtnum(dframe: pd.DataFrame) -> str: return string + "\n" -def df2ecl_pvto(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print PVTO-data from a dataframe +def df2res_pvto(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for PVTO-data from a dataframe Args: dframe: Containing PVTO data @@ -626,7 +633,8 @@ def df2ecl_pvto(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: subset = subset.set_index("PVTNUM").sort_index() def _pvto_pvtnum(dframe: pd.DataFrame) -> str: - """Print PVTO-data for a specific PVTNUM""" + """Create string with :term:`include file` contents + for PVTO-data for a specific PVTNUM""" string = "" dframe = dframe.set_index("RS").sort_index() for rs in dframe.index.unique(): @@ -634,7 +642,8 @@ def _pvto_pvtnum(dframe: pd.DataFrame) -> str: return string + "/\n" def _pvto_pvtnum_rs(dframe: pd.DataFrame) -> str: - """Print PVTO-data for a particular RS""" + """Create string with :term:`include file` contents + for PVTO-data for a particular RS""" string = "" assert len(dframe.index.unique()) == 1 rs = dframe.index.values[0] diff --git a/ecl2df/ecl2csv.py b/res2df/res2csv.py similarity index 89% rename from ecl2df/ecl2csv.py rename to res2df/res2csv.py index b0b702370..342dfbb69 100644 --- a/ecl2df/ecl2csv.py +++ b/res2df/res2csv.py @@ -1,7 +1,7 @@ #!/usr/bin/env python """ End-user command line tool for accessing functionality -in ecl2df +in res2df """ import argparse import functools @@ -9,30 +9,30 @@ import sys from typing import Optional -from ecl2df import __version__ +from res2df import __version__ # String constants in use for generating ERT forward model documentation: -DESCRIPTION: str = """Convert Eclipse input and output files into CSV files, -with the command line utility ``ecl2csv``. Run ``ecl2csv --help`` to see +DESCRIPTION: str = """Convert reservoir simulator input and output files into CSV files, -with the command line utility ``res2csv``. Run ``res2csv --help`` to see which subcommands are supported. For supplying options to subcommands, you can use the arguments ``<XARGn>`` where ``n`` goes from 1 to 10. -For more documentation, see https://equinor.github.io/ecl2df/. +For more documentation, see https://equinor.github.io/res2df/. """ CATEGORY: str = "utility.eclipse" EXAMPLES: str = """ -Outputting the EQUIL data from an Eclipse deck. The ECLBASE variable from your -ERT config is supplied implicitly:: +Outputting the EQUIL data from a .DATA file.
This is implicitly +supplied in ERT configs:: - FORWARD_MODEL ECL2CSV(<SUBCOMMAND>=equil, <OUTPUT>=equil.csv) + FORWARD_MODEL RES2CSV(<SUBCOMMAND>=equil, <OUTPUT>=equil.csv) For a yearly summary export of the realization, options have to be supplied with the XARG options:: - FORWARD_MODEL ECL2CSV(<SUBCOMMAND>=summary, <OUTPUT>=yearly.csv, <XARG1>="--time_index", <XARG2>="yearly") + FORWARD_MODEL RES2CSV(<SUBCOMMAND>=summary, <OUTPUT>=yearly.csv, <XARG1>="--time_index", <XARG2>="yearly") The quotes around double-dashed options are critical to avoid ERT taking ``--time_index`` for a comment. For more options, use ``<XARG3>`` etc. @@ -44,8 +44,8 @@ def get_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=( - "ecl2csv (" + __version__ + ") is a command line frontend to ecl2df. " - "Documentation at https://equinor.github.io/ecl2df/ " + "res2csv (" + __version__ + ") is a command line frontend to res2df. " + "Documentation at https://equinor.github.io/res2df/ " ), ) parser.add_argument( @@ -108,7 +108,7 @@ def get_parser() -> argparse.ArgumentParser: help="Extract transmissibilities from EGRID file", formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=( - "Extract transmissibilities (TRANX, TRANY, TRANZ) from Eclipse " + "Extract transmissibilities (TRANX, TRANY, TRANZ) from simulator " "binary output files. Each row represents a connection between a cell pair " "(I1, J1, K1) and (I2, J2, K2). It is possible to add INIT vectors for " "each of the cells in the cell pair, e.g. FIPNUM can be added as FIPNUM1 " @@ -130,7 +130,7 @@ def get_parser() -> argparse.ArgumentParser: "pvt", help="Extract PVT data", description=( - "Extract data for the PVT keywords in an Eclipse deck " + "Extract data for the PVT keywords in a .DATA file " "and merge all data into a single dataframe. " "Supported keywords are PVTO, PVDO, PVTG, PVDG, PVTW, " "ROCK and DENSITY. Gas phase pressure and oil phase " @@ -143,21 +143,21 @@ ) subparsers_dict["rft"] = subparsers.add_parser( "rft", - help=("Extract RFT data from Eclipse binary output files."), + help=("Extract RFT data from simulator binary output files."), formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=( - "Extract RFT data from Eclipse binary output files to CSV. " + "Extract RFT data from simulator binary output files to CSV. " "Each row in the resulting table represents one point in a " "particular well at a particular time. " "If multisegment wells are found, associated data " "to a connection is merged onto the same row as additional columns. " - "You need the Eclipse keyword WRFTPLT present in your DATA-file to get " + "You need the keyword WRFTPLT present in your .DATA file to get " "the data outputted." ), ) subparsers_dict["fipreports"] = subparsers.add_parser( "fipreports", - help=("Extract FIPxxxxx REPORT REGION data from Eclipse PRT output file."), + help=("Extract FIPxxxxx REPORT REGION data from PRT output file."), formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=( "Extract FIPxxxxx REPORT REGION data from PRT file. " @@ -178,7 +178,7 @@ ) subparsers_dict["fipreports"] = subparsers.add_parser( "fipreports", - help=("Extract FIPxxxxx REPORT REGION data from Eclipse PRT output file."), + help=("Extract FIPxxxxx REPORT REGION data from PRT output file."), formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=( "Extract FIPxxxxx REPORT REGION data from PRT file.
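Outside ERT, the same conversions can be scripted through the installed command line endpoint; a minimal sketch (``MYCASE.DATA`` is hypothetical)::

    import subprocess

    # Equivalent of the RES2CSV forward model examples above:
    subprocess.run(
        ["res2csv", "summary", "MYCASE.DATA", "--output", "yearly.csv",
         "--time_index", "yearly"],
        check=True,
    )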
" @@ -244,7 +244,7 @@ def get_parser() -> argparse.ArgumentParser: for submodule, subparser in subparsers_dict.items(): # Use the submodule's fill_parser() to add the submodule specific # arguments: - importlib.import_module("ecl2df." + submodule).fill_parser( # type: ignore + importlib.import_module("res2df." + submodule).fill_parser( # type: ignore subparser ) @@ -286,7 +286,7 @@ def run_subparser_main( Args: args (Namespace): argparse argument namespace - submodule: One of ecl2df's submodules. That module + submodule: One of res2df's submodules. That module must have a function called _main() parser: Used for raising errors. """ @@ -300,7 +300,7 @@ def run_subparser_main( if len(positionals) > 1 and parser is not None: parser.error(f"Unknown argument in {positionals}") - mod = importlib.import_module("ecl2df." + submodule) + mod = importlib.import_module("res2df." + submodule) main_func = getattr(mod, submodule + "_main") main_func(args) diff --git a/ecl2df/eclfiles.py b/res2df/resdatafiles.py similarity index 86% rename from ecl2df/eclfiles.py rename to res2df/resdatafiles.py index 91186b90b..e37452714 100644 --- a/ecl2df/eclfiles.py +++ b/res2df/resdatafiles.py @@ -1,4 +1,4 @@ -"""Module to hold Eclipse input and output filenames""" +"""Module to hold simulator input and output filenames""" import errno import logging @@ -18,7 +18,7 @@ from resdata.resfile import ResdataFile from resdata.summary import Summary -from ecl2df import common +from res2df import common logger = logging.getLogger(__name__) @@ -39,16 +39,16 @@ ] -class EclFiles(object): +class ResdataFiles(object): """ - Class for holding an Eclipse deck with result files + Class for holding reservoir simulator :term:`output files ` Exists only for convenience, so that loading of ResdataFile/Summary objects is easy for users, and with caching if wanted. - Various functions that needs some of the Eclipse output - (or input file) should be able to ask this class, and + Various functions that needs some of the simulator :term:`output ` + (or :term:`include file`) should be able to ask this class, and it should be loaded or served from cache. """ @@ -60,7 +60,7 @@ def __init__(self, eclbase): if ".DATA" in eclbase and not Path(eclbase).is_file(): logger.warning("File %s does not exist", eclbase) # (this is not an error, because it is possible - # to obtain summary without the DATA file being present) + # to obtain summary without the .DATA file being present) # Strip .DATA or . 
at end of eclbase: eclbase = rreplace(".DATA", "", eclbase) @@ -70,7 +70,7 @@ def __init__(self, eclbase): # Set class variables to None self._egridfile = None # Should be ResdataFile self._initfile = None # Should be ResdataFile - self._eclsum = None # Should be Summary + self._summary = None # Should be Summary self._egrid = None # Should be Grid @@ -80,11 +80,11 @@ def __init__(self, eclbase): self._deck = None def get_path(self) -> Path: - """Return the full path to the directory with the DATA file""" + """Return the full path to the directory with the .DATA file""" return Path(self._eclbase).absolute().parent - def get_ecldeck(self) -> "opm.libopmcommon_python.Deck": - """Return a opm.io deck of the DATA file""" + def get_deck(self) -> "opm.libopmcommon_python.Deck": + """Return an opm.io :term:`deck` of the .DATA file""" if not self._deck: if Path(self._eclbase + ".DATA").is_file(): deckfile = self._eclbase + ".DATA" @@ -100,7 +100,7 @@ def get_ecldeck(self) -> "opm.libopmcommon_python.Deck": def str2deck( string: str, parsecontext: Optional[List[Tuple[str, Any]]] = None ) -> "opm.libopmcommon_python.Deck": - """Produce a opm.io deck from a string, using permissive + """Produce an opm.io :term:`deck` from a string, using permissive parsing by default""" if parsecontext is None: parsecontext = opm.io.ParseContext(OPMIOPARSER_RECOVERY) @@ -109,10 +109,10 @@ def str2deck( @staticmethod def file2deck(filename: Union[str, Path]) -> "opm.libopmcommon_python.Deck": """Try to convert standalone files into opm.io Deck objects""" - return EclFiles.str2deck(Path(filename).read_text(encoding="utf-8")) + return ResdataFiles.str2deck(Path(filename).read_text(encoding="utf-8")) def get_egrid(self) -> Grid: - """Find and return EGRID file as an Grid object""" + """Find and return EGRID file as a Grid object""" if not self._egrid: egridfilename = self._eclbase + ".EGRID" if not Path(egridfilename).is_file(): @@ -138,26 +138,26 @@ def get_egridfile(self) -> ResdataFile: return self._egridfile - def get_eclsum(self, include_restart: bool = True) -> Summary: + def get_summary(self, include_restart: bool = True) -> Summary: """Find the summary file and return it as a Summary object Args: - include_restart: Sent to libecl for whether restart files + include_restart: Sent to resdata for whether restart files should be traversed.
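A minimal sketch of the renamed class in use (``MYCASE`` is hypothetical; the ".DATA" suffix is optional since it is stripped in __init__)::

    from res2df import ResdataFiles

    resdatafiles = ResdataFiles("MYCASE.DATA")
    deck = resdatafiles.get_deck()        # opm.io deck of MYCASE.DATA
    egrid = resdatafiles.get_egrid()      # Grid object from MYCASE.EGRID
    summary = resdatafiles.get_summary()  # Summary object from MYCASE.UNSMRY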
""" - if not self._eclsum: + if not self._summary: smryfilename = self._eclbase + ".UNSMRY" if not Path(smryfilename).is_file(): raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), smryfilename ) logger.info("Opening UNSMRY file: %s", smryfilename) - self._eclsum = Summary(smryfilename, include_restart=include_restart) - return self._eclsum + self._summary = Summary(smryfilename, include_restart=include_restart) + return self._summary def get_initfile(self) -> ResdataFile: - """Find and return the INIT file as an ResdataFile object""" + """Find and return the INIT file as a ResdataFile object""" if not self._initfile: initfilename = self._eclbase + ".INIT" if not Path(initfilename).is_file(): @@ -169,7 +169,7 @@ def get_initfile(self) -> ResdataFile: return self._initfile def get_rftfile(self) -> ResdataFile: - """Find and return the RFT file as an ResdataFile object""" + """Find and return the RFT file as a ResdataFile object""" if not self._rftfile: rftfilename = self._eclbase + ".RFT" if not Path(rftfilename).is_file(): @@ -181,7 +181,7 @@ def get_rftfile(self) -> ResdataFile: return self._rftfile def get_rstfile(self) -> ResdataFile: - """Find and return the UNRST file as an ResdataFile object""" + """Find and return the UNRST file as a ResdataFile object""" if not self._rstfile: rstfilename = self._eclbase + ".UNRST" if not Path(rstfilename).is_file(): @@ -207,7 +207,7 @@ def close(self) -> None: self._egridfile = None self._initfile = None # This is necessary for garbage collection to close the Summary file: - self._eclsum = None + self._summary = None self._rstfile = None self._rftfile = None diff --git a/ecl2df/rft.py b/res2df/rft.py similarity index 97% rename from ecl2df/rft.py rename to res2df/rft.py index 9cb22ec68..ec8f4e4d8 100644 --- a/ecl2df/rft.py +++ b/res2df/rft.py @@ -1,4 +1,4 @@ -"""Converter module for Eclipse RFT output files to Pandas Dataframes +"""Converter module for simulator RFT output files to Pandas Dataframes If MULTISEG wells are found, the segment data associated to a connection is merged onto the same row as additional columns, @@ -23,11 +23,11 @@ import pandas as pd from resdata.resfile import ResdataFile -from ecl2df import getLogger_ecl2csv +from res2df import getLogger_res2csv from .common import merge_zones, write_dframe_stdout_file -from .eclfiles import EclFiles from .gruptree import tree_from_dict +from .resdatafiles import ResdataFiles logger: logging.Logger = logging.getLogger(__name__) @@ -515,18 +515,20 @@ def add_extras(dframe: pd.DataFrame, inplace: bool = True) -> pd.DataFrame: def df( - eclfiles: EclFiles, wellname: Optional[str] = None, date: Optional[str] = None + resdatafiles: ResdataFiles, + wellname: Optional[str] = None, + date: Optional[str] = None, ) -> pd.DataFrame: """Loop over an RFT file and construct a dataframe representation of the data, ordered by well and date. Args: - eclfiles: Object used to locate the RFT file + resdatafiles: Object used to locate the RFT file wellname: If provided, only wells matching this string exactly will be included date: If provided, all other dates will be ignored. YYYY-MM-DD. 
""" - rftfile = eclfiles.get_rftfile() + rftfile = resdatafiles.get_rftfile() rftdata = [] for rftrecord in rftrecords(rftfile): @@ -632,7 +634,7 @@ def df( if rftdata_df.HOSTGRID.unique()[0].strip() == "": del rftdata_df["HOSTGRID"] - zonemap = eclfiles.get_zonemap() + zonemap = resdatafiles.get_zonemap() if zonemap: if "K" in rftdata_df: kname = "K" @@ -652,9 +654,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", help=( - "Name of Eclipse DATA file or RFT file. " - "If DATA file is provided, it will look for" - " the associated DATA file" + "Name of .DATA input file for the reservoir simulator, or RFT file. " + "If .DATA file is provided, it will look for" + " the associated .DATA file" ), ) parser.add_argument( @@ -674,15 +676,15 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def rft_main(args) -> None: """Entry-point for module, for command line utility""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) if args.DATAFILE.endswith(".RFT"): # Support the RFT file as an argument also: - eclfiles = EclFiles(args.DATAFILE.replace(".RFT", "") + ".DATA") + resdatafiles = ResdataFiles(args.DATAFILE.replace(".RFT", "") + ".DATA") else: - eclfiles = EclFiles(args.DATAFILE) - rft_df = df(eclfiles, wellname=args.wellname, date=args.date) + resdatafiles = ResdataFiles(args.DATAFILE) + rft_df = df(resdatafiles, wellname=args.wellname, date=args.date) if rft_df.empty: if args.wellname is not None or args.date is not None: logger.warning("No data. Check your well and/or date filter") diff --git a/ecl2df/satfunc.py b/res2df/satfunc.py similarity index 72% rename from ecl2df/satfunc.py rename to res2df/satfunc.py index 227038755..c3aa62fb6 100644 --- a/ecl2df/satfunc.py +++ b/res2df/satfunc.py @@ -1,8 +1,8 @@ """ Extract saturation function data (SWOF, SGOF, SWFN, etc.) -from an Eclipse deck as Pandas DataFrame. +from a .DATA file as Pandas DataFrame. -Data can be extracted from a full Eclipse deck (`*.DATA`) +Data can be extracted from a complete deck (`*.DATA`) or from individual files. Note that when parsing from individual files, it is @@ -25,10 +25,10 @@ except ImportError: pass -from ecl2df import common, getLogger_ecl2csv, inferdims +from res2df import common, getLogger_res2csv, inferdims from .common import write_dframe_stdout_file -from .eclfiles import EclFiles +from .resdatafiles import ResdataFiles logger: logging.Logger = logging.getLogger(__name__) @@ -69,32 +69,32 @@ def df( The two first columns in the dataframe are 'KEYWORD' (which can be SWOF, SGOF, etc.), and then SATNUM which is an index counter from 1 and onwards. Then follows the data for each individual keyword that - is found in the deck. + is found in the :term:`deck`. SATNUM data can only be parsed correctly if TABDIMS is present and stating how many saturation functions there should be. If you have a string with TABDIMS missing, you must supply - this as a string to this function, and not a parsed deck, as - the default parser in EclFiles is very permissive (and only + this as a string to this function, and not a parsed :term:`deck`, as + the default parser in ResdataFiles is very permissive (and only returning the first function by default). Arguments: - deck: Incoming data deck. Always + deck: Incoming data :term:`deck`. Always supply as a string if you don't know TABDIMS-NTSFUN. 
keywords: Requested keywords for which to extract data. - ntsfun: Number of SATNUMs defined in the deck, only - needed if TABDIMS with NTSFUN is not found in the deck. + ntsfun: Number of SATNUMs defined in the :term:`deck`, only + needed if TABDIMS with NTSFUN is not found in the :term:`deck`. If not supplied (or None) and NTSFUN is not defined, it will be attempted inferred. Return: pd.DataFrame, columns 'KEYWORD', 'SW', 'KRW', 'KROW', 'PC', .. """ - if isinstance(deck, EclFiles): - # NB: If this is done on include files and not on DATA files + if isinstance(deck, ResdataFiles): + # NB: If this is done on include files and not on .DATA files we can lose data for SATNUM > 1 - deck = deck.get_ecldeck() + deck = deck.get_deck() deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTSFUN", deck, ntsfun) assert "TABDIMS" in deck @@ -104,7 +104,7 @@ def df( for keyword in wanted_keywords: frames.append( interpolate_defaults( - common.ecl_keyworddata_to_df( + common.keyworddata_to_df( deck, keyword, renamer=RENAMERS[keyword], recordcountername="SATNUM" ).assign(KEYWORD=keyword) ) @@ -129,8 +129,9 @@ def df( def interpolate_defaults(dframe: pd.DataFrame) -> pd.DataFrame: """Interpolate NaN's linearly in saturation. - Saturation function tables in Eclipse decks can have certain values defaulted. - When parsed by common.ecl2df, these values are returned as np.nan. + Saturation function tables in :term:`.DATA files <.DATA file>` + can have certain values defaulted. + When parsed by res2df.common, these values are returned as np.nan. The incoming dataframe must be associated to one keyword only, but can consist of multiple SATNUMs. """ @@ -160,7 +161,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (ArgumentParser or subparser): parser to fill with arguments """ parser.add_argument( - "DATAFILE", help="Name of Eclipse DATA file or file with saturation functions." + "DATAFILE", + help="Name of .DATA input file for the reservoir simulator," + + " or file with saturation functions.", ) parser.add_argument( "-o", @@ -183,22 +186,22 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Fill a parser for the operation dataframe -> eclipse include file""" + """Fill a parser for the operation dataframe -> resdata :term:`include file`""" return common.fill_reverse_parser(parser, "SWOF, SGOF++", "relperm.inc") def satfunc_main(args) -> None: """Entry-point for module, for command line utility""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - if eclfiles: - deck = eclfiles.get_ecldeck() + resdatafiles = ResdataFiles(args.DATAFILE) + if resdatafiles: + deck = resdatafiles.get_deck() if "TABDIMS" in deck: # Things are easier when a full deck with (correct) TABDIMS # is supplied: - satfunc_df = df(eclfiles, keywords=args.keywords) + satfunc_df = df(resdatafiles, keywords=args.keywords) else: # This might be an include file for which we have to infer/guess # TABDIMS.
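Following the docstring above, pass the raw string (not a parsed deck) when TABDIMS is unknown; a minimal sketch (``relperm.inc`` and ``ntsfun=3`` are hypothetical)::

    from res2df import satfunc

    # A permissively parsed deck without TABDIMS would silently keep only SATNUM 1:
    with open("relperm.inc", encoding="utf-8") as f_handle:
        satfunc_df = satfunc.df(f_handle.read(), keywords=["SWOF"], ntsfun=3)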
@@ -221,27 +224,27 @@ def satfunc_main(args) -> None:


 def satfunc_reverse_main(args) -> None:
-    """For command line utility for CSV to Eclipse"""
-    logger = getLogger_ecl2csv(  # pylint: disable=redefined-outer-name
+    """For command line utility for CSV to include files"""
+    logger = getLogger_res2csv(  # pylint: disable=redefined-outer-name
         __name__, vars(args)
     )
     satfunc_df = pd.read_csv(args.csvfile)
     logger.info("Parsed %s", args.csvfile)
-    inc_string = df2ecl(satfunc_df, keywords=args.keywords)
+    inc_string = df2res(satfunc_df, keywords=args.keywords)
     common.write_inc_stdout_file(inc_string, args.output)


-def df2ecl(
+def df2res(
     satfunc_df: pd.DataFrame,
     keywords: Optional[List[str]] = None,
     comments: Optional[Dict[str, str]] = None,
     filename: Optional[str] = None,
 ) -> str:
-    """Generate Eclipse include strings from dataframes with
+    """Generate resdata :term:`include file` content from dataframes with
     saturation functions (SWOF, SGOF, ...)

     Args:
-        satfunc_df: Dataframe with data on ecl2df format.
+        satfunc_df: Dataframe with data in res2df format.
         keywords: List of keywords to include. Must be
            supported and present in the incoming dataframe.
            Keywords are printed in the order defined by this list.
@@ -252,11 +255,11 @@ def df2ecl(
            to file.

     Returns:
-        Generated Eclipse include string
+        Generated resdata :term:`include file` content
     """
     string = ""
-    string += common.df2ecl(
+    string += common.df2res(
         satfunc_df,
         keywords=keywords,
         comments=comments,
@@ -267,87 +270,87 @@ def df2ecl(
     return string


-def df2ecl_swof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
-    """Print SWOF data. Used by df2ecl().
+def df2res_swof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+    """Create string with :term:`include file` contents for SWOF. Used by df2res().

     Args:
         dframe: Containing SWOF data
         comment: Text that will be included as a comment
     """
-    return _df2ecl_satfuncs("SWOF", dframe, comment)
+    return _df2res_satfuncs("SWOF", dframe, comment)


-def df2ecl_sgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
-    """Print SGOF data. Used by df2ecl().
+def df2res_sgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+    """Create string with :term:`include file` contents for SGOF. Used by df2res().

     Args:
         dframe: Containing SGOF data
         comment: Text that will be included as a comment
     """
-    return _df2ecl_satfuncs("SGOF", dframe, comment)
+    return _df2res_satfuncs("SGOF", dframe, comment)


-def df2ecl_sgfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
-    """Print SGFN data. Used by df2ecl().
+def df2res_sgfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+    """Create string with :term:`include file` contents for SGFN. Used by df2res().

     Args:
         dframe: Containing SGFN data
         comment: Text that will be included as a comment
     """
-    return _df2ecl_satfuncs("SGFN", dframe, comment)
+    return _df2res_satfuncs("SGFN", dframe, comment)


-def df2ecl_sgwfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
-    """Print SGWFN data. Used by df2ecl().
+def df2res_sgwfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+    """Create string with :term:`include file` contents for SGWFN. Used by df2res().

     Args:
         dframe: Containing SGWFN data
         comment: Text that will be included as a comment
     """
-    return _df2ecl_satfuncs("SGWFN", dframe, comment)
+    return _df2res_satfuncs("SGWFN", dframe, comment)


-def df2ecl_swfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
-    """Print SWFN data. Used by df2ecl().
+def df2res_swfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for SWFN. Used by df2res(). Args: dframe: Containing SWFN data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SWFN", dframe, comment) + return _df2res_satfuncs("SWFN", dframe, comment) -def df2ecl_slgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SLGOF data. Used by df2ecl(). +def df2res_slgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for SLGOF. Used by df2res(). Args: dframe: Containing SLGOF data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SLGOF", dframe, comment) + return _df2res_satfuncs("SLGOF", dframe, comment) -def df2ecl_sof2(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SOF2 data. Used by df2ecl(). +def df2res_sof2(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for SOF2. Used by df2res(). Args: dframe: Containing SOF2 data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SOF2", dframe, comment) + return _df2res_satfuncs("SOF2", dframe, comment) -def df2ecl_sof3(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SOF3 data. Used by df2ecl(). +def df2res_sof3(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Create string with :term:`include file` contents for SOF3. Used by df2res(). Args: dframe: Containing SOF3 data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SOF3", dframe, comment) + return _df2res_satfuncs("SOF3", dframe, comment) -def _df2ecl_satfuncs( +def _df2res_satfuncs( keyword: str, dframe: pd.DataFrame, comment: Optional[str] = None ) -> str: if dframe.empty: @@ -365,8 +368,9 @@ def _df2ecl_satfuncs( subset = subset.set_index("SATNUM").sort_index() # Make a function that is to be called for each SATNUM - def _df2ecl_satfuncs_satnum(keyword, dframe): - """Print one saturation function for one specific SATNUM""" + def _df2res_satfuncs_satnum(keyword, dframe): + """Create string with :term:`include file` contents + for one saturation function for one specific SATNUM""" col_headers = RENAMERS[keyword]["DATA"] string = ( "-- " @@ -379,5 +383,5 @@ def _df2ecl_satfuncs_satnum(keyword, dframe): # Loop over every SATNUM for satnum in subset.index.unique(): string += f"-- SATNUM: {satnum}\n" - string += _df2ecl_satfuncs_satnum(keyword, subset[subset.index == satnum]) + string += _df2res_satfuncs_satnum(keyword, subset[subset.index == satnum]) return string + "\n" diff --git a/ecl2df/summary.py b/res2df/summary.py similarity index 87% rename from ecl2df/summary.py rename to res2df/summary.py index 666b51e50..4b67d3e58 100644 --- a/ecl2df/summary.py +++ b/res2df/summary.py @@ -16,11 +16,11 @@ import pyarrow.feather from resdata.summary import Summary, SummaryKeyWordVector -from ecl2df import getLogger_ecl2csv +from res2df import getLogger_res2csv from . import parameters from .common import write_dframe_stdout_file -from .eclfiles import EclFiles +from .resdatafiles import ResdataFiles logger: logging.Logger = logging.getLogger(__name__) @@ -36,14 +36,14 @@ # Any frequency mnemonics not mentioned here will be # passed on to Pandas. } -"""Mapping from ecl2df custom offset strings to Pandas DateOffset strings. +"""Mapping from res2df custom offset strings to Pandas DateOffset strings. 
See
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
"""  # noqa


 def date_range(start_date: dt.date, end_date: dt.date, freq: str) -> List[dt.datetime]:
-    """Wrapper for pandas.date_range to allow for extra ecl2df specific mnemonics
+    """Wrapper for pandas.date_range to allow for extra res2df specific mnemonics
     'yearly', 'daily', 'weekly', mapped over to pandas DateOffsets.

     Args:
@@ -84,7 +84,7 @@ def _ensure_date_or_none(some_date: Optional[Union[str, dt.date]]) -> Optional[d


 def _crop_datelist(
-    eclsumsdates: List[dt.datetime],
+    summarydates: List[dt.datetime],
     freq: Union[dt.date, dt.datetime, str],
     start_date: Optional[dt.date] = None,
     end_date: Optional[dt.date] = None,
@@ -94,7 +94,7 @@ def _crop_datelist(
     only cropped or returned as is.

     Args:
-        eclsumsdates: list of datetimes, typically coming from Summary.dates
+        summarydates: list of datetimes, typically coming from Summary.dates
         freq: Either a date or datetime, or a frequency string
             "raw", "first" or "last".
         start_date: Dates prior to this date will be cropped.
@@ -105,7 +105,7 @@ def _crop_datelist(
     """
     datetimes: Union[List[dt.date], List[dt.datetime]] = []  # type: ignore
     if freq == FREQ_RAW:
-        datetimes = eclsumsdates
+        datetimes = summarydates
         datetimes.sort()
         if start_date:
             # Convert to datetime (at 00:00:00)
@@ -117,9 +117,9 @@ def _crop_datelist(
             datetimes = [x for x in datetimes if x < end_date]
             datetimes = datetimes + [end_date]
     elif freq == FREQ_FIRST:
-        datetimes = [min(eclsumsdates).date()]
+        datetimes = [min(summarydates).date()]
     elif freq == FREQ_LAST:
-        datetimes = [max(eclsumsdates).date()]
+        datetimes = [max(summarydates).date()]
     elif isinstance(freq, (dt.date, dt.datetime)):
         datetimes = [freq]
     return datetimes
@@ -193,7 +193,7 @@ def _fallback_date_range(start: dt.date, end: dt.date, freq: str) -> List[dt.dat


 def resample_smry_dates(
-    eclsumsdates: List[dt.datetime],
+    summarydates: List[dt.datetime],
     freq: str = FREQ_RAW,
     normalize: bool = True,
     start_date: Optional[Union[str, dt.date]] = None,
@@ -206,7 +206,7 @@ def resample_smry_dates(
     can be returned, on the same date range. Incoming dates can also be cropped.

     Args:
-        eclsumsdates: list of datetimes, typically coming from Summary.dates
+        summarydates: list of datetimes, typically coming from Summary.dates
         freq: string denoting requested frequency for
             the returned list of datetime. 'raw' will
             return the input datetimes (no resampling).
@@ -233,7 +233,7 @@ def resample_smry_dates(
     if freq in [FREQ_RAW, FREQ_FIRST, FREQ_LAST] or isinstance(
         freq, (dt.date, dt.datetime)
     ):
-        return _crop_datelist(eclsumsdates, freq, start_date, end_date)
+        return _crop_datelist(summarydates, freq, start_date, end_date)

     # In case freq is an ISO-date(time)-string, interpret as such:
     try:
@@ -244,8 +244,8 @@ def resample_smry_dates(
         pass

     # These are datetime.datetime, not datetime.date
-    start_smry = min(eclsumsdates)
-    end_smry = max(eclsumsdates)
+    start_smry = min(summarydates)
+    end_smry = max(summarydates)

     # Normalize start and end date according to frequency by extending the time range.
-    # [1997-11-05, 2020-03-02] and monthly frequecy
+    # [1997-11-05, 2020-03-02] and monthly frequency
@@ -297,7 +297,7 @@ def resample_smry_dates(


 def df(
-    eclfiles: EclFiles,
+    resdatafiles: ResdataFiles,
     time_index: Optional[str] = None,
     column_keys: Optional[Union[List[str], str]] = None,
     start_date: Optional[Union[str, dt.date]] = None,
@@ -321,8 +321,8 @@ def df(
     is always named "DATE".

     Arguments:
-        eclfiles: EclFiles object representing the Eclipse deck. Alternatively
-            an Summary object.
+        resdatafiles: ResdataFiles object representing a
+            :term:`.DATA file`. Alternatively a Summary object.
         time_index: string indicating a resampling frequency,
             'yearly', 'monthly', 'daily', 'last' or 'raw', the latter will
             return the simulated report steps (also default).
@@ -336,7 +336,7 @@ def df(
             Dates past this date will be dropped, supplied
-            end_date will always be included. Overriden if time_index
+            end_date will always be included. Overridden if time_index
             is 'last'.
-        include_restart: boolean sent to libecl for whether restart
+        include_restart: boolean sent to resdata for whether restart
             files should be traversed
         params: If set, parameters.txt will be attempted loaded
             and merged with the summary data.
@@ -354,11 +354,11 @@ def df(
     if isinstance(column_keys, str):
         column_keys = [column_keys]

-    if isinstance(eclfiles, Summary):
-        eclsum = eclfiles
+    if isinstance(resdatafiles, Summary):
+        summary = resdatafiles
     else:
         try:
-            eclsum = eclfiles.get_eclsum(include_restart=include_restart)
+            summary = resdatafiles.get_summary(include_restart=include_restart)
         except OSError:
             logger.warning("Error reading summary instance, returning empty dataframe")
             return pd.DataFrame()
@@ -366,7 +366,7 @@ def df(
     time_index_arg: Optional[Union[List[dt.date], List[dt.datetime]]]
     if isinstance(time_index, str) and time_index == "raw":
         time_index_arg = resample_smry_dates(
-            eclsum.dates,
+            summary.dates,
             "raw",
             False,
             start_date,
@@ -374,7 +374,7 @@ def df(
         )
     elif isinstance(time_index, str):
         time_index_arg = resample_smry_dates(
-            eclsum.dates,
+            summary.dates,
             time_index,
             True,
             start_date,
@@ -402,8 +402,7 @@ def df(
         time_index_str or "raw",
     )

-    # dframe = eclsum.pandas_frame(time_index_arg, column_keys)
-    dframe = _libecl_eclsum_pandas_frame(eclsum, time_index_arg, column_keys)
+    dframe = _summary_pandas_frame(summary, time_index_arg, column_keys)

     logger.info(
         "Dataframe with smry data ready, %d columns and %d rows",
@@ -412,21 +411,21 @@ def df(
     )
     dframe.index.name = "DATE"
     if params or paramfile:
-        dframe = _merge_params(dframe, paramfile, eclfiles)
+        dframe = _merge_params(dframe, paramfile, resdatafiles)

-    # Add metadata as an attribute the dataframe, using experimental Pandas features:
-    meta = smry_meta(eclsum)
+    # Add metadata as an attribute of the dataframe, using experimental Pandas features:
+    meta = smry_meta(summary)
     # Slice meta to dataframe columns:
     dframe.attrs["meta"] = {
         column_key: meta[column_key] for column_key in dframe if column_key in meta
     }

-    # Remove duplicated column names. These will occur from libecl
+    # Remove duplicated column names. These will occur from resdata
     # when the user has repeated vector names in the summary SECTION
     dupes = dframe.columns.duplicated()
     if dupes.any():
         logger.warning(
-            "Duplicated columns detected, check your DATA file "
+            "Duplicated columns detected, check your .DATA file "
             "for repeated vectors in the SUMMARY section"
         )
         logger.warning("Duplicates: %s", list(dframe.columns[dupes]))
@@ -539,7 +538,7 @@ def _df2pyarrow(dframe: pd.DataFrame) -> pyarrow.Table:
 def _merge_params(
     dframe: pd.DataFrame,
     paramfile: Optional[Union[str, Path]] = None,
-    eclfiles: Optional[Union[str, EclFiles]] = None,
+    resdatafiles: Optional[Union[str, ResdataFiles]] = None,
 ) -> pd.DataFrame:
     """Locate parameters in a file and add to the dataframe.

-    the parameters.txt file based on the location of an Eclise run.
+    the parameters.txt file based on the location of an Eclipse run.
""" - if paramfile is None and eclfiles is not None: - param_files = parameters.find_parameter_files(eclfiles) + if paramfile is None and resdatafiles is not None: + param_files = parameters.find_parameter_files(resdatafiles) logger.info("Loading parameters from files: %s", str(param_files)) param_dict = parameters.load_all(param_files) elif ( paramfile is not None - and eclfiles is not None + and resdatafiles is not None and not Path(paramfile).is_absolute() ): - param_files = parameters.find_parameter_files(eclfiles, filebase=str(paramfile)) + param_files = parameters.find_parameter_files( + resdatafiles, filebase=str(paramfile) + ) logger.info("Loading parameters from files: %s", str(param_files)) param_dict = parameters.load_all(param_files) elif paramfile is not None and Path(paramfile).is_absolute(): @@ -574,7 +575,7 @@ def _merge_params( return dframe -def smry_meta(eclfiles: EclFiles) -> Dict[str, Dict[str, Any]]: +def smry_meta(resdatafiles: ResdataFiles) -> Dict[str, Dict[str, Any]]: """Provide metadata for summary data vectors. A dictionary indexed by summary vector name is returned, and each @@ -589,27 +590,27 @@ def smry_meta(eclfiles: EclFiles) -> Dict[str, Dict[str, Any]]: * keyword (str) * wgname (str or None) """ - if isinstance(eclfiles, Summary): - eclsum = eclfiles + if isinstance(resdatafiles, Summary): + summary = resdatafiles else: - eclsum = eclfiles.get_eclsum() + summary = resdatafiles.get_summary() meta: Dict[str, Dict[str, Any]] = {} - for col in eclsum.keys(): + for col in summary.keys(): meta[col] = {} - meta[col]["unit"] = eclsum.unit(col) - meta[col]["is_total"] = eclsum.is_total(col) - meta[col]["is_rate"] = eclsum.is_rate(col) - meta[col]["is_historical"] = eclsum.smspec_node(col).is_historical() - meta[col]["keyword"] = eclsum.smspec_node(col).keyword - meta[col]["wgname"] = eclsum.smspec_node(col).wgname - num = eclsum.smspec_node(col).get_num() + meta[col]["unit"] = summary.unit(col) + meta[col]["is_total"] = summary.is_total(col) + meta[col]["is_rate"] = summary.is_rate(col) + meta[col]["is_historical"] = summary.smspec_node(col).is_historical() + meta[col]["keyword"] = summary.smspec_node(col).keyword + meta[col]["wgname"] = summary.smspec_node(col).wgname + num = summary.smspec_node(col).get_num() if num is not None: meta[col]["get_num"] = num return meta -def _fix_dframe_for_libecl(dframe: pd.DataFrame) -> pd.DataFrame: +def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame: """Fix a dataframe making it ready for Summary.from_pandas() * Ensures that the index is always datetime, and sorted. @@ -677,16 +678,16 @@ def _fix_dframe_for_libecl(dframe: pd.DataFrame) -> pd.DataFrame: return dframe -def df2eclsum( +def df2ressum( dframe: pd.DataFrame, casename: str = "SYNTHETIC", ) -> Summary: - """Convert a dataframe to an Summary object + """Convert a dataframe to a Summary object Args: dframe: Dataframe with a DATE colum (or with the dates/datetimes in the index). - casename: Name of Eclipse casename/basename to be used for the Summary object + casename: Name of casename/basename to be used for the Summary object If the Summary object is later written to disk, this will be used to construct the filenames. """ @@ -698,41 +699,41 @@ def df2eclsum( if "." 
in casename: raise ValueError(f"Do not use dots in casename {casename}") - dframe = _fix_dframe_for_libecl(dframe) - return _libecl_eclsum_from_pandas(casename, dframe) + dframe = _fix_dframe_for_resdata(dframe) + return resdata_summary_from_pandas(casename, dframe) # return Summary.from_pandas(casename, dframe) -def _libecl_eclsum_pandas_frame( - eclsum: Summary, +def _summary_pandas_frame( + summary: Summary, time_index: Optional[Union[List[dt.date], List[dt.datetime]]] = None, column_keys: Optional[List[str]] = None, ) -> pd.DataFrame: - """Build a Pandas dataframe from an Summary object. + """Build a Pandas dataframe from a Summary object. - Temporarily copied from libecl to circumvent bug + Temporarily copied from resdata to circumvent bug - https://github.com/equinor/ecl/issues/802 + https://github.com/equinor/resdata/issues/802 """ if column_keys is None: - keywords = SummaryKeyWordVector(eclsum, add_keywords=True) + keywords = SummaryKeyWordVector(summary, add_keywords=True) else: - keywords = SummaryKeyWordVector(eclsum) + keywords = SummaryKeyWordVector(summary) for key in column_keys: keywords.add_keywords(key) # pylint: disable=protected-access if time_index is None: - time_index = eclsum.dates # Changed from libecl + time_index = summary.dates # Changed from resdata data = np.zeros([len(time_index), len(keywords)]) Summary._init_pandas_frame( - eclsum, keywords, data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) + summary, keywords, data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) ) else: - time_points = eclsum._make_time_vector(time_index) + time_points = summary._make_time_vector(time_index) data = np.zeros([len(time_points), len(keywords)]) Summary._init_pandas_frame_interp( - eclsum, + summary, keywords, time_points, data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), @@ -752,15 +753,15 @@ def _libecl_eclsum_pandas_frame( return frame -def _libecl_eclsum_from_pandas( +def resdata_summary_from_pandas( case: str, frame: pd.DataFrame, dims: Optional[List[int]] = None, headers: Optional[List[tuple]] = None, ) -> Summary: - """Build an Summary object from a Pandas dataframe. + """Build a Summary object from a Pandas dataframe. - Temporarily copied from libecl to circumvent bug + Temporarily copied from resdata to circumvent bug https://github.com/equinor/ecl/issues/802 """ @@ -779,18 +780,20 @@ def _libecl_eclsum_from_pandas( header_list = Summary._compile_headers_list(headers, dims) if dims is None: dims = [1, 1, 1] - ecl_sum = Summary.writer(case, start_time, dims[0], dims[1], dims[2]) + the_summary = Summary.writer(case, start_time, dims[0], dims[1], dims[2]) for keyword, wgname, num, unit in header_list: var_list.append( - ecl_sum.add_variable(keyword, wgname=wgname, num=num, unit=unit).getKey1() + the_summary.add_variable( + keyword, wgname=wgname, num=num, unit=unit + ).getKey1() ) for idx, time in enumerate(frame.index): days = (time - start_time).days - t_step = ecl_sum.add_t_step(idx + 1, days) + t_step = the_summary.add_t_step(idx + 1, days) for var in var_list: t_step[var] = frame.iloc[idx][var] - return ecl_sum + return the_summary def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: @@ -801,7 +804,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of Eclipse DATA file. " + "UNSMRY file must lie alongside.", + help="Name of the .DATA input file for the reservoir simulator." 
+ + " There must exist a UNSMRY file with the same path and basename.", ) parser.add_argument( "--time_index", @@ -853,7 +857,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: type=str, help=( "Filename of key-value parameter file to look for if -p is set, " - "relative to Eclipse DATA file or an absolute filename. " + "relative to simulator input (.DATA) file or an absolute filename. " "If not supplied, parameters.{json,yml,txt} in " "{., .. and ../..} will be merged in." ), @@ -880,13 +884,13 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Fill a parser for the operation: dataframe -> eclsum files""" + """Fill a parser for the operation: dataframe -> summary files""" parser.add_argument( "-o", "--output", type=str, - help="Basename for Eclipse output files", + help="Basename for output files", default="SYNTSMRY", ) parser.add_argument("csvfile", help="Name of CSV file with summary data.") @@ -897,16 +901,16 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar def summary_main(args) -> None: """Read summary data from disk and write CSV back to disk""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) eclbase = ( args.DATAFILE.replace(".DATA", "").replace(".UNSMRY", "").replace(".SMSPEC", "") ) - eclfiles = EclFiles(eclbase) + resdatafiles = ResdataFiles(eclbase) sum_df = df( - eclfiles, + resdatafiles, time_index=args.time_index, column_keys=args.column_keys, start_date=args.start_date, @@ -923,8 +927,8 @@ def summary_main(args) -> None: def summary_reverse_main(args) -> None: - """Entry point for usage with "csv2ecl summary" on the command line""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + """Entry point for usage with "csv2res summary" on the command line""" + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) @@ -936,10 +940,10 @@ def summary_reverse_main(args) -> None: # Summary.fwrite() can only write to current directory: cwd = os.getcwd() - eclsum = df2eclsum(summary_df, eclbase) + summary = df2ressum(summary_df, eclbase) try: os.chdir(outputdir) - Summary.fwrite(eclsum) + Summary.fwrite(summary) finally: os.chdir(cwd) diff --git a/ecl2df/svg_color_keyword_names.txt b/res2df/svg_color_keyword_names.txt similarity index 100% rename from ecl2df/svg_color_keyword_names.txt rename to res2df/svg_color_keyword_names.txt diff --git a/ecl2df/trans.py b/res2df/trans.py similarity index 91% rename from ecl2df/trans.py rename to res2df/trans.py index b289e3bef..f041917ed 100644 --- a/ecl2df/trans.py +++ b/res2df/trans.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -Extract transmissibility information from Eclipse output files as Dataframes. +Extract transmissibility information from output files as Dataframes. 
""" import argparse import logging @@ -8,12 +8,12 @@ import pandas as pd -import ecl2df.grid -import ecl2df.nnc -from ecl2df import getLogger_ecl2csv -from ecl2df.common import write_dframe_stdout_file +import res2df.grid +import res2df.nnc +from res2df import getLogger_res2csv +from res2df.common import write_dframe_stdout_file -from .eclfiles import EclFiles +from .resdatafiles import ResdataFiles try: import networkx @@ -26,7 +26,7 @@ def df( - eclfiles: EclFiles, + resdatafiles: ResdataFiles, vectors: Optional[Union[str, List[str]]] = None, boundaryfilter: bool = False, group: bool = False, @@ -57,8 +57,8 @@ def df( you will get a corresponding FIPNUM1 and FIPNUM2 added. Args: - eclfiles: An object representing your Eclipse run - vectors: Eclipse INIT vectors that you want to include + resdatafiles: An object representing your simulator run + vectors: simulator INIT vectors that you want to include boundaryfilter: Set to true if you want to filter where one INIT vector change. Only use for integer INIT vectors. group: Set to true if you want to sum transmissibilities over @@ -101,7 +101,7 @@ def df( "Filtering to both k and to ij simultaneously results in empty dataframe" ) - grid_df = ecl2df.grid.df(eclfiles) + grid_df = res2df.grid.df(resdatafiles) existing_vectors = [vec for vec in vectors if vec in grid_df.columns] if len(existing_vectors) < len(vectors): logger.warning( @@ -149,7 +149,7 @@ def df( if addnnc: logger.info("Adding NNC data") - nnc_df = ecl2df.nnc.df(eclfiles, coords=False, pillars=False) + nnc_df = res2df.nnc.df(resdatafiles, coords=False, pillars=False) nnc_df["DIR"] = "NNC" trans_df = pd.concat([trans_df, nnc_df], sort=False) @@ -236,12 +236,14 @@ def df( return trans_df -def make_nx_graph(eclfiles: EclFiles, region: str = "FIPNUM") -> "networkx.Graph": +def make_nx_graph( + resdatafiles: ResdataFiles, region: str = "FIPNUM" +) -> "networkx.Graph": """Construct a networkx graph for the transmissibilities.""" if not HAVE_NETWORKX: logger.error("Please install networkx for this function to work") return None - trans_df = df(eclfiles, vectors=[region], coords=True, group=True) + trans_df = df(resdatafiles, vectors=[region], coords=True, group=True) reg1 = region + "1" reg2 = region + "2" graph = networkx.Graph() @@ -259,7 +261,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of Eclipse DATA file. " + "INIT and EGRID file must lie alongside.", + help="Name of the .DATA input file for the reservoir simulator." 
+ + " There must exist INIT and EGRID files with the same path and basename.", ) parser.add_argument("--vectors", nargs="+", help="Extra INIT vectors to be added") parser.add_argument( @@ -303,12 +306,12 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def trans_main(args): """This is the command line API""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) + resdatafiles = ResdataFiles(args.DATAFILE) trans_df = df( - eclfiles, + resdatafiles, vectors=args.vectors, boundaryfilter=args.boundaryfilter, onlykdir=args.onlyk, diff --git a/ecl2df/vfp/__init__.py b/res2df/vfp/__init__.py similarity index 76% rename from ecl2df/vfp/__init__.py rename to res2df/vfp/__init__.py index 4a202ddca..4fd957cbf 100644 --- a/ecl2df/vfp/__init__.py +++ b/res2df/vfp/__init__.py @@ -1,4 +1,4 @@ -""" Module with interface for ecl2df to VFPPROD and VFPINJ +""" Module with interface for res2df to VFPPROD and VFPINJ keywords in Eclipse. """ from ._vfp import ( # noqa F:401 @@ -7,8 +7,8 @@ basic_data2pyarrow, df, df2basic_data, - df2ecl, - df2ecls, + df2res, + df2ress, dfs, fill_parser, fill_reverse_parser, diff --git a/ecl2df/vfp/_vfp.py b/res2df/vfp/_vfp.py similarity index 87% rename from ecl2df/vfp/_vfp.py rename to res2df/vfp/_vfp.py index 416884b04..7bb254893 100755 --- a/ecl2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -1,6 +1,6 @@ """Extract the VFPPROD/VFPINJ data from an Eclipse (input) deck as Pandas Dataframes -Data can be extracted from a full Eclipse deck or from individual files. Supports +Data can be extracted from a complete deck or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow and pyarrow.table """ @@ -25,7 +25,7 @@ except ImportError: pass -from ecl2df import EclFiles, common, getLogger_ecl2csv +from res2df import ResdataFiles, common, getLogger_res2csv from . import _vfpinj as vfpinj from . import _vfpprod as vfpprod @@ -35,7 +35,7 @@ def basic_data( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResdataFiles, "opm.libopmcommon_python.Deck"], keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> List[Dict[str, Any]]: @@ -45,16 +45,16 @@ def basic_data( BASIC_DATA_KEYS in _vfpprod and _vfpinj. Args: - deck: Eclipse deck or string with deck + deck: :term:`.DATA file` or string with :term:`deck` keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. 
""" - if isinstance(deck, EclFiles): - deck = deck.get_ecldeck() + if isinstance(deck, ResdataFiles): + deck = deck.get_deck() elif isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResdataFiles.str2deck(deck) if keyword not in SUPPORTED_KEYWORDS: raise ValueError( @@ -241,24 +241,24 @@ def pyarrow2basic_data(pa_table: pa.Table) -> Union[Dict[str, Any], None]: def dfs( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResdataFiles, "opm.libopmcommon_python.Deck"], keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> List[pd.DataFrame]: - """Produce a list of dataframes of vfp tables from a deck + """Produce a list of dataframes of vfp tables from a :term:`deck` Data for the keyword VFPPROD or VFPINJ will be returned as separate item in list Args: - deck: Eclipse deck or string with deck + deck: :term:`.DATA file` or string with :term:`deck` keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ - if isinstance(deck, EclFiles): - deck = deck.get_ecldeck() + if isinstance(deck, ResdataFiles): + deck = deck.get_deck() elif isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResdataFiles.str2deck(deck) if keyword not in SUPPORTED_KEYWORDS: raise ValueError( @@ -284,24 +284,24 @@ def dfs( def pyarrow_tables( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResdataFiles, "opm.libopmcommon_python.Deck"], keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> List[pa.Table]: - """Produce a list of pyarrow.Table of vfp tables from a deck + """Produce a list of pyarrow.Table of vfp tables from a :term:`deck` Data for the keyword VFPPROD or VFPINJ will be returned as separate item in list Args: - deck: Eclipse deck or string with deck + deck: :term:`.DATA file` or string with :term:`deck` keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ - if isinstance(deck, EclFiles): - deck = deck.get_ecldeck() + if isinstance(deck, ResdataFiles): + deck = deck.get_deck() elif isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResdataFiles.str2deck(deck) if keyword not in SUPPORTED_KEYWORDS: raise ValueError( @@ -326,12 +326,13 @@ def pyarrow_tables( return pyarrow_tables_vfp -def df2ecls( +def df2ress( dframe: pd.DataFrame, keyword: str = "VFPPROD", comments: Optional[Dict[str, str]] = None, ) -> List[str]: - """Produce a list of strings defining VFPPROD/VFPINJ Eclipse input from a dataframe + """Produce a list of strings defining VFPPROD/VFPINJ Eclipse + :term:`include file` contents from a dataframe All data for the keyword VFPPROD or VFPINJ will be returned. 
@@ -355,14 +356,14 @@ def df2ecls(
     if np.all(df_vfp["VFP_TYPE"] == keyword):
         if comments and keyword in comments.keys():
             if keyword == "VFPPROD":
-                vfp_strs.append(vfpprod.df2ecl(df_vfp, comments["VFPPROD"]))
+                vfp_strs.append(vfpprod.df2res(df_vfp, comments["VFPPROD"]))
             elif keyword == "VFPINJ":
-                vfp_strs.append(vfpinj.df2ecl(df_vfp, comments["VFPINJ"]))
+                vfp_strs.append(vfpinj.df2res(df_vfp, comments["VFPINJ"]))
         else:
             if keyword == "VFPPROD":
-                vfp_strs.append(vfpprod.df2ecl(df_vfp))
+                vfp_strs.append(vfpprod.df2res(df_vfp))
             elif keyword == "VFPINJ":
-                vfp_strs.append(vfpinj.df2ecl(df_vfp))
+                vfp_strs.append(vfpinj.df2res(df_vfp))
     else:
         raise ValueError(
             f"VFP number {vfpno} does not have consistent "
@@ -372,13 +373,14 @@ def df2ecls(
     return vfp_strs


-def df2ecl(
+def df2res(
     dframe: pd.DataFrame,
     keyword: str = "VFPPROD",
     comments: Optional[Dict[str, str]] = None,
     filename: Optional[str] = None,
 ) -> str:
-    """Produce a string defining all VFPPROD/VFPINJ Eclipse input from a dataframe
+    """Create a string defining all VFPPROD/VFPINJ Eclipse
+    :term:`include file` contents from a dataframe

     All data for the keywords VFPPROD/VFPINJ will be returned.

@@ -392,7 +394,7 @@ def df2ecl(
            to file.
     """

-    strs_vfp = df2ecls(dframe, keyword=keyword, comments=comments)
+    strs_vfp = df2ress(dframe, keyword=keyword, comments=comments)
     str_vfps = ""

     if comments and "master" in comments.keys():
@@ -409,7 +411,7 @@ def df2ecl(


 def df(
-    deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"],
+    deck: Union[str, ResdataFiles, "opm.libopmcommon_python.Deck"],
     keyword: str = "VFPPROD",
     vfpnumbers_str: Optional[str] = None,
 ) -> pd.DataFrame:
@@ -418,7 +420,7 @@ def df(
     All data for the keywords VFPPROD/VFPINJ will be returned.

     Args:
-        deck: Eclipse deck or string wit deck
+        deck: :term:`.DATA file` or string with :term:`deck`
         keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ'
         vfpnumbers_str: str with list of VFP table numbers to extract
     """
@@ -427,10 +429,10 @@ def df(
         logger.warning("No keywords provided to vfp.df. Empty dataframe returned")
        return pd.DataFrame()

-    if isinstance(deck, EclFiles):
-        deck = deck.get_ecldeck()
+    if isinstance(deck, ResdataFiles):
+        deck = deck.get_deck()
     elif isinstance(deck, str):
-        deck = EclFiles.str2deck(deck)
+        deck = ResdataFiles.str2deck(deck)

     # Extract all VFPROD/VFPINJ as separate dataframes
     dfs_vfp = dfs(deck, keyword, vfpnumbers_str)
@@ -448,7 +450,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
         parser (argparse.ArgumentParser or argparse.subparser):
             parser to fill with arguments
     """
-    parser.add_argument("DATAFILE", help="Name of Eclipse DATA file.")
+    parser.add_argument(
+        "DATAFILE", help="Name of the .DATA input file for the reservoir simulator"
+    )
     parser.add_argument(
         "-o",
         "--output",
@@ -476,13 +480,13 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:


 def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
-    """Fill a parser for the operation dataframe -> eclipse include file"""
+    """Fill a parser for the operation dataframe -> resdata :term:`include file`"""
     return common.fill_reverse_parser(parser, "VFPPROD, VFPINJ", "vfp.inc")


 def vfp_main(args) -> None:
     """Entry-point for module, for command line utility."""
-    logger = getLogger_ecl2csv(  # pylint: disable=redefined-outer-name
+    logger = getLogger_res2csv(  # pylint: disable=redefined-outer-name
         __name__, vars(args)
     )
     if args.keyword:
@@ -495,12 +499,12 @@ def vfp_main(args) -> None:
     if "vfpnumbers" in args:
         vfpnumbers = str(args.vfpnumbers)

-    eclfiles = EclFiles(args.DATAFILE)
+    resdatafiles = ResdataFiles(args.DATAFILE)
     if args.arrow:
         outputfile = args.output
-        outputfile.replace(".arrow", "")
+        outputfile = outputfile.replace(".arrow", "")
         vfp_arrow_tables = pyarrow_tables(
-            eclfiles.get_ecldeck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers
+            resdatafiles.get_deck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers
         )
         for vfp_table in vfp_arrow_tables:
             table_number = int(
@@ -513,7 +517,7 @@ def vfp_main(args) -> None:
         logger.info(f"Parsed file {args.DATAFILE} for vfp.dfs_arrow")
     else:
         dframe = df(
-            eclfiles.get_ecldeck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers
+            resdatafiles.get_deck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers
         )
         if args.output:
             common.write_dframe_stdout_file(
@@ -524,11 +528,11 @@ def vfp_main(args) -> None:

 def vfp_reverse_main(args) -> None:
     """Entry-point for module, for command line utility for CSV to Eclipse"""
-    logger = getLogger_ecl2csv(  # pylint: disable=redefined-outer-name
+    logger = getLogger_res2csv(  # pylint: disable=redefined-outer-name
         __name__, vars(args)
     )
     vfp_df = pd.read_csv(args.csvfile)
-    logger.info("Parsed {args.csvfile}")
+    logger.info("Parsed %s", args.csvfile)
-    inc_string = df2ecl(vfp_df, args.keyword)
+    inc_string = df2res(vfp_df, args.keyword)
     if args.output:
         common.write_inc_stdout_file(inc_string, args.output)
diff --git a/ecl2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py
similarity index 93%
rename from ecl2df/vfp/_vfpcommon.py
rename to res2df/vfp/_vfpcommon.py
index 0d4a75a5b..83b23ea5d 100755
--- a/ecl2df/vfp/_vfpcommon.py
+++ b/res2df/vfp/_vfpcommon.py
@@ -1,8 +1,8 @@
-"""Common functionality for vfp module to extract VFPPROD/VFPINJ data from Eclipse
-input deck to extract the VFPPROD/VFPINJ data from an Eclipse (input) deck
-as Pandas Dataframes
+"""Common functionality for the vfp module to extract
+VFPPROD/VFPINJ data from an Eclipse (input) deck
+as Pandas Dataframes

-Data can be extracted from a full Eclipse deck or from individual files. Supports
+Data can be extracted from a complete deck or from individual files. Supports
 output both in csv format as a pandas DataFrame or in pyarrow and pyarrow.table
 """

@@ -25,7 +25,7 @@
 except ImportError:
     pass

-from ecl2df import common
+from res2df import common

 logger = logging.getLogger(__name__)

@@ -68,7 +68,7 @@ def _deckrecord2list(

     Args:
-        record: Record be parsed
-        keyword: Which Eclipse keyword this belongs to
+        record: Record to be parsed
+        keyword: Which keyword this belongs to
         recordindex: For keywords where itemlistname is 'records', this is a
             list index to the "record".
         recordname: Name of the record
@@ -181,7 +181,8 @@ def _write_vfp_range(
     format: str = "%10.6g",
     values_per_line: int = 5,
 ) -> str:
-    """Produce a string representing an Eclipse record for a given table range
+    """Creates a :term:`include file` content string of a resdata
+    record for a given table range

     Args:
         values: List/array with the range sorted
@@ -195,12 +196,12 @@ def _write_vfp_range(
     if var_type != "UNDEFINED":
         var_type_str = var_type

-    ecl_str = f"-- {var_type_str} units - {unit_type} ( {len(values)} values )\n"
+    deck_str = f"-- {var_type_str} units - {unit_type} ( {len(values)} values )\n"
     for i, value in enumerate(values):
-        ecl_str += format % value
+        deck_str += format % value
         if (i + 1) % values_per_line == 0 and i < len(values) - 1:
-            ecl_str += "\n"
-    ecl_str += " /\n"
-    ecl_str += "\n"
+            deck_str += "\n"
+    deck_str += " /\n"
+    deck_str += "\n"

-    return ecl_str
+    return deck_str
diff --git a/ecl2df/vfp/_vfpdefs.py b/res2df/vfp/_vfpdefs.py
similarity index 99%
rename from ecl2df/vfp/_vfpdefs.py
rename to res2df/vfp/_vfpdefs.py
index a95d00dd1..fcb71bd65 100755
--- a/ecl2df/vfp/_vfpdefs.py
+++ b/res2df/vfp/_vfpdefs.py
@@ -2,7 +2,7 @@
 Some definitions and parameters used to define VFPPROD and VFPINJ keywords in Eclipse.
 This includes definitions of rates, thp, wfr (water fractions), gfr (gas fractions),
 alq (artificial-lift-quantities), units and so on. Used for consistency check in IO
-routines for VFPPROD and VFPINJ keywords in ecl2df.
+routines for VFPPROD and VFPINJ keywords in res2df.
 """

 from enum import Enum
diff --git a/ecl2df/vfp/_vfpinj.py b/res2df/vfp/_vfpinj.py
similarity index 90%
rename from ecl2df/vfp/_vfpinj.py
rename to res2df/vfp/_vfpinj.py
index 963f0bbde..adba85e8b 100755
--- a/ecl2df/vfp/_vfpinj.py
+++ b/res2df/vfp/_vfpinj.py
@@ -3,7 +3,7 @@
 basic_data (dictionary with basic data types), df (pandas DataFrame)
 or pyarrow_tables (pyarrow.Tables).

-Data can be extracted from a full Eclipse deck or from individual files.
+Data can be extracted from a complete deck or from individual files.
-Supports output both in csv format as a pandas DataFrame or in pyarrow a
+Supports output both in csv format as a pandas DataFrame or in pyarrow as
 pyarrow.Table. Also functionality to write pandas DataFrame and
 pyarrow.Table to file as Eclipse .Ecl format
@@ -29,7 +29,7 @@
 except ImportError:
     pass

-from ecl2df import common
+from res2df import common

 from ._vfpcommon import (
     _deckrecord2list,
@@ -74,7 +74,7 @@ def basic_data(
     Empty string returned if vfp table number does not match any number in list

     Args:
-        keyword: Eclipse deck keyword
+        keyword: :term:`.DATA file` keyword
         vfpnumbers_str: String with list of vfp table numbers to extract.
-            Syntax "[0,1,8:11]" corresponds
+            Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11].
     """
@@ -473,12 +473,12 @@ def df(
     keyword: "opm.libopmcommon_python.DeckKeyword",
     vfpnumbers_str: Optional[str] = None,
 ) -> Union[pd.DataFrame, None]:
-    """Return a dataframes of a single VFPINJ table from an Eclipse deck
+    """Return a dataframe of a single VFPINJ table from a :term:`.DATA file`

     Data from the VFPINJ keyword are stacked into a Pandas Dataframe

     Args:
-        keyword: Eclipse deck keyword
+        keyword: :term:`.DATA file` keyword
         vfpnumbers_str: String with list of vfp table numbers to extract.
             Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11].
     """
@@ -510,11 +510,11 @@ def pyarrow(
     keyword: "opm.libopmcommon_python.DeckKeyword",
     vfpnumbers_str: Optional[str] = None,
 ) -> Union[pa.Table, None]:
-    """Return a pyarrow Table of a single VFPINJ table from an Eclipse deck
+    """Return a pyarrow Table of a single VFPINJ table from a :term:`.DATA file`

     If no VFPINJ table found, return None

     Args:
-        keyword: Eclipse deck keyword
+        keyword: :term:`.DATA file` keyword
         vfpnumbers_str: String with list of vfp table numbers to extract.
             Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11].
     """
@@ -550,7 +550,8 @@ def _write_basic_record(
     unit_type: str,
     tab_type: str,
 ) -> str:
-    """Produce a string representing the first record for Eclipse VFPINJ keyword
+    """Creates a :term:`include file` content string of the
+    first record for the Eclipse VFPINJ keyword

     Args:
-        tableno: VFPROD table number
+        tableno: VFPINJ table number
@@ -564,15 +565,15 @@ def _write_basic_record(
     if unit_type != "DEFAULT":
         unit_type_str = unit_type

-    ecl_str = "-- Table Datum Depth Rate Type THP Type UNITS TAB Type\n"
-    ecl_str += "-- ----- ----------- --------- -------- -------- --------\n"
-    ecl_str += f" {tableno:5d}"
-    ecl_str += f" {datum:11.1f}"
-    ecl_str += f" {flo_type:>9s}"
-    ecl_str += f" {pressure_type:>8s}"
-    ecl_str += f" {unit_type_str:>8s}"
-    ecl_str += f" {tab_type:>8s} /\n\n"
-    return ecl_str
+    deck_str = "-- Table Datum Depth Rate Type THP Type UNITS TAB Type\n"
+    deck_str += "-- ----- ----------- --------- -------- -------- --------\n"
+    deck_str += f" {tableno:5d}"
+    deck_str += f" {datum:11.1f}"
+    deck_str += f" {flo_type:>9s}"
+    deck_str += f" {pressure_type:>8s}"
+    deck_str += f" {unit_type_str:>8s}"
+    deck_str += f" {tab_type:>8s} /\n\n"
+    return deck_str


 def _write_table(
@@ -580,7 +581,8 @@ def _write_table(
     table: pd.DataFrame,
     format: str = "%10.6g",
     values_per_line: int = 5,
 ) -> str:
-    """Produce a string representing an Eclipse record for a VFPINJ table (BHP part)
+    """Creates a :term:`include file` content string representing
+    a resdata record for a VFPINJ table (BHP part)

     Args:
-        table: DataFrame with multiindex for table ranges and colums
+        table: DataFrame with multiindex for table ranges and columns
             for tabulated values (BHP values)
         format: Format string for values
         values_per_line: Number of values per line in output
     """

-    ecl_str = ""
+    deck_str = ""
     for idx, row in table.iterrows():
-        ecl_str += f"{idx:2d}"
+        deck_str += f"{idx:2d}"
         no_flo = len(table.loc[idx].to_list())
         for n, value in enumerate(table.loc[idx].to_list()):
-            ecl_str += format % value
+            deck_str += format % value
             if (n + 1) % values_per_line == 0:
                 if n < no_flo - 1:
-                    ecl_str += "\n"
-                    ecl_str += " " * 2
+                    deck_str += "\n"
+                    deck_str += " " * 2
                 else:
-                    ecl_str += "\n"
+                    deck_str += "\n"
             elif n == no_flo - 1:
-                ecl_str += "\n"
-    ecl_str += "/\n"
+                deck_str += "\n"
+    deck_str += "/\n"

-    return ecl_str
+    return deck_str


 def _write_table_records(
@@ -614,7 +616,8 @@ def _write_table_records(
     format: str = "%10.6g",
     values_per_line: int = 5,
 ) -> str:
-    """Produce a string representing an Eclipse record for a VFPINJ table (BHP part)
+    """Creates a :term:`include file` content string representing
"""Creates a :term:`include file` content string representing + for a VFPINJ table (BHP part) Args: thp_indices: array of int representing index for THP value for record @@ -624,7 +627,7 @@ def _write_table_records( values_per_line: Number of values per line in output """ - ecl_str = "" + deck_str = "" no_records = len(thp_indices) no_flow_values = table.size // no_records if table.size % no_records > 0: @@ -634,25 +637,26 @@ def _write_table_records( for row in range(0, no_records): thp = thp_indices[row] - ecl_str += f"{thp:2d}" + deck_str += f"{thp:2d}" for n, value in enumerate(table[row, :]): - ecl_str += format % value + deck_str += format % value if (n + 1) % values_per_line == 0: if n < no_flow_values - 1: - ecl_str += "\n" - ecl_str += " " * 2 + deck_str += "\n" + deck_str += " " * 2 else: - ecl_str += "\n" + deck_str += "\n" elif n == no_flow_values - 1: - ecl_str += "\n" + deck_str += "\n" - ecl_str += "/\n" + deck_str += "/\n" - return ecl_str + return deck_str -def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Produce a string defining single VFPINJ Eclipse input from a dataframe +def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Creates a :term:`include file` content string + representing single VFPINJ Eclipse input from a dataframe All data for the keywords VFPINJ will be returned. @@ -670,16 +674,16 @@ def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: unit_type = vfpinj_data["UNIT_TYPE"] # Write dataframe to string with Eclipse format for VFPINJ - ecl_str = "VFPINJ\n" + deck_str = "VFPINJ\n" if comment: - ecl_str += common.comment_formatter(comment) + deck_str += common.comment_formatter(comment) else: - ecl_str += "\n" + deck_str += "\n" unit_value = vfpinj_data["UNIT_TYPE"].value if vfpinj_data["UNIT_TYPE"] == UNITTYPE.DEFAULT: unit_value = "1*" - ecl_str += _write_basic_record( + deck_str += _write_basic_record( vfpinj_data["TABLE_NUMBER"], vfpinj_data["DATUM"], vfpinj_data["RATE_TYPE"].value, @@ -687,22 +691,22 @@ def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: unit_value, vfpinj_data["TAB_TYPE"].value, ) - ecl_str += _write_vfp_range( + deck_str += _write_vfp_range( vfpinj_data["FLOW_VALUES"], rate_type.value, VFPINJ_UNITS[unit_type.value]["FLO"][rate_type.value], "%10.6g", ) - ecl_str += _write_vfp_range( + deck_str += _write_vfp_range( vfpinj_data["THP_VALUES"], thp_type.value, VFPINJ_UNITS[unit_type.value]["THP"][thp_type.value], "%10.6g", ) - ecl_str += _write_table_records( + deck_str += _write_table_records( vfpinj_data["THP_INDICES"], vfpinj_data["BHP_TABLE"], "%10.6g", ) - return ecl_str + return deck_str diff --git a/ecl2df/vfp/_vfpprod.py b/res2df/vfp/_vfpprod.py similarity index 93% rename from ecl2df/vfp/_vfpprod.py rename to res2df/vfp/_vfpprod.py index b782ed262..44bf1ed92 100755 --- a/ecl2df/vfp/_vfpprod.py +++ b/res2df/vfp/_vfpprod.py @@ -3,7 +3,7 @@ basic_data (dictionary with basic data types), df (pandas DataFrame) or pyarrow_tables (pyarrow.Tables). -Data can be extracted from a full Eclipse deck or from individual files. +Data can be extracted from a complete deck or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow as pyarrow.Table. Also functionality to write pandas DataFrame and pyarrow.Table to file as Eclipse .Ecl format. 
@@ -29,7 +29,7 @@
 except ImportError:
     pass

-from ecl2df import common
+from res2df import common

 from ._vfpcommon import (
     _deckrecord2list,
@@ -86,7 +86,7 @@ def basic_data(
     Empty string returned if vfp table number does not match any number in list

     Args:
-        keyword: Eclipse deck keyword
+        keyword: :term:`.DATA file` keyword
         vfpnumbers_str: String with list of vfp table numbers to extract.
-            Syntax "[0,1,8:11]" corresponds
+            Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11].
     """
@@ -655,7 +655,7 @@ def pyarrow2basic_data(pa_table: pa.Table) -> Dict[str, Any]:
 def _check_basic_data(vfp_data: Dict[str, Any]) -> bool:
     """Perform a check of the VFPPROD data contained in the dictionary.
     Checks if all data is present and if the dimensions of the arrays
-    are consisitent.
+    are consistent.

     Args:
         vfp_data: Dictionary containing all data for a VFPPROD keyword in Eclipse
@@ -720,10 +720,10 @@ def df(
     vfpnumbers_str: Optional[str] = None,
 ) -> Union[pd.DataFrame, None]:
     """Return a dataframe or pyarrow Table of a single VFPPROD table
-    from an Eclipse deck.
+    from a :term:`.DATA file`.

     Args:
-        keyword: Eclipse deck keyword
+        keyword: :term:`.DATA file` keyword
         vfpnumbers_str: String with list of vfp table numbers to extract.
             Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11].
     """
@@ -764,11 +764,11 @@ def pyarrow(
     keyword: "opm.libopmcommon_python.DeckKeyword",
     vfpnumbers_str: Optional[str] = None,
 ) -> Union[pa.Table, None]:
-    """Return a pyarrow Table of a single VFPPROD table from an Eclipse deck.
+    """Return a pyarrow Table of a single VFPPROD table from a :term:`.DATA file`.

     If no VFPPROD curve found, return None

     Args:
-        keyword: Eclipse deck keyword
+        keyword: :term:`.DATA file` keyword
         vfpnumbers_str: String with list of vfp table numbers to extract.
             Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11].
     """
@@ -816,7 +816,8 @@ def _write_basic_record(
     unit_type: str,
     tab_type: str,
 ) -> str:
-    """Produce a string representing the first record for Eclipse VFPPROD keyword
+    """Creates a :term:`include file` content string representing
+    the first record for Eclipse VFPPROD keyword

     Args:
-        tableno: VFPROD table number
+        tableno: VFPPROD table number
@@ -834,20 +835,20 @@ def _write_basic_record(
     if alq_type != "UNDEFINED":
         alq_type_str = alq_type

-    ecl_str = "-- Table Datum Depth Rate Type WFR Type "
-    ecl_str += "GFR Type THP Type ALQ Type UNITS TAB Type\n"
-    ecl_str += "-- ----- ----------- --------- -------- "
-    ecl_str += "-------- -------- -------- ------ --------\n"
-    ecl_str += f" {tableno:5d}"
-    ecl_str += f" {datum:11.1f}"
-    ecl_str += f" {flo_type:>8s}"
-    ecl_str += f" {wfr_type:>8s}"
-    ecl_str += f" {gfr_type:>8s}"
-    ecl_str += f" {pressure_type:>8s}"
-    ecl_str += f" {alq_type_str:>8s}"
-    ecl_str += f" {unit_type:>6s}"
-    ecl_str += f" {tab_type:>8s} /\n\n"
-    return ecl_str
+    deck_str = "-- Table Datum Depth Rate Type WFR Type "
+    deck_str += "GFR Type THP Type ALQ Type UNITS TAB Type\n"
+    deck_str += "-- ----- ----------- --------- -------- "
+    deck_str += "-------- -------- -------- ------ --------\n"
+    deck_str += f" {tableno:5d}"
+    deck_str += f" {datum:11.1f}"
+    deck_str += f" {flo_type:>8s}"
+    deck_str += f" {wfr_type:>8s}"
+    deck_str += f" {gfr_type:>8s}"
+    deck_str += f" {pressure_type:>8s}"
+    deck_str += f" {alq_type_str:>8s}"
+    deck_str += f" {unit_type:>6s}"
+    deck_str += f" {tab_type:>8s} /\n\n"
+    return deck_str


 def _write_table(
@@ -855,7 +856,8 @@ def _write_table(
     table: pd.DataFrame,
-    format: str = "%10.3",
+    format: str = "%10.3g",
     values_per_line: int = 5,
 ) -> str:
-    """Produce a string representing an Eclipse record for a VFPPROD table (BHP part)
+    """Creates a :term:`include file` content string representing
+    a resdata record for a VFPPROD table (BHP part)

     Args:
-        table: DataFrame with multiindex for table ranges and colums
+        table: DataFrame with multiindex for table ranges and columns
@@ -864,23 +866,23 @@ def _write_table(
         values_per_line: Number of values per line in output
     """

-    ecl_str = ""
+    deck_str = ""
     for idx, row in table.iterrows():
-        ecl_str += f"{idx[0]:2d} {idx[1]:2d} {idx[2]:2d} {idx[3]:2d}"
+        deck_str += f"{idx[0]:2d} {idx[1]:2d} {idx[2]:2d} {idx[3]:2d}"
         no_flo = len(table.loc[idx].to_list())
         for n, value in enumerate(table.loc[idx].to_list()):
-            ecl_str += format % value
+            deck_str += format % value
             if (n + 1) % values_per_line == 0:
                 if n < no_flo - 1:
-                    ecl_str += "\n"
-                    ecl_str += " " * 11
+                    deck_str += "\n"
+                    deck_str += " " * 11
                 else:
-                    ecl_str += "\n"
+                    deck_str += "\n"
             elif n == no_flo - 1:
-                ecl_str += "\n"
-        ecl_str += "/\n"
+                deck_str += "\n"
+        deck_str += "/\n"

-    return ecl_str
+    return deck_str


 def _write_table_records(
@@ -892,7 +894,8 @@ def _write_table_records(
-    format: str = "%10.3",
+    format: str = "%10.3g",
     values_per_line: int = 5,
 ) -> str:
-    """Produce a string representing an Eclipse record for a VFPPROD table (BHP part)
+    """Creates a :term:`include file` content string representing a
+    resdata record for a VFPPROD table (BHP part)

     Args:
         thp_indices: array of int representing index for THP value for record
@@ -905,7 +908,7 @@ def _write_table_records(
         values_per_line: Number of values per line in output
     """

-    ecl_str = ""
+    deck_str = ""
     no_records = len(thp_indices)
     no_flow_values = table.size // no_records
     if table.size % no_records > 0:
@@ -918,25 +921,26 @@ def _write_table_records(
     for row in range(0, no_records):
         thp = thp_indices[row]
         wfr = wfr_indices[row]
         gfr = gfr_indices[row]
         alq = alq_indices[row]
-        ecl_str += f"{thp:2d} {wfr:2d} {gfr:2d} {alq:2d}"
+        deck_str += f"{thp:2d} {wfr:2d} {gfr:2d} {alq:2d}"
         for n, value in enumerate(table[row, :]):
-            ecl_str += format % value
+            deck_str += format % value
             if (n + 1) % values_per_line == 0:
                 if n < no_flow_values - 1:
-                    ecl_str += "\n"
-                    ecl_str += " " * 11
+                    deck_str += "\n"
+                    deck_str += " " * 11
                 else:
-                    ecl_str += "\n"
+                    deck_str += "\n"
             elif n == no_flow_values - 1:
-                ecl_str += "\n"
+                deck_str += "\n"

-        ecl_str += "/\n"
+        deck_str += "/\n"

-    return ecl_str
+    return deck_str


-def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
-    """Produce a string defining single VFPPROD Eclipse input from a dataframe
+def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+    """Creates a :term:`include file` content string
+    representing a single VFPPROD Eclipse input from a dataframe

     All data for the keywords VFPPROD will be returned.
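A short sketch of the reverse operation covered above, from a dataframe back
to :term:`include file` content (the CSV file name is hypothetical, e.g. an
earlier `res2csv vfp` export):

    import pandas as pd

    from res2df import vfp

    vfp_df = pd.read_csv("vfpprod.csv")  # hypothetical export from 'res2csv vfp'
    inc_string = vfp.df2res(vfp_df, keyword="VFPPROD")
    with open("vfp.inc", "w", encoding="utf-8") as fhandle:
        fhandle.write(inc_string)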
@@ -957,16 +961,16 @@ def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
     unit_type = vfpprod_data["UNIT_TYPE"]

     # Write dataframe to string with Eclipse format for VFPPROD
-    ecl_str = "VFPPROD\n"
+    deck_str = "VFPPROD\n"
     if comment:
-        ecl_str += common.comment_formatter(comment)
+        deck_str += common.comment_formatter(comment)
     else:
-        ecl_str += "\n"
+        deck_str += "\n"

     unit_value = vfpprod_data["UNIT_TYPE"].value
     if vfpprod_data["UNIT_TYPE"] == UNITTYPE.DEFAULT:
         unit_value = "1*"

-    ecl_str += _write_basic_record(
+    deck_str += _write_basic_record(
         vfpprod_data["TABLE_NUMBER"],
         vfpprod_data["DATUM"],
         vfpprod_data["RATE_TYPE"].value,
@@ -977,37 +981,37 @@ def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
         unit_value,
         vfpprod_data["TAB_TYPE"].value,
     )
-    ecl_str += _write_vfp_range(
+    deck_str += _write_vfp_range(
         vfpprod_data["FLOW_VALUES"],
         rate_type.value,
         VFPPROD_UNITS[unit_type.value]["FLO"][rate_type.value],
         "%10.6g",
     )
-    ecl_str += _write_vfp_range(
+    deck_str += _write_vfp_range(
         vfpprod_data["THP_VALUES"],
         thp_type.value,
         VFPPROD_UNITS[unit_type.value]["THP"][thp_type.value],
         "%10.6g",
     )
-    ecl_str += _write_vfp_range(
+    deck_str += _write_vfp_range(
         vfpprod_data["WFR_VALUES"],
         wfr_type.value,
         VFPPROD_UNITS[unit_type.value]["WFR"][wfr_type.value],
         "%10.6g",
     )
-    ecl_str += _write_vfp_range(
+    deck_str += _write_vfp_range(
         vfpprod_data["GFR_VALUES"],
         gfr_type.value,
         VFPPROD_UNITS[unit_type.value]["GFR"][gfr_type.value],
         "%10.6g",
     )
-    ecl_str += _write_vfp_range(
+    deck_str += _write_vfp_range(
         vfpprod_data["ALQ_VALUES"],
         alq_type.value,
         VFPPROD_UNITS[unit_type.value]["ALQ"][alq_type.value],
         "%10.6g",
     )
-    ecl_str += _write_table_records(
+    deck_str += _write_table_records(
         vfpprod_data["THP_INDICES"],
         vfpprod_data["WFR_INDICES"],
         vfpprod_data["GFR_INDICES"],
@@ -1016,4 +1020,4 @@ def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
         "%10.6g",
     )

-    return ecl_str
+    return deck_str
diff --git a/ecl2df/wcon.py b/res2df/wcon.py
similarity index 79%
rename from ecl2df/wcon.py
rename to res2df/wcon.py
index 2ee0a3f55..35f261544 100644
--- a/ecl2df/wcon.py
+++ b/res2df/wcon.py
@@ -1,4 +1,4 @@
-"""Extract WCON* from an Eclipse deck"""
+"""Extract WCON* from a .DATA file"""

import argparse
import datetime
@@ -15,8 +15,8 @@
 except ImportError:
     pass

-from ecl2df import EclFiles, getLogger_ecl2csv
-from ecl2df.common import (
+from res2df import ResdataFiles, getLogger_res2csv
+from res2df.common import (
     parse_opmio_date_rec,
     parse_opmio_deckrecord,
     write_dframe_stdout_file,
@@ -28,14 +28,14 @@
 WCONKEYS = ["WCONHIST", "WCONINJE", "WCONINJH", "WCONPROD"]


-def df(deck: Union[EclFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame:
-    """Loop through the deck and pick up information found
+def df(deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame:
+    """Loop through the :term:`deck` and pick up the WCON* information found

-    The loop over the deck is a state machine, as it has to pick up dates
+    The loop over the :term:`deck` is a state machine, as it has to pick up dates
     """
-    if isinstance(deck, EclFiles):
-        deck = deck.get_ecldeck()
+    if isinstance(deck, ResdataFiles):
+        deck = deck.get_deck()

     wconrecords = []  # List of dicts of every line in input file
     date = None  # DATE columns will always be there, but can contain NaN
@@ -80,7 +80,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
         fill with arguments
     """
     parser.add_argument(
-        "DATAFILE", help="Name of Eclipse DATA file or Eclipse include file."
+ "DATAFILE", + help="Name of the .DATA input file or include file.", ) parser.add_argument( "-o", "--output", type=str, help="Name of output csv file.", default="wcon.csv" @@ -91,12 +92,12 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def wcon_main(args) -> None: """Read from disk and write CSV back to disk""" - logger = getLogger_ecl2csv( # pylint: disable:redefined-outer_name + logger = getLogger_res2csv( # pylint: disable:redefined-outer_name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - if eclfiles: - deck = eclfiles.get_ecldeck() + resdatafiles = ResdataFiles(args.DATAFILE) + if resdatafiles: + deck = resdatafiles.get_deck() wcon_df = df(deck) write_dframe_stdout_file( wcon_df, diff --git a/ecl2df/wellcompletiondata.py b/res2df/wellcompletiondata.py similarity index 87% rename from ecl2df/wellcompletiondata.py rename to res2df/wellcompletiondata.py index e7d225a04..6922ba3b9 100644 --- a/ecl2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -10,15 +10,15 @@ import pyarrow import pyarrow.feather -from ecl2df import common, compdat, getLogger_ecl2csv, wellconnstatus -from ecl2df.eclfiles import EclFiles +from res2df import common, compdat, getLogger_res2csv, wellconnstatus +from res2df.resdatafiles import ResdataFiles from .common import write_dframe_stdout_file logger = logging.getLogger(__name__) -class EclipseUnitSystem(str, Enum): +class UnitSystem(str, Enum): METRIC = "METRIC" FIELD = "FIELD" LAB = "LAB" @@ -33,7 +33,7 @@ class KHUnit(Enum): def df( - eclfiles: EclFiles, + resdatafiles: ResdataFiles, zonemap: Dict[int, str], use_wellconnstatus: bool = False, excl_well_startswith: Optional[str] = None, @@ -49,14 +49,14 @@ def df( only. Args: - eclfiles; EclFiles object + resdatafiles; ResdataFiles object zonemap: dictionary with layer->zone mapping use_wellconnstatus: boolean Returns: pd.DataFrame with one row per unique combination of well, zone and date. """ - compdat_df = compdat.df(eclfiles, zonemap=zonemap) + compdat_df = compdat.df(resdatafiles, zonemap=zonemap) if "ZONE" not in compdat_df.columns: logger.warning( "ZONE column not generated in compdat table. " @@ -75,13 +75,13 @@ def df( compdat_df = _excl_well_startswith(compdat_df, excl_well_startswith) if use_wellconnstatus: - wellconnstatus_df = wellconnstatus.df(eclfiles) + wellconnstatus_df = wellconnstatus.df(resdatafiles) compdat_df = _merge_compdat_and_connstatus(compdat_df, wellconnstatus_df) compdat_df = _aggregate_layer_to_zone(compdat_df) # Add metadata as an attribute the dataframe - meta = _get_metadata(eclfiles) + meta = _get_metadata(resdatafiles) # Slice meta to dataframe columns: compdat_df.attrs["meta"] = { column_key: meta[column_key] for column_key in compdat_df if column_key in meta @@ -90,29 +90,29 @@ def df( return compdat_df -def _get_ecl_unit_system(eclfiles: EclFiles) -> EclipseUnitSystem: - """Returns the unit system of an eclipse deck. The options are \ +def _get_unit_system(resdatafiles: ResdataFiles) -> UnitSystem: + """Returns the unit system of a :term:`.DATA file`. The options are \ METRIC, FIELD, LAB and PVT-M. If none of these are found, the function returns METRIC which is the default unit system in Eclipse. 
""" - unit_systems = [unitsystem.value for unitsystem in EclipseUnitSystem] - for keyword in eclfiles.get_ecldeck(): + unit_systems = [unitsystem.value for unitsystem in UnitSystem] + for keyword in resdatafiles.get_deck(): if keyword.name in unit_systems: - return EclipseUnitSystem(keyword.name) - return EclipseUnitSystem.METRIC + return UnitSystem(keyword.name) + return UnitSystem.METRIC -def _get_metadata(eclfiles: EclFiles) -> Dict[str, Dict[str, Any]]: +def _get_metadata(resdatafiles: ResdataFiles) -> Dict[str, Dict[str, Any]]: """Provide metadata for the well completion data export""" meta: Dict[str, Dict[str, str]] = {} - unitsystem = _get_ecl_unit_system(eclfiles) + unitsystem = _get_unit_system(resdatafiles) kh_units = { - EclipseUnitSystem.METRIC: KHUnit.METRIC, - EclipseUnitSystem.FIELD: KHUnit.FIELD, - EclipseUnitSystem.LAB: KHUnit.LAB, - EclipseUnitSystem.PVTM: KHUnit.PVTM, + UnitSystem.METRIC: KHUnit.METRIC, + UnitSystem.FIELD: KHUnit.FIELD, + UnitSystem.LAB: KHUnit.LAB, + UnitSystem.PVTM: KHUnit.PVTM, } meta["KH"] = {} meta["KH"]["unit"] = kh_units[unitsystem].value @@ -250,7 +250,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", type=str, - help="Name of Eclipse DATA file. " + "UNSMRY file must lie alongside.", + help="Name of the .DATA input file for the reservoir simulator." + + " There must exist a UNSMRY file with the same path and basename", ) parser.add_argument( "--zonemap", @@ -286,16 +287,16 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def wellcompletiondata_main(args): """Entry-point for module, for command line utility""" - logger = getLogger_ecl2csv(__name__, vars(args)) + logger = getLogger_res2csv(__name__, vars(args)) - eclfiles = EclFiles(args.DATAFILE) + resdatafiles = ResdataFiles(args.DATAFILE) if not Path(args.zonemap).is_file(): wellcompletiondata_df = pd.DataFrame() logger.info(f"Zonemap not found: {args.zonemap}") else: zonemap = common.convert_lyrlist_to_zonemap(common.parse_lyrfile(args.zonemap)) wellcompletiondata_df = df( - eclfiles, zonemap, args.use_wellconnstatus, args.excl_well_startswith + resdatafiles, zonemap, args.use_wellconnstatus, args.excl_well_startswith ) logger.info( f"Well completion data successfully generated with zonemap: {zonemap}" diff --git a/ecl2df/wellconnstatus.py b/res2df/wellconnstatus.py similarity index 87% rename from ecl2df/wellconnstatus.py rename to res2df/wellconnstatus.py index 87ef90c3c..69cf06706 100644 --- a/ecl2df/wellconnstatus.py +++ b/res2df/wellconnstatus.py @@ -8,15 +8,15 @@ import numpy as np import pandas as pd -from ecl2df import getLogger_ecl2csv, summary -from ecl2df.eclfiles import EclFiles +from res2df import getLogger_res2csv, summary +from res2df.resdatafiles import ResdataFiles from .common import write_dframe_stdout_file logger = logging.getLogger(__name__) -def df(eclfiles: EclFiles) -> pd.DataFrame: +def df(resdatafiles: ResdataFiles) -> pd.DataFrame: """Exctracts connection status history for each compdat connection that is included in the summary data on the form CPI:WELL,I,J,K. CPI stands for connection productivity index. @@ -28,7 +28,7 @@ def df(eclfiles: EclFiles) -> pd.DataFrame: The output data set is very sparse compared to the CPI summary data. 
""" - smry = summary.df(eclfiles, column_keys="CPI*") + smry = summary.df(resdatafiles, column_keys="CPI*") return _extract_status_changes(smry) @@ -96,7 +96,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", type=str, - help="Name of Eclipse DATA file. " + "UNSMRY file must lie alongside.", + help="Name of the .DATA input file for the reservoir simulator." + + " There must exist a UNSMRY file with the same path and basename.", ) parser.add_argument( "-o", @@ -114,10 +115,10 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def wellconnstatus_main(args): """Entry-point for module, for command line utility""" - logger = getLogger_ecl2csv(__name__, vars(args)) - eclfiles = EclFiles(args.DATAFILE) + logger = getLogger_res2csv(__name__, vars(args)) + resdatafiles = ResdataFiles(args.DATAFILE) - wellconnstatus_df = df(eclfiles) + wellconnstatus_df = df(resdatafiles) write_dframe_stdout_file( wellconnstatus_df, args.output, index=False, caller_logger=logger ) diff --git a/setup.cfg b/setup.cfg index ce6e3e7c4..b2fbe9cd8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,7 +6,7 @@ test=pytest [flake8] max-line-length = 88 -exclude = docs, ecl2df/__init__.py +exclude = docs, res2df/__init__.py [tool:pytest] markers = diff --git a/setup.py b/setup.py index e75d37e5b..866f09358 100644 --- a/setup.py +++ b/setup.py @@ -44,20 +44,20 @@ } setup( - name="ecl2df", - use_scm_version={"write_to": "ecl2df/version.py"}, + name="res2df", + use_scm_version={"write_to": "res2df/version.py"}, cmdclass=cmdclass, - description="Convert Eclipse 100 input and output to DataFrames", + description="Convert reservoir simulator input and output to DataFrames", long_description=LONG_DESCRIPTION, long_description_content_type="text/markdown", - url="http://github.com/equinor/ecl2df", + url="http://github.com/equinor/res2df", author="Håvard Berland", author_email="havb@equinor.com", license="GPLv3", - packages=find_packages(include=["ecl2df*"]), - package_dir={"ecl2df": "ecl2df"}, + packages=find_packages(include=["res2df*"]), + package_dir={"res2df": "res2df"}, package_data={ - "ecl2df": [ + "res2df": [ "opmkeywords/*", "config_jobs/*", "py.typed", @@ -67,11 +67,11 @@ zip_safe=False, entry_points={ "console_scripts": [ - "csv2ecl=ecl2df.csv2ecl:main", - "ecl2csv=ecl2df.ecl2csv:main", - "ecl2arrow=ecl2df.ecl2csv:main", + "csv2res=res2df.csv2res:main", + "res2csv=res2df.res2csv:main", + "res2arrow=res2df.res2csv:main", ], - "ert": ["ecl2df_jobs = ecl2df.hook_implementations.jobs"], + "ert": ["res2df_jobs = res2df.hook_implementations.jobs"], }, test_suite="tests", install_requires=REQUIREMENTS, diff --git a/tests/conftest.py b/tests/conftest.py index 2440e3ce4..3db98cd74 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,10 +2,10 @@ import pytest -import ecl2df +import res2df @pytest.fixture -def path_to_ecl2df(): - """Path to installed ecl2df module""" - return Path(ecl2df.__file__).parent +def path_to_res2df(): + """Path to installed res2df module""" + return Path(res2df.__file__).parent diff --git a/tests/data/reek/eclipse/model/2_R001_REEK-0-OPMFLOW.PRT b/tests/data/reek/eclipse/model/2_R001_REEK-0-OPMFLOW.PRT index 477f9678f..3c8940fd2 100644 --- a/tests/data/reek/eclipse/model/2_R001_REEK-0-OPMFLOW.PRT +++ b/tests/data/reek/eclipse/model/2_R001_REEK-0-OPMFLOW.PRT @@ -198,404 +198,404 @@ VtkWriteWaterFormationVolumeFactor="0" ZoltanImbalanceTol="1.1" Reading deck file '2_R001_REEK-0.DATA' - 0 Reading RUNSPEC in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 11 - 1 Reading TITLE in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 14 - 2 Reading SAVE in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 19 - 3 Reading NOECHO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 22 - 4 Reading DIMENS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 25 - 5 Reading START in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 29 - 6 Reading OIL in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 36 - 7 Reading GAS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 37 - 8 Reading WATER in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 38 - 9 Reading METRIC in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 42 - 10 Reading GRIDOPTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 44 - 11 Reading EQLOPTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 52 - 12 Reading TABDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 56 - 13 Reading EQLDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 61 - 14 Reading REGDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 65 - 15 Reading FAULTDIM in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 70 - 16 Reading WELLDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 74 - 17 Reading VFPPDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 78 - 18 Reading VFPIDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 83 - 19 Reading SMRYDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 87 - 20 Reading UNIFIN in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 91 - 21 Reading UNIFOUT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 92 - 22 Reading NOINSPEC in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 95 - 23 Reading NORSSPEC in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 98 - 24 Reading GRID in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 104 - 25 Reading NOECHO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 107 - 26 Reading INIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 110 - 27 Reading GRIDFILE in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 113 - 28 Reading MESSAGES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 117 - 29 Reading PINCH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 121 - 30 Reading NOECHO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 125 - 31 Reading NOECHO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 4 - 32 Reading MAPUNITS in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 12 - 33 Reading MAPAXES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 15 - 34 Reading GRIDUNIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 19 - 35 Reading SPECGRID in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 22 - 36 Reading GDORIENT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 25 - 37 Reading COORD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 29 - 38 Reading ZCORN in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 3229 - 39 Reading ACTNUM in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 34211 - 40 Reading ECHO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 34222 - 41 Reading FAULTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.faults line 10 - 42 Reading PORO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.poro line 11 - 43 Reading PERMX in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.perm line 11 - 44 Reading PERMY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.perm line 7167 - 45 Reading PERMZ in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.perm line 14323 - 46 Reading MULTIPLY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 146 - 47 Reading MULTFLT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.multflt line 1 - 48 Reading EQUALS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.multz line 1 - 49 Reading EDIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 163 - 50 Reading PROPS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 170 - 51 Reading SWOF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/swof.inc line 9 - 52 Reading SGOF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/sgof.txt line 9 - 53 Reading EQUALS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.endpoints line 1 - 54 Reading COPY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 184 - 55 Reading MULTIPLY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 190 - 56 Reading ADD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 193 - 57 Reading SCALECRS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 197 - 58 Reading ROCKOPTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt line 1 - 59 Reading ROCK in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt line 4 - 60 Reading PVTW in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt line 7 - 61 Reading PVTO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt line 10 - 62 Reading PVDG in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt line 139 - 63 Reading DENSITY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt line 157 - 64 Reading SWATINIT in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.swatinit line 11 - 65 Reading REGIONS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 212 - 66 Reading EQLNUM in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/regions/reek.eqlnum line 11 - 67 Reading FIPNUM in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/regions/reek.fipnum line 11 - 68 Reading EQUALS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 222 - 69 Reading SOLUTION in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 231 - 70 Reading RTEMP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 233 - 71 Reading EQUIL in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/solution/reek.equil line 1 - 72 Reading RSVD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 240 - 73 Reading RPTSOL in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 246 - 74 Reading RPTRST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 249 - 75 Reading SUMMARY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 256 - 76 Reading FOPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 7 - 77 Reading FOPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 8 - 78 Reading FGPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 9 - 79 Reading FGPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 10 - 80 Reading FWPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 11 - 81 Reading FWPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 12 - 82 Reading FLPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 13 - 83 Reading FLPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 14 - 84 Reading FVPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 15 - 85 Reading FOPRF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 16 - 86 Reading FOPRS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 17 - 87 Reading FGSR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 18 - 88 Reading FGPRF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 19 - 89 Reading FGPRS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 20 - 90 Reading FOPP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 24 - 91 Reading FWPP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 25 - 92 Reading FGPP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 26 - 93 Reading FMWPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 30 - 94 Reading FMWIN in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 31 - 95 Reading FVIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 33 - 96 Reading FWIR in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 34 - 97 Reading FWIRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 35 - 98 Reading FGIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 36 - 99 Reading FGIRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 37 - 100 Reading FGLIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 39 - 101 Reading FMCTP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 43 - 102 Reading FVPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 48 - 103 Reading FOPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 49 - 104 Reading FOPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 50 - 105 Reading FWPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 51 - 106 Reading FWPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 52 - 107 Reading FGPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 53 - 108 Reading FGPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 54 - 109 Reading FWIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 55 - 110 Reading FWITH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 56 - 111 Reading FGIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 57 - 112 Reading FGITH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 58 - 113 Reading FOPTF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 59 - 114 Reading FOPTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 60 - 115 Reading FWIP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 64 - 116 Reading FOIP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 65 - 117 Reading FGIP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 66 - 118 Reading FWCT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 68 - 119 Reading FWCTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 69 - 120 Reading FGOR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 70 - 121 Reading FGORH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 71 - 122 Reading FGLR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 72 - 123 Reading FWGR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 73 - 124 Reading FPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 75 - 125 Reading RPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 77 - 126 Reading ROIP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 79 - 127 Reading ROIPL in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 81 - 128 Reading ROIPG in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 83 - 129 Reading RGIP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 85 - 130 Reading RGIPL in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 87 - 131 Reading RGIPG in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 89 - 132 Reading RGPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 91 - 133 Reading RGPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 93 - 134 Reading GMWPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 101 - 135 Reading GGLIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 104 - 136 Reading GOPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 106 - 137 Reading GOPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 108 - 138 Reading GGPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 110 - 139 Reading GGPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 112 - 140 Reading GWPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 114 - 141 Reading GWPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 116 - 142 Reading GVPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 118 - 143 Reading GLPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 120 - 144 Reading GOPRF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 122 - 145 Reading GOPRS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 124 - 146 Reading GGPRF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 126 - 147 Reading GGPRS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 128 - 148 Reading GWCT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 130 - 149 Reading GWCTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 132 - 150 Reading GGOR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 134 - 151 Reading GGORH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 136 - 152 Reading GWGR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 138 - 153 Reading GGLR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 140 - 154 Reading GOPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 142 - 155 Reading GOPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 144 - 156 Reading GGPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 146 - 157 Reading GGPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 148 - 158 Reading GWPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 150 - 159 Reading GWPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 152 - 160 Reading 
GVPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 154 - 161 Reading GLPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 156 - 162 Reading GOPTF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 158 - 163 Reading GOPTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 160 - 164 Reading GGPTF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 162 - 165 Reading GGPTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 164 - 166 Reading GWIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 166 - 167 Reading GVIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 168 - 168 Reading GWIRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 170 - 169 Reading GGIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 172 - 170 Reading GGIRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 174 - 171 Reading GOPP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 177 - 172 Reading GGPP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 179 - 173 Reading GWPP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 181 - 174 Reading GMCTP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 186 - 175 Reading WOPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 190 - 176 Reading WOPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 192 - 177 Reading WGPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 194 - 178 Reading WGPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 196 - 179 Reading WWPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 198 - 180 Reading WWPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 200 - 181 Reading WOPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 203 - 182 Reading WWPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 205 - 183 Reading WGPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 207 - 184 Reading WOPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 209 - 185 Reading WWPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 211 - 186 Reading WGPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 213 - 187 Reading WWCT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 216 - 188 Reading WWCTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 218 - 189 Reading WGOR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 220 - 190 Reading WGORH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 222 - 191 Reading WWIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 224 - 
192 Reading WWIRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 226 - 193 Reading WGIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 228 - 194 Reading WGIRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 230 - 195 Reading WWIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 232 - 196 Reading WWITH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 234 - 197 Reading WGIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 236 - 198 Reading WGITH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 238 - 199 Reading WBHP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 241 - 200 Reading WTHP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 243 - 201 Reading WPI in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 245 - 202 Reading WVPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 247 - 203 Reading WBP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 249 - 204 Reading WBP4 in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 251 - 205 Reading WBP9 in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 253 - 206 Reading WMCTL in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 255 - 207 Reading WLPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 257 - 208 Reading WGLIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 260 - 209 Reading WOGLR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 265 - 210 Reading BPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 269 - 211 Reading TCPU in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 277 - 212 Reading TCPUDAY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 278 - 213 Reading SCHEDULE in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 265 - 214 Reading TUNING in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 267 - 215 Reading GRUPTREE in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 4 - 216 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 9 - 217 Reading WELSPECS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 13 - 218 Reading COMPORD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 21 - 219 Reading COMPDAT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 28 - 220 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 96 - 221 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 103 - 222 Reading WRFTPLT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 108 - 223 Reading DATES in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 116 - 224 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 120 - 225 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 127 - 226 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 132 - 227 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 136 - 228 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 143 - 229 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 147 - 230 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 154 - 231 Reading WELSPECS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 158 - 232 Reading COMPORD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 163 - 233 Reading COMPDAT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 167 - 234 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 187 - 235 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 194 - 236 Reading WRFTPLT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 199 - 237 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 204 - 238 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 208 - 239 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 215 - 240 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 220 - 241 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 224 - 242 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 231 - 243 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 236 - 244 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 240 - 245 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 247 - 246 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 252 - 247 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 256 - 248 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 263 - 249 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 268 - 250 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 272 - 251 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 279 - 252 Reading DATES in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 284 - 253 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 288 - 254 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 295 - 255 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 300 - 256 Reading WELSPECS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 304 - 257 Reading COMPORD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 310 - 258 Reading COMPDAT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 315 - 259 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 350 - 260 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 359 - 261 Reading WRFTPLT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 364 - 262 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 370 - 263 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 374 - 264 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 383 - 265 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 388 - 266 Reading WELSPECS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 392 - 267 Reading COMPORD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 397 - 268 Reading COMPDAT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 401 - 269 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 421 - 270 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 430 - 271 Reading WRFTPLT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 436 - 272 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 441 - 273 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 445 - 274 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 454 - 275 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 460 - 276 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 464 - 277 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 473 - 278 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 478 - 279 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 482 - 280 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 491 - 281 Reading DATES in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 496 - 282 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 500 - 283 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 509 - 284 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 514 - 285 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 518 - 286 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 527 - 287 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 532 - 288 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 536 - 289 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 545 - 290 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 550 - 291 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 554 - 292 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 563 - 293 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 568 - 294 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 572 - 295 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 581 - 296 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 586 - 297 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 590 - 298 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 599 - 299 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 604 - 300 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 608 - 301 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 617 - 302 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 622 - 303 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 626 - 304 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 635 - 305 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 640 - 306 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 644 - 307 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 653 - 308 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 658 - 309 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 662 - 310 Reading WCONINJH in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 671 - 311 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 676 - 312 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 680 - 313 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 689 - 314 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 694 - 315 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 698 - 316 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 707 - 317 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 712 - 318 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 716 - 319 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 725 - 320 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 730 - 321 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 734 - 322 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 743 - 323 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 748 - 324 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 752 - 325 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 761 - 326 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 766 - 327 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 770 - 328 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 779 - 329 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 784 - 330 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 788 - 331 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 797 - 332 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 802 - 333 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 806 - 334 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 815 - 335 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 820 - 336 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 824 - 337 Reading SAVE in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 276 - 338 Reading TSTEP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 279 + 0 Reading RUNSPEC in 
/home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 11 + 1 Reading TITLE in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 14 + 2 Reading SAVE in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 19 + 3 Reading NOECHO in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 22 + 4 Reading DIMENS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 25 + 5 Reading START in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 29 + 6 Reading OIL in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 36 + 7 Reading GAS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 37 + 8 Reading WATER in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 38 + 9 Reading METRIC in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 42 + 10 Reading GRIDOPTS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 44 + 11 Reading EQLOPTS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 52 + 12 Reading TABDIMS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 56 + 13 Reading EQLDIMS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 61 + 14 Reading REGDIMS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 65 + 15 Reading FAULTDIM in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 70 + 16 Reading WELLDIMS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 74 + 17 Reading VFPPDIMS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 78 + 18 Reading VFPIDIMS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 83 + 19 Reading SMRYDIMS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 87 + 20 Reading UNIFIN in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 91 + 21 Reading UNIFOUT in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 92 + 22 Reading NOINSPEC in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 95 + 23 Reading NORSSPEC in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 98 + 24 Reading GRID in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 104 + 25 Reading NOECHO in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 107 + 26 Reading INIT in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 110 + 27 Reading GRIDFILE in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 113 + 28 Reading MESSAGES in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 117 + 29 Reading PINCH in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 121 + 30 Reading NOECHO in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 125 + 31 Reading NOECHO in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid line 4 + 32 Reading MAPUNITS in 
/home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid line 12
+ 33 Reading MAPAXES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid line 15
+ 34 Reading GRIDUNIT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid line 19
+ 35 Reading SPECGRID in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid line 22
+ 36 Reading GDORIENT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid line 25
+ 37 Reading COORD in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid line 29
+ 38 Reading ZCORN in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid line 3229
+ 39 Reading ACTNUM in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid line 34211
+ 40 Reading ECHO in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid line 34222
+ 41 Reading FAULTS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.faults line 10
+ 42 Reading PORO in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.poro line 11
+ 43 Reading PERMX in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.perm line 11
+ 44 Reading PERMY in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.perm line 7167
+ 45 Reading PERMZ in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.perm line 14323
+ 46 Reading MULTIPLY in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 146
+ 47 Reading MULTFLT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.multflt line 1
+ 48 Reading EQUALS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.multz line 1
+ 49 Reading EDIT in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 163
+ 50 Reading PROPS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 170
+ 51 Reading SWOF in /home/berland/projects/res2df/tests/data/reek/eclipse/include/props/swof.inc line 9
+ 52 Reading SGOF in /home/berland/projects/res2df/tests/data/reek/eclipse/include/props/sgof.txt line 9
+ 53 Reading EQUALS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/props/reek.endpoints line 1
+ 54 Reading COPY in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 184
+ 55 Reading MULTIPLY in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 190
+ 56 Reading ADD in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 193
+ 57 Reading SCALECRS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 197
+ 58 Reading ROCKOPTS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/props/reek.pvt line 1
+ 59 Reading ROCK in /home/berland/projects/res2df/tests/data/reek/eclipse/include/props/reek.pvt line 4
+ 60 Reading PVTW in /home/berland/projects/res2df/tests/data/reek/eclipse/include/props/reek.pvt line 7
+ 61 Reading PVTO in /home/berland/projects/res2df/tests/data/reek/eclipse/include/props/reek.pvt line 10
+ 62 Reading PVDG in /home/berland/projects/res2df/tests/data/reek/eclipse/include/props/reek.pvt line 139
+ 63 Reading DENSITY in /home/berland/projects/res2df/tests/data/reek/eclipse/include/props/reek.pvt line 157
+ 64 Reading SWATINIT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/props/reek.swatinit line 11
+ 65 Reading REGIONS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 212
+ 66 Reading EQLNUM in /home/berland/projects/res2df/tests/data/reek/eclipse/include/regions/reek.eqlnum line 11
+ 67 Reading FIPNUM in /home/berland/projects/res2df/tests/data/reek/eclipse/include/regions/reek.fipnum line 11
+ 68 Reading EQUALS in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 222
+ 69 Reading SOLUTION in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 231
+ 70 Reading RTEMP in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 233
+ 71 Reading EQUIL in /home/berland/projects/res2df/tests/data/reek/eclipse/include/solution/reek.equil line 1
+ 72 Reading RSVD in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 240
+ 73 Reading RPTSOL in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 246
+ 74 Reading RPTRST in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 249
+ 75 Reading SUMMARY in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 256
+ 76 Reading FOPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 7
+ 77 Reading FOPRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 8
+ 78 Reading FGPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 9
+ 79 Reading FGPRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 10
+ 80 Reading FWPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 11
+ 81 Reading FWPRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 12
+ 82 Reading FLPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 13
+ 83 Reading FLPRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 14
+ 84 Reading FVPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 15
+ 85 Reading FOPRF in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 16
+ 86 Reading FOPRS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 17
+ 87 Reading FGSR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 18
+ 88 Reading FGPRF in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 19
+ 89 Reading FGPRS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 20
+ 90 Reading FOPP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 24
+ 91 Reading FWPP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 25
+ 92 Reading FGPP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 26
+ 93 Reading FMWPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 30
+ 94 Reading FMWIN in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 31
+ 95 Reading FVIR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 33
+ 96 Reading FWIR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 34
+ 97 Reading FWIRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 35
+ 98 Reading FGIR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 36
+ 99 Reading FGIRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 37
+ 100 Reading FGLIR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 39
+ 101 Reading FMCTP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 43
+ 102 Reading FVPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 48
+ 103 Reading FOPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 49
+ 104 Reading FOPTH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 50
+ 105 Reading FWPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 51
+ 106 Reading FWPTH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 52
+ 107 Reading FGPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 53
+ 108 Reading FGPTH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 54
+ 109 Reading FWIT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 55
+ 110 Reading FWITH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 56
+ 111 Reading FGIT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 57
+ 112 Reading FGITH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 58
+ 113 Reading FOPTF in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 59
+ 114 Reading FOPTS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 60
+ 115 Reading FWIP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 64
+ 116 Reading FOIP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 65
+ 117 Reading FGIP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 66
+ 118 Reading FWCT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 68
+ 119 Reading FWCTH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 69
+ 120 Reading FGOR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 70
+ 121 Reading FGORH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 71
+ 122 Reading FGLR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 72
+ 123 Reading FWGR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 73
+ 124 Reading FPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 75
+ 125 Reading RPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 77
+ 126 Reading ROIP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 79
+ 127 Reading ROIPL in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 81
+ 128 Reading ROIPG in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 83
+ 129 Reading RGIP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 85
+ 130 Reading RGIPL in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 87
+ 131 Reading RGIPG in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 89
+ 132 Reading RGPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 91
+ 133 Reading RGPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 93
+ 134 Reading GMWPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 101
+ 135 Reading GGLIR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 104
+ 136 Reading GOPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 106
+ 137 Reading GOPRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 108
+ 138 Reading GGPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 110
+ 139 Reading GGPRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 112
+ 140 Reading GWPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 114
+ 141 Reading GWPRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 116
+ 142 Reading GVPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 118
+ 143 Reading GLPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 120
+ 144 Reading GOPRF in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 122
+ 145 Reading GOPRS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 124
+ 146 Reading GGPRF in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 126
+ 147 Reading GGPRS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 128
+ 148 Reading GWCT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 130
+ 149 Reading GWCTH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 132
+ 150 Reading GGOR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 134
+ 151 Reading GGORH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 136
+ 152 Reading GWGR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 138
+ 153 Reading GGLR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 140
+ 154 Reading GOPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 142
+ 155 Reading GOPTH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 144
+ 156 Reading GGPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 146
+ 157 Reading GGPTH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 148
+ 158 Reading GWPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 150
+ 159 Reading GWPTH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 152
+ 160 Reading GVPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 154
+ 161 Reading GLPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 156
+ 162 Reading GOPTF in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 158
+ 163 Reading GOPTS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 160
+ 164 Reading GGPTF in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 162
+ 165 Reading GGPTS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 164
+ 166 Reading GWIR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 166
+ 167 Reading GVIR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 168
+ 168 Reading GWIRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 170
+ 169 Reading GGIR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 172
+ 170 Reading GGIRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 174
+ 171 Reading GOPP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 177
+ 172 Reading GGPP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 179
+ 173 Reading GWPP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 181
+ 174 Reading GMCTP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 186
+ 175 Reading WOPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 190
+ 176 Reading WOPRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 192
+ 177 Reading WGPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 194
+ 178 Reading WGPRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 196
+ 179 Reading WWPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 198
+ 180 Reading WWPRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 200
+ 181 Reading WOPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 203
+ 182 Reading WWPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 205
+ 183 Reading WGPT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 207
+ 184 Reading WOPTH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 209
+ 185 Reading WWPTH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 211
+ 186 Reading WGPTH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 213
+ 187 Reading WWCT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 216
+ 188 Reading WWCTH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 218
+ 189 Reading WGOR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 220
+ 190 Reading WGORH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 222
+ 191 Reading WWIR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 224
+ 192 Reading WWIRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 226
+ 193 Reading WGIR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 228
+ 194 Reading WGIRH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 230
+ 195 Reading WWIT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 232
+ 196 Reading WWITH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 234
+ 197 Reading WGIT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 236
+ 198 Reading WGITH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 238
+ 199 Reading WBHP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 241
+ 200 Reading WTHP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 243
+ 201 Reading WPI in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 245
+ 202 Reading WVPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 247
+ 203 Reading WBP in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 249
+ 204 Reading WBP4 in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 251
+ 205 Reading WBP9 in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 253
+ 206 Reading WMCTL in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 255
+ 207 Reading WLPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 257
+ 208 Reading WGLIR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 260
+ 209 Reading WOGLR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 265
+ 210 Reading BPR in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 269
+ 211 Reading TCPU in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 277
+ 212 Reading TCPUDAY in /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 278
+ 213 Reading SCHEDULE in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 265
+ 214 Reading TUNING in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 267
+ 215 Reading GRUPTREE in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 4
+ 216 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 9
+ 217 Reading WELSPECS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 13
+ 218 Reading COMPORD in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 21
+ 219 Reading COMPDAT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 28
+ 220 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 96
+ 221 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 103
+ 222 Reading WRFTPLT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 108
+ 223 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 116
+ 224 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 120
+ 225 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 127
+ 226 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 132
+ 227 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 136
+ 228 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 143
+ 229 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 147
+ 230 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 154
+ 231 Reading WELSPECS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 158
+ 232 Reading COMPORD in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 163
+ 233 Reading COMPDAT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 167
+ 234 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 187
+ 235 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 194
+ 236 Reading WRFTPLT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 199
+ 237 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 204
+ 238 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 208
+ 239 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 215
+ 240 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 220
+ 241 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 224
+ 242 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 231
+ 243 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 236
+ 244 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 240
+ 245 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 247
+ 246 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 252
+ 247 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 256
+ 248 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 263
+ 249 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 268
+ 250 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 272
+ 251 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 279
+ 252 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 284
+ 253 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 288
+ 254 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 295
+ 255 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 300
+ 256 Reading WELSPECS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 304
+ 257 Reading COMPORD in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 310
+ 258 Reading COMPDAT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 315
+ 259 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 350
+ 260 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 359
+ 261 Reading WRFTPLT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 364
+ 262 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 370
+ 263 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 374
+ 264 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 383
+ 265 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 388
+ 266 Reading WELSPECS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 392
+ 267 Reading COMPORD in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 397
+ 268 Reading COMPDAT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 401
+ 269 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 421
+ 270 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 430
+ 271 Reading WRFTPLT in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 436
+ 272 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 441
+ 273 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 445
+ 274 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 454
+ 275 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 460
+ 276 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 464
+ 277 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 473
+ 278 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 478
+ 279 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 482
+ 280 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 491
+ 281 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 496
+ 282 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 500
+ 283 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 509
+ 284 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 514
+ 285 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 518
+ 286 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 527
+ 287 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 532
+ 288 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 536
+ 289 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 545
+ 290 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 550
+ 291 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 554
+ 292 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 563
+ 293 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 568
+ 294 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 572
+ 295 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 581
+ 296 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 586
+ 297 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 590
+ 298 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 599
+ 299 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 604
+ 300 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 608
+ 301 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 617
+ 302 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 622
+ 303 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 626
+ 304 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 635
+ 305 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 640
+ 306 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 644
+ 307 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 653
+ 308 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 658
+ 309 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 662
+ 310 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 671
+ 311 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 676
+ 312 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 680
+ 313 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 689
+ 314 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 694
+ 315 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 698
+ 316 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 707
+ 317 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 712
+ 318 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 716
+ 319 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 725
+ 320 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 730
+ 321 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 734
+ 322 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 743
+ 323 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 748
+ 324 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 752
+ 325 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 761
+ 326 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 766
+ 327 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 770
+ 328 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 779
+ 329 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 784
+ 330 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 788
+ 331 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 797
+ 332 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 802
+ 333 Reading WCONHIST in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 806
+ 334 Reading WCONINJH in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 815
+ 335 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 820
+ 336 Reading DATES in /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 824
+ 337 Reading SAVE in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 276
+ 338 Reading TSTEP in /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 279
Warning: Unsupported keywords or keyword items:
SAVE: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 19
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 19
NOECHO: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 22
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 22
NOINSPEC: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 95
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 95
NORSSPEC: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 98
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 98
NOECHO: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 107
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 107
PINCH: invalid value 'NOGAP' in record 1 for item 2
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 121
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 121
NOECHO: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 125
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 125
NOECHO: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid, line 4
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid, line 4
MAPUNITS: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid, line 12
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid, line 12
GRIDUNIT: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid, line 19
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid, line 19
GDORIENT: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid, line 25
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid, line 25
ECHO: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid, line 34222
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.grid, line 34222
ROCKOPTS: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt, line 1
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/include/props/reek.pvt, line 1
WBP: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry, line 249
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry, line 249
WBP4: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry, line 251
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry, line 251
WBP9: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry, line 253
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry, line 253
SAVE: keyword not supported
- In file: /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 276
+ In file: /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA, line 276
Creating cornerpoint grid from keywords ZCORN, COORD and ACTNUM
-Loading faults from FAULTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.faults line 10
+Loading faults from FAULTS in /home/berland/projects/res2df/tests/data/reek/eclipse/include/grid/reek.faults line 10
Setting fault transmissibility multiplier 0.001 for fault F1
Setting fault transmissibility multiplier 0.001 for fault F2
Setting fault transmissibility multiplier 0.001 for fault F3
@@ -604,10 +604,10 @@ Setting fault transmissibility multiplier 0.001 for fault F4
Setting fault transmissibility multiplier 0.001 for fault F5
Processing dynamic information from
-/home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 265
+/home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 265
Initializing report step 1/38 at 2000-01-01 0.0 DAYS line 265
Processing keyword TUNING at line 267
-Reading from: /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 4
+Reading from: /home/berland/projects/res2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 4
Processing keyword GRUPTREE at line 4

Complete report step 1 (31.0 DAYS) at 2000-02-01 (0.0 DAYS)
@@ -805,13 +805,13 @@ Initializing report step 37/38 at 2002-12-31 (1065.0 DAYS) - line 820
Complete report step 37 (1.0 DAYS) at 2003-01-01 (1095.0 DAYS)

Initializing report step 38/38 at 2003-01-01 (1095.0 DAYS) - line 824
-Reading from: /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 276
+Reading from: /home/berland/projects/res2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 276
Processing keyword SAVE at line 276
Complete report step 38 (1.0 DAYS) at 2003-01-02 (1096.0 DAYS)

Warning: Request for missing group GMWIN in GMWPR
-In /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 101
+In /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 101

Processing grid
Total number of active cells: 35641 / total pore volume: 399077425 RM3
@@ -830,25 +830,25 @@ Property tree for linear solver:

Warning: Unhandled summary keyword FGSR
-In /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 18
+In /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 18
Warning: Unhandled summary keyword FWGR
-In /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 73
+In /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 73
Warning: Unhandled summary keyword GWGR
-In /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 138
+In /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 138
Warning: Unhandled summary keyword WBP
-In /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 249
+In /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 249
Warning: Unhandled summary keyword WBP4
-In /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 251
+In /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 251
Warning: Unhandled summary keyword WBP9
-In /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 253
+In /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 253
Warning: Unhandled summary keyword WOGLR
-In /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 265
+In /home/berland/projects/res2df/tests/data/reek/eclipse/include/summary/reek.smry line 265

===============Saturation Functions Diagnostics===============
diff --git a/tests/test_common.py b/tests/test_common.py
index e602ff15b..65d6e63d5 100644
--- a/tests/test_common.py
+++ b/tests/test_common.py
@@ -1,4 +1,4 @@
-"""Test module for ecl2df.common"""
+"""Test module for res2df.common"""

import datetime
import os
@@ -9,7 +9,7 @@
import pandas as pd
import pytest

-from ecl2df import common, eclfiles, equil
+from res2df import common, equil, resdatafiles

try:
    # pylint: disable=unused-import
@@ -147,19 +147,19 @@ def test_parse_opmio_deckrecord():
)
def test_handle_wanted_keywords(wanted, deckstr, supported, expected):
    """Test that we can handle list of wanted, supported and available keywords."""
-    deck = eclfiles.EclFiles.str2deck(deckstr)
+    deck = resdatafiles.ResdataFiles.str2deck(deckstr)
    assert common.handle_wanted_keywords(wanted, deck, supported) == expected


-def df2ecl_equil(dframe, comment: str = None):
-    """Wrapper function to be able to test df2ecl
+def df2res_equil(dframe, comment: str = None):
+    """Wrapper function to be able to test df2res
    (it asks for a function in the calling module)"""
-    return equil.df2ecl_equil(dframe, comment)
+    return equil.df2res_equil(dframe, comment)


-def test_df2ecl():
-    """Test general properties of df2ecl.
+def test_df2res():
+    """Test general properties of df2res.
    This function is mainly tested in each submodule."""
    dframe = pd.DataFrame(
@@ -177,33 +177,33 @@
    )
    with pytest.raises(AssertionError):
        # supported keywords are not supplied
-        common.df2ecl(dframe)
+        common.df2res(dframe)
    with pytest.raises(AssertionError):
-        common.df2ecl(dframe, supported=None)
+        common.df2res(dframe, supported=None)
    with pytest.raises(ValueError, match="KEYWORD must be in the dataframe"):
-        common.df2ecl(
+        common.df2res(
            dframe.drop("KEYWORD", axis=1), keywords=["EQUIL"], supported=["EQUIL"]
        )

-    string = common.df2ecl(dframe, supported=["EQUIL"])
+    string = common.df2res(dframe, supported=["EQUIL"])
    # The next calls differ only in timestamp:
    assert len(string) == len(
-        common.df2ecl(dframe, keywords="EQUIL", supported=["EQUIL"])
+        common.df2res(dframe, keywords="EQUIL", supported=["EQUIL"])
    )
    assert len(string) == len(
-        common.df2ecl(dframe, keywords=["EQUIL"], supported=["EQUIL"])
+        common.df2res(dframe, keywords=["EQUIL"], supported=["EQUIL"])
    )
    assert "EQUIL\n" in string
    assert "2469" in string
    assert "-- Output file printed by tests.test_common" in string

-    assert common.df2ecl(dframe, supported=["PORO"]) == ""
+    assert common.df2res(dframe, supported=["PORO"]) == ""

-    assert "EQUIL\n-- foobar" in common.df2ecl(
+    assert "EQUIL\n-- foobar" in common.df2res(
        dframe, comments={"EQUIL": "foobar"}, supported=["EQUIL"]
    )
-    assert "\n\n-- masterfoobar\nEQUIL" in common.df2ecl(
+    assert "\n\n-- masterfoobar\nEQUIL" in common.df2res(
        dframe, comments={"master": "masterfoobar"}, supported=["EQUIL"]
    )
@@ -211,16 +211,16 @@
    tworows["EQLNUM"] = [3, 1]
    tworows["PRESSURE"] = [3456, 1234]
    with pytest.raises(ValueError):
-        common.df2ecl(tworows, supported=["EQUIL"], consecutive="EQLNUM")
+        common.df2res(tworows, supported=["EQUIL"], consecutive="EQLNUM")
    # This would be a bug if client code did this, because the wrong
    # consecutive column is set:
-    assert "3456" in common.df2ecl(tworows, supported=["EQUIL"], consecutive="PVTNUM")
+    assert "3456" in common.df2res(tworows, supported=["EQUIL"], consecutive="PVTNUM")
    tworows["EQLNUM"] = [1, 3]
    with pytest.raises(ValueError):
-        common.df2ecl(tworows, supported=["EQUIL"], consecutive="EQLNUM")
+        common.df2res(tworows, supported=["EQUIL"], consecutive="EQLNUM")
    tworows["EQLNUM"] = [2, 1]
    # Passes because the frame is sorted on EQLNUM:
-    string = common.df2ecl(tworows, supported=["EQUIL"], consecutive="EQLNUM")
+    string = common.df2res(tworows, supported=["EQUIL"], consecutive="EQLNUM")
    assert "EQUIL" in string
    assert string.find("3456") > string.find("1234")
@@ -250,24 +250,24 @@
        ),
    ],
)
-def test_datetime_to_eclipsedate(somedate, expected):
+def test_datetime_to_ecldate(somedate, expected):
    """Test conversion of datetime to Eclipse date or datetime syntax"""
-    assert common.datetime_to_eclipsedate(somedate) == expected
+    assert common.datetime_to_ecldate(somedate) == expected


def test_eclcompress():
    """Test that we can compress string using Eclipse style run-length encoding"""
-    assert common.runlength_eclcompress("") == ""
-    assert common.runlength_eclcompress(" ") == ""
-    assert common.runlength_eclcompress("1 2") == "1 2"
-    assert common.runlength_eclcompress("1 2", sep=" ") == "1 2"
-    assert common.runlength_eclcompress("1 2", sep=" ") == "1 2"
-    assert common.runlength_eclcompress("1") == "1"
-    assert common.runlength_eclcompress("1 1") == "2*1"
-    assert common.runlength_eclcompress("1 1 1") == "3*1"
-    assert common.runlength_eclcompress("1 1 1") == "3*1"
-    assert common.runlength_eclcompress("1 \n 1 1 2") == "3*1 2"
+    assert common.runlength_compress("") == ""
+    assert common.runlength_compress(" ") == ""
+    assert common.runlength_compress("1 2") == "1 2"
+    assert common.runlength_compress("1 2", sep=" ") == "1 2"
+    assert common.runlength_compress("1 2", sep=" ") == "1 2"
+    assert common.runlength_compress("1") == "1"
+    assert common.runlength_compress("1 1") == "2*1"
+    assert common.runlength_compress("1 1 1") == "3*1"
+    assert common.runlength_compress("1 1 1") == "3*1"
+    assert common.runlength_compress("1 \n 1 1 2") == "3*1 2"


@pytest.mark.parametrize(
@@ -446,10 +446,10 @@
        ),
    ],
)
-def test_generic_ecltable(
+def test_generic_deck_table(
    dframe, keyword, comment, renamer, drop_trailing_columns, expected
):
-    stringtable = common.generic_ecltable(
+    stringtable = common.generic_deck_table(
        dframe,
        keyword,
        comment=comment,
diff --git a/tests/test_compdat.py b/tests/test_compdat.py
index d3e20593e..608f3a394 100644
--- a/tests/test_compdat.py
+++ b/tests/test_compdat.py
@@ -5,7 +5,7 @@
import pandas as pd
import pytest

-from ecl2df import EclFiles, compdat, ecl2csv
+from res2df import ResdataFiles, compdat, res2csv

try:
    # pylint: disable=unused-import
@@ -32,8 +32,8 @@

def test_df():
    """Test main dataframe API, only testing that something comes out"""
-    eclfiles = EclFiles(EIGHTCELLS)
-    compdat_df = compdat.df(eclfiles)
+    resdatafiles = ResdataFiles(EIGHTCELLS)
+    compdat_df = compdat.df(resdatafiles)
    assert not compdat_df.empty
    assert "ZONE" in compdat_df
    assert "K1" in compdat_df
@@ -42,8 +42,8 @@

def test_comp2df():
    """Test that dataframes are produced"""
-    eclfiles = EclFiles(EIGHTCELLS)
-    compdfs = compdat.deck2dfs(eclfiles.get_ecldeck())
+    resdatafiles = ResdataFiles(EIGHTCELLS)
+    compdfs = compdat.deck2dfs(resdatafiles.get_deck())

    assert not compdfs["COMPDAT"].empty
    assert not compdfs["WELSEGS"].empty
@@ -53,7 +53,7 @@

def test_schfile2df():
    """Test that we can process individual files"""
-    deck = EclFiles.file2deck(SCHFILE)
+    deck = ResdataFiles.file2deck(SCHFILE)
    compdfs = compdat.deck2dfs(deck)
    assert not compdfs["COMPDAT"].columns.empty
    assert not compdfs["COMPDAT"].empty
@@ -67,7 +67,7 @@ def test_str_compdat():
    -- comments.
/ """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) compdfs = compdat.deck2dfs(deck) compdat_df = compdfs["COMPDAT"] assert compdat_df.loc[0, "SATN"] == 0 @@ -79,7 +79,7 @@ def test_str_compdat(): 'FOO' 303 1010 031 39 / / """ - compdat_df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"] + compdat_df = compdat.deck2dfs(ResdataFiles.str2deck(schstr))["COMPDAT"] assert len(compdat_df) == 9 assert not compdat_df["DFACT"].values[0] assert not compdat_df["TRAN"].values[0] @@ -114,7 +114,7 @@ def test_str2df(): 'OP1' 166 1 7.4294683E-06 0 / icd on segment 17, cell 41 125 29 / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) compdfs = compdat.deck2dfs(deck) compdat_df = compdfs["COMPDAT"] welsegs = compdfs["WELSEGS"] @@ -182,7 +182,7 @@ def test_tstep(): 'OP1' 35 111 33 33 'SHUT' / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) compdf = compdat.deck2dfs(deck)["COMPDAT"] dates = [str(x) for x in compdf["DATE"].unique()] assert len(dates) == 3 @@ -202,7 +202,7 @@ def test_tstep(): 'OP1' 34 111 32 32 'OPEN' / / """ - assert compdat.deck2dfs(EclFiles.str2deck(schstr_nodate)) == {} + assert compdat.deck2dfs(ResdataFiles.str2deck(schstr_nodate)) == {} # (critical error logged) @@ -215,14 +215,14 @@ def test_unrollcompdatk1k2(): 'OP1' 33 44 10 20 / / """ - df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"] + df = compdat.deck2dfs(ResdataFiles.str2deck(schstr))["COMPDAT"] assert df["I"].unique() == 33 assert df["J"].unique() == 44 assert (df["K1"].values == range(10, 20 + 1)).all() assert (df["K2"].values == range(10, 20 + 1)).all() # Check that we can read withoug unrolling: - df_noroll = compdat.deck2dfs(EclFiles.str2deck(schstr), unroll=False)["COMPDAT"] + df_noroll = compdat.deck2dfs(ResdataFiles.str2deck(schstr), unroll=False)["COMPDAT"] assert len(df_noroll) == 1 @@ -234,7 +234,7 @@ def test_samecellperf(): 'OP2' 1 1 1 1 / / """ - df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"] + df = compdat.deck2dfs(ResdataFiles.str2deck(schstr))["COMPDAT"] assert len(df) == 2 @@ -248,10 +248,10 @@ def test_unrollwelsegs(): 2 3 1 1 1923.9 1689.000 0.1172 0.000015 / / """ - df = compdat.deck2dfs(EclFiles.str2deck(schstr))["WELSEGS"] + df = compdat.deck2dfs(ResdataFiles.str2deck(schstr))["WELSEGS"] assert len(df) == 2 - df = compdat.deck2dfs(EclFiles.str2deck(schstr), unroll=False)["WELSEGS"] + df = compdat.deck2dfs(ResdataFiles.str2deck(schstr), unroll=False)["WELSEGS"] assert len(df) == 1 @@ -267,33 +267,33 @@ def test_unrollbogus(): def test_initmerging(): """Test that we can ask for INIT vectors to be merged into the data""" - eclfiles = EclFiles(REEK) - noinit_df = compdat.df(eclfiles) - df = compdat.df(eclfiles, initvectors=[]) + resdatafiles = ResdataFiles(REEK) + noinit_df = compdat.df(resdatafiles) + df = compdat.df(resdatafiles, initvectors=[]) assert isinstance(df, pd.DataFrame) assert not df.empty - df = compdat.df(eclfiles, initvectors=["FIPNUM", "EQLNUM", "SATNUM"]) + df = compdat.df(resdatafiles, initvectors=["FIPNUM", "EQLNUM", "SATNUM"]) assert "FIPNUM" in df assert "EQLNUM" in df assert "SATNUM" in df assert len(df) == len(noinit_df) - df = compdat.df(eclfiles, initvectors="FIPNUM") + df = compdat.df(resdatafiles, initvectors="FIPNUM") assert "FIPNUM" in df assert len(df) == len(noinit_df) with pytest.raises(AssertionError): - compdat.df(eclfiles, initvectors=2) + compdat.df(resdatafiles, initvectors=2) def test_main_subparsers(tmp_path, mocker): """Test command line interface""" 
tmpcsvfile = tmp_path / "compdat.csv" mocker.patch( - "sys.argv", ["ecl2csv", "compdat", "-v", EIGHTCELLS, "-o", str(tmpcsvfile)] + "sys.argv", ["res2csv", "compdat", "-v", EIGHTCELLS, "-o", str(tmpcsvfile)] ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -303,7 +303,7 @@ def test_main_subparsers(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "compdat", EIGHTCELLS, "--initvectors", @@ -312,7 +312,7 @@ def test_main_subparsers(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -322,7 +322,7 @@ def test_main_subparsers(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "compdat", EIGHTCELLS, "--initvectors", @@ -332,7 +332,7 @@ def test_main_subparsers(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -369,37 +369,39 @@ def test_defaulted_compdat_i_j(): # pylint: disable=expression-not-assigned with pytest.raises(ValueError, match="WELSPECS must be provided when I"): - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_i))["COMPDAT"] + compdat.deck2dfs(ResdataFiles.str2deck(compdat_str_def_i))["COMPDAT"] # I value of 0 also means defaulted: with pytest.raises(ValueError, match="WELSPECS must be provided when I"): - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_i.replace("1*", "0")))[ + compdat.deck2dfs(ResdataFiles.str2deck(compdat_str_def_i.replace("1*", "0")))[ "COMPDAT" ] with pytest.raises(ValueError, match="WELSPECS must be provided when J"): - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_j))["COMPDAT"] + compdat.deck2dfs(ResdataFiles.str2deck(compdat_str_def_j))["COMPDAT"] # J value of 0 also means defaulted: with pytest.raises(ValueError, match="WELSPECS must be provided when J"): - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_j.replace("1*", "0")))[ + compdat.deck2dfs(ResdataFiles.str2deck(compdat_str_def_j.replace("1*", "0")))[ "COMPDAT" ] with pytest.raises(ValueError, match="WELSPECS must be provided"): # Wrong order: - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_i + welspecs_str))["COMPDAT"] + compdat.deck2dfs(ResdataFiles.str2deck(compdat_str_def_i + welspecs_str))[ + "COMPDAT" + ] # Simplest example: - compdat_df = compdat.deck2dfs(EclFiles.str2deck(welspecs_str + compdat_str_def_i))[ - "COMPDAT" - ] + compdat_df = compdat.deck2dfs( + ResdataFiles.str2deck(welspecs_str + compdat_str_def_i) + )["COMPDAT"] assert compdat_df["I"].unique() == [20] assert compdat_df["J"].unique() == [30] # Two wells: compdat_df = compdat.deck2dfs( - EclFiles.str2deck( + ResdataFiles.str2deck( welspecs_str.replace("OP1", "OP2").replace("30", "99") + welspecs_str + compdat_str_def_i @@ -408,14 +410,14 @@ def test_defaulted_compdat_i_j(): # Partial defaulting compdat_df = compdat.deck2dfs( - EclFiles.str2deck(welspecs_str + compdat_str_def_i + compdat_str_nodefaults) + ResdataFiles.str2deck(welspecs_str + compdat_str_def_i + compdat_str_nodefaults) )["COMPDAT"] assert set(compdat_df["I"].unique()) == {20, 55} assert set(compdat_df["J"].unique()) == {30, 66} compdat_df = compdat.deck2dfs( - EclFiles.str2deck( + ResdataFiles.str2deck( welspecs_str.replace("OP1", "OP2").replace("30", "99") + welspecs_str + compdat_str_def_i @@ -430,7 +432,7 @@ def test_defaulted_compdat_i_j(): # Same well redrilled to new location compdat_df = compdat.deck2dfs( - EclFiles.str2deck( + 
ResdataFiles.str2deck( "DATES\n 1 JAN 2030 /\n/\n" + welspecs_str + compdat_str_def_i @@ -450,17 +452,17 @@ def test_defaulted_compdat_i_j(): # Multisegement well testing def test_msw_schfile2df(): """Test that we can process individual files with AICD and ICD MSW""" - deck = EclFiles.file2deck(SCHFILE_AICD) + deck = ResdataFiles.file2deck(SCHFILE_AICD) compdfs = compdat.deck2dfs(deck) assert not compdfs["WSEGAICD"].empty assert not compdfs["WSEGAICD"].columns.empty - deck = EclFiles.file2deck(SCHFILE_ICD) + deck = ResdataFiles.file2deck(SCHFILE_ICD) compdfs = compdat.deck2dfs(deck) assert not compdfs["WSEGSICD"].empty assert not compdfs["WSEGSICD"].columns.empty - deck = EclFiles.file2deck(SCHFILE_VALV) + deck = ResdataFiles.file2deck(SCHFILE_VALV) compdfs = compdat.deck2dfs(deck) assert not compdfs["WSEGVALV"].empty assert not compdfs["WSEGVALV"].columns.empty @@ -507,7 +509,7 @@ def test_msw_str2df(): OP_6 31 0.0084252 0.00075 1* / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) compdfs = compdat.deck2dfs(deck) wsegaicd = compdfs["WSEGAICD"] wsegsicd = compdfs["WSEGSICD"] @@ -539,7 +541,7 @@ def test_wsegaicd(): OPEN 1.0 1.0 1.0 2.43 1.18 10.0 / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) wsegaicd = compdat.deck2dfs(deck)["WSEGAICD"] pd.testing.assert_frame_equal( wsegaicd, @@ -585,7 +587,7 @@ def test_wsegsicd(): OPEN / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) wsegsicd = compdat.deck2dfs(deck)["WSEGSICD"] pd.testing.assert_frame_equal( wsegsicd, @@ -620,7 +622,7 @@ def test_wsegvalv(): WELL_A 31 0.0084252 0.00075 0.5 0.216 0.0005 0.0366 SHUT 0.0008 / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) wsegvalv = compdat.deck2dfs(deck)["WSEGVALV"] pd.testing.assert_frame_equal( wsegvalv, @@ -654,7 +656,7 @@ def test_wsegvalv_max_blank(): WELL_A 31 0.0084252 0.00075 / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) wsegvalv = compdat.deck2dfs(deck)["WSEGVALV"] pd.testing.assert_frame_equal( wsegvalv, @@ -688,7 +690,7 @@ def test_wsegvalv_max_default(): WELL_A 31 0.0084252 0.00075 6* / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) wsegvalv = compdat.deck2dfs(deck)["WSEGVALV"] pd.testing.assert_frame_equal( wsegvalv, diff --git a/tests/test_eclfiles.py b/tests/test_eclfiles.py index 69e3548b4..2b01fbf71 100644 --- a/tests/test_eclfiles.py +++ b/tests/test_eclfiles.py @@ -3,7 +3,7 @@ import pytest -from ecl2df import EclFiles +from res2df import ResdataFiles try: # pylint: disable=unused-import @@ -29,52 +29,52 @@ def test_filedescriptors(): pre_fd_count = len(list(fd_dir.glob("*"))) - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) # No opened files yet: assert len(list(fd_dir.glob("*"))) == pre_fd_count - eclfiles.close() + resdatafiles.close() # No change, no files to close: assert len(list(fd_dir.glob("*"))) == pre_fd_count - eclfiles.get_egrid() + resdatafiles.get_egrid() # This should not leave any file descriptor open assert len(list(fd_dir.glob("*"))) == pre_fd_count - eclfiles.get_initfile() + resdatafiles.get_initfile() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._initfile is not None - eclfiles.close() + assert resdatafiles._initfile is not None + resdatafiles.close() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._initfile is None + assert resdatafiles._initfile is None - eclfiles.get_rstfile() - # 
+    resdatafiles.get_rstfile()
+    # Automatically closed by resdata
    assert len(list(fd_dir.glob("*"))) == pre_fd_count
-    assert eclfiles._rstfile is not None
-    eclfiles.close()
+    assert resdatafiles._rstfile is not None
+    resdatafiles.close()
    assert len(list(fd_dir.glob("*"))) == pre_fd_count
-    assert eclfiles._rstfile is None
+    assert resdatafiles._rstfile is None

-    eclfiles.get_eclsum()
+    resdatafiles.get_summary()
    assert len(list(fd_dir.glob("*"))) == pre_fd_count + 1
-    eclfiles.close()
+    resdatafiles.close()
    assert len(list(fd_dir.glob("*"))) == pre_fd_count

-    eclfiles.get_egridfile()
+    resdatafiles.get_egridfile()
    assert len(list(fd_dir.glob("*"))) == pre_fd_count
-    assert eclfiles._egridfile is not None
-    eclfiles.close()
+    assert resdatafiles._egridfile is not None
+    resdatafiles.close()
    assert len(list(fd_dir.glob("*"))) == pre_fd_count
-    assert eclfiles._egridfile is None
+    assert resdatafiles._egridfile is None

-    eclfiles.get_rftfile()
+    resdatafiles.get_rftfile()
    assert len(list(fd_dir.glob("*"))) == pre_fd_count
-    assert eclfiles._rftfile is not None
-    eclfiles.close()
+    assert resdatafiles._rftfile is not None
+    resdatafiles.close()
    assert len(list(fd_dir.glob("*"))) == pre_fd_count
-    assert eclfiles._rftfile is None
+    assert resdatafiles._rftfile is None

-    eclfiles.get_ecldeck()
+    resdatafiles.get_deck()
    # This should not leave any file descriptor open
    assert len(list(fd_dir.glob("*"))) == pre_fd_count
diff --git a/tests/test_equil.py b/tests/test_equil.py
index bb15e8f7f..698d3c9a4 100644
--- a/tests/test_equil.py
+++ b/tests/test_equil.py
@@ -8,8 +8,8 @@
import pandas as pd
import pytest

-from ecl2df import csv2ecl, ecl2csv, equil
-from ecl2df.eclfiles import EclFiles
+from res2df import csv2res, equil, res2csv
+from res2df.resdatafiles import ResdataFiles

try:
    # pylint: disable=unused-import
@@ -28,8 +28,8 @@

def test_equil2df():
    """Test that dataframes are produced"""
-    eclfiles = EclFiles(REEK)
-    equildf = equil.df(eclfiles)
+    resdatafiles = ResdataFiles(REEK)
+    equildf = equil.df(resdatafiles)
    expected = {}
    expected["EQUIL"] = pd.DataFrame(
        [
@@ -77,25 +77,25 @@

    # Check that we can dump from dataframe to include file
    # and reparse to the same dataframe:
-    inc = equil.df2ecl(equildf, withphases=True)
+    inc = equil.df2res(equildf, withphases=True)
    df_from_inc = equil.df(inc)
    pd.testing.assert_frame_equal(equildf, df_from_inc, check_dtype=False)


-def test_df2ecl(tmp_path):
+def test_df2res(tmp_path):
    """Test that we can write include files to disk"""
    os.chdir(tmp_path)
-    eclfiles = EclFiles(EIGHTCELLS)
-    equildf = equil.df(eclfiles)
-    equil.df2ecl(equildf, filename="equil.inc")
+    resdatafiles = ResdataFiles(EIGHTCELLS)
+    equildf = equil.df(resdatafiles)
+    equil.df2res(equildf, filename="equil.inc")
    assert Path("equil.inc").is_file()

    # Test automatic directory creation:
-    equil.df2ecl(equildf, filename="eclipse/include/equil.inc")
+    equil.df2res(equildf, filename="eclipse/include/equil.inc")
    assert Path("eclipse/include/equil.inc").is_file()


-def test_df2ecl_equil():
+def test_df2res_equil():
    """Test the underlying function directly"""
    dframe = pd.DataFrame(
        [
@@ -111,18 +111,18 @@
        ]
    )
    # Check that we don't need the KEYWORD in the underlying function
-    assert equil.df2ecl_equil(dframe) == equil.df2ecl_equil(
+    assert equil.df2res_equil(dframe) == equil.df2res_equil(
        dframe.drop("KEYWORD", axis="columns")
    )

    # Can also drop EQLNUM since we have only one row:
-    assert equil.df2ecl_equil(dframe) == equil.df2ecl_equil(
+    assert equil.df2res_equil(dframe) == equil.df2res_equil(
        dframe.drop("EQLNUM", axis="columns")
    )

    # Problem if we have two rows, nothing is returned and a critical error is logged
    assert (
-        equil.df2ecl_equil(pd.concat([dframe, dframe]).drop("EQLNUM", axis="columns"))
+        equil.df2res_equil(pd.concat([dframe, dframe]).drop("EQLNUM", axis="columns"))
        == ""
    )
@@ -142,22 +142,22 @@ def test_decks():
    assert len(df) == 1
    assert "IGNORE1" not in df
    assert df["EQLNUM"].unique()[0] == 1
-    inc = equil.df2ecl(df, withphases=True)
+    inc = equil.df2res(df, withphases=True)
    df_from_inc = equil.df(inc)
    # 0 columns can be both integers and floats.
    pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False)

    # Test empty data:
-    inc = equil.df2ecl_equil(equil.df(""))
+    inc = equil.df2res_equil(equil.df(""))
    assert "No data" in inc
    assert equil.df(inc).empty

    # Test more empty data:
-    assert "No data" in equil.df2ecl_equil(equil.df(""))
-    assert "No data" in equil.df2ecl_rsvd(equil.df(""))
-    assert "No data" in equil.df2ecl_rvvd(equil.df(""))
-    assert "No data" in equil.df2ecl_pbvd(equil.df(""))
-    assert "No data" in equil.df2ecl_pdvd(equil.df(""))
+    assert "No data" in equil.df2res_equil(equil.df(""))
+    assert "No data" in equil.df2res_rsvd(equil.df(""))
+    assert "No data" in equil.df2res_rvvd(equil.df(""))
+    assert "No data" in equil.df2res_pbvd(equil.df(""))
+    assert "No data" in equil.df2res_pdvd(equil.df(""))

    deckstr = """
OIL
@@ -170,7 +170,7 @@
    assert df["OWC"].values == 2200
    assert len(df) == 1
    assert "IGNORE1" not in df
-    inc = equil.df2ecl(df, withphases=True)
+    inc = equil.df2res(df, withphases=True)
    df_from_inc = equil.df(inc)
    # 0 columns can be both integers and floats.
    pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False)
@@ -187,7 +187,7 @@
    assert "OWC" not in df
    assert len(df) == 1
    assert "IGNORE2" not in df
-    inc = equil.df2ecl(df, withphases=True)
+    inc = equil.df2res(df, withphases=True)
    df_from_inc = equil.df(inc)
    # 0 columns can be both integers and floats.
    pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False)
@@ -205,7 +205,7 @@
    assert "OWC" not in df
    assert len(df) == 1
    assert "IGNORE2" not in df
-    inc = equil.df2ecl(df, withphases=True)
+    inc = equil.df2res(df, withphases=True)
    df_from_inc = equil.df(inc)
    # 0 columns can be both integers and floats.
    pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False)
@@ -215,7 +215,7 @@
WATER
GAS

--- Output file printed by ecl2df.equil 0.5.2.dev12+g785dc0d.d20200402
+-- Output file printed by res2df.equil 0.5.2.dev12+g785dc0d.d20200402
-- at 2020-04-03 16:18:57.450100

EQUIL
@@ -229,7 +229,7 @@
    assert "OWC" in df
    assert len(df) == 2
    assert "IGNORE2" not in df
-    inc = equil.df2ecl(df, withphases=True)
+    inc = equil.df2res(df, withphases=True)
    df_from_inc = equil.df(inc)
    # 0 columns can be both integers and floats.
    pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False)
@@ -255,7 +255,9 @@ def test_equil_fromdeck():
    assert len(equil.equil_fromdeck(deckstr)) == 2  # correct
    assert len(equil.equil_fromdeck(deckstr, 2)) == 2
    assert len(equil.equil_fromdeck(deckstr, 1)) == 1
-    assert len(equil.equil_fromdeck(EclFiles.str2deck(deckstr))) == 1  # (watch out!)
+    assert (
+        len(equil.equil_fromdeck(ResdataFiles.str2deck(deckstr))) == 1
+    )  # (watch out!)
wrongdeck = """ EQUIL @@ -292,7 +294,7 @@ def test_rsvd(): assert max(rsvd_df["EQLNUM"]) == 3 assert set(rsvd_df["Z"].values) == {10, 30, 50} assert set(rsvd_df["RS"].values) == {100, 400} - inc = equil.df2ecl(rsvd_df) + inc = equil.df2res(rsvd_df) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(rsvd_df, df_from_inc) @@ -315,7 +317,7 @@ def test_rsvd(): assert max(rsvd_df["EQLNUM"]) == 2 assert set(rsvd_df["Z"].values) == {10, 30, 50, 60} assert set(rsvd_df["RS"].values) == {100, 400, 1000} - inc = equil.df2ecl(rsvd_df) + inc = equil.df2res(rsvd_df) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(rsvd_df, df_from_inc) @@ -338,7 +340,7 @@ def test_rvvd(): assert set(rvvd_df["Z"].values) == {10, 30, 50} assert set(rvvd_df["RV"].values) == {100, 400} - inc = equil.df2ecl(rvvd_df) + inc = equil.df2res(rvvd_df) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(rvvd_df, df_from_inc) @@ -362,7 +364,7 @@ def test_rvvd(): assert set(rvvd_df["Z"].values) == {10, 30, 50, 60} assert set(rvvd_df["RV"].values) == {100, 400, 1000} - inc = equil.df2ecl(rvvd_df) + inc = equil.df2res(rvvd_df) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(rvvd_df, df_from_inc) @@ -381,7 +383,7 @@ def test_pbvd(): assert set(pbvd_df["Z"].values) == {10, 30, 50} assert set(pbvd_df["PB"].values) == {100, 400} - inc = equil.df2ecl(pbvd_df) + inc = equil.df2res(pbvd_df) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(pbvd_df, df_from_inc) @@ -392,14 +394,14 @@ def test_pbvd(): pd.testing.assert_frame_equal(pbvd_df.drop("KEYWORD", axis="columns"), pbvd_df2) # Check that we don't need the KEYWORD column for the underlying function: - assert equil.df2ecl_pbvd(pbvd_df) == equil.df2ecl_pbvd( + assert equil.df2res_pbvd(pbvd_df) == equil.df2res_pbvd( pbvd_df.drop("KEYWORD", axis="columns") ) # If EQLNUM column is dropped it is not possible to guess the # correct include file, so the code must fail: with pytest.raises(KeyError): - equil.df2ecl_pbvd(pbvd_df.drop("EQLNUM", axis="columns")) + equil.df2res_pbvd(pbvd_df.drop("EQLNUM", axis="columns")) def test_pdvd(): @@ -416,7 +418,7 @@ def test_pdvd(): assert set(pdvd_df["Z"].values) == {10, 30, 50} assert set(pdvd_df["PD"].values) == {100, 400} - inc = equil.df2ecl(pdvd_df) + inc = equil.df2res(pdvd_df) df_from_inc = equil.df(inc) pdvd_df2 = equil.pdvd_fromdeck(deckstr) pd.testing.assert_frame_equal(pdvd_df, df_from_inc) @@ -440,8 +442,8 @@ def test_rsvd_via_file(tmp_path, mocker): 60 1000 /""" rsvd_df = equil.df(deckstr) Path("rsvd.inc").write_text(deckstr, encoding="utf8") - mocker.patch("sys.argv", ["ecl2csv", "equil", "-v", "rsvd.inc", "-o", "rsvd.csv"]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "equil", "-v", "rsvd.inc", "-o", "rsvd.csv"]) + res2csv.main() rsvd_df_fromcsv = pd.read_csv("rsvd.csv") pd.testing.assert_frame_equal(rsvd_df, rsvd_df_fromcsv) @@ -465,7 +467,7 @@ def test_ntequl(): df = equil.df(deckstr, ntequl=2) assert len(df) == 2 - inc = equil.df2ecl(df, withphases=True) + inc = equil.df2res(df, withphases=True) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False) @@ -492,7 +494,7 @@ def test_ntequl(): assert set(df["GOC"].values) == set([2100, 2100]) assert len(df) == 2 - inc = equil.df2ecl(df, withphases=True) + inc = equil.df2res(df, withphases=True) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False) @@ -534,15 +536,15 @@ def test_eclipse_rounding(somefloat, expected): } ] ) - assert expected in equil.df2ecl(dframe, 
withphases=False)
+ assert expected in equil.df2res(dframe, withphases=False)
 def test_main_subparser(tmp_path, mocker):
 """Test command line interface"""
 os.chdir(tmp_path)
 tmpcsvfile = "equil.csv"
- mocker.patch("sys.argv", ["ecl2csv", "equil", "-v", REEK, "-o", tmpcsvfile])
- ecl2csv.main()
+ mocker.patch("sys.argv", ["res2csv", "equil", "-v", REEK, "-o", tmpcsvfile])
+ res2csv.main()
 assert Path(tmpcsvfile).is_file()
 disk_df = pd.read_csv(tmpcsvfile)
@@ -550,9 +552,9 @@ def test_main_subparser(tmp_path, mocker):
 # Test the reverse operation:
 mocker.patch(
- "sys.argv", ["csv2ecl", "equil", "-v", "--output", "equil.inc", tmpcsvfile]
+ "sys.argv", ["csv2res", "equil", "-v", "--output", "equil.inc", tmpcsvfile]
 )
- csv2ecl.main()
+ csv2res.main()
 # NB: csv2res does not output the phase configuration!
 phases = "WATER\nGAS\nOIL\n\n"
 ph_equil_inc = Path("phasesequil.inc")
@@ -567,7 +569,7 @@ def test_main_subparser(tmp_path, mocker):
 # Test via stdout:
 result = subprocess.run(
- ["csv2ecl", "equil", "--output", "-", tmpcsvfile],
+ ["csv2res", "equil", "--output", "-", tmpcsvfile],
 stdout=subprocess.PIPE,
 check=True,
 )
@@ -588,8 +590,8 @@ def test_main_subparser(tmp_path, mocker):
 """,
 encoding="utf8",
 )
- mocker.patch("sys.argv", ["ecl2csv", "equil", "-v", "poro.inc", "-o", "empty.csv"])
- ecl2csv.main()
+ mocker.patch("sys.argv", ["res2csv", "equil", "-v", "poro.inc", "-o", "empty.csv"])
+ res2csv.main()
 assert not Path("empty.csv").read_text(encoding="utf8").strip()
@@ -611,7 +613,7 @@
 )
 def test_phases_from_deck(deckstring, expected):
 """Test that we can extract phase configuration from a deck"""
- deck = EclFiles.str2deck(deckstring)
+ deck = ResdataFiles.str2deck(deckstring)
 assert equil.phases_from_deck(deck) == expected
diff --git a/tests/test_ert_hooks.py b/tests/test_ert_hooks.py
index 645276d29..391858315 100644
--- a/tests/test_ert_hooks.py
+++ b/tests/test_ert_hooks.py
@@ -5,8 +5,8 @@
 import pandas as pd
 import pytest
-import ecl2df
-from ecl2df.hook_implementations import jobs
+import res2df
+from res2df.hook_implementations import jobs
 try:
 # pylint: disable=unused-import
@@ -24,7 +24,7 @@
 @pytest.mark.skipif(
 not HAVE_ERT, reason="ERT is not installed, skipping hook implementation tests."
)
-def test_ecl2csv_through_ert(tmp_path):
+def test_res2csv_through_ert(tmp_path):
 """Test running the ERT executable on a mocked config file"""
 os.chdir(tmp_path)
@@ -53,50 +53,50 @@ def test_ecl2csv_through_ert(tmp_path):
 "RUNPATH .",
 ]
- csv2ecl_subcommands = ["equil", "pvt", "satfunc"]
+ csv2res_subcommands = ["equil", "pvt", "satfunc"]
- for subcommand in ecl2df.SUBMODULES:
+ for subcommand in res2df.SUBMODULES:
 ert_config.append(
- f"FORWARD_MODEL ECL2CSV(<SUBCOMMAND>={subcommand}, "
+ f"FORWARD_MODEL RES2CSV(<SUBCOMMAND>={subcommand}, "
 f"<OUTPUT>={subcommand}.csv)"
 )
 # Test that we can also supply additional options for some submodules:
 ert_config.append(
- "FORWARD_MODEL ECL2CSV(<SUBCOMMAND>=summary, "
+ "FORWARD_MODEL RES2CSV(<SUBCOMMAND>=summary, "
 '<OUTPUT>=summary-yearly.csv, <XARG1>="--time_index", <XARG2>=yearly)'
 )
 ert_config.append(
- "FORWARD_MODEL ECL2CSV(<SUBCOMMAND>=equil, "
+ "FORWARD_MODEL RES2CSV(<SUBCOMMAND>=equil, "
 '<OUTPUT>=equil-rsvd.csv, <XARG1>="--keywords", <XARG2>="RSVD")'
 )
 ert_config.append(
- "FORWARD_MODEL ECL2CSV(<SUBCOMMAND>=pvt, "
+ "FORWARD_MODEL RES2CSV(<SUBCOMMAND>=pvt, "
 '<OUTPUT>=pvt-custom.csv, <XARG1>="--keywords", <XARG2>="PVTO")'
 )
 ert_config.append(
- "FORWARD_MODEL ECL2CSV(<SUBCOMMAND>=satfunc, "
+ "FORWARD_MODEL RES2CSV(<SUBCOMMAND>=satfunc, "
 '<OUTPUT>=satfunc-swof.csv, <XARG1>="--keywords", <XARG2>="SWOF")'
 )
- for subcommand in csv2ecl_subcommands:
+ for subcommand in csv2res_subcommands:
 ert_config.append(
- f"FORWARD_MODEL CSV2ECL(<SUBCOMMAND>={subcommand}, "
+ f"FORWARD_MODEL CSV2RES(<SUBCOMMAND>={subcommand}, "
 f"<CSVFILE>={subcommand}.csv, <OUTPUT>={subcommand}.inc)"
 )
 ert_config.append(
- "FORWARD_MODEL CSV2ECL(<SUBCOMMAND>=summary, <CSVFILE>=summary-yearly.csv, "
+ "FORWARD_MODEL CSV2RES(<SUBCOMMAND>=summary, <CSVFILE>=summary-yearly.csv, "
 "<OUTPUT>=SUMYEARLY)"
 )
- ert_config_filename = "ecl2csv_test.ert"
+ ert_config_filename = "res2csv_test.ert"
 Path(ert_config_filename).write_text("\n".join(ert_config), encoding="utf-8")
 subprocess.call(["ert", "test_run", ert_config_filename])
 assert Path("OK").is_file()
- for subcommand in ecl2df.SUBMODULES:
+ for subcommand in res2df.SUBMODULES:
 assert Path(subcommand + ".csv").is_file()
 # Check the custom output where options were supplied to the subcommands:
@@ -105,7 +105,7 @@ def test_ecl2csv_through_ert(tmp_path):
 assert set(pd.read_csv("pvt-custom.csv")["KEYWORD"]) == set(["PVTO"])
 assert set(pd.read_csv("satfunc-swof.csv")["KEYWORD"]) == set(["SWOF"])
- for subcommand in csv2ecl_subcommands:
+ for subcommand in csv2res_subcommands:
 assert Path(subcommand + ".inc").is_file()
@@ -114,17 +114,17 @@ def test_job_documentation():
 """Test that for registered ERT forward models the documentation is non-empty"""
 if HAVE_ERT:
 assert (
- type(jobs.job_documentation("ECL2CSV"))
+ type(jobs.job_documentation("RES2CSV"))
 == ert.shared.plugins.plugin_response.PluginResponse
 )
 assert (
- type(jobs.job_documentation("CSV2ECL"))
+ type(jobs.job_documentation("CSV2RES"))
 == ert.shared.plugins.plugin_response.PluginResponse
 )
 else:
- assert jobs.job_documentation("ECL2CSV") is None
- assert jobs.job_documentation("CSV2ECL") is None
+ assert jobs.job_documentation("RES2CSV") is None
+ assert jobs.job_documentation("CSV2RES") is None
 assert jobs.job_documentation("foobar") is None
@@ -137,9 +137,9 @@ def test_get_module_variable():
 # pylint: disable=protected-access
 assert jobs._get_module_variable_if_exists("foo", "bar") == ""
 assert jobs._get_module_variable_if_exists(
- "ecl2df.ecl2csv", "DESCRIPTION"
- ).startswith("Convert Eclipse input and output")
- assert jobs._get_module_variable_if_exists("ecl2df.ecl2csv", "NOPE") == ""
+ "res2df.res2csv", "DESCRIPTION"
+ ).startswith("Convert reservoir simulator input and output")
+ assert jobs._get_module_variable_if_exists("res2df.res2csv", "NOPE") == ""
@pytest.mark.skipif(HAVE_ERT, reason="Tested only when ERT is not available") @@ -147,4 +147,4 @@ def test_no_erthooks(): """Test that we can import the hook implementations even when ERT is unavailable.""" # pylint: disable=redefined-outer-name, unused-import # pylint: disable=reimported, import-outside-toplevel - from ecl2df.hook_implementations import jobs # noqa + from res2df.hook_implementations import jobs # noqa diff --git a/tests/test_faults.py b/tests/test_faults.py index 0ea9f5167..c93817313 100644 --- a/tests/test_faults.py +++ b/tests/test_faults.py @@ -7,8 +7,8 @@ import pandas as pd import pytest -from ecl2df import ecl2csv, faults -from ecl2df.eclfiles import EclFiles +from res2df import faults, res2csv +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -26,8 +26,8 @@ def test_faults2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - faultsdf = faults.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + faultsdf = faults.df(resdatafiles.get_deck()) assert "NAME" in faultsdf assert "I" in faultsdf @@ -46,7 +46,7 @@ def test_str2df(): 'B' 2 3 4 5 6 7 'J' / / """ - deck = EclFiles.str2deck(deckstr) + deck = ResdataFiles.str2deck(deckstr) faultsdf = faults.df(deck) assert len(faultsdf) == 16 @@ -54,8 +54,8 @@ def test_str2df(): def test_nofaults(): """Test on a dataset with no faults""" - eclfiles = EclFiles(EIGHTCELLS) - faultsdf = faults.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(EIGHTCELLS) + faultsdf = faults.df(resdatafiles.get_deck()) assert faultsdf.empty @@ -71,7 +71,7 @@ def test_multiplestr2df(): 'D' 2 2 4 4 10 10 'J' / / """ - deck = EclFiles.str2deck(deckstr) + deck = ResdataFiles.str2deck(deckstr) faultsdf = faults.df(deck).set_index("NAME") assert len(faultsdf) == 23 @@ -82,8 +82,8 @@ def test_multiplestr2df(): def test_main_subparser(tmp_path, mocker): """Test command line interface with subparsers""" tmpcsvfile = tmp_path / "faultsdf.csv" - mocker.patch("sys.argv", ["ecl2csv", "faults", REEK, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "faults", REEK, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -93,7 +93,7 @@ def test_main_subparser(tmp_path, mocker): def test_magic_stdout(): """Test that we can pipe the output into a dataframe""" result = subprocess.run( - ["ecl2csv", "faults", "-o", "-", REEK], check=True, stdout=subprocess.PIPE + ["res2csv", "faults", "-o", "-", REEK], check=True, stdout=subprocess.PIPE ) df_stdout = pd.read_csv(io.StringIO(result.stdout.decode())) assert not df_stdout.empty diff --git a/tests/test_fipreports.py b/tests/test_fipreports.py index 6faf25fe1..52df874ca 100644 --- a/tests/test_fipreports.py +++ b/tests/test_fipreports.py @@ -8,9 +8,9 @@ import pandas as pd import pytest -from ecl2df import ecl2csv, fipreports -from ecl2df.eclfiles import EclFiles -from ecl2df.fipreports import report_block_lineparser as parser +from res2df import fipreports, res2csv +from res2df.fipreports import report_block_lineparser as parser +from res2df.resdatafiles import ResdataFiles TESTDIR = Path(__file__).absolute().parent DATAFILE = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -19,7 +19,7 @@ def test_fipreports2df(): """Test parsing of Reek dataset""" - prtfile = EclFiles(DATAFILE).get_prtfilename() + prtfile = ResdataFiles(DATAFILE).get_prtfilename() fipreport_df = fipreports.df(prtfile) assert len(fipreport_df["REGION"].unique()) == 6 assert 
len(fipreport_df["DATE"].unique()) == 1
@@ -346,7 +346,7 @@ def test_rogue_eclipse_output(tmp_path):
 def test_prtstring_opmflow(tmp_path):
- """Test parsing the PRT output from OPM flow."""
+ """Test parsing the PRT output from OPM Flow."""
 prtstring = """
 Starting time step 3, stepsize 19.6 days, at day 11.4/31, date = 12-Jan-2000
@@ -440,9 +440,9 @@ def test_cmdline(tmp_path, mocker):
 tmpcsvfile = tmp_path / "TMP-fipreports.csv"
 mocker.patch(
 "sys.argv",
- ["ecl2csv", "fipreports", "-v", DATAFILE, "--output", str(tmpcsvfile)],
+ ["res2csv", "fipreports", "-v", DATAFILE, "--output", str(tmpcsvfile)],
 )
- ecl2csv.main()
+ res2csv.main()
 assert Path(tmpcsvfile).is_file()
 disk_df = pd.read_csv(tmpcsvfile)
@@ -454,7 +454,7 @@ def test_cmdline(tmp_path, mocker):
 mocker.patch(
 "sys.argv",
 [
- "ecl2csv",
+ "res2csv",
 "fipreports",
 "--debug",
 DATAFILE,
@@ -462,19 +462,19 @@ def test_cmdline(tmp_path, mocker):
 "debugmode.csv",
 ],
 )
- ecl2csv.main()
+ res2csv.main()
 pd.testing.assert_frame_equal(pd.read_csv("debugmode.csv"), disk_df)
 # Directly on PRT file:
 mocker.patch(
 "sys.argv",
 [
- "ecl2csv",
+ "res2csv",
 "fipreports",
 DATAFILE.replace("DATA", "PRT"),
 "--output",
 "fromprtfile.csv",
 ],
 )
- ecl2csv.main()
+ res2csv.main()
 pd.testing.assert_frame_equal(pd.read_csv("fromprtfile.csv"), disk_df)
diff --git a/tests/test_grid.py b/tests/test_grid.py
index 86cbabdee..456d646b3 100644
--- a/tests/test_grid.py
+++ b/tests/test_grid.py
@@ -1,4 +1,4 @@
-"""Test module for ecl2df.grid"""
+"""Test module for res2df.grid"""
 import datetime
 import os
 from pathlib import Path
@@ -8,8 +8,8 @@
 import pyarrow
 import pytest
-from ecl2df import common, ecl2csv, grid
-from ecl2df.eclfiles import EclFiles
+from res2df import common, grid, res2csv
+from res2df.resdatafiles import ResdataFiles
 TESTDIR = Path(__file__).absolute().parent
 REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA")
@@ -18,8 +18,8 @@ def test_gridgeometry2df(mocker):
 """Test that dataframes are produced"""
- eclfiles = EclFiles(REEK)
- grid_geom = grid.gridgeometry2df(eclfiles)
+ resdatafiles = ResdataFiles(REEK)
+ grid_geom = grid.gridgeometry2df(resdatafiles)
 assert isinstance(grid_geom, pd.DataFrame)
 assert not grid_geom.empty
@@ -50,38 +50,40 @@ def test_gridgeometry2df(mocker):
 grid.gridgeometry2df(None)
 with pytest.raises(ValueError, match="No EGRID file supplied"):
- mocker.patch("ecl2df.eclfiles.EclFiles.get_egridfile", return_value=None)
- grid.gridgeometry2df(eclfiles)
+ mocker.patch(
+ "res2df.resdatafiles.ResdataFiles.get_egridfile", return_value=None
+ )
+ grid.gridgeometry2df(resdatafiles)
 def test_wrongfile():
- """Test the EclFiles object on nonexistent files"""
+ """Test the ResdataFiles object on nonexistent files"""
 # pylint: disable=invalid-name,redefined-builtin
 # We can initialize this object with bogus:
- eclfiles = EclFiles("FOO.DATA")
+ resdatafiles = ResdataFiles("FOO.DATA")
 # but when we try to use it, things should fail:
 with pytest.raises(FileNotFoundError):
- grid.init2df(eclfiles)
+ grid.init2df(resdatafiles)
 def test_gridzonemap():
 """Check that zonemap can be merged automatically by default, and also
 that there is some API for supplying the zonemap directly
 as a dictionary"""
- eclfiles = EclFiles(EIGHTCELLS)
- grid_geom = grid.gridgeometry2df(eclfiles, zonemap=None)
+ resdatafiles = ResdataFiles(EIGHTCELLS)
+ grid_geom = grid.gridgeometry2df(resdatafiles, zonemap=None)
 default_zonemap = grid_geom["ZONE"]
- grid_no_zone = grid.gridgeometry2df(eclfiles, zonemap={})
+ grid_no_zone = 
grid.gridgeometry2df(resdatafiles, zonemap={}) assert "ZONE" not in grid_no_zone - assert (grid.df(eclfiles, zonemap=None)["ZONE"] == default_zonemap).all() + assert (grid.df(resdatafiles, zonemap=None)["ZONE"] == default_zonemap).all() - df_no_zone = grid.df(eclfiles, zonemap={}) + df_no_zone = grid.df(resdatafiles, zonemap={}) assert "ZONE" not in df_no_zone - df_custom_zone = grid.gridgeometry2df(eclfiles, zonemap={1: "FIRSTLAYER"}) + df_custom_zone = grid.gridgeometry2df(resdatafiles, zonemap={1: "FIRSTLAYER"}) assert "ZONE" in df_custom_zone assert set(df_custom_zone[df_custom_zone["K"] == 1]["ZONE"].unique()) == set( ["FIRSTLAYER"] @@ -89,14 +91,14 @@ def test_gridzonemap(): assert len(df_custom_zone) == len(grid_no_zone) df_bogus_zones = grid.gridgeometry2df( - eclfiles, zonemap={999999: "nonexistinglayer"} + resdatafiles, zonemap={999999: "nonexistinglayer"} ) assert pd.isnull(df_bogus_zones["ZONE"]).all() - # Test a custom "subzone" map via direct usage of merge_zone on an dataframe + # Test a custom "subzone" map via direct usage of merge_zone on a dataframe # where ZONE already exists: - dframe = grid.df(eclfiles) + dframe = grid.df(resdatafiles) subzonemap = {1: "SUBZONE1", 2: "SUBZONE2"} dframe = common.merge_zones(dframe, subzonemap, zoneheader="SUBZONE", kname="K") assert (dframe["ZONE"] == default_zonemap).all() @@ -107,20 +109,22 @@ def test_gridzonemap(): def test_merge_initvectors(): """Test merging of INIT-vectors into the grid dataframe""" - eclfiles = EclFiles(REEK) - assert grid.merge_initvectors(eclfiles, pd.DataFrame(), []).empty + resdatafiles = ResdataFiles(REEK) + assert grid.merge_initvectors(resdatafiles, pd.DataFrame(), []).empty foo_df = pd.DataFrame([{"FOO": 1}]) - pd.testing.assert_frame_equal(grid.merge_initvectors(eclfiles, foo_df, []), foo_df) + pd.testing.assert_frame_equal( + grid.merge_initvectors(resdatafiles, foo_df, []), foo_df + ) with pytest.raises(ValueError, match="All of the columns"): - grid.merge_initvectors(eclfiles, foo_df, ["NONEXISTING"]) + grid.merge_initvectors(resdatafiles, foo_df, ["NONEXISTING"]) minimal_df = pd.DataFrame([{"I": 10, "J": 11, "K": 12}]) with pytest.raises(KeyError): - grid.merge_initvectors(eclfiles, minimal_df, ["NONEXISTING"]) + grid.merge_initvectors(resdatafiles, minimal_df, ["NONEXISTING"]) - withporo = grid.merge_initvectors(eclfiles, minimal_df, ["PORO"]) + withporo = grid.merge_initvectors(resdatafiles, minimal_df, ["PORO"]) pd.testing.assert_frame_equal( withporo, minimal_df.assign(PORO=0.221848), check_dtype=False ) @@ -128,18 +132,20 @@ def test_merge_initvectors(): with pytest.raises(ValueError): # ijknames must be length 3 grid.merge_initvectors( - eclfiles, minimal_df, ["PORO"], ijknames=["I", "J", "K", "L"] + resdatafiles, minimal_df, ["PORO"], ijknames=["I", "J", "K", "L"] ) with pytest.raises(ValueError): - grid.merge_initvectors(eclfiles, minimal_df, ["PORO"], ijknames=["I", "J"]) + grid.merge_initvectors(resdatafiles, minimal_df, ["PORO"], ijknames=["I", "J"]) with pytest.raises(ValueError, match="All of the columns"): - grid.merge_initvectors(eclfiles, minimal_df, ["PORO"], ijknames=["A", "B", "C"]) + grid.merge_initvectors( + resdatafiles, minimal_df, ["PORO"], ijknames=["A", "B", "C"] + ) def test_init2df(): """Test that dataframe with INIT vectors can be produced""" - eclfiles = EclFiles(REEK) - init_df = grid.init2df(eclfiles) + resdatafiles = ResdataFiles(REEK) + init_df = grid.init2df(resdatafiles) assert isinstance(init_df, pd.DataFrame) # pylint: disable=unsupported-membership-test # 
false positive on Dataframe @@ -149,8 +155,8 @@ def test_init2df(): assert "PORV" in init_df # The KRO data from the INIT file in Reek contains only NaN's, - # but libecl gives out a large negative integer/float. - # ecl2df should ensure this comes out as a NaN (but it + # but resdata gives out a large negative integer/float. + # res2df should ensure this comes out as a NaN (but it # should be allowed later to drop columns which have only NaNs)) if "KRO" in init_df: assert np.isnan(init_df["KRO"].unique()).all() @@ -158,8 +164,8 @@ def test_init2df(): def test_grid_df(): """Test that dataframe with INIT vectors and coordinates can be produced""" - eclfiles = EclFiles(EIGHTCELLS) - grid_df = grid.df(eclfiles) + resdatafiles = ResdataFiles(EIGHTCELLS) + grid_df = grid.df(resdatafiles) assert isinstance(grid_df, pd.DataFrame) assert not grid_df.empty @@ -182,82 +188,82 @@ def test_grid_df(): ) -def test_df2ecl(tmp_path): +def test_df2res(tmp_path): """Test if we are able to output include files for grid data""" - eclfiles = EclFiles(REEK) - grid_df = grid.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + grid_df = grid.df(resdatafiles) - fipnum_str = grid.df2ecl(grid_df, "FIPNUM", dtype=int) - assert grid.df2ecl(grid_df, "FIPNUM", dtype="int", nocomments=True) == grid.df2ecl( + fipnum_str = grid.df2res(grid_df, "FIPNUM", dtype=int) + assert grid.df2res(grid_df, "FIPNUM", dtype="int", nocomments=True) == grid.df2res( grid_df, "FIPNUM", dtype=int, nocomments=True ) with pytest.raises(ValueError, match="Wrong dtype argument foo"): - grid.df2ecl(grid_df, "FIPNUM", dtype="foo") + grid.df2res(grid_df, "FIPNUM", dtype="foo") assert "FIPNUM" in fipnum_str - assert "-- Output file printed by ecl2df.grid" in fipnum_str + assert "-- Output file printed by res2df.grid" in fipnum_str assert "35817 active cells" in fipnum_str # (comment at the end) assert "35840 total cell count" in fipnum_str # (comment at the end) assert len(fipnum_str) > 100 - fipnum_str_nocomment = grid.df2ecl(grid_df, "FIPNUM", dtype=int, nocomments=True) + fipnum_str_nocomment = grid.df2res(grid_df, "FIPNUM", dtype=int, nocomments=True) assert "--" not in fipnum_str_nocomment - fipnum2_str = grid.df2ecl( - grid_df, "FIPNUM", dtype=int, eclfiles=eclfiles, nocomments=True + fipnum2_str = grid.df2res( + grid_df, "FIPNUM", dtype=int, resdatafiles=resdatafiles, nocomments=True ) # This would mean that we guessed the correct global size in the first run assert fipnum_str_nocomment == fipnum2_str - float_fipnum_str = grid.df2ecl(grid_df, "FIPNUM", dtype=float) + float_fipnum_str = grid.df2res(grid_df, "FIPNUM", dtype=float) assert len(float_fipnum_str) > len(fipnum_str) # lots of .0 in the string. 
- fipsatnum_str = grid.df2ecl(grid_df, ["FIPNUM", "SATNUM"], dtype=int) + fipsatnum_str = grid.df2res(grid_df, ["FIPNUM", "SATNUM"], dtype=int) assert "FIPNUM" in fipsatnum_str assert "SATNUM" in fipsatnum_str grid_df["FIPNUM"] = grid_df["FIPNUM"] * 3333 - fipnum_big_str = grid.df2ecl(grid_df, "FIPNUM", dtype=int) + fipnum_big_str = grid.df2res(grid_df, "FIPNUM", dtype=int) assert "3333" in fipnum_big_str assert len(fipnum_big_str) > len(fipnum_str) os.chdir(tmp_path) - grid.df2ecl(grid_df, ["PERMX", "PERMY", "PERMZ"], dtype=float, filename="perm.inc") + grid.df2res(grid_df, ["PERMX", "PERMY", "PERMZ"], dtype=float, filename="perm.inc") assert Path("perm.inc").is_file() incstring = Path("perm.inc").read_text(encoding="utf8").splitlines() assert sum([1 for line in incstring if "PERM" in line]) == 6 - assert grid.df2ecl(grid_df, ["PERMX"], dtype=float, nocomments=True) == grid.df2ecl( + assert grid.df2res(grid_df, ["PERMX"], dtype=float, nocomments=True) == grid.df2res( grid_df, ["PERMX"], dtype="float", nocomments=True ) # with pytest.raises(ValueError, match="Wrong dtype argument"): - grid.df2ecl(grid_df, ["PERMX"], dtype=dict) + grid.df2res(grid_df, ["PERMX"], dtype=dict) with pytest.raises(ValueError): - grid.df2ecl(grid_df, ["PERMRR"]) + grid.df2res(grid_df, ["PERMRR"]) # Check when we have restart info included: - gr_rst = grid.df(eclfiles, rstdates="all") - fipnum_str_rst = grid.df2ecl(gr_rst, "FIPNUM", dtype=int, nocomments=True) + gr_rst = grid.df(resdatafiles, rstdates="all") + fipnum_str_rst = grid.df2res(gr_rst, "FIPNUM", dtype=int, nocomments=True) assert fipnum_str_rst == fipnum_str_nocomment # When dates are stacked, there are NaN's in the FIPNUM column, # which should be gracefully ignored. - gr_rst_stacked = grid.df(eclfiles, rstdates="all", stackdates=True) - fipnum_str_rst = grid.df2ecl(gr_rst_stacked, "FIPNUM", dtype=int, nocomments=True) + gr_rst_stacked = grid.df(resdatafiles, rstdates="all", stackdates=True) + fipnum_str_rst = grid.df2res(gr_rst_stacked, "FIPNUM", dtype=int, nocomments=True) assert fipnum_str_rst == fipnum_str_nocomment # dateinheaders here will be ignored due to stackdates: pd.testing.assert_frame_equal( gr_rst_stacked, - grid.df(eclfiles, rstdates="all", stackdates=True, dateinheaders=True), + grid.df(resdatafiles, rstdates="all", stackdates=True, dateinheaders=True), ) -def test_df2ecl_mock(): - """Test that we can use df2ecl for mocked minimal dataframes""" +def test_df2res_mock(): + """Test that we can use df2res for mocked minimal dataframes""" a_grid = pd.DataFrame(columns=["FIPNUM"], data=[[1], [2], [3]]) - simple_fipnum_inc = grid.df2ecl( + simple_fipnum_inc = grid.df2res( a_grid, keywords="FIPNUM", dtype=int, nocomments=True ) # (A warning is printed, that warning is warranted) @@ -267,25 +273,25 @@ def test_df2ecl_mock(): def test_subvectors(): """Test that we can ask for a few vectors only""" - eclfiles = EclFiles(EIGHTCELLS) - init_df = grid.init2df(eclfiles, "PORO") + resdatafiles = ResdataFiles(EIGHTCELLS) + init_df = grid.init2df(resdatafiles, "PORO") assert "PORO" in init_df assert "PERMX" not in init_df assert "PORV" not in init_df - init_df = grid.init2df(eclfiles, "P*") + init_df = grid.init2df(resdatafiles, "P*") assert "PORO" in init_df assert "PERMX" in init_df assert "PVTNUM" in init_df assert "SATNUM" not in init_df - init_df = grid.init2df(eclfiles, ["P*"]) + init_df = grid.init2df(resdatafiles, ["P*"]) assert "PORO" in init_df assert "PERMX" in init_df assert "PVTNUM" in init_df assert "SATNUM" not in init_df - init_df = 
grid.init2df(eclfiles, ["P*", "*NUM"]) + init_df = grid.init2df(resdatafiles, ["P*", "*NUM"]) assert "PORO" in init_df assert "PERMX" in init_df assert "PVTNUM" in init_df @@ -313,55 +319,59 @@ def test_dropconstants(): def test_df(): """Test the df function""" - eclfiles = EclFiles(REEK) + resdatafiles = ResdataFiles(REEK) # assert error.. with pytest.raises(TypeError): # pylint: disable=no-value-for-parameter grid.df() - grid_df = grid.df(eclfiles) + grid_df = grid.df(resdatafiles) assert not grid_df.empty assert "I" in grid_df # From GRID assert "PORO" in grid_df # From INIT assert "SOIL" not in grid_df # We do not get RST unless we ask for it. - grid_df = grid.df(eclfiles, vectors="*") + grid_df = grid.df(resdatafiles, vectors="*") assert "I" in grid_df # From GRID assert "PORO" in grid_df # From INIT assert "SOIL" not in grid_df # We do not get RST unless we ask for it. - grid_df = grid.df(eclfiles, vectors=["*"]) + grid_df = grid.df(resdatafiles, vectors=["*"]) assert "I" in grid_df # From GRID assert "PORO" in grid_df # From INIT assert "SOIL" not in grid_df # We do not get RST unless we ask for it. - grid_df = grid.df(eclfiles, vectors="PRESSURE") + grid_df = grid.df(resdatafiles, vectors="PRESSURE") assert "I" in grid_df assert "PRESSURE" not in grid_df # that vector is only in RST assert len(grid_df) == 35817 assert "VOLUME" in grid_df - grid_df = grid.df(eclfiles, vectors=["PRESSURE"]) + grid_df = grid.df(resdatafiles, vectors=["PRESSURE"]) assert "I" in grid_df assert not grid_df.empty assert "PRESSURE" not in grid_df geometry_cols = len(grid_df.columns) - grid_df = grid.df(eclfiles, vectors=["PRESSURE"], rstdates="last", stackdates=True) + grid_df = grid.df( + resdatafiles, vectors=["PRESSURE"], rstdates="last", stackdates=True + ) assert "PRESSURE" in grid_df assert len(grid_df.columns) == geometry_cols + 2 assert "DATE" in grid_df # Present because of stackdates - grid_df = grid.df(eclfiles, vectors="PRESSURE", rstdates="last") + grid_df = grid.df(resdatafiles, vectors="PRESSURE", rstdates="last") assert "PRESSURE" in grid_df assert len(grid_df.columns) == geometry_cols + 1 - grid_df = grid.df(eclfiles, vectors="PRESSURE", rstdates="last", dateinheaders=True) + grid_df = grid.df( + resdatafiles, vectors="PRESSURE", rstdates="last", dateinheaders=True + ) assert "PRESSURE" not in grid_df assert "PRESSURE@2001-08-01" in grid_df grid_df = grid.df( - eclfiles, vectors=["PORO", "PRESSURE"], rstdates="all", stackdates=True + resdatafiles, vectors=["PORO", "PRESSURE"], rstdates="all", stackdates=True ) assert "PRESSURE" in grid_df assert len(grid_df.columns) == geometry_cols + 3 @@ -393,20 +403,20 @@ def test_df(): pd.testing.assert_frame_equal(df1, df3) pd.testing.assert_frame_equal(df1, df4) - grid_df = grid.df(eclfiles, vectors="PORO") + grid_df = grid.df(resdatafiles, vectors="PORO") assert "I" in grid_df assert "PORO" in grid_df assert len(grid_df) == 35817 assert "DATE" not in grid_df - grid_df = grid.df(eclfiles, vectors="PORO", rstdates="all") + grid_df = grid.df(resdatafiles, vectors="PORO", rstdates="all") assert "I" in grid_df assert "PORO" in grid_df assert "DATE" not in grid_df # (no RST columns, so no DATE info in the dataframe) # (warnings should be printed) - grid_df = grid.df(eclfiles, vectors="PORO", rstdates="all", stackdates=True) + grid_df = grid.df(resdatafiles, vectors="PORO", rstdates="all", stackdates=True) assert "I" in grid_df assert "PORO" in grid_df assert "DATE" not in grid_df @@ -421,7 +431,7 @@ def test_main(tmp_path, mocker): mocker.patch( 
"sys.argv", [ - "ecl2csv", + "res2csv", "grid", EIGHTCELLS, "-o", @@ -432,7 +442,7 @@ def test_main(tmp_path, mocker): "PORO", ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert not disk_df.empty @@ -442,7 +452,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "grid", "--verbose", EIGHTCELLS, @@ -455,7 +465,7 @@ def test_main(tmp_path, mocker): "PERMX", ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert not disk_df.empty @@ -465,9 +475,9 @@ def test_main(tmp_path, mocker): # Test with constants dropping mocker.patch( - "sys.argv", ["ecl2csv", "grid", REEK, "-o", str(tmpcsvfile), "--dropconstants"] + "sys.argv", ["res2csv", "grid", REEK, "-o", str(tmpcsvfile), "--dropconstants"] ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) # That PVTNUM is constant is a particular feature @@ -480,15 +490,15 @@ def test_main_arrow(tmp_path, mocker): """Check that we can export grid in arrow format""" mocker.patch( "sys.argv", - ["ecl2csv", "grid", "--arrow", EIGHTCELLS, "-o", str(tmp_path / "grid.arrow")], + ["res2csv", "grid", "--arrow", EIGHTCELLS, "-o", str(tmp_path / "grid.arrow")], ) - ecl2csv.main() + res2csv.main() # Obtain the CSV version for comparison: mocker.patch( - "sys.argv", ["ecl2csv", "grid", EIGHTCELLS, "-o", str(tmp_path / "grid.csv")] + "sys.argv", ["res2csv", "grid", EIGHTCELLS, "-o", str(tmp_path / "grid.csv")] ) - ecl2csv.main() + res2csv.main() # Read from disk and verify similarity disk_frame_arrow = pyarrow.feather.read_table(tmp_path / "grid.arrow").to_pandas() @@ -499,13 +509,13 @@ def test_main_arrow(tmp_path, mocker): def test_get_available_rst_dates(): """Test the support of dates in restart files""" - eclfiles = EclFiles(REEK) - # rstfile = eclfiles.get_rstfile() + resdatafiles = ResdataFiles(REEK) + # rstfile = resdatafiles.get_rstfile() - alldates = grid.get_available_rst_dates(eclfiles) + alldates = grid.get_available_rst_dates(resdatafiles) assert len(alldates) == 4 - didx = grid.dates2rstindices(eclfiles, "all") + didx = grid.dates2rstindices(resdatafiles, "all") assert len(didx[0]) == len(alldates) assert len(didx[1]) == len(alldates) assert isinstance(didx[0][0], int) @@ -513,38 +523,40 @@ def test_get_available_rst_dates(): assert didx[1][0] == alldates[0] assert didx[1][-1] == alldates[-1] - somedate = grid.dates2rstindices(eclfiles, "2000-07-01") + somedate = grid.dates2rstindices(resdatafiles, "2000-07-01") assert somedate[1] == [alldates[1]] with pytest.raises(ValueError, match="date 1999-09-09 not found in UNRST file"): - grid.dates2rstindices(eclfiles, "1999-09-09") + grid.dates2rstindices(resdatafiles, "1999-09-09") with pytest.raises(ValueError, match="date 1999-0909 not understood"): - grid.dates2rstindices(eclfiles, "1999-0909") + grid.dates2rstindices(resdatafiles, "1999-0909") - expl_date = grid.dates2rstindices(eclfiles, datetime.date(2000, 7, 1)) + expl_date = grid.dates2rstindices(resdatafiles, datetime.date(2000, 7, 1)) assert expl_date[1] == [alldates[1]] expl_datetime = grid.dates2rstindices( - eclfiles, datetime.datetime(2000, 7, 1, 0, 0, 0) + resdatafiles, datetime.datetime(2000, 7, 1, 0, 0, 0) ) assert expl_datetime[1] == [alldates[1]] - expl_list_datetime = grid.dates2rstindices(eclfiles, [datetime.date(2000, 7, 1)]) + expl_list_datetime = grid.dates2rstindices( + resdatafiles, [datetime.date(2000, 7, 1)] + ) 
assert expl_list_datetime[1] == [alldates[1]] # For list input, only datetime.date objects are allowed: expl_list2_date = grid.dates2rstindices( - eclfiles, [datetime.date(2000, 7, 1), datetime.date(2001, 2, 1)] + resdatafiles, [datetime.date(2000, 7, 1), datetime.date(2001, 2, 1)] ) assert expl_list2_date[1] == [alldates[1], alldates[2]] with pytest.raises(ValueError, match="None of the requested dates were found"): - grid.dates2rstindices(eclfiles, ["2000-07-01", "2001-02-01"]) + grid.dates2rstindices(resdatafiles, ["2000-07-01", "2001-02-01"]) with pytest.raises(ValueError, match="None of the requested dates were found"): grid.dates2rstindices( - eclfiles, + resdatafiles, [ datetime.datetime(2000, 7, 1, 0, 0, 0), datetime.datetime(2001, 2, 1, 0, 0, 0), @@ -552,40 +564,43 @@ def test_get_available_rst_dates(): ) with pytest.raises(ValueError, match="not understood"): - grid.dates2rstindices(eclfiles, {"2000-07-01": "2001-02-01"}) + grid.dates2rstindices(resdatafiles, {"2000-07-01": "2001-02-01"}) - first = grid.dates2rstindices(eclfiles, "first") + first = grid.dates2rstindices(resdatafiles, "first") assert first[1][0] == alldates[0] - last = grid.dates2rstindices(eclfiles, "last") + last = grid.dates2rstindices(resdatafiles, "last") assert last[1][0] == alldates[-1] - dates = grid.get_available_rst_dates(eclfiles) + dates = grid.get_available_rst_dates(resdatafiles) assert isinstance(dates, list) # Test with missing RST file: - eclfiles = EclFiles("BOGUS.DATA") + resdatafiles = ResdataFiles("BOGUS.DATA") with pytest.raises(IOError): - eclfiles.get_rstfile() + resdatafiles.get_rstfile() def test_rst2df(): """Test producing dataframes from restart files""" - eclfiles = EclFiles(REEK) - assert grid.rst2df(eclfiles, "first").shape == (35817, 24) - assert grid.rst2df(eclfiles, "last").shape == (35817, 24) - assert grid.rst2df(eclfiles, "all").shape == (35817, 23 * 4 + 1) + resdatafiles = ResdataFiles(REEK) + assert grid.rst2df(resdatafiles, "first").shape == (35817, 24) + assert grid.rst2df(resdatafiles, "last").shape == (35817, 24) + assert grid.rst2df(resdatafiles, "all").shape == (35817, 23 * 4 + 1) - assert "SOIL" in grid.rst2df(eclfiles, date="first", dateinheaders=False) + assert "SOIL" in grid.rst2df(resdatafiles, date="first", dateinheaders=False) assert ( - "SOIL@2000-01-01" in grid.rst2df(eclfiles, "first", dateinheaders=True).columns + "SOIL@2000-01-01" + in grid.rst2df(resdatafiles, "first", dateinheaders=True).columns ) - rst_df = grid.rst2df(eclfiles, "first", stackdates=True) + rst_df = grid.rst2df(resdatafiles, "first", stackdates=True) assert "DATE" in rst_df assert rst_df["DATE"].unique()[0] == "2000-01-01" - rst_df = grid.rst2df(eclfiles, "all", stackdates=True) - assert len(rst_df["DATE"].unique()) == len(grid.get_available_rst_dates(eclfiles)) + rst_df = grid.rst2df(resdatafiles, "all", stackdates=True) + assert len(rst_df["DATE"].unique()) == len( + grid.get_available_rst_dates(resdatafiles) + ) # "DATE" and "active" are now the extra columns: assert rst_df.shape == (4 * 35817, 23 + 2) @@ -599,21 +614,21 @@ def test_rst2df(): assert sum(nancols) == 1 # All other columns are "False" # Check vector slicing: - rst_df = grid.rst2df(eclfiles, "first", vectors="S???") + rst_df = grid.rst2df(resdatafiles, "first", vectors="S???") assert rst_df.shape == (35817, 4) assert "SGAS" in rst_df assert "SWAT" in rst_df assert "SOIL" in rst_df # This is actually computed assert "FIPWAT" not in rst_df - rst_df = grid.rst2df(eclfiles, "first", vectors=["PRESSURE", "SWAT"]) + rst_df = 
grid.rst2df(resdatafiles, "first", vectors=["PRESSURE", "SWAT"]) assert "PRESSURE" in rst_df assert "SWAT" in rst_df assert "SGAS" not in rst_df assert "SOIL" not in rst_df # Check that we can avoid getting SOIL if we are explicit: - rst_df = grid.rst2df(eclfiles, "first", vectors=["SGAS", "SWAT"]) + rst_df = grid.rst2df(resdatafiles, "first", vectors=["SGAS", "SWAT"]) assert "SOIL" not in rst_df assert "SGAS" in rst_df assert "SWAT" in rst_df diff --git a/tests/test_gruptree.py b/tests/test_gruptree.py index 9902bb4cb..a090d2639 100644 --- a/tests/test_gruptree.py +++ b/tests/test_gruptree.py @@ -7,8 +7,8 @@ import pandas as pd import pytest -from ecl2df import ecl2csv, gruptree -from ecl2df.eclfiles import EclFiles +from res2df import gruptree, res2csv +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -27,8 +27,8 @@ def test_eightcells_dataset(): """Test Eightcells dataset""" - eclfiles = EclFiles(EIGHTCELLS) - gruptree_df = gruptree.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(EIGHTCELLS) + gruptree_df = gruptree.df(resdatafiles.get_deck()) expected_dframe = pd.DataFrame( [ @@ -44,8 +44,8 @@ def test_eightcells_dataset(): def test_gruptree2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - grupdf = gruptree.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + grupdf = gruptree.df(resdatafiles.get_deck()) assert not grupdf.empty assert len(grupdf["DATE"].unique()) == 5 @@ -53,7 +53,7 @@ def test_gruptree2df(): assert len(grupdf["PARENT"].dropna().unique()) == 3 assert set(grupdf["KEYWORD"].unique()) == set(["GRUPTREE", "WELSPECS"]) - grupdfnowells = gruptree.df(eclfiles.get_ecldeck(), welspecs=False) + grupdfnowells = gruptree.df(resdatafiles.get_deck(), welspecs=False) assert len(grupdfnowells["KEYWORD"].unique()) == 1 assert grupdf["PARENT"].dropna().unique()[0] == "FIELD" @@ -75,7 +75,7 @@ def test_str2df(): / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) grupdf = gruptree.df(deck) assert grupdf.dropna().empty # the DATE is empty @@ -118,7 +118,7 @@ def test_grupnet_rst_docs(tmp_path): / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) grupdf = gruptree.df(deck) grupdf[["DATE", "CHILD", "PARENT", "KEYWORD"]].to_csv("gruptree.csv", index=False) grupdf.to_csv("gruptreenet.csv", index=False) @@ -161,7 +161,7 @@ def test_grupnetdf(): / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) grupdf = gruptree.df(deck, startdate="2000-01-01") print(grupdf) assert "TERMINAL_PRESSURE" in grupdf @@ -308,7 +308,7 @@ def test_dict2treelib_deprecated(): def test_grupnetroot(schstr, expected_dframe, expected_tree): """Test that terminal pressure of the tree root can be included in the dataframe (with an empty parent)""" - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) grupdf = gruptree.df(deck, startdate="2000-01-01") non_default_columns = ["CHILD", "PARENT", "TERMINAL_PRESSURE"] pd.testing.assert_frame_equal( @@ -414,7 +414,7 @@ def test_edge_dataframe2dict(dframe, expected): def test_emptytree_strdeck(): """Test empty schedule sections. 
Don't want to crash""" schstr = "" - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) grupdf = gruptree.df(deck) assert grupdf.empty gruptreedict = gruptree.edge_dataframe2dict(grupdf) @@ -427,19 +427,19 @@ def test_emptytree_strdeck(): def test_emptytree_commandlinetool(tmp_path, mocker, caplog): - """Test the command line tool on an Eclipse deck which is empty""" + """Test the command line tool on a .DATA file which is empty""" os.chdir(tmp_path) Path("EMPTY.DATA").write_text("", encoding="utf8") - mocker.patch("sys.argv", ["ecl2csv", "gruptree", "--prettyprint", "EMPTY.DATA"]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "gruptree", "--prettyprint", "EMPTY.DATA"]) + res2csv.main() assert "No tree data to prettyprint" in caplog.text def test_cli_nothing_to_do(mocker, capsys): """Test that the client says nothing to do when DATA is supplied, but no action.""" - mocker.patch("sys.argv", ["ecl2csv", "gruptree", "EMPTY.DATA"]) + mocker.patch("sys.argv", ["res2csv", "gruptree", "EMPTY.DATA"]) with pytest.raises(SystemExit): - ecl2csv.main() + res2csv.main() assert "Nothing to do" in capsys.readouterr().out @@ -461,7 +461,7 @@ def test_tstep(): / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) grupdf = gruptree.df(deck) assert len(grupdf["DATE"].unique()) == 2 print(grupdf) @@ -470,8 +470,8 @@ def test_tstep(): def test_main(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "gruptree.csv" - mocker.patch("sys.argv", ["ecl2csv", "gruptree", REEK, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "gruptree", REEK, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -480,8 +480,8 @@ def test_main(tmp_path, mocker): def test_prettyprint_commandline(mocker, capsys): """Test pretty printing via command line interface""" - mocker.patch("sys.argv", ["ecl2csv", "gruptree", REEK, "--prettyprint"]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "gruptree", REEK, "--prettyprint"]) + res2csv.main() stdout = capsys.readouterr().out.strip() print(stdout) assert ( @@ -551,8 +551,8 @@ def test_prettyprint_commandline(mocker, capsys): def test_main_subparser(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "gruptree.csv" - mocker.patch("sys.argv", ["ecl2csv", "gruptree", "-v", REEK, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "gruptree", "-v", REEK, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -724,7 +724,7 @@ def test_branprop_nodeprop(schstr, expected_dframe, check_columns): """Testing that the gruptree dataframe works correctly when the schedule string contains BRANPROP and NODEPROP """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) dframe = gruptree.df(deck).reset_index() expected_dframe.DATE = pd.to_datetime(expected_dframe.DATE) pd.testing.assert_frame_equal( @@ -789,5 +789,5 @@ def test_prettyprint(): """ - dframe = gruptree.df(EclFiles.str2deck(schstr)) + dframe = gruptree.df(ResdataFiles.str2deck(schstr)) assert gruptree.prettyprint(dframe).strip() == expected_prettyprint.strip() diff --git a/tests/test_hook_implementations.py b/tests/test_hook_implementations.py index 7ed1b5639..f03471707 100644 --- a/tests/test_hook_implementations.py +++ b/tests/test_hook_implementations.py @@ -16,22 +16,22 @@ from ert.shared.plugins.plugin_manager import 
ErtPluginManager -import ecl2df.hook_implementations.jobs +import res2df.hook_implementations.jobs @pytest.fixture(name="expected_jobs") -def fixture_expected_jobs(path_to_ecl2df: Path) -> Dict[str, Path]: +def fixture_expected_jobs(path_to_res2df: Path) -> Dict[str, Path]: """Dictionary of installed jobs with location to job configuration""" expected_job_names = [ - "ECL2CSV", - "CSV2ECL", + "RES2CSV", + "CSV2RES", ] - return {name: path_to_ecl2df / "config_jobs" / name for name in expected_job_names} + return {name: path_to_res2df / "config_jobs" / name for name in expected_job_names} def test_hook_implementations(expected_jobs): """Test that the expected jobs can be found using an ERT plugin manager""" - plugin_m = ErtPluginManager(plugins=[ecl2df.hook_implementations.jobs]) + plugin_m = ErtPluginManager(plugins=[res2df.hook_implementations.jobs]) installable_jobs = plugin_m.get_installable_jobs() for wf_name, wf_location in expected_jobs.items(): @@ -75,7 +75,7 @@ def test_executables(expected_jobs): def test_hook_implementations_job_docs(): """Test extracting docs from ERT hooks""" - plugin_m = ErtPluginManager(plugins=[ecl2df.hook_implementations.jobs]) + plugin_m = ErtPluginManager(plugins=[res2df.hook_implementations.jobs]) installable_jobs = plugin_m.get_installable_jobs() diff --git a/tests/test_inferdims.py b/tests/test_inferdims.py index c61b05e3a..85288ca4b 100644 --- a/tests/test_inferdims.py +++ b/tests/test_inferdims.py @@ -3,7 +3,7 @@ import pytest -from ecl2df import inferdims +from res2df import inferdims try: # pylint: disable=unused-import diff --git a/tests/test_init.py b/tests/test_init.py index 26d11db69..d1c197708 100644 --- a/tests/test_init.py +++ b/tests/test_init.py @@ -1,21 +1,21 @@ -"""Check that ecl2df's submodules are always imported""" +"""Check that res2df's submodules are always imported""" import sys # This file tests what happens when we do this import: -import ecl2df +import res2df def test_init(): - """Test the top level properties of the ecl2df package""" - assert "ecl2df.compdat" in sys.modules + """Test the top level properties of the res2df package""" + assert "res2df.compdat" in sys.modules # This should be a list of all submodules - assert ecl2df.SUBMODULES + assert res2df.SUBMODULES - for submodule in ecl2df.SUBMODULES: - assert "ecl2df." + submodule in sys.modules + for submodule in res2df.SUBMODULES: + assert "res2df." 
+ submodule in sys.modules
- # The Eclfiles object inside eclfiles should be lifted up to top-level:
- assert hasattr(ecl2df, "EclFiles")
+ # The ResdataFiles object inside resdatafiles should be lifted up to top-level:
+ assert hasattr(res2df, "ResdataFiles")
- assert isinstance(ecl2df.__version__, str)
+ assert isinstance(res2df.__version__, str)
diff --git a/tests/test_integration.py b/tests/test_integration.py
index 99c93085a..2205e5061 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -4,7 +4,7 @@
 import pytest
-import ecl2df
+import res2df
 try:
 # pylint: disable=unused-import
@@ -19,22 +19,22 @@
 @pytest.mark.integration
 def test_integration():
 """Test that all endpoints that are to be installed are installed"""
- assert subprocess.check_output(["ecl2csv", "-h"]) # nosec
- assert subprocess.check_output(["csv2ecl", "-h"]) # nosec
+ assert subprocess.check_output(["res2csv", "-h"]) # nosec
+ assert subprocess.check_output(["csv2res", "-h"]) # nosec
 # The subparsers should exit "cleanly" with exit code 2 ("Incorrect usage")
 # when no more options are provided on the command line
 with pytest.raises(subprocess.CalledProcessError) as exception:
- subprocess.check_output(["ecl2csv"]) # nosec
+ subprocess.check_output(["res2csv"]) # nosec
 assert exception.value.returncode == 2
 with pytest.raises(subprocess.CalledProcessError) as exception:
- subprocess.check_output(["csv2ecl"]) # nosec
+ subprocess.check_output(["csv2res"]) # nosec
 assert exception.value.returncode == 2
 # ref: https://stackoverflow.com/questions/23714542/ \
 # why-does-pythons-argparse-use-an-error-code-of-2-for-systemexit
- for submodule in ecl2df.SUBMODULES:
- helptext = subprocess.check_output(["ecl2csv", submodule, "-h"])
+ for submodule in res2df.SUBMODULES:
+ helptext = subprocess.check_output(["res2csv", submodule, "-h"])
 # Test that this option is hidden, the argument is only there
 # to support an optional number of arguments in ERT forward models.
assert "hiddenemptyplaceholders" not in str(helptext) diff --git a/tests/test_logging.py b/tests/test_logging.py index 5e5b05482..8281773ab 100644 --- a/tests/test_logging.py +++ b/tests/test_logging.py @@ -2,7 +2,7 @@ import pytest -import ecl2df +import res2df from .test_grid import EIGHTCELLS, REEK @@ -18,7 +18,7 @@ def test_default_logger_levels_and_split(capsys): """Verify that the intended usage of this logger have expected results""" - splitlogger = ecl2df.getLogger_ecl2csv("test_levels_split") + splitlogger = res2df.getLogger_res2csv("test_levels_split") splitlogger.debug("This DEBUG-text is not to be seen") captured = capsys.readouterr() @@ -40,7 +40,7 @@ def test_default_logger_levels_and_split(capsys): assert "ERROR-text" in captured.err # If output is written to stdout, all logs should go to stderr: - nosplit_logger = ecl2df.getLogger_ecl2csv( + nosplit_logger = res2df.getLogger_res2csv( "test_levels_nosplit", args_dict={"output": "-", "debug": True} ) nosplit_logger.debug("This DEBUG-text is to be seen in stderr") @@ -66,10 +66,10 @@ def test_default_logger_levels_and_split(capsys): @pytest.mark.skipif(not HAVE_OPM, reason="Command line client requires OPM") @pytest.mark.parametrize( - "ecl2df_module, verbose, fileexport", - itertools.product(ecl2df.SUBMODULES, [False, True], [True, False]), + "res2df_module, verbose, fileexport", + itertools.product(res2df.SUBMODULES, [False, True], [True, False]), ) -def test_ecl2csv_logging(tmp_path, ecl2df_module, verbose, fileexport, mocker, capsys): +def test_res2csv_logging(tmp_path, res2df_module, verbose, fileexport, mocker, capsys): """Test that the command line client for each submodule logs correctly. Each submodule should write logs to stdout for INFO and WARNING messages @@ -84,24 +84,24 @@ def test_ecl2csv_logging(tmp_path, ecl2df_module, verbose, fileexport, mocker, c test invocation. 
""" # pylint: disable=too-many-arguments - if ecl2df_module == "nnc": + if res2df_module == "nnc": # There are no nnc's in EIGHTCELLS, so for that test # we need the REEK dataset: - commands = ["ecl2csv", ecl2df_module, REEK, "--output"] + commands = ["res2csv", res2df_module, REEK, "--output"] else: - commands = ["ecl2csv", ecl2df_module, EIGHTCELLS, "--output"] + commands = ["res2csv", res2df_module, EIGHTCELLS, "--output"] if fileexport: commands.append(str(tmp_path / "output.csv")) else: - commands.append(ecl2df.common.MAGIC_STDOUT) + commands.append(res2df.common.MAGIC_STDOUT) if verbose: commands.append("-v") mocker.patch("sys.argv", commands) - ecl2df.ecl2csv.main() + res2df.res2csv.main() captured = capsys.readouterr() stdout_output = captured.out stderr_output = captured.err @@ -124,8 +124,8 @@ def test_ecl2csv_logging(tmp_path, ecl2df_module, verbose, fileexport, mocker, c def test_repeated_logger_construction(capsys): """If we repeatedly call getLogger(), ensure handlers are not added on top""" - logger = ecl2df.getLogger_ecl2csv("nodouble") - logger = ecl2df.getLogger_ecl2csv("nodouble") + logger = res2df.getLogger_res2csv("nodouble") + logger = res2df.getLogger_res2csv("nodouble") logger.warning("Don't repeat me") captured = capsys.readouterr() assert captured.out.count("Don't repeat me") == 1 diff --git a/tests/test_nnc.py b/tests/test_nnc.py index b51363be0..1be3befce 100644 --- a/tests/test_nnc.py +++ b/tests/test_nnc.py @@ -8,8 +8,8 @@ import pandas as pd import pytest -from ecl2df import ecl2csv, faults, nnc, trans -from ecl2df.eclfiles import EclFiles +from res2df import faults, nnc, res2csv, trans +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -27,8 +27,8 @@ def test_nnc2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - nncdf = nnc.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + nncdf = nnc.df(resdatafiles) assert not nncdf.empty assert "I1" in nncdf @@ -48,14 +48,14 @@ def test_nnc2df(): def test_no_nnc(): """Test nnc on an Eclipse case with no NNCs""" - eclfiles = EclFiles(EIGHTCELLS) - assert nnc.df(eclfiles).empty + resdatafiles = ResdataFiles(EIGHTCELLS) + assert nnc.df(resdatafiles).empty def test_nnc2df_coords(): """Test that we are able to add coordinates""" - eclfiles = EclFiles(REEK) - gnncdf = nnc.df(eclfiles, coords=True) + resdatafiles = ResdataFiles(REEK) + gnncdf = nnc.df(resdatafiles, coords=True) assert not gnncdf.empty assert "X" in gnncdf assert "Y" in gnncdf @@ -65,9 +65,9 @@ def test_nnc2df_coords(): @pytest.mark.skipif(not HAVE_OPM, reason="Requires OPM") def test_nnc2df_faultnames(): """Add faultnames from FAULTS keyword to connections""" - eclfiles = EclFiles(REEK) - nncdf = nnc.df(eclfiles) - faultsdf = faults.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + nncdf = nnc.df(resdatafiles) + faultsdf = faults.df(resdatafiles.get_deck()) merged = pd.merge( nncdf, @@ -87,14 +87,14 @@ def test_nnc2df_faultnames(): # Remove I_x, J_x, K_x (and _y) which is not needed -def test_df2ecl_editnnc(tmp_path): +def test_df2res_editnnc(tmp_path): """Test generation of EDITNNC keyword""" - eclfiles = EclFiles(REEK) - nncdf = nnc.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + nncdf = nnc.df(resdatafiles) os.chdir(tmp_path) nncdf["TRANM"] = 2 - editnnc = nnc.df2ecl_editnnc(nncdf, filename="editnnc.inc") + editnnc = nnc.df2res_editnnc(nncdf, filename="editnnc.inc") editnnc_fromfile = Path("editnnc.inc").read_text(encoding="utf8") assert editnnc == editnnc_fromfile assert 
"EDITNNC" in editnnc @@ -103,25 +103,25 @@ def test_df2ecl_editnnc(tmp_path): # Fails when columns are missing with pytest.raises((KeyError, ValueError)): - nnc.df2ecl_editnnc(nncdf[["I1", "I2"]]) + nnc.df2res_editnnc(nncdf[["I1", "I2"]]) - editnnc = nnc.df2ecl_editnnc(nncdf, nocomments=True) + editnnc = nnc.df2res_editnnc(nncdf, nocomments=True) assert "avg multiplier" not in editnnc # Test compatibility with trans module: - trans_df = trans.df(eclfiles, addnnc=True) - editnnc = nnc.df2ecl_editnnc(trans_df.assign(TRANM=0.3)) + trans_df = trans.df(resdatafiles, addnnc=True) + editnnc = nnc.df2res_editnnc(trans_df.assign(TRANM=0.3)) assert "avg multiplier 0.3" in editnnc or "avg multiplier 0.29999" in editnnc - print(nnc.df2ecl_editnnc(nnc.df(eclfiles).head(4).assign(TRANM=0.1))) + print(nnc.df2res_editnnc(nnc.df(resdatafiles).head(4).assign(TRANM=0.1))) @pytest.mark.skipif(not HAVE_OPM, reason="Requires OPM") def test_main(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "nnc.csv" - mocker.patch("sys.argv", ["ecl2csv", "nnc", "-v", REEK, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "nnc", "-v", REEK, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -134,7 +134,7 @@ def test_main(tmp_path, mocker): def test_magic_stdout(): """Test that we can pipe the output into a dataframe""" result = subprocess.run( - ["ecl2csv", "nnc", "-o", "-", REEK], check=True, stdout=subprocess.PIPE + ["res2csv", "nnc", "-o", "-", REEK], check=True, stdout=subprocess.PIPE ) df_stdout = pd.read_csv(io.StringIO(result.stdout.decode())) assert not df_stdout.empty diff --git a/tests/test_parameters.py b/tests/test_parameters.py index 253e1e91b..1f762e65c 100644 --- a/tests/test_parameters.py +++ b/tests/test_parameters.py @@ -7,8 +7,8 @@ import pytest import yaml -from ecl2df.eclfiles import EclFiles -from ecl2df.parameters import find_parameter_files, load, load_all +from res2df.parameters import find_parameter_files, load, load_all +from res2df.resdatafiles import ResdataFiles TESTDIR = Path(__file__).absolute().parent DATAFILE = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -16,12 +16,12 @@ def test_parameters(): """Test import of parameters.txt++""" - eclfiles = EclFiles(DATAFILE) + resdatafiles = ResdataFiles(DATAFILE) # NB: This test easily fails due to remnants of other test code.. - assert not find_parameter_files(eclfiles) + assert not find_parameter_files(resdatafiles) - parameterstxt = Path(eclfiles.get_path()) / "parameters.txt" + parameterstxt = Path(resdatafiles.get_path()) / "parameters.txt" # If this exists, it is a remnant from test code that has # crashed. It should NOT be in git. 
if parameterstxt.is_file(): @@ -32,10 +32,10 @@ def test_parameters(): assert "FOO" in param_dict assert "BAR" in param_dict - assert len(find_parameter_files(eclfiles)) == 1 + assert len(find_parameter_files(resdatafiles)) == 1 parameterstxt.unlink() - parameterstxt = Path(eclfiles.get_path()).parent / "parameters.txt" + parameterstxt = Path(resdatafiles.get_path()).parent / "parameters.txt" if parameterstxt.is_file(): parameterstxt.unlink() parameterstxt.write_text("FOO 1\nBAR 3\nCONTACT:BARF 2700", encoding="utf-8") @@ -45,33 +45,33 @@ def test_parameters(): assert "BAR" in param_dict assert param_dict["BAR"] == 3 assert param_dict["CONTACT:BARF"] == 2700 - assert len(find_parameter_files(eclfiles)) == 1 + assert len(find_parameter_files(resdatafiles)) == 1 parameterstxt.unlink() # Typical parameters.json structure: The group "CONTACT" is assumed to have # duplicate information, and is to be ignored dump_me = {"FOO": 1, "BAR": "com", "CONTACT:BARF": 2700, "CONTACT": {"BARF": 2700}} - parametersyml = Path(eclfiles.get_path()) / "parameters.yml" + parametersyml = Path(resdatafiles.get_path()) / "parameters.yml" if parametersyml.is_file(): parametersyml.unlink() parametersyml.write_text(yaml.dump(dump_me), encoding="utf-8") assert Path(parametersyml).is_file() - assert len(find_parameter_files(eclfiles)) == 1 + assert len(find_parameter_files(resdatafiles)) == 1 param_dict = load(parametersyml) assert "FOO" in param_dict assert "BAR" in param_dict assert param_dict["BAR"] == "com" parametersyml.unlink() - parametersjson = Path(eclfiles.get_path()) / "parameters.json" + parametersjson = Path(resdatafiles.get_path()) / "parameters.json" if parametersjson.is_file(): parametersjson.unlink() parametersjson.write_text(json.dumps(dump_me), encoding="utf-8") assert Path(parametersjson).is_file() - assert len(find_parameter_files(eclfiles)) == 1 - param_dict = load(find_parameter_files(eclfiles)[0]) - param_dict_m = load_all(find_parameter_files(eclfiles)) + assert len(find_parameter_files(resdatafiles)) == 1 + param_dict = load(find_parameter_files(resdatafiles)[0]) + param_dict_m = load_all(find_parameter_files(resdatafiles)) assert "FOO" in param_dict assert "BAR" in param_dict assert param_dict["BAR"] == "com" @@ -81,12 +81,12 @@ def test_multiple_parameters(): """Test what happens when we have duplicate parameter files""" - eclfiles = EclFiles(DATAFILE) - parametersjson = Path(eclfiles.get_path()) / "parameters.json" - parameterstxt = Path(eclfiles.get_path()).parent / "parameters.txt" + resdatafiles = ResdataFiles(DATAFILE) + parametersjson = Path(resdatafiles.get_path()) / "parameters.json" + parameterstxt = Path(resdatafiles.get_path()).parent / "parameters.txt" parameterstxt.write_text("FOO 1\nBAR 4", encoding="utf-8") parametersjson.write_text(json.dumps({"BAR": 5, "COM": 6}), encoding="utf-8") - param_dict = load_all(find_parameter_files(eclfiles)) + param_dict = load_all(find_parameter_files(resdatafiles)) assert len(param_dict) == 3 assert param_dict["BAR"] == 5 # json has precedence over txt parametersjson.unlink() diff --git a/tests/test_pillars.py b/tests/test_pillars.py index e962119f4..1edb0c6c4 100644 --- a/tests/test_pillars.py +++ b/tests/test_pillars.py @@ -1,12 +1,12 @@ -"""Test module for ecl2df.pillars""" +"""Test module for res2df.pillars""" from pathlib import Path import pandas as pd import pytest -from ecl2df import ecl2csv, grid, pillars -from ecl2df.eclfiles import EclFiles +from res2df import grid, pillars, res2csv +from res2df.resdatafiles 
import ResdataFiles TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -14,8 +14,8 @@ def test_pillars(): """Test that we can build a dataframe of pillar statistics""" - eclfiles = EclFiles(REEK) - pillars_df = pillars.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + pillars_df = pillars.df(resdatafiles) assert "PILLAR" in pillars_df assert "VOLUME" in pillars_df assert "PORV" in pillars_df @@ -30,25 +30,27 @@ def test_pillars(): assert "GOC" not in pillars_df assert len(pillars_df) == 2560 - pillars_df = pillars.df(eclfiles, region="FIPNUM") + pillars_df = pillars.df(resdatafiles, region="FIPNUM") assert "FIPNUM" in pillars_df assert len(pillars_df["FIPNUM"].unique()) == 6 assert "OILVOL" not in pillars_df - pillars_df = pillars.df(eclfiles, rstdates="first") - firstdate = str(grid.dates2rstindices(eclfiles, "first")[1][0]) + pillars_df = pillars.df(resdatafiles, rstdates="first") + firstdate = str(grid.dates2rstindices(resdatafiles, "first")[1][0]) assert "OILVOL@" + firstdate in pillars_df assert "GASVOL@" + firstdate in pillars_df assert "WATVOL@" + firstdate in pillars_df - pillars_df = pillars.df(eclfiles, rstdates="last", soilcutoff=0.2, sgascutoff=0.2) - lastdate = str(grid.dates2rstindices(eclfiles, "last")[1][0]) + pillars_df = pillars.df( + resdatafiles, rstdates="last", soilcutoff=0.2, sgascutoff=0.2 + ) + lastdate = str(grid.dates2rstindices(resdatafiles, "last")[1][0]) assert "OWC@" + lastdate in pillars_df assert "GOC@" + lastdate not in pillars_df # Because the dataset has no GAS... # Grouping by unknowns only triggers a warning pd.testing.assert_frame_equal( - pillars.df(eclfiles), pillars.df(eclfiles, region="FOOBAR") + pillars.df(resdatafiles), pillars.df(resdatafiles, region="FOOBAR") ) @@ -326,8 +328,8 @@ def test_compute_volumes(dframe, datestr, expectedrows): def test_main(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "pillars.csv" - mocker.patch("sys.argv", ["ecl2csv", "pillars", REEK, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "pillars", REEK, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" in disk_df @@ -338,7 +340,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -348,7 +350,7 @@ str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" not in disk_df # because of grouping @@ -362,7 +364,7 @@ mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -371,7 +373,7 @@ str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" in disk_df @@ -383,7 +385,7 @@ mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -393,7 +395,7 @@ str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" not in disk_df # because of grouping @@ -404,7 +406,7 @@ mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -416,7 +418,7 
str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" not in disk_df # because of region averaging @@ -433,7 +435,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -445,7 +447,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" not in disk_df # because of region averaging @@ -462,7 +464,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -474,7 +476,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" not in disk_df # because of region averaging @@ -489,7 +491,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -502,7 +504,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" not in disk_df # because of region averaging @@ -519,7 +521,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -531,7 +533,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" in disk_df @@ -545,7 +547,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", "-v", REEK, @@ -556,7 +558,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" in disk_df diff --git a/tests/test_pvt.py b/tests/test_pvt.py index 3dd0b1dc7..fa3313510 100644 --- a/tests/test_pvt.py +++ b/tests/test_pvt.py @@ -8,8 +8,8 @@ import pandas as pd import pytest -from ecl2df import csv2ecl, ecl2csv, pvt -from ecl2df.eclfiles import EclFiles +from res2df import csv2res, pvt, res2csv +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -33,7 +33,7 @@ def test_pvto_strings(): 18 25 1.14 0.59 / / -- One table (pvtnum=1), two records (two gor's) """ - dframe = pvt.pvto_fromdeck(EclFiles.str2deck(pvto_deck)) + dframe = pvt.pvto_fromdeck(ResdataFiles.str2deck(pvto_deck)) assert "PVTNUM" in dframe assert "RS" in dframe assert "PRESSURE" in dframe @@ -47,7 +47,7 @@ def test_pvto_strings(): assert set(dframe["PVTNUM"].values) == {1} assert max(dframe["PRESSURE"]) == 200 - dframe_via_string = pvt.pvto_fromdeck(pvt.df2ecl_pvto(dframe)) + dframe_via_string = pvt.pvto_fromdeck(pvt.df2res_pvto(dframe)) pd.testing.assert_frame_equal(dframe_via_string, dframe) # Provide TABDIMS in first test.. 
Infer later @@ -64,7 +64,7 @@ def test_pvto_strings(): 19 30 1.14 0.59 / / """ - dframe = pvt.pvto_fromdeck(EclFiles.str2deck(pvto_deck)) + dframe = pvt.pvto_fromdeck(ResdataFiles.str2deck(pvto_deck)) assert len(dframe) == 6 assert "PVTNUM" in dframe assert set(dframe["PVTNUM"].astype(int).unique()) == {1, 2} @@ -72,7 +72,7 @@ def test_pvto_strings(): assert len(dframe["PRESSURE"].unique()) == 6 assert len(dframe["VOLUMEFACTOR"].unique()) == 3 - dframe_via_string = pvt.pvto_fromdeck(pvt.df2ecl_pvto(dframe)) + dframe_via_string = pvt.pvto_fromdeck(pvt.df2res_pvto(dframe)) pd.testing.assert_frame_equal(dframe_via_string, dframe) # Now test the same but without TABDIMS: @@ -94,11 +94,11 @@ def test_pvto_strings(): assert len(dframe["RS"].unique()) == 4 assert len(dframe["PRESSURE"].unique()) == 6 assert len(dframe["VOLUMEFACTOR"].unique()) == 3 - dframe_via_string = pvt.pvto_fromdeck(pvt.df2ecl_pvto(dframe)) + dframe_via_string = pvt.pvto_fromdeck(pvt.df2res_pvto(dframe)) pd.testing.assert_frame_equal(dframe_via_string, dframe) # Test empty data: - inc = pvt.df2ecl_pvto(pvt.df("")) + inc = pvt.df2res_pvto(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -123,7 +123,7 @@ def test_pvdg_string(): assert "VISCOSITY" in dframe # Test empty data: - inc = pvt.df2ecl_pvdg(pvt.df("")) + inc = pvt.df2res_pvdg(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -152,7 +152,7 @@ def test_pvdo_string(): ) # Test empty data: - inc = pvt.df2ecl_pvdo(pvt.df("")) + inc = pvt.df2res_pvdo(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -160,8 +160,8 @@ def test_pvt_reek(): """Test that the Reek PVT input can be parsed individually""" - eclfiles = EclFiles(REEK) - pvto_df = pvt.pvto_fromdeck(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + pvto_df = pvt.pvto_fromdeck(resdatafiles.get_deck()) assert "PVTNUM" in pvto_df assert "PRESSURE" in pvto_df assert "VOLUMEFACTOR" in pvto_df @@ -177,10 +177,10 @@ def test_pvt_reek(): assert pvto_df["VOLUMEFACTOR"].max() == 2.851 assert pvto_df["VISCOSITY"].max() == 1.0001 - dframe_via_string = pvt.pvto_fromdeck(pvt.df2ecl_pvto(pvto_df)) + dframe_via_string = pvt.pvto_fromdeck(pvt.df2res_pvto(pvto_df)) pd.testing.assert_frame_equal(dframe_via_string, pvto_df) - density_df = pvt.density_fromdeck(eclfiles.get_ecldeck()) + density_df = pvt.density_fromdeck(resdatafiles.get_deck()) pd.testing.assert_frame_equal( density_df, pd.DataFrame( @@ -189,17 +189,17 @@ def test_pvt_reek(): ), check_like=True, ) - dframe_via_string = pvt.density_fromdeck(pvt.df2ecl_density(density_df)) + dframe_via_string = pvt.density_fromdeck(pvt.df2res_density(density_df)) pd.testing.assert_frame_equal(dframe_via_string, density_df) - rock_df = pvt.rock_fromdeck(eclfiles.get_ecldeck()) + rock_df = pvt.rock_fromdeck(resdatafiles.get_deck()) assert "PVTNUM" in rock_df assert len(rock_df) == 1 assert "PRESSURE" in rock_df assert "COMPRESSIBILITY" in rock_df assert rock_df["PRESSURE"].values[0] == 327.3 - pvtw_df = pvt.pvtw_fromdeck(eclfiles.get_ecldeck()) + pvtw_df = pvt.pvtw_fromdeck(resdatafiles.get_deck()) assert "PVTNUM" in pvtw_df assert pvtw_df["PVTNUM"].values[0] == 1 assert len(pvtw_df) == 1 @@ -210,7 +210,7 @@ def test_pvt_reek(): assert "VISCOSIBILITY" in pvtw_df assert pvtw_df["VISCOSITY"].values[0] == 0.25 - pvdg_df = pvt.pvdg_fromdeck(eclfiles.get_ecldeck()) + pvdg_df = pvt.pvdg_fromdeck(resdatafiles.get_deck()) assert "PVTNUM" in pvdg_df assert "PRESSURE" in pvdg_df assert "VOLUMEFACTOR" in pvdg_df @@ -263,7 +263,7 
@@ def test_pvtg_string(): assert max(pvtg_df["VISCOSITY"]) == 0.0393 # Test empty data: - inc = pvt.df2ecl_pvtg(pvt.df("")) + inc = pvt.df2res_pvtg(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -292,15 +292,15 @@ def test_pvtg_string(): def test_density(): """Test that DENSITY can be parsed from files and from strings""" - eclfiles = EclFiles(REEK) - density_df = pvt.density_fromdeck(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + density_df = pvt.density_fromdeck(resdatafiles.get_deck()) assert len(density_df) == 1 assert "PVTNUM" in density_df assert "OILDENSITY" in density_df assert "WATERDENSITY" in density_df assert "GASDENSITY" in density_df - dframe_via_string = pvt.density_fromdeck(pvt.df2ecl_density(density_df)) + dframe_via_string = pvt.density_fromdeck(pvt.df2res_density(density_df)) pd.testing.assert_frame_equal(dframe_via_string, density_df) two_pvtnum_deck = """DENSITY @@ -308,7 +308,7 @@ def test_density(): 800 950 1.05 / """ - density_df = pvt.density_fromdeck(EclFiles.str2deck(two_pvtnum_deck)) + density_df = pvt.density_fromdeck(ResdataFiles.str2deck(two_pvtnum_deck)) # (a warning will be printed that we cannot guess) assert len(density_df) == 1 density_df = pvt.density_fromdeck(two_pvtnum_deck) @@ -316,11 +316,11 @@ def test_density(): assert density_df["PVTNUM"].max() == 2 assert density_df["PVTNUM"].min() == 1 assert "OILDENSITY" in density_df - dframe_via_string = pvt.density_fromdeck(pvt.df2ecl_density(density_df)) + dframe_via_string = pvt.density_fromdeck(pvt.df2res_density(density_df)) pd.testing.assert_frame_equal(dframe_via_string, density_df) # Test empty data: - inc = pvt.df2ecl_density(pvt.df("")) + inc = pvt.df2res_density(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -329,7 +329,7 @@ def test_pvtw(): """Test that PVTW can be parsed from a string""" deck = """PVTW 327.3 1.03 4.51E-005 0.25 0 /""" - pvtw_df = pvt.pvtw_fromdeck(EclFiles.str2deck(deck)) + pvtw_df = pvt.pvtw_fromdeck(ResdataFiles.str2deck(deck)) pd.testing.assert_frame_equal( pvtw_df, pd.DataFrame( @@ -353,7 +353,7 @@ def test_pvtw(): assert len(pvtw_df) == 2 # Test empty data: - inc = pvt.df2ecl_pvtw(pvt.df("")) + inc = pvt.df2res_pvtw(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -362,23 +362,23 @@ def test_rock(): """Test parsing of the ROCK keyword from a string""" deck = """ROCK 100 1.1 /""" - rock_df = pvt.rock_fromdeck(EclFiles.str2deck(deck)) + rock_df = pvt.rock_fromdeck(ResdataFiles.str2deck(deck)) assert len(rock_df) == 1 assert "PRESSURE" in rock_df assert "COMPRESSIBILITY" in rock_df - dframe_via_string = pvt.rock_fromdeck(pvt.df2ecl_rock(rock_df)) + dframe_via_string = pvt.rock_fromdeck(pvt.df2res_rock(rock_df)) pd.testing.assert_frame_equal(dframe_via_string, rock_df) # Test empty data: - inc = pvt.df2ecl_rock(pvt.df("")) + inc = pvt.df2res_rock(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty def test_df(): """Test that aggregate dataframes are produced""" - eclfiles = EclFiles(REEK) - pvtdf = pvt.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + pvtdf = pvt.df(resdatafiles) assert not pvtdf.empty assert set(pvtdf["KEYWORD"]) == {"PVTO", "PVDG", "DENSITY", "ROCK", "PVTW"} @@ -395,9 +395,9 @@ def test_main(tmp_path, mocker): os.chdir(tmp_path) tmpcsvfile = tmp_path / "pvt.csv" mocker.patch( - "sys.argv", ["ecl2csv", "pvt", "-v", EIGHTCELLS, "-o", str(tmpcsvfile)] + "sys.argv", ["res2csv", "pvt", "-v", EIGHTCELLS, "-o", str(tmpcsvfile)] ) - ecl2csv.main() + res2csv.main() assert 
Path(tmpcsvfile).is_file() disk_df = pd.read_csv(tmpcsvfile) @@ -408,9 +408,9 @@ def test_main(tmp_path, mocker): # Write back to include file: incfile = tmp_path / "pvt.inc" mocker.patch( - "sys.argv", ["csv2ecl", "pvt", "-v", str(tmpcsvfile), "-o", str(incfile)] + "sys.argv", ["csv2res", "pvt", "-v", str(tmpcsvfile), "-o", str(incfile)] ) - csv2ecl.main() + csv2res.main() # Reparse the include file on disk back to dataframe # and check dataframe equality @@ -428,8 +428,8 @@ def test_main(tmp_path, mocker): """, encoding="utf8", ) - mocker.patch("sys.argv", ["ecl2csv", "pvt", "-v", "pvto.inc", "-o", "pvto.csv"]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "pvt", "-v", "pvto.inc", "-o", "pvto.csv"]) + res2csv.main() assert Path("pvto.csv").is_file() # Empty data: @@ -440,8 +440,8 @@ def test_main(tmp_path, mocker): """, encoding="utf8", ) - mocker.patch("sys.argv", ["ecl2csv", "pvt", "-v", "empty.inc", "-o", "empty.csv"]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "pvt", "-v", "empty.inc", "-o", "empty.csv"]) + res2csv.main() assert not Path("empty.csv").read_text(encoding="utf8").strip() @@ -449,14 +449,14 @@ def test_magic_stdout(tmp_path): """Test writing dataframes and include files to stdout""" os.chdir(tmp_path) result = subprocess.run( - ["ecl2csv", "pvt", "-o", "-", EIGHTCELLS], check=True, stdout=subprocess.PIPE + ["res2csv", "pvt", "-o", "-", EIGHTCELLS], check=True, stdout=subprocess.PIPE ) df_stdout = pd.read_csv(io.StringIO(result.stdout.decode())) assert not df_stdout.empty # Verbose options should not ruin it: result = subprocess.run( - ["ecl2csv", "pvt", "--verbose", "-o", "-", EIGHTCELLS], + ["res2csv", "pvt", "--verbose", "-o", "-", EIGHTCELLS], check=True, stdout=subprocess.PIPE, ) @@ -464,10 +464,10 @@ def test_magic_stdout(tmp_path): # pylint: disable=no-member # false positive on Dataframes assert not df_stdout.empty - # Pipe back to csv2ecl: + # Pipe back to csv2res: df_stdout.to_csv("pvt.csv", index=False) result = subprocess.run( - ["csv2ecl", "pvt", "--verbose", "-o", "-", "pvt.csv"], + ["csv2res", "pvt", "--verbose", "-o", "-", "pvt.csv"], check=True, stdout=subprocess.PIPE, ) @@ -475,22 +475,22 @@ def test_magic_stdout(tmp_path): assert not dframe.empty -def test_df2ecl(): - """df2ecl is a wrapper around the df2ecl_* functions +def test_df2res(): + """df2res is a wrapper around the df2res_* functions The validity of produced dataframes is tested in other test functions herein, here we mainly test for the API and error handling""" with pytest.raises(ValueError): - pvt.df2ecl(pd.DataFrame()) + pvt.df2res(pd.DataFrame()) -def test_df2ecl_pvto(): +def test_df2res_pvto(): """Test that we can print a PVTO dataframe to E100 include file""" dframe = pd.DataFrame( columns=["PVTNUM", "RS", "PRESSURE", "VOLUMEFACTOR", "VISCOSITY"], data=[[1, 50, 100, 2, 1.04]], ) - pvto_string = pvt.df2ecl_pvto(dframe) + pvto_string = pvt.df2res_pvto(dframe) assert "PVTO" in pvto_string assert "1.04" in pvto_string assert "100" in pvto_string @@ -506,7 +506,7 @@ def test_df2ecl_pvto(): columns=["PVTNUM", "RS", "PRESSURE", "VOLUMEFACTOR", "VISCOSITY"], data=[[1, 50, 100, 2, 1.04], [1, 50, 120, 3, 1.05]], ) - pvto_string = pvt.df2ecl_pvto(dframe) + pvto_string = pvt.df2res_pvto(dframe) assert "PVTO" in pvto_string assert "1.05" in pvto_string assert "120" in pvto_string @@ -519,17 +519,17 @@ def test_df2ecl_pvto(): ) # If PVTNUM is missing, the code gives up if there are many rows. 
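The df2res_* hunks above and below all assert the same round-trip property; a minimal sketch of that property for PVTO, with illustrative values and the column layout used by these tests:

    import pandas as pd
    from res2df import pvt

    dframe = pd.DataFrame(
        columns=["PVTNUM", "RS", "PRESSURE", "VOLUMEFACTOR", "VISCOSITY"],
        data=[[1, 50, 100, 2, 1.04]],
    )
    inc = pvt.df2res_pvto(dframe)  # dataframe -> PVTO include-file string
    roundtrip = pvt.pvto_fromdeck(inc)  # parse the string back; frames compare equal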
- assert "PVTO" not in pvt.df2ecl_pvto( + assert "PVTO" not in pvt.df2res_pvto( pd.concat([dframe, dframe]).drop("PVTNUM", axis="columns") ) # If only one row, this is accepted: - assert "PVTO" in pvt.df2ecl_pvto(dframe.head(1).drop("PVTNUM", axis="columns")) + assert "PVTO" in pvt.df2res_pvto(dframe.head(1).drop("PVTNUM", axis="columns")) # (the corner case with only one row is not very meaningful, but at # least it is well defined how to treat it) -def test_df2ecl_rock(tmp_path): +def test_df2res_rock(tmp_path): """Test generation of ROCK include files from dataframes""" os.chdir(tmp_path) @@ -538,14 +538,14 @@ def test_df2ecl_rock(tmp_path): data=[[1, "ROCK", 100, 0.001]], ) - rock_inc = pvt.df2ecl(rock_df) + rock_inc = pvt.df2res(rock_df) assert "ROCK" in rock_inc - rock_inc = pvt.df2ecl(rock_df, comments=dict(ROCK="foo")) + rock_inc = pvt.df2res(rock_df, comments=dict(ROCK="foo")) assert "foo" in rock_inc - rock_inc = pvt.df2ecl(rock_df, comments=dict(DENSITY="foo")) + rock_inc = pvt.df2res(rock_df, comments=dict(DENSITY="foo")) assert "foo" not in rock_inc - rock_inc = pvt.df2ecl(rock_df, comments=dict(ROCK="foo\nbar"), filename="foo.inc") + rock_inc = pvt.df2res(rock_df, comments=dict(ROCK="foo\nbar"), filename="foo.inc") assert Path("foo.inc").is_file() assert "foo" in rock_inc assert "bar" in rock_inc @@ -559,15 +559,15 @@ def test_df2ecl_rock(tmp_path): rock_df = rock_df_from_inc.reindex(sorted(rock_df.columns), axis=1) pd.testing.assert_frame_equal(rock_df_from_inc, rock_df) - rock_inc = pvt.df2ecl(rock_df, keywords=["DENSITY"]) + rock_inc = pvt.df2res(rock_df, keywords=["DENSITY"]) assert not rock_inc - rock_inc = pvt.df2ecl(rock_df, keywords="DENSITY") + rock_inc = pvt.df2res(rock_df, keywords="DENSITY") assert not rock_inc - rock_inc = pvt.df2ecl(rock_df, keywords=["ROCK", "DENSITY"]) + rock_inc = pvt.df2res(rock_df, keywords=["ROCK", "DENSITY"]) assert "ROCK" in rock_inc assert "DENSITY" not in rock_inc - rock_inc = pvt.df2ecl(rock_df, keywords="ROCK") + rock_inc = pvt.df2res(rock_df, keywords="ROCK") assert "ROCK" in rock_inc # This dataframe is ignored, if we miss PVTNUM: @@ -575,40 +575,40 @@ def test_df2ecl_rock(tmp_path): columns=["KEYWORD", "PRESSURE", "COMPRESSIBILITY"], data=[["ROCK", 100, 0.001], ["ROCK", 200, 0.002]], ) - assert "ROCK" not in pvt.df2ecl_rock(ambig_rock_df) + assert "ROCK" not in pvt.df2res_rock(ambig_rock_df) # But if only one row, it is ok: - assert "ROCK" in pvt.df2ecl_rock(ambig_rock_df.head(1)) + assert "ROCK" in pvt.df2res_rock(ambig_rock_df.head(1)) # If we don't want the ROCK keyword, we won't get it: - nonrock_inc = pvt.df2ecl(rock_df, keywords=["PVTO"]) + nonrock_inc = pvt.df2res(rock_df, keywords=["PVTO"]) assert "ROCK" not in nonrock_inc -def test_df2ecl_density(): +def test_df2res_density(): """Test generation of PVT density include files from dataframes""" density_df = pd.DataFrame( columns=["PVTNUM", "OILDENSITY", "WATERDENSITY", "GASDENSITY"], data=[[1, 827.64, 999.04, 1.1427]], ) - dens_inc = pvt.df2ecl_density(density_df) + dens_inc = pvt.df2res_density(density_df) assert "DENSITY" in dens_inc # If PVTNUM is missing, the code gives up: - assert "DENSITY" not in pvt.df2ecl_density( + assert "DENSITY" not in pvt.df2res_density( pd.concat([density_df, density_df]).drop("PVTNUM", axis="columns") ) # Unless there is only one row: - assert "DENSITY" in pvt.df2ecl_density(density_df.drop("PVTNUM", axis="columns")) + assert "DENSITY" in pvt.df2res_density(density_df.drop("PVTNUM", axis="columns")) # Missing column: with 
pytest.raises(KeyError, match="OILDENSITY"): - pvt.df2ecl_density(density_df.drop("OILDENSITY", axis="columns")) + pvt.df2res_density(density_df.drop("OILDENSITY", axis="columns")) -def test_df2ecl_pvtw(): +def test_df2res_pvtw(): """Test generation of PVTW include statements""" pvtw_df = pd.DataFrame( columns=[ @@ -621,22 +621,22 @@ def test_df2ecl_pvtw(): ], data=[[327.3, 1.03, 4.51e-005, 0.25, 0.0, 1]], ) - assert "PVTW" in pvt.df2ecl_pvtw(pvtw_df) + assert "PVTW" in pvt.df2res_pvtw(pvtw_df) # If PVTNUM is missing, the code gives up: - assert "PVTW" not in pvt.df2ecl_pvtw( + assert "PVTW" not in pvt.df2res_pvtw( pd.concat([pvtw_df, pvtw_df]).drop("PVTNUM", axis="columns") ) # Unless there is only one row: - assert "PVTW" in pvt.df2ecl_pvtw(pvtw_df.drop("PVTNUM", axis="columns")) + assert "PVTW" in pvt.df2res_pvtw(pvtw_df.drop("PVTNUM", axis="columns")) # Missing column: with pytest.raises(KeyError, match="VOLUMEFACTOR"): - pvt.df2ecl_pvtw(pvtw_df.drop("VOLUMEFACTOR", axis="columns")) + pvt.df2res_pvtw(pvtw_df.drop("VOLUMEFACTOR", axis="columns")) -def test_df2ecl_pvtg(): +def test_df2res_pvtg(): """Test generation of PVTG include statements""" pvtg_df = pd.DataFrame( columns=["OGR", "VOLUMEFACTOR", "VISCOSITY", "PRESSURE", "PVTNUM"], @@ -646,26 +646,26 @@ def test_df2ecl_pvtg(): [0.00014, 0.0523, 0.0234, 60.0, 2], ], ) - assert "PVTG" in pvt.df2ecl_pvtg(pvtg_df) - assert "PVTG" in pvt.df2ecl_pvtg(pvtg_df.assign(KEYWORD="PVTG")) + assert "PVTG" in pvt.df2res_pvtg(pvtg_df) + assert "PVTG" in pvt.df2res_pvtg(pvtg_df.assign(KEYWORD="PVTG")) pd.testing.assert_frame_equal( - pvt.df(pvt.df2ecl_pvtg(pvtg_df)).drop("KEYWORD", axis="columns"), pvtg_df + pvt.df(pvt.df2res_pvtg(pvtg_df)).drop("KEYWORD", axis="columns"), pvtg_df ) # If PVTNUM is missing, the code gives up: - assert "PVTG" not in pvt.df2ecl_pvtg( + assert "PVTG" not in pvt.df2res_pvtg( pd.concat([pvtg_df, pvtg_df]).drop("PVTNUM", axis="columns") ) # Unless there is only one row: - assert "PVTG" in pvt.df2ecl_pvtg(pvtg_df.head(1).drop("PVTNUM", axis="columns")) + assert "PVTG" in pvt.df2res_pvtg(pvtg_df.head(1).drop("PVTNUM", axis="columns")) # Missing column: with pytest.raises(KeyError, match="VOLUMEFACTOR"): - pvt.df2ecl_pvtg(pvtg_df.drop("VOLUMEFACTOR", axis="columns")) + pvt.df2res_pvtg(pvtg_df.drop("VOLUMEFACTOR", axis="columns")) -def test_df2ecl_pvdo_pvdg(): +def test_df2res_pvdo_pvdg(): """Test construction of PVDO and PVDG statements from dataframe. 
The keyword data and code are similar enough to warrant one test @@ -680,33 +680,33 @@ ], ) - assert "PVDO" in pvt.df2ecl_pvdo(pvdog_df) - assert "PVDG" in pvt.df2ecl_pvdg(pvdog_df) + assert "PVDO" in pvt.df2res_pvdo(pvdog_df) + assert "PVDG" in pvt.df2res_pvdg(pvdog_df) - assert "PVDO" in pvt.df2ecl_pvdo(pvdog_df.assign(KEYWORD="PVDO")) - assert "PVDG" in pvt.df2ecl_pvdg(pvdog_df.assign(KEYWORD="PVDG")) + assert "PVDO" in pvt.df2res_pvdo(pvdog_df.assign(KEYWORD="PVDO")) + assert "PVDG" in pvt.df2res_pvdg(pvdog_df.assign(KEYWORD="PVDG")) pd.testing.assert_frame_equal( - pvt.df(pvt.df2ecl_pvdo(pvdog_df)).drop("KEYWORD", axis="columns"), pvdog_df + pvt.df(pvt.df2res_pvdo(pvdog_df)).drop("KEYWORD", axis="columns"), pvdog_df ) pd.testing.assert_frame_equal( - pvt.df(pvt.df2ecl_pvdg(pvdog_df)).drop("KEYWORD", axis="columns"), pvdog_df + pvt.df(pvt.df2res_pvdg(pvdog_df)).drop("KEYWORD", axis="columns"), pvdog_df ) # If PVTNUM is missing, the code gives up: - assert "PVDO" not in pvt.df2ecl_pvdo( + assert "PVDO" not in pvt.df2res_pvdo( pd.concat([pvdog_df, pvdog_df]).drop("PVTNUM", axis="columns") ) - assert "PVDG" not in pvt.df2ecl_pvdg( + assert "PVDG" not in pvt.df2res_pvdg( pd.concat([pvdog_df, pvdog_df]).drop("PVTNUM", axis="columns") ) # Unless there is only one row: - assert "PVDO" in pvt.df2ecl_pvdo(pvdog_df.head(1).drop("PVTNUM", axis="columns")) - assert "PVDG" in pvt.df2ecl_pvdg(pvdog_df.head(1).drop("PVTNUM", axis="columns")) + assert "PVDO" in pvt.df2res_pvdo(pvdog_df.head(1).drop("PVTNUM", axis="columns")) + assert "PVDG" in pvt.df2res_pvdg(pvdog_df.head(1).drop("PVTNUM", axis="columns")) # Missing column: with pytest.raises(KeyError, match="VOLUMEFACTOR"): - pvt.df2ecl_pvdo(pvdog_df.drop("VOLUMEFACTOR", axis="columns")) + pvt.df2res_pvdo(pvdog_df.drop("VOLUMEFACTOR", axis="columns")) with pytest.raises(KeyError, match="VOLUMEFACTOR"): - pvt.df2ecl_pvdg(pvdog_df.drop("VOLUMEFACTOR", axis="columns")) + pvt.df2res_pvdg(pvdog_df.drop("VOLUMEFACTOR", axis="columns")) diff --git a/tests/test_rft.py b/tests/test_rft.py index f79410102..d73cd37a2 100644 --- a/tests/test_rft.py +++ b/tests/test_rft.py @@ -7,8 +7,8 @@ import pandas as pd import pytest -from ecl2df import ecl2csv, rft -from ecl2df.eclfiles import EclFiles +from res2df import res2csv, rft +from res2df.resdatafiles import ResdataFiles TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -20,7 +20,7 @@ def test_rftrecords2df(): """Test that we can construct a dataframe for navigating in RFT records""" - rftrecs = rft._rftrecords2df(EclFiles(EIGHTCELLS).get_rftfile()) + rftrecs = rft._rftrecords2df(ResdataFiles(EIGHTCELLS).get_rftfile()) assert len(rftrecs[rftrecs["recordname"] == "TIME"]) == len( rftrecs["timeindex"].unique() ) @@ -35,7 +35,7 @@ def test_rftrecords_generator(): """Test the generator that will iterate over an EclFile/RFTFile and provide one yield per well per date""" - for rftrecord in rft.rftrecords(EclFiles(EIGHTCELLS).get_rftfile()): + for rftrecord in rft.rftrecords(ResdataFiles(EIGHTCELLS).get_rftfile()): assert isinstance(rftrecord, dict) assert "date" in rftrecord assert isinstance(rftrecord["date"], datetime.date) @@ -50,7 +50,7 @@ def test_get_con_seg_data(): """Get CON data. 
Later add more code here to defend the name""" - rftfile = EclFiles(EIGHTCELLS).get_rftfile() + rftfile = ResdataFiles(EIGHTCELLS).get_rftfile() # Test the first record, it is a CON type (not multisegment) rftrecord = next(rft.rftrecords(rftfile)) @@ -464,8 +464,8 @@ def test_add_extras(dframe, inplace, expected): def test_rft2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - rftdf = rft.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + rftdf = rft.df(resdatafiles) assert "ZONE" in rftdf assert "LEAF" not in rftdf # Topology metadata should not be exported assert set(rftdf["WELLMODEL"]) == {"STANDARD"} @@ -489,8 +489,8 @@ def test_rft2df(): def test_main_subparsers(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / ".TMP-rft.csv" - mocker.patch("sys.argv", ["ecl2csv", "rft", EIGHTCELLS, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "rft", EIGHTCELLS, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -501,7 +501,7 @@ def test_main_subparsers(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2cvsv", + "res2csv", "rft", "-v", REEK.replace(".DATA", ".RFT"), @@ -509,7 +509,7 @@ def test_main_subparsers(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert not disk_df.empty @@ -519,9 +519,9 @@ def test_main_debugmode(tmp_path, mocker): """Test debug mode""" os.chdir(tmp_path) mocker.patch( - "sys.argv", ["ecl2csv", "rft", "--debug", EIGHTCELLS, "-o", "indebugmode.csv"] + "sys.argv", ["res2csv", "rft", "--debug", EIGHTCELLS, "-o", "indebugmode.csv"] ) - ecl2csv.main() + res2csv.main() # Extra files emitted in debug mode: assert not pd.read_csv("con.csv").empty assert Path("seg.csv").exists() # too simple example data, no segments. 
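The renamed command line front ends can also be exercised as real subprocesses, as the test_magic_stdout tests elsewhere in this patch do; a sketch, assuming res2csv is on PATH, that the rft subcommand honours the magic "-" output filename like nnc and pvt do, and a hypothetical .DATA path:

    import io
    import subprocess

    import pandas as pd

    result = subprocess.run(
        ["res2csv", "rft", "-o", "-", "MYCASE.DATA"],  # "-" routes CSV to stdout
        check=True,
        stdout=subprocess.PIPE,
    )
    dframe = pd.read_csv(io.StringIO(result.stdout.decode()))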
diff --git a/tests/test_satfunc.py b/tests/test_satfunc.py index d7fb9f90d..a58520de1 100644 --- a/tests/test_satfunc.py +++ b/tests/test_satfunc.py @@ -8,8 +8,8 @@ import pandas as pd import pytest -from ecl2df import csv2ecl, ecl2csv, inferdims, satfunc -from ecl2df.eclfiles import EclFiles +from res2df import csv2res, inferdims, res2csv, satfunc +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -26,11 +26,11 @@ EIGHTCELLS = str(TESTDIR / "data/eightcells/EIGHTCELLS.DATA") -def test_ecldeck_to_satfunc_dframe(): - """Test that dataframes can be produced from a full Eclipse deck (the +def test_deck_to_satfunc_dframe(): + """Test that dataframes can be produced from a complete deck (the example Reek case)""" - eclfiles = EclFiles(REEK) - satdf = satfunc.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + satdf = satfunc.df(resdatafiles.get_deck()) assert set(satdf["KEYWORD"]) == {"SWOF", "SGOF"} assert set(satdf["SATNUM"]) == {1} @@ -56,9 +56,9 @@ def test_ecldeck_to_satfunc_dframe(): def test_satfunc_roundtrip(): """Test that we can produce a SATNUM dataframe from the Reek case, convert it back to an include file, and then reinterpret it to the same""" - eclfiles = EclFiles(EIGHTCELLS) - satdf = satfunc.df(eclfiles.get_ecldeck()) - inc = satfunc.df2ecl(satdf) + resdatafiles = ResdataFiles(EIGHTCELLS) + satdf = satfunc.df(resdatafiles.get_deck()) + inc = satfunc.df2res(satdf) df_from_inc = satfunc.df(inc) pd.testing.assert_frame_equal( satdf.sort_values(["SATNUM", "KEYWORD"]), @@ -66,20 +66,20 @@ def test_satfunc_roundtrip(): ) -def test_df2ecl_order(): +def test_df2res_order(): """Test that we can control the keyword order in generated strings by the list supplied in keywords argument""" - eclfiles = EclFiles(REEK) - satdf = satfunc.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + satdf = satfunc.df(resdatafiles.get_deck()) - swof_sgof = satfunc.df2ecl(satdf, keywords=["SWOF", "SGOF"]) + swof_sgof = satfunc.df2res(satdf, keywords=["SWOF", "SGOF"]) assert swof_sgof.find("SWOF") < swof_sgof.find("SGOF") - sgof_swof = satfunc.df2ecl(satdf, keywords=["SGOF", "SWOF"]) + sgof_swof = satfunc.df2res(satdf, keywords=["SGOF", "SWOF"]) assert sgof_swof.find("SGOF") < sgof_swof.find("SWOF") - only_swof = satfunc.df2ecl(satdf, keywords=["SWOF"]) + only_swof = satfunc.df2res(satdf, keywords=["SWOF"]) assert "SGOF" not in only_swof - only_sgof = satfunc.df2ecl(satdf, keywords="SGOF") + only_sgof = satfunc.df2res(satdf, keywords="SGOF") assert "SWOF" not in only_sgof @@ -90,7 +90,7 @@ def test_nodata(): satdf = satfunc.df(swofstr) assert len(satdf) == 0 - inc = satfunc.df2ecl_swof(satdf) + inc = satfunc.df2res_swof(satdf) assert "No data" in inc df_from_inc = satfunc.df(inc) assert df_from_inc.empty @@ -245,7 +245,7 @@ def test_str2df(string, expected_df): if expected_df.empty: return - inc = satfunc.df2ecl(satdf) + inc = satfunc.df2res(satdf) df_from_inc = satfunc.df(inc) pd.testing.assert_frame_equal(df_from_inc, expected_df) @@ -272,7 +272,7 @@ def test_sgof_satnuminferrer(tmp_path, mocker): assert "SATNUM" in sgofdf assert len(sgofdf["SATNUM"].unique()) == 3 assert len(sgofdf) == 8 - inc = satfunc.df2ecl(sgofdf) + inc = satfunc.df2res(sgofdf) df_from_inc = satfunc.df(inc) pd.testing.assert_frame_equal(sgofdf, df_from_inc) @@ -280,9 +280,9 @@ def test_sgof_satnuminferrer(tmp_path, mocker): sgoffile = "__sgof_tmp.txt" Path(sgoffile).write_text(sgofstr, encoding="utf8") mocker.patch( - "sys.argv", ["ecl2csv", "satfunc", "-v", sgoffile, 
"-o", sgoffile + ".csv"] + "sys.argv", ["res2csv", "satfunc", "-v", sgoffile, "-o", sgoffile + ".csv"] ) - ecl2csv.main() + res2csv.main() parsed_sgof = pd.read_csv(sgoffile + ".csv") assert len(parsed_sgof["SATNUM"].unique()) == 3 @@ -657,8 +657,8 @@ def test_multiple_keywords_family2(): def test_main_subparsers(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "satfunc.csv" - mocker.patch("sys.argv", ["ecl2csv", "satfunc", EIGHTCELLS, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "satfunc", EIGHTCELLS, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -669,7 +669,7 @@ def test_main_subparsers(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "satfunc", EIGHTCELLS, "--keywords", @@ -678,15 +678,15 @@ def test_main_subparsers(tmp_path, mocker): str(tmpcsvfile2), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile2).is_file() disk_df = pd.read_csv(str(tmpcsvfile2)) assert set(disk_df["KEYWORD"].unique()) == {"SWOF"} -def test_csv2ecl(tmp_path, mocker): - """Test command line interface for csv to Eclipse include files""" +def test_csv2res(tmp_path, mocker): + """Test command line interface for csv to include files""" os.chdir(tmp_path) tmpcsvfile = "satfunc.csv" @@ -695,8 +695,8 @@ def test_csv2ecl(tmp_path, mocker): data=[["SWOF", 0.0, 0.0, 1.0, 0.0], ["SWOF", 1.0, 1.0, 0.0, 0.0]], ) swof_df.to_csv(tmpcsvfile, index=False) - mocker.patch("sys.argv", ["csv2ecl", "satfunc", "--output", "swof.inc", tmpcsvfile]) - csv2ecl.main() + mocker.patch("sys.argv", ["csv2res", "satfunc", "--output", "swof.inc", tmpcsvfile]) + csv2res.main() pd.testing.assert_frame_equal( satfunc.df(Path("swof.inc").read_text(encoding="utf8")).drop( "SATNUM", axis="columns" @@ -707,7 +707,7 @@ def test_csv2ecl(tmp_path, mocker): # Test writing to stdout: result = subprocess.run( - ["csv2ecl", "satfunc", "--output", "-", tmpcsvfile], + ["csv2res", "satfunc", "--output", "-", tmpcsvfile], stdout=subprocess.PIPE, check=True, ) diff --git a/tests/test_summary.py b/tests/test_summary.py index 138d544d8..0c9fbb508 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -10,15 +10,15 @@ import yaml from resdata.summary import Summary -from ecl2df import csv2ecl, ecl2csv, summary -from ecl2df.eclfiles import EclFiles -from ecl2df.summary import ( +from res2df import csv2res, res2csv, summary +from res2df.resdatafiles import ResdataFiles +from res2df.summary import ( _df2pyarrow, _fallback_date_roll, - _fix_dframe_for_libecl, + _fix_dframe_for_resdata, date_range, df, - df2eclsum, + df2ressum, resample_smry_dates, smry_meta, ) @@ -44,8 +44,8 @@ def test_df(): """Test that dataframes are produced""" - eclfiles = EclFiles(EIGHTCELLS) - sumdf = summary.df(eclfiles) + resdatafiles = ResdataFiles(EIGHTCELLS) + sumdf = summary.df(resdatafiles) assert sumdf.index.name == "DATE" assert sumdf.index.dtype in ["datetime64[ns]", "datetime64"] @@ -55,7 +55,7 @@ def test_df(): assert not sumdf.columns.empty assert "FOPT" in sumdf.columns - sumdf = summary.df(eclfiles, datetime=True) + sumdf = summary.df(resdatafiles, datetime=True) # (datetime=True is implicit when raw time reports are requested) assert sumdf.index.name == "DATE" assert sumdf.index.dtype in ["datetime64[ns]", "datetime64"] @@ -68,7 +68,7 @@ def test_df(): def test_df_column_keys(): """Test that we can slice the dataframe on columns""" - sumdf = summary.df(EclFiles(REEK), column_keys="FOPT") + sumdf = 
summary.df(ResdataFiles(REEK), column_keys="FOPT") assert set(sumdf.columns) == {"FOPT"} assert set(sumdf.attrs["meta"].keys()) == {"FOPT"} @@ -83,29 +83,29 @@ def test_df_column_keys(): "FOPTF", "FOPP", } - sumdf = summary.df(EclFiles(REEK), column_keys="FOP*") + sumdf = summary.df(ResdataFiles(REEK), column_keys="FOP*") assert set(sumdf.columns) == fop_cols assert set(sumdf.attrs["meta"].keys()) == fop_cols - sumdf = summary.df(EclFiles(REEK), column_keys=["FOP*"]) + sumdf = summary.df(ResdataFiles(REEK), column_keys=["FOP*"]) assert set(sumdf.columns) == fop_cols assert set(sumdf.attrs["meta"].keys()) == fop_cols - sumdf = summary.df(EclFiles(REEK), column_keys=["FOPR", "FOPT"]) + sumdf = summary.df(ResdataFiles(REEK), column_keys=["FOPR", "FOPT"]) assert set(sumdf.columns) == {"FOPT", "FOPR"} assert set(sumdf.attrs["meta"].keys()) == {"FOPT", "FOPR"} - sumdf_no_columns = summary.df(EclFiles(REEK), column_keys=["BOGUS"]) + sumdf_no_columns = summary.df(ResdataFiles(REEK), column_keys=["BOGUS"]) assert sumdf_no_columns.columns.empty assert all(sumdf_no_columns.index == sumdf.index) def test_summary2df_dates(): """Test that we have some API possibilities with ISO dates""" - eclfiles = EclFiles(REEK) + resdatafiles = ResdataFiles(REEK) sumdf = summary.df( - eclfiles, + resdatafiles, start_date=datetime.date(2002, 1, 2), end_date="2002-03-01", time_index="daily", @@ -119,25 +119,25 @@ def test_summary2df_dates(): assert sumdf.index.values[0] == np.datetime64("2002-01-02") assert sumdf.index.values[-1] == np.datetime64("2002-03-01") - sumdf = summary.df(eclfiles, time_index="last", datetime=True) + sumdf = summary.df(resdatafiles, time_index="last", datetime=True) assert len(sumdf) == 1 assert sumdf.index.values[0] == np.datetime64("2003-01-02") # Leave this test for the datetime=False behaviour: - sumdf = summary.df(eclfiles, time_index="first") + sumdf = summary.df(resdatafiles, time_index="first") assert len(sumdf) == 1 assert str(sumdf.index.values[0]) == "2000-01-01" @pytest.mark.integration -def test_ecl2csv_summary(tmp_path, mocker): - """Test that the command line utility ecl2csv is installed and +def test_res2csv_summary(tmp_path, mocker): + """Test that the command line utility res2csv is installed and works with summary data""" tmpcsvfile = tmp_path / "sum.csv" mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "summary", "-v", REEK, @@ -149,7 +149,7 @@ def test_ecl2csv_summary(tmp_path, mocker): "2003-01-02", ], ) - ecl2csv.main() + res2csv.main() disk_df = pd.read_csv(tmpcsvfile) assert len(disk_df) == 97 # Includes timestamps assert str(disk_df["DATE"].values[0]) == "2002-01-02 00:00:00" @@ -159,7 +159,7 @@ def test_ecl2csv_summary(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "summary", REEK, "-o", @@ -172,7 +172,7 @@ def test_ecl2csv_summary(tmp_path, mocker): "2003-01-02", ], ) - ecl2csv.main() + res2csv.main() disk_df = pd.read_csv(tmpcsvfile) assert len(disk_df) == 366 # Pandas' csv export writes datetime64 as pure date @@ -191,16 +191,16 @@ def test_paramsupport(tmp_path, mocker): """ tmpcsvfile = tmp_path / "sum.csv" - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) - parameterstxt = Path(eclfiles.get_path()) / "parameters.txt" + parameterstxt = Path(resdatafiles.get_path()) / "parameters.txt" if parameterstxt.is_file(): parameterstxt.unlink() parameterstxt.write_text("FOO 1\nBAR 3", encoding="utf-8") mocker.patch( - "sys.argv", ["ecl2csv", "summary", EIGHTCELLS, "-o", str(tmpcsvfile), "-p"] + "sys.argv", 
["res2csv", "summary", EIGHTCELLS, "-o", str(tmpcsvfile), "-p"] ) - ecl2csv.main() + res2csv.main() disk_df = pd.read_csv(tmpcsvfile) assert "FOPT" in disk_df assert "FOO" in disk_df @@ -208,14 +208,14 @@ def test_paramsupport(tmp_path, mocker): assert disk_df["BAR"].unique()[0] == 3 parameterstxt.unlink() - parametersyml = Path(eclfiles.get_path()) / "parameters.yml" + parametersyml = Path(resdatafiles.get_path()) / "parameters.yml" if parametersyml.is_file(): parametersyml.unlink() parametersyml.write_text(yaml.dump({"FOO": 1, "BAR": 3}), encoding="utf-8") mocker.patch( - "sys.argv", ["ecl2csv", "summary", EIGHTCELLS, "-o", str(tmpcsvfile), "-p"] + "sys.argv", ["res2csv", "summary", EIGHTCELLS, "-o", str(tmpcsvfile), "-p"] ) - ecl2csv.main() + res2csv.main() disk_df = pd.read_csv(str(tmpcsvfile)) assert "FOPT" in disk_df assert "FOO" in disk_df @@ -226,22 +226,24 @@ def test_paramsupport(tmp_path, mocker): assert disk_df["BAR"].unique()[0] == 3 # Test the merging from summary.df() explicitly: - assert "FOO" in summary.df(eclfiles, params=True, paramfile=None) - assert "FOO" not in summary.df(eclfiles, params=False, paramfile=None) - assert "FOO" not in summary.df(eclfiles, params=None, paramfile=None) + assert "FOO" in summary.df(resdatafiles, params=True, paramfile=None) + assert "FOO" not in summary.df(resdatafiles, params=False, paramfile=None) + assert "FOO" not in summary.df(resdatafiles, params=None, paramfile=None) - assert "FOO" in summary.df(eclfiles, params=False, paramfile=parametersyml) - assert "FOO" in summary.df(eclfiles, params=None, paramfile=parametersyml) - assert "FOO" in summary.df(eclfiles, params=None, paramfile="parameters.yml") + assert "FOO" in summary.df(resdatafiles, params=False, paramfile=parametersyml) + assert "FOO" in summary.df(resdatafiles, params=None, paramfile=parametersyml) + assert "FOO" in summary.df(resdatafiles, params=None, paramfile="parameters.yml") # Non-existing relative path is a soft error: assert "FOO" not in summary.df( - eclfiles, params=None, paramfile="notexisting/parameters.yml" + resdatafiles, params=None, paramfile="notexisting/parameters.yml" ) # Non-existing absolute path is a hard error: with pytest.raises(FileNotFoundError): - summary.df(eclfiles, params=None, paramfile="/tmp/notexisting/parameters.yml") + summary.df( + resdatafiles, params=None, paramfile="/tmp/notexisting/parameters.yml" + ) parametersyml.unlink() @@ -250,7 +252,7 @@ def test_paramsupport_explicitfile(tmp_path, mocker): """Test explicit naming of parameters file from command line. 
This is a little bit tricky because the parameter file is assumed to be - relative to the DATA file, not to working directory unless it is absolute.""" + relative to the .DATA file, not to the working directory unless it is absolute.""" tmpcsvfile = tmp_path / "smrywithrandomparams.txt" randomparamfile = tmp_path / "fooparams.txt" @@ -258,7 +260,7 @@ mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "summary", "--verbose", EIGHTCELLS, @@ -268,7 +270,7 @@ str(randomparamfile), # Absolute filepath ], ) - ecl2csv.main() + res2csv.main() assert pd.read_csv(tmpcsvfile)["FOO"].unique() == ["barrbarr"] assert pd.read_csv(tmpcsvfile)["COM"].unique() == [1234] @@ -278,7 +280,7 @@ mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "summary", "--verbose", EIGHTCELLS, @@ -288,15 +290,15 @@ Path(randomparamfile).name, # A relative filepath ], ) - ecl2csv.main() + res2csv.main() assert "FOO" not in pd.read_csv("smry_noparams.csv") def test_main_subparser(tmp_path, mocker): """Test command line interface with output to both CSV and arrow/feather.""" tmpcsvfile = tmp_path / "sum.csv" - mocker.patch("sys.argv", ["ecl2csv", "summary", EIGHTCELLS, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "summary", EIGHTCELLS, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -307,9 +309,9 @@ def test_main_subparser(tmp_path, mocker): tmparrowfile = tmp_path / "sum.arrow" mocker.patch( "sys.argv", - ["ecl2csv", "summary", "--arrow", EIGHTCELLS, "-o", str(tmparrowfile)], + ["res2csv", "summary", "--arrow", EIGHTCELLS, "-o", str(tmparrowfile)], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_arraydf = pyarrow.feather.read_table(tmparrowfile).to_pandas() assert "FOPT" in disk_arraydf @@ -317,32 +319,34 @@ def test_main_subparser(tmp_path, mocker): # Alternative and equivalent command line syntax for arrow output: tmparrowfile_alt = tmp_path / "sum2.arrow" mocker.patch( - "sys.argv", ["ecl2arrow", "summary", EIGHTCELLS, "-o", str(tmparrowfile_alt)] + "sys.argv", ["res2arrow", "summary", EIGHTCELLS, "-o", str(tmparrowfile_alt)] ) - ecl2csv.main() + res2csv.main() pd.testing.assert_frame_equal( disk_arraydf, pyarrow.feather.read_table(str(tmparrowfile_alt)).to_pandas() ) # Not possible (yet?) 
to write arrow to stdout: - mocker.patch("sys.argv", ["ecl2arrow", "summary", EIGHTCELLS, "-o", "-"]) + mocker.patch("sys.argv", ["res2arrow", "summary", EIGHTCELLS, "-o", "-"]) with pytest.raises(SystemExit): - ecl2csv.main() + res2csv.main() def test_datenormalization(): """Test normalization of dates, where dates can be ensured to be on dategrid boundaries""" # realization-0 here has its last summary date at 2003-01-02 - eclfiles = EclFiles(REEK) - daily = summary.df(eclfiles, column_keys="FOPT", time_index="daily", datetime=True) + resdatafiles = ResdataFiles(REEK) + daily = summary.df( + resdatafiles, column_keys="FOPT", time_index="daily", datetime=True + ) assert str(daily.index[-1])[0:10] == "2003-01-02" monthly = summary.df( - eclfiles, column_keys="FOPT", time_index="monthly", datetime=True + resdatafiles, column_keys="FOPT", time_index="monthly", datetime=True ) assert str(monthly.index[-1])[0:10] == "2003-02-01" yearly = summary.df( - eclfiles, column_keys="FOPT", time_index="yearly", datetime=True + resdatafiles, column_keys="FOPT", time_index="yearly", datetime=True ) assert str(yearly.index[-1])[0:10] == "2004-01-01" @@ -350,9 +354,9 @@ def test_extrapolation(): """Summary data should be possible to extrapolate into the future, rates should be zero, cumulatives should be constant""" - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) lastfopt = summary.df( - eclfiles, column_keys="FOPT", time_index="last", datetime=True + resdatafiles, column_keys="FOPT", time_index="last", datetime=True )["FOPT"].values[0] answer = pd.DataFrame( # This is the maximal date for datetime64[ns] @@ -363,7 +367,7 @@ def test_extrapolation(): pd.testing.assert_frame_equal( summary.df( - eclfiles, + resdatafiles, column_keys=["FOPT", "FOPR"], time_index="2262-04-11", datetime=True, @@ -372,7 +376,7 @@ ) pd.testing.assert_frame_equal( summary.df( - eclfiles, + resdatafiles, column_keys=["FOPT", "FOPR"], time_index=[datetime.date(2262, 4, 11)], # NB: df() does not support datetime64 for time_index @@ -384,15 +388,15 @@ # Pandas does not support DatetimeIndex beyond 2262: with pytest.raises(pd.errors.OutOfBoundsDatetime): summary.df( - eclfiles, + resdatafiles, column_keys=["FOPT"], time_index=[datetime.date(2300, 1, 1)], datetime=True, ) - # But without datetime, we can get it extrapolated by libecl: + # But without datetime, we can get it extrapolated by resdata: assert summary.df( - eclfiles, column_keys=["FOPT"], time_index=[datetime.date(2300, 1, 1)] + resdatafiles, column_keys=["FOPT"], time_index=[datetime.date(2300, 1, 1)] )["FOPT"].values == [lastfopt] @@ -407,9 +411,9 @@ def test_foreseeable_future(tmp_path): {"DATE": "2500-01-01", "FPR": 180}, ] ) - eclsum = df2eclsum(src_dframe, casename="PLUGABANDON") + res_summary = df2ressum(src_dframe, casename="PLUGABANDON") - dframe = summary.df(eclsum) + dframe = summary.df(res_summary) assert ( dframe.index == [ @@ -422,7 +426,7 @@ def test_foreseeable_future(tmp_path): ).all() # Try with time interpolation involved: - dframe = summary.df(eclsum, time_index="yearly") + dframe = summary.df(res_summary, time_index="yearly") assert len(dframe) == 501 assert dframe.index.max() == datetime.date(year=2500, month=1, day=1) @@ -433,8 +437,8 @@ def test_foreseeable_future(tmp_path): "FPR": range(70), } ) - eclsum = df2eclsum(src_dframe, casename="PLUGABANDON") - dframe = summary.df(eclsum) + res_summary = df2ressum(src_dframe, casename="PLUGABANDON") + 
dframe = summary.df(res_summary) # Still buggy: assert dframe.index[-1] == dt(2068, 12, 31, 23, 57, 52) @@ -445,8 +449,8 @@ def test_foreseeable_future(tmp_path): "FPR": range(69), } ) - eclsum = df2eclsum(src_dframe, casename="PLUGABANDON") - dframe = summary.df(eclsum) + res_summary = df2ressum(src_dframe, casename="PLUGABANDON") + dframe = summary.df(res_summary) # Works fine when stepping only 68 years: assert dframe.index[-1] == dt(2468, 1, 1, 0, 0, 0) @@ -629,9 +633,9 @@ def test_date_range(start, end, freq, expected): def test_resample_smry_dates(): """Test resampling of summary dates""" - eclfiles = EclFiles(REEK) + resdatafiles = ResdataFiles(REEK) - ecldates = eclfiles.get_eclsum().dates + ecldates = resdatafiles.get_summary().dates assert isinstance(resample_smry_dates(ecldates), list) assert isinstance(resample_smry_dates(ecldates, freq="last"), list) @@ -764,7 +768,7 @@ def test_resample_smry_dates(): == 2 + 300 # boundary dates + 2001-01-01 to 2300-01-01 ) - # Verify boundary date bug up to and including ecl2df v0.13.2 + # Verify boundary date bug up to and including res2df v0.13.2 assert resample_smry_dates( ecldates, start_date="2300-06-05", @@ -792,7 +796,7 @@ def test_resample_smry_dates(): ], ) def test_unique_datetime_for_short_timesteps(filepath): - assert summary.df(EclFiles(filepath)).index.is_unique + assert summary.df(ResdataFiles(filepath)).index.is_unique @pytest.mark.parametrize( @@ -804,12 +808,12 @@ def test_unique_datetime_for_short_timesteps(filepath): ) def test_unique_datetime_retain_index_name(filepath): """Test _ensure_unique_datetime_index method retain index name""" - assert summary.df(EclFiles(filepath)).index.name == "DATE" + assert summary.df(ResdataFiles(filepath)).index.name == "DATE" def test_smry_meta(): - """Test obtaining metadata dictionary for summary vectors from an EclSum object""" - meta = smry_meta(EclFiles(REEK)) + """Test obtaining metadata dictionary for summary vectors from a summary object""" + meta = smry_meta(ResdataFiles(REEK)) assert isinstance(meta, dict) assert "FOPT" in meta @@ -838,7 +842,7 @@ def test_smry_meta(): def test_smry_meta_synthetic(): """What does meta look like when we start from a synthetic summary? - ecl2df currently does not try to set the units to anything when + res2df currently does not try to set the units to anything when making synthetic summary. 
""" dframe = pd.DataFrame( @@ -846,9 +850,9 @@ def test_smry_meta_synthetic(): {"DATE": np.datetime64("2016-01-01"), "FOPT": 1000, "FOPR": 100}, ] ).set_index("DATE") - synt_meta = smry_meta(df2eclsum(dframe)) + synt_meta = smry_meta(df2ressum(dframe)) - # Dummy unit provided by EclSum: + # Dummy unit provided by summary: assert synt_meta["FOPT"]["unit"] == "UNIT" @@ -939,10 +943,10 @@ def test_smry_meta_synthetic(): ), ], ) -def test_fix_dframe_for_libecl(dframe, expected_dframe): - """Test the dataframe preprocessor/validator for df2eclsum works""" +def test_fix_dframe_for_resdata(dframe, expected_dframe): + """Test the dataframe preprocessor/validator for df2ressum works""" pd.testing.assert_frame_equal( - _fix_dframe_for_libecl(dframe), expected_dframe, check_index_type=False + _fix_dframe_for_resdata(dframe), expected_dframe, check_index_type=False ) @@ -1015,19 +1019,19 @@ def test_fix_dframe_for_libecl(dframe, expected_dframe): ), ], ) -def test_df2eclsum(dframe): - """Test that a dataframe can be converted to an EclSum object, and then read +def test_df2ressum(dframe): + """Test that a dataframe can be converted to a summary object, and then read back again""" # Massage the dframe first so we can assert on equivalence after. - dframe = _fix_dframe_for_libecl(dframe) + dframe = _fix_dframe_for_resdata(dframe) - eclsum = df2eclsum(dframe) + summary = df2ressum(dframe) if dframe.empty: - assert eclsum is None + assert summary is None return - dframe_roundtrip = df(eclsum) + dframe_roundtrip = df(summary) pd.testing.assert_frame_equal( dframe.sort_index(axis=1), dframe_roundtrip.sort_index(axis=1), @@ -1035,7 +1039,7 @@ def test_df2eclsum(dframe): ) -def test_df2eclsum_datetimeindex(): +def test_df2ressum_datetimeindex(): """Test that providing a dataframe with a datetimeindex also works""" dframe = pd.DataFrame( [ @@ -1045,21 +1049,21 @@ def test_df2eclsum_datetimeindex(): dframe["DATE"] = pd.to_datetime(dframe["DATE"]) dframe.set_index("DATE") - roundtrip = df(df2eclsum(dframe)) + roundtrip = df(df2ressum(dframe)) assert isinstance(roundtrip.index, pd.DatetimeIndex) assert roundtrip["FOPR"].values == [100] assert roundtrip["FOPT"].values == [1000] def test_duplicated_summary_vectors(caplog): - """EclSum files on disk may contain repeated vectors + """summary files on disk may contain repeated vectors if the user has inserted a vector name twice in the SUMMARY section - ecl2df.summary.df() should deduplicate this, and give a warning. + res2df.summary.df() should deduplicate this, and give a warning. """ - # ecl2df.df2eclsum() is not able to mock such a UNSMRY file. + # res2df.df2ressum() is not able to mock such a UNSMRY file. 
dupe_datafile = ( TESTDIR / "data" @@ -1068,7 +1072,7 @@ def test_duplicated_summary_vectors(caplog): / "EIGHTCELLS_DUPES.DATA" ) assert "SUMMARY\nFOPR\nFOPR" in dupe_datafile.read_text() - deduplicated_dframe = df(EclFiles(dupe_datafile)) + deduplicated_dframe = df(ResdataFiles(dupe_datafile)) assert (deduplicated_dframe.columns == ["YEARS", "FOPR"]).all() assert "Duplicated columns detected" in caplog.text @@ -1158,27 +1162,27 @@ def test_df2pyarrow_strings(): @pytest.mark.skipif(not HAVE_OPM, reason="Test requires OPM") -def test_ecl2df_errors(tmp_path): +def test_res2df_errors(tmp_path): """Test error handling on bogus/corrupted summary files""" os.chdir(tmp_path) Path("FOO.UNSMRY").write_bytes(os.urandom(100)) Path("FOO.SMSPEC").write_bytes(os.urandom(100)) with pytest.raises(OSError, match="Failed to create summary instance"): - # This is how libecl reacts to bogus binary data + # This is how resdata reacts to bogus binary data Summary("FOO.UNSMRY") - # But EclFiles should be more tolerant, as it should be possible + # But ResdataFiles should be more tolerant, as it should be possible # to extract other data if SMRY is corrupted Path("FOO.DATA").write_text("RUNSPEC", encoding="utf8") - assert str(EclFiles("FOO").get_ecldeck()).strip() == "RUNSPEC" + assert str(ResdataFiles("FOO").get_deck()).strip() == "RUNSPEC" with pytest.raises(OSError): - EclFiles("FOO").get_eclsum() + ResdataFiles("FOO").get_summary() # Getting a dataframe from bogus data should give empty data: - assert df(EclFiles("FOO")).empty + assert df(ResdataFiles("FOO")).empty -def test_df2eclsum_errors(): +def test_df2ressum_errors(): """Test various error conditions, checking that the correct error message is emitted""" dframe = pd.DataFrame( @@ -1187,18 +1191,18 @@ def test_df2eclsum_errors(): ] ) with pytest.raises(ValueError, match="casename foobar must be UPPER CASE"): - df2eclsum(dframe, casename="foobar") + df2ressum(dframe, casename="foobar") with pytest.raises(ValueError, match="Do not use dots in casename"): - df2eclsum(dframe, casename="FOOBAR.UNSMRY") # .UNSMRY should not be included + df2ressum(dframe, casename="FOOBAR.UNSMRY") # .UNSMRY should not be included # No date included: with pytest.raises(ValueError, match="dataframe must have a datetime index"): - df2eclsum(pd.DataFrame([{"FOPT": 1000}])) + df2ressum(pd.DataFrame([{"FOPT": 1000}])) @pytest.mark.integration -def test_csv2ecl_summary(tmp_path, mocker): - """Check that we can call df2eclsum through the csv2ecl command line +def test_csv2res_summary(tmp_path, mocker): + """Check that we can call df2ressum through the csv2res command line utility""" dframe = pd.DataFrame( [ @@ -1211,7 +1215,7 @@ def test_csv2ecl_summary(tmp_path, mocker): mocker.patch( "sys.argv", [ - "csv2ecl", + "csv2res", "summary", "-v", "summary.csv", @@ -1219,7 +1223,7 @@ def test_csv2ecl_summary(tmp_path, mocker): "SYNTHETIC", ], ) - csv2ecl.main() + csv2res.main() assert Path("SYNTHETIC.UNSMRY").is_file() assert Path("SYNTHETIC.SMSPEC").is_file() @@ -1228,7 +1232,7 @@ def test_csv2ecl_summary(tmp_path, mocker): mocker.patch( "sys.argv", [ - "csv2ecl", + "csv2res", "summary", "--debug", "summary.csv", @@ -1236,6 +1240,6 @@ def test_csv2ecl_summary(tmp_path, mocker): str(Path("foo") / Path("SYNTHETIC")), ], ) - csv2ecl.main() + csv2res.main() assert ("foo" / Path("SYNTHETIC.UNSMRY")).is_file() assert ("foo" / Path("SYNTHETIC.SMSPEC")).is_file() diff --git a/tests/test_trans.py b/tests/test_trans.py index ae8860184..8f9f90d5c 100644 --- a/tests/test_trans.py +++ 
b/tests/test_trans.py @@ -1,4 +1,4 @@ -"""Test module for ecl2df.trans""" +"""Test module for res2df.trans""" from pathlib import Path @@ -13,8 +13,8 @@ import pandas as pd -from ecl2df import ecl2csv, trans -from ecl2df.eclfiles import EclFiles +from res2df import res2csv, trans +from res2df.resdatafiles import ResdataFiles TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -23,8 +23,8 @@ def test_trans(): """Test that we can build a dataframe of transmissibilities""" - eclfiles = EclFiles(REEK) - trans_df = trans.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + trans_df = trans.df(resdatafiles) assert "TRAN" in trans_df assert "DIR" in trans_df assert set(trans_df["DIR"].unique()) == set(["I", "J", "K"]) @@ -33,45 +33,45 @@ def test_trans(): trans_full_length = len(trans_df) # Try including some vectors: - trans_df = trans.df(eclfiles, vectors="FIPNUM") + trans_df = trans.df(resdatafiles, vectors="FIPNUM") assert "FIPNUM" not in trans_df assert "FIPNUM1" in trans_df assert "EQLNUM2" not in trans_df - trans_df = trans.df(eclfiles, vectors=["FIPNUM", "EQLNUM"]) + trans_df = trans.df(resdatafiles, vectors=["FIPNUM", "EQLNUM"]) assert "FIPNUM1" in trans_df assert "EQLNUM2" in trans_df - trans_df = trans.df(eclfiles, vectors="BOGUS") + trans_df = trans.df(resdatafiles, vectors="BOGUS") assert "BOGUS1" not in trans_df assert "TRAN" in trans_df # (we should have gotten a warning only) - assert "K" not in trans.df(eclfiles, onlyijdir=True)["DIR"] - assert "I" not in trans.df(eclfiles, onlykdir=True)["DIR"] + assert "K" not in trans.df(resdatafiles, onlyijdir=True)["DIR"] + assert "I" not in trans.df(resdatafiles, onlykdir=True)["DIR"] # A warning is logged, seems strange to filter on both, but # the answer (empty) makes sense given the instruction. Alternative # would be a ValueError. 
- assert trans.df(eclfiles, onlykdir=True, onlyijdir=True).empty + assert trans.df(resdatafiles, onlykdir=True, onlyijdir=True).empty - transnnc_df = trans.df(eclfiles, addnnc=True) + transnnc_df = trans.df(resdatafiles, addnnc=True) assert len(transnnc_df) > trans_full_length - trans_df = trans.df(eclfiles, vectors=["FIPNUM", "EQLNUM"], boundaryfilter=True) + trans_df = trans.df(resdatafiles, vectors=["FIPNUM", "EQLNUM"], boundaryfilter=True) assert trans_df.empty - trans_df = trans.df(eclfiles, vectors="FIPNUM", boundaryfilter=True) + trans_df = trans.df(resdatafiles, vectors="FIPNUM", boundaryfilter=True) assert len(trans_df) < trans_full_length - trans_df = trans.df(eclfiles, coords=True) + trans_df = trans.df(resdatafiles, coords=True) assert "X" in trans_df assert "Y" in trans_df def test_grouptrans(): """Test grouping of transmissibilities""" - eclfiles = EclFiles(REEK) - trans_df = trans.df(eclfiles, vectors="FIPNUM", group=True, coords=True) + resdatafiles = ResdataFiles(REEK) + trans_df = trans.df(resdatafiles, vectors="FIPNUM", group=True, coords=True) assert "FIPNUMPAIR" in trans_df assert "FIPNUM1" in trans_df assert "FIPNUM2" in trans_df @@ -80,14 +80,14 @@ def test_grouptrans(): assert "X" in trans_df # (average X coord for that FIPNUM interface) # This gives a logged error: - assert trans.df(eclfiles, vectors=["FIPNUM", "EQLNUM"], group=True).empty + assert trans.df(resdatafiles, vectors=["FIPNUM", "EQLNUM"], group=True).empty @pytest.mark.skipif(not HAVE_NETWORKX, reason="Requires networkx being installed") def test_nx(tmp_path): """Test graph generation""" - eclfiles = EclFiles(REEK) - network = trans.make_nx_graph(eclfiles, region="FIPNUM") + resdatafiles = ResdataFiles(REEK) + network = trans.make_nx_graph(resdatafiles, region="FIPNUM") assert network.number_of_nodes() == 6 networkx.write_gexf(network, tmp_path / "reek-fipnum-trans.gxf", prettyprint=True) assert (tmp_path / "reek-fipnum-trans.gxf").is_file() @@ -97,9 +97,9 @@ def test_main(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "trans.csv" mocker.patch( - "sys.argv", ["ecl2csv", "trans", "-v", EIGHTCELLS, "-o", str(tmpcsvfile)] + "sys.argv", ["res2csv", "trans", "-v", EIGHTCELLS, "-o", str(tmpcsvfile)] ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert not disk_df.empty diff --git a/tests/test_userapi.py b/tests/test_userapi.py index 153221be8..2bb211fbb 100644 --- a/tests/test_userapi.py +++ b/tests/test_userapi.py @@ -1,10 +1,10 @@ -"""Test module for user API for ecl2df""" +"""Test module for user API for res2df""" from pathlib import Path import pytest -import ecl2df +import res2df try: # pylint: disable=unused-import @@ -28,22 +28,22 @@ def test_userapi(): To the user reading the source: Skip all 'assert' lines, read the rest. 
""" - eclfiles = ecl2df.EclFiles(REEK) - - compdatdf = ecl2df.compdat.df(eclfiles) - equil = ecl2df.equil.df(eclfiles) - faults = ecl2df.faults.df(eclfiles) - fipreports = ecl2df.fipreports.df(eclfiles) - grid_df = ecl2df.grid.df(eclfiles) - grst_df = ecl2df.grid.df(eclfiles, rstdates="last") - gruptree = ecl2df.gruptree.df(eclfiles) - nnc = ecl2df.nnc.df(eclfiles) - pillars = ecl2df.pillars.df(eclfiles) - rft = ecl2df.rft.df(eclfiles) - satfunc = ecl2df.satfunc.df(eclfiles) - smry = ecl2df.summary.df(eclfiles, datetime=True) - trans = ecl2df.trans.df(eclfiles) - wcon = ecl2df.wcon.df(eclfiles) + resdatafiles = res2df.ResdataFiles(REEK) + + compdatdf = res2df.compdat.df(resdatafiles) + equil = res2df.equil.df(resdatafiles) + faults = res2df.faults.df(resdatafiles) + fipreports = res2df.fipreports.df(resdatafiles) + grid_df = res2df.grid.df(resdatafiles) + grst_df = res2df.grid.df(resdatafiles, rstdates="last") + gruptree = res2df.gruptree.df(resdatafiles) + nnc = res2df.nnc.df(resdatafiles) + pillars = res2df.pillars.df(resdatafiles) + rft = res2df.rft.df(resdatafiles) + satfunc = res2df.satfunc.df(resdatafiles) + smry = res2df.summary.df(resdatafiles, datetime=True) + trans = res2df.trans.df(resdatafiles) + wcon = res2df.wcon.df(resdatafiles) assert "PORV" in grid_df assert "SOIL" not in grid_df @@ -57,7 +57,7 @@ def test_userapi(): hcpv_table = grst_df.groupby("FIPNUM").sum()[["OILPV", "HCPV"]] assert not hcpv_table.empty - # Print the HCPV table by FIPNUM: + # Create string with :term:`include file` contents for the HCPV table by FIPNUM: print() print((hcpv_table / 1e6).round(2)) diff --git a/tests/test_vfp.py b/tests/test_vfp.py index 0692a3d33..3d048a88a 100644 --- a/tests/test_vfp.py +++ b/tests/test_vfp.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from ecl2df import EclFiles, vfp +from res2df import ResdataFiles, vfp try: import opm # noqa @@ -991,23 +991,23 @@ @pytest.mark.parametrize("test_input, expected", VFPPROD_CASES) -def test_ecl2df_vfpprod(test_input, expected): - """Test ecl2df for VFPPROD""" - deck = EclFiles.str2deck(test_input) +def test_res2df_vfpprod(test_input, expected): + """Test res2df for VFPPROD""" + deck = ResdataFiles.str2deck(test_input) vfpdf = vfp.df(deck, "VFPPROD") pd.testing.assert_frame_equal(vfpdf, expected) @pytest.mark.parametrize("test_input, expected", VFPPROD_CASES) -def test_ecl2pyarrow_vfpprod(test_input, expected): - """Test ecl2pyarrow for VFPPROD""" - deck = EclFiles.str2deck(test_input) +def test_res2pyarrow_vfpprod(test_input, expected): + """Test res2pyarrow for VFPPROD""" + deck = ResdataFiles.str2deck(test_input) # Read first into pyarrow tables vfppa = vfp.pyarrow_tables(deck, "VFPPROD") # Convert pyarrow table to basic data types for VFPPROD vfpprod_data = vfp.pyarrow2basic_data(vfppa[0]) - # Convert basic data types to ecl2df DataFrame for VFPPROD + # Convert basic data types to res2df DataFrame for VFPPROD vfpdf = vfp.basic_data2df(vfpprod_data) # Check that all steps lead to desired end result @@ -1015,9 +1015,9 @@ def test_ecl2pyarrow_vfpprod(test_input, expected): @pytest.mark.parametrize("test_input, expected", [VFPPROD_CASES[0]]) -def test_df2ecl_vfpprod(test_input, expected): - """Test df2ecl for VFPPROD (case without default values)""" - ecl_vfpprod = vfp.df2ecl(expected, "VFPPROD") +def test_df2res_vfpprod(test_input, expected): + """Test df2res for VFPPROD (case without default values)""" + ecl_vfpprod = vfp.df2res(expected, "VFPPROD") assert ecl_vfpprod.strip() == test_input.strip() @@ -1025,30 +1025,30 @@ def 
test_df2ecl_vfpprod(test_input, expected): @pytest.mark.parametrize("test_input, expected", [VFPPROD_CASES[0]]) def test_pyarrow2ecl_vfpprod(test_input, expected): """Test pyarrow2ecl for VFPPROD (case without default values)""" - deck = EclFiles.str2deck(vfp.df2ecl(expected, "VFPPROD")) + deck = ResdataFiles.str2deck(vfp.df2res(expected, "VFPPROD")) vfpprod_df = vfp.df(deck, "VFPPROD") vfpprod_data = vfp.df2basic_data(vfpprod_df) vfpprod_pa = vfp.basic_data2pyarrow(vfpprod_data) vfpprod_data = vfp.pyarrow2basic_data(vfpprod_pa) vfpprod_df = vfp.basic_data2df(vfpprod_data) - vfpprod_ecl = vfp.df2ecl(vfpprod_df, "VFPPROD") + vfpprod_ecl = vfp.df2res(vfpprod_df, "VFPPROD") assert vfpprod_ecl.strip() == test_input.strip() @pytest.mark.parametrize("test_input, expected", VFPINJ_CASES) -def test_ecl2df_vfpinj(test_input, expected): - """Test ecl2df for VFPINJ""" - deck = EclFiles.str2deck(test_input) +def test_res2df_vfpinj(test_input, expected): + """Test res2df for VFPINJ""" + deck = ResdataFiles.str2deck(test_input) vfpdf = vfp.df(deck, "VFPINJ") pd.testing.assert_frame_equal(vfpdf, expected) @pytest.mark.parametrize("test_input, expected", [VFPINJ_CASES[0]]) -def test_df2ecl_vfpinj(test_input, expected): - """Test df2ecl for VFPINJ (case without default values)""" - ecl_vfpinj = vfp.df2ecl(expected, "VFPINJ") +def test_df2res_vfpinj(test_input, expected): + """Test df2res for VFPINJ (case without default values)""" + ecl_vfpinj = vfp.df2res(expected, "VFPINJ") assert ecl_vfpinj.strip() == test_input.strip() @@ -1056,21 +1056,21 @@ def test_df2ecl_vfpinj(test_input, expected): @pytest.mark.parametrize("test_input, expected", [VFPINJ_CASES[0]]) def test_pyarrow2ecl_vfpinj(test_input, expected): """Test pyarrow2ecl for VFPPROD (case without default values)""" - deck = EclFiles.str2deck(vfp.df2ecl(expected, "VFPINJ")) + deck = ResdataFiles.str2deck(vfp.df2res(expected, "VFPINJ")) vfpinj_df = vfp.df(deck, "VFPINJ") vfpinj_data = vfp.df2basic_data(vfpinj_df) vfpinj_pa = vfp.basic_data2pyarrow(vfpinj_data) vfpinj_data = vfp.pyarrow2basic_data(vfpinj_pa) vfpinj_df = vfp.basic_data2df(vfpinj_data) - vfpinj_ecl = vfp.df2ecl(vfpinj_df, "VFPINJ") + vfpinj_ecl = vfp.df2res(vfpinj_df, "VFPINJ") assert vfpinj_ecl.strip() == test_input.strip() @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2df_vfpprods(test_input, expected): - """Test ecl2df for files with multiple VFPPROD""" - deck = EclFiles.str2deck(test_input) +def test_res2df_vfpprods(test_input, expected): + """Test res2df for files with multiple VFPPROD""" + deck = ResdataFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPPROD") # Two VFPPROD curves in file corresponding to curves 0 and 1 @@ -1079,9 +1079,9 @@ def test_ecl2df_vfpprods(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2pyarrow_vfpprods(test_input, expected): - """Test ecl2df with pyarrow for files with multiple VFPPROD""" - deck = EclFiles.str2deck(test_input) +def test_res2pyarrow_vfpprods(test_input, expected): + """Test res2df with pyarrow for files with multiple VFPPROD""" + deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPPROD") # Two VFPPROD curves in file corresponding to curves 0 and 1 @@ -1092,9 +1092,9 @@ def test_ecl2pyarrow_vfpprods(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2df_vfpinjs(test_input, expected): - """Test ecl2df for files with multiple VFPINJ""" - deck = 
EclFiles.str2deck(test_input) +def test_res2df_vfpinjs(test_input, expected): + """Test res2df for files with multiple VFPINJ""" + deck = ResdataFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPINJ") # Two VFPINJ curves in file corresponding to curves 2 and 3 @@ -1104,8 +1104,8 @@ def test_ecl2df_vfpinjs(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_eclpyarrow_vfpinjs(test_input, expected): - """Test ecl2df for pyarrow for files with multiple VFPINJ""" - deck = EclFiles.str2deck(test_input) + """Test res2df for pyarrow for files with multiple VFPINJ""" + deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPINJ") # Two VFPINJ curves in file corresponding to curves 2 and 3 @@ -1116,9 +1116,9 @@ def test_eclpyarrow_vfpinjs(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2df_vfpprod_no(test_input, expected): - """Test ecl2df for files with multiple VFPPROD with vfp number argument""" - deck = EclFiles.str2deck(test_input) +def test_res2df_vfpprod_no(test_input, expected): + """Test res2df for files with multiple VFPPROD with vfp number argument""" + deck = ResdataFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPPROD", "2") # VFPPROD curve with VFP number 2 is curve 1 in file @@ -1126,11 +1126,11 @@ def test_ecl2df_vfpprod_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2pyarrow_vfpprod_no(test_input, expected): - """Test ecl2df for pyarrow for files with multiple +def test_res2pyarrow_vfpprod_no(test_input, expected): + """Test res2df for pyarrow for files with multiple VFPPROD with vfp number argument """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPPROD", "2") vfpprod_data = vfp.pyarrow2basic_data(vfppas[0]) vfpdf = vfp.basic_data2df(vfpprod_data) @@ -1140,9 +1140,9 @@ def test_ecl2pyarrow_vfpprod_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2df_vfpinj_no(test_input, expected): - """Test ecl2df for files with multiple VFPINJ with vfp number argument""" - deck = EclFiles.str2deck(test_input) +def test_res2df_vfpinj_no(test_input, expected): + """Test res2df for files with multiple VFPINJ with vfp number argument""" + deck = ResdataFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPINJ", "4") # VFPINJ curve with VFP number 4 is curve 3 in file @@ -1150,9 +1150,9 @@ def test_ecl2df_vfpinj_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2pyarrow_vfpinj_no(test_input, expected): - """Test ecl2df for pyarrow files with multiple VFPINJ with vfp number argument""" - deck = EclFiles.str2deck(test_input) +def test_res2pyarrow_vfpinj_no(test_input, expected): + """Test res2df for pyarrow files with multiple VFPINJ with vfp number argument""" + deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPINJ", "4") vfpinj_data = vfp.pyarrow2basic_data(vfppas[0]) @@ -1163,9 +1163,9 @@ def test_ecl2pyarrow_vfpinj_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2df_vfpprods_no(test_input, expected): - """Test ecl2df for files with multiple VFPPROD with vfp number argument as range""" - deck = EclFiles.str2deck(test_input) +def test_res2df_vfpprods_no(test_input, expected): + """Test res2df for files with multiple VFPPROD with 
vfp number argument as range""" + deck = ResdataFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPPROD", "[1:2]") # VFPPROD curves with VFP numbers 1 and 2 are curves 0 and 1 @@ -1174,11 +1174,11 @@ def test_ecl2df_vfpprods_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2pyarrow_vfpprods_no(test_input, expected): - """Test ecl2df for pyarrow for files with multiple VFPPROD +def test_res2pyarrow_vfpprods_no(test_input, expected): + """Test res2df for pyarrow for files with multiple VFPPROD with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPPROD", "[1:2]") # VFPPROD curves with VFP numbers 1 and 2 are curves 0 and 1 @@ -1189,11 +1189,11 @@ def test_ecl2pyarrow_vfpprods_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2df_vfpinjs_no(test_input, expected): - """Test ecl2df for files with multiple VFPINJ with vfp number +def test_res2df_vfpinjs_no(test_input, expected): + """Test res2df for files with multiple VFPINJ with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPINJ", "[3:4]") # VFPINJ curves with VFP numbers 3 and 4 are curves 2 and 3 @@ -1202,11 +1202,11 @@ def test_ecl2df_vfpinjs_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2pyarrow_vfpinjs_no(test_input, expected): - """Test ecl2df for pyararow for files with multiple VFPINJ with vfp +def test_res2pyarrow_vfpinjs_no(test_input, expected): + """Test res2df for pyararow for files with multiple VFPINJ with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPINJ", "[3:4]") # VFPINJ curves with VFP numbers 3 and 4 are curves 2 and 3 @@ -1218,10 +1218,10 @@ def test_ecl2pyarrow_vfpinjs_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_basic_data_vfpprods_no(test_input, expected): - """Test ecl2df basic_data reading for files with multiple VFPPROD + """Test res2df basic_data reading for files with multiple VFPPROD with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfps = vfp.basic_data(deck, "VFPPROD", "[1:2]") # VFPPROD curves with VFP numbers 1 and 2 are curves 0 and 1 @@ -1232,10 +1232,10 @@ def test_basic_data_vfpprods_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_basic_data_vfpinjs_no(test_input, expected): - """Test ecl2df basic_data reading for files with multiple VFPINJ with vfp + """Test res2df basic_data reading for files with multiple VFPINJ with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfps = vfp.basic_data(deck, "VFPINJ", "[3:4]") # VFPINJ curves with VFP numbers 3 and 4 are curves 2 and 3 @@ -1246,10 +1246,10 @@ def test_basic_data_vfpinjs_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_pyarrow2basic_data_vfpprods_no(test_input, expected): - """Test ecl2df pyarrow2basic_data for files with multiple VFPPROD + """Test res2df pyarrow2basic_data for files with multiple VFPPROD with vfp number argument as range """ - deck = 
EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) pyarrow_vfps = vfp.pyarrow_tables(deck, "VFPPROD", "[1:2]") # VFPPROD curves with VFP numbers 1 and 2 are curves 0 and 1 @@ -1261,10 +1261,10 @@ def test_pyarrow2basic_data_vfpprods_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_pyarrow2basic_data_vfpinjs_no(test_input, expected): - """Test ecl2df pyarrow2basic_data for files with multiple VFPINJ with vfp + """Test res2df pyarrow2basic_data for files with multiple VFPINJ with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) pyarrow_vfps = vfp.pyarrow_tables(deck, "VFPINJ", "[3:4]") # VFPINJ curves with VFP numbers 3 and 4 are curves 2 and 3 @@ -1285,7 +1285,7 @@ def test_basic_data_key_exceptions_vfpprods(self, vfpprod_key, test_input, dummy """Test exceptions for basic data format (not containing all required keywords) for VFPPROD" """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfpprods = vfp.basic_data(deck, "VFPPROD") # Check if exception is raises if one key is missing @@ -1312,7 +1312,7 @@ def test_basic_data_array_dim_exceptions_vfpprods( """Test exceptions for basic data format (inconsistency in array dimensions) for VFPPROD" """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfpprods = vfp.basic_data(deck, "VFPPROD") # Check if exception is raises if array dimension is wrong @@ -1329,7 +1329,7 @@ def test_basic_data_dims_vfpprods(test_input, expected): """Test exceptions for dimensions consistency for basic data format (not containing all required keywords) for VFPPROD" """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfpprods = vfp.basic_data(deck, "VFPPROD") # Check if exception is raised if dimensions are wrong @@ -1352,7 +1352,7 @@ def test_basic_data_key_exceptions_vfpinjs(self, vfpinj_key, test_input, dummy): """Test exceptions for basic data format (not containing all required keywords) for VFPINJ" """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfpinjs = vfp.basic_data(deck, "VFPINJ") # Check if exception is raises if one key is missing @@ -1379,7 +1379,7 @@ def test_basic_data_array_dim_exceptions_vfpinjs( """Test exceptions for basic data format (inconsistency in array dimensions) for VFPINJ" """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfpinjs = vfp.basic_data(deck, "VFPINJ") # Check if exception is raises if array dimension if wrong diff --git a/tests/test_wcon.py b/tests/test_wcon.py index 7cb1fbe24..16a23dfe6 100644 --- a/tests/test_wcon.py +++ b/tests/test_wcon.py @@ -7,8 +7,8 @@ import pandas as pd import pytest -from ecl2df import ecl2csv, wcon -from ecl2df.eclfiles import EclFiles +from res2df import res2csv, wcon +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -24,8 +24,8 @@ def test_wcon2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(EIGHTCELLS) - wcondf = wcon.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(EIGHTCELLS) + wcondf = wcon.df(resdatafiles.get_deck()) assert not wcondf.empty assert "DATE" in wcondf # for all data @@ -41,7 +41,7 @@ def test_wconhist(): 'FOO' 0 1 / / """ - deck = EclFiles.str2deck(wconstr) + deck = ResdataFiles.str2deck(wconstr) wconhist_df = wcon.df(deck) pd.testing.assert_frame_equal( 
wconhist_df, @@ -74,7 +74,7 @@ def test_wconinjh(): 'FOO' 0 1 / / """ - deck = EclFiles.str2deck(wconstr) + deck = ResdataFiles.str2deck(wconstr) wconinjh_df = wcon.df(deck) pd.testing.assert_frame_equal( wconinjh_df, @@ -108,7 +108,7 @@ def test_wconinje(): 'FOO' 0 1 / / """ - deck = EclFiles.str2deck(wconstr) + deck = ResdataFiles.str2deck(wconstr) wconinje_df = wcon.df(deck) pd.testing.assert_frame_equal( wconinje_df, @@ -145,7 +145,7 @@ def test_wconprod(): 'FOO' 0 1 / / """ - deck = EclFiles.str2deck(wconstr) + deck = ResdataFiles.str2deck(wconstr) wconprod_df = wcon.df(deck) pd.testing.assert_frame_equal( wconprod_df, @@ -207,7 +207,7 @@ def test_tstep(): 'OP1' 3000 / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) wcondf = wcon.df(deck) dates = [str(x) for x in wcondf["DATE"].unique()] assert len(dates) == 3 @@ -219,8 +219,8 @@ def test_tstep(): def test_main_subparsers(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / ".TMP-wcondf.csv" - mocker.patch("sys.argv", ["ecl2csv", "wcon", EIGHTCELLS, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "wcon", EIGHTCELLS, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -230,7 +230,7 @@ def test_main_subparsers(tmp_path, mocker): def test_magic_stdout(): """Test that we can pipe the output into a dataframe""" result = subprocess.run( - ["ecl2csv", "wcon", "-v", "-o", "-", EIGHTCELLS], + ["res2csv", "wcon", "-v", "-o", "-", EIGHTCELLS], check=True, stdout=subprocess.PIPE, ) diff --git a/tests/test_wellcompletiondata.py b/tests/test_wellcompletiondata.py index 07101459e..70892880a 100644 --- a/tests/test_wellcompletiondata.py +++ b/tests/test_wellcompletiondata.py @@ -5,9 +5,9 @@ import pandas as pd import pytest -from ecl2df import common, compdat, wellcompletiondata -from ecl2df.eclfiles import EclFiles -from ecl2df.wellcompletiondata import ( +from res2df import common, compdat, wellcompletiondata +from res2df.resdatafiles import ResdataFiles +from res2df.wellcompletiondata import ( _aggregate_layer_to_zone, _df2pyarrow, _excl_well_startswith, @@ -34,7 +34,7 @@ def test_eightcells_with_wellconnstatus(): """Test the Eightcells dataset with the well connection status option activated (connection status extracted from summary data) """ - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) expected_dframe = pd.DataFrame( [ { @@ -48,7 +48,7 @@ def test_eightcells_with_wellconnstatus(): ) pd.testing.assert_frame_equal( wellcompletiondata.df( - eclfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=True + resdatafiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=True ), expected_dframe, check_dtype=False, @@ -58,7 +58,7 @@ def test_eightcells_with_wellconnstatus(): def test_eightcells_without_wellconnstatus(): """Test the Eightcells dataset with only the compdat export data (connection status extracted from parsing the schedule file)""" - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) expected_dframe = pd.DataFrame( [ { @@ -72,7 +72,7 @@ def test_eightcells_without_wellconnstatus(): ) pd.testing.assert_frame_equal( wellcompletiondata.df( - eclfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False + resdatafiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False ), expected_dframe, check_dtype=False, @@ -81,9 +81,9 @@ def test_eightcells_without_wellconnstatus(): def test_df2pyarrow(): """Test that dataframe is conserved using 
_df2pyarrow""" - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) df = wellcompletiondata.df( - eclfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False + resdatafiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False ) df["KH"] = df["KH"].astype(np.int32) pd.testing.assert_frame_equal(df, _df2pyarrow(df).to_pandas(), check_like=True) @@ -91,9 +91,9 @@ def test_df2pyarrow(): def test_metadata(): """Test that the KH column has metadata and that unit is mDm""" - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) df = wellcompletiondata.df( - eclfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False + resdatafiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False ) assert df.attrs["meta"] == {"KH": {"unit": "mDm"}} @@ -107,21 +107,21 @@ def test_empty_zonemap(): """Test empty zonemap and zonemap with layers that doesn't exist in the compdat table. Both returns an empty dataframe """ - eclfiles = EclFiles(EIGHTCELLS) - df = wellcompletiondata.df(eclfiles, zonemap={}, use_wellconnstatus=False) + resdatafiles = ResdataFiles(EIGHTCELLS) + df = wellcompletiondata.df(resdatafiles, zonemap={}, use_wellconnstatus=False) assert df.empty zonemap = {1000: "ZONE1", -1: "ZONE1"} - df = wellcompletiondata.df(eclfiles, zonemap=zonemap, use_wellconnstatus=False) + df = wellcompletiondata.df(resdatafiles, zonemap=zonemap, use_wellconnstatus=False) assert df.empty def test_zonemap_with_some_undefined_layers(): """Layers in the zonemap that don't exist in the compdat output will be ignored.""" - eclfiles = EclFiles(REEK) + resdatafiles = ResdataFiles(REEK) zonemap = {1: "ZONE1", 2: "ZONE1"} - df = wellcompletiondata.df(eclfiles, zonemap=zonemap, use_wellconnstatus=False) - compdat_df = compdat.df(eclfiles) + df = wellcompletiondata.df(resdatafiles, zonemap=zonemap, use_wellconnstatus=False) + compdat_df = compdat.df(resdatafiles) # Filter compdat on layer 1 and 2 compdat_df = compdat_df[compdat_df["K1"] <= 2] diff --git a/tests/test_wellconnstatus.py b/tests/test_wellconnstatus.py index 291e27b4e..4a62fa0dc 100644 --- a/tests/test_wellconnstatus.py +++ b/tests/test_wellconnstatus.py @@ -3,8 +3,8 @@ import pandas as pd import pytest -from ecl2df import wellconnstatus -from ecl2df.eclfiles import EclFiles +from res2df import wellconnstatus +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -23,15 +23,15 @@ def test_reek_dataset(): """Test Reek dataset. It contains no CPI data and should return an empty dataframe. 
""" - eclfiles = EclFiles(REEK) - wellconnstatus_df = wellconnstatus.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + wellconnstatus_df = wellconnstatus.df(resdatafiles) assert wellconnstatus_df.empty def test_eightcells_dataset(): """Test the Eightcells dataset which has CPI data""" - eclfiles = EclFiles(EIGHTCELLS) - wellconnstatus_df = wellconnstatus.df(eclfiles) + resdatafiles = ResdataFiles(EIGHTCELLS) + wellconnstatus_df = wellconnstatus.df(resdatafiles) expected_dframe = pd.DataFrame( [ { diff --git a/tests/test_welopen.py b/tests/test_welopen.py index df297645b..314677ed3 100644 --- a/tests/test_welopen.py +++ b/tests/test_welopen.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from ecl2df import EclFiles, compdat +from res2df import ResdataFiles, compdat try: # pylint: disable=unused-import @@ -930,7 +930,7 @@ @pytest.mark.parametrize("test_input, expected", WELOPEN_CASES) def test_welopen(test_input, expected): """Test with WELOPEN present""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) compdf = compdat.deck2dfs(deck)["COMPDAT"] columns_to_check = ["WELL", "I", "J", "K1", "K2", "OP/SH", "DATE"] @@ -1025,7 +1025,7 @@ def test_welopen(test_input, expected): 'IN2' 2 1 1 1 'OPEN' / / WELOPEN - -- In ecl2df, the WELOPEN is allowed to be before WLIST + -- In res2df, the WELOPEN is allowed to be before WLIST '*OP' 'SHUT' 0 0 0 / / WLIST @@ -1131,7 +1131,7 @@ def test_welopen(test_input, expected): ) def test_welopen_wlist(test_input, expected): """Test that WELOPEN can be used on well lists determined by WLIST""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) dfs = compdat.deck2dfs(deck) pd.testing.assert_frame_equal(dfs["COMPDAT"][expected.columns], expected) @@ -1139,7 +1139,7 @@ def test_welopen_wlist(test_input, expected): def test_welopen_df(): """Test that we can obtain WELOPEN information when it applies on well state, not on connections.""" - deck = EclFiles.str2deck( + deck = ResdataFiles.str2deck( """ DATES 1 JAN 2000 / @@ -1300,7 +1300,7 @@ def test_welopen_df(): id="complump_defaults", marks=pytest.mark.xfail( raises=ValueError, - match="Defaulted COMPLUMP coordinates are not supported in ecl2df", + match="Defaulted COMPLUMP coordinates are not supported in res2df", ), ), pytest.param( @@ -1481,8 +1481,8 @@ def test_welopen_df(): ], ) def test_welopen_complump(test_input, expected): - """Test the welopen_complump functionality through Eclipse decks""" - deck = EclFiles.str2deck(test_input) + """Test the welopen_complump functionality through .DATA files""" + deck = ResdataFiles.str2deck(test_input) dfs = compdat.deck2dfs(deck) pd.testing.assert_frame_equal(dfs["COMPDAT"][expected.columns], expected) diff --git a/tests/test_wlist.py b/tests/test_wlist.py index 88b1a1821..6f64aad96 100644 --- a/tests/test_wlist.py +++ b/tests/test_wlist.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from ecl2df import EclFiles, compdat +from res2df import ResdataFiles, compdat try: # pylint: disable=unused-import @@ -145,7 +145,7 @@ ) def test_parse_wlist(deckstr, expected_df): """Test basic parsing of WLIST keywords into a dataframe representation""" - deck = EclFiles.str2deck(deckstr) + deck = ResdataFiles.str2deck(deckstr) wlistdf = compdat.deck2dfs(deck)["WLIST"] pd.testing.assert_frame_equal(wlistdf, expected_df, check_like=True) diff --git a/tests/test_zonemap.py b/tests/test_zonemap.py index 32f6fd0fc..70a46ed2a 100644 --- a/tests/test_zonemap.py +++ b/tests/test_zonemap.py @@ -5,7 +5,7 @@ import 
pandas as pd import pytest -import ecl2df +import res2df TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -14,13 +14,13 @@ def test_stdzoneslyr(): """Test that we can read zones if the zonemap is in a standard location. - The eclfiles object defines what is the standard location for the file, while - the actual parsing is done in ecl2df.common.parse_lyrfile() and + The resdatafiles object defines the standard location for the file, while + the actual parsing is done in res2df.common.parse_lyrfile() and converted to zonemap in common.convert_lyrlist_to_zonemap() """ - eclfiles = ecl2df.EclFiles(REEK) + resdatafiles = res2df.ResdataFiles(REEK) - zonemap = eclfiles.get_zonemap() + zonemap = resdatafiles.get_zonemap() assert isinstance(zonemap, dict) assert zonemap[3] == "UpperReek" assert zonemap[10] == "MidReek" @@ -37,8 +37,8 @@ def test_stdzoneslyr(): def test_nonexistingzones(): """Test an Eclipse case with non-existing zonemap (i.e. no zonemap file in the standard location)""" - eclfiles = ecl2df.EclFiles(REEK) - zonemap = eclfiles.get_zonemap("foobar") + resdatafiles = res2df.ResdataFiles(REEK) + zonemap = resdatafiles.get_zonemap("foobar") # (we got a warning and an empty dict) assert not zonemap @@ -52,7 +52,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert ecl2df.common.parse_lyrfile(lyrfile) is None + assert res2df.common.parse_lyrfile(lyrfile) is None assert "Could not parse lyr file" in caplog.text assert "Failed on content: foo" in caplog.text @@ -64,7 +64,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert ecl2df.common.parse_lyrfile(lyrfile) is None + assert res2df.common.parse_lyrfile(lyrfile) is None assert "Failed on content: foo 1 2 3" in caplog.text lyrfile = tmp_path / "formations.lyr" @@ -74,7 +74,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert ecl2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.ResdataFiles(REEK).get_zonemap(str(lyrfile)) is None assert "From_layer higher than to_layer" in caplog.text lyrfile = tmp_path / "formations.lyr" @@ -85,7 +85,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert ecl2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.ResdataFiles(REEK).get_zonemap(str(lyrfile)) is None assert "Failed on content: foo 3- 4 #FFGGHH" in caplog.text lyrfile = tmp_path / "formations.lyr" @@ -96,7 +96,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert ecl2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.ResdataFiles(REEK).get_zonemap(str(lyrfile)) is None assert "Failed on content: foo 3- 4 bluez" in caplog.text lyrfile.write_text( @@ -105,7 +105,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert ecl2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.ResdataFiles(REEK).get_zonemap(str(lyrfile)) is None def test_lyrlist_format(tmp_path): @@ -123,7 +123,7 @@ def test_lyrlist_format(tmp_path): """, encoding="utf-8", ) - lyrlist = ecl2df.common.parse_lyrfile(lyrfile) + lyrlist = res2df.common.parse_lyrfile(lyrfile) assert lyrlist == [ {"name": "ZoneA", "from_layer": 1, "to_layer": 5, "color": "#FFE5F7"}, @@ -155,8 +155,8 @@ def test_convert_lyrlist_to_zonemap(tmp_path): """, encoding="utf-8", ) - lyrlist = ecl2df.common.parse_lyrfile(lyrfile) - zonemap = ecl2df.common.convert_lyrlist_to_zonemap(lyrlist) + lyrlist = res2df.common.parse_lyrfile(lyrfile) + zonemap = 
res2df.common.convert_lyrlist_to_zonemap(lyrlist) assert zonemap assert len(lyrlist) == 3 assert len(zonemap) == 20 @@ -176,8 +176,8 @@ def test_nonstandardzones(tmp_path): # Difficult quote parsing above, might not run in ResInsight. """ lyrfile.write_text(lyrfilecontent) - lyrlist = ecl2df.common.parse_lyrfile(lyrfile) - zonemap = ecl2df.common.convert_lyrlist_to_zonemap(lyrlist) + lyrlist = res2df.common.parse_lyrfile(lyrfile) + zonemap = res2df.common.convert_lyrlist_to_zonemap(lyrlist) assert 0 not in zonemap assert zonemap[1] == "Eiriksson" assert zonemap[10] == "Eiriksson" @@ -252,7 +252,7 @@ def test_nonstandardzones(tmp_path): def test_merge_zones(dframe, zonedict, zoneheader, kname, expected_df): """Test merging of zone information into a (mocked) grid dataframe""" pd.testing.assert_frame_equal( - ecl2df.common.merge_zones(dframe, zonedict, zoneheader, kname), + res2df.common.merge_zones(dframe, zonedict, zoneheader, kname), expected_df, check_like=True, ) @@ -264,8 +264,8 @@ def test_repeated_merge_zone(): dframe = pd.DataFrame([{"K1": 1, "ZONE": "upper"}]) pd.testing.assert_frame_equal( - ecl2df.common.merge_zones(dframe, {1: "upper"}, "ZONE"), dframe + res2df.common.merge_zones(dframe, {1: "upper"}, "ZONE"), dframe ) pd.testing.assert_frame_equal( - ecl2df.common.merge_zones(dframe, {1: "lower"}, "ZONE"), dframe + res2df.common.merge_zones(dframe, {1: "lower"}, "ZONE"), dframe )
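A minimal post-rename sanity sketch, not part of the patch itself: it uses only entry points exercised by the tests above, and the one-row dataframe mirrors the synthetic frames in test_summary.py.

    import pandas as pd

    import res2df

    # One-row synthetic summary dataframe, as in the roundtrip tests:
    dframe = pd.DataFrame([{"DATE": "2016-01-01", "FOPT": 1000, "FOPR": 100}])
    dframe["DATE"] = pd.to_datetime(dframe["DATE"])
    dframe = dframe.set_index("DATE")

    # df2eclsum() is now df2ressum(); casenames must be UPPER CASE and dot-free:
    summary = res2df.summary.df2ressum(dframe, casename="SYNTHETIC")

    # summary.df() converts the resulting Summary object back to a dataframe:
    roundtrip = res2df.summary.df(summary)
    assert roundtrip["FOPT"].values == [1000]

The command line frontends follow the same rename pattern: ecl2csv becomes res2csv and csv2ecl becomes csv2res, with subcommands and options unchanged, as seen in the mocker.patch("sys.argv", ...) calls above.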