fix: CI, tests & modpathfile (#1495)
* fix ci.yml -> commit.yml
* fix #1491: workaround intermittent macos CI matplotlib failures
* fix #1479: sort in child's ctor instead of _ModpathSeries.get_data()
* don't plt.show() in tests
* add comments to conftest.py
* give test_mt3d.py::test_mfnwt_CrnkNic more retries (see the retry sketch after this list)
* skip ex-gwtgwt-mt3dms-p10 mf6 example (per MODFLOW-USGS/modflow6#1008)
* rename release/ to scripts/
* move pull_request_prepare.py to scripts/
* add process_benchmarks.py to scripts/
* separate CI workflows for benchmarks, examples and regression tests
* name benchmark CI artifacts benchmarks-<system>-<python version>-<run ID>
* add CI job to post-process benchmarks (creates artifact benchmarks-<run ID>)
* add cross-platform CI action to cache modflow exes & invalidate on new release
* reenable PathlineFile.get_destination_pathline_data() benchmark
* don't upload coverage after smoke tests, benchmarks, regression tests and example tests
* upload coverage on PR as well as push (fix codecov bot comments)
* decrease coverage precision to 1 decimal place (avoid small deltas)
* update to codecov action v3
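
A note on the retry item above: this excerpt does not show how test_mt3d.py::test_mfnwt_CrnkNic is given more retries, so the following is only an illustrative sketch using the pytest-rerunfailures plugin's flaky marker. The plugin choice, rerun counts, and test body are assumptions, not flopy's actual mechanism.

# Illustrative only: retrying an intermittently failing test with
# pytest-rerunfailures (pip install pytest-rerunfailures). Whether flopy
# uses this plugin is not shown in this diff.
import random

import pytest

@pytest.mark.flaky(reruns=5, reruns_delay=2)  # retry up to 5 times, 2 s apart
def test_sometimes_flaky():
    # stand-in for a test that fails intermittently (e.g. a solver race)
    assert random.random() > 0.01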
wpbonelli committed Aug 22, 2022
1 parent c9e6f61 commit 58938b9
Showing 25 changed files with 971 additions and 748 deletions.
108 changes: 108 additions & 0 deletions .github/actions/cache_exes/action.yml
@@ -0,0 +1,108 @@
name: Cache Modflow executables
description: 'Cache MODFLOW executables from the MODFLOW-USGS/executables repository'
inputs:
  path:
    description: 'The path to store the executables (e.g. a bin directory)'
    required: true
    default: 'bin'
  github_token:
    description: 'The GitHub API access token'
    required: true
runs:
  using: "composite"
  steps:
    - name: Make bin directory
      if: runner.os != 'Windows'
      shell: bash
      run: |
        mkdir -p ${{ inputs.path }}

    - name: Make bin directory (Windows)
      if: runner.os == 'Windows'
      shell: pwsh
      run: |
        md -Force ${{ inputs.path }}

    - name: Check release
      if: runner.os != 'Windows'
      shell: bash
      run: |
        # get info for the executables repository's latest release
        release_json=$(gh api -X GET -H "Accept: application/vnd.github+json" /repos/MODFLOW-USGS/executables/releases/latest)
        # get asset ID of the release's metadata file, if one exists
        get_asset_id="
        import json
        import sys
        release = json.load(sys.stdin, strict=False)
        metadata = next(iter([a for a in release['assets'] if a['name'] == 'code.json']), None)
        print(dict(metadata)['id'] if metadata else '')
        "
        asset_id=$(echo "$release_json" | python -c "$get_asset_id")
        # asset_id is empty if metadata file asset wasn't found
        if [ ${#asset_id} -gt 0 ]; then
          gh api -H "Accept: application/octet-stream" "/repos/MODFLOW-USGS/executables/releases/assets/$asset_id" >> executables.json
        else
          # give hashFiles an empty file to hash
          touch executables.json
        fi
      env:
        GH_TOKEN: ${{ inputs.github_token }}

    - name: Check release (Windows)
      if: runner.os == 'Windows'
      shell: pwsh
      run: |
        # get info for the executables repository's latest release
        $release_json=(gh api -X GET -H "Accept: application/vnd.github+json" /repos/MODFLOW-USGS/executables/releases/latest)
        # get asset ID of the release's metadata file, if one exists
        $pattern="code.json"
        $release=(echo $release_json | ConvertFrom-Json)
        $asset_id=($release.assets | Where-Object {$_.name -match "$pattern"} | % {echo $_.id})
        # asset_id is empty if metadata file asset wasn't found
        if ($asset_id.Length -gt 0) {
          gh api -H "Accept: application/octet-stream" "/repos/MODFLOW-USGS/executables/releases/assets/$asset_id" >> executables.json
        } else {
          # give hashFiles an empty file to hash
          New-Item -Name "executables.json" -ItemType File
        }
      env:
        GH_TOKEN: ${{ inputs.github_token }}

    - name: Cache executables
      id: cache_executables
      uses: actions/cache@v3
      with:
        path: ${{ inputs.path }}
        key: modflow-exes-${{ runner.os }}-${{ hashFiles('executables.json') }}

    - name: Install executables
      if: runner.os != 'Windows' && steps.cache_executables.outputs.cache-hit != 'true'
      shell: bash
      run: |
        get-modflow ${{ inputs.path }}
      env:
        GITHUB_TOKEN: ${{ inputs.github_token }}

    - name: Install executables (Windows)
      if: runner.os == 'Windows' && steps.cache_executables.outputs.cache-hit != 'true'
      shell: pwsh
      run: |
        get-modflow ${{ inputs.path }}
      env:
        GITHUB_TOKEN: ${{ inputs.github_token }}

    - name: Add executables to path
      if: runner.os != 'Windows'
      shell: bash
      run: |
        echo ${{ inputs.path }} >> $GITHUB_PATH

    - name: Add executables to path (Windows)
      if: runner.os == 'Windows'
      shell: pwsh
      run: |
        echo ${{ inputs.path }} | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
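
The cache key modflow-exes-${{ runner.os }}-${{ hashFiles('executables.json') }} is what invalidates the cache on a new release: when MODFLOW-USGS/executables publishes a release with a changed code.json, the downloaded metadata hashes differently, actions/cache misses, and the executables are reinstalled. A minimal Python sketch of the idea follows; the helper cache_key is illustrative, not GitHub's actual hashFiles implementation.

# Conceptual sketch of hashFiles-based cache invalidation (illustrative only).
import hashlib
from pathlib import Path

def cache_key(metadata_path, system="Linux"):
    """Hash the release metadata file; any change to it yields a new key."""
    path = Path(metadata_path)
    path.touch(exist_ok=True)  # mimic the action's fallback: empty file, stable hash
    digest = hashlib.sha256(path.read_bytes()).hexdigest()
    return "modflow-exes-{}-{}".format(system, digest)

print(cache_key("executables.json"))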
231 changes: 231 additions & 0 deletions .github/workflows/benchmark.yml
@@ -0,0 +1,231 @@
name: FloPy benchmarks

on:
  schedule:
    - cron: '0 8 * * *' # run at 8 AM UTC (12 am PST)

jobs:
  benchmark:
    name: Benchmarks
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-latest, macos-latest ]
        python-version: [ 3.7, 3.8, 3.9, "3.10" ]
        exclude:
          # avoid shutil.copytree infinite recursion bug
          # https://github.com/python/cpython/pull/17098
          - python-version: '3.8.0'
        include:
          - os: ubuntu-latest
            path: ~/.cache/pip
          - os: macos-latest
            path: ~/Library/Caches/pip
    defaults:
      run:
        shell: bash
    timeout-minutes: 90

    steps:
      - name: Checkout repo
        uses: actions/checkout@v2.3.4

      - name: Cache Python
        uses: actions/cache@v3
        with:
          path: ${{ matrix.path }}
          key: ${{ matrix.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.cfg') }}
          restore-keys: |
            ${{ matrix.os }}-${{ matrix.python-version }}-pip-

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Get branch name
        uses: nelonoel/branch-name@v1.0.1

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .
          pip install ".[test, optional]"

      - name: Install Modflow executables
        uses: ./.github/actions/cache_exes
        with:
          path: ~/.local/bin
          github_token: ${{ secrets.GITHUB_TOKEN }}

      - name: Run benchmarks
        working-directory: ./autotest
        run: |
          mkdir -p .benchmarks
          pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ matrix.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload failed benchmark artifact
        uses: actions/upload-artifact@v2
        if: failure()
        with:
          name: failed-benchmark-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.failed/**

      - name: Upload benchmark result artifact
        uses: actions/upload-artifact@v2
        with:
          name: benchmarks-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.benchmarks/**/*.json

  benchmark_windows:
    name: Benchmarks (Windows)
    runs-on: windows-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: [ 3.7, 3.8, 3.9, "3.10" ]
        exclude:
          # avoid shutil.copytree infinite recursion bug
          # https://github.com/python/cpython/pull/17098
          - python-version: '3.8.0'
    defaults:
      run:
        shell: pwsh
    timeout-minutes: 90

    steps:
      - name: Checkout repo
        uses: actions/checkout@v2.3.4

      - name: Get branch name
        uses: nelonoel/branch-name@v1.0.1

      - name: Cache Miniconda
        uses: actions/cache@v3
        with:
          path: ~/conda_pkgs_dir
          key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }}

      # Standard python fails on windows without GDAL installation
      # Using custom bash shell ("shell: bash -l {0}") with Miniconda
      - name: Setup Miniconda
        uses: conda-incubator/setup-miniconda@v2.1.1
        with:
          python-version: ${{ matrix.python-version }}
          channels: conda-forge
          auto-update-conda: true
          activate-environment: flopy
          use-only-tar-bz2: true

      - name: Install Python dependencies
        run: |
          conda env update --name flopy --file etc/environment.yml
          python -m pip install --upgrade pip
          pip install https://github.com/modflowpy/pymake/zipball/master
          pip install xmipy
          pip install .

      - name: Install Modflow executables
        uses: ./.github/actions/cache_exes
        with:
          path: C:\Users\runneradmin\.local\bin
          github_token: ${{ secrets.GITHUB_TOKEN }}

      - name: Run benchmarks
        working-directory: ./autotest
        run: |
          md -Force .benchmarks
          pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ runner.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload failed benchmark artifact
        uses: actions/upload-artifact@v2
        if: failure()
        with:
          name: failed-benchmark-${{ runner.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.failed/**

      - name: Upload benchmark result artifact
        uses: actions/upload-artifact@v2
        with:
          name: benchmarks-${{ runner.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.benchmarks/**/*.json

  post_benchmark:
    needs:
      - benchmark
      - benchmark_windows
    name: Process benchmark results
    runs-on: ubuntu-latest
    defaults:
      run:
        shell: bash
    timeout-minutes: 10

    steps:
      - name: Checkout repo
        uses: actions/checkout@v2.3.4

      - name: Cache Python
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-3.7-pip-${{ hashFiles('setup.cfg') }}
          restore-keys: |
            ${{ runner.os }}-3.7-pip-

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.7

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install numpy pandas matplotlib seaborn

      - name: Download all artifacts
        uses: actions/download-artifact@v3
        with:
          path: ./autotest/.benchmarks

      - name: Process benchmark results
        run: |
          # list the repo's artifacts, then select per-run benchmark results
          artifact_json=$(gh api -X GET -H "Accept: application/vnd.github+json" /repos/modflowpy/flopy/actions/artifacts)
          get_artifact_ids="
          import json
          import sys
          from os import linesep
          artifacts = json.load(sys.stdin, strict=False)['artifacts']
          artifacts = [a for a in artifacts if a['name'].startswith('benchmarks-') and a['name'].split('-')[-1].isdigit()]
          print(linesep.join([str(a['id']) for a in artifacts]))
          "
          # download each matching artifact as a zipfile, then extract them all
          echo $artifact_json \
            | python -c "$get_artifact_ids" \
            | xargs -I@ bash -c "gh api -H 'Accept: application/vnd.github+json' /repos/modflowpy/flopy/actions/artifacts/@/zip >> ./autotest/.benchmarks/@.zip"
          zipfiles=( ./autotest/.benchmarks/*.zip )
          if (( ${#zipfiles[@]} )); then
            unzip -o './autotest/.benchmarks/*.zip' -d ./autotest/.benchmarks
          fi
          python ./scripts/process_benchmarks.py ./autotest/.benchmarks ./autotest/.benchmarks
        env:
          ARTIFACTS: ${{steps.run_tests.outputs.artifact_ids}}
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload benchmark results
        uses: actions/upload-artifact@v2
        with:
          name: benchmarks-${{ github.run_id }}
          path: |
            ./autotest/.benchmarks/*.csv
            ./autotest/.benchmarks/*.png
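
scripts/process_benchmarks.py itself is not part of this diff, so the following is only a rough sketch of what such a post-processing step could look like, assuming the pytest-benchmark JSON layout (a top-level "benchmarks" list whose entries carry a "stats" dict with mean/min/max/stddev). The field choices and CSV layout are assumptions, not the actual script.

# Rough sketch only: aggregate pytest-benchmark JSON files into one CSV.
import csv
import json
import sys
from pathlib import Path

def collect(input_dir):
    """Flatten every benchmark record found under input_dir into rows."""
    rows = []
    for jsonfile in sorted(Path(input_dir).glob("**/*.json")):
        data = json.loads(jsonfile.read_text())
        for bench in data.get("benchmarks", []):
            stats = bench.get("stats", {})
            rows.append({
                "source": jsonfile.stem,  # e.g. ubuntu-latest_python3.9
                "benchmark": bench.get("fullname") or bench.get("name"),
                "mean_s": stats.get("mean"),
                "min_s": stats.get("min"),
                "max_s": stats.get("max"),
                "stddev_s": stats.get("stddev"),
            })
    return rows

if __name__ == "__main__":
    # mirrors the workflow's call signature: <input dir> <output dir>
    input_dir, output_dir = sys.argv[1], sys.argv[2]
    rows = collect(input_dir)
    fields = ["source", "benchmark", "mean_s", "min_s", "max_s", "stddev_s"]
    with open(Path(output_dir) / "benchmarks.csv", "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=fields)
        writer.writeheader()
        writer.writerows(rows)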
