
Commit 58938b9

fix: CI, tests & modpathfile (#1495)
* fix ci.yml -> commit.yml
* fix #1491: workaround intermittent macos CI matplotlib failures
* fix #1479: sort in child's ctor instead of _ModpathSeries.get_data() (see the sketch below)
* don't plt.show() in tests
* add comments to conftest.py
* give test_mt3d.py::test_mfnwt_CrnkNic more retries
* skip ex-gwtgwt-mt3dms-p10 mf6 example (per MODFLOW-ORG/modflow6#1008)
* rename release/ to scripts/
* move pull_request_prepare.py to scripts/
* add postprocess_benchmarks.py to scripts/
* separate CI workflows for benchmarks, examples and regression tests
* name benchmark CI artifacts benchmarks-<system>-<python version>-<run ID>
* add CI job to post-process benchmarks (creates artifact benchmarks-<run ID>)
* add cross-platform CI action to cache modflow exes & invalidate on new release
* reenable PathlineFile.get_destination_pathline_data() benchmark
* don't upload coverage after smoke tests, benchmarks, regression tests and example tests
* upload coverage on PR as well as push (fix codecov bot comments)
* decrease coverage precision to 1 decimal place (avoid small deltas)
* update to codecov action v3
1 parent c9e6f61 commit 58938b9

25 files changed (+971 / -748 lines)
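For the #1479 item above, the modpathfile changes themselves are not among the two files shown below, so the following is only an illustrative Python sketch of the pattern the message describes: sort once in the subclass constructor rather than on every call to the shared get_data(). The class, attribute, and field names (_SeriesFile, PathlineRecords, particleid, time) are hypothetical stand-ins, not FloPy's actual internals.

import numpy as np


class _SeriesFile:
    """Hypothetical stand-in for the shared _ModpathSeries base reader."""

    def __init__(self, records):
        # records: a numpy structured array of particle output
        self._data = records

    def get_data(self):
        # before the fix, sorting happened here on every call;
        # now the getter just returns what the child already ordered
        return self._data


class PathlineRecords(_SeriesFile):
    """Hypothetical child reader that establishes ordering exactly once."""

    def __init__(self, records):
        # sort in the constructor (the "child's ctor") so the order is
        # fixed up front and get_data() stays cheap and deterministic
        ordered = np.sort(records, order=["particleid", "time"])
        super().__init__(ordered)

Sorting in the constructor establishes the record order once, so repeated get_data() calls return identical, already-ordered data.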

.github/actions/cache_exes/action.yml (+108 lines)
@@ -0,0 +1,108 @@
name: Cache Modflow executables
description: 'Cache MODFLOW executables from the MODFLOW-USGS/executables repository'
inputs:
  path:
    description: 'The path to store the executables (e.g. a bin directory)'
    required: true
    default: 'bin'
  github_token:
    description: 'The GitHub API access token'
    required: true
runs:
  using: "composite"
  steps:
    - name: Make bin directory
      if: runner.os != 'Windows'
      shell: bash
      run: |
        mkdir -p ${{ inputs.path }}

    - name: Make bin directory (Windows)
      if: runner.os == 'Windows'
      shell: pwsh
      run: |
        md -Force ${{ inputs.path }}

    - name: Check release
      if: runner.os != 'Windows'
      shell: bash
      run: |
        # get info for the executables repository's latest release
        release_json=$(gh api -X GET -H "Accept: application/vnd.github+json" /repos/MODFLOW-USGS/executables/releases/latest)

        # get asset ID of the release's metadata file, if one exists
        get_asset_id="
        import json
        import sys
        release = json.load(sys.stdin, strict=False)
        metadata = next(iter([a for a in release['assets'] if a['name'] == 'code.json']), None)
        print(dict(metadata)['id'] if metadata else '')
        "
        asset_id=$(echo "$release_json" | python -c "$get_asset_id")

        # asset_id is empty if metadata file asset wasn't found
        if [ ${#asset_id} -gt 0 ]; then
          gh api -H "Accept: application/octet-stream" "/repos/MODFLOW-USGS/executables/releases/assets/$asset_id" >> executables.json
        else
          # give hashFiles an empty file to hash
          touch executables.json
        fi
      env:
        GH_TOKEN: ${{ inputs.github_token }}

    - name: Check release (Windows)
      if: runner.os == 'Windows'
      shell: pwsh
      run: |
        # get info for the executables repository's latest release
        $release_json=(gh api -X GET -H "Accept: application/vnd.github+json" /repos/MODFLOW-USGS/executables/releases/latest)

        # get asset ID of the release's metadata file, if one exists
        $pattern="code.json"
        $release=(echo $release_json | ConvertFrom-Json)
        $asset_id=($release.assets | Where-Object {$_.name -match "$pattern"} | % {echo $_.id})

        # asset_id is empty if metadata file asset wasn't found
        if ($asset_id.Length -gt 0) {
          gh api -H "Accept: application/octet-stream" "/repos/MODFLOW-USGS/executables/releases/assets/$asset_id" >> executables.json
        } else {
          # give hashFiles an empty file to hash
          New-Item -Name "executables.json" -ItemType File
        }
      env:
        GH_TOKEN: ${{ inputs.github_token }}

    - name: Cache executables
      id: cache_executables
      uses: actions/cache@v3
      with:
        path: ${{ inputs.path }}
        key: modflow-exes-${{ runner.os }}-${{ hashFiles('executables.json') }}

    - name: Install executables
      if: runner.os != 'Windows' && steps.cache_executables.outputs.cache-hit != 'true'
      shell: bash
      run: |
        get-modflow ${{ inputs.path }}
      env:
        GITHUB_TOKEN: ${{ inputs.github_token }}

    - name: Install executables (Windows)
      if: runner.os == 'Windows' && steps.cache_executables.outputs.cache-hit != 'true'
      shell: pwsh
      run: |
        get-modflow ${{ inputs.path }}
      env:
        GITHUB_TOKEN: ${{ inputs.github_token }}

    - name: Add executables to path
      if: runner.os != 'Windows'
      shell: bash
      run: |
        echo ${{ inputs.path }} >> $GITHUB_PATH

    - name: Add executables to path (Windows)
      if: runner.os == 'Windows'
      shell: pwsh
      run: |
        echo ${{ inputs.path }} | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
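For reference, the composite action above is invoked from the workflows in this commit like any local action; the step below is the same call used in the benchmark workflow further down, pointing the path input at a user bin directory:

- name: Install Modflow executables
  uses: ./.github/actions/cache_exes
  with:
    path: ~/.local/bin
    github_token: ${{ secrets.GITHUB_TOKEN }}

Because the cache key is modflow-exes-${{ runner.os }}-${{ hashFiles('executables.json') }}, and executables.json is refreshed from the latest release's code.json metadata on every run, publishing a new MODFLOW-USGS/executables release changes the hash and invalidates the cache, as the commit message describes.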

.github/workflows/benchmark.yml (+231 lines)
@@ -0,0 +1,231 @@
name: FloPy benchmarks

on:
  schedule:
    - cron: '0 8 * * *' # run at 8 AM UTC (12 am PST)

jobs:
  benchmark:
    name: Benchmarks
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-latest, macos-latest ]
        python-version: [ 3.7, 3.8, 3.9, "3.10" ]
        exclude:
          # avoid shutil.copytree infinite recursion bug
          # https://github.com/python/cpython/pull/17098
          - python-version: '3.8.0'
        include:
          - os: ubuntu-latest
            path: ~/.cache/pip
          - os: macos-latest
            path: ~/Library/Caches/pip
    defaults:
      run:
        shell: bash
    timeout-minutes: 90

    steps:
      - name: Checkout repo
        uses: actions/[email protected]

      - name: Cache Python
        uses: actions/cache@v3
        with:
          path: ${{ matrix.path }}
          key: ${{ matrix.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.cfg') }}
          restore-keys: |
            ${{ matrix.os }}-${{ matrix.python-version }}-pip-

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Get branch name
        uses: nelonoel/[email protected]

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .
          pip install ".[test, optional]"

      - name: Install Modflow executables
        uses: ./.github/actions/cache_exes
        with:
          path: ~/.local/bin
          github_token: ${{ secrets.GITHUB_TOKEN }}

      - name: Run benchmarks
        working-directory: ./autotest
        run: |
          mkdir -p .benchmarks
          pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ matrix.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload failed benchmark artifact
        uses: actions/upload-artifact@v2
        if: failure()
        with:
          name: failed-benchmark-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.failed/**

      - name: Upload benchmark result artifact
        uses: actions/upload-artifact@v2
        with:
          name: benchmarks-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.benchmarks/**/*.json

  benchmark_windows:
    name: Benchmarks (Windows)
    runs-on: windows-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: [ 3.7, 3.8, 3.9, "3.10" ]
        exclude:
          # avoid shutil.copytree infinite recursion bug
          # https://github.com/python/cpython/pull/17098
          - python-version: '3.8.0'
    defaults:
      run:
        shell: pwsh
    timeout-minutes: 90

    steps:
      - name: Checkout repo
        uses: actions/[email protected]

      - name: Get branch name
        uses: nelonoel/[email protected]

      - name: Cache Miniconda
        uses: actions/cache@v3
        with:
          path: ~/conda_pkgs_dir
          key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }}

      # Standard python fails on windows without GDAL installation
      # Using custom bash shell ("shell: bash -l {0}") with Miniconda
      - name: Setup Miniconda
        uses: conda-incubator/[email protected]
        with:
          python-version: ${{ matrix.python-version }}
          channels: conda-forge
          auto-update-conda: true
          activate-environment: flopy
          use-only-tar-bz2: true

      - name: Install Python dependencies
        run: |
          conda env update --name flopy --file etc/environment.yml
          python -m pip install --upgrade pip
          pip install https://github.com/modflowpy/pymake/zipball/master
          pip install xmipy
          pip install .

      - name: Install Modflow executables
        uses: ./.github/actions/cache_exes
        with:
          path: C:\Users\runneradmin\.local\bin
          github_token: ${{ secrets.GITHUB_TOKEN }}

      - name: Run benchmarks
        working-directory: ./autotest
        run: |
          md -Force .benchmarks
          pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ runner.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload failed benchmark artifact
        uses: actions/upload-artifact@v2
        if: failure()
        with:
          name: failed-benchmark-${{ runner.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.failed/**

      - name: Upload benchmark result artifact
        uses: actions/upload-artifact@v2
        with:
          name: benchmarks-${{ runner.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.benchmarks/**/*.json

  post_benchmark:
    needs:
      - benchmark
      - benchmark_windows
    name: Process benchmark results
    runs-on: ubuntu-latest
    defaults:
      run:
        shell: bash
    timeout-minutes: 10

    steps:
      - name: Checkout repo
        uses: actions/[email protected]

      - name: Cache Python
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-3.7-pip-${{ hashFiles('setup.cfg') }}
          restore-keys: |
            ${{ runner.os }}-3.7-pip-

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.7

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install numpy pandas matplotlib seaborn

      - name: Download all artifacts
        uses: actions/download-artifact@v3
        with:
          path: ./autotest/.benchmarks

      - name: Process benchmark results
        run: |
          artifact_json=$(gh api -X GET -H "Accept: application/vnd.github+json" /repos/modflowpy/flopy/actions/artifacts)
          get_artifact_ids="
          import json
          import sys
          from os import linesep

          artifacts = json.load(sys.stdin, strict=False)['artifacts']
          artifacts = [a for a in artifacts if a['name'].startswith('benchmarks-') and a['name'].split('-')[-1].isdigit()]

          print(linesep.join([str(a['id']) for a in artifacts]))
          "
          echo $artifact_json \
            | python -c "$get_artifact_ids" \
            | xargs -I@ bash -c "gh api -H 'Accept: application/vnd.github+json' /repos/modflowpy/flopy/actions/artifacts/@/zip >> ./autotest/.benchmarks/@.zip"
          zipfiles=( ./autotest/.benchmarks/*.zip )
          if (( ${#zipfiles[@]} )); then
            unzip -o './autotest/.benchmarks/*.zip' -d ./autotest/.benchmarks
          fi
          python ./scripts/process_benchmarks.py ./autotest/.benchmarks ./autotest/.benchmarks
        env:
          ARTIFACTS: ${{steps.run_tests.outputs.artifact_ids}}
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload benchmark results
        uses: actions/upload-artifact@v2
        with:
          name: benchmarks-${{ github.run_id }}
          path: |
            ./autotest/.benchmarks/*.csv
            ./autotest/.benchmarks/*.png
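The post_benchmark job hands the downloaded JSON files to ./scripts/process_benchmarks.py, which is not included in this diff. Purely as an illustration of what such post-processing can look like, given pytest-benchmark's JSON layout and the pandas/seaborn dependencies installed above, here is a hedged sketch; the function and column names are made up and do not reflect the actual script:

import json
import sys
from pathlib import Path

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt


def collect(benchmarks_dir: Path) -> pd.DataFrame:
    # flatten every pytest-benchmark JSON report found under the directory
    rows = []
    for path in benchmarks_dir.rglob("*.json"):
        with open(path) as f:
            report = json.load(f)
        system = report.get("machine_info", {}).get("system", "unknown")
        for bench in report.get("benchmarks", []):
            rows.append(
                {
                    "case": bench["name"],
                    "system": system,
                    "mean_s": bench["stats"]["mean"],
                }
            )
    return pd.DataFrame(rows)


if __name__ == "__main__":
    input_dir, output_dir = Path(sys.argv[1]), Path(sys.argv[2])
    df = collect(input_dir)
    df.to_csv(output_dir / "benchmarks.csv", index=False)  # tabular summary
    ax = sns.barplot(data=df, x="case", y="mean_s", hue="system")
    ax.set_ylabel("mean runtime (s)")
    plt.xticks(rotation=90)
    plt.tight_layout()
    plt.savefig(output_dir / "benchmarks.png")  # figure for the upload step

The CSV and PNG outputs line up with the *.csv and *.png paths collected by the final upload step.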

0 commit comments
