name: FloPy benchmarks

on:
  schedule:
    # run at 8 AM UTC (midnight PST)
    - cron: '0 8 * * *'

jobs:
  benchmark:
    name: Benchmarks
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-latest, macos-latest ]
        # versions quoted so YAML does not read them as floats (3.10 -> 3.1)
        python-version: [ "3.7", "3.8", "3.9", "3.10" ]
        exclude:
          # avoid shutil.copytree infinite recursion bug
          # https://github.com/python/cpython/pull/17098
          - python-version: "3.8.0"
        include:
          # per-OS pip cache location, consumed by the Cache Python step
          - os: ubuntu-latest
            path: ~/.cache/pip
          - os: macos-latest
            path: ~/Library/Caches/pip
    defaults:
      run:
        shell: bash
    timeout-minutes: 90

    steps:
      - name: Checkout repo
        # NOTE(review): this step's body was lost in the paste; actions/checkout
        # is the conventional body — confirm against the original workflow.
        uses: actions/checkout@v3

      - name: Cache Python
        uses: actions/cache@v3
        with:
          path: ${{ matrix.path }}
          key: ${{ matrix.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.cfg') }}
          restore-keys: |
            ${{ matrix.os }}-${{ matrix.python-version }}-pip-

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Get branch name
        # NOTE(review): this step's body was lost in the paste; the placeholder
        # below exports the branch name for later steps — restore the original
        # implementation if it differed.
        run: echo "BRANCH_NAME=${GITHUB_REF##*/}" >> $GITHUB_ENV

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .
          pip install ".[test, optional]"

      - name: Install Modflow executables
        uses: ./.github/actions/cache_exes
        with:
          path: ~/.local/bin
          github_token: ${{ secrets.GITHUB_TOKEN }}

      - name: Run benchmarks
        working-directory: ./autotest
        run: |
          mkdir -p .benchmarks
          pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ matrix.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload failed benchmark artifact
        # NOTE(review): upload-artifact@v2 is deprecated; bumped to v3 to match
        # the download-artifact@v3 used in the post_benchmark job.
        uses: actions/upload-artifact@v3
        if: failure()
        with:
          name: failed-benchmark-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.failed/**

      - name: Upload benchmark result artifact
        uses: actions/upload-artifact@v3
        with:
          name: benchmarks-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.benchmarks/**/*.json

  benchmark_windows:
    name: Benchmarks (Windows)
    runs-on: windows-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: [ "3.7", "3.8", "3.9", "3.10" ]
        exclude:
          # avoid shutil.copytree infinite recursion bug
          # https://github.com/python/cpython/pull/17098
          - python-version: "3.8.0"
    defaults:
      run:
        shell: pwsh
    timeout-minutes: 90

    steps:
      - name: Checkout repo
        # NOTE(review): step body lost in the paste — see benchmark job.
        uses: actions/checkout@v3

      - name: Get branch name
        # NOTE(review): step body lost in the paste; bash shell used so the
        # same one-liner works on Windows runners.
        shell: bash
        run: echo "BRANCH_NAME=${GITHUB_REF##*/}" >> $GITHUB_ENV

      - name: Cache Miniconda
        uses: actions/cache@v3
        with:
          path: ~/conda_pkgs_dir
          # NOTE(review): matrix.run-type has no matching matrix dimension in
          # this job and expands to an empty string — confirm or remove.
          key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }}

      # Standard python fails on windows without GDAL installation
      # Using custom bash shell ("shell: bash -l {0}") with Miniconda
      - name: Setup Miniconda
        # NOTE(review): the original version ref was destroyed by email
        # obfuscation ("[email protected]"); it was a pinned v2.x.y
        # release — restore the exact pin from the original workflow.
        uses: conda-incubator/setup-miniconda@v2
        with:
          python-version: ${{ matrix.python-version }}
          channels: conda-forge
          auto-update-conda: true
          activate-environment: flopy
          use-only-tar-bz2: true

      - name: Install Python dependencies
        run: |
          conda env update --name flopy --file etc/environment.yml
          python -m pip install --upgrade pip
          pip install https://github.com/modflowpy/pymake/zipball/master
          pip install xmipy
          pip install .

      - name: Install Modflow executables
        uses: ./.github/actions/cache_exes
        with:
          path: C:\Users\runneradmin\.local\bin
          github_token: ${{ secrets.GITHUB_TOKEN }}

      - name: Run benchmarks
        working-directory: ./autotest
        run: |
          md -Force .benchmarks
          pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ runner.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload failed benchmark artifact
        uses: actions/upload-artifact@v3
        if: failure()
        with:
          name: failed-benchmark-${{ runner.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.failed/**

      - name: Upload benchmark result artifact
        uses: actions/upload-artifact@v3
        with:
          name: benchmarks-${{ runner.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.benchmarks/**/*.json

  post_benchmark:
    needs:
      - benchmark
      - benchmark_windows
    name: Process benchmark results
    runs-on: ubuntu-latest
    defaults:
      run:
        shell: bash
    timeout-minutes: 10

    steps:
      - name: Checkout repo
        # NOTE(review): step body lost in the paste — see benchmark job.
        uses: actions/checkout@v3

      - name: Cache Python
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-3.7-pip-${{ hashFiles('setup.cfg') }}
          restore-keys: |
            ${{ runner.os }}-3.7-pip-

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.7"

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install numpy pandas matplotlib seaborn

      - name: Download all artifacts
        uses: actions/download-artifact@v3
        with:
          path: ./autotest/.benchmarks

      - name: Process benchmark results
        # Pull benchmark JSON artifacts from previous runs via the REST API,
        # merge them with this run's artifacts, and plot the results.
        # NOTE(review): the original step also set
        # "ARTIFACTS: ${{ steps.run_tests.outputs.artifact_ids }}", but no step
        # with id "run_tests" exists in this workflow and the script below never
        # reads ARTIFACTS, so that env entry was removed.
        run: |
          artifact_json=$(gh api -X GET -H "Accept: application/vnd.github+json" /repos/modflowpy/flopy/actions/artifacts)
          get_artifact_ids="
          import json
          import sys
          from os import linesep

          artifacts = json.load(sys.stdin, strict=False)['artifacts']
          artifacts = [a for a in artifacts if a['name'].startswith('benchmarks-') and a['name'].split('-')[-1].isdigit()]

          print(linesep.join([str(a['id']) for a in artifacts]))
          "
          echo $artifact_json \
            | python -c "$get_artifact_ids" \
            | xargs -I@ bash -c "gh api -H 'Accept: application/vnd.github+json' /repos/modflowpy/flopy/actions/artifacts/@/zip >> ./autotest/.benchmarks/@.zip"
          zipfiles=( ./autotest/.benchmarks/*.zip )
          if (( ${#zipfiles[@]} )); then
            unzip -o './autotest/.benchmarks/*.zip' -d ./autotest/.benchmarks
          fi
          python ./scripts/process_benchmarks.py ./autotest/.benchmarks ./autotest/.benchmarks
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload benchmark results
        uses: actions/upload-artifact@v3
        with:
          name: benchmarks-${{ github.run_id }}
          path: |
            ./autotest/.benchmarks/*.csv
            ./autotest/.benchmarks/*.png