Skip to content

Commit 20ea9e5

Browse files
committed
laaber + schultz's method of estimating slowdown
1 parent 8b3c84a commit 20ea9e5

20 files changed

+916
-4660
lines changed
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
data/
2+
out/
Lines changed: 277 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,277 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "markdown",
5+
"metadata": {},
6+
"source": [
7+
"This notebook explores variability in hail's python (macro)-benchmarks when\n",
8+
"said benchmarks are executed on the hail batch service. The analyses within \n",
9+
"are based on the methods proposed in [1], albeit slightly modified for long\n",
10+
"running benchmarks. The goals of these analyses are\n",
11+
"\n",
12+
"- to determine if we can detect slowdowns of 5% or less reliably when running\n",
13+
" benchmarks on hail batch.\n",
14+
"- to identify configurations (number of batch jobs x iterations) that allow us\n",
15+
" to detect slowdowns efficiently (i.e. without excessive time and money).\n",
16+
"\n",
17+
"[1] Laaber et al., Software Microbenchmarking in the Cloud. How Bad is it Really?\n",
18+
" https://dl.acm.org/doi/10.1007/s10664-019-09681-1"
19+
]
20+
},
21+
{
22+
"cell_type": "code",
23+
"execution_count": null,
24+
"metadata": {},
25+
"outputs": [],
26+
"source": [
27+
"from pathlib import Path\n",
28+
"\n",
29+
"from benchmark.tools.impex import import_timings\n",
30+
"from benchmark.tools.plotting import plot_mean_time_per_instance, plot_trial_against_time\n",
31+
"from benchmark.tools.statistics import (\n",
32+
" bootstrap_mean_confidence_interval,\n",
33+
" laaber_mds,\n",
34+
" schultz_mds,\n",
35+
" variability,\n",
36+
")\n",
37+
"from IPython.display import clear_output\n",
38+
"from plotly.io import renderers\n",
39+
"\n",
40+
"import hail as hl\n",
41+
"\n",
42+
"renderers.default = 'notebook_connected'"
43+
]
44+
},
45+
{
46+
"cell_type": "code",
47+
"execution_count": null,
48+
"metadata": {},
49+
"outputs": [],
50+
"source": [
51+
"hl.init(backend='spark', idempotent=True, local_tmpdir='/tmp/mds')\n",
52+
"hl._set_flags(use_new_shuffle='1', lower='1')"
53+
]
54+
},
55+
{
56+
"cell_type": "code",
57+
"execution_count": null,
58+
"metadata": {},
59+
"outputs": [],
60+
"source": [
61+
"# Import benchmark data\n",
62+
"# ---------------------\n",
63+
"#\n",
64+
"# benchmarks under `hail/python/benchmarks` are executed with a custom pytest\n",
65+
"# plugin and their results are output as json lines (.jsonl).\n",
66+
"# Unscrupulously, we use hail to analyse itself.\n",
67+
"\n",
68+
"ht = import_timings(Path('data/1k.jsonl'))\n",
69+
"ht = ht.checkpoint('out/imported.ht', overwrite=True)\n",
70+
"benchmarks = ht.aggregate(hl.agg.collect_as_set(ht.name))\n",
71+
"print(*benchmarks, sep='\\n')"
72+
]
73+
},
74+
{
75+
"cell_type": "code",
76+
"execution_count": null,
77+
"metadata": {},
78+
"outputs": [],
79+
"source": [
80+
"t = ht\n",
81+
"t = t.filter(hl.len(t.instances) == 60)\n",
82+
"names = t.aggregate(hl.array(hl.agg.collect_as_set(t.name)))\n",
83+
"print(*names, sep='\\n')"
84+
]
85+
},
86+
{
87+
"cell_type": "code",
88+
"execution_count": null,
89+
"metadata": {},
90+
"outputs": [],
91+
"source": [
92+
"# Plotting the time vs iteration for all instances provides a visual way of\n",
93+
"# identifying the number of burn-in iterations required to reach a steady-state.\n",
94+
"# Note that a steady state is never reached in some cases.\n",
95+
"\n",
96+
"for fig in plot_trial_against_time(ht, names=names):\n",
97+
" clear_output(wait=True)\n",
98+
" print(fig.labels.title)\n",
99+
" fig.show()\n",
100+
" input()"
101+
]
102+
},
103+
{
104+
"cell_type": "code",
105+
"execution_count": null,
106+
"metadata": {},
107+
"outputs": [],
108+
"source": [
109+
"# This is an iterative process. Select the minimum number of burn-in iterations\n",
110+
"# required for each benchmark. Replot and verify that the graph is more-or-less\n",
111+
"# flat. This may not be possible in all cases.\n",
112+
"\n",
113+
"\n",
114+
"def filter_burn_in_iterations(ht: hl.Table) -> hl.Table:\n",
115+
" ht = ht.annotate_globals(\n",
116+
" first_stable_index={\n",
117+
" 'benchmark_join_partitions_table[100-10]': 15,\n",
118+
" 'benchmark_union_partitions_table[10-10]': 4,\n",
119+
" 'benchmark_join_partitions_table[1000-1000]': 15,\n",
120+
" 'benchmark_write_range_table[10000000-1000]': 5,\n",
121+
" 'benchmark_matrix_table_array_arithmetic': 15,\n",
122+
" 'benchmark_table_aggregate_array_sum': 5,\n",
123+
" 'benchmark_matrix_table_cols_show': 10,\n",
124+
" 'benchmark_pc_relate': hl.missing(hl.tint),\n",
125+
" 'benchmark_write_profile_mt': 20,\n",
126+
" 'benchmark_table_aggregate_approx_cdf': 28,\n",
127+
" 'benchmark_table_aggregate_counter': 12,\n",
128+
" 'benchmark_table_show': 10,\n",
129+
" 'benchmark_export_range_matrix_table_entry_field_p100': 5,\n",
130+
" 'benchmark_group_by_collect_per_row': 8,\n",
131+
"\n",
132+
" 'benchmark_export_range_matrix_table_row_p100': 20,\n",
133+
" 'benchmark_import_gvcf_force_count': 10,\n",
134+
" 'benchmark_matrix_table_take_col': 30,\n",
135+
" 'benchmark_ndarray_matmul_int64': 23,\n",
136+
" 'benchmark_sample_qc': 14,\n",
137+
" 'benchmark_shuffle_key_rows_by_mt': 10,\n",
138+
" 'benchmark_union_partitions_table[100-100]': 40,\n",
139+
" },\n",
140+
" )\n",
141+
"\n",
142+
" return ht.select(\n",
143+
" instances=ht.instances.map(\n",
144+
" lambda instance: instance.annotate(\n",
145+
" trials=(instance.trials.filter(lambda t: t.iteration >= ht.first_stable_index[ht.name]))\n",
146+
" )\n",
147+
" ),\n",
148+
" )\n",
149+
"\n",
150+
"\n",
151+
"ht = filter_burn_in_iterations(ht)\n",
152+
"plot_trial_against_time(ht)"
153+
]
154+
},
155+
{
156+
"cell_type": "code",
157+
"execution_count": null,
158+
"metadata": {},
159+
"outputs": [],
160+
"source": [
161+
"# As a final step of cleaning, we'll filter out trials that differ by some\n",
162+
"# multiplier of the median for each instance\n",
163+
"\n",
164+
"\n",
165+
"def filter_outliers(ht: hl.Table, factor: hl.Float64Expression) -> hl.Table:\n",
166+
" # Keep only trials whose time is within a multiplicative `factor` of the instance's median\n",
167+
" return ht.select(\n",
168+
" instances=ht.instances.map(\n",
169+
" lambda instance: instance.annotate(\n",
170+
" trials=hl.bind(\n",
171+
" lambda median: instance.trials.filter(\n",
172+
" lambda t: hl.max([t.time, median]) / hl.min([t.time, median]) < factor\n",
173+
" ),\n",
174+
" hl.median(instance.trials.map(lambda t: t.time)),\n",
175+
" )\n",
176+
" ),\n",
177+
" ),\n",
178+
" )\n",
179+
"\n",
180+
"\n",
181+
"ht = filter_outliers(ht, hl.float64(10))\n",
182+
"plot_trial_against_time(ht)"
183+
]
184+
},
185+
{
186+
"cell_type": "code",
187+
"execution_count": null,
188+
"metadata": {},
189+
"outputs": [],
190+
"source": [
191+
"# These plots show the mean time per instance. This provides a visual way of\n",
192+
"# identifying differences in instance type if there are multiple distinct layers\n",
193+
"\n",
194+
"plot_mean_time_per_instance(ht)"
195+
]
196+
},
197+
{
198+
"cell_type": "code",
199+
"execution_count": null,
200+
"metadata": {},
201+
"outputs": [],
202+
"source": [
203+
"ht = ht.select(instances=ht.instances.trials.time).checkpoint('out/pruned.ht', overwrite=True)"
204+
]
205+
},
206+
{
207+
"cell_type": "code",
208+
"execution_count": null,
209+
"metadata": {},
210+
"outputs": [],
211+
"source": [
212+
"# laaber et al. section 4\n",
213+
"\n",
214+
"variability(ht).show()"
215+
]
216+
},
217+
{
218+
"cell_type": "code",
219+
"execution_count": null,
220+
"metadata": {},
221+
"outputs": [],
222+
"source": [
223+
"# laaber et al. section 5 - bootstrapping confidence intervals of the mean\n",
224+
"\n",
225+
"bootstrap_mean_confidence_interval(ht, 1000, 0.95).show()"
226+
]
227+
},
228+
{
229+
"cell_type": "code",
230+
"execution_count": null,
231+
"metadata": {},
232+
"outputs": [],
233+
"source": [
234+
"# Laaber et al - Minimal-Detectable Slowdown\n",
235+
"\n",
236+
"laaber = laaber_mds(ht).checkpoint('out/laaber-mds.ht', overwrite=True)\n",
237+
"schultz = schultz_mds(ht).checkpoint('out/schultz-mds.ht', overwrite=True)"
238+
]
239+
},
240+
{
241+
"cell_type": "code",
242+
"execution_count": null,
243+
"metadata": {
244+
"slideshow": {
245+
"slide_type": "fragment"
246+
}
247+
},
248+
"outputs": [],
249+
"source": [
250+
"\n",
251+
"mds = laaber.select(laaber=laaber.row_value, schultz=schultz[laaber.key])\n",
252+
"mds.show(100_000)"
253+
]
254+
}
255+
],
256+
"metadata": {
257+
"kernelspec": {
258+
"display_name": ".venv",
259+
"language": "python",
260+
"name": "python3"
261+
},
262+
"language_info": {
263+
"codemirror_mode": {
264+
"name": "ipython",
265+
"version": 3
266+
},
267+
"file_extension": ".py",
268+
"mimetype": "text/x-python",
269+
"name": "python",
270+
"nbconvert_exporter": "python",
271+
"pygments_lexer": "ipython3",
272+
"version": "3.9.18"
273+
}
274+
},
275+
"nbformat": 4,
276+
"nbformat_minor": 4
277+
}

0 commit comments

Comments
 (0)