Skip to content

Commit f128be5

Browse files
committed
Added an example script for downloading ERA5 by chunks to prevent 'requested data too large' error from the server.
1 parent 0c716c0 commit f128be5

File tree

1 file changed

+148
-0
lines changed

1 file changed

+148
-0
lines changed
Lines changed: 148 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,148 @@
1+
"""
2+
Usage:
3+
- change parameters in the main block at the bottom and run this script to generate sflux files from ERA5 data.
4+
- if the download fails because the requested data is too large, increase n_chunks to split the task into smaller parts.
5+
- check the docstring of gen_sflux_era5() for parameter details.
6+
"""
7+
8+
import os
9+
import gc
10+
import psutil
11+
from datetime import datetime, timedelta
12+
from time import time
13+
from pathlib import Path
14+
15+
from pyschism.forcing.nws.nws2.era5 import ERA5
16+
17+
from pyschism.mesh.hgrid import Hgrid
18+
from matplotlib.transforms import Bbox
19+
20+
import re
21+
22+
23+
def _replace_number(fname, n):
24+
# replace ".0001.nc" → ".1.nc" (no leading zeros)
25+
return re.sub(r'\.(\d{4})\.nc$', f'.{n}.nc', fname)
26+
27+
28+
def _split_time(start, end, n):
29+
"""
30+
Split the time period from start to end into n parts.
31+
"""
32+
total_days = (end - start).days
33+
base = total_days // n
34+
remainder = total_days % n
35+
36+
parts = []
37+
current = start
38+
for i in range(n):
39+
# Distribute the remainder among the first few parts
40+
length = base + (1 if i < remainder else 0)
41+
parts.append((current, length))
42+
current += timedelta(days=length)
43+
return parts
44+
45+
46+
def gen_sflux_era5(
    startdate=datetime(2020, 12, 1), rnday=60, n_chunks=1,
    bbox=None, hgrid: Hgrid = None,
):
    """
    Generate sflux files from ERA5 data for a given period by splitting the task into n_chunks.
    Individual sflux files are symlinked into a single sflux directory.

    Parameters:
    - startdate: datetime, start date of the data
    - rnday: int, number of days to generate data for
    - n_chunks: int, number of chunks to split the data generation into
    - bbox: list of float, [[min_lon, min_lat], [max_lon, max_lat]], optional bounding box for the data
    - hgrid: Hgrid object, optional hgrid to derive bounding box from

    Either bbox or hgrid_object must be provided to define the spatial extent.
    If both are provided, bbox takes precedence.

    Raises:
    - ValueError: if neither bbox nor hgrid is given
    - FileExistsError: if any 'sflux*' file/directory already exists in the output dir

    Outputs:
    - sflux/sflux_inputs.txt: configuration file for SCHISM
    - sflux/sflux_*.nc: symlinked sflux files for air, rad, and prc variables
    - sflux_era5_*/: directories containing the original sflux files for each chunk

    Note: the outdir is set to the current directory './', setting it to another path may require
    modifying deeper level code in ERA5DataInventory, which strips the path to only keep the last part.
    """

    t0 = time()  # wall-clock start, reported at the end

    # sanitize inputs
    if bbox is None and hgrid is None:
        raise ValueError("Either bbox or hgrid_object must be provided.")
    if bbox is None:
        # derive the extent from the grid in lon/lat
        bbox = hgrid.get_bbox('EPSG:4326', output_type='bbox')
        print(f"Derived bbox from hgrid: {bbox}")
    else:
        # wrap the raw [[min_lon, min_lat], [max_lon, max_lat]] list in a matplotlib Bbox
        bbox = Bbox(bbox)
        print(f"Using provided bbox: {bbox}")

    outdir = Path('./')

    # refuse to run into a dirty directory: the symlink step below would
    # fail (or mix runs) if old sflux output is still present
    if any(outdir.glob('sflux*')):
        raise FileExistsError(
            f"sflux files or related directories already exist in {outdir}. "
            "Please remove them before running this script.")

    # split time periods and generate sflux files
    periods = _split_time(start=startdate, end=(startdate + timedelta(days=rnday)), n=n_chunks)
    process = psutil.Process(os.getpid())  # for memory-usage reporting per chunk

    for i, [period_start, period_days] in enumerate(periods):
        print(f'\nGenerating sflux data from {period_start} for {period_days} days...')
        this_out_dir = outdir / f'sflux_era5_{i}'
        os.makedirs(this_out_dir, exist_ok=True)
        er = ERA5()
        if period_days < 1:
            continue  # skip zero-day periods
        # NOTE(review): rnday is passed as period_days-1 — presumably ERA5.write
        # counts the start day as day 0 (inclusive); confirm against pyschism.
        # A 1-day chunk therefore calls write with rnday=0 — verify that is intended.
        er.write(
            outdir=this_out_dir,
            start_date=period_start, rnday=period_days-1,
            air=True, rad=True, prc=True,
            bbox=bbox, output_interval=1,  # raw data is hourly, so output_interval=1 means keeping all hours
            overwrite=True, tmpdir=this_out_dir
        )  # default tmpdir is system's tmp (/tmp); it is possible /tmp does not have enough space for a large dataset.
        # drop the ERA5 object and force collection so per-chunk memory does not accumulate
        del er
        gc.collect()
        mem = process.memory_info().rss / 1024**2
        print(f"[Iter {i}] Memory usage after GC: {mem:.1f} MB")

    # consolidate sflux files
    # write sflux_inputs.txt
    os.makedirs(outdir / 'sflux', exist_ok=True)
    with open(outdir / 'sflux' / 'sflux_inputs.txt', 'w') as f:
        f.write("&sflux_inputs\n/\n")
    # link individual sflux periods to sflux directory
    # file_id keeps a single running index across all chunks so the links
    # form one continuous sflux_{var}.N.nc sequence per variable
    file_id = 1
    for i, [_, _] in enumerate(periods):
        for var in ['air', 'rad', 'prc']:
            nc_files = sorted(Path(outdir / f'sflux_era5_{i}').glob(f'sflux*{var}*.nc'))
            for k, nc_file in enumerate(nc_files):
                # relative link target, resolved from inside outdir/'sflux'
                src = f'../sflux_era5_{i}/{nc_file.name}'
                dst = outdir / 'sflux' / _replace_number(f'sflux_{var}.{file_id:04d}.nc', k + file_id)
                print(f"sym-linking: src: {src}, dest: {dst}")
                os.symlink(src, dst)
            # NOTE(review): file_id advances by the count from the last var in the
            # inner loop; assumes air/rad/prc chunks contain equal file counts — confirm.
            file_id += len(nc_files)

    print(f'\nIt took {(time()-t0)/60} minutes to generate {rnday} days')
133+
134+
135+
if __name__ == "__main__":
136+
hgrid = Hgrid.open('./hgrid.gr3', crs='EPSG:4326')
137+
138+
# example using hgrid to define bbox
139+
gen_sflux_era5(
140+
startdate=datetime(2020, 12, 1), rnday=10, n_chunks=2,
141+
hgrid=hgrid,
142+
)
143+
144+
# example using explicit bbox
145+
gen_sflux_era5(
146+
startdate=datetime(2020, 12, 1), rnday=3, n_chunks=1,
147+
bbox=[[-76.0, 34.0], [-70.0, 40.0]],
148+
)

0 commit comments

Comments
 (0)