diff --git a/RAPIDpy/gis/weight.py b/RAPIDpy/gis/weight.py
index f17c020..f2eee8d 100644
--- a/RAPIDpy/gis/weight.py
+++ b/RAPIDpy/gis/weight.py
@@ -83,7 +83,8 @@ def find_nearest(array, value):
 def rtree_create_weight_table(lsm_grid_lat, lsm_grid_lon,
                               in_catchment_shapefile, river_id,
                               in_rapid_connect, out_weight_table,
-                              file_geodatabase=None, area_id=None):
+                              file_geodatabase=None, area_id=None,
+                              lsm_grid_mask=None):
     """
     Create Weight Table for Land Surface Model Grids
     """
@@ -242,6 +243,15 @@ def rtree_create_weight_table(lsm_grid_lat, lsm_grid_lon,
                     lsm_grid_lon,
                     lsm_grid_feature_list[sub_lsm_grid_pos]['lat'],
                     lsm_grid_feature_list[sub_lsm_grid_pos]['lon'])
+
+                if lsm_grid_mask is not None:
+                    # where the land-sea mask (land fraction) of the grid
+                    # cell is nonzero, divide the contributing area by it
+                    mask_value = lsm_grid_mask[int(index_lsm_grid_lat),
+                                               int(index_lsm_grid_lon)]
+                    if mask_value > 0:
+                        poly_area /= mask_value
+
                 intersect_grid_info_list.append({
                     'rivid': rapid_connect_rivid,
                     'area': poly_area,
@@ -257,7 +263,6 @@ def rtree_create_weight_table(lsm_grid_lat, lsm_grid_lon,
         # If no intersection found, add dummy row
         if npoints <= 0:
             connectwriter.writerow([rapid_connect_rivid] + dummy_row_end)
-
         for intersect_grid_info in intersect_grid_info_list:
             connectwriter.writerow([
                 intersect_grid_info['rivid'],
@@ -280,7 +285,8 @@ def CreateWeightTableECMWF(in_ecmwf_nc,
                            in_connectivity_file,
                            out_weight_table,
                            area_id=None,
-                           file_geodatabase=None):
+                           file_geodatabase=None,
+                           in_ecmwf_mask_var=None):
     """
     Create Weight Table for ECMWF Grids
@@ -338,12 +344,25 @@ def CreateWeightTableECMWF(in_ecmwf_nc,
         (data_ecmwf_nc.variables[in_ecmwf_lon_var][:] + 180) % 360 - 180
     # assume [-90, 90]
     ecmwf_lat = data_ecmwf_nc.variables[in_ecmwf_lat_var][:]
+
+    if in_ecmwf_mask_var is not None:
+        if in_ecmwf_mask_var in variables_list:
+            ecmwf_mask = data_ecmwf_nc.variables[in_ecmwf_mask_var][0, :, :]
+        else:
+            print('Variable "{0}" not found in {1}.'.format(
+                in_ecmwf_mask_var, in_ecmwf_nc))
+            print('Continuing with no land mask.')
+            ecmwf_mask = None
+    else:
+        ecmwf_mask = None
+
     data_ecmwf_nc.close()

     rtree_create_weight_table(ecmwf_lat, ecmwf_lon,
                               in_catchment_shapefile, river_id,
                               in_connectivity_file, out_weight_table,
-                              file_geodatabase, area_id)
+                              file_geodatabase, area_id,
+                              lsm_grid_mask=ecmwf_mask)


 def CreateWeightTableLDAS(in_ldas_nc,
diff --git a/RAPIDpy/inflow/CreateInflowFileFromERARunoff.py b/RAPIDpy/inflow/CreateInflowFileFromERARunoff.py
new file mode 100644
index 0000000..50a2e8b
--- /dev/null
+++ b/RAPIDpy/inflow/CreateInflowFileFromERARunoff.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+"""
+    CreateInflowFileFromERARunoff.py
+    RAPIDpy
+
+    Created by Alan D. Snow, 2015
+    Adapted from CreateInflowFileFromECMWFRunoff.py.
+    License: BSD-3-Clause
+"""
+from netCDF4 import Dataset
+
+from .CreateInflowFileFromGriddedRunoff import \
+    CreateInflowFileFromGriddedRunoff
+
+
+class CreateInflowFileFromERARunoff(CreateInflowFileFromGriddedRunoff):
+    """Create Inflow File From ERA Runoff
+
+    Creates RAPID NetCDF input of water inflow based on
+    ERA runoff and previously created weight table.
+    """
+    land_surface_model_name = "ERA"
+    header_wt = ['rivid', 'area_sqm', 'lon_index', 'lat_index', 'npoints']
+    dims_oi = [['lon', 'lat', 'time'],
+               ['longitude', 'latitude', 'time'],
+               ['time', 'lon', 'lat'],
+               ['time', 'longitude', 'latitude'],
+               ['latitude', 'longitude', 'time'],
+               ['time', 'latitude', 'longitude']]
+    vars_oi = [['lon', 'lat', 'time', 'RO'],
+               ['lon', 'lat', 'time', 'ro'],
+               ['time', 'lon', 'lat', 'RO'],
+               ['time', 'lon', 'lat', 'ro'],
+               ['time', 'longitude', 'latitude', 'RO'],
+               ['time', 'longitude', 'latitude', 'ro'],
+               ['longitude', 'latitude', 'time', 'RO'],
+               ['longitude', 'latitude', 'time', 'ro'],
+               ['latitude', 'longitude', 'time', 'RO'],
+               ['latitude', 'longitude', 'time', 'ro'],
+               ['latitude', 'longitude', 'RO', 'time'],
+               ['latitude', 'longitude', 'ro', 'time'],
+               ['time', 'latitude', 'longitude', 'RO'],
+               ['time', 'latitude', 'longitude', 'ro']]
+    length_time = {"Daily": 1, "3-Hourly": 8, "1-Hourly": 24}
+
+    def __init__(self):
+        """Define the attributes to look for"""
+        self.runoff_vars = ['ro']
+        super(CreateInflowFileFromERARunoff, self).__init__()
+
+    def data_validation(self, in_nc):
+        """Check the necessary dimensions and variables in the input
+        netcdf data"""
+        data_nc = Dataset(in_nc)
+
+        dims = list(data_nc.dimensions)
+
+        if dims not in self.dims_oi:
+            data_nc.close()
+            raise Exception("{0} {1}".format(self.error_messages[1], dims))
+
+        nc_vars = list(data_nc.variables)
+
+        if nc_vars in self.vars_oi:
+            # the runoff variable is the one named 'RO' or 'ro',
+            # regardless of its position in the variable list
+            self.runoff_vars = [var for var in nc_vars
+                                if var.lower() == 'ro']
+        else:
+            data_nc.close()
+            raise Exception("{0} {1}".format(self.error_messages[2], nc_vars))
+        data_nc.close()
diff --git a/RAPIDpy/inflow/CreateInflowFileFromGriddedRunoff.py b/RAPIDpy/inflow/CreateInflowFileFromGriddedRunoff.py
index b299ab7..4b62011 100644
--- a/RAPIDpy/inflow/CreateInflowFileFromGriddedRunoff.py
+++ b/RAPIDpy/inflow/CreateInflowFileFromGriddedRunoff.py
@@ -481,6 +481,26 @@ def execute(self, nc_file_list, index_list, in_weight_table,
                         np.concatenate([ro_first_half, ro_second_half]),
                         area_sqm_npoints)
+            elif grid_type == 't1279':
+                # ERA Interim T1279 (Gaussian grid) - data is cumulative
+                # four accumulation steps per day, resetting at hour 12
+                # from time 6/12
+                # (time zero not included, so assumed to be zero)
+                ro_first_half = \
+                    np.concatenate([data_goal[0:1, ],
+                                    np.subtract(data_goal[1:2, ],
+                                                data_goal[0:1, ])])
+                # from time 18/24
+                # (time restarts at time 12, assumed to be zero)
+                ro_second_half = \
+                    np.concatenate([data_goal[2:3, ],
+                                    np.subtract(data_goal[3:, ],
+                                                data_goal[2:3, ])])
+                ro_stream = \
+                    np.multiply(
+                        np.concatenate([ro_first_half, ro_second_half]),
+                        area_sqm_npoints)
+
             else:
                 ro_stream = data_goal * area_sqm_npoints * \
                     conversion_factor
diff --git a/RAPIDpy/inflow/lsm_rapid_process.py b/RAPIDpy/inflow/lsm_rapid_process.py
index 6088f7a..6ef861d 100644
--- a/RAPIDpy/inflow/lsm_rapid_process.py
+++ b/RAPIDpy/inflow/lsm_rapid_process.py
@@ -20,8 +20,8 @@
 # local imports
 from ..rapid import RAPID
-from .CreateInflowFileFromERAInterimRunoff import \
-    CreateInflowFileFromERAInterimRunoff
+from .CreateInflowFileFromERARunoff import \
+    CreateInflowFileFromERARunoff
 from .CreateInflowFileFromLDASRunoff import CreateInflowFileFromLDASRunoff
 from .CreateInflowFileFromWRFHydroRunoff import \
     CreateInflowFileFromWRFHydroRunoff
@@ -66,12 +63,11 @@ def generate_inflows_from_runoff(args):
     index_string = "Index: {0}".format(file_index_list[0])
     if len(file_index_list) > 1:
         index_string += " to {0}".format(file_index_list[-1])
-    print(index_string)
     runoff_string = "File(s): {0}".format(runoff_file_list[0])
     if len(runoff_file_list) > 1:
         runoff_string += " to {0}".format(runoff_file_list[-1])
-    print(runoff_string)
-    print("Converting inflow ...")
+    print("Converting inflow ({0}; {1}) ...".format(runoff_string,
+                                                    index_string))
     try:
         rapid_inflow_tool.execute(nc_file_list=runoff_file_list,
                                   index_list=file_index_list,
@@ -94,6 +89,14 @@
 # UTILITY FUNCTIONS
 # -----------------------------------------------------------------------------
 DEFAULT_LSM_INPUTS = {
+    'llera5': {
+        'file_datetime_re_pattern': r'\d{8}',
+        'file_datetime_pattern': "%Y%m%d",
+    },
+    't1279': {
+        'file_datetime_re_pattern': r'\d{8}',
+        'file_datetime_pattern': "%Y%m%d",
+    },
     't255': {
         'file_datetime_re_pattern': r'\d{8}',
         'file_datetime_pattern': "%Y%m%d",
@@ -232,7 +235,6 @@ def identify_lsm_grid(lsm_grid_path):
     elif 'X' in var_list:
         # FLDAS
         longitude_var = 'X'
-
     time_var = None
     if 'time' in var_list:
         time_var = 'time'
@@ -280,6 +282,6 @@ def identify_lsm_grid(lsm_grid_path):
         elif var.lower() == "ro":
-            # ERA Interim
+            # ERA Interim / ERA5 (both 'ro' and 'RO' match after lower())
             total_runoff_var = var
         elif var == "total runoff":
             # CMIP5 data
             total_runoff_var = var
@@ -312,9 +317,11 @@ def identify_lsm_grid(lsm_grid_path):
         runoff_vars = [surface_runoff_var,
                        subsurface_runoff_var]

+    print("Checking grid type:", total_runoff_var.lower(), institution)
     if institution == "European Centre for Medium-Range Weather Forecasts" \
             or total_runoff_var.lower() == "ro":
         # these are the ECMWF models
+        print("This is an ECMWF model")
         if lat_dim_size == 361 and lon_dim_size == 720:
             print("Runoff file identified as ERA Interim Low Res (T255) GRID")
             # A) ERA Interim Low Res (T255)
@@ -343,17 +350,46 @@ def identify_lsm_grid(lsm_grid_path):
             # Downloaded as 1.125 degree grid
             # dimensions:
             # longitude = 320 ;
+            lsm_file_data["grid_type"] = 't511'
+        elif lat_dim_size == 161 and lon_dim_size == 320:
+            print("Runoff file identified as ERA 20CM (T159) GRID")
+            # C) ERA 20CM (T159) - 3hr - 10 ensembles
+            # Downloaded as 1.125 degree grid
+            # dimensions:
+            # longitude = 320 ;
             # latitude = 161 ;
             lsm_file_data["description"] = "ERA 20CM (T159 Grid)"
             lsm_file_data["weight_file_name"] = r'weight_era_t159\.csv'
             lsm_file_data["model_name"] = "era_20cm"
             lsm_file_data["grid_type"] = 't159'
+        elif lat_dim_size == 721 and lon_dim_size == 1440:
+            print("Runoff file identified as ERA5 0.25 degree lat/lon GRID")
+            # D) ERA5 (0.25 degree lat/lon grid) - 1hr
+            # Downloaded as 0.25 degree grid
+            # dimensions:
+            # longitude = 1440 ;
+            # latitude = 721 ;
+            lsm_file_data["description"] = "ERA5 (LL Grid)"
+            lsm_file_data["weight_file_name"] = r'weight_era5_ll\.csv'
+            lsm_file_data["model_name"] = "era5"
+            lsm_file_data["grid_type"] = 'llera5'
+        elif lat_dim_size == 1280 and lon_dim_size == 2576:
+            print("Runoff file identified as ERAI Gaussian (T1279) GRID")
+            # E) ERA Interim (T1279 Gaussian) - Daily
+            # Downloaded as Gaussian grid
+            # dimensions:
+            # longitude = 2576 ;
+            # latitude = 1280 ;
+            lsm_file_data["description"] = "ERAI (Gaussian Grid)"
+            lsm_file_data["weight_file_name"] = r'weight_erai_1279\.csv'
+            lsm_file_data["model_name"] = "erai_1279"
+            lsm_file_data["grid_type"] = 't1279'
         else:
             lsm_example_file.close()
             raise Exception("Unsupported ECMWF grid.")

         lsm_file_data["rapid_inflow_tool"] = \
-            CreateInflowFileFromERAInterimRunoff()
+            CreateInflowFileFromERARunoff()

     elif institution == "NASA GSFC":
         if title == "GLDAS2.0 LIS land surface model output":
@@ -530,7 +573,7 @@ def determine_start_end_timestep(lsm_file_list,
                                  file_datetime_pattern) \
                 + timedelta(seconds=(file_size_time-1) * time_step)
     else:
-        with open_mfdataset(lsm_file_list, #pangaea.open_mfdataset
+        with open_mfdataset(lsm_file_list,
                             lat_var=lsm_grid_info['latitude_var'],
                             lon_var=lsm_grid_info['longitude_var'],
                             time_var=lsm_grid_info['time_var'],
@@ -592,7 +636,13 @@ def run_lsm_rapid_process(rapid_executable_location,
                           modeling_institution="US Army Engineer Research "
                                                "and Development Center",
                           convert_one_hour_to_three=False,
-                          expected_time_step=None):
+                          expected_time_step=None,
+                          BS_opt_dam='',
+                          IS_dam_tot=0,
+                          IS_dam_use=0,
+                          dam_tot_id_file='',
+                          dam_use_id_file='',
+                          dam_file=''):
     # pylint: disable=anomalous-backslash-in-string
     """
     This is the main process to generate inflow for RAPID and to run RAPID.
@@ -802,7 +852,6 @@ def run_lsm_rapid_process(rapid_executable_location, lsm_file_list.append( os.path.join(walkdir_info[0], lsm_file)) lsm_file_list = sorted(lsm_file_list) - # IDENTIFY THE GRID lsm_file_data = identify_lsm_grid(lsm_file_list[0]) @@ -815,7 +864,6 @@ def run_lsm_rapid_process(rapid_executable_location, DEFAULT_LSM_INPUTS[lsm_file_data['grid_type']][ 'file_datetime_pattern'] file_re_match = re.compile(file_datetime_re_pattern) - # get subset based on time bounds if simulation_start_datetime is not None: print("Filtering files by datetime ...") @@ -828,9 +876,7 @@ def run_lsm_rapid_process(rapid_executable_location, break if file_date >= simulation_start_datetime: lsm_file_list_subset.append(lsm_file) - lsm_file_list = sorted(lsm_file_list_subset) - print("Running from {0} to {1}".format(lsm_file_list[0], lsm_file_list[-1])) @@ -843,7 +889,6 @@ def run_lsm_rapid_process(rapid_executable_location, file_datetime_pattern=file_datetime_pattern, expected_time_step=expected_time_step, lsm_grid_info=lsm_file_data) - # VALIDATING INPUT IF DIVIDING BY 3 if (lsm_file_data['grid_type'] in ('nldas', 'lis', 'joules')) \ and convert_one_hour_to_three: @@ -863,7 +908,6 @@ def run_lsm_rapid_process(rapid_executable_location, actual_simulation_start_datetime, actual_simulation_end_datetime, ensemble_file_ending) - # run LSM processes for master_watershed_input_directory, \ master_watershed_output_directory in rapid_directories: @@ -939,7 +983,7 @@ def run_lsm_rapid_process(rapid_executable_location, # generate_inflows_from_runoff(( # cpu_grouped_file_list, # partition_index_list[loop_index], -# lsm_file_data['weight_table_file'], +# weight_table_file, #m_file_data['weight_table_file'], # lsm_file_data['grid_type'], # master_rapid_runoff_file, # lsm_file_data['rapid_inflow_tool'], @@ -959,7 +1003,13 @@ def run_lsm_rapid_process(rapid_executable_location, ZS_TauR=time_step, ZS_dtR=15 * 60, ZS_TauM=total_num_time_steps * time_step, - ZS_dtM=time_step) + ZS_dtM=time_step, + BS_opt_dam=BS_opt_dam, + IS_dam_tot=IS_dam_tot, + IS_dam_use=IS_dam_use, + dam_tot_id_file=dam_tot_id_file, + dam_use_id_file=dam_use_id_file, + dam_file=dam_file) if initial_flows_file and os.path.exists(initial_flows_file): rapid_manager.update_parameters( @@ -1027,7 +1077,6 @@ def run_lsm_rapid_process(rapid_executable_location, num_cpus=num_cpus, storm_duration_days=storm_length_days, method=return_period_method) - # generate seasonal averages file if generate_seasonal_averages_file and \ os.path.exists(lsm_rapid_output_file) and \ diff --git a/README.md b/README.md index effd289..30f2eff 100644 --- a/README.md +++ b/README.md @@ -7,9 +7,9 @@ More information about installation and the input parameters for RAPID can be fo The source code for RAPID is located at https://github.com/c-h-david/rapid/. -[![DOI](https://zenodo.org/badge/19918/erdc-cm/RAPIDpy.svg)](https://zenodo.org/badge/latestdoi/19918/erdc-cm/RAPIDpy) +[![DOI](https://zenodo.org/badge/19918/erdc/RAPIDpy.svg)](https://zenodo.org/badge/latestdoi/19918/erdc/RAPIDpy) -[![License (3-Clause BSD)](https://img.shields.io/badge/license-BSD%203--Clause-yellow.svg)](https://github.com/erdc-cm/RAPIDpy/blob/master/LICENSE) +[![License (3-Clause BSD)](https://img.shields.io/badge/license-BSD%203--Clause-yellow.svg)](https://github.com/erdc/RAPIDpy/blob/master/LICENSE) [![PyPI version](https://badge.fury.io/py/RAPIDpy.svg)](https://badge.fury.io/py/RAPIDpy) @@ -60,5 +60,5 @@ Ahmad A Tavakoly. (2017). 
RAPID input files corresponding to the Mississippi Riv ## Other tools to prepare input for RAPID - For ESRI users: https://github.com/Esri/python-toolbox-for-rapid -- Modified version of the ESRI RAPID Toolbox: https://github.com/erdc-cm/python-toolbox-for-rapid +- Modified version of the ESRI RAPID Toolbox: https://github.com/erdc/python-toolbox-for-rapid - For the NHDPlus dataset: https://github.com/c-h-david/RRR diff --git a/docs/gis_stream_network.rst b/docs/gis_stream_network.rst index 45857ef..0d5b9b4 100644 --- a/docs/gis_stream_network.rst +++ b/docs/gis_stream_network.rst @@ -7,7 +7,7 @@ Using ArcHydro to Generate Stream Network See: - https://github.com/Esri/python-toolbox-for-rapid -- https://github.com/erdc-cm/python-toolbox-for-rapid +- https://github.com/erdc/python-toolbox-for-rapid Using TauDEM to Generate Stream Network --------------------------------------- diff --git a/docs/index.rst b/docs/index.rst index 782a2e3..5aa9c4d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -26,7 +26,7 @@ https://github.com/c-h-david/rapid. .. |Coverage Status| image:: https://coveralls.io/repos/github/erdc/RAPIDpy/badge.svg?branch=master :target: https://coveralls.io/github/erdc/RAPIDpy .. |License (3-Clause BSD)| image:: https://img.shields.io/badge/license-BSD%203--Clause-yellow.svg - :target: https://github.com/erdc-cm/RAPIDpy/blob/master/LICENSE + :target: https://github.com/erdc/RAPIDpy/blob/master/LICENSE Contents: @@ -90,7 +90,7 @@ Other tools to prepare input for RAPID --------------------------------------- - For ESRI users: https://github.com/Esri/python-toolbox-for-rapid -- Modified version of the ESRI RAPID Toolbox: https://github.com/erdc-cm/python-toolbox-for-rapid +- Modified version of the ESRI RAPID Toolbox: https://github.com/erdc/python-toolbox-for-rapid - For the NHDPlus dataset: https://github.com/c-h-david/RRR diff --git a/docs/installation.rst b/docs/installation.rst index 563cfa6..364ff74 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -79,11 +79,11 @@ or from https://conda.io/miniconda.html. This is how you get the most up-to-date version of the code. -.. note:: If you don't have git, you can download the code from https://github.com/erdc-cm/RAPIDpy +.. note:: If you don't have git, you can download the code from https://github.com/erdc/RAPIDpy :: - $ git clone https://github.com/erdc-cm/RAPIDpy.git + $ git clone https://github.com/erdc/RAPIDpy.git $ cd RAPIDpy $ conda env create -f rapidpy_env.yml $ conda activate rapidpy_env @@ -93,7 +93,7 @@ To develop on the latest version: :: - $ git clone https://github.com/erdc-cm/RAPIDpy.git + $ git clone https://github.com/erdc/RAPIDpy.git $ cd RAPIDpy $ conda env create -f rapidpy_env.yml $ conda activate rapidpy_env diff --git a/setup.py b/setup.py index 3bff51f..ef0b48f 100644 --- a/setup.py +++ b/setup.py @@ -11,12 +11,12 @@ 'parameters for RAPID can be found at http://rapid-hub.org.' ' The source code for RAPID is located at ' 'https://github.com/c-h-david/rapid/. \n\n' - '.. image:: https://zenodo.org/badge/19918/erdc-cm/RAPIDpy.svg \n' - ' :target: https://zenodo.org/badge/latestdoi/19918/erdc-cm/RAPIDpy', + '.. 
image:: https://zenodo.org/badge/19918/erdc/RAPIDpy.svg \n' + ' :target: https://zenodo.org/badge/latestdoi/19918/erdc/RAPIDpy', keywords='RAPID', author='Alan Dee Snow', author_email='alan.d.snow@usace.army.mil', - url='https://github.com/erdc-cm/RAPIDpy', + url='https://github.com/erdc/RAPIDpy', license='BSD 3-Clause', packages=find_packages(), package_data={'': ['gis/lsm_grids/*.nc']}, diff --git a/tests/compare/gis/mendocino_nhdplus_catchment/rapid_connectivity_mendocino_sample.csv b/tests/compare/gis/mendocino_nhdplus_catchment/rapid_connectivity_mendocino_sample.csv new file mode 100755 index 0000000..26d9523 --- /dev/null +++ b/tests/compare/gis/mendocino_nhdplus_catchment/rapid_connectivity_mendocino_sample.csv @@ -0,0 +1,6 @@ +8267669,8267695,0,0,0,0,0 +8267671,8267695,0,0,0,0,0 +8267697,8267725,0,0,0,0,0 +8267723,8267745,0,0,0,0,0 +8267695,8267725,2,8267669,8267671,0,0 +8267725,8267745,2,8267695,8267697,0,0 diff --git a/tests/compare/gis/mendocino_nhdplus_catchment/weight_mendocino_era5_land_mask.csv b/tests/compare/gis/mendocino_nhdplus_catchment/weight_mendocino_era5_land_mask.csv new file mode 100644 index 0000000..f453973 --- /dev/null +++ b/tests/compare/gis/mendocino_nhdplus_catchment/weight_mendocino_era5_land_mask.csv @@ -0,0 +1,9 @@ +rivid,area_sqm,lon_index,lat_index,npoints,lsm_grid_lon,lsm_grid_lat +8267669,1022424.4461225494,7,2,1,-123.25,39.5 +8267671,678361.3599336375,7,2,2,-123.25,39.5 +8267671,313143.12121458724,7,3,2,-123.25,39.25 +8267697,887055.8385729626,7,3,1,-123.25,39.25 +8267723,1539436.066144096,7,3,1,-123.25,39.25 +8267695,710034.1467391531,7,2,2,-123.25,39.5 +8267695,4245512.985625456,7,3,2,-123.25,39.25 +8267725,2136545.1570262704,7,3,1,-123.25,39.25 diff --git a/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.cpg b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.cpg new file mode 100755 index 0000000..3ad133c --- /dev/null +++ b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.cpg @@ -0,0 +1 @@ +UTF-8 \ No newline at end of file diff --git a/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.dbf b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.dbf new file mode 100755 index 0000000..bda85fa Binary files /dev/null and b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.dbf differ diff --git a/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.prj b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.prj new file mode 100755 index 0000000..5ded4bc --- /dev/null +++ b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.prj @@ -0,0 +1 @@ +GEOGCS["GCS_North_American_1983",DATUM["D_North_American_1983",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]] \ No newline at end of file diff --git a/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.sbn b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.sbn new file mode 100755 index 0000000..f4794b4 Binary files /dev/null and b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.sbn differ diff --git a/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.sbx 
b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.sbx new file mode 100755 index 0000000..cb6a6e6 Binary files /dev/null and b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.sbx differ diff --git a/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.shp b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.shp new file mode 100755 index 0000000..eeb993e Binary files /dev/null and b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.shp differ diff --git a/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.shx b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.shx new file mode 100755 index 0000000..c7aed03 Binary files /dev/null and b/tests/data/gis/mendocino_nhdplus_catchment/NHDCat_mendocino_watershed_hopland_sample.shx differ diff --git a/tests/data/lsm_grids/era5/era5_land-sea_mask_mendocino_subset.nc b/tests/data/lsm_grids/era5/era5_land-sea_mask_mendocino_subset.nc new file mode 100755 index 0000000..6e51081 Binary files /dev/null and b/tests/data/lsm_grids/era5/era5_land-sea_mask_mendocino_subset.nc differ diff --git a/tests/test_gis.py b/tests/test_gis.py index cbe7285..933bff7 100644 --- a/tests/test_gis.py +++ b/tests/test_gis.py @@ -765,3 +765,37 @@ def test_weight_table_with_area_id(): generated_weight_table_file_solution)) remove_files(generated_weight_table_file) + +def test_gen_weight_table_era5_land_mask(): + """ + Checks generating weight table for ERA5 grid with land mask. + """ + print("TEST 18: TEST GENERATE WEIGHT TABLE FOR ERA5 GRID WITH LAND MASK.") + generated_weight_table_file = os.path.join( + OUTPUT_DATA_PATH, "weight_mendocino_era5_land_mask.csv") + + #rapid_connect + rapid_connect_file = os.path.join( + COMPARE_DATA_PATH, "mendocino_nhdplus_catchment", + "rapid_connectivity_mendocino_sample.csv") + + lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "era5", "era5_land-sea_mask_mendocino_subset.nc") + + CreateWeightTableECMWF(in_ecmwf_nc=lsm_grid, + in_catchment_shapefile=os.path.join( + GIS_INPUT_DATA_PATH, + 'mendocino_nhdplus_catchment', + 'NHDCat_mendocino_watershed_hopland_sample.shp'), + river_id="FEATUREID", + in_connectivity_file=rapid_connect_file, + out_weight_table=generated_weight_table_file, + in_ecmwf_mask_var='lsm') + + generated_weight_table_file_solution = os.path.join( + COMPARE_DATA_PATH, 'mendocino_nhdplus_catchment', + 'weight_mendocino_era5_land_mask.csv') + + assert (compare_csv_decimal_files(generated_weight_table_file, + generated_weight_table_file_solution)) + + remove_files(generated_weight_table_file)
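
For reference, a minimal usage sketch of the two features introduced above: building an ECMWF weight table with the new in_ecmwf_mask_var option, and passing the new dam-related arguments through run_lsm_rapid_process. All paths, ID files, and numeric values below are placeholders; the import paths and the rapid_io_files_location/lsm_data_location argument names are assumed from RAPIDpy's existing interface rather than taken from this patch.

# Hedged usage sketch; every path below is a placeholder, not a file
# shipped with this change.
from RAPIDpy.gis.weight import CreateWeightTableECMWF
from RAPIDpy.inflow import run_lsm_rapid_process

# Weight table scaled by the ERA5 land-sea mask variable 'lsm'
# (mirrors test_gen_weight_table_era5_land_mask above).
CreateWeightTableECMWF(
    in_ecmwf_nc='era5_land-sea_mask_subset.nc',      # placeholder path
    in_catchment_shapefile='catchments.shp',         # placeholder path
    river_id='FEATUREID',
    in_connectivity_file='rapid_connectivity.csv',   # placeholder path
    out_weight_table='weight_era5_ll.csv',
    in_ecmwf_mask_var='lsm')

# Inflow generation plus a RAPID run with the new dam pass-through options.
# BS_opt_dam is assumed to take RAPID's Fortran-style logical string;
# the dam counts and ID/parameter files are illustrative only.
run_lsm_rapid_process(
    rapid_executable_location='/path/to/rapid',      # placeholder
    rapid_io_files_location='/path/to/rapid-io',     # assumed argument name
    lsm_data_location='/path/to/era5_runoff',        # assumed argument name
    BS_opt_dam='.true.',
    IS_dam_tot=5,
    IS_dam_use=5,
    dam_tot_id_file='dam_tot_id.csv',
    dam_use_id_file='dam_use_id.csv',
    dam_file='dam_file.csv')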