5 changes: 4 additions & 1 deletion .gitignore
@@ -56,16 +56,19 @@ tests/_LargeFileStash
# Temporary files
_TutorialData/


# Specific files
run.py
tests/GLWACSO/HSP2results/hspp007.hdf
tests/GLWACSO/HSPFresults/hspf006.HBN
tests/GLWACSO/HSP2results/hspp007.uci
tests/test_report_conversion.html

# Omit big files
tests/land_spec/hwmA51800.h5
tests/testcbp/HSP2results/PL3_5250_0001.h5
tests/testcbp/HSP2results/PL3_5250_specl.h5
tests/testcbp/HSP2results/*.csv
tests/test10/HSP2results/test10.h5

# R files
.Rdata
3 changes: 1 addition & 2 deletions HSP2/HYDR.py
@@ -155,7 +155,6 @@ def hydr(io_manager, siminfo, uci, ts, ftables, state):
# initialize the hydr paths in case they don't already reside here
hydr_init_ix(state_ix, state_paths, state['domain'])
op_tokens = state['op_tokens']
print("state_ix is type", type(state_ix))
#######################################################################################

# Do the simulation with _hydr_ (ie run reaches simulation code)
@@ -173,7 +172,7 @@ def hydr(io_manager, siminfo, uci, ts, ftables, state):
return errors, ERRMSGS


@njit(cache=True)
@njit
def _hydr_(ui, ts, COLIND, OUTDGT, rowsFT, funct, Olabels, OVOLlabels, state_info, state_paths, state_ix, dict_ix, ts_ix, state_step_hydr, op_tokens, model_exec_list):
errors = zeros(int(ui['errlen'])).astype(int64)

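A note on the `@njit(cache=True)` → `@njit` change above: numba's `cache=True` persists compiled machine code to disk (under `__pycache__` by default) and can serve stale builds while signatures, globals, or container types are still in flux, which is presumably why caching is dropped here. A minimal sketch of the two decorators (function names invented):

```python
from numba import njit

# cache=True writes the compiled result next to the source file and
# reuses it across interpreter sessions -- faster startup, but the
# on-disk cache can go stale during active refactoring.
@njit(cache=True)
def cached_add(a, b):
    return a + b

@njit  # recompiled each session; no stale-cache risk during development
def uncached_add(a, b):
    return a + b

print(cached_add(1.0, 2.0), uncached_add(1.0, 2.0))  # 3.0 3.0
```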
113 changes: 47 additions & 66 deletions HSP2/om.py
@@ -13,7 +13,7 @@
import time
from numba.typed import Dict
from numpy import zeros, int32
from numba import int8, float32, njit, types, typed # import the types
from numba import int8, float64, njit, types, typed # import the types
import random # this is only used for a demo so may be deprecated
from HSP2.state import *
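The import switch from float32 to float64 above matters because the typed containers this module builds appear to hold simulation state; float64 matches numpy's default dtype, so values round-trip without silent precision loss. A small sketch of a state_ix-style container (the variable name comes from the diff, the values are invented):

```python
from numba import types
from numba.typed import Dict

# integer state indexes -> float64 state values; float64 avoids
# truncating numpy doubles on the way into the typed container
state_ix = Dict.empty(key_type=types.int64, value_type=types.float64)
state_ix[1] = 3.141592653589793
print(state_ix[1])  # full double precision preserved
```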

Expand Down Expand Up @@ -55,11 +55,11 @@ def is_float_digit(n: str) -> bool:
# Import Code Classes
from HSP2.om_model_object import *
from HSP2.om_sim_timer import *
#from HSP2.om_equation import *
from HSP2.om_equation import *
from HSP2.om_model_linkage import *
from HSP2.om_special_action import *
#from HSP2.om_data_matrix import *
#from HSP2.om_model_broadcast import *
from HSP2.om_model_broadcast import *
#from HSP2.om_simple_channel import *
#from HSP2.om_impoundment import *
from HSP2.utilities import versions, get_timeseries, expand_timeseries_names, save_timeseries, get_gener_timeseries
@@ -78,7 +78,6 @@ def init_om_dicts():
def state_load_om_json(state, io_manager, siminfo):
# - model objects defined in file named '[model h5 base].json' -- this will populate an array of object definitions that will
# be loadable by "model_loader_recursive()"
model_data = state['model_data']
# JSON file would be in same path as hdf5
hdf5_path = io_manager._input.file_path
(fbase, fext) = os.path.splitext(hdf5_path)
@@ -90,8 +89,10 @@ def state_load_om_json(state, io_manager, siminfo):
jfile = open(fjson)
json_data = json.load(jfile)
# dict.update() combines the arg dict with the base
model_data.update(json_data)
state['model_data'] = model_data
state['model_data'].update(json_data)
# merge in the json siminfo data
if 'siminfo' in state['model_data'].keys():
siminfo.update(state['model_data']['siminfo'])
return

def state_load_om_python(state, io_manager, siminfo):
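The rewritten block above updates state['model_data'] in place and then folds any 'siminfo' section from the JSON into the runtime siminfo. Both moves rely on dict.update(), which overwrites existing keys with incoming values, so JSON-supplied settings win on collision. A toy illustration (keys and values invented):

```python
# dict.update() copies keys from its argument into the receiver,
# overwriting on collision -- JSON values therefore take precedence.
siminfo = {'steps': 100, 'delt': 60}
model_data = {'RCHRES_R001': {'object_class': 'ModelObject'},
              'siminfo': {'delt': 15}}

if 'siminfo' in model_data.keys():
    siminfo.update(model_data['siminfo'])

print(siminfo)  # {'steps': 100, 'delt': 15} -- delt overridden by JSON
```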
@@ -133,30 +134,42 @@ def state_load_dynamics_om(state, io_manager, siminfo):
state_load_om_json(state, io_manager, siminfo)
return

def state_om_model_run_prep(state, io_manager, siminfo):
def state_om_model_root_object(state, siminfo):
# Create the base that everything is added to. this object does nothing except host the rest.
model_root_object = ModelObject("")
# set up the timer as the first element
timer = SimTimer('timer', model_root_object, siminfo)

if 'model_root_object' not in state.keys():
model_root_object = ModelObject("")
state['model_root_object'] = model_root_object
# set up the timer as the first element
if '/STATE/timer' not in state['state_paths'].keys():
timer = SimTimer('timer', model_root_object, siminfo)


def state_om_model_run_prep(state, io_manager, siminfo):
# ensure the model base is set
state_om_model_root_object(state, siminfo)
# now instantiate and link objects
# state['model_data'] has already been prepopulated from json, .py files, hdf5, etc.
model_root_object = state['model_root_object']
model_loader_recursive(state['model_data'], model_root_object)
print("Loaded objects & paths: insures all paths are valid, connects models as inputs")
# both state['model_object_cache'] and the model_object_cache property of the ModelObject class def
# will hold a global repo for this data this may be redundant? They DO point to the same datset?
# since this is a function that accepts state as an argument and these were both set in state_load_dynamics_om
# we can assume they are there and functioning
model_object_cache = state['model_object_cache']
if 'model_object_cache' in state.keys():
model_object_cache = state['model_object_cache']
else:
model_object_cache = ModelObject.model_object_cache
model_path_loader(model_object_cache)
# len() will be 1 if we only have a simtimer, but > 1 if we have a river being added
model_exec_list = []
# put all objects in token form for fast runtime execution and sort according to dependency order
print("Tokenizing models")
if 'ops_data_type' in siminfo.keys():
ModelObject.ops_data_type = siminfo['ops_data_type'] # allow override of data structure settings
ModelObject.op_tokens = ModelObject.make_op_tokens(max(ModelObject.state_ix.keys()) + 1)
model_tokenizer_recursive(model_root_object, model_object_cache, model_exec_list)
op_tokens = ModelObject.op_tokens
print("op_tokens has", len(op_tokens),"elements")
# model_exec_list is the ordered list of component operations
#print("model_exec_list(", len(model_exec_list),"items):", model_exec_list)
# This is used to stash the model_exec_list in the dict_ix; this might be slow, need to verify.
@@ -165,10 +178,18 @@ def state_om_model_run_prep(state, io_manager, siminfo):
state['model_object_cache'] = model_object_cache
state['model_exec_list'] = np.asarray(model_exec_list, dtype="i8")
if ModelObject.ops_data_type == 'ndarray':
state['state_ix'] = np.asarray(list(state['state_ix'].values()), dtype="float64")
state_keyvals = np.asarray(zeros(max(ModelObject.state_ix.keys()) + 1), dtype="float64")
for ix, val in ModelObject.state_ix.items():
state_keyvals[ix] = val
state['state_ix'] = state_keyvals
else:
state['state_ix'] = ModelObject.state_ix
state['op_tokens'] = op_tokens
if len(op_tokens) > 0:
state['state_step_om'] = 'enabled'
print("op_tokens is type", type(op_tokens))
print("state_ix is type", type(state['state_ix']))
print("op_tokens has", len(op_tokens),"elements, with ", len(model_exec_list),"executable elements")
return

# model class reader
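The ndarray branch above fixes a subtle indexing hazard: the old np.asarray(list(state_ix.values())) only preserves index-to-value alignment when the integer keys are contiguous from zero, while the new loop writes each value at its own key in a zeros array sized to max key + 1. A toy comparison (values invented):

```python
import numpy as np

state_ix = {1: 10.0, 2: 20.0, 5: 50.0}  # sparse integer keys

# Old approach: positions drift once the key sequence has gaps.
wrong = np.asarray(list(state_ix.values()), dtype="float64")
print(wrong[2])  # 50.0 lands at position 2, though its key is 5

# New approach: allocate max key + 1 slots and place each value by key.
dense = np.zeros(max(state_ix.keys()) + 1, dtype="float64")
for ix, val in state_ix.items():
    dense[ix] = val
print(dense[5])  # 50.0, addressable by its original state index
```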
@@ -271,6 +292,11 @@ def model_class_translate(model_props, object_class):
if object_class == 'hydroImpSmall':
model_props['object_class'] = 'Impoundment'
print("Handling hydroImpSmall as Impoundment")
# now handle disabled classes - this is temporary to prevent having to comment and uncomment
disabled_classes = {'SimpleChannel', 'Impoundment', 'DataMatrix', 'dataMatrix'}
if model_props['object_class'] in disabled_classes:
print("Disabling class", model_props['object_class'], 'rendering as ModelObject')
model_props['object_class'] = 'ModelObject'

def model_loader_recursive(model_data, container):
k_list = model_data.keys()
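The disabled-classes guard in model_class_translate() above downgrades component types whose modules remain commented out to plain ModelObject, instead of requiring imports to be toggled by hand. A condensed sketch of that flow (the standalone function name is invented):

```python
def translate_class(model_props):
    # legacy name mapping first, then downgrade anything still disabled
    if model_props.get('object_class') == 'hydroImpSmall':
        model_props['object_class'] = 'Impoundment'
    disabled = {'SimpleChannel', 'Impoundment', 'DataMatrix', 'dataMatrix'}
    if model_props['object_class'] in disabled:
        model_props['object_class'] = 'ModelObject'
    return model_props

print(translate_class({'object_class': 'hydroImpSmall'}))
# {'object_class': 'ModelObject'} -- mapped to Impoundment, then disabled
```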
@@ -447,7 +473,7 @@ def pre_step_model(model_exec_list, op_tokens, state_ix, dict_ix, ts_ix, step):
for i in model_exec_list:
if op_tokens[i][0] == 12:
# register type data (like broadcast accumulators)
pass#pre_step_register(op_tokens[i], state_ix, dict_ix)
pre_step_register(op_tokens[i], state_ix)
return

@njit
@@ -467,7 +493,7 @@ def step_one(op_tokens, ops, state_ix, dict_ix, ts_ix, step, debug = 0):
if debug == 1:
print("DEBUG: Operator ID", ops[1], "is op type", ops[0])
if ops[0] == 1:
pass #step_equation(ops, state_ix)
step_equation(ops, state_ix)
elif ops[0] == 2:
# todo: this should be moved into a single function,
# with the conforming name step_matrix(op_tokens, ops, state_ix, dict_ix)
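With the change above, step_one() now evaluates equations (opcode 1) instead of passing. The integer-opcode dispatch it sits in keeps the hot loop numba-friendly, since tables of Python function objects are awkward under @njit. A plain-Python sketch of the pattern (opcode numbers follow the diff; handler bodies are invented placeholders):

```python
def step_equation(ops, state_ix):
    state_ix[ops[1]] = 1.0  # placeholder: evaluate and store a result

def step_sim_timer(ops, state_ix, step):
    state_ix[ops[1]] = float(step)  # placeholder: advance a clock value

def step_one_sketch(ops, state_ix, step):
    # ops[0] is the operation type; ops[1] is the target state index
    if ops[0] == 1:
        step_equation(ops, state_ix)
    elif ops[0] == 5:
        step_sim_timer(ops, state_ix, step)
    # other opcodes (2, 3, 13, 100, ...) dispatch the same way

state = {1: 0.0, 2: 0.0}
step_one_sketch([1, 1], state, 0)
step_one_sketch([5, 2], state, 7)
print(state)  # {1: 1.0, 2: 7.0}
```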
@@ -500,40 +526,14 @@

@njit
def step_model_test(model_exec_list, op_tokens, state_ix, dict_ix, ts_ix, step, debug_step = -1):
val = 0
for i in model_exec_list:
ops = op_tokens[i]
val = 0
if (step == debug_step):
print("Exec'ing step ", step, " model ID", i)
# op_tokens is passed in for ops like matrices that have lookups from other
# locations. All others rely only on ops
# todo: decide if all step_[class() functions should set value in state_ix instead of returning value?
val = 0
if ops[0] == 1:
step_equation(ops, state_ix)
elif ops[0] == 2:
# todo: this should be moved into a single function,
# with the conforming name step_matrix(op_tokens, ops, state_ix, dict_ix)
if (ops[1] == ops[2]):
# this insures a matrix with variables in it is up to date
# only need to do this if the matrix data and matrix config are on same object
# otherwise, the matrix data is an input and has already been evaluated
state_ix[ops[1]] = exec_tbl_values(ops, state_ix, dict_ix)
if (ops[3] > 0):
# this evaluates a single value from a matrix if the matrix is configured to do so.
state_ix[ops[1]] = exec_tbl_eval(op_tokens, ops, state_ix, dict_ix)
elif ops[0] == 3:
step_model_link(ops, state_ix, ts_ix, step)
continue
elif ops[0] == 5:
step_sim_timer(ops, state_ix, dict_ix, ts_ix, step)
elif ops[0] == 9:
continue
elif ops[0] == 13:
step_simple_channel(ops, state_ix, dict_ix, step)
# Op 100 is Basic ACTION in Special Actions
elif ops[0] == 100:
step_special_action(ops, state_ix, dict_ix, step)
step_one(op_tokens, op_tokens[i], state_ix, dict_ix, ts_ix, step, 0)
return

@njit
@@ -557,16 +557,6 @@ def step_model_pcode(model_exec_list, op_tokens, state_info, state_paths, state_
def post_step_model(model_exec_list, op_tokens, state_ix, dict_ix, ts_ix, step):
return

@njit
def test_model(model_exec_list, op_tokens, state_ix, dict_ix, ts_ix, step):
val = 0
for i in model_exec_list:
print(i)
print(op_tokens[i][0])
print(op_tokens[i])
step_one(op_tokens, op_tokens[i], state_ix, dict_ix, ts_ix, step, 0)
return

def step_object(thisobject, step):
# this calls the step for a given model object and timestep
# this is a workaround since the object method ModelObject.step() fails to find the step_one() function ?
@@ -581,28 +571,19 @@ def pre_step_test(model_exec_list, op_tokens, state_ix, dict_ix, ts_ix, step):
# op = op_tokens[i]
if ops[0] == 12:
# register type data (like broadcast accumulators)
pre_step_register(ops, state_ix, dict_ix)
continue
pre_step_register(ops, state_ix)
#continue
#elif ops[0] == 1:
# # register type data (like broadcast accumulators)
# continue
return

@njit
def test_perf(model_exec_list, op_tokens, state_ix, dict_ix, ts_ix, step):
# for i in model_exec_list:
# ops = op_tokens[i]
for ops in op_tokens:
#step_one(op_tokens, ops, state_ix, dict_ix, ts_ix, step)
continue
return

@njit
def iterate_perf(model_exec_list, op_tokens, state_ix, dict_ix, ts_ix, steps, debug_step = -1):
checksum = 0.0
for step in range(steps):
pre_step_test(model_exec_list, op_tokens, state_ix, dict_ix, ts_ix, step)
step_model_test(model_exec_list, op_tokens, state_ix, dict_ix, ts_ix, step, debug_step)
#step_model_test(model_exec_list, op_tokens, state_ix, dict_ix, ts_ix, step, debug_step)
#print("Steps completed", step)
return checksum
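iterate_perf() above is a benchmarking harness; with step_model_test() commented out it times the pre-step phase alone, the phase that prepares register-type state (the nearby comments mention broadcast accumulators) before model operations write into it. A toy sketch of one plausible two-phase cycle (the resetting behavior and all names are assumptions):

```python
def pre_step(accumulators):
    # phase 1: zero register-type values before any contributor writes
    for k in accumulators:
        accumulators[k] = 0.0

def step(accumulators, inflows):
    # phase 2: each contributing operation adds into the shared register
    for k, flow in inflows.items():
        accumulators[k] += flow

accumulators = {'Qin': 0.0}
for step_no in range(3):
    pre_step(accumulators)
    step(accumulators, {'Qin': 2.5})
print(accumulators)  # {'Qin': 2.5} -- reset each step, not carried over
```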
