diff --git a/src/hsp2/hsp2/HYDR.py b/src/hsp2/hsp2/HYDR.py index 91262a0e..727a6570 100644 --- a/src/hsp2/hsp2/HYDR.py +++ b/src/hsp2/hsp2/HYDR.py @@ -146,8 +146,10 @@ def hydr(io_manager, siminfo, uci, ts, ftables, state): # must split dicts out of state Dict since numba cannot handle mixed-type nested Dicts state_ix, dict_ix, ts_ix = state['state_ix'], state['dict_ix'], state['ts_ix'] state_paths = state['state_paths'] - ep_list = ["DEP","IVOL","O1","O2","O3","OVOL1","OVOL2","OVOL3","PRSUPY","RO","ROVOL","SAREA","TAU","USTAR","VOL","VOLEV"] - model_exec_list = model_domain_dependencies(state, state_info['domain'], ep_list) + ep_list = hydr_state_vars() + # note: calling dependencies with 4th arg = True grabs only "runnable" types, which can save time + # in long simulations, as iterating through non-runnables like Constants consumes time. + model_exec_list = model_domain_dependencies(state, state_info['domain'], ep_list, True) model_exec_list = asarray(model_exec_list, dtype="i8") # format for use in numba op_tokens = state['op_tokens'] ####################################################################################### @@ -698,4 +700,10 @@ def expand_HYDR_masslinks(flags, uci, dat, recs): rec['SVOL'] = dat.SVOL recs.append(rec) return recs - \ No newline at end of file + +def hydr_state_vars(): + return ["DEP","IVOL","O1","O2","O3","OVOL1","OVOL2","OVOL3","PRSUPY","RO","ROVOL","SAREA","TAU","USTAR","VOL","VOLEV"] + +def hydr_load_om(state, io_manager, siminfo): + for i in hydr_state_vars(): + state['model_data'][seg_name][i] = {'object_class':'ModelVariable', 'name':i, 'value':0.0} \ No newline at end of file diff --git a/src/hsp2/hsp2/SPECL.py b/src/hsp2/hsp2/SPECL.py index a4207712..56441f7b 100644 --- a/src/hsp2/hsp2/SPECL.py +++ b/src/hsp2/hsp2/SPECL.py @@ -9,7 +9,7 @@ from numba import njit from pandas import DataFrame, date_range -def specl_load_actions(state, io_manager, siminfo): +def specl_load_om(state, io_manager, siminfo): if 'ACTIONS' in 
state['specactions']: dc = state['specactions']['ACTIONS'] for ix in dc.index: @@ -32,7 +32,7 @@ def specl_load_actions(state, io_manager, siminfo): return def specl_load_state(state, io_manager, siminfo): - specl_load_actions(state, io_manager, siminfo) + specl_load_om(state, io_manager, siminfo) # others defined below, like: # specl_load_uvnames(state, io_manager, siminfo) # ... diff --git a/src/hsp2/hsp2/main.py b/src/hsp2/hsp2/main.py index 41d24e49..94ab883f 100644 --- a/src/hsp2/hsp2/main.py +++ b/src/hsp2/hsp2/main.py @@ -69,7 +69,7 @@ def main(io_manager:Union[str, IOManager], saveall:bool=False, jupyterlab:bool=T ####################################################################################### # Set up Things in state that will be used in all modular activities like SPECL state = init_state_dicts() - state_siminfo_hsp2(uci_obj, siminfo) + state_siminfo_hsp2(uci_obj, siminfo, io_manager, state) # Add support for dynamic functions to operate on STATE # - Load any dynamic components if present, and store variables on objects state_load_dynamics_hsp2(state, io_manager, siminfo) diff --git a/src/hsp2/hsp2/om.py b/src/hsp2/hsp2/om.py index 7118ada8..97d30e37 100644 --- a/src/hsp2/hsp2/om.py +++ b/src/hsp2/hsp2/om.py @@ -66,7 +66,7 @@ def model_element_paths(mel, state): # Import Code Classes from hsp2.hsp2.om_model_object import ModelObject, ModelVariable, ModelRegister, pre_step_register from hsp2.hsp2.om_sim_timer import SimTimer, step_sim_timer -#from hsp2.hsp2.om_equation import * +from hsp2.hsp2.om_equation import Equation, step_equation from hsp2.hsp2.om_model_linkage import ModelLinkage, step_model_link from hsp2.hsp2.om_special_action import SpecialAction, step_special_action #from hsp2.hsp2.om_data_matrix import * @@ -148,16 +148,22 @@ def state_load_dynamics_om(state, io_manager, siminfo): def state_om_model_root_object(state, siminfo): # Create the base that everything is added to. this object does nothing except host the rest. 
if 'model_root_object' not in state.keys(): - model_root_object = ModelObject("", False, {}, state) # we give this no name so that it does not interfer with child paths like timer, year, etc (i.e. /STATE/year, ...) + model_root_object = ModelObject(state['model_root_name'], False, {}, state) # we give this no name so that it does not interfer with child paths like timer, year, etc (i.e. /STATE/year, ...) state['model_root_object'] = model_root_object # set up the timer as the first element + model_root_object = state['model_root_object'] if '/STATE/timer' not in state['state_paths'].keys(): timer = SimTimer('timer', model_root_object, siminfo) # add base object for the HSP2 domains and other things already added to state so they can be influenced for (seg_name,seg_path) in state['hsp_segments'].items(): if (seg_path not in state['model_object_cache'].keys()): + # BUG: need to figure out if this is OK, then how do we add attributes to these River Objects + # later when adding from json? + # Can we simply check the model_object_cache during load step? # Create an object shell for this - ModelObject(seg_name, model_root_object) + #river_seg = ModelObject(seg_name, model_root_object, {}, state) + #state['model_object_cache'][river_seg.state_path] = river_seg + pass def state_om_model_run_prep(state, io_manager, siminfo): @@ -166,7 +172,7 @@ def state_om_model_run_prep(state, io_manager, siminfo): # now instantiate and link objects # state['model_data'] has alread been prepopulated from json, .py files, hdf5, etc. model_root_object = state['model_root_object'] - model_loader_recursive(state['model_data'], model_root_object) + model_loader_recursive(state['model_data'], model_root_object, state) # print("Loaded objects & paths: insures all paths are valid, connects models as inputs") # both state['model_object_cache'] and the model_object_cache property of the ModelObject class def # will hold a global repo for this data this may be redundant? 
They DO point to the same datset? @@ -204,18 +210,21 @@ def state_om_model_run_prep(state, io_manager, siminfo): #print("op_tokens is type", type(op_tokens)) #print("state_ix is type", type(state['state_ix'])) + #print("state_paths final", state['state_paths']) #print("op_tokens final", op_tokens) - + # Stash a list of runnables + state['runnables'] = ModelObject.runnable_op_list(state['op_tokens'], list(state['state_paths'].values())) #print("Operational model status:", state['state_step_om']) if len(model_exec_list) > 0: - pass - #print("op_tokens has", len(op_tokens),"elements, with ", len(model_exec_list),"executable elements") + #pass + print("op_tokens has", len(op_tokens),"elements, with ", len(model_exec_list),"executable elements") + print("Exec list:", model_exec_list) return # model class reader # get model class to guess object type in this lib # the parent object must be known -def model_class_loader(model_name, model_props, container = False): +def model_class_loader(model_name, model_props, container = False, state = None): # todo: check first to see if the model_name is an attribute on the container # Use: if hasattr(container, model_name): # if so, we set the value on the container, if not, we create a new subcomp on the container @@ -223,7 +232,7 @@ def model_class_loader(model_name, model_props, container = False): return False if type(model_props) is str: if is_float_digit(model_props): - model_object = ModelVariable(model_name, container, float(model_props) ) + model_object = ModelVariable(model_name, container, {'value':float(model_props)}, state ) return model_object else: return False @@ -240,14 +249,14 @@ def model_class_loader(model_name, model_props, container = False): # for attributes to pass in. # ".get()" will return NoValue if it does not exist or the value. 
if object_class == 'Equation': - model_object = Equation(model_props.get('name'), container, model_props ) + model_object = Equation(model_props.get('name'), container, model_props, state) #remove_used_keys(model_props, elif object_class == 'SimpleChannel': - model_object = SimpleChannel(model_props.get('name'), container, model_props ) + model_object = SimpleChannel(model_props.get('name'), container, model_props, state) elif object_class == 'Impoundment': - model_object = Impoundment(model_props.get('name'), container, model_props ) + model_object = Impoundment(model_props.get('name'), container, model_props, state ) elif object_class == 'Constant': - model_object = ModelVariable(model_props.get('name'), container, model_props.get('value') ) + model_object = ModelVariable(model_props.get('name'), container, {'value':model_props.get('value')} ) elif ( object_class.lower() == 'datamatrix'): # add a matrix with the data, then add a matrix accessor for each required variable has_props = DataMatrix.check_properties(model_props) @@ -255,7 +264,7 @@ def model_class_loader(model_name, model_props, container = False): print("Matrix object must have", DataMatrix.required_properties()) return False # create it - model_object = DataMatrix(model_props.get('name'), container, model_props) + model_object = DataMatrix(model_props.get('name'), container, model_props, state) elif object_class == 'ModelBroadcast': # add a matrix with the data, then add a matrix accessor for each required variable has_props = ModelBroadcast.check_properties(model_props) @@ -263,7 +272,7 @@ def model_class_loader(model_name, model_props, container = False): print("ModelBroadcast object must have", ModelBroadcast.required_properties()) return False # create it - model_object = ModelBroadcast(model_props.get('name'), container, model_props) + model_object = ModelBroadcast(model_props.get('name'), container, model_props, state) elif object_class == 'MicroWatershedModel': # add a matrix with the data, 
then add a matrix accessor for each required variable has_props = MicroWatershedModel.check_properties(model_props) @@ -271,14 +280,14 @@ def model_class_loader(model_name, model_props, container = False): print("MicroWatershedModel object must have", MicroWatershedModel.required_properties()) return False # create it - model_object = DataMatrix(model_props.get('name'), container, model_props) + model_object = DataMatrix(model_props.get('name'), container, model_props, state) elif object_class == 'ModelLinkage': - model_object = ModelLinkage(model_props.get('name'), container, model_props) + model_object = ModelLinkage(model_props.get('name'), container, model_props, state) elif object_class == 'SpecialAction': - model_object = SpecialAction(model_props.get('name'), container, model_props) + model_object = SpecialAction(model_props.get('name'), container, model_props, state) else: #print("Loading", model_props.get('name'), "with object_class", object_class,"as ModelObject") - model_object = ModelObject(model_props.get('name'), container, model_props) + model_object = ModelObject(model_props.get('name'), container, model_props, state) # one way to insure no class attributes get parsed as sub-comps is: # model_object.remove_used_keys() if len(model_object.model_props_parsed) == 0: @@ -318,7 +327,7 @@ def model_class_translate(model_props, object_class): print("Disabling class", model_props['object_class'], 'rendering as ModelObject') model_props['object_class'] = 'ModelObject' -def model_loader_recursive(model_data, container): +def model_loader_recursive(model_data, container, state): k_list = model_data.keys() object_names = dict.fromkeys(k_list , 1) if type(object_names) is not dict: @@ -340,7 +349,7 @@ def model_loader_recursive(model_data, container): if not ('object_class' in model_props): # this is either a class attribute or an un-handleable meta-data # if the class atttribute exists, we should pass it to container to load - #print("Skipping un-typed", 
object_name) + print("Skipping un-typed", object_name) continue #print("Translating", object_name) # this is a kludge, but can be important @@ -348,14 +357,15 @@ def model_loader_recursive(model_data, container): model_class_translate(model_props, object_class) # now we either have a constant (key and value), or a # fully defined object. Either one should work OK. - #print("Trying to load", object_name) - model_object = model_class_loader(object_name, model_props, container) + #print("Loading", object_name) + model_object = model_class_loader(object_name, model_props, container, state) if model_object == False: print("Could not load", object_name) continue # not handled, but for now we will continue, tho later we should bail? # now for container type objects, go through its properties and handle + #print("loaded object", model_object, "with container", container) if type(model_props) is dict: - model_loader_recursive(model_props, model_object) + model_loader_recursive(model_props, model_object, state) def model_path_loader(model_object_cache): k_list = model_object_cache.keys() @@ -381,7 +391,7 @@ def model_tokenizer_recursive(model_object, model_object_cache, model_exec_list, """ if model_touch_list is None: model_touch_list = [] - #print("Handling", model_object.name, " ", model_object.state_path) + #print("Tokenizing", model_object.name, " ", model_object.state_path) if model_object.ix in model_exec_list: return if model_object.ix in model_touch_list: @@ -474,7 +484,7 @@ def model_order_recursive(model_object, model_object_cache, model_exec_list, mod # now after loading input dependencies, add this to list model_exec_list.append(model_object.ix) -def model_domain_dependencies(state, domain, ep_list): +def model_domain_dependencies(state, domain, ep_list, only_runnable = False): """ Given an hdf5 style path to a domain, and a list of variable endpoints in that domain, Find all model elements that influence the endpoints state @@ -485,11 +495,14 @@ def 
model_domain_dependencies(state, domain, ep_list): mel = [] mtl = [] # if the given element is NOT in model_object_cache, then nothing is acting on it, so we return empty list - if (domain + '/' + ep) in state['model_object_cache'].keys(): - endpoint = state['model_object_cache'][domain + '/' + ep] - model_order_recursive(endpoint, state['model_object_cache'], mel, mtl) - mello = mello + mel + if (domain + '/' + ep) in state['state_paths']: + if (domain + '/' + ep) in state['model_object_cache'].keys(): + endpoint = state['model_object_cache'][domain + '/' + ep] + model_order_recursive(endpoint, state['model_object_cache'], mel, mtl) + mello = mello + mel + if (only_runnable == True): + mello = ModelObject.runnable_op_list(state['op_tokens'], mello) return mello def save_object_ts(io_manager, siminfo, op_tokens, ts_ix, ts): @@ -533,8 +546,9 @@ def step_one(op_tokens, ops, state_ix, dict_ix, ts_ix, step, debug = 0): # todo: decide if all step_[class() functions should set value in state_ix instead of returning value? if debug > 0: print("DEBUG: Operator ID", ops[1], "is op type", ops[0]) + print("DEBUG: ops: ", ops) if ops[0] == 1: - pass #step_equation(ops, state_ix) + step_equation(ops, state_ix) elif ops[0] == 2: # todo: this should be moved into a single function, # with the conforming name step_matrix(op_tokens, ops, state_ix, dict_ix) diff --git a/src/hsp2/hsp2/om_data_matrix.py b/src/hsp2/hsp2/om_data_matrix.py new file mode 100644 index 00000000..5c2bebcd --- /dev/null +++ b/src/hsp2/hsp2/om_data_matrix.py @@ -0,0 +1,309 @@ +""" +The class DataMatrix is used to translate provide table lookup and interpolation function. + # we need to check these matrix_vals because the OM system allowed + # column headers to be put into the first row as placeholders, and + # if those string variables did NOT resolve to something in state it would simply + # ignore them, which is sloppy. 
See: "Handle legacy header values" in https://github.com/HARPgroup/HSPsquared/issues/26 + # self.op_matrix = [] # this is the final opcoded matrix for runtime +""" +from numba import njit +import numpy as np +from HSP2.state import * +from HSP2.om import * +from HSP2.om_model_object import * + +class DataMatrix(ModelObject): + def __init__(self, name, container = False, model_props = {}): + super(DataMatrix, self).__init__(name, container, model_props) + if not DataMatrix.check_properties(model_props): + raise Exception("DataMatrix requires: " + ','.join(DataMatrix.required_properties()) + " ... process terminated.") + # check this fitrst, because it may determine how we process the inputted matrix + self.model_props_parsed = model_props # stash these for debugging the loader process + self.auto_set_vars = self.handle_prop(model_props, 'autosetvars') + if type(model_props['matrix']) is str: + # this is a matrix that uses another matrix for its data source + self.add_input('matrix', model_props['matrix'], 2, False) + self.matrix_ix = self.get_dict_state(self.inputs_ix['matrix']) + # we are accessing a remote matrix. + data_matrix = self.get_dict_state(self.matrix_ix) + else: + self.matrix = np.asarray(self.handle_prop(model_props, 'matrix')) # gets passed in at creation + if self.auto_set_vars == 1: + # we need to extract the first row + self.auto_var_names = self.matrix[0] # stash it + self.matrix = np.delete(self.matrix, 0, 0) # now remove it + data_matrix = self.matrix + self.matrix_ix = self.ix + self.mx_type = self.handle_prop(model_props, 'mx_type') + if self.mx_type == None: + # check for valuetype instead - this will go away when we enforce mx_type + print("Guessing mx_type for", self.name, " from valuetype. This is deprecated. 
") + self.valuetype = self.handle_prop(model_props, 'valuetype') + if self.valuetype != None: + self.mx_type = int(self.valuetype) + model_props['mx_type'] = self.mx_type + print("mx_type set to ", self.mx_type) + if self.mx_type == None: + #raise Exception("Matrix", self.name, " has neither mx_type nor valuetype. Object creation halted. ") + # note: the drupal hydroImpoundment classes ship storage_stage_area as 'matrix' and overwrite all other stuff. Which is kinda busted, but is apparently needed by the small impoundments in the old OM model. + # this is not a huge issue, since we will not use those classes in the new model but forces us to + # be OK with missing mx_type during testing. + print("Matrix", self.name, " has neither mx_type nor valuetype. This is deprecated. ") + self.mx_type = 0 + if not DataMatrixLookup.check_properties(model_props): + self.mx_type = 0 + self.optype = 2 # 0 - shell object, 1 - equation, 2 - DataMatrix, 3 - input, 4 - broadcastChannel, 5 - ? + # tokenized version of the matrix with variable references and constant references + self.matrix_tokens = [] + # old version on accessor had the below. necessary? + # self.matrix_tokens = np.zeros(self.nrows * self.ncols, dtype=int ) + # set of default values to populate dict_ix + self.init_matrix_attributes(data_matrix) + # get and stash the variable name (could be numeric?) inputs for row and col lookup var (keycols1 and 2) + self.keycol1 = self.handle_prop(model_props, 'keycol1', True) + self.keycol2 = self.handle_prop(model_props, 'keycol2', True) + if not self.mx_type > 0: + # just a matrix of values, return. 
+ return + self.lu_type1 = int(self.handle_prop(model_props, 'lutype1', True )) + if (self.mx_type > 1): + self.lu_type2 = int(self.handle_prop(model_props, 'lutype2', True ) ) + else: + self.key2_ix = 0 + self.lu_type2 = 0 + + def init_matrix_attributes(self, data_matrix): + self.nrows = data_matrix.shape[0] + self.ncols = data_matrix.shape[1] + self.matrix_values = np.zeros(data_matrix.shape) + + @staticmethod + def required_properties(): + req_props = super(DataMatrix, DataMatrix).required_properties() + req_props.extend(['matrix']) + return req_props + + def find_paths(self): + super().find_paths() + self.key1_ix = self.constant_or_path(self.keycol1, self.keycol1, False ) + if (self.mx_type > 1): + self.key2_ix = self.constant_or_path(self.keycol1, self.keycol2, False ) + else: + self.key2_ix = 0 + self.lu_type2 = 0 + self.paths_found = False # override parent setting until we verify everything + self.matrix_tokens = [] # reset this in case it is called multiple times + # Now, we filter for the weird cases that legacy OM allowed + header_suspects = 0 # this will tally up how many oddities we get + for k in range(self.ncols): + # just check if we can find the columns, and count how many we can find. 
+ # if this is a 2-d lookup we set + m_var = self.matrix[0][k] + if is_float_digit(m_var) == False: + # this is a string variable, check if it is a header or actual reference + var_exists = self.find_var_path(self.matrix[0][k]) + if (var_exists == False): + if ( (self.mx_type == 2) and (k == 0) ): + # this is fine, 2-d arrays don't even use the 0,0 value + # so we set it to a constant value of zero + self.matrix[0][k] = 0.0 + else: + header_suspects = header_suspects + 1 + print("Checked the first row for headers, header_suspects = ", header_suspects) + if (header_suspects > 0): + if (header_suspects == self.ncols): + print("Removing the header row") + # we have what looks like a header, discard, but warn that this sucks + self.matrix = np.delete(self.matrix, 0, 0) # now remove it + # reset rows, cols, and value state storage matrix + self.init_matrix_attributes(self.matrix) + for i in range(self.nrows): + for j in range(self.ncols): + el_name = 'rowcol_' + str(i) + "_" + str(j) + el_value = self.matrix[i][j] + # this registers the constants, or values, as inputs + self.matrix_tokens.append(self.constant_or_path(el_name, el_value, False) ) + self.paths_found = True + + def tokenize(self): + # call parent method to set basic ops common to all + super().tokenize() + # - insure we have a entity_ix pointing to state_ix + # - check matrix for string vars and get entity_ix for string variables + # - add numerical constants to the state_ix and get the resulting entity_ix + # - format array of all rows and columns state_ix references + # - store array in dict_ix keyed with entity_ix + # - get entity_ix for lookup key(s) + # - create tokenized array with entity_ix, lookup types, + # renders tokens for high speed execution + # note: first 3 ops are type, ix, and matrix_ix. + # a normal matrix has ix = matrix_ix, but this allows for a matrix object that just accesses another + # matrix object as an input. 
+ self.ops = self.ops + [self.matrix_ix, self.nrows, self.ncols, self.mx_type, self.key1_ix, self.lu_type1, self.key2_ix, self.lu_type2 ] + self.matrix_tokens + + def add_op_tokens(self): + # this puts the tokens into the global simulation queue + # can be customized by subclasses to add multiple lines if needed. + super().add_op_tokens() + self.dict_ix[self.ix] = DataFrame(self.matrix_values).to_numpy() + + +class DataMatrixLookup(DataMatrix): + # this is just here for info purposes when creating a matrix that has its own accessor + @staticmethod + def required_properties(): + req_props = super(DataMatrixLookup, DataMatrixLookup).required_properties() + req_props.extend(['mx_type', 'keycol1', 'lutype1', 'keycol2', 'lutype2']) + return req_props + + +# njit functions for runtime + +@njit +def om_table_lookup(data_table, mx_type, ncols, keyval1, lu_type1, keyval2, lu_type2): + # mx_type = 0: no lookup, matrix, 1: 1d (default to col 2 as value), 2: 2d (both ways), 3: 1.5d (keyval2 = column index) + # - 1: 1d, look up row based on column 0, return value from column 1 + # - 2: 2d, look up based on row and column + # - 3: 1.5d, look up/interp row based on column 0, return value from column + # lu_type: 0 - exact match; 1 - interpolate values; 2 - stair step + if mx_type == 1: + valcol = int(1) + luval = table_lookup(data_table, keyval1, lu_type1, valcol) + return luval + if ( (mx_type == 3) or (lu_type2 == 0) ): # 1.5d (a 2-d with exact match column functions just like a 1.5d ) + valcol = int(keyval2) + luval = table_lookup(data_table, keyval1, lu_type1, valcol) + return luval + # must be a 2-d lookup + # if lu_type1 is stair step or exact match, we call the whole row + if (lu_type1 == 2): + row_vals = table_row_lookup(data_table, keyval1, lu_type1) + elif (lu_type1 == 0): + row_vals = table_row_lookup(data_table, keyval1, lu_type1) + else: + # create an interpolated version of the table + row_vals = row_interp(data_table, ncols, keyval1, lu_type1) + # have to use row 
zero as the keys for row_vals now cause we will interpolate on those + row_keys = data_table[0] + # 1: get value for all columns based on the row interp/match type + luval = np.interp(keyval2, row_keys, row_vals) + # show value at tis point + return luval + +@njit +def row_interp(data_table, ncols, keyval, lu_type): + row_vals = data_table[0].copy() # initialize to the first row + #print("interping for keyval", keyval, "lutype:", lu_type, "ncols", ncols, "in table", data_table) + for i in range(ncols): + row_vals[i] = table_lookup(data_table, keyval, lu_type, i) + return row_vals + +@njit +def table_row_lookup(data_table, keyval, lu_type): + #print("looking for keyval", keyval, "lutype:", lu_type, "in table", data_table) + if (lu_type == 2): + # stair step retrieve whole row + idx = (data_table[:, 0][0:][(data_table[:, 0][0:]- keyval) <= 0]).argmax() + elif (lu_type == 0): + idx = int(keyval) + #print("looking for row", idx, "in table", data_table) + row_vals = data_table[:][0:][idx] + return row_vals + +@njit +def table_lookup(data_table, keyval, lu_type, valcol): + if lu_type == 2: #stair-step + idx = (data_table[:, 0][0:][(data_table[:, 0][0:]- keyval) <= 0]).argmax() + luval = data_table[:, valcol][0:][idx] + elif lu_type == 1: # interpolate + luval = np.interp(keyval,data_table[:, 0][0:], data_table[:, valcol][0:]) + elif lu_type == 0: # exact match + lurow = table_row_lookup(data_table, keyval, lu_type) + luval = lurow[valcol] + + # show value at this point + return luval + + +""" +exec_tbl_values() updates the values in the dict_ix state for a data matrix as it may contain named variables +- todo: determine if time savings can be achieved by storing state references for ONLY variable names in matrices + Since we currently tokenize every item in the dict_ix and copy it from state_ix every timestep + regardless of whether it is an actual named variable reference or a static numeric value. 
+ See if speed increases by storing a series of row,col,ix ops for ONLY those entries that are + actual variable references, instead of the current method of storing a ix pointer for every single + matrix cell. +""" +@njit(cache=True) +def exec_tbl_values(op, state_ix, dict_ix): + # this f + ix = op[1] + data_matrix = dict_ix[ix] + nrows = op[3] + ncols = op[4] + k = 0 + for i in range(nrows): + for j in range(ncols): + # the indices pointing to data columns begin at item 10 in the array + data_matrix[i][j] = state_ix[op[10 + k]] + k = k + 1 + dict_ix[ix] = data_matrix + return 0.0 + +@njit(cache=True) +def exec_tbl_eval(op_tokens, op, state_ix, dict_ix): + # Note: these indices must be adjusted to reflect the number of common op tokens + # check this first, if it is type = 0, then it is just a matrix, and only needs to be loaded, not evaluated + #print("Called exec_tbl_eval, with ops=", op) + ix = op[1] + dix = op[2] + tbl_op = op_tokens[dix] + #print("Target Table ops:", tbl_op) + nrows = tbl_op[3] + ncols = tbl_op[4] + # load lookup infor for this accessor + mx_type = op[5] # not used yet, what type of table? 
in past this was always 1-d or 2-d + key1_ix = op[6] + #print("ix, dict_ix, mx_type, key1_ix", ix, dix, mx_type, key1_ix) + lu_type1 = op[7] + key2_ix = op[8] + lu_type2 = op[9] + data_table = dict_ix[dix] + keyval1 = state_ix[key1_ix] + if key2_ix != 0: + keyval2 = state_ix[key2_ix] + else: + keyval2 = 0 + #print("keyval1, lu_type1, keyval2, lu_type2, ncols", keyval1, lu_type1, keyval2, lu_type2, ncols) + result = om_table_lookup(data_table, mx_type, ncols, keyval1, lu_type1, keyval2, lu_type2) + return result + +def debug_tbl_eval(op_tokens, op, state_ix, dict_ix): + # Note: these indices must be adjusted to reflect the number of common op tokens + # check this first, if it is type = 0, then it is just a matrix, and only needs to be loaded, not evaluated + ix = op[1] + dix = op[2] + tbl_op = op_tokens[dix] + print("Target Table ops:", tbl_op) + nrows = tbl_op[3] + ncols = tbl_op[4] + # load lookup infor for this accessor + mx_type = op[5] # not used yet, what type of table? in past this was always 1-d or 2-d + key1_ix = op[6] + print("ix, dict_ix, mx_type, key1_ix", ix, dix, mx_type, key1_ix) + lu_type1 = op[7] + key2_ix = op[8] + lu_type2 = op[9] + print("lu_type1, key2_ix, lu_type2", lu_type1, key2_ix, lu_type2) + data_table = dict_ix[dix] + print("data_table", data_table) + print("key1_ix, key2_ix", key1_ix, key2_ix) + keyval1 = state_ix[key1_ix] + if key2_ix != 0: + keyval2 = state_ix[key2_ix] + else: + keyval2 = 0 + print("key1_ix, key2_ix, keyval1, keyval2", key1_ix, key2_ix, keyval1, keyval2) + print("keyval1, lu_type1, keyval2, lu_type2, ncols", keyval1, lu_type1, keyval2, lu_type2, ncols) + result = om_table_lookup(data_table, mx_type, ncols, keyval1, lu_type1, keyval2, lu_type2) + return result diff --git a/src/hsp2/hsp2/om_equation.py b/src/hsp2/hsp2/om_equation.py new file mode 100644 index 00000000..a738ac53 --- /dev/null +++ b/src/hsp2/hsp2/om_equation.py @@ -0,0 +1,456 @@ +""" +The class Equation is used to translate an equation in text string form 
into a tokenized model op code +The equation will look for variable names inside the equation string (i.e. not numeric, not math operator) +and will then search the local object inputs and the containing object inputs (if object has parent) for +the variable name in question. Ultimately, everyting becomes either an operator or a reference to a variable +in the state_ix Dict for runtime execution. +""" +from hsp2.hsp2.om import get_exec_order, is_float_digit +from hsp2.hsp2.state import set_state, get_state_ix +from hsp2.hsp2.om_model_object import ModelObject, ModelConstant +from numba import njit, types +from numpy import array, append + +# from hsp2.hsp2.state import set_state, get_state_ix +# from numba.typed import Dict +# from hsp2.hsp2.om import get_exec_order, is_float_digit +# from pandas import Series, DataFrame, concat, HDFStore, set_option, to_numeric +# from pandas import Timestamp, Timedelta, read_hdf, read_csv +# from numpy import pad, asarray, zeros, int32 +# from numba import njit, types + + +class Equation(ModelObject): + # the following are supplied by the parent class: name, log_path, attribute_path, state_path, inputs + + def __init__(self, name, container = False, model_props = {}, state = None): + super(Equation, self).__init__(name, container, model_props) + self.equation = self.handle_prop(model_props, 'equation') + self.ps = False + self.ps_names = [] # Intermediate with constants turned into variable references in state_paths + self.var_ops = [] # keep these separate since the equation functions should not have to handle overhead + self.optype = 1 # 0 - shell object, 1 - equation, 2 - datamatrix, 3 - input, 4 - broadcastChannel, 5 - ? 
+ self.non_neg = self.handle_prop(model_props, 'nonnegative', False, 0) + min_value = self.handle_prop(model_props, 'minvalue', False, 0.0) + self.min_value_ix = ModelConstant('min_value', self, {'name':'min_value', 'value':min_value}).ix + self.deconstruct_eqn() + + def handle_prop(self, model_props, prop_name, strict = False, default_value = None ): + prop_val = super().handle_prop(model_props, prop_name, strict, default_value ) + if (prop_name == 'equation'): + if type(prop_val) is str: + return prop_val + elif prop_val == None: + # try for equation stored as normal propcode + prop_val = str(self.handle_prop(model_props, 'value', True)) + return prop_val + + def deconstruct_eqn(self): + exprStack = [] + exprStack[:] = [] + self.ps = deconstruct_equation(self.equation) + # if ps is empty, we may have gotten a constant, so we will check it, + # and create a set of ps [constant] + 0.0 and return + # if this does not yield ps > 0 we will throw an error + if (len(self.ps) == 0): + tps = deconstruct_equation(self.equation + " + 0.0") + if len(tps) == 1: + # seemed to have succeeded, try to use this now + self.ps = tps + self.equation = self.equation + " + 0.0" + else: + raise Exception("Error: Cannot parse equation: " + self.equation + " on object " + self.name + " Halting.") + if (len(self.ps) == 1): + # this is a single set of operands, but we need to check for a solo negative number + # which will also get returned as one op set, with the first slot a number, then 0, 0 + # the first token *should* be an operator, followed by 2 operands + if is_float_digit(self.ps[0][0]): + # if it is longer than 1 character + if (self.ps[0][1] == 0) and (self.ps[0][2] == 0): + tps = deconstruct_equation(" 0.0 " + self.equation) + if len(tps) == 1: + # seemed to have succeeded, try to use this now + self.ps = tps + self.equation = " 0.0 " + self.equation + else: + raise Exception("Error: Cannot parse equation: " + self.equation + " on object " + self.name + " Halting.") + + 
#print(exprStack) + + def find_paths(self): + super().find_paths() + self.paths_found = False # override parent setting until we verify everything + #return + # we now want to check to see that all variables can be found (i.e. path exists) + # and we want to create variables for any constants that we have here + # do not handle mathematical operators + self.ps_names = [] + for i in range(len(self.ps)): + name_op = ["", "", ""] + name_op[0] = self.ps[i][0] + for j in range(1,3): + # range 1,3 counts thru 1 and 2 (not 3, cause python is so array centric it knows you know) + op_value = self.ps[i][j] + if op_value == None: + # don't need to check these as they are just referring to the stack. + continue + if is_float_digit(op_value): + op_name = "_op_" + str(i) + "_" + str(j) + else: + op_name = op_value + #print("Checking op set", i, "op", j, ":", op_name) + # constant_or_path() looks at name and path, since we only have a var name, we must assume + # the path is either a sibling or child variable or explicitly added other input, so this should + # resolve correctly, but we have not tried it + var_ix = self.constant_or_path(op_name, op_value, False) + # we now return, trusting that the state_path for each operand + # is stored in self.inputs, with the varix saved in self.inputs_ix + name_op[j] = op_name + self.ps_names.append(name_op) + self.paths_found = True + return + + def tokenize_ops(self): + self.deconstruct_eqn() + self.var_ops = tokenize_ops(self.ps) + + def tokenize_vars(self): + # now stash the string vars as new state vars + for j in range(2,len(self.var_ops)): + if isinstance(self.var_ops[j], int): + continue # already has been tokenized, so skip ahead + elif is_float_digit(self.var_ops[j]): + # must add this to the state array as a constant + constant_path = self.state_path + '/_ops/_op' + str(j) + s_ix = set_state(self.state['state_ix'], self.state['state_paths'], constant_path, float(self.var_ops[j]) ) + self.var_ops[j] = s_ix + else: + # this is a 
variable, must find it's data path index + var_path = self.find_var_path(self.var_ops[j]) + s_ix = get_state_ix(self.state['state_ix'], self.state['state_paths'], var_path) + if s_ix == False: + print("Error: unknown variable ", self.var_ops[j], "path", var_path, "index", s_ix) + print("searched: ", self.state['state_paths'], self.state['state_ix']) + return + else: + self.var_ops[j] = s_ix + + def tokenize(self): + # call parent to render standard tokens + super().tokenize() + # replaces operators with integer code, + # and turns the array of 3 value opcode arrays into a single sequential array + self.tokenize_ops() + # finds the ix value for each operand + self.tokenize_vars() + # renders tokens for high speed execution + self.ops = self.ops + [self.non_neg, self.min_value_ix] + self.var_ops + + +from pyparsing import ( + Literal, + Word, + Group, + Forward, + alphas, + alphanums, + Regex, + ParseException, + CaselessKeyword, + Suppress, + delimitedList, +) +import math +import operator + +exprStack = [] + + +def push_first(toks): + exprStack.append(toks[0]) + + +def push_unary_minus(toks): + for t in toks: + if t == "-": + exprStack.append("unary -") + else: + break + +def deconstruct_equation(eqn): + """ + We should get really good at using docstrings... 
+ + we parse the equation during readuci/pre-processing and break it into njit'able pieces + this forms the basis of our object parser code to run at import_uci step + """ + results = BNF().parseString(eqn, parseAll=True) + ps = [] + ep = exprStack + pre_evaluate_stack(ep[:], ps) + return ps + +def get_operator_token(op): + # find the numerical token for an operator + # returns integer value, or 0 if this is not a recorgnized mathematical operator + if op == '-': opn = 1 + elif op == '+': opn = 2 + elif op == '*': opn = 3 + elif op == '/': opn = 4 + elif op == '^': opn = 5 + else: opn = False + return opn + +def tokenize_ops(ps): + '''Translates a set of string operands into integer keyed tokens for faster execution.''' + tops = [len(ps)] # first token is number of ops + for i in range(len(ps)): + op = get_operator_token(ps[i][0]) + # a negative op code indicates null + # this should cause no confusion since all op codes are references and none are actual values + if ps[i][1] == None: o1 = -1 + else: o1 = ps[i][1] + if ps[i][2] == None: o2 = -1 + else: o2 = ps[i][2] + tops.append(op) + tops.append(o1) + tops.append(o2) + return tops + +bnf = None + + +def BNF(): + """ + expop :: '^' + multop :: '*' | '/' + addop :: '+' | '-' + integer :: ['+' | '-'] '0'..'9'+ + atom :: PI | E | real | fn '(' expr ')' | '(' expr ')' + factor :: atom [ expop factor ]* + term :: factor [ multop factor ]* + expr :: term [ addop term ]* + """ + global bnf + if not bnf: + # use CaselessKeyword for e and pi, to avoid accidentally matching + # functions that start with 'e' or 'pi' (such as 'exp'); Keyword + # and CaselessKeyword only match whole words + e = CaselessKeyword("E") + pi = CaselessKeyword("PI") + # fnumber = Combine(Word("+-"+nums, nums) + + # Optional("." 
+ Optional(Word(nums))) + + # Optional(e + Word("+-"+nums, nums))) + # or use provided pyparsing_common.number, but convert back to str: + # fnumber = ppc.number().addParseAction(lambda t: str(t[0])) + fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?") + ident = Word(alphas, alphanums + "_$") + + plus, minus, mult, div = map(Literal, "+-*/") + lpar, rpar = map(Suppress, "()") + addop = plus | minus + multop = mult | div + expop = Literal("^") + + expr = Forward() + expr_list = delimitedList(Group(expr)) + # add parse action that replaces the function identifier with a (name, number of args) tuple + def insert_fn_argcount_tuple(t): + fn = t.pop(0) + num_args = len(t[0]) + t.insert(0, (fn, num_args)) + + fn_call = (ident + lpar - Group(expr_list) + rpar).setParseAction( + insert_fn_argcount_tuple + ) + atom = ( + addop[...] + + ( + (fn_call | pi | e | fnumber | ident).setParseAction(push_first) + | Group(lpar + expr + rpar) + ) + ).setParseAction(push_unary_minus) + + # by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left + # exponents, instead of left-to-right that is, 2^3^2 = 2^(3^2), not (2^3)^2. + factor = Forward() + factor <<= atom + (expop + factor).setParseAction(push_first)[...] + term = factor + (multop + factor).setParseAction(push_first)[...] + expr <<= term + (addop + term).setParseAction(push_first)[...] 
+ bnf = expr + return bnf + + +# map operator symbols to corresponding arithmetic operations +epsilon = 1e-12 +opn = { + "+": operator.add, + "-": operator.sub, + "*": operator.mul, + "/": operator.truediv, + "^": operator.pow, +} + +fn = { + "sin": math.sin, + "cos": math.cos, + "tan": math.tan, + "exp": math.exp, + "abs": abs, + "trunc": int, + "round": round, + "sgn": lambda a: -1 if a < -epsilon else 1 if a > epsilon else 0, + # functionsl with multiple arguments + "multiply": lambda a, b: a * b, + "hypot": math.hypot, + # functions with a variable number of arguments + "all": lambda *a: all(a), +} + +fns = { + "sin": "math.sin", + "cos": "math.cos", + "tan": "math.tan", + "exp": "math.exp", + "abs": "abs", + "trunc": "int", + "round": "round", +} + + +def evaluate_stack(s): + op, num_args = s.pop(), 0 + if isinstance(op, tuple): + op, num_args = op + if op == "unary -": + return -evaluate_stack(s) + if op in "+-*/^": + # note: operands are pushed onto the stack in reverse order + op2 = evaluate_stack(s) + op1 = evaluate_stack(s) + return opn[op](op1, op2) + elif op == "PI": + return math.pi # 3.1415926535 + elif op == "E": + return math.e # 2.718281828 + elif op in fn: + # note: args are pushed onto the stack in reverse order + args = reversed([evaluate_stack(s) for _ in range(num_args)]) + return fn[op](*args) + elif op[0].isalpha(): + raise Exception("invalid identifier '%s'" % op) + else: + # try to evaluate as int first, then as float if int fails + try: + return int(op) + except ValueError: + return float(op) + +def pre_evaluate_stack(s, ps): + op, num_args = s.pop(), 0 + if isinstance(op, tuple): + op, num_args = op + if op == "unary -": + ps.append([-evaluate_stack(s), 0, 0]) + return + if op in "+-*/^": + # note: operands are pushed onto the stack in reverse order + op2 = pre_evaluate_stack(s, ps) + op1 = pre_evaluate_stack(s, ps) + ps.append([ op, op1, op2]) + return + elif op == "PI": + ps.append([math.pi, 0, 0]) # 3.1415926535 + return + elif op == 
"E": + ps.append([math.e, 0, 0]) # 2.718281828 + return + elif op in fns: + # note: args are pushed onto the stack in reverse order + #print("s:", s, "op", op) + args = [] + for x in range(num_args): + args.append(pre_evaluate_stack(s, ps)) + args.reverse() + args.insert(fns[op], 0) + ps.append(args) + return + elif op[0].isalpha(): + return op + else: + # return the operand now + return op + + +@njit(cache=True) +def evaluate_eq_ops(op, val1, val2): + if op == 1: + #print(val1, " - ", val2) + result = val1 - val2 + return result + if op == 2: + #print(val1, " + ", val2) + result = val1 + val2 + return result + if op == 3: + #print(val1, " * ", val2) + result = val1 * val2 + return result + if op == 4: + #print(val1, " / ", val2) + result = val1 / val2 + return result + if op == 5: + #print(val1, " ^ ", val2) + result = pow(val1, val2) + return result + return 0 + + +@njit +def step_equation(op_token, state_ix): + result = 0 + s = array([0.0]) + s_ix = -1 # pointer to the top of the stack + s_len = 1 + # handle special equation settings like "non-negative", etc. + non_neg = op_token[2] + min_ix = op_token[3] + num_ops = op_token[4] # this index is equal to the number of ops common to all classes + 1. See om_model_object for base ops and adjust + op_loc = 5 # where do the operators and operands start in op_token + #print(num_ops, " operations") + # is the below faster since it avoids a brief loop and a couple ifs for 2 op equations? 
+ if num_ops == 1: + result = evaluate_eq_ops(op_token[op_loc], state_ix[op_token[op_loc + 1]], state_ix[op_token[op_loc + 2]]) + else: + for i in range(num_ops): + # the number of ops common to all classes + 1 (the counter for math operators) is offset for this + # currently 3 (2 common ops (0,1), plus 1 to indicate number of equation operand sets(2), so this is ix 3) + op = op_token[op_loc + 3*i] + t1 = op_token[op_loc + 3*i + 1] + t2 = op_token[op_loc + 3*i + 2] + # if val1 or val2 are < 0 this means they are to come from the stack + # if token is negative, means we need to use a stack value + #print("s", s) + if t1 < 0: + val1 = s[s_ix] + s_ix -= 1 + else: + val1 = state_ix[t1] + if t2 < 0: + val2 = s[s_ix] + s_ix -= 1 + else: + val2 = state_ix[t2] + #print(s_ix, op, val1, val2) + result = evaluate_eq_ops(op, val1, val2) + s_ix += 1 + if s_ix >= s_len: + s = append(s, 0) + s_len += 1 + s[s_ix] = result + result = s[s_ix] + if (non_neg == 1) and (result < 0): + result = state_ix[min_ix] + state_ix[op_token[1]] = result + return True \ No newline at end of file diff --git a/src/hsp2/hsp2/om_flowby.py b/src/hsp2/hsp2/om_flowby.py new file mode 100644 index 00000000..a9f76ebb --- /dev/null +++ b/src/hsp2/hsp2/om_flowby.py @@ -0,0 +1,93 @@ +""" +The class SpecialAction is used to support original HSPF ACTIONS. 
+""" +from HSP2.state import * +from HSP2.om import * +from HSP2.om_model_object import ModelObject +from numba import njit +class FlowBy(ModelObject): + def __init__(self, name, container = False, model_props = None, state = None): + super(FlowBy, self).__init__(name, container, model_props, state = None) + self.optype = 15 # Special Actions start indexing at 100 + + def parse_model_props(self, model_props, strict=False): + print("FlowBy.parse_model_props() called") + super().parse_model_props(model_props, strict) + # Handle props for a basic flowby + # - cfb_condition: eq / lt / gt / lte / gte + # - cfb_var: string (reference input) + # - enable_cfb: 0 / 1 + # - flowby_eqn: string, will be converted into an equation object called "flowby" that is an input + print("Creating ACTION with props", model_props) + self.enable_cfb = int(self.handle_prop(model_props, 'enable_cfb', False, 0)) + self.cfb_condition = int(self.handle_prop(model_props, 'cfb_condition')) + self.cfb_var = self.handle_prop(model_props, 'cfb_var') + self.flowby_eqn = self.handle_prop(model_props, 'flowby_eqn') # + # @todo: handle lookup table types, should be just a child table, anduse the value of the table instead of an equation, OR, use the conditional + # will have to see if there are DataMatrix inputs that are renamed in this set and convert appropriately + # now add the state value that we are operating on (the target) as an input, so that this gets executed AFTER this is set initially + + def handle_prop(self, model_props, prop_name, strict = False, default_value = None ): + # Insure all values are legal ex: no DIV by Zero + prop_val = super().handle_prop(model_props, prop_name, strict, default_value ) + if (prop_name == 'cfb_var'): + if (self.enable_cfb > 0 ): + if self.cfb_var == None: + raise Exception("Error: Flowby with CFB enabled must have CFB: " + self.name + " Halting.") + else: + self.add_input('cfb', self.cfb_var, 1, True) + if (prop_name == 'equation'): + eq = Equation('flowby', 
self, {'equation':self.flowby_eqn})
+        if (prop_name == 'cfb_condition'):
+            ids = {'lt': 0, 'gt':1}
+            if prop_val in ids.keys():
+                self.cfb_con_int = ids[prop_val]
+            else:
+                raise Exception("Error: Non-valid flowby CFB given:" + prop_val + " for " + self.name + " valid values are 'lt' and 'gt'. Halting.")
+        return prop_val
+
+    def tokenize(self):
+        # call parent method to set basic ops common to all
+        super().tokenize() # sets self.ops = op_type, op_ix
+        self.ops = self.ops + [self.inputs_ix['flowby'], self.enable_cfb, self.inputs_ix['cfb'], self.cfb_con_int]
+        # @tbd: check if time ops have been set and tokenize accordingly
+        print("Flowby", self.name, "tokens", self.ops)
+
+    def add_op_tokens(self):
+        # this puts the tokens into the global simulation queue
+        # can be customized by subclasses to add multiple lines if needed.
+        super().add_op_tokens()
+
+
+# njit functions for runtime
+
+@njit(cache=True)
+def step_flowby(op, state_ix, dict_ix, step):
+    ix = op[1] # ID of this op
+    # these indices must be adjusted to reflect the number of common op tokens
+    # SpecialAction has:
+    # - type of condition (+=, -=, ...)
+    # - operand 1 (left side)
+    # - operand 2 (right side)
+    # @tbd: check if time ops have been set and enable/disable accordingly
+    #       - 2 ops will be added for each time matching switch, the state_ix of the time element (year, month, ...) 
and the state_ix of the constant to match
+    #       - matching should be as simple as if (state_ix[tix1] <> state_ix[vtix1]): return state_ix[ix1] (don't modify the value)
+    #       - alternative: save the integer timestamp or timestep of the start, and if step/stamp > value, enable
+    # @tbd: add number of repeats, and save the value of repeats in a register
+    flowby = state_ix[op[2]] # self.ops + [self.inputs_ix['flowby'], self.enable_cfb, self.inputs_ix['cfb'], self.cfb_con_int]
+    enable_cfb = op[3] # raw 0/1 token value (not a state_ix reference)
+    cfb_val = state_ix[op[4]]
+    cfb_op = op[5] # raw condition code token (not a state_ix reference)
+    # set preliminary value as computed flowby
+    result = flowby
+    if (enable_cfb == 1):
+        if cfb_op == 0:
+            if (cfb_val < flowby):
+                result = cfb_val
+        elif cfb_op == 1:
+            if (cfb_val > flowby):
+                result = cfb_val
+    # set value in state
+    state_ix[ix] = result
+    return True
+
diff --git a/src/hsp2/hsp2/om_model_broadcast.py b/src/hsp2/hsp2/om_model_broadcast.py
new file mode 100644
index 00000000..321b2ed7
--- /dev/null
+++ b/src/hsp2/hsp2/om_model_broadcast.py
@@ -0,0 +1,180 @@
+"""
+The class Broadcast is used to send and receive data to shared accumulator "channel" and "register".
+See also Branch: an actual flow control structure that looks similar to Conditional, but changes execution
+"""
+from HSP2.state import *
+from HSP2.om import *
+from HSP2.om_model_object import *
+from HSP2.om_model_linkage import ModelLinkage
+from numba import njit
+import warnings
+
+class ModelBroadcast(ModelObject):
+    def __init__(self, name, container = False, model_props = None, state = None):
+        super(ModelBroadcast, self).__init__(name, container, model_props, state)
+        self.model_props_parsed = model_props
+        # broadcast_params = [ [local_name1, remote_name1], [local_name2, remote_name2], ...] 
+ # broadcast_channel = state_path/[broadcast_channel] + # broadcast_hub = self, parent, /state/path/to/element/ + # we call handle+=_prop() because this will be OK with any format of caller data + self.linkages = {} # store of objects created by this + self.optype = 4 # 0 - shell object, 1 - equation, 2 - DataMatrix, 3 - input, 4 - broadcastChannel, 5 - ? + self.bc_type_id = 2 # assume read -- is this redundant? is it really the input type ix? + #self.parse_model_props(model_props) + self.setup_broadcast(self.broadcast_type, self.broadcast_params, self.broadcast_channel, self.broadcast_hub) + + + def parse_model_props(self, model_props, strict = False ): + # handle props array + super().parse_model_props(model_props, strict) + self.broadcast_type = self.handle_prop(model_props, 'broadcast_type') + self.broadcast_hub = self.handle_prop(model_props, 'broadcast_hub') + self.broadcast_channel = self.handle_prop(model_props, 'broadcast_channel') + self.broadcast_params = self.handle_prop(model_props, 'broadcast_params') + if self.broadcast_type == None: + self.broadcast_type = 'read' + if self.broadcast_channel == None: + self.broadcast_channel = False + if self.broadcast_hub == None: + self.broadcast_hub = 'self' + if self.broadcast_params == None: + self.broadcast_params = [] + + def handle_prop(self, model_props, prop_name, strict = False, default_value = None ): + # parent method handles most cases, but subclass handles special situations. 
+ if ( prop_name == 'broadcast_params'): + prop_val = model_props['broadcast_params'] # check that this is OK + else: + prop_val = super().handle_prop(model_props, prop_name, strict, default_value) + return prop_val + + def setup_broadcast(self, broadcast_type, broadcast_params, broadcast_channel, broadcast_hub): + if (broadcast_hub == 'parent') and (self.container.container == False): + warnings.warn("Broadcast named " + self.name + " is parent object " + self.container.name + " with path " + self.container.state_path + " does not have a grand-parent container. Broadcast to hub 'parent' guessing as a path-type global. ") + broadcast_hub = '/STATE/global' + warnings.warn("Proceeding with broadcast_type, broadcast_params, broadcast_channel, broadcast_hub = " + str(broadcast_type) + "," + str(broadcast_params) + "," + str(broadcast_channel) + "," + str(broadcast_hub)) + if ( (broadcast_hub == 'self') or (broadcast_hub == 'child') ): + hub_path = self.container.state_path # the parent of this object is the "self" in question + hub_container = self.container + elif broadcast_hub == 'parent': + hub_path = self.container.container.state_path + hub_container = self.container.container + else: + # we assume this is a valid path. but we verify and fail if it doesn't work during tokenization + # this is not really yet operational since it would be a global broadcast of sorts + print("Broadcast ", self.name, " hub Path not parent, self or child. 
Trying to find another hub_path = ", broadcast_hub) + hub_path = broadcast_hub + hub_exists = self.find_var_path(hub_path) + if hub_exists == False: + hub_container = False + else: + hub_container = self.state['model_object_cache'][hub_path] + # add the channel to the hub path + channel_path = hub_path + "/" + broadcast_channel + channel = self.insure_channel(broadcast_channel, hub_container) + # now iterate through pairs of source/destination broadcast lines + i = 0 + for b_pair in broadcast_params: + # create an object for each element in the array + if (broadcast_type == 'read'): + # a broadcast channel (object has been created above) + # + a register to hold the data (if not yet created) + # + an input on the parent to read the data + src_path = hub_path + "/" + b_pair[1] + self.bc_type_id = 2 # pull type + register_varname = b_pair[0] + # create a link object of type 2, property reader to local state + #print("Adding broadcast read as input from ", channel_path, " as local var named ",register_varname) + # create a register if it does not exist already + var_register = self.insure_register(register_varname, 0.0, channel) + # add input to parent container for this variable from the hub path + self.container.add_input(register_varname, var_register.state_path, 1, True) + # this registers a variable on the parent so the channel is not required to access it + # this is redundant for the purpose of calculation, all model components will know how to find it by tracing inputs + # but this makes it easier for tracking + puller = ModelLinkage(register_varname, self.container, {'right_path':var_register.state_path, 'link_type':2} ) + added = True + else: + # a broadcast hub (if not yet created) + # + a register to hold the data (if not yet created) + # + an input on the broadcast hub to read the data (or a push on this object?) 
+ dest_path = hub_path + "/" + b_pair[1] + self.bc_type_id = 4 # push accumulator type + local_varname = b_pair[0] # this is where we take the data from + register_varname = b_pair[1] # this is the name that will be stored on the register + #print("Adding send from local var ", local_varname, " to ",channel.name) + # create a register as a placeholder for the data at the hub path + # in case there are no readers + var_register = self.insure_register(register_varname, 0.0, channel) + dest_path = var_register.state_path + src_path = self.find_var_path(local_varname) + # this linkage pushes from local value to the remote path + pusher = ModelLinkage(register_varname, self, {'right_path':src_path, 'link_type':self.bc_type_id, 'left_path':dest_path} ) + # try adding the linkage an input, just to see if it influences the ordering + #print("Adding broadcast source ", local_varname, " as input to register ",var_register.name) + # we do an object connection here, as this is a foolproof way to + # add already created objects as inputs + var_register.add_object_input(register_varname + str(pusher.ix), pusher, 1) + # this linkage creates a pull on the remote path, not yet ready since + # there is no bc_type_id that corresponds to an accumulator pull + #puller = ModelLinkage(register_varname, var_register, src_path, self.bc_type_id) + + def insure_channel(self, broadcast_channel, hub_container): + if hub_container == False: + # this is a global, so no container + hub_name = '/STATE/global' + channel_path = False + else: + #print("Looking for channel ", broadcast_channel, " on ", hub_container.name) + # must create absolute path, otherwise, it will seek upstream and get the parent + # we send with local_only = True so it won't go upstream + channel_path = hub_container.find_var_path(broadcast_channel, True) + hub_name = hub_container.name + if channel_path == False: + #print(self.state_path, "is Creating broadcast hub ", broadcast_channel, " on ", hub_name) + hub_object = 
ModelObject(broadcast_channel, hub_container) + else: + hub_object = self.state['model_object_cache'][channel_path] + return hub_object + + def tokenize(self): + # call parent method to set basic ops common to all + super().tokenize() + # because we added each type as a ModelLinkage, this may be superfluous? + # this ModelLinkage will be handled on it's own. + # are there exec hierarchy challenges because of skipping this? + # exec hierarchy function on object.inputs[] alone. Since broadcasts + # are not treated as inputs, or are they? Do ModelLinkages create inputs? + # - should read inputs create linkages, but send/push linkages not? + # - ex: a facility controls a reservoir release with a push linkage + # the reservoir *should* execute after the facility in this case + # but perhaps that just means we *shouldn't* use a push, instead + # we should have the reservoir pull the info? + # - however, since "parent" pushes will automatically have hierarchy + # preserved, since the child object already is an input to the parent + # and therefore will execute before the parent + # - but, we must insure that ModelLinkages get executed when their container + # is executed (which should come de facto as they are inputs to their container) + #if (self.broadcast_type == 'send'): + # for i in self.linkages.keys(): + # self.ops = self.ops + [self.left_ix, cop_codes[self.cop], self.right_ix] + #else: + # this is a read, so we simply rewrite as a model linkage + # not yet complete or functioning + # self.linkage.tokenize() + + def add_op_tokens(self): + # this puts the tokens into the global simulation queue + # can be customized by subclasses to add multiple lines if needed. 
+ super().add_op_tokens() + + + +# njit functions for runtime + +@njit(cache=True) +def pre_step_broadcast(op, state_ix, dict_ix): + ix = op[1] + dix = op[2] + # broadcasts do not need to act, as they are now handled as MdelLinkage + # with type = accumulate diff --git a/src/hsp2/hsp2/om_model_linkage.py b/src/hsp2/hsp2/om_model_linkage.py index b584eb5e..53b1dafa 100644 --- a/src/hsp2/hsp2/om_model_linkage.py +++ b/src/hsp2/hsp2/om_model_linkage.py @@ -8,10 +8,10 @@ from hsp2.hsp2.om_model_object import ModelObject from numba import njit class ModelLinkage(ModelObject): - def __init__(self, name, container = False, model_props = None): + def __init__(self, name, container = False, model_props = None, state = None): if model_props is None: model_props = {} - super(ModelLinkage, self).__init__(name, container, model_props) + super(ModelLinkage, self).__init__(name, container, model_props, state = False) # ModelLinkage copies a values from right to left # right_path: is the data source for the link # left_path: is the destination of the link @@ -30,9 +30,9 @@ def __init__(self, name, container = False, model_props = None): return False self.right_path = self.handle_prop(model_props, 'right_path') self.link_type = self.handle_prop(model_props, 'link_type', False, 0) - self.left_path = self.handle_prop(model_props, 'left_path') + self.left_path = self.handle_prop(model_props, 'left_path', False, None) - if self.left_path == False: + if self.left_path == None: # self.state_path gets set when creating at the parent level self.left_path = self.state_path if (self.link_type == 0): @@ -49,11 +49,16 @@ def handle_prop(self, model_props, prop_name, strict = False, default_value = No prop_val = super().handle_prop(model_props, prop_name, strict, default_value) if ( (prop_name == 'right_path') and (prop_val == None) or (prop_val == '')): raise Exception("right_path cannot be empty. Object creation halted. 
Path to object with error is " + self.state_path) - if ( (prop_name == 'right_path')): + if ( (prop_name == 'right_path') or (prop_name == 'left_path')): # check for special keyword [parent] - pre_val = prop_val - prop_val.replace("[parent]", self.container.state_path) - #print("Changed ", pre_val, " to ", prop_val) + pre_val = prop_val # stash this in case fin_var_path() returns False + #prop_val = self.handle_path_aliases(prop_val) + prop_val = self.find_var_path(prop_val) + if prop_val == False: + # discovery has returned false, meaning it could not find the property name + # so we need to retain the old path, with aliases fixed + prop_val = self.handle_path_aliases(pre_val) + print("Changed ", pre_val, " to ", prop_val) return prop_val @staticmethod @@ -74,6 +79,12 @@ def find_paths(self): # the left path, if this is type 4 or 5, is a push, so we must require it if ( (self.link_type == 4) or (self.link_type == 5) or (self.link_type == 6) ): self.insure_path(self.left_path) + push_pieces = self.left_path.split('/') + push_name = push_pieces[len(push_pieces) - 1] + var_register = self.insure_register(push_name, 0.0, False, self.left_path, False) + print("Created register", var_register.name, "with path", var_register.state_path) + # add already created objects as inputs + var_register.add_object_input(self.name, self, 1) self.paths_found = True return @@ -104,7 +115,8 @@ def tokenize(self): @njit def step_model_link(op_token, state_ix, ts_ix, step): #if step == 2: - #print("step_model_link() called at step 2 with op_token=", op_token) + # print("step_model_link() called at step 2 with op_token=", op_token) + #print("step_model_link() called at step 2 with op_token=", op_token) if op_token[3] == 1: return True elif op_token[3] == 2: diff --git a/src/hsp2/hsp2/om_model_object.py b/src/hsp2/hsp2/om_model_object.py index 1803edf2..43a9ed3e 100644 --- a/src/hsp2/hsp2/om_model_object.py +++ b/src/hsp2/hsp2/om_model_object.py @@ -18,13 +18,16 @@ class ModelObject: def 
__init__(self, name, container = False, model_props = None, state = None): self.name = name + self.handle_deprecated_args(name, container, model_props, state) + # END - handle deprecated if model_props is None: model_props = {} self.container = container # will be a link to another object - if (state == None): + self.state_path = self.handle_prop(model_props, 'state_path', False, False) + if type(state) != dict: # we must verify that we have a properly formatted state Dictionary, or that our parent does. if self.container == False: - raise Exception("Error: State dictionary must be passed to root object. " + name + " cannot be created. See state::init_state_dicts()") + raise Exception("Error: State dictionary must be passed to root object. ", type(state), "passed instead." + name + " cannot be created. See state::init_state_dicts()") else: state = self.container.state self.state = state # make a copy here. is this efficient? @@ -51,7 +54,21 @@ def __init__(self, name, container = False, model_props = None, state = None): # 100 - SpecialAction, 101 - UVNAME, 102 - UVQUAN, 103 - DISTRB self.register_path() # note this registers the path AND stores the object in model_object_cache self.parse_model_props(model_props) - + def handle_deprecated_args(self, name, container, model_props, state): + # Handle old deprecated format for Register, Constant and Variable + state_path = False + if type(model_props) != dict: + value = model_props + model_props = {'name':name, 'value':value} + print("WARNING: deprecated 3rd argument for ModelObject. Use: [Model class](name, container, model_props, state)") + if type(state) == str: + model_props['state_path'] = state + state = None + if container != None: + state = container.state + print("WARNING: deprecated 4th argument for ModelObject. Use: [Model class](name, container, model_props, state)") + + @staticmethod def required_properties(): # returns a list or minimum properties to create. 
@@ -110,12 +127,18 @@ def check_properties(cls, model_props): return False return True + def handle_path_aliases(self, object_path): + if not (self.container == False): + object_path = object_path.replace("[parent]", self.container.state_path) + object_path = object_path.replace("[self]", self.state_path) + return object_path + def handle_inputs(self, model_props): if 'inputs' in model_props.keys(): for i_pair in model_props['inputs']: i_name = i_pair[0] i_target = i_pair[1] - i_target.replace("[parent]", self.container.state_path) + i_target = handle_path_aliases(i_target) self.add_input(i_name, i_target) def handle_prop(self, model_props, prop_name, strict = False, default_value = None ): @@ -160,6 +183,7 @@ def save_object_hdf(self, hdfname, overwrite = False ): dummy_var = True def make_paths(self, base_path = False): + #print("calling make_paths from", self.name, "with base path", base_path) if base_path == False: # we are NOT forcing paths if not (self.container == False): self.state_path = self.container.state_path + "/" + str(self.name) @@ -205,6 +229,10 @@ def get_object(self, var_name = False): def find_var_path(self, var_name, local_only = False): # check local inputs for name + if type(var_name) == str: + #print("Expanding aliases for", var_name) + var_name = self.handle_path_aliases(var_name) # sub out any wildcards + #print(self.name, "called", "find_var_path(self, ", var_name, ", local_only = False)") if var_name in self.inputs.keys(): return self.inputs[var_name] if local_only: @@ -216,7 +244,7 @@ def find_var_path(self, var_name, local_only = False): if ("/STATE/" + var_name) in self.state['state_paths'].keys(): #return self.state['state_paths'][("/STATE/" + var_name)] return ("/STATE/" + var_name) - # check for root state vars + # check for full paths if var_name in self.state['state_paths'].keys(): #return self.state['state_paths'][var_name] return var_name @@ -225,7 +253,8 @@ def find_var_path(self, var_name, local_only = False): def 
constant_or_path(self, keyname, keyval, trust = False): if is_float_digit(keyval): # we are given a constant value, not a variable reference - k = ModelVariable(keyname, self, float(keyval)) + #print("Creating ModelVariable", keyname, "on", self.name, "with default", keyval) + k = ModelVariable(keyname, self, {'name':keyname, 'value':float(keyval)}, self.state) kix = k.ix else: kix = self.add_input(keyname, keyval, 2, trust) @@ -233,7 +262,8 @@ def constant_or_path(self, keyname, keyval, trust = False): def register_path(self): # initialize the path variable if not already set - if self.state_path == '': + #print("register_path called for", self.name, "with state_path", self.state_path) + if self.state_path == '' or self.state_path == False: self.make_paths() self.ix = set_state(self.state['state_ix'], self.state['state_paths'], self.state_path, self.default_value) # store object in model_object_cache @@ -344,7 +374,8 @@ def insure_register(self, var_name, default_value, register_container, register_ var_register = ModelRegister(var_name, register_container, default_value, register_path) else: # this is just a standard numerical data holder so set up a constant - var_register = ModelVariable(var_name, register_container, default_value, register_path) + reg_props = {'name':var_name, 'value':default_value, 'state_path':register_path} + var_register = ModelVariable(var_name, register_container, reg_props, self.state) else: var_register = self.state['model_object_cache'][register_path] return var_register @@ -375,11 +406,10 @@ def step(self, step): The class ModelVariable is a base cass for storing numerical values. Used for UVQUAN and misc numerical constants... """ class ModelVariable(ModelObject): - def __init__(self, name, container = False, value = 0.0, state_path = False): - if (state_path != False): - # this allows us to mandate the location. useful for placeholders, broadcasts, etc. 
- self.state_path = state_path - super(ModelVariable, self).__init__(name, container) + def __init__(self, name, container = False, model_props = None, state = None): + super(ModelVariable, self).__init__(name, container, model_props, state) + #print("ModelVariable named", name, "with path", self.state_path,"beginning") + value = self.handle_prop(model_props, 'value') self.default_value = float(value) self.optype = 7 # 0 - shell object, 1 - equation, 2 - datamatrix, 3 - input, 4 - broadcastChannel, 5 - SimTimer, 6 - Conditional, 7 - ModelVariable (numeric) #print("ModelVariable named",self.name, "with path", self.state_path,"and ix", self.ix, "value", value) @@ -396,8 +426,8 @@ def required_properties(): The class ModelConstant is for storing non-changing values. """ class ModelConstant(ModelVariable): - def __init__(self, name, container = False, value = 0.0, state_path = False): - super(ModelConstant, self).__init__(name, container, value, state_path) + def __init__(self, name, container = False, model_props = None, state = None): + super(ModelConstant, self).__init__(name, container, model_props, state) self.optype = 16 # 0 - shell object, 1 - equation, 2 - datamatrix, 3 - input, 4 - broadcastChannel, 5 - SimTimer, 6 - Conditional, 7 - ModelVariable (numeric) #print("ModelVariable named",self.name, "with path", self.state_path,"and ix", self.ix, "value", value) @@ -408,8 +438,8 @@ def __init__(self, name, container = False, value = 0.0, state_path = False): Maybe combined with stack behavior? Or accumulator? 
""" class ModelRegister(ModelVariable): - def __init__(self, name, container = False, value = 0.0, state_path = False): - super(ModelRegister, self).__init__(name, container, value, state_path) + def __init__(self, name, container = None, model_props = False, state = False): + super(ModelRegister, self).__init__(name, container, model_props, state) self.optype = 12 # # self.state['state_ix'][self.ix] = self.default_value diff --git a/src/hsp2/hsp2/om_sim_timer.py b/src/hsp2/hsp2/om_sim_timer.py index ec38f695..2f9f0532 100644 --- a/src/hsp2/hsp2/om_sim_timer.py +++ b/src/hsp2/hsp2/om_sim_timer.py @@ -11,7 +11,7 @@ from numpy import int64 class SimTimer(ModelObject): - def __init__(self, name, container, model_props = None): + def __init__(self, name, container, model_props = None, state = None): if model_props is None: model_props = {} # Note: hsp2 siminfo will match model_props here diff --git a/src/hsp2/hsp2/om_special_action.py b/src/hsp2/hsp2/om_special_action.py index e859095e..88e8a5b6 100644 --- a/src/hsp2/hsp2/om_special_action.py +++ b/src/hsp2/hsp2/om_special_action.py @@ -15,10 +15,10 @@ from hsp2.hsp2.om_model_object import ModelObject class SpecialAction(ModelObject): - def __init__(self, name, container = False, model_props = None): + def __init__(self, name, container = False, model_props = None, state = None): if model_props is None: model_props = {} - super(SpecialAction, self).__init__(name, container, model_props) + super(SpecialAction, self).__init__(name, container, model_props, state) self.optype = 100 # Special Actions start indexing at 100 @@ -46,14 +46,6 @@ def parse_model_props(self, model_props, strict=False): self.num = self.handle_prop(model_props, 'NUM', False, 1) # number of times to perform action self.timer_ix = self.handle_prop(model_props, 'when', False, 1) # when to begin the first attempt at action self.ctr_ix = self.constant_or_path('ctr', 0) # this initializes the counter for how many times an action has been performed - # 
NOTE: since the spec-action modifies the same quantity that is it's input, it does *not* set it as a proper "input" since that would create a circular dependency - domain = self.state['model_object_cache'][('/STATE/' + self.op_type + '_' + self.op_type[0] + str(self.range1).zfill(3) )] - var_register = self.insure_register(self.vari, 0.0, domain, False, False) - #print("Created register", var_register.name, "with path", var_register.state_path) - # add already created objects as inputs - var_register.add_object_input(self.name, self, 1) - self.op1_ix = var_register.ix - # @tbd: support time enable/disable # - check if time ops have been set and add as inputs like "year", or "month", etc could give explicit path /STATE/year ... # - add the time values to match as constants i.e. self.constant_or_path() @@ -125,6 +117,18 @@ def tokenize(self): self.ops = self.ops + [self.op1_ix, self.opid, self.op2_ix, self.timer_ix, self.ctr_ix, self.num] # @tbd: check if time ops have been set and tokenize accordingly + def find_paths(self): + # this makes sure that the source prop (and destination prop) exists in the model. + # NOTE: since the spec-action modifies the same quantity that is it's input, it does *not* set it as a proper "input" since that would create a circular dependency + domain_path = self.container.state_path + "/" + self.op_type + '_' + self.op_type[0] + str(self.range1).zfill(3) + domain = self.state['model_object_cache'][domain_path] + var_register = self.insure_register(self.vari, 0.0, domain, False, False) + #print("Created register", var_register.name, "with path", var_register.state_path) + # add already created objects as inputs + var_register.add_object_input(self.name, self, 1) + self.op1_ix = var_register.ix + return super().find_paths() + def add_op_tokens(self): # this puts the tokens into the global simulation queue # can be customized by subclasses to add multiple lines if needed. 
diff --git a/src/hsp2/hsp2/state.py b/src/hsp2/hsp2/state.py index a9c34305..84c31bce 100644 --- a/src/hsp2/hsp2/state.py +++ b/src/hsp2/hsp2/state.py @@ -105,7 +105,7 @@ def state_context_hsp2(state, operation, segment, activity): # give shortcut to state path for the upcoming function # insure that there is a model object container seg_name = operation + "_" + segment - seg_path = '/STATE/' + seg_name + seg_path = '/STATE/' + state['model_root_name'] + "/" + seg_name if 'hsp_segments' not in state.keys(): state['hsp_segments'] = {} # for later use by things that need to know hsp entities and their paths if seg_name not in state['hsp_segments'].keys(): @@ -113,12 +113,15 @@ def state_context_hsp2(state, operation, segment, activity): state['domain'] = seg_path # + "/" + activity # may want to comment out activity? -def state_siminfo_hsp2(uci_obj, siminfo): +def state_siminfo_hsp2(uci_obj, siminfo, io_manager, state): # Add crucial simulation info for dynamic operation support delt = uci_obj.opseq.INDELT_minutes[0] # get initial value for STATE objects siminfo['delt'] = delt siminfo['tindex'] = date_range(siminfo['start'], siminfo['stop'], freq=Minute(delt))[1:] siminfo['steps'] = len(siminfo['tindex']) + hdf5_path = io_manager._input.file_path + (fbase, fext) = os.path.splitext(hdf5_path) + state['model_root_name'] = os.path.split(fbase)[1] # takes the text before .h5 def state_init_hsp2(state, opseq, activities): # This sets up the state entries for all state compatible HSP2 model variables @@ -146,7 +149,7 @@ def state_load_dynamics_hsp2(state, io_manager, siminfo): def hydr_init_ix(state, domain): # get a list of keys for all hydr state variables - hydr_state = ["DEP","IVOL","O1","O2","O3","OVOL1","OVOL2","OVOL3","PRSUPY","RO","ROVOL","SAREA","TAU","USTAR","VOL","VOLEV"] + hydr_state = ["DEP","IVOL","O1","O2","O3", "O4", "O5","OVOL1","OVOL2","OVOL3","PRSUPY","RO","ROVOL","SAREA","TAU","USTAR","VOL","VOLEV"] hydr_ix = Dict.empty(key_type=types.unicode_type, 
value_type=types.int64) for i in hydr_state: #var_path = f'{domain}/{i}' diff --git a/tests/testcbp/HSP2results/JL1_6562_6560.simple.json b/tests/testcbp/HSP2results/JL1_6562_6560.simple.json new file mode 100644 index 00000000..a70e9e4c --- /dev/null +++ b/tests/testcbp/HSP2results/JL1_6562_6560.simple.json @@ -0,0 +1,21 @@ +{ + "RCHRES_R001": { + "name": "RCHRES_R001", + "object_class": "ModelObject", + "value": "0", + "wd_cfs": { + "name": "wd_cfs", + "object_class": "Equation", + "equation": "2.0" + }, + "WDwrite": { + "name": "WDwrite", + "object_class": "ModelLinkage", + "left_path": "[parent]/O2", + "right_path": "[parent]/wd_cfs", + "link_type": 5, + "comments": "The use of [parent] is useful in that it will insure that we create a variable if it doesn't exist. If the system already created O2 (it should) we would not need this. But it does not, so we do." + } + } +} + diff --git a/tests/testcbp/HSP2results/JL1_6562_6560.uci b/tests/testcbp/HSP2results/JL1_6562_6560.uci new file mode 100644 index 00000000..a6f3ad8f --- /dev/null +++ b/tests/testcbp/HSP2results/JL1_6562_6560.uci @@ -0,0 +1,261 @@ +RUN + +GLOBAL + JL1_6562_6 riv | P5 | subsheds | Beaver + START 1984/01/01 END 2020/12/31 + RUN INTERP OUTPUT LEVEL 1 1 + RESUME 0 RUN 1 UNIT SYSTEM 1 +END GLOBAL + +FILES + ***<----FILE NAME-------------------------------------------------> +WDM1 21 met_N51003.wdm +WDM2 22 prad_N51003.wdm +WDM3 23 ps_sep_div_ams_subsheds_JL1_6562_6560.wdm +WDM4 24 JL1_6562_6560.wdm +MESSU 25 JL1_6562_6560.ech + 26 JL1_6562_6560.out + 31 JL1_6562_6560.tau +END FILES + +OPN SEQUENCE + INGRP INDELT 01:00 + RCHRES 1 + PLTGEN 1 + END INGRP +END OPN SEQUENCE + +RCHRES + ACTIVITY + # - # HYFG ADFG CNFG HTFG SDFG GQFG OXFG NUFG PKFG PHFG *** + 1 1 1 0 0 0 0 0 0 0 0 + END ACTIVITY + + PRINT-INFO + # - # HYFG ADFG CNFG HTFG SDFG GQFG OXFG NUFG PKFG PHFG PIVL***PY + 1 5 5 0 0 0 0 0 0 0 0 0 12 + END PRINT-INFO + + GEN-INFO + RCHRES<-------Name------->Nexit Unit Systems Printer *** + # - # 
User t-series Engl Metr LKFG *** + 1 JL1_6562_6560 3 1 1 1 26 0 0 + END GEN-INFO + + HYDR-PARM1 + RCHRES Flags for HYDR section *** + # - # VC A1 A2 A3 ODFVFG for each ODGTFG for each *** FUNCT for each + FG FG FG FG possible exit possible exit *** possible exit + 1 2 3 4 5 1 2 3 4 5 *** 1 2 3 4 5 + VC A1 A2 A3 V1 V2 V3 V4 V5 G1 G2 G3 G4 G5 *** F1 F2 F3 F4 F5 + 1 0 1 1 1 0 0 4 0 0 1 2 0 0 0 0 0 0 0 0 + END HYDR-PARM1 + + HYDR-PARM2 + RCHRES *** + # - # FTABNO LEN DELTH STCOR KS DB50 *** + 1 1. 17.01 249.28 0.5 + END HYDR-PARM2 + + HYDR-INIT + RCHRES Initial conditions for HYDR section *** + # - # VOL Initial value of COLIND *** Initial value of OUTDGT + (ac-ft) for each possible exit *** for each possible exit + VOL CEX1 CEX2 CEX3 CEX4 CEX5 *** DEX1 DEX2 DEX3 DEX4 DEX5 + 1 293.16000 + END HYDR-INIT + + ADCALC-DATA + RCHRES Data for section ADCALC *** + # - # CRRAT VOL *** + 1 1.5 293.16 + END ADCALC-DATA + +END RCHRES + +FTABLES + FTABLE 1 +NOTE: FLOODPLAIN BASE = 5*BANKFULL WIDTH *** + FLOODPLAIN SIDE-SLOPE = SAME AS CHANNEL'S *** + ROWS COLS *** + 19 4 + DEPTH AREA VOLUME DISCH *** + (FT) (ACRES) (AC-FT) (CFS) *** + 0.000 0.000 0.00 0.00 + 0.423 11.894 4.78 23.85 + 0.846 13.061 10.06 77.00 + 1.269 14.227 15.83 154.23 + 1.691 15.393 22.09 254.22 + 2.114 16.560 28.85 376.68 + 2.537 17.726 36.10 521.75 + 2.960 18.893 43.84 689.83 + 3.383 20.059 52.07 881.43 + 3.806 21.225 60.80 1097.15 + 4.806 108.886 168.31 1305.89 + 6.108 112.478 312.43 3577.19 + 7.410 116.069 461.23 6694.41 + 8.712 119.661 614.71 10572.52 + 10.014 123.253 772.86 15161.71 + 11.316 126.845 935.69 20429.73 + 12.618 130.437 1103.20 26354.64 + 13.921 134.028 1275.38 32921.26 + 15.223 137.620 1452.24 40119.05 + END FTABLE 1 +END FTABLES + +EXT SOURCES +<-Volume-> SsysSgap<--Mult-->Tran <-Target vols> <-Grp> <-Member->*** + # # tem strg<-factor->strg # # # #*** +*** METEOROLOGY +WDM1 1000 EVAP ENGLZERO 1.000 SAME RCHRES 1 EXTNL POTEV +WDM1 1001 DEWP ENGLZERO SAME RCHRES 1 EXTNL DEWTMP +WDM1 1002 WNDH 
ENGLZERO SAME RCHRES 1 EXTNL WIND +WDM1 1003 RADH ENGLZERO SAME RCHRES 1 EXTNL SOLRAD +WDM1 1004 ATMP ENGLZERO SAME RCHRES 1 EXTNL GATMP +WDM1 1005 CLDC ENGLZERO SAME RCHRES 1 EXTNL CLOUD + +*** PRECIPITATION AND ATMOSPHERIC DEPOSITION LOADS +WDM2 2000 HPRC ENGLZERO SAME RCHRES 1 EXTNL PREC +WDM2 2001 NO23 ENGLZERO DIV RCHRES 1 EXTNL NUADFX 1 1 +WDM2 2002 NH4A ENGLZERO DIV RCHRES 1 EXTNL NUADFX 2 1 +WDM2 2003 NO3D ENGLZERO DIV RCHRES 1 EXTNL NUADFX 1 1 +WDM2 2004 NH4D ENGLZERO DIV RCHRES 1 EXTNL NUADFX 2 1 +WDM2 2005 ORGN ENGLZERO DIV RCHRES 1 EXTNL PLADFX 1 1 +WDM2 2006 PO4A ENGLZERO DIV RCHRES 1 EXTNL NUADFX 3 1 +WDM2 2007 ORGP ENGLZERO DIV RCHRES 1 EXTNL PLADFX 2 1 + +*** POINT SOURCE +WDM3 3000 FLOW ENGLZERO DIV RCHRES 1 INFLOW IVOL +WDM3 3001 HEAT ENGLZERO DIV RCHRES 1 INFLOW IHEAT +WDM3 3002 NH3X ENGLZERO DIV RCHRES 1 INFLOW NUIF1 2 +WDM3 3003 NO3X ENGLZERO DIV RCHRES 1 INFLOW NUIF1 1 +WDM3 3004 ORNX ENGLZERO DIV RCHRES 1 INFLOW PKIF 3 +WDM3 3005 PO4X ENGLZERO DIV RCHRES 1 INFLOW NUIF1 4 +WDM3 3006 ORPX ENGLZERO DIV RCHRES 1 INFLOW PKIF 4 +WDM3 3021 BODX ENGLZERO DIV RCHRES 1 INFLOW OXIF 2 +WDM3 3022 TSSX ENGLZERO 0.0005 DIV RCHRES 1 INFLOW ISED 3 +WDM3 3023 DOXX ENGLZERO DIV RCHRES 1 INFLOW OXIF 1 +WDM3 3024 TOCX ENGLZERO DIV RCHRES 1 INFLOW PKIF 5 + +*** RPA LOAD +WDM3 3031 NH3R ENGLZERO 1.0000 DIV RCHRES 1 INFLOW NUIF1 2 +WDM3 3032 NO3R ENGLZERO 1.0000 DIV RCHRES 1 INFLOW NUIF1 1 +WDM3 3033 RONR ENGLZERO 1.0000 DIV RCHRES 1 INFLOW PKIF 3 +WDM3 3034 PO4R ENGLZERO 1.0000 DIV RCHRES 1 INFLOW NUIF1 4 +WDM3 3035 ROPR ENGLZERO 1.0000 DIV RCHRES 1 INFLOW PKIF 4 +WDM3 3036 BODR ENGLZERO 1.0000 DIV RCHRES 1 INFLOW OXIF 2 +WDM3 3037 SNDR ENGLZERO 1.0000 DIV RCHRES 1 INFLOW ISED 1 +WDM3 3038 SLTR ENGLZERO 1.0000 DIV RCHRES 1 INFLOW ISED 2 +WDM3 3039 CLYR ENGLZERO 1.0000 DIV RCHRES 1 INFLOW ISED 3 + +*** DIVERSIONS +WDM3 3007 DIVR ENGLZERO SAME RCHRES 1 EXTNL OUTDGT 1 +WDM3 3008 DIVA ENGLZERO SAME RCHRES 1 EXTNL OUTDGT 2 + +*** SEPTIC +WDM3 3010 SNO3 ENGLZERO 1.0000 
DIV RCHRES 1 INFLOW NUIF1 1 + +*** RIB +WDM3 3012 NH3X ENGLZERO DIV RCHRES 1 INFLOW NUIF1 2 +WDM3 3013 NO3X ENGLZERO DIV RCHRES 1 INFLOW NUIF1 1 +WDM3 3014 ORNX ENGLZERO DIV RCHRES 1 INFLOW PKIF 3 +WDM3 3015 PO4X ENGLZERO DIV RCHRES 1 INFLOW NUIF1 4 +WDM3 3016 ORPX ENGLZERO DIV RCHRES 1 INFLOW PKIF 4 +WDM3 3017 BODX ENGLZERO DIV RCHRES 1 INFLOW OXIF 2 + +*** AEOLIAN SEDIMENT +WDM3 3061 SFAS ENGLZERO 7.027e-06DIV RCHRES 1 INFLOW ISED 2 +WDM3 3062 SFAC ENGLZERO 7.027e-06DIV RCHRES 1 INFLOW ISED 3 + +*** UPSTREAM and EOS INPUT *** +WDM4 11 WATR ENGLZERO SAME RCHRES 1 INFLOW IVOL +WDM4 12 HEAT ENGLZERO SAME RCHRES 1 INFLOW IHEAT +WDM4 13 DOXY ENGLZERO SAME RCHRES 1 INFLOW OXIF 1 +WDM4 21 SAND ENGLZERO SAME RCHRES 1 INFLOW ISED 1 +WDM4 22 SILT ENGLZERO SAME RCHRES 1 INFLOW ISED 2 +WDM4 23 CLAY ENGLZERO SAME RCHRES 1 INFLOW ISED 3 +WDM4 31 NO3D ENGLZERO SAME RCHRES 1 INFLOW NUIF1 1 +WDM4 32 NH3D ENGLZERO SAME RCHRES 1 INFLOW NUIF1 2 +WDM4 33 NH3A ENGLZERO SAME RCHRES 1 INFLOW NUIF2 1 1 +WDM4 34 NH3I ENGLZERO SAME RCHRES 1 INFLOW NUIF2 2 1 +WDM4 35 NH3C ENGLZERO SAME RCHRES 1 INFLOW NUIF2 3 1 +WDM4 36 RORN ENGLZERO SAME RCHRES 1 INFLOW PKIF 3 +WDM4 37 LORN ENGLZERO SAME RCHRES 1 INFLOW PKIF 3 +WDM4 41 PO4D ENGLZERO SAME RCHRES 1 INFLOW NUIF1 4 +WDM4 42 PO4A ENGLZERO SAME RCHRES 1 INFLOW NUIF2 1 2 +WDM4 43 PO4I ENGLZERO SAME RCHRES 1 INFLOW NUIF2 2 2 +WDM4 44 PO4C ENGLZERO SAME RCHRES 1 INFLOW NUIF2 3 2 +WDM4 45 RORP ENGLZERO SAME RCHRES 1 INFLOW PKIF 4 +WDM4 47 LORP ENGLZERO SAME RCHRES 1 INFLOW PKIF 4 +WDM4 51 BODA ENGLZERO SAME RCHRES 1 INFLOW OXIF 2 +WDM4 52 TORC ENGLZERO SAME RCHRES 1 INFLOW PKIF 5 +WDM4 53 PHYT ENGLZERO SAME RCHRES 1 INFLOW PKIF 1 +END EXT SOURCES + +EXT TARGETS +<-Volume-> <-Grp> <-Member-><--Mult-->Tran <-Volume-> Tsys Tgap Amd *** + # # #<-factor->strg # # tem strg strg*** +RCHRES 1 OFLOW OVOL 3 SAME WDM4 111 WATR ENGL REPL +RCHRES 1 OFLOW OHEAT 3 SAME WDM4 112 HEAT ENGL REPL +RCHRES 1 OFLOW OXCF2 3 1 SAME WDM4 113 DOXY ENGL REPL +RCHRES 1 OFLOW 
OSED 3 1 SAME WDM4 121 SAND ENGL REPL +RCHRES 1 OFLOW OSED 3 2 SAME WDM4 122 SILT ENGL REPL +RCHRES 1 OFLOW OSED 3 3 SAME WDM4 123 CLAY ENGL REPL +RCHRES 1 OFLOW NUCF9 3 1 SAME WDM4 131 NO3D ENGL REPL +RCHRES 1 OFLOW NUCF9 3 2 SAME WDM4 132 NH3D ENGL REPL +RCHRES 1 OFLOW OSNH4 3 1 SAME WDM4 133 NH3A ENGL REPL +RCHRES 1 OFLOW OSNH4 3 2 SAME WDM4 134 NH3I ENGL REPL +RCHRES 1 OFLOW OSNH4 3 3 SAME WDM4 135 NH3C ENGL REPL +RCHRES 1 OFLOW PKCF2 3 3 SAME WDM4 136 RORN ENGL REPL +RCHRES 1 OFLOW PKCF2 3 3 0. SAME WDM4 137 LORN ENGL REPL +RCHRES 1 OFLOW NUCF9 3 4 SAME WDM4 141 PO4D ENGL REPL +RCHRES 1 OFLOW OSPO4 3 1 SAME WDM4 142 PO4A ENGL REPL +RCHRES 1 OFLOW OSPO4 3 2 SAME WDM4 143 PO4I ENGL REPL +RCHRES 1 OFLOW OSPO4 3 3 SAME WDM4 144 PO4C ENGL REPL +RCHRES 1 OFLOW PKCF2 3 4 SAME WDM4 145 RORP ENGL REPL +RCHRES 1 OFLOW PKCF2 3 4 0. SAME WDM4 147 LORP ENGL REPL +RCHRES 1 OFLOW OXCF2 3 2 SAME WDM4 151 BODA ENGL REPL +RCHRES 1 OFLOW PKCF2 3 5 SAME WDM4 152 TORC ENGL REPL +RCHRES 1 OFLOW PKCF2 3 1 SAME WDM4 153 PHYT ENGL REPL +RCHRES 1 SEDTRN DEPSCR 4 SAME WDM4 124 SSCR ENGL REPL +END EXT TARGETS + +NETWORK +<-Volume-> <-Grp> <-Member-><--Mult-->Tran <-Target vols> <-Grp> <-Member-> *** + # # #<-factor->strg # # # # *** +RCHRES 1 HYDR TAU AVER PLTGEN 1 INPUT MEAN 1 +END NETWORK + +PLTGEN + PLOTINFO + # - # FILE NPT NMN LABL PYR PIVL *** + 1 31 1 12 24 + END PLOTINFO + + GEN-LABELS + # - #<----------------Title-----------------> *** + 1 JL1_6562_6560 daily_shear_stress_lbsft2 + END GEN-LABELS + + SCALING + #thru# YMIN YMAX IVLIN THRESH *** + 1 99 0. 100000. 20. 
+ END SCALING + + CURV-DATA + <-Curve label--> Line Intg Col Tran *** + # - # type eqv code code *** + 1 daily_shear_stre 1 1 AVER + END CURV-DATA +END PLTGEN + + + +SPEC-ACTIONS +*** test special actions + RCHRES 1 RSED 4 += 2.50E+05 + RCHRES 1 RSED 5 += 6.89E+05 + RCHRES 1 RSED 6 += 4.01E+05 +END SPEC-ACTIONS + +END RUN diff --git a/tests/testcbp/HSP2results/PL3_5250_0001.json.constant_wd b/tests/testcbp/HSP2results/PL3_5250_0001.json.constant_wd index 8a6d387e..f5d4cb5a 100644 --- a/tests/testcbp/HSP2results/PL3_5250_0001.json.constant_wd +++ b/tests/testcbp/HSP2results/PL3_5250_0001.json.constant_wd @@ -8,8 +8,8 @@ "object_class": "Constant", "value": 10.0 }, - "IVOLwrite": { - "name": "IVOLwrite", + "WDwrite": { + "name": "WDwrite", "object_class": "ModelLinkage", "left_path": "/STATE/RCHRES_R001/O2", "right_path": "/STATE/RCHRES_R001/wd_cfs", diff --git a/tests/testcbp/HSP2results/check_endpoint_ts.py b/tests/testcbp/HSP2results/check_endpoint_ts.py index 97ee7af5..23807ecc 100644 --- a/tests/testcbp/HSP2results/check_endpoint_ts.py +++ b/tests/testcbp/HSP2results/check_endpoint_ts.py @@ -23,7 +23,7 @@ # - finally stash specactions in state, not domain (segment) dependent so do it once # now load state and the special actions state = init_state_dicts() -state_initialize_om(state) +om_init_state(state) state['specactions'] = uci_obj.specactions # stash the specaction dict in state state_siminfo_hsp2(uci_obj, siminfo) @@ -33,7 +33,7 @@ # Iterate through all segments and add crucial paths to state # before loading dynamic components that may reference them state_init_hsp2(state, opseq, activities) -state_load_dynamics_specl(state, io_manager, siminfo) # traditional special actions +specl_load_state(state, io_manager, siminfo) # traditional special actions state_load_dynamics_om(state, io_manager, siminfo) # operational model for custom python state_om_model_run_prep(state, io_manager, siminfo) # this creates all objects from the UCI and previous loads # 
state['model_root_object'].find_var_path('RCHRES_R001') diff --git a/tests/testcbp/HSP2results/check_equation.py b/tests/testcbp/HSP2results/check_equation.py index 40bb1f23..0aacb677 100644 --- a/tests/testcbp/HSP2results/check_equation.py +++ b/tests/testcbp/HSP2results/check_equation.py @@ -1,18 +1,27 @@ # Must be run from the HSPsquared source directory, the h5 file has already been setup with hsp import_uci test10.uci # bare bones tester - must be run from the HSPsquared source directory +# Note: First time you clone git, or after a major refactor, you must reinstall (in a venv) using: +# pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host files.pythonhosted.org --default-timeout=100 -e . import os from hsp2.hsp2.main import * from hsp2.hsp2.om import * import numpy from hsp2.hsp2io.hdf import HDF5 from hsp2.hsp2io.io import IOManager -fpath = './tests/testcbp/HSP2results/JL1_6562_6560.h5' +# fpath = './tests/testcbp/HSP2results/JL1_6562_6560.h5' +fpath = 'C:/WorkSpace/modeling/projects/james_river/rivanna/beaver_hsp2/JL1_6562_6560.h5' + # try also: # fpath = './tests/testcbp/HSP2results/JL1_6562_6560.h5' -# sometimes when testing you may need to close the file, so try: +# sometimes when testing you may need to close the file, so try openg and closing with h5py: +# note: h5py: may need to pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host files.pythonhosted.org --default-timeout=100 h5py +# # f = h5py.File(fpath,'a') # use mode 'a' which allows read, write, modify # # f.close() hdf5_instance = HDF5(fpath) + + +## Does this help not cut off the i in io? 
io_manager = IOManager(hdf5_instance) uci_obj = io_manager.read_uci() siminfo = uci_obj.siminfo @@ -22,40 +31,57 @@ # - finally stash specactions in state, not domain (segment) dependent so do it once # now load state and the special actions state = init_state_dicts() -state_initialize_om(state) +state_siminfo_hsp2(uci_obj, siminfo, io_manager, state) +# Iterate through all segments and add crucial paths to state +# before loading dynamic components that may reference them +state_init_hsp2(state, opseq, activities) +om_init_state(state) state['specactions'] = uci_obj.specactions # stash the specaction dict in state -state_siminfo_hsp2(uci_obj, siminfo) # Add support for dynamic functions to operate on STATE # - Load any dynamic components if present, and store variables on objects state_load_dynamics_hsp2(state, io_manager, siminfo) -# Iterate through all segments and add crucial paths to state -# before loading dynamic components that may reference them -state_init_hsp2(state, opseq, activities) -state_load_dynamics_specl(state, io_manager, siminfo) # traditional special actions +specl_load_state(state, io_manager, siminfo) # traditional special actions state_load_dynamics_om(state, io_manager, siminfo) # operational model for custom python -state_om_model_run_prep(state, io_manager, siminfo) # this creates all objects from the UCI and previous loads -# state['model_root_object'].find_var_path('RCHRES_R001') -# Get the timeseries naked, without an object -Rlocal = state['model_object_cache']['/STATE/RCHRES_R001/Rlocal'] -Rlocal_ts = Rlocal.read_ts() -rchres1 = state['model_object_cache']['/STATE/RCHRES_R001'] -Rlocal_check = ModelLinkage('Rlocal1', rchres1, {'right_path':'/TIMESERIES/TS010', 'link_type':3}) -# Calls: -# - ts = Rlocal.io_manager.read_ts(Category.INPUTS, None, Rlocal.ts_name) -# - ts = transform(ts, Rlocal.ts_name, 'SAME', Rlocal.siminfo) -Rlocal.io_manager._output._store.keys() -# write it back. 
We can give an arbitrary name or it will default to write back to the source path in right_path variable -ts1 = precip_ts.read_ts() # same as precip_ts.ts_ix[precip_ts.ix], same as state['ts_ix'][precip_ts.ix] -# we can specify a custom path to write this TS to -precip_ts.write_path = '/RESULTS/test_TS039' -precip_ts.write_ts() -# precip_ts.write_ts is same as: -# ts4 = precip_ts.format_ts(ts1, ['tsvalue'], siminfo['tindex']) -# ts4.to_hdf(precip_ts.io_manager._output._store, precip_ts.write_path, format='t', data_columns=True, complevel=precip_ts.complevel) - -start = time.time() -iterate_models(model_exec_list, op_tokens, state_ix, dict_ix, ts_ix, siminfo['steps'], -1) -end = time.time() -print(len(model_exec_list), "components iterated over state_ix", siminfo['steps'], "time steps took" , end - start, "seconds") +state_om_model_run_prep(state, io_manager, siminfo) +# Inspect the dynamic stuff +# Can use the discovery tools like so: +river = state['model_root_object'].get_object('RCHRES_R001') +# or find_var_path() from the model root +# river = state['model_object_cache'][state['model_root_object'].find_var_path('RCHRES_R001')] +# or, can use the path if we know it: +# river = state['model_object_cache']["/STATE/JL1_6562_6560/RCHRES_R001"] +# or, combine the full path and object retrieval +# river = state['model_root_object'].get_object("/STATE/JL1_6562_6560/RCHRES_R001") + +wd_cfs = river.get_object('wd_cfs') +WDwrite = river.get_object('WDwrite') +specl1 = river.get_object('SPECACTION1') + + +domain = river.state_path +ep_list = ["DEP","IVOL","O1","O2","O3","OVOL1","OVOL2","OVOL3","PRSUPY","RO","ROVOL","SAREA","TAU","USTAR","VOL","VOLEV"] +model_exec_list = model_domain_dependencies(state, domain, ep_list) + +get_ix_path(state['state_paths'], river.container.ix) +get_ix_path(state['state_paths'], WDwrite.ops[2]) # op 2 (the 3rd op) is for the left_path, which is where we write the value + +# run the simulation +from hsp2.hsp2tools.commands import import_uci, 
run +run(fpath, saveall=True, compress=False) + +# Now, load the timeseries from hdf5 and check the values from the simulation. +from pandas import read_hdf +hydr_path = '/RESULTS/RCHRES_R001/HYDR' +HYDR_ts = read_hdf(io_manager._output._store, hydr_path) +# Note, in order to run from cmd prompt in another window, may need to close the h5: +io_manager._output._store.close() +io_manager._input._store.close() +Qout = HYDR_ts['OVOL3']*12.1 #Qout in units of cfs +wd = HYDR_ts['O1'] + HYDR_ts['O2'] +Qout.quantile([0,0.1,0.5,0.75,0.9,1.0]) +Qout.mean() +wd.quantile([0,0.1,0.5,0.75,0.9,1.0]) +wd.mean() +# \ No newline at end of file