diff --git a/.travis.yml b/.travis.yml index 32075f2c..42a2e1b3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,9 +10,9 @@ cache: pip env: global: - - OMC_VERSION=ubuntu-2004-omc:1.19.0_dev-539-gb76366f-1 - - OPTIMICA_VERSION=travis-ubuntu-1804-optimica:r26446 - - DYMOLA_VERSION=travis_ubuntu-2004_dymola:2022x-x86_64 + - OMC_VERSION=ubuntu-2004-omc:1.20.0_dev-314-g3033f43-1 + - OPTIMICA_VERSION=travis-ubuntu-1804-optimica:2022-05-09-master-4b0cd2bf71 + - DYMOLA_VERSION=travis_ubuntu-2004_dymola:2022x-x86_64_rev-2 - MPLBACKEND=agg notifications: @@ -73,7 +73,6 @@ script: - make unittest_development_error_dictionary - make unittest_development_merger - make unittest_development_refactor - - make unittest_development_regressiontest_jmodelica - make unittest_development_regressiontest_openmodelica - make unittest_development_regressiontest_optimica - make unittest_development_regressiontest diff --git a/Makefile b/Makefile index febea549..7efd030a 100644 --- a/Makefile +++ b/Makefile @@ -41,9 +41,6 @@ unittest_development_merger: unittest_development_refactor: python3 buildingspy/tests/test_development_refactor.py -unittest_development_regressiontest_jmodelica: - python3 buildingspy/tests/test_development_regressiontest_jmodelica.py - unittest_development_regressiontest_openmodelica: python3 buildingspy/tests/test_development_regressiontest_openmodelica.py @@ -56,6 +53,9 @@ unittest_development_regressiontest: unittest_development_Validator: python3 buildingspy/tests/test_development_Validator.py +unittest_development_Comparator: + python3 buildingspy/tests/test_development_Comparator.py + unittest_examples_dymola: python3 buildingspy/tests/test_examples_dymola.py diff --git a/README.rst b/README.rst index cc8f70dd..4dc789b2 100644 --- a/README.rst +++ b/README.rst @@ -6,8 +6,8 @@ BuildingsPy BuildingsPy is a Python package that can be used to -* run Modelica simulation using Dymola or JModelica -* process ``*.mat`` output files that were generated by Dymola, 
JModelica or OpenModelica. +* run Modelica simulation using Dymola or OPTIMICA +* process ``*.mat`` output files that were generated by Dymola, OPTIMICA or OpenModelica. * run unit tests as part of the library development. The package provides functions to extract data series from diff --git a/buildingspy/CHANGES.txt b/buildingspy/CHANGES.txt index 504e1480..66b023a5 100644 --- a/buildingspy/CHANGES.txt +++ b/buildingspy/CHANGES.txt @@ -1,5 +1,24 @@ BuildingsPy Changelog --------------------- + +Version 4.x.x, xxx -- Release 4.0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +- Added class buildingspy.development.simulationCompare that compares + the simulation performance across tools or git branches. + (https://github.com/lbl-srg/BuildingsPy/issues/492) +- Refactored regression tests for Dymola to allow specifying a time out for each tests, and set the default time out to 300 seconds. + (https://github.com/lbl-srg/BuildingsPy/issues/495) +- Add option to exclude simulation of models from Dymola CI tests. + (https://github.com/lbl-srg/BuildingsPy/pull/486) + +Version 4.0.0, May 12, 2022 -- Release 4.0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +- Removed JModelica support, and added support for new OPTIMICA compile_fmu API. + (https://github.com/lbl-srg/BuildingsPy/pull/480) +- For simulation and unit tests, updated the API for OPTIMICA to the one used in oct-2022-05-09-master-4b0cd2bf71 + (https://github.com/lbl-srg/BuildingsPy/issues/479) +- For simulation, corrected a bug that led to an error message when a model from the Modelica Standard Library is simulated + (https://github.com/lbl-srg/BuildingsPy/issues/472) - For unit tests, enabled option to run tests with OpenModelica. This change also allows specifying the test configuration in the more concise conf.yml rather than conf.json file. 
diff --git a/buildingspy/README.rst b/buildingspy/README.rst index cc8f70dd..4dc789b2 100644 --- a/buildingspy/README.rst +++ b/buildingspy/README.rst @@ -6,8 +6,8 @@ BuildingsPy BuildingsPy is a Python package that can be used to -* run Modelica simulation using Dymola or JModelica -* process ``*.mat`` output files that were generated by Dymola, JModelica or OpenModelica. +* run Modelica simulation using Dymola or OPTIMICA +* process ``*.mat`` output files that were generated by Dymola, OPTIMICA or OpenModelica. * run unit tests as part of the library development. The package provides functions to extract data series from diff --git a/buildingspy/VERSION b/buildingspy/VERSION index 56fea8a0..88fcc8cb 100644 --- a/buildingspy/VERSION +++ b/buildingspy/VERSION @@ -1 +1 @@ -3.0.0 \ No newline at end of file +4.0.dev1 diff --git a/buildingspy/development/__init__.py b/buildingspy/development/__init__.py index 2e24e1b1..96bb4af2 100644 --- a/buildingspy/development/__init__.py +++ b/buildingspy/development/__init__.py @@ -1,10 +1,11 @@ """ This module contains the classes -- *refactor*, a module that assists in refactoring Modelica classes, -- *Tester* that runs the unit tests of the `Buildings` library, -- *Validator* that validates the html code of the info section of the `.mo` files, and -- *IBPSA* that synchronizes Modelica libraries with the `IBPSA` library. -- *ErrorDictionary* that contains information about possible error strings. +- :func:`refactor `, a module that assists in refactoring Modelica classes, +- :func:`Tester ` that runs the unit tests of the `Buildings` library, +- :func:`Validator ` that validates the html code of the info section of the `.mo` files, and +- :func:`IBPSA ` that synchronizes Modelica libraries with the `IBPSA` library. +- :func:`ErrorDictionary ` that contains information about possible error strings. +- :func:`Comparator ` that compares the simulation performance across simulation tools or git branches. 
""" diff --git a/buildingspy/development/dymola_run.template b/buildingspy/development/dymola_run.template new file mode 100644 index 00000000..4f97000c --- /dev/null +++ b/buildingspy/development/dymola_run.template @@ -0,0 +1,83 @@ +def _add_exception(return_dict, e, cmd): + import subprocess + + return_dict['success'] = False + +# if isinstance(e, subprocess.CalledProcessError): +# # Check if simulation terminated, and if so, get the error +# return_dict['stdout'] = e.output.decode("utf-8") +# output = return_dict['stdout'] +# for line in output.split('\n'): +# if 'terminated' in line: +# # Found terminated string. Cut everything after the '|' character that OpenModelica writes. +# idx=line.rfind('|') +# msg=line[idx+1:].strip() +# # The solver terminated. Add this information to a custom exception message. +# return_dict['exception'] = f"'{' '.join(cmd)}' caused '{msg}'." +# pass + + if not 'exception' in return_dict: + # Did not find 'terminated' in message, handle exception as usual + return_dict['exception'] = '{}: {}'.format(type(e).__name__, e) + + +def _run_process(return_dict, cmd, worDir, timeout): + import subprocess + + output = subprocess.check_output( + cmd, + cwd = worDir, + timeout=timeout, + stderr=subprocess.STDOUT, + shell=False) + + return_dict['success'] = True + if 'stdout' in return_dict: + return_dict['stdout'] += output.decode("utf-8") + else: + return_dict['stdout'] = output.decode("utf-8") + return + +def _simulate(model, timeout): + import os + import subprocess + + worDir = "{{ working_directory }}" + return_dict = {} + + try: + cmd = {{ cmd }} + return_dict['cmd'] = ' '.join(cmd) + output = _run_process(return_dict, cmd, worDir, timeout) + + except Exception as e: + _add_exception(return_dict, e, cmd) + return return_dict + +def run(): + import os + import json + import traceback + import sys + + timeout = {{ time_out }} + model = "{{ model }}" + result = {"model": model, + "working_directory": "{{ working_directory }}", + 
"simulation": {"success": False}} + + # Log file + log_file = "{}_buildingspy.json".format(model.replace(".", "_")) + try: + os.remove(log_file) + except OSError: + pass + + # Simulate model + result["simulation"] = _simulate(model, timeout) + + with open(log_file, "w") as log: + log.write("{}\n".format(json.dumps(result, indent=4, sort_keys=False)) ) + +if __name__=="__main__": + run() diff --git a/buildingspy/development/error_dictionary_jmodelica.py b/buildingspy/development/error_dictionary_jmodelica.py deleted file mode 100644 index 760ce657..00000000 --- a/buildingspy/development/error_dictionary_jmodelica.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -####################################################### -# Class that contains data fields needed for the -# error checking of the regression tests for JModelica -# -# -# MWetter@lbl.gov 2019-01-04 -####################################################### -# -import buildingspy.development.error_dictionary_optimica as ed - - -class ErrorDictionary(ed.ErrorDictionary): - """ Class that contains data fields needed for the - error checking of the regression tests. - - If additional error messages need to be checked, - then they should be added to the constructor of this class. - """ diff --git a/buildingspy/development/jmodelica_run.template b/buildingspy/development/jmodelica_run.template deleted file mode 100644 index 35c6be47..00000000 --- a/buildingspy/development/jmodelica_run.template +++ /dev/null @@ -1,161 +0,0 @@ -############################################################### -# Script to test the model {{ model }} -# with JModelica. -# This script will create a json file that contains translation -# and simulation information. 
-############################################################### - -# Import the class that grabs stdout -import OutputGrabber as og - -def process_with_timeout(target, timeout): - import multiprocessing - import time - import copy - - manager = multiprocessing.Manager() - return_dict = manager.dict() - p = multiprocessing.Process(target=target, args=(0, return_dict)) - p.daemon = True - start = time.time() - p.start() - if timeout > 0: - p.join(timeout) - else: - p.join() - - cpu_time = time.time() - start - - error_msg = None - if p.is_alive(): - error_msg = "Process timeout" - p.terminate() - elif p.exitcode != 0: - error_msg = "Process terminated by signal {}".format(-p.exitcode) - if error_msg is not None: - raise RuntimeError(error_msg) - - ret = copy.copy(return_dict[0]) - ret.update({'cpu_time': cpu_time}) - return ret - -def _translate(proc_num, return_dict): - from pymodelica import compile_fmu - - try: - # Grab the stdoutput - out = og.OutputGrabber() - out.start() - fmu_name = compile_fmu("{{ model }}", - version="2.0", - compiler_log_level='warning', - compiler_options = {"generate_html_diagnostics" : False, - "nle_solver_tol_factor": 1e-2}) - out.stop() - # The standard output is returned as a list, with each line being an element - return_dict[proc_num] = {'success': True, 'fmu_name': str(fmu_name), 'stdout': out.capturedtext.split('\n')} - - except Exception as e: - return_dict[proc_num] = {'success': False, - 'exception': '{}: {}'.format(type(e).__name__, e)} - return - -def _simulate(proc_num, return_dict): - from pyfmi import load_fmu - - if not {{ simulate }}: - return_dict[proc_num] = {'success': False, - 'message': 'No simulation requested.'} - return return_dict - - # Simulate the model -# - - try: - fmu_name = "{{ model }}".replace(".", "_") + ".fmu" - mod = load_fmu(fmu_name) - x_nominal = mod.nominal_continuous_states - - opts = mod.simulate_options() #Retrieve the default options - opts['logging'] = False - opts['solver'] = '{{ solver }}' 
- opts['ncp'] = {{ ncp }} - - rtol = {{ rtol }} - - if len(x_nominal) > 0: - atol = rtol*x_nominal - else: - atol = rtol - - if opts['solver'].lower() == 'cvode': - # Set user-specified tolerance if it is smaller than the tolerance in the .mo file - opts['CVode_options']['external_event_detection'] = False - opts['CVode_options']['maxh'] = (mod.get_default_experiment_stop_time()-mod.get_default_experiment_start_time())/float(opts['ncp']) - opts['CVode_options']['iter'] = 'Newton' - opts['CVode_options']['discr'] = 'BDF' - opts['CVode_options']['rtol'] = rtol - opts['CVode_options']['atol'] = atol - opts['CVode_options']['store_event_points'] = True # True is default, set to false if many events - - opts['filter'] = {{ filter }} - # Grab the stdoutput - out = og.OutputGrabber() - out.start() - res = mod.simulate(options=opts) - out.stop() - start_time = res['time'][0] - final_time = res['time'][-1] - return_dict[proc_num] = {'success': True, 'start_time': start_time, 'final_time': final_time, 'stdout': out.capturedtext.split('\n')} - - except Exception as e: - return_dict[proc_num] = {'success': False, - 'exception': '{}: {}'.format(type(e).__name__, e)} - return return_dict - -def run(): - import os - import json - import traceback - import sys - - import pymodelica - # Increase memory - pymodelica.environ['JVM_ARGS'] = '-Xmx4096m' - - time_out = {{ time_out }} - model = "{{ model }}" - result = {"model": model, - "translation": {"success": False}, - "simulation": {"success": False}} - - # Compile model - log_file = "{}_buildingspy.json".format(model.replace(".", "_")) - try: - os.remove(log_file) - except OSError: - pass - - try: - ret_dic = process_with_timeout(target=_translate, timeout=time_out) - result["translation"] = ret_dic - - except Exception as e: - result["translation"]["exception"] = "{}: {}".format(type(e).__name__, e) - result["translation"]["traceback"] = traceback.format_exc() - - # Load model if translation was successful - if 
result["translation"]["success"]: - try: - ret_dic = process_with_timeout(target=_simulate, timeout=time_out) - result["simulation"] = ret_dic - - except Exception as e: - result["simulation"]["exception"] = "{}: {}".format(type(e).__name__, e) - result["simulation"]["traceback"] = traceback.format_exc() - - with open(log_file, "w") as log: - log.write("{}\n".format(json.dumps(result, indent=4, sort_keys=False)) ) - -if __name__=="__main__": - run() diff --git a/buildingspy/development/merger.py b/buildingspy/development/merger.py index 15aad458..94fc2171 100755 --- a/buildingspy/development/merger.py +++ b/buildingspy/development/merger.py @@ -52,14 +52,15 @@ def isValidLibrary(lib_home): # Exclude packages and files self.set_excluded_directories(["Experimental", "Obsolete", - ".FMUOutput"]) + ".FMUOutput", + ".vscode"]) self._excluded_files = [os.path.join(ibpsa_dir, "package.mo"), os.path.join(ibpsa_dir, "dymosim"), os.path.join(ibpsa_dir, "dymosim.exe"), os.path.join(ibpsa_dir, "request"), os.path.join(ibpsa_dir, "status"), os.path.join(ibpsa_dir, "success"), - os.path.join(ibpsa_dir, "ds*.txt"), + os.path.join(ibpsa_dir, "*.txt"), os.path.join(ibpsa_dir, "*.c"), os.path.join(ibpsa_dir, "*.exe"), os.path.join(ibpsa_dir, "*.csv"), @@ -72,16 +73,15 @@ def isValidLibrary(lib_home): os.path.join(ibpsa_dir, "*.pdf"), os.path.join(ibpsa_dir, "*.svg"), os.path.join(ibpsa_dir, "*.pyc"), + os.path.join(ibpsa_dir, "*~"), os.path.join(ibpsa_dir, "nohup.out"), os.path.join(ibpsa_dir, "funnel_comp", "plot.html"), os.path.join(ibpsa_dir, "funnel_comp", "**", "*.csv"), os.path.join(ibpsa_dir, "Fluid", "package.mo"), os.path.join(ibpsa_dir, "Resources", - "Scripts", "Dymola", "ConvertIBPSA_from_*.mos"), + "Scripts", "Conversion", "ConvertIBPSA_from_*.mos"), os.path.join(ibpsa_dir, "Resources", "Scripts", "travis", "Makefile"), - os.path.join(ibpsa_dir, "Resources", - "Scripts", "github-actions", "jmodelica", "jm_ipython.sh"), os.path.join(ibpsa_dir, "Resources", "Scripts", 
"BuildingsPy", "conf.json"), os.path.join(ibpsa_dir, "Resources", diff --git a/buildingspy/development/openmodelica_run_all.template b/buildingspy/development/openmodelica_run_all.template deleted file mode 100644 index 78503680..00000000 --- a/buildingspy/development/openmodelica_run_all.template +++ /dev/null @@ -1,11 +0,0 @@ -{% for model in models_underscore %} -def run_{{ model }}(): - import {{ model }} as m - m.run() -{% endfor %} - - -if __name__=="__main__": - {% for model in models_underscore %} - run_{{ model }}() - {% endfor %} diff --git a/buildingspy/development/optimica_run.template b/buildingspy/development/optimica_run.template index 9c39007c..fd67c224 100644 --- a/buildingspy/development/optimica_run.template +++ b/buildingspy/development/optimica_run.template @@ -1,6 +1,3 @@ -# Import the class that grabs stdout -import OutputGrabber as og - def process_with_timeout(target, timeout): import multiprocessing import time @@ -35,31 +32,65 @@ def process_with_timeout(target, timeout): def _translate(proc_num, return_dict): import os from pymodelica import compile_fmu + from inspect import signature generate_html_diagnostics={{ generate_html_diagnostics }} try: - # Grab the stdoutput - out = og.OutputGrabber() - out.start() - fmu_name = compile_fmu("{{ model }}{{ model_modifier }}", - version="2.0", - compiler_log_level='warning', - compiler_options = {"generate_html_diagnostics" : generate_html_diagnostics, - "nle_solver_tol_factor": 1e-2}) - out.stop() + # OCT r28242 ignores MODELICAPATH and instead needs to have it set through a function argument. + compilation_log = "{{ model }}".replace('.', '_') + "_compile.log" + version="2.0" + compiler_log_level=f"warning:{compilation_log}" + compiler_options = {"generate_html_diagnostics" : generate_html_diagnostics, + "nle_solver_tol_factor": 1e-2} # 1e-2 is the default + + sig = signature(compile_fmu) + if "modelicapath" in str(sig): + # This is the new API that uses modelicapath as a function argument. 
+ + removed_modelica_path = False + if 'MODELICAPATH' in os.environ: + modelicapath=os.environ['MODELICAPATH'] + del os.environ['MODELICAPATH'] + removed_modelica_path = True + else: + modelicapath=os.path.abspath('.') + + fmu_name = compile_fmu("{{ model }}{{ model_modifier }}", + modelicapath=modelicapath, + version=version, + compiler_log_level=compiler_log_level, + compiler_options=compiler_options) + else: + fmu_name = compile_fmu("{{ model }}{{ model_modifier }}", + version=version, + compiler_log_level=compiler_log_level, + compiler_options=compiler_options) + + if removed_modelica_path: + os.environ['MODELICAPATH'] = modelicapath # Copy style sheets. # This is a hack to get the css and js files to render the html diagnostics. htm_dir = os.path.splitext(os.path.basename(fmu_name))[0] + "_html_diagnostics" if generate_html_diagnostics and os.path.exists(htm_dir): - for fil in ["scripts.js", "style.css", "zepto.min.js"]: + for fil in ["scripts.js", "style.css", "html-diagnostics.css", "zepto.min.js"]: src = os.path.join(".jmodelica_html", fil) if os.path.exists(src): des = os.path.join(htm_dir, fil) shutil.copyfile(src, des) + + # Read log file + out = None + if os.path.isfile(compilation_log): + with open(compilation_log, 'r') as f: + out = f.readlines() + else: + out = f"Error: Log file {compilation_log} does not exist." 
+ + # The standard output is returned as a list, with each line being an element - return_dict[proc_num] = {'success': True, 'fmu_name': str(fmu_name), 'stdout': out.capturedtext.split('\n')} + return_dict[proc_num] = {'success': True, 'fmu_name': str(fmu_name), 'stdout': out} except Exception as e: return_dict[proc_num] = {'success': False, @@ -69,6 +100,9 @@ def _translate(proc_num, return_dict): def _simulate(proc_num, return_dict): from pyfmi import load_fmu + # Import the class that grabs stdout + import OutputGrabber as og + if not {{ simulate }}: return_dict[proc_num] = {'success': False, 'message': 'No simulation requested.'} diff --git a/buildingspy/development/optimica_run_all.template b/buildingspy/development/optimica_run_all.template deleted file mode 100644 index 78503680..00000000 --- a/buildingspy/development/optimica_run_all.template +++ /dev/null @@ -1,11 +0,0 @@ -{% for model in models_underscore %} -def run_{{ model }}(): - import {{ model }} as m - m.run() -{% endfor %} - - -if __name__=="__main__": - {% for model in models_underscore %} - run_{{ model }}() - {% endfor %} diff --git a/buildingspy/development/regressiontest.py b/buildingspy/development/regressiontest.py index fe145985..c08e9eb6 100644 --- a/buildingspy/development/regressiontest.py +++ b/buildingspy/development/regressiontest.py @@ -26,13 +26,11 @@ import time import webbrowser # Third-party module or package imports. -import matplotlib.pyplot as plt import numpy as np import simplejson # Code repository sub-package imports. import pyfunnel from buildingspy.development import error_dictionary_openmodelica -from buildingspy.development import error_dictionary_jmodelica from buildingspy.development import error_dictionary_optimica from buildingspy.development import error_dictionary_dymola from buildingspy.io.outputfile import Reader @@ -51,7 +49,7 @@ def runSimulation(worDir, cmd): .. note:: This method is outside the class definition to allow parallel computing. 
""" - # JModelica requires the working directory to be part of MODELICAPATH + # OPTIMICA requires the working directory to be part of MODELICAPATH env = os.environ.copy() # will be passed to the subprocess.Popen call if 'MODELICAPATH' in os.environ: env['MODELICAPATH'] = "{}:{}".format(worDir, os.environ['MODELICAPATH']) @@ -126,7 +124,7 @@ class Tester(object): :param check_html: Boolean (default ``True``). Specify whether to load tidylib and perform validation of html documentation. - :param tool: string {``'dymola'``, ``'openmodelica'``, ``'optimica'``, ``'jmodelica'``}. + :param tool: string {``'dymola'``, ``'openmodelica'``, ``'optimica'``}. Default is ``'dymola'``, specifies the tool to use for running the regression test with :func:`~buildingspy.development.Tester.run`. :param cleanup: Boolean (default ``True``). Specify whether to delete temporary directories. @@ -182,15 +180,16 @@ class Tester(object): >>> rt = r.Tester(tool="dymola") >>> myMoLib = os.path.join("buildingspy", "tests", "MyModelicaLibrary") >>> rt.setLibraryRoot(myMoLib) - >>> rt.run() # doctest: +ELLIPSIS - Number of models : 10 + >>> rt.run() # doctest: +SKIP + MyModelicaLibrary.Examples.NoSolution: Excluded from simulation. Model excluded from simulation as it has no solution. + Number of models : 11 blocks : 2 functions: 0 Using ... of ... processors to run unit tests for dymola. Generated 7 regression tests. Comparison files output by funnel are stored in the directory 'funnel_comp' of size ... MB. - Run 'python -c "import buildingspy.development.regressiontest as t; t.Tester().report()"' + Run 'python -c "import buildingspy.development.regressiontest as t; t.Tester(tool=\\\"dymola\\\").report()"' to access a summary of the comparison results. Script that runs unit tests had 0 warnings and 0 errors. @@ -203,17 +202,17 @@ class Tester(object): To run regression tests only for a single package, call :func:`setSinglePackage` prior to :func:`run`. 
- *Regression testing using OpenModelica, OPTIMICA or JModelica* + *Regression testing using OpenModelica or OPTIMICA* - For OpenModelica, OPTIMICA and JModelica, the selection of test cases is done the same + For OpenModelica and OPTIMICA, the selection of test cases is done the same way as for Dymola. However, the solver tolerance is obtained from the `.mo` file by reading the annotation `Tolerance="value"`. - For OpenModelica, OPTIMICA and JModelica, a JSON file stored as + For OpenModelica and OPTIMICA, a JSON file stored as ``Resources/Scripts/BuildingsPy/conf.yml`` (or for backward compatibility, in `conf.json`) can be used to further configure tests. The file has the syntax below, - where ``openmodelica``, ``optimica`` or ``jmodelica`` specifies the tool. + where ``openmodelica`` or ``optimica`` specifies the tool. .. code-block:: javascript @@ -229,7 +228,7 @@ class Tester(object): For OpenModelica, replace ``optimica`` with ``openmodelica``. - For the detailed specifiation of allowed fields, see ``buildingspy/templates/regressiontest_conf.py``. + For the detailed specification of allowed fields, see ``buildingspy/templates/regressiontest_conf.py``. Any entries are optional, and the entries shown above are the default values, except for the relative tolerance `rtol` @@ -254,8 +253,6 @@ def __init__( """ Constructor.""" if tool == 'optimica': e = error_dictionary_optimica - elif tool == 'jmodelica': - e = error_dictionary_jmodelica elif tool == 'openmodelica': e = error_dictionary_openmodelica else: @@ -274,11 +271,11 @@ def __init__( self._rootPackage = os.path.join(self._libHome, 'Resources', 'Scripts', 'Dymola') # Set the tool - if tool in ['dymola', 'openmodelica', 'optimica', 'jmodelica']: + if tool in ['dymola', 'openmodelica', 'optimica']: self._modelica_tool = tool else: raise ValueError( - "Value of 'tool' of constructor 'Tester' must be 'dymola', 'openmodelica', 'optimica' or 'jmodelica'. 
Received '{}'.".format(tool)) + "Value of 'tool' of constructor 'Tester' must be 'dymola', 'openmodelica' or 'optimica'. Received '{}'.".format(tool)) # File to which the console output of the simulator is written self._simulator_log_file = "simulator-{}.log".format(tool) # File to which the console output of the simulator of failed simulations is written @@ -372,12 +369,14 @@ def __init__( self._color_BOLD = '\033[1m' self._color_OK = '\033[1;32m' self._color_GREY = '\033[90m' + self._color_WARNING = '\033[93m' self._color_ERROR = '\033[91m' self._color_ENDC = '\033[0m' else: self._color_BOLD = '' self._color_OK = '' self._color_GREY = '' + self._color_WARNING = '' self._color_ERROR = '' self._color_ENDC = '' @@ -415,7 +414,7 @@ def report(self, timeout=600, browser=None, autoraise=True, comp_file=None): def get_unit_test_log_file(self): """ Return the name of the log file of the unit tests, - such as ``unitTests-openmodelica.log``, ``unitTests-optimica.log``, ``unitTests-jmodelica.log`` or ``unitTests-dymola.log``. + such as ``unitTests-openmodelica.log``, ``unitTests-optimica.log`` or ``unitTests-dymola.log``. """ return "unitTests-{}.log".format(self._modelica_tool) @@ -427,8 +426,6 @@ def _initialize_error_dict(self): import buildingspy.development.error_dictionary_openmodelica as e elif self._modelica_tool == 'optimica': import buildingspy.development.error_dictionary_optimica as e - elif self._modelica_tool == 'jmodelica': - import buildingspy.development.error_dictionary_jmodelica as e else: import buildingspy.development.error_dictionary_dymola as e @@ -1030,7 +1027,7 @@ def _set_attribute_value(line, keyword, dat): # raise ValueError(msg) dat['dymola']['TranslationLogFile'] = dat['model_name'] + ".translation.log" # Get tolerance from mo file. This is used to set the tolerance - # for OpenModelica, OPTIMICA and JModelica. + # for OpenModelica and OPTIMICA. 
# Only get the tolerance for the models that need to be simulated, # because those that are only exported as FMU don't need this setting. if not dat['dymola']['exportFMU']: @@ -1059,8 +1056,6 @@ def _set_attribute_value(line, keyword, dat): "_", "_0").replace(".", "_") dat['dymola']['FMUName'] = dat['dymola']['FMUName'] + ".fmu" # Plot variables are only used for those models that need to be simulated. - # For JModelica, if dat['jmodelica']['simulate'] == False: - # dat['ResultVariables'] is reset to [] in _add_experiment_specifications if not dat['dymola']['exportFMU']: plotVars = [] iLin = 0 @@ -1205,7 +1200,10 @@ def _verify_model_exists(model_name): self._reporter.writeError( f"{conf_file_name} specifies {con_dat['model_name']}, but there is no model file {mo_name}.") - if self._modelica_tool != 'dymola': + if self._modelica_tool == 'dymola': + for ent in self._data: + ent['dymola']['time_out'] = 300 + else: # Non-dymola def_dic = {} def_dic[self._modelica_tool] = { 'translate': True, @@ -1411,7 +1409,7 @@ def _getTimeGridFromSimulationResults(pairs): val = [] try: var_mat = var - # Matrix variables in OpenModelica, OPTIMICA and JModelica are stored in mat file with + # Matrix variables in OpenModelica and OPTIMICA are stored in mat file with # no space e.g. [1,1]. 
if self._modelica_tool != 'dymola': var_mat = re.sub(' ', '', var_mat) @@ -1431,7 +1429,7 @@ def _getTimeGridFromSimulationResults(pairs): data['ScriptFile'] + ", caught division by zero.\n" s += " len(val) = " + str(len(val)) + "\n" s += " tMax-tMin = " + str(tMax - tMin) + "\n" - warnings.append(s) + errors.append(s) break except ValueError as e: s = "When processing " + fulFilNam + " generated by " + \ @@ -1439,8 +1437,10 @@ def _getTimeGridFromSimulationResults(pairs): s += " type(time) = " + str(type(time)) + "\n" break except KeyError: - warnings.append("%s uses %s which does not exist in %s.\n" % - (data['ScriptFile'], var, data['ResultFile'])) + # This must be an error, otherwise the user would be asked + # to accept a reference result file with a variable that does not exist. + errors.append("%s uses %s which does not exist in %s.\n" % + (data['ScriptFile'], var, data['ResultFile'])) else: # Store time grid. if self._OCT_VERIFICATION: @@ -1476,7 +1476,7 @@ def _getDymolaTranslationStatistics(self, data, warnings, errors): :param warning: A list to which all warnings will be appended. :param errors: A list to which all errors will be appended. :return: The translation log from the `*.translation.log` file as - a list of dictionaries. + a list of dictionaries, or `None` if `*.translation.log` does not exist. Extracts and returns the translation log from the `*.translation.log` file as a list of dictionaries. 
@@ -1485,7 +1485,10 @@ def _getDymolaTranslationStatistics(self, data, warnings, errors): # Get the working directory that contains the ".log" file fulFilNam = os.path.join(data['ResultDirectory'], self.getLibraryName(), data['dymola']['TranslationLogFile']) - return of.get_model_statistics(fulFilNam, self._modelica_tool) + if os.path.exists(fulFilNam): + return of.get_model_statistics(fulFilNam, self._modelica_tool) + else: + return None def _legacy_comp(self, tOld, yOld, tNew, yNew, tGriOld, tGriNew, varNam, filNam, tol): # Interpolate the new variables to the old time stamps @@ -2195,14 +2198,16 @@ def _compareResults(self, data_idx, oldRefFulFilNam, y_sim, y_tra, refFilNam, an # reject the new values. if (newTrajectories or newStatistics) and (not self._batch) and ( not ans == "N") and (not ans == "Y"): - print(f"{self._color_ERROR} For {refFilNam},") if newTrajectories and newStatistics: + print(f"{self._color_ERROR} For {refFilNam},") print( f" update reference files with new {self._color_BOLD}statistics and trajectories{self._color_ERROR}?{self._color_ENDC}") elif newStatistics: + print(f"{self._color_WARNING} For {refFilNam},") print( - f" update reference files with new {self._color_BOLD}statistics{self._color_ERROR}?{self._color_ENDC}") + f" update reference files with new {self._color_BOLD}statistics{self._color_WARNING}?{self._color_ENDC}") else: + print(f"{self._color_ERROR} For {refFilNam},") print( f" update reference files with new {self._color_BOLD}trajectories{self._color_ERROR}?{self._color_ENDC}") @@ -2266,6 +2271,7 @@ def _funnel_plot(self, model_name, browser=None): def _legacy_plot(self, y_sim, t_ref, y_ref, noOldResults, timOfMaxErr, model_name): """Plot comparison results generated by legacy comparison algorithm.""" + import matplotlib.pyplot as plt nPlo = len(y_sim) iPlo = 0 plt.clf() @@ -2490,8 +2496,8 @@ def _check_fmu_statistics(self, ans): self._reporter.writeError(em) return retVal - def _get_jmodelica_warnings(self, error_text, 
model): - """ Return a list with all JModelica warnings + def _get_optimica_warnings(self, error_text, model): + """ Return a list with all OPTIMICA warnings """ import re @@ -2555,7 +2561,7 @@ def _get_openmodelica_simulation_record(self, simulation_text): def _get_optimica_simulation_record(self, simulation_text): """ Return total number of Jacobian evaluations, state events, and elapsed cpu time - when unit tests are run with OPTIMICA or JModelica + when unit tests are run with OPTIMICA. """ jacobianNumber = 0 stateEvents = 0 @@ -2577,7 +2583,7 @@ def _get_optimica_simulation_record(self, simulation_text): return res def _verify_non_dymola_runs(self): - """ Check the results of the OPTIMICA and JModelica tests. + """ Check the results of the OPTIMICA tests. This function returns 0 if no errors occurred, or a positive non-zero number otherwise. @@ -2608,7 +2614,7 @@ def _verify_non_dymola_runs(self): res = json.load(json_file) # Get warnings from stdout that was captured from the compilation if 'stdout' in res['translation']: - warnings = self._get_jmodelica_warnings( + warnings = self._get_optimica_warnings( error_text=res['translation']['stdout'], model=res['model']) res['translation']['warnings'] = warnings @@ -2717,6 +2723,7 @@ def _checkReferencePoints(self, ans): if not os.path.exists(refDir): os.makedirs(refDir) + updateReferenceData = False ret_val = 0 for data_idx, data in enumerate(self._data): # Index to self._comp_info @@ -2793,7 +2800,7 @@ def _checkReferencePoints(self, ans): # unless the tests run in batch mode if not (self._batch or ans == "Y" or ans == "N"): ans = "-" - updateReferenceData = False + updateReferenceData = False # check if reference results already exist in library oldRefFulFilNam = os.path.join(refDir, refFilNam) # If the reference file exists, and if the reference file contains @@ -2812,24 +2819,27 @@ def _checkReferencePoints(self, ans): for pai in y_sim: t_ref = pai["time"] noOldResults = noOldResults + list(pai.keys()) - if 
not self._OCT_VERIFICATION: - if not (self._batch or ans == "Y" or ans == "N"): + + if not self._OCT_VERIFICATION and not (self._batch or ans == "Y" or ans == "N"): + if t_ref is None: + self._reporter.writeError( + f"Test case {refFilNam} has no simulation output to compare. You need to add at least one variable to compare.") + else: self._legacy_plot(y_sim, t_ref, {}, noOldResults, dict(), "New results: " + data['ScriptFile']) - # Reference file does not exist, write warning, unless we are in OCT_VERIFICATION mode - print( - "*** Warning: Reference file {} does not yet exist.".format(refFilNam)) - while not (ans == "n" or ans == "y" or ans == "Y" or ans == "N"): - print(" Create new file?") - ans = input( - " Enter: y(yes), n(no), Y(yes for all), N(no for all): ") - if self._OCT_VERIFICATION or ans == "y" or ans == "Y": - updateReferenceData = True - else: - self._reporter.writeError("Did not write new reference file %s." % - oldRefFulFilNam) - else: - updateReferenceData = True + # Reference file does not exist + print( + "*** Warning: Reference file {} does not yet exist.".format(refFilNam)) + while not ( + ans == "n" or ans == "y" or ans == "Y" or ans == "N"): + print(" Create new file?") + ans = input( + " Enter: y(yes), n(no), Y(yes for all), N(no for all): ") + if ans == "y" or ans == "Y": + updateReferenceData = True + else: + self._reporter.writeError( + "Did not write new reference file %s." % oldRefFulFilNam) if updateReferenceData: # If the reference data of any variable was updated # Make dictionary to save the results self._writeReferenceResults(oldRefFulFilNam, y_sim, y_tra) @@ -2858,9 +2868,9 @@ def _checkReferencePoints(self, ans): if self._comp_tool == 'funnel': s = """Comparison files output by funnel are stored in the directory '{}' of size {:.1f} MB. 
-Run 'python -c "import buildingspy.development.regressiontest as t; t.Tester().report()"' +Run 'python -c "import buildingspy.development.regressiontest as t; t.Tester(tool=\\\"{}\\\").report()"' to access a summary of the comparison results.\n""".format( - self._comp_dir, self._get_size_dir(self._comp_dir) * 1e-6) + self._comp_dir, self._get_size_dir(self._comp_dir) * 1e-6, self._modelica_tool) self._reporter.writeOutput(s) return ret_val @@ -2878,7 +2888,7 @@ def _performTranslationErrorChecks(self, logFil, stat): m = re.search(v["tool_message"], line) if m is not None: stat[k] = stat[k] + int(m.group(1)) - # otherwise, default: count the number of line occurences + # otherwise, default: count the number of line occurrences else: if v["tool_message"] in line: stat[k] = stat[k] + 1 @@ -2939,23 +2949,31 @@ def _checkSimulationError(self, errorFile): else: key = 'FMUExport' + logFil = None if key in ele: - logFil = ele[key]["translationLog"] - ele[key] = self._performTranslationErrorChecks(logFil, ele[key]) - for k, v in list(self._error_dict.get_dictionary().items()): - # For OPTIMICA and JModelica, we neither have simulate nor FMUExport - if ele[key][k] > 0: - self._reporter.writeWarning(v["model_message"].format(ele[key]["command"])) - self._error_dict.increment_counter(k) - - if hasTranslationError: - hasTranslationErrors = True + if "translationLog" in ele[key]: + logFil = ele[key]["translationLog"] + ele[key] = self._performTranslationErrorChecks(logFil, ele[key]) + for k, v in list(self._error_dict.get_dictionary().items()): + # For OPTIMICA, we neither have simulate nor FMUExport + if ele[key][k] > 0: + self._reporter.writeWarning( + v["model_message"].format(ele[key]["command"])) + self._error_dict.increment_counter(k) + + if hasTranslationError and logFil is not None: with open(self._failed_simulator_log_file, "a") as f: f.write("===============================\n") f.write("=====START OF NEW LOG FILE=====\n") f.write("===============================\n") - 
with open(logFil, "r") as f2: - f.write(f2.read()) + if os.path.exists(logFil): + with open(logFil, "r") as f2: + f.write(f2.read()) + else: + # Logfile does not exists, which may be because simulation was terminated + # due to time out + f.write( + f"Log file {logFil} does not exist, this can happen if the process was terminated due to time out.") f.write("\n\n\n") if iChe > 0: @@ -3123,7 +3141,7 @@ def _isPresentAndTrue(key, dic): return key in dic and dic[key] def _write_runscript_dymola(self, iPro, tra_data_pro): - """Create the runAll.mos script for the current processor iPro and for Dymola, + """Create the run_modelName.mos scripts for the current processor iPro and for Dymola, and return the number of generated regression tests. :param iPro: The number of the processor. @@ -3131,6 +3149,18 @@ def _write_runscript_dymola(self, iPro, tra_data_pro): """ import platform + for tra_data in tra_data_pro: + self._write_dymola_script(iPro, tra_data) + + def _write_dymola_script(self, iPro, tra_data): + """Create the run_modelName.mos script for the current model and for Dymola, + and return the number of generated regression tests. + + :param iPro: The number of the processor. + :param tra_data: Data for the experiment that requires translation, for processor number iPro only. + """ + import platform + ################################################################## # Internal functions def _write_translation_stats(runFil, values): @@ -3153,17 +3183,17 @@ def _print_end_of_json(isLastItem, fileHandle, logFileName): ################################################################## # Count the number of experiments that need to be simulated or exported as an FMU. # This is needed to properly close the json brackets. - nItem = 0 +# nItem = 0 # Count how many tests need to be simulated. 
- nTes = len(tra_data_pro) +# nTes = len(tra_data_pro) # Number of generated unit tests - nUniTes = 0 +# nUniTes = 0 - runFil = open(os.path.join(self._temDir[iPro], self.getLibraryName( - ), "runAll.mos"), mode="w", encoding="utf-8") + statistics_log = f"{tra_data['model_name']}.statistics.log" + runFil = open(os.path.join(self._temDir[iPro], self.getLibraryName(), + f"run_{tra_data['model_name']}.mos"), mode="w", encoding="utf-8") runFil.write( - f""" -// File autogenerated for process {iPro + 1} of {self._nPro} + f"""// File autogenerated for process {iPro + 1} of {self._nPro} // File created for execution by {self._modelica_tool}. Do not edit. // Disable parallel computing as this can give slightly different results. Advanced.ParallelizeCode = false; @@ -3198,87 +3228,85 @@ def _print_end_of_json(isLastItem, fileHandle, logFileName): Advanced.TranslationInCommandLog := true; // Set flag to support string parameters, which is required for the weather // data file. -Modelica.Utilities.Files.remove(\"{self._simulator_log_file}\"); -Modelica.Utilities.Files.remove(\"{self._statistics_log}\"); +//Modelica.Utilities.Files.remove(\"{self._simulator_log_file}\"); +Modelica.Utilities.Files.remove(\"{statistics_log}\"); """) runFil.write(r""" Modelica.Utilities.Streams.print("{\"testCase\" : [", "%s"); -""" % self._statistics_log) - - for i in range(nTes): - if self._isPresentAndTrue( - 'translate', - tra_data_pro[i]['dymola']) or self._isPresentAndTrue( - 'exportFMU', - tra_data_pro[i]['dymola']): - nItem = nItem + 1 - iItem = 0 - # Write unit tests for this process - for i in range(nTes): - # Check if this mos file should be simulated - if self._isPresentAndTrue( - 'translate', - tra_data_pro[i]['dymola']) or self._isPresentAndTrue( - 'exportFMU', - tra_data_pro[i]['dymola']): - isLastItem = (iItem == nItem - 1) - mosFilNam = os.path.join(self.getLibraryName(), - "Resources", "Scripts", "Dymola", - tra_data_pro[i]['ScriptFile']) - absMosFilNam = 
os.path.join(self._temDir[iPro], mosFilNam) - values = { - "mosWithPath": mosFilNam.replace( - "\\", - "/"), - "checkCommand": self._getModelCheckCommand(absMosFilNam).replace( - "\\", - "/"), - "checkCommandString": self._getModelCheckCommand(absMosFilNam).replace( - '\"', - r'\\\"'), - "scriptFile": tra_data_pro[i]['ScriptFile'].replace( - "\\", - "/"), - "model_name": tra_data_pro[i]['model_name'].replace( - "\\", - "/"), - "model_name_underscore": tra_data_pro[i]['model_name'].replace( - ".", - "_"), - "start_time": tra_data_pro[i]['startTime'] if 'startTime' in tra_data_pro[i] else 0, - "final_time": tra_data_pro[i]['stopTime'] if 'stopTime' in tra_data_pro[i] else 0, - "statisticsLog": self._statistics_log.replace( - "\\", - "/"), - "translationLog": os.path.join( - self._temDir[iPro], - self.getLibraryName(), - tra_data_pro[i]['model_name'] + - ".translation.log").replace( - "\\", - "/"), - "simulatorLog": self._simulator_log_file.replace( - "\\", - "/")} - if 'FMUName' in tra_data_pro[i]['dymola']: - values["FMUName"] = tra_data_pro[i]['dymola']['FMUName'] - # Delete command log, model_name.simulation.log and dslog.txt - runFil.write(f""" +""" % statistics_log) + + # if self._isPresentAndTrue( + # 'translate', + # tra_data['dymola']) or self._isPresentAndTrue( + # 'exportFMU', + # tra_data['dymola']): + #nItem = nItem + 1 + +# iItem = 0 +# Write unit tests for this process +# Check if this mos file should be simulated + if self._isPresentAndTrue( + 'translate', + tra_data['dymola']) or self._isPresentAndTrue( + 'exportFMU', + tra_data['dymola']): + isLastItem = True # (iItem == nItem - 1) + mosFilNam = os.path.join(self.getLibraryName(), + "Resources", "Scripts", "Dymola", + tra_data['ScriptFile']) + absMosFilNam = os.path.join(self._temDir[iPro], mosFilNam) + values = { + "libraryName": self.getLibraryName(), + "mosWithPath": mosFilNam.replace( + "\\", + "/"), + "checkCommand": self._getModelCheckCommand(absMosFilNam).replace( + "\\", + "/"), + 
"checkCommandString": self._getModelCheckCommand(absMosFilNam).replace( + '\"', + r'\\\"'), + "scriptFile": tra_data['ScriptFile'].replace( + "\\", + "/"), + "model_name": tra_data['model_name'].replace( + "\\", + "/"), + "model_name_underscore": tra_data['model_name'].replace( + ".", + "_"), + "start_time": tra_data['startTime'] if 'startTime' in tra_data else 0, + "final_time": tra_data['stopTime'] if 'stopTime' in tra_data else 0, + "statisticsLog": statistics_log, + "translationLog": os.path.join( + self._temDir[iPro], + self.getLibraryName(), + tra_data['model_name'] + + ".translation.log").replace( + "\\", + "/"), + "simulatorLog": self._simulator_log_file.replace( + "\\", + "/")} + if 'FMUName' in tra_data['dymola']: + values["FMUName"] = tra_data['dymola']['FMUName'] + # Delete command log, model_name.simulation.log and dslog.txt + runFil.write(f""" Modelica.Utilities.Files.remove(\"{values["model_name"]}.translation.log\"); Modelica.Utilities.Files.remove(\"dslog.txt\"); clearlog(); """) - ######################################################################## - # Write line for model check - model_name = values["model_name"] - if model_name.startswith("Obsolete.", model_name.find(".") + 1): - # This model is in IBPSA.Obsolete, or Buildings.Obsolete etc. - values["set_non_pedantic"] = "Advanced.PedanticModelica = false;\n" - values["set_pedantic"] = "Advanced.PedanticModelica = true;\n" - else: # Set to empty string as for non-obsolete models, we don't switch to non-pedantic mode - values["set_non_pedantic"] = "" - values["set_pedantic"] = "" - template = r""" + ######################################################################## + # Write line for model check + model_name = values["model_name"] + if model_name.startswith("Obsolete.", model_name.find(".") + 1): + # This model is in IBPSA.Obsolete, or Buildings.Obsolete etc. 
+ values["set_non_pedantic"] = "Advanced.PedanticModelica = false;\n" + values["set_pedantic"] = "Advanced.PedanticModelica = true;\n" + else: # Set to empty string as for non-obsolete models, we don't switch to non-pedantic mode + values["set_non_pedantic"] = "" + values["set_pedantic"] = "" + template = r""" {set_non_pedantic} rCheck = {checkCommand}; {set_pedantic} @@ -3288,23 +3316,66 @@ def _print_end_of_json(isLastItem, fileHandle, logFileName): Modelica.Utilities.Streams.print(" \"command\" : \"{checkCommandString};\",", "{statisticsLog}"); Modelica.Utilities.Streams.print(" \"result\" : " + String(rCheck), "{statisticsLog}"); Modelica.Utilities.Streams.print(" }},", "{statisticsLog}"); +""" + runFil.write(template.format(**values)) + ########################################################################## + # Write commands for checking translation and simulation results. + # Only translation requested, but no simulation. + ########################################################################## + if self._isPresentAndTrue( + 'translate', + tra_data['dymola']) and not self._isPresentAndTrue( + 'simulate', + tra_data['dymola']): + template = r""" +{set_non_pedantic} +retVal = translateModel("{model_name}"); +Modelica.Utilities.Streams.print("Translated {model_name} successfully: " + String(retVal)); +{set_pedantic} +savelog("{model_name}.translation.log"); +if Modelica.Utilities.Files.exist("dslog.txt") then + Modelica.Utilities.Files.move("dslog.txt", "{model_name}.dslog.log"); +end if; +iSuc=0; +if Modelica.Utilities.Files.exist("{model_name}.dslog.log") then + iLin=1; + endOfFile=false; + while (not endOfFile) loop + (_line, endOfFile)=Modelica.Utilities.Streams.readLine("{model_name}.dslog.log", iLin); + iLin=iLin+1; + iSuc=iSuc+Modelica.Utilities.Strings.count(_line, "Translated {model_name} successfully: true."); + end while; + Modelica.Utilities.Streams.close("{model_name}.dslog.log"); +else + 
Modelica.Utilities.Streams.print("{model_name}.dslog.log was not generated.", "{model_name}.log"); +end if; """ runFil.write(template.format(**values)) - ########################################################################## - # Write commands for checking translation and simulation results. - if self._isPresentAndTrue('translate', tra_data_pro[i]['dymola']): - # Remove dslog.txt, run a simulation, rename dslog.txt, and - # scan this log file for errors. - # This is needed as RunScript returns true even if the simulation failed. - # We read to dslog file line by line as very long files can lead to - # Out of memory for strings - # It could due to too large matrices, infinite recursion, or uninitialized variables. - # You can increase the size of 'Stringbuffer' in dymola/source/matrixop.h. - # The stack of functions is: - # Modelica.Utilities.Streams.readFile - template = r""" + template = r""" +Modelica.Utilities.Streams.print(" \"translate\" : {{", "{statisticsLog}"); +Modelica.Utilities.Streams.print(" \"command\" :\"translateModel(\\\"{model_name}\\\");\",", "{statisticsLog}"); +Modelica.Utilities.Streams.print(" \"translationLog\" : \"{translationLog}\",", "{statisticsLog}"); +Modelica.Utilities.Streams.print(" \"result\" : " + String(iSuc > 0), "{statisticsLog}"); +""" + runFil.write(template.format(**values)) + _write_translation_stats(runFil, values) + _print_end_of_json(isLastItem, + runFil, + statistics_log) + # Simulation requested + if self._isPresentAndTrue('simulate', tra_data['dymola']): + # Remove dslog.txt, run a simulation, rename dslog.txt, and + # scan this log file for errors. + # This is needed as RunScript returns true even if the simulation failed. + # We read to dslog file line by line as very long files can lead to + # Out of memory for strings + # It could due to too large matrices, infinite recursion, or uninitialized variables. + # You can increase the size of 'Stringbuffer' in dymola/source/matrixop.h. 
+ # The stack of functions is: + # Modelica.Utilities.Streams.readFile + template = r""" {set_non_pedantic} -rScript=RunScript("Resources/Scripts/Dymola/{scriptFile}"); +rScript=RunScript("modelica://{libraryName}/Resources/Scripts/Dymola/{scriptFile}"); {set_pedantic} savelog("{model_name}.translation.log"); if Modelica.Utilities.Files.exist("dslog.txt") then @@ -3363,10 +3434,10 @@ def _print_end_of_json(isLastItem, fileHandle, logFileName): Modelica.Utilities.Streams.print("{model_name}.dslog.log was not generated.", "{model_name}.log"); end if; """ - runFil.write(template.format(**values)) - template = r""" + runFil.write(template.format(**values)) + template = r""" Modelica.Utilities.Streams.print(" \"simulate\" : {{", "{statisticsLog}"); -Modelica.Utilities.Streams.print(" \"command\" : \"RunScript(\\\"Resources/Scripts/Dymola/{scriptFile}\\\");\",", "{statisticsLog}"); +Modelica.Utilities.Streams.print(" \"command\" : \"RunScript(\\\"modelica://{libraryName}/Resources/Scripts/Dymola/{scriptFile}\\\");\",", "{statisticsLog}"); Modelica.Utilities.Streams.print(" \"translationLog\" : \"{translationLog}\",", "{statisticsLog}"); Modelica.Utilities.Streams.print(" \"elapsed_time\" :" + intTim + ",", "{statisticsLog}"); Modelica.Utilities.Streams.print(" \"jacobians\" :" + numJac + ",", "{statisticsLog}"); @@ -3375,17 +3446,17 @@ def _print_end_of_json(isLastItem, fileHandle, logFileName): Modelica.Utilities.Streams.print(" \"final_time\" :" + String({final_time}) + ",", "{statisticsLog}"); Modelica.Utilities.Streams.print(" \"result\" : " + String(iSuc > 0), "{statisticsLog}"); """ - runFil.write(template.format(**values)) - _write_translation_stats(runFil, values) - _print_end_of_json(isLastItem, - runFil, - self._statistics_log) - ########################################################################## - # FMU export - if tra_data_pro[i]['dymola']['exportFMU']: - template = r""" + runFil.write(template.format(**values)) + _write_translation_stats(runFil, 
values) + _print_end_of_json(isLastItem, + runFil, + statistics_log) + ########################################################################## + # FMU export + if tra_data['dymola']['exportFMU']: + template = r""" Modelica.Utilities.Files.removeFile("{FMUName}"); -RunScript("Resources/Scripts/Dymola/{scriptFile}"); +RunScript("modelica://{libraryName}/Resources/Scripts/Dymola/{scriptFile}"); savelog("{model_name}.translation.log"); if Modelica.Utilities.Files.exist("dslog.txt") then Modelica.Utilities.Files.move("dslog.txt", "{model_name}.dslog.log"); @@ -3404,27 +3475,27 @@ def _print_end_of_json(isLastItem, fileHandle, logFileName): Modelica.Utilities.Streams.print("{model_name}.dslog.log was not generated.", "{model_name}.log"); end if; """ - runFil.write(template.format(**values)) - template = r""" + runFil.write(template.format(**values)) + template = r""" Modelica.Utilities.Streams.print(" \"FMUExport\" : {{", "{statisticsLog}"); -Modelica.Utilities.Streams.print(" \"command\" :\"RunScript(\\\"Resources/Scripts/Dymola/{scriptFile}\\\");\",", "{statisticsLog}"); +Modelica.Utilities.Streams.print(" \"command\" :\"RunScript(\\\"modelica://{libraryName}/Resources/Scripts/Dymola/{scriptFile}\\\");\",", "{statisticsLog}"); Modelica.Utilities.Streams.print(" \"translationLog\" : \"{translationLog}\",", "{statisticsLog}"); Modelica.Utilities.Streams.print(" \"result\" : " + String(iSuc > 0), "{statisticsLog}"); """ - runFil.write(template.format(**values)) - _write_translation_stats(runFil, values) - _print_end_of_json(isLastItem, - runFil, - self._statistics_log) - - if not (tra_data_pro[i]['dymola']['exportFMU'] - or tra_data_pro[i]['dymola']['translate']): - print( - "****** {} neither requires a simulation nor an FMU export.".format(tra_data_pro[i]['ScriptFile'])) - self._removePlotCommands(absMosFilNam) - self._updateResultFile(absMosFilNam, tra_data_pro[i]['model_name']) - nUniTes = nUniTes + 1 - iItem = iItem + 1 + runFil.write(template.format(**values)) + 
_write_translation_stats(runFil, values) + _print_end_of_json(isLastItem, + runFil, + statistics_log) + + if not (tra_data['dymola']['exportFMU'] + or tra_data['dymola']['translate']): + print( + "****** {} neither requires a simulation nor an FMU export.".format(tra_data['ScriptFile'])) + self._removePlotCommands(absMosFilNam) + self._updateResultFile(absMosFilNam, tra_data['model_name']) +# nUniTes = nUniTes + 1 +# iItem = iItem + 1 if platform.system() == 'Windows': # Reset DDE to original settings @@ -3439,12 +3510,12 @@ def _print_end_of_json(isLastItem, fileHandle, logFileName): exit(); """) runFil.close() - return nUniTes + return def _write_runscripts(self): - """Create the runAll.mos scripts, one per processor (self._nPro). + """Create the run_modelName.mos scripts, one per model. - The commands in the script depend on the tool: 'openmodelica', 'dymola', 'optimica', or 'jmodelica' + The commands in the script depend on the tool: 'openmodelica', 'dymola' or 'optimica' """ nUniTes = 0 @@ -3500,10 +3571,10 @@ def _write_runscripts(self): if self._modelica_tool == 'dymola': # Case for dymola - nUniTes = nUniTes + self._write_runscript_dymola(iPro, tra_data_pro) - else: - # Case for non-dymola - nUniTes = nUniTes + self._write_runscript_non_dymola(iPro, tra_data_pro) + self._write_runscript_dymola(iPro, tra_data_pro) + + nUniTes = nUniTes + self._write_python_runscripts(iPro, tra_data_pro) + self._write_run_all_script(iPro, tra_data_pro) if nUniTes == 0: raise RuntimeError(f"Wrong invocation, generated {nUniTes} unit tests.") @@ -3518,8 +3589,8 @@ def _get_set_of_result_variables(list_of_result_variables): s.add(ele) return s - def _write_runscript_non_dymola(self, iPro, tra_data_pro): - """ Write the OpenModelica, OPTIMICA or JModelica runfile for all experiments in tra_data_pro. + def _write_run_all_script(self, iPro, tra_data_pro): + """ Write the OpenModelica or OPTIMICA top-level runfile for all experiments in tra_data_pro. 
:param iPro: The number of the processor. :param tra_data_pro: A list with the data for the experiments that require translation, for this processor only. @@ -3536,7 +3607,7 @@ def _write_runscript_non_dymola(self, iPro, tra_data_pro): models_underscore = [] for dat in tra_data_pro: models_underscore.append(dat['model_name'].replace(".", "_")) - template = env.get_template("{}_run_all.template".format(self._modelica_tool)) + template = env.get_template("run_all.template") txt = template.render(models_underscore=sorted(models_underscore)) # for the special case that no models need to be translated (for this process) # we need to add a python command. Otherwise the python file is not valid. @@ -3544,6 +3615,21 @@ def _write_runscript_non_dymola(self, iPro, tra_data_pro): txt += " import os;\n" fil.write(txt) + def _write_python_runscripts(self, iPro, tra_data_pro): + """ Write the Python runfiles for all experiments in tra_data_pro. + + :param iPro: The number of the processor. + :param tra_data_pro: A list with the data for the experiments that require translation, for this processor only. + """ + import inspect + import buildingspy.development.regressiontest as r + import jinja2 + + directory = self._temDir[iPro] + + path_to_template = os.path.dirname(inspect.getfile(r)) + env = jinja2.Environment(loader=jinja2.FileSystemLoader(path_to_template)) + tem_mod = env.get_template("{}_run.template".format(self._modelica_tool)) for dat in tra_data_pro: @@ -3562,7 +3648,7 @@ def _write_runscript_non_dymola(self, iPro, tra_data_pro): dat[self._modelica_tool]['rtol'] = 1E-6 # Note that if dat[self._modelica_tool]['simulate'] == false, then only the FMU export is tested, but no # simulation should be done. - # filter argument must respect glob syntax ([ is escaped with []]) + JModelica mat file + # filter argument must respect glob syntax ([ is escaped with []]) + OPTIMICA mat file # stores matrix variables with no space e.g. [1,1]. 
if self._modelica_tool == 'openmodelica': filter = '(' + '|'.join([re.sub(r'\[|\]', @@ -3579,7 +3665,7 @@ def _write_runscript_non_dymola(self, iPro, tra_data_pro): time_out=dat[self._modelica_tool]['time_out'], filter=filter ) - else: + elif self._modelica_tool == 'optimica': txt = tem_mod.render( model=model, ncp=dat[self._modelica_tool]['ncp'], @@ -3596,12 +3682,34 @@ def _write_runscript_non_dymola(self, iPro, tra_data_pro): lambda m: '[{}]'.format(m.group()), re.sub(' ', '', x)) for x in result_variables] ) + else: # dymola + # assemble command + cmd = list() + cmd.append(f"{self.getModelicaCommand()}") + cmd.append(f"run_{model}.mos") + if not self._showGUI: + cmd.append("/nowindow") + + txt = tem_mod.render( + model=model, + working_directory=os.path.join(directory, self.getLibraryName()), + library_name=self.getLibraryName(), + # ncp=dat[self._modelica_tool]['ncp'], + # rtol=dat[self._modelica_tool]['rtol'], + # solver=dat[self._modelica_tool]['solver'], + # start_time='mod.get_default_experiment_start_time()', + # final_time='mod.get_default_experiment_stop_time()', + # simulate=dat[self._modelica_tool]['simulate'], + time_out=dat[self._modelica_tool]['time_out'], + cmd=cmd + ) + file_name = os.path.join(directory, "{}.py".format(model.replace(".", "_"))) with open(file_name, mode="w", encoding="utf-8") as fil: fil.write(txt) # Copy python file that grabs the console output - if self._modelica_tool == 'optimica' or self._modelica_tool == 'jmodelica': + if self._modelica_tool == 'optimica': shutil.copyfile( os.path.join( os.path.dirname(__file__), @@ -3667,19 +3775,34 @@ def _setTemporaryDirectories(self): def _run_simulation_info(self): """ Extract simulation data from statistics.json when run unit test with dymola """ + + def _get(model, key, data): + for ent in data: + if ent['model_name'] == model: + return ent[key] + return 0 + with open(self._statistics_log, 'r') as f: staVal = simplejson.loads(f.read()) data = [] for case in staVal['testCase']: - if 
'FMUExport' not in case: + if 'translate' in case: + temp = {} + temp['model'] = case['model'] + temp['translation'] = {} + temp['translation']['success'] = case['translate']['result'] + data.append(temp) + if 'simulate' in case: temp = {} temp['model'] = case['model'] temp['simulation'] = {} - temp['simulation']['elapsed_time'] = case['simulate']['elapsed_time'] - temp['simulation']['start_time'] = case['simulate']['start_time'] - temp['simulation']['final_time'] = case['simulate']['final_time'] - temp['simulation']['jacobians'] = case['simulate']['jacobians'] - temp['simulation']['state_events'] = case['simulate']['state_events'] + temp['simulation']['elapsed_time'] = case['simulate']['elapsed_time'] if 'elapsed_time' in case['simulate'] else 0 + temp['simulation']['start_time'] = case['simulate']['start_time'] if 'start_time' in case['simulate'] else _get( + case['model'], 'startTime', self._data) + temp['simulation']['final_time'] = case['simulate']['final_time'] if 'final_time' in case['simulate'] else _get( + case['model'], 'stopTime', self._data) + temp['simulation']['jacobians'] = case['simulate']['jacobians'] if 'jacobians' in case['simulate'] else 0 + temp['simulation']['state_events'] = case['simulate']['state_events'] if 'state_events' in case['simulate'] else 0 temp['simulation']['success'] = case['simulate']['result'] data.append(temp) dataJson = simplejson.dumps(data) @@ -3705,7 +3828,7 @@ def run(self): - for Dymola, compares the results of the new simulations with reference results that are stored in ``Resources/ReferenceResults``, - writes the message `Regression tests completed successfully.` - if no error occured, + if no error occurred, - returns 0 if no errors and no warnings occurred, or non-zero otherwise. 
""" @@ -3770,18 +3893,14 @@ def run(self): tem_dir = [] libNam = self.getLibraryName() for di in self._temDir: - if self._modelica_tool == 'dymola': - tem_dir.append(os.path.join(di, libNam)) - else: - tem_dir.append(di) + # if self._modelica_tool == 'dymola': + # tem_dir.append(os.path.join(di, libNam)) + # else: + # tem_dir.append(di) + tem_dir.append(di) if not self._useExistingResults: - if self._modelica_tool == 'dymola': - if self._showGUI: - cmd = [self.getModelicaCommand(), "runAll.mos"] - else: - cmd = [self.getModelicaCommand(), "runAll.mos", "/nowindow"] - elif self._modelica_tool == 'openmodelica': + if self._modelica_tool == 'dymola' or self._modelica_tool == 'openmodelica': # OS X invokes python 2.7 if the command below is python, despite of having # alias python=python3 in .bashrc. # Hence, we invoke python3 for OS X. @@ -3791,8 +3910,9 @@ def run(self): cmd = ["python3", "./run.py"] else: cmd = ["python", "./run.py"] - elif self._modelica_tool != 'dymola': + else: cmd = [self.getModelicaCommand(), "run.py"] + if self._nPro > 1: po = multiprocessing.Pool(self._nPro) po.map(functools.partial(runSimulation, @@ -3826,23 +3946,69 @@ def run(self): with open(self._statistics_log, mode="w", encoding="utf-8") as logFil: stat = list() for d in self._temDir: - temLogFilNam = os.path.join(d, self.getLibraryName(), self._statistics_log) - if os.path.exists(temLogFilNam): - with open(temLogFilNam.replace('Temp\tmp', 'Temp\\tmp'), mode="r", encoding="utf-8-sig") as temSta: - try: - jsonLog = json.load(temSta) - cas = jsonLog["testCase"] - # Iterate over all test cases of this output file - for ele in cas: - stat.append(ele) - except ValueError as e: - self._reporter.writeError( - "Decoding '%s' failed: %s" % (temLogFilNam, e)) - raise - else: - self._reporter.writeError( - "Log file '" + temLogFilNam + "' does not exist.\n") - retVal = 1 + for temLogFilNam in glob.glob( + os.path.join(d, self.getLibraryName(), '*.statistics.log')): + if 
os.path.exists(temLogFilNam): + with open(temLogFilNam.replace('Temp\tmp', 'Temp\\tmp'), mode="r", encoding="utf-8-sig") as temSta: + try: + jsonLog = json.load(temSta) + cas = jsonLog["testCase"] + # Iterate over all test cases of this output file + for ele in cas: + stat.append(ele) + except json.decoder.JSONDecodeError as e: + # If a run timed out, then temLogFilNam is not a valid json file + # because the file is written on the fly, and dymola did not finish + # writing all of it, which results in an invalid file. + # Check if /tmp/tmp-Buildings-1-o_m7nj7p/Buildings_Examples_VAVReheat_ASHRAE2006_buildingspy.json + # exists + modelName = os.path.split(temLogFilNam)[ + 1].replace('.statistics.log', '') + buiLogNam = os.path.join( + d, + f"{modelName.replace('.', '_')}_buildingspy.json") + if os.path.exists(buiLogNam): + # Read the log file of the python script that invoked dymola + with open(buiLogNam, mode="r", encoding="utf-8-sig") as buiLog: + jsonBui = json.load(buiLog) + # Build up the entry for reporting the case + if "simulation" in jsonBui and "exception" in jsonBui["simulation"]: + exception = ''.join( + jsonBui['simulation']['exception']) + else: + exception = f"JSONDecodeError in {temLogFilNam}: {str(e)}" + ele = { + "model": modelName, + "simulate": { + "command": ''.join( + jsonBui['simulation']['cmd']), + "result": False, + "exception": exception}} + self._reporter.writeError( + f"Model '{modelName}' failed: {exception}") + stat.append(ele) + # Add the failure also to self._data so that + # _checkReferencePoints is not trying to read the + # output. 
+ for ele in self._data: + if ele['model_name'] == modelName: + if "simulation" in ele[self._modelica_tool]: + ele[self._modelica_tool]['simulation']['success'] = False + else: + ele[self._modelica_tool]['simulation'] = { + 'success': False} + else: + self._reporter.writeError( + f"Decoding '{temLogFilNam}' failed and '{buiLogNam}' does not exist: {e}") + raise + except ValueError as e: + self._reporter.writeError( + "Loading '%s' failed: %s" % (temLogFilNam, e)) + raise + else: + self._reporter.writeError( + "Log file '" + temLogFilNam + "' does not exist.\n") + retVal = 1 # Dump an array of testCase objects # dump to a string first using json.dumps instead of json.dump json_string = json.dumps({"testCase": stat}, @@ -3890,7 +4056,7 @@ def run(self): retVal = temp if not self._skip_verification: - # For OpenModelica, OPTIMICA and JModelica: store available translation and simulation info + # For OpenModelica and OPTIMICA: store available translation and simulation info # into self._comp_info used for reporting. with open(self._simulator_log_file, 'r') as f: self._comp_info = simplejson.loads(f.read()) diff --git a/buildingspy/development/jmodelica_run_all.template b/buildingspy/development/run_all.template similarity index 100% rename from buildingspy/development/jmodelica_run_all.template rename to buildingspy/development/run_all.template diff --git a/buildingspy/development/simulationCompare.py b/buildingspy/development/simulationCompare.py new file mode 100644 index 00000000..ccdffd9c --- /dev/null +++ b/buildingspy/development/simulationCompare.py @@ -0,0 +1,813 @@ +#!/usr/bin/env python3 +######################################################## +# 2021-02-03: Changed calculation of relative time difference. 
+# 2020-11-11: Corrected color coding for html output +######################################################## +import getpass +import git +import glob +import io +import json +import os +import re +import sys +import shutil +import tempfile + +from distutils.dir_util import mkpath + + +class Comparator(object): + """ Class that compares various simulation statistics across tools or branches. + + This class allows comparing various simulation performance indicators + (CPU time, number of state events, number of Jacobian evaluations) + across Modelica simulation tools and across git branches. + The tests can be run across a whole library, or across an individual Modelica package. + The results will be summarized in a table format that compares the performance + across tools and branches. + + Initiate with the following optional arguments: + + :param tools: A list of tools to compare, such as ``['openmodelica', 'dymola']``. + :param branches: A list of branches to compare, such as ``['master', 'issueXXX']``. + :param package: Name of top-level package to compare, such as ``Buildings`` or ``Buildings.Examples``. + :param repo: Name of repository, such as ``https://github.com/lbl-srg/modelica-buildings``. + :param nPro: Number of threads that are used to run the translations and simulations. + Set to ``0`` to use all processors. + :param tolAbsTim: float (default ``0.1``). Absolute tolerance in time, if exceeded, results will be flagged in summary table. + :param tolRelTim: float (default ``0.1``). Relative tolerance in time, if exceeded, results will be flagged in summary table. + :param postCloneCommand: list. A list of a command and its arguments that is run after cloning the repository. The command is run from + the root folder inside the repository, e.g., the folder that contains the ``.git`` folder. + + This class can be used to compare translation and simulation statistics across tools and branches. 
+ Note that only one simulation is done, hence the simulation time can vary from one run to another, + and therefore indicates trends rather than exact comparison of computing time. + + To run the comparison, type + + >>> import os + >>> import buildingspy.development.simulationCompare as sc + >>> s = sc.Comparator( + ... tools=['dymola', 'openmodelica'], + ... branches=['master'], + ... package='Buildings', + ... repo='https://github.com/lbl-srg/modelica-buildings', + ... postCloneCommand=[ + ... "python", + ... "Buildings/Resources/src/ThermalZones/install.py", + ... "--binaries-for-os-only"]) + >>> s.run() # doctest: +SKIP + + + To change the comparison for different tolerances without running the simulations again, type + + >>> import os + >>> import buildingspy.development.simulationCompare as sc + >>> s = sc.Comparator( + ... tools=['dymola', 'openmodelica'], + ... branches=['master'], + ... package='Buildings', + ... repo='https://github.com/lbl-srg/modelica-buildings', + ... postCloneCommand=[ + ... "python", + ... "Buildings/Resources/src/ThermalZones/install.py", + ... "--binaries-for-os-only"]) + >>> s.post_process(tolAbsTime=0.2, tolRelTime=0.2) # doctest: +SKIP + + + """ + + def __init__( + self, + tools, + branches, + package, + repo, + nPro=0, + simulate=True, + tolAbsTime=0.1, + tolRelTime=0.1, + postCloneCommand=None): + + self._cwd = os.getcwd() + self._tools = tools + self._branches = branches + self._package = package + self._lib_src = repo + self._nPro = nPro + self._tolAbsTime = tolAbsTime + self._tolRelTime = tolRelTime + self._generate_tables = True + self._postCloneCommand = postCloneCommand + + def _get_cases(self): + ''' Set up simulation cases. 
+ ''' + cases = list() + for tool in self._tools: + for branch in self._branches: + cases.append( + {'package': self._package, + 'tool': tool, + 'branch': branch}) + for case in cases: + desDir = os.path.join(self._cwd, case['tool'], case['branch']) + logFil = os.path.join(desDir, "comparison-%s.log" % case['tool']) + commitLog = os.path.join(desDir, "commit.log") + case['name'] = logFil + case['commit'] = commitLog + return cases + + @staticmethod + def _create_and_return_working_directory(): + ''' Create working directory. + ''' + worDir = tempfile.mkdtemp(prefix='tmp-simulationCompare-' + getpass.getuser()) + print("Created directory {}".format(worDir)) + return worDir + + def _runPostCloneCommand(self, working_directory): + import subprocess + if self._postCloneCommand is not None: + print(f"*** Running {' '.join(self._postCloneCommand)} in '{working_directory}") + retArg = subprocess.run(self._postCloneCommand, cwd=working_directory) + if retArg.returncode != 0: + print( + f"*** Error: Command {' '.join(self._postCloneCommand)} in '{working_directory} returned {retArg.returncode}.") + + def _clone_repository(self, working_directory): + '''Clone or copy repository to working directory''' +# if from_git_hub: + print(f'*** Cloning repository {self._lib_src} in {working_directory}') + git.Repo.clone_from(self._lib_src, working_directory) +# else: +# shutil.rmtree(working_directory) +# print(f'*** Copying repository from {self._lib_src} to {working_directory}') +# shutil.copytree(self._lib_src, working_directory) + + @staticmethod + def _checkout_branch(working_directory, branch): + '''Checkout feature branch''' + d = {} + print(f'Checking out branch {branch}') + r = git.Repo(working_directory) + g = git.Git(working_directory) + g.stash() + g.checkout(branch) + # Print commit + d['branch'] = branch + d['commit'] = str(r.active_branch.commit) + + return d + + def _runUnitTest(self, package, tool): + ''' Execute unit tests. 
+ ''' + if package.find(".") == -1: + # Top level package is requested + single_package = "" + else: + single_package = f"-s {package}" + + if self._nPro == 0: + num_pro = "" + else: + num_pro = f"-n {self._nPro}" + + command = f"../bin/runUnitTests.py {single_package} {num_pro} -t {tool} --batch" + try: + os.system(command) + except OSError: + sys.stderr.write("Execution of '" + command + "' failed.") + + def _simulateCase(self, case, wor_dir): + ''' Set up unit tests and save log file + ''' + bdg_dir = os.path.join(wor_dir, self._package.split(".")[0]) + os.chdir(bdg_dir) + # run unit test + self._runUnitTest(case['package'], case['tool']) + # copy the log files to current working directory + logFil = "comparison-%s.log" % case['tool'] + if os.path.exists(logFil): + # write commit number to the commit.log file + with io.open(os.path.join(bdg_dir, "commit.log"), mode="w") as f: + f.write(case['commit']) + logFiles = glob.iglob(os.path.join(bdg_dir, "*.log")) + desDir = os.path.join(self._cwd, case['tool'], case['branch']) + mkpath(desDir) + for file in logFiles: + shutil.copy2(file, desDir) + os.chdir(self._cwd) + + @staticmethod + def _sortSimulationData(case): + ''' Filter the needed data from log file + + The unit test generated log file "comparison-xxx.log", which is then renamed as case['name'], contains more + data than needed. 
+ ''' + logs = list() + with io.open(case['name'], mode="rt", encoding="utf-8-sig") as log: + stat = json.loads(log.read()) + for ele in stat: + if "simulation" in ele: + temp = {"model": ele["model"], + "simulation": ele["simulation"]} + logs.append(temp) + return logs + + @staticmethod + def _refactorLogsStructure(logs, tolAbsTime, tolRelTime): + ''' Change the structure: + --From-- + "logs": [{"label": 'branch1', "commit": string, "log": [{"model": model1, "simulation": simulation_log}, + {"model": model2, "simulation": simulation_log}]}, + {"label": 'branch2', "commit": string, "log": [{"model": model1, "simulation": simulation_log}, + {"model": model2, "simulation": simulation_log}]}], + --To-- + "logs": [ {"model": model1, + "simulation": [{"label": branch1, "commit": string, "log": simulation_log}, + {"label": branch2, "commit": string, "log": simulation_log}]}, + {"model": model2, + "simulation": [{"label": branch1, "commit": string, "log": simulation_log}, + {"label": branch2, "commit": string, "log": simulation_log}]} ] + ''' + minLog = 0 + modelNumber = len(logs[0]['log']) + for i in range(1, len(logs)): + ithCaseModelNumber = len(logs[i]['log']) + if ithCaseModelNumber < modelNumber: + modelNumber = ithCaseModelNumber + minLog = i + refactoredLogs = list() + for j in range(len(logs[minLog]['log'])): + model = {'model': logs[minLog]['log'][j]['model']} + model['flag'] = False + simulation = list() + # find the same model's simulation log from other simulations + for k in range(len(logs)): + for l in range(len(logs[k]['log'])): + if logs[k]['log'][l]['model'] == logs[minLog]['log'][j]['model']: + temp = {'label': logs[k]['label'], + 'commit': logs[k]['commit'], + 'log': logs[k]['log'][l]['simulation']} + simulation.append(temp) + model['simulation'] = simulation + # check if the model runs successfully in all branches or tools + suc = Comparator._checkSimulation(model) + if suc is not True: + refactoredLogs.append(model) + continue + # find the maximum 
simulation time + t_0 = model['simulation'][0]['log']['elapsed_time'] + t_max = t_0 + relTim = 0 + relTim = 0 + for m in range(1, len(model['simulation'])): + elaTim = model['simulation'][m]['log']['elapsed_time'] + if t_0 > 1E-10: + relTim = elaTim / t_0 + if elaTim > t_max: + t_max = elaTim + # check if the model should be flagged as the simulation times are + # significantly different between different tools or branches + if t_max > tolAbsTime and abs(1 - relTim) > tolRelTime: + model['flag'] = True + model['relTim'] = relTim + refactoredLogs.append(model) + return refactoredLogs + + @staticmethod + def _checkSimulation(model): + ''' Check if the model runs successfully in all branches or tools + ''' + suc = True + simLogs = model['simulation'] + for i in range(len(simLogs)): + logSuc = simLogs[i]['log']['success'] + suc = suc and logSuc + return suc + + @staticmethod + def _refactorDataStructure(data, tolAbsTime, tolRelTime): + ''' Change data structure + ''' + refactoredData = list() + for ele in data: + temp = {'label': ele['label']} + logs = Comparator._refactorLogsStructure(ele['logs'], tolAbsTime, tolRelTime) + temp['logs'] = logs + refactoredData.append(temp) + return refactoredData + + def _generateTable(self, dataSet): + ''' Generate html table and write it to file + ''' + htmlTableDir = os.path.join(self._cwd, 'results', 'html') + mkpath(htmlTableDir) + # latexTableDir = os.path.join(self._cwd, 'results', 'latex') + # mkpath(latexTableDir) + for data in dataSet: + # generate branches comparison tables + if len(self._branches) > 1: + for tool in self._tools: + if data['label'] == tool: + filNam = os.path.join(htmlTableDir, "branches_compare_%s.html" % tool) + # texTab = os.path.join(latexTableDir, "branches_compare_%s.tex" % tool) + # generate html table content + htmltext, flagModels = self._generateHtmlTable(data, 'branches') + Comparator._writeFile(filNam, htmltext) + # self._generateTexTable(texTab, flagModels) + # generate tools comparison tables + 
if len(self._tools) > 1: + for branch in self._branches: + if data['label'] == branch: + filNam = os.path.join(htmlTableDir, "tools_compare_%s.html" % branch) + # texTab = os.path.join(latexTableDir, "tools_compare_%s.tex" % branch) + # generate html table content + htmltext, flagModels = self._generateHtmlTable(data, 'tools') + Comparator._writeFile(filNam, htmltext) + # self._generateTexTable(texTab, flagModels) + + def _generateTexTable(self, filNam, models): + try: + log = models[0]['log'] + except IndexError: # No flagged model to process. + return + + totalColumns = 2 + len(log) + begin = \ + r'''\documentclass{article} +\usepackage[table]{xcolor} +\usepackage{longtable} +\usepackage{listings} +\begin{document} +\def\tableCaption{xxxYYY.} +\def\tableLabel{tab:xxx} +''' + + column = \ + r'''\begin{longtable}{|p{9cm}|''' + captionLabel = \ + r'''\caption{\tableCaption} +\label{\tableLabel}\\ + ''' + for i in range(totalColumns - 2): + column = column + '''p{2cm}|''' + column = column + 'p{1cm}|}' + os.linesep + hline = '''\\hline''' + os.linesep + # column head + head = '''Model''' + for i in range(len(log)): + head = head + '''&$t_{%s}$ in [s]''' % log[i]['label'].replace('_', '\\_') + head = head + '''&$t_{2}/t_{1}$\\\\''' + head = head + '''[2.5ex] \\hline''' + os.linesep + row = '' + for i in range(len(models)): + ithModel = models[i] + temp = '' + fillColor = self._textTableColor(ithModel['relTim']) + temp = '''\\rowcolor[HTML]{%s} ''' % fillColor + os.linesep + temp = temp + '''{\\small ''' + '''\\lstinline|''' + \ + ithModel['model'].replace(f'{self._package}.', '') + '''|}''' + for j in range(len(log)): + temp = temp + '&' + '{\\small ' + \ + '{:.3f}'.format(ithModel['log'][j]['elapsed_time']) + '}' + temp = temp + '&' + '{\\small ' + '{:.2f}'.format(ithModel['relTim']) + '}' + temp = temp + '''\\\\[2.5ex] \\hline''' + os.linesep + row = row + temp + end = ''' +\\end{longtable} +\\end{document}''' + content = begin + column + captionLabel + hline + head 
+ row + end + Comparator._writeFile(filNam, content) + + def _textTableColor(self, relTim): + dif = relTim - 1 - self._tolRelTime if relTim > 1 else (1 - relTim) - self._tolRelTime + dR = 0.5 + dG = 0.1 + if dif < 0: + color = 'FFFFFF' + elif dif >= 0: + if relTim < 1: + if dif >= 0 and dif < dG: + color = 'edfef2' + elif dif >= dG and dif < 2 * dG: + color = 'dbfde4' + elif dif >= 2 * dG and dif < 3 * dG: + color = 'c9fcd7' + elif dif >= 3 * dG and dif < 4 * dG: + color = 'b6fbca' + elif dif >= 4 * dG and dif < 5 * dG: + color = 'a4fbbc' + elif dif >= 5 * dG and dif < 6 * dG: + color = '92faaf' + elif dif >= 6 * dG and dif < 7 * dG: + color = '80f9a1' + else: + color = '6ef894' + else: + if dif >= 0 and dif < dR: + color = 'feeded' + elif dif >= dR and dif < 2 * dR: + color = 'fddbdb' + elif dif >= 2 * dR and dif < 3 * dR: + color = 'fcc9c9' + elif dif >= 3 * dR and dif < 4 * dR: + color = 'fbb6b6' + elif dif >= 4 * dR and dif < 5 * dR: + color = 'fba4a4' + elif dif >= 5 * dR and dif < 6 * dR: + color = 'fa9292' + elif dif >= 6 * dR and dif < 7 * dR: + color = 'f98080' + else: + color = 'f86e6e' + return color + + @staticmethod + def _writeFile(filNam, content): + ''' Write html table to file + ''' + print(f"*** writing {filNam}") + with open(filNam, 'w+') as f: + f.write(content) + + def _chooseStyle(self, relTim, flag): + # relTim is (elaTim-t_0) / t_0 + dif = relTim - 1 - self._tolRelTime if relTim > 1 else (1 - relTim) - self._tolRelTime + dR = 0.5 + dG = 0.1 + style = 'normal' + if dif >= 0 and flag: + if relTim < 1: + if dif >= 0 and dif < dG: + style = 'g-1' + elif dif >= dG and dif < 2 * dG: + style = 'g-2' + elif dif >= 2 * dG and dif < 3 * dG: + style = 'g-3' + elif dif >= 3 * dG and dif < 4 * dG: + style = 'g-4' + elif dif >= 4 * dG and dif < 5 * dG: + style = 'g-5' + elif dif >= 5 * dG and dif < 6 * dG: + style = 'g-6' + elif dif >= 6 * dG and dif < 7 * dG: + style = 'g-7' + else: + style = 'g-8' + else: + if dif >= 0 and dif < dR: + style = 'r-1' + 
elif dif >= dR and dif < 2 * dR: + style = 'r-2' + elif dif >= 2 * dR and dif < 3 * dR: + style = 'r-3' + elif dif >= 3 * dR and dif < 4 * dR: + style = 'r-4' + elif dif >= 4 * dR and dif < 5 * dR: + style = 'r-5' + elif dif >= 5 * dR and dif < 6 * dR: + style = 'r-6' + elif dif >= 6 * dR and dif < 7 * dR: + style = 'r-7' + else: + style = 'r-8' + return style + + def _generateHtmlTable(self, data, tools_or_branches): + ''' Html table template + ''' + # style section + style = ''' + + +''' + + # find the data logs + dataLogs = data['logs'] + # calculate column width + fullLabels = self._tools if tools_or_branches == 'tools' else self._branches + numberOfDataSet = len(fullLabels) + colWidth = 100 / (3 + numberOfDataSet * 3 + 1) + + # specify column style + colGro = ''' + + + + ''' % (3 * colWidth) + for i in range(3 * numberOfDataSet + 1): + temp = '''''' % colWidth + os.linesep + colGro = colGro + temp + colGro = colGro + '''''' + os.linesep + + # specify head + heaGro = ''' + + + ''' + for i in range(numberOfDataSet): + label = fullLabels[i] + temp = ''' + ''' + os.linesep + heaGro = heaGro + '''''' + os.linesep + + # find total number of models + numberOfModels = len(dataLogs) + + # write simulation logs of each model + flagModelList = list() + models = '' + failedModels = list() + newDataLogs = list() + for entry in dataLogs: + entSim = entry['simulation'] + suc = True + failedIn = list() + if (len(entSim) == numberOfDataSet): + # the model has been translated by all tools/in all branches so it has + # fully set of simulation log, but it may not be simulated successfully. + for simLog in entSim: + suc = suc and simLog['log']['success'] + if simLog['log']['success'] is not True: + failedIn.append(simLog['label']) + else: + # the model is not translated by one/more tools or in one/more branches so + # it does not have fully set of simulation log. 
+ suc = False + if (len(entSim) == 0): + tmp = ' ,'.join(fullLabels) + failedIn.append(tmp) + else: + # list the successful run + sucLab = list() + for simLog in entSim: + if simLog['log']['success']: + sucLab.append(simLog['label']) + # filter the tools or branches that do not simulate or translate the model + for fulLab in fullLabels: + if fulLab not in sucLab: + failedIn.append(fulLab) + if suc: + newDataLogs.append(entry) + if suc is not True: + temp = {'model': entry['model']} + temp['logs'] = failedIn + failedModels.append(temp) + + # find the branches and the corresponded commit + firstEnt = newDataLogs[0] + firstEntSim = firstEnt['simulation'] + toolBranchInfo = '' + if tools_or_branches == 'tools': + commitText = f'{firstEntSim[0]["commit"]}' \ + if self._lib_src[0:5] == "https" else f'{firstEntSim[0]["commit"]}' + branchCommit = '''Branch %s (%s)''' % (data['label'], commitText) + toolsList = list() + for simLog in firstEntSim: + toolsList.append(simLog['label']) + tools = ', '.join(toolsList) + toolBranchInfo = '''
+

+ %s,
comparing tools: %s.
+

+ ''' % (branchCommit, tools) + else: + branchCommitList = list() + for simLog in firstEntSim: + commitText = f'{simLog["commit"]}' \ + if self._lib_src[0:5] == "https" else f'{simLog["commit"]}' + temp = '''%s (%s)''' % (simLog['label'], commitText) + branchCommitList.append(temp) + branchCommit = ',
'.join(branchCommitList) + toolBranchInfo = '''
+

+ Run with %s,
comparing branches:
%s.
+

+ ''' % (data['label'], branchCommit) + + for entry in newDataLogs: + modelData = '''''' + os.linesep + flag = entry['flag'] + relTim = entry['relTim'] + tgStyle = 'tg-' + self._chooseStyle(relTim, flag) + if flag: + flagModelListTemp = {'model': entry['model']} + flagModelListTemp['relTim'] = relTim + temp1 = '''''' % (tgStyle, entry['model']) + modelData = modelData + temp1 + os.linesep + temp2 = '' + temp3 = list() + for j in range(numberOfDataSet): + variableSet = entry['simulation'][j]['log'] + elapsed_time = variableSet['elapsed_time'] + state_events = variableSet['state_events'] + jacobians = variableSet['jacobians'] + flagTemp = {'elapsed_time': elapsed_time, + 'state_events': state_events, + 'jacobians': jacobians, + 'label': entry['simulation'][j]['label']} + temp3.append(flagTemp) + temp2 = temp2 + ''' + + + + ''' % (tgStyle, elapsed_time, + tgStyle, int(state_events), + tgStyle, int(jacobians)) + if flag: + flagModelListTemp['log'] = temp3 + flagModelList.append(flagModelListTemp) + modelData = modelData + temp2 + os.linesep + modelData = modelData + '''''' % (tgStyle, relTim) + os.linesep + modelData = modelData + '''''' + os.linesep + models = models + os.linesep + modelData + + sortedList = sorted(flagModelList, reverse=True, key=lambda k: k['relTim']) + + # write flagged models + flaggedModels = '' + for model in sortedList: + modelData = '''''' + os.linesep + relTim = model['relTim'] + tgStyle = 'tg-' + self._chooseStyle(relTim, True) + modelName = model['model'] + temp1 = '''''' % (tgStyle, modelName) + modelData = modelData + temp1 + os.linesep + temp2 = '' + for j in range(len(model['log'])): + oneSet = model['log'][j] + temp2 = temp2 + ''' + + + + ''' % (tgStyle, oneSet['elapsed_time'], + tgStyle, int(oneSet['state_events']), + tgStyle, int(oneSet['jacobians'])) + modelData = modelData + temp2 + os.linesep + modelData = modelData + '''''' % (tgStyle, relTim) + os.linesep + modelData = modelData + '''''' + os.linesep + flaggedModels = 
flaggedModels + os.linesep + modelData + + failedFlagText = '' + if tools_or_branches == 'branches': + failedFlagText = 'failed in branches' + else: + failedFlagText = 'failed or excluded by tools' + failedModelsInfo = '' + if len(failedModels) > 0: + failedModelsInfo = '''
+

+ Following models were flagged for %s. +

+ ''' % failedFlagText + failedModelsInfo += os.linesep + failedModelsInfo += '''
Model%s
-
Elapsed time (s) +
%s
-
State events +
%s
-
Jacobians + ''' % (label, label, label) + heaGro = heaGro + temp + heaGro = heaGro + '''
t2⁄t1
%s%.4f%d%d%.2f
%s%.4f%d%d%.2f
''' + os.linesep + failedModelsInfo += '''''' + os.linesep + for i in range(len(failedModels)): + failedTxt = ', '.join(failedModels[i]['logs']) + failedModelsInfo += ''' + ''' % (failedModels[i]['model'], failedTxt) + os.linesep + failedModelsInfo += '''
ModelFailed Info
%s%s
''' + + flagInfo = '''
+

+ Following models were flagged for which the maximum simulation time is greater than %.2f seconds + and the relative difference between maximum and minimum simulation time + (i.e. (tmax - tmin)/tmax) + is greater than %.2f. +

+ ''' % (self._tolAbsTime, self._tolRelTime) + flagModels = colGro + heaGro + flaggedModels + os.linesep + \ + '''''' + allModelInfo = '''

+

+ Following models are in package %s: +

+ ''' % self._package + allModels = colGro + heaGro + models + os.linesep + \ + '''''' + + # assemble html content + htmltext = '''''' + os.linesep + style + toolBranchInfo + failedModelsInfo + \ + flagInfo + flagModels + allModelInfo + allModels + '''''' + return htmltext, sortedList + + def _runCases(self, cases): + ''' Run simulations + ''' + lib_dir = self._create_and_return_working_directory() + self._clone_repository(lib_dir) + self._runPostCloneCommand(lib_dir) + for case in cases: + d = self._checkout_branch(lib_dir, case['branch']) + case['commit'] = d['commit'] + self._simulateCase(case, lib_dir) + shutil.rmtree(lib_dir) + + def run(self): + ''' Run the comparison and generate the output. + + The output files will be in the directory ``results``, and the raw test data + are in the directories with the same names as specified by the parameter ``tools``. + ''' + cases = self._get_cases() + self._runCases(cases) + self.post_process() + + def post_process(self, tolAbsTime=None, tolRelTime=None): + ''' Generate the html tables. + + This function post-processes the simulations, generates the overview tables, and writes the tables + to the directory `results`. + + :param tolAbsTime: float. Optional argument for absolute tolerance in time, if exceeded, results will be flagged in summary table. + :param tolRelTime: float. Optional argument for relative tolerance in time, if exceeded, results will be flagged in summary table. 
+ + ''' + + _tolAbsTime = self._tolAbsTime if tolAbsTime is None else tolAbsTime + _tolRelTime = self._tolRelTime if tolRelTime is None else tolRelTime + + logs = list() + for case in self._get_cases(): + # find commit number + with io.open(case['commit'], mode="r") as f: + commit = f.read() + # filter simulation log + temp = {'branch': case['branch'], + 'commit': commit, + 'tool': case['tool'], + 'log': Comparator._sortSimulationData(case)} + logs.append(temp) + toolsCompare = list() + branchesCompare = list() + + # comparison between different branches with same tool + if len(self._branches) > 1: + for tool in self._tools: # [dymola, jmodelica] + data = {'label': tool} + temp = list() + for log in logs: + if log['tool'] == tool: + branch = {'label': log['branch'], + 'commit': log['commit'], + 'log': log['log']} + temp.append(branch) + data['logs'] = temp + branchesCompare.append(data) + # refactor data structure + branchesData = Comparator._refactorDataStructure( + branchesCompare, _tolAbsTime, _tolRelTime) + # generate html table file + self._generateTable(branchesData) + + # comparison between different tools on same branch + if len(self._tools) > 1: + for branch in self._branches: + data = {'label': branch} + temp = list() + for log in logs: + if log['branch'] == branch: + toolLog = {'label': log['tool'], + 'commit': log['commit'], + 'log': log['log']} + temp.append(toolLog) + data['logs'] = temp + toolsCompare.append(data) + # refactor data structure + toolsData = Comparator._refactorDataStructure(toolsCompare, _tolAbsTime, _tolRelTime) + # generate html table file + self._generateTable(toolsData) diff --git a/buildingspy/development/validator.py b/buildingspy/development/validator.py index dda87a5d..e7f7eafa 100644 --- a/buildingspy/development/validator.py +++ b/buildingspy/development/validator.py @@ -206,7 +206,7 @@ def _check_experiment(self, name, val, value, model_path, mos_file): ".\n" + self._capitalize_first(name) + " contains invalid expressions 
such as x * y. Only literal expressions are allowed " + - "by OPTIMICA, JModelica and OpenModelica unit tests.\n") + "by OPTIMICA and OpenModelica unit tests.\n") raise ValueError(s) delta = abs(eval(val) - eval(value)) @@ -338,7 +338,7 @@ def _separate_mos_files(self, mos_files): if (found_sim and not found_tol): s = ( "Found mos file={!s} without tolerance defined.\n" + - "A minimum tolerance of 1e-6 is required for OPTIMICA and JModelica.\n").format(itr) + "A minimum tolerance of 1e-6 is required for OPTIMICA.\n").format(itr) raise ValueError(s) return n_tols, mos_non_fmus, mos_fmus @@ -371,13 +371,13 @@ def _wrong_parameter(self, mos_file, name, value): if value is None: s = ( "Found mos file={!s} without tolerance specified.\n" + - "A minimum tolerance of 1e-6 is required for OPTIMICA and JModelica for unit tests.\n").format(mos_file) + "A minimum tolerance of 1e-6 is required for OPTIMICA for unit tests.\n").format(mos_file) raise ValueError(s) else: if(float(value) > 1e-6): s = ("Found mos file={!s} with tolerance={!s}.\n" "The tolerance found is bigger than 1e-6, the maximum required by " - "OPTIMICA and JModelica for unit tests.\n").format(mos_file, value) + "OPTIMICA for unit tests.\n").format(mos_file, value) raise ValueError(s) if (name + "=" == "stopTime="): @@ -411,7 +411,7 @@ def _getValue(self, name, line, fil_nam): if name == "StartTime": # If it is smaller than -2147483648 and bigger than 2147483647, which are # the minimum and maximum 32 bit integers. These are used in - # the CI testing of JModelica. Exceeding them will cause an integer overflow + # the CI testing of OPTIMICA. Exceeding them will cause an integer overflow if isinstance(ev, int): if ev < -2147483648: err = ( @@ -422,7 +422,7 @@ def _getValue(self, name, line, fil_nam): if name == "StopTime": # If it is smaller than -2147483648 and bigger than 2147483647, which are # the minimum and maximum 32 bit integers. These are used in - # the CI testing of JModelica. 
Exceeding them will cause an integer overflow + # the CI testing of OPTIMICA. Exceeding them will cause an integer overflow if isinstance(ev, int): if ev > 2147483647: err = ( @@ -444,7 +444,7 @@ def _wrong_literal(self, mos_file, name): s = ( "Found mos file={!s} with invalid expression={!s}.\n" + - "This is not allowed for cross validation with OPTIMICA and JModelica.\n").format( + "This is not allowed for cross validation with OPTIMICA.\n").format( mos_file, name + '=' + diff --git a/buildingspy/examples/dymola/runSimulation.py b/buildingspy/examples/dymola/runSimulation.py index 7156c9c4..e96ca7c3 100644 --- a/buildingspy/examples/dymola/runSimulation.py +++ b/buildingspy/examples/dymola/runSimulation.py @@ -29,17 +29,19 @@ def main(): li = [] # First model, from Modelica Buildings Library, v7.0.0 model = 'Buildings.Controls.Continuous.Examples.PIDHysteresis' - s = Simulator(model, 'case1') + s = Simulator(model, outputDirectory='case1') s.addParameters({'con.eOn': 0.1}) li.append(s) # second model - s = Simulator(model, 'case2') + s = Simulator(model, outputDirectory='case2') s.addParameters({'con.eOn': 1}) li.append(s) # Run all cases in parallel po = Pool() po.map(simulateCase, li) + po.close() + po.join() # Clean up shutil.rmtree('case1') diff --git a/buildingspy/io/outputfile.py b/buildingspy/io/outputfile.py index 3858c50e..ab638100 100644 --- a/buildingspy/io/outputfile.py +++ b/buildingspy/io/outputfile.py @@ -143,7 +143,10 @@ def get_errors_and_warnings(log_file, simulator): elif lin.find(ERR) >= 0: listErr.append(lines[index + 1].strip()) elif simulator == "dymola" and lin == " = false\n": - listErr.append("Log file contained the line ' = false'") + em = "Log file contained the line ' = false'" + if index > 0: + em = f"{em}. 
Preceding line: '{lines[index-1]}'" + listErr.append(em) ret["warnings"] = listWarn ret["errors"] = listErr @@ -165,9 +168,9 @@ class Reader(object): def __init__(self, fileName, simulator): import os - if simulator not in ['openmodelica', 'dymola', 'optimica', 'jmodelica']: raise ValueError( - 'Argument "simulator" needs to be set to "openmodelica", "dymola", "optimica" or "jmodelica".') + if simulator not in ['openmodelica', 'dymola', 'optimica']: raise ValueError( + 'Argument "simulator" needs to be set to "openmodelica", "dymola" or "optimica".') if not os.path.isfile(fileName): raise FileNotFoundError(f"File {os.path.abspath(fileName)} does not exist.") diff --git a/buildingspy/io/postprocess.py b/buildingspy/io/postprocess.py index 7f33f558..555f5c95 100644 --- a/buildingspy/io/postprocess.py +++ b/buildingspy/io/postprocess.py @@ -143,7 +143,7 @@ def boxplot(t, y, increment=3600, nIncrement=24, :param increment: The time increment that is used in the plot :param nIncrement: The number of increments before the data are wrapped. :return: This method returns a - `matplotlib.pyplot `_ object that can be further + `matplotlib.pyplot `_ object that can be further processed, such as to label its axis. All other arguments are as explained at `matplotlib's boxplot documentation diff --git a/buildingspy/simulate/Dymola.py b/buildingspy/simulate/Dymola.py index 57be11cc..4e73c2d8 100644 --- a/buildingspy/simulate/Dymola.py +++ b/buildingspy/simulate/Dymola.py @@ -135,9 +135,6 @@ def _get_dymola_commands(self, working_directory, log_file, model_name, translat s = """ // File autogenerated by _get_dymola_commands // Do not edit. 
-//cd("{working_directory}"); -openModel("{package_mo}", changeDirectory=false); -Modelica.Utilities.Files.remove("{log_file}"); OutputCPUtime:=true; """.format(working_directory=working_directory, package_mo=package_mo, diff --git a/buildingspy/simulate/Optimica.py b/buildingspy/simulate/Optimica.py index b4ce44e2..be8d5150 100644 --- a/buildingspy/simulate/Optimica.py +++ b/buildingspy/simulate/Optimica.py @@ -4,10 +4,6 @@ Class that translates and simulates a Modelica model with OPTIMICA. - Note that because OPTIMICA and JModelica have a similar API, and because - they are invoked by the same script, this class - should also work with JModelica. - For a similar class that uses Dymola, see :func:`buildingspy.simulate.Dymola`. """ @@ -205,7 +201,7 @@ def _translate_and_simulate(self, simulate): template = env.get_template("optimica_run.template") - # Note that filter argument must respect glob syntax ([ is escaped with []]) + JModelica mat file + # Note that filter argument must respect glob syntax ([ is escaped with []]) + OPTIMICA mat file # stores matrix variables with no space e.g. [1,1]. txt = template.render( model=self.modelName, diff --git a/buildingspy/simulate/__init__.py b/buildingspy/simulate/__init__.py index d7d1c223..84980821 100644 --- a/buildingspy/simulate/__init__.py +++ b/buildingspy/simulate/__init__.py @@ -13,8 +13,4 @@ class may have methods that are only applicable for :func:`buildingspy.simulate.Optimica.Simulator.generateHtmlDiagnostics` is only available for Optimica. -Note that :mod:`buildingspy.simulate.Optimica` -should also work with JModelica.org, -but the latter is not officially supported. 
- """ diff --git a/buildingspy/simulate/base_simulator.py b/buildingspy/simulate/base_simulator.py index c3e7a4a8..b6869572 100644 --- a/buildingspy/simulate/base_simulator.py +++ b/buildingspy/simulate/base_simulator.py @@ -39,9 +39,7 @@ def __init__( # Check if the packagePath parameter is correct self._packagePath = None - if packagePath is None: - self.setPackagePath(os.path.abspath('.')) - else: + if packagePath is not None: self.setPackagePath(packagePath) self.modelName = modelName @@ -311,7 +309,10 @@ def _create_worDir(self): import os import tempfile import getpass - curDir = os.path.abspath(self._packagePath) + if self._packagePath is None: + curDir = os.path.abspath(".") + else: + curDir = os.path.abspath(self._packagePath) ds = curDir.split(os.sep) dirNam = ds[len(ds) - 1] worDir = os.path.join(tempfile.mkdtemp( @@ -376,7 +377,10 @@ def _runSimulation(self, cmd, timeout, directory, env=None): # python buildingspy/tests/test_simulate_Optimica.py # Test_simulate_Simulator.test_setResultFilter osEnv = os.environ.copy() if env is None else env - osEnv = self.prependToModelicaPath(osEnv, os.path.dirname(self._packagePath)) + if self._packagePath is None: + osEnv = self.prependToModelicaPath(osEnv, os.path.abspath(".")) + else: + osEnv = self.prependToModelicaPath(osEnv, os.path.dirname(self._packagePath)) # Run command try: @@ -403,7 +407,7 @@ def _runSimulation(self, cmd, timeout, directory, env=None): if timeout_exceeded: # For Dymola only: manage process termination. - # (For Optimica and JModelica this is managed at the lower level + # (For Optimica this is managed at the lower level # in `*_run.template`.) 
if self._MODELICA_EXE == 'dymola': # On unixlike systems, give the process a chance to close gracefully @@ -534,8 +538,11 @@ def prependToModelicaPath(env, path): >>> env = s.prependToModelicaPath(env, os.getcwd()) ''' - if 'MODELICAPATH' in env: - env['MODELICAPATH'] = ":".join([path, env['MODELICAPATH']]) + if path is None: + return env else: - env['MODELICAPATH'] = path - return env + if 'MODELICAPATH' in env: + env['MODELICAPATH'] = ":".join([path, env['MODELICAPATH']]) + else: + env['MODELICAPATH'] = path + return env diff --git a/buildingspy/templates/regressiontest_conf.py b/buildingspy/templates/regressiontest_conf.py index 321c3388..2d0a9da8 100644 --- a/buildingspy/templates/regressiontest_conf.py +++ b/buildingspy/templates/regressiontest_conf.py @@ -5,7 +5,8 @@ "schema": { "translate": {"type": "boolean"}, "simulate": {"type": "boolean"}, - "comment": {"type": "string"} + "comment": {"type": "string"}, + "time_out": {"type": "number"} } }, "optimica": { @@ -16,7 +17,8 @@ "comment": {"type": "string"}, "solver": {"type": "string"}, "rtol": {"type": "number"}, - "ncp": {"type": "integer", "min": 500} + "ncp": {"type": "integer", "min": 500}, + "time_out": {"type": "number"} } }, "openmodelica": { @@ -27,7 +29,8 @@ "comment": {"type": "string"}, "solver": {"type": "string"}, "rtol": {"type": "number"}, - "ncp": {"type": "integer", "min": 500} + "ncp": {"type": "integer", "min": 500}, + "time_out": {"type": "number"} } } } diff --git a/buildingspy/tests/MyModelicaLibrary/Examples/FMUs/IntegratorGain.mo b/buildingspy/tests/MyModelicaLibrary/Examples/FMUs/IntegratorGain.mo index 22205e20..86dc1edf 100644 --- a/buildingspy/tests/MyModelicaLibrary/Examples/FMUs/IntegratorGain.mo +++ b/buildingspy/tests/MyModelicaLibrary/Examples/FMUs/IntegratorGain.mo @@ -1,6 +1,6 @@ within MyModelicaLibrary.Examples.FMUs; block IntegratorGain "Block to demonstrate the FMU export" - extends Modelica.Blocks.Interfaces.BlockIcon; + extends Modelica.Blocks.Icons.Block; parameter 
Real k = -1 "Gain"; diff --git a/buildingspy/tests/MyModelicaLibrary/Examples/FMUs/Integrator_Underscore.mo b/buildingspy/tests/MyModelicaLibrary/Examples/FMUs/Integrator_Underscore.mo index 6ce1be11..ab0057cb 100644 --- a/buildingspy/tests/MyModelicaLibrary/Examples/FMUs/Integrator_Underscore.mo +++ b/buildingspy/tests/MyModelicaLibrary/Examples/FMUs/Integrator_Underscore.mo @@ -1,6 +1,6 @@ within MyModelicaLibrary.Examples.FMUs; block Integrator_Underscore "Block to demonstrate the FMU export" - extends Modelica.Blocks.Interfaces.BlockIcon; + extends Modelica.Blocks.Icons.Block; parameter Real k = -1 "Gain"; diff --git a/buildingspy/tests/MyModelicaLibrary/Examples/NoSolution.mo b/buildingspy/tests/MyModelicaLibrary/Examples/NoSolution.mo new file mode 100644 index 00000000..47a40b92 --- /dev/null +++ b/buildingspy/tests/MyModelicaLibrary/Examples/NoSolution.mo @@ -0,0 +1,16 @@ +within MyModelicaLibrary.Examples; +model NoSolution + Real x(start=0, fixed=true) "State"; + Real y(start=1); +equation + der(x) = 1; + cos(y) = 2*x; + +annotation ( + Documentation(info = "

+Model with equations that have no solution. +

"), +experiment(Tolerance=1e-6, StopTime=1.0), +__Dymola_Commands(file="Resources/Scripts/Dymola/Examples/NoSolution.mos" + "Simulate and plot")); +end NoSolution; diff --git a/buildingspy/tests/MyModelicaLibrary/Examples/ParameterEvaluation.mo b/buildingspy/tests/MyModelicaLibrary/Examples/ParameterEvaluation.mo index 88db97b1..19e81e87 100644 --- a/buildingspy/tests/MyModelicaLibrary/Examples/ParameterEvaluation.mo +++ b/buildingspy/tests/MyModelicaLibrary/Examples/ParameterEvaluation.mo @@ -5,7 +5,8 @@ model ParameterEvaluation parameter Integer n = integer(1/x) "Dimension"; Real T[n] "Vector"; equation - der(T) = ones(n) + der(T) = ones(n); + annotation (Documentation(info="

This model is used in the Python regression tests to ensure that BuildingsPy @@ -13,5 +14,4 @@ throws an exception if it attempts to change a structural parameter after the compilation.

")); - end ParameterEvaluation; diff --git a/buildingspy/tests/MyModelicaLibrary/Examples/package.order b/buildingspy/tests/MyModelicaLibrary/Examples/package.order index bf6d9947..c3a41501 100644 --- a/buildingspy/tests/MyModelicaLibrary/Examples/package.order +++ b/buildingspy/tests/MyModelicaLibrary/Examples/package.order @@ -1,5 +1,6 @@ BooleanParameters Constants MyStep +NoSolution ParameterEvaluation FMUs diff --git a/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/BuildingsPy/conf.yml b/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/BuildingsPy/conf.yml new file mode 100644 index 00000000..c1c741f0 --- /dev/null +++ b/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/BuildingsPy/conf.yml @@ -0,0 +1,10 @@ +- model_name: MyModelicaLibrary.Examples.NoSolution + dymola: + comment: 'Model excluded from simulation as it has no solution.' + simulate: false + openmodelica: + comment: 'Model excluded from simulation as it has no solution.' + simulate: false + optimica: + comment: 'Model excluded from simulation as it has no solution.' 
+ simulate: false diff --git a/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/Dymola/Examples/NoSolution.mos b/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/Dymola/Examples/NoSolution.mos new file mode 100644 index 00000000..653be6df --- /dev/null +++ b/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/Dymola/Examples/NoSolution.mos @@ -0,0 +1,5 @@ +simulateModel("MyModelicaLibrary.Examples.NoSolution", tolerance=1e-6, stopTime=1.0, method="CVode"); +createPlot( + id=1, + y={"x", "y"} +) diff --git a/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/OpenModelica/compareVars/MyModelicaLibrary.Examples.NoSolution.mos b/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/OpenModelica/compareVars/MyModelicaLibrary.Examples.NoSolution.mos new file mode 100644 index 00000000..91a80073 --- /dev/null +++ b/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/OpenModelica/compareVars/MyModelicaLibrary.Examples.NoSolution.mos @@ -0,0 +1,5 @@ +compareVars := + { + "x", + "y" + }; diff --git a/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/travis/bin/jm_ipython.sh b/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/travis/bin/jm_ipython.sh index b334f036..a289f416 100755 --- a/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/travis/bin/jm_ipython.sh +++ b/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/travis/bin/jm_ipython.sh @@ -57,7 +57,7 @@ if [ -z ${MODELICAPATH+x} ]; then else # Add the current directory to the front of the Modelica path. # This will export the directory to the docker, and also set - # it in the MODELICAPATH so that JModelica finds it. + # it in the MODELICAPATH so that OPTIMICA finds it. 
MODELICAPATH=`pwd`:${MODELICAPATH} fi @@ -107,7 +107,7 @@ DOCKER_FLAGS="\ ${NAME}" docker run ${DOCKER_FLAGS} /bin/bash -c \ - "export MODELICAPATH=${DOCKER_MODELICAPATH}:/opt/oct/ThirdParty/MSL/MSL323:/opt/oct/ThirdParty/MSL/MSL400:/opt/oct/ThirdParty/MSL && \ + "export MODELICAPATH=${DOCKER_MODELICAPATH} && \ export PYTHONPATH=${DOCKER_PYTHONPATH} && \ export IPYTHONDIR=/mnt/shared && alias ipython=ipython3 && \ diff --git a/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/travis/bin/omc b/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/travis/bin/omc index d51735ae..441a4107 100755 --- a/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/travis/bin/omc +++ b/buildingspy/tests/MyModelicaLibrary/Resources/Scripts/travis/bin/omc @@ -80,18 +80,28 @@ do shift done -# --user=${UID} \ +# On travis, the current working directory /tmp/tmp-Buildings-0-o0h50lbn (or similar) +# is created # with permissions drwx------ and owned by the travis user and group (UID=2000). +# However, omc must be run as user developer (UID=1000) as it accesses ~/.openmodelica/libraries. +# But running as developer does not allow the docker to access /tmp/tmp-Buildings-0-o0h50lbn. +# The chmod command below gives access to the current working directory for all users. +# See also https://app.travis-ci.com/github/lbl-srg/modelica-buildings/builds/255798277 +chmod a+rwx "$cur_dir" +# Below, inside the docker, we remove spawn-* as these files have the permission of the +# docker user and hence the calling process cannot remove them, which +# causes an error if BuildingsPy cleans up the temporary directories. 
docker run \ - --user=${UID} \ + --user=developer \ -i \ $DOCKER_INTERACTIVE \ --detach=false \ ${MOD_MOUNT} \ -v ${sha_dir}:/mnt/shared \ --rm \ + --workdir="/mnt/shared/${bas_nam}" \ ${DOCKER_USERNAME}/${IMG_NAME} /bin/bash -c \ - "export OPENMODELICALIBRARY=${DOCKER_MODELICAPATH}:/usr/lib/omlibrary && \ - cd \"/mnt/shared/${bas_nam}\" && \ - omc ${arg_lis}" + "export MODELICAPATH=${DOCKER_MODELICAPATH} && \ + omc ${arg_lis} && \ + rm -rf spawn-*" exit $? diff --git a/buildingspy/tests/MyModelicaLibrary/package.mo b/buildingspy/tests/MyModelicaLibrary/package.mo index ab6e059f..fb8fb4d6 100644 --- a/buildingspy/tests/MyModelicaLibrary/package.mo +++ b/buildingspy/tests/MyModelicaLibrary/package.mo @@ -18,5 +18,5 @@ See buildingspy/tests/Test_development_refactor_Annex60.

")); - annotation (uses(Modelica(version="3.2.3"))); + annotation (uses(Modelica(version="4.0.0"))); end MyModelicaLibrary; diff --git a/buildingspy/tests/MyModelicaLibrary/package.order b/buildingspy/tests/MyModelicaLibrary/package.order index 602ef997..f3ebf8d9 100644 --- a/buildingspy/tests/MyModelicaLibrary/package.order +++ b/buildingspy/tests/MyModelicaLibrary/package.order @@ -1,8 +1,8 @@ MyModel MyModelTimeOut MyStep -Reset one two +Reset Examples Obsolete diff --git a/buildingspy/tests/test_development_Comparator.py b/buildingspy/tests/test_development_Comparator.py new file mode 100644 index 00000000..ad8a65e0 --- /dev/null +++ b/buildingspy/tests/test_development_Comparator.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +import os + +import unittest + + +class Test_development_Comparator(unittest.TestCase): + """ + This class contains the unit tests for + :mod:`buildingspy.development.Comparator`. + """ + + def assertIsFile(self, path): + import pathlib as pl + if not pl.Path(path).resolve().is_file(): + raise AssertionError("File does not exist: %s" % str(path)) + + def test_tools(self): + import buildingspy.development.simulationCompare as sc + import shutil + + repo = "https://github.com/ibpsa/modelica-ibpsa" + tools = ['dymola', 'openmodelica'] + + s = sc.Comparator( + tools=tools, + branches=['master'], + package="IBPSA.Utilities.Psychrometrics.Examples", + repo=repo) + + # s.run() + s.post_process() + # Make sure output file exists + self.assertIsFile(os.path.join("results", "html", "tools_compare_master.html")) + # shutil.rmtree("results") + # for tool in tools: + # shutil.rmtree(tool) + + +if __name__ == '__main__': + unittest.main() diff --git a/buildingspy/tests/test_development_Validator.py b/buildingspy/tests/test_development_Validator.py index 49ac5dbb..363444b7 100644 --- a/buildingspy/tests/test_development_Validator.py +++ b/buildingspy/tests/test_development_Validator.py @@ -97,12 +97,12 @@ def 
test_validateExperimentSetup(self): ########################################### # Checking missing tolerance in mos file self.run_case(val, myMoLib, "Test2", "experiment(Tolerance=1e-6, StopTime=1.0),", - "", "A minimum tolerance of 1e-6 is required for OPTIMICA and JModelica.") + "", "A minimum tolerance of 1e-6 is required for OPTIMICA.") ########################################### # Checking missing tolerance in mo file self.run_case(val, myMoLib, "Test3", "experiment(StopTime=1.0),", "stopTime=1.0,", - "A minimum tolerance of 1e-6 is required for OPTIMICA and JModelica.") + "A minimum tolerance of 1e-6 is required for OPTIMICA.") ########################################### # Checking tolerances mismatch @@ -168,7 +168,7 @@ def test_validateExperimentSetup(self): ########################################### # Checking wrong data type that can cause an overflow - # In JModelica's CI testing, the maximum integer is 2147483647 + # In OPTIMICA's CI testing, the maximum integer is 2147483647 self.run_case( val, myMoLib, diff --git a/buildingspy/tests/test_development_regressiontest.py b/buildingspy/tests/test_development_regressiontest.py index 70f60b13..98d3827e 100644 --- a/buildingspy/tests/test_development_regressiontest.py +++ b/buildingspy/tests/test_development_regressiontest.py @@ -103,7 +103,7 @@ def test_setSinglePackage_2(self): rt.setLibraryRoot(myMoLib) rt.include_fmu_tests(True) rt.setSinglePackage("MyModelicaLibrary.Examples") - self.assertEqual(6, rt.get_number_of_tests()) + self.assertEqual(7, rt.get_number_of_tests()) def test_setSinglePackage_3(self): import buildingspy.development.regressiontest as r @@ -112,7 +112,7 @@ def test_setSinglePackage_3(self): rt.setLibraryRoot(myMoLib) rt.include_fmu_tests(True) rt.setSinglePackage("MyModelicaLibrary.Examples.FMUs,MyModelicaLibrary.Examples") - self.assertEqual(6, rt.get_number_of_tests()) + self.assertEqual(7, rt.get_number_of_tests()) def test_setSinglePackage_4(self): import 
buildingspy.development.regressiontest as r @@ -121,7 +121,7 @@ def test_setSinglePackage_4(self): rt.setLibraryRoot(myMoLib) rt.include_fmu_tests(True) rt.setSinglePackage("MyModelicaLibrary.Examples,MyModelicaLibrary.Examples.FMUs") - self.assertEqual(6, rt.get_number_of_tests()) + self.assertEqual(7, rt.get_number_of_tests()) def test_areResultsEqual(self): """Test legacy comparison tool.""" diff --git a/buildingspy/tests/test_development_regressiontest_jmodelica.py b/buildingspy/tests/test_development_regressiontest_jmodelica.py deleted file mode 100644 index 013182d9..00000000 --- a/buildingspy/tests/test_development_regressiontest_jmodelica.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -import unittest -import os - -# To run this test, navigate to the BuildingsPy folder, then type -# python buildingspy/tests/test_development_regressiontest.py - - -class Test_regressiontest_jmodelica_Tester(unittest.TestCase): - """ - This class contains the unit tests for - :mod:`buildingspy.regressiontest.Tester` for jmodelica. - """ - - def test_unit_test_log_file(self): - import buildingspy.development.regressiontest as r - rt = r.Tester(check_html=False, tool="jmodelica") - self.assertEqual('unitTests-jmodelica.log', rt.get_unit_test_log_file()) - - @staticmethod - def _write_test(content): - """ Write a unit test for a model with the content `content` - in a temporary directory and return the name of this directory. 
- """ - import os - import tempfile - - dir_name = os.path.join(tempfile.mkdtemp(prefix='tmp-BuildingsPy-unittests-'), "TestLib") - script_dir = os.path.join(dir_name, "Resources", "Scripts", "Dymola") - mo_name = "Test" - mo_content = """within TestLib; - model Test - {} - annotation (experiment(Tolerance=1e-6, StopTime=3600)); - end Test; - """.format(content) - - # Create directory for mos scripts - os.makedirs(script_dir) - # Write mos file - with open(os.path.join(script_dir, mo_name + ".mos"), mode="w", encoding="utf-8") as fil: - con = """ -simulateModel("TestLib.{}", tolerance=1e-6, stopTime=1.0, method="CVode", resultFile="test");""".format(mo_name) - con = con + """ -createPlot(id=1, y={"Test.x"}); -""" - fil.write(con) - # Write mo file - with open(os.path.join(dir_name, mo_name + ".mo"), mode="w", encoding="utf-8") as fil: - fil.write(mo_content) - # Write top-level package - with open(os.path.join(dir_name, 'package.mo'), mode="w", encoding="utf-8") as fil: - mo = """ - within; - package TestLib - end TestLib; -""" - fil.write(mo) - # Write top-level package.order - with open(os.path.join(dir_name, 'package.order'), mode="w", encoding="utf-8") as fil: - mo = """TestLib""" - fil.write(mo) - return dir_name - - def test_regressiontest_diagnostics(self): - """ Test that warnings and errors reported by JModelica are reported. 
- """ - import shutil - import buildingspy.development.regressiontest as r - - tests = [ - {'ret_val': 0, - 'mo_content': """parameter Real x = 0;""", - 'description': "Correct model."}, - {'ret_val': 2, - 'mo_content': """parameter Real[2] x(unit="m") = {0, 0};""", - 'description': "Missing each on variable."}, - {'ret_val': 2, - 'mo_content': """parameter Real x(each unit="m") = 0;""", - 'description': "Wrong each on scalar."}, - {'ret_val': 2, - 'mo_content': """Modelica.Blocks.Sources.Constant b(each k=0) ;""", - 'description': "Wrong each on scalar component."}, - {'ret_val': 2, - 'mo_content': """Modelica.Blocks.Sources.Constant b[2](k=0) ;""", - 'description': "Missing each on array of components."}, - {'ret_val': 0, - 'mo_content': """ - Real x; - equation - Modelica.Math.exp(x)=1;""", - 'description': "Missing start value, which should be ignored."}, - {'ret_val': 0, - 'mo_content': """ - Real x(start=0); - equation - der(x)^3 = 0;""", - 'description': "Missing start value for der(x), which should be ignored."}, - {'ret_val': 2, - 'mo_content': """parameter Real[2] x(unit="m") = {0, 0}; - parameter Real y(each unit="m") = 0;""", - 'description': "Two errors."}, - {'ret_val': 1, - 'mo_content': """x; """, - 'description': "Syntax error that should cause a failure in translation."}, - {'ret_val': 1, - 'mo_content': """Real x(start=0); - equation - Modelica.Math.exp(x)=-1;""", - 'description': "Model that has no solution."} - ] - # Run all test cases - for test in tests: - des = test['description'] - print("*** Running test for '{}'".format(des)) - mo_content = test['mo_content'] - dir_name = self._write_test(mo_content) - rt = r.Tester(skip_verification=True, check_html=False, tool="jmodelica") - rt.setLibraryRoot(dir_name) - ret_val = rt.run() - # Check return value to see if test suceeded - self.assertEqual( - test['ret_val'], - ret_val, - "Test for '{}' failed, return value {}".format( - des, - ret_val)) # Delete temporary files - # Get parent dir of 
dir_name, because dir_name contains the Modelica library name - par = os.path.split(dir_name)[0] - os.remove(rt.get_unit_test_log_file()) - shutil.rmtree(par) - - def test_regressiontest(self): - import buildingspy.development.regressiontest as r - rt = r.Tester(skip_verification=True, check_html=False, tool="jmodelica") - myMoLib = os.path.join("buildingspy", "tests", "MyModelicaLibrary") - rt.deleteTemporaryDirectories(True) - rt.setLibraryRoot(myMoLib) - ret_val = rt.run() - # Check return value to see if test suceeded - self.assertEqual(0, ret_val, "Test failed with return value {}".format(ret_val)) - # Delete temporary files - os.remove(rt.get_unit_test_log_file()) - - -if __name__ == '__main__': - unittest.main() diff --git a/buildingspy/tests/test_examples_dymola.py b/buildingspy/tests/test_examples_dymola.py index 47634103..b9050ad0 100644 --- a/buildingspy/tests/test_examples_dymola.py +++ b/buildingspy/tests/test_examples_dymola.py @@ -64,9 +64,7 @@ def test_runSimulation(self): """ import os import buildingspy.examples.dymola.runSimulation as s - os.chdir("Buildings") s.main() - os.chdir("..") def test_plotResult(self): """ diff --git a/buildingspy/tests/test_simulate_Dymola.py b/buildingspy/tests/test_simulate_Dymola.py index e9766366..2e813ca7 100644 --- a/buildingspy/tests/test_simulate_Dymola.py +++ b/buildingspy/tests/test_simulate_Dymola.py @@ -80,6 +80,23 @@ def test_translate(self): s = Simulator("MyModelicaLibrary.MyModel", packagePath=self._packagePath) s.translate() + def test_simulate_user_library(self): + """ + Tests simulating a model from the Modelica Standard Library. + + """ + s = Simulator("MyModelicaLibrary.MyModel", packagePath=self._packagePath) + s.simulate() + + def test_simulate_msl(self): + """ + Tests simulating a model from the Modelica Standard Library. 
+ + This test is for https://github.com/lbl-srg/BuildingsPy/issues/472 + """ + s = Simulator("Modelica.Blocks.Examples.PID_Controller") + s.simulate() + def test_addMethods(self): """ Tests the various add methods. diff --git a/buildingspy/tests/test_simulate_Optimica.py b/buildingspy/tests/test_simulate_Optimica.py index e9fdaad5..b29b3891 100644 --- a/buildingspy/tests/test_simulate_Optimica.py +++ b/buildingspy/tests/test_simulate_Optimica.py @@ -80,6 +80,23 @@ def test_translate(self): s = Simulator("MyModelicaLibrary.MyModel", packagePath=self._packagePath) s.translate() + def test_simulate_user_library(self): + """ + Tests simulating a model from a user library. + + """ + s = Simulator("MyModelicaLibrary.MyModel", packagePath=self._packagePath) + s.simulate() + + def test_simulate_msl(self): + """ + Tests simulating a model from the Modelica Standard Library. + + This test is for https://github.com/lbl-srg/BuildingsPy/issues/472 + """ + s = Simulator("Modelica.Blocks.Examples.PID_Controller") + s.simulate() + def test_addMethods(self): """ Tests the various add methods. diff --git a/doc/Makefile b/doc/Makefile index 9cedbd6c..b79c6c3b 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -34,6 +34,7 @@ clean: html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo "Deleting stray html pull down menu" @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 
diff --git a/doc/source/_static/lbl-logo.png b/doc/source/_static/lbl-logo.png index 6ad8a608..16fae5f7 100644 Binary files a/doc/source/_static/lbl-logo.png and b/doc/source/_static/lbl-logo.png differ diff --git a/doc/source/_static/sphinxdoc.css b/doc/source/_static/sphinxdoc.css index 78cae1a7..bed777c0 100644 --- a/doc/source/_static/sphinxdoc.css +++ b/doc/source/_static/sphinxdoc.css @@ -46,11 +46,8 @@ html { body { font-size: 14px; -// this looks cramped on Ubuntu letter-spacing: -0.01em; line-height: 150%; color: #022e4d; -// border: 1px solid #022e4d; -// background-color: #BFD1D4; text-align: left; margin-left: auto; margin-right: auto; @@ -60,6 +57,10 @@ body { width: 870px; } +.nav-item{ + font-size: 0; +} + /* Center images */ div.figure img{ border: 0; @@ -75,6 +76,11 @@ div.headerStyle{ box-shadow: 2px -2px 10px 0px rgba(0,0,0,0.4); } +div.headerStyle img{ + left: 10px; + height: 80px; +} + div.document { background-color: white; text-align: left; @@ -97,10 +103,10 @@ div.related { } /* Highlight class names in python documentation */ -.class dt { +.class dt.sig, dt.sig-object { background-color: #BFD1D4; } -/* Don't highlight method names in python documentation */ +/* Method names in python documentation */ .class .method dt, .class .staticmethod dt { background-color: white; } @@ -218,7 +224,9 @@ h1 { h2 { margin: 1.3em 0 0.2em 0; font-size: 1.35em; - padding: 0; + padding: 0.1em; + background-color: #022e4d; + color: white; } h3 { @@ -284,7 +292,7 @@ tt.descname, tt.descclassname, tt.xref { span.pre{ border: 0; color: #750000; - background-color: white; + /*background-color: white;*/ } hr { diff --git a/doc/source/development.rst b/doc/source/development.rst index 6e1e066e..8fc97e92 100644 --- a/doc/source/development.rst +++ b/doc/source/development.rst @@ -36,3 +36,10 @@ Error dictionary .. automodule:: buildingspy.development.error_dictionary .. 
autoclass:: buildingspy.development.error_dictionary.ErrorDictionary :members: + +Comparing simulation performance +-------------------------------- + +.. automodule:: buildingspy.development.simulationCompare +.. autoclass:: buildingspy.development.simulationCompare.Comparator + :members: diff --git a/doc/source/index.rst b/doc/source/index.rst index 34c2c456..694cef20 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -10,12 +10,11 @@ buildingspy package that can be used to: - Run Modelica simulations using - `OPTIMICA `_ + `OPTIMICA `_ or `Dymola `_. - JModelica.org should also work as it has a similar API than OPTIMICA. - Process ``*.mat`` output files that were generated by - OPTIMICA, JModelica.org, Dymola or + OPTIMICA, Dymola or `OpenModelica `_. - Run unit tests as part of the library development. - Refactor Modelica libraries.