diff --git a/.pyup.yml b/.pyup.yml index 09a60749d..783352eaf 100644 --- a/.pyup.yml +++ b/.pyup.yml @@ -13,7 +13,7 @@ pin: True # set the default branch # default: empty, the default branch on GitHub -branch: master +branch: develop # update schedule # default: empty diff --git a/CHANGES.rst b/CHANGES.rst index a0ff2af7c..9806973ba 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,49 @@ +0.19.0 (2019-04-19) +=================== + +New Features +------------ + +Project & API Documentation +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Added guidelines to the style guide for logging the execution of instrument monitors +- Added example usage of logging in the ``example.py`` module + +Web Application +~~~~~~~~~~~~~~~ + +- Modified various web app views to enable faster loading times +- Modified archive and preview image views to only display data for an authenticated user +- Added views for MIRI and NIRSpec Data Trending Monitors, which monitor the behavior of select MIRI and NIRSpec Engineering Database mnemonics over time + +``jwql`` Repository +~~~~~~~~~~~~~~~~~~~ + +- Added Dark Monitor module, which monitors the dark current and hot pixel populations for each JWST instrument +- Added software for producing MIRI and NIRSpec Data Trending Monitors (described above) +- Modified ``generate_preview_images`` module to support the creation of preview images for stage 3 data products +- Refactored ``monitor_filesystem`` to utilize PostgreSQL database tables to store archive filesystem statistics +- Configured ``codecov`` for the project. The project homepage can be found at https://codecov.io/gh/spacetelescope/jwql +- Modified ``logging_functions`` module to enable dev, test, and production logging environments +- Added convenience decorator to ``logging_functions`` module to time the execution of a function or method +- Modified ``monitor_cron_jobs`` module to make use of updated ``logging_functions`` + +Bug Fixes +--------- + +Web Application +~~~~~~~~~~~~~~~ + +- Fixed API views to only return the basenames of file paths, instead of full directory names + +``jwql`` Repository +~~~~~~~~~~~~~~~~~~~ + +- Fixed ``logging_functions`` module to properly parse new format of ``INSTALL_REQUIRES`` dependency in ``setup.py`` for logging system dependencies and their versions +- Fixed ``Jenkinsfile`` so that Jenkins builds no longer tolerate a failed unit test + + 0.18.0 (2019-03-14) ===================
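The changelog above mentions a convenience decorator in ``logging_functions`` for timing the execution of a function or method. A generic sketch of such a timing decorator (illustrative only; ``log_timing`` is a hypothetical name, not necessarily the jwql implementation)::

    import functools
    import logging
    import time

    def log_timing(func):
        """Log the execution time of the decorated function."""
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            start = time.time()
            result = func(*args, **kwargs)
            logging.info('%s executed in %.2f seconds', func.__name__, time.time() - start)
            return result
        return wrapped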
diff --git a/Jenkinsfile b/Jenkinsfile index 1f9443b97..b07a037c7 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,16 +1,21 @@ // Obtain files from source control system. if (utils.scm_checkout()) return +withCredentials([string( + credentialsId: 'jwql-codecov', + variable: 'codecov_token')]) { + // Define each build configuration, copying and overriding values as necessary. bc0 = new BuildConfig() bc0.nodetype = "linux-stable" bc0.name = "debug" -bc0.build_cmds = ["conda env update --file=environment.yml", - "with_env -n jwql python setup.py install"] -bc0.test_cmds = ["with_env -n jwql pytest -s --junitxml=result.xml"] -bc0.failedUnstableThresh = 1 -bc0.failedFailureThresh = 1 - +bc0.build_cmds = [ + "conda env update --file=environment.yml", + "pip install codecov pytest-cov", + "with_env -n jwql python setup.py install"] +bc0.test_cmds = [ + "with_env -n jwql pytest -s --junitxml=results.xml --cov=./jwql/ --cov-report xml", + "codecov --token=${codecov_token}"] // bc1 = utils.copy(bc0) // bc1.build_cmds[0] = "conda install -q -y python=3.5" @@ -18,3 +23,4 @@ bc0.failedFailureThresh = 1 // Iterate over configurations that define the (distributed) build matrix. // Spawn a host of the given nodetype for each combination and run in parallel. utils.run([bc0]) +} diff --git a/JenkinsfileRT b/JenkinsfileRT new file mode 100644 index 000000000..31904b083 --- /dev/null +++ b/JenkinsfileRT @@ -0,0 +1,22 @@ +// Obtain files from source control system. +if (utils.scm_checkout()) return + +// Define each build configuration, copying and overriding values as necessary. +bc0 = new BuildConfig() +bc0.nodetype = "linux-stable" +bc0.name = "debug" +bc0.build_cmds = ["conda env update --file=environment.yml", + "with_env -n jwql python setup.py install"] +bc0.test_cmds = ["cp JWQL_CONFIG jwql/utils/", + "with_env -n jwql pytest -s --junitxml=result.xml"] +bc0.failedUnstableThresh = 1 +bc0.failedFailureThresh = 1 + + + +// bc1 = utils.copy(bc0) +// bc1.build_cmds[0] = "conda install -q -y python=3.5" + +// Iterate over configurations that define the (distributed) build matrix. +// Spawn a host of the given nodetype for each combination and run in parallel. +utils.run([bc0]) \ No newline at end of file diff --git a/README.md b/README.md index 1a70e6228..19efad4a5 100644 --- a/README.md +++ b/README.md @@ -103,7 +103,7 @@ The following is a bare-bones example of a best work flow for contributing to th 4. Create a branch on that personal fork. 5. Make your software changes. 6. Push that branch to your personal GitHub repository (i.e. `origin`). -7. On the `spacetelescope` `jwql` repository, create a pull request that merges the branch into `spacetelescope:master`. +7. On the `spacetelescope` `jwql` repository, create a pull request that merges the branch into `spacetelescope:develop`. 8. Assign a reviewer from the team for the pull request. 9. Iterate with the reviewer over any needed changes until the reviewer accepts and merges your branch. 10. Delete your local copy of your branch. diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 000000000..df9e539f8 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,31 @@ +codecov: + notify: + require_ci_to_pass: yes + +coverage: + precision: 2 + round: down + range: "70...100" + + status: + project: yes + patch: yes + changes: no + +parsers: + gcov: + branch_detection: + conditional: yes + loop: yes + method: no + macro: no + +comment: + layout: "header, diff" + behavior: default + require_changes: no + +ignore: + - "jwql/website/" + - "jwql/database/" + - "*__init__.py*" \ No newline at end of file diff --git a/docs/source/common_monitors.rst b/docs/source/common_monitors.rst new file mode 100644 index 000000000..21d98a7d7 --- /dev/null +++ b/docs/source/common_monitors.rst @@ -0,0 +1,9 @@ +*************** +common_monitors +*************** + +dark_monitor.py +--------------- +.. 
automodule:: jwql.instrument_monitors.common_monitors.dark_monitor + :members: + :undoc-members: \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index af34cf5d8..00e9c25c3 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -23,9 +23,11 @@ API documentation :maxdepth: 1 :caption: Contents: + common_monitors.rst database.rst edb.rst jwql_monitors.rst + instrument_monitors.rst tests.rst utils.rst website.rst diff --git a/docs/source/instrument_monitors.rst b/docs/source/instrument_monitors.rst new file mode 100644 index 000000000..284f2cd23 --- /dev/null +++ b/docs/source/instrument_monitors.rst @@ -0,0 +1,9 @@ +******************* +instrument_monitors +******************* + +pipeline_tools.py +----------------- +.. automodule:: jwql.instrument_monitors.pipeline_tools + :members: + :undoc-members: \ No newline at end of file diff --git a/docs/source/tests.rst b/docs/source/tests.rst index b06f27be2..cfab32e86 100644 --- a/docs/source/tests.rst +++ b/docs/source/tests.rst @@ -8,12 +8,36 @@ test_api_views.py :members: :undoc-members: +test_calculations.py +-------------------- +.. automodule:: jwql.tests.test_calculations + :members: + :undoc-members: + +test_dark_monitor.py +-------------------- +.. automodule:: jwql.tests.test_dark_monitor + :members: + :undoc-members: + test_edb_interface.py --------------------- .. automodule:: jwql.tests.test_edb_interface :members: :undoc-members: +test_instrument_properties.py +----------------------------- +.. automodule:: jwql.tests.test_instrument_properties + :members: + :undoc-members: + +test_loading_times.py +--------------------- +.. automodule:: jwql.tests.test_loading_times + :members: + :undoc-members: + test_monitor_mast.py -------------------- .. automodule:: jwql.tests.test_monitor_mast @@ -26,6 +50,12 @@ test_permissions.py :members: :undoc-members: +test_pipeline_tools.py +---------------------- +.. automodule:: jwql.tests.test_pipeline_tools + :members: + :undoc-members: + test_plotting.py ---------------- .. automodule:: jwql.tests.test_plotting diff --git a/docs/source/utils.rst b/docs/source/utils.rst index bdc718d7f..e219ef3bf 100644 --- a/docs/source/utils.rst +++ b/docs/source/utils.rst @@ -2,12 +2,24 @@ utils ***** +calculations.py +--------------- +.. automodule:: jwql.utils.calculations + :members: + :undoc-members: + constants.py ------------ .. automodule:: jwql.utils.constants :members: :undoc-members: +instrument_properties.py +------------------------ +.. automodule:: jwql.utils.instrument_properties + :members: + :undoc-members: + logging_functions.py -------------------- .. 
automodule:: jwql.utils.logging_functions diff --git a/environment.yml b/environment.yml index f51125aa8..3bd509e1a 100644 --- a/environment.yml +++ b/environment.yml @@ -3,29 +3,32 @@ channels: - http://ssb.stsci.edu/astroconda-dev - defaults dependencies: -- asdf>=2.3.0 -- astropy +- asdf=2.3.1 +- astropy>=3.1.2 - astroquery=0.3.9 -- bokeh=1.0.4 -- django=2.1.5 +- bokeh=1.1.0 +- crds>=7.2.7 +- django=2.1.7 - ipython=7.3.0 - jinja2=2.10 -- jwst +- jwst=0.13.1 - matplotlib=3.0.2 - numpy=1.16.2 - numpydoc=0.8.0 -- pandas=0.24.0 +- pandas=0.24.2 - postgresql=9.6.6 - psycopg2=2.7.5 - python=3.6.4 - python-dateutil=2.7.5 -- pytest=4.3.0 +- pytest=4.4.0 - pytest-cov=2.6.1 - pytest-html=1.19.0 - sphinx=1.8.5 - sphinx_rtd_theme=0.1.9 -- sqlalchemy=1.3.1 +- sqlalchemy=1.3.3 - stsci_rtd_theme=0.0.2 - pip: - authlib==0.10 + - codecov==2.0.15 + - pysiaf==0.2.5 - sphinx-automodapi==0.10 diff --git a/jwql/database/database_interface.py b/jwql/database/database_interface.py index 8afcb07af..557223b6c 100644 --- a/jwql/database/database_interface.py +++ b/jwql/database/database_interface.py @@ -26,6 +26,8 @@ - Matthew Bourque - Lauren Chambers - Bryan Hilbert + - Misty Cracraft + - Sara Ogaz Use --- @@ -72,13 +74,13 @@ from sqlalchemy import String from sqlalchemy import Time from sqlalchemy import UniqueConstraint -from sqlalchemy.dialects import postgresql from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker from sqlalchemy.orm.query import Query from sqlalchemy.types import ARRAY -from jwql.utils import utils +from jwql.utils.constants import FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES +from jwql.utils.utils import get_config # Monkey patch Query with data_frame method @@ -133,12 +135,12 @@ def load_connection(connection_string): return session, base, engine, meta -# Import a global session. If running from readthedocs, pass a dummy connection string -if 'build' and 'project' and 'jwql' in socket.gethostname(): +# Import a global session. 
If running from readthedocs or Jenkins, pass a dummy connection string +if ('build' in socket.gethostname() and 'project' in socket.gethostname()) or os.path.expanduser('~') == '/home/jenkins': dummy_connection_string = 'postgresql+psycopg2://account:password@hostname:0000/db_name' session, base, engine, meta = load_connection(dummy_connection_string) else: - SETTINGS = utils.get_config() + SETTINGS = get_config() session, base, engine, meta = load_connection(SETTINGS['connection_string']) @@ -198,6 +200,49 @@ def colnames(self): return a_list +class FilesystemGeneral(base): + """ORM for the general (non instrument specific) filesystem monitor + table""" + + # Name the table + __tablename__ = 'filesystem_general' + + # Define the columns + id = Column(Integer, primary_key=True, nullable=False) + date = Column(DateTime, unique=True, nullable=False) + total_file_count = Column(Integer, nullable=False) + total_file_size = Column(Float, nullable=False) + fits_file_count = Column(Integer, nullable=False) + fits_file_size = Column(Float, nullable=False) + used = Column(Float, nullable=False) + available = Column(Float, nullable=False) + + +class FilesystemInstrument(base): + """ORM for the instrument specific filesystem monitor table""" + + # Name the table + __tablename__ = 'filesystem_instrument' + __table_args__ = (UniqueConstraint('date', 'instrument', 'filetype', name='filesystem_instrument_uc'),) + + # Define the columns + id = Column(Integer, primary_key=True, nullable=False) + date = Column(DateTime, nullable=False) + instrument = Column(Enum(*JWST_INSTRUMENT_NAMES, name='instrument_enum'), nullable=False) + filetype = Column(Enum(*FILE_SUFFIX_TYPES, name='filetype_enum'), nullable=False) + count = Column(Integer, nullable=False) + size = Column(Float, nullable=False) + + @property + def colnames(self): + """A list of all the column names in this table EXCEPT the date column""" + # Get the columns + a_list = [col for col, val in self.__dict__.items() + if not isinstance(val, datetime)] + + return a_list + + class Monitor(base): """ORM for the ``monitor`` table""" @@ -209,7 +254,7 @@ class Monitor(base): start_time = Column(DateTime, nullable=False) end_time = Column(DateTime, nullable=True) status = Column(Enum('SUCESS', 'FAILURE', name='monitor_status'), nullable=True) - affected_tables = Column(postgresql.ARRAY(String, dimensions=1), nullable=True) + affected_tables = Column(ARRAY(String, dimensions=1), nullable=True) log_file = Column(String(), nullable=False) @@ -240,11 +285,14 @@ def get_monitor_columns(data_dict, table_name): 'date': Date(), 'time': Time(), 'datetime': DateTime, - 'bool': Boolean} + 'bool': Boolean + } # Get the data from the table definition file + instrument = table_name.split('_')[0] table_definition_file = os.path.join(os.path.split(__file__)[0], 'monitor_table_definitions', + instrument.lower(), '{}.txt'.format(table_name)) with open(table_definition_file, 'r') as f: data = f.readlines() @@ -255,9 +303,20 @@ def get_monitor_columns(data_dict, table_name): column_name = column_definition[0] data_type = column_definition[1] + if 'array' in data_type: + dtype, _a, dimension = data_type.split('_') + dimension = int(dimension[0]) + array = True + else: + dtype = data_type + array = False + # Create a new column - if data_type in list(data_type_dict.keys()): - data_dict[column_name.lower()] = Column(data_type_dict[data_type]) + if dtype in list(data_type_dict.keys()): + if array: + data_dict[column_name.lower()] = Column(ARRAY(data_type_dict[dtype], dimensions=dimension)) + else: + data_dict[column_name.lower()] = Column(data_type_dict[dtype]) else: raise ValueError('Unrecognized column type: {}:{}'.format(column_name, data_type))
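The ``<type>_array_<N>d`` naming convention parsed above can be condensed into a small standalone sketch (``column_from_definition`` is a hypothetical helper written for illustration; the real logic lives in ``get_monitor_columns``)::

    from sqlalchemy import Column, Float, Integer, String
    from sqlalchemy.types import ARRAY

    DATA_TYPES = {'float': Float(), 'integer': Integer(), 'string': String()}

    def column_from_definition(line):
        """Turn one 'NAME, type[_array_<N>d]' row from a monitor table
        definition file into a (name, Column) pair."""
        name, data_type = [part.strip() for part in line.split(',')]
        if 'array' in data_type:
            # e.g. 'float_array_1d' -> dtype 'float', dimension 1
            dtype, _, dimension = data_type.split('_')
            return name.lower(), Column(ARRAY(DATA_TYPES[dtype], dimensions=int(dimension[0])))
        return name.lower(), Column(DATA_TYPES[data_type])

    # e.g. column_from_definition('GAUSS_AMPLITUDE, float_array_1d')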
@@ -308,7 +367,7 @@ class : obj # Columns specific to all monitor ORMs data_dict['id'] = Column(Integer, primary_key=True, nullable=False) data_dict['entry_date'] = Column(DateTime, unique=True, nullable=False, default=datetime.now()) - data_dict['__table_args__'] = (UniqueConstraint('id', 'entry_date', name='monitor_uc'),) + data_dict['__table_args__'] = (UniqueConstraint('id', 'entry_date', name='{}_uc'.format(data_dict['__tablename__'])),) # Get monitor-specific columns data_dict = get_monitor_columns(data_dict, data_dict['__tablename__']) @@ -318,10 +377,23 @@ class : obj return type(class_name, (base,), data_dict) + # Create tables from ORM factory -# NIRCamDarkQueries = monitor_orm_factory('nircam_dark_queries') -# NIRCamDarkPixelStats = monitor_orm_factory('nircam_dark_pixel_stats') -# NIRCamDarkDarkCurrent = monitor_orm_factory('nircam_dark_dark_current') +NIRCamDarkQueryHistory = monitor_orm_factory('nircam_dark_query_history') +NIRCamDarkPixelStats = monitor_orm_factory('nircam_dark_pixel_stats') +NIRCamDarkDarkCurrent = monitor_orm_factory('nircam_dark_dark_current') +NIRISSDarkQueryHistory = monitor_orm_factory('niriss_dark_query_history') +NIRISSDarkPixelStats = monitor_orm_factory('niriss_dark_pixel_stats') +NIRISSDarkDarkCurrent = monitor_orm_factory('niriss_dark_dark_current') +NIRSpecDarkQueryHistory = monitor_orm_factory('nirspec_dark_query_history') +NIRSpecDarkPixelStats = monitor_orm_factory('nirspec_dark_pixel_stats') +NIRSpecDarkDarkCurrent = monitor_orm_factory('nirspec_dark_dark_current') +MIRIDarkQueryHistory = monitor_orm_factory('miri_dark_query_history') +MIRIDarkPixelStats = monitor_orm_factory('miri_dark_pixel_stats') +MIRIDarkDarkCurrent = monitor_orm_factory('miri_dark_dark_current') +FGSDarkQueryHistory = monitor_orm_factory('fgs_dark_query_history') +FGSDarkPixelStats = monitor_orm_factory('fgs_dark_pixel_stats') +FGSDarkDarkCurrent = monitor_orm_factory('fgs_dark_dark_current')
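The factory-generated classes above behave like hand-written ORMs. A hypothetical query against one of them, shown only to illustrate the pattern (the aperture name and the module-level ``session`` are as defined elsewhere in this diff)::

    from jwql.database.database_interface import NIRCamDarkQueryHistory, session

    # Most recent recorded MAST query window for one aperture
    last_query = (session.query(NIRCamDarkQueryHistory)
                         .filter(NIRCamDarkQueryHistory.aperture == 'NRCA1_FULL')
                         .order_by(NIRCamDarkQueryHistory.end_time_mjd.desc())
                         .first())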
if __name__ == '__main__': diff --git a/jwql/database/monitor_table_definitions/fgs/fgs_dark_dark_current.txt b/jwql/database/monitor_table_definitions/fgs/fgs_dark_dark_current.txt new file mode 100644 index 000000000..cdd2d681d --- /dev/null +++ b/jwql/database/monitor_table_definitions/fgs/fgs_dark_dark_current.txt @@ -0,0 +1,19 @@ +APERTURE, string +AMPLIFIER, string +MEAN, float +STDEV, float +SOURCE_FILES, string_array_1d +GAUSS_AMPLITUDE, float_array_1d +GAUSS_PEAK, float_array_1d +GAUSS_WIDTH, float_array_1d +GAUSS_CHISQ, float +DOUBLE_GAUSS_AMPLITUDE1, float_array_1d +DOUBLE_GAUSS_PEAK1, float_array_1d +DOUBLE_GAUSS_WIDTH1, float_array_1d +DOUBLE_GAUSS_AMPLITUDE2, float_array_1d +DOUBLE_GAUSS_PEAK2, float_array_1d +DOUBLE_GAUSS_WIDTH2, float_array_1d +DOUBLE_GAUSS_CHISQ, float +MEAN_DARK_IMAGE_FILE, string +HIST_DARK_VALUES, float_array_1d +HIST_AMPLITUDES, float_array_1d diff --git a/jwql/database/monitor_table_definitions/fgs/fgs_dark_pixel_stats.txt b/jwql/database/monitor_table_definitions/fgs/fgs_dark_pixel_stats.txt new file mode 100644 index 000000000..bf2c06c48 --- /dev/null +++ b/jwql/database/monitor_table_definitions/fgs/fgs_dark_pixel_stats.txt @@ -0,0 +1,7 @@ +DETECTOR, string +X_COORD, integer_array_1d +Y_COORD, integer_array_1d +TYPE, string +SOURCE_FILES, string_array_1d +MEAN_DARK_IMAGE_FILE, string +BASELINE_FILE, string diff --git a/jwql/database/monitor_table_definitions/fgs/fgs_dark_query_history.txt b/jwql/database/monitor_table_definitions/fgs/fgs_dark_query_history.txt new file mode 100644 index 000000000..d0c493354 --- /dev/null +++ b/jwql/database/monitor_table_definitions/fgs/fgs_dark_query_history.txt @@ -0,0 +1,6 @@ +INSTRUMENT, string +APERTURE, string +START_TIME_MJD, float +END_TIME_MJD, float +FILES_FOUND, integer +RUN_MONITOR, bool diff --git a/jwql/database/monitor_table_definitions/miri/miri_dark_dark_current.txt b/jwql/database/monitor_table_definitions/miri/miri_dark_dark_current.txt new file mode 100644 index 000000000..cdd2d681d --- /dev/null +++ b/jwql/database/monitor_table_definitions/miri/miri_dark_dark_current.txt @@ -0,0 +1,19 @@ +APERTURE, string +AMPLIFIER, string +MEAN, float +STDEV, float +SOURCE_FILES, string_array_1d +GAUSS_AMPLITUDE, float_array_1d +GAUSS_PEAK, float_array_1d +GAUSS_WIDTH, float_array_1d +GAUSS_CHISQ, float +DOUBLE_GAUSS_AMPLITUDE1, float_array_1d +DOUBLE_GAUSS_PEAK1, float_array_1d +DOUBLE_GAUSS_WIDTH1, float_array_1d +DOUBLE_GAUSS_AMPLITUDE2, float_array_1d +DOUBLE_GAUSS_PEAK2, float_array_1d +DOUBLE_GAUSS_WIDTH2, float_array_1d +DOUBLE_GAUSS_CHISQ, float +MEAN_DARK_IMAGE_FILE, string +HIST_DARK_VALUES, float_array_1d +HIST_AMPLITUDES, float_array_1d diff --git a/jwql/database/monitor_table_definitions/miri/miri_dark_pixel_stats.txt b/jwql/database/monitor_table_definitions/miri/miri_dark_pixel_stats.txt new file mode 100644 index 000000000..bf2c06c48 --- /dev/null +++ b/jwql/database/monitor_table_definitions/miri/miri_dark_pixel_stats.txt @@ -0,0 +1,7 @@ +DETECTOR, string +X_COORD, integer_array_1d +Y_COORD, integer_array_1d +TYPE, string +SOURCE_FILES, string_array_1d +MEAN_DARK_IMAGE_FILE, string +BASELINE_FILE, string diff --git a/jwql/database/monitor_table_definitions/miri/miri_dark_query_history.txt b/jwql/database/monitor_table_definitions/miri/miri_dark_query_history.txt new file mode 100644 index 000000000..d0c493354 --- /dev/null +++ b/jwql/database/monitor_table_definitions/miri/miri_dark_query_history.txt @@ -0,0 +1,6 @@ +INSTRUMENT, string +APERTURE, string +START_TIME_MJD, float +END_TIME_MJD, float +FILES_FOUND, integer +RUN_MONITOR, bool diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_dark_dark_current.txt b/jwql/database/monitor_table_definitions/nircam/nircam_dark_dark_current.txt index e3866c8ff..cdd2d681d 100644 --- a/jwql/database/monitor_table_definitions/nircam/nircam_dark_dark_current.txt +++ b/jwql/database/monitor_table_definitions/nircam/nircam_dark_dark_current.txt @@ -1,2 +1,19 @@ +APERTURE, string +AMPLIFIER, string MEAN, float -STDEV, float \ No newline at end of file +STDEV, float +SOURCE_FILES, string_array_1d +GAUSS_AMPLITUDE, float_array_1d +GAUSS_PEAK, float_array_1d +GAUSS_WIDTH, float_array_1d +GAUSS_CHISQ, float +DOUBLE_GAUSS_AMPLITUDE1, float_array_1d +DOUBLE_GAUSS_PEAK1, float_array_1d +DOUBLE_GAUSS_WIDTH1, float_array_1d +DOUBLE_GAUSS_AMPLITUDE2, float_array_1d +DOUBLE_GAUSS_PEAK2, float_array_1d +DOUBLE_GAUSS_WIDTH2, float_array_1d +DOUBLE_GAUSS_CHISQ, float +MEAN_DARK_IMAGE_FILE, string +HIST_DARK_VALUES, float_array_1d +HIST_AMPLITUDES, float_array_1d diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_dark_pixel_stats.txt b/jwql/database/monitor_table_definitions/nircam/nircam_dark_pixel_stats.txt index c2eadbe6b..bf2c06c48 100644 --- a/jwql/database/monitor_table_definitions/nircam/nircam_dark_pixel_stats.txt +++ b/jwql/database/monitor_table_definitions/nircam/nircam_dark_pixel_stats.txt @@ -1,3 +1,7 @@ -X_COORD, integer -Y_COORD, integer -TYPE, string 
\ No newline at end of file +DETECTOR, string +X_COORD, integer_array_1d +Y_COORD, integer_array_1d +TYPE, string +SOURCE_FILES, string_array_1d +MEAN_DARK_IMAGE_FILE, string +BASELINE_FILE, string diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_dark_queries.txt b/jwql/database/monitor_table_definitions/nircam/nircam_dark_queries.txt deleted file mode 100644 index 9f838c18f..000000000 --- a/jwql/database/monitor_table_definitions/nircam/nircam_dark_queries.txt +++ /dev/null @@ -1,3 +0,0 @@ -LAST_RUN, datetime -DETECTOR, string -APERTURE, string \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_dark_query_history.txt b/jwql/database/monitor_table_definitions/nircam/nircam_dark_query_history.txt index 209b84d88..d0c493354 100644 --- a/jwql/database/monitor_table_definitions/nircam/nircam_dark_query_history.txt +++ b/jwql/database/monitor_table_definitions/nircam/nircam_dark_query_history.txt @@ -1,3 +1,6 @@ -START_TIME, datetime -DETECTOR, string +INSTRUMENT, string APERTURE, string +START_TIME_MJD, float +END_TIME_MJD, float +FILES_FOUND, integer +RUN_MONITOR, bool diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_dark_dark_current.txt b/jwql/database/monitor_table_definitions/niriss/niriss_dark_dark_current.txt new file mode 100644 index 000000000..cdd2d681d --- /dev/null +++ b/jwql/database/monitor_table_definitions/niriss/niriss_dark_dark_current.txt @@ -0,0 +1,19 @@ +APERTURE, string +AMPLIFIER, string +MEAN, float +STDEV, float +SOURCE_FILES, string_array_1d +GAUSS_AMPLITUDE, float_array_1d +GAUSS_PEAK, float_array_1d +GAUSS_WIDTH, float_array_1d +GAUSS_CHISQ, float +DOUBLE_GAUSS_AMPLITUDE1, float_array_1d +DOUBLE_GAUSS_PEAK1, float_array_1d +DOUBLE_GAUSS_WIDTH1, float_array_1d +DOUBLE_GAUSS_AMPLITUDE2, float_array_1d +DOUBLE_GAUSS_PEAK2, float_array_1d +DOUBLE_GAUSS_WIDTH2, float_array_1d +DOUBLE_GAUSS_CHISQ, float +MEAN_DARK_IMAGE_FILE, string +HIST_DARK_VALUES, float_array_1d +HIST_AMPLITUDES, float_array_1d diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_dark_pixel_stats.txt b/jwql/database/monitor_table_definitions/niriss/niriss_dark_pixel_stats.txt new file mode 100644 index 000000000..bf2c06c48 --- /dev/null +++ b/jwql/database/monitor_table_definitions/niriss/niriss_dark_pixel_stats.txt @@ -0,0 +1,7 @@ +DETECTOR, string +X_COORD, integer_array_1d +Y_COORD, integer_array_1d +TYPE, string +SOURCE_FILES, string_array_1d +MEAN_DARK_IMAGE_FILE, string +BASELINE_FILE, string diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_dark_query_history.txt b/jwql/database/monitor_table_definitions/niriss/niriss_dark_query_history.txt new file mode 100644 index 000000000..d0c493354 --- /dev/null +++ b/jwql/database/monitor_table_definitions/niriss/niriss_dark_query_history.txt @@ -0,0 +1,6 @@ +INSTRUMENT, string +APERTURE, string +START_TIME_MJD, float +END_TIME_MJD, float +FILES_FOUND, integer +RUN_MONITOR, bool diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_dark_dark_current.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_dark_dark_current.txt new file mode 100644 index 000000000..cdd2d681d --- /dev/null +++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_dark_dark_current.txt @@ -0,0 +1,19 @@ +APERTURE, string +AMPLIFIER, string +MEAN, float +STDEV, float +SOURCE_FILES, string_array_1d +GAUSS_AMPLITUDE, float_array_1d +GAUSS_PEAK, float_array_1d +GAUSS_WIDTH, float_array_1d +GAUSS_CHISQ, float +DOUBLE_GAUSS_AMPLITUDE1, 
float_array_1d +DOUBLE_GAUSS_PEAK1, float_array_1d +DOUBLE_GAUSS_WIDTH1, float_array_1d +DOUBLE_GAUSS_AMPLITUDE2, float_array_1d +DOUBLE_GAUSS_PEAK2, float_array_1d +DOUBLE_GAUSS_WIDTH2, float_array_1d +DOUBLE_GAUSS_CHISQ, float +MEAN_DARK_IMAGE_FILE, string +HIST_DARK_VALUES, float_array_1d +HIST_AMPLITUDES, float_array_1d diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_dark_pixel_stats.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_dark_pixel_stats.txt new file mode 100644 index 000000000..bf2c06c48 --- /dev/null +++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_dark_pixel_stats.txt @@ -0,0 +1,7 @@ +DETECTOR, string +X_COORD, integer_array_1d +Y_COORD, integer_array_1d +TYPE, string +SOURCE_FILES, string_array_1d +MEAN_DARK_IMAGE_FILE, string +BASELINE_FILE, string diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_dark_query_history.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_dark_query_history.txt new file mode 100644 index 000000000..d0c493354 --- /dev/null +++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_dark_query_history.txt @@ -0,0 +1,6 @@ +INSTRUMENT, string +APERTURE, string +START_TIME_MJD, float +END_TIME_MJD, float +FILES_FOUND, integer +RUN_MONITOR, bool diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py new file mode 100755 index 000000000..097d86b99 --- /dev/null +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -0,0 +1,988 @@ +#! /usr/bin/env python + +"""This module contains code for the dark current monitor, which +performs some basic analysis to check whether the dark current behavior +for the most recent set of input files is consistent with that from +past files. + +If enough new files for a given instrument/aperture combination +(currently the files must be identified as dark current files in the +``exp_type`` header keyword) are present in the filesystem at the time +the ``dark_monitor`` is called, the files are first run through the +appropriate pipeline steps to produce slope images. + +A mean slope image and a standard deviation slope image are +created by sigma-clipping on a pixel-by-pixel basis. The mean and +standard deviation images are saved to a fits file, the name of which +is entered into the ``DarkCurrent`` database table. + +The mean slope image is then normalized by an existing baseline slope +image. New hot pixels are identified as those with normalized signal +rates above a ``hot_threshold`` value. Similarly, pixels with +normalized signal rates below a ``dead_threshold`` are flagged as new +dead pixels. + +The standard deviation slope image is normalized by a baseline +(historical) standard deviation image. Pixels with normalized values +above a noise threshold are flagged as newly noisy pixels. + +New hot, dead, and noisy pixels are saved to the ``DarkPixelStats`` +database table. + +Next, the dark current in the mean slope image is examined. A histogram +of the slope values is created for the pixels in each amplifier, as +well as for all pixels on the detector. In all cases, a Gaussian is fit +to the histogram. Currently for NIRCam and NIRISS, a double Gaussian is +also fit to the histogram from the entire detector. + +The histogram itself as well as the best-fit Gaussian and double +Gaussian parameters are saved to the ``DarkDarkCurrent`` database table.
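The pixel-by-pixel sigma-clipped mean and standard deviation described above can be sketched in a few lines (a standalone illustration using ``astropy.stats.sigma_clip``; the monitor itself uses ``jwql.utils.calculations.mean_image``)::

    import numpy as np
    from astropy.stats import sigma_clip

    def mean_stdev_images(stack, sigma=3):
        """Sigma-clip a 3D stack of slope images along the stack axis
        and return per-pixel mean and stdev images."""
        clipped = sigma_clip(stack, sigma=sigma, axis=0)
        mean_image = np.ma.mean(clipped, axis=0).filled(np.nan)
        stdev_image = np.ma.std(clipped, axis=0).filled(np.nan)
        return mean_image, stdev_image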
+ + +Author +------ + + - Bryan Hilbert + +Use +--- + + This module can be used from the command line as such: + + :: + + python dark_monitor.py +""" + +from copy import copy, deepcopy +import datetime +import logging +import os + +from astropy.io import ascii, fits +from astropy.modeling import models +from astropy.time import Time +import numpy as np +from pysiaf import Siaf +from sqlalchemy import func +from sqlalchemy.sql.expression import and_ + +from jwql.database.database_interface import session +from jwql.database.database_interface import NIRCamDarkQueryHistory, NIRCamDarkPixelStats, NIRCamDarkDarkCurrent +from jwql.database.database_interface import NIRISSDarkQueryHistory, NIRISSDarkPixelStats, NIRISSDarkDarkCurrent +from jwql.database.database_interface import MIRIDarkQueryHistory, MIRIDarkPixelStats, MIRIDarkDarkCurrent +from jwql.database.database_interface import NIRSpecDarkQueryHistory, NIRSpecDarkPixelStats, NIRSpecDarkDarkCurrent +from jwql.database.database_interface import FGSDarkQueryHistory, FGSDarkPixelStats, FGSDarkDarkCurrent +from jwql.instrument_monitors import pipeline_tools +from jwql.jwql_monitors import monitor_mast +from jwql.utils import calculations, instrument_properties +from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE, JWST_DATAPRODUCTS +from jwql.utils.logging_functions import log_info, log_fail +from jwql.utils.permissions import set_permissions +from jwql.utils.utils import copy_files, ensure_dir_exists, get_config, filesystem_path, initialize_instrument_monitor, update_monitor_table + +THRESHOLDS_FILE = os.path.join(os.path.split(__file__)[0], 'dark_monitor_file_thresholds.txt') + + +def mast_query_darks(instrument, aperture, start_date, end_date): + """Use ``astroquery`` to search MAST for dark current data + + Parameters + ---------- + instrument : str + Instrument name (e.g. ``nircam``) + + aperture : str + Detector aperture to search for (e.g. ``NRCA1_FULL``) + + start_date : float + Starting date for the search in MJD + + end_date : float + Ending date for the search in MJD + + Returns + ------- + query_results : list + List of dictionaries containing the query results + """ + + # Make sure instrument is correct case + if instrument.lower() == 'nircam': + instrument = 'NIRCam' + dark_template = ['NRC_DARK'] + elif instrument.lower() == 'niriss': + instrument = 'NIRISS' + dark_template = ['NIS_DARK'] + elif instrument.lower() == 'nirspec': + instrument = 'NIRSpec' + dark_template = ['NRS_DARK'] + elif instrument.lower() == 'fgs': + instrument = 'FGS' + dark_template = ['FGS_DARK'] + elif instrument.lower() == 'miri': + instrument = 'MIRI' + dark_template = ['MIR_DARKALL', 'MIR_DARKIMG', 'MIR_DARKMRS'] + + # monitor_mast.instrument_inventory does not allow list inputs to + # the added_filters input (or at least if you do provide a list, then + # it becomes a nested list when it sends the query to MAST. The + # nested list is subsequently ignored by MAST.) + # So query once for each dark template, and combine outputs into a + # single list. 
+ query_results = [] + for template_name in dark_template: + + # Create dictionary of parameters to add + parameters = {"date_obs_mjd": {"min": start_date, "max": end_date}, + "apername": aperture, "exp_type": template_name} + + query = monitor_mast.instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS, + add_filters=parameters, return_data=True, caom=False) + if len(query['data']) > 0: + query_results.extend(query['data']) + + return query_results + + +@log_fail +@log_info +class Dark(): + """Class for executing the dark current monitor. + + This class will search for new (since the previous instance of the + class) dark current files in the file system. It will loop over + instrument/aperture combinations and find the number of new dark + current files available. If there are enough, it will copy the files + over to a working directory and run the monitor. This will create a + mean dark current rate image, create a histogram of the dark current + values, and fit several functions to the histogram. It will also + compare the dark current image to a historical image in order to + search for new hot or dead pixels. Results are all saved to + database tables. + + Parameters + ---------- + testing : bool + For pytest. If ``True``, an instance of ``Dark`` is created, but + no other code is executed. + + Attributes + ---------- + output_dir : str + Path into which outputs will be placed + + data_dir : str + Path into which new dark files will be copied to be worked on + + query_start : float + MJD start date to use for querying MAST + + query_end : float + MJD end date to use for querying MAST + + instrument : str + Name of instrument used to collect the dark current data + + aperture : str + Name of the aperture used for the dark current (e.g. + ``NRCA1_FULL``) + + query_table : sqlalchemy table + Table containing the history of dark current queries to MAST + for each instrument/aperture combination + + pixel_table : sqlalchemy table + Table containing lists of hot/dead/noisy pixels found for each + instrument/detector + + stats_table : sqlalchemy table + Table containing dark current analysis results. Mean/stdev + values, histogram information, Gaussian fitting results, etc. 
+ + Raises + ------ + ValueError + If encountering an unrecognized bad pixel type + + ValueError + If the most recent query search returns more than one entry + """ + + def __init__(self, testing=False): + + logging.info('Begin logging for dark_monitor') + + apertures_to_skip = ['NRCALL_FULL', 'NRCAS_FULL', 'NRCBS_FULL'] + + if not testing: + + # Get the output directory + self.output_dir = os.path.join(get_config()['outputs'], 'dark_monitor') + + # Read in config file that defines the thresholds for the number + # of dark files that must be present in order for the monitor to run + limits = ascii.read(THRESHOLDS_FILE) + + # Use the current time as the end time for MAST query + self.query_end = Time.now().mjd + + # Loop over all instruments + for instrument in ['nircam']: + self.instrument = instrument + + # Identify which database tables to use + self.identify_tables() + + # Get a list of all possible apertures from pysiaf + possible_apertures = list(Siaf(instrument).apernames) + possible_apertures = [ap for ap in possible_apertures if ap not in apertures_to_skip] + + for aperture in possible_apertures: + + logging.info('') + logging.info('Working on aperture {} in {}'.format(aperture, instrument)) + + # Find the appropriate threshold for the number of new files needed + match = aperture == limits['Aperture'] + file_count_threshold = limits['Threshold'][match] + + # Locate the record of the most recent MAST search + self.aperture = aperture + self.query_start = self.most_recent_search() + logging.info('\tQuery times: {} {}'.format(self.query_start, self.query_end)) + + # Query MAST using the aperture and the time of the + # most recent previous search as the starting time + new_entries = mast_query_darks(instrument, aperture, self.query_start, self.query_end) + logging.info('\tAperture: {}, new entries: {}'.format(self.aperture, len(new_entries))) + + # Check to see if there are enough new files to meet the monitor's signal-to-noise requirements + if len(new_entries) >= file_count_threshold: + + logging.info('\tSufficient new dark files found for {}, {} to run the dark monitor.' + .format(self.instrument, self.aperture)) + + # Get full paths to the files + new_filenames = [filesystem_path(file_entry['filename']) for file_entry in new_entries] + + # Set up directories for the copied data + ensure_dir_exists(os.path.join(self.output_dir, 'data')) + self.data_dir = os.path.join(self.output_dir, + 'data/{}_{}'.format(self.instrument.lower(), + self.aperture.lower())) + ensure_dir_exists(self.data_dir) + + # Copy files from filesystem + dark_files, not_copied = copy_files(new_filenames, self.data_dir) + + # Run the dark monitor + self.run(dark_files) + monitor_run = True + + else: + logging.info(('\tDark monitor skipped. {} new dark files for {}, {}. 
{} new files are ' + 'required to run dark current monitor.').format( + len(new_entries), instrument, aperture, file_count_threshold[0])) + monitor_run = False + + # Update the query history + new_entry = {'instrument': instrument, + 'aperture': aperture, + 'start_time_mjd': self.query_start, + 'end_time_mjd': self.query_end, + 'files_found': len(new_entries), + 'run_monitor': monitor_run, + 'entry_date': datetime.datetime.now()} + self.query_table.__table__.insert().execute(new_entry) + logging.info('\tUpdated the query history table') + + logging.info('Dark Monitor completed successfully.') + + def add_bad_pix(self, coordinates, pixel_type, files, mean_filename, baseline_filename): + """Add a set of bad pixels to the bad pixel database table + + Parameters + ---------- + coordinates : tuple + Tuple of two lists, containing x,y coordinates of bad + pixels (Output of ``np.where`` call) + + pixel_type : str + Type of bad pixel. Options are ``dead``, ``hot``, and + ``noisy`` + + files : list + List of fits files which were used to identify the bad + pixels + + mean_filename : str + Name of fits file containing the mean dark rate image used + to find these bad pixels + + baseline_filename : str + Name of fits file containing the baseline dark rate image + used to find these bad pixels + """ + + logging.info('Adding {} {} pixels to database.'.format(len(coordinates[0]), pixel_type)) + + source_files = [os.path.basename(item) for item in files] + entry = {'detector': self.detector, + 'x_coord': coordinates[0], + 'y_coord': coordinates[1], + 'type': pixel_type, + 'source_files': source_files, + 'mean_dark_image_file': os.path.basename(mean_filename), + 'baseline_file': os.path.basename(baseline_filename), + 'entry_date': datetime.datetime.now()} + self.pixel_table.__table__.insert().execute(entry) + + def get_metadata(self, filename): + """Collect basic metadata from a fits file + + Parameters + ---------- + filename : str + Name of fits file to examine + """ + + header = fits.getheader(filename) + + try: + self.detector = header['DETECTOR'] + self.x0 = header['SUBSTRT1'] + self.y0 = header['SUBSTRT2'] + self.xsize = header['SUBSIZE1'] + self.ysize = header['SUBSIZE2'] + self.sample_time = header['TSAMPLE'] + self.frame_time = header['TFRAME'] + self.read_pattern = header['READPATT'] + + except KeyError as e: + logging.error(e) + + def exclude_existing_badpix(self, badpix, pixel_type): + """Given a set of coordinates of bad pixels, determine which of + these pixels have been previously identified and remove them + from the list + + Parameters + ---------- + badpix : tuple + Tuple of lists containing x and y pixel coordinates. (Output + of ``numpy.where`` call) + + pixel_type : str + Type of bad pixel being examined. 
Options are ``hot``, + ``dead``, and ``noisy`` + + Returns + ------- + new_pixels_x : list + List of x coordinates of new bad pixels + + new_pixels_y : list + List of y coordinates of new bad pixels + """ + + if pixel_type not in ['hot', 'dead', 'noisy']: + raise ValueError('Unrecognized bad pixel type: {}'.format(pixel_type)) + + db_entries = session.query(self.pixel_table) \ + .filter(self.pixel_table.type == pixel_type) \ + .filter(self.pixel_table.detector == self.detector) \ + .all() + + already_found = [] + if len(db_entries) != 0: + for _row in db_entries: + x_coords = _row.x_coord + y_coords = _row.y_coord + for x, y in zip(x_coords, y_coords): + already_found.append((x, y)) + + # Check to see if each pixel already appears in the database for + # the given bad pixel type + new_pixels_x = [] + new_pixels_y = [] + for x, y in zip(badpix[0], badpix[1]): + pixel = (x, y) + if pixel not in already_found: + new_pixels_x.append(x) + new_pixels_y.append(y) + + return (new_pixels_x, new_pixels_y) + + def find_hot_dead_pixels(self, mean_image, comparison_image, hot_threshold=2., dead_threshold=0.1): + """Create the ratio of the slope image to a baseline slope + image. Pixels in the ratio image with values above + ``hot_threshold`` will be marked as hot, and those with ratio + values less than ``dead_threshold`` will be marked as dead. + + Parameters + ---------- + mean_image : numpy.ndarray + 2D array containing the slope image from the new data + + comparison_image : numpy.ndarray + 2D array containing the baseline slope image to compare + against the new slope image. + + hot_threshold : float + ``(mean_image / comparison_image)`` ratio value above which + a pixel is considered hot. + + dead_threshold : float + ``(mean_image / comparison_image)`` ratio value below which + a pixel is considered dead. + + Returns + ------- + hotpix : tuple + Tuple (of lists) containing x,y coordinates of newly hot + pixels + + deadpix : tuple + Tuple (of lists) containing x,y coordinates of newly dead + pixels + """ + + # Avoid divide by zeros + zeros = comparison_image == 0. + comparison_image[zeros] = 1. + mean_image[zeros] += 1. + + ratio = mean_image / comparison_image + hotpix = np.where(ratio > hot_threshold) + deadpix = np.where(ratio < dead_threshold) + + return hotpix, deadpix + + def get_baseline_filename(self): + """Query the database and return the filename of the baseline + (comparison) mean dark slope image to use when searching for + new hot/dead/noisy pixels. For this we assume that the most + recent baseline file for the given detector is the one to use. 
+ + Returns + ------- + filename : str + Name of fits file containing the baseline image + """ + + subq = session.query(self.pixel_table.detector, + func.max(self.pixel_table.entry_date).label('maxdate') + ).group_by(self.pixel_table.detector).subquery('t2') + + query = session.query(self.pixel_table).join( + subq, + and_( + self.pixel_table.detector == self.detector, + self.pixel_table.entry_date == subq.c.maxdate + ) + ) + + count = query.count() + if not count: + filename = None + else: + filename = query.all()[0].baseline_file + logging.info('Baseline filename: {}'.format(filename)) + + return filename + + def identify_tables(self): + """Determine which database tables to use for a run of the dark + monitor + """ + + mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument] + self.query_table = eval('{}DarkQueryHistory'.format(mixed_case_name)) + self.pixel_table = eval('{}DarkPixelStats'.format(mixed_case_name)) + self.stats_table = eval('{}DarkDarkCurrent'.format(mixed_case_name)) + + def most_recent_search(self): + """Query the query history database and return the information + on the most recent query for the given ``aperture_name`` where + the dark monitor was executed. + + Returns + ------- + query_result : float + Date (in MJD) of the ending range of the previous MAST query + where the dark monitor was run. + """ + + sub_query = session.query(self.query_table.aperture, + func.max(self.query_table.end_time_mjd).label('maxdate') + ).group_by(self.query_table.aperture).subquery('t2') + + # Note that "self.query_table.run_monitor == True" below is + # intentional. Switching ``==`` to ``is`` results in an error in the query. + query = session.query(self.query_table).join( + sub_query, + and_( + self.query_table.aperture == self.aperture, + self.query_table.end_time_mjd == sub_query.c.maxdate, + self.query_table.run_monitor == True + ) + ).all() + + query_count = len(query) + if query_count == 0: + query_result = 57357.0 # a.k.a. Dec 1, 2015 == CV3 + logging.info(('\tNo query history for {}. Beginning search date will be set to {}.' + .format(self.aperture, query_result))) + elif query_count > 1: + raise ValueError('More than one "most recent" query?') + else: + query_result = query[0].end_time_mjd + + return query_result + + def noise_check(self, new_noise_image, baseline_noise_image, threshold=1.5): + """Create the ratio of the stdev (noise) image to a baseline + noise image. Pixels in the ratio image with values above + ``threshold`` will be marked as newly noisy. + + Parameters + ---------- + new_noise_image : numpy.ndarray + 2D array containing the noise image from the new data + + baseline_noise_image : numpy.ndarray + 2D array containing the baseline noise image to compare + against the new noise image. + + threshold : float + ``(new_noise_image / baseline_noise_image)`` ratio value + above which a pixel is considered newly noisy. + + Returns + ------- + noisy : tuple + Tuple (of lists) of x,y coordinates of newly noisy pixels + """ + + # Avoid divide by zeros + zeros = baseline_noise_image == 0. + baseline_noise_image[zeros] = 1. + new_noise_image[zeros] += 1.
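+        # Note: where the baseline is zero, the baseline is reset to 1 and the
+        # new image is incremented, so a pixel that is zero in both images
+        # yields a ratio of exactly 1 and is not flagged as newly noisy.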
+ + ratio = new_noise_image / baseline_noise_image + noisy = np.where(ratio > threshold) + + return noisy + + def read_baseline_slope_image(self, filename): + """Read in a baseline mean slope image and associated standard + deviation image from the given fits file + + Parameters + ---------- + filename : str + Name of fits file to be read in + + Returns + ------- + mean_image : numpy.ndarray + 2D mean slope image + + stdev_image : numpy.ndarray + 2D stdev image + """ + + try: + with fits.open(filename) as hdu: + mean_image = hdu['MEAN'].data + stdev_image = hdu['STDEV'].data + return mean_image, stdev_image + except (FileNotFoundError, KeyError) as e: + logging.warning('Trying to read {}: {}'.format(filename, e)) + + def run(self, file_list): + """The main method. See the module docstring for further details + + Parameters + ---------- + file_list : list + List of filenames (including full paths) to the dark current + files + """ + + # Basic metadata that will be needed later + self.get_metadata(file_list[0]) + + # Determine which pipeline steps need to be executed + required_steps = pipeline_tools.get_pipeline_steps(self.instrument) + logging.info('\tRequired calwebb_detector1 pipeline steps to have the data in the ' + 'correct format:') + for item in required_steps: + logging.info('\t\t{}: {}'.format(item, required_steps[item])) + + # Modify the list of pipeline steps to skip those not needed for the + # preparation of dark current data + required_steps['dark_current'] = False + required_steps['persistence'] = False + + # NIRSpec IRS^2 readout pattern NRSIRS2 is the only one with + # nframes not a power of 2 + if self.read_pattern not in pipeline_tools.GROUPSCALE_READOUT_PATTERNS: + required_steps['group_scale'] = False + + # Run pipeline steps on files, generating slope files + slope_files = [] + for filename in file_list: + + completed_steps = pipeline_tools.completed_pipeline_steps(filename) + steps_to_run = pipeline_tools.steps_to_run(required_steps, completed_steps) + + logging.info('\tWorking on file: {}'.format(filename)) + logging.info('\tPipeline steps that remain to be run:') + for item in steps_to_run: + logging.info('\t\t{}: {}'.format(item, steps_to_run[item])) + + # Run any remaining required pipeline steps + if not any(steps_to_run.values()): + slope_files.append(filename) + else: + processed_file = filename.replace('.fits', '_{}.fits'.format('rate')) + + # If the slope file already exists, skip the pipeline call + if not os.path.isfile(processed_file): + logging.info('\tRunning pipeline on {}'.format(filename)) + processed_file = pipeline_tools.run_calwebb_detector1_steps(os.path.abspath(filename), steps_to_run) + logging.info('\tPipeline complete. Output: {}'.format(processed_file)) + + else: + logging.info('\tSlope file {} already exists. Skipping call to pipeline.' 
+ .format(processed_file)) + + slope_files.append(processed_file) + + # Delete the original dark ramp file to save disk space + os.remove(filename) + + logging.info('\tSlope images to use in the dark monitor for {}, {}:'.format(self.instrument, self.aperture)) + for item in slope_files: + logging.info('\t\t{}'.format(item)) + + # Read in all slope images and place into a list + slope_image_stack, slope_exptimes = pipeline_tools.image_stack(slope_files) + + # Calculate a mean slope image from the inputs + slope_image, stdev_image = calculations.mean_image(slope_image_stack, sigma_threshold=3) + mean_slope_file = self.save_mean_slope_image(slope_image, stdev_image, slope_files) + logging.info('\tSigma-clipped mean of the slope images saved to: {}'.format(mean_slope_file)) + + # ----- Search for new hot/dead/noisy pixels ----- + # Read in baseline mean slope image and stdev image + # The baseline image is used to look for hot/dead/noisy pixels, + # but not for comparing mean dark rates. Therefore, updates to + # the baseline can be minimal. + + # Limit checks for hot/dead/noisy pixels to full frame data since + # subarray data have much shorter exposure times and therefore lower + # signal-to-noise + aperture_type = Siaf(self.instrument)[self.aperture].AperType + if aperture_type == 'FULLSCA': + baseline_file = self.get_baseline_filename() + if baseline_file is None: + logging.warning(('\tNo baseline dark current countrate image for {} {}. Setting the ' + 'current mean slope image to be the new baseline.'.format(self.instrument, self.aperture))) + baseline_file = mean_slope_file + baseline_mean = deepcopy(slope_image) + baseline_stdev = deepcopy(stdev_image) + else: + logging.info('\tBaseline file is {}'.format(baseline_file)) + baseline_mean, baseline_stdev = self.read_baseline_slope_image(baseline_file) + + # Check the hot/dead pixel population for changes + new_hot_pix, new_dead_pix = self.find_hot_dead_pixels(slope_image, baseline_mean) + + # Shift the coordinates to be in full frame coordinate system + new_hot_pix = self.shift_to_full_frame(new_hot_pix) + new_dead_pix = self.shift_to_full_frame(new_dead_pix) + + # Exclude hot and dead pixels found previously + new_hot_pix = self.exclude_existing_badpix(new_hot_pix, 'hot') + new_dead_pix = self.exclude_existing_badpix(new_dead_pix, 'dead') + + # Add new hot and dead pixels to the database + logging.info('\tFound {} new hot pixels'.format(len(new_hot_pix[0]))) + logging.info('\tFound {} new dead pixels'.format(len(new_dead_pix[0]))) + self.add_bad_pix(new_hot_pix, 'hot', file_list, mean_slope_file, baseline_file) + self.add_bad_pix(new_dead_pix, 'dead', file_list, mean_slope_file, baseline_file) + + # Check for any pixels that are significantly more noisy than + # in the baseline stdev image + new_noisy_pixels = self.noise_check(stdev_image, baseline_stdev) + + # Shift coordinates to be in full_frame coordinate system + new_noisy_pixels = self.shift_to_full_frame(new_noisy_pixels) + + # Exclude previously found noisy pixels + new_noisy_pixels = self.exclude_existing_badpix(new_noisy_pixels, 'noisy') + + # Add new noisy pixels to the database + logging.info('\tFound {} new noisy pixels'.format(len(new_noisy_pixels[0]))) + self.add_bad_pix(new_noisy_pixels, 'noisy', file_list, mean_slope_file, baseline_file) + + # ----- Calculate image statistics ----- + + # Find amplifier boundaries so per-amp statistics can be calculated + number_of_amps, amp_bounds = instrument_properties.amplifier_info(slope_files[0]) + logging.info('\tAmplifier 
boundaries: {}'.format(amp_bounds)) + + # Calculate mean and stdev values, and fit a Gaussian to the + # histogram of the pixels in each amp + (amp_mean, amp_stdev, gauss_param, gauss_chisquared, double_gauss_params, double_gauss_chisquared, + histogram, bins) = self.stats_by_amp(slope_image, amp_bounds) + + # Construct new entry for dark database table + source_files = [os.path.basename(item) for item in file_list] + for key in amp_mean.keys(): + dark_db_entry = {'aperture': self.aperture, 'amplifier': key, 'mean': amp_mean[key], + 'stdev': amp_stdev[key], + 'source_files': source_files, + 'gauss_amplitude': list(gauss_param[key][0]), + 'gauss_peak': list(gauss_param[key][1]), + 'gauss_width': list(gauss_param[key][2]), + 'gauss_chisq': gauss_chisquared[key], + 'double_gauss_amplitude1': double_gauss_params[key][0], + 'double_gauss_peak1': double_gauss_params[key][1], + 'double_gauss_width1': double_gauss_params[key][2], + 'double_gauss_amplitude2': double_gauss_params[key][3], + 'double_gauss_peak2': double_gauss_params[key][4], + 'double_gauss_width2': double_gauss_params[key][5], + 'double_gauss_chisq': double_gauss_chisquared[key], + 'mean_dark_image_file': os.path.basename(mean_slope_file), + 'hist_dark_values': bins, + 'hist_amplitudes': histogram, + 'entry_date': datetime.datetime.now() + } + self.stats_table.__table__.insert().execute(dark_db_entry) + + def save_mean_slope_image(self, slope_img, stdev_img, files): + """Save the mean slope image and associated stdev image to a + file + + Parameters + ---------- + slope_img : numpy.ndarray + 2D array containing the mean slope image + + stdev_img : numpy.ndarray + 2D array containing the stdev image associated with the mean + slope image. + + files : list + List of input files used to construct the mean slope image + + Returns + ------- + output_filename : str + Name of fits file to save mean and stdev images within + """ + + output_filename = '{}_{}_{}_to_{}_mean_slope_image.fits'.format(self.instrument.lower(), + self.aperture.lower(), + self.query_start, self.query_end) + mean_slope_dir = os.path.join(get_config()['outputs'], 'dark_monitor', 'mean_slope_images') + ensure_dir_exists(mean_slope_dir) + output_filename = os.path.join(mean_slope_dir, output_filename) + + primary_hdu = fits.PrimaryHDU() + primary_hdu.header['INSTRUME'] = (self.instrument, 'JWST instrument') + primary_hdu.header['APERTURE'] = (self.aperture, 'Aperture name') + primary_hdu.header['QRY_STRT'] = (self.query_start, 'MAST Query start time (MJD)') + primary_hdu.header['QRY_END'] = (self.query_end, 'MAST Query end time (MJD)') + + files_string = 'FILES USED: ' + for filename in files: + files_string += '{}, '.format(filename) + + primary_hdu.header.add_history(files_string) + mean_img_hdu = fits.ImageHDU(slope_img, name='MEAN') + stdev_img_hdu = fits.ImageHDU(stdev_img, name='STDEV') + hdu_list = fits.HDUList([primary_hdu, mean_img_hdu, stdev_img_hdu]) + hdu_list.writeto(output_filename, overwrite=True) + set_permissions(output_filename) + + return output_filename + + def shift_to_full_frame(self, coords): + """Shift the input list of pixels from the subarray coordinate + system to the full frame coordinate system + + Parameters + ---------- + coords : tup + (x, y) pixel coordinates in subarray coordinate system + + Returns + ------- + coords : tup + (x, y) pixel coordinates in full frame coordinate system + """ + + x = coords[0] + x += self.x0 + y = coords[1] + y += self.y0 + + return (x, y) + + def stats_by_amp(self, image, amps): + """Calculate statistics in 
the input image for each amplifier as + well as the full image + + Parameters + ---------- + image : numpy.ndarray + 2D array on which to calculate statistics + + amps : dict + Dictionary containing amp boundary coordinates (output from + ``amplifier_info`` function) + ``amps[key] = [(xmin, ymin), (xmax, ymax)]`` + + Returns + ------- + amp_means : dict + Sigma-clipped mean value for each amp. Keys are amp numbers + as strings (e.g. ``'1'``) + + amp_stdevs : dict + Sigma-clipped standard deviation for each amp. Keys are amp + numbers as strings (e.g. ``'1'``) + + gaussian_params : dict + Best-fit Gaussian parameters to the dark current histogram. + Keys are amp numbers as strings. Values are three-element + lists ``[amplitude, peak, width]``. Each element in the list + is a tuple of the best-fit value and the associated + uncertainty. + + gaussian_chi_squared : dict + Reduced chi-squared for the best-fit parameters. Keys are + amp numbers as strings + + double_gaussian_params : dict + Best-fit double Gaussian parameters to the dark current + histogram. Keys are amp numbers as strings. Values are six- + element lists. (3-elements * 2 Gaussians). + ``[amplitude1, peak1, stdev1, amplitude2, peak2, stdev2]`` + Each element of the list is a tuple containing the best-fit + value and associated uncertainty. + + double_gaussian_chi_squared : dict + Reduced chi-squared for the best-fit parameters. Keys are + amp numbers as strings + + hist : numpy.ndarray + 1D array of histogram values + + bin_centers : numpy.ndarray + 1D array of bin centers that match the ``hist`` values. + """ + + amp_means = {} + amp_stdevs = {} + gaussian_params = {} + gaussian_chi_squared = {} + double_gaussian_params = {} + double_gaussian_chi_squared = {} + + # Add full image coords to the list of amp_boundaries, so that full + # frame stats are also calculated. + if 'FULL' in self.aperture: + maxx = 0 + maxy = 0 + for amp in amps: + mxx = amps[amp][1][0] + mxy = amps[amp][1][1] + if mxx > maxx: + maxx = copy(mxx) + if mxy > maxy: + maxy = copy(mxy) + amps['5'] = [(0, 0), (maxx, maxy)] + logging.info(('\tFull frame exposure detected. Adding the full frame to the list ' + 'of amplifiers upon which to calculate statistics.')) + + for key in amps: + x_start, y_start = amps[key][0] + x_end, y_end = amps[key][1] + + # Basic statistics, sigma clipped areal mean and stdev + amp_mean, amp_stdev = calculations.mean_stdev(image[y_start: y_end, x_start: x_end]) + amp_means[key] = amp_mean + amp_stdevs[key] = amp_stdev + + # Create a histogram + lower_bound = (amp_mean - 7 * amp_stdev) + upper_bound = (amp_mean + 7 * amp_stdev) + + hist, bin_edges = np.histogram(image[y_start: y_end, x_start: x_end], bins='auto', + range=(lower_bound, upper_bound)) + bin_centers = (bin_edges[1:] + bin_edges[0: -1]) / 2. + initial_params = [np.max(hist), amp_mean, amp_stdev] + + # Fit a Gaussian to the histogram. Save best-fit params and + # uncertainties, as well as reduced chi squared + amplitude, peak, width = calculations.gaussian1d_fit(bin_centers, hist, initial_params) + gaussian_params[key] = [amplitude, peak, width] + + gauss_fit_model = models.Gaussian1D(amplitude=amplitude[0], mean=peak[0], stddev=width[0]) + gauss_fit = gauss_fit_model(bin_centers) + + positive = hist > 0 + degrees_of_freedom = len(hist) - 3. 
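+            # Reduced chi-squared: Pearson statistic comparing the histogram to
+            # the Gaussian model scaled to the total pixel count, divided by the
+            # number of bins minus the 3 fitted parameters.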
+            total_pix = np.sum(hist[positive])
+            p_i = gauss_fit[positive] / total_pix
+            gaussian_chi_squared[key] = (np.sum((hist[positive] - total_pix * p_i)**2 / (total_pix * p_i))
+                                         / degrees_of_freedom)
+
+            # Double Gaussian fit only for full frame data (and only for
+            # NIRISS, NIRCam at the moment.)
+            if key == '5':
+                if self.instrument.upper() in ['NIRISS', 'NIRCAM']:
+                    initial_params = (np.max(hist), amp_mean, amp_stdev * 0.8,
+                                      np.max(hist) / 7., amp_mean / 2., amp_stdev * 0.9)
+                    double_gauss_params, double_gauss_sigma = calculations.double_gaussian_fit(bin_centers, hist, initial_params)
+                    double_gaussian_params[key] = [[param, sig] for param, sig in zip(double_gauss_params, double_gauss_sigma)]
+                    double_gauss_fit = calculations.double_gaussian(bin_centers, *double_gauss_params)
+                    degrees_of_freedom = len(bin_centers) - 6.
+                    dp_i = double_gauss_fit[positive] / total_pix
+                    double_gaussian_chi_squared[key] = np.sum((hist[positive] - total_pix * dp_i)**2 / (total_pix * dp_i)) / degrees_of_freedom
+
+                else:
+                    double_gaussian_params[key] = [[0., 0.] for i in range(6)]
+                    double_gaussian_chi_squared[key] = 0.
+            else:
+                double_gaussian_params[key] = [[0., 0.] for i in range(6)]
+                double_gaussian_chi_squared[key] = 0.
+
+        logging.info('\tMean dark rate by amplifier: {}'.format(amp_means))
+        logging.info('\tStandard deviation of dark rate by amplifier: {}'.format(amp_stdevs))
+        logging.info('\tBest-fit Gaussian parameters [amplitude, peak, width]: {}'.format(gaussian_params))
+        logging.info('\tReduced chi-squared associated with Gaussian fit: {}'.format(gaussian_chi_squared))
+        logging.info('\tBest-fit double Gaussian parameters [amplitude1, peak1, width1, amplitude2, peak2, '
+                     'width2]: {}'.format(double_gaussian_params))
+        logging.info('\tReduced chi-squared associated with double Gaussian fit: {}'
+                     .format(double_gaussian_chi_squared))
+
+        return (amp_means, amp_stdevs, gaussian_params, gaussian_chi_squared, double_gaussian_params,
+                double_gaussian_chi_squared, hist.astype(np.float), bin_centers)
+
+
+if __name__ == '__main__':
+
+    module = os.path.basename(__file__).replace('.py', '')
+    start_time, log_file = initialize_instrument_monitor(module)
+
+    monitor = Dark()
+
+    update_monitor_table(module, start_time, log_file)
diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt b/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt
new file mode 100644
index 000000000..ddb83e174
--- /dev/null
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt
@@ -0,0 +1,606 @@
+Instrument Aperture Threshold
+nircam NRCA1_FULL_OSS 10
+nircam NRCA2_FULL_OSS 10
+nircam NRCA3_FULL_OSS 10
+nircam NRCA4_FULL_OSS 10
+nircam NRCA5_FULL_OSS 10
+nircam NRCB1_FULL_OSS 10
+nircam NRCB2_FULL_OSS 10
+nircam NRCB3_FULL_OSS 10
+nircam NRCB4_FULL_OSS 10
+nircam NRCB5_FULL_OSS 10
+nircam NRCALL_FULL 10
+nircam NRCAS_FULL 10
+nircam NRCA1_FULL 10
+nircam NRCA2_FULL 10
+nircam NRCA3_FULL 10
+nircam NRCA4_FULL 10
+nircam NRCA5_FULL 10
+nircam NRCBS_FULL 10
+nircam NRCB1_FULL 10
+nircam NRCB2_FULL 10
+nircam NRCB3_FULL 10
+nircam NRCB4_FULL 10
+nircam NRCB5_FULL 10
+nircam NRCB1_FULLP 10
+nircam NRCB5_FULLP 10
+nircam NRCA1_SUB160 30
+nircam NRCA2_SUB160 30
+nircam NRCA3_SUB160 30
+nircam NRCA4_SUB160 30
+nircam NRCA5_SUB160 30
+nircam NRCB1_SUB160 30
+nircam NRCB2_SUB160 30
+nircam NRCB3_SUB160 30
+nircam NRCB4_SUB160 30
+nircam NRCB5_SUB160 30
+nircam NRCA1_SUB320 30
+nircam NRCA2_SUB320 30
+nircam NRCA3_SUB320 30
+nircam NRCA4_SUB320 30
+nircam NRCA5_SUB320 30
+nircam
NRCB1_SUB320 30 +nircam NRCB2_SUB320 30 +nircam NRCB3_SUB320 30 +nircam NRCB4_SUB320 30 +nircam NRCB5_SUB320 30 +nircam NRCA1_SUB640 30 +nircam NRCA2_SUB640 30 +nircam NRCA3_SUB640 30 +nircam NRCA4_SUB640 30 +nircam NRCA5_SUB640 30 +nircam NRCB1_SUB640 30 +nircam NRCB2_SUB640 30 +nircam NRCB3_SUB640 30 +nircam NRCB4_SUB640 30 +nircam NRCB5_SUB640 30 +nircam NRCA5_GRISM256_F322W2 30 +nircam NRCA5_GRISM128_F322W2 30 +nircam NRCA5_GRISM64_F322W2 30 +nircam NRCA5_GRISM256_F277W 30 +nircam NRCA5_GRISM128_F277W 30 +nircam NRCA5_GRISM64_F277W 30 +nircam NRCA5_GRISM256_F356W 30 +nircam NRCA5_GRISM128_F356W 30 +nircam NRCA5_GRISM64_F356W 30 +nircam NRCA5_GRISM256_F444W 30 +nircam NRCA5_GRISM128_F444W 30 +nircam NRCA5_GRISM64_F444W 30 +nircam NRCA5_GRISM_F322W2 30 +nircam NRCA5_GRISM_F277W 30 +nircam NRCA5_GRISM_F356W 30 +nircam NRCA5_GRISM_F444W 30 +nircam NRCA1_GRISMTS 30 +nircam NRCA1_GRISMTS256 30 +nircam NRCA1_GRISMTS128 30 +nircam NRCA1_GRISMTS64 30 +nircam NRCA3_GRISMTS 30 +nircam NRCA3_GRISMTS256 30 +nircam NRCA3_GRISMTS128 30 +nircam NRCA3_GRISMTS64 30 +nircam NRCA5_TAGRISMTS32 30 +nircam NRCA5_TAGRISMTS_SCI_F322W2 30 +nircam NRCA5_TAGRISMTS_SCI_F444W 30 +nircam NRCA3_DHSPIL 30 +nircam NRCA3_DHSPIL_SUB96 30 +nircam NRCA3_DHSPIL_WEDGES 30 +nircam NRCB4_DHSPIL 30 +nircam NRCB4_DHSPIL_SUB96 30 +nircam NRCB4_DHSPIL_WEDGES 30 +nircam NRCA3_FP1 30 +nircam NRCA3_FP1_SUB8 30 +nircam NRCA3_FP1_SUB64 30 +nircam NRCA3_FP2MIMF 30 +nircam NRCA1_FP3MIMF 30 +nircam NRCA2_FP4MIMF 30 +nircam NRCA4_FP5MIMF 30 +nircam NRCB4_FP1 30 +nircam NRCB4_FP1_SUB8 30 +nircam NRCB4_FP1_SUB64 30 +nircam NRCB4_FP2MIMF 30 +nircam NRCB2_FP3MIMF 30 +nircam NRCB1_FP4MIMF 30 +nircam NRCB3_FP5MIMF 30 +nircam NRCA3_SUB64P 30 +nircam NRCA3_SUB160P 30 +nircam NRCA3_SUB400P 30 +nircam NRCA5_SUB64P 30 +nircam NRCA5_SUB160P 30 +nircam NRCA5_SUB400P 30 +nircam NRCB1_SUB64P 30 +nircam NRCB1_SUB160P 30 +nircam NRCB1_SUB400P 30 +nircam NRCB5_SUB64P 30 +nircam NRCB5_SUB160P 30 +nircam NRCB5_SUB400P 30 +nircam NRCB5_TAPSIMG32 30 +nircam NRCA5_GRISMC_WFSS 30 +nircam NRCA5_GRISMR_WFSS 30 +nircam NRCALL_GRISMC_WFSS 30 +nircam NRCALL_GRISMR_WFSS 30 +nircam NRCB5_GRISMC_WFSS 30 +nircam NRCB5_GRISMR_WFSS 30 +nircam NRCA2_MASK210R 30 +nircam NRCA5_MASK335R 30 +nircam NRCA5_MASK430R 30 +nircam NRCA4_MASKSWB 30 +nircam NRCA5_MASKLWB 30 +nircam NRCA2_TAMASK210R 30 +nircam NRCA5_TAMASK335R 30 +nircam NRCA5_TAMASK430R 30 +nircam NRCA4_TAMASKSWB 30 +nircam NRCA5_TAMASKLWB 30 +nircam NRCA5_TAMASKLWBL 30 +nircam NRCA4_TAMASKSWBS 30 +nircam NRCB1_MASK210R 30 +nircam NRCB5_MASK335R 30 +nircam NRCB5_MASK430R 30 +nircam NRCB3_MASKSWB 30 +nircam NRCB5_MASKLWB 30 +nircam NRCB1_TAMASK210R 30 +nircam NRCB5_TAMASK335R 30 +nircam NRCB5_TAMASK430R 30 +nircam NRCB3_TAMASKSWB 30 +nircam NRCB5_TAMASKLWB 30 +nircam NRCB5_TAMASKLWBL 30 +nircam NRCB3_TAMASKSWBS 30 +nircam NRCA2_FSTAMASK210R 30 +nircam NRCA4_FSTAMASKSWB 30 +nircam NRCA5_FSTAMASKLWB 30 +nircam NRCA5_FSTAMASK335R 30 +nircam NRCA5_FSTAMASK430R 30 +nircam NRCA4_MASKSWB_F182M 30 +nircam NRCA4_MASKSWB_F187N 30 +nircam NRCA4_MASKSWB_F210M 30 +nircam NRCA4_MASKSWB_F212N 30 +nircam NRCA4_MASKSWB_F200W 30 +nircam NRCA4_MASKSWB_NARROW 30 +nircam NRCA5_MASKLWB_F250M 30 +nircam NRCA5_MASKLWB_F300M 30 +nircam NRCA5_MASKLWB_F277W 30 +nircam NRCA5_MASKLWB_F335M 30 +nircam NRCA5_MASKLWB_F360M 30 +nircam NRCA5_MASKLWB_F356W 30 +nircam NRCA5_MASKLWB_F410M 30 +nircam NRCA5_MASKLWB_F430M 30 +nircam NRCA5_MASKLWB_F460M 30 +nircam NRCA5_MASKLWB_F480M 30 +nircam NRCA5_MASKLWB_F444W 30 +nircam NRCA5_MASKLWB_NARROW 30 +nircam 
NRCA2_FULL_MASK210R 10 +nircam NRCA5_FULL_MASK335R 10 +nircam NRCA5_FULL_MASK430R 10 +nircam NRCA4_FULL_MASKSWB 10 +nircam NRCA4_FULL_MASKSWB_F182M 10 +nircam NRCA4_FULL_MASKSWB_F187N 10 +nircam NRCA4_FULL_MASKSWB_F210M 10 +nircam NRCA4_FULL_MASKSWB_F212N 10 +nircam NRCA4_FULL_MASKSWB_F200W 10 +nircam NRCA5_FULL_MASKLWB 10 +nircam NRCA5_FULL_MASKLWB_F250M 10 +nircam NRCA5_FULL_MASKLWB_F300M 10 +nircam NRCA5_FULL_MASKLWB_F277W 10 +nircam NRCA5_FULL_MASKLWB_F335M 10 +nircam NRCA5_FULL_MASKLWB_F360M 10 +nircam NRCA5_FULL_MASKLWB_F356W 10 +nircam NRCA5_FULL_MASKLWB_F410M 10 +nircam NRCA5_FULL_MASKLWB_F430M 10 +nircam NRCA5_FULL_MASKLWB_F460M 10 +nircam NRCA5_FULL_MASKLWB_F480M 10 +nircam NRCA5_FULL_MASKLWB_F444W 10 +niriss NIS_CEN_OSS 10 +niriss NIS_CEN 10 +niriss NIS_AMI1 30 +niriss NIS_AMI2 30 +niriss NIS_AMI3 30 +niriss NIS_AMI4 30 +niriss NIS_AMITA 30 +niriss NIS_SOSSTA 30 +niriss NIS_WFSS_OFFSET 30 +niriss NIS_WFSS64 30 +niriss NIS_WFSS64R 30 +niriss NIS_WFSS64R3 30 +niriss NIS_WFSS64C 30 +niriss NIS_WFSS64C3 30 +niriss NIS_WFSS128 30 +niriss NIS_WFSS128R 30 +niriss NIS_WFSS128R3 30 +niriss NIS_WFSS128C 30 +niriss NIS_WFSS128C3 30 +niriss NIS_SUB64 30 +niriss NIS_SUB128 30 +niriss NIS_SUB256 30 +niriss NIS_SUBAMPCAL 30 +niriss NIS_SUBSTRIP96 30 +niriss NIS_SUBSTRIP256 30 +niriss NIS_FP1MIMF 30 +niriss NIS_FP2MIMF 30 +niriss NIS_FP3MIMF 30 +niriss NIS_FP4MIMF 30 +niriss NIS_FP5MIMF 30 +niriss NIS_AMIFULL 10 +niriss NIS_SOSSFULL 10 +miri MIRIM_FULL_OSS 10 +miri MIRIM_FULL 10 +miri MIRIM_ILLUM 30 +miri MIRIM_BRIGHTSKY 30 +miri MIRIM_SUB256 30 +miri MIRIM_SUB128 30 +miri MIRIM_SUB64 30 +miri MIRIM_SLITLESSPRISM 30 +miri MIRIM_SLITLESSUPPER 30 +miri MIRIM_SLITLESSLOWER 30 +miri MIRIM_MASK1065 30 +miri MIRIM_MASK1140 30 +miri MIRIM_MASK1550 30 +miri MIRIM_MASKLYOT 30 +miri MIRIM_TAMRS 30 +miri MIRIM_TALRS 30 +miri MIRIM_TABLOCK 30 +miri MIRIM_TALYOT_UL 30 +miri MIRIM_TALYOT_UR 30 +miri MIRIM_TALYOT_LL 30 +miri MIRIM_TALYOT_LR 30 +miri MIRIM_TALYOT_CUL 30 +miri MIRIM_TALYOT_CUR 30 +miri MIRIM_TALYOT_CLL 30 +miri MIRIM_TALYOT_CLR 30 +miri MIRIM_TA1550_UL 30 +miri MIRIM_TA1550_UR 30 +miri MIRIM_TA1550_LL 30 +miri MIRIM_TA1550_LR 30 +miri MIRIM_TA1550_CUL 30 +miri MIRIM_TA1550_CUR 30 +miri MIRIM_TA1550_CLL 30 +miri MIRIM_TA1550_CLR 30 +miri MIRIM_TA1140_UL 30 +miri MIRIM_TA1140_UR 30 +miri MIRIM_TA1140_LL 30 +miri MIRIM_TA1140_LR 30 +miri MIRIM_TA1140_CUL 30 +miri MIRIM_TA1140_CUR 30 +miri MIRIM_TA1140_CLL 30 +miri MIRIM_TA1140_CLR 30 +miri MIRIM_TA1065_UL 30 +miri MIRIM_TA1065_UR 30 +miri MIRIM_TA1065_LL 30 +miri MIRIM_TA1065_LR 30 +miri MIRIM_TA1065_CUL 30 +miri MIRIM_TA1065_CUR 30 +miri MIRIM_TA1065_CLL 30 +miri MIRIM_TA1065_CLR 30 +miri MIRIM_TAFULL 10 +miri MIRIM_TAILLUM 30 +miri MIRIM_TABRIGHTSKY 30 +miri MIRIM_TASUB256 30 +miri MIRIM_TASUB128 30 +miri MIRIM_TASUB64 30 +miri MIRIM_TASLITLESSPRISM 30 +miri MIRIM_CORON1065 30 +miri MIRIM_CORON1140 30 +miri MIRIM_CORON1550 30 +miri MIRIM_CORONLYOT 30 +miri MIRIM_KNIFE 30 +miri MIRIM_FP1MIMF 30 +miri MIRIM_FP2MIMF 30 +miri MIRIM_FP3MIMF 30 +miri MIRIM_FP4MIMF 30 +miri MIRIM_FP5MIMF 30 +miri MIRIM_SLIT 30 +miri MIRIFU_CHANNEL1A 30 +miri MIRIFU_1ASLICE01 30 +miri MIRIFU_1ASLICE02 30 +miri MIRIFU_1ASLICE03 30 +miri MIRIFU_1ASLICE04 30 +miri MIRIFU_1ASLICE05 30 +miri MIRIFU_1ASLICE06 30 +miri MIRIFU_1ASLICE07 30 +miri MIRIFU_1ASLICE08 30 +miri MIRIFU_1ASLICE09 30 +miri MIRIFU_1ASLICE10 30 +miri MIRIFU_1ASLICE11 30 +miri MIRIFU_1ASLICE12 30 +miri MIRIFU_1ASLICE13 30 +miri MIRIFU_1ASLICE14 30 +miri MIRIFU_1ASLICE15 30 +miri MIRIFU_1ASLICE16 30 +miri 
MIRIFU_1ASLICE17 30 +miri MIRIFU_1ASLICE18 30 +miri MIRIFU_1ASLICE19 30 +miri MIRIFU_1ASLICE20 30 +miri MIRIFU_1ASLICE21 30 +miri MIRIFU_CHANNEL1B 30 +miri MIRIFU_1BSLICE01 30 +miri MIRIFU_1BSLICE02 30 +miri MIRIFU_1BSLICE03 30 +miri MIRIFU_1BSLICE04 30 +miri MIRIFU_1BSLICE05 30 +miri MIRIFU_1BSLICE06 30 +miri MIRIFU_1BSLICE07 30 +miri MIRIFU_1BSLICE08 30 +miri MIRIFU_1BSLICE09 30 +miri MIRIFU_1BSLICE10 30 +miri MIRIFU_1BSLICE11 30 +miri MIRIFU_1BSLICE12 30 +miri MIRIFU_1BSLICE13 30 +miri MIRIFU_1BSLICE14 30 +miri MIRIFU_1BSLICE15 30 +miri MIRIFU_1BSLICE16 30 +miri MIRIFU_1BSLICE17 30 +miri MIRIFU_1BSLICE18 30 +miri MIRIFU_1BSLICE19 30 +miri MIRIFU_1BSLICE20 30 +miri MIRIFU_1BSLICE21 30 +miri MIRIFU_CHANNEL1C 30 +miri MIRIFU_1CSLICE01 30 +miri MIRIFU_1CSLICE02 30 +miri MIRIFU_1CSLICE03 30 +miri MIRIFU_1CSLICE04 30 +miri MIRIFU_1CSLICE05 30 +miri MIRIFU_1CSLICE06 30 +miri MIRIFU_1CSLICE07 30 +miri MIRIFU_1CSLICE08 30 +miri MIRIFU_1CSLICE09 30 +miri MIRIFU_1CSLICE10 30 +miri MIRIFU_1CSLICE11 30 +miri MIRIFU_1CSLICE12 30 +miri MIRIFU_1CSLICE13 30 +miri MIRIFU_1CSLICE14 30 +miri MIRIFU_1CSLICE15 30 +miri MIRIFU_1CSLICE16 30 +miri MIRIFU_1CSLICE17 30 +miri MIRIFU_1CSLICE18 30 +miri MIRIFU_1CSLICE19 30 +miri MIRIFU_1CSLICE20 30 +miri MIRIFU_1CSLICE21 30 +miri MIRIFU_CHANNEL2A 30 +miri MIRIFU_2ASLICE01 30 +miri MIRIFU_2ASLICE02 30 +miri MIRIFU_2ASLICE03 30 +miri MIRIFU_2ASLICE04 30 +miri MIRIFU_2ASLICE05 30 +miri MIRIFU_2ASLICE06 30 +miri MIRIFU_2ASLICE07 30 +miri MIRIFU_2ASLICE08 30 +miri MIRIFU_2ASLICE09 30 +miri MIRIFU_2ASLICE10 30 +miri MIRIFU_2ASLICE11 30 +miri MIRIFU_2ASLICE12 30 +miri MIRIFU_2ASLICE13 30 +miri MIRIFU_2ASLICE14 30 +miri MIRIFU_2ASLICE15 30 +miri MIRIFU_2ASLICE16 30 +miri MIRIFU_2ASLICE17 30 +miri MIRIFU_CHANNEL2B 30 +miri MIRIFU_2BSLICE01 30 +miri MIRIFU_2BSLICE02 30 +miri MIRIFU_2BSLICE03 30 +miri MIRIFU_2BSLICE04 30 +miri MIRIFU_2BSLICE05 30 +miri MIRIFU_2BSLICE06 30 +miri MIRIFU_2BSLICE07 30 +miri MIRIFU_2BSLICE08 30 +miri MIRIFU_2BSLICE09 30 +miri MIRIFU_2BSLICE10 30 +miri MIRIFU_2BSLICE11 30 +miri MIRIFU_2BSLICE12 30 +miri MIRIFU_2BSLICE13 30 +miri MIRIFU_2BSLICE14 30 +miri MIRIFU_2BSLICE15 30 +miri MIRIFU_2BSLICE16 30 +miri MIRIFU_2BSLICE17 30 +miri MIRIFU_CHANNEL2C 30 +miri MIRIFU_2CSLICE01 30 +miri MIRIFU_2CSLICE02 30 +miri MIRIFU_2CSLICE03 30 +miri MIRIFU_2CSLICE04 30 +miri MIRIFU_2CSLICE05 30 +miri MIRIFU_2CSLICE06 30 +miri MIRIFU_2CSLICE07 30 +miri MIRIFU_2CSLICE08 30 +miri MIRIFU_2CSLICE09 30 +miri MIRIFU_2CSLICE10 30 +miri MIRIFU_2CSLICE11 30 +miri MIRIFU_2CSLICE12 30 +miri MIRIFU_2CSLICE13 30 +miri MIRIFU_2CSLICE14 30 +miri MIRIFU_2CSLICE15 30 +miri MIRIFU_2CSLICE16 30 +miri MIRIFU_2CSLICE17 30 +miri MIRIFU_CHANNEL3A 30 +miri MIRIFU_3ASLICE01 30 +miri MIRIFU_3ASLICE02 30 +miri MIRIFU_3ASLICE03 30 +miri MIRIFU_3ASLICE04 30 +miri MIRIFU_3ASLICE05 30 +miri MIRIFU_3ASLICE06 30 +miri MIRIFU_3ASLICE07 30 +miri MIRIFU_3ASLICE08 30 +miri MIRIFU_3ASLICE09 30 +miri MIRIFU_3ASLICE10 30 +miri MIRIFU_3ASLICE11 30 +miri MIRIFU_3ASLICE12 30 +miri MIRIFU_3ASLICE13 30 +miri MIRIFU_3ASLICE14 30 +miri MIRIFU_3ASLICE15 30 +miri MIRIFU_3ASLICE16 30 +miri MIRIFU_CHANNEL3B 30 +miri MIRIFU_3BSLICE01 30 +miri MIRIFU_3BSLICE02 30 +miri MIRIFU_3BSLICE03 30 +miri MIRIFU_3BSLICE04 30 +miri MIRIFU_3BSLICE05 30 +miri MIRIFU_3BSLICE06 30 +miri MIRIFU_3BSLICE07 30 +miri MIRIFU_3BSLICE08 30 +miri MIRIFU_3BSLICE09 30 +miri MIRIFU_3BSLICE10 30 +miri MIRIFU_3BSLICE11 30 +miri MIRIFU_3BSLICE12 30 +miri MIRIFU_3BSLICE13 30 +miri MIRIFU_3BSLICE14 30 +miri MIRIFU_3BSLICE15 30 +miri MIRIFU_3BSLICE16 
30 +miri MIRIFU_CHANNEL3C 30 +miri MIRIFU_3CSLICE01 30 +miri MIRIFU_3CSLICE02 30 +miri MIRIFU_3CSLICE03 30 +miri MIRIFU_3CSLICE04 30 +miri MIRIFU_3CSLICE05 30 +miri MIRIFU_3CSLICE06 30 +miri MIRIFU_3CSLICE07 30 +miri MIRIFU_3CSLICE08 30 +miri MIRIFU_3CSLICE09 30 +miri MIRIFU_3CSLICE10 30 +miri MIRIFU_3CSLICE11 30 +miri MIRIFU_3CSLICE12 30 +miri MIRIFU_3CSLICE13 30 +miri MIRIFU_3CSLICE14 30 +miri MIRIFU_3CSLICE15 30 +miri MIRIFU_3CSLICE16 30 +miri MIRIFU_CHANNEL4A 30 +miri MIRIFU_4ASLICE01 30 +miri MIRIFU_4ASLICE02 30 +miri MIRIFU_4ASLICE03 30 +miri MIRIFU_4ASLICE04 30 +miri MIRIFU_4ASLICE05 30 +miri MIRIFU_4ASLICE06 30 +miri MIRIFU_4ASLICE07 30 +miri MIRIFU_4ASLICE08 30 +miri MIRIFU_4ASLICE09 30 +miri MIRIFU_4ASLICE10 30 +miri MIRIFU_4ASLICE11 30 +miri MIRIFU_4ASLICE12 30 +miri MIRIFU_CHANNEL4B 30 +miri MIRIFU_4BSLICE01 30 +miri MIRIFU_4BSLICE02 30 +miri MIRIFU_4BSLICE03 30 +miri MIRIFU_4BSLICE04 30 +miri MIRIFU_4BSLICE05 30 +miri MIRIFU_4BSLICE06 30 +miri MIRIFU_4BSLICE07 30 +miri MIRIFU_4BSLICE08 30 +miri MIRIFU_4BSLICE09 30 +miri MIRIFU_4BSLICE10 30 +miri MIRIFU_4BSLICE11 30 +miri MIRIFU_4BSLICE12 30 +miri MIRIFU_CHANNEL4C 30 +miri MIRIFU_4CSLICE01 30 +miri MIRIFU_4CSLICE02 30 +miri MIRIFU_4CSLICE03 30 +miri MIRIFU_4CSLICE04 30 +miri MIRIFU_4CSLICE05 30 +miri MIRIFU_4CSLICE06 30 +miri MIRIFU_4CSLICE07 30 +miri MIRIFU_4CSLICE08 30 +miri MIRIFU_4CSLICE09 30 +miri MIRIFU_4CSLICE10 30 +miri MIRIFU_4CSLICE11 30 +miri MIRIFU_4CSLICE12 30 +nirspec NRS1_FULL_OSS 10 +nirspec NRS1_FULL 10 +nirspec NRS2_FULL_OSS 10 +nirspec NRS2_FULL 10 +nirspec NRS_S200A1_SLIT 30 +nirspec NRS_S200A2_SLIT 30 +nirspec NRS_S400A1_SLIT 30 +nirspec NRS_S1600A1_SLIT 30 +nirspec NRS_S200B1_SLIT 30 +nirspec NRS_FULL_IFU 10 +nirspec NRS_IFU_SLICE00 30 +nirspec NRS_IFU_SLICE01 30 +nirspec NRS_IFU_SLICE02 30 +nirspec NRS_IFU_SLICE03 30 +nirspec NRS_IFU_SLICE04 30 +nirspec NRS_IFU_SLICE05 30 +nirspec NRS_IFU_SLICE06 30 +nirspec NRS_IFU_SLICE07 30 +nirspec NRS_IFU_SLICE08 30 +nirspec NRS_IFU_SLICE09 30 +nirspec NRS_IFU_SLICE10 30 +nirspec NRS_IFU_SLICE11 30 +nirspec NRS_IFU_SLICE12 30 +nirspec NRS_IFU_SLICE13 30 +nirspec NRS_IFU_SLICE14 30 +nirspec NRS_IFU_SLICE15 30 +nirspec NRS_IFU_SLICE16 30 +nirspec NRS_IFU_SLICE17 30 +nirspec NRS_IFU_SLICE18 30 +nirspec NRS_IFU_SLICE19 30 +nirspec NRS_IFU_SLICE20 30 +nirspec NRS_IFU_SLICE21 30 +nirspec NRS_IFU_SLICE22 30 +nirspec NRS_IFU_SLICE23 30 +nirspec NRS_IFU_SLICE24 30 +nirspec NRS_IFU_SLICE25 30 +nirspec NRS_IFU_SLICE26 30 +nirspec NRS_IFU_SLICE27 30 +nirspec NRS_IFU_SLICE28 30 +nirspec NRS_IFU_SLICE29 30 +nirspec NRS_FULL_MSA 10 +nirspec NRS_FULL_MSA1 10 +nirspec NRS_FULL_MSA2 10 +nirspec NRS_FULL_MSA3 10 +nirspec NRS_FULL_MSA4 10 +nirspec NRS_VIGNETTED_MSA 30 +nirspec NRS_VIGNETTED_MSA1 30 +nirspec NRS_VIGNETTED_MSA2 30 +nirspec NRS_VIGNETTED_MSA3 30 +nirspec NRS_VIGNETTED_MSA4 30 +nirspec NRS_FIELD1_MSA4 30 +nirspec NRS_FIELD2_MSA4 30 +nirspec NRS1_FP1MIMF 30 +nirspec NRS1_FP2MIMF 30 +nirspec NRS1_FP3MIMF 30 +nirspec NRS2_FP4MIMF 30 +nirspec NRS2_FP5MIMF 30 +nirspec CLEAR_GWA_OTE 30 +nirspec F110W_GWA_OTE 30 +nirspec F140X_GWA_OTE 30 +nirspec NRS_SKY_OTEIP 30 +nirspec NRS_CLEAR_OTEIP_MSA_L0 30 +nirspec NRS_CLEAR_OTEIP_MSA_L1 30 +nirspec NRS_F070LP_OTEIP_MSA_L0 30 +nirspec NRS_F070LP_OTEIP_MSA_L1 30 +nirspec NRS_F100LP_OTEIP_MSA_L0 30 +nirspec NRS_F100LP_OTEIP_MSA_L1 30 +nirspec NRS_F170LP_OTEIP_MSA_L0 30 +nirspec NRS_F170LP_OTEIP_MSA_L1 30 +nirspec NRS_F290LP_OTEIP_MSA_L0 30 +nirspec NRS_F290LP_OTEIP_MSA_L1 30 +nirspec NRS_F110W_OTEIP_MSA_L0 30 +nirspec NRS_F110W_OTEIP_MSA_L1 
30
+nirspec NRS_F140X_OTEIP_MSA_L0 30
+nirspec NRS_F140X_OTEIP_MSA_L1 30
+fgs FGS1_FULL_OSS 10
+fgs FGS1_FULL 10
+fgs FGS2_FULL_OSS 10
+fgs FGS2_FULL 10
+fgs FGS1_SUB128LL 30
+fgs FGS1_SUB128DIAG 30
+fgs FGS1_SUB128CNTR 30
+fgs FGS1_SUB32LL 30
+fgs FGS1_SUB32DIAG 30
+fgs FGS1_SUB32CNTR 30
+fgs FGS1_SUB8LL 30
+fgs FGS1_SUB8DIAG 30
+fgs FGS1_SUB8CNTR 30
+fgs FGS2_SUB128LL 30
+fgs FGS2_SUB128DIAG 30
+fgs FGS2_SUB128CNTR 30
+fgs FGS2_SUB32LL 30
+fgs FGS2_SUB32DIAG 30
+fgs FGS2_SUB32CNTR 30
+fgs FGS2_SUB8LL 30
+fgs FGS2_SUB8DIAG 30
+fgs FGS2_SUB8CNTR 30
+fgs FGS1_FP1MIMF 30
+fgs FGS1_FP2MIMF 30
+fgs FGS1_FP3MIMF 30
+fgs FGS1_FP4MIMF 30
+fgs FGS1_FP5MIMF 30
+fgs FGS2_FP1MIMF 30
+fgs FGS2_FP2MIMF 30
+fgs FGS2_FP3MIMF 30
+fgs FGS2_FP4MIMF 30
+fgs FGS2_FP5MIMF 30
diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/15min_to_db.py b/jwql/instrument_monitors/miri_monitors/data_trending/15min_to_db.py
new file mode 100755
index 000000000..ff9f7bc78
--- /dev/null
+++ b/jwql/instrument_monitors/miri_monitors/data_trending/15min_to_db.py
@@ -0,0 +1,106 @@
+#! /usr/bin/env python
+'''Auxiliary module to populate the database
+
+    This module was used throughout development to populate the database. Since
+    the EDB had no valid data during implementation, we had to download data
+    elsewhere. The downloaded data is in .CSV format and can easily be read by
+    the program. After import and sorting, the process_file function extracts
+    the useful part and pushes it to the auxiliary database. This function can
+    be implemented in the final cron job.
+
+Authors
+-------
+
+    - Daniel Kühbacher
+
+Use
+---
+    Make sure "directory" points to a folder where usable 15min-samples are stored.
+    Make sure you already ran .utils/sql_interface.py in order to create an empty
+    database with prepared tables.
+    Run the module from the command line.
+
+Notes
+-----
+    For development only
+'''
+
+import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn
+import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql
+import jwql.instrument_monitors.miri_monitors.data_trending.utils.csv_to_AstropyTable as apt
+from jwql.instrument_monitors.miri_monitors.data_trending.utils.process_data import once_a_day_routine
+from jwql.utils.utils import get_config, filename_parser
+
+import statistics
+import os
+import glob
+
+#set _location_ variable
+__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
+
+#point to the directory where your files are located!
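+# (glob expands the *.CSV pattern at import time; note that the match is
+# case-sensitive on case-sensitive filesystems, so the sample files must
+# keep the upper-case .CSV extension)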
+directory = os.path.join(get_config()['outputs'], 'miri_data_trending', 'trainings_data_15min', '*.CSV') +paths = glob.glob(directory) + +def process_file(conn, path): + '''Parse CSV file, process data within and put to DB + + Parameters + ---------- + conn : DBobject + Connection object to auxiliary database + path : str + defines file to read + ''' + + #import mnemonic data and append dict to variable below + m_raw_data = apt.mnemonics(path) + + #process raw data with once a day routine + processed_data = once_a_day_routine(m_raw_data) + + #push extracted and filtered data to temporary database + for key, value in processed_data.items(): + + #abbreviate data table + m = m_raw_data.mnemonic(key) + + if key == "SE_ZIMIRICEA": + length = len(value) + mean = statistics.mean(value) + deviation = statistics.stdev(value) + dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) + sql.add_data(conn, "SE_ZIMIRICEA_IDLE", dataset) + + elif key == "IMIR_HK_ICE_SEC_VOLT4": + length = len(value) + mean = statistics.mean(value) + deviation = statistics.stdev(value) + dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) + sql.add_data(conn, "IMIR_HK_ICE_SEC_VOLT4_IDLE", dataset) + + else: + length = len(value) + mean = statistics.mean(value) + deviation = statistics.stdev(value) + dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) + sql.add_data(conn, key, dataset) + +def main(): + #generate paths + DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') + DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'miri_database.db') + + #connect to temporary database + conn = sql.create_connection(DATABASE_FILE) + + #process every csv file in directory folder + for path in paths: + process_file(conn, path) + + #close connection + sql.close_connection(conn) + print("done") + +if __name__ == "__main__": + main() diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/__init__.py b/jwql/instrument_monitors/miri_monitors/data_trending/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/dashboard.py b/jwql/instrument_monitors/miri_monitors/data_trending/dashboard.py new file mode 100644 index 000000000..45274d43d --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/dashboard.py @@ -0,0 +1,98 @@ +#! /usr/bin/env python +"""Combines plots to tabs and prepares dashboard + +The module imports all prepares plot functions from .plots and combines +prebuilt tabs to a dashboard. Furthermore it defines the timerange for +the visualisation. 
Default time_range should be set to about 4 Month (120days) + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``data_container.py``, e.g.: + + :: + import jwql.instrument_monitors.miri_monitors.data_trending.dashboard as dash + dashboard, variables = dash.data_trending_dashboard(start_time, end_time) + +Dependencies +------------ + User must provide "miri_database.db" in folder jwql/database + +""" +import datetime +import os + +from bokeh.embed import components +from bokeh.models.widgets import Tabs + +#import plot functions +from .plots.power_tab import power_plots +from .plots.ice_voltage_tab import volt_plots +from .plots.fpe_voltage_tab import fpe_plots +from .plots.temperature_tab import temperature_plots +from .plots.bias_tab import bias_plots +from .plots.wheel_ratio_tab import wheel_ratios +import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql +from jwql.utils.utils import get_config + + +#configure actual datetime in order to implement range function +now = datetime.datetime.now() +#default_start = now - datetime.timedelta(1000) +default_start = datetime.date(2017, 8, 15).isoformat() + +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) +PACKAGE_DIR = __location__.split('instrument_monitors')[0] + +def data_trending_dashboard(start = default_start, end = now): + """Builds dashboard + Parameters + ---------- + start : time + configures start time for query and visualisation + end : time + configures end time for query and visualisation + Return + ------ + plot_data : list + A list containing the JavaScript and HTML content for the dashboard + variables : dict + no use + """ + + #connect to database + # DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') + DATABASE_LOCATION = os.path.join(PACKAGE_DIR, 'database') + DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'miri_database.db') + + conn = sql.create_connection(DATABASE_FILE) + + #some variables can be passed to the template via following + variables = dict(init = 1) + + #some variables can be passed to the template via following + variables = dict(init = 1) + + #add tabs to dashboard + tab1 = power_plots(conn, start, end) + tab2 = volt_plots(conn, start, end) + tab3 = fpe_plots(conn, start, end) + tab4 = temperature_plots(conn, start, end) + tab5 = bias_plots(conn, start, end) + tab6 = wheel_ratios(conn, start, end) + + #build dashboard + tabs = Tabs( tabs=[ tab1, tab2, tab3, tab5, tab4, tab6 ] ) + + #return dashboard to web app + script, div = components(tabs) + plot_data = [div, script] + + #close sql connection + sql.close_connection(conn) + + return plot_data, variables diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/day_to_db.py b/jwql/instrument_monitors/miri_monitors/data_trending/day_to_db.py new file mode 100755 index 000000000..acc909c5b --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/day_to_db.py @@ -0,0 +1,138 @@ +#! /usr/bin/env python +''' Auxiliary module to populate database + + This module was used throughout development to populate the database. Since + the EDB had no valid data during implementation we had to download data elsewhere. + The downloaded data is in .CSV format and can easily be read by the program. + After import and sorting the process_file function extracts the useful part and + pushes it to the auxiliary database. This function can be implemented in the + final cron job. 
+ +Authors +------- + + - Daniel Kühbacher + +Use +--- + make sure "directory" points to a folder where useable day-samples are stored. + make sure you already ran .utils/sql_interface.py in order to create a empty database + with prepared tables. + Run the module form the command line. + +Notes +----- + For developement only +''' + +import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn +import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.miri_monitors.data_trending.utils.csv_to_AstropyTable as apt +from jwql.instrument_monitors.miri_monitors.data_trending.utils.process_data import whole_day_routine, wheelpos_routine +from jwql.utils.utils import get_config, filename_parser + +import os +import glob +import statistics +import sqlite3 + +#set _location_ variable +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + +#files with data to initially fill the database +directory = os.path.join(get_config()['outputs'], 'miri_data_trending', 'trainings_data_day', '*.CSV') +paths = glob.glob(directory) + + +def process_file(conn, path): + '''Parse CSV file, process data within and put to DB + + Parameters + ---------- + conn : DBobject + Connection object to temporary database + path : str + defines file to read + ''' + + m_raw_data = apt.mnemonics(path) + + cond3, FW_volt, GW14_volt, GW23_volt, CCC_volt = whole_day_routine(m_raw_data) + FW, GW14, GW23, CCC= wheelpos_routine(m_raw_data) + + #put data from con3 to database + for key, value in cond3.items(): + + m = m_raw_data.mnemonic(key) + + if value != None: + if len(value) > 2: + if key == "SE_ZIMIRICEA": + length = len(value) + mean = statistics.mean(value) + deviation = statistics.stdev(value) + dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) + sql.add_data(conn, "SE_ZIMIRICEA_HV_ON", dataset) + + elif key == "IMIR_HK_ICE_SEC_VOLT4": + length = len(value) + mean = statistics.mean(value) + deviation = statistics.stdev(value) + dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) + sql.add_data(conn, "IMIR_HK_ICE_SEC_VOLT4_HV_ON", dataset) + + else: + length = len(value) + mean = statistics.mean(value) + deviation = statistics.stdev(value) + dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) + sql.add_data(conn, key, dataset) + + + ######################################################################################### + for pos in mn.fw_positions: + try: + data = FW[pos] + for element in data: + sql.add_wheel_data(conn, 'IMIR_HK_FW_POS_RATIO_{}'.format(pos), element) + except KeyError: + pass + + for pos in mn.gw_positions: + try: + data_GW14 = GW14[pos] + data_GW23 = GW23[pos] + + for element in data_GW14: + sql.add_wheel_data(conn, 'IMIR_HK_GW14_POS_RATIO_{}'.format(pos), element) + for element in data_GW23: + sql.add_wheel_data(conn, 'IMIR_HK_GW23_POS_RATIO_{}'.format(pos), element) + except KeyError: + pass + + for pos in mn.ccc_positions: + try: + data = CCC[pos] + for element in data: + sql.add_wheel_data(conn, 'IMIR_HK_CCC_POS_RATIO_{}'.format(pos), element) + except KeyError: + pass + + +def main(): + #point to database + DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') + DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'miri_database.db') + + #connect to temporary database + conn = sql.create_connection(DATABASE_FILE) + + #process all files found ind folder "directory" + for path in paths: + 
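+        # each CSV sample file adds one averaged (start, end, n_points, mean,
+        # stdev) row per mnemonic table, plus individual wheel-position ratio
+        # entries, to the auxiliary database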
        process_file(conn, path)
+
+    sql.close_connection(conn)
+    print("done")
+
+if __name__ == "__main__":
+    main()
diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py b/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py
new file mode 100755
index 000000000..c2532edf1
--- /dev/null
+++ b/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py
@@ -0,0 +1,196 @@
+#! /usr/bin/env python
+'''Cron job for MIRI data trending -> populates database
+
+    This module holds functions to connect with the engineering database in order
+    to grab and process data for the specific MIRI database. The script queries
+    a daily 15 min chunk and a whole day dataset. These contain several mnemonics
+    defined in ``mnemonics.py``. The queried data gets processed and stored in
+    an auxiliary database.
+
+Authors
+-------
+    - Daniel Kühbacher
+
+Dependencies
+------------
+    For further information please contact Brian O'Sullivan
+
+References
+----------
+
+'''
+from .utils import mnemonics as mn
+from .utils import sql_interface as sql
+from .utils.process_data import whole_day_routine, wheelpos_routine, once_a_day_routine
+from jwql.utils.engineering_database import query_single_mnemonic
+
+import pandas as pd
+import numpy as np
+import statistics
+import sqlite3
+import os
+
+from astropy.time import Time
+
+__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
+PACKAGE_DIR = __location__.split('instrument_monitors')[0]
+
+
+def process_day_sample(conn, m_raw_data):
+    '''Process a whole-day data sample and push the results to the DB
+
+    Parameters
+    ----------
+    conn : DBobject
+        Connection object to temporary database
+    m_raw_data : mnemonics object
+        Raw mnemonic data (queried from the EDB) to process
+    '''
+
+    cond3, FW_volt, GW14_volt, GW23_volt, CCC_volt = whole_day_routine(m_raw_data)
+    FW, GW14, GW23, CCC = wheelpos_routine(m_raw_data)
+
+    #put data from cond3 to database
+    for key, value in cond3.items():
+
+        m = m_raw_data.mnemonic(key)
+
+        if value is not None:
+            if len(value) > 2:
+                if key == "SE_ZIMIRICEA":
+                    length = len(value)
+                    mean = statistics.mean(value)
+                    deviation = statistics.stdev(value)
+                    dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
+                    sql.add_data(conn, "SE_ZIMIRICEA_HV_ON", dataset)
+
+                elif key == "IMIR_HK_ICE_SEC_VOLT4":
+                    length = len(value)
+                    mean = statistics.mean(value)
+                    deviation = statistics.stdev(value)
+                    dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
+                    sql.add_data(conn, "IMIR_HK_ICE_SEC_VOLT4_HV_ON", dataset)
+
+                else:
+                    length = len(value)
+                    mean = statistics.mean(value)
+                    deviation = statistics.stdev(value)
+                    dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
+                    sql.add_data(conn, key, dataset)
+
+    #########################################################################################
+    for pos in mn.fw_positions:
+        try:
+            data = FW[pos]
+            for element in data:
+                sql.add_wheel_data(conn, 'IMIR_HK_FW_POS_RATIO_{}'.format(pos), element)
+        except KeyError:
+            pass
+
+    for pos in mn.gw_positions:
+        try:
+            data_GW14 = GW14[pos]
+            data_GW23 = GW23[pos]
+
+            for element in data_GW14:
+                sql.add_wheel_data(conn, 'IMIR_HK_GW14_POS_RATIO_{}'.format(pos), element)
+            for element in data_GW23:
+                sql.add_wheel_data(conn, 'IMIR_HK_GW23_POS_RATIO_{}'.format(pos), element)
+        except KeyError:
+            pass
+
+    for pos in mn.ccc_positions:
+        try:
+            data = CCC[pos]
+            for element in data:
+                sql.add_wheel_data(conn, 'IMIR_HK_CCC_POS_RATIO_{}'.format(pos), element)
+        except KeyError:
+            pass
+
+
+def process_15min_sample(conn, m_raw_data):
+    '''Process a 15 min data sample and push the results to the DB
+
+    Parameters
+    ----------
+    conn : DBobject
+        Connection object to temporary database
+    m_raw_data : mnemonics object
+        Raw mnemonic data (queried from the EDB) to process
+    '''
+
+    #process raw data with once a day routine
+    processed_data = once_a_day_routine(m_raw_data)
+
+    #push extracted and filtered data to temporary database
+    for key, value in processed_data.items():
+
+        #abbreviate data table
+        m = m_raw_data.mnemonic(key)
+
+        if key == "SE_ZIMIRICEA":
+            length = len(value)
+            mean = statistics.mean(value)
+            deviation = statistics.stdev(value)
+            dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
+            sql.add_data(conn, "SE_ZIMIRICEA_IDLE", dataset)
+
+        elif key == "IMIR_HK_ICE_SEC_VOLT4":
+            length = len(value)
+            mean = statistics.mean(value)
+            deviation = statistics.stdev(value)
+            dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
+            sql.add_data(conn, "IMIR_HK_ICE_SEC_VOLT4_IDLE", dataset)
+
+        else:
+            length = len(value)
+            mean = statistics.mean(value)
+            deviation = statistics.stdev(value)
+            dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
+            sql.add_data(conn, key, dataset)
+
+
+def main():
+
+    mnemonic_identifier = 'SA_ZFGOUTFOV'
+    start_time = Time(2016.0, format='decimalyear')
+    end_time = Time(2018.1, format='decimalyear')
+
+    mnemonic = query_single_mnemonic(mnemonic_identifier, start_time, end_time)
+    assert len(mnemonic.data) == mnemonic.meta['paging']['rows']
+
+    whole_day = {}
+    for mnemonic in mn.mnemonic_set_15min:
+        whole_day[mnemonic] = query_single_mnemonic(mnemonic, start_time, end_time)
+
+    #configure start and end time for query
+    #
+    #
+    #
+
+    #query table_day and table_15min from the engineering database
+    #
+    #
+    #
+    #return table_day, table_15min
+
+    #open temporary database and write data!
+    DATABASE_LOCATION = os.path.join(PACKAGE_DIR, 'database')
+    DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'miri_database.db')
+
+    conn = sql.create_connection(DATABASE_FILE)
+
+    process_day_sample(conn, table_day)
+    process_15min_sample(conn, table_15min)
+
+    sql.close_connection(conn)
diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/__init__.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/bias_tab.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/bias_tab.py
new file mode 100644
index 000000000..7cf0865b3
--- /dev/null
+++ b/jwql/instrument_monitors/miri_monitors/data_trending/plots/bias_tab.py
@@ -0,0 +1,360 @@
+#! /usr/bin/env python
+"""Prepares plots for BIAS tab
+
+    Module prepares plots for mnemonics below. Combines plots in a grid and
+    returns tab object.
+ + Plot 1: + IGDP_MIR_IC_V_VDETCOM + IGDP_MIR_SW_V_VDETCOM + IGDP_MIR_LW_V_VDETCOM + + Plot 2: + IGDP_MIR_IC_V_VSSOUT + IGDP_MIR_SW_V_VSSOUT + IGDP_MIR_LW_V_VSSOUT + + Plot 3: + IGDP_MIR_IC_V_VRSTOFF + IGDP_MIR_SW_V_VRSTOFF + IGDP_MIR_LW_V_VRSTOFF + + Plot 4: + IGDP_MIR_IC_V_VP + IGDP_MIR_SW_V_VP + IGDP_MIR_LW_V_VP + + Plot 5 + IGDP_MIR_IC_V_VDDUC + IGDP_MIR_SW_V_VDDUC + IGDP_MIR_LW_V_VDDUC + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``dashborad.py``, e.g.: + + :: + from .plots.bias_tab import bias_plots + tab = bias_plots(conn, start, end) + +Dependencies +------------ + User must provide database "miri_database.db" + +""" + +import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf +from bokeh.plotting import figure +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource, HoverTool +from bokeh.layouts import gridplot, Column + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def vdetcom(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 560, \ + plot_height = 500, \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "VDETCOM" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "VDETCOM IC", "IGDP_MIR_IC_V_VDETCOM", start, end, conn, color = "red") + b = pf.add_to_plot(p, "VDETCOM SW", "IGDP_MIR_SW_V_VDETCOM", start, end, conn, color = "orange") + c = pf.add_to_plot(p, "VDETCOM LW", "IGDP_MIR_LW_V_VDETCOM", start, end, conn, color = "green") + + pf.add_hover_tool(p,[a,b,c]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def vssout(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 560, \ + plot_height = 500, \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "VSSOUT" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "VSSOUT IC", "IGDP_MIR_IC_V_VSSOUT", start, end, conn, color = "red") + b = pf.add_to_plot(p, "VSSOUT SW", "IGDP_MIR_SW_V_VSSOUT", start, end, conn, color = "orange") + c = pf.add_to_plot(p, "VSSOUT LW", "IGDP_MIR_LW_V_VSSOUT", start, end, conn, color = "green") + + pf.add_hover_tool(p,[a,b,c]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def vrstoff(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 560, \ + plot_height = 500, \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "VRSTOFF" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "VRSTOFF IC", "IGDP_MIR_IC_V_VRSTOFF", start, end, conn, color = "red") + b = pf.add_to_plot(p, "VRSTOFF SW", "IGDP_MIR_SW_V_VRSTOFF", start, end, conn, color = "orange") + c = pf.add_to_plot(p, "VRSTOFF LW", "IGDP_MIR_LW_V_VRSTOFF", start, end, conn, color = "green") + + pf.add_hover_tool(p,[a,b,c]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def vp(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 560, \ + plot_height = 500, \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "VP" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "VP IC", "IGDP_MIR_IC_V_VP", start, end, conn, color = "red") + b = pf.add_to_plot(p, "VP SW", "IGDP_MIR_SW_V_VP", start, end, conn, color = "orange") + c = pf.add_to_plot(p, "VP LW", "IGDP_MIR_LW_V_VP", start, end, conn, color = "green") + + pf.add_hover_tool(p,[a,b,c]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def vdduc(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. 
datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 560, \ + plot_height = 500, \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "VDDUC" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "VDDUC IC", "IGDP_MIR_IC_V_VDDUC", start, end, conn, color = "red") + b = pf.add_to_plot(p, "VDDUC SW", "IGDP_MIR_SW_V_VDDUC", start, end, conn, color = "orange") + c = pf.add_to_plot(p, "VDDUC LW", "IGDP_MIR_LW_V_VDDUC", start, end, conn, color = "green") + + pf.add_hover_tool(p,[a,b,c]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def bias_plots(conn, start, end): + '''Combines plots to a tab + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : tab object + used by dashboard.py to set up dashboard + ''' + + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+    <table style="width:100%">
+      <tr>
+        <th><h6>Plotname</h6></th>
+        <th><h6>Mnemonic</h6></th>
+        <th><h6>Description</h6></th>
+      </tr>
+      <tr>
+        <td>VSSOUT</td>
+        <td>IGDP_MIR_IC_V_VSSOUT<br>
+            IGDP_MIR_SW_V_VSSOUT<br>
+            IGDP_MIR_LW_V_VSSOUT</td>
+        <td>Detector Bias VSSOUT (IC,SW, & LW)</td>
+      </tr>
+      <tr>
+        <td>VDETCOM</td>
+        <td>IGDP_MIR_IC_V_VDETCOM<br>
+            IGDP_MIR_SW_V_VDETCOM<br>
+            IGDP_MIR_LW_V_VDETCOM</td>
+        <td>Detector Bias VDETCOM (IC,SW, & LW)</td>
+      </tr>
+      <tr>
+        <td>VRSTOFF</td>
+        <td>IGDP_MIR_IC_V_VRSTOFF<br>
+            IGDP_MIR_SW_V_VRSTOFF<br>
+            IGDP_MIR_LW_V_VRSTOFF</td>
+        <td>Detector Bias VRSTOFF (IC,SW, & LW)</td>
+      </tr>
+      <tr>
+        <td>VP</td>
+        <td>IGDP_MIR_IC_V_VP<br>
+            IGDP_MIR_SW_V_VP<br>
+            IGDP_MIR_LW_V_VP</td>
+        <td>Detector Bias VP (IC,SW, & LW)</td>
+      </tr>
+      <tr>
+        <td>VDDUC</td>
+        <td>IGDP_MIR_IC_V_VDDUC<br>
+            IGDP_MIR_SW_V_VDDUC<br>
+            IGDP_MIR_LW_V_VDDUC</td>
+        <td>Detector Bias VDDUC (IC,SW, & LW)</td>
+      </tr>
+    </table>
+ + """, width=1100) + + plot1 = vdetcom(conn, start, end) + plot2 = vssout(conn, start, end) + plot3 = vrstoff(conn, start, end) + plot4 = vp(conn, start, end) + plot5 = vdduc(conn, start, end) + + l = gridplot([ [plot2, plot1], \ + [plot3, plot4], \ + [plot5, None]], merge_tools=False) + + layout = Column(descr, l) + + tab = Panel(child = layout, title = "BIAS") + + return tab diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/fpe_voltage_tab.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/fpe_voltage_tab.py new file mode 100644 index 000000000..cc0961051 --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/plots/fpe_voltage_tab.py @@ -0,0 +1,405 @@ +#! /usr/bin/env python +"""Prepares plots for FPE VOLTAGE tab + + Module prepares plots for mnemonics below. Combines plots in a grid and + returns tab object. + + Plot 1: + IMIR_PDU_V_DIG_5V + IMIR_PDU_I_DIG_5V + + Plot 2: + IMIR_PDU_V_ANA_5V + IMIR_PDU_I_ANA_5V + + Plot 3: + IMIR_PDU_V_ANA_N5V + IMIR_PDU_I_ANA_N5V + + Plot 4: + IMIR_PDU_V_ANA_7V + IMIR_PDU_I_ANA_7V + + Plot 5: + IMIR_PDU_V_ANA_N7V + IMIR_PDU_I_ANA_N7V + + Plot 6: + IMIR_SPW_V_DIG_2R5V + IMIR_PDU_V_REF_2R5V + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``dashborad.py``, e.g.: + + :: + from .plots.fpe_voltage_tab import fpe_plots + tab = fpe_plots(conn, start, end) + +Dependencies +------------ + User must provide database "miri_database.db" + +""" +import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf +from bokeh.models import LinearAxis, Range1d +from bokeh.plotting import figure +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource, HoverTool +from bokeh.layouts import WidgetBox, gridplot, Column + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def dig5(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 500, + y_range = [4.9,5.1], + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "FPE Dig. 5V" + pf.add_basic_layout(p) + + p.extra_y_ranges = {"current": Range1d(start=2100, end=2500)} + a = pf.add_to_plot(p, "FPE Dig. 5V", "IMIR_PDU_V_DIG_5V", start, end, conn, color = "red") + b = pf.add_to_plot(p, "FPE Dig. 5V Current", "IMIR_PDU_I_DIG_5V", start, end, conn, y_axis = "current", color = "blue") + p.add_layout(LinearAxis(y_range_name = "current", axis_label = "Current (mA)", axis_label_text_color = "blue"), 'right') + + pf.add_hover_tool(p,[a,b]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def refdig(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. 
datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 560, \ + plot_height = 500, \ + y_range = [2.45,2.55], \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "2.5V Ref and FPE Dig." + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "FPE Dig. 2.5V", "IMIR_SPW_V_DIG_2R5V", start, end, conn, color = "orange") + b = pf.add_to_plot(p, "FPE PDU 2.5V REF", "IMIR_PDU_V_REF_2R5V", start, end, conn, color = "red") + + pf.add_hover_tool(p,[a,b]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def ana5(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 560, \ + plot_height = 500, \ + y_range = [4.95,5.05], \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "FPE Ana. 5V" + pf.add_basic_layout(p) + + p.extra_y_ranges = {"current": Range1d(start=100, end=250)} + a = pf.add_to_plot(p, "FPE Ana. 5V", "IMIR_PDU_V_ANA_5V",start, end, conn, color = "red") + b = pf.add_to_plot(p, "FPE Ana. 5V Current", "IMIR_PDU_I_ANA_5V",start, end, conn, y_axis = "current", color = "blue") + p.add_layout(LinearAxis(y_range_name = "current", axis_label = "Current (mA)", axis_label_text_color = "blue"), 'right') + + pf.add_hover_tool(p,[a,b]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def ana5n(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 560, \ + plot_height = 500, \ + y_range = [-5.1,-4.85], \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "FPE Ana. N5V" + pf.add_basic_layout(p) + + p.extra_y_ranges = {"current": Range1d(start=100, end=300)} + a = pf.add_to_plot(p, "FPE Ana. N5", "IMIR_PDU_V_ANA_N5V",start, end, conn, color = "red") + b = pf.add_to_plot(p, "FPE Ana. 
N5 Current", "IMIR_PDU_I_ANA_N5V",start, end, conn, y_axis = "current", color = "blue") + p.add_layout(LinearAxis(y_range_name = "current", axis_label = "Current (mA)", axis_label_text_color = "blue"), 'right') + + pf.add_hover_tool(p,[a,b]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def ana7(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 560, \ + plot_height = 500, \ + y_range = [6.85, 7.1], \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "FPE Ana. 7V" + pf.add_basic_layout(p) + + p.extra_y_ranges = {"current": Range1d(start=300, end=450)} + a = pf.add_to_plot(p, "FPE Ana. 7V", "IMIR_PDU_V_ANA_7V",start, end, conn, color = "red") + b = pf.add_to_plot(p, "FPE Ana. 7V Current", "IMIR_PDU_I_ANA_7V",start, end, conn, y_axis = "current", color = "blue") + p.add_layout(LinearAxis(y_range_name = "current", axis_label = "Current (mA)", axis_label_text_color = "blue"), 'right') + + pf.add_hover_tool(p,[a,b]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def ana7n(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 560, \ + plot_height = 500, \ + y_range = [-7.1, -6.9], \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "FPE Ana. N7V" + pf.add_basic_layout(p) + + p.extra_y_ranges = {"current": Range1d(start=350, end=400)} + a = pf.add_to_plot(p, "FPE Dig. N7V", "IMIR_PDU_V_ANA_N7V",start, end, conn, color = "red") + b = pf.add_to_plot(p, "FPE Ana. N7V Current", "IMIR_PDU_I_ANA_N7V",start, end, conn, y_axis = "current", color = "blue") + p.add_layout(LinearAxis(y_range_name = "current", axis_label = "Current (mA)", axis_label_text_color = "blue"), 'right') + + pf.add_hover_tool(p,[a,b]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + + +def fpe_plots(conn, start, end): + '''Combines plots to a tab + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : tab object + used by dashboard.py to set up dashboard + ''' + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+    <table style="width:100%">
+      <tr>
+        <th><h6>Plotname</h6></th>
+        <th><h6>Mnemonic</h6></th>
+        <th><h6>Description</h6></th>
+      </tr>
+      <tr>
+        <td>2.5V Ref and FPE Dig.</td>
+        <td>IMIR_SPW_V_DIG_2R5V<br>
+            IMIR_PDU_V_REF_2R5V</td>
+        <td>FPE 2.5V Digital and FPE 2.5V PDU Reference Voltage</td>
+      </tr>
+      <tr>
+        <td>FPE Dig. 5V</td>
+        <td>IMIR_PDU_V_DIG_5V<br>
+            IMIR_PDU_I_DIG_5V</td>
+        <td>FPE 5V Digital Voltage and Current</td>
+      </tr>
+      <tr>
+        <td>FPE Ana. 5V</td>
+        <td>IMIR_PDU_V_ANA_5V<br>
+            IMIR_PDU_I_ANA_5V</td>
+        <td>FPE +5V Analog Voltage and Current</td>
+      </tr>
+      <tr>
+        <td>FPE Ana. N5V</td>
+        <td>IMIR_PDU_V_ANA_N5V<br>
+            IMIR_PDU_I_ANA_N5V</td>
+        <td>FPE -5V Analog Voltage and Current</td>
+      </tr>
+      <tr>
+        <td>FPE Ana. 7V</td>
+        <td>IMIR_PDU_V_ANA_7V<br>
+            IMIR_PDU_I_ANA_7V</td>
+        <td>FPE +7V Analog Voltage and Current</td>
+      </tr>
+      <tr>
+        <td>FPE Ana. N7V</td>
+        <td>IMIR_PDU_V_ANA_N7V<br>
+            IMIR_PDU_I_ANA_N7V</td>
+        <td>FPE -7V Analog Voltage and Current</td>
+      </tr>
+    </table>
+ + """, width=1100) + + plot1 = dig5(conn, start, end) + plot2 = refdig(conn, start, end) + plot3 = ana5(conn, start, end) + plot4 = ana5n(conn, start, end) + plot5 = ana7(conn, start, end) + plot6 = ana7n(conn, start, end) + + l = gridplot([ [plot2, plot1], \ + [plot3, plot4], \ + [plot5, plot6]], merge_tools=False) + + layout = Column(descr, l) + + tab = Panel(child = layout, title = "FPE VOLTAGE/CURRENT") + + return tab diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/ice_voltage_tab.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/ice_voltage_tab.py new file mode 100644 index 000000000..ccd92776f --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/plots/ice_voltage_tab.py @@ -0,0 +1,294 @@ +#! /usr/bin/env python +"""Prepares plots for ICE VOLTAGE tab + + Module prepares plots for mnemonics below, combines plots in a grid and + returns tab object. + + Plot 1: + IMIR_HK_ICE_SEC_VOLT1 + IMIR_HK_ICE_SEC_VOLT3 + + Plot 2: + IMIR_HK_ICE_SEC_VOLT2 + + Plot 3: + IMIR_HK_ICE_SEC_VOLT4 : IDLE and HV_ON + + Plot 4: + IMIR_HK_FW_POS_VOLT + IMIR_HK_GW14_POS_VOLT + IMIR_HK_GW23_POS_VOLT + IMIR_HK_CCC_POS_VOLT + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``dashborad.py``, e.g.: + + :: + from .plots.ice_voltage_tab import ice_plots + tab = ice_plots(conn, start, end) + +Dependencies +------------ + User must provide database "miri_database.db" + +""" + +import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf +from bokeh.models import LinearAxis, Range1d +from bokeh.plotting import figure +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource +from bokeh.layouts import gridplot, Column + +import pandas as pd +import numpy as np + +from astropy.time import Time + +def volt4(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 500, + y_range = [4.2,5], + x_axis_type = 'datetime', + output_backend="webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "ICE_SEC_VOLT4" + pf.add_basic_layout(p) + + # add a line renderer with legend and line thickness + + a = pf.add_to_plot(p, "Volt4 Idle", "IMIR_HK_ICE_SEC_VOLT4_IDLE", start, end, conn, color = "orange") + b = pf.add_to_plot(p, "Volt4 Hv on", "IMIR_HK_ICE_SEC_VOLT4_HV_ON", start, end, conn, color = "red") + + pf.add_hover_tool(p, [a,b]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def volt1_3(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 500, + y_range = [30,50], + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "ICE_SEC_VOLT1/3" + pf.add_basic_layout(p) + + # add a line renderer with legend and line thickness + a = pf.add_to_plot(p, "Volt1", "IMIR_HK_ICE_SEC_VOLT1", start, end, conn, color = "red") + b = pf.add_to_plot(p, "Volt3", "IMIR_HK_ICE_SEC_VOLT3", start, end, conn, color = "purple") + + pf.add_hover_tool(p, [a,b]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def volt2(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 500, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "ICE_SEC_VOLT2" + pf.add_basic_layout(p) + + # add a line renderer with legend and line thickness + a = pf.add_to_plot(p, "Volt2", "IMIR_HK_ICE_SEC_VOLT2", start, end, conn, color = "red") + + pf.add_hover_tool(p,[a]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def pos_volt(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 500, + y_range = [280,300], + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (mV)') + + p.grid.visible = True + p.title.text = "Wheel Sensor Supply" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "FW", "IMIR_HK_FW_POS_VOLT", start, end, conn, color = "red") + b = pf.add_to_plot(p, "GW14", "IMIR_HK_GW14_POS_VOLT", start, end, conn, color = "purple") + c = pf.add_to_plot(p, "GW23", "IMIR_HK_GW23_POS_VOLT", start, end, conn, color = "orange") + d = pf.add_to_plot(p, "CCC", "IMIR_HK_CCC_POS_VOLT", start, end, conn, color = "firebrick") + + pf.add_hover_tool(p, [a,b,c,d]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def volt_plots(conn, start, end): + '''Combines plots to a tab + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : tab object + used by dashboard.py to set up dashboard + ''' + + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+        <tr>
+        <th>Plotname</th>
+        <th>Mnemonic</th>
+        <th>Description</th>
+        </tr>
+        <tr>
+        <td>ICE_SEC_VOLT1/3</td>
+        <td>IMIR_HK_ICE_SEC_VOLT1<br>
+            IMIR_HK_ICE_SEC_VOLT3</td>
+        <td>ICE Secondary Voltage (HV) V1 and V3</td>
+        </tr>
+        <tr>
+        <td>ICE_SEC_VOLT2</td>
+        <td>IMIR_HK_ICE_SEC_VOLT2</td>
+        <td>ICE secondary voltage (HV) V2</td>
+        </tr>
+        <tr>
+        <td>ICE_SEC_VOLT4</td>
+        <td>IMIR_HK_ICE_SEC_VOLT4</td>
+        <td>ICE secondary voltage (HV) V4 - HV on and IDLE</td>
+        </tr>
+        <tr>
+        <td>Wheel Sensor Supply</td>
+        <td>IMIR_HK_FW_POS_VOLT<br>
+            IMIR_HK_GW14_POS_VOLT<br>
+            IMIR_HK_GW23_POS_VOLT<br>
+            IMIR_HK_CCC_POS_VOLT</td>
+        <td>Wheel sensor supply voltages</td>
+        </tr>
+ + """, width = 1100) + + plot1 = volt1_3(conn, start, end) + plot2 = volt2(conn, start, end) + plot3 = volt4(conn, start, end) + plot4 = pos_volt(conn, start, end) + + l = gridplot([[plot1, plot2], [plot3, plot4]], merge_tools = False) + layout = Column(descr, l) + + tab = Panel(child = layout, title = "ICE VOLTAGE") + + return tab diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/plot_functions.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/plot_functions.py new file mode 100644 index 000000000..c07e7ab6d --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/plots/plot_functions.py @@ -0,0 +1,209 @@ +#! /usr/bin/env python +"""Auxilary functions for plots + + Module holds functions that are used for several plots. + + +Authors +------- + - Daniel Kühbacher + +Use +--- + + +Dependencies +------------ + +""" +import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql +from bokeh.plotting import figure +from bokeh.models import BoxAnnotation, LinearAxis, Range1d +from bokeh.embed import components +from bokeh.models.widgets import Panel, Tabs +from bokeh.models import ColumnDataSource, HoverTool, DatetimeTickFormatter, DatetimeTicker, SingleIntervalTicker +from bokeh.models.formatters import TickFormatter +from bokeh.models.tools import PanTool, SaveTool + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def pol_regression(x, y, rank): + ''' Calculate polynominal regression of certain rank + Parameters + ---------- + x : list + x parameters for regression + y : list + y parameters for regression + rank : int + rank of regression + Return + ------ + y_poly : list + regression y parameters + ''' + z = np.polyfit(x, y, rank) + f = np.poly1d(z) + y_poly = f(x) + return y_poly + +def add_hover_tool(p, rend): + ''' Append hover tool to plot + parameters + ---------- + p : bokeh figure + declares where to append hover tool + rend : list + list of renderer to append hover tool + ''' + + from bokeh.models import HoverTool + + #activate HoverTool for scatter plot + hover_tool = HoverTool( tooltips = + [ + ('Name', '$name'), + ('Count', '@data_points'), + ('Mean', '@average'), + ('Deviation', '@deviation'), + ], renderers = rend) + #append hover tool + p.tools.append(hover_tool) + +def add_limit_box(p, lower, upper, alpha = 0.1, color="green"): + ''' Adds box to plot + Parameters + ---------- + p : bokeh figure + declares where to append hover tool + lower : float + lower limit of box + upper : float + upper limit of box + alpha : float + transperency of box + color : str + filling color + ''' + box = BoxAnnotation(bottom = lower, top = upper, fill_alpha = alpha, fill_color = color) + p.add_layout(box) + +def add_to_plot(p, legend, mnemonic, start, end, conn, y_axis= "default", color="red", err='n'): + '''Add scatter and line to certain plot and activates hoover tool + Parameters + ---------- + p : bokeh object + defines plot where line and scatter should be added + legend : str + will be showed in legend of plot + mnemonic : str + defines mnemonic to be plotted + start : datetime + sets start time for data query + end : datetime + sets end time for data query + conn : DBobject + connection object to database + y_axis : str (default='default') + used if secon y axis is provided + color : str (default='dred') + defines color for scatter and line plot + Return + ------ + scat : plot scatter object + used for applying hovertools o plots + ''' + + #convert given start and end time to 
astropy time + start_str = str(Time(start).mjd) + end_str = str(Time(end).mjd) + + #prepare and execute sql query + sql_c = "SELECT * FROM "+mnemonic+" WHERE start_time BETWEEN "+start_str+" AND "+end_str+" ORDER BY start_time" + temp = pd.read_sql_query(sql_c, conn) + + #put data into Dataframe and define ColumnDataSource for each plot + reg = pd.DataFrame({'reg' : pol_regression(temp['start_time'], temp['average'],3)}) + temp = pd.concat([temp, reg], axis = 1) + temp['start_time'] = pd.to_datetime( Time(temp['start_time'], format = "mjd").datetime ) + plot_data = ColumnDataSource(temp) + + #plot data + p.line(x = "start_time", y = "average", color = color, y_range_name = y_axis, legend = legend, source = plot_data) + scat = p.scatter(x = "start_time", y = "average", color = color, name = mnemonic, y_range_name = y_axis, legend = legend, source = plot_data) + + #generate error lines if wished + if err != 'n': + #generate error bars + err_xs = [] + err_ys = [] + + for index, item in temp.iterrows(): + err_xs.append((item['start_time'], item['start_time'])) + err_ys.append((item['average'] - item['deviation'], item['average'] + item['deviation'])) + + # plot them + p.multi_line(err_xs, err_ys, color = color, legend = legend) + + return scat + +def add_to_wplot(p, legend, mnemonic, start, end, conn, nominal, color = "red"): + '''Add line plot to figure (for wheelpositions) + Parameters + ---------- + p : bokeh object + defines figure where line schould be plotted + legend : str + will be showed in legend of plot + mnemonic : str + defines mnemonic to be plotted + start : datetime + sets start time for data query + end : datetime + sets end time for data query + conn : DBobject + connection object to database + color : str (default='dred') + defines color for scatter and line plot + ''' + + start_str = str(Time(start).mjd) + end_str = str(Time(end).mjd) + + sql_c = "SELECT * FROM "+mnemonic+" WHERE timestamp BETWEEN "+start_str+" AND "+end_str+" ORDER BY timestamp" + temp = pd.read_sql_query(sql_c, conn) + + #normalize values + temp['value'] -= nominal + #temp['value'] -= 1 + + temp['timestamp'] = pd.to_datetime( Time(temp['timestamp'], format = "mjd").datetime ) + plot_data = ColumnDataSource(temp) + + p.line(x = "timestamp", y = "value", color = color, legend = legend, source = plot_data) + p.scatter(x = "timestamp", y = "value", color = color, legend = legend, source = plot_data) + +def add_basic_layout(p): + '''Add basic layout to certain plot + Parameters + ---------- + p : bokeh object + defines plot where line and scatter should be added + ''' + p.title.align = "left" + p.title.text_color = "#c85108" + p.title.text_font_size = "25px" + p.background_fill_color = "#efefef" + + p.xaxis.axis_label_text_font_size = "14pt" + p.xaxis.axis_label_text_color ='#2D353C' + p.yaxis.axis_label_text_font_size = "14pt" + p.yaxis.axis_label_text_color = '#2D353C' + + p.xaxis.major_tick_line_color = "firebrick" + p.xaxis.major_tick_line_width = 2 + p.xaxis.minor_tick_line_color = "#c85108" diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/power_tab.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/power_tab.py new file mode 100644 index 000000000..2c140af71 --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/plots/power_tab.py @@ -0,0 +1,248 @@ +import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf +from bokeh.plotting import 
figure +from bokeh.models import BoxAnnotation, LinearAxis, Range1d +from bokeh.embed import components +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource, HoverTool +from bokeh.layouts import column, row, WidgetBox + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def power_ice(conn, start, end): + #query data from database + start_str = str(Time(start).mjd) + end_str = str(Time(end).mjd) + + sql_c = "SELECT * FROM SE_ZIMIRICEA_IDLE WHERE start_time BETWEEN "+start_str+" AND "+end_str+" ORDER BY start_time" + _idle = pd.read_sql_query(sql_c, conn) + sql_c = "SELECT * FROM SE_ZIMIRICEA_HV_ON WHERE start_time BETWEEN "+start_str+" AND "+end_str+" ORDER BY start_time" + _hv = pd.read_sql_query(sql_c, conn) + + voltage = 30 + _idle['average'] *= voltage + _hv['average'] *= voltage + + _idle['start_time'] = pd.to_datetime( Time(_idle['start_time'], format = "mjd").datetime ) + _hv['start_time'] = pd.to_datetime( Time(_hv['start_time'], format = "mjd").datetime ) + + #set column data source + idle = ColumnDataSource(_idle) + hv = ColumnDataSource(_hv) + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 1120, \ + plot_height = 500, \ + y_range = [5,14], \ + x_axis_type = 'datetime', \ + output_backend = "webgl", \ + x_axis_label = 'Date', y_axis_label='Power (W)') + + p.grid.visible = True + p.title.text = "POWER ICE" + pf.add_basic_layout(p) + pf.add_limit_box(p, 6, 8, alpha = 0.1, color = "green") + + + # add a line renderer with legend and line thickness + scat1=p.scatter(x = "start_time", y = "average", color = 'orange', legend = "Power idle", source = idle) + scat2=p.scatter(x = "start_time", y = "average", color = 'red', legend = "Power hv on", source = hv) + p.line(x = "start_time", y = "average", color = 'orange', legend = "Power idle", source = idle) + p.line(x = "start_time", y = "average", color = 'red', legend = "Power hv on", source = hv) + + #generate error bars + err_xs_hv = [] + err_ys_hv = [] + err_xs_idle = [] + err_ys_idle = [] + + for index, item in _hv.iterrows(): + err_xs_hv.append((item['start_time'],item['start_time'])) + err_ys_hv.append((item['average'] - item['deviation'], item['average'] + item['deviation'])) + + for index, item in _idle.iterrows(): + err_xs_idle.append((item['start_time'],item['start_time'])) + err_ys_idle.append((item['average'] - item['deviation'], item['average'] + item['deviation'])) + # plot them + p.multi_line(err_xs_hv, err_ys_hv, color='red', legend='Power hv on') + p.multi_line(err_xs_idle, err_ys_idle, color='orange', legend='Power idle') + + #activate HoverTool for scatter plot + hover_tool = HoverTool( tooltips = + [ + ('count', '@data_points'), + ('mean', '@average'), + ('deviation', '@deviation'), + + ], mode='mouse', renderers=[scat1,scat2]) + + p.tools.append(hover_tool) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def power_fpea(conn, start, end): + + start_str = str(Time(start).mjd) + end_str = str(Time(end).mjd) + + sql_c = "SELECT * FROM SE_ZIMIRFPEA WHERE start_time BETWEEN "+start_str+" AND "+end_str+" ORDER BY start_time" + _fpea = pd.read_sql_query(sql_c, conn) + + voltage = 30 + _fpea['average'] *= voltage + + _fpea['start_time'] = pd.to_datetime( Time(_fpea['start_time'], format = "mjd").datetime ) + + #set column data source + fpea = ColumnDataSource(_fpea) + + # create a new plot with a title and 
axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 1120, \ + plot_height = 500, \ + y_range = [28.0, 28.5], \ + x_axis_type = 'datetime', \ + output_backend = "webgl", \ + x_axis_label = 'Date', y_axis_label='Power (W)') + + p.grid.visible = True + p.title.text = "POWER FPE" + pf.add_basic_layout(p) + + # add a line renderer with legend and line thickness + scat1 = p.scatter(x = "start_time", y = "average", color = 'orange', legend = "Power FPEA", source = fpea) + p.line(x = "start_time", y = "average", color = 'orange', legend = "Power FPEA", source = fpea) + + err_xs = [] + err_ys = [] + + for index, item in _fpea.iterrows(): + err_xs.append((item['start_time'], item['start_time'])) + err_ys.append((item['average'] - item['deviation'], item['average'] + item['deviation'])) + + # plot them + p.multi_line(err_xs, err_ys, color='orange', legend='Power FPEA') + + #activate HoverTool for scatter plot + hover_tool = HoverTool( tooltips = + [ + ('count', '@data_points'), + ('mean', '@average'), + ('deviation', '@deviation'), + + ], renderers = [scat1]) + p.tools.append(hover_tool) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def currents(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 500, + y_range = [0,1.1], + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label = 'Current (A)') + + p.grid.visible = True + p.title.text = "FPE & ICE Currents" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "ICE Current idle", "SE_ZIMIRICEA_IDLE", start, end, conn, color = "red") + b = pf.add_to_plot(p, "ICE Current HV on", "SE_ZIMIRICEA_HV_ON", start, end, conn, color = "orange") + c = pf.add_to_plot(p, "FPE Current", "SE_ZIMIRFPEA", start, end, conn, color = "brown") + + pf.add_hover_tool(p,[a,b,c]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + + +def power_plots(conn, start, end): + + + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + + + + +
+        <tr>
+        <th>Plotname</th>
+        <th>Mnemonic</th>
+        <th>Description</th>
+        </tr>
+        <tr>
+        <td>POWER ICE</td>
+        <td>SE_ZIMIRICEA * 30V (static)</td>
+        <td>Primary power consumption ICE side A - HV on and IDLE</td>
+        </tr>
+        <tr>
+        <td>POWER FPE</td>
+        <td>SE_ZIMIRFPEA * 30V (static)</td>
+        <td>Primary power consumption FPE side A</td>
+        </tr>
+        <tr>
+        <td>FPE & ICE Voltages/Currents</td>
+        <td>SE_ZIMIRFPEA<br>
+            SE_ZIMIRICEA<br>
+            *INPUT VOLTAGE* (missing)</td>
+        <td>Supply voltage and current ICE/FPE</td>
+        </tr>
+ + """, width=1100) + + plot1 = power_ice(conn, start, end) + plot2 = power_fpea(conn, start, end) + plot3 = currents(conn, start, end) + + layout = column(descr, plot1, plot2, plot3) + + #layout_volt = row(volt4, volt1_3) + tab = Panel(child = layout, title = "POWER") + + return tab diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/temperature_tab.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/temperature_tab.py new file mode 100644 index 000000000..bb95214b8 --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/plots/temperature_tab.py @@ -0,0 +1,348 @@ +#! /usr/bin/env python +"""Prepares plots for TEMPERATURE tab + + Module prepares plots for mnemonics below. Combines plots in a grid and + returns tab object. + + Plot 1: + IGDP_MIR_ICE_T1P_CRYO + IGDP_MIR_ICE_T2R_CRYO + IGDP_MIR_ICE_T3LW_CRYO + IGDP_MIR_ICE_T4SW_CRYO + IGDP_MIR_ICE_T5IMG_CRYO + IGDP_MIR_ICE_T6DECKCRYO + IGDP_MIR_ICE_T7IOC_CRYO + IGDP_MIR_ICE_FW_CRYO + IGDP_MIR_ICE_CCC_CRYO + IGDP_MIR_ICE_GW14_CRYO + IGDP_MIR_ICE_GW23_CRYO + IGDP_MIR_ICE_POMP_CRYO + IGDP_MIR_ICE_POMR_CRYO + IGDP_MIR_ICE_IFU_CRYO + IGDP_MIR_ICE_IMG_CRYO + + Plot 2: + ST_ZTC1MIRIA + ST_ZTC2MIRIA + IMIR_PDU_TEMP + IMIR_IC_SCE_ANA_TEMP1 + IMIR_SW_SCE_ANA_TEMP1 + IMIR_LW_SCE_ANA_TEMP1 + IMIR_IC_SCE_DIG_TEMP + IMIR_SW_SCE_DIG_TEMP + IMIR_LW_SCE_DIG_TEMP + + Plot 3: + IGDP_MIR_IC_DET_TEMP + IGDP_MIR_LW_DET_TEMP + IGDP_MIR_SW_DET_TEMP + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``dashborad.py``, e.g.: + + :: + from .plots.temperature_tab import temperature_plots + tab = temperature_plots(conn, start, end) + +Dependencies +------------ + User must provide database "miri_database.db" + +""" +import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf +from bokeh.plotting import figure +from bokeh.models import BoxAnnotation, LinearAxis, Range1d +from bokeh.embed import components +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource +from bokeh.layouts import column + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def cryo(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 1120, \ + plot_height = 700, \ + y_range = [5.8,6.4], \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label = 'Temperature (K)') + + p.grid.visible = True + p.title.text = "Cryo Temperatures" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "T1P", "IGDP_MIR_ICE_T1P_CRYO", start, end, conn, color = "brown") + b = pf.add_to_plot(p, "T2R", "IGDP_MIR_ICE_T2R_CRYO", start, end, conn, color = "burlywood") + c = pf.add_to_plot(p, "T3LW", "IGDP_MIR_ICE_T3LW_CRYO", start, end, conn, color = "cadetblue") + d = pf.add_to_plot(p, "T4SW", "IGDP_MIR_ICE_T4SW_CRYO", start, end, conn, color = "chartreuse") + e = pf.add_to_plot(p, "T5IMG", "IGDP_MIR_ICE_T5IMG_CRYO", start, end, conn, color = "chocolate") + f = pf.add_to_plot(p, "T6DECK", "IGDP_MIR_ICE_T6DECKCRYO", start, end, conn, color = "coral") + g = pf.add_to_plot(p, "T7IOC", "IGDP_MIR_ICE_T7IOC_CRYO", start, end, conn, color = "darkorange") + h = pf.add_to_plot(p, "FW", "IGDP_MIR_ICE_FW_CRYO", start, end, conn, color = "crimson") + i = pf.add_to_plot(p, "CCC", "IGDP_MIR_ICE_CCC_CRYO", start, end, conn, color = "cyan") + j = pf.add_to_plot(p, "GW14", "IGDP_MIR_ICE_GW14_CRYO", start, end, conn, color = "darkblue") + k = pf.add_to_plot(p, "GW23", "IGDP_MIR_ICE_GW23_CRYO", start, end, conn, color = "darkgreen") + l = pf.add_to_plot(p, "POMP", "IGDP_MIR_ICE_POMP_CRYO", start, end, conn, color = "darkmagenta") + m = pf.add_to_plot(p, "POMR", "IGDP_MIR_ICE_POMR_CRYO", start, end, conn, color = "darkcyan") + n = pf.add_to_plot(p, "IFU", "IGDP_MIR_ICE_IFU_CRYO", start, end, conn, color = "cornflowerblue") + o = pf.add_to_plot(p, "IMG", "IGDP_MIR_ICE_IMG_CRYO", start, end, conn, color = "orange") + + pf.add_hover_tool(p,[a,b,c,d,e,f,g,h,i,j,k,l,m,n,o]) + + p.legend.location = "bottom_right" + p.legend.orientation = "horizontal" + p.legend.click_policy = "hide" + + return p + +def temp(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + start_str = str(Time(start).mjd) + end_str = str(Time(end).mjd) + + sql_c = "SELECT * FROM IGDP_MIR_ICE_INTER_TEMP WHERE start_time BETWEEN "+start_str+" AND "+end_str+" ORDER BY start_time" + temp = pd.read_sql_query(sql_c, conn) + + temp['average']+= 273.15 + reg = pd.DataFrame({'reg' : pf.pol_regression(temp['start_time'], temp['average'],3)}) + temp = pd.concat([temp, reg], axis=1) + + temp['start_time'] = pd.to_datetime( Time(temp['start_time'], format = "mjd").datetime ) + plot_data = ColumnDataSource(temp) + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 1120, \ + plot_height = 700, \ + y_range = [275,295], \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label = 'Temperature (K)') + + p.grid.visible = True + p.title.text = "IEC Temperatures" + pf.add_basic_layout(p) + + p.line(x = "start_time", y = "average", color = "brown", legend = "ICE Internal", source = plot_data) + p.scatter(x = "start_time", y = "average", color = "brown", legend = "ICE Internal", source = plot_data) + + a = pf.add_to_plot(p, "ICE IEC A", "ST_ZTC1MIRIA", start, end, conn, color = "burlywood") + b = pf.add_to_plot(p, "FPE IEC A", "ST_ZTC2MIRIA", start, end, conn, color = "cadetblue") + j = pf.add_to_plot(p, "ICE IEC B", "ST_ZTC1MIRIB", start, end, conn, color = "blue") + k = pf.add_to_plot(p, "FPE IEC B.", "ST_ZTC2MIRIB", start, end, conn, color = "brown") + c = pf.add_to_plot(p, "FPE PDU", "IMIR_PDU_TEMP", start, end, conn, color = "chartreuse") + d = pf.add_to_plot(p, "ANA IC", "IMIR_IC_SCE_ANA_TEMP1", start, end, conn, color = "chocolate") + e = pf.add_to_plot(p, "ANA SW", "IMIR_SW_SCE_ANA_TEMP1", start, end, conn, color = "coral") + f = pf.add_to_plot(p, "ANA LW", "IMIR_LW_SCE_ANA_TEMP1", start, end, conn, color = "darkorange") + g = pf.add_to_plot(p, "DIG IC", "IMIR_IC_SCE_DIG_TEMP", start, end, conn, color = "crimson") + h = pf.add_to_plot(p, "DIG SW", "IMIR_SW_SCE_DIG_TEMP", start, end, conn, color = "cyan") + i = pf.add_to_plot(p, "DIG LW", "IMIR_LW_SCE_DIG_TEMP", start, end, conn, color = "darkblue") + + pf.add_hover_tool(p,[a,b,c,d,e,f,g,h,i,j,k]) + + p.legend.location = "bottom_right" + p.legend.orientation = "horizontal" + p.legend.click_policy = "hide" + + return p + +def det(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 1120, \ + plot_height = 400, \ + y_range = [6.395,6.41], \ + x_axis_type = 'datetime', \ + output_backend="webgl", \ + x_axis_label = 'Date', y_axis_label = 'Temperature (K)') + + p.grid.visible = True + p.title.text = "Detector Temperature" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "Det. Temp. IC", "IGDP_MIR_IC_DET_TEMP", start, end, conn, color = "red") + b = pf.add_to_plot(p, "Det. Temp. LW", "IGDP_MIR_LW_DET_TEMP", start, end, conn, color = "green") + c = pf.add_to_plot(p, "Det. Temp. 
SW", "IGDP_MIR_SW_DET_TEMP", start, end, conn, color = "blue") + + pf.add_hover_tool(p,[a,b,c]) + + p.legend.location = "bottom_right" + p.legend.orientation = "horizontal" + p.legend.click_policy = "hide" + + return p + +def temperature_plots(conn, start, end): + '''Combines plots to a tab + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : tab object + used by dashboard.py to set up dashboard + ''' + + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + + + + +
+        <tr>
+        <th>Plotname</th>
+        <th>Mnemonic</th>
+        <th>Description</th>
+        </tr>
+        <tr>
+        <td>CRYO Temperatures</td>
+        <td>IGDP_MIR_ICE_T1P_CRYO<br>
+            IGDP_MIR_ICE_T2R_CRYO<br>
+            IGDP_MIR_ICE_T3LW_CRYO<br>
+            IGDP_MIR_ICE_T4SW_CRYO<br>
+            IGDP_MIR_ICE_T5IMG_CRYO<br>
+            IGDP_MIR_ICE_T6DECKCRYO<br>
+            IGDP_MIR_ICE_T7IOC_CRYO<br>
+            IGDP_MIR_ICE_FW_CRYO<br>
+            IGDP_MIR_ICE_CCC_CRYO<br>
+            IGDP_MIR_ICE_GW14_CRYO<br>
+            IGDP_MIR_ICE_GW23_CRYO<br>
+            IGDP_MIR_ICE_POMP_CRYO<br>
+            IGDP_MIR_ICE_POMR_CRYO<br>
+            IGDP_MIR_ICE_IFU_CRYO<br>
+            IGDP_MIR_ICE_IMG_CRYO</td>
+        <td>Deck Nominal Temperature (T1)<br>
+            Deck Redundant Temperature (T2)<br>
+            LW FPM I/F Temperature (T3)<br>
+            SW FPM I/F Temperature (T4)<br>
+            IM FPM I/F Temperature (T5)<br>
+            A-B Strut Apex Temperature (T6)<br>
+            IOC Temperature (T7)<br>
+            FWA Temperature<br>
+            CCC Temperature<br>
+            DGA-A (GW14) Temperature<br>
+            DGA-B (GW23) Temperature<br>
+            POMH Nominal Temperature<br>
+            POMH Redundant Temperature<br>
+            MRS (CF) Cal. Source Temperature<br>
+            Imager (CI) Cal. Source Temperature</td>
+        </tr>
+        <tr>
+        <td>IEC Temperatures</td>
+        <td>ST_ZTC1MIRIA<br>
+            ST_ZTC2MIRIA<br>
+            ST_ZTC1MIRIB<br>
+            ST_ZTC2MIRIB<br>
+            IGDP_MIR_ICE_INTER_TEMP<br>
+            IMIR_PDU_TEMP<br>
+            IMIR_IC_SCE_ANA_TEMP1<br>
+            IMIR_SW_SCE_ANA_TEMP1<br>
+            IMIR_LW_SCE_ANA_TEMP1<br>
+            IMIR_IC_SCE_DIG_TEMP<br>
+            IMIR_SW_SCE_DIG_TEMP<br>
+            IMIR_LW_SCE_DIG_TEMP</td>
+        <td>ICE IEC Panel Temp A<br>
+            FPE IEC Panel Temp A<br>
+            ICE IEC Panel Temp B<br>
+            FPE IEC Panel Temp B<br>
+            ICE internal Temperature<br>
+            FPE PDU Temperature<br>
+            FPE SCE Analogue board Temperature IC<br>
+            FPE SCE Analogue board Temperature SW<br>
+            FPE SCE Analogue board Temperature LW<br>
+            FPE SCE Digital board Temperature IC<br>
+            FPE SCE Digital board Temperature SW<br>
+            FPE SCE Digital board Temperature LW</td>
+        </tr>
+        <tr>
+        <td>Detector Temperatures</td>
+        <td>IGDP_MIR_IC_DET_TEMP<br>
+            IGDP_MIR_LW_DET_TEMP<br>
+            IGDP_MIR_SW_DET_TEMP</td>
+        <td>Detector Temperature (IC, SW & LW)</td>
+        </tr>
+ + """, width=1100) + + + plot1 = cryo(conn, start, end) + plot2 = temp(conn, start, end) + plot3 = det(conn, start, end) + + layout = column(descr, plot1, plot2, plot3) + tab = Panel(child = layout, title = "TEMPERATURE") + + return tab diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/wheel_ratio_tab.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/wheel_ratio_tab.py new file mode 100644 index 000000000..049ba01c4 --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/plots/wheel_ratio_tab.py @@ -0,0 +1,320 @@ +#! /usr/bin/env python +"""Prepares plots for FPE VOLTAGE tab + + Module prepares plots for mnemonics below. Combines plots in a grid and + returns tab object. + + Plot 1: + IMIR_HK_FW_POS_RATIO_FND + IMIR_HK_FW_POS_RATIO_OPAQUE + IMIR_HK_FW_POS_RATIO_F1000W + IMIR_HK_FW_POS_RATIO_F1130W + IMIR_HK_FW_POS_RATIO_F1280W + IMIR_HK_FW_POS_RATIO_P750L + IMIR_HK_FW_POS_RATIO_F1500W + IMIR_HK_FW_POS_RATIO_F1800W + IMIR_HK_FW_POS_RATIO_F2100W + IMIR_HK_FW_POS_RATIO_F560W + IMIR_HK_FW_POS_RATIO_FLENS + IMIR_HK_FW_POS_RATIO_F2300C + IMIR_HK_FW_POS_RATIO_F770W + IMIR_HK_FW_POS_RATIO_F1550C + IMIR_HK_FW_POS_RATIO_F2550W + IMIR_HK_FW_POS_RATIO_F1140C + IMIR_HK_FW_POS_RATIO_F2550WR + IMIR_HK_FW_POS_RATIO_F1065C + + Plot 2: + IMIR_HK_GW14_POS_RATIO_SHORT + IMIR_HK_GW14_POS_RATIO_MEDIUM + IMIR_HK_GW14_POS_RATIO_LONG + + Plot 3: + IMIR_HK_GW23_POS_RATIO_SHORT + IMIR_HK_GW23_POS_RATIO_MEDIUM + IMIR_HK_GW23_POS_RATIO_LONG + + Plot 4: + IMIR_HK_CCC_POS_RATIO_LOCKED + IMIR_HK_CCC_POS_RATIO_OPEN + IMIR_HK_CCC_POS_RATIO_CLOSED + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``dashboard.py``, e.g.: + + :: + from .plots.wheel_ratio_tab import wheel_plots + tab = wheel_plots(conn, start, end) + +Dependencies +------------ + User must provide database "miri_database.db" + +""" + +import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf +import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn +from bokeh.plotting import figure +from bokeh.models import BoxAnnotation, LinearAxis, Range1d +from bokeh.embed import components +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource +from bokeh.layouts import column, row, WidgetBox + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def gw14(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 1120, \ + plot_height = 500, \ + y_range = [-2,2], \ + x_axis_type = 'datetime', \ + x_axis_label = 'Date', y_axis_label = 'ratio (normalized)') + + p.grid.visible = True + p.title.text = "DGA-A Ratio" + p.title.align = "left" + pf.add_basic_layout(p) + + pf.add_to_wplot(p, "SHORT", "IMIR_HK_GW14_POS_RATIO_SHORT", start, end, conn, mn.gw14_nominals['SHORT'], color = "green") + pf.add_to_wplot(p, "MEDIUM", "IMIR_HK_GW14_POS_RATIO_MEDIUM", start, end, conn, mn.gw14_nominals['MEDIUM'], color = "red") + pf.add_to_wplot(p, "LONG", "IMIR_HK_GW14_POS_RATIO_LONG", start, end, conn, mn.gw14_nominals['LONG'], color = "blue") + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + + +def gw23(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 1120, \ + plot_height = 500, \ + y_range = [-2,2], \ + x_axis_type = 'datetime', \ + x_axis_label = 'Date', y_axis_label = 'ratio (normalized)') + + p.grid.visible = True + p.title.text = "DGA-B Ratio" + p.title.align = "left" + pf.add_basic_layout(p) + + pf.add_to_wplot(p, "SHORT", "IMIR_HK_GW23_POS_RATIO_SHORT", start, end, conn, mn.gw23_nominals['SHORT'], color = "green") + pf.add_to_wplot(p, "MEDIUM", "IMIR_HK_GW23_POS_RATIO_MEDIUM", start, end, conn, mn.gw23_nominals['MEDIUM'], color = "red") + pf.add_to_wplot(p, "LONG", "IMIR_HK_GW23_POS_RATIO_LONG", start, end, conn, mn.gw23_nominals['LONG'], color = "blue") + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + + +def ccc(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 1120, \ + plot_height = 500, \ + y_range = [-2,2], \ + x_axis_type = 'datetime', \ + x_axis_label = 'Date', y_axis_label = 'ratio (normalized)') + + p.grid.visible = True + p.title.text = "CCC Ratio" + pf.add_basic_layout(p) + + #add_to_wplot(p, "LOCKED", "IMIR_HK_CCC_POS_RATIO_LOCKED", start, end, conn, mn.ccc_nominals['LOCKED'], color = "green") + pf.add_to_wplot(p, "OPEN", "IMIR_HK_CCC_POS_RATIO_OPEN", start, end, conn, mn.ccc_nominals['OPEN'], color = "red") + pf.add_to_wplot(p, "CLOSED", "IMIR_HK_CCC_POS_RATIO_CLOSED", start, end, conn, mn.ccc_nominals['CLOSED'], color = "blue") + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + + +def fw(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 1120, \ + plot_height = 800, \ + y_range = [-6,4], \ + x_axis_type = 'datetime', \ + x_axis_label = 'Date', y_axis_label='ratio (normalized)') + + p.grid.visible = True + p.title.text = "Filterwheel Ratio" + pf.add_basic_layout(p) + + pf.add_to_wplot(p, "FND", "IMIR_HK_FW_POS_RATIO_FND", start, end, conn, mn.fw_nominals['FND'], color = "green") + pf.add_to_wplot(p, "OPAQUE", "IMIR_HK_FW_POS_RATIO_OPAQUE", start, end, conn, mn.fw_nominals['OPAQUE'], color = "red") + pf.add_to_wplot(p, "F1000W", "IMIR_HK_FW_POS_RATIO_F1000W", start, end, conn, mn.fw_nominals['F1000W'], color = "blue") + pf.add_to_wplot(p, "F1130W", "IMIR_HK_FW_POS_RATIO_F1130W", start, end, conn, mn.fw_nominals['F1130W'], color = "orange") + pf.add_to_wplot(p, "F1280W", "IMIR_HK_FW_POS_RATIO_F1280W", start, end, conn, mn.fw_nominals['F1280W'], color = "firebrick") + pf.add_to_wplot(p, "P750L", "IMIR_HK_FW_POS_RATIO_P750L", start, end, conn, mn.fw_nominals['P750L'], color = "cyan") + pf.add_to_wplot(p, "F1500W", "IMIR_HK_FW_POS_RATIO_F1500W", start, end, conn, mn.fw_nominals['F1500W'], color = "magenta") + pf.add_to_wplot(p, "F1800W", "IMIR_HK_FW_POS_RATIO_F1800W", start, end, conn, mn.fw_nominals['F1800W'], color = "burlywood") + pf.add_to_wplot(p, "F2100W", "IMIR_HK_FW_POS_RATIO_F2100W", start, end, conn, mn.fw_nominals['F2100W'], color = "cadetblue") + pf.add_to_wplot(p, "F560W", "IMIR_HK_FW_POS_RATIO_F560W", start, end, conn, mn.fw_nominals['F560W'], color = "chartreuse") + pf.add_to_wplot(p, "FLENS", "IMIR_HK_FW_POS_RATIO_FLENS", start, end, conn, mn.fw_nominals['FLENS'], color = "brown") + pf.add_to_wplot(p, "F2300C", "IMIR_HK_FW_POS_RATIO_F2300C", start, end, conn, mn.fw_nominals['F2300C'], color = "chocolate") + pf.add_to_wplot(p, "F770W", "IMIR_HK_FW_POS_RATIO_F770W", start, end, conn, mn.fw_nominals['F770W'], color = "darkorange") + pf.add_to_wplot(p, "F1550C", "IMIR_HK_FW_POS_RATIO_F1550C", start, end, conn, mn.fw_nominals['F1550C'], color = "darkgreen") + pf.add_to_wplot(p, "F2550W", "IMIR_HK_FW_POS_RATIO_F2550W", start, end, conn, mn.fw_nominals['F2550W'], color = "darkcyan") + pf.add_to_wplot(p, 
"F1140C", "IMIR_HK_FW_POS_RATIO_F1140C", start, end, conn, mn.fw_nominals['F1140C'], color = "darkmagenta") + pf.add_to_wplot(p, "F2550WR", "IMIR_HK_FW_POS_RATIO_F2550WR", start, end, conn, mn.fw_nominals['F2550WR'], color = "crimson") + pf.add_to_wplot(p, "F1065C", "IMIR_HK_FW_POS_RATIO_F1065C", start, end, conn, mn.fw_nominals['F1065C'], color = "cornflowerblue") + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + + +def wheel_ratios(conn, start, end): + '''Combine plots to a tab + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : tab object + used by dashboard.py to set up dashboard + ''' + + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+        <tr>
+        <th>Plotname</th>
+        <th>Mnemonic</th>
+        <th>Description</th>
+        </tr>
+        <tr>
+        <td>Filterwheel Ratio</td>
+        <td>IMIR_HK_FW_POS_RATIO<br>
+            IMIR_HK_FW_CUR_POS</td>
+        <td>FW position sensor ratio (normalised) and commanded position</td>
+        </tr>
+        <tr>
+        <td>DGA-A Ratio</td>
+        <td>IMIR_HK_GW14_POS_RATIO<br>
+            IMIR_HK_GW14_CUR_POS</td>
+        <td>DGA-A position sensor ratio (normalised) and commanded position</td>
+        </tr>
+        <tr>
+        <td>DGA-B Ratio</td>
+        <td>IMIR_HK_GW23_POS_RATIO<br>
+            IMIR_HK_GW23_CUR_POS</td>
+        <td>DGA-B position sensor ratio (normalised) and commanded position</td>
+        </tr>
+        <tr>
+        <td>CCC Ratio</td>
+        <td>IMIR_HK_CCC_POS_RATIO<br>
+            IMIR_HK_CCC_CUR_POS</td>
+        <td>Contamination Control Cover position sensor ratio (normalised) and commanded position</td>
+        </tr>
+ + """, width=1100) + + plot1 = fw(conn, start, end) + plot2 = gw14(conn, start, end) + plot3 = gw23(conn, start, end) + plot4 = ccc(conn, start, end) + + layout = column(descr, plot1, plot2, plot3, plot4) + tab = Panel(child = layout, title = "WHEEL RATIO") + + return tab diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/utils/condition.py b/jwql/instrument_monitors/miri_monitors/data_trending/utils/condition.py new file mode 100755 index 000000000..cb4620c76 --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/utils/condition.py @@ -0,0 +1,349 @@ +#! /usr/bin/env python +"""Module generates conditions over one or more mnemonics + +The modules purpose is to return True/False for any times by reference of +certain conditions. If for instance the condition "x>1" over a defined period of +time is needed, the module looks for all elements where the condition applies +and where it does not apply. This generates two lists, which contain the "start" +and "end" times of the condition. +A futher function combines the start- and endtimes to time-tuples between which +the condition is known as TRUE. A "state" function returns True/False for an +exact time attribute, whereby the condition is represented in binary form. + +Authors +------- + - Daniel Kühbacher + +Use +--- + This module is not prepared for standalone use. + + For use in programm set condition up like below: + + import the module as follow: + >>>import condition as cond + + generate list with required conditions: + >>>con_set = [ cond.equal(m.mnemonic('IMIR_HK_POM_LOOP'),'OFF'), + cond.smaller(m.mnemonic('IMIR_HK_ICE_SEC_VOLT1'),1), + cond.greater(m.mnemonic('SE_ZIMIRICEA'),0.2)] + + generate object of condition with the con_set as attribute: + >>>condition_object=cond.condition(con_set) + + Now the condition_object can return a True/False statement wheather + the time given as attribut meets the conditions: + + >>>if condition.state(float(element['Primary Time'])): + -> True when condition for the given time applies + -> False when condition for the given time is not applicable + +Dependencies +------------ + no external files needed + +References +---------- + +Notes +----- + +""" + + +class condition: + """Class to hold several subconditions""" + + #contains list of representative time pairs for each subcondition + cond_time_pairs = [] + #state of the condition + __state = False + + #initializes condition through condition set + def __init__(self, cond_set): + """Initialize object with set of conditions + Parameters + ---------- + cond_set : list + list contains subconditions objects + """ + self.cond_set = cond_set + + #destructor -> take care that all time_pairs are deleted! 
+ def __del__(self): + """Delete object - destructor method""" + del self.cond_time_pairs[:] + + #prints all stored time pairs (for developement only) + def print_times(self): + """Print conditions time pairs on command line (developement)""" + print('Available time pairs:') + for times in self.cond_time_pairs: + print('list: '+str(times)) + + #returns a interval if time is anywhere in between + def get_interval(self, time): + """Returns time interval if availlable, where "time" is in between + Parameters + ---------- + time : float + given time attribute + Return + ------ + time_pair : tuple + pair of start_time and end_time where time is in between + """ + end_time = 10000000 + start_time = 0 + + #do for every condition + for cond in self.cond_time_pairs: + #do for every time pair in condition + for pair in cond: + if (time > pair[0]) and (time < pair[1]): + if (end_time > pair[1]) and (start_time < pair[0]): + start_time = pair[0] + end_time = pair[1] + break + else: + break + + if (end_time != 10000000) and (start_time != 0): + return [start_time, end_time] + else: + return None + + + #generates time pairs out of start and end times + def generate_time_pairs(start_times, end_times): + """Forms time pairs out of start times and end times + Parameters + ---------- + start_times : list + contains all times where a condition applies + end_times : list + contains all times where the condition does not apply + Return + ------ + time_pair : list + list of touples with start and end time + """ + #internal use only + time_pair: float = [] + + #when the conditons doesn´t apply anyway + if not start_times: + time_pair.append((0,0)) + + #check if the condition indicates an open time range + elif not end_times: + time_pair.append((start_times[0], 0)) + + #generate time pairs + #for each start time a higher or equal end time is searched for + #these times form am touple which is appended to time_pair : list + else: + time_hook = 0 + last_start_time = 0 + + for start in list(sorted(set(start_times))): + + if(start > time_hook): + for end in list(sorted(set(end_times))): + + if end > start: + + time_pair.append((start, end)) + time_hook = end + break + + if list(sorted(set(start_times)))[-1] > list(sorted(set(end_times)))[-1]: + time_pair.append((list(sorted(set(end_times)))[-1], 0)) + + return(time_pair) + + + #returns state of the condition at a given time + #if state(given time)==True -> condition is true + #if state(given time)==False -> condition is false + def state(self, time): + """Checks whether condition is true of false at a given time + Parameters + ---------- + time : float + input time for condition query + Return + ------ + state : bool + True/False statement whether the condition applies or not + """ + #checks condition for every sub condition in condition set (subconditions) + + state = self.__state + + for cond in self.cond_time_pairs: + + if self.__check_subcondition(cond, time): + state = True + else: + state = False + break + + return state + + + def __check_subcondition(self, cond, time): + + #if there are no values availlable + if cond[0][0] == 0: + return False + + for time_pair in cond: + #if just a positive time is availlable, return true + if (time_pair[1] == 0) and (time > time_pair[0]): + + return True + + #if given time occurs between a time pair, return true + elif (time_pair[0]) <= time and (time < time_pair[1]): + + return True + + else: + pass + + +class equal(condition): + """Class to hold single "is equal" subcondition""" + + #add attributes to function - start 
function "cond_time_pairs()" + def __init__(self, mnemonic, value): + """Initializes subconditon + Parameters + ---------- + mnemonic : astropy table + includes mnemomic engineering data and corresponding primary time + value : str + coparison value for equal statement + """ + self.mnemonic = mnemonic + self.value = value + condition.cond_time_pairs.append((self.cond_true_time())) + + + #generates a list of time-touples (start_time, end_time) that mark the beginning and end of + #wheather the condition is true or not + def cond_true_time(self): + """Filters all values that are equal to a given comparison value + if equal: Primary time -> temp_start + if not equal: Primary time -> temp_end + Return + ------ + time_p : list + list of touples with start and end time + """ + temp_start = [] + temp_end = [] + + for key in self.mnemonic: + + #find all times whoses Raw values equal the given value + if key['value'] == self.value: + temp_start.append(key["time"]) + + #find all end values + else: + temp_end.append(key["time"]) + + time_p = condition.generate_time_pairs(temp_start, temp_end) + return time_p + + +class greater(condition): + """Class to hold single "greater than" subcondition""" + + #add attributes to function - start function "cond_time_pairs()" + def __init__(self, mnemonic, value): + """Initializes subconditon + Parameters + ---------- + mnemonic : astropy table + includes mnemomic engineering data and corresponding primary time + value : str + coparison value for equal statement + """ + self.mnemonic= mnemonic + self.value=value + condition.cond_time_pairs.append((self.cond_true_time())) + + def cond_true_time(self): + """Filters all values that are greater than a given comparison value + if equal: Primary time -> temp_start + if not equal: Primary time -> temp_end + Return + ------ + time_p : list + list of touples with start and end time + """ + temp_start: float = [] + temp_end: float = [] + + for key in self.mnemonic: + + #find all times whose Raw values are grater than the given value + if float(key['value']) > self.value: + temp_start.append(key["time"]) + + #find all end values + else: + temp_end.append(key["time"]) + + time_p = condition.generate_time_pairs(temp_start, temp_end) + return time_p + + +class smaller(condition): + """Class to hold single "greater than" subcondition""" + + #add attributes to function - start function "cond_time_pairs()" + def __init__(self, mnemonic, value): + """Initializes subconditon + Parameters + ---------- + mnemonic : astropy table + includes mnemomic engineering data and corresponding primary time + value : str + coparison value for equal statement + """ + self.mnemonic=mnemonic + self.value=value + condition.cond_time_pairs.append((self.cond_true_time())) + + def cond_true_time(self): + """Filters all values that are greater than a given comparison value + if equal: Primary time -> temp_start + if not equal: Primary time -> temp_end + Return + ------ + time_p : list + list of touples with start and end time + """ + temp_start: float = [] + temp_end: float = [] + + for key in self.mnemonic: + + #find all times whose Raw values are grater than the given value + if float(key['value']) < self.value: + temp_start.append(key["time"]) + + #find all end values + else: + temp_end.append(key["time"]) + + time_p = condition.generate_time_pairs(temp_start, temp_end) + return time_p + + +if __name__ =='__main__': + pass diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/utils/csv_to_AstropyTable.py 
b/jwql/instrument_monitors/miri_monitors/data_trending/utils/csv_to_AstropyTable.py new file mode 100755 index 000000000..dbaea27f6 --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/utils/csv_to_AstropyTable.py @@ -0,0 +1,151 @@ +#! /usr/bin/env python +"""Module for importing and sorting mnemonics + +This module imports a whole set of mnemonics from a .CSV sheet and converts it +to an astropy table. In a second step the table is sorted by its mnemoncis +and for each mnemmonic another astropy table with reduced content is created. +The last step is to append the data (time and engineering value) with its +mnemonic identifier as key to a dictionary. + +Authors +------- + - Daniel Kühbacher + +Use +--- + + +Dependencies +------------ + mnemonics.py -> includes a list of mnemonics to be evaluated + +References +---------- + +Notes +----- + +""" +from astropy.table import Table +from astropy.time import Time +import warnings +import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn + + +class mnemonics: + """class to hold a set of mnemonics""" + + __mnemonic_dict = {} + + def __init__(self, import_path): + """main function of this class + Parameters + ---------- + import_path : str + defines file to import (csv sheet) + """ + imported_data = self.import_CSV(import_path) + length = len(imported_data) + + print('{} was imported - {} lines'.format(import_path, length)) + + #look for every mnmonic given in mnemonicy.py + for mnemonic_name in mn.mnemonic_set_base: + temp = self.sort_mnemonic(mnemonic_name, imported_data) + #append temp to dict with related mnemonic + if temp != None: + self.__mnemonic_dict.update({mnemonic_name:temp}) + else: + warnings.warn("fatal error") + + + def import_CSV(self, path): + """imports csv sheet and converts it to AstropyTable + Parameters + ---------- + path : str + defines path to file to import + Return + ------ + imported_data : AstropyTable + container for imported data + """ + #read data from given *CSV file + imported_data=Table.read(path, format='ascii.basic', delimiter=',') + return imported_data + + + #returns table of single mnemonic + def mnemonic(self, name): + """Returns table of one single mnemonic + Parameters + ---------- + name : str + name of mnemonic + Return + ------ + __mnemonic_dict[name] : AstropyTable + corresponding table to mnemonic name + """ + try: + return self.__mnemonic_dict[name] + except KeyError: + print('{} not in list'.format(name)) + + + #looks for given mnemonic in given table + #returns list containing astropy tables with sorted mnemonics and engineering values + #adds useful meta data to Table + def sort_mnemonic(self, mnemonic, table): + """Looks for all values in table with identifier "mnemonic" + Converts time string to mjd format + Parameters + ---------- + mnemonic : str + identifies which mnemonic to look for + table : AstropyTable + table that stores mnemonics and data + Return + ------ + mnemonic_table : AstropyTable + stores all data associated with identifier "mnemonic" + """ + + temp1: float = [] + temp2 = [] + + #appends present mnemonic data to temp arrays temp1 and temp2 + for item in table: + try: + if item['Telemetry Mnemonic'] == mnemonic: + #convert time string to mjd format + temp = item['Secondary Time'].replace('/','-').replace(' ','T') + t = Time(temp, format='isot') + + temp1.append(t.mjd) + temp2.append(item['EU Value']) + except KeyError: + warnings.warn("{} is not in mnemonic table".format(mnemonic)) + + description = ('time','value') + data = [temp1, 
temp2] + + #add some meta data + if len(temp1) > 0: + date_start = temp1[0] + date_end = temp1[len(temp1)-1] + info = {'start':date_start, 'end':date_end} + else: + info = {"n":"n"} + + #add name of mnemonic to meta data of list + info['mnemonic'] = mnemonic + info['len'] = len(temp1) + + #table to return + mnemonic_table = Table(data, names = description, \ + dtype = ('f8','str'), meta = info) + return mnemonic_table + +if __name__ =='__main__': + pass diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/utils/mnemonics.py b/jwql/instrument_monitors/miri_monitors/data_trending/utils/mnemonics.py new file mode 100755 index 000000000..461dab289 --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/utils/mnemonics.py @@ -0,0 +1,450 @@ +"""Module lists all neccessary mnemonics for MIRI data trending + +The module includes several lists to import to MIRI data trending monitor program. +The lists are used for data aquisation and to set up the initial database. + +Authors +------- + - Daniel Kühbacher + +Use +--- + import mnemoncis as mn + +Dependencies +------------ + further information to included mnemonics: ############### + +References +---------- + +Notes +----- + +""" + +#all mnemonic used for condition 1 (see: draft) +#"SE_ZBUSVLT", +mnemonic_cond_1 = [ +"SE_ZIMIRICEA", + +"IMIR_HK_ICE_SEC_VOLT4", +"IGDP_MIR_ICE_INTER_TEMP", + +"ST_ZTC1MIRIA", +"ST_ZTC1MIRIB", + +"IGDP_MIR_ICE_T1P_CRYO", +"IGDP_MIR_ICE_T2R_CRYO", +"IGDP_MIR_ICE_T3LW_CRYO", +"IGDP_MIR_ICE_T4SW_CRYO", +"IGDP_MIR_ICE_T5IMG_CRYO", +"IGDP_MIR_ICE_T6DECKCRYO", +"IGDP_MIR_ICE_T7IOC_CRYO", +"IGDP_MIR_ICE_FW_CRYO", +"IGDP_MIR_ICE_CCC_CRYO", +"IGDP_MIR_ICE_GW14_CRYO", +"IGDP_MIR_ICE_GW23_CRYO", +"IGDP_MIR_ICE_POMP_CRYO", +"IGDP_MIR_ICE_POMR_CRYO", +"IGDP_MIR_ICE_IFU_CRYO", +"IGDP_MIR_ICE_IMG_CRYO"] + +#all mnemonics used for condition 2 (see: draft) +mnemonic_cond_2=[ +"SE_ZIMIRFPEA", + +"IMIR_PDU_V_DIG_5V", +"IMIR_PDU_I_DIG_5V", +"IMIR_PDU_V_ANA_5V", +"IMIR_PDU_I_ANA_5V", + +"IMIR_PDU_V_ANA_N5V", +"IMIR_PDU_I_ANA_N5V", + +"IMIR_PDU_V_ANA_7V", +"IMIR_PDU_I_ANA_7V", + +"IMIR_PDU_V_ANA_N7V", +"IMIR_PDU_I_ANA_N7V", + +"IMIR_SPW_V_DIG_2R5V", +"IMIR_PDU_V_REF_2R5V", + +"IGDP_MIR_IC_V_VDETCOM", +"IGDP_MIR_SW_V_VDETCOM", +"IGDP_MIR_LW_V_VDETCOM", + +"IGDP_MIR_IC_V_VSSOUT", +"IGDP_MIR_SW_V_VSSOUT", +"IGDP_MIR_LW_V_VSSOUT", +"IGDP_MIR_IC_V_VRSTOFF", + +"IGDP_MIR_SW_V_VRSTOFF", +"IGDP_MIR_LW_V_VRSTOFF", + +"IGDP_MIR_IC_V_VP", +"IGDP_MIR_SW_V_VP", +"IGDP_MIR_LW_V_VP", + +"IGDP_MIR_IC_V_VDDUC", +"IGDP_MIR_SW_V_VDDUC", +"IGDP_MIR_LW_V_VDDUC", + +"IMIR_PDU_TEMP", + +"ST_ZTC2MIRIA", +"ST_ZTC2MIRIB", + +"IMIR_IC_SCE_ANA_TEMP1", +"IMIR_SW_SCE_ANA_TEMP1", +"IMIR_LW_SCE_ANA_TEMP1", + +"IMIR_IC_SCE_DIG_TEMP", +"IMIR_SW_SCE_DIG_TEMP", +"IMIR_LW_SCE_DIG_TEMP", + +"IGDP_MIR_IC_DET_TEMP", +"IGDP_MIR_LW_DET_TEMP", +"IGDP_MIR_SW_DET_TEMP"] + +#mnemonics for 15 min evaluation +mnemonic_set_15min = mnemonic_cond_1 + mnemonic_cond_2 + +#ICE secondary voltages -> apply to condition3 +mnemonic_cond_3 = [ +"IMIR_HK_ICE_SEC_VOLT1", +"IMIR_HK_ICE_SEC_VOLT2", +"IMIR_HK_ICE_SEC_VOLT3", +"IMIR_HK_ICE_SEC_VOLT4", +"SE_ZIMIRICEA"] + +#filter weel positions +fw_positions = [ +"FND", +"OPAQUE", +"F1000W", +"F1130W", +"F1280W", +"P750L", +"F1500W", +"F1800W", +"F2100W", +"F560W", +"FLENS", +"F2300C", +"F770W", +"F1550C", +"F2550W", +"F1140C", +"F2550WR", +"F1065C"] + +#grating weel positions +gw_positions = [ +"SHORT", +"MEDIUM", +"LONG"] + +#contamination control clap positions +ccc_positions = [ +"LOCKED", +"OPEN", +"CLOSED"] + 
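+#Illustrative note (added for clarity; not part of the original module):
+#the *_nominals dicts below hold the expected position-sensor ratio for each
+#wheel position. plot_functions.add_to_wplot() subtracts the matching nominal
+#from every queried sample so that all positions trend around zero, e.g.:
+#
+# >>> import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn
+# >>> raw_ratio = 620.03   #hypothetical IMIR_HK_GW14_POS_RATIO_SHORT sample
+# >>> round(raw_ratio - mn.gw14_nominals['SHORT'], 2)
+# -7.46
+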
+fw_nominals = { +"FND" : -164.46, +"OPAQUE" : 380.42, +"F1000W" : -23.88, +"F1130W" : 138.04, +"F1280W" : -298.14, +"P750L" : 12.79, +"F1500W" : -377.32, +"F1800W" : 435.61, +"F2100W" : -126.04, +"F560W" : 218.13, +"FLENS" : -212.44, +"F2300C" : 306.03, +"F770W" : -61.90, +"F1550C" : 188.88, +"F2550W" : -323.65, +"F1140C" : 83.08, +"F2550WR" : -255.18, +"F1065C" : 261.62 } + +gw23_nominals = { +"SHORT" : 619.81, +"MEDIUM" : 373.31, +"LONG" : 441.4} + +gw14_nominals = { +"SHORT" : 627.49, +"MEDIUM" : 342.71, +"LONG" : 408.75 } + +ccc_nominals = { +"LOCKED" : 577.23, +"OPEN" : 507.86, +"CLOSED" : 399.90} + + +#comprises all mnemonics used throughout he programm +mnemonic_set_base = [ +"SE_ZIMIRICEA", +"SE_ZBUSVLT", + +"IMIR_HK_ICE_SEC_VOLT1", +"IMIR_HK_ICE_SEC_VOLT2", +"IMIR_HK_ICE_SEC_VOLT3", +"IMIR_HK_ICE_SEC_VOLT4", + +"IGDP_MIR_ICE_INTER_TEMP", + +"ST_ZTC1MIRIB", +"ST_ZTC1MIRIA", +"ST_ZTC2MIRIB", +"ST_ZTC2MIRIA", + +"IGDP_MIR_ICE_T1P_CRYO", +"IGDP_MIR_ICE_T2R_CRYO", +"IGDP_MIR_ICE_T3LW_CRYO", +"IGDP_MIR_ICE_T4SW_CRYO", +"IGDP_MIR_ICE_T5IMG_CRYO", +"IGDP_MIR_ICE_T6DECKCRYO", +"IGDP_MIR_ICE_T7IOC_CRYO", +"IGDP_MIR_ICE_FW_CRYO", +"IGDP_MIR_ICE_CCC_CRYO", +"IGDP_MIR_ICE_GW14_CRYO", +"IGDP_MIR_ICE_GW23_CRYO", +"IGDP_MIR_ICE_POMP_CRYO", +"IGDP_MIR_ICE_POMR_CRYO", +"IGDP_MIR_ICE_IFU_CRYO", +"IGDP_MIR_ICE_IMG_CRYO", + +"SE_ZIMIRFPEA", + +"IMIR_PDU_V_DIG_5V", +"IMIR_PDU_I_DIG_5V", +"IMIR_PDU_V_ANA_5V", +"IMIR_PDU_I_ANA_5V", + +"IMIR_PDU_V_ANA_N5V", +"IMIR_PDU_I_ANA_N5V", + +"IMIR_PDU_V_ANA_7V", +"IMIR_PDU_I_ANA_7V", + +"IMIR_PDU_V_ANA_N7V", +"IMIR_PDU_I_ANA_N7V", + +"IMIR_SPW_V_DIG_2R5V", +"IMIR_PDU_V_REF_2R5V", + +"IGDP_MIR_IC_V_VDETCOM", +"IGDP_MIR_SW_V_VDETCOM", +"IGDP_MIR_LW_V_VDETCOM", + +"IGDP_MIR_IC_V_VSSOUT", +"IGDP_MIR_SW_V_VSSOUT", +"IGDP_MIR_LW_V_VSSOUT", +"IGDP_MIR_IC_V_VRSTOFF", + +"IGDP_MIR_SW_V_VRSTOFF", +"IGDP_MIR_LW_V_VRSTOFF", + +"IGDP_MIR_IC_V_VP", +"IGDP_MIR_SW_V_VP", +"IGDP_MIR_LW_V_VP", + +"IGDP_MIR_IC_V_VDDUC", +"IGDP_MIR_SW_V_VDDUC", +"IGDP_MIR_LW_V_VDDUC", + +"IMIR_PDU_TEMP", + +"IMIR_IC_SCE_ANA_TEMP1", +"IMIR_SW_SCE_ANA_TEMP1", +"IMIR_LW_SCE_ANA_TEMP1", + +"IMIR_IC_SCE_DIG_TEMP", +"IMIR_SW_SCE_DIG_TEMP", +"IMIR_LW_SCE_DIG_TEMP", + +"IGDP_MIR_IC_DET_TEMP", +"IGDP_MIR_LW_DET_TEMP", +"IGDP_MIR_SW_DET_TEMP", + +"IMIR_HK_IMG_CAL_LOOP", +"IMIR_HK_IFU_CAL_LOOP", +"IMIR_HK_POM_LOOP", +"IGDP_IT_MIR_IC_STATUS", +"IGDP_IT_MIR_LW_STATUS", +"IGDP_IT_MIR_SW_STATUS", + +"IMIR_HK_FW_POS_VOLT", +"IMIR_HK_FW_POS_RATIO", +"IMIR_HK_FW_CUR_POS", + +"IMIR_HK_GW14_POS_VOLT", +"IMIR_HK_GW14_POS_RATIO", +"IMIR_HK_GW14_CUR_POS", + +"IMIR_HK_GW23_POS_VOLT", +"IMIR_HK_GW23_POS_RATIO", +"IMIR_HK_GW23_CUR_POS", + +"IMIR_HK_CCC_POS_RATIO", +"IMIR_HK_CCC_CUR_POS", +"IMIR_HK_CCC_POS_VOLT" ] + +#mnemonic set for setting up database +mnemonic_set_database = [ +"SE_ZIMIRICEA_IDLE", +"SE_ZIMIRICEA_HV_ON", + +"ICE_POWER_IDLE", +"ICE_POWER_HV_ON", + +"FPE_POWER", + +"SE_ZBUSVLT", + +"IMIR_HK_ICE_SEC_VOLT1", +"IMIR_HK_ICE_SEC_VOLT2", +"IMIR_HK_ICE_SEC_VOLT3", +"IMIR_HK_ICE_SEC_VOLT4_IDLE", +"IMIR_HK_ICE_SEC_VOLT4_HV_ON", + +"IGDP_MIR_ICE_INTER_TEMP", + +"ST_ZTC1MIRIB", +"ST_ZTC1MIRIA", +"ST_ZTC2MIRIB", +"ST_ZTC2MIRIA", + +"IGDP_MIR_ICE_T1P_CRYO", +"IGDP_MIR_ICE_T2R_CRYO", +"IGDP_MIR_ICE_T3LW_CRYO", +"IGDP_MIR_ICE_T4SW_CRYO", +"IGDP_MIR_ICE_T5IMG_CRYO", +"IGDP_MIR_ICE_T6DECKCRYO", +"IGDP_MIR_ICE_T7IOC_CRYO", +"IGDP_MIR_ICE_FW_CRYO", +"IGDP_MIR_ICE_CCC_CRYO", +"IGDP_MIR_ICE_GW14_CRYO", +"IGDP_MIR_ICE_GW23_CRYO", +"IGDP_MIR_ICE_POMP_CRYO", +"IGDP_MIR_ICE_POMR_CRYO", +"IGDP_MIR_ICE_IFU_CRYO", 
+"IGDP_MIR_ICE_IMG_CRYO", + +"SE_ZIMIRFPEA", + +"IMIR_PDU_V_DIG_5V", +"IMIR_PDU_I_DIG_5V", +"IMIR_PDU_V_ANA_5V", +"IMIR_PDU_I_ANA_5V", + +"IMIR_PDU_V_ANA_N5V", +"IMIR_PDU_I_ANA_N5V", + +"IMIR_PDU_V_ANA_7V", +"IMIR_PDU_I_ANA_7V", + +"IMIR_PDU_V_ANA_N7V", +"IMIR_PDU_I_ANA_N7V", + +"IMIR_SPW_V_DIG_2R5V", +"IMIR_PDU_V_REF_2R5V", + +"IGDP_MIR_IC_V_VDETCOM", +"IGDP_MIR_SW_V_VDETCOM", +"IGDP_MIR_LW_V_VDETCOM", + +"IGDP_MIR_IC_V_VSSOUT", +"IGDP_MIR_SW_V_VSSOUT", +"IGDP_MIR_LW_V_VSSOUT", +"IGDP_MIR_IC_V_VRSTOFF", + +"IGDP_MIR_SW_V_VRSTOFF", +"IGDP_MIR_LW_V_VRSTOFF", + +"IGDP_MIR_IC_V_VP", +"IGDP_MIR_SW_V_VP", +"IGDP_MIR_LW_V_VP", + +"IGDP_MIR_IC_V_VDDUC", +"IGDP_MIR_SW_V_VDDUC", +"IGDP_MIR_LW_V_VDDUC", + +"IMIR_PDU_TEMP", + +"IMIR_IC_SCE_ANA_TEMP1", +"IMIR_SW_SCE_ANA_TEMP1", +"IMIR_LW_SCE_ANA_TEMP1", + +"IMIR_IC_SCE_DIG_TEMP", +"IMIR_SW_SCE_DIG_TEMP", +"IMIR_LW_SCE_DIG_TEMP", + +"IGDP_MIR_IC_DET_TEMP", +"IGDP_MIR_LW_DET_TEMP", +"IGDP_MIR_SW_DET_TEMP", + +"IMIR_HK_FW_POS_VOLT", +"IMIR_HK_GW14_POS_VOLT", +"IMIR_HK_GW23_POS_VOLT", +"IMIR_HK_CCC_POS_VOLT"] + +#different tables for wheelpostions +mnemonic_wheelpositions = [ +"IMIR_HK_FW_POS_RATIO_FND", +"IMIR_HK_FW_POS_RATIO_OPAQUE", +"IMIR_HK_FW_POS_RATIO_F1000W", +"IMIR_HK_FW_POS_RATIO_F1130W", +"IMIR_HK_FW_POS_RATIO_F1280W", +"IMIR_HK_FW_POS_RATIO_P750L", +"IMIR_HK_FW_POS_RATIO_F1500W", +"IMIR_HK_FW_POS_RATIO_F1800W", +"IMIR_HK_FW_POS_RATIO_F2100W", +"IMIR_HK_FW_POS_RATIO_F560W", +"IMIR_HK_FW_POS_RATIO_FLENS", +"IMIR_HK_FW_POS_RATIO_F2300C", +"IMIR_HK_FW_POS_RATIO_F770W", +"IMIR_HK_FW_POS_RATIO_F1550C", +"IMIR_HK_FW_POS_RATIO_F2550W", +"IMIR_HK_FW_POS_RATIO_F1140C", +"IMIR_HK_FW_POS_RATIO_F2550WR", +"IMIR_HK_FW_POS_RATIO_F1065C", + +"IMIR_HK_GW14_POS_RATIO_SHORT", +"IMIR_HK_GW14_POS_RATIO_MEDIUM", +"IMIR_HK_GW14_POS_RATIO_LONG", + +"IMIR_HK_GW23_POS_RATIO_SHORT", +"IMIR_HK_GW23_POS_RATIO_MEDIUM", +"IMIR_HK_GW23_POS_RATIO_LONG", + +"IMIR_HK_CCC_POS_RATIO_LOCKED", +"IMIR_HK_CCC_POS_RATIO_OPEN", +"IMIR_HK_CCC_POS_RATIO_CLOSED"] + +fw_pos_mnemonic = [ +"IMIR_HK_FW_POS_RATIO_FND", +"IMIR_HK_FW_POS_RATIO_OPAQUE", +"IMIR_HK_FW_POS_RATIO_F1000W", +"IMIR_HK_FW_POS_RATIO_F1130W", +"IMIR_HK_FW_POS_RATIO_F1280W", +"IMIR_HK_FW_POS_RATIO_P750L", +"IMIR_HK_FW_POS_RATIO_F1500W", +"IMIR_HK_FW_POS_RATIO_F1800W", +"IMIR_HK_FW_POS_RATIO_F2100W", +"IMIR_HK_FW_POS_RATIO_F560W", +"IMIR_HK_FW_POS_RATIO_FLENS", +"IMIR_HK_FW_POS_RATIO_F2300C", +"IMIR_HK_FW_POS_RATIO_F770W", +"IMIR_HK_FW_POS_RATIO_F1550C", +"IMIR_HK_FW_POS_RATIO_F2550W", +"IMIR_HK_FW_POS_RATIO_F1140C", +"IMIR_HK_FW_POS_RATIO_F2550WR", +"IMIR_HK_FW_POS_RATIO_F1065C"] diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/utils/process_data.py b/jwql/instrument_monitors/miri_monitors/data_trending/utils/process_data.py new file mode 100755 index 000000000..ad43d2080 --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/utils/process_data.py @@ -0,0 +1,394 @@ +"""This module holds functions for miri data trending + +All functions in this module are tailored for the miri datatrending application. +Detailed descriptions are given for every function individually. 
+ +------- + - Daniel Kühbacher + +Use +--- + +Dependencies +------------ +MIRI_trend_requestsDRAFT1900201.docx + +References +---------- + +Notes +----- + +""" + +import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn +import jwql.instrument_monitors.miri_monitors.data_trending.utils.condition as cond +import statistics +import sqlite3 +import warnings +from collections import defaultdict + + + +def extract_data(condition, mnemonic): + '''Function extracts data from given mnemmonic at a given condition + Parameters + ---------- + condition : object + conditon object that holds one or more subconditions + mnemonic : AstropyTable + holds single table with mnemonic data + Return + ------ + temp : list or None + holds data that applies to given condition + ''' + temp = [] + + #look for all values that fit to the given conditions + for element in mnemonic: + if condition.state(float(element['time'])): + temp.append(float(element['value'])) + + #return temp is one ore more values fit to the condition + #return None if no applicable data was found + if len(temp) > 0: + return temp + else: + return None + +def extract_filterpos1(condition, nominals, ratio_mnem, pos_mnem): + '''Extracts ratio values which correspond to given position values and their + proposed nominals + Parameters + ---------- + condition : object + conditon object that holds one or more subconditions + nominals : dict + holds nominal values for all wheel positions + ratio_mem : AstropyTable + holds ratio values of one specific mnemonic + pos_mem : AstropyTable + holds pos values of one specific mnemonic + Return + ------ + pos_values : dict + holds ratio values and times with corresponding positionlabel as key + ''' + + #initilize empty dict + pos_values = defaultdict(list) + + #do for every position in mnemonic attribute + for pos in pos_mnem: + + #raise warning if position is UNKNOWN + if pos['value'] != "UNKNOWN": + + #request time interval where the current positon is in between + interval = condition.get_interval(pos['time']) + + #get all ratio values in the interval + + + + #check if condition attribute for current positon is true + if interval is not None: + cur_pos_time = pos['time'] + filtername = pos['value'] + + for ratio in ratio_mnem: + + #look for ratio values which are in the same time interval + #and differ a certain value (here 5mV) from the nominal + if (ratio['time'] >= cur_pos_time) and \ + (abs(float(ratio['value']) - nominals.get(pos['value'])) < 5): + + if (ratio['time'] > interval[0]) and (ratio['time'] < interval[1]): + pos_values[pos['value']].append(( ratio['time'], ratio['value'])) + + + + else: + warnings.warn("UNKNOWN Position") + return pos_values + +def extract_filterpos(condition, nominals, ratio_mnem, pos_mnem): + '''Extracts ratio values which correspond to given position values and their + proposed nominals + Parameters + ---------- + condition : object + conditon object that holds one or more subconditions + nominals : dict + holds nominal values for all wheel positions + ratio_mem : AstropyTable + holds ratio values of one specific mnemonic + pos_mem : AstropyTable + holds pos values of one specific mnemonic + Return + ------ + pos_values : dict + holds ratio values and times with corresponding positionlabel as key + ''' + + #initilize empty dict for assigned ratio values + pos_values = defaultdict(list) + + for index, pos in enumerate(pos_mnem): + + #raise warning if position is UNKNOWN + if pos['value'] != "UNKNOWN": + + #set up interval beween where the pos 
value was timed and the supply + interval = condition.get_interval(pos['time']) + + if interval is None: + continue + else: + interval[0] = pos['time'] + if pos_mnem[index+1]['time'] < interval[1]: + interval[1] = pos_mnem[index+1]['time'] + + #empty list for pos values + interval_ratios = [] + + #get all ratio values in the interval + for ratio in ratio_mnem: + if (ratio['time'] >= interval[0]) and (ratio['time'] < interval[1]): + interval_ratios.append(ratio) + elif ratio['time']>= interval[1]: + break + + #check wheather pos values are in range of these checkvals + window = 1 + found_value = False + + while found_value == False: + for ratio in interval_ratios: + if (abs(float(ratio['value']) - nominals.get(pos['value'])) < window): + found_value = True + pos_values[pos['value']].append(( ratio['time'], ratio['value'])) + break + + window +=2 + + if window > 10: + print('ratio error') + break + + else: + warnings.warn("UNKNOWN Position") + return pos_values + +def once_a_day_routine(mnemonic_data): + '''Proposed routine for processing a 15min data file once a day + Parameters + ---------- + mnemonic_data : dict + dict holds time and value in a astropy table with correspining identifier as key + Return + ------ + data_cond_1 : dict + holds extracted data with condition 1 applied + data_cond_1 : dict + holds extracted data with condition 2 applied + ''' + + #abbreviate attribute + m = mnemonic_data + returndata = dict() + + + ######################################################################### + con_set_1 = [ \ + cond.equal(m.mnemonic('IMIR_HK_IMG_CAL_LOOP'),'OFF'), \ + cond.equal(m.mnemonic('IMIR_HK_IFU_CAL_LOOP'),'OFF'), \ + cond.equal(m.mnemonic('IMIR_HK_POM_LOOP'),'OFF'), \ + cond.smaller(m.mnemonic('IMIR_HK_ICE_SEC_VOLT1'),1.0), \ + cond.greater(m.mnemonic('SE_ZIMIRICEA'),0.2)] + #setup condition + condition_1 = cond.condition(con_set_1) + + + #add filtered engineering values of mnemonics given in list mnemonic_cond_1 + #to dictitonary + for identifier in mn.mnemonic_cond_1: + data = extract_data(condition_1, m.mnemonic(identifier)) + + if data != None: + returndata.update( {identifier:data} ) + else: + print("no data for {}".format(identifier)) + + del condition_1 + + ########################################################################## + #under normal use following line should be added: + #cond.equal(m.mnemonic('IGDP_IT_MIR_SW_STATUS'), 'DETECTOR_READY'), \ + #SW was missing in the trainigs data so I could not use it for a condition. 
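+    #NOTE: each con_set_* list is wrapped in a cond.condition object;
+    #extract_data() then keeps only samples for which condition.state(t)
+    #is True, i.e. whose timestamps fall inside the windows defined by the
+    #subconditions (see utils/condition.py). Illustrative sketch only:
+    #
+    #    condition = cond.condition(con_set)
+    #    values = [float(e['value']) for e in m.mnemonic('IMIR_PDU_TEMP')
+    #              if condition.state(float(e['time']))]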
+ con_set_2 = [ \ + cond.greater(m.mnemonic('SE_ZIMIRFPEA'), 0.5), \ + cond.equal(m.mnemonic('IGDP_IT_MIR_IC_STATUS'), 'DETECTOR_READY'), \ + cond.equal(m.mnemonic('IGDP_IT_MIR_LW_STATUS'), 'DETECTOR_READY')] + #setup condition + condition_2 = cond.condition(con_set_2) + + #add filtered engineering values of mnemonics given in list mnemonic_cond_2 + #to dictitonary + for identifier in mn.mnemonic_cond_2: + data = extract_data(condition_2, m.mnemonic(identifier)) + + if data != None: + returndata.update( {identifier:data} ) + else: + print("no data for {}".format(identifier)) + + del condition_2 + + return returndata + + +def whole_day_routine(mnemonic_data): + '''Proposed routine for processing data representing a whole day + Parameters + ---------- + mnemonic_data : dict + dict holds time and value in a astropy table with correspining identifier as key + Return + ------ + data_cond_3 : dict + holds extracted data with condition 3 applied + FW_volt : list + extracted data for IMIR_HK_FW_POS_VOLT + GW14_volt : list + extracted data for IMIR_HK_GW14_POS_VOLT + GW23_volt : list + extracted data for IMIR_HK_GW23_POS_VOLT + CCC_volt : list + extracted data for IMIR_HK_CCC_POS_VOLT + ''' + + #abbreviate attribute + m = mnemonic_data + returndata = dict() + + ######################################################################### + con_set_3 = [ \ + cond.greater(m.mnemonic('IMIR_HK_ICE_SEC_VOLT1'), 25.0)] + #setup condition + condition_3 = cond.condition(con_set_3) + + #add filtered engineering values of mnemonics given in list mnemonic_cond_3 + #to dictitonary + for identifier in mn.mnemonic_cond_3: + data = extract_data(condition_3, m.mnemonic(identifier)) + + if data != None: + returndata.update({identifier:data}) + else: + print("no data for {}".format(identifier)) + + del condition_3 + + ######################################################################### + #extract data for IMIR_HK_FW_POS_VOLT under given condition + con_set_FW = [ \ + cond.greater(m.mnemonic('IMIR_HK_FW_POS_VOLT'),250.0)] + #setup condition + condition_FW = cond.condition(con_set_FW) + FW_volt = extract_data(condition_FW, m.mnemonic('IMIR_HK_FW_POS_VOLT')) + returndata.update({'IMIR_HK_FW_POS_VOLT':FW_volt}) + del condition_FW + + #extract data for IMIR_HK_GW14_POS_VOLT under given condition + con_set_GW14 = [ \ + cond.greater(m.mnemonic('IMIR_HK_GW14_POS_VOLT'),250.0)] + #setup condition + condition_GW14 = cond.condition(con_set_GW14) + GW14_volt = extract_data(condition_GW14, m.mnemonic('IMIR_HK_GW14_POS_VOLT')) + returndata.update({'IMIR_HK_GW14_POS_VOLT':GW14_volt}) + del condition_GW14 + + #extract data for IMIR_HK_GW23_POS_VOLT under given condition + con_set_GW23 = [ \ + cond.greater(m.mnemonic('IMIR_HK_GW23_POS_VOLT'),250.0)] + #setup condition + condition_GW23 = cond.condition(con_set_GW23) + GW23_volt = extract_data(condition_GW23, m.mnemonic('IMIR_HK_GW23_POS_VOLT')) + returndata.update({'IMIR_HK_GW23_POS_VOLT':GW23_volt}) + del condition_GW23 + + #extract data for IMIR_HK_CCC_POS_VOLT under given condition + con_set_CCC = [ \ + cond.greater(m.mnemonic('IMIR_HK_CCC_POS_VOLT'),250.0)] + #setup condition + condition_CCC = cond.condition(con_set_CCC) + CCC_volt = extract_data(condition_CCC, m.mnemonic('IMIR_HK_CCC_POS_VOLT')) + returndata.update({'IMIR_HK_CCC_POS_VOLT':CCC_volt}) + del condition_CCC + + return returndata + + +def wheelpos_routine(mnemonic_data): + '''Proposed routine for positionsensors each day + Parameters + ---------- + mnemonic_data : dict + dict holds time and value in a astropy table 
with correspining identifier as key + Return + ------ + FW : dict + holds FW ratio values and times with corresponding positionlabel as key + GW14 : dict + holds GW14 ratio values and times with corresponding positionlabel as key + GW23 : dict + holds GW23 ratio values and times with corresponding positionlabel as key + CCC : dict + holds CCC ratio values and times with corresponding positionlabel as key + ''' + + #abbreviate attribute + m = mnemonic_data + + con_set_FW = [ \ + cond.greater(m.mnemonic('IMIR_HK_FW_POS_VOLT'),250.0)] + #setup condition + condition_FW = cond.condition(con_set_FW) + FW = extract_filterpos(condition_FW, mn.fw_nominals, \ + m.mnemonic('IMIR_HK_FW_POS_RATIO'), m.mnemonic('IMIR_HK_FW_CUR_POS')) + + del condition_FW + + con_set_GW14 = [ \ + cond.greater(m.mnemonic('IMIR_HK_GW14_POS_VOLT'),250.0)] + #setup condition + condition_GW14 = cond.condition(con_set_GW14) + GW14 = extract_filterpos(condition_GW14, mn.gw14_nominals, \ + m.mnemonic('IMIR_HK_GW14_POS_RATIO'), m.mnemonic('IMIR_HK_GW14_CUR_POS')) + + del condition_GW14 + + con_set_GW23 = [ \ + cond.greater(m.mnemonic('IMIR_HK_GW23_POS_VOLT'),250.0)] + #setup condition + condition_GW23 = cond.condition(con_set_GW23) + GW23 = extract_filterpos(condition_GW23, mn.gw23_nominals, \ + m.mnemonic('IMIR_HK_GW23_POS_RATIO'), m.mnemonic('IMIR_HK_GW23_CUR_POS')) + + del condition_GW23 + + con_set_CCC = [ \ + cond.greater(m.mnemonic('IMIR_HK_CCC_POS_VOLT'),250.0)] + #setup condition + condition_CCC = cond.condition(con_set_CCC) + CCC = extract_filterpos(condition_CCC, mn.ccc_nominals, \ + m.mnemonic('IMIR_HK_CCC_POS_RATIO'), m.mnemonic('IMIR_HK_CCC_CUR_POS')) + + del condition_CCC + + return FW, GW14, GW23, CCC + +if __name__ =='__main__': + pass diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/utils/sql_interface.py b/jwql/instrument_monitors/miri_monitors/data_trending/utils/sql_interface.py new file mode 100755 index 000000000..543ddcfe2 --- /dev/null +++ b/jwql/instrument_monitors/miri_monitors/data_trending/utils/sql_interface.py @@ -0,0 +1,160 @@ +"""Module holds functions to generate and access sqlite databases + +The module is tailored for use in miri data trending. It holds functions to +create and close connections to a sqlite database. Calling the module itself +creates a sqlite database with specific tables used at miri data trending. 
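+
+Each per-mnemonic table created by main() stores one row per processed
+sample (start_time, end_time, data_points, average, deviation, plus an
+automatic performed_at timestamp); the wheel-position tables store plain
+(timestamp, value) pairs.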
+
+Authors
+-------
+    - Daniel Kühbacher
+
+Use
+---
+
+Dependencies
+------------
+    import mnemonics as m
+
+References
+----------
+
+Notes
+-----
+
+"""
+import os
+import sqlite3
+from sqlite3 import Error
+
+import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as m
+from jwql.utils.utils import get_config, filename_parser
+
+def create_connection(db_file):
+    '''Sets up a connection or builds database
+    Parameters
+    ----------
+    db_file : string
+        represents filename of database
+    Return
+    ------
+    conn : DBobject or None
+        Connection object or None
+    '''
+    try:
+        conn = sqlite3.connect(db_file)
+        print('Connected to database "{}"'.format(db_file))
+        return conn
+    except Error as e:
+        print(e)
+        return None
+
+
+def close_connection(conn):
+    '''Closes connection to database
+    Parameters
+    ----------
+    conn : DBobject
+        Connection object to be closed
+    '''
+    conn.close()
+    print('Connection closed')
+
+
+def add_data(conn, mnemonic, data):
+    '''Add data of a specific mnemonic to the database if it does not exist yet
+    Parameters
+    ----------
+    conn : DBobject
+        connection object to access database
+    mnemonic : string
+        identifies the table
+    data : list
+        specifies the data
+    '''
+
+    c = conn.cursor()
+
+    #check if data already exists (start_time as identifier)
+    c.execute('SELECT id from {} WHERE start_time= {}'.format(mnemonic, data[0]))
+    temp = c.fetchall()
+
+    if len(temp) == 0:
+        c.execute('INSERT INTO {} (start_time,end_time,data_points,average,deviation) \
+            VALUES (?,?,?,?,?)'.format(mnemonic), data)
+        conn.commit()
+    else:
+        print('data already exists')
+
+
+def add_wheel_data(conn, mnemonic, data):
+    '''Add data of a specific wheel position to the database if it does not exist yet
+    Parameters
+    ----------
+    conn : DBobject
+        connection object to access database
+    mnemonic : string
+        identifies the table
+    data : list
+        specifies the data
+    '''
+
+    c = conn.cursor()
+
+    #check if data already exists (timestamp as identifier)
+    c.execute('SELECT id from {} WHERE timestamp = {}'.format(mnemonic, data[0]))
+    temp = c.fetchall()
+
+    if len(temp) == 0:
+        c.execute('INSERT INTO {} (timestamp, value) \
+            VALUES (?,?)'.format(mnemonic), data)
+        conn.commit()
+    else:
+        print('data already exists')
+
+
+def main():
+    '''Creates SQLite database with tables proposed in mnemonics.py'''
+
+    __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
+
+    #generate paths
+    DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database')
+    DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'miri_database.db')
+
+    conn = create_connection(DATABASE_FILE)
+
+    c = conn.cursor()
+
+    for mnemonic in m.mnemonic_set_database:
+        try:
+            c.execute('CREATE TABLE IF NOT EXISTS {} ( \
+                        id INTEGER, \
+                        start_time REAL, \
+                        end_time REAL, \
+                        data_points INTEGER, \
+                        average REAL, \
+                        deviation REAL, \
+                        performed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\
+                        PRIMARY KEY (id));'.format(mnemonic))
+        except Error as e:
+            print(e)
+
+    for mnemonic in m.mnemonic_wheelpositions:
+        try:
+            c.execute('CREATE TABLE IF NOT EXISTS {} ( \
+                        id INTEGER, \
+                        timestamp REAL, \
+                        value REAL, \
+                        performed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\
+                        PRIMARY KEY (id));'.format(mnemonic))
+        except Error as e:
+            print(e)
+
+    print("Database initial setup complete")
+    conn.commit()
+    close_connection(conn)
+
+#sets up database if called as main
+if __name__ == "__main__":
+    main()
+    print("sql_interface.py done")
diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/15min_to_db.py
b/jwql/instrument_monitors/nirspec_monitors/data_trending/15min_to_db.py new file mode 100644 index 000000000..3ab3b7865 --- /dev/null +++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/15min_to_db.py @@ -0,0 +1,93 @@ +import statistics +import os +import glob +import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as mn +import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.csv_to_AstropyTable as apt +from jwql.utils.utils import get_config, filename_parser + +from astropy.table import Table, Column + +from jwql.instrument_monitors.nirspec_monitors.data_trending.utils.process_data import once_a_day_routine + +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + +#point to the directory where your files are located! +directory = os.path.join(get_config()['outputs'], 'nirspec_data_trending', 'nirspec_new_15min', '*.CSV') + +#here some some files contain the same data but they are all incomplete +#in order to generate a full database we have to import all of them +filenames = glob.glob(directory) + +def process_file(conn, path): + '''Parse CSV file, process data within and put to DB + Parameters + ---------- + conn : DBobject + Connection object to temporary database + path : str + defines path to the files + ''' + + #import mnemonic data and append dict to variable below + m_raw_data = apt.mnemonics(path) + + #process raw data with once a day routine + returndata = once_a_day_routine(m_raw_data) + + #put all data in a database that uses a condition + for key, value in returndata.items(): + m = m_raw_data.mnemonic(key) + length = len(value) + mean = statistics.mean(value) + deviation = statistics.stdev(value) + dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) + sql.add_data(conn, key, dataset) + + #add rest of the data to database + for identifier in mn.mnemSet_15min: + + m = m_raw_data.mnemonic(identifier) + + temp = [] + + #look for all values that fit to the given conditions + for element in m: + temp.append(float(element['value'])) + + #return None if no applicable data was found + if len(temp) > 2: + length = len(temp) + mean = statistics.mean(temp) + deviation = statistics.stdev(temp) + + dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) + sql.add_data(conn, identifier, dataset) + elif len(temp) == 2: + dataset = (float(element['time']), float(element['time']), 1, temp[0], 0) + sql.add_data(conn, identifier, dataset) + else: + print('No data for {}'.format(identifier)) + print(temp) + + del temp + + +def main(): + #generate paths + DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') + DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db') + + #connect to temporary database + conn = sql.create_connection(DATABASE_FILE) + + #do for every file in list above + for path in filenames: + process_file(conn, path) + + #close connection + sql.close_connection(conn) + print("done") + +if __name__ == "__main__": + main() diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/__init__.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/dashboard.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/dashboard.py new file mode 100644 index 000000000..5935285a8 --- /dev/null 
+++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/dashboard.py
@@ -0,0 +1,103 @@
+#! /usr/bin/env python
+"""Combines plots to tabs and prepares dashboard
+
+The module imports all prepared plot functions from .plots and combines
+the prebuilt tabs into a dashboard. Furthermore it defines the time range
+for the visualisation. The default time range should be set to about
+4 months (120 days).
+
+Authors
+-------
+    - Daniel Kühbacher
+
+Use
+---
+    The functions within this module are intended to be imported and
+    used by ``data_container.py``, e.g.:
+
+    ::
+        import jwql.instrument_monitors.nirspec_monitors.data_trending.dashboard as dash
+        dashboard, variables = dash.data_trending_dashboard(start_time, end_time)
+
+Dependencies
+------------
+    User must provide "nirspec_database.db" in folder jwql/database
+
+"""
+import os
+import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql
+from jwql.utils.utils import get_config, filename_parser
+
+from bokeh.embed import components
+from bokeh.models.widgets import Tabs
+from bokeh.resources import Resources
+from bokeh.io.state import curstate
+
+from astropy.time import Time
+import datetime
+from datetime import date
+
+#import plot functions
+from .plots.power_tab import power_plots
+from .plots.voltage_tab import volt_plots
+from .plots.temperature_tab import temperature_plots
+from .plots.msa_mce_tab import msa_mce_plots
+from .plots.fpe_fpa_tab import fpe_fpa_plots
+from .plots.caa_tab import caa_plots
+from .plots.wheel_tab import wheel_pos
+
+#configure actual datetime in order to implement range function
+now = datetime.datetime.now()
+#default_start = now - datetime.timedelta(1000)
+default_start = datetime.date(2017, 8, 15).isoformat()
+
+__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
+
+def data_trending_dashboard(start = default_start, end = now):
+    """Builds dashboard
+    Parameters
+    ----------
+    start : time
+        configures start time for query and visualisation
+    end : time
+        configures end time for query and visualisation
+    Return
+    ------
+    plot_data : list
+        A list containing the JavaScript and HTML content for the dashboard
+    variables : dict
+        currently unused
+    """
+
+    #connect to database
+    DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database')
+    DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db')
+
+    conn = sql.create_connection(DATABASE_FILE)
+
+    #some variables can be passed to the template via following
+    variables = dict(init = 1)
+
+    #add tabs to dashboard
+    tab1 = power_plots(conn, start, end)
+    tab2 = volt_plots(conn, start, end)
+    tab3 = temperature_plots(conn, start, end)
+    tab4 = msa_mce_plots(conn, start, end)
+    tab5 = fpe_fpa_plots(conn, start, end)
+    tab6 = caa_plots(conn, start, end)
+    tab7 = wheel_pos(conn, start, end)
+
+    #build dashboard
+    tabs = Tabs( tabs=[ tab1, tab2, tab3, tab4, tab5, tab6, tab7] )
+    #tabs = Tabs( tabs=[ tab1, tab7] )
+
+    #return dashboard to webapp
+    script, div = components(tabs)
+    plot_data = [div, script]
+
+    #close sql connection
+    sql.close_connection(conn)
+
+    return plot_data, variables
diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/day_to_db.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/day_to_db.py
new file mode 100644
index 000000000..5f06c6609
--- /dev/null
+++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/day_to_db.py
@@ -0,0 +1,116 @@
+import statistics
+import os
+import glob
+import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as mn
+import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql
+import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.csv_to_AstropyTable as apt
+from jwql.utils.utils import get_config, filename_parser
+
+from astropy.table import Table, Column
+
+from jwql.instrument_monitors.nirspec_monitors.data_trending.utils.process_data import whole_day_routine, wheelpos_routine
+
+__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
+
+#point to the directory where your files are located!
+directory = os.path.join(get_config()['outputs'], 'nirspec_data_trending', 'nirspec_more', '*.CSV')
+
+#some of the files contain the same data but all of them are incomplete;
+#in order to generate a full database we have to import all of them
+filenames = glob.glob(directory)
+test = "FOFTLM2019073163845064.CSV"
+
+def process_file(conn, path):
+    '''Parse CSV file, process the data within and write it to the database
+    Parameters
+    ----------
+    conn : DBobject
+        Connection object to temporary database
+    path : str
+        defines path to the files
+    '''
+
+    #import mnemonic data and append dict to variable below
+    m_raw_data = apt.mnemonics(path)
+
+    #process raw data with the whole day routine
+    return_data, lamp_data = whole_day_routine(m_raw_data)
+    FW, GWX, GWY = wheelpos_routine(m_raw_data)
+
+    for key, values in FW.items():
+        for data in values:
+            sql.add_wheel_data(conn, 'INRSI_C_FWA_POSITION_{}'.format(key), data)
+
+    for key, values in GWX.items():
+        for data in values:
+            sql.add_wheel_data(conn, 'INRSI_C_GWA_X_POSITION_{}'.format(key), data)
+
+    for key, values in GWY.items():
+        for data in values:
+            sql.add_wheel_data(conn, 'INRSI_C_GWA_Y_POSITION_{}'.format(key), data)
+
+    #put all data to a database that uses a condition
+    for key, value in return_data.items():
+        m = m_raw_data.mnemonic(key)
+        length = len(value)
+        if length > 2:
+            mean = statistics.mean(value)
+            deviation = statistics.stdev(value)
+            dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
+            sql.add_data(conn, key, dataset)
+
+    #add rest of the data to database -> no conditions applied
+    for identifier in mn.mnemSet_day:
+
+        m = m_raw_data.mnemonic(identifier)
+
+        temp = []
+
+        #look for all values that fit to the given conditions
+        for element in m:
+            temp.append(float(element['value']))
+
+        #skip the mnemonic if no applicable data was found
+        if len(temp) > 2:
+            length = len(temp)
+            mean = statistics.mean(temp)
+            deviation = statistics.stdev(temp)
+
+            dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
+            sql.add_data(conn, identifier, dataset)
+        else:
+            print('No data for {}'.format(identifier))
+            print(temp)
+
+        del temp
+
+    #add lamp data to database -> distinction over lamps
+    for key, values in lamp_data.items():
+        for data in values:
+            dataset_volt = (data[0], data[1], data[5], data[6], data[7])
+            dataset_curr = (data[0], data[1], data[2], data[3], data[4])
+            sql.add_data(conn, 'LAMP_{}_VOLT'.format(key), dataset_volt)
+            sql.add_data(conn, 'LAMP_{}_CURR'.format(key), dataset_curr)
+
+def main():
+    #generate paths
+    DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database')
+    DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db')
+
+    #connect to temporary database
+    conn = sql.create_connection(DATABASE_FILE)
+
+    '''
+    path = directory + test
+    process_file(conn, path)
+    '''
+    #do for every file in list above
+    for path in filenames:
+        process_file(conn, path)
+
+    #close connection
+    sql.close_connection(conn)
+    print("done")
+
+if __name__ == "__main__":
+    main()
diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py
new file mode 100644
index 000000000..01fe1ab74
--- /dev/null
+++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py
@@ -0,0 +1,196 @@
+#! /usr/bin/env python
+'''Main module for NIRSpec data trending -> fills database
+
+    This module holds functions to connect with the engineering database in
+    order to grab and process data for the NIRSpec database. The script
+    queries a daily 15 min chunk and a whole day dataset. These contain
+    several mnemonics defined in ``mnemonics.py``. The queried data gets
+    processed and stored in a prepared database.
+
+Authors
+-------
+
+    - Daniel Kühbacher
+
+Use
+---
+
+Dependencies
+------------
+
+References
+----------
+
+Notes
+-----
+'''
+import os
+import statistics
+import sqlite3
+
+import pandas as pd
+import numpy as np
+from astropy.time import Time
+
+from .utils import mnemonics as mn
+from .utils import sql_interface as sql
+from .utils.process_data import once_a_day_routine, whole_day_routine, wheelpos_routine
+from jwql.utils.engineering_database import query_single_mnemonic
+from jwql.utils.utils import get_config
+
+
+def process_daysample(conn, m_raw_data):
+    '''Process a whole-day sample and write the results to the database
+    Parameters
+    ----------
+    conn : DBobject
+        Connection object to temporary database
+    m_raw_data : dict
+        holds time and value in an astropy table with corresponding identifier as key
+    '''
+
+    #process raw data with the whole day routine
+    return_data, lamp_data = whole_day_routine(m_raw_data)
+    FW, GWX, GWY = wheelpos_routine(m_raw_data)
+
+    #put all data to a database that uses a condition
+    for key, value in return_data.items():
+        m = m_raw_data.mnemonic(key)
+        length = len(value)
+        mean = statistics.mean(value)
+        deviation = statistics.stdev(value)
+        dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
+        sql.add_data(conn, key, dataset)
+
+    #add rest of the data to database -> no conditions applied
+    for identifier in mn.mnemSet_day:
+        m = m_raw_data.mnemonic(identifier)
+        temp = []
+        #look for all values that fit to the given conditions
+        for element in m:
+            temp.append(float(element['value']))
+        #skip the mnemonic if no applicable data was found
+        if len(temp) > 2:
+            length = len(temp)
+            mean = statistics.mean(temp)
+            deviation = statistics.stdev(temp)
+            dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
+            sql.add_data(conn, identifier, dataset)
+        else:
+            print('No data for {}'.format(identifier))
+        del temp
+
+    #add lamp data to database -> distinction over lamps
+    for key, values in lamp_data.items():
+        for data in values:
+            dataset_volt = (data[0], data[1], data[5], data[6], data[7])
+            dataset_curr = (data[0], data[1], data[2], data[3], data[4])
+            sql.add_data(conn, 'LAMP_{}_VOLT'.format(key), dataset_volt)
+            sql.add_data(conn, 'LAMP_{}_CURR'.format(key), dataset_curr)
+
+    #add wheel data to database
+    for key, values in FW.items():
+        for data in values:
+            sql.add_wheel_data(conn, 'INRSI_C_FWA_POSITION_{}'.format(key), data)
+
+    for key, values in GWX.items():
+        for data in values:
+            sql.add_wheel_data(conn, 'INRSI_C_GWA_X_POSITION_{}'.format(key), data)
+
+    for key, values in GWY.items():
+        for data in values:
+            sql.add_wheel_data(conn, 'INRSI_C_GWA_Y_POSITION_{}'.format(key), data)
+
+
+def process_15minsample(conn, m_raw_data):
+    '''Process a 15 min sample and write the results to the database
+    Parameters
+    ----------
+    conn : DBobject
+        Connection object to temporary
database + path : str + defines path to the files + ''' + + #process raw data with once a day routine + returndata = once_a_day_routine(m_raw_data) + + #put all data in a database that uses a condition + for key, value in returndata.items(): + m = m_raw_data.mnemonic(key) + length = len(value) + mean = statistics.mean(value) + deviation = statistics.stdev(value) + dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) + sql.add_data(conn, key, dataset) + + #add rest of the data to database + for identifier in mn.mnemSet_15min: + + m = m_raw_data.mnemonic(identifier) + + temp = [] + + #look for all values that fit to the given conditions + for element in m: + temp.append(float(element['value'])) + + #return None if no applicable data was found + if len(temp) > 2: + length = len(temp) + mean = statistics.mean(temp) + deviation = statistics.stdev(temp) + + dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) + sql.add_data(conn, identifier, dataset) + elif len(temp) == 2: + dataset = (float(element['time']), float(element['time']), 1, temp[0], 0) + sql.add_data(conn, identifier, dataset) + else: + print('No data for {}'.format(identifier)) + print(temp) + + del temp + +def main(): + + ''' + from ..utils.engineering_database import query_single_mnemonic + + mnemonic_identifier = 'SA_ZFGOUTFOV' + start_time = Time(2016.0, format='decimalyear') + end_time = Time(2018.1, format='decimalyear') + + + mnemonic = query_single_mnemonic(mnemonic_identifier, start_time, end_time) + assert len(mnemonic.data) == mnemonic.meta['paging']['rows'] + ''' + + + for mnemonic in mn.mnemonic_set_15min: + whole_day.update(mnemonic = query_single_mnemonic(mnemonic, start, end)) + + + #configure start and end time for query + # + # + # + # + + #query table start and end from engineering_database + # + # + # + # + #return table_day, table_15min + #generate paths + DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') + DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db') + + #connect to temporary database + conn = sql.create_connection(DATABASE_FILE) + + process_daysample(conn, table_day) + process_15minsample(conn, table_15min) + + #close connection + sql.close_connection(conn) + print("done") diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/__init__.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/caa_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/caa_tab.py new file mode 100644 index 000000000..6caa773ac --- /dev/null +++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/caa_tab.py @@ -0,0 +1,247 @@ +#! /usr/bin/env python +"""Prepares plots for CAA tab + + Module prepares plots for mnemonics below. Combines plots in a grid and + returns tab object. 
+ + Plot 1 - Lamp Voltages and Currents (Distincted) + INRSH_LAMP_SEL + INRSI_C_CAA_CURRENT + INRSI_C_CAA_VOLTAGE + + Plot 2 - CAA (Voltages and Currents) + INRSH_CAA_VREFOFF + INRSH_CAA_VREF + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``nirspec_dashboard.py``, e.g.: + + :: + from .plots.voltage_tab import voltage_plots + tab = voltage_plots(conn, start, end) + +Dependencies +------------ + User must provide database "nirpsec_database.db" + +""" +import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf +from bokeh.models import LinearAxis, Range1d +from bokeh.plotting import figure +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource, HoverTool +from bokeh.layouts import WidgetBox, gridplot, Column + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def lamp_volt(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 800, + y_range = [1.2,2.3], + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "CAA Lamp Voltages" + pf.add_basic_layout(p) + + l = pf.add_to_plot(p, "FLAT1", "LAMP_FLAT1_VOLT", start, end, conn, color = "red") + m = pf.add_to_plot(p, "FLAT2", "LAMP_FLAT2_VOLT", start, end, conn, color = "green") + n = pf.add_to_plot(p, "FLAT3", "LAMP_FLAT3_VOLT", start, end, conn, color = "blue") + o = pf.add_to_plot(p, "FLAT4", "LAMP_FLAT4_VOLT", start, end, conn, color = "brown") + x = pf.add_to_plot(p, "FLAT5", "LAMP_FLAT5_VOLT", start, end, conn, color = "orange") + q = pf.add_to_plot(p, "LINE1", "LAMP_LINE1_VOLT", start, end, conn, color = "cyan") + r = pf.add_to_plot(p, "LINE2", "LAMP_LINE2_VOLT", start, end, conn, color = "darkmagenta") + s = pf.add_to_plot(p, "LINE3", "LAMP_LINE3_VOLT", start, end, conn, color = "burlywood") + t = pf.add_to_plot(p, "LINE4", "LAMP_LINE4_VOLT", start, end, conn, color = "darkkhaki") + u = pf.add_to_plot(p, "REF", "LAMP_REF_VOLT", start, end, conn, color = "darkblue") + v = pf.add_to_plot(p, "TEST", "LAMP_TEST_VOLT", start, end, conn, color = "goldenrod") + + pf.add_hover_tool(p,[l,m,n,o,x,q,r,s,t,u,v]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def lamp_current(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 600, + y_range = [10.5,14.5], + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Current (mA)') + + p.grid.visible = True + p.title.text = "CAA Lamp currents" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "FLAT1", "LAMP_FLAT1_CURR", start, end, conn, color = "red") + b = pf.add_to_plot(p, "FLAT2", "LAMP_FLAT2_CURR", start, end, conn, color = "green") + c = pf.add_to_plot(p, "FLAT3", "LAMP_FLAT3_CURR", start, end, conn, color = "blue") + d = pf.add_to_plot(p, "FLAT4", "LAMP_FLAT4_CURR", start, end, conn, color = "brown") + e = pf.add_to_plot(p, "FLAT5", "LAMP_FLAT5_CURR", start, end, conn, color = "orange") + f = pf.add_to_plot(p, "LINE1", "LAMP_LINE1_CURR", start, end, conn, color = "cyan") + g = pf.add_to_plot(p, "LINE2", "LAMP_LINE2_CURR", start, end, conn, color = "darkmagenta") + h = pf.add_to_plot(p, "LINE3", "LAMP_LINE3_CURR", start, end, conn, color = "burlywood") + i = pf.add_to_plot(p, "LINE4", "LAMP_LINE4_CURR", start, end, conn, color = "darkkhaki") + j = pf.add_to_plot(p, "REF", "LAMP_REF_CURR", start, end, conn, color = "darkblue") + k = pf.add_to_plot(p, "TEST", "LAMP_TEST_CURR", start, end, conn, color = "goldenrod") + + pf.add_hover_tool(p,[a,b,c,d,e,f,g,h,i,j,k]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def caa_volt(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 600, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + a = pf.add_to_plot(p, "CAA_VREFOFF", "INRSH_CAA_VREFOFF", start, end, conn, color = "red") + b = pf.add_to_plot(p, "CAA_VREF", "INRSH_CAA_VREF", start, end, conn, color = "green") + + #pf.add_hover_tool(p,[a,b]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + + +def caa_plots(conn, start, end): + '''Combines plots to a tab + + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + + Return + ------ + p : tab object + used by dashboard.py to set up dashboard + ''' + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + +
+    <table style="width:100%">
+      <tr>
+        <th><h6>Plotname</h6></th>
+        <th><h6>Mnemonic</h6></th>
+        <th><h6>Description</h6></th>
+      </tr>
+      <tr>
+        <td>CAA Lamp Voltages</td>
+        <td>INRSH_LAMP_SEL<br>
+            INRSI_C_CAA_VOLTAGE</td>
+        <td>Lamp Voltage for each CAA Lamp</td>
+      </tr>
+      <tr>
+        <td>CAA Lamp Currents</td>
+        <td>INRSH_LAMP_SEL<br>
+            INRSI_C_CAA_CURRENT</td>
+        <td>Lamp Currents for each CAA Lamp</td>
+      </tr>
+    </table>
+ + """, width=1100) + + plot1 = lamp_volt(conn, start, end) + plot2 = lamp_current(conn, start, end) + #plot3 = caa_plots(conn, start, end) + + layout = Column(descr, plot1, plot2) + + tab = Panel(child = layout, title = "CAA/LAMPS") + + return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/fpe_fpa_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/fpe_fpa_tab.py new file mode 100644 index 000000000..991c5e816 --- /dev/null +++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/fpe_fpa_tab.py @@ -0,0 +1,350 @@ +#! /usr/bin/env python +"""Prepares plots for Temperature tab + + Module prepares plots for mnemonics below. Combines plots in a grid and + returns tab object. + + Plot 1 - ASIC 1 Voltages + IGDP_NRSD_ALG_A1_VDDA + IGDP_NRSD_ALG_A1GND4VDA + IGDP_NRSD_ALG_A1GND5VRF + INRSD_ALG_A1_VDD3P3 + INRSD_ALG_A1_VDD + INRSD_ALG_A1_REF + INRSD_A1_DSUB_V + INRSD_A1_VRESET_V + INRSD_A1_CELLDRN_V + INRSD_A1_DRAIN_V + INRSD_A1_VBIASGATE_V + INRSD_A1_VBIASPWR_V + + Plot 2 - ASIC 1 Currents + IGDP_NRSD_ALG_A1_VDD_C + IGDP_NRSD_ALG_A1VDAP12C + IGDP_NRSD_ALG_A1VDAN12C + INRSD_A1_VDDA_I + + Plot 3 - ASIC 2 Voltages + IGDP_NRSD_ALG_A2_VDDA + IGDP_NRSD_ALG_A2GND4VDA + IGDP_NRSD_ALG_A2GND5VRF + INRSD_ALG_A2_VDD3P3 + INRSD_ALG_A2_VDD + INRSD_ALG_A2_REF + INRSD_A2_DSUB_V + INRSD_A2_VRESET_V + INRSD_A2_CELLDRN_V + INRSD_A2_DRAIN_V + INRSD_A2_VBIASGATE_V + INRSD_A2_VBIASPWR_V + + Plot 4 - ASIC 2 Currents + IGDP_NRSD_ALG_A2_VDD_C + IGDP_NRSD_ALG_A2VDAP12C + IGDP_NRSD_ALG_A2VDAN12C + INRSD_A2_VDDA_I + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``nirspec_dashboard.py``, e.g.: + + :: + from .plots.fpa_fpe_tab import fpa_fpe_plots + tab = fpa_fpe_plots(conn, start, end) + +Dependencies +------------ + User must provide database "nirspec_database.db" + +""" +import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf +from bokeh.models import LinearAxis, Range1d +from bokeh.plotting import figure +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource, HoverTool +from bokeh.layouts import WidgetBox, gridplot, Column, Row + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def asic_1_voltages(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 800, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "ASIC 1 Voltages" + pf.add_basic_layout(p) + a = pf.add_to_plot(p, "VDDA", "IGDP_NRSD_ALG_A1_VDDA", start, end, conn, color = "burlywood") + b = pf.add_to_plot(p, "A1GND4VDA", "IGDP_NRSD_ALG_A1GND4VDA", start, end, conn, color = "cadetblue") + c = pf.add_to_plot(p, "A1GND5VRF", "IGDP_NRSD_ALG_A1GND5VRF", start, end, conn, color = "chartreuse") + d = pf.add_to_plot(p, "A1VDD3P3", "INRSD_ALG_A1_VDD3P3", start, end, conn, color = "chocolate") + e = pf.add_to_plot(p, "VDD", "INRSD_ALG_A1_VDD", start, end, conn, color = "coral") + f = pf.add_to_plot(p, "REF", "INRSD_ALG_A1_REF", start, end, conn, color = "darkorange") + g = pf.add_to_plot(p, "DSUB_V", "INRSD_A1_DSUB_V", start, end, conn, color = "crimson") + h = pf.add_to_plot(p, "VRESET_V", "INRSD_A1_VRESET_V", start, end, conn, color = "cyan") + i = pf.add_to_plot(p, "CELLDRN_V", "INRSD_A1_CELLDRN_V", start, end, conn, color = "darkblue") + j = pf.add_to_plot(p, "DRAIN_V", "INRSD_A1_DRAIN_V", start, end, conn, color = "darkgreen") + k = pf.add_to_plot(p, "VBIASGATE_V", "INRSD_A1_VBIASGATE_V", start, end, conn, color = "darkmagenta") + l = pf.add_to_plot(p, "VBIASPWR_V", "INRSD_A1_VBIASPWR_V", start, end, conn, color = "cornflowerblue") + pf.add_hover_tool(p,[a,b,c,d,e,f,g,h,i,j,k,l]) + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def asic_2_voltages(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 800, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "ASIC 2 Voltages" + pf.add_basic_layout(p) + a = pf.add_to_plot(p, "VDDA", "IGDP_NRSD_ALG_A2_VDDA", start, end, conn, color = "burlywood") + b = pf.add_to_plot(p, "A2GND4VDA", "IGDP_NRSD_ALG_A2GND4VDA", start, end, conn, color = "cadetblue") + c = pf.add_to_plot(p, "A2GND5VRF", "IGDP_NRSD_ALG_A2GND5VRF", start, end, conn, color = "chartreuse") + d = pf.add_to_plot(p, "A2VDD3P3", "INRSD_ALG_A2_VDD3P3", start, end, conn, color = "chocolate") + e = pf.add_to_plot(p, "VDD", "INRSD_ALG_A2_VDD", start, end, conn, color = "coral") + f = pf.add_to_plot(p, "REF", "INRSD_ALG_A2_REF", start, end, conn, color = "darkorange") + g = pf.add_to_plot(p, "DSUB_V", "INRSD_A2_DSUB_V", start, end, conn, color = "crimson") + h = pf.add_to_plot(p, "VRESET_V", "INRSD_A2_VRESET_V", start, end, conn, color = "cyan") + i = pf.add_to_plot(p, "CELLDRN_V", "INRSD_A2_CELLDRN_V", start, end, conn, color = "darkblue") + j = pf.add_to_plot(p, "DRAIN_V", "INRSD_A2_DRAIN_V", start, end, conn, color = "darkgreen") + k = pf.add_to_plot(p, "VBIASGATE_V", "INRSD_A2_VBIASGATE_V", start, end, conn, color = "darkmagenta") + l = pf.add_to_plot(p, "VBIASPWR_V", "INRSD_A2_VBIASPWR_V", start, end, conn, color = "cornflowerblue") + pf.add_hover_tool(p,[a,b,c,d,e,f,g,h,i,j,k,l]) + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def asic_1_currents(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 500, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Current (mA)') + + p.grid.visible = True + p.title.text = "ASIC 1 Currents" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "VDD_C", "IGDP_NRSD_ALG_A1_VDD_C", start, end, conn, color = "burlywood") + b = pf.add_to_plot(p, "A1VDAP12C", "IGDP_NRSD_ALG_A1VDAP12C", start, end, conn, color = "cadetblue") + c = pf.add_to_plot(p, "A1VDAN12C", "IGDP_NRSD_ALG_A1VDAN12C", start, end, conn, color = "chartreuse") + d = pf.add_to_plot(p, "VDDA_I", "INRSD_A1_VDDA_I", start, end, conn, color = "chocolate") + + pf.add_hover_tool(p,[a,b,c,d]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def asic_2_currents(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 500, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Current (mA)') + + p.grid.visible = True + p.title.text = "ASIC 2 Currents" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "VDD_C", "IGDP_NRSD_ALG_A2_VDD_C", start, end, conn, color = "burlywood") + b = pf.add_to_plot(p, "A2VDAP12C", "IGDP_NRSD_ALG_A2VDAP12C", start, end, conn, color = "cadetblue") + c = pf.add_to_plot(p, "A2VDAN12C", "IGDP_NRSD_ALG_A2VDAN12C", start, end, conn, color = "chartreuse") + d = pf.add_to_plot(p, "VDDA_I", "INRSD_A2_VDDA_I", start, end, conn, color = "chocolate") + + pf.add_hover_tool(p,[a,b,c,d]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + return p + + +def fpe_fpa_plots(conn, start, end): + '''Combines plots to a tab + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : tab object + used by dashboard.py to set up dashboard + ''' + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + +
+    <table style="width:100%">
+      <tr>
+        <th><h6>Plotname</h6></th>
+        <th><h6>Mnemonic</h6></th>
+        <th><h6>Description</h6></th>
+      </tr>
+      <tr>
+        <td>ASIC (1,2) Voltages</td>
+        <td>IGDP_NRSD_ALG_A(1,2)_VDDA<br>
+            IGDP_NRSD_ALG_A(1,2)GND4VDA<br>
+            IGDP_NRSD_ALG_A(1,2)GND5VRF<br>
+            INRSD_ALG_A(1,2)_VDD3P3<br>
+            INRSD_ALG_A(1,2)_VDD<br>
+            INRSD_ALG_A(1,2)_REF<br>
+            INRSD_A(1,2)_DSUB_V<br>
+            INRSD_A(1,2)_VRESET_V<br>
+            INRSD_A(1,2)_CELLDRN_V<br>
+            INRSD_A(1,2)_DRAIN_V<br>
+            INRSD_A(1,2)_VBIASGATE_V<br>
+            INRSD_A(1,2)_VBIASPWR_V</td>
+        <td>ASIC (1,2) VDDA Voltage<br>
+            ASIC (1,2) VDDA/Ground Voltage<br>
+            ASIC (1,2) Ref/Ground Voltage<br>
+            ASIC (1,2) VDD 3.3 Supply Voltage<br>
+            ASIC (1,2) VDD Voltage<br>
+            ASIC (1,2) Reference Voltage<br>
+            ASIC (1,2) Dsub Voltage<br>
+            ASIC (1,2) Reset Voltage<br>
+            ASIC (1,2) Cell Drain Voltage<br>
+            ASIC (1,2) Drain Voltage<br>
+            ASIC (1,2) Bias Gate Voltage<br>
+            ASIC (1,2) Bias Power Voltage</td>
+      </tr>
+      <tr>
+        <td>ASIC (1,2) Currents</td>
+        <td>IGDP_NRSD_ALG_A(1,2)_VDD_C<br>
+            IGDP_NRSD_ALG_A(1,2)VDAP12C<br>
+            IGDP_NRSD_ALG_A(1,2)VDAN12C<br>
+            INRSD_A(1,2)_VDDA_I</td>
+        <td>ASIC (1,2) VDD Current<br>
+            ASIC (1,2) VDDA +12V Current<br>
+            ASIC (1,2) VDDA -12V Current<br>
+            ASIC (1,2) VDDA Current</td>
+      </tr>
+    </table>
+ + """, width=1100) + + plot1 = asic_1_voltages(conn, start, end) + plot2 = asic_2_voltages(conn, start, end) + plot3 = asic_1_currents(conn, start, end) + plot4 = asic_2_currents(conn, start, end) + + currents = Row(plot3, plot4) + layout = Column(descr, plot1, plot2, currents) + + tab = Panel(child = layout, title = "FPE/FPA") + + return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/msa_mce_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/msa_mce_tab.py new file mode 100644 index 000000000..30f0ae988 --- /dev/null +++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/msa_mce_tab.py @@ -0,0 +1,549 @@ +#! /usr/bin/env python +"""Prepares plots for Temperature tab + + Module prepares plots for mnemonics below. Combines plots in a grid and + returns tab object. + + Plot 1 - MCE Board 1 (AIC) Voltages + INRSM_MCE_AIC_1R5_V + INRSM_MCE_AIC_3R3_V + INRSM_MCE_AIC_5_V + INRSM_MCE_AIC_P12_V + INRSM_MCE_AIC_N12_V + + Plot 2 - MCE Board 1 (AIC) Currents + INRSM_MCE_AIC_3R3_I + INRSM_MCE_AIC_5_I + INRSM_MCE_AIC_P12_I + INRSM_MCE_AIC_N12_I + + Plot 3 - MCE Board 2 (MDAC) Voltages + INRSM_MCE_MDAC_1R5_V + INRSM_MCE_MDAC_3R3_V + INRSM_MCE_MDAC_5_V + INRSM_MCE_MDAC_P12_V + INRSM_MCE_MDAC_N12_V + + Plot 4 - MCE Board 2 (MDAC) Currents + INRSM_MCE_MDAC_3R3_I + INRSM_MCE_MDAC_5_I + INRSM_MCE_MDAC_P12_I + INRSM_MCE_MDAC_N12_I + + Plot (5-8) - QUAD (1-4) + INRSM_MSA_Q(1-4)_365VDD + INRSM_MSA_Q(1-4)_365VPP + INRSM_MSA_Q(1-4)_171VPP + IGDPM_MSA_Q(1-4)_365IDD + IGDPM_MSA_Q(1-4)_365IPP + IGDPM_MSA_Q(1-4)_171RTN + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``nirspec_dashboard.py``, e.g.: + + :: + from .plots.msa_mce_tab import msa_mce_plots + tab = msa_mce_plots(conn, start, end) + +Dependencies +------------ + User must provide database "nirspec_database.db" + +""" +import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf +from bokeh.models import LinearAxis, Range1d +from bokeh.plotting import figure +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource, HoverTool, Title +from bokeh.layouts import WidgetBox, gridplot, Column + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def aic_voltage(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 700, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "MCE Board 1 (AIC)" + p.add_layout(Title(text="Voltages", text_font_style="italic", text_font_size="12pt"), 'above') + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "1R5_V", "INRSM_MCE_AIC_1R5_V", start, end, conn, color = "red") + b = pf.add_to_plot(p, "3R3_V", "INRSM_MCE_AIC_3R3_V", start, end, conn, color = "orange") + c = pf.add_to_plot(p, "5_V", "INRSM_MCE_AIC_5_V", start, end, conn, color = "brown") + d = pf.add_to_plot(p, "P12_V", "INRSM_MCE_AIC_P12_V", start, end, conn, color = "burlywood") + e = pf.add_to_plot(p, "N12_V", "INRSM_MCE_AIC_N12_V", start, end, conn, color = "darkmagenta") + + pf.add_hover_tool(p,[a,b,c,d,e]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def aic_current(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 700, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Current (A)') + + p.grid.visible = True + p.title.text = "MCE Board 1 (AIC)" + p.add_layout(Title(text="Currents", text_font_style="italic", text_font_size="12pt"), 'above') + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "3R3_I", "INRSM_MCE_AIC_3R3_I", start, end, conn, color = "blue") + b = pf.add_to_plot(p, "5_I", "INRSM_MCE_AIC_5_I", start, end, conn, color = "red") + c = pf.add_to_plot(p, "P12_I", "INRSM_MCE_AIC_P12_I", start, end, conn, color = "green") + d = pf.add_to_plot(p, "N12_I", "INRSM_MCE_AIC_N12_I", start, end, conn, color = "orange") + + pf.add_hover_tool(p,[a,b,c,d]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def mdac_voltage(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 700, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "MCE Board 2 (MDAC)" + p.add_layout(Title(text="Voltages", text_font_style="italic", text_font_size="12pt"), 'above') + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "1R5_V", "INRSM_MCE_MDAC_1R5_V", start, end, conn, color = "red") + b = pf.add_to_plot(p, "3R3_V", "INRSM_MCE_MDAC_3R3_V", start, end, conn, color = "orange") + c = pf.add_to_plot(p, "5_V", "INRSM_MCE_MDAC_5_V", start, end, conn, color = "brown") + d = pf.add_to_plot(p, "P12_V", "INRSM_MCE_MDAC_P12_V", start, end, conn, color = "burlywood") + e = pf.add_to_plot(p, "N12_V", "INRSM_MCE_MDAC_N12_V", start, end, conn, color = "darkmagenta") + + pf.add_hover_tool(p,[a,b,c,d,e]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def mdac_current(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 700, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "MCE Board 2 (MDAC)" + p.add_layout(Title(text="Currents", text_font_style="italic", text_font_size="12pt"), 'above') + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "3R3_I", "INRSM_MCE_MDAC_3R3_I", start, end, conn, color = "blue") + b = pf.add_to_plot(p, "5_I", "INRSM_MCE_MDAC_5_I", start, end, conn, color = "red") + c = pf.add_to_plot(p, "P12_I", "INRSM_MCE_MDAC_P12_I", start, end, conn, color = "green") + d = pf.add_to_plot(p, "N12_I", "INRSM_MCE_MDAC_N12_I", start, end, conn, color = "orange") + + pf.add_hover_tool(p,[a,b,c,d]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def quad1_volt(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 500, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "Quad 1" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "365VDD", "INRSM_MSA_Q1_365VDD", start, end, conn, color = "red") + b = pf.add_to_plot(p, "365VPP", "INRSM_MSA_Q1_365VPP", start, end, conn, color = "orange") + c = pf.add_to_plot(p, "171VPP", "INRSM_MSA_Q1_171VPP", start, end, conn, color = "brown") + d = pf.add_to_plot(p, "365IDD", "IGDPM_MSA_Q1_365IDD", start, end, conn, color = "burlywood") + e = pf.add_to_plot(p, "365IPP", "IGDPM_MSA_Q1_365IPP", start, end, conn, color = "darkmagenta") + f = pf.add_to_plot(p, "171RTN", "IGDPM_MSA_Q1_171RTN", start, end, conn, color = "blue") + + pf.add_hover_tool(p,[a,b,c,d,e,f]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def quad2_volt(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 500, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "Quad 2" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "365VDD", "INRSM_MSA_Q2_365VDD", start, end, conn, color = "red") + b = pf.add_to_plot(p, "365VPP", "INRSM_MSA_Q2_365VPP", start, end, conn, color = "orange") + c = pf.add_to_plot(p, "171VPP", "INRSM_MSA_Q2_171VPP", start, end, conn, color = "brown") + d = pf.add_to_plot(p, "365IDD", "IGDPM_MSA_Q2_365IDD", start, end, conn, color = "burlywood") + e = pf.add_to_plot(p, "365IPP", "IGDPM_MSA_Q2_365IPP", start, end, conn, color = "darkmagenta") + f = pf.add_to_plot(p, "171RTN", "IGDPM_MSA_Q2_171RTN", start, end, conn, color = "blue") + + pf.add_hover_tool(p,[a,b,c,d,e,f]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def quad3_volt(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 500, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "Quad 3" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "365VDD", "INRSM_MSA_Q3_365VDD", start, end, conn, color = "red") + b = pf.add_to_plot(p, "365VPP", "INRSM_MSA_Q3_365VPP", start, end, conn, color = "orange") + c = pf.add_to_plot(p, "171VPP", "INRSM_MSA_Q3_171VPP", start, end, conn, color = "brown") + d = pf.add_to_plot(p, "365IDD", "IGDPM_MSA_Q3_365IDD", start, end, conn, color = "burlywood") + e = pf.add_to_plot(p, "365IPP", "IGDPM_MSA_Q3_365IPP", start, end, conn, color = "darkmagenta") + f = pf.add_to_plot(p, "171RTN", "IGDPM_MSA_Q3_171RTN", start, end, conn, color = "blue") + + pf.add_hover_tool(p,[a,b,c,d,e,f]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def quad4_volt(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 560, + plot_height = 500, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "Quad 4" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "365VDD", "INRSM_MSA_Q4_365VDD", start, end, conn, color = "red") + b = pf.add_to_plot(p, "365VPP", "INRSM_MSA_Q4_365VPP", start, end, conn, color = "orange") + c = pf.add_to_plot(p, "171VPP", "INRSM_MSA_Q4_171VPP", start, end, conn, color = "brown") + d = pf.add_to_plot(p, "365IDD", "IGDPM_MSA_Q4_365IDD", start, end, conn, color = "burlywood") + e = pf.add_to_plot(p, "365IPP", "IGDPM_MSA_Q4_365IPP", start, end, conn, color = "darkmagenta") + f = pf.add_to_plot(p, "171RTN", "IGDPM_MSA_Q4_171RTN", start, end, conn, color = "blue") + + pf.add_hover_tool(p,[a,b,c,d,e,f]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def msa_mce_plots(conn, start, end): + '''Combines plots to a tab + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : tab object + used by dashboard.py to set up dashboard + ''' + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+    <table style="width:100%">
+      <tr>
+        <th><h6>Plotname</h6></th>
+        <th><h6>Mnemonic</h6></th>
+        <th><h6>Description</h6></th>
+      </tr>
+      <tr>
+        <td>MCE Board 1 (AIC) Voltages</td>
+        <td>INRSM_MCE_AIC_1R5_V<br>
+            INRSM_MCE_AIC_3R3_V<br>
+            INRSM_MCE_AIC_5_V<br>
+            INRSM_MCE_AIC_P12_V<br>
+            INRSM_MCE_AIC_N12_V<br></td>
+        <td>MCE AIC +1.5V Voltage<br>
+            MCE AIC +3.3V Voltage<br>
+            MCE AIC +5V Voltage<br>
+            MCE AIC +12V Voltage<br>
+            MCE AIC -12V Voltage<br></td>
+      </tr>
+      <tr>
+        <td>MCE Board 1 (AIC) Currents</td>
+        <td>INRSM_MCE_AIC_3R3_I<br>
+            INRSM_MCE_AIC_5_I<br>
+            INRSM_MCE_AIC_P12_I<br>
+            INRSM_MCE_AIC_N12_I<br></td>
+        <td>MCE AIC Board +3.3V Current<br>
+            MCE AIC Board +5V Current<br>
+            MCE AIC Board +12V Current<br>
+            MCE AIC Board -12V Current<br></td>
+      </tr>
+      <tr>
+        <td>MCE Board 2 (MDAC) Voltages</td>
+        <td>INRSM_MCE_MDAC_1R5_V<br>
+            INRSM_MCE_MDAC_3R3_V<br>
+            INRSM_MCE_MDAC_5_V<br>
+            INRSM_MCE_MDAC_P12_V<br>
+            INRSM_MCE_MDAC_N12_V<br></td>
+        <td>MCE MDAC +1.5V Voltage<br>
+            MCE MDAC +3.3V Voltage<br>
+            MCE MDAC +5V Voltage<br>
+            MCE MDAC +12V Voltage<br>
+            MCE MDAC -12V Voltage<br></td>
+      </tr>
+      <tr>
+        <td>MCE Board 2 (MDAC) Currents</td>
+        <td>INRSM_MCE_MDAC_3R3_I<br>
+            INRSM_MCE_MDAC_5_I<br>
+            INRSM_MCE_MDAC_P12_I<br>
+            INRSM_MCE_MDAC_N12_I<br></td>
+        <td>MCE MDAC Board +3.3V Current<br>
+            MCE MDAC Board +5V Current<br>
+            MCE MDAC Board +12V Current<br>
+            MCE MDAC Board -12V Current<br></td>
+      </tr>
+      <tr>
+        <td>QUAD (1-4)</td>
+        <td>INRSM_MSA_Q(1-4)_365VDD<br>
+            INRSM_MSA_Q(1-4)_365VPP<br>
+            INRSM_MSA_Q(1-4)_171VPP<br>
+            IGDPM_MSA_Q(1-4)_365IDD<br>
+            IGDPM_MSA_Q(1-4)_365IPP<br>
+            IGDPM_MSA_Q(1-4)_171RTN<br></td>
+        <td>MSA Quad (1-4) Vdd 365 Voltage<br>
+            MSA Quad (1-4) Vpp 365 Voltage<br>
+            MSA Quad (1-4) Vpp 171 Voltage<br>
+            MSA Quad (1-4) Vdd 365 Current<br>
+            MSA Quad (1-4) Vpp 365 Current<br>
+            MSA Quad (1-4) Return 171 Current<br></td>
+      </tr>
+    </table>
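For reference, a minimal driver for this tab might look as follows, following the ``Use`` and ``Dependencies`` notes in the module docstring. This is a hypothetical sketch, not part of this diff; the real consumer is ``nirspec_dashboard.py``, and the four-month window mirrors the ``typ. datetime.now() - 4 Months`` hint in the per-function docstrings.

    import sqlite3
    from datetime import datetime, timedelta

    from jwql.instrument_monitors.nirspec_monitors.data_trending.plots.msa_mce_tab import msa_mce_plots

    # "nirspec_database.db" is the database named under Dependencies
    conn = sqlite3.connect("nirspec_database.db")
    end = datetime.now()
    start = end - timedelta(days=120)       # roughly the typical four-month window
    tab = msa_mce_plots(conn, start, end)   # Panel object consumed by the dashboard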
+ + """, width=1100) + + plot1 = aic_voltage(conn, start, end) + plot2 = aic_current(conn, start, end) + plot3 = mdac_voltage(conn, start, end) + plot4 = mdac_current(conn, start, end) + plot5 = quad1_volt(conn, start, end) + plot6 = quad2_volt(conn, start, end) + plot7 = quad3_volt(conn, start, end) + plot8 = quad4_volt(conn, start, end) + + grid = gridplot([[plot1, plot2], + [plot3, plot4], + [plot5, plot6], + [plot7, plot8]],merge_tools=False) + layout = Column(descr, grid) + + tab = Panel(child = layout, title = "MSA/MCE") + + return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/plot_functions.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/plot_functions.py new file mode 100644 index 000000000..4b639cc7c --- /dev/null +++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/plot_functions.py @@ -0,0 +1,209 @@ +#! /usr/bin/env python +"""Auxilary functions for plots + + Module holds functions that are used for several plots. + + +Authors +------- + - Daniel Kühbacher + +Use +--- + + +Dependencies +------------ + +""" +import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql +from bokeh.plotting import figure +from bokeh.models import BoxAnnotation, LinearAxis, Range1d +from bokeh.embed import components +from bokeh.models.widgets import Panel, Tabs +from bokeh.models import ColumnDataSource, HoverTool, DatetimeTickFormatter, DatetimeTicker, SingleIntervalTicker +from bokeh.models.formatters import TickFormatter +from bokeh.models.tools import PanTool, SaveTool + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def pol_regression(x, y, rank): + ''' Calculate polynominal regression of certain rank + Parameters + ---------- + x : list + x parameters for regression + y : list + y parameters for regression + rank : int + rank of regression + Return + ------ + y_poly : list + regression y parameters + ''' + z = np.polyfit(x, y, rank) + f = np.poly1d(z) + y_poly = f(x) + return y_poly + +def add_hover_tool(p, rend): + ''' Append hover tool to plot + parameters + ---------- + p : bokeh figure + declares where to append hover tool + rend : list + list of renderer to append hover tool + ''' + + from bokeh.models import HoverTool + + #activate HoverTool for scatter plot + hover_tool = HoverTool( tooltips = + [ + ('Name', '$name'), + ('Count', '@data_points'), + ('Mean', '@average'), + ('Deviation', '@deviation'), + ], renderers = rend) + #append hover tool + p.tools.append(hover_tool) + +def add_limit_box(p, lower, upper, alpha = 0.1, color="green"): + ''' Adds box to plot + Parameters + ---------- + p : bokeh figure + declares where to append hover tool + lower : float + lower limit of box + upper : float + upper limit of box + alpha : float + transperency of box + color : str + filling color + ''' + box = BoxAnnotation(bottom = lower, top = upper, fill_alpha = alpha, fill_color = color) + p.add_layout(box) + +def add_to_plot(p, legend, mnemonic, start, end, conn, y_axis= "default", color="red", err='y'): + '''Add scatter and line to certain plot and activates hoover tool + Parameters + ---------- + p : bokeh object + defines plot where line and scatter should be added + legend : str + will be showed in legend of plot + mnemonic : str + defines mnemonic to be plotted + start : datetime + sets start time for data query + end : datetime + sets end time for data query + conn : DBobject + connection object to database + y_axis : str (default='default') + used if secon y axis 
is provided + color : str (default='dred') + defines color for scatter and line plot + Return + ------ + scat : plot scatter object + used for applying hovertools o plots + ''' + + #convert given start and end time to astropy time + start_str = str(Time(start).mjd) + end_str = str(Time(end).mjd) + + #prepare and execute sql query + sql_c = "SELECT * FROM "+mnemonic+" WHERE start_time BETWEEN "+start_str+" AND "+end_str+" ORDER BY start_time" + temp = pd.read_sql_query(sql_c, conn) + + #put data into Dataframe and define ColumnDataSource for each plot + #reg = pd.DataFrame({'reg' : pol_regression(temp['start_time'], temp['average'],3)}) + #temp = pd.concat([temp, reg], axis = 1) + temp['start_time'] = pd.to_datetime( Time(temp['start_time'], format = "mjd").datetime ) + plot_data = ColumnDataSource(temp) + + #plot data + p.line(x = "start_time", y = "average", color = color, y_range_name=y_axis, legend = legend, source = plot_data) + scat = p.scatter(x = "start_time", y = "average", name = mnemonic, color = color, y_range_name=y_axis, legend = legend, source = plot_data) + + #generate error lines if wished + if err != 'n': + #generate error bars + err_xs = [] + err_ys = [] + + for index, item in temp.iterrows(): + err_xs.append((item['start_time'], item['start_time'])) + err_ys.append((item['average'] - item['deviation'], item['average'] + item['deviation'])) + + # plot them + p.multi_line(err_xs, err_ys, color = color, legend = legend) + + return scat + +def add_to_plot_normalized(p, legend, mnemonic, start, end, conn, nominal, color = "red"): + '''Add line plot to figure (for wheelpositions) + Parameters + ---------- + p : bokeh object + defines figure where line schould be plotted + legend : str + will be showed in legend of plot + mnemonic : str + defines mnemonic to be plotted + start : datetime + sets start time for data query + end : datetime + sets end time for data query + conn : DBobject + connection object to database + color : str (default='dred') + defines color for scatter and line plot + ''' + + start_str = str(Time(start).mjd) + end_str = str(Time(end).mjd) + + sql_c = "SELECT * FROM "+mnemonic+" WHERE timestamp BETWEEN "+start_str+" AND "+end_str+" ORDER BY timestamp" + temp = pd.read_sql_query(sql_c, conn) + + #normalize values + temp['value'] -= nominal + #temp['value'] -= 1 + + temp['timestamp'] = pd.to_datetime( Time(temp['timestamp'], format = "mjd").datetime ) + plot_data = ColumnDataSource(temp) + + p.line(x = "timestamp", y = "value", color = color, legend = legend, source = plot_data) + p.scatter(x = "timestamp", y = "value", color = color, legend = legend, source = plot_data) + +def add_basic_layout(p): + '''Add basic layout to certain plot + Parameters + ---------- + p : bokeh object + defines plot where line and scatter should be added + ''' + p.title.align = "left" + p.title.text_color = "#c85108" + p.title.text_font_size = "25px" + p.background_fill_color = "#efefef" + + p.xaxis.axis_label_text_font_size = "14pt" + p.xaxis.axis_label_text_color ='#2D353C' + p.yaxis.axis_label_text_font_size = "14pt" + p.yaxis.axis_label_text_color = '#2D353C' + + p.xaxis.major_tick_line_color = "firebrick" + p.xaxis.major_tick_line_width = 2 + p.xaxis.minor_tick_line_color = "#c85108" diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/power_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/power_tab.py new file mode 100644 index 000000000..567ab8411 --- /dev/null +++ 
b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/power_tab.py @@ -0,0 +1,304 @@ +#! /usr/bin/env python +"""Prepares plots for POWER tab + + Module prepares plots for mnemonics below. Combines plots in a grid and + returns tab object. + + Plot 1 - ICE Power Data + GP_ZPSVOLT + SE_ZINRSICEA / SE_ZINRSICEB + INRSH_HK_P15V + INRSH_HK_N15V + INRSH_HK_VMOTOR + INRSH_HK_P5V + INRSH_HK_2P5V + INRSH_HK_ADCTGAIN + INRSH_HK_ADCTOFFSET + INRSH_OA_VREFOFF + INRSH_OA_VREF + + Plot 2 - MCE Power Data + GP_ZPSVOLT + SE_ZINRSMCEA / SE_ZINRSMCEB + + Plot 3 - FPE Power Data + GP_ZPSVOLT + SE_ZINRSFPEA / SE_ZINRSFPEB + INRSD_ALG_ACC_P12C + INRSD_ALG_ACC_N12C + INRSD_ALG_ACC_3D3_1D5_C + INRSD_ALG_CHASSIS + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``nirspec_dashboard.py``, e.g.: + + :: + from .plots.power_tab import power_plots + tab = power_plots(conn, start, end) + +Dependencies +------------ + User must provide database "miri_database.db" + +""" +import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf +from bokeh.models import LinearAxis, Range1d +from bokeh.plotting import figure +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource, HoverTool +from bokeh.layouts import WidgetBox, gridplot, Column + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def ice_power(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 700, + x_axis_type = 'datetime', + y_range = [-20, 20], + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "ICE Power Parameters" + pf.add_basic_layout(p) + + p.extra_y_ranges = {"current": Range1d(start = 0, end=0.8)} + #a = pf.add_to_plot(p, "In_VOlt", "GP_ZPSVOLT", start, end, conn, color = "red") + b = pf.add_to_plot(p, "ICE A current", "SE_ZINRSICEA", start, end, conn, color = "blue", y_axis="current") + c = pf.add_to_plot(p, "P15V", "INRSH_HK_P15V", start, end, conn, color = "red") + d = pf.add_to_plot(p, "N15V", "INRSH_HK_N15V", start, end, conn, color = "orange") + e = pf.add_to_plot(p, "VMOTOR", "INRSH_HK_VMOTOR", start, end, conn, color = "burlywood") + f = pf.add_to_plot(p, "P5V", "INRSH_HK_P5V", start, end, conn, color = "green") + g = pf.add_to_plot(p, "2P5V", "INRSH_HK_2P5V", start, end, conn, color = "darkgreen") + h = pf.add_to_plot(p, "ADCTGAIN", "INRSH_HK_ADCTGAIN", start, end, conn, color = "brown") + i = pf.add_to_plot(p, "ADCOFFSET", "INRSH_HK_ADCTOFFSET", start, end, conn, color = "navy") + p.add_layout(LinearAxis(y_range_name = "current", axis_label = "Current (A)", axis_label_text_color = "blue"), 'right') + + pf.add_hover_tool(p,[b,c,d,e,g,f,h,i]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def mce_power(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 400, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Current (A)') + + p.grid.visible = True + p.title.text = "MCE Power Parameters" + pf.add_basic_layout(p) + + b = pf.add_to_plot(p, "MCE A current", "SE_ZINRSMCEA", start, end, conn, color = "blue") + + pf.add_hover_tool(p,[b]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def fpe_power(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 700, + y_range = [-30,280], + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "FPE Power Parameters" + pf.add_basic_layout(p) + + + p.extra_y_ranges = {"current": Range1d(start = 0, end=0.8)} + #a = pf.add_to_plot(p, "In_VOlt", "GP_ZPSVOLT", start, end, conn, color = "red") + b = pf.add_to_plot(p, "FPE A current", "SE_ZINRSFPEA", start, end, conn, color = "blue", y_axis="current") + c = pf.add_to_plot(p, "P12C", "INRSD_ALG_ACC_P12C", start, end, conn, color = "red") + d = pf.add_to_plot(p, "N15V", "INRSH_HK_N15V", start, end, conn, color = "orange") + e = pf.add_to_plot(p, "N12C", "INRSD_ALG_ACC_N12C", start, end, conn, color = "burlywood") + f = pf.add_to_plot(p, "1D5", "INRSD_ALG_ACC_3D3_1D5_C", start, end, conn, color = "green") + g = pf.add_to_plot(p, "Chassis", "INRSD_ALG_CHASSIS", start, end, conn, color = "purple") + p.add_layout(LinearAxis(y_range_name = "current", axis_label = "Current (A)", axis_label_text_color = "blue"), 'right') + + pf.add_hover_tool(p,[b,c,d,e,f,g]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + + + +def power_plots(conn, start, end): + '''Combines plots to a tab + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : tab object + used by dashboard.py to set up dashboard + ''' + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+    <table style="width:100%">
+      <tr>
+        <th><h6>Plotname</h6></th>
+        <th><h6>Mnemonic</h6></th>
+        <th><h6>Description</h6></th>
+      </tr>
+      <tr>
+        <td>ICE Power Parameters</td>
+        <td>GP_ZPSVOLT (missing)<br>
+            SE_ZINRSICEA<br>
+            INRSH_HK_P15V<br>
+            INRSH_HK_N15V<br>
+            INRSH_HK_VMOTOR<br>
+            INRSH_HK_P5V<br>
+            INRSH_HK_2P5V<br>
+            INRSH_HK_ADCTGAIN<br>
+            INRSH_HK_ADCTOFFSET<br>
+            INRSH_OA_VREFOFF<br>
+            INRSH_OA_VREF<br></td>
+        <td>ICE Input Voltage<br>
+            ICE Input Current (A side)<br>
+            ICE +15V Voltage<br>
+            ICE -15V Voltage<br>
+            ICE Motor Voltage<br>
+            ICE +5V FPGA Voltage<br>
+            ICE +2V5 FPGA Voltage<br>
+            ICE ADC TM Chain Gain for Calibration<br>
+            ICE ADC TM Chain Offset for Calibration<br></td>
+      </tr>
+      <tr>
+        <td>MCE Power Parameters</td>
+        <td>GP_ZPSVOLT (missing)<br>
+            SE_ZINRSMCEA<br></td>
+        <td>MCE Input Voltage<br>
+            MCE Input Current (A side)<br></td>
+      </tr>
+      <tr>
+        <td>FPE Power Parameters</td>
+        <td>GP_ZPSVOLT (missing)<br>
+            SE_ZINRSFPEA<br>
+            INRSD_ALG_ACC_P12C<br>
+            INRSD_ALG_ACC_N12C<br>
+            INRSD_ALG_ACC_3D3_1D5_C<br>
+            INRSD_ALG_CHASSIS<br></td>
+        <td>FPE Input Voltage<br>
+            FPE Input Current (A side)<br>
+            ACC +12V Current<br>
+            ACC -12V Current<br>
+            ACC 3.3/1.5 Supply Current<br>
+            Chassis Voltage<br></td>
+      </tr>
+    </table>
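``ice_power`` and ``fpe_power`` overlay currents on a voltage plot through Bokeh's named extra ranges. Stripped to its essentials, the pattern is the following (a sketch with toy data, not the module's code verbatim):

    from bokeh.models import LinearAxis, Range1d
    from bokeh.plotting import figure

    p = figure(x_axis_type='datetime', y_axis_label='Voltage (V)')
    # voltages use the default (left) y-range; currents get a named extra range
    p.extra_y_ranges = {"current": Range1d(start=0, end=0.8)}
    p.add_layout(LinearAxis(y_range_name="current", axis_label="Current (A)"), 'right')
    # any renderer created with y_range_name="current" scales to the right-hand axis
    p.line(x=[0, 1], y=[0.2, 0.4], y_range_name="current")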
+ + """, width=1100) + + plot1 = ice_power(conn, start, end) + plot2 = mce_power(conn, start, end) + plot3 = fpe_power(conn, start, end) + + layout = Column(descr, plot1, plot2, plot3) + + tab = Panel(child = layout, title = "POWER") + + return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/temperature_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/temperature_tab.py new file mode 100644 index 000000000..ee7eecb50 --- /dev/null +++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/temperature_tab.py @@ -0,0 +1,642 @@ +#! /usr/bin/env python +"""Prepares plots for Temperature tab + + Module prepares plots for mnemonics below. Combines plots in a grid and + returns tab object. + + Plot 1 - IRSU monitored temps + SI_GZCTS75A / SI_GZCTS75B + SI_GZCTS68A / SI_GZCTS68B + SI_GZCTS81A / SI_GZCTS81B + SI_GZCTS80A / SI_GZCTS80B + SI_GZCTS76A / SI_GZCTS76B + SI_GZCTS79A / SI_GZCTS79B + SI_GZCTS77A / SI_GZCTS77B + SI_GZCTS78A / SI_GZCTS78B + SI_GZCTS69A / SI_GZCTS69B + + Plot 2 - Box Temps + IGDP_NRSD_ALG_TEMP + INRSH_HK_TEMP1 + INRSH_HK_TEMP2 + + Plot 3 - FPE Power Data + IGDP_NRSI_C_CAM_TEMP + IGDP_NRSI_C_COL_TEMP + IGDP_NRSI_C_COM1_TEMP + IGDP_NRSI_C_FOR_TEMP + IGDP_NRSI_C_IFU_TEMP + IGDP_NRSI_C_BP1_TEMP + IGDP_NRSI_C_BP2_TEMP + IGDP_NRSI_C_BP3_TEMP + IGDP_NRSI_C_BP4_TEMP + IGDP_NRSI_C_RMA_TEMP + IGDP_NRSI_C_CAAL1_TEMP + IGDP_NRSI_C_CAAL2_TEMP + IGDP_NRSI_C_CAAL3_TEMP + IGDP_NRSI_C_CAAL4_TEMP + IGDP_NRSI_C_FWA_TEMP + IGDP_NRSI_C_GWA_TEMP + + Plot 4 - MCE internal Temp + INRSM_MCE_PCA_TMP1 + INRSM_MCE_PCA_TMP2 + INRSM_MCE_AIC_TMP_FPGA + INRSM_MCE_AIC_TMP_ADC + INRSM_MCE_AIC_TMP_VREG + INRSM_MCE_MDAC_TMP_FPGA + INRSM_MCE_MDAC_TMP_OSC + INRSM_MCE_MDAC_TMP_BRD + INRSM_MCE_MDAC_TMP_PHA + INRSM_MCE_MDAC_TMP_PHB + + Plot 5 - MSA Temp + INRSM_Q1_TMP_A + INRSM_Q2_TMP_A + INRSM_Q3_TMP_A + INRSM_Q4_TMP_A + INRSM_MECH_MTR_TMP_A + INRSM_LL_MTR_TMP_A + INRSM_MSA_TMP_A + + Plot 6 - FPA Temp + IGDP_NRSD_ALG_FPA_TEMP + IGDP_NRSD_ALG_A1_TEMP + IGDP_NRSD_ALG_A2_TEMP + + Plot 7 - Heat Strap Temps (Trim heaters) + SI_GZCTS74A / SI_GZCTS74B + SI_GZCTS67A / SI_GZCTS67B + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``nirspec_dashboard.py``, e.g.: + + :: + from .plots.temperature_tab import temperature_plots + tab = temperature_plots(conn, start, end) + +Dependencies +------------ + User must provide database "nirspec_database.db" + +""" +import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf +from bokeh.models import LinearAxis, Range1d +from bokeh.plotting import figure +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource, HoverTool +from bokeh.layouts import WidgetBox, gridplot, Column + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def irsu_temp(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 700, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Temperature (K)') + + p.grid.visible = True + p.title.text = "IRSU monitored Temperatures" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "75A", "SI_GZCTS75A", start, end, conn, color = "red") + b = pf.add_to_plot(p, "68A", "SI_GZCTS68A", start, end, conn, color = "green") + c = pf.add_to_plot(p, "81A", "SI_GZCTS81A", start, end, conn, color = "blue") + d = pf.add_to_plot(p, "80A", "SI_GZCTS80A", start, end, conn, color = "orange") + e = pf.add_to_plot(p, "76A", "SI_GZCTS76A", start, end, conn, color = "brown") + f = pf.add_to_plot(p, "79A", "SI_GZCTS79A", start, end, conn, color = "cyan") + g = pf.add_to_plot(p, "77A", "SI_GZCTS77A", start, end, conn, color = "darkmagenta") + h = pf.add_to_plot(p, "78A", "SI_GZCTS78A ", start, end, conn, color = "burlywood") + i = pf.add_to_plot(p, "69A", "SI_GZCTS69A ", start, end, conn, color = "chocolate") + + pf.add_hover_tool(p,[a,b,c,d,e,f,g,h,i]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def fpe_temp(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 700, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Temperature (K)') + + p.grid.visible = True + p.title.text = "FPE Temperatures" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "CAM", "IGDP_NRSI_C_CAM_TEMP", start, end, conn, color = "red") + b = pf.add_to_plot(p, "COL", "IGDP_NRSI_C_COL_TEMP", start, end, conn, color = "green") + c = pf.add_to_plot(p, "COM1", "IGDP_NRSI_C_COM1_TEMP", start, end, conn, color = "blue") + d = pf.add_to_plot(p, "FOR", "IGDP_NRSI_C_FOR_TEMP", start, end, conn, color = "darkorange") + e = pf.add_to_plot(p, "IFU", "IGDP_NRSI_C_IFU_TEMP", start, end, conn, color = "cyan") + f = pf.add_to_plot(p, "BP1", "IGDP_NRSI_C_BP1_TEMP", start, end, conn, color = "darkmagenta") + g = pf.add_to_plot(p, "BP2", "IGDP_NRSI_C_BP2_TEMP", start, end, conn, color = "burlywood") + h = pf.add_to_plot(p, "BP3", "IGDP_NRSI_C_BP3_TEMP", start, end, conn, color = "brown") + i = pf.add_to_plot(p, "BP4", "IGDP_NRSI_C_BP4_TEMP", start, end, conn, color = "chocolate") + j = pf.add_to_plot(p, "RMA", "IGDP_NRSI_C_RMA_TEMP", start, end, conn, color = "darkgreen") + + pf.add_hover_tool(p,[a,b,c,d,e,f,g,h,i,j]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def caal_temp(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 700, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Temperature (K)') + + p.grid.visible = True + p.title.text = "CAA Lamps / FWA, GWA" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "CAAL1", "IGDP_NRSI_C_CAAL1_TEMP", start, end, conn, color = "darkblue") + b = pf.add_to_plot(p, "CAAL2", "IGDP_NRSI_C_CAAL2_TEMP", start, end, conn, color = "magenta") + c = pf.add_to_plot(p, "CAAL3", "IGDP_NRSI_C_CAAL3_TEMP", start, end, conn, color = "mediumaquamarine") + d = pf.add_to_plot(p, "CAAL4", "IGDP_NRSI_C_CAAL4_TEMP", start, end, conn, color = "goldenrod") + e = pf.add_to_plot(p, "FWA", "IGDP_NRSI_C_FWA_TEMP", start, end, conn, color = "darkseagreen") + f = pf.add_to_plot(p, "GWA", "IGDP_NRSI_C_GWA_TEMP", start, end, conn, color = "darkkhaki") + + pf.add_hover_tool(p,[a,b,c,d,e,f]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def box_temp(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 700, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Temperature (K)') + + p.grid.visible = True + p.title.text = "Box Temperatures" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "ALG_TEMP", "IGDP_NRSD_ALG_TEMP", start, end, conn, color = "red") + b = pf.add_to_plot(p, "HK_TEMP1", "INRSH_HK_TEMP1", start, end, conn, color = "green") + c = pf.add_to_plot(p, "HK_TEMP2", "INRSH_HK_TEMP2", start, end, conn, color = "blue") + + pf.add_hover_tool(p,[a,b,c]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def mce_internal_temp(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 700, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Temperature (K)') + + p.grid.visible = True + p.title.text = "MCE internal Temperatures" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "PCA_TMP1", "INRSM_MCE_PCA_TMP1", start, end, conn, color = "green") + b = pf.add_to_plot(p, "PCA_TMP2", "INRSM_MCE_PCA_TMP2", start, end, conn, color = "blue") + c = pf.add_to_plot(p, "FPGA_AIC", "INRSM_MCE_AIC_TMP_FPGA", start, end, conn, color = "brown") + d = pf.add_to_plot(p, "ADC_AIC", "INRSM_MCE_AIC_TMP_ADC", start, end, conn, color = "red") + e = pf.add_to_plot(p, "VREG_AIC", "INRSM_MCE_AIC_TMP_VREG", start, end, conn, color = "hotpink") + f = pf.add_to_plot(p, "FPGA_MDAC", "INRSM_MCE_MDAC_TMP_FPGA", start, end, conn, color = "cadetblue") + g = pf.add_to_plot(p, "OSC_MDAC", "INRSM_MCE_MDAC_TMP_OSC", start, end, conn, color = "navy") + h = pf.add_to_plot(p, "BRD_MDAC", "INRSM_MCE_MDAC_TMP_BRD", start, end, conn, color = "darkgreen") + i = pf.add_to_plot(p, "PHA_MDAC", "INRSM_MCE_MDAC_TMP_PHA", start, end, conn, color = "magenta") + j = pf.add_to_plot(p, "PHB_MDAC", "INRSM_MCE_MDAC_TMP_PHB", start, end, conn, color = "orange") + + pf.add_hover_tool(p,[a,b,c,d,e,f,g,h,i,j]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + +def msa_temp(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 700, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Temperature (K)') + + p.grid.visible = True + p.title.text = "MSA Temperatures" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "Q1_TEMP", "INRSM_Q1_TMP_A", start, end, conn, color = "green") + b = pf.add_to_plot(p, "Q2_TEMP", "INRSM_Q2_TMP_A", start, end, conn, color = "red") + c = pf.add_to_plot(p, "Q3_TEMP", "INRSM_Q3_TMP_A", start, end, conn, color = "blue") + d = pf.add_to_plot(p, "Q4_TEMP", "INRSM_Q4_TMP_A", start, end, conn, color = "brown") + e = pf.add_to_plot(p, "MECH_MTR", "INRSM_MECH_MTR_TMP_A", start, end, conn, color = "orange") + f = pf.add_to_plot(p, "LL_MTR", "INRSM_LL_MTR_TMP_A", start, end, conn, color = "darkmagenta") + g = pf.add_to_plot(p, "MSA", "INRSM_MSA_TMP_A", start, end, conn, color = "indigo") + + pf.add_hover_tool(p,[a,b,c,d,e,f,g]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + +def fpa_temp(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 700, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Temperature (K)') + + p.grid.visible = True + p.title.text = "FPA Temperatures" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "ALG_FPA", "IGDP_NRSD_ALG_FPA_TEMP", start, end, conn, color = "green") + b = pf.add_to_plot(p, "ALG_A1", "IGDP_NRSD_ALG_A1_TEMP", start, end, conn, color = "red") + c = pf.add_to_plot(p, "ALG_A2", "IGDP_NRSD_ALG_A2_TEMP", start, end, conn, color = "blue") + + pf.add_hover_tool(p,[a,b,c]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + + +def heat_strap_temp(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 700, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Temperature (K)') + + p.grid.visible = True + p.title.text = "Heat Strap Temperatures (Trim heaters)" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "74A", "SI_GZCTS74A", start, end, conn, color = "green") + b = pf.add_to_plot(p, "67A", "SI_GZCTS67A", start, end, conn, color = "red") + + pf.add_hover_tool(p,[a,b]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + + return p + + +def temperature_plots(conn, start, end): + '''Combines plots to a tab + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : tab object + used by dashboard.py to set up dashboard + ''' + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+    <table style="width:100%">
+      <tr>
+        <th><h6>Plotname</h6></th>
+        <th><h6>Mnemonic</h6></th>
+        <th><h6>Description</h6></th>
+      </tr>
+      <tr>
+        <td>IRSU monitored Temperatures</td>
+        <td>SI_GZCTS75A<br>
+            SI_GZCTS68A<br>
+            SI_GZCTS81A<br>
+            SI_GZCTS80A<br>
+            SI_GZCTS76A<br>
+            SI_GZCTS79A<br>
+            SI_GZCTS77A<br>
+            SI_GZCTS78A<br>
+            SI_GZCTS69A<br></td>
+        <td>CAA IRSU Temperature<br>
+            CAM IRSU Temperature<br>
+            COM1 Nominal IRSU Temperature<br>
+            COM1 Redundant IRSU Temperature<br>
+            FWA IRSU Temperature<br>
+            GWA IRSU Temperature<br>
+            Thermal Strap Nominal IRSU Temperature<br>
+            Thermal Strap Redundant IRSU Temperature<br>
+            MSA Nominal IRSU Temperature<br>
+            MSA Redundant IRSU Temperature<br></td>
+      </tr>
+      <tr>
+        <td>FPE Temperatures</td>
+        <td>IGDP_NRSI_C_CAM_TEMP<br>
+            IGDP_NRSI_C_COL_TEMP<br>
+            IGDP_NRSI_C_COM1_TEMP<br>
+            IGDP_NRSI_C_FOR_TEMP<br>
+            IGDP_NRSI_C_IFU_TEMP<br>
+            IGDP_NRSI_C_BP1_TEMP<br>
+            IGDP_NRSI_C_BP2_TEMP<br>
+            IGDP_NRSI_C_BP3_TEMP<br>
+            IGDP_NRSI_C_BP4_TEMP<br>
+            IGDP_NRSI_C_RMA_TEMP<br></td>
+        <td>OA CAM Temperature<br>
+            OA COL Temperature<br>
+            OA COM1 Temperature<br>
+            OA FOR Temperature<br>
+            OA IFU Temperature<br>
+            OA BP1 Temperature<br>
+            OA BP2 Temperature<br>
+            OA BP3 Temperature<br>
+            OA BP4 Temperature<br>
+            OA RMA Temperature<br></td>
+      </tr>
+      <tr>
+        <td>Box Temperatures</td>
+        <td>IGDP_NRSD_ALG_TEMP<br>
+            INRSH_HK_TEMP1<br>
+            INRSH_HK_TEMP2<br></td>
+        <td>ICE Internal Temperature 1<br>
+            ICE Internal Temperature 2<br></td>
+      </tr>
+      <tr>
+        <td>MCE internal Temperatures</td>
+        <td>INRSM_MCE_PCA_TMP1<br>
+            INRSM_MCE_PCA_TMP2<br>
+            INRSM_MCE_AIC_TMP_FPGA<br>
+            INRSM_MCE_AIC_TMP_ADC<br>
+            INRSM_MCE_AIC_TMP_VREG<br>
+            INRSM_MCE_MDAC_TMP_FPGA<br>
+            INRSM_MCE_MDAC_TMP_OSC<br>
+            INRSM_MCE_MDAC_TMP_BRD<br>
+            INRSM_MCE_MDAC_TMP_PHA<br>
+            INRSM_MCE_MDAC_TMP_PHB<br></td>
+        <td>MCE PCA Board Temperature 1<br>
+            MCE PCA Board Temperature 2<br>
+            MCE AIC Board FPGA Temperature<br>
+            MCE AIC Board Analog/Digital Converter Temperature<br>
+            MCE AIC Board Voltage Regulator Temperature<br>
+            MCE MDAC Board FPGA Temperature<br>
+            MCE MDAC Board Oscillator Temperature<br>
+            MCE MDAC Board Temperature<br>
+            MCE MDAC Board Phase A PA10 Temperature<br>
+            MCE MDAC Board Phase B PA10 Temperature<br></td>
+      </tr>
+      <tr>
+        <td>MSA Temperatures</td>
+        <td>INRSM_Q1_TMP_A<br>
+            INRSM_Q2_TMP_A<br>
+            INRSM_Q3_TMP_A<br>
+            INRSM_Q4_TMP_A<br>
+            INRSM_MECH_MTR_TMP_A<br>
+            INRSM_LL_MTR_TMP_A<br>
+            INRSM_MSA_TMP_A<br></td>
+        <td>MSA Quad 1 Temperature<br>
+            MSA Quad 2 Temperature<br>
+            MSA Quad 3 Temperature<br>
+            MSA Quad 4 Temperature<br>
+            MSA Magnetic Arm Motor Temperature<br>
+            MSA Launch Lock Motor Temperature<br>
+            MSA Frame Temperature<br></td>
+      </tr>
+      <tr>
+        <td>FPA Temperatures</td>
+        <td>IGDP_NRSD_ALG_FPA_TEMP<br>
+            IGDP_NRSD_ALG_A1_TEMP<br>
+            IGDP_NRSD_ALG_A2_TEMP<br></td>
+        <td>FPE Temperature<br>
+            FPA Temperature<br>
+            ASIC 1 Temperature<br>
+            ASIC 2 Temperature<br></td>
+      </tr>
+      <tr>
+        <td>Heat Strap Temperatures (Trim Heaters)</td>
+        <td>SI_GZCTS74A<br>
+            SI_GZCTS67A<br></td>
+        <td>FPA TH-Strap A Temperature from IRSU A<br>
+            FPA TH-Strap B Temperature from IRSU A<br></td>
+      </tr>
+      <tr>
+        <td>CAA Lamps / FWA, GWA</td>
+        <td>IGDP_NRSI_C_CAAL1_TEMP<br>
+            IGDP_NRSI_C_CAAL2_TEMP<br>
+            IGDP_NRSI_C_CAAL3_TEMP<br>
+            IGDP_NRSI_C_CAAL4_TEMP<br>
+            IGDP_NRSI_C_FWA_TEMP<br>
+            IGDP_NRSI_C_GWA_TEMP<br></td>
+        <td>CAA Temperature LINE1<br>
+            CAA Temperature LINE2<br>
+            CAA Temperature LINE3<br>
+            CAA Temperature LINE4<br>
+            FWA Temperature Sensor Value<br>
+            GWA Temperature Sensor Value<br></td>
+      </tr>
+    </table>
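Every curve in these tabs is fetched through ``plot_functions.add_to_plot``, which reads one database table per mnemonic, keyed by MJD timestamps. A condensed sketch of that query-and-convert step (the mnemonic name is chosen from the table above purely for illustration):

    import sqlite3
    from datetime import datetime, timedelta

    import pandas as pd
    from astropy.time import Time

    conn = sqlite3.connect("nirspec_database.db")
    mnemonic = "IGDP_NRSD_ALG_FPA_TEMP"   # one database table per mnemonic
    start, end = datetime.now() - timedelta(days=120), datetime.now()

    # timestamps are stored as MJD floats, so convert the window before querying
    start_mjd, end_mjd = str(Time(start).mjd), str(Time(end).mjd)
    sql_c = ("SELECT * FROM " + mnemonic +
             " WHERE start_time BETWEEN " + start_mjd + " AND " + end_mjd +
             " ORDER BY start_time")
    temp = pd.read_sql_query(sql_c, conn)
    # convert MJD back to datetime objects for the datetime x-axis
    temp['start_time'] = pd.to_datetime(Time(temp['start_time'], format="mjd").datetime)

Because the mnemonic name is concatenated directly into the SQL string, it should only ever come from the module's own mnemonic lists, never from user input.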
+ + """, width=1100) + + plot1 = irsu_temp(conn, start, end) + plot2 = fpe_temp(conn, start, end) + plot3 = box_temp(conn, start, end) + plot4 = mce_internal_temp(conn, start, end) + plot5 = msa_temp(conn, start, end) + plot6 = fpa_temp(conn, start, end) + plot7 = heat_strap_temp(conn, start, end) + plot8 = caal_temp(conn, start, end) + + layout = Column(descr, plot1, plot2, plot3, plot4, plot5, plot6, plot7, plot8) + + tab = Panel(child = layout, title = "TEMPERATURE") + + return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/voltage_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/voltage_tab.py new file mode 100644 index 000000000..81184f2fe --- /dev/null +++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/voltage_tab.py @@ -0,0 +1,263 @@ +#! /usr/bin/env python +"""Prepares plots for Ref. Voltage/Currents tab + + Module prepares plots for mnemonics below. Combines plots in a grid and + returns tab object. + + Plot 1 - Ref Voltages + INRSH_FWA_MOTOR_VREF + INRSH_GWA_MOTOR_VREF + INRSH_OA_VREF + + Plot 2 - ADCMGAIN (Voltages) + INRSH_FWA_ADCMGAIN + INRSH_GWA_ADCMGAIN + INRSH_RMA_ADCMGAIN + + Plot 3 - OFFSET (Voltages) + INRSH_GWA_ADCMOFFSET + INRSH_FWA_ADCMOFFSET + INRSH_OA_VREFOFF + INRSH_RMA_ADCMOFFSET + + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``nirspec_dashboard.py``, e.g.: + + :: + from .plots.voltage_tab import voltage_plots + tab = voltage_plots(conn, start, end) + +Dependencies +------------ + User must provide database "nirpsec_database.db" + +""" +import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf +from bokeh.models import LinearAxis, Range1d +from bokeh.plotting import figure +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource, HoverTool +from bokeh.layouts import WidgetBox, gridplot, Column + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def ref_volt(conn, start, end): + '''Create specific plot and return plot object + + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 500, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "Ref Voltages" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "FWA_MOTOR_VREF", "INRSH_FWA_MOTOR_VREF", start, end, conn, color = "green") + b = pf.add_to_plot(p, "GWA_MOTOR_VREF", "INRSH_GWA_MOTOR_VREF", start, end, conn, color = "blue") + c = pf.add_to_plot(p, "OA_VREF", "INRSH_OA_VREF", start, end, conn, color = "red") + + pf.add_hover_tool(p,[a,b,c]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + return p + + +def gain_volt(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 500, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "ADCMAIN" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "FWA_ADCMGAIN", "INRSH_FWA_ADCMGAIN", start, end, conn, color = "green") + b = pf.add_to_plot(p, "GWA_ADCMGAIN", "INRSH_GWA_ADCMGAIN", start, end, conn, color = "blue") + c = pf.add_to_plot(p, "RMA_ADCMGAIN", "INRSH_RMA_ADCMGAIN", start, end, conn, color = "red") + + + #pf.add_hover_tool(p,[a,b,c]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + return p + +def offset_volt(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", + toolbar_location = "above", + plot_width = 1120, + plot_height = 500, + x_axis_type = 'datetime', + output_backend = "webgl", + x_axis_label = 'Date', y_axis_label='Voltage (V)') + + p.grid.visible = True + p.title.text = "OFFSET" + pf.add_basic_layout(p) + + a = pf.add_to_plot(p, "GWA_ADCMOFFSET", "INRSH_GWA_ADCMOFFSET", start, end, conn, color = "blue") + b = pf.add_to_plot(p, "FWA_ADCMOFFSET", "INRSH_FWA_ADCMOFFSET", start, end, conn, color = "green") + c = pf.add_to_plot(p, "OA_VREFOFF", "INRSH_OA_VREFOFF", start, end, conn, color = "orange") + d = pf.add_to_plot(p, "RMA_ADCMOFFSET", "INRSH_RMA_ADCMOFFSET", start, end, conn, color = "red") + + pf.add_hover_tool(p,[a,b,c,d]) + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = "horizontal" + + return p + + +def volt_plots(conn, start, end): + '''Combines plots to a tab + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : tab object + used by dashboard.py to set up dashboard + ''' + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+    <table style="width:100%">
+      <tr>
+        <th><h6>Plotname</h6></th>
+        <th><h6>Mnemonic</h6></th>
+        <th><h6>Description</h6></th>
+      </tr>
+      <tr>
+        <td>Ref Voltages</td>
+        <td>INRSH_FWA_MOTOR_VREF<br>
+            INRSH_GWA_MOTOR_VREF<br>
+            INRSH_OA_VREF<br></td>
+        <td>FWA Motor Reference Voltage for Calibration<br>
+            GWA Motor Reference Voltage for Calibration<br>
+            OA/RMA Reference Voltage for TM Calibration<br></td>
+      </tr>
+      <tr>
+        <td>ADCMGAIN</td>
+        <td>INRSH_FWA_ADCMGAIN<br>
+            INRSH_GWA_ADCMGAIN<br>
+            INRSH_RMA_ADCMGAIN<br></td>
+        <td>FWA ADC Motor Chain Gain for Calibration<br>
+            GWA ADC Motor Chain Gain for Calibration<br>
+            RMA ADC Motor Chain Gain for Calibration<br></td>
+      </tr>
+      <tr>
+        <td>OFFSET</td>
+        <td>INRSH_FWA_ADCMOFFSET<br>
+            INRSH_GWA_ADCMOFFSET<br>
+            INRSH_OA_VREFOFF<br>
+            INRSH_RMA_ADCMOFFSET<br></td>
+        <td>FWA ADC Motor Chain Offset for Calibration<br>
+            GWA ADC Motor Chain Offset for Calibration<br>
+            CAA Reference Voltage Offset for TM Calibration<br>
+            RMA ADC Motor Chain Offset for Calibration<br></td>
+      </tr>
+    </table>
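Each ``*_plots`` function returns a ``Panel``, so the ``nirspec_dashboard.py`` consumer named in the docstrings presumably collects them into a ``Tabs`` widget. A hypothetical assembly sketch, not shown in this diff:

    import sqlite3
    from datetime import datetime, timedelta

    from bokeh.models.widgets import Tabs
    from jwql.instrument_monitors.nirspec_monitors.data_trending.plots.power_tab import power_plots
    from jwql.instrument_monitors.nirspec_monitors.data_trending.plots.voltage_tab import volt_plots

    conn = sqlite3.connect("nirspec_database.db")
    end = datetime.now()
    start = end - timedelta(days=120)
    dashboard = Tabs(tabs=[power_plots(conn, start, end),
                           volt_plots(conn, start, end)])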
+ + """, width=1100) + + plot1 = ref_volt(conn, start, end) + plot2 = gain_volt(conn, start, end) + plot3 = offset_volt(conn, start, end) + + layout = Column(descr, plot1, plot2, plot3) + + tab = Panel(child = layout, title = "REF VOLTAGES") + + return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/wheel_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/wheel_tab.py new file mode 100644 index 000000000..9abf7f2d2 --- /dev/null +++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/wheel_tab.py @@ -0,0 +1,260 @@ +#! /usr/bin/env python +"""Prepares plots for WHEEL tab + + Module prepares plots for mnemonics below. Combines plots in a grid and + returns tab object. + + Plot 1 - Filterwheel + INRSI_FWA_MECH_POS + INRSI_C_FWA_POSITION + + Plot 2 - Gratingwheel X + INRSI_GWA_MECH_POS + INRSI_C_GWA_X_POSITION + + Plot 3 - Gratingwheel Y + INRSI_GWA_MECH_POS + INRSI_C_GWA_Y_POSITION + +Authors +------- + - Daniel Kühbacher + +Use +--- + The functions within this module are intended to be imported and + used by ``dashboard.py``, e.g.: + + :: + from .plots.wheel_ratio_tab import wheel_plots + tab = wheel_plots(conn, start, end) + +Dependencies +------------ + User must provide database "miri_database.db" + +""" + +import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql +import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf +import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as mn +from bokeh.plotting import figure +from bokeh.models import BoxAnnotation, LinearAxis, Range1d +from bokeh.embed import components +from bokeh.models.widgets import Panel, Tabs, Div +from bokeh.models import ColumnDataSource +from bokeh.layouts import column, row, WidgetBox + +import pandas as pd +import numpy as np + +from astropy.time import Time + + +def fw(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 1120, \ + plot_height = 500, \ + y_range = [-3,3], \ + x_axis_type = 'datetime', \ + x_axis_label = 'Date', y_axis_label = 'mV (normalized)') + + p.grid.visible = True + p.title.text = "Filterwheel" + p.title.align = "left" + pf.add_basic_layout(p) + + pf.add_to_plot_normalized(p, "F110W", "INRSI_C_FWA_POSITION_F110W", start, end, conn, mn.fw_nominals['F110W'], color = "green") + pf.add_to_plot_normalized(p, "F100LP", "INRSI_C_FWA_POSITION_F100LP", start, end, conn, mn.fw_nominals['F100LP'], color = "red") + pf.add_to_plot_normalized(p, "F140X", "INRSI_C_FWA_POSITION_F140X", start, end, conn, mn.fw_nominals['F140X'], color = "blue") + pf.add_to_plot_normalized(p, "OPAQUE", "INRSI_C_FWA_POSITION_OPAQUE", start, end, conn, mn.fw_nominals['OPAQUE'], color = "orange") + pf.add_to_plot_normalized(p, "F290LP", "INRSI_C_FWA_POSITION_F290LP", start, end, conn, mn.fw_nominals['F290LP'], color = "purple") + pf.add_to_plot_normalized(p, "F170LP", "INRSI_C_FWA_POSITION_F170LP", start, end, conn, mn.fw_nominals['F170LP'], color = "brown") + pf.add_to_plot_normalized(p, "CLEAR", "INRSI_C_FWA_POSITION_CLEAR", start, end, conn, mn.fw_nominals['CLEAR'], color = "chocolate") + pf.add_to_plot_normalized(p, "F070LP", "INRSI_C_FWA_POSITION_F070LP", start, end, conn, mn.fw_nominals['F070LP'], color = "darkmagenta") + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = 'horizontal' + return p + + +def gwx(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 1120, \ + plot_height = 500, \ + y_range = [-4,4], \ + x_axis_type = 'datetime', \ + x_axis_label = 'Date', y_axis_label = 'mV (normalized)') + + p.grid.visible = True + p.title.text = "Gratingwheel X" + p.title.align = "left" + pf.add_basic_layout(p) + + pf.add_to_plot_normalized(p, "PRISM", "INRSI_C_GWA_X_POSITION_PRISM", start, end, conn, mn.gwx_nominals['PRISM'], color = "green") + pf.add_to_plot_normalized(p, "MIRROR", "INRSI_C_GWA_X_POSITION_MIRROR", start, end, conn, mn.gwx_nominals['MIRROR'], color = "blue") + pf.add_to_plot_normalized(p, "G140H", "INRSI_C_GWA_X_POSITION_G140H", start, end, conn, mn.gwx_nominals['G140H'], color = "red") + pf.add_to_plot_normalized(p, "G235H", "INRSI_C_GWA_X_POSITION_G235H", start, end, conn, mn.gwx_nominals['G235H'], color = "purple") + pf.add_to_plot_normalized(p, "G395H", "INRSI_C_GWA_X_POSITION_G395H", start, end, conn, mn.gwx_nominals['G395H'], color = "orange") + pf.add_to_plot_normalized(p, "G140M", "INRSI_C_GWA_X_POSITION_G140M", start, end, conn, mn.gwx_nominals['G140M'], color = "brown") + pf.add_to_plot_normalized(p, "G235M", "INRSI_C_GWA_X_POSITION_G235M", start, end, conn, mn.gwx_nominals['G235M'], color = "darkmagenta") + pf.add_to_plot_normalized(p, "G395M", "INRSI_C_GWA_X_POSITION_G395M", start, end, conn, mn.gwx_nominals['G395M'], color = "darkcyan") + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = 'horizontal' + + return p + +def gwy(conn, start, end): + '''Create specific plot and return plot object + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. 
datetime.now()) + Return + ------ + p : Plot object + Bokeh plot + ''' + + # create a new plot with a title and axis labels + p = figure( tools = "pan,wheel_zoom,box_zoom,reset,save", \ + toolbar_location = "above", \ + plot_width = 1120, \ + plot_height = 500, \ + y_range = [-3,3], \ + x_axis_type = 'datetime', \ + x_axis_label = 'Date', y_axis_label = 'mV (normalized)') + + p.grid.visible = True + p.title.text = "Gratingwheel Y" + p.title.align = "left" + pf.add_basic_layout(p) + + pf.add_to_plot_normalized(p, "PRISM", "INRSI_C_GWA_Y_POSITION_PRISM", start, end, conn, mn.gwy_nominals['PRISM'], color = "green") + pf.add_to_plot_normalized(p, "MIRROR", "INRSI_C_GWA_Y_POSITION_MIRROR", start, end, conn, mn.gwy_nominals['MIRROR'], color = "blue") + pf.add_to_plot_normalized(p, "G140H", "INRSI_C_GWA_Y_POSITION_G140H", start, end, conn, mn.gwy_nominals['G140H'], color = "red") + pf.add_to_plot_normalized(p, "G235H", "INRSI_C_GWA_Y_POSITION_G235H", start, end, conn, mn.gwy_nominals['G235H'], color = "purple") + pf.add_to_plot_normalized(p, "G395H", "INRSI_C_GWA_Y_POSITION_G395H", start, end, conn, mn.gwy_nominals['G395H'], color = "orange") + pf.add_to_plot_normalized(p, "G140M", "INRSI_C_GWA_Y_POSITION_G140M", start, end, conn, mn.gwy_nominals['G140M'], color = "brown") + pf.add_to_plot_normalized(p, "G235M", "INRSI_C_GWA_Y_POSITION_G235M", start, end, conn, mn.gwy_nominals['G235M'], color = "darkmagenta") + pf.add_to_plot_normalized(p, "G395M", "INRSI_C_GWA_Y_POSITION_G395M", start, end, conn, mn.gwy_nominals['G395M'], color = "darkcyan") + + p.legend.location = "bottom_right" + p.legend.click_policy = "hide" + p.legend.orientation = 'horizontal' + return p + +def wheel_pos(conn, start, end): + '''Combine plots to a tab + Parameters + ---------- + conn : DBobject + Connection object that represents database + start : time + Startlimit for x-axis and query (typ. datetime.now()- 4Months) + end : time + Endlimit for x-axis and query (typ. datetime.now()) + Return + ------ + p : tab object + used by dashboard.py to set up dashboard + ''' + descr = Div(text= + """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+      <tr>
+        <th><h6>Plotname</h6></th>
+        <th><h6>Mnemonic</h6></th>
+        <th><h6>Description</h6></th>
+      </tr>
+      <tr>
+        <td>Filterwheel</td>
+        <td>INRSI_FWA_MECH_POS<br>
+            INRSI_C_FWA_POSITION</td>
+        <td>Position Sensor Value<br>
+            Current Position</td>
+      </tr>
+      <tr>
+        <td>Gratingwheel X</td>
+        <td>INRSI_GWA_MECH_POS<br>
+            INRSI_C_GWA_X_POSITION</td>
+        <td>Position X Sensor Value<br>
+            Current Position</td>
+      </tr>
+      <tr>
+        <td>Gratingwheel Y</td>
+        <td>INRSI_GWA_MECH_POS<br>
+            INRSI_C_GWA_Y_POSITION</td>
+        <td>Position Y Sensor Value<br>
+            Current Position</td>
+      </tr>
+    </table>
+    </body>
+ + """, width=1100) + + plot1 = fw(conn, start, end) + plot2 = gwx(conn, start, end) + plot3 = gwy(conn, start, end) + + layout = column(descr, plot1, plot2, plot3) + tab = Panel(child = layout, title = "FILTER/GRATINGWHEEL") + + return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/__init__.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/condition.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/condition.py new file mode 100644 index 000000000..8bf7d5514 --- /dev/null +++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/condition.py @@ -0,0 +1,405 @@ +#! /usr/bin/env python +"""Module generates conditions over one or more mnemonics + +The modules purpose is to return True/False for any times by reference of +certain conditions. If for instance the condition "x>1" over a defined period of +time is needed, the module looks for all elements where the condition applies +and where it does not apply. This generates two lists, which contain the "start" +and "end" times of the condition. +A futher function combines the start- and endtimes to time-tuples between which +the condition is known as TRUE. A "state" function returns True/False for an +exact time attribute, whereby the condition is represented in binary form. + +Authors +------- + - Daniel Kühbacher + +Use +--- + This module is not prepared for standalone use. + + For use in programm set condition up like below: + + import the module as follow: + >>>import condition as cond + + generate list with required conditions: + >>>con_set = [ cond.equal(m.mnemonic('IMIR_HK_POM_LOOP'),'OFF'), + cond.smaller(m.mnemonic('IMIR_HK_ICE_SEC_VOLT1'),1), + cond.greater(m.mnemonic('SE_ZIMIRICEA'),0.2)] + + generate object of condition with the con_set as attribute: + >>>condition_object=cond.condition(con_set) + + Now the condition_object can return a True/False statement wheather + the time given as attribut meets the conditions: + + >>>if condition.state(float(element['Primary Time'])): + -> True when condition for the given time applies + -> False when condition for the given time is not applicable + +Dependencies +------------ + no external files needed + +References +---------- + +Notes +----- + +""" + + +class condition: + """Class to hold several subconditions""" + + #contains list of representative time pairs for each subcondition + cond_time_pairs = [] + #state of the condition + __state = False + + #initializes condition through condition set + def __init__(self, cond_set): + """Initialize object with set of conditions + Parameters + ---------- + cond_set : list + list contains subconditions objects + """ + self.cond_set = cond_set + + #destructor -> take care that all time_pairs are deleted! 
+    #destructor -> take care that all time_pairs are deleted!
+    def __del__(self):
+        """Delete object - destructor method"""
+        del self.cond_time_pairs[:]
+
+    #prints all stored time pairs (for development only)
+    def print_times(self):
+        """Print condition's time pairs on command line (development)"""
+        print('Available time pairs:')
+        for times in self.cond_time_pairs:
+            print('list: '+str(times))
+
+    #returns an interval if time is anywhere in between
+    def get_interval(self, time):
+        """Returns a time interval, if available, where "time" is in between
+        Parameters
+        ----------
+        time : float
+            given time attribute
+        Return
+        ------
+        time_pair : tuple
+            pair of start_time and end_time where time is in between
+        """
+        end_time = 10000000
+        start_time = 0
+
+        #do for every condition
+        for cond in self.cond_time_pairs:
+            #do for every time pair in condition
+            for pair in cond:
+                if (time > pair[0]) and (time < pair[1]):
+                    if (end_time > pair[1]) and (start_time < pair[0]):
+                        start_time = pair[0]
+                        end_time = pair[1]
+                    break
+
+        if (end_time != 10000000) and (start_time != 0):
+            return [start_time, end_time]
+        else:
+            return None
+
+
+    #generates time pairs out of start and end times
+    @staticmethod
+    def generate_time_pairs(start_times, end_times):
+        """Forms time pairs out of start times and end times
+        Parameters
+        ----------
+        start_times : list
+            contains all times where a condition applies
+        end_times : list
+            contains all times where the condition does not apply
+        Return
+        ------
+        time_pair : list
+            list of tuples with start and end time
+        """
+        #internal use only
+        time_pair = []
+
+        #when the condition doesn't apply at all
+        if not start_times:
+            time_pair.append((0,0))
+
+        #check if the condition indicates an open time range
+        elif not end_times:
+            time_pair.append((start_times[0], 0))
+
+        #generate time pairs:
+        #for each start time, the next higher or equal end time is searched;
+        #these times form a tuple which is appended to time_pair
+        else:
+            time_hook = 0
+            start_list = sorted(set(start_times))
+            end_list = sorted(set(end_times))
+
+            for start in start_list:
+
+                if start > time_hook:
+                    for end in end_list:
+
+                        if end > start:
+
+                            time_pair.append((start, end))
+                            time_hook = end
+                            break
+
+            #if the last start time has no end time, the range is still open
+            if start_list[-1] > end_list[-1]:
+                time_pair.append((end_list[-1], 0))
+
+        return time_pair
+
+
+    #returns state of the condition at a given time
+    #if state(given time)==True -> condition is true
+    #if state(given time)==False -> condition is false
+    def state(self, time):
+        """Checks whether the condition is true or false at a given time
+        Parameters
+        ----------
+        time : float
+            input time for condition query
+        Return
+        ------
+        state : bool
+            True/False statement whether the condition applies or not
+        """
+        #checks every subcondition in the condition set
+
+        state = self.__state
+
+        for cond in self.cond_time_pairs:
+
+            if self.__check_subcondition(cond, time):
+                state = True
+            else:
+                state = False
+                break
+
+        return state
+
+
+    def __check_subcondition(self, cond, time):
+
+        #if there are no values available
+        if cond[0][0] == 0:
+            return False
+
+        for time_pair in cond:
+            #if only a start time is available, return true for times after it
+            if (time_pair[1] == 0) and (time > time_pair[0]):
+
+                return True
+
+            #if the given time occurs between a time pair, return true
+            elif (time_pair[0]) <= time and (time < time_pair[1]):
+
+                return True
+
+            else:
+                pass
+
+        #no time pair contained the given time
+        return False
+
+
+class equal(condition):
+    """Class to hold a single "is equal" subcondition"""
+
+    stringval = True
+
+    #add attributes to function - start function "cond_true_time()"
+    def __init__(self, mnemonic, value, stringval=True):
+        """Initializes the subcondition
+        Parameters
+        ----------
+        mnemonic : astropy table
+            includes mnemonic engineering data and corresponding primary time
+        value : str
+            comparison value for the equal statement
+        stringval : bool
+            compare values as strings (True) or as floats (False)
+        """
+        self.mnemonic = mnemonic
+        self.value = value
+        self.stringval = stringval
+        condition.cond_time_pairs.append((self.cond_true_time()))
+
+
+    #generates a list of time tuples (start_time, end_time) that mark where
+    #the condition is true and where it is not
+    def cond_true_time(self):
+        """Filters all values that are equal to a given comparison value
+        if equal: Primary time -> temp_start
+        if not equal: Primary time -> temp_end
+        Return
+        ------
+        time_p : list
+            list of tuples with start and end time
+        """
+        temp_start = []
+        temp_end = []
+
+        for key in self.mnemonic:
+
+            #find all times whose raw values equal the given value
+            if self.stringval:
+                if key['value'] == self.value:
+                    temp_start.append(key["time"])
+
+                #find all end values
+                else:
+                    temp_end.append(key["time"])
+            else:
+                #compare as float values instead
+                if float(key['value']) == self.value:
+                    temp_start.append(key["time"])
+
+                #find all end values
+                else:
+                    temp_end.append(key["time"])
+
+        time_p = condition.generate_time_pairs(temp_start, temp_end)
+        return time_p
+
+class unequal(condition):
+    """Class to hold a single "is unequal" subcondition"""
+
+    #add attributes to function - start function "cond_true_time()"
+    def __init__(self, mnemonic, value):
+        """Initializes the subcondition
+        Parameters
+        ----------
+        mnemonic : astropy table
+            includes mnemonic engineering data and corresponding primary time
+        value : str
+            comparison value for the unequal statement
+        """
+        self.mnemonic = mnemonic
+        self.value = value
+        condition.cond_time_pairs.append((self.cond_true_time()))
+
+
+    #generates a list of time tuples (start_time, end_time) that mark where
+    #the condition is true and where it is not
+    def cond_true_time(self):
+        """Filters all values that are unequal to a given comparison value
+        if unequal: Primary time -> temp_start
+        if equal: Primary time -> temp_end
+        Return
+        ------
+        time_p : list
+            list of tuples with start and end time
+        """
+        temp_start = []
+        temp_end = []
+
+        for key in self.mnemonic:
+
+            #find all times whose raw values differ from the given value
+            if key['value'] != self.value:
+                temp_start.append(key["time"])
+
+            #find all end values
+            else:
+                temp_end.append(key["time"])
+
+        time_p = condition.generate_time_pairs(temp_start, temp_end)
+        return time_p
+
+class greater(condition):
+    """Class to hold a single "greater than" subcondition"""
+
+    #add attributes to function - start function "cond_true_time()"
+    def __init__(self, mnemonic, value):
+        """Initializes the subcondition
+        Parameters
+        ----------
+        mnemonic : astropy table
+            includes mnemonic engineering data and corresponding primary time
+        value : float
+            comparison value for the greater statement
+        """
+        self.mnemonic = mnemonic
+        self.value = value
+        condition.cond_time_pairs.append((self.cond_true_time()))
+
+    def cond_true_time(self):
+        """Filters all values that are greater than a given comparison value
+        if greater: Primary time -> temp_start
+        if not greater: Primary time -> temp_end
+        Return
+        ------
+        time_p : list
+            list of tuples with start and end time
+        """
+        temp_start = []
+        temp_end = []
+
+        for key in self.mnemonic:
+
+            #find all times whose raw values are greater than the given value
+            if float(key['value']) > self.value:
+                temp_start.append(key["time"])
+
+            #find all end values
+            else:
+                temp_end.append(key["time"])
+
+        time_p = condition.generate_time_pairs(temp_start, temp_end)
+        return time_p
+
+
+class smaller(condition):
+    """Class to hold a single "smaller than" subcondition"""
+
+    #add attributes to function - start function "cond_true_time()"
+    def __init__(self, mnemonic, value):
+        """Initializes the subcondition
+        Parameters
+        ----------
+        mnemonic : astropy table
+            includes mnemonic engineering data and corresponding primary time
+        value : float
+            comparison value for the smaller statement
+        """
+        self.mnemonic = mnemonic
+        self.value = value
+        condition.cond_time_pairs.append((self.cond_true_time()))
+
+    def cond_true_time(self):
+        """Filters all values that are smaller than a given comparison value
+        if smaller: Primary time -> temp_start
+        if not smaller: Primary time -> temp_end
+        Return
+        ------
+        time_p : list
+            list of tuples with start and end time
+        """
+        temp_start = []
+        temp_end = []
+
+        for key in self.mnemonic:
+
+            #find all times whose raw values are smaller than the given value
+            if float(key['value']) < self.value:
+                temp_start.append(key["time"])
+
+            #find all end values
+            else:
+                temp_end.append(key["time"])
+
+        time_p = condition.generate_time_pairs(temp_start, temp_end)
+        return time_p
+
+
+if __name__ == '__main__':
+    pass
diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/csv_to_AstropyTable.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/csv_to_AstropyTable.py
new file mode 100644
index 000000000..273e173dd
--- /dev/null
+++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/csv_to_AstropyTable.py
@@ -0,0 +1,151 @@
+#! /usr/bin/env python
+"""Module for importing and sorting mnemonics
+
+This module imports a whole set of mnemonics from a .CSV sheet and converts it
+to an astropy table. In a second step the table is sorted by its mnemonics
+and for each mnemonic another astropy table with reduced content is created.
+The last step is to append the data (time and engineering value) with its
+mnemonic identifier as key to a dictionary.
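+
+A minimal sketch of the intended use (the CSV path is hypothetical; the
+class and method names are the ones defined below):
+
+    >>>m = mnemonics('/path/to/nirspec_export.CSV')
+    >>>table = m.mnemonic('SE_ZINRSICEA')
+    >>>print(table['time'], table['value'])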
+
+Authors
+-------
+    - Daniel Kühbacher
+
+Use
+---
+
+
+Dependencies
+------------
+    mnemonics.py -> includes a list of mnemonics to be evaluated
+
+References
+----------
+
+Notes
+-----
+
+"""
+from astropy.table import Table
+from astropy.time import Time
+import warnings
+import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as mn
+
+
+class mnemonics:
+    """class to hold a set of mnemonics"""
+
+    __mnemonic_dict = {}
+
+    def __init__(self, import_path):
+        """Imports the CSV data and builds the mnemonic dictionary
+        Parameters
+        ----------
+        import_path : str
+            defines file to import (csv sheet)
+        """
+        imported_data = self.import_CSV(import_path)
+        length = len(imported_data)
+
+        print('{} was imported - {} lines'.format(import_path, length))
+
+        #look for every mnemonic given in mnemonics.py
+        for mnemonic_name in mn.mnemonic_set_query:
+            temp = self.sort_mnemonic(mnemonic_name, imported_data)
+            #append temp to dict with related mnemonic
+            if temp is not None:
+                self.__mnemonic_dict.update({mnemonic_name: temp})
+            else:
+                warnings.warn('no data for mnemonic {}'.format(mnemonic_name))
+
+
+    def import_CSV(self, path):
+        """Imports a csv sheet and converts it to an AstropyTable
+        Parameters
+        ----------
+        path : str
+            defines path to file to import
+        Return
+        ------
+        imported_data : AstropyTable
+            container for imported data
+        """
+        #read data from given *CSV file
+        imported_data = Table.read(path, format='ascii.basic', delimiter=',')
+        return imported_data
+
+
+    #returns table of single mnemonic
+    def mnemonic(self, name):
+        """Returns table of one single mnemonic
+        Parameters
+        ----------
+        name : str
+            name of mnemonic
+        Return
+        ------
+        __mnemonic_dict[name] : AstropyTable
+            corresponding table to mnemonic name
+        """
+        try:
+            return self.__mnemonic_dict[name]
+        except KeyError:
+            print('{} not in list'.format(name))
+
+
+    #looks for given mnemonic in given table
+    #returns list containing astropy tables with sorted mnemonics and engineering values
+    #adds useful meta data to Table
+    def sort_mnemonic(self, mnemonic, table):
+        """Looks for all values in table with identifier "mnemonic"
+        Converts time string to mjd format
+        Parameters
+        ----------
+        mnemonic : str
+            identifies which mnemonic to look for
+        table : AstropyTable
+            table that stores mnemonics and data
+        Return
+        ------
+        mnemonic_table : AstropyTable
+            stores all data associated with identifier "mnemonic"
+        """
+
+        temp1 = []
+        temp2 = []
+
+        #appends present mnemonic data to temp arrays temp1 and temp2
+        for item in table:
+            try:
+                if item['Telemetry Mnemonic'] == mnemonic:
+                    #convert time string to mjd format
+                    temp = item['Secondary Time'].replace('/', '-').replace(' ', 'T')
+                    t = Time(temp, format='isot')
+
+                    temp1.append(t.mjd)
+                    temp2.append(item['EU Value'])
+            except KeyError:
+                warnings.warn("{} is not in mnemonic table".format(mnemonic))
+
+        description = ('time', 'value')
+        data = [temp1, temp2]
+
+        #add some meta data
+        if len(temp1) > 0:
+            date_start = temp1[0]
+            date_end = temp1[-1]
+            info = {'start': date_start, 'end': date_end}
+        else:
+            #no data available: insert placeholder meta data
+            info = {"n": "n"}
+
+        #add name of mnemonic to meta data of list
+        info['mnemonic'] = mnemonic
+        info['len'] = len(temp1)
+
+        #table to return
+        mnemonic_table = Table(data, names=description,
+                               dtype=('f8', 'str'), meta=info)
+        return mnemonic_table
+
+if __name__ == '__main__':
+    pass
diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/mnemonics.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/mnemonics.py
new file mode 100644
index 000000000..181b981fd
--- /dev/null
+++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/mnemonics.py
@@ -0,0 +1,541 @@
+"""Module lists all necessary mnemonics for NIRSpec data trending
+
+The module includes several lists to import into the NIRSpec data trending
+monitor program. The lists are used for data acquisition and to set up the
+initial database.
+
+Authors
+-------
+    - Daniel Kühbacher
+
+Use
+---
+    import mnemonics as mn
+
+References
+----------
+    JWQL_NIRSpec_INputs_V4[2414].xlsx
+
+Notes
+-----
+
+"""
+
+#mnemonics underlying certain conditions 15min
+# INRSD_EXP_STAT != STARTED
+mnemonic_cond_1 = [
+"SE_ZINRSFPEA",
+"SE_ZINRSFPEB"]
+
+#mnemonics underlying condition 15min
+# INRSH_LAMP_SEL = NO_LAMP
+mnemonic_cond_2 = [
+"SE_ZINRSICEA",
+"SE_ZINRSICEB"]
+
+mnemonic_cond_3 = [
+"SE_ZINRSMCEA",
+"SE_ZINRSMCEB"]
+
+#mnemonics applicable when CAA is powered
+#INRSH_CAA_PWRF_ST = ON
+mnemonic_caa = [
+"IGDP_NRSI_C_CAAL1_TEMP",
+"IGDP_NRSI_C_CAAL2_TEMP",
+"IGDP_NRSI_C_CAAL3_TEMP",
+"IGDP_NRSI_C_CAAL4_TEMP"]
+
+#only applicable when Filter table 10 is set
+mnemonic_ft10 = [
+"INRSH_OA_VREFOFF",
+"INRSH_OA_VREF",
+
+"INRSH_CAA_VREFOFF",
+"INRSH_CAA_VREF",
+
+"INRSH_FWA_ADCMGAIN",
+"INRSH_FWA_ADCMOFFSET",
+"INRSH_FWA_MOTOR_VREF",
+
+"INRSH_GWA_ADCMGAIN",
+"INRSH_GWA_ADCMOFFSET",
+"INRSH_GWA_MOTOR_VREF",
+
+"INRSH_RMA_ADCMGAIN",
+"INRSH_RMA_ADCMOFFSET"]
+
+#all mnemonics used for conditions
+mnemonic_for_conditions = [
+"INRSM_MOVE_STAT",
+"INRSH_WHEEL_MOT_SVREF",
+"INRSI_CAA_ON_FLAG",
+"INRSH_LAMP_SEL",
+"INRSD_EXP_STAT",
+
+"INRSH_CAA_PWRF_ST",
+
+"INRSI_FWA_MOVE_ST",
+"INRSI_FWA_MECH_POS",
+"INRSI_GWA_MOVE_ST",
+"INRSI_GWA_MECH_POS",
+
+"INRSI_C_FWA_POSITION",
+"INRSI_C_GWA_X_POSITION",
+"INRSI_C_GWA_Y_POSITION",
+
+"ICTM_RT_FILTER"]
+
+#these mnemonics are used by the day routine
+mnemSet_day = [
+"INRSM_MSA_Q1_365VDD",
+"INRSM_MSA_Q1_365VPP",
+"INRSM_MSA_Q1_171VPP",
+"IGDPM_MSA_Q1_365IDD",
+"IGDPM_MSA_Q1_365IPP",
+"IGDPM_MSA_Q1_171RTN",
+
+"INRSM_MSA_Q2_365VDD",
+"INRSM_MSA_Q2_365VPP",
+"INRSM_MSA_Q2_171VPP",
+"IGDPM_MSA_Q2_365IDD",
+"IGDPM_MSA_Q2_365IPP",
+"IGDPM_MSA_Q2_171RTN",
+
+"INRSM_MSA_Q3_365VDD",
+"INRSM_MSA_Q3_365VPP",
+"INRSM_MSA_Q3_171VPP",
+"IGDPM_MSA_Q3_365IDD",
+"IGDPM_MSA_Q3_365IPP",
+"IGDPM_MSA_Q3_171RTN",
+
+"INRSM_MSA_Q4_365VDD",
+"INRSM_MSA_Q4_365VPP",
+"INRSM_MSA_Q4_171VPP",
+"IGDPM_MSA_Q4_365IDD",
+"IGDPM_MSA_Q4_365IPP",
+"IGDPM_MSA_Q4_171RTN",
+
+"IGDP_NRSD_ALG_FPA_TEMP",
+"IGDP_NRSD_ALG_A1_TEMP",
+"IGDP_NRSD_ALG_A2_TEMP",
+"IGDP_NRSI_C_FWA_TEMP",
+"IGDP_NRSI_C_GWA_TEMP",
+
+"SI_GZCTS74A",
+"SI_GZCTS74B",
+"SI_GZCTS67A",
+"SI_GZCTS67B"]
+
+#these mnemonics are used by the 15min routine
+mnemSet_15min = [
+"IGDP_NRSD_ALG_TEMP",
+
+"INRSD_ALG_ACC_P12C",
+"INRSD_ALG_ACC_N12C",
+"INRSD_ALG_ACC_3D3_1D5_C",
+"INRSD_ALG_CHASSIS",
+
+"IGDP_NRSD_ALG_A1_VDD_C",
+"IGDP_NRSD_ALG_A1_VDDA",
+"IGDP_NRSD_ALG_A1VDAP12C",
+"IGDP_NRSD_ALG_A1VDAN12C",
+"IGDP_NRSD_ALG_A1GND4VDA",
+"IGDP_NRSD_ALG_A1GND5VRF",
+"INRSD_ALG_A1_VDD3P3",
+"INRSD_ALG_A1_VDD",
+"INRSD_ALG_A1_REF",
+"INRSD_A1_DSUB_V",
+"INRSD_A1_VRESET_V",
+"INRSD_A1_CELLDRN_V",
+"INRSD_A1_DRAIN_V",
+"INRSD_A1_VBIASGATE_V",
+"INRSD_A1_VBIASPWR_V",
+"INRSD_A1_VDDA_I",
+
+"IGDP_NRSD_ALG_A2_VDD_C",
+"IGDP_NRSD_ALG_A2_VDDA",
+"IGDP_NRSD_ALG_A2VDAP12C",
+"IGDP_NRSD_ALG_A2VDAN12C",
+"IGDP_NRSD_ALG_A2GND4VDA",
+"IGDP_NRSD_ALG_A2GND5VRF",
+"INRSD_ALG_A2_VDD3P3",
+"INRSD_ALG_A2_VDD",
+"INRSD_ALG_A2_REF",
+"INRSD_A2_DSUB_V",
+"INRSD_A2_VRESET_V",
+"INRSD_A2_CELLDRN_V",
+"INRSD_A2_DRAIN_V",
+"INRSD_A2_VBIASGATE_V",
+"INRSD_A2_VBIASPWR_V",
+"INRSD_A2_VDDA_I", + +"INRSH_HK_TEMP1", +"INRSH_HK_TEMP2", + +"INRSH_HK_P15V", +"INRSH_HK_N15V", +"INRSH_HK_VMOTOR", +"INRSH_HK_P5V", +"INRSH_HK_2P5V", +"INRSH_HK_ADCTGAIN", +"INRSH_HK_ADCTOFFSET", + +"IGDP_NRSI_C_CAM_TEMP", +"IGDP_NRSI_C_COL_TEMP", +"IGDP_NRSI_C_COM1_TEMP", +"IGDP_NRSI_C_FOR_TEMP", +"IGDP_NRSI_C_IFU_TEMP", +"IGDP_NRSI_C_BP1_TEMP", +"IGDP_NRSI_C_BP2_TEMP", +"IGDP_NRSI_C_BP3_TEMP", +"IGDP_NRSI_C_BP4_TEMP", +"IGDP_NRSI_C_RMA_TEMP", + +"SI_GZCTS75A", +"SI_GZCTS68A", +"SI_GZCTS81A", +"SI_GZCTS80A", +"SI_GZCTS70A", +"SI_GZCTS76A", +"SI_GZCTS79A", +"SI_GZCTS77A", +"SI_GZCTS78A", +"SI_GZCTS69A", + +"INRSM_MCE_AIC_1R5_V", +"INRSM_MCE_AIC_3R3_V", +"INRSM_MCE_AIC_5_V", +"INRSM_MCE_AIC_P12_V", +"INRSM_MCE_AIC_N12_V", +"INRSM_MCE_AIC_3R3_I", +"INRSM_MCE_AIC_5_I", +"INRSM_MCE_AIC_P12_I", +"INRSM_MCE_AIC_N12_I", + +"INRSM_MCE_MDAC_1R5_V", +"INRSM_MCE_MDAC_3R3_V", +"INRSM_MCE_MDAC_5_V", +"INRSM_MCE_MDAC_P12_V", +"INRSM_MCE_MDAC_N12_V", +"INRSM_MCE_MDAC_3R3_I", +"INRSM_MCE_MDAC_5_I", +"INRSM_MCE_MDAC_P12_I", +"INRSM_MCE_MDAC_N12_I", + +"INRSM_MCE_PCA_TMP1", +"INRSM_MCE_PCA_TMP2", +"INRSM_MCE_AIC_TMP_FPGA", +"INRSM_MCE_AIC_TMP_ADC", +"INRSM_MCE_AIC_TMP_VREG", +"INRSM_MCE_MDAC_TMP_FPGA", +"INRSM_MCE_MDAC_TMP_OSC", +"INRSM_MCE_MDAC_TMP_BRD", +"INRSM_MCE_MDAC_TMP_PHA", +"INRSM_MCE_MDAC_TMP_PHB", + +"INRSM_Q1_TMP_A", +"INRSM_Q2_TMP_A", +"INRSM_Q3_TMP_A", +"INRSM_Q4_TMP_A", +"INRSM_MECH_MTR_TMP_A", +"INRSM_LL_MTR_TMP_A", +"INRSM_MSA_TMP_A"] + +#mnemonic set for setting up database +mnemonic_set_database = [ +"GP_ZPSVOLT", +"SE_ZINRSFPEA", +"SE_ZINRSFPEB", + +"IGDP_NRSD_ALG_TEMP", + +"IGDP_NRSD_ALG_FPA_TEMP", +"IGDP_NRSD_ALG_A1_TEMP", +"IGDP_NRSD_ALG_A2_TEMP", +"SI_GZCTS74A", +"SI_GZCTS74B", +"SI_GZCTS67A", +"SI_GZCTS67B", + +"INRSD_ALG_ACC_P12C", +"INRSD_ALG_ACC_N12C", +"INRSD_ALG_ACC_3D3_1D5_C", +"INRSD_ALG_CHASSIS", + +"IGDP_NRSD_ALG_A1_VDD_C", +"IGDP_NRSD_ALG_A1_VDDA", +"IGDP_NRSD_ALG_A1VDAP12C", +"IGDP_NRSD_ALG_A1VDAN12C", +"IGDP_NRSD_ALG_A1GND4VDA", +"IGDP_NRSD_ALG_A1GND5VRF", +"INRSD_ALG_A1_VDD3P3", +"INRSD_ALG_A1_VDD", +"INRSD_ALG_A1_REF", +"INRSD_A1_DSUB_V", +"INRSD_A1_VRESET_V", +"INRSD_A1_CELLDRN_V", +"INRSD_A1_DRAIN_V", +"INRSD_A1_VBIASGATE_V", +"INRSD_A1_VBIASPWR_V", +"INRSD_A1_VDDA_I", + +"IGDP_NRSD_ALG_A2_VDD_C", +"IGDP_NRSD_ALG_A2_VDDA", +"IGDP_NRSD_ALG_A2VDAP12C", +"IGDP_NRSD_ALG_A2VDAN12C", +"IGDP_NRSD_ALG_A2GND4VDA", +"IGDP_NRSD_ALG_A2GND5VRF", +"INRSD_ALG_A2_VDD3P3", +"INRSD_ALG_A2_VDD", +"INRSD_ALG_A2_REF", +"INRSD_A2_DSUB_V", +"INRSD_A2_VRESET_V", +"INRSD_A2_CELLDRN_V", +"INRSD_A2_DRAIN_V", +"INRSD_A2_VBIASGATE_V", +"INRSD_A2_VBIASPWR_V", +"INRSD_A2_VDDA_I", + +"SE_ZINRSICEA", +"SE_ZINRSICEB", + +"INRSH_HK_TEMP1", +"INRSH_HK_TEMP2", + +"INRSH_HK_P15V", +"INRSH_HK_N15V", +"INRSH_HK_VMOTOR", +"INRSH_HK_P5V", +"INRSH_HK_2P5V", +"INRSH_HK_ADCTGAIN", +"INRSH_HK_ADCTOFFSET", + +"INRSH_OA_VREFOFF", +"INRSH_OA_VREF", + +"IGDP_NRSI_C_CAM_TEMP", +"IGDP_NRSI_C_COL_TEMP", +"IGDP_NRSI_C_COM1_TEMP", +"IGDP_NRSI_C_FOR_TEMP", +"IGDP_NRSI_C_IFU_TEMP", +"IGDP_NRSI_C_BP1_TEMP", +"IGDP_NRSI_C_BP2_TEMP", +"IGDP_NRSI_C_BP3_TEMP", +"IGDP_NRSI_C_BP4_TEMP", +"IGDP_NRSI_C_RMA_TEMP", + +"INRSH_CAA_VREFOFF", +"INRSH_CAA_VREF", + +"INRSH_LAMP_SEL", +"INRSI_C_CAA_CURRENT", +"INRSI_C_CAA_VOLTAGE", + +"IGDP_NRSI_C_CAAL1_TEMP", +"IGDP_NRSI_C_CAAL2_TEMP", +"IGDP_NRSI_C_CAAL3_TEMP", +"IGDP_NRSI_C_CAAL4_TEMP", + +"INRSH_FWA_ADCMGAIN", +"INRSH_FWA_ADCMOFFSET", +"INRSH_FWA_MOTOR_VREF", + +"IGDP_NRSI_C_FWA_TEMP", + +"INRSH_GWA_ADCMGAIN", +"INRSH_GWA_ADCMOFFSET", +"INRSH_GWA_MOTOR_VREF", + 
+"IGDP_NRSI_C_GWA_TEMP", + +"INRSH_RMA_ADCMGAIN", +"INRSH_RMA_ADCMOFFSET", + +"SI_GZCTS75A", +"SI_GZCTS68A", +"SI_GZCTS81A", +"SI_GZCTS80A", +"SI_GZCTS70A", +"SI_GZCTS76A", +"SI_GZCTS79A", +"SI_GZCTS77A", +"SI_GZCTS78A", +"SI_GZCTS69A", +"SI_GZCTS75B", +"SI_GZCTS68B", +"SI_GZCTS81B", +"SI_GZCTS80B", +"SI_GZCTS70B", +"SI_GZCTS76B", +"SI_GZCTS79B", +"SI_GZCTS77B", +"SI_GZCTS78B", +"SI_GZCTS69B", + +"SE_ZINRSMCEA", +"SE_ZINRSMCEB", + +"INRSM_MCE_AIC_1R5_V", +"INRSM_MCE_AIC_3R3_V", +"INRSM_MCE_AIC_5_V", +"INRSM_MCE_AIC_P12_V", +"INRSM_MCE_AIC_N12_V", +"INRSM_MCE_AIC_3R3_I", +"INRSM_MCE_AIC_5_I", +"INRSM_MCE_AIC_P12_I", +"INRSM_MCE_AIC_N12_I", + +"INRSM_MCE_MDAC_1R5_V", +"INRSM_MCE_MDAC_3R3_V", +"INRSM_MCE_MDAC_5_V", +"INRSM_MCE_MDAC_P12_V", +"INRSM_MCE_MDAC_N12_V", +"INRSM_MCE_MDAC_3R3_I", +"INRSM_MCE_MDAC_5_I", +"INRSM_MCE_MDAC_P12_I", +"INRSM_MCE_MDAC_N12_I", + +"INRSM_MCE_PCA_TMP1", +"INRSM_MCE_PCA_TMP2", +"INRSM_MCE_AIC_TMP_FPGA", +"INRSM_MCE_AIC_TMP_ADC", +"INRSM_MCE_AIC_TMP_VREG", +"INRSM_MCE_MDAC_TMP_FPGA", +"INRSM_MCE_MDAC_TMP_OSC", +"INRSM_MCE_MDAC_TMP_BRD", +"INRSM_MCE_MDAC_TMP_PHA", +"INRSM_MCE_MDAC_TMP_PHB", + +"INRSM_Q1_TMP_A", +"INRSM_Q2_TMP_A", +"INRSM_Q3_TMP_A", +"INRSM_Q4_TMP_A", +"INRSM_MECH_MTR_TMP_A", +"INRSM_LL_MTR_TMP_A", +"INRSM_MSA_TMP_A", + +"INRSM_Q1_TMP_B", +"INRSM_Q2_TMP_B", +"INRSM_Q3_TMP_B", +"INRSM_Q4_TMP_B", +"INRSM_MECH_MTR_TMP_B", +"INRSM_LL_MTR_TMP_B", +"INRSM_MSA_TMP_B", + +"INRSM_MSA_Q1_365VDD", +"INRSM_MSA_Q1_365VPP", +"INRSM_MSA_Q1_171VPP", +"IGDPM_MSA_Q1_365IDD", +"IGDPM_MSA_Q1_365IPP", +"IGDPM_MSA_Q1_171RTN", + +"INRSM_MSA_Q2_365VDD", +"INRSM_MSA_Q2_365VPP", +"INRSM_MSA_Q2_171VPP", +"IGDPM_MSA_Q2_365IDD", +"IGDPM_MSA_Q2_365IPP", +"IGDPM_MSA_Q2_171RTN", + +"INRSM_MSA_Q3_365VDD", +"INRSM_MSA_Q3_365VPP", +"INRSM_MSA_Q3_171VPP", +"IGDPM_MSA_Q3_365IDD", +"IGDPM_MSA_Q3_365IPP", +"IGDPM_MSA_Q3_171RTN", + +"INRSM_MSA_Q4_365VDD", +"INRSM_MSA_Q4_365VPP", +"INRSM_MSA_Q4_171VPP", +"IGDPM_MSA_Q4_365IDD", +"IGDPM_MSA_Q4_365IPP", +"IGDPM_MSA_Q4_171RTN", + +"LAMP_FLAT1_CURR", +"LAMP_FLAT2_CURR", +"LAMP_FLAT3_CURR", +"LAMP_FLAT4_CURR", +"LAMP_FLAT5_CURR", +"LAMP_LINE1_CURR", +"LAMP_LINE2_CURR", +"LAMP_LINE3_CURR", +"LAMP_LINE4_CURR", +"LAMP_REF_CURR", +"LAMP_TEST_CURR", + +"LAMP_FLAT1_VOLT", +"LAMP_FLAT2_VOLT", +"LAMP_FLAT3_VOLT", +"LAMP_FLAT4_VOLT", +"LAMP_FLAT5_VOLT", +"LAMP_LINE1_VOLT", +"LAMP_LINE2_VOLT", +"LAMP_LINE3_VOLT", +"LAMP_LINE4_VOLT", +"LAMP_REF_VOLT", +"LAMP_TEST_VOLT"] + +mnemonic_wheelpositions = [ +"INRSI_C_FWA_POSITION_F110W", +"INRSI_C_FWA_POSITION_F100LP", +"INRSI_C_FWA_POSITION_F140X", +"INRSI_C_FWA_POSITION_OPAQUE", +"INRSI_C_FWA_POSITION_F290LP", +"INRSI_C_FWA_POSITION_F170LP", +"INRSI_C_FWA_POSITION_CLEAR", +"INRSI_C_FWA_POSITION_F070LP", + +"INRSI_C_GWA_X_POSITION_PRISM", +"INRSI_C_GWA_Y_POSITION_PRISM", + +"INRSI_C_GWA_X_POSITION_MIRROR", +"INRSI_C_GWA_Y_POSITION_MIRROR", + +"INRSI_C_GWA_X_POSITION_G140H", +"INRSI_C_GWA_Y_POSITION_G140H", + +"INRSI_C_GWA_X_POSITION_G235H", +"INRSI_C_GWA_Y_POSITION_G235H", + +"INRSI_C_GWA_X_POSITION_G395H", +"INRSI_C_GWA_Y_POSITION_G395H", + +"INRSI_C_GWA_X_POSITION_G140M", +"INRSI_C_GWA_Y_POSITION_G140M", + +"INRSI_C_GWA_X_POSITION_G235M", +"INRSI_C_GWA_Y_POSITION_G235M", + +"INRSI_C_GWA_X_POSITION_G395M", +"INRSI_C_GWA_Y_POSITION_G395M" ] + +fw_nominals = { +'F110W': -123.99, +'F100LP' : -10.32, +'CLEAR' : -56.44, +'F070LP' : 43.45, +'F140X' : -78.37, +'OPAQUE' : 21.58, +'F290LP' : -95.78, +'F170LP' : 8.95} + +gwx_nominals = { +'PRISM' : 169.01, +'MIRROR' : 171.11, +'G140H' : 180.25, +'G235H' : 176.66, 
+'G395H' : 159.96,
+'G140M' : 164.31,
+'G235M' : 159.24,
+'G395M' : 141.69}
+
+gwy_nominals = {
+'PRISM' : 17.08,
+'MIRROR' : 98.72,
+'G140H' : 67.47,
+'G235H' : 70.00,
+'G395H' : 73.29,
+'G140M' : 63.18,
+'G235M' : 69.81,
+'G395M' : 89.57}
+
+#use this list for query
+mnemonic_set_query = mnemonic_set_database + mnemonic_for_conditions
diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/process_data.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/process_data.py
new file mode 100644
index 000000000..e2fa7c793
--- /dev/null
+++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/process_data.py
@@ -0,0 +1,344 @@
+"""This module holds functions for NIRSpec data trending
+
+All functions in this module are tailored for the NIRSpec data trending
+application. Detailed descriptions are given for every function individually.
+
+Authors
+-------
+    - Daniel Kühbacher
+
+Use
+---
+
+Dependencies
+------------
+MIRI_trend_requestsDRAFT1900201.docx
+
+References
+----------
+
+Notes
+-----
+
+"""
+
+import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as mn
+import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.condition as cond
+import statistics
+from collections import defaultdict
+
+
+def extract_data(condition, mnemonic):
+    '''Extracts data from a given mnemonic under a given condition
+    Parameters
+    ----------
+    condition : object
+        condition object that holds one or more subconditions
+    mnemonic : AstropyTable
+        holds single table with mnemonic data
+    Return
+    ------
+    temp : list or None
+        holds data that applies to given condition
+    '''
+    temp = []
+
+    #look for all values that fit to the given conditions
+    for element in mnemonic:
+        if condition.state(float(element['time'])):
+            temp.append(float(element['value']))
+
+    #return temp if one or more values fit the condition
+    #return None if no applicable data was found
+    if len(temp) > 0:
+        return temp
+    else:
+        return None
+
+def lamp_distinction(caa_flag, lamp_sel, lamp_curr, lamp_volt):
+    """Distinguishes between the calibration lamps and returns
+    representative current and voltage statistics for each lamp
+    Parameters
+    ----------
+    caa_flag : AstropyTable
+        holds CAA on/off flags (INRSI_CAA_ON_FLAG)
+    lamp_sel : AstropyTable
+        holds the selected lamp (INRSH_LAMP_SEL)
+    lamp_curr : AstropyTable
+        holds lamp current values (INRSI_C_CAA_CURRENT)
+    lamp_volt : AstropyTable
+        holds lamp voltage values (INRSI_C_CAA_VOLTAGE)
+    Return
+    ------
+    lamp_values : defaultdict
+        holds current and voltage statistics with the lamp name as key
+    """
+
+    #initialize empty dict
+    lamp_values = defaultdict(list)
+
+    for index, flag in enumerate(caa_flag):
+
+        if flag['value'] == 'ON':
+
+            #initialize lamp value to default
+            current_lamp = "default"
+
+            #find current lamp value
+            for lamp in lamp_sel:
+                if lamp['time'] <= flag['time']:
+                    current_lamp = lamp['value']
+
+            #go to next value if dummy lamps are activated
+            if (current_lamp == 'NO_LAMP') or (current_lamp == 'DUMMY'):
+                continue
+
+            #define on_time of current lamp: scan forward to the next OFF flag
+            try:
+                start_time = flag['time']
+
+                i = 1
+                while caa_flag[index + i]['value'] != 'OFF':
+                    i += 1
+                end_time = caa_flag[index + i]['time']
+
+            except IndexError:
+                break
+
+            #append and evaluate current and voltage values
+            temp_curr = []
+            temp_volt = []
+
+            #append current values to list
+            for curr in lamp_curr:
+                if curr['time'] >= start_time:
+                    if curr['time'] < end_time:
+                        temp_curr.append(float(curr['value']))
+                    else:
+                        break
+            #append voltage values to list
+            for volt in lamp_volt:
+                if volt['time'] >= start_time:
+                    if volt['time'] < end_time:
+                        temp_volt.append(float(volt['value']))
+                    else:
+                        break
+
+            lamp_data = []
+            #append current values
+            lamp_data.append(start_time)
+            lamp_data.append(end_time)
+            lamp_data.append(len(temp_curr))
+            lamp_data.append(statistics.mean(temp_curr))
+            lamp_data.append(statistics.stdev(temp_curr))
+            #append voltage values
+            lamp_data.append(len(temp_volt))
+            lamp_data.append(statistics.mean(temp_volt))
+            lamp_data.append(statistics.stdev(temp_volt))
+            lamp_values[current_lamp].append(lamp_data)
+
+    return lamp_values
+
+def extract_filterpos(move_stat, wheel_pos, wheel_val):
+    '''Extracts position values that correspond to the given mechanical
+    positions and their proposed nominals
+    Parameters
+    ----------
+    move_stat : AstropyTable
+        holds the wheel move status mnemonic (e.g. INRSI_FWA_MOVE_ST)
+    wheel_pos : AstropyTable
+        holds the wheel mechanical position mnemonic (e.g. INRSI_FWA_MECH_POS)
+    wheel_val : AstropyTable
+        holds the measured wheel position values (e.g. INRSI_C_FWA_POSITION)
+    Return
+    ------
+    pos_values : dict
+        holds position values and times with the corresponding position label as key
+    '''
+
+    #initialize empty dict for assigned position values
+    pos_values = defaultdict(list)
+
+    for index, stat in enumerate(move_stat):
+
+        #only evaluate successfully completed wheel moves
+        if stat['value'] == "SUCCESS":
+
+            #initialize position to default
+            current_pos = "default"
+            pos_val = 0
+            pos_time = 0
+
+            #evaluate current position
+            for pos in wheel_pos:
+                if pos['time'] <= stat['time']:
+                    current_pos = pos['value']
+                if pos['time'] > stat['time']:
+                    break
+
+            #evaluate corresponding value
+            for val in wheel_val:
+                if val['time'] <= stat['time']:
+                    pos_val = val['value']
+                    pos_time = val['time']
+                if val['time'] > stat['time']:
+                    break
+
+            print(current_pos, pos_val, pos_time)
+
+            if current_pos != 'default':
+                pos_values[current_pos].append((pos_time, pos_val))
+            else:
+                continue
+
+    return pos_values
+
+def once_a_day_routine(mnemonic_data):
+    '''Routine for processing a 15min data file once a day
+    Parameters
+    ----------
+    mnemonic_data : dict
+        dict holds time and value in an astropy table with the corresponding identifier as key
+    Return
+    ------
+    return_data : dict
+        holds extracted data with applied conditions
+    '''
+
+    #abbreviate attribute
+    m = mnemonic_data
+    return_data = dict()
+
+    ###########################################################################
+    con_set_1 = [
+        cond.unequal(m.mnemonic('INRSD_EXP_STAT'), 'STARTED')]
+    #setup condition
+    condition_1 = cond.condition(con_set_1)
+
+    for identifier in mn.mnemonic_cond_1:
+        data = extract_data(condition_1, m.mnemonic(identifier))
+        if data is not None:
+            return_data.update({identifier: data})
+        else:
+            print("no data for {}".format(identifier))
+    del condition_1
+
+    ###########################################################################
+    con_set_2 = [
+        cond.equal(m.mnemonic('INRSH_LAMP_SEL'), 'NO_LAMP')]
+    #setup condition
+    condition_2 = cond.condition(con_set_2)
+
+    for identifier in mn.mnemonic_cond_2:
+        data = extract_data(condition_2, m.mnemonic(identifier))
+        if data is not None:
+            return_data.update({identifier: data})
+        else:
+            print("no data for {}".format(identifier))
+    del condition_2
+
+    ###########################################################################
+    con_set_3 = [
+        cond.unequal(m.mnemonic('INRSM_MOVE_STAT'), 'STARTED')]
+    #setup condition
+    condition_3 = cond.condition(con_set_3)
+
+    for identifier in mn.mnemonic_cond_3:
+        data = extract_data(condition_3, m.mnemonic(identifier))
+        if data is not None:
+            return_data.update({identifier: data})
+        else:
+            print("no data for {}".format(identifier))
+    del condition_3
+
+    return return_data
+
+
+def whole_day_routine(mnemonic_data):
+    '''Proposed routine for processing a whole day data file once a day
+
+    Parameters
+    ----------
+    mnemonic_data : dict
+        dict holds time and value in an astropy table with the corresponding identifier as key
+
+    Return
+    ------
+    return_data : dict
+        holds extracted data with applied conditions
+    data_lamps : dict
+        holds lamp current and voltage statistics with the lamp name as key
+    '''
+
+    #abbreviate attribute
+    m = mnemonic_data
+    return_data = dict()
+
+    ###########################################################################
+    con_set_ft_10 = [
+        cond.equal(m.mnemonic('ICTM_RT_FILTER'), 10, stringval=False)]
+    #setup condition
+    condition_ft_10 = cond.condition(con_set_ft_10)
+
+    for identifier in mn.mnemonic_ft10:
+        data = extract_data(condition_ft_10, m.mnemonic(identifier))
+        if data is not None:
+            return_data.update({identifier: data})
+        else:
+            print("no data for {}".format(identifier))
+    del condition_ft_10
+
+    ##########################################################################
+    con_set_caa = [
+        cond.equal(m.mnemonic('INRSH_CAA_PWRF_ST'), 'ON')]
+    #setup condition
+    condition_caa = cond.condition(con_set_caa)
+
+    for identifier in mn.mnemonic_caa:
+        data = extract_data(condition_caa, m.mnemonic(identifier))
+
+        if data is not None:
+            return_data.update({identifier: data})
+        else:
+            print("no data for {}".format(identifier))
+
+    del condition_caa
+
+    ###########################################################################
+    data_lamps = lamp_distinction(m.mnemonic('INRSI_CAA_ON_FLAG'),
+                                  m.mnemonic('INRSH_LAMP_SEL'),
+                                  m.mnemonic('INRSI_C_CAA_CURRENT'),
+                                  m.mnemonic('INRSI_C_CAA_VOLTAGE'))
+
+    return return_data, data_lamps
+
+def wheelpos_routine(mnemonic_data):
+    '''Proposed routine for processing the wheel position sensors once a day
+    Parameters
+    ----------
+    mnemonic_data : dict
+        dict holds time and value in an astropy table with the corresponding identifier as key
+    Return
+    ------
+    FW : dict
+        holds filter wheel position values and times with the corresponding position label as key
+    GWX : dict
+        holds grating wheel X position values and times with the corresponding position label as key
+    GWY : dict
+        holds grating wheel Y position values and times with the corresponding position label as key
+    '''
+
+    #abbreviate attribute
+    m = mnemonic_data
+
+    FW = extract_filterpos(m.mnemonic('INRSI_FWA_MOVE_ST'),
+                           m.mnemonic('INRSI_FWA_MECH_POS'),
+                           m.mnemonic('INRSI_C_FWA_POSITION'))
+
+    GWX = extract_filterpos(m.mnemonic('INRSI_GWA_MOVE_ST'),
+                            m.mnemonic('INRSI_GWA_MECH_POS'),
+                            m.mnemonic('INRSI_C_GWA_X_POSITION'))
+
+    GWY = extract_filterpos(m.mnemonic('INRSI_GWA_MOVE_ST'),
+                            m.mnemonic('INRSI_GWA_MECH_POS'),
+                            m.mnemonic('INRSI_C_GWA_Y_POSITION'))
+
+    return FW, GWX, GWY
+
+if __name__ == '__main__':
+    pass
diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/sql_interface.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/sql_interface.py
new file mode 100644
index 000000000..89da759b6
--- /dev/null
+++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/sql_interface.py
@@ -0,0 +1,160 @@
+"""Module holds functions to generate and access sqlite databases
+
+The module is tailored for use in NIRSpec data trending. It holds functions to
+create and close connections to a sqlite database. Calling the module itself
+creates a sqlite database with specific tables used for NIRSpec data trending.
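+
+A minimal sketch of typical use (the database file name follows main()
+below; the data tuple values are placeholders):
+
+    >>>import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql
+    >>>conn = sql.create_connection('nirspec_database.db')
+    >>>sql.add_data(conn, 'SE_ZINRSICEA', (start_time, end_time, n_points, mean, stdev))
+    >>>sql.close_connection(conn)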
+
+Authors
+-------
+    - Daniel Kühbacher
+
+Use
+---
+
+Dependencies
+------------
+    import mnemonics as m
+
+References
+----------
+
+Notes
+-----
+
+"""
+import os
+import sqlite3
+from sqlite3 import Error
+
+import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as m
+from jwql.utils.utils import get_config
+
+def create_connection(db_file):
+    '''Sets up a connection or builds database
+    Parameters
+    ----------
+    db_file : string
+        represents filename of database
+    Return
+    ------
+    conn : DBobject or None
+        Connection object or None
+    '''
+    try:
+        conn = sqlite3.connect(db_file)
+        print('Connected to database "{}"'.format(db_file))
+        return conn
+    except Error as e:
+        print(e)
+        return None
+
+
+def close_connection(conn):
+    '''Closes connection to database
+    Parameters
+    ----------
+    conn : DBobject
+        Connection object to be closed
+    '''
+    conn.close()
+    print('Connection closed')
+
+
+def add_data(conn, mnemonic, data):
+    '''Add data of a specific mnemonic to the database if it does not exist yet
+    Parameters
+    ----------
+    conn : DBobject
+        connection object to access database
+    mnemonic : string
+        identifies the table
+    data : list
+        specifies the data
+    '''
+
+    c = conn.cursor()
+
+    #check if data already exists (start_time as identifier)
+    c.execute('SELECT id from {} WHERE start_time= {}'.format(mnemonic, data[0]))
+    temp = c.fetchall()
+
+    if len(temp) == 0:
+        c.execute('INSERT INTO {} (start_time,end_time,data_points,average,deviation) \
+            VALUES (?,?,?,?,?)'.format(mnemonic), data)
+        conn.commit()
+    else:
+        print('data for {} already exists'.format(mnemonic))
+
+
+def add_wheel_data(conn, mnemonic, data):
+    '''Add data of a specific wheel position to the database if it does not exist yet
+    Parameters
+    ----------
+    conn : DBobject
+        connection object to access database
+    mnemonic : string
+        identifies the table
+    data : list
+        specifies the data
+    '''
+
+    c = conn.cursor()
+
+    #check if data already exists (start_time)
+    c.execute('SELECT id from {} WHERE timestamp = {}'.format(mnemonic, data[0]))
+    temp = c.fetchall()
+
+    if len(temp) == 0:
+        c.execute('INSERT INTO {} (timestamp, value) \
+            VALUES (?,?)'.format(mnemonic), data)
+        conn.commit()
+    else:
+        print('data already exists')
+
+
+def main():
+    ''' Creates SQLite database with tables proposed in mnemonics.py'''
+
+    __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
+
+    #generate paths
+    DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database')
+    DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db')
+
+    conn = create_connection(DATABASE_FILE)
+
+    c = conn.cursor()
+
+    for mnemonic in m.mnemonic_set_database:
+        try:
+            c.execute('CREATE TABLE IF NOT EXISTS {} ( \
+                id INTEGER, \
+                start_time REAL, \
+                end_time REAL, \
+                data_points INTEGER, \
+                average REAL, \
+                deviation REAL, \
+                performed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\
+                PRIMARY KEY (id));'.format(mnemonic))
+        except Error as e:
+            print(e)
+
+    for mnemonic in m.mnemonic_wheelpositions:
+        try:
+            c.execute('CREATE TABLE IF NOT EXISTS {} ( \
+                id INTEGER, \
+                timestamp REAL, \
+                value REAL, \
+                performed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\
+                PRIMARY KEY (id));'.format(mnemonic))
+        except Error as e:
+            print(e)
+
+    print("Database initial setup complete")
+    conn.commit()
+    close_connection(conn)
+
+#sets up database if called as main
+if __name__ == "__main__":
+    main()
+    print("sql_interface.py done")
diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/wheel_to_db.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/wheel_to_db.py
new file mode 100644
index 000000000..a9b283fe1
--- /dev/null
+++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/wheel_to_db.py
@@ -0,0 +1,63 @@
+import os
+import glob
+import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql
+import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.csv_to_AstropyTable as apt
+from jwql.utils.utils import get_config
+
+from jwql.instrument_monitors.nirspec_monitors.data_trending.utils.process_data import wheelpos_routine
+
+__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
+
+#point to the directory where your files are located!
+directory = os.path.join(get_config()['outputs'], 'nirspec_data_trending', 'nirspec_wheels', '*.CSV')
+
+#some files contain the same data but they are all incomplete;
+#in order to generate a full database we have to import all of them
+filenames = glob.glob(directory)
+
+def process_file(conn, path):
+
+    #import mnemonic data and append dict to variable below
+    m_raw_data = apt.mnemonics(path)
+
+    #process raw data with once a day routine
+    FW, GWX, GWY = wheelpos_routine(m_raw_data)
+
+    for key, values in FW.items():
+        for data in values:
+            sql.add_wheel_data(conn, 'INRSI_C_FWA_POSITION_{}'.format(key), data)
+
+    for key, values in GWX.items():
+        for data in values:
+            sql.add_wheel_data(conn, 'INRSI_C_GWA_X_POSITION_{}'.format(key), data)
+
+    for key, values in GWY.items():
+        for data in values:
+            sql.add_wheel_data(conn, 'INRSI_C_GWA_Y_POSITION_{}'.format(key), data)
+
+def main():
+    #generate paths
+    DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database')
+    DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db')
+
+    #connect to temporary database
+    conn = sql.create_connection(DATABASE_FILE)
+
+    '''
+    path = directory + test
+    process_file(conn, path)
+    '''
+    #do for every file in list above
+    for path in filenames:
+        process_file(conn, path)
+
+    #close connection
+    sql.close_connection(conn)
+    print("done")
+
+if __name__ == "__main__":
+    main()
diff --git a/jwql/instrument_monitors/pipeline_tools.py b/jwql/instrument_monitors/pipeline_tools.py
new file mode 100644
index 000000000..e81e7cfce
--- /dev/null
+++ b/jwql/instrument_monitors/pipeline_tools.py
@@ -0,0 +1,259 @@
+"""Various utility functions related to the JWST calibration pipeline
+
+Authors
+-------
+
+    - Bryan Hilbert
+
+Use
+---
+
+    This module can be imported as such:
+    ::
+
+    from jwql.instrument_monitors import pipeline_tools
+    pipeline_steps = pipeline_tools.completed_pipeline_steps(filename)
+"""
+
+from collections import OrderedDict
+import copy
+import numpy as np
+
+from astropy.io import fits
+from jwst.dq_init import DQInitStep
+from jwst.dark_current import DarkCurrentStep
+from jwst.firstframe import FirstFrameStep
+from jwst.group_scale import GroupScaleStep
+from jwst.ipc import IPCStep
+from jwst.jump import JumpStep
+from jwst.lastframe import LastFrameStep
+from jwst.linearity import LinearityStep
+from jwst.persistence import PersistenceStep
+from jwst.ramp_fitting import RampFitStep
+from jwst.refpix import RefPixStep
+from jwst.rscd import RSCD_Step
+from jwst.saturation import SaturationStep
+from jwst.superbias import SuperBiasStep
+
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES_UPPERCASE
+
+# Define the fits header keyword that accompanies each step
+PIPE_KEYWORDS = {'S_GRPSCL': 'group_scale', 'S_DQINIT': 'dq_init', 'S_SATURA': 'saturation',
+                 'S_IPC': 'ipc', 'S_REFPIX': 'refpix', 'S_SUPERB': 'superbias',
+                 'S_PERSIS': 'persistence', 'S_DARK': 'dark_current', 'S_LINEAR': 'linearity',
+                 'S_FRSTFR': 'firstframe', 'S_LASTFR': 'lastframe', 'S_RSCD': 'rscd',
+                 'S_JUMP': 'jump', 'S_RAMP': 'rate'}
+
+PIPELINE_STEP_MAPPING = {'dq_init': DQInitStep, 'dark_current': DarkCurrentStep,
+                         'firstframe': FirstFrameStep, 'group_scale': GroupScaleStep,
+                         'ipc': IPCStep, 'jump': JumpStep, 'lastframe': LastFrameStep,
+                         'linearity': LinearityStep, 'persistence': PersistenceStep,
+                         'rate': RampFitStep, 'refpix': RefPixStep, 'rscd': RSCD_Step,
+                         'saturation': SaturationStep, 'superbias': SuperBiasStep}
+
+# Readout patterns that have nframes != a power of 2. These readout patterns
+# require the group_scale pipeline step to be run.
+GROUPSCALE_READOUT_PATTERNS = ['NRSIRS2']
+
+
+def completed_pipeline_steps(filename):
+    """Return a list of the completed pipeline steps for a given file.
+
+    Parameters
+    ----------
+    filename : str
+        File to examine
+
+    Returns
+    -------
+    completed : collections.OrderedDict
+        Dictionary with boolean entry for each pipeline step,
+        indicating which pipeline steps have been run on filename
+    """
+
+    # Initialize using PIPE_KEYWORDS so that entries are guaranteed to
+    # be in the correct order
+    completed = OrderedDict({})
+    for key in PIPE_KEYWORDS.values():
+        completed[key] = False
+
+    header = fits.getheader(filename)
+    for key in PIPE_KEYWORDS.keys():
+        # header.get() returns a default for a missing keyword rather than
+        # raising KeyError, so no try/except is needed here
+        value = header.get(key, 'NOT DONE')
+        if value == 'COMPLETE':
+            completed[PIPE_KEYWORDS[key]] = True
+
+    return completed
+
+
+def get_pipeline_steps(instrument):
+    """Get the names and order of the ``calwebb_detector1`` pipeline
+    steps for a given instrument. Use values that match up with the
+    values in the ``PIPE_STEP`` definition in ``definitions.py``
+
+    Parameters
+    ----------
+    instrument : str
+        Name of JWST instrument
+
+    Returns
+    -------
+    steps : collections.OrderedDict
+        Dictionary of step names
+    """
+
+    # Ensure instrument name is valid
+    instrument = instrument.upper()
+    if instrument not in JWST_INSTRUMENT_NAMES_UPPERCASE.values():
+        raise ValueError("WARNING: {} is not a valid instrument name.".format(instrument))
+
+    # Order is important in 'steps' lists below!!
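+    # As an illustration (derived from the lists below, not an extra data
+    # source): for MIRI the returned OrderedDict maps dq_init, saturation,
+    # firstframe, lastframe, linearity, rscd, dark_current, refpix, jump
+    # and rate to True, and all remaining steps to False.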
+    if instrument == 'MIRI':
+        steps = ['group_scale', 'dq_init', 'saturation', 'ipc', 'firstframe', 'lastframe',
+                 'linearity', 'rscd', 'dark_current', 'refpix', 'persistence', 'jump', 'rate']
+        # No persistence correction for MIRI
+        steps.remove('persistence')
+        # MIRI is limited to one frame per group
+        steps.remove('group_scale')
+    else:
+        steps = ['group_scale', 'dq_init', 'saturation', 'ipc', 'superbias', 'refpix', 'linearity',
+                 'persistence', 'dark_current', 'jump', 'rate']
+
+        # No persistence correction for NIRSpec
+        if instrument == 'NIRSPEC':
+            steps.remove('persistence')
+        else:
+            # NIRCam, NIRISS, and FGS do not need group scale, as nframes is
+            # always a power of 2 for them
+            steps.remove('group_scale')
+
+    # IPC correction currently not done for any instrument
+    steps.remove('ipc')
+
+    # Initialize using PIPE_KEYWORDS so the steps will be in the right order
+    required_steps = OrderedDict({})
+    for key in steps:
+        required_steps[key] = True
+    for key in PIPE_KEYWORDS.values():
+        if key not in required_steps.keys():
+            required_steps[key] = False
+
+    return required_steps
+
+
+def image_stack(file_list):
+    """Given a list of fits files containing 2D images, read in all data
+    and place into a 3D stack
+
+    Parameters
+    ----------
+    file_list : list
+        List of fits file names
+
+    Returns
+    -------
+    cube : numpy.ndarray
+        3D stack of the 2D images
+    """
+
+    exptimes = []
+    for i, input_file in enumerate(file_list):
+        with fits.open(input_file) as hdu:
+            image = hdu[1].data
+            exptime = hdu[0].header['EFFINTTM']
+            num_ints = hdu[0].header['NINTS']
+
+        # Stack all inputs together into a single 3D image cube
+        if i == 0:
+            ndim_base = image.shape
+            if len(ndim_base) == 3:
+                cube = copy.deepcopy(image)
+            elif len(ndim_base) == 2:
+                cube = np.expand_dims(image, 0)
+        else:
+            ndim = image.shape
+            if ndim_base[-2:] == ndim[-2:]:
+                if len(ndim) == 2:
+                    image = np.expand_dims(image, 0)
+                elif len(ndim) > 3:
+                    raise ValueError("4-dimensional input slope images not supported.")
+                cube = np.vstack((cube, image))
+            else:
+                raise ValueError("Input images are of inconsistent size in x/y dimension.")
+        exptimes.append([exptime] * num_ints)
+
+    return cube, exptimes
+
+
+def run_calwebb_detector1_steps(input_file, steps):
+    """Run the steps of ``calwebb_detector1`` specified in the steps
+    dictionary on the input file
+
+    Parameters
+    ----------
+    input_file : str
+        File on which to run the pipeline steps
+
+    steps : collections.OrderedDict
+        Keys are the individual pipeline steps (as seen in the
+        ``PIPE_KEYWORDS`` values above). Boolean values indicate whether
+        a step should be run or not. Steps are run in the official
+        ``calwebb_detector1`` order.
+    """
+
+    first_step_to_be_run = True
+    for step_name in steps:
+        if steps[step_name]:
+            if first_step_to_be_run:
+                model = PIPELINE_STEP_MAPPING[step_name].call(input_file)
+                first_step_to_be_run = False
+            else:
+                model = PIPELINE_STEP_MAPPING[step_name].call(model)
+            suffix = step_name
+    output_filename = input_file.replace('.fits', '_{}.fits'.format(suffix))
+    if suffix != 'rate':
+        model.save(output_filename)
+    else:
+        model[0].save(output_filename)
+
+    return output_filename
+
+
+def steps_to_run(all_steps, finished_steps):
+    """Given a list of pipeline steps that need to be completed as well
+    as a list of steps that have already been completed, return a list
+    of steps remaining to be done.
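+
+    For example, if ``all_steps['jump']`` is True but ``finished_steps['jump']``
+    is False, then the returned dictionary maps ``'jump'`` to True; steps that
+    are already complete, or that are not required, map to False.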
+
+    Parameters
+    ----------
+    all_steps : collections.OrderedDict
+        A dictionary of all steps that need to be completed
+
+    finished_steps : collections.OrderedDict
+        A dictionary with keys equal to the pipeline steps and boolean
+        values indicating whether a particular step has been completed
+        or not (i.e. output from ``completed_pipeline_steps``)
+
+    Returns
+    -------
+    steps_to_run : collections.OrderedDict
+        A dictionary with keys equal to the pipeline steps and boolean
+        values indicating whether a particular step has yet to be run.
+    """
+
+    torun = copy.deepcopy(finished_steps)
+
+    for key in all_steps:
+        if all_steps[key] == finished_steps[key]:
+            torun[key] = False
+        elif ((all_steps[key] is True) and (finished_steps[key] is False)):
+            torun[key] = True
+        elif ((all_steps[key] is False) and (finished_steps[key] is True)):
+            print(("WARNING: {} step has been run "
+                   "but the requirements say that it should not "
+                   "be. Need a new input file.".format(key)))
+
+    return torun
diff --git a/jwql/jwql_monitors/generate_preview_images.py b/jwql/jwql_monitors/generate_preview_images.py
index 7d7c080ef..107b8e190 100755
--- a/jwql/jwql_monitors/generate_preview_images.py
+++ b/jwql/jwql_monitors/generate_preview_images.py
@@ -26,8 +26,9 @@
     python generate_preview_images.py
 """
 
-from glob import glob
+import glob
 import logging
+import multiprocessing
 import os
 import re
 
@@ -215,7 +216,7 @@ def check_existence(file_list, outdir):
                                               file_parts['parallel_seq_id'], file_parts['activity'],
                                               file_parts['exposure_id'], mosaic_str, file_parts['suffix'])
 
-    current_files = glob(os.path.join(outdir, search_string))
+    current_files = glob.glob(os.path.join(outdir, search_string))
     if len(current_files) > 0:
         return True
     else:
@@ -496,35 +497,151 @@ def find_data_channel(detectors):
     return channel
 
 
+def get_base_output_name(filename_dict):
+    """Returns the base output name used for preview images and
+    thumbnails.
+
+    Parameters
+    ----------
+    filename_dict : dict
+        A dictionary containing parsed filename parts via
+        ``filename_parser``
+
+    Returns
+    -------
+    base_output_name : str
+        The base output name, e.g. ``jw96090001002_03101_00001_nrca2_rate``
+    """
+
+    base_output_name = 'jw{}{}{}_{}{}{}_{}_'.format(
+        filename_dict['program_id'], filename_dict['observation'],
+        filename_dict['visit'], filename_dict['visit_group'],
+        filename_dict['parallel_seq_id'], filename_dict['activity'],
+        filename_dict['exposure_id'])
+
+    return base_output_name
+
+
 @log_fail
 @log_info
 def generate_preview_images():
-    """The main function of the ``generate_preview_image`` module."""
+    """The main function of the ``generate_preview_image`` module.
+    See module docstring for further details."""
 
     # Begin logging
     logging.info("Beginning the script run")
 
-    filesystem = get_config()['filesystem']
-    preview_image_filesystem = get_config()['preview_image_filesystem']
-    thumbnail_filesystem = get_config()['thumbnail_filesystem']
+    # Process programs in parallel
+    program_list = [os.path.basename(item) for item in glob.glob(os.path.join(get_config()['filesystem'], '*'))]
+    pool = multiprocessing.Pool(processes=int(get_config()['cores']))
+    pool.map(process_program, program_list)
+    pool.close()
+    pool.join()
+
+    # Complete logging:
+    logging.info("Completed.")
+
+
+def group_filenames(filenames):
+    """Given a list of JWST filenames, group together files from the
+    same exposure. These files will share the same ``program_id``,
+    ``observation``, ``visit``, ``visit_group``, ``parallel_seq_id``,
+    ``activity``, ``exposure``, and ``suffix``.
Only the ``detector`` + will be different. Currently only NIRCam files for a given exposure + will be grouped together. For other instruments multiple files for + a given exposure will be kept separate from one another and no + mosaic will be made. Stage 3 files will remain as individual + files, and will not be grouped together with any other files. + + Parameters + ---------- + filenames : list + list of filenames + + Returns + ------- + grouped : list + grouped list of filenames where each element is a list and + contains the names of filenames with matching exposure + information. + """ + + # Some initializations + grouped, matched_names = [], [] + filenames.sort() - filenames = glob(os.path.join(filesystem, '*/*.fits')) + # Loop over each file in the list of good files + for filename in filenames: + + # Holds list of matching files for exposure + subgroup = [] + + # Generate string to be matched with other filenames + filename_dict = filename_parser(os.path.basename(filename)) + + # If the filename was already involved in a match, then skip + if filename not in matched_names: + + # For stage 3 filenames, treat individually + if 'stage_3' in filename_dict['filename_type']: + matched_names.append(filename) + subgroup.append(filename) + + # Group together stage 1 and 2 filenames + elif filename_dict['filename_type'] == 'stage_1_and_2': + + # Determine detector naming convention + if filename_dict['detector'].upper() in NIRCAM_SHORTWAVE_DETECTORS: + detector_str = 'NRC[AB][1234]' + elif filename_dict['detector'].upper() in NIRCAM_LONGWAVE_DETECTORS: + detector_str = 'NRC[AB]5' + else: # non-NIRCam detectors + detector_str = filename_dict['detector'].upper() + + # Build pattern to match against + base_output_name = get_base_output_name(filename_dict) + match_str = '{}{}_{}.fits'.format(base_output_name, detector_str, filename_dict['suffix']) + match_str = os.path.join(os.path.dirname(filename), match_str) + pattern = re.compile(match_str, re.IGNORECASE) + + # Try to match the substring to each good file + for file_to_match in filenames: + if pattern.match(file_to_match) is not None: + matched_names.append(file_to_match) + subgroup.append(file_to_match) + + if len(subgroup) > 0: + grouped.append(subgroup) + + return grouped + + +def process_program(program): + """Generate preview images and thumbnails for the given program. + + Parameters + ---------- + program : str + The program identifier (e.g. 
``88600``) + """ + + # Group together common exposures + filenames = glob.glob(os.path.join(get_config()['filesystem'], program, '*.fits')) grouped_filenames = group_filenames(filenames) logging.info('Found {} filenames'.format(len(filenames))) for file_list in grouped_filenames: filename = file_list[0] + # Determine the save location try: identifier = 'jw{}'.format(filename_parser(filename)['program_id']) - except ValueError as error: + except ValueError: identifier = os.path.basename(filename).split('.fits')[0] + preview_output_directory = os.path.join(get_config()['preview_image_filesystem'], identifier) + thumbnail_output_directory = os.path.join(get_config()['thumbnail_filesystem'], identifier) - preview_output_directory = os.path.join(preview_image_filesystem, identifier) - thumbnail_output_directory = os.path.join(thumbnail_filesystem, identifier) - - # Check to see if the preview images already exist and skip - # if they do + # Check to see if the preview images already exist and skip if they do file_exists = check_existence(file_list, preview_output_directory) if file_exists: logging.info("JPG already exists for {}, skipping.".format(filename)) @@ -544,7 +661,7 @@ def generate_preview_images(): # than one detector was used), then create a mosaic max_size = 8 numfiles = len(file_list) - if numfiles != 1: + if numfiles > 1: try: mosaic_image, mosaic_dq = create_mosaic(file_list) logging.info('Created mosiac for:') @@ -578,100 +695,10 @@ def generate_preview_images(): im.file = dummy_file im.make_image(max_img_size=max_size) + logging.info('Created preview image and thumbnail for: {}'.format(filename)) except ValueError as error: logging.warning(error) - # Complete logging: - logging.info("Completed.") - - -def group_filenames(input_files): - """Given a list of JWST filenames, group together files from the - same exposure. These files will share the same ``program_id``, - ``observation``, ``visit``, ``visit_group``, ``parallel_seq_id``, - ``activity``, ``exposure``, and ``suffix``. Only the ``detector`` - will be different. Currently only NIRCam files for a given exposure - will be grouped together. For other instruments multiple files for - a given exposure will be kept separate from one another and no - mosaic will be made. - - Parameters - ---------- - input_files : list - list of filenames - - Returns - ------- - grouped : list - grouped list of filenames where each element is a list and - contains the names of filenames with matching exposure - information. 
- """ - - grouped = [] - - # Sort files first - input_files.sort() - - goodindex = np.arange(len(input_files)) - input_files = np.array(input_files) - - # Loop over each file in the list of good files - for index, full_filename in enumerate(input_files[goodindex]): - file_directory, filename = os.path.split(full_filename) - - # Generate string to be matched with other filenames - filename_parts = filename_parser(filename) - program = filename_parts['program_id'] - observation = filename_parts['observation'] - visit = filename_parts['visit'] - visit_group = filename_parts['visit_group'] - parallel = filename_parts['parallel_seq_id'] - activity = filename_parts['activity'] - exposure = filename_parts['exposure_id'] - detector = filename_parts['detector'].upper() - suffix = filename_parts['suffix'] - - observation_base = 'jw{}{}{}_{}{}{}_{}_'.format( - program, observation, visit, visit_group, - parallel, activity, exposure) - - if detector in NIRCAM_SHORTWAVE_DETECTORS: - detector_str = 'NRC[AB][1234]' - elif detector in NIRCAM_LONGWAVE_DETECTORS: - detector_str = 'NRC[AB]5' - else: # non-NIRCam detectors - should never be used I think?? - detector_str = detector - match_str = '{}{}_{}.fits'.format(observation_base, detector_str, suffix) - match_str = os.path.join(file_directory, match_str) - pattern = re.compile(match_str, re.IGNORECASE) - - # Try to match the substring to each good file - matches = [] - matched_name = [] - for index2, file2match in enumerate(input_files[goodindex]): - match = pattern.match(file2match) - - # Add any files that match the string - if match is not None: - matched_name.append(file2match) - matches.append(goodindex[index2]) - # For any matched files, remove from goodindex so we don't - # use them as a basis for matching later - all_locs = [] - for num in matches: - loc = np.where(goodindex == num) - all_locs.append(loc[0][0]) - if len(all_locs) != 0: - # Delete matched file indexes from the list of - # files to search - goodindex = np.delete(goodindex, all_locs) - - # Add the list of matched files to the overall list of files - grouped.append(matched_name) - - return grouped - if __name__ == '__main__': diff --git a/jwql/jwql_monitors/monitor_cron_jobs.py b/jwql/jwql_monitors/monitor_cron_jobs.py index 6fcea3999..e3cf5e444 100755 --- a/jwql/jwql_monitors/monitor_cron_jobs.py +++ b/jwql/jwql_monitors/monitor_cron_jobs.py @@ -244,7 +244,9 @@ def status(production_mode=True): # If we are in development mode, the log files are in a slightly # different location than in production mode - if not production_mode: + if production_mode: + log_path = os.path.join(log_path, 'prod') + else: log_path = os.path.join(log_path, 'dev') # Set up a dictionary to keep track of results @@ -321,6 +323,6 @@ def success_check(filename): if __name__ == '__main__': module = os.path.basename(__file__).strip('.py') - configure_logging(module, production_mode=True) + configure_logging(module) status() diff --git a/jwql/jwql_monitors/monitor_filesystem.py b/jwql/jwql_monitors/monitor_filesystem.py index da9890518..7961f7341 100755 --- a/jwql/jwql_monitors/monitor_filesystem.py +++ b/jwql/jwql_monitors/monitor_filesystem.py @@ -1,327 +1,394 @@ #! /usr/bin/env python """ -This module is meant to monitor and gather statistics of the filesystem -that hosts data for the ``jwql`` application. This will answer -questions such as the total number of files, how much disk space is -being used, and then plot these values over time. 
+This module monitors and gathers statistics of the filesystem that hosts
+data for the ``jwql`` application. This will answer questions such as
+the total number of files, how much disk space is being used, and then
+plot these values over time.

 Authors
 -------

     - Misty Cracraft
+    - Sara Ogaz
+    - Matthew Bourque

 Use
 ---

-    This module can be executed from the command line:
+    This module is intended to be executed from the command line:

     ::

         python monitor_filesystem.py

-    Alternatively, it can be called from scripts with the following
-    import statements:
-
-    ::
-
-        from monitor_filesystem import filesystem_monitor
-        from monitor_filesystem import plot_system_stats
-
-
-    Required arguments (in a ``config.json`` file):
-    ``filepath`` - The path to the input file needs to be in a
-    ``config.json`` file in the ``utils`` directory
-    ``outputs`` - The path to the output files needs to be in a
-    ``config.json`` file in the ``utils`` directory.
-
-    Required arguments for plotting:
-    ``inputfile`` - The name of the file to save all of the system
-    statistics to
-    ``filebytype`` - The name of the file to save stats on fits type
-    files to
-
+    The user must have a ``config.json`` file in the ``utils``
+    directory with the following keys:
+    - ``filesystem`` - The path to the filesystem
+    - ``outputs`` - The path to where the output plots will be
+      written

 Dependencies
 ------------

     The user must have a configuration file named ``config.json``
     placed in the ``utils`` directory.

-Notes
------
-
-    The ``monitor_filesystem`` function queries the filesystem,
-    calculates the statistics and saves the output file(s) in the
-    directory specified in the ``config.json`` file.
-
-    The ``plot_system_stats`` function reads in the two specified files
-    of statistics and plots the figures to an html output page as well
-    as saving them to an output html file.
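+
+    As an illustration only (the example values are hypothetical), the
+    keys are read via ``get_config``:
+
+    ::
+
+        from jwql.utils.utils import get_config
+
+        settings = get_config()
+        filesystem = settings['filesystem']  # e.g. '/path/to/filesystem'
+        outputs = settings['outputs']  # e.g. '/path/to/outputs'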
""" from collections import defaultdict import datetime +import itertools import logging -import numpy as np import os import subprocess from bokeh.embed import components from bokeh.layouts import gridplot +from bokeh.palettes import Dark2_5 as palette from bokeh.plotting import figure, output_file, save +from jwql.database.database_interface import engine +from jwql.database.database_interface import session +from jwql.database.database_interface import FilesystemGeneral +from jwql.database.database_interface import FilesystemInstrument from jwql.utils.logging_functions import configure_logging, log_info, log_fail from jwql.utils.permissions import set_permissions +from jwql.utils.constants import FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE from jwql.utils.utils import filename_parser from jwql.utils.utils import get_config +FILESYSTEM = get_config()['filesystem'] + + +def gather_statistics(general_results_dict, instrument_results_dict): + """Walks the filesytem to gather various statistics to eventually + store in the database + + Parameters + ---------- + general_results_dict : dict + A dictionary for the ``filesystem_general`` database table + instrument_results_dict : dict + A dictionary for the ``filesystem_instrument`` database table + + Returns + ------- + general_results_dict : dict + A dictionary for the ``filesystem_general`` database table + instrument_results_dict : dict + A dictionary for the ``filesystem_instrument`` database table + """ + + logging.info('Searching filesystem...') + + for dirpath, _, files in os.walk(FILESYSTEM): + general_results_dict['total_file_count'] += len(files) + for filename in files: + + file_path = os.path.join(dirpath, filename) + general_results_dict['total_file_size'] += os.path.getsize(file_path) + + if filename.endswith(".fits"): + + # Parse out filename information + filename_dict = filename_parser(filename) + filetype = filename_dict['suffix'] + instrument = filename_dict['instrument'] + + # Populate general stats + general_results_dict['fits_file_count'] += 1 + general_results_dict['fits_file_size'] += os.path.getsize(file_path) + + # Populate instrument specific stats + if instrument not in instrument_results_dict: + instrument_results_dict[instrument] = {} + if filetype not in instrument_results_dict[instrument]: + instrument_results_dict[instrument][filetype] = {} + instrument_results_dict[instrument][filetype]['count'] = 0 + instrument_results_dict[instrument][filetype]['size'] = 0 + instrument_results_dict[instrument][filetype]['count'] += 1 + instrument_results_dict[instrument][filetype]['size'] += os.path.getsize(file_path) / (2**40) + + # Convert file sizes to terabytes + general_results_dict['total_file_size'] = general_results_dict['total_file_size'] / (2**40) + general_results_dict['fits_file_size'] = general_results_dict['fits_file_size'] / (2**40) + + logging.info('{} files found in filesystem'.format(general_results_dict['fits_file_count'])) + + return general_results_dict, instrument_results_dict + + +def get_global_filesystem_stats(general_results_dict): + """Gathers ``used`` and ``available`` ``df``-style stats on the + entire filesystem. 
+ + Parameters + ---------- + general_results_dict : dict + A dictionary for the ``filesystem_general`` database table + + Returns + ------- + general_results_dict : dict + A dictionary for the ``filesystem_general`` database table + """ + + command = "df {}".format(FILESYSTEM) + command += " | awk '{print $3, $4}' | tail -n 1" + stats = subprocess.check_output(command, shell=True).split() + general_results_dict['used'] = int(stats[0]) / (2**40) + general_results_dict['available'] = int(stats[1]) / (2**40) + + return general_results_dict + + +def initialize_results_dicts(): + """Initializes dictionaries that will hold filesystem statistics + + Returns + ------- + general_results_dict : dict + A dictionary for the ``filesystem_general`` database table + instrument_results_dict : dict + A dictionary for the ``filesystem_instrument`` database table + """ + + now = datetime.datetime.now() + + general_results_dict = {} + general_results_dict['date'] = now + general_results_dict['total_file_count'] = 0 + general_results_dict['fits_file_count'] = 0 + general_results_dict['total_file_size'] = 0 + general_results_dict['fits_file_size'] = 0 + + instrument_results_dict = {} + instrument_results_dict['date'] = now + + return general_results_dict, instrument_results_dict + @log_fail @log_info def monitor_filesystem(): - """Tabulates the inventory of the JWST filesystem, saving - statistics to files, and generates plots. + """ + Tabulates the inventory of the JWST filesystem, saving statistics + to database tables, and generates plots. """ - # Begin logging logging.info('Beginning filesystem monitoring.') - # Get path, directories and files in system and count files in all directories - settings = get_config() - filesystem = settings['filesystem'] - outputs_dir = os.path.join(settings['outputs'], 'monitor_filesystem') + # Initialize dictionaries for database input + general_results_dict, instrument_results_dict = initialize_results_dicts() - # set up dictionaries for output - results_dict = defaultdict(int) - size_dict = defaultdict(float) - # Walk through all directories recursively and count files - logging.info('Searching filesystem...') - for dirpath, dirs, files in os.walk(filesystem): - results_dict['file_count'] += len(files) # find number of all files - for filename in files: - file_path = os.path.join(dirpath, filename) - if filename.endswith(".fits"): # find total number of fits files - results_dict['fits_files'] += 1 - size_dict['size_fits'] += os.path.getsize(file_path) - suffix = filename_parser(filename)['suffix'] - results_dict[suffix] += 1 - size_dict[suffix] += os.path.getsize(file_path) - detector = filename_parser(filename)['detector'] - instrument = detector[0:3] # first three characters of detector specify instrument - results_dict[instrument] += 1 - size_dict[instrument] += os.path.getsize(file_path) - logging.info('{} files found in filesystem'.format(results_dict['fits_files'])) + # Walk through filesystem recursively to gather statistics + general_results_dict, instrument_results_dict = gather_statistics(general_results_dict, instrument_results_dict) # Get df style stats on file system - out = subprocess.check_output('df {}'.format(filesystem), shell=True) - outstring = out.decode("utf-8") # put into string for parsing from byte format - parsed = outstring.split(sep=None) - - # Select desired elements from parsed string - total = int(parsed[8]) # in blocks of 512 bytes - used = int(parsed[9]) - available = int(parsed[10]) - percent_used = parsed[11] - - # Save stats for plotting 
over time - now = datetime.datetime.now().isoformat(sep='T', timespec='auto') # get date of stats - - # set up output file and write stats - statsfile = os.path.join(outputs_dir, 'statsfile.txt') - with open(statsfile, "a+") as f: - f.write("{0} {1:15d} {2:15d} {3:15d} {4:15d} {5}\n".format(now, results_dict['file_count'], - total, available, used, percent_used)) - set_permissions(statsfile) - logging.info('Saved file statistics to: {}'.format(statsfile)) - - # set up and read out stats on files by type - filesbytype = os.path.join(outputs_dir, 'filesbytype.txt') - with open(filesbytype, "a+") as f2: - f2.write("{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}\n".format(results_dict['fits_files'], - results_dict['uncal'], results_dict['cal'], results_dict['rate'], - results_dict['rateints'], results_dict['i2d'], results_dict['nrc'], - results_dict['nrs'], results_dict['nis'], results_dict['mir'], results_dict['gui'])) - set_permissions(filesbytype, verbose=False) - logging.info('Saved file statistics by type to {}'.format(filesbytype)) - - # set up file size by type file - sizebytype = os.path.join(outputs_dir, 'sizebytype.txt') - with open(sizebytype, "a+") as f3: - f3.write("{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}\n".format(size_dict['size_fits'], - size_dict['uncal'], size_dict['cal'], size_dict['rate'], - size_dict['rateints'], size_dict['i2d'], size_dict['nrc'], - size_dict['nrs'], size_dict['nis'], size_dict['mir'], size_dict['gui'])) - set_permissions(sizebytype, verbose=False) - logging.info('Saved file sizes by type to {}'.format(sizebytype)) - - logging.info('Filesystem statistics calculation complete.') + general_results_dict = get_global_filesystem_stats(general_results_dict) + + # Add data to database tables + update_database(general_results_dict, instrument_results_dict) # Create the plots - plot_system_stats(statsfile, filesbytype, sizebytype) + plot_filesystem_stats() + + logging.info("Completed.") -def plot_system_stats(stats_file, filebytype, sizebytype): - """Read in the file of saved stats over time and plot them. +def plot_by_filetype(plot_type, instrument): + """Plot ``count`` or ``size`` by filetype versus date for the given + instrument, or all instruments. Parameters - ----------- - stats_file : str - file containing information of stats over time - filebytype : str - file containing information of file counts by type over - time - sizebytype : str - file containing information on file sizes by type over time + ---------- + plot_type : str + Which data to plot. Either ``count`` or ``size``. + instrument : str + The instrument to plot for. Can be a valid JWST instrument or + ``all`` to plot across all instruments. 
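+
+    For illustration, a typical call might be
+    ``plot_by_filetype('count', 'nircam')`` or
+    ``plot_by_filetype('size', 'all')``.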
+
+    Returns
+    -------
+    plot : bokeh.plotting.figure.Figure object
+        ``bokeh`` plot of file counts or sizes, by filetype, versus date
+    """
-    # get path for files
-    settings = get_config()
-    outputs_dir = os.path.join(settings['outputs'], 'monitor_filesystem')
-
-    # read in file of statistics
-    date, f_count, sysize, frsize, used, percent = np.loadtxt(stats_file, dtype=str, unpack=True)
-    fits_files, uncalfiles, calfiles, ratefiles, rateintsfiles, i2dfiles, nrcfiles, nrsfiles, nisfiles, mirfiles, fgsfiles = np.loadtxt(filebytype, dtype=str, unpack=True)
-    fits_sz, uncal_sz, cal_sz, rate_sz, rateints_sz, i2d_sz, nrc_sz, nrs_sz, nis_sz, mir_sz, fgs_sz = np.loadtxt(sizebytype, dtype=str, unpack=True)
-    logging.info('Read in file statistics from {}, {}, {}'.format(stats_file, filebytype, sizebytype))
-
-    # put in proper np array types and convert to GB sizes
-    dates = np.array(date, dtype='datetime64')
-    file_count = f_count.astype(float)
-    systemsize = sysize.astype(float) / (1024.**3)
-    freesize = frsize.astype(float) / (1024.**3)
-    usedsize = used.astype(float) / (1024.**3)
-
-    fits = fits_files.astype(int)
-    uncal = uncalfiles.astype(int)
-    cal = calfiles.astype(int)
-    rate = ratefiles.astype(int)
-    rateints = rateintsfiles.astype(int)
-    i2d = i2dfiles.astype(int)
-    nircam = nrcfiles.astype(int)
-    nirspec = nrsfiles.astype(int)
-    niriss = nisfiles.astype(int)
-    miri = mirfiles.astype(int)
-    fgs = fgsfiles.astype(int)
-
-    fits_size = fits_sz.astype(float) / (1024.**3)
-    uncal_size = uncal_sz.astype(float) / (1024.**3)
-    cal_size = cal_sz.astype(float) / (1024.**3)
-    rate_size = rate_sz.astype(float) / (1024.**3)
-    rateints_size = rateints_sz.astype(float) / (1024.**3)
-    i2d_size = i2d_sz.astype(float) / (1024.**3)
-    nircam_size = nrc_sz.astype(float) / (1024.**3)
-    nirspec_size = nrs_sz.astype(float) / (1024.**3)
-    niriss_size = nis_sz.astype(float) / (1024.**3)
-    miri_size = mir_sz.astype(float) / (1024.**3)
-    fgs_size = fgs_sz.astype(float) / (1024.**3)
-
-    # plot the data
-    # Plot filecount vs. date
-    p1 = figure(
-        tools='pan,box_zoom,reset,wheel_zoom,save', x_axis_type='datetime',
-        title="Total File Counts", x_axis_label='Date', y_axis_label='Count')
-    p1.line(dates, file_count, line_width=2, line_color='blue')
-    p1.circle(dates, file_count, color='blue')
+    # Determine plot title
+    if instrument == 'all':
+        title = 'Total File {} by Type'.format(plot_type.capitalize())
+    else:
+        instrument_title = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument]
+        title = '{} Total File {} by Type'.format(instrument_title, plot_type.capitalize())
+
+    # Initialize plot
+    plot = figure(
+        tools='pan,box_zoom,wheel_zoom,reset,save',
+        x_axis_type='datetime',
+        title=title,
+        x_axis_label='Date',
+        y_axis_label='Count')
+    colors = itertools.cycle(palette)
+
+    for filetype, color in zip(FILE_SUFFIX_TYPES, colors):
+
+        # Query for counts
+        results = session.query(FilesystemInstrument.date, getattr(FilesystemInstrument, plot_type))\
+            .filter(FilesystemInstrument.filetype == filetype)
+
+        if instrument == 'all':
+            results = results.all()
+        else:
+            results = results.filter(FilesystemInstrument.instrument == instrument).all()
+
+        # Group by date
+        if results:
+            results_dict = defaultdict(int)
+            for date, value in results:
+                results_dict[date] += value
+
+            # Parse results so they can be easily plotted
+            dates = list(results_dict.keys())
+            values = list(results_dict.values())
+
+            # Plot the results
+            plot.line(dates, values, legend='{} files'.format(filetype), line_color=color)
+            plot.circle(dates, values, color=color)
+
+    return plot
+
+
+def plot_filesystem_size():
+    """Plot filesystem sizes (size, used, available) versus date
+
+    Returns
+    -------
+    plot : bokeh.plotting.figure.Figure object
+        ``bokeh`` plot of filesystem sizes versus date
+    """
     # Plot system stats vs. date
-    p2 = figure(
-        tools='pan,box_zoom,wheel_zoom,reset,save', x_axis_type='datetime',
-        title='System stats', x_axis_label='Date', y_axis_label='GB')
-    p2.line(dates, systemsize, legend='Total size', line_color='red')
-    p2.circle(dates, systemsize, color='red')
-    p2.line(dates, freesize, legend='Free bytes', line_color='blue')
-    p2.circle(dates, freesize, color='blue')
-    p2.line(dates, usedsize, legend='Used bytes', line_color='green')
-    p2.circle(dates, usedsize, color='green')
-
-    # Plot fits files by type vs.
date - p3 = figure( - tools='pan,box_zoom,wheel_zoom,reset,save', x_axis_type='datetime', - title="Total File Counts by Type", x_axis_label='Date', y_axis_label='Count') - p3.line(dates, fits, legend='Total fits files', line_color='black') - p3.circle(dates, fits, color='black') - p3.line(dates, uncal, legend='uncalibrated fits files', line_color='red') - p3.diamond(dates, uncal, color='red') - p3.line(dates, cal, legend='calibrated fits files', line_color='blue') - p3.square(date, cal, color='blue') - p3.line(dates, rate, legend='rate fits files', line_color='green') - p3.triangle(dates, rate, color='green') - p3.line(dates, rateints, legend='rateints fits files', line_color='orange') - p3.asterisk(dates, rateints, color='orange') - p3.line(dates, i2d, legend='i2d fits files', line_color='purple') - p3.x(dates, i2d, color='purple') - p3.line(dates, nircam, legend='nircam fits files', line_color='midnightblue') - p3.x(dates, nircam, color='midnightblue') - p3.line(dates, nirspec, legend='nirspec fits files', line_color='springgreen') - p3.x(dates, nirspec, color='springgreen') - p3.line(dates, niriss, legend='niriss fits files', line_color='darkcyan') - p3.x(dates, niriss, color='darkcyan') - p3.line(dates, miri, legend='miri fits files', line_color='dodgerblue') - p3.x(dates, miri, color='dodgerblue') - p3.line(dates, fgs, legend='fgs fits files', line_color='darkred') - p3.x(dates, fgs, color='darkred') - - # plot size of total fits files by type - p4 = figure( - tools='pan,box_zoom,wheel_zoom,reset,save', x_axis_type='datetime', - title="Total File Sizes by Type", x_axis_label='Date', y_axis_label='GB') - p4.line(dates, fits_size, legend='Total fits files', line_color='black') - p4.circle(dates, fits_size, color='black') - p4.line(dates, uncal_size, legend='uncalibrated fits files', line_color='red') - p4.diamond(dates, uncal_size, color='red') - p4.line(dates, cal_size, legend='calibrated fits files', line_color='blue') - p4.square(date, cal_size, color='blue') - p4.line(dates, rate_size, legend='rate fits files', line_color='green') - p4.triangle(dates, rate_size, color='green') - p4.line(dates, rateints_size, legend='rateints fits files', line_color='orange') - p4.asterisk(dates, rateints_size, color='orange') - p4.line(dates, i2d_size, legend='i2d fits files', line_color='purple') - p4.x(dates, i2d_size, color='purple') - p4.line(dates, nircam_size, legend='nircam fits files', line_color='midnightblue') - p4.x(dates, nircam_size, color='midnightblue') - p4.line(dates, nirspec_size, legend='nirspec fits files', line_color='springgreen') - p4.x(dates, nirspec_size, color='springgreen') - p4.line(dates, niriss_size, legend='niriss fits files', line_color='darkcyan') - p4.x(dates, niriss_size, color='darkcyan') - p4.line(dates, miri_size, legend='miri fits files', line_color='dodgerblue') - p4.x(dates, miri_size, color='dodgerblue') - p4.line(dates, fgs_size, legend='fgs fits files', line_color='darkred') - p4.x(dates, fgs_size, color='darkred') - - # create a layout with a grid pattern to save all plots - grid = gridplot([[p1, p2], [p3, p4]]) - outfile = os.path.join(outputs_dir, "filesystem_monitor.html") + results = session.query(FilesystemGeneral.date, FilesystemGeneral.total_file_size, + FilesystemGeneral.used, FilesystemGeneral.available).all() + dates, total_sizes, useds, availables = zip(*results) + plot = figure( + tools='pan,box_zoom,wheel_zoom,reset,save', + x_axis_type='datetime', + title='System stats', + x_axis_label='Date', + y_axis_label='GB') + plot.line(dates, 
total_sizes, legend='Total size', line_color='red') + plot.circle(dates, total_sizes, color='red') + plot.line(dates, useds, legend='Used bytes', line_color='green') + plot.circle(dates, useds, color='green') + plot.line(dates, availables, legend='Free bytes', line_color='blue') + plot.circle(dates, availables, color='blue') + + return plot + + +def plot_filesystem_stats(): + """ + Plot various filesystem statistics using ``bokeh`` and save them to + the output directory. + """ + + p1 = plot_total_file_counts() + p2 = plot_filesystem_size() + p3 = plot_by_filetype('count', 'all') + p4 = plot_by_filetype('size', 'all') + plot_list = [p1, p2, p3, p4] + + for instrument in JWST_INSTRUMENT_NAMES: + plot_list.append(plot_by_filetype('count', instrument)) + plot_list.append(plot_by_filetype('size', instrument)) + + # Create a layout with a grid pattern + grid_chunks = [plot_list[i:i+2] for i in range(0, len(plot_list), 2)] + grid = gridplot(grid_chunks) + + # Save all of the plots in one file + outputs_dir = os.path.join(get_config()['outputs'], 'monitor_filesystem') + outfile = os.path.join(outputs_dir, 'filesystem_monitor.html') output_file(outfile) save(grid) set_permissions(outfile) logging.info('Saved plot of all statistics to {}'.format(outfile)) # Save each plot's components - plots = [p1, p2, p3, p4] - plot_names = ['filecount', 'system_stats', 'filecount_type', 'size_type'] - for plot, name in zip(plots, plot_names): + for plot in plot_list: + plot_name = plot.title.text.lower().replace(' ', '_') plot.sizing_mode = 'stretch_both' script, div = components(plot) - div_outfile = os.path.join(outputs_dir, "{}_component.html".format(name)) + div_outfile = os.path.join(outputs_dir, "{}_component.html".format(plot_name)) with open(div_outfile, 'w') as f: f.write(div) f.close() set_permissions(div_outfile) - script_outfile = os.path.join(outputs_dir, "{}_component.js".format(name)) + script_outfile = os.path.join(outputs_dir, "{}_component.js".format(plot_name)) with open(script_outfile, 'w') as f: f.write(script) f.close() set_permissions(script_outfile) - logging.info('Saved components files: {}_component.html and {}_component.js'.format(name, name)) + logging.info('Saved components files: {}_component.html and {}_component.js'.format(plot_name, plot_name)) logging.info('Filesystem statistics plotting complete.') - # Begin logging: - logging.info("Completed.") + +def plot_total_file_counts(): + """Plot total file counts versus date + + Returns + ------- + plot : bokeh.plotting.figure.Figure object + ``bokeh`` plot of total file counts versus date + """ + + # Total file counts vs. date + results = session.query(FilesystemGeneral.date, FilesystemGeneral.total_file_count).all() + dates, file_counts = zip(*results) + plot = figure( + tools='pan,box_zoom,reset,wheel_zoom,save', + x_axis_type='datetime', + title="Total File Counts", + x_axis_label='Date', + y_axis_label='Count') + plot.line(dates, file_counts, line_width=2, line_color='blue') + plot.circle(dates, file_counts, color='blue') + + return plot + + +def update_database(general_results_dict, instrument_results_dict): + """Updates the ``filesystem_general`` and ``filesystem_instrument`` + database tables. 
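+
+    For illustration only (the values are hypothetical), a single
+    record bound for the ``filesystem_instrument`` table has the form::
+
+        {'date': datetime.datetime(2019, 4, 19, 12, 0, 0),
+         'instrument': 'nircam',
+         'filetype': 'uncal',
+         'count': 42,
+         'size': 1.2}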
+ + Parameters + ---------- + general_results_dict : dict + A dictionary for the ``filesystem_general`` database table + instrument_results_dict : dict + A dictionary for the ``filesystem_instrument`` database table + """ + + engine.execute(FilesystemGeneral.__table__.insert(), general_results_dict) + session.commit() + + # Add data to filesystem_instrument table + for instrument in JWST_INSTRUMENT_NAMES: + for filetype in instrument_results_dict[instrument]: + new_record = {} + new_record['date'] = instrument_results_dict['date'] + new_record['instrument'] = instrument + new_record['filetype'] = filetype + new_record['count'] = instrument_results_dict[instrument][filetype]['count'] + new_record['size'] = instrument_results_dict[instrument][filetype]['size'] + + engine.execute(FilesystemInstrument.__table__.insert(), new_record) + session.commit() if __name__ == '__main__': diff --git a/jwql/tests/test_api_views.py b/jwql/tests/test_api_views.py index e6d36f450..8e06d762e 100644 --- a/jwql/tests/test_api_views.py +++ b/jwql/tests/test_api_views.py @@ -74,7 +74,6 @@ def test_api_views(url): # Build full URL base_url = get_base_url() url = '{}/{}'.format(base_url, url) - print('Testing {}'.format(url)) # Determine the type of data to check for based on the url data_type = url.split('/')[-2] diff --git a/jwql/tests/test_calculations.py b/jwql/tests/test_calculations.py new file mode 100644 index 000000000..5834e453e --- /dev/null +++ b/jwql/tests/test_calculations.py @@ -0,0 +1,105 @@ +#! /usr/bin/env python + +"""Tests for the ``calculations`` module. + +Authors +------- + + - Bryan Hilbert + +Use +--- + + These tests can be run via the command line (omit the ``-s`` to + suppress verbose output to stdout): + :: + + pytest -s test_calculations.py +""" + +import numpy as np + +from jwql.utils import calculations + + +def test_double_gaussian_fit(): + """Test the double Gaussian fitting function""" + + amplitude1 = 500 + mean_value1 = 0.5 + sigma_value1 = 0.05 + amplitude2 = 300 + mean_value2 = 0.4 + sigma_value2 = 0.03 + + bin_centers = np.arange(0., 1.1, 0.007) + input_params = [amplitude1, mean_value1, sigma_value1, amplitude2, mean_value2, sigma_value2] + input_values = calculations.double_gaussian(bin_centers, *input_params) + + initial_params = [np.max(input_values), 0.55, 0.1, np.max(input_values), 0.5, 0.05] + params, sigma = calculations.double_gaussian_fit(bin_centers, input_values, initial_params) + + assert np.allclose(np.array(params[0:3]), np.array([amplitude2, mean_value2, sigma_value2]), + atol=0, rtol=0.000001) + assert np.allclose(np.array(params[3:]), np.array([amplitude1, mean_value1, sigma_value1]), + atol=0, rtol=0.000001) + + +def test_gaussian1d_fit(): + """Test histogram fitting function""" + + mean_value = 0.5 + sigma_value = 0.05 + image = np.random.normal(loc=mean_value, scale=sigma_value, size=(100, 100)) + hist, bin_edges = np.histogram(image, bins='auto') + bin_centers = (bin_edges[1:] + bin_edges[0: -1]) / 2. + initial_params = [np.max(hist), 0.55, 0.1] + amplitude, peak, width = calculations.gaussian1d_fit(bin_centers, hist, initial_params) + + assert np.isclose(peak[0], mean_value, atol=0.0015, rtol=0.) + assert np.isclose(width[0], sigma_value, atol=0.0015, rtol=0.) 
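+
+    # peak and width are (value, uncertainty) tuples; the checks below
+    # confirm that the true mean and sigma fall within 3-sigma of the
+    # fitted values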
+    assert ((mean_value <= peak[0]+3*peak[1]) & (mean_value >= peak[0]-3*peak[1]))
+    assert ((sigma_value <= width[0]+3*width[1]) & (sigma_value >= width[0]-3*width[1]))
+
+
+def test_mean_image():
+    """Test the sigma-clipped mean and stdev image calculator"""
+
+    # Create a stack of 50 5x5 pixel images
+    nstack = 50
+    cube = np.zeros((nstack, 5, 5))
+
+    # Set alternating frames equal to 4 and 5
+    for i in range(nstack):
+        if i % 2 == 0:
+            cube[i, :, :] = 4.
+        else:
+            cube[i, :, :] = 5.
+
+    # Insert a few signal values that will be removed by sigma clipping.
+    # Make sure you "remove" an equal number of 4's and 5's from each
+    # pixel in order to keep the mean at 4.5 and dev at 0.5
+    cube[0, 0, 0] = 55.
+    cube[1, 0, 0] = -78.
+    cube[3, 3, 3] = 150.
+    cube[2, 3, 3] = 32.
+    cube[1, 4, 4] = -96.
+    cube[4, 4, 4] = -25.
+    mean_img, dev_img = calculations.mean_image(cube, sigma_threshold=3)
+
+    assert np.all(mean_img == 4.5)
+    assert np.all(dev_img == 0.5)
+
+
+def test_mean_stdev():
+    """Test calculation of the sigma-clipped mean from an image"""
+
+    image = np.zeros((50, 50)) + 1.
+    badx = [1, 4, 10, 14, 16, 20, 22, 25, 29, 30]
+    bady = [13, 27, 43, 21, 1, 32, 25, 21, 9, 14]
+    for x, y in zip(badx, bady):
+        image[y, x] = 100.
+
+    meanval, stdval = calculations.mean_stdev(image, sigma_threshold=3)
+    assert meanval == 1.
+    assert stdval == 0.
diff --git a/jwql/tests/test_dark_monitor.py b/jwql/tests/test_dark_monitor.py
new file mode 100644
index 000000000..7562c2ba1
--- /dev/null
+++ b/jwql/tests/test_dark_monitor.py
@@ -0,0 +1,136 @@
+#! /usr/bin/env python
+
+"""Tests for the ``dark_monitor`` module.
+
+Authors
+-------
+
+    - Bryan Hilbert
+
+Use
+---
+
+    These tests can be run via the command line (omit the ``-s`` to
+    suppress verbose output to stdout):
+    ::
+
+        pytest -s test_dark_monitor.py
+"""
+
+import os
+import pytest
+
+from astropy.time import Time
+import numpy as np
+
+from jwql.instrument_monitors.common_monitors import dark_monitor
+from jwql.utils.utils import get_config
+
+
+def test_find_hot_dead_pixels():
+    """Test hot and dead pixel searches"""
+    monitor = dark_monitor.Dark(testing=True)
+
+    # Create "baseline" image
+    comparison_image = np.zeros((10, 10)) + 1.
+
+    # Create mean slope image to compare
+    mean_image = np.zeros((10, 10)) + 1.
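+    # Pixels set more than hot_threshold times the comparison image below
+    # should be flagged as hot; pixels below dead_threshold times it, as dead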
+ mean_image[0, 0] = 1.7 + mean_image[1, 1] = 2.2 + mean_image[7, 7] = 4.5 + mean_image[5, 5] = 0.12 + mean_image[6, 6] = 0.06 + mean_image[7, 3] = 0.09 + + hot, dead = monitor.find_hot_dead_pixels(mean_image, comparison_image, hot_threshold=2., dead_threshold=0.1) + assert len(hot) == 2 + assert np.all(hot[0] == np.array([1, 7])) + assert np.all(hot[1] == np.array([1, 7])) + assert len(dead) == 2 + assert np.all(dead[0] == np.array([6, 7])) + assert np.all(dead[1] == np.array([6, 3])) + + +@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', + reason='Requires access to central storage.') +def test_get_metadata(): + """Test retrieval of metadata from input file""" + + monitor = dark_monitor.Dark(testing=True) + filename = os.path.join(get_config()['test_dir'], 'dark_monitor', 'test_image_1.fits') + monitor.get_metadata(filename) + + assert monitor.detector == 'NRCA1' + assert monitor.x0 == 0 + assert monitor.y0 == 0 + assert monitor.xsize == 10 + assert monitor.ysize == 10 + assert monitor.sample_time == 10 + assert monitor.frame_time == 10.5 + + +def test_mast_query_darks(): + """Test that the MAST query for darks is functional""" + + instrument = 'NIRCAM' + aperture = 'NRCA1_FULL' + start_date = Time("2016-01-01T00:00:00").mjd + end_date = Time("2018-01-01T00:00:00").mjd + query = dark_monitor.mast_query_darks(instrument, aperture, start_date, end_date) + apernames = [entry['apername'] for entry in query] + filenames = [entry['filename'] for entry in query] + + truth_filenames = ['jw96003001001_02201_00001_nrca1_dark.fits', + 'jw82600013001_02102_00002_nrca1_dark.fits', + 'jw82600013001_02101_00001_nrca1_dark.fits', + 'jw82600013001_02103_00003_nrca1_dark.fits', + 'jw82600013001_02103_00001_nrca1_dark.fits', + 'jw82600013001_02103_00002_nrca1_dark.fits', + 'jw82600016001_02101_00002_nrca1_dark.fits', + 'jw82600016001_02101_00001_nrca1_dark.fits', + 'jw82600013001_02102_00001_nrca1_dark.fits', + 'jw82600016001_02103_00002_nrca1_dark.fits', + 'jw82600016001_02103_00001_nrca1_dark.fits', + 'jw82600016001_02103_00004_nrca1_dark.fits', + 'jw82600016001_02103_00003_nrca1_dark.fits', + 'jw82600016001_02102_00001_nrca1_dark.fits'] + + assert len(query) == 14 + assert apernames == [aperture]*len(query) + assert filenames == truth_filenames + + +def test_noise_check(): + """Test the search for noisier than average pixels""" + + noise_image = np.zeros((10, 10)) + 0.5 + baseline = np.zeros((10, 10)) + 0.5 + + noise_image[3, 3] = 0.8 + noise_image[6, 6] = 0.6 + noise_image[9, 9] = 1.0 + + baseline[5, 5] = 1.0 + noise_image[5, 5] = 1.25 + + monitor = dark_monitor.Dark(testing=True) + noisy = monitor.noise_check(noise_image, baseline, threshold=1.5) + + assert len(noisy[0]) == 2 + assert np.all(noisy[0] == np.array([3, 9])) + assert np.all(noisy[1] == np.array([3, 9])) + + +def test_shift_to_full_frame(): + """Test pixel coordinate shifting to be in full frame coords""" + + monitor = dark_monitor.Dark(testing=True) + monitor.x0 = 512 + monitor.y0 = 512 + + coordinates = (np.array([6, 7]), np.array([6, 3])) + new_coords = monitor.shift_to_full_frame(coordinates) + + assert np.all(new_coords[0] == np.array([518, 519])) + assert np.all(new_coords[1] == np.array([518, 515])) diff --git a/jwql/tests/test_edb_interface.py b/jwql/tests/test_edb_interface.py index 310f76d3e..516986f7a 100644 --- a/jwql/tests/test_edb_interface.py +++ b/jwql/tests/test_edb_interface.py @@ -17,9 +17,10 @@ pytest -s test_edb_interface.py """ -import pytest + from astropy.time import Time from astroquery.mast import 
Mast
+import pytest
 
 from jwql.edb.edb_interface import mnemonic_inventory, query_single_mnemonic
 from jwql.utils.utils import get_config
@@ -71,7 +72,8 @@ def test_query_single_mnemonic():
                                             token=MAST_TOKEN)
 
     assert len(data) == meta['paging']['rows']
-
+
+@pytest.mark.xfail
 def test_invalid_query():
     """Test that the mnemonic query for an unauthorized user fails."""
diff --git a/jwql/tests/test_instrument_properties.py b/jwql/tests/test_instrument_properties.py
new file mode 100644
index 000000000..5e9af4784
--- /dev/null
+++ b/jwql/tests/test_instrument_properties.py
@@ -0,0 +1,83 @@
+#! /usr/bin/env python
+
+"""Tests for the ``instrument_properties`` module.
+
+Authors
+-------
+
+    - Bryan Hilbert
+
+Use
+---
+
+    These tests can be run via the command line (omit the ``-s`` to
+    suppress verbose output to stdout):
+    ::
+
+        pytest -s test_instrument_properties.py
+"""
+
+import os
+import pytest
+
+import numpy as np
+
+from jwql.utils import instrument_properties
+from jwql.utils.utils import get_config
+
+
+@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins',
+                    reason='Requires access to central storage.')
+def test_amplifier_info():
+    """Test that the correct number of amplifiers is found for a given
+    file
+    """
+
+    data_dir = os.path.join(get_config()['test_dir'], 'dark_monitor')
+
+    fullframe = instrument_properties.amplifier_info(os.path.join(data_dir, 'test_image_ff.fits'))
+    fullframe_truth = (4, {'1': [(4, 4), (512, 2044)],
+                           '2': [(512, 4), (1024, 2044)],
+                           '3': [(1024, 4), (1536, 2044)],
+                           '4': [(1536, 4), (2044, 2044)]})
+    assert fullframe == fullframe_truth
+
+    fullframe = instrument_properties.amplifier_info(os.path.join(data_dir, 'test_image_ff.fits'), omit_reference_pixels=False)
+    fullframe_truth = (4, {'1': [(0, 0), (512, 2048)],
+                           '2': [(512, 0), (1024, 2048)],
+                           '3': [(1024, 0), (1536, 2048)],
+                           '4': [(1536, 0), (2048, 2048)]})
+    assert fullframe == fullframe_truth
+
+    subarray = instrument_properties.amplifier_info(os.path.join(data_dir, 'test_image_1.fits'))
+    subarray_truth = (1, {'1': [(0, 0), (10, 10)]})
+    assert subarray == subarray_truth
+
+    subarray_one = instrument_properties.amplifier_info(os.path.join(data_dir, 'test_image_grismstripe_one_amp.fits'))
+    subarray_one_truth = (1, {'1': [(4, 4), (2044, 64)]})
+    assert subarray_one == subarray_one_truth
+
+    subarray_four = instrument_properties.amplifier_info(os.path.join(data_dir, 'test_image_grismstripe_four_amp.fits'))
+    subarray_four_truth = (4, {'1': [(4, 4), (512, 64)],
+                               '2': [(512, 4), (1024, 64)],
+                               '3': [(1024, 4), (1536, 64)],
+                               '4': [(1536, 4), (2044, 64)]})
+    assert subarray_four == subarray_four_truth
+
+
+def test_calc_frame_time():
+    """Test calculation of frametime for a given instrument/aperture"""
+
+    nearir_fullframe = 10.73677
+    nircam_160 = 0.27864
+    nrc_fullframe = instrument_properties.calc_frame_time('nircam', 'NRCA1_FULL', 2048, 2048, 4)
+    nrc_160 = instrument_properties.calc_frame_time('nircam', 'NRCA1_SUB160', 160, 160, 1)
+    nrs_fullframe = instrument_properties.calc_frame_time('niriss', 'NIS_CEN', 2048, 2048, 4)
+    # nrs_some_subarray = instrument_properties.calc_frame_time('niriss', '????', ??, ??, ?)
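+    # For reference: the expected near-IR values above appear consistent
+    # with a readout relation of roughly (xdim / amps + 12) * (ydim + 1)
+    # * 10e-6 seconds, e.g. (2048 / 4 + 12) * (2048 + 1) * 1e-5 ~= 10.7368 s
+    # for a four-amp full frame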
+
+    print('STILL NEED TO ADD FRAMETIME CALCS FOR MIRI AND NIRSPEC TO THE CALC_FRAME_TIME_FUNCTION')
+    print('CONFIRM NIRCAMSUB160 TIME ON JDOX')
+
+    assert np.isclose(nrc_fullframe, nearir_fullframe, atol=0.001, rtol=0)
+    assert np.isclose(nrc_160, nircam_160, atol=0.001, rtol=0)
+    assert np.isclose(nrs_fullframe, nearir_fullframe, atol=0.001, rtol=0)
diff --git a/jwql/tests/test_loading_times.py b/jwql/tests/test_loading_times.py
new file mode 100644
index 000000000..df0445ced
--- /dev/null
+++ b/jwql/tests/test_loading_times.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+"""Tests various webpages of the ``jwql`` web application to make sure
+that loading times are not too long
+
+Authors
+-------
+
+    - Matthew Bourque
+
+Use
+---
+
+    These tests can be run via the command line (omit the -s to
+    suppress verbose output to stdout):
+
+    ::
+
+        pytest -s test_loading_times.py
+"""
+
+import pytest
+import time
+import urllib.request
+
+from jwql.utils.utils import get_base_url
+
+TIME_CONSTRAINT = 30  # seconds
+
+urls = []
+
+# Generic URLs
+urls.append('')
+urls.append('about/')
+urls.append('edb/')
+
+# Specific URLs
+test_mappings = [('fgs', '86700', 'jw86600007001_02101_00001_guider2'),
+                 ('miri', '98012', 'jw98012001001_02102_00001_mirimage'),
+                 ('nircam', '93025', 'jw93065002001_02101_00001_nrcb2'),
+                 ('niriss', '00308', 'jw00308001001_02101_00001_nis'),
+                 ('nirspec', '96213', 'jw96213001001_02101_00001_nrs1')]
+for mapping in test_mappings:
+    (instrument, proposal, rootname) = mapping
+    urls.append('{}/'.format(instrument))
+    urls.append('{}/archive/'.format(instrument))
+    urls.append('{}/archive/{}/'.format(instrument, proposal))
+    urls.append('{}/{}/'.format(instrument, rootname))
+
+
+@pytest.mark.parametrize('url', urls)
+def test_loading_times(url):
+    """Test to see if the given ``url`` returns a webpage successfully
+    within a reasonable time.
+
+    Parameters
+    ----------
+    url : str
+        The url to the webpage of interest (e.g.
+        ``http://127.0.0.1:8000/fgs/archive/'``).
+    """
+
+    # Build full URL
+    base_url = get_base_url()
+    url = '{}/{}'.format(base_url, url)
+    print('Testing {}'.format(url))
+
+    t1 = time.time()
+    url = urllib.request.urlopen(url)
+    t2 = time.time()
+
+    assert (t2 - t1) <= TIME_CONSTRAINT
diff --git a/jwql/tests/test_permissions.py b/jwql/tests/test_permissions.py
index 83a94aa7f..1ed6dab67 100755
--- a/jwql/tests/test_permissions.py
+++ b/jwql/tests/test_permissions.py
@@ -47,7 +47,6 @@ def test_directory(test_dir=TEST_DIRECTORY):
     os.mkdir(test_dir)  # creates directory with default mode=511
 
     yield test_dir
-    print("teardown test directory")
     if os.path.isdir(test_dir):
         os.remove(test_dir)
@@ -68,7 +67,6 @@ def test_directory_permissions(test_directory):
     # independently from the user.
     owner = get_owner_string(test_directory)
     group = get_group_string(test_directory)
-    print('\nCurrent owner={} group={}'.format(owner, group))
 
     set_permissions(test_directory, owner=owner, group=group)
     assert has_permissions(test_directory, owner=owner, group=group)
@@ -95,14 +93,12 @@ def test_file(test_dir=TEST_DIRECTORY):
     with open(filename, 'w') as filestream:
         filestream.write('jwql permission test')
     yield filename
-    print("teardown test file and directory ")
     if os.path.isfile(filename):
         os.remove(filename)
     if os.path.isdir(test_dir):
         os.rmdir(test_dir)
 
-# @pytest.mark.xfail
 def test_file_group(test_file):
     """Create a file with the standard permissions ``('-rw-r--r--')``
     and default group.
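Many of the new test modules repeat the same guard for Jenkins builds, which
have no access to central storage. As a sketch only (the
``needs_central_storage`` name is hypothetical and not part of this
changeset), the pattern could be shared::

    import os

    import pytest

    # A Jenkins home directory identifies CI builds without central storage
    ON_JENKINS = os.path.expanduser('~') == '/home/jenkins'
    needs_central_storage = pytest.mark.skipif(
        ON_JENKINS, reason='Requires access to central storage.')

    @needs_central_storage
    def test_example():
        """Placeholder test demonstrating the shared marker."""
        assert True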
diff --git a/jwql/tests/test_pipeline_tools.py b/jwql/tests/test_pipeline_tools.py new file mode 100644 index 000000000..c932dd46e --- /dev/null +++ b/jwql/tests/test_pipeline_tools.py @@ -0,0 +1,185 @@ +#! /usr/bin/env python + +"""Tests for the ``pipeline_tools`` module. + +Authors +------- + + - Bryan Hilbert + +Use +--- + + These tests can be run via the command line (omit the ``-s`` to + suppress verbose output to stdout): + :: + + pytest -s test_pipeline_tools.py +""" + +from collections import OrderedDict +import os +import pytest + +import numpy as np + +from jwql.instrument_monitors import pipeline_tools +from jwql.utils.utils import get_config + + +@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', + reason='Requires access to central storage.') +def test_completed_pipeline_steps(): + """Test that the list of completed pipeline steps for a file is + correct + + Parameters + ---------- + filename : str + File to be checked + """ + + filename = os.path.join(get_config()['filesystem'], 'jw00312', 'jw00312002001_02102_00001_nrcb4_rateints.fits') + completed_steps = pipeline_tools.completed_pipeline_steps(filename) + true_completed = OrderedDict([('group_scale', False), + ('dq_init', True), + ('saturation', True), + ('ipc', False), + ('refpix', True), + ('superbias', True), + ('persistence', True), + ('dark_current', True), + ('linearity', True), + ('firstframe', False), + ('lastframe', False), + ('rscd', False), + ('jump', True), + ('rate', True)]) + + assert completed_steps == true_completed + + +def test_get_pipeline_steps(): + """Test that the proper pipeline steps are returned for an + instrument + """ + + # FGS, NIRCam, and NIRISS have the same required steps + instruments = ['fgs', 'nircam', 'niriss'] + for instrument in instruments: + req_steps = pipeline_tools.get_pipeline_steps(instrument) + steps = ['dq_init', 'saturation', 'superbias', 'refpix', 'linearity', + 'persistence', 'dark_current', 'jump', 'rate'] + not_required = ['group_scale', 'ipc', 'firstframe', 'lastframe', 'rscd'] + steps_dict = OrderedDict({}) + for step in steps: + steps_dict[step] = True + for step in not_required: + steps_dict[step] = False + assert req_steps == steps_dict + + # NIRSpec and MIRI have different required steps + nrs_req_steps = pipeline_tools.get_pipeline_steps('nirspec') + nrs_steps = ['group_scale', 'dq_init', 'saturation', 'superbias', 'refpix', 'linearity', + 'dark_current', 'jump', 'rate'] + not_required = ['ipc', 'persistence', 'firstframe', 'lastframe', 'rscd'] + nrs_dict = OrderedDict({}) + for step in nrs_steps: + nrs_dict[step] = True + for step in not_required: + nrs_dict[step] = False + assert nrs_req_steps == nrs_dict + + miri_req_steps = pipeline_tools.get_pipeline_steps('miri') + miri_steps = ['dq_init', 'saturation', 'firstframe', 'lastframe', + 'linearity', 'rscd', 'dark_current', 'refpix', 'jump', 'rate'] + not_required = ['group_scale', 'ipc', 'superbias', 'persistence'] + miri_dict = OrderedDict({}) + for step in miri_steps: + miri_dict[step] = True + for step in not_required: + miri_dict[step] = False + assert miri_req_steps == miri_dict + + +@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', + reason='Requires access to central storage.') +def test_image_stack(): + """Test stacking of slope images""" + + directory = os.path.join(get_config()['test_dir'], 'dark_monitor') + files = [os.path.join(directory, 'test_image_{}.fits'.format(str(i+1))) for i in range(3)] + + image_stack, exptimes = pipeline_tools.image_stack(files) + truth = 
np.zeros((3, 10, 10)) + truth[0, :, :] = 5. + truth[1, :, :] = 10. + truth[2, :, :] = 15. + + assert np.all(image_stack == truth) + assert exptimes == [[10.5], [10.5], [10.5]] + + +def test_steps_to_run(): + """Test that the dictionaries for steps required and steps completed + are correctly combined to create a dictionary of pipeline steps to + be done + + Parameters + ---------- + filename : str + File to be checked + + required : OrderedDict + Dict of all pipeline steps to be run on filename + + already_done : OrderedDict + Dict of pipeline steps already run on filename + """ + + required = OrderedDict([('group_scale', True), + ('dq_init', False), + ('saturation', False), + ('ipc', False), + ('refpix', False), + ('superbias', False), + ('persistence', True), + ('dark_current', True), + ('linearity', False), + ('firstframe', False), + ('lastframe', False), + ('rscd', False), + ('jump', True), + ('rate', True)]) + already_done = OrderedDict([('group_scale', True), + ('dq_init', False), + ('saturation', False), + ('ipc', False), + ('refpix', False), + ('superbias', False), + ('persistence', True), + ('dark_current', True), + ('linearity', False), + ('firstframe', False), + ('lastframe', False), + ('rscd', False), + ('jump', False), + ('rate', False)]) + + steps_to_run = pipeline_tools.steps_to_run(required, already_done) + true_steps_to_run = OrderedDict([('group_scale', False), + ('dq_init', False), + ('saturation', False), + ('ipc', False), + ('refpix', False), + ('superbias', False), + ('persistence', False), + ('dark_current', False), + ('linearity', False), + ('firstframe', False), + ('lastframe', False), + ('rscd', False), + ('jump', True), + ('rate', True)]) + + assert steps_to_run == true_steps_to_run diff --git a/jwql/tests/test_preview_image.py b/jwql/tests/test_preview_image.py index 8d713b26f..5b465995f 100644 --- a/jwql/tests/test_preview_image.py +++ b/jwql/tests/test_preview_image.py @@ -6,6 +6,7 @@ ------- - Johannes Sahlmann + - Lauren Chambers Use @@ -22,16 +23,18 @@ import glob import os import pytest +import shutil from astropy.io import fits from jwql.utils.preview_image import PreviewImage +from jwql.utils.utils import get_config, ensure_dir_exists # directory to be created and populated during tests running TEST_DIRECTORY = os.path.join(os.environ['HOME'], 'preview_image_test') -# directory that contains sample images -TEST_DATA_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_data') +# Determine if tests are being run on jenkins +ON_JENKINS = os.path.expanduser('~') == '/home/jenkins' @pytest.fixture(scope="module") @@ -49,62 +52,88 @@ def test_directory(test_dir=TEST_DIRECTORY): Path to directory used for testing """ - os.mkdir(test_dir) # creates directory + # Set up local test directory + ensure_dir_exists(test_dir) yield test_dir - print("teardown test directory") - if os.path.isdir(test_dir): - os.rmdir(test_dir) + # Tear down local test directory and any files within + if os.path.isdir(test_dir): + shutil.rmtree(test_dir) -def test_make_image(test_directory): - """Use PreviewImage.make_image to create preview images of a sample JWST exposure. + # Empty test directory on central storage + jpgs = glob.glob(os.path.join(get_config()['test_dir'], '*.jpg')) + thumbs = glob.glob(os.path.join(get_config()['test_dir'], '*.thumbs')) + for file in jpgs + thumbs: + os.remove(file) - Assert that the number of JPGs created corresponds to the number of integrations. 
- Parameters - ---------- - test_directory : str - Path of directory used for testing +def get_test_fits_files(): + """Get a list of the FITS files on central storage to make preview images. + Returns + ------- + filenames : list + List of filepaths to FITS files """ - filenames = glob.glob(os.path.join(TEST_DATA_DIRECTORY, '*.fits')) - print('\nGenerating preview images for {}.'.format(filenames)) + # Get the files from central store + if not ON_JENKINS: + filenames = glob.glob(os.path.join(get_config()['test_dir'], '*.fits')) + assert len(filenames) > 0 + return filenames - output_directory = test_directory + # Or return an empty list + else: + return [] - for filename in filenames: - header = fits.getheader(filename) +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') +@pytest.mark.parametrize('filename', get_test_fits_files()) +def test_make_image(test_directory, filename): + """Use PreviewImage.make_image to create preview images of a sample + JWST exposure. - # Create and save the preview image or thumbnail - for create_thumbnail in [False, True]: - try: - image = PreviewImage(filename, "SCI") - image.clip_percent = 0.01 - image.scaling = 'log' - image.cmap = 'viridis' - image.output_format = 'jpg' - image.thumbnail = create_thumbnail + Assert that the number of JPGs created corresponds to the number of + integrations. - if create_thumbnail: - image.thumbnail_output_directory = output_directory - else: - image.preview_output_directory = output_directory + Parameters + ---------- + test_directory : str + Path of directory used for testing + filename : str + Path of FITS image to generate preview of + """ - image.make_image() - except ValueError as error: - print(error) + header = fits.getheader(filename) + + # Create and save the preview image or thumbnail + for create_thumbnail in [False, True]: + try: + image = PreviewImage(filename, "SCI") + image.clip_percent = 0.01 + image.scaling = 'log' + image.cmap = 'viridis' + image.output_format = 'jpg' + image.thumbnail = create_thumbnail if create_thumbnail: - extension = 'thumb' + image.thumbnail_output_directory = test_directory else: - extension = 'jpg' + image.preview_output_directory = test_directory + + image.make_image() + except ValueError as error: + print(error) + + if create_thumbnail: + extension = 'thumb' + else: + extension = 'jpg' - # list of preview images - preview_image_filenames = glob.glob(os.path.join(test_directory, '*.{}'.format( - extension))) - assert len(preview_image_filenames) == header['NINTS'] + # list of preview images + preview_image_filenames = glob.glob(os.path.join(test_directory, '*.{}'.format( + extension))) + assert len(preview_image_filenames) == header['NINTS'] - # clean up: delete preview images - for file in preview_image_filenames: - os.remove(file) + # clean up: delete preview images + for file in preview_image_filenames: + os.remove(file) diff --git a/jwql/tests/test_utils.py b/jwql/tests/test_utils.py index 99e71d08b..a59a4b003 100644 --- a/jwql/tests/test_utils.py +++ b/jwql/tests/test_utils.py @@ -19,10 +19,10 @@ """ import os - +from pathlib import Path import pytest -from jwql.utils.utils import get_config, filename_parser +from jwql.utils.utils import copy_files, get_config, filename_parser, filesystem_path FILENAME_PARSER_TEST_DATA = [ @@ -245,8 +245,34 @@ ] -@pytest.mark.xfail(raises=FileNotFoundError, - reason='User must manually supply config file.') +@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', + reason='Requires access to central 
storage.') +def test_copy_files(): + """Test that files are copied successfully""" + + # Create an example file to be copied + data_dir = os.path.dirname(__file__) + file_to_copy = 'file.txt' + original_file = os.path.join(data_dir, file_to_copy) + Path(original_file).touch() + assert os.path.exists(original_file), 'Failed to create original test file.' + + # Make a copy one level up + new_location = os.path.abspath(os.path.join(data_dir, '../')) + copied_file = os.path.join(new_location, file_to_copy) + + # Copy the file + success, failure = copy_files([original_file], new_location) + assert success == [copied_file] + assert os.path.isfile(copied_file) + + # Remove the copy + os.remove(original_file) + os.remove(copied_file) + + +@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', + reason='Requires access to central storage.') def test_get_config(): """Assert that the ``get_config`` function successfully creates a dictionary. @@ -271,11 +297,10 @@ def test_filename_parser(filename, solution): assert filename_parser(filename) == solution -@pytest.mark.xfail(raises=(FileNotFoundError, ValueError), - reason='Known non-compliant files in filesystem; User must manually supply config file.') +@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', + reason='Requires access to central storage.') def test_filename_parser_whole_filesystem(): - """Test the filename_parser on all files currently in the filesystem. - """ + """Test the filename_parser on all files currently in the filesystem.""" # Get all files filesystem_dir = get_config()['filesystem'] all_files = [] @@ -309,3 +334,15 @@ def test_filename_parser_nonJWST(): with pytest.raises(ValueError): filename = 'not_a_jwst_file.fits' filename_parser(filename) + + +@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', + reason='Requires access to central storage.') +def test_filesystem_path(): + """Test that a file's location in the filesystem is returned""" + + filename = 'jw96003001001_02201_00001_nrca1_dark.fits' + check = filesystem_path(filename) + location = os.path.join(get_config()['filesystem'], 'jw96003', filename) + + assert check == location diff --git a/jwql/utils/calculations.py b/jwql/utils/calculations.py new file mode 100644 index 000000000..6d167abe7 --- /dev/null +++ b/jwql/utils/calculations.py @@ -0,0 +1,166 @@ +"""Various math-related functions used by the ``jwql`` instrument +monitors. 
+
+Authors
+-------
+
+    - Bryan Hilbert
+
+Use
+---
+
+    This module can be imported as such:
+    ::
+
+        from jwql.utils import calculations
+        mean_val, stdev_val = calculations.mean_stdev(image, sigma_threshold=4)
+    """
+
+import numpy as np
+
+from astropy.modeling import fitting, models
+from astropy.stats import sigma_clip
+from scipy.optimize import curve_fit
+from scipy.stats import sigmaclip
+
+
+def double_gaussian(x, amp1, peak1, sigma1, amp2, peak2, sigma2):
+    """Evaluate the sum of two Gaussians
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        1D array of x values to be fit
+
+    amp1, peak1, sigma1, amp2, peak2, sigma2 : float
+        Amplitude, peak position, and standard deviation of each
+        Gaussian component
+
+    Returns
+    -------
+    y_values : numpy.ndarray
+        Sum of the two Gaussians evaluated at ``x``
+    """
+
+    y_values = amp1 * np.exp(-(x - peak1)**2.0 / (2.0 * sigma1**2.0)) \
+        + amp2 * np.exp(-(x - peak2)**2.0 / (2.0 * sigma2**2.0))
+
+    return y_values
+
+
+def double_gaussian_fit(x_values, y_values, input_params):
+    """Fit two Gaussians to the given array
+
+    Parameters
+    ----------
+    x_values : numpy.ndarray
+        1D array of x values to be fit
+
+    y_values : numpy.ndarray
+        1D array of y values to be fit
+
+    input_params : list
+        Initial guesses for Gaussian coefficients
+        ``[amplitude1, peak1, stdev1, amplitude2, peak2, stdev2]``
+
+    Returns
+    -------
+    params : list
+        Fitted parameter values
+
+    sigma : numpy.ndarray
+        Uncertainties on the parameters
+    """
+
+    params, cov = curve_fit(double_gaussian, x_values, y_values, input_params)
+    sigma = np.sqrt(np.diag(cov))
+
+    return params, sigma
+
+
+def gaussian1d_fit(x_values, y_values, params):
+    """Fit 1D Gaussian to an array. Designed around fitting to a
+    histogram of pixel values.
+
+    Parameters
+    ----------
+    x_values : numpy.ndarray
+        1D array of x values to be fit
+
+    y_values : numpy.ndarray
+        1D array of y values to be fit
+
+    params : list
+        Initial guesses for the Gaussian coefficients
+        ``[amplitude, peak, stdev]``
+
+    Returns
+    -------
+    amplitude : tup
+        Tuple of the best fit Gaussian amplitude and uncertainty
+
+    peak : tup
+        Tuple of the best fit Gaussian peak position and uncertainty
+
+    width : tup
+        Tuple of the best fit Gaussian width and uncertainty
+    """
+
+    model_gauss = models.Gaussian1D(amplitude=params[0], mean=params[1], stddev=params[2])
+    fitter_gauss = fitting.LevMarLSQFitter()
+    best_fit = fitter_gauss(model_gauss, x_values, y_values)
+    cov_diag = np.diag(fitter_gauss.fit_info['param_cov'])
+
+    # Arrange each parameter into (best_fit_value, uncertainty) tuple
+    amplitude = (best_fit.amplitude.value, np.sqrt(cov_diag[0]))
+    peak = (best_fit.mean.value, np.sqrt(cov_diag[1]))
+    width = (best_fit.stddev.value, np.sqrt(cov_diag[2]))
+
+    return amplitude, peak, width
+
+
+def mean_image(cube, sigma_threshold=3):
+    """Combine a stack of 2D images into a mean slope image, using
+    sigma-clipping on a pixel-by-pixel basis
+
+    Parameters
+    ----------
+    cube : numpy.ndarray
+        3D array containing a stack of 2D images
+
+    sigma_threshold : int
+        Number of sigma to use when sigma-clipping values in each
+        pixel
+
+    Returns
+    -------
+    mean_image : numpy.ndarray
+        2D sigma-clipped mean image
+
+    stdev_image : numpy.ndarray
+        2D sigma-clipped standard deviation image
+    """
+
+    clipped_cube = sigma_clip(cube, sigma=sigma_threshold, axis=0, masked=False)
+    mean_image = np.nanmean(clipped_cube, axis=0)
+    std_image = np.nanstd(clipped_cube, axis=0)
+
+    return mean_image, std_image
+
+
+def mean_stdev(image, sigma_threshold=3):
+    """Calculate the sigma-clipped mean and stdev of an input array
+
+    Parameters
+    ----------
+    image : numpy.ndarray
+        Array of which to calculate statistics
+
+    sigma_threshold : float
+        Number of sigma to use when sigma-clipping
+
+ Returns + ------- + mean_value : float + Sigma-clipped mean of image + + stdev_value : float + Sigma-clipped standard deviation of image + """ + + clipped, lower, upper = sigmaclip(image, low=sigma_threshold, high=sigma_threshold) + mean_value = np.mean(clipped) + stdev_value = np.std(clipped) + + return mean_value, stdev_value diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py index e4bb2c28b..d12df9a8b 100644 --- a/jwql/utils/constants.py +++ b/jwql/utils/constants.py @@ -60,22 +60,43 @@ AMI_SUFFIX_TYPES MONITORS = { - 'fgs': ['Bad Pixel Monitor'], - 'miri': ['Dark Current Monitor', - 'Bad Pixel Monitor', 'Cosmic Ray Monitor', 'Photometry Monitor', - 'TA Failure Monitor', 'Blind Pointing Accuracy Monitor', - 'Filter and Calibration Lamp Monitor', 'Thermal Emission Monitor'], - 'nircam': ['Bias Monitor', - 'Readnoise Monitor', 'Gain Level Monitor', - 'Mean Dark Current Rate Monitor', 'Photometric Stability Monitor'], - 'niriss': ['Bad Pixel Monitor', - 'Readnoise Monitor', 'AMI Calibrator Monitor', 'TSO RMS Monitor'], - 'nirspec': ['Optical Short Monitor', 'Target Acquisition Monitor', - 'Detector Health Monitor', 'Ref Pix Monitor', - 'Internal Lamp Monitor', 'Instrument Model Updates', - 'Failed-open Shutter Monitor']} + 'fgs': [('Bad Pixel Monitor', '#')], + 'miri': [('Dark Current Monitor', '#'), + ('Data Trending', '/miri/miri_data_trending'), + ('Bad Pixel Monitor', '#'), + ('Cosmic Ray Monitor', '#'), + ('Photometry Monitor', '#'), + ('TA Failure Monitor', '#'), + ('Blind Pointing Accuracy Monitor', '#'), + ('Filter and Calibration Lamp Monitor', '#'), + ('Thermal Emission Monitor', '#')], + 'nircam': [('Bias Monitor', '#'), + ('Readnoise Monitor', '#'), + ('Gain Level Monitor', '#'), + ('Mean Dark Current Rate Monitor', '#'), + ('Photometric Stability Monitor', '#')], + 'niriss': [('Bad Pixel Monitor', '#'), + ('Readnoise Monitor', '#'), + ('AMI Calibrator Monitor', '#'), + ('TSO RMS Monitor', '#')], + 'nirspec': [('Optical Short Monitor', '#'), + ('Target Acquisition Monitor', '#'), + ('Data Trending', '/nirspec/nirspec_data_trending'), + ('Detector Health Monitor', '#'), + ('Ref Pix Monitor', '#'), + ('Internal Lamp Monitor', '#'), + ('Instrument Model Updates', '#'), + ('Failed-open Shutter Monitor', '#')]} NIRCAM_SHORTWAVE_DETECTORS = ['NRCA1', 'NRCA2', 'NRCA3', 'NRCA4', 'NRCB1', 'NRCB2', 'NRCB3', 'NRCB4'] NIRCAM_LONGWAVE_DETECTORS = ['NRCA5', 'NRCB5'] + +AMPLIFIER_BOUNDARIES = {'nircam': {'1': [(0, 0), (512, 2048)], '2': [(512, 0), (1024, 2048)], + '3': [(1024, 0), (1536, 2048)], '4': [(1536, 0), (2048, 2048)]} + } + +FOUR_AMP_SUBARRAYS = ['WFSS128R', 'WFSS64R'] + +SUBARRAYS_ONE_OR_FOUR_AMPS = ['SUBGRISMSTRIPE64', 'SUBGRISMSTRIPE128', 'SUBGRISMSTRIPE256'] diff --git a/jwql/utils/edb.py b/jwql/utils/edb.py new file mode 100644 index 000000000..b12a35144 --- /dev/null +++ b/jwql/utils/edb.py @@ -0,0 +1,40 @@ +#! /usr/bin/env python +"""Tests for the ``engineering_database`` module. 
+ +Authors +------- + + - Johannes Sahlmann + + +Use +--- + + These tests can be run via the command line (omit the ``-s`` to + suppress verbose output to ``stdout``): + + :: + + pytest -s test_edb_interface.py +""" + +from astropy.time import Time +import jwql.utils.engineering_database as edb + +def test_query_single_mnemonic(): + """Test the query of a mnemonic over a given time range.""" + + mnemonic_identifier = 'SE_ZIMIRICEA' + start_time = Time(2018.01, format='decimalyear') + end_time = Time(2018.02, format='decimalyear') + + mnemonic = edb.query_single_mnemonic(mnemonic_identifier, start_time, end_time) + print(mnemonic) + +def main(): + data, meta = edb.get_all_mnemonic_identifiers() + print(data) + test_query_single_mnemonic() + +if __name__ == "__main__": + main() diff --git a/jwql/utils/instrument_properties.py b/jwql/utils/instrument_properties.py new file mode 100644 index 000000000..2e5de55c3 --- /dev/null +++ b/jwql/utils/instrument_properties.py @@ -0,0 +1,233 @@ +"""Collection of functions dealing with retrieving/calculating various +instrument properties + +Authors +------- + + - Bryan Hilbert + +Uses +---- + + This module can be imported and used as such: + + :: + + from jwql.utils import instrument_properties as inst + amps = inst.amplifier_info('my_files.fits') +""" + +from copy import deepcopy + +from astropy.io import fits +from jwst.datamodels import dqflags +import numpy as np + +from jwql.utils.constants import AMPLIFIER_BOUNDARIES, FOUR_AMP_SUBARRAYS, SUBARRAYS_ONE_OR_FOUR_AMPS + + +def amplifier_info(filename, omit_reference_pixels=True): + """Calculate the number of amplifiers used to collect the data in a + given file using the array size and exposure time of a single frame + (This is needed because there is no header keyword specifying + how many amps were used.) + + Parameters + ---------- + filename : str + Name of fits file to investigate + + omit_reference_pixels : bool + If ``True``, return the amp boundary coordinates excluding + reference pixels + + Returns + ------- + num_amps : int + Number of amplifiers used to read out the data + + amp_bounds : dict + Dictionary of amplifier boundary coordinates. Keys are strings of + the amp number (1-4). Each value is a list composed of two tuples. + The first tuple give the (x, y) starting location, and the second + tuple gives the (x, y) ending location. + """ + + # First get necessary metadata + header = fits.getheader(filename) + instrument = header['INSTRUME'].lower() + detector = header['DETECTOR'] + x_dim = header['SUBSIZE1'] + y_dim = header['SUBSIZE2'] + sample_time = header['TSAMPLE'] * 1.e-6 + frame_time = header['TFRAME'] + subarray_name = header['SUBARRAY'] + aperture = "{}_{}".format(detector, subarray_name) + + # Full frame data will be 2048x2048 for all instruments + if ((x_dim == 2048) and (y_dim == 2048)) or subarray_name in FOUR_AMP_SUBARRAYS: + num_amps = 4 + amp_bounds = deepcopy(AMPLIFIER_BOUNDARIES[instrument]) + + else: + + if subarray_name not in SUBARRAYS_ONE_OR_FOUR_AMPS: + num_amps = 1 + amp_bounds = {'1': [(0, 0), (x_dim, y_dim)]} + + else: + + # These are the tougher cases. Subarrays that can be + # used with multiple amp combinations + + # Compare the given frametime with the calculated frametimes + # using 4 amps or 1 amp. 
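+            # For example, a hypothetical 2048x64 NIRCam stripe read with the + # default 1e-5 second sample time yields calculated frame times of + # roughly 0.341 s with 4 amps versus roughly 1.360 s with 1 amp, so + # matching the reported frame time against both candidates is unambiguous.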
+ + # Right now this is used only for the NIRCam grism stripe + # subarrays, so we don't need this to be a general case that + # can handle any subarray orientation relative to any amp + # orientation + amp4_time = calc_frame_time(instrument, aperture, x_dim, y_dim, + 4, sample_time=sample_time) + amp1_time = calc_frame_time(instrument, aperture, x_dim, y_dim, + 1, sample_time=sample_time) + + if np.isclose(amp4_time, frame_time, atol=0.001, rtol=0): + num_amps = 4 + # In this case, keep the full frame amp boundaries in + # the x direction, and set the boundaries in the y + # direction equal to the height of the subarray + amp_bounds = deepcopy(AMPLIFIER_BOUNDARIES[instrument]) + for amp_num in ['1', '2', '3', '4']: + newdims = (amp_bounds[amp_num][1][0], y_dim) + amp_bounds[amp_num][1] = newdims + + elif np.isclose(amp1_time, frame_time, atol=0.001, rtol=0): + num_amps = 1 + amp_bounds = {'1': [(0, 0), (x_dim, y_dim)]} + + else: + raise ValueError(('Unable to determine number of amps used for exposure. 4-amp frametime ' + 'is {}. 1-amp frametime is {}. Reported frametime is {}.') + .format(amp4_time, amp1_time, frame_time)) + + if omit_reference_pixels: + + # If requested, ignore reference pixels by adjusting the indexes of + # the amp boundaries. + with fits.open(filename) as hdu: + try: + data_quality = hdu['DQ'].data + except KeyError: + raise KeyError('DQ extension not found.') + + + # Reference pixels should be flagged in the DQ array with the + # REFERENCE_PIXEL flag. Find the science pixels by looking for + # pixels that don't have that bit set. + scipix = np.where(data_quality & dqflags.pixel['REFERENCE_PIXEL'] == 0) + ymin = np.min(scipix[0]) + xmin = np.min(scipix[1]) + ymax = np.max(scipix[0]) + 1 + xmax = np.max(scipix[1]) + 1 + + # Adjust the minimum and maximum x and y values if they are within + # the reference pixels + for key in amp_bounds: + bounds = amp_bounds[key] + prev_xmin, prev_ymin = bounds[0] + prev_xmax, prev_ymax = bounds[1] + if prev_xmin < xmin: + new_xmin = xmin + else: + new_xmin = prev_xmin + if prev_ymin < ymin: + new_ymin = ymin + else: + new_ymin = prev_ymin + if prev_xmax > xmax: + new_xmax = xmax + else: + new_xmax = prev_xmax + if prev_ymax > ymax: + new_ymax = ymax + else: + new_ymax = prev_ymax + amp_bounds[key] = [(new_xmin, new_ymin), (new_xmax, new_ymax)] + + return num_amps, amp_bounds + + +def calc_frame_time(instrument, aperture, xdim, ydim, amps, sample_time=1.e-5): + """Calculate the readout time for a single frame of a given size and + number of amplifiers. Note that for NIRISS and FGS, the fast readout + direction is opposite to that in NIRCam, so we switch ``xdim`` and + ``ydim`` so that we can keep a single equation. + + Parameters + ---------- + instrument : str + Name of the instrument being simulated + + aperture : str + Name of aperture being simulated (e.g. ``NRCA1_FULL``). + Currently this is only used to check for the FGS ``ACQ1`` + aperture, which uses a unique value of ``colpad`` below. + + xdim : int + Number of columns in the frame + + ydim : int + Number of rows in the frame + + amps : int + Number of amplifiers used to read out the frame + + sample_time : float + Time to sample a pixel, in seconds.
For NIRCam/NIRISS/FGS + this is 10 microseconds = 1e-5 seconds + + Returns + ------- + frametime : float + Readout time in seconds for the frame + """ + + instrument = instrument.lower() + if instrument == "nircam": + colpad = 12 + + # Fullframe + if amps == 4: + rowpad = 1 + fullpad = 1 + else: + # All subarrays + rowpad = 2 + fullpad = 0 + if ((xdim <= 8) & (ydim <= 8)): + # The smallest subarray + rowpad = 3 + + elif instrument == "niriss": + colpad = 12 + + # Fullframe + if amps == 4: + rowpad = 1 + fullpad = 1 + else: + rowpad = 2 + fullpad = 0 + + elif instrument == 'fgs': + colpad = 6 + if 'acq1' in aperture.lower(): + colpad = 12 + rowpad = 1 + if amps == 4: + fullpad = 1 + else: + fullpad = 0 + + return ((1.0 * xdim / amps + colpad) * (ydim + rowpad) + fullpad) * sample_time diff --git a/jwql/utils/logging_functions.py b/jwql/utils/logging_functions.py index 589907896..77292f68f 100644 --- a/jwql/utils/logging_functions.py +++ b/jwql/utils/logging_functions.py @@ -1,4 +1,3 @@ - """ Logging functions for the ``jwql`` automation platform. This module provides decorators to log the execution of modules. Log @@ -10,8 +9,9 @@ Authors ------- - - Catherine Martlin 2018 - - Alex Viana, 2013 (WFC3 QL Version) + - Catherine Martlin + - Alex Viana (WFC3 QL Version) + - Matthew Bourque Use --- @@ -42,8 +42,8 @@ def my_main_function(): ------------ The user must have a configuration file named ``config.json`` - placed in the ``utils`` directory. - + placed in the ``utils`` directory and it must contain keys for + ``log_dir`` and ``admin_account``. References ---------- @@ -68,11 +68,8 @@ def my_main_function(): from jwql.utils.permissions import set_permissions from jwql.utils.utils import get_config, ensure_dir_exists -LOG_FILE_LOC = '' -PRODUCTION_BOOL = '' - -def configure_logging(module, production_mode=True, path='./'): +def configure_logging(module): """Configure the log file with a standard logging format. Parameters @@ -84,27 +81,32 @@ environment. path : str Where to write the log if user-supplied path; default to working dir. + + Returns + ------- + log_file : str + The path to the file where the log is written. """ # Determine log file location - if production_mode: - log_file = make_log_file(module) - else: - log_file = make_log_file(module, production_mode=False, path=path) - global LOG_FILE_LOC - global PRODUCTION_BOOL - LOG_FILE_LOC = log_file - PRODUCTION_BOOL = production_mode + log_file = make_log_file(module) + + # Make sure no other root handlers exist before configuring the logger + for handler in logging.root.handlers[:]: + logging.root.removeHandler(handler) # Create the log file and set the permissions logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %H:%M:%S %p', level=logging.INFO) + print('Log file initialized to {}'.format(log_file)) set_permissions(log_file) + return log_file -def make_log_file(module, production_mode=True, path='./'): + +def make_log_file(module): """Create the log file name based on the module name. The name of the ``log_file`` is a combination of the name of the @@ -127,22 +129,32 @@ The full path to where the log file will be written to. 
""" + # Build filename timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') filename = '{0}_{1}.log'.format(module, timestamp) + + # Determine save location user = pwd.getpwuid(os.getuid()).pw_name + admin_account = get_config()['admin_account'] + log_path = get_config()['log_dir'] + + # For production + if user == admin_account and socket.gethostname()[0] == 'p': + log_file = os.path.join(log_path, 'prod', module, filename) - settings = get_config() - admin_account = settings['admin_account'] - log_path = settings['log_dir'] + # For test + elif user == admin_account and socket.gethostname()[0] == 't': + log_file = os.path.join(log_path, 'test', module, filename) - if user != admin_account or not production_mode: - module = os.path.join('dev', module) + # For dev + elif user == admin_account and socket.gethostname()[0] == 'd': + log_file = os.path.join(log_path, 'dev', module, filename) - if production_mode: - log_file = os.path.join(log_path, module, filename) + # For local (also write to dev) else: - log_file = os.path.join(path, filename) + log_file = os.path.join(log_path, 'dev', module, filename) + # Make sure parent directory exists ensure_dir_exists(os.path.dirname(log_file)) return log_file @@ -167,7 +179,7 @@ def log_info(func): """ @wraps(func) - def wrapped(*a, **kw): + def wrapped(*args, **kwargs): # Log environment information logging.info('User: ' + getpass.getuser()) @@ -176,17 +188,18 @@ def wrapped(*a, **kw): logging.info('Python Executable Path: ' + sys.executable) # Read in setup.py file to build list of required modules - settings = get_config() - setup_file_name = settings['setup_file'] - with open(setup_file_name) as setup: - for line in setup: - if line[0:8] == "REQUIRES": - module_required = line[12:-2] - module_list = module_required.split(',') + with open(get_config()['setup_file']) as f: + data = f.readlines() + + for i, line in enumerate(data): + if 'REQUIRES = [' in line: + begin = i + 1 + elif 'setup(' in line: + end = i - 2 + required_modules = data[begin:end] # Clean up the module list - module_list = [module.replace('"', '').replace("'", '').replace(' ', '') for module in module_list] - module_list = [module.split('=')[0] for module in module_list] + module_list = [item.strip().replace("'", "").replace(",", "").split("=")[0].split(">")[0].split("<")[0] for item in required_modules] # Log common module version information for module in module_list: @@ -194,13 +207,15 @@ def wrapped(*a, **kw): mod = importlib.import_module(module) logging.info(module + ' Version: ' + mod.__version__) logging.info(module + ' Path: ' + mod.__path__[0]) - except ImportError as err: + except (ImportError, AttributeError) as err: logging.warning(err) + logging.info('') + # Call the function and time it t1_cpu = time.clock() t1_time = time.time() - func(*a, **kw) + func(*args, **kwargs) t2_cpu = time.clock() t2_time = time.time() @@ -209,8 +224,8 @@ def wrapped(*a, **kw): minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60) hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60) minutes_time, seconds_time = divmod(remainder_time, 60) - logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time)) - logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu)) + logging.info('Elapsed Real Time: {}:{}:{}'.format(int(hours_time), int(minutes_time), int(seconds_time))) + logging.info('Elapsed CPU Time: {}:{}:{}'.format(int(hours_cpu), int(minutes_cpu), int(seconds_cpu))) return wrapped @@ 
-230,12 +245,12 @@ def log_fail(func): """ @wraps(func) - def wrapped(*a, **kw): + def wrapped(*args, **kwargs): try: # Run the function - func(*a, **kw) + func(*args, **kwargs) logging.info('Completed Successfully') except Exception: @@ -243,3 +258,36 @@ def wrapped(*a, **kw): logging.critical('CRASHED') return wrapped + + +def log_timing(func): + """Decorator to time a module or function within a code. + + Parameters + ---------- + func : func + The function to time. + + Returns + ------- + wrapped : func + The wrapped function. Will log the time.""" + + def wrapped(*args, **kwargs): + + # Call the function and time it + t1_cpu = time.process_time() + t1_time = time.time() + func(*args, **kwargs) + t2_cpu = time.process_time() + t2_time = time.time() + + # Log execution time + hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60) + minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60) + hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60) + minutes_time, seconds_time = divmod(remainder_time, 60) + logging.info('Elapsed Real Time of {}: {}:{}:{}'.format(func.__name__, int(hours_time), int(minutes_time), int(seconds_time))) + logging.info('Elapsed CPU Time of {}: {}:{}:{}'.format(func.__name__, int(hours_cpu), int(minutes_cpu), int(seconds_cpu))) + + return wrapped diff --git a/jwql/utils/preview_image.py b/jwql/utils/preview_image.py index 7a322c732..db0e5dbb5 100755 --- a/jwql/utils/preview_image.py +++ b/jwql/utils/preview_image.py @@ -50,7 +50,7 @@ import matplotlib.colors as colors # Only import jwst if not running from readthedocs -if 'build' and 'project' and 'jwql' not in socket.gethostname(): +if 'build' and 'project' not in socket.gethostname(): from jwst.datamodels import dqflags @@ -165,7 +165,7 @@ def find_limits(self, data, pixmap, clipperc): numclip = np.int(clipperc * nelem) sorted = np.sort(data[pixmap], axis=None) minval = sorted[numclip] - maxval = sorted[-numclip-1] + maxval = sorted[-numclip - 1] return (minval, maxval) def get_data(self, filename, ext): @@ -212,13 +212,18 @@ def get_data(self, filename, ext): yd, xd = data.shape[-2:] dq = np.ones((yd, xd), dtype="bool") + # Collect information on aperture location within the # full detector. This is needed for mosaicking NIRCam # detectors later. - self.xstart = hdulist[0].header['SUBSTRT1'] - self.ystart = hdulist[0].header['SUBSTRT2'] - self.xlen = hdulist[0].header['SUBSIZE1'] - self.ylen = hdulist[0].header['SUBSIZE2'] + try: + self.xstart = hdulist[0].header['SUBSTRT1'] + self.ystart = hdulist[0].header['SUBSTRT2'] + self.xlen = hdulist[0].header['SUBSIZE1'] + self.ylen = hdulist[0].header['SUBSIZE2'] + except KeyError: + logging.warning('SUBSTR and SUBSIZE header keywords not found') + else: raise FileNotFoundError(('WARNING: {} does not exist!'.format(filename))) @@ -322,16 +327,15 @@ def make_figure(self, image, integration_number, min_value, max_value, tlabelstr = [format_string % number for number in tlabelflt] cbar = fig.colorbar(cax, ticks=tickvals) cbar.ax.set_yticklabels(tlabelstr) - cbar.ax.tick_params(labelsize=maxsize * 5./4) - # cbar.ax.set_ylabel('Signal', rotation=270, fontsize=maxsize*5./4) - ax.set_xlabel('Pixels', fontsize=maxsize * 5./4) - ax.set_ylabel('Pixels', fontsize=maxsize * 5./4) + cbar.ax.tick_params(labelsize=maxsize * 5. / 4) + ax.set_xlabel('Pixels', fontsize=maxsize * 5. / 4) + ax.set_ylabel('Pixels', fontsize=maxsize * 5. 
/ 4) ax.tick_params(labelsize=maxsize) plt.rcParams.update({'axes.titlesize': 'small'}) - plt.rcParams.update({'font.size': maxsize * 5./4}) - plt.rcParams.update({'axes.labelsize': maxsize * 5./4}) - plt.rcParams.update({'ytick.labelsize': maxsize * 5./4}) - plt.rcParams.update({'xtick.labelsize': maxsize * 5./4}) + plt.rcParams.update({'font.size': maxsize * 5. / 4}) + plt.rcParams.update({'axes.labelsize': maxsize * 5. / 4}) + plt.rcParams.update({'ytick.labelsize': maxsize * 5. / 4}) + plt.rcParams.update({'xtick.labelsize': maxsize * 5. / 4}) elif scale == 'linear': fig, ax = plt.subplots(figsize=(xsize, ysize)) diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py index c3b834bd0..7b155e600 100644 --- a/jwql/utils/utils.py +++ b/jwql/utils/utils.py @@ -29,10 +29,12 @@ - JWST TR JWST-STScI-004800, SM-12 """ +import datetime import getpass import json import os import re +import shutil from jwql.utils import permissions from jwql.utils.constants import FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES_SHORTHAND @@ -40,9 +42,98 @@ __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) -def ensure_dir_exists(fullpath): - """Creates dirs from ``fullpath`` if they do not already exist. +def copy_files(files, out_dir): + """Copy a given file to a given directory. Only try to copy the file + if it is not already present in the output directory. + + Parameters + ---------- + files : list + List of files to be copied + + out_dir : str + Destination directory + + Returns + ------- + success : list + Files successfully copied (or that already existed in out_dir) + + failed : list + Files that were not copied + """ + + # Copy files if they do not already exist + success = [] + failed = [] + for input_file in files: + input_new_path = os.path.join(out_dir, os.path.basename(input_file)) + if os.path.isfile(input_new_path): + success.append(input_new_path) + else: + try: + shutil.copy2(input_file, out_dir) + success.append(input_new_path) + permissions.set_permissions(input_new_path) + except: + failed.append(input_file) + return success, failed + + +def download_mast_data(query_results, output_dir): + """Example function for downloading MAST query results. From MAST + website (``https://mast.stsci.edu/api/v0/pyex.html``) + + Parameters + ---------- + query_results : list + List of dictionaries returned by a MAST query. 
+ + output_dir : str + Directory into which the files will be downloaded """ + + # Set up the https connection (``http.client`` is the Python 3 successor to ``httplib``) + import http.client as httplib + server = 'mast.stsci.edu' + conn = httplib.HTTPSConnection(server) + + # Download the products + print('Number of query results: {}'.format(len(query_results))) + + for i in range(len(query_results)): + + # Make full output file path + output_file = os.path.join(output_dir, query_results[i]['filename']) + + print('Output file is {}'.format(output_file)) + + # Download the data + uri = query_results[i]['dataURI'] + + print('uri is {}'.format(uri)) + + conn.request("GET", "/api/v0/download/file?uri="+uri) + resp = conn.getresponse() + file_content = resp.read() + + # Save to file + with open(output_file, 'wb') as file_obj: + file_obj.write(file_content) + + # Check for file + if not os.path.isfile(output_file): + print("ERROR: {} failed to download.".format(output_file)) + else: + statinfo = os.stat(output_file) + if statinfo.st_size > 0: + print("DOWNLOAD COMPLETE: ", output_file) + else: + print("ERROR: {} file is empty.".format(output_file)) + conn.close() + + +def ensure_dir_exists(fullpath): + """Creates dirs from ``fullpath`` if they do not already exist.""" if not os.path.exists(fullpath): os.makedirs(fullpath) permissions.set_permissions(fullpath) @@ -230,6 +321,33 @@ def filename_parser(filename): return filename_dict +def filesystem_path(filename): + """Return the full path to a given file in the filesystem + + Parameters + ---------- + filename : str + File to locate (e.g. ``jw86600006001_02101_00008_guider1_cal.fits``) + + Returns + ------- + full_path : str + Full path to the given file, including filename + """ + + filesystem_base = get_config()["filesystem"] + + # Subdirectory name is based on the proposal ID + subdir = 'jw{}'.format(filename_parser(filename)['program_id']) + full_path = os.path.join(filesystem_base, subdir, filename) + + # Check to see if the file exists + if os.path.isfile(full_path): + return full_path + else: + raise FileNotFoundError(('{} is not in the predicted location: {}'.format(filename, full_path))) + + def get_base_url(): """Return the beginning part of the URL to the ``jwql`` web app based on which user is running the software. @@ -275,3 +393,53 @@ def get_config(): settings = json.load(config_file) return settings + + +def initialize_instrument_monitor(module): + """Configures a log file for the instrument monitor run and + captures the start time of the monitor + + Parameters + ---------- + module : str + The module name (e.g. ``dark_monitor``) + + Returns + ------- + start_time : datetime object + The start time of the monitor + log_file : str + The path to where the log file is stored + """ + + from jwql.utils.logging_functions import configure_logging + + start_time = datetime.datetime.now() + log_file = configure_logging(module) + + return start_time, log_file + + +def update_monitor_table(module, start_time, log_file): + """Update the ``monitor`` database table with information about + the instrument monitor run + + Parameters + ---------- + module : str + The module name (e.g. 
``dark_monitor``) + start_time : datetime object + The start time of the monitor + log_file : str + The path to where the log file is stored + """ + + from jwql.database.database_interface import Monitor + + new_entry = {} + new_entry['monitor_name'] = module + new_entry['start_time'] = start_time + new_entry['end_time'] = datetime.datetime.now() + new_entry['log_file'] = os.path.basename(log_file) + + Monitor.__table__.insert().execute(new_entry) diff --git a/jwql/website/apps/jwql/api_views.py b/jwql/website/apps/jwql/api_views.py index de4e1a93b..13e129049 100644 --- a/jwql/website/apps/jwql/api_views.py +++ b/jwql/website/apps/jwql/api_views.py @@ -56,6 +56,7 @@ from .data_containers import get_thumbnails_by_instrument from .data_containers import get_thumbnails_by_proposal from .data_containers import get_thumbnails_by_rootname +from .oauth import auth_required def all_proposals(request): diff --git a/jwql/website/apps/jwql/data_containers.py b/jwql/website/apps/jwql/data_containers.py index 4606b04e5..91b3117c2 100644 --- a/jwql/website/apps/jwql/data_containers.py +++ b/jwql/website/apps/jwql/data_containers.py @@ -27,13 +27,15 @@ import re import tempfile -import numpy as np from astropy.io import fits from astropy.time import Time from astroquery.mast import Mast +import numpy as np from jwql.edb.edb_interface import mnemonic_inventory from jwql.edb.engineering_database import get_mnemonic, get_mnemonic_info +from jwql.instrument_monitors.miri_monitors.data_trending import dashboard as miri_dash +from jwql.instrument_monitors.nirspec_monitors.data_trending import dashboard as nirspec_dash from jwql.jwql_monitors import monitor_cron_jobs from jwql.utils.constants import MONITORS from jwql.utils.preview_image import PreviewImage @@ -48,6 +50,38 @@ REPO_DIR = os.path.split(PACKAGE_DIR)[0] +def data_trending(): + """Container for the MIRI data trending dashboard and its components + + Returns + ------- + variables : dict + Dictionary of variables to be rendered in the template context + dashboard : list + A list containing the JavaScript and HTML content for the + dashboard + """ + dashboard, variables = miri_dash.data_trending_dashboard() + + return variables, dashboard + + +def nirspec_trending(): + """Container for the NIRSpec data trending dashboard and its components + + Returns + ------- + variables : dict + Dictionary of variables to be rendered in the template context + dashboard : list + A list containing the JavaScript and HTML content for the + dashboard + """ + dashboard, variables = nirspec_dash.data_trending_dashboard() + + return variables, dashboard + + def get_acknowledgements(): """Returns a list of individuals who are acknowledged on the ``about`` page. 
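The two bookkeeping helpers added to `jwql/utils/utils.py` above are meant to bracket a monitor run. A minimal sketch of the intended call pattern (the `dark_monitor` name here is only a stand-in):

```python
from jwql.utils.utils import initialize_instrument_monitor, update_monitor_table

module = 'dark_monitor'  # stand-in for the monitor's module name
start_time, log_file = initialize_instrument_monitor(module)

# ... run the monitor's main routine here ...

# Record the run in the ``monitor`` database table
update_monitor_table(module, start_time, log_file)
```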
@@ -122,7 +156,8 @@ def get_dashboard_components(): 'system_stats': 'System Statistics'} # Exclude monitors that can't be saved as components - exclude_list = ['monitor_cron_jobs'] + exclude_list = ['monitor_cron_jobs', 'miri_data_trending', + 'trainings_data_15min', 'trainings_data_day'] # Run the cron job monitor to produce an updated table monitor_cron_jobs.status(production_mode=True) @@ -203,8 +238,7 @@ def get_edb_components(request): end_time = Time(mnemonic_query_form['end_time'].value(), format='iso') if mnemonic_identifier is not None: - mnemonic_query_result = get_mnemonic(mnemonic_identifier, start_time, - end_time) + mnemonic_query_result = get_mnemonic(mnemonic_identifier, start_time, end_time) mnemonic_query_result_plot = mnemonic_query_result.bokeh_plot() # create forms for search fields not clicked @@ -511,7 +545,9 @@ def get_preview_images_by_instrument(inst): preview_images = glob.glob(os.path.join(PREVIEW_IMAGE_FILESYSTEM, '*', '*.jpg')) # Get subset of preview images that match the filenames - preview_images = [item for item in preview_images if os.path.basename(item).split('_integ')[0] in filenames] + preview_images = [os.path.basename(item) for item in preview_images if os.path.basename(item).split('_integ')[0] in filenames] + + # Return only return preview_images @@ -642,7 +678,7 @@ def get_thumbnails_by_instrument(inst): thumbnails = glob.glob(os.path.join(THUMBNAIL_FILESYSTEM, '*', '*.thumb')) # Get subset of preview images that match the filenames - thumbnails = [item for item in thumbnails if os.path.basename(item).split('_integ')[0] in filenames] + thumbnails = [os.path.basename(item) for item in thumbnails if os.path.basename(item).split('_integ')[0] in filenames] return thumbnails @@ -696,32 +732,6 @@ def get_thumbnails_by_rootname(rootname): return thumbnails -def split_files(file_list, page_type): - """JUST FOR USE DURING DEVELOPMENT WITH FILESYSTEM - - Splits the files in the filesystem into "unlooked" and "archived", - with the "unlooked" images being the most recent 10% of files. 
- """ - exp_times = [] - for file in file_list: - hdr = fits.getheader(file, ext=0) - exp_start = hdr['EXPSTART'] - exp_times.append(exp_start) - - exp_times_sorted = sorted(exp_times) - i_cutoff = int(len(exp_times) * .1) - t_cutoff = exp_times_sorted[i_cutoff] - - mask_unlooked = np.array([t < t_cutoff for t in exp_times]) - - if page_type == 'unlooked': - print('ONLY RETURNING {} "UNLOOKED" FILES OF {} ORIGINAL FILES'.format(len([m for m in mask_unlooked if m]), len(file_list))) - return [f for i, f in enumerate(file_list) if mask_unlooked[i]] - elif page_type == 'archive': - print('ONLY RETURNING {} "ARCHIVED" FILES OF {} ORIGINAL FILES'.format(len([m for m in mask_unlooked if not m]), len(file_list))) - return [f for i, f in enumerate(file_list) if not mask_unlooked[i]] - - def thumbnails(inst, proposal=None): """Generate a page showing thumbnail images corresponding to activities, from a given ``proposal`` @@ -815,10 +825,6 @@ def thumbnails_ajax(inst, proposal=None): # Get the available files for the instrument filepaths = get_filenames_by_instrument(inst) - if proposal is not None: - filepaths = split_files(filepaths, 'archive') - else: - filepaths = split_files(filepaths, 'unlooked') # Get set of unique rootnames rootnames = set(['_'.join(f.split('/')[-1].split('_')[:-1]) for f in filepaths]) diff --git a/jwql/website/apps/jwql/oauth.py b/jwql/website/apps/jwql/oauth.py index e756536d2..10406b139 100644 --- a/jwql/website/apps/jwql/oauth.py +++ b/jwql/website/apps/jwql/oauth.py @@ -42,8 +42,10 @@ def login(request): import requests from authlib.django.client import OAuth -from django.shortcuts import redirect +from django.shortcuts import redirect, render +import jwql +from jwql.utils.constants import MONITORS from jwql.utils.utils import get_base_url, get_config @@ -122,9 +124,9 @@ def authorize(request): return response -def auth_required(fn): - """A decorator function that requires the given function to have - authentication through ``auth.mast`` set up. +def auth_info(fn): + """A decorator function that will return user credentials along + with what is returned by the original function. Parameters ---------- @@ -133,21 +135,19 @@ def auth_required(fn): Returns ------- - check_auth : function + user_info : function The decorated function """ - @auth_info - def check_auth(request, user): - """Check if the user is authenticated through ``auth.mast``. - If not, perform the authorization. + def user_info(request, **kwargs): + """Store authenticated user credentials in a cookie and return + it. If the user is not authenticated, store no credentials in + the cookie. 
Parameters ---------- request : HttpRequest object Incoming request from the webpage - user : dict - A dictionary of user credentials Returns ------- @@ -155,20 +155,28 @@ def check_auth(request, user): The decorated function """ - # If user is currently anonymous, require a login - if user["anon"]: - # Redirect to oauth login - redirect_uri = os.path.join(get_base_url(), 'authorize') - return JWQL_OAUTH.mast_auth.authorize_redirect(request, redirect_uri) + cookie = request.COOKIES.get("ASB-AUTH") - return fn(request, user) + # If user is authenticated, return user credentials + if cookie is not None: + response = requests.get( + 'https://{}/info'.format(get_config()['auth_mast']), + headers={'Accept': 'application/json', + 'Authorization': 'token {}'.format(cookie)}) + response = response.json() - return check_auth + # If user is not authenticated, return no credentials + else: + response = {'ezid': None, "anon": True} + return fn(request, response, **kwargs) -def auth_info(fn): - """A decorator function that will return user credentials along - with what is returned by the original function. + return user_info + + +def auth_required(fn): + """A decorator function that requires the given function to have + authentication through ``auth.mast`` set up. Parameters ---------- @@ -177,19 +185,21 @@ def auth_info(fn): Returns ------- - user_info : function + check_auth : function The decorated function """ - def user_info(request, **kwargs): - """Store authenticated user credentials in a cookie and return - it. If the user is not authenticated, store no credentials in - the cookie. + @auth_info + def check_auth(request, user, **kwargs): + """Check if the user is authenticated through ``auth.mast``. + If not, perform the authorization. Parameters ---------- request : HttpRequest object Incoming request from the webpage + user : dict + A dictionary of user credentials Returns ------- @@ -197,26 +207,21 @@ def user_info(request, **kwargs): The decorated function """ - cookie = request.COOKIES.get("ASB-AUTH") + # If user is currently anonymous, require a login + if user['ezid']: - # If user is authenticated, return user credentials - if cookie is not None: - response = requests.get( - 'https://{}/info'.format(get_config()['auth_mast']), - headers={'Accept': 'application/json', - 'Authorization': 'token {}'.format(cookie)}) - response = response.json() + return fn(request, user, **kwargs) - # If user is not authenticated, return no credentials else: - response = {'ezid' : None, "anon": True} + template = 'not_authenticated.html' + context = {'inst': ''} - return fn(request, response, **kwargs) + return render(request, template, context) - return user_info + return check_auth -@auth_required +@auth_info def login(request, user): """Spawn a login process for the user @@ -237,7 +242,9 @@ def login(request, user): Outgoing response sent to the webpage """ - return redirect("/") + # Redirect to oauth login + redirect_uri = os.path.join(get_base_url(), 'authorize') + return JWQL_OAUTH.mast_auth.authorize_redirect(request, redirect_uri) def logout(request): diff --git a/jwql/website/apps/jwql/static/css/jwql.css b/jwql/website/apps/jwql/static/css/jwql.css index 04d115dc3..8435ef699 100644 --- a/jwql/website/apps/jwql/static/css/jwql.css +++ b/jwql/website/apps/jwql/static/css/jwql.css @@ -57,6 +57,13 @@ margin-right: 2%; } +/* Make disabled dropdown items grey and unclickable */ +.disabled-dropdown { + color: #bec4d4 !important; + pointer-events: none; + cursor: default; +} + /*Define dropdown menu 
colors*/ .dropdown-item:hover{ color: #c85108; diff --git a/jwql/website/apps/jwql/templates/base.html b/jwql/website/apps/jwql/templates/base.html index efc0e7da8..315bcc540 100644 --- a/jwql/website/apps/jwql/templates/base.html +++ b/jwql/website/apps/jwql/templates/base.html @@ -61,13 +61,17 @@ diff --git a/jwql/website/apps/jwql/templates/instrument.html b/jwql/website/apps/jwql/templates/instrument.html index 34baee53b..e456a70a5 100644 --- a/jwql/website/apps/jwql/templates/instrument.html +++ b/jwql/website/apps/jwql/templates/instrument.html @@ -25,10 +25,12 @@

Images

Monitors

{% for key, inst_tools in tools.items() %} - {% if key == inst %} - {% for tool in inst_tools %} - {% if "Monitor" in tool %} - {{ tool }}
+ {% if key == inst.lower() %} + {% for monitor_name, monitor_url in inst_tools %} + {% if monitor_url == '#' %} + {{ monitor_name }}
+ {% else %} + {{ monitor_name }}
{% endif %} {% endfor %} {% endif %} diff --git a/jwql/website/apps/jwql/templates/miri_data_trending.html b/jwql/website/apps/jwql/templates/miri_data_trending.html new file mode 100644 index 000000000..06637cbd5 --- /dev/null +++ b/jwql/website/apps/jwql/templates/miri_data_trending.html @@ -0,0 +1,66 @@ + +{% extends "base.html" %} + + + +{% block preamble %} + + + Webpage Title - JWQL + +{% endblock %} + + +{% block content %} + +
+ + + +
+

How it works

+ The MIRI data-trending dashboard is intended for instrument engineers to keep + track of the trends of relevant parameters. A background process works through a daily chunk of data and + calculates the average and deviation of all engineering values that correspond to specific + mnemonic identifiers. The outcome is stored in a separate database and shown on + the dashboard below. Since a cron job populates the database over time, the app + allows users to keep track of the general value trend caused by degradation and radiation. +
+
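+        <!-- Illustration (assumed reduction, not taken from the processing code): for each mnemonic, one day of EDB samples s_1..s_n is collapsed to a mean (sum(s_i)/n) and a deviation (e.g. sqrt(sum((s_i - mean)^2)/n)); each such pair becomes one point on the plots below. -->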
+ +
+ +
+
+ + Bokeh Scatter Plots + + + + + +
+ {{ dashboard[0] | safe }} +
+ + + + + + +
+ {{ dashboard[1] | safe }} +
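+        <!-- dashboard[0] and dashboard[1] are presumably the (script, div) pair produced by bokeh.embed.components() for the assembled dashboard; both are passed through the "safe" filter so the embedded JavaScript and HTML render unescaped. -->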
+
+ +
+ +{% endblock %} diff --git a/jwql/website/apps/jwql/templates/nirspec_data_trending.html b/jwql/website/apps/jwql/templates/nirspec_data_trending.html new file mode 100644 index 000000000..24fae4b84 --- /dev/null +++ b/jwql/website/apps/jwql/templates/nirspec_data_trending.html @@ -0,0 +1,73 @@ + +{% extends "base.html" %} + + + +{% block preamble %} + + + Webpage Title - JWQL + +{% endblock %} + + +{% block content %} + +
+ + + +
+

How it works

+ The NIRSpec data-trending dashboard is a web-based application that allows engineers and general users to keep track of a subset of key + NIRSpec performance parameters (“mnemonics”) such as voltages, currents, sensor signals and temperatures. In total, 200+ mnemonics are + tracked for NIRSpec. This telemetry data is retrieved from the engineering database (EDB) on the Mikulski Archive for Space Telescopes + (MAST) at STScI, and some statistical analysis is performed (number of samples considered, mean, standard deviation).
+ The processed data is stored in a separate database and presented as a series of plots in the dashboard below. + Each point on a given graph represents the data collected for one specific day. As such, the application allows the user + to perform long-term trending on key NIRSpec performance parameters and identify potential changes in instrument behaviour + should they appear.
+ In total there are 7 different tabs: 1) Power 2) Reference Voltages 3) Temperatures 4) MSA and MCE 5) FPA and FPE 6) CAA Lamps and 7) + Filter and Grating Wheel. Each tab has its own specific set of plots. Hovering over a data point brings up detailed + statistics, and various zoom and pan tools allow the user to manipulate the graphs. + Curves can be activated and muted by clicking on the legend.
+ In the future, a cron job referring to the EDB will populate the database over time. +
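As a rough illustration of the tabbed layout described above (a sketch only, not the actual jwql implementation; the Bokeh 1.x API pinned in `requirements.txt` is assumed):

```python
from bokeh.models.widgets import Panel, Tabs
from bokeh.plotting import figure

def make_panel(title):
    # One tab holds one figure; in practice each figure would carry one
    # scatter/line renderer per mnemonic, populated from the database.
    plot = figure(x_axis_type='datetime', plot_width=700, plot_height=300, title=title)
    plot.circle(x=[], y=[])
    return Panel(child=plot, title=title)

tabs = Tabs(tabs=[make_panel(name) for name in
                  ('Power', 'Reference Voltages', 'Temperatures')])
```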
+
+ +
+ +
+
+ + Bokeh Scatter Plots + + + + + +
+ {{ dashboard[0] | safe }} +
+ + + + + + +
+ {{ dashboard[1] | safe }} +
+
+ +
+ +{% endblock %} diff --git a/jwql/website/apps/jwql/templates/not_authenticated.html b/jwql/website/apps/jwql/templates/not_authenticated.html new file mode 100644 index 000000000..11f7057ab --- /dev/null +++ b/jwql/website/apps/jwql/templates/not_authenticated.html @@ -0,0 +1,15 @@ +{% extends "base.html" %} + +{% block preamble %} + + Not Authenticated - JWQL + +{% endblock %} + +{% block content %} + +
+

Please log in to see this page!

+
+ +{% endblock %} \ No newline at end of file diff --git a/jwql/website/apps/jwql/urls.py b/jwql/website/apps/jwql/urls.py index 13b4b21bb..dece6036c 100644 --- a/jwql/website/apps/jwql/urls.py +++ b/jwql/website/apps/jwql/urls.py @@ -60,6 +60,12 @@ path('logout/', oauth.logout, name='logout'), path('authorize/', oauth.authorize, name='authorize'), + # NIRSpec views + path('nirspec/nirspec_data_trending/', views.nirspec_data_trending, name='nirspec_data_trending'), + + # MIRI views + path('miri/miri_data_trending/', views.miri_data_trending, name='miri_data_trending'), + # Main site views path('about/', views.about, name='about'), path('dashboard/', views.dashboard, name='dashboard'), @@ -85,5 +91,5 @@ re_path(r'^api/(?P<proposal>[\d]{5})/thumbnails/$', api_views.thumbnails_by_proposal, name='preview_images_by_proposal'), re_path(r'^api/(?P<rootname>[\w]+)/filenames/$', api_views.filenames_by_rootname, name='filenames_by_rootname'), re_path(r'^api/(?P<rootname>[\w]+)/preview_images/$', api_views.preview_images_by_rootname, name='preview_images_by_rootname'), - re_path(r'^api/(?P<rootname>[\w]+)/thumbnails/$', api_views.thumbnails_by_rootname, name='thumbnails_by_rootname') + re_path(r'^api/(?P<rootname>[\w]+)/thumbnails/$', api_views.thumbnails_by_rootname, name='thumbnails_by_rootname'), ] diff --git a/jwql/website/apps/jwql/views.py b/jwql/website/apps/jwql/views.py index ee8acbed9..58fde5eb1 100644 --- a/jwql/website/apps/jwql/views.py +++ b/jwql/website/apps/jwql/views.py @@ -48,13 +48,77 @@ from .data_containers import get_proposal_info from .data_containers import thumbnails from .data_containers import thumbnails_ajax +from .data_containers import data_trending +from .data_containers import nirspec_trending from .forms import FileSearchForm -from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE +from .oauth import auth_info, auth_required +import jwql +from jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS, JWST_INSTRUMENT_NAMES_MIXEDCASE from jwql.utils.utils import get_base_url, get_config FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem') + +def miri_data_trending(request): + """Generate the ``MIRI DATA-TRENDING`` page + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + + Returns + ------- + HttpResponse object + Outgoing response sent to the webpage + """ + + template = "miri_data_trending.html" + variables, dash = data_trending() + + context = { + 'dashboard': dash, + 'inst': '', # Leave as empty string or instrument name; Required for navigation bar + 'inst_list': JWST_INSTRUMENT_NAMES_MIXEDCASE, # Do not edit; Required for navigation bar + 'tools': MONITORS, # Do not edit; Required for navigation bar + 'user': None # Do not edit; Required for authentication + } + # Append variables to context + context.update(variables) + + # Return an HTTP response with the template and dictionary of variables + return render(request, template, context) + + +def nirspec_data_trending(request): + """Generate the ``NIRSpec DATA-TRENDING`` page + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + + Returns + ------- + HttpResponse object + Outgoing response sent to the webpage + """ + + template = "nirspec_data_trending.html" + variables, dash = nirspec_trending() + + context = { + 'dashboard': dash, + 'inst': '', # Leave as empty string or instrument name; Required for navigation bar + 'inst_list': JWST_INSTRUMENT_NAMES_MIXEDCASE, # Do not edit; Required for navigation bar + 'tools': MONITORS, # Do not edit; Required for 
navigation bar + 'user': None # Do not edit; Required for authentication + } + # Append variables to context + context.update(variables) + + # Return an HTTP response with the template and dictionary of variables + return render(request, template, context) + def about(request): """Generate the ``about`` page @@ -76,7 +140,8 @@ return render(request, template, context) -def archived_proposals(request, inst): +@auth_required +def archived_proposals(request, user, inst): """Generate the page listing all archived proposals in the database Parameters @@ -101,7 +166,8 @@ return render(request, template, context) -def archived_proposals_ajax(request, inst): +@auth_required +def archived_proposals_ajax(request, user, inst): """Generate the page listing all archived proposals in the database Parameters @@ -135,7 +201,8 @@ return JsonResponse(context, json_dumps_params={'indent': 2}) -def archive_thumbnails(request, inst, proposal): +@auth_required +def archive_thumbnails(request, user, inst, proposal): """Generate the page listing all archived images in the database for a certain proposal @@ -153,6 +220,7 @@ HttpResponse object Outgoing response sent to the webpage """ + # Ensure the instrument is correctly capitalized inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] @@ -164,7 +232,8 @@ return render(request, template, context) -def archive_thumbnails_ajax(request, inst, proposal): +@auth_required +def archive_thumbnails_ajax(request, user, inst, proposal): """Generate the page listing all archived images in the database for a certain proposal @@ -322,13 +391,8 @@ def unlooked_images(request, inst): HttpResponse object Outgoing response sent to the webpage """ - # Ensure the instrument is correctly capitalized - inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] - - template = 'thumbnails.html' - context = thumbnails(inst) - return render(request, template, context) + pass def view_header(request, inst, file): @@ -363,7 +427,8 @@ return render(request, template, context) -def view_image(request, inst, file_root, rewrite=False): +@auth_required +def view_image(request, user, inst, file_root, rewrite=False): """Generate the image view page Parameters diff --git a/requirements.txt b/requirements.txt index 285069504..876fc3548 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,20 +1,22 @@ asdf>=2.3.0 astropy>=3.0 astroquery==0.3.9 -authlib==0.10 -bokeh==1.0.4 -django==2.1.7 -ipython==7.3.0 -jinja2==2.10 +authlib==0.11 +bokeh==1.1.0 +django==2.2 +ipython==7.4.0 +jinja2==2.10.1 jwst==0.0.0 matplotlib==3.0.3 numpy==1.16.2 numpydoc==0.8.0 -pandas==0.24.1 -psycopg2==2.7.7 +pandas==0.24.2 +psycopg2==2.8.2 +pysiaf==0.2.5 python-dateutil==2.8.0 -pytest==4.3.0 -sphinx==1.8.5 +pytest==4.4.1 +sphinx==2.0.1 sphinx-automodapi==0.10 -sqlalchemy==1.3.1 +sqlalchemy==1.3.3 stsci_rtd_theme==0.0.2 +pytest-cov==2.6.0 \ No newline at end of file diff --git a/setup.py b/setup.py index 9870d76bb..66275add8 100644 --- a/setup.py +++ b/setup.py @@ -2,27 +2,30 @@ from setuptools import setup from setuptools import find_packages -VERSION = '0.18.0' +VERSION = '0.19.0' AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, ' AUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin' -REQUIRES = ['astropy', - 'astroquery>=0.3.9', - 
'authlib', - 'bokeh>=1.0', - 'django>=2.0', - 'jinja2', - 'jwst', - 'matplotlib', - 'numpy', - 'numpydoc', - 'pandas', - 'psycopg2', - 'pytest', - 'sphinx', - 'sqlalchemy', - 'stsci_rtd_theme'] +REQUIRES = [ + 'astropy', + 'astroquery>=0.3.9', + 'authlib', + 'bokeh>=1.0', + 'django>=2.0', + 'jinja2', + 'jwst', + 'matplotlib', + 'numpy', + 'numpydoc', + 'pandas', + 'psycopg2', + 'pysiaf', + 'pytest', + 'sphinx', + 'sqlalchemy', + 'stsci_rtd_theme' +] setup( name='jwql', diff --git a/style_guide/example.py b/style_guide/example.py index e8bb01d8d..361eebc49 100644 --- a/style_guide/example.py +++ b/style_guide/example.py @@ -57,6 +57,7 @@ import argparse import glob +import logging import os import sys from typing import List, Union, Tuple, Optional, Any, Dict @@ -67,12 +68,16 @@ import scipy from sqlalchemy import Float, Integer, String +from jwql.utils.logging_functions import configure_logging, log_info, log_fail, log_timing + # Global variables should be avoided, but if used should be named with # all-caps A_GLOBAL_VARIABLE = 'foo' # type: str +@log_fail +@log_info def my_main_function(path: str, filter: str) -> None: """The main function of the ``example`` module. @@ -87,7 +92,8 @@ def my_main_function(path: str, filter: str) -> None: The filter to process (e.g. "F606W"). """ - print('Using {} as an input file'.format(path)) + logging.info('Using {} as an input file'.format(path)) + an_int = 1 # type: int a_float = 3.14 # type: float a_bool = True # type: bool @@ -98,7 +104,7 @@ def my_main_function(path: str, filter: str) -> None: result = some_other_function(an_int, a_float, a_bool, a_list, a_tuple, a_dict, an_obj) # type: Optional[int] - print(result) + logging.info(result) def parse_args() -> argparse.Namespace: @@ -133,7 +139,8 @@ def parse_args() -> argparse.Namespace: return args -def some_other_function(an_int: int, a_float: float, a_bool: bool, a_list: List[Any], +@log_timing +def some_other_function(an_int: int, a_float: float, a_bool: bool, a_list: List[Any], a_tuple: Tuple[Any], a_dict: Dict[Any, Any], an_obj: object) -> int: """This function just does a bunch of nonsense. @@ -167,14 +174,17 @@ def some_other_function(an_int: int, a_float: float, a_bool: bool, a_list: List[ f.write('My favorite integer is {}'.format(an_int)) # Operators should be separated by spaces - print(a_float + a_float) - - return an_int + logging.info(a_float + a_float) + return an_int if __name__ == '__main__': + # Configure logging + module = os.path.basename(__file__).strip('.py') + configure_logging(module) + args = parse_args() # type: argparse.Namespace my_main_function(args.path, args.filter) diff --git a/style_guide/logging_guide.md b/style_guide/logging_guide.md new file mode 100644 index 000000000..d8367802d --- /dev/null +++ b/style_guide/logging_guide.md @@ -0,0 +1,88 @@ +Logging Style Guide for `jwql` Instrument Monitors +================================================== + +This document serves as a style guide for adding logging to `jwql` instrument monitor software. Any monitoring contribution to the `jwql` code repository should be checked against this guide to ensure proper usage of logging functions, and any violation of this and the style guide should be fixed before the code is committed to the `master` or `develop` branches. Please refer to the accompanying [`example.py`](https://github.com/spacetelescope/jwql/blob/master/style_guide/example.py) script for a example code that abides by the style guide and the logging guide. 
+ + +Introduction +------------ + +All contributions of instrument monitors to the `jwql` code repository should conform to the following logging guidelines. This is to ensure uniformity across monitoring scripts and to help allow for `jwql` maintainers to programmatically interface with log files. + +The `logging_functions.py` script can be found within the `jwql` code repository underneath the `utils` directory. Users can review that code for information on how `jwql` logging works, but should be able to setup logging by following the documentation below. + + +Logging Set-Up +-------------- + +First, ensure that the monitoring script imports the following libraries: + +```python +import os +import logging + +from jwql.utils.logging_functions import configure_logging, log_info, log_fail, log_timing +``` + +Next, under the `if __name__ == '__main__'` portion of the monitoring script, add these lines of code in order to configure the logging. This creates and initializes a corresponding log file (stored in the `jwql` central storage area): + +```python +# Configure logging +module = os.path.basename(__file__).strip('.py') +configure_logging(module) +``` + +Lastly, wrap the `log_info` and `log_fail` decorators around the main function of the monitor: + +```python +@log_fail +@log_info +def my_monitor_main(): + """The main function of the monitor""" +``` + + +Convenience Decorators +---------------------- + +The `logging_functions` module also provides a convenience decorator, `log_timing` for logging the time required to execute a given function: + +```python +@log_timing +def my_function(): + """Some function""" +``` + + +In-line Logging Use +------------------- + +Users should place logging statements within the code to indicate any notable parts of the monitoring script execution. This includes such things as: + +- An external file has been accessed/written to +- A database query/insert/update has been performed +- To document the number of data products being processed/produced +- To document the begin/end points of a long process +- etc. + + +Example log file +---------------- + +The following is what a completed log file may look like: + +``` +03/28/2019 02:30:11 AM INFO: User: +03/28/2019 02:30:11 AM INFO: System: +03/28/2019 02:30:11 AM INFO: Python Version: 3.6.4 |Anaconda, Inc.| (default, Mar 13 2018, 01:15:57) [GCC 7.2.0] +03/28/2019 02:30:11 AM INFO: Python Executable Path: /path/to/environment/jwql/bin/python +03/28/2019 02:30:11 AM INFO: Beginning +03/28/2019 02:30:11 AM INFO: Using 100 files for analysis +03/28/2019 02:30:11 AM INFO: Read in my_favorite.fits file +03/28/2019 02:30:11 AM INFO: astroquery.mast query returned 77 files +03/28/2019 02:30:11 AM INFO: Saved Bokeh plot to: /some/location/plot.html +03/28/2019 02:30:11 AM INFO: completed successfully. +03/28/2019 02:30:11 AM INFO: Elapsed Real Time: 0:2:48 +03/28/2019 02:30:11 AM INFO: Elapsed CPU Time: 0:1:15 +03/28/2019 02:30:11 AM INFO: Completed Successfully +``` diff --git a/style_guide/style_guide.md b/style_guide/style_guide.md index 4b036b4f7..f1bdf8523 100644 --- a/style_guide/style_guide.md +++ b/style_guide/style_guide.md @@ -2,7 +2,7 @@ Python Code Style Guide for `jwql` ================================= This document serves as a style guide for all `jwql` software development. Any requested contribution to the `jwql` code repository should be checked against this guide, and any violation of the guide should be fixed before the code is committed to -the `master` branch. 
Please refer to the accompanying [`example.py`](https://github.com/spacetelescope/jwql/blob/master/style_guide/example.py) script for a example code that abides by this style guide. +the `master` or `develop` branch. Please refer to the accompanying [`example.py`](https://github.com/spacetelescope/jwql/blob/master/style_guide/example.py) script for example code that abides by this style guide. Prerequisite Reading -------------------- @@ -67,6 +67,11 @@ Additionally, developers of this project should be mindful of application securi - Each function/method should have at minimum a description, `Parameters` (if necessary), and `Returns` (if necessary) sections +`jwql`-Specific Logging Standards +--------------------------------- +`jwql` employs standards for logging the execution of monitoring scripts. See the [`logging guide`](https://github.com/spacetelescope/jwql/tree/develop/logging_guide) for further details. + + `jwql`-Specific Variable Value/Type Standards --------------------------------------------- @@ -74,7 +79,7 @@ To the extent possible, `jwql` shall define frequently-used variable types/value - **JWST instrument names**: In all internal references and structures (e.g. dictionaries) instrument names shall be all lower-case strings, i.e. one of `fgs`, `miri`, `niriss`, `nircam`, `nirspec`. When variations are required for interfaces, e.g. `Nircam` for MAST, `NIRCam` or `NIRCAM` for SIAF, etc. these should be defined as dictionaries in [`jwql/utils/constants.py`](https://github.com/spacetelescope/jwql/blob/master/jwql/utils/constants.py) and imported from there. -- **Program/proposal identifiers**: JWST program IDs shall be stored and referred to internally as integers and parsed to strings only when needed. For example, the inputs `"001144"` and `"1144"` shall both be converted to an integer variable with value `1144`. +- **Program/proposal identifiers**: JWST program IDs shall be stored and referred to internally as integers and parsed to strings only when needed. For example, the inputs `"001144"` and `"1144"` shall both be converted to an integer variable with value `1144`. Tools and Library Recommendations