diff --git a/.gitignore b/.gitignore index 1fd6157ba..feb4495a0 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ docs/source/api/ *.pytest_cache htmlcov/ *.sqlite3 +jwql/website/static/*.csv diff --git a/.pep8speaks.yml b/.pep8speaks.yml index 146120ea3..d6a129ad9 100644 --- a/.pep8speaks.yml +++ b/.pep8speaks.yml @@ -4,7 +4,7 @@ message: # Customize the comment made by the bot opened: # Messages when a new PR is submitted header: "Hello @{name}, Thank you for submitting the Pull Request !" # The keyword {name} is converted into the author's username - footer: "If you have not done so, please consult the [`jwql` Style Guide](https://github.com/spacetelescope/jwql/blob/master/style_guide/style_guide.md)" + footer: "If you have not done so, please consult the [`jwql` Style Guide](https://github.com/spacetelescope/jwql/blob/master/style_guide/README.md)" # The messages can be written as they would over GitHub updated: # Messages when new commits are added to the PR header: "Hello @{name}, Thank you for updating !" diff --git a/CHANGES.rst b/CHANGES.rst index 9806973ba..6996c881f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,42 @@ +0.20.0 (2019-06-05) +=================== + +New Features +------------ + +Project & API Documentation +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Updated the notebook exemplifying how to perform an engineering database (EDB) telemetry query. +- Updated the README for the ``style_guide`` directory. + +Web Application +~~~~~~~~~~~~~~~ + +- Added form on preview image pages to allow users to submit image anomalies. +- Added buttons for users to download the results of EDB telemetry queries as CSV files. +- Enabled users to search for or navigate to program numbers without requiring leading zeros (i.e. "756" is now treated equivalently to "00756"). +- Enabled authentication for EDB queries via the web login (rather than requiring authentication information to be present in the configuration file). +- Added custom 404 pages. 
+- Added adaptive redirect feature so that users are not sent back to the homepage after login. +- Added more descriptive errors if a user tries to run the web application without filling out the proper fields in the configuration file. + +``jwql`` Repository +~~~~~~~~~~~~~~~~~~~ + +- Replaced all EDB interface code within ``jwql`` with the new ``jwedb`` `package`_. +- Fully incorporated Python 3.5 testing into the Jenkins test suite. + +Bug Fixes +--------- + +Web Application +~~~~~~~~~~~~~~~ + +- Fixed bug in which dashboard page would throw an error. +- Fixed incorrect dashboard axis labels. + + 0.19.0 (2019-04-19) =================== @@ -8,7 +47,7 @@ Project & API Documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Added guidelines to the style guide for logging the execution of instrument monitors -- Added example useage of logging in the ``example.py`` module +- Added example usage of logging in the ``example.py`` module Web Application ~~~~~~~~~~~~~~~ diff --git a/Jenkinsfile b/Jenkinsfile index b07a037c7..2f0c00fad 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,26 +1,39 @@ // Obtain files from source control system. if (utils.scm_checkout()) return +matrix_os = ["linux-stable"] +matrix_python = ["3.5", "3.6"] +matrix = [] + withCredentials([string( credentialsId: 'jwql-codecov', variable: 'codecov_token')]) { -// Define each build configuration, copying and overriding values as necessary. 
-bc0 = new BuildConfig() -bc0.nodetype = "linux-stable" -bc0.name = "debug" -bc0.build_cmds = [ - "conda env update --file=environment.yml", - "pip install codecov pytest-cov", - "with_env -n jwql python setup.py install"] -bc0.test_cmds = [ - "with_env -n jwql pytest -s --junitxml=results.xml --cov=./jwql/ --cov-report xml", - "codecov --token=${codecov_token}"] - -// bc1 = utils.copy(bc0) -// bc1.build_cmds[0] = "conda install -q -y python=3.5" + for (os in matrix_os) { + for (python_ver in matrix_python) { + // Define each build configuration, copying and overriding values as necessary. + env_py = "_python_${python_ver}".replace(".", "_") + bc = new BuildConfig() + bc.nodetype = os + bc.name = "debug-${os}-${env_py}" + bc.conda_packages = ["python=${python_ver}"] + bc.build_cmds = [ + "conda env update --file=environment${env_py}.yml", + "pip install codecov pytest-cov", + "python setup.py install"] + bc.test_cmds = [ + "pytest -s --junitxml=results.xml --cov=./jwql/ --cov-report=xml:coverage.xml", + "sed -i 's/file=\"[^\"]*\"//g;s/line=\"[^\"]*\"//g;s/skips=\"[^\"]*\"//g' results.xml", + "codecov --token=${codecov_token}", + "mkdir -v reports", + "mv -v coverage.xml reports/coverage.xml"] + matrix += bc + } + } + // bc1 = utils.copy(bc0) + // bc1.build_cmds[0] = "conda install -q -y python=3.5" -// Iterate over configurations that define the (distibuted) build matrix. -// Spawn a host of the given nodetype for each combination and run in parallel. -utils.run([bc0]) + // Iterate over configurations that define the (distributed) build matrix. + // Spawn a host of the given nodetype for each combination and run in parallel. + utils.run(matrix) } diff --git a/JenkinsfileRT b/JenkinsfileRT index 31904b083..1a548bcd8 100644 --- a/JenkinsfileRT +++ b/JenkinsfileRT @@ -3,7 +3,7 @@ if (utils.scm_checkout()) return // Define each build configuration, copying and overriding values as necessary. 
bc0 = new BuildConfig() -bc0.nodetype = "linux-stable" +bc0.nodetype = "linux" bc0.name = "debug" bc0.build_cmds = ["conda env update --file=environment.yml", "with_env -n jwql python setup.py install"] @@ -19,4 +19,5 @@ bc0.failedFailureThresh = 1 // Iterate over configurations that define the (distibuted) build matrix. // Spawn a host of the given nodetype for each combination and run in parallel. -utils.run([bc0]) \ No newline at end of file +utils.run([bc0]) + diff --git a/MANIFEST.in b/MANIFEST.in index ac8721ae2..7756ec124 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,7 @@ include README.md include LICENSE -include environment.yml +include environment_python_3_5.yml +include environment_python_3_6.yml include setup.py recursive-include notebooks * diff --git a/README.md b/README.md index 19efad4a5..e03cd513c 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,7 @@ [![Build Status](https://ssbjenkins.stsci.edu/job/STScI/job/jwql/job/master/badge/icon)](https://ssbjenkins.stsci.edu/job/STScI/job/jwql/job/master/) [![Documentation Status](https://readthedocs.org/projects/jwql/badge/?version=latest)](https://jwql.readthedocs.io/en/latest/?badge=latest) [![STScI](https://img.shields.io/badge/powered%20by-STScI-blue.svg?colorA=707170&colorB=3e8ddd&style=flat)](http://www.stsci.edu) +[![codecov](https://codecov.io/gh/spacetelescope/jwql/branch/develop/graph/badge.svg)](https://codecov.io/gh/spacetelescope/jwql) The JWST Quicklook Application (`JWQL`) is a database-driven web application and automation framework for use by the JWST instrument teams to monitor and trend the health, stability, and performance of the JWST instruments. The system is comprised of the following: @@ -57,22 +58,22 @@ instead, and then proceed as stated. ### Environment Installation -Following the download of the `jwql` repository, contributors can then install the `jwql` `conda` environment via the `environment.yml` file, which contains all of the dependencies for the project. 
First, one should ensure that their version of `conda` is up to date: +Following the download of the `jwql` repository, contributors can then install the `jwql` `conda` environment via the environment yaml file, which contains all of the dependencies for the project. First, ensure that your version of `conda` is up to date: ``` conda update conda ``` -Next, one should activate the `base` environment: +Next, activate the `base` environment: ``` source activate base ``` -Lastly, one can create the `jwql` environment via the `environment.yml` file: +Lastly, create the `jwql` environment with either Python 3.5 or 3.6, via the `environment_python_3_5.yml` or `environment_python_3_6.yml` file, respectively. We recommend installing with the 3.6 version: ``` -conda env create -f environment.yml +conda env create -f environment_python_3_6.yml ``` ### Package Installation @@ -91,7 +92,7 @@ Much of the `jwql` software depends on the existence of a `config.json` file wit ## Software Contributions -There are two current pages to review before you begin contributing to the `jwql` development. The first is our [style guide](https://github.com/spacetelescope/jwql/blob/master/style_guide/style_guide.md) and the second is our [suggested git workflow page](https://github.com/spacetelescope/jwql/wiki/git-&-GitHub-workflow-for-contributing), which contains an in-depth explanation of the workflow. +There are two current pages to review before you begin contributing to the `jwql` development. The first is our [style guide](https://github.com/spacetelescope/jwql/blob/master/style_guide/README.md) and the second is our [suggested git workflow page](https://github.com/spacetelescope/jwql/wiki/git-&-GitHub-workflow-for-contributing), which contains an in-depth explanation of the workflow. 
Contributors are also encouraged to check out the [Checklist for Contributors Guide](https://github.com/spacetelescope/jwql/wiki/Checklist-for-Contributors-and-Reviewers-of-Pull-Requests) to ensure the pull request contains all of the necessary changes. diff --git a/codecov.yml b/codecov.yml index df9e539f8..0b57b873a 100644 --- a/codecov.yml +++ b/codecov.yml @@ -5,12 +5,12 @@ codecov: coverage: precision: 2 round: down - range: "70...100" + range: "0...75" status: - project: yes - patch: yes - changes: no + project: off + patch: off + changes: off parsers: gcov: @@ -21,11 +21,15 @@ parsers: macro: no comment: - layout: "header, diff" + layout: "header, diff, files" behavior: default require_changes: no ignore: - - "jwql/website/" - "jwql/database/" - - "*__init__.py*" \ No newline at end of file + - "jwql/instrument_monitors/miri_monitors/data_trending/plots/" + - "jwql/instrument_monitors/nirspec_monitors/data_trending/plots/" + - "*__init__.py*" + - "**/*.html" + - "**/*.js" + - "**/*.css" diff --git a/docs/source/edb.rst b/docs/source/edb.rst index 9a0475de8..b5254f628 100644 --- a/docs/source/edb.rst +++ b/docs/source/edb.rst @@ -2,12 +2,6 @@ edb *** -edb_interface.py ----------------- -.. automodule:: jwql.edb.edb_interface - :members: - :undoc-members: - engineering_database.py ----------------------- .. automodule:: jwql.edb.engineering_database diff --git a/docs/source/tests.rst b/docs/source/tests.rst index cfab32e86..c88e45396 100644 --- a/docs/source/tests.rst +++ b/docs/source/tests.rst @@ -22,7 +22,7 @@ test_dark_monitor.py test_edb_interface.py --------------------- -.. automodule:: jwql.tests.test_edb_interface +.. automodule:: jwql.tests.test_edb :members: :undoc-members: @@ -38,6 +38,12 @@ test_loading_times.py :members: :undoc-members: +test_logging_functions.py +------------------------- +.. automodule:: jwql.tests.test_logging_functions + :members: + :undoc-members: + test_monitor_mast.py -------------------- .. 
automodule:: jwql.tests.test_monitor_mast diff --git a/environment_python_3_5.yml b/environment_python_3_5.yml new file mode 100644 index 000000000..43b44b4cc --- /dev/null +++ b/environment_python_3_5.yml @@ -0,0 +1,35 @@ +channels: +- http://ssb.stsci.edu/astroconda-dev +- defaults +dependencies: +- asdf=2.3.0 +- astropy>=3.1.2 +- astroquery=0.3.9 +- bokeh=0.13.0 +- crds>=7.2.7 +- django=2.1.1 +- inflection=0.3.1 +- ipython=6.5.0 +- jinja2=2.10 +- jwst=0.13.0 +- matplotlib=3.0.0 +- numpy=1.15.2 +- numpydoc=0.8.0 +- pandas=0.23.4 +- postgresql=9.6.6 +- psycopg2=2.7.5 +- python=3.5.6 +- python-dateutil=2.7.3 +- pytest=3.8.1 +- pytest-cov=2.6.0 +- pytest-html=1.19.0 +- sphinx=2.0.1 +- sphinx_rtd_theme=0.1.9 +- sqlalchemy=1.2.11 +- stsci_rtd_theme=0.0.2 +- pip: + - authlib==0.10 + - codecov==2.0.15 + - jwedb>=0.0.3 + - pysiaf==0.2.5 + - sphinx-automodapi==0.10 diff --git a/environment.yml b/environment_python_3_6.yml similarity index 79% rename from environment.yml rename to environment_python_3_6.yml index 3bd509e1a..3d6eddfc4 100644 --- a/environment.yml +++ b/environment_python_3_6.yml @@ -1,4 +1,3 @@ -name: jwql channels: - http://ssb.stsci.edu/astroconda-dev - defaults @@ -6,29 +5,31 @@ dependencies: - asdf=2.3.1 - astropy>=3.1.2 - astroquery=0.3.9 -- bokeh=1.1.0 +- bokeh=1.2.0 - crds>=7.2.7 - django=2.1.7 -- ipython=7.3.0 +- inflection=0.3.1 +- ipython=7.5.0 - jinja2=2.10 - jwst=0.13.1 - matplotlib=3.0.2 -- numpy=1.16.2 -- numpydoc=0.8.0 +- numpy=1.16.4 +- numpydoc=0.9.0 - pandas=0.24.2 - postgresql=9.6.6 - psycopg2=2.7.5 - python=3.6.4 - python-dateutil=2.7.5 -- pytest=4.4.0 +- pytest=4.5.0 - pytest-cov=2.6.1 - pytest-html=1.19.0 -- sphinx=1.8.5 +- sphinx=2.0.1 - sphinx_rtd_theme=0.1.9 - sqlalchemy=1.3.3 - stsci_rtd_theme=0.0.2 - pip: - authlib==0.10 - codecov==2.0.15 + - jwedb>=0.0.3 - pysiaf==0.2.5 - sphinx-automodapi==0.10 diff --git a/jwql/database/database_interface.py b/jwql/database/database_interface.py index 557223b6c..b76a2c3b6 100644 --- 
a/jwql/database/database_interface.py +++ b/jwql/database/database_interface.py @@ -62,8 +62,7 @@ import socket import pandas as pd -from sqlalchemy import Boolean -from sqlalchemy import Column +from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData, String, Table from sqlalchemy import create_engine from sqlalchemy import Date from sqlalchemy import DateTime @@ -75,11 +74,12 @@ from sqlalchemy import Time from sqlalchemy import UniqueConstraint from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import sessionmaker from sqlalchemy.orm.query import Query from sqlalchemy.types import ARRAY -from jwql.utils.constants import FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES +from jwql.utils.constants import ANOMALIES, FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES from jwql.utils.utils import get_config @@ -87,6 +87,7 @@ @property def data_frame(self): """Method to return a ``pandas.DataFrame`` of the results""" + return pd.read_sql(self.statement, self.session.bind) Query.data_frame = data_frame @@ -130,11 +131,10 @@ def load_connection(connection_string): base = declarative_base(engine) Session = sessionmaker(bind=engine) session = Session() - meta = MetaData() + meta = MetaData(engine) return session, base, engine, meta - # Import a global session. 
If running from readthedocs or Jenkins, pass a dummy connection string if 'build' and 'project' in socket.gethostname() or os.path.expanduser('~') == '/home/jenkins': dummy_connection_string = 'postgresql+psycopg2://account:password@hostname:0000/db_name' @@ -144,62 +144,6 @@ def load_connection(connection_string): session, base, engine, meta = load_connection(SETTINGS['connection_string']) -class Anomaly(base): - """ORM for the ``anomalies`` table""" - - # Name the table - __tablename__ = 'anomalies' - - # Define the columns - id = Column(Integer, primary_key=True, nullable=False) - filename = Column(String, nullable=False) - flag_date = Column(DateTime, nullable=False, default=datetime.now()) - bowtie = Column(Boolean, nullable=False, default=False) - snowball = Column(Boolean, nullable=False, default=False) - cosmic_ray_shower = Column(Boolean, nullable=False, default=False) - crosstalk = Column(Boolean, nullable=False, default=False) - cte_correction_error = Column(Boolean, nullable=False, default=False) - data_transfer_error = Column(Boolean, nullable=False, default=False) - detector_ghost = Column(Boolean, nullable=False, default=False) - diamond = Column(Boolean, nullable=False, default=False) - diffraction_spike = Column(Boolean, nullable=False, default=False) - dragon_breath = Column(Boolean, nullable=False, default=False) - earth_limb = Column(Boolean, nullable=False, default=False) - excessive_saturation = Column(Boolean, nullable=False, default=False) - figure8_ghost = Column(Boolean, nullable=False, default=False) - filter_ghost = Column(Boolean, nullable=False, default=False) - fringing = Column(Boolean, nullable=False, default=False) - guidestar_failure = Column(Boolean, nullable=False, default=False) - banding = Column(Boolean, nullable=False, default=False) - persistence = Column(Boolean, nullable=False, default=False) - prominent_blobs = Column(Boolean, nullable=False, default=False) - trail = Column(Boolean, nullable=False, default=False) - 
scattered_light = Column(Boolean, nullable=False, default=False) - other = Column(Boolean, nullable=False, default=False) - - def __repr__(self): - """Return the canonical string representation of the object""" - - # Get the columns that are True - a_list = [col for col, val in self.__dict__.items() - if val is True and isinstance(val, bool)] - - txt = ('Anomaly {0.id}: {0.filename} flagged at ' - '{0.flag_date} for {1}').format(self, a_list) - - return txt - - @property - def colnames(self): - """A list of all the column names in this table""" - - # Get the columns - a_list = [col for col, val in self.__dict__.items() - if isinstance(val, bool)] - - return a_list - - class FilesystemGeneral(base): """ORM for the general (non instrument specific) filesystem monitor table""" @@ -258,6 +202,40 @@ class Monitor(base): log_file = Column(String(), nullable=False) +def anomaly_orm_factory(class_name): + """Create a ``SQLAlchemy`` ORM Class for an anomaly table. + + Parameters + ---------- + class_name : str + The name of the class to be created + + Returns + ------- + class : obj + The ``SQLAlchemy`` ORM + """ + + # Initialize a dictionary to hold the column metadata + data_dict = {} + data_dict['__tablename__'] = class_name.lower() + + # Define anomaly table column names + data_dict['columns'] = ANOMALIES + data_dict['names'] = [name.replace('_', ' ') for name in data_dict['columns']] + + # Create a table with the appropriate Columns + data_dict['id'] = Column(Integer, primary_key=True, nullable=False) + data_dict['rootname'] = Column(String(), nullable=False) + data_dict['flag_date'] = Column(DateTime, nullable=False) + data_dict['user'] = Column(String(), nullable=False) + + for column in data_dict['columns']: + data_dict[column] = Column(Boolean, nullable=False, default=False) + + return type(class_name, (base,), data_dict) + + def get_monitor_columns(data_dict, table_name): """Read in the corresponding table definition text file to generate ``SQLAlchemy`` columns 
for the table. @@ -379,6 +357,7 @@ class : obj # Create tables from ORM factory +Anomaly = anomaly_orm_factory('anomaly') NIRCamDarkQueryHistory = monitor_orm_factory('nircam_dark_query_history') NIRCamDarkPixelStats = monitor_orm_factory('nircam_dark_pixel_stats') NIRCamDarkDarkCurrent = monitor_orm_factory('nircam_dark_dark_current') diff --git a/jwql/edb/edb_interface.py b/jwql/edb/edb_interface.py deleted file mode 100644 index 674378e03..000000000 --- a/jwql/edb/edb_interface.py +++ /dev/null @@ -1,217 +0,0 @@ -#! /usr/bin/env python -"""Module to interface the JWST DMS Engineering Database. - -This module provides ``jwql`` with functions to interface and query the -JWST DMS Engineering Database. It is designed to have minimal -dependencies on non-builtin python packages. - -Authors -------- - - - Johannes Sahlmann - -Use ---- - - This module can be imported and used with - - :: - - from jwql.edb import edb_interface - edb_interface.query_single_mnemonic(mnemonic_identifier, - start_time, end_time) - - Required arguments: - - ``mnemonic_identifier`` - String representation of a mnemonic name. - ``start_time`` - astropy.time.Time instance - ``end_time`` - astropy.time.Time instance - -Notes ------ - This module is built on top of ``astroquery.mast`` and uses - JWST-specific MAST services. - The user has to provide a valid MAST authentication token - or be authenticated. 
- -References ----------- - The MAST JWST EDB web portal is located at - ``https://mast.stsci.edu/portal/Mashup/Clients/JwstEdb/JwstEdb.html`` - -Dependencies ------------- - - astropy - - astroquery - -""" -from functools import lru_cache - -from astropy.table import Table -from astropy.time import Time -from astroquery.mast import Mast - -mast_edb_timeseries_service = 'Mast.JwstEdb.GetTimeseries.All' -mast_edb_dictionary_service = 'Mast.JwstEdb.Dictionary' -mast_edb_mnemonic_service = 'Mast.JwstEdb.Mnemonics' - - -def mast_authenticate(token=None): - """Verify MAST authentication status, login if needed.""" - if Mast.authenticated() is False: - if token is None: - raise ValueError('You are not authenticated in MAST. Please provide a valid token.') - else: - Mast.login(token=token) - - -def is_valid_mnemonic(mnemonic_identifier): - """Determine if the given string is a valid EDB mnemonic. - - Parameters - ---------- - mnemonic_identifier : str - The mnemonic_identifier string to be examined. - - Returns - ------- - bool - Is mnemonic_identifier a valid EDB mnemonic? - - """ - inventory = mnemonic_inventory()[0] - if mnemonic_identifier in inventory['tlmMnemonic']: - return True - else: - return False - - -@lru_cache() -def mnemonic_inventory(): - """Return all mnemonics in the DMS engineering database. - - No authentication is required, this information is public. - Since this is a rather large and quasi-static table (~15000 rows), - it is cached using functools. - - Returns - ------- - data : astropy.table.Table - Table representation of the mnemonic inventory. - meta : dict - Additional information returned by the query. 
- - """ - out = Mast.service_request_async(mast_edb_mnemonic_service, {}) - data, meta = process_mast_service_request_result(out) - - # convert numerical ID to str for homogenity (all columns are str) - data['tlmIdentifier'] = data['tlmIdentifier'].astype(str) - - return data, meta - - -def process_mast_service_request_result(result, data_as_table=True): - """Parse the result of a MAST EDB query. - - Parameters - ---------- - result : list of requests.models.Response instances - The object returned by a call to ``Mast.service_request_async`` - data_as_table : bool - If True, return data as astropy table, else return as json - - Returns - ------- - data : astropy.table.Table - Table representation of the returned data. - meta : dict - Additional information returned by the query - - """ - json_data = result[0].json() - if json_data['status'] != 'COMPLETE': - raise RuntimeError('Mnemonic query did not complete.\nquery status: {}\nmessage: {}'.format( - json_data['status'], json_data['msg'])) - - try: - # timestamp-value pairs in the form of an astropy table - if data_as_table: - data = Table(json_data['data']) - else: - data = json_data['data'][0] - except KeyError: - raise RuntimeError('Query did not return any data.') - - # collect meta data - meta = {} - for key in json_data.keys(): - if key.lower() != 'data': - meta[key] = json_data[key] - - return data, meta - - -def query_mnemonic_info(mnemonic_identifier, token=None): - """Query the EDB to return the mnemonic description. - - Parameters - ---------- - mnemonic_identifier : str - Telemetry mnemonic identifier, e.g. 
``SA_ZFGOUTFOV`` - token : str - MAST token - - Returns - ------- - info : dict - Object that contains the returned data - - """ - mast_authenticate(token=token) - - parameters = {"mnemonic": "{}".format(mnemonic_identifier)} - result = Mast.service_request_async(mast_edb_dictionary_service, parameters) - info = process_mast_service_request_result(result, data_as_table=False)[0] - return info - - -def query_single_mnemonic(mnemonic_identifier, start_time, end_time, token=None): - """Query DMS EDB to get the mnemonic readings in a time interval. - - Parameters - ---------- - mnemonic_identifier : str - Telemetry mnemonic identifier, e.g. 'SA_ZFGOUTFOV' - start_time : astropy.time.Time instance - Start time - end_time : astropy.time.Time instance - End time - token : str - MAST token - - Returns - ------- - data, meta, info : tuple - Table and two dictionaries with the results of the query - - """ - mast_authenticate(token=token) - - if not is_valid_mnemonic(mnemonic_identifier): - raise RuntimeError('Mnemonic identifier is invalid!') - - if not isinstance(start_time, Time): - raise RuntimeError('Please specify a valid start time (instance of astropy.time.core.Time)') - - if not isinstance(end_time, Time): - raise RuntimeError('Please specify a valid end time (instance of astropy.time.core.Time)') - - parameters = {'mnemonic': mnemonic_identifier, 'start': start_time.iso, 'end': end_time.iso} - result = Mast.service_request_async(mast_edb_timeseries_service, parameters) - data, meta = process_mast_service_request_result(result) - - # get auxiliary information (description, subsystem, ...) 
- info = query_mnemonic_info(mnemonic_identifier) - - return data, meta, info diff --git a/jwql/edb/engineering_database.py b/jwql/edb/engineering_database.py index 02c0a45be..e879cb6ec 100644 --- a/jwql/edb/engineering_database.py +++ b/jwql/edb/engineering_database.py @@ -28,8 +28,18 @@ Notes ----- - A valid MAST authentication token has to be present in the local + There are three possibilities for MAST authentication: + + 1. A valid MAST authentication token is present in the local ``jwql`` configuration file (config.json). + 2. The MAST_API_TOKEN environment variable is set to a valid + MAST authentication token. + 3. The user has logged into the ``jwql`` web app, in which + case they are authenticated via auth.mast. + + When querying mnemonic values, the underlying MAST service returns + data that include the datapoint preceding the requested start time + and the datapoint that follows the requested end time. """ @@ -40,12 +50,8 @@ from bokeh.plotting import figure import numpy as np -from jwql.utils.utils import get_config -from .edb_interface import query_single_mnemonic, query_mnemonic_info - -# should use oauth.register_oauth()? 
-settings = get_config() -MAST_TOKEN = settings['mast_token'] +from jwql.utils.credentials import get_mast_token +from jwedb.edb_interface import query_single_mnemonic, query_mnemonic_info class EdbMnemonic: @@ -72,17 +78,19 @@ def __init__(self, mnemonic_identifier, start_time, end_time, data, meta, info): """ self.mnemonic_identifier = mnemonic_identifier - self.start_time = start_time - self.end_time = end_time + self.requested_start_time = start_time + self.requested_end_time = end_time self.data = data + self.data_start_time = Time(np.min(np.array(self.data['MJD'])), format='mjd') + self.data_end_time = Time(np.max(np.array(self.data['MJD'])), format='mjd') self.meta = meta self.info = info def __str__(self): """Return string describing the instance.""" return 'EdbMnemonic {} with {} records between {} and {}'.format( - self.mnemonic_identifier, len(self.data), self.start_time.isot, - self.end_time.isot) + self.mnemonic_identifier, len(self.data), self.data_start_time.isot, + self.data_end_time.isot) def interpolate(self, times, **kwargs): """Interpolate value at specified times.""" @@ -112,9 +120,30 @@ def bokeh_plot(self): def get_mnemonic(mnemonic_identifier, start_time, end_time): - """Execute query and return a EdbMnemonic instance.""" + """Execute query and return a EdbMnemonic instance. + + The underlying MAST service returns data that include the + datapoint preceding the requested start time and the datapoint + that follows the requested end time. + + Parameters + ---------- + mnemonic_identifier : str + Telemetry mnemonic identifiers, e.g. 
'SA_ZFGOUTFOV' + start_time : astropy.time.Time instance + Start time + end_time : astropy.time.Time instance + End time + + Returns + ------- + mnemonic : instance of EdbMnemonic + EdbMnemonic object containing query results + + """ + mast_token = get_mast_token() data, meta, info = query_single_mnemonic(mnemonic_identifier, start_time, end_time, - token=MAST_TOKEN) + token=mast_token) # create and return instance mnemonic = EdbMnemonic(mnemonic_identifier, start_time, end_time, data, meta, info) @@ -166,4 +195,5 @@ def get_mnemonic_info(mnemonic_identifier): Object that contains the returned data """ - return query_mnemonic_info(mnemonic_identifier, token=MAST_TOKEN) + mast_token = get_mast_token() + return query_mnemonic_info(mnemonic_identifier, token=mast_token) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 097d86b99..8786e05fb 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -145,8 +145,6 @@ def mast_query_darks(instrument, aperture, start_date, end_date): return query_results -@log_fail -@log_info class Dark(): """Class for executing the dark current monitor. 
@@ -209,95 +207,8 @@ class Dark(): If the most recent query search returns more than one entry """ - def __init__(self, testing=False): - - logging.info('Begin logging for dark_monitor') - - apertures_to_skip = ['NRCALL_FULL', 'NRCAS_FULL', 'NRCBS_FULL'] - - if not testing: - - # Get the output directory - self.output_dir = os.path.join(get_config()['outputs'], 'dark_monitor') - - # Read in config file that defines the thresholds for the number - # of dark files that must be present in order for the monitor to run - limits = ascii.read(THRESHOLDS_FILE) - - # Use the current time as the end time for MAST query - self.query_end = Time.now().mjd - - # Loop over all instruments - for instrument in ['nircam']: - self.instrument = instrument - - # Identify which database tables to use - self.identify_tables() - - # Get a list of all possible apertures from pysiaf - possible_apertures = list(Siaf(instrument).apernames) - possible_apertures = [ap for ap in possible_apertures if ap not in apertures_to_skip] - - for aperture in possible_apertures: - - logging.info('') - logging.info('Working on aperture {} in {}'.format(aperture, instrument)) - - # Find the appropriate threshold for the number of new files needed - match = aperture == limits['Aperture'] - file_count_threshold = limits['Threshold'][match] - - # Locate the record of the most recent MAST search - self.aperture = aperture - self.query_start = self.most_recent_search() - logging.info('\tQuery times: {} {}'.format(self.query_start, self.query_end)) - - # Query MAST using the aperture and the time of the - # most recent previous search as the starting time - new_entries = mast_query_darks(instrument, aperture, self.query_start, self.query_end) - logging.info('\tAperture: {}, new entries: {}'.format(self.aperture, len(new_entries))) - - # Check to see if there are enough new files to meet the monitor's signal-to-noise requirements - if len(new_entries) >= file_count_threshold: - - logging.info('\tSufficient new dark 
files found for {}, {} to run the dark monitor.' - .format(self.instrument, self.aperture)) - - # Get full paths to the files - new_filenames = [filesystem_path(file_entry['filename']) for file_entry in new_entries] - - # Set up directories for the copied data - ensure_dir_exists(os.path.join(self.output_dir, 'data')) - self.data_dir = os.path.join(self.output_dir, - 'data/{}_{}'.format(self.instrument.lower(), - self.aperture.lower())) - ensure_dir_exists(self.data_dir) - - # Copy files from filesystem - dark_files, not_copied = copy_files(new_filenames, self.data_dir) - - # Run the dark monitor - self.run(dark_files) - monitor_run = True - - else: - logging.info(('\tDark monitor skipped. {} new dark files for {}, {}. {} new files are ' - 'required to run dark current monitor.').format( - len(new_entries), instrument, aperture, file_count_threshold[0])) - monitor_run = False - - # Update the query history - new_entry = {'instrument': instrument, - 'aperture': aperture, - 'start_time_mjd': self.query_start, - 'end_time_mjd': self.query_end, - 'files_found': len(new_entries), - 'run_monitor': monitor_run, - 'entry_date': datetime.datetime.now()} - self.query_table.__table__.insert().execute(new_entry) - logging.info('\tUpdated the query history table') - - logging.info('Dark Monitor completed successfully.') + def __init__(self): + """Initialize an instance of the ``Dark`` class.""" def add_bad_pix(self, coordinates, pixel_type, files, mean_filename, baseline_filename): """Add a set of bad pixels to the bad pixel database table @@ -575,34 +486,9 @@ def noise_check(self, new_noise_image, baseline_noise_image, threshold=1.5): return noisy - def read_baseline_slope_image(self, filename): - """Read in a baseline mean slope image and associated standard - deviation image from the given fits file - - Parameters - ---------- - filename : str - Name of fits file to be read in - - Returns - ------- - mean_image : numpy.ndarray - 2D mean slope image - - stdev_image : 
numpy.ndarray - 2D stdev image - """ - - try: - with fits.open(filename) as hdu: - mean_image = hdu['MEAN'].data - stdev_image = hdu['STDEV'].data - return mean_image, stdev_image - except (FileNotFoundError, KeyError) as e: - logging.warning('Trying to read {}: {}'.format(filename, e)) - - def run(self, file_list): - """The main method. See module docstrings for further details + def process(self, file_list): + """The main method for processing darks. See module docstrings + for further details. Parameters ---------- @@ -765,6 +651,125 @@ def run(self, file_list): } self.stats_table.__table__.insert().execute(dark_db_entry) + def read_baseline_slope_image(self, filename): + """Read in a baseline mean slope image and associated standard + deviation image from the given fits file + + Parameters + ---------- + filename : str + Name of fits file to be read in + + Returns + ------- + mean_image : numpy.ndarray + 2D mean slope image + + stdev_image : numpy.ndarray + 2D stdev image + """ + + try: + with fits.open(filename) as hdu: + mean_image = hdu['MEAN'].data + stdev_image = hdu['STDEV'].data + return mean_image, stdev_image + except (FileNotFoundError, KeyError) as e: + logging.warning('Trying to read {}: {}'.format(filename, e)) + + @log_fail + @log_info + def run(self): + """The main method. See module docstrings for further + details. 
+ """ + + logging.info('Begin logging for dark_monitor') + + apertures_to_skip = ['NRCALL_FULL', 'NRCAS_FULL', 'NRCBS_FULL'] + + # Get the output directory + self.output_dir = os.path.join(get_config()['outputs'], 'dark_monitor') + + # Read in config file that defines the thresholds for the number + # of dark files that must be present in order for the monitor to run + limits = ascii.read(THRESHOLDS_FILE) + + # Use the current time as the end time for MAST query + self.query_end = Time.now().mjd + + # Loop over all instruments + for instrument in ['nircam']: + self.instrument = instrument + + # Identify which database tables to use + self.identify_tables() + + # Get a list of all possible apertures from pysiaf + possible_apertures = list(Siaf(instrument).apernames) + possible_apertures = [ap for ap in possible_apertures if ap not in apertures_to_skip] + + for aperture in possible_apertures: + + logging.info('') + logging.info('Working on aperture {} in {}'.format(aperture, instrument)) + + # Find the appropriate threshold for the number of new files needed + match = aperture == limits['Aperture'] + file_count_threshold = limits['Threshold'][match] + + # Locate the record of the most recent MAST search + self.aperture = aperture + self.query_start = self.most_recent_search() + logging.info('\tQuery times: {} {}'.format(self.query_start, self.query_end)) + + # Query MAST using the aperture and the time of the + # most recent previous search as the starting time + new_entries = mast_query_darks(instrument, aperture, self.query_start, self.query_end) + logging.info('\tAperture: {}, new entries: {}'.format(self.aperture, len(new_entries))) + + # Check to see if there are enough new files to meet the monitor's signal-to-noise requirements + if len(new_entries) >= file_count_threshold: + + logging.info('\tSufficient new dark files found for {}, {} to run the dark monitor.' 
+ .format(self.instrument, self.aperture)) + + # Get full paths to the files + new_filenames = [filesystem_path(file_entry['filename']) for file_entry in new_entries] + + # Set up directories for the copied data + ensure_dir_exists(os.path.join(self.output_dir, 'data')) + self.data_dir = os.path.join(self.output_dir, + 'data/{}_{}'.format(self.instrument.lower(), + self.aperture.lower())) + ensure_dir_exists(self.data_dir) + + # Copy files from filesystem + dark_files, not_copied = copy_files(new_filenames, self.data_dir) + + # Run the dark monitor + self.process(dark_files) + monitor_run = True + + else: + logging.info(('\tDark monitor skipped. {} new dark files for {}, {}. {} new files are ' + 'required to run dark current monitor.').format( + len(new_entries), instrument, aperture, file_count_threshold[0])) + monitor_run = False + + # Update the query history + new_entry = {'instrument': instrument, + 'aperture': aperture, + 'start_time_mjd': self.query_start, + 'end_time_mjd': self.query_end, + 'files_found': len(new_entries), + 'run_monitor': monitor_run, + 'entry_date': datetime.datetime.now()} + self.query_table.__table__.insert().execute(new_entry) + logging.info('\tUpdated the query history table') + + logging.info('Dark Monitor completed successfully.') + def save_mean_slope_image(self, slope_img, stdev_img, files): """Save the mean slope image and associated stdev image to a file @@ -984,5 +989,6 @@ def stats_by_amp(self, image, amps): start_time, log_file = initialize_instrument_monitor(module) monitor = Dark() + monitor.run() update_monitor_table(module, start_time, log_file) diff --git a/jwql/jwql_monitors/generate_preview_images.py b/jwql/jwql_monitors/generate_preview_images.py index 107b8e190..d5aff6986 100755 --- a/jwql/jwql_monitors/generate_preview_images.py +++ b/jwql/jwql_monitors/generate_preview_images.py @@ -319,7 +319,7 @@ def create_mosaic(filenames): elif datadim == 3: full_array = np.zeros((datashape[0], full_ydim, full_xdim)) * np.nan 
else: - raise ValueError(('Difference image for {} must be either 2D or 3D.'.format(filenames[0]))) + raise ValueError('Difference image for {} must be either 2D or 3D.'.format(filenames[0])) # Place the data from the individual detectors in the appropriate # places in the final image diff --git a/jwql/jwql_monitors/monitor_filesystem.py b/jwql/jwql_monitors/monitor_filesystem.py index 7961f7341..5b97538cd 100755 --- a/jwql/jwql_monitors/monitor_filesystem.py +++ b/jwql/jwql_monitors/monitor_filesystem.py @@ -44,7 +44,7 @@ from bokeh.embed import components from bokeh.layouts import gridplot -from bokeh.palettes import Dark2_5 as palette +from bokeh.palettes import Category20_20 as palette from bokeh.plotting import figure, output_file, save from jwql.database.database_interface import engine @@ -221,13 +221,18 @@ def plot_by_filetype(plot_type, instrument): instrument_title = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument] title = '{} Total File {} by Type'.format(instrument_title, plot_type.capitalize()) + if plot_type == 'count': + ytitle = 'Counts' + else: + ytitle = 'Size (TB)' + # Initialize plot plot = figure( - tools='pan,box_zoom,wheel_zoom,reset,save', - x_axis_type='datetime', - title=title, - x_axis_label='Date', - y_axis_label='Count') + tools='pan,box_zoom,wheel_zoom,reset,save', + x_axis_type='datetime', + title=title, + x_axis_label='Date', + y_axis_label=ytitle) colors = itertools.cycle(palette) for filetype, color in zip(FILE_SUFFIX_TYPES, colors): @@ -272,11 +277,11 @@ def plot_filesystem_size(): FilesystemGeneral.used, FilesystemGeneral.available).all() dates, total_sizes, useds, availables = zip(*results) plot = figure( - tools='pan,box_zoom,wheel_zoom,reset,save', - x_axis_type='datetime', - title='System stats', - x_axis_label='Date', - y_axis_label='GB') + tools='pan,box_zoom,wheel_zoom,reset,save', + x_axis_type='datetime', + title='System stats', + x_axis_label='Date', + y_axis_label='Size TB') plot.line(dates, total_sizes, legend='Total 
size', line_color='red') plot.circle(dates, total_sizes, color='red') plot.line(dates, useds, legend='Used bytes', line_color='green') @@ -351,11 +356,11 @@ def plot_total_file_counts(): results = session.query(FilesystemGeneral.date, FilesystemGeneral.total_file_count).all() dates, file_counts = zip(*results) plot = figure( - tools='pan,box_zoom,reset,wheel_zoom,save', - x_axis_type='datetime', - title="Total File Counts", - x_axis_label='Date', - y_axis_label='Count') + tools='pan,box_zoom,reset,wheel_zoom,save', + x_axis_type='datetime', + title="Total File Counts", + x_axis_label='Date', + y_axis_label='Count') plot.line(dates, file_counts, line_width=2, line_color='blue') plot.circle(dates, file_counts, color='blue') diff --git a/jwql/tests/test_api_views.py b/jwql/tests/test_api_views.py index 8e06d762e..939b79fab 100644 --- a/jwql/tests/test_api_views.py +++ b/jwql/tests/test_api_views.py @@ -17,14 +17,25 @@ pytest -s test_api_views.py """ +import os import json import pytest -import urllib.request +from urllib import request, error from jwql.utils.utils import get_base_url from jwql.utils.constants import JWST_INSTRUMENT_NAMES +# Determine if tests are being run on jenkins +ON_JENKINS = os.path.expanduser('~') == '/home/jenkins' + +# Determine if the local server is running +try: + url = request.urlopen('http://127.0.0.1:8000') + LOCAL_SERVER = True +except error.URLError: + LOCAL_SERVER = False + urls = [] # Generic URLs @@ -41,6 +52,7 @@ '98012', # MIRI '93025', # NIRCam '00308', # NIRISS + '308', # NIRISS '96213'] # NIRSpec for proposal in proposals: urls.append('api/{}/filenames/'.format(proposal)) # filenames_by_proposal @@ -51,7 +63,7 @@ rootnames = ['jw86600007001_02101_00001_guider2', # FGS 'jw98012001001_02102_00001_mirimage', # MIRI 'jw93025001001_02102_00001_nrca2', # NIRCam - 'jw00308001001_02101_00001_nis', # NIRISS + 'jw00308001001_02103_00001_nis', # NIRISS 'jw96213001001_02101_00001_nrs1'] # NIRSpec for rootname in rootnames: 
urls.append('api/{}/filenames/'.format(rootname)) # filenames_by_rootname @@ -59,7 +71,7 @@ urls.append('api/{}/thumbnails/'.format(rootname)) # thumbnails_by_rootname -@pytest.mark.xfail +# @pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') @pytest.mark.parametrize('url', urls) def test_api_views(url): """Test to see if the given ``url`` returns a populated JSON object @@ -72,13 +84,26 @@ def test_api_views(url): """ # Build full URL - base_url = get_base_url() + if not ON_JENKINS: + base_url = get_base_url() + else: + base_url = 'https://dljwql.stsci.edu' + + if base_url == 'http://127.0.0.1:8000' and not LOCAL_SERVER: + pytest.skip("Local server not running") + url = '{}/{}'.format(base_url, url) # Determine the type of data to check for based on the url data_type = url.split('/')[-2] - url = urllib.request.urlopen(url) + try: + url = request.urlopen(url) + except error.HTTPError as e: + if e.code == 502: + pytest.skip("Dev server problem") + raise(e) + data = json.loads(url.read().decode()) assert len(data[data_type]) > 0 diff --git a/jwql/tests/test_calculations.py b/jwql/tests/test_calculations.py index 5834e453e..37f112b98 100644 --- a/jwql/tests/test_calculations.py +++ b/jwql/tests/test_calculations.py @@ -56,10 +56,10 @@ def test_gaussian1d_fit(): initial_params = [np.max(hist), 0.55, 0.1] amplitude, peak, width = calculations.gaussian1d_fit(bin_centers, hist, initial_params) - assert np.isclose(peak[0], mean_value, atol=0.0015, rtol=0.) - assert np.isclose(width[0], sigma_value, atol=0.0015, rtol=0.) - assert ((mean_value <= peak[0]+3*peak[1]) & (mean_value >= peak[0]-3*peak[1])) - assert ((sigma_value <= width[0]+3*width[1]) & (sigma_value >= width[0]-3*width[1])) + assert np.isclose(peak[0], mean_value, atol=0.0035, rtol=0.) + assert np.isclose(width[0], sigma_value, atol=0.0035, rtol=0.) 
+ assert ((mean_value <= peak[0]+7*peak[1]) & (mean_value >= peak[0]-7*peak[1])) + assert ((sigma_value <= width[0]+7*width[1]) & (sigma_value >= width[0]-7*width[1])) def test_mean_image(): diff --git a/jwql/tests/test_dark_monitor.py b/jwql/tests/test_dark_monitor.py index 7562c2ba1..a30b62f00 100644 --- a/jwql/tests/test_dark_monitor.py +++ b/jwql/tests/test_dark_monitor.py @@ -29,7 +29,7 @@ def test_find_hot_dead_pixels(): """Test hot and dead pixel searches""" - monitor = dark_monitor.Dark(testing=True) + monitor = dark_monitor.Dark() # Create "baseline" image comparison_image = np.zeros((10, 10)) + 1. @@ -57,7 +57,7 @@ def test_find_hot_dead_pixels(): def test_get_metadata(): """Test retrieval of metadata from input file""" - monitor = dark_monitor.Dark(testing=True) + monitor = dark_monitor.Dark() filename = os.path.join(get_config()['test_dir'], 'dark_monitor', 'test_image_1.fits') monitor.get_metadata(filename) @@ -114,7 +114,7 @@ def test_noise_check(): baseline[5, 5] = 1.0 noise_image[5, 5] = 1.25 - monitor = dark_monitor.Dark(testing=True) + monitor = dark_monitor.Dark() noisy = monitor.noise_check(noise_image, baseline, threshold=1.5) assert len(noisy[0]) == 2 @@ -125,7 +125,7 @@ def test_noise_check(): def test_shift_to_full_frame(): """Test pixel coordinate shifting to be in full frame coords""" - monitor = dark_monitor.Dark(testing=True) + monitor = dark_monitor.Dark() monitor.x0 = 512 monitor.y0 = 512 diff --git a/jwql/tests/test_database_interface.py b/jwql/tests/test_database_interface.py new file mode 100755 index 000000000..96bb884d0 --- /dev/null +++ b/jwql/tests/test_database_interface.py @@ -0,0 +1,51 @@ +#! /usr/bin/env python + +"""Tests for the ``database_interface.py`` module. 
+ +Authors +------- + + - Joe Filippazzo + - Matthew Bourque + +Use +--- + + These tests can be run via the command line (omit the ``-s`` to + suppress verbose output to stdout): + :: + + pytest -s database_interface.py +""" + +import datetime +import os +import pytest +import random +import string + +from jwql.database import database_interface as di + +# Determine if tests are being run on jenkins +ON_JENKINS = os.path.expanduser('~') == '/home/jenkins' + + +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to development database server.') +def test_anomaly_table(): + """Test to see that the database has an anomalies table""" + + assert 'anomaly' in di.engine.table_names() + + +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to development database server.') +def test_anomaly_records(): + """Test to see that new records can be entered""" + + # Add some data + random_string = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for _ in range(10)) + di.session.add(di.Anomaly(rootname=random_string, flag_date=datetime.datetime.today(), user='test', ghost=True)) + di.session.commit() + + # Test the ghosts column + ghosts = di.session.query(di.Anomaly).filter(di.Anomaly.ghost == "True") + assert ghosts.data_frame.iloc[0]['ghost'] == True diff --git a/jwql/tests/test_edb.py b/jwql/tests/test_edb.py new file mode 100644 index 000000000..bc2fff119 --- /dev/null +++ b/jwql/tests/test_edb.py @@ -0,0 +1,63 @@ +#! /usr/bin/env python +"""Tests for the ``engineering_database`` module. 
+ +Authors +------- + + - Johannes Sahlmann + + +Use +--- + + These tests can be run via the command line (omit the ``-s`` to + suppress verbose output to ``stdout``): + + :: + + pytest -s test_edb.py +""" + +import os + +from astropy.time import Time +import pytest + +# Determine if tests are being run on jenkins +ON_JENKINS = os.path.expanduser('~') == '/home/jenkins' + + +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') +def test_get_mnemonic(): + """Test the query of a single mnemonic.""" + from jwql.edb.engineering_database import get_mnemonic + + mnemonic_identifier = 'IMIR_HK_ICE_SEC_VOLT4' + start_time = Time('2019-01-16 00:00:00.000', format='iso') + end_time = Time('2019-01-16 00:01:00.000', format='iso') + + mnemonic = get_mnemonic(mnemonic_identifier, start_time, end_time) + assert len(mnemonic.data) == mnemonic.meta['paging']['rows'] + + +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') +def test_get_mnemonic_info(): + """Test retrieval of mnemonic info.""" + from jwql.edb.engineering_database import get_mnemonic_info + + mnemonic_identifier = 'IMIR_HK_ICE_SEC_VOLT4' + info = get_mnemonic_info(mnemonic_identifier) + assert 'subsystem' in info.keys() + + +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') +def test_get_mnemonics(): + """Test the query of a list of mnemonics.""" + from jwql.edb.engineering_database import get_mnemonics + + mnemonics = ['SA_ZFGOUTFOV', 'SA_ZFGBADCNT'] + start_time = Time(2018.0, format='decimalyear') + end_time = Time(2018.1, format='decimalyear') + + mnemonic_dict = get_mnemonics(mnemonics, start_time, end_time) + assert len(mnemonic_dict) == len(mnemonics) diff --git a/jwql/tests/test_edb_interface.py b/jwql/tests/test_edb_interface.py deleted file mode 100644 index 516986f7a..000000000 --- a/jwql/tests/test_edb_interface.py +++ /dev/null @@ -1,88 +0,0 @@ -#! /usr/bin/env python -"""Tests for the ``engineering_database`` module. 
- -Authors -------- - - - Johannes Sahlmann - - -Use ---- - - These tests can be run via the command line (omit the ``-s`` to - suppress verbose output to ``stdout``): - - :: - - pytest -s test_edb_interface.py -""" - -from astropy.time import Time -from astroquery.mast import Mast -import pytest - -from jwql.edb.edb_interface import mnemonic_inventory, query_single_mnemonic -from jwql.utils.utils import get_config - - -@pytest.mark.xfail(raises=(RuntimeError, FileNotFoundError)) -def test_get_mnemonic(): - """Test the query of a single mnemonic.""" - from jwql.edb.engineering_database import get_mnemonic - - mnemonic_identifier = 'IMIR_HK_ICE_SEC_VOLT4' - start_time = Time('2019-01-16 00:00:00.000', format='iso') - end_time = Time('2019-01-16 00:01:00.000', format='iso') - - mnemonic = get_mnemonic(mnemonic_identifier, start_time, end_time) - assert len(mnemonic.data) == mnemonic.meta['paging']['rows'] - - -@pytest.mark.xfail(raises=(RuntimeError, FileNotFoundError)) -def test_get_mnemonics(): - """Test the query of a list of mnemonics.""" - from jwql.edb.engineering_database import get_mnemonics - - mnemonics = ['SA_ZFGOUTFOV', 'SA_ZFGBADCNT'] - start_time = Time(2018.0, format='decimalyear') - end_time = Time(2018.1, format='decimalyear') - - mnemonic_dict = get_mnemonics(mnemonics, start_time, end_time) - assert len(mnemonic_dict) == len(mnemonics) - - -def test_mnemonic_inventory(): - """Test the retrieval of the mnemonic inventory.""" - all_mnemonics = mnemonic_inventory()[0] - assert len(all_mnemonics) > 1000 - - -@pytest.mark.xfail(raises=(RuntimeError, FileNotFoundError)) -def test_query_single_mnemonic(): - """Test the query of a mnemonic over a given time range.""" - settings = get_config() - MAST_TOKEN = settings['mast_token'] - - mnemonic_identifier = 'SA_ZFGOUTFOV' - start_time = Time(2018.0, format='decimalyear') - end_time = Time(2018.1, format='decimalyear') - - data, meta, info = query_single_mnemonic(mnemonic_identifier, start_time, end_time, - 
token=MAST_TOKEN) - assert len(data) == meta['paging']['rows'] - - -@pytest.mark.xfail -def test_invalid_query(): - """Test that the mnemonic query for an unauthorized user fails.""" - - Mast.logout() - - mnemonic_identifier = 'IMIR_HK_ICE_SEC_VOLT4' - start_time = Time('2019-01-16 00:00:00.000', format='iso') - end_time = Time('2019-01-16 00:01:00.000', format='iso') - try: - query_single_mnemonic(mnemonic_identifier, start_time, end_time, token='1234') - except RuntimeError: - pass diff --git a/jwql/tests/test_loading_times.py b/jwql/tests/test_loading_times.py index df0445ced..501028133 100644 --- a/jwql/tests/test_loading_times.py +++ b/jwql/tests/test_loading_times.py @@ -19,6 +19,7 @@ pytest -s test_loading_times.py """ +import os import pytest import time import urllib.request @@ -27,6 +28,9 @@ TIME_CONSTRAINT = 30 # seconds +# Determine if tests are being run on jenkins +ON_JENKINS = os.path.expanduser('~') == '/home/jenkins' + urls = [] # Generic URLs @@ -34,7 +38,7 @@ urls.append('about/') urls.append('edb/') -# Speicif URLs +# Specific URLs test_mappings = [('fgs', '86700', 'jw86600007001_02101_00001_guider2'), ('miri', '98012', 'jw98012001001_02102_00001_mirimage'), ('nircam', '93025', 'jw93065002001_02101_00001_nrcb2'), @@ -48,9 +52,10 @@ urls.append('{}/{}/'.format(instrument, rootname)) +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') @pytest.mark.parametrize('url', urls) def test_loading_times(url): - """Test to see if the given ``url`` returns a webpage sucessfully + """Test to see if the given ``url`` returns a webpage successfully within a reasonable time. Parameters diff --git a/jwql/tests/test_logging_functions.py b/jwql/tests/test_logging_functions.py new file mode 100644 index 000000000..83eb62bc2 --- /dev/null +++ b/jwql/tests/test_logging_functions.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python + +"""Tests for the ``logging_functions`` module. 
+ +Authors +------- + + - Matthew Bourque + +Use +--- + + These tests can be run via the command line (omit the -s to + suppress verbose output to stdout): + + :: + + pytest -s test_logging_functions.py +""" + +import logging +import os +import pytest +import shutil + +from jwql.utils import logging_functions +from jwql.utils.logging_functions import configure_logging, log_fail, log_info, make_log_file +from jwql.utils.utils import get_config + +# Determine if tests are being run on jenkins +ON_JENKINS = os.path.expanduser('~') == '/home/jenkins' + + +@log_fail +@log_info +def perform_basic_logging(): + """Performs some basic logging to the test log file""" + + logging.info('This is some logging info') + logging.warning('This is a normal warning') + logging.critical('This is a critical warning') + + +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') +def test_configure_logging(): + """Assert that the ``configure_logging`` function successfully + creates a log file""" + + log_file = logging_functions.configure_logging('test_logging_functions') + assert os.path.exists(log_file) + + # Remove the log file + shutil.rmtree(os.path.dirname(log_file), ignore_errors=True) + + +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') +def test_make_log_file(): + """Assert that ``make_log_file`` function returns the appropriate + path for a log file""" + + module = 'test_logging_functions' + log_file = make_log_file(module) + + correct_locations = [ + os.path.join(get_config()['log_dir'], 'dev', module, os.path.basename(log_file)), + os.path.join(get_config()['log_dir'], 'test', module, os.path.basename(log_file)), + os.path.join(get_config()['log_dir'], 'prod', module, os.path.basename(log_file))] + + assert log_file in correct_locations + + +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') +def test_logging_functions(): + """A generic end-to-end test that creates a log file, does some + basic 
logging, then asserts that some logging content exists""" + + log_file = configure_logging('test_logging_functions') + perform_basic_logging() + + # Open the log file and make some assertions + with open(log_file, 'r') as f: + data = f.readlines() + data = str([line.strip() for line in data]) + testable_content = ['User:', 'System:', 'Python Executable Path:', 'INFO:', + 'WARNING:', 'CRITICAL:', 'Elapsed Real Time:', + 'Elapsed CPU Time:', 'Completed Successfully'] + for item in testable_content: + assert item in data diff --git a/jwql/tests/test_permissions.py b/jwql/tests/test_permissions.py index 1ed6dab67..01f67de64 100755 --- a/jwql/tests/test_permissions.py +++ b/jwql/tests/test_permissions.py @@ -29,6 +29,9 @@ # directory to be created and populated during tests running TEST_DIRECTORY = os.path.join(os.environ['HOME'], 'permission_test') +# Determine if tests are being run on jenkins +ON_JENKINS = os.path.expanduser('~') == '/home/jenkins' + @pytest.fixture(scope="module") def test_directory(test_dir=TEST_DIRECTORY): @@ -99,6 +102,7 @@ def test_file(test_dir=TEST_DIRECTORY): os.rmdir(test_dir) +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') def test_file_group(test_file): """Create a file with the standard permissions ``('-rw-r--r--')`` and default group. 
diff --git a/jwql/tests/test_pipeline_tools.py b/jwql/tests/test_pipeline_tools.py index c932dd46e..cb3653841 100644 --- a/jwql/tests/test_pipeline_tools.py +++ b/jwql/tests/test_pipeline_tools.py @@ -26,9 +26,11 @@ from jwql.instrument_monitors import pipeline_tools from jwql.utils.utils import get_config +# Determine if tests are being run on jenkins +ON_JENKINS = os.path.expanduser('~') == '/home/jenkins' -@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', - reason='Requires access to central storage.') + +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') def test_completed_pipeline_steps(): """Test that the list of completed pipeline steps for a file is correct @@ -39,7 +41,8 @@ def test_completed_pipeline_steps(): File to be checked """ - filename = os.path.join(get_config()['filesystem'], 'jw00312', 'jw00312002001_02102_00001_nrcb4_rateints.fits') + filename = os.path.join(get_config()['filesystem'], 'jw00312', + 'jw00312002001_02102_00001_nrcb4_rateints.fits') completed_steps = pipeline_tools.completed_pipeline_steps(filename) true_completed = OrderedDict([('group_scale', False), ('dq_init', True), @@ -56,6 +59,10 @@ def test_completed_pipeline_steps(): ('jump', True), ('rate', True)]) + # Only test steps that have a value of True + completed_steps = OrderedDict((k, v) for k, v in completed_steps.items() if v is True) + true_completed = OrderedDict((k, v) for k, v in true_completed.items() if v is True) + assert completed_steps == true_completed @@ -64,21 +71,25 @@ def test_get_pipeline_steps(): instrument """ - # FGS, NIRCam, and NIRISS have the same required steps + # FGS, NIRCam, and NIRISS instruments = ['fgs', 'nircam', 'niriss'] for instrument in instruments: - req_steps = pipeline_tools.get_pipeline_steps(instrument) - steps = ['dq_init', 'saturation', 'superbias', 'refpix', 'linearity', - 'persistence', 'dark_current', 'jump', 'rate'] - not_required = ['group_scale', 'ipc', 'firstframe', 'lastframe', 'rscd'] - 
steps_dict = OrderedDict({}) - for step in steps: - steps_dict[step] = True - for step in not_required: - steps_dict[step] = False - assert req_steps == steps_dict - - # NIRSpec and MIRI have different required steps + req_steps = pipeline_tools.get_pipeline_steps(instrument) + steps = ['dq_init', 'saturation', 'superbias', 'refpix', 'linearity', + 'persistence', 'dark_current', 'jump', 'rate'] + not_required = ['group_scale', 'ipc', 'firstframe', 'lastframe', 'rscd'] + steps_dict = OrderedDict({}) + for step in steps: + steps_dict[step] = True + for step in not_required: + steps_dict[step] = False + + # Only test steps that have a value of True + req_steps = OrderedDict((k, v) for k, v in req_steps.items() if v is True) + steps_dict = OrderedDict((k, v) for k, v in steps_dict.items() if v is True) + assert req_steps == steps_dict + + # NIRSpec nrs_req_steps = pipeline_tools.get_pipeline_steps('nirspec') nrs_steps = ['group_scale', 'dq_init', 'saturation', 'superbias', 'refpix', 'linearity', 'dark_current', 'jump', 'rate'] @@ -88,8 +99,12 @@ def test_get_pipeline_steps(): nrs_dict[step] = True for step in not_required: nrs_dict[step] = False + # Only test steps that have a value of True + nrs_req_steps = OrderedDict((k, v) for k, v in nrs_req_steps.items() if v is True) + nrs_dict = OrderedDict((k, v) for k, v in nrs_dict.items() if v is True) assert nrs_req_steps == nrs_dict + # MIRI miri_req_steps = pipeline_tools.get_pipeline_steps('miri') miri_steps = ['dq_init', 'saturation', 'firstframe', 'lastframe', 'linearity', 'rscd', 'dark_current', 'refpix', 'jump', 'rate'] @@ -99,16 +114,18 @@ def test_get_pipeline_steps(): miri_dict[step] = True for step in not_required: miri_dict[step] = False + # Only test steps that have a value of True + miri_req_steps = OrderedDict((k, v) for k, v in miri_req_steps.items() if v is True) + miri_dict = OrderedDict((k, v) for k, v in miri_dict.items() if v is True) assert miri_req_steps == miri_dict 
-@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', - reason='Requires access to central storage.') +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') def test_image_stack(): """Test stacking of slope images""" directory = os.path.join(get_config()['test_dir'], 'dark_monitor') - files = [os.path.join(directory, 'test_image_{}.fits'.format(str(i+1))) for i in range(3)] + files = [os.path.join(directory, 'test_image_{}.fits'.format(str(i + 1))) for i in range(3)] image_stack, exptimes = pipeline_tools.image_stack(files) truth = np.zeros((3, 10, 10)) diff --git a/jwql/tests/test_utils.py b/jwql/tests/test_utils.py index a59a4b003..94a33572f 100644 --- a/jwql/tests/test_utils.py +++ b/jwql/tests/test_utils.py @@ -24,6 +24,8 @@ from jwql.utils.utils import copy_files, get_config, filename_parser, filesystem_path +# Determine if tests are being run on jenkins +ON_JENKINS = os.path.expanduser('~') == '/home/jenkins' FILENAME_PARSER_TEST_DATA = [ @@ -245,8 +247,7 @@ ] -@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', - reason='Requires access to central storage.') +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') def test_copy_files(): """Test that files are copied successfully""" @@ -271,8 +272,7 @@ def test_copy_files(): os.remove(copied_file) -@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', - reason='Requires access to central storage.') +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') def test_get_config(): """Assert that the ``get_config`` function successfully creates a dictionary. 
@@ -297,8 +297,7 @@ def test_filename_parser(filename, solution): assert filename_parser(filename) == solution -@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', - reason='Requires access to central storage.') +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') def test_filename_parser_whole_filesystem(): """Test the filename_parser on all files currently in the filesystem.""" # Get all files @@ -336,8 +335,7 @@ def test_filename_parser_nonJWST(): filename_parser(filename) -@pytest.mark.skipif(os.path.expanduser('~') == '/home/jenkins', - reason='Requires access to central storage.') +@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.') def test_filesystem_path(): """Test that a file's location in the filesystem is returned""" diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py index d12df9a8b..1db166b1a 100644 --- a/jwql/utils/constants.py +++ b/jwql/utils/constants.py @@ -7,58 +7,72 @@ Use --- - To print the JWST instrument names do: + This variables within this module are intended to be directly + imported, e.g.: :: - from jwql.utils import constants - print(constants.JWST_INSTRUMENT_NAMES) - inventory, keywords = monitor_mast.jwst_inventory() + from jwql.utils.constants import JWST_INSTRUMENT_NAMES References ---------- - Many variables were transferred from an earlier version of utils.py + Many variables were transferred from an earlier version of + ``utils.py`` """ -JWST_INSTRUMENT_NAMES = sorted(['niriss', 'nircam', 'nirspec', 'miri', 'fgs']) +import inflection + + +# Defines the x and y coordinates of amplifier boundaries +AMPLIFIER_BOUNDARIES = {'nircam': {'1': [(0, 0), (512, 2048)], '2': [(512, 0), (1024, 2048)], + '3': [(1024, 0), (1536, 2048)], '4': [(1536, 0), (2048, 2048)]} + } + +# Defines the possible anomalies to flag through the web app +ANOMALIES = ['snowball', 'cosmic_ray_shower', 'crosstalk', 'data_transfer_error', 'diffraction_spike', + 'excessive_saturation', 
'ghost', 'guidestar_failure', 'persistence', 'satellite_trail', 'other'] + +# Defines the possible anomalies (with rendered name) to flag through the web app +ANOMALY_CHOICES = [(anomaly, inflection.titleize(anomaly)) for anomaly in ANOMALIES] + +# Possible suffix types for nominal files +GENERIC_SUFFIX_TYPES = ['uncal', 'cal', 'rateints', 'rate', 'trapsfilled', 'i2d', + 'x1dints', 'x1d', 's2d', 's3d', 'dark', 'crfints', + 'crf', 'ramp', 'fitopt', 'bsubints', 'bsub', 'cat'] + +# Possible suffix types for guider exposures +GUIDER_SUFFIX_TYPES = ['stream', 'stacked_uncal', 'image_uncal', 'stacked_cal', 'image_cal'] + +# JWST data products JWST_DATAPRODUCTS = ['IMAGE', 'SPECTRUM', 'SED', 'TIMESERIES', 'VISIBILITY', 'EVENTLIST', 'CUBE', 'CATALOG', 'ENGINEERING', 'NULL'] +# Lowercase JWST instrument names +JWST_INSTRUMENT_NAMES = sorted(['niriss', 'nircam', 'nirspec', 'miri', 'fgs']) + +# JWST instrument names with shorthand notation JWST_INSTRUMENT_NAMES_SHORTHAND = {'gui': 'fgs', 'mir': 'miri', 'nis': 'niriss', 'nrc': 'nircam', 'nrs': 'nirspec'} +# Mixed case JWST instrument names JWST_INSTRUMENT_NAMES_MIXEDCASE = {'fgs': 'FGS', 'miri': 'MIRI', 'nircam': 'NIRCam', 'niriss': 'NIRISS', 'nirspec': 'NIRSpec'} +# Upper case JWST instrument names JWST_INSTRUMENT_NAMES_UPPERCASE = {key: value.upper() for key, value in JWST_INSTRUMENT_NAMES_MIXEDCASE.items()} +# Astoquery service string for each JWST instrument JWST_MAST_SERVICES = ['Mast.Jwst.Filtered.{}'.format(value.title()) for value in JWST_INSTRUMENT_NAMES] -GUIDER_SUFFIX_TYPES = ['stream', 'stacked_uncal', 'image_uncal', 'stacked_cal', 'image_cal'] - -GENERIC_SUFFIX_TYPES = ['uncal', 'cal', 'rateints', 'rate', 'trapsfilled', 'i2d', - 'x1dints', 'x1d', 's2d', 's3d', 'dark', 'crfints', - 'crf', 'ramp', 'fitopt', 'bsubints', 'bsub', 'cat'] - -TIME_SERIES_SUFFIX_TYPES = ['phot', 'whtlt'] - -CORONAGRAPHY_SUFFIX_TYPES = ['psfstack', 'psfalign', 'psfsub'] - -AMI_SUFFIX_TYPES = ['amiavg', 'aminorm', 'ami'] - -# Concatenate all 
suffix types (ordered to ensure successful matching) -FILE_SUFFIX_TYPES = GUIDER_SUFFIX_TYPES + GENERIC_SUFFIX_TYPES + \ - TIME_SERIES_SUFFIX_TYPES + CORONAGRAPHY_SUFFIX_TYPES + \ - AMI_SUFFIX_TYPES - +# Available monitor names and their location for each JWST instrument MONITORS = { 'fgs': [('Bad Pixel Monitor', '#')], 'miri': [('Dark Current Monitor', '#'), @@ -88,15 +102,29 @@ ('Instrument Model Updates', '#'), ('Failed-open Shutter Monitor', '#')]} +# Possible suffix types for coronograph exposures +NIRCAM_CORONAGRAPHY_SUFFIX_TYPES = ['psfstack', 'psfalign', 'psfsub'] + +# NIRCam subarrays that use four amps for readout +NIRCAM_FOUR_AMP_SUBARRAYS = ['WFSS128R', 'WFSS64R'] + +# NIRCam long wavelength detector names +NIRCAM_LONGWAVE_DETECTORS = ['NRCA5', 'NRCB5'] + +# NIRCam short wavelength detector names NIRCAM_SHORTWAVE_DETECTORS = ['NRCA1', 'NRCA2', 'NRCA3', 'NRCA4', 'NRCB1', 'NRCB2', 'NRCB3', 'NRCB4'] -NIRCAM_LONGWAVE_DETECTORS = ['NRCA5', 'NRCB5'] +# NIRCam subarrays that use either one or four amps +NIRCAM_SUBARRAYS_ONE_OR_FOUR_AMPS = ['SUBGRISMSTRIPE64', 'SUBGRISMSTRIPE128', 'SUBGRISMSTRIPE256'] -AMPLIFIER_BOUNDARIES = {'nircam': {'1': [(0, 0), (512, 2048)], '2': [(512, 0), (1024, 2048)], - '3': [(1024, 0), (1536, 2048)], '4': [(1536, 0), (2048, 2048)]} - } +# Possible suffix types for AMI files +NIRISS_AMI_SUFFIX_TYPES = ['amiavg', 'aminorm', 'ami'] -FOUR_AMP_SUBARRAYS = ['WFSS128R', 'WFSS64R'] +# Possible suffix types for time-series exposures +TIME_SERIES_SUFFIX_TYPES = ['phot', 'whtlt'] -SUBARRAYS_ONE_OR_FOUR_AMPS = ['SUBGRISMSTRIPE64', 'SUBGRISMSTRIPE128', 'SUBGRISMSTRIPE256'] +# Concatenate all suffix types (ordered to ensure successful matching) +FILE_SUFFIX_TYPES = GUIDER_SUFFIX_TYPES + GENERIC_SUFFIX_TYPES + \ + TIME_SERIES_SUFFIX_TYPES + NIRCAM_CORONAGRAPHY_SUFFIX_TYPES + \ + NIRISS_AMI_SUFFIX_TYPES diff --git a/jwql/utils/credentials.py b/jwql/utils/credentials.py new file mode 100644 index 000000000..d307d27d4 --- /dev/null +++ 
b/jwql/utils/credentials.py @@ -0,0 +1,63 @@ +"""Utility functions related to accessing remote services and databases. + +Authors +------- + + - Johannes Sahlmann + - Lauren Chambers + +Use +--- + + This module can be imported as such: + :: + + import credentials + token = credentials.get_mast_token() + + """ +import os + +from astroquery.mast import Mast + +from jwql.utils.utils import get_config, check_config + + +def get_mast_token(request=None): + """Return MAST token from either Astroquery.Mast, webpage cookies, the + JWQL configuration file, or an environment variable. + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + + Returns + ------- + token : str or None + User-specific MAST token string, if available + """ + if Mast.authenticated(): + print('Authenticated with Astroquery MAST magic') + return None + else: + if request is not None: + token = str(request.POST.get('access_token')) + if token != 'None': + print('Authenticated with cached MAST token.') + return token + try: + # check if token is available via config file + check_config('mast_token') + token = get_config()['mast_token'] + print('Authenticated with config.json MAST token.') + return token + except (KeyError, ValueError): + # check if token is available via environment variable + # see https://auth.mast.stsci.edu/info + try: + token = os.environ['MAST_API_TOKEN'] + print('Authenticated with MAST token environment variable.') + return token + except KeyError: + return None diff --git a/jwql/utils/instrument_properties.py b/jwql/utils/instrument_properties.py index 2e5de55c3..4d91ac8dd 100644 --- a/jwql/utils/instrument_properties.py +++ b/jwql/utils/instrument_properties.py @@ -23,7 +23,7 @@ from jwst.datamodels import dqflags import numpy as np -from jwql.utils.constants import AMPLIFIER_BOUNDARIES, FOUR_AMP_SUBARRAYS, SUBARRAYS_ONE_OR_FOUR_AMPS +from jwql.utils.constants import AMPLIFIER_BOUNDARIES, NIRCAM_FOUR_AMP_SUBARRAYS, 
NIRCAM_SUBARRAYS_ONE_OR_FOUR_AMPS def amplifier_info(filename, omit_reference_pixels=True): @@ -65,13 +65,13 @@ def amplifier_info(filename, omit_reference_pixels=True): aperture = "{}_{}".format(detector, subarray_name) # Full frame data will be 2048x2048 for all instruments - if ((x_dim == 2048) and (y_dim == 2048)) or subarray_name in FOUR_AMP_SUBARRAYS: + if ((x_dim == 2048) and (y_dim == 2048)) or subarray_name in NIRCAM_FOUR_AMP_SUBARRAYS: num_amps = 4 amp_bounds = deepcopy(AMPLIFIER_BOUNDARIES[instrument]) else: - if subarray_name not in SUBARRAYS_ONE_OR_FOUR_AMPS: + if subarray_name not in NIRCAM_SUBARRAYS_ONE_OR_FOUR_AMPS: num_amps = 1 amp_bounds = {'1': [(0, 0), (x_dim, y_dim)]} diff --git a/jwql/utils/monitor_template.py b/jwql/utils/monitor_template.py index aa443ff9f..ae6d46d9a 100644 --- a/jwql/utils/monitor_template.py +++ b/jwql/utils/monitor_template.py @@ -46,7 +46,7 @@ Any monitoring script written for ``jwql`` must adhere to the ``jwql`` style guide located at: - https://github.com/spacetelescope/jwql/blob/master/style_guide/style_guide.md + https://github.com/spacetelescope/jwql/blob/master/style_guide/README.md """ import os diff --git a/jwql/utils/preview_image.py b/jwql/utils/preview_image.py index db0e5dbb5..0369dec36 100755 --- a/jwql/utils/preview_image.py +++ b/jwql/utils/preview_image.py @@ -204,7 +204,8 @@ def get_data(self, filename, ext): else: data = hdulist[ext].data.astype(np.float) else: - raise ValueError(('WARNING: no {} extension in {}!'.format(ext, filename))) + raise ValueError('WARNING: no {} extension in {}!'.format(ext, filename)) + if 'PIXELDQ' in extnames: dq = hdulist['PIXELDQ'].data dq = (dq & dqflags.pixel['NON_SCIENCE'] == 0) @@ -212,7 +213,6 @@ def get_data(self, filename, ext): yd, xd = data.shape[-2:] dq = np.ones((yd, xd), dtype="bool") - # Collect information on aperture location within the # full detector. This is needed for mosaicking NIRCam # detectors later. 
@@ -225,7 +225,7 @@ def get_data(self, filename, ext): logging.warning('SUBSTR and SUBSIZE header keywords not found') else: - raise FileNotFoundError(('WARNING: {} does not exist!'.format(filename))) + raise FileNotFoundError('WARNING: {} does not exist!'.format(filename)) return data, dq @@ -266,7 +266,7 @@ def make_figure(self, image, integration_number, min_value, max_value, # Check the input scaling if scale not in ['linear', 'log']: - raise ValueError(('WARNING: scaling option {} not supported.'.format(scale))) + raise ValueError('WARNING: scaling option {} not supported.'.format(scale)) # Set the figure size yd, xd = image.shape diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py index 7b155e600..fdb48dd7b 100644 --- a/jwql/utils/utils.py +++ b/jwql/utils/utils.py @@ -112,7 +112,7 @@ def download_mast_data(query_results, output_dir): print('uri is {}'.format(uri)) - conn.request("GET", "/api/v0/download/file?uri="+uri) + conn.request("GET", "/api/v0/download/file?uri=" + uri) resp = conn.getresponse() file_content = resp.read() @@ -316,7 +316,10 @@ def filename_parser(filename): except AttributeError: jdox_url = 'https://jwst-docs.stsci.edu/display/JDAT/' \ 'File+Naming+Conventions+and+Data+Products' - raise ValueError('Provided file {} does not follow JWST naming conventions. See {} for further information.'.format(filename, jdox_url)) + raise ValueError( + 'Provided file {} does not follow JWST naming conventions. 
' + 'See {} for further information.'.format(filename, jdox_url) + ) return filename_dict @@ -345,7 +348,9 @@ def filesystem_path(filename): if os.path.isfile(full_path): return full_path else: - raise FileNotFoundError(('{} is not in the predicted location: {}'.format(filename, full_path))) + raise FileNotFoundError( + '{} is not in the predicted location: {}'.format(filename, full_path) + ) def get_base_url(): @@ -395,6 +400,32 @@ def get_config(): return settings +def check_config(key): + """Check that the config.json file contains the specified key + and that the entry is not empty + + Parameters + ------- + key : str + The configuration file key to verify + """ + try: + get_config()[key] + except KeyError: + raise KeyError( + 'The key `{}` is not present in config.json. Please add it.'.format(key) + + ' See the relevant wiki page (https://github.com/spacetelescope/' + 'jwql/wiki/Config-file) for more information.' + ) + + if get_config()[key] == "": + raise ValueError( + 'Please complete the `{}` field in your config.json. '.format(key) + + ' See the relevant wiki page (https://github.com/spacetelescope/' + 'jwql/wiki/Config-file) for more information.' 
+ ) + + def initialize_instrument_monitor(module): """Configures a log file for the instrument monitor run and captures the start time of the monitor diff --git a/jwql/website/apps/jwql/data_containers.py b/jwql/website/apps/jwql/data_containers.py index 91b3117c2..039bf07ba 100644 --- a/jwql/website/apps/jwql/data_containers.py +++ b/jwql/website/apps/jwql/data_containers.py @@ -29,19 +29,33 @@ from astropy.io import fits from astropy.time import Time -from astroquery.mast import Mast +from django.conf import settings import numpy as np -from jwql.edb.edb_interface import mnemonic_inventory +# astroquery.mast import that depends on value of auth_mast +# this import has to be made before any other import of astroquery.mast +from jwql.utils.utils import get_config, filename_parser, check_config +check_config('auth_mast') +auth_mast = get_config()['auth_mast'] +mast_flavour = '.'.join(auth_mast.split('.')[1:]) +from astropy import config +conf = config.get_config('astroquery') +conf['mast'] = {'server': 'https://{}'.format(mast_flavour)} +from astroquery.mast import Mast +from jwedb.edb_interface import mnemonic_inventory + +from jwql.database import database_interface as di from jwql.edb.engineering_database import get_mnemonic, get_mnemonic_info from jwql.instrument_monitors.miri_monitors.data_trending import dashboard as miri_dash from jwql.instrument_monitors.nirspec_monitors.data_trending import dashboard as nirspec_dash from jwql.jwql_monitors import monitor_cron_jobs -from jwql.utils.constants import MONITORS +from jwql.utils.utils import ensure_dir_exists +from jwql.utils.constants import MONITORS, JWST_INSTRUMENT_NAMES_MIXEDCASE from jwql.utils.preview_image import PreviewImage -from jwql.utils.utils import get_config, filename_parser +from jwql.utils.credentials import get_mast_token from .forms import MnemonicSearchForm, MnemonicQueryForm, MnemonicExplorationForm + __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) 
FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem') PREVIEW_IMAGE_FILESYSTEM = os.path.join(get_config()['jwql_dir'], 'preview_images') @@ -110,7 +124,8 @@ def get_acknowledgements(): # Parse out the list of individuals acknowledgements = data[index + 1:] - acknowledgements = [item.strip().replace('- ', '').split(' [@')[0].strip() for item in acknowledgements] + acknowledgements = [item.strip().replace('- ', '').split(' [@')[0].strip() + for item in acknowledgements] return acknowledgements @@ -132,6 +147,33 @@ def get_all_proposals(): return proposals +def get_current_flagged_anomalies(rootname): + """Return a list of currently flagged anomalies for the given + ``rootname`` + + Parameters + ---------- + rootname : str + The rootname of interest (e.g. + ``jw86600008001_02101_00001_guider2/``) + + Returns + ------- + current_anomalies : list + A list of currently flagged anomalies for the given ``rootname`` + (e.g. ``['snowball', 'crosstalk']``) + """ + + query = di.session.query(di.Anomaly).filter(di.Anomaly.rootname == rootname).order_by(di.Anomaly.flag_date.desc()).limit(1) + all_records = query.data_frame + if not all_records.empty: + current_anomalies = [col for col, val in np.sum(all_records, axis=0).items() if val] + else: + current_anomalies = [] + + return current_anomalies + + def get_dashboard_components(): """Build and return dictionaries containing components and html needed for the dashboard. 
@@ -147,45 +189,48 @@ def get_dashboard_components(): output_dir = get_config()['outputs'] name_dict = {'': '', 'monitor_mast': 'Database Monitor', - 'database_monitor_jwst': 'JWST', - 'database_monitor_caom': 'JWST (CAOM)', - 'monitor_filesystem': 'Filesystem Monitor', - 'filecount_type': 'Total File Counts by Type', - 'size_type': 'Total File Sizes by Type', - 'filecount': 'Total File Counts', - 'system_stats': 'System Statistics'} - - # Exclude monitors that can't be saved as components - exclude_list = ['monitor_cron_jobs', 'miri_data_trending', - 'trainings_data_15min', 'trainings_data_day'] + 'monitor_filesystem': 'Filesystem Monitor'} # Run the cron job monitor to produce an updated table monitor_cron_jobs.status(production_mode=True) - # Build dictionary of components + # Build dictionary of Bokeh components from files in the output directory dashboard_components = {} - for dir_name, subdir_list, file_list in os.walk(output_dir): + for dir_name, _, file_list in os.walk(output_dir): monitor_name = os.path.basename(dir_name) - if monitor_name not in exclude_list: - dashboard_components[name_dict[monitor_name]] = {} + + # Only continue if the dashboard knows how to build that monitor + if monitor_name in name_dict.keys(): + formatted_monitor_name = name_dict[monitor_name] + dashboard_components[formatted_monitor_name] = {} for fname in file_list: if 'component' in fname: full_fname = '{}/{}'.format(monitor_name, fname) plot_name = fname.split('_component')[0] + # Generate formatted plot name + formatted_plot_name = plot_name.title().replace('_', ' ') + for lowercase, mixed_case in JWST_INSTRUMENT_NAMES_MIXEDCASE.items(): + formatted_plot_name = formatted_plot_name.replace(lowercase.capitalize(), + mixed_case) + formatted_plot_name = formatted_plot_name.replace('Jwst', 'JWST') + formatted_plot_name = formatted_plot_name.replace('Caom', 'CAOM') + # Get the div html_file = full_fname.split('.')[0] + '.html' - with open(os.path.join(output_dir, html_file)) as f: + 
with open(os.path.join(output_dir, html_file), 'r') as f: div = f.read() # Get the script js_file = full_fname.split('.')[0] + '.js' - with open(os.path.join(output_dir, js_file)) as f: + with open(os.path.join(output_dir, js_file), 'r') as f: script = f.read() - dashboard_components[name_dict[monitor_name]][name_dict[plot_name]] = [div, script] + + # Save to dictionary + dashboard_components[formatted_monitor_name][formatted_plot_name] = [div, script] # Add HTML that cannot be saved as components to the dictionary - with open(os.path.join(output_dir, 'monitor_cron_jobs', 'cron_status_table.html')) as f: + with open(os.path.join(output_dir, 'monitor_cron_jobs', 'cron_status_table.html'), 'r') as f: cron_status_table_html = f.read() dashboard_html = {} dashboard_html['Cron Job Monitor'] = cron_status_table_html @@ -216,7 +261,10 @@ def get_edb_components(request): if request.method == 'POST': if 'mnemonic_name_search' in request.POST.keys(): - mnemonic_name_search_form = MnemonicSearchForm(request.POST, + # authenticate with astroquery.mast if necessary + logged_in = log_into_mast(request) + + mnemonic_name_search_form = MnemonicSearchForm(request.POST, logged_in=logged_in, prefix='mnemonic_name_search') if mnemonic_name_search_form.is_valid(): @@ -229,7 +277,11 @@ def get_edb_components(request): mnemonic_exploration_form = MnemonicExplorationForm(prefix='mnemonic_exploration') elif 'mnemonic_query' in request.POST.keys(): - mnemonic_query_form = MnemonicQueryForm(request.POST, prefix='mnemonic_query') + # authenticate with astroquery.mast if necessary + logged_in = log_into_mast(request) + + mnemonic_query_form = MnemonicQueryForm(request.POST, logged_in=logged_in, + prefix='mnemonic_query') # proceed only if entries make sense if mnemonic_query_form.is_valid(): @@ -241,6 +293,31 @@ def get_edb_components(request): mnemonic_query_result = get_mnemonic(mnemonic_identifier, start_time, end_time) mnemonic_query_result_plot = mnemonic_query_result.bokeh_plot() + # 
generate table download in web app + result_table = mnemonic_query_result.data + + # save file locally to be available for download + static_dir = os.path.join(settings.BASE_DIR, 'static') + ensure_dir_exists(static_dir) + file_name_root = 'mnemonic_query_result_table' + file_for_download = '{}.csv'.format(file_name_root) + path_for_download = os.path.join(static_dir, file_for_download) + + # add meta data to saved table + comments = [] + comments.append('DMS EDB query of {}:'.format(mnemonic_identifier)) + for key, value in mnemonic_query_result.info.items(): + comments.append('{} = {}'.format(key, str(value))) + result_table.meta['comments'] = comments + comments.append(' ') + comments.append('Start time {}'.format(start_time.isot)) + comments.append('End time {}'.format(end_time.isot)) + comments.append('Number of rows {}'.format(len(result_table))) + comments.append(' ') + result_table.write(path_for_download, format='ascii.fixed_width', + overwrite=True, delimiter=',', bookend=False) + mnemonic_query_result.file_for_download = file_for_download + # create forms for search fields not clicked mnemonic_name_search_form = MnemonicSearchForm(prefix='mnemonic_name_search') mnemonic_exploration_form = MnemonicExplorationForm(prefix='mnemonic_exploration') @@ -257,25 +334,39 @@ def get_edb_components(request): if field_value != '': column_name = mnemonic_exploration_form[field].label - # indices in table for which a match is found (case-insensitive) - index = [i for i, item in enumerate(mnemonic_exploration_result[column_name]) if - re.search(field_value, item, re.IGNORECASE)] + # matching indices in table (case-insensitive) + index = [ + i for i, item in enumerate(mnemonic_exploration_result[column_name]) if + re.search(field_value, item, re.IGNORECASE) + ] mnemonic_exploration_result = mnemonic_exploration_result[index] mnemonic_exploration_result.n_rows = len(mnemonic_exploration_result) + # generate tables for display and download in web app display_table = 
copy.deepcopy(mnemonic_exploration_result) - # temporary html file, see http://docs.astropy.org/en/stable/_modules/astropy/table/ - # table.html#Table.show_in_browser + + # temporary html file, + # see http://docs.astropy.org/en/stable/_modules/astropy/table/ tmpdir = tempfile.mkdtemp() - path = os.path.join(tmpdir, 'mnemonic_exploration_result_table.html') - with open(path, 'w') as tmp: + file_name_root = 'mnemonic_exploration_result_table' + path_for_html = os.path.join(tmpdir, '{}.html'.format(file_name_root)) + with open(path_for_html, 'w') as tmp: display_table.write(tmp, format='jsviewer') - mnemonic_exploration_result.html_file = path - mnemonic_exploration_result.html_file_content = open(path, 'r').read() + mnemonic_exploration_result.html_file_content = open(path_for_html, 'r').read() + # pass on meta data to have access to total number of mnemonics mnemonic_exploration_result.meta = meta + # save file locally to be available for download + static_dir = os.path.join(settings.BASE_DIR, 'static') + ensure_dir_exists(static_dir) + file_for_download = '{}.csv'.format(file_name_root) + path_for_download = os.path.join(static_dir, file_for_download) + display_table.write(path_for_download, format='ascii.fixed_width', + overwrite=True, delimiter=',', bookend=False) + mnemonic_exploration_result.file_for_download = file_for_download + if mnemonic_exploration_result.n_rows == 0: mnemonic_exploration_result = 'empty' @@ -361,7 +452,7 @@ def get_filenames_by_proposal(proposal): Parameters ---------- proposal : str - The five-digit proposal number (e.g. ``88600``). + The one- to five-digit proposal number (e.g. ``88600``). Returns ------- @@ -369,8 +460,9 @@ def get_filenames_by_proposal(proposal): A list of filenames associated with the given ``proposal``. 
""" + proposal_string = '{:05d}'.format(int(proposal)) filenames = sorted(glob.glob(os.path.join( - FILESYSTEM_DIR, 'jw{}'.format(proposal), '*'))) + FILESYSTEM_DIR, 'jw{}'.format(proposal_string), '*'))) filenames = [os.path.basename(filename) for filename in filenames] return filenames @@ -479,7 +571,8 @@ def get_image_info(file_root, rewrite): im.make_image() # Record how many integrations there are per filetype - search_jpgs = os.path.join(preview_dir, dirname, file_root + '_{}_integ*.jpg'.format(suffix)) + search_jpgs = os.path.join(preview_dir, dirname, + file_root + '_{}_integ*.jpg'.format(suffix)) num_jpgs = len(glob.glob(search_jpgs)) image_info['num_ints'][suffix] = num_jpgs @@ -545,7 +638,8 @@ def get_preview_images_by_instrument(inst): preview_images = glob.glob(os.path.join(PREVIEW_IMAGE_FILESYSTEM, '*', '*.jpg')) # Get subset of preview images that match the filenames - preview_images = [os.path.basename(item) for item in preview_images if os.path.basename(item).split('_integ')[0] in filenames] + preview_images = [os.path.basename(item) for item in preview_images if + os.path.basename(item).split('_integ')[0] in filenames] # Return only @@ -559,7 +653,7 @@ def get_preview_images_by_proposal(proposal): Parameters ---------- proposal : str - The five-digit proposal number (e.g. ``88600``). + The one- to five-digit proposal number (e.g. ``88600``). Returns ------- @@ -568,7 +662,8 @@ def get_preview_images_by_proposal(proposal): given ``proposal``. 
""" - preview_images = glob.glob(os.path.join(PREVIEW_IMAGE_FILESYSTEM, 'jw{}'.format(proposal), '*')) + proposal_string = '{:05d}'.format(int(proposal)) + preview_images = glob.glob(os.path.join(PREVIEW_IMAGE_FILESYSTEM, 'jw{}'.format(proposal_string), '*')) preview_images = [os.path.basename(preview_image) for preview_image in preview_images] return preview_images @@ -625,14 +720,18 @@ def get_proposal_info(filepaths): thumbnail_paths = [] num_files = [] for proposal in proposals: - thumbnail_search_filepath = os.path.join(thumbnail_dir, 'jw{}'.format(proposal), 'jw{}*rate*.thumb'.format(proposal)) + thumbnail_search_filepath = os.path.join( + thumbnail_dir, 'jw{}'.format(proposal), 'jw{}*rate*.thumb'.format(proposal) + ) thumbnail = glob.glob(thumbnail_search_filepath) if len(thumbnail) > 0: thumbnail = thumbnail[0] thumbnail = '/'.join(thumbnail.split('/')[-2:]) thumbnail_paths.append(thumbnail) - fits_search_filepath = os.path.join(FILESYSTEM_DIR, 'jw{}'.format(proposal), 'jw{}*.fits'.format(proposal)) + fits_search_filepath = os.path.join( + FILESYSTEM_DIR, 'jw{}'.format(proposal), 'jw{}*.fits'.format(proposal) + ) num_files.append(len(glob.glob(fits_search_filepath))) # Put the various information into a dictionary of results @@ -678,7 +777,8 @@ def get_thumbnails_by_instrument(inst): thumbnails = glob.glob(os.path.join(THUMBNAIL_FILESYSTEM, '*', '*.thumb')) # Get subset of preview images that match the filenames - thumbnails = [os.path.basename(item) for item in thumbnails if os.path.basename(item).split('_integ')[0] in filenames] + thumbnails = [os.path.basename(item) for item in thumbnails if + os.path.basename(item).split('_integ')[0] in filenames] return thumbnails @@ -690,7 +790,7 @@ def get_thumbnails_by_proposal(proposal): Parameters ---------- proposal : str - The five-digit proposal number (e.g. ``88600``). + The one- to five-digit proposal number (e.g. ``88600``). 
Returns ------- @@ -699,7 +799,8 @@ def get_thumbnails_by_proposal(proposal): ``proposal``. """ - thumbnails = glob.glob(os.path.join(THUMBNAIL_FILESYSTEM, 'jw{}'.format(proposal), '*')) + proposal_string = '{:05d}'.format(int(proposal)) + thumbnails = glob.glob(os.path.join(THUMBNAIL_FILESYSTEM, 'jw{}'.format(proposal_string), '*')) thumbnails = [os.path.basename(thumbnail) for thumbnail in thumbnails] return thumbnails @@ -732,6 +833,44 @@ def get_thumbnails_by_rootname(rootname): return thumbnails +def log_into_mast(request): + """Login via astroquery.mast if user authenticated in web app. + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + + """ + if Mast.authenticated(): + return True + + # get the MAST access token if present + access_token = str(get_mast_token(request)) + + # authenticate with astroquery.mast if necessary + if access_token != 'None': + Mast.login(token=access_token) + return Mast.authenticated() + else: + return False + + +def random_404_page(): + """Randomly select one of the various 404 templates for JWQL + + Returns + ------- + random_template : str + Filename of the selected template + """ + templates = ['404_space.html', '404_spacecat.html'] + choose_page = np.random.choice(len(templates)) + random_template = templates[choose_page] + + return random_template + + def thumbnails(inst, proposal=None): """Generate a page showing thumbnail images corresponding to activities, from a given ``proposal`` @@ -766,7 +905,8 @@ def thumbnails(inst, proposal=None): # If the proposal is specified (i.e. if the page being loaded is # an archive page), only collect data for given proposal if proposal is not None: - full_ids = [f for f in full_ids if f[2:7] == proposal] + proposal_string = '{:05d}'.format(int(proposal)) + full_ids = [f for f in full_ids if f[2:7] == proposal_string] detectors = [] proposals = [] @@ -832,7 +972,8 @@ def thumbnails_ajax(inst, proposal=None): # If the proposal is specified (i.e. 
if the page being loaded is # an archive page), only collect data for given proposal if proposal is not None: - rootnames = [rootname for rootname in rootnames if rootname[2:7] == proposal] + proposal_string = '{:05d}'.format(int(proposal)) + rootnames = [rootname for rootname in rootnames if rootname[2:7] == proposal_string] # Initialize dictionary that will contain all needed data data_dict = {} @@ -847,14 +988,14 @@ def thumbnails_ajax(inst, proposal=None): filename_dict = filename_parser(rootname) except ValueError: # Temporary workaround for noncompliant files in filesystem - filename_dict = {'activity': file_id[17:19], - 'detector': file_id[26:], - 'exposure_id': file_id[20:25], - 'observation': file_id[7:10], - 'parallel_seq_id': file_id[16], - 'program_id': file_id[2:7], - 'visit': file_id[10:13], - 'visit_group': file_id[14:16]} + filename_dict = {'activity': rootname[17:19], + 'detector': rootname[26:], + 'exposure_id': rootname[20:25], + 'observation': rootname[7:10], + 'parallel_seq_id': rootname[16], + 'program_id': rootname[2:7], + 'visit': rootname[10:13], + 'visit_group': rootname[14:16]} # Get list of available filenames available_files = get_filenames_by_rootname(rootname) @@ -864,13 +1005,16 @@ def thumbnails_ajax(inst, proposal=None): data_dict['file_data'][rootname]['filename_dict'] = filename_dict data_dict['file_data'][rootname]['available_files'] = available_files data_dict['file_data'][rootname]['expstart'] = get_expstart(rootname) - data_dict['file_data'][rootname]['suffixes'] = [filename_parser(filename)['suffix'] for filename in available_files] + data_dict['file_data'][rootname]['suffixes'] = [filename_parser(filename)['suffix'] for + filename in available_files] # Extract information for sorting with dropdown menus # (Don't include the proposal as a sorting parameter if the # proposal has already been specified) - detectors = [data_dict['file_data'][rootname]['filename_dict']['detector'] for rootname in 
list(data_dict['file_data'].keys())] - proposals = [data_dict['file_data'][rootname]['filename_dict']['program_id'] for rootname in list(data_dict['file_data'].keys())] + detectors = [data_dict['file_data'][rootname]['filename_dict']['detector'] for + rootname in list(data_dict['file_data'].keys())] + proposals = [data_dict['file_data'][rootname]['filename_dict']['program_id'] for + rootname in list(data_dict['file_data'].keys())] if proposal is not None: dropdown_menus = {'detector': detectors} else: diff --git a/jwql/website/apps/jwql/forms.py b/jwql/website/apps/jwql/forms.py index 556b57312..06e8aed35 100644 --- a/jwql/website/apps/jwql/forms.py +++ b/jwql/website/apps/jwql/forms.py @@ -10,6 +10,7 @@ - Lauren Chambers - Johannes Sahlmann + - Matthew Bourque Use --- @@ -40,20 +41,55 @@ def view_function(request): placed in the ``jwql/utils/`` directory. """ + +import datetime import glob import os from astropy.time import Time, TimeDelta from django import forms from django.shortcuts import redirect +from jwedb.edb_interface import is_valid_mnemonic -from jwql.edb.edb_interface import is_valid_mnemonic -from jwql.utils.constants import JWST_INSTRUMENT_NAMES_SHORTHAND +from jwql.database import database_interface as di +from jwql.utils.constants import ANOMALY_CHOICES, JWST_INSTRUMENT_NAMES_SHORTHAND from jwql.utils.utils import get_config, filename_parser FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem') +class AnomalySubmitForm(forms.Form): + """A multiple choice field for specifying flagged anomalies.""" + + # Define anomaly choice field + anomaly_choices = forms.MultipleChoiceField(choices=ANOMALY_CHOICES, widget=forms.CheckboxSelectMultiple()) + + def update_anomaly_table(self, rootname, user, anomaly_choices): + """Updated the ``anomaly`` table of the database with flagged + anomaly information + + Parameters + ---------- + rootname : str + The rootname of the image to flag (e.g. 
+ ``jw86600008001_02101_00001_guider2``) + user : str + The ``ezid`` of the authenticated user that is flagging the + anomaly + anomaly_choices : list + A list of anomalies that are to be flagged (e.g. + ``['snowball', 'crosstalk']``) + """ + + data_dict = {} + data_dict['rootname'] = rootname + data_dict['flag_date'] = datetime.datetime.now() + data_dict['user'] = user + for choice in anomaly_choices: + data_dict[choice] = True + di.engine.execute(di.Anomaly.__table__.insert(), data_dict) + + class FileSearchForm(forms.Form): """Single-field form to search for a proposal or fileroot.""" @@ -82,7 +118,7 @@ def clean_search(self): search = self.cleaned_data['search'] # Make sure the search is either a proposal or fileroot - if len(search) == 5 and search.isnumeric(): + if search.isnumeric() and 1 < int(search) < 99999: self.search_type = 'proposal' elif self._search_is_fileroot(search): self.search_type = 'fileroot' @@ -94,8 +130,9 @@ def clean_search(self): if self.search_type == 'proposal': # See if there are any matching proposals and, if so, what # instrument they are for - search_string = os.path.join(FILESYSTEM_DIR, 'jw{}'.format(search), - '*{}*.fits'.format(search)) + proposal_string = '{:05d}'.format(int(search)) + search_string = os.path.join(FILESYSTEM_DIR, 'jw{}'.format(proposal_string), + '*{}*.fits'.format(proposal_string)) all_files = glob.glob(search_string) if len(all_files) > 0: all_instruments = [] @@ -104,7 +141,8 @@ def clean_search(self): all_instruments.append(instrument) if len(set(all_instruments)) > 1: raise forms.ValidationError('Cannot return result for proposal with multiple ' - 'instruments.') + 'instruments ({}).' 
+ .format(', '.join(set(all_instruments)))) self.instrument = all_instruments[0] else: @@ -156,10 +194,11 @@ def redirect_to_files(self): """ # Process the data in form.cleaned_data as required search = self.cleaned_data['search'] + proposal_string = '{:05d}'.format(int(search)) # If they searched for a proposal if self.search_type == 'proposal': - return redirect('/{}/archive/{}'.format(self.instrument, search)) + return redirect('/{}/archive/{}'.format(self.instrument, proposal_string)) # If they searched for a file root elif self.search_type == 'fileroot': @@ -176,6 +215,14 @@ class MnemonicSearchForm(forms.Form): # Initialize attributes search_type = None + def __init__(self, *args, **kwargs): + try: + self.logged_in = kwargs.pop('logged_in') + except KeyError: + self.logged_in = True + + super(MnemonicSearchForm, self).__init__(*args, **kwargs) + def clean_search(self): """Validate the "search" field. @@ -187,6 +234,11 @@ def clean_search(self): The cleaned data input into the "search" field """ + # Stop now if not logged in + if not self.logged_in: + raise forms.ValidationError('Could not log into MAST. Please login or provide MAST ' + 'token in environment variable or config.json.') + # Get the cleaned search data search = self.cleaned_data['search'] @@ -233,6 +285,14 @@ class MnemonicQueryForm(forms.Form): # Initialize attributes search_type = None + def __init__(self, *args, **kwargs): + try: + self.logged_in = kwargs.pop('logged_in') + except KeyError: + self.logged_in = True + + super(MnemonicQueryForm, self).__init__(*args, **kwargs) + def clean_search(self): """Validate the "search" field. @@ -244,6 +304,11 @@ def clean_search(self): The cleaned data input into the "search" field """ + # Stop now if not logged in + if not self.logged_in: + raise forms.ValidationError('Could not log into MAST. 
Please login or provide MAST ' + 'token in environment variable or config.json.') + # Get the cleaned search data search = self.cleaned_data['search'] diff --git a/jwql/website/apps/jwql/oauth.py b/jwql/website/apps/jwql/oauth.py index 10406b139..086334e73 100644 --- a/jwql/website/apps/jwql/oauth.py +++ b/jwql/website/apps/jwql/oauth.py @@ -46,7 +46,9 @@ def login(request): import jwql from jwql.utils.constants import MONITORS -from jwql.utils.utils import get_base_url, get_config +from jwql.utils.utils import get_base_url, get_config, check_config + +PREV_PAGE = '/' def register_oauth(): @@ -61,6 +63,8 @@ def register_oauth(): """ # Get configuration parameters + for key in ['client_id', 'client_secret', 'auth_mast']: + check_config(key) client_id = get_config()['client_id'] client_secret = get_config()['client_secret'] auth_mast = get_config()['auth_mast'] @@ -72,7 +76,9 @@ def register_oauth(): 'mast_auth', client_id='{}'.format(client_id), client_secret='{}'.format(client_secret), - access_token_url='https://{}/oauth/access_token?client_secret={}'.format(auth_mast, client_secret), + access_token_url='https://{}/oauth/access_token?client_secret={}'.format( + auth_mast, client_secret + ), access_token_params=None, refresh_token_url=None, authorize_url='https://{}/oauth/authorize'.format(auth_mast), @@ -81,6 +87,7 @@ def register_oauth(): return oauth + JWQL_OAUTH = register_oauth() @@ -102,7 +109,9 @@ def authorize(request): """ # Get auth.mast token - token = JWQL_OAUTH.mast_auth.authorize_access_token(request, headers={'Accept': 'application/json'}) + token = JWQL_OAUTH.mast_auth.authorize_access_token( + request, headers={'Accept': 'application/json'} + ) # Determine domain base_url = get_base_url() @@ -118,7 +127,7 @@ def authorize(request): cookie_args['httponly'] = True # Set the cookie - response = redirect("/") + response = redirect(PREV_PAGE) response.set_cookie("ASB-AUTH", token["access_token"], **cookie_args) return response @@ -159,15 +168,20 @@ def 
user_info(request, **kwargs): # If user is authenticated, return user credentials if cookie is not None: + check_config('auth_mast') + # Note: for now, this must be the development version + auth_mast = get_config()['auth_mast'] + response = requests.get( - 'https://{}/info'.format(get_config()['auth_mast']), + 'https://{}/info'.format(auth_mast), headers={'Accept': 'application/json', 'Authorization': 'token {}'.format(cookie)}) response = response.json() + response['access_token'] = cookie # If user is not authenticated, return no credentials else: - response = {'ezid': None, "anon": True} + response = {'ezid': None, "anon": True, 'access_token': None} return fn(request, response, **kwargs) @@ -243,7 +257,10 @@ def login(request, user): """ # Redirect to oauth login + global PREV_PAGE + PREV_PAGE = request.META.get('HTTP_REFERER') redirect_uri = os.path.join(get_base_url(), 'authorize') + return JWQL_OAUTH.mast_auth.authorize_redirect(request, redirect_uri) @@ -266,7 +283,9 @@ def logout(request): Outgoing response sent to the webpage """ - response = redirect("/") + global PREV_PAGE + PREV_PAGE = request.META.get('HTTP_REFERER') + response = redirect(PREV_PAGE) response.delete_cookie("ASB-AUTH") return response diff --git a/jwql/website/apps/jwql/static/css/jwql.css b/jwql/website/apps/jwql/static/css/jwql.css index 8435ef699..b801c8358 100644 --- a/jwql/website/apps/jwql/static/css/jwql.css +++ b/jwql/website/apps/jwql/static/css/jwql.css @@ -1,5 +1,15 @@ +.anomaly_choice { + list-style: none; +} + +.anomaly_form { + position: absolute; + top: 50%; + transform: translateY(-50%); +} + .APT_parameters { - width: 20% + width: 20% } .banner { @@ -182,6 +192,11 @@ display : inline; display: inline; } +#id_anomaly_choices { + list-style: none; + padding-left: 0; +} + /*Don't let the search bar be super long*/ .input-group { width: 250px; @@ -386,6 +401,27 @@ display : inline; margin-bottom: 0; } +/*Video for space 404 page*/ +#space_404 { + position: fixed; + 
object-fit: cover; + width: 100%; + height: 100%; + right: 0; + bottom: 0; + align: center; +} + +#space_404_text { + position: fixed; + background: rgba(0, 0, 0, 0.5); + color: white; + z-index: 100; + align: center; + padding: 2rem; + display: none; +} + .thumbnail { width: 8rem; height: 8rem; @@ -480,3 +516,8 @@ li.dropdown:hover .dropdown-menu { display: block; } +ul.no-bullets { + list-style: none; + padding-left:10px; + line-height:25px; +} diff --git a/jwql/website/apps/jwql/static/img/flyby.mov b/jwql/website/apps/jwql/static/img/flyby.mov new file mode 100644 index 000000000..3afdd35eb Binary files /dev/null and b/jwql/website/apps/jwql/static/img/flyby.mov differ diff --git a/jwql/website/apps/jwql/static/img/spacecat.jpg b/jwql/website/apps/jwql/static/img/spacecat.jpg new file mode 100644 index 000000000..b93a38a34 Binary files /dev/null and b/jwql/website/apps/jwql/static/img/spacecat.jpg differ diff --git a/jwql/website/apps/jwql/static/js/jwql.js b/jwql/website/apps/jwql/static/js/jwql.js index 4fa4103c4..e47556877 100644 --- a/jwql/website/apps/jwql/static/js/jwql.js +++ b/jwql/website/apps/jwql/static/js/jwql.js @@ -188,9 +188,10 @@ function search() { // Evaluate if the proposal number matches the search var j = i + 1 var prop_name = document.getElementById("proposal" + j).getAttribute('proposal') + var prop_num = Number(prop_name) - if (prop_name.startsWith(search_value)) { + if (prop_name.startsWith(search_value) || prop_num.toString().startsWith(search_value)) { proposals[i].style.display = "inline-block"; num_proposals_displayed++; } else { diff --git a/jwql/website/apps/jwql/templates/404_space.html b/jwql/website/apps/jwql/templates/404_space.html new file mode 100644 index 000000000..1f1aa2ad9 --- /dev/null +++ b/jwql/website/apps/jwql/templates/404_space.html @@ -0,0 +1,34 @@ +{% extends "base.html" %} + +{% block preamble %} + + 404 Not Found - JWQL + +{% endblock %} + +{% block content %} + +
+ + + + + + + + +
+

Lost in space?

+

We couldn't find your page here. Try slewing to a different nebula.

+ +
+ +
+ +{% endblock %} \ No newline at end of file diff --git a/jwql/website/apps/jwql/templates/404_spacecat.html b/jwql/website/apps/jwql/templates/404_spacecat.html new file mode 100644 index 000000000..da4585539 --- /dev/null +++ b/jwql/website/apps/jwql/templates/404_spacecat.html @@ -0,0 +1,36 @@ +{% extends "base.html" %} + +{% block preamble %} + + 404 Not Found - JWQL + +{% endblock %} + +{% block content %} + +
+ + + + + + + + +
+

Lost in space?

+

Page not found for the Space Telescope Console for Analysis and Trending (SpaceCAT)

+

Ooops, we meant to say JWQL

+ SpaceCAT + +
+ +
+ +{% endblock %} \ No newline at end of file diff --git a/jwql/website/apps/jwql/templates/engineering_database.html b/jwql/website/apps/jwql/templates/engineering_database.html index d41df505a..e3e4313c1 100644 --- a/jwql/website/apps/jwql/templates/engineering_database.html +++ b/jwql/website/apps/jwql/templates/engineering_database.html @@ -31,7 +31,7 @@

Search for an EDB mnemonic

Submit a mnemonic identifier:

-

Download FITS Download JPEG - Submit Anomaly

@@ -70,42 +104,6 @@

{{ file_root }}

Lauren needs to figure out what to do with these: {{suffixes}} {% endif %} - - - - {% endblock %} \ No newline at end of file diff --git a/jwql/website/apps/jwql/urls.py b/jwql/website/apps/jwql/urls.py index dece6036c..fd0cb3642 100644 --- a/jwql/website/apps/jwql/urls.py +++ b/jwql/website/apps/jwql/urls.py @@ -75,20 +75,20 @@ re_path(r'^(?P({}))/unlooked/$'.format(instruments), views.unlooked_images, name='unlooked'), re_path(r'^(?P({}))/(?P[\w]+)/$'.format(instruments), views.view_image, name='view_image'), re_path(r'^(?P({}))/(?P.+)/hdr/$'.format(instruments), views.view_header, name='view_header'), - re_path(r'^(?P({}))/archive/(?P[\d]{{5}})/$'.format(instruments), views.archive_thumbnails, name='archive_thumb'), + re_path(r'^(?P({}))/archive/(?P[\d]{{1,5}})/$'.format(instruments), views.archive_thumbnails, name='archive_thumb'), # AJAX views re_path(r'^ajax/(?P({}))/archive/$'.format(instruments), views.archived_proposals_ajax, name='archive_ajax'), - re_path(r'^ajax/(?P({}))/archive/(?P[\d]{{5}})/$'.format(instruments), views.archive_thumbnails_ajax, name='archive_thumb_ajax'), + re_path(r'^ajax/(?P({}))/archive/(?P[\d]{{1,5}})/$'.format(instruments), views.archive_thumbnails_ajax, name='archive_thumb_ajax'), # REST API views path('api/proposals/', api_views.all_proposals, name='all_proposals'), re_path(r'^api/(?P({}))/proposals/$'.format(instruments), api_views.instrument_proposals, name='instrument_proposals'), re_path(r'^api/(?P({}))/preview_images/$'.format(instruments), api_views.preview_images_by_instrument, name='preview_images_by_instrument'), re_path(r'^api/(?P({}))/thumbnails/$'.format(instruments), api_views.thumbnails_by_instrument, name='thumbnails_by_instrument'), - re_path(r'^api/(?P[\d]{5})/filenames/$', api_views.filenames_by_proposal, name='filenames_by_proposal'), - re_path(r'^api/(?P[\d]{5})/preview_images/$', api_views.preview_images_by_proposal, name='preview_images_by_proposal'), - re_path(r'^api/(?P[\d]{5})/thumbnails/$', 
api_views.thumbnails_by_proposal, name='preview_images_by_proposal'), + re_path(r'^api/(?P[\d]{1,5})/filenames/$', api_views.filenames_by_proposal, name='filenames_by_proposal'), + re_path(r'^api/(?P[\d]{1,5})/preview_images/$', api_views.preview_images_by_proposal, name='preview_images_by_proposal'), + re_path(r'^api/(?P[\d]{1,5})/thumbnails/$', api_views.thumbnails_by_proposal, name='preview_images_by_proposal'), re_path(r'^api/(?P[\w]+)/filenames/$', api_views.filenames_by_rootname, name='filenames_by_rootname'), re_path(r'^api/(?P[\w]+)/preview_images/$', api_views.preview_images_by_rootname, name='preview_images_by_rootname'), re_path(r'^api/(?P[\w]+)/thumbnails/$', api_views.thumbnails_by_rootname, name='thumbnails_by_rootname'), diff --git a/jwql/website/apps/jwql/views.py b/jwql/website/apps/jwql/views.py index 58fde5eb1..4d743db7e 100644 --- a/jwql/website/apps/jwql/views.py +++ b/jwql/website/apps/jwql/views.py @@ -35,6 +35,7 @@ placed in the ``jwql/utils/`` directory. """ +import datetime import os from django.http import JsonResponse @@ -45,14 +46,15 @@ from .data_containers import get_filenames_by_instrument from .data_containers import get_header_info from .data_containers import get_image_info +from .data_containers import get_current_flagged_anomalies from .data_containers import get_proposal_info +from .data_containers import random_404_page from .data_containers import thumbnails from .data_containers import thumbnails_ajax from .data_containers import data_trending from .data_containers import nirspec_trending -from .forms import FileSearchForm +from .forms import AnomalySubmitForm, FileSearchForm from .oauth import auth_info, auth_required -import jwql from jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS, JWST_INSTRUMENT_NAMES_MIXEDCASE from jwql.utils.utils import get_base_url, get_config @@ -77,18 +79,20 @@ def miri_data_trending(request): variables, dash = data_trending() context = { - 'dashboard' : dash, + 'dashboard': dash, 
'inst': '', # Leave as empty string or instrument name; Required for navigation bar 'inst_list': JWST_INSTRUMENT_NAMES_MIXEDCASE, # Do not edit; Required for navigation bar 'tools': MONITORS, # Do not edit; Required for navigation bar 'user': None # Do not edit; Required for authentication } - #append variables to context + + # append variables to context context.update(variables) # Return a HTTP response with the template and dictionary of variables return render(request, template, context) + def nirspec_data_trending(request): """Generate the ``MIRI DATA-TRENDING`` page @@ -107,18 +111,20 @@ def nirspec_data_trending(request): variables, dash = nirspec_trending() context = { - 'dashboard' : dash, + 'dashboard': dash, 'inst': '', # Leave as empty string or instrument name; Required for navigation bar 'inst_list': JWST_INSTRUMENT_NAMES_MIXEDCASE, # Do not edit; Required for navigation bar 'tools': MONITORS, # Do not edit; Required for navigation bar 'user': None # Do not edit; Required for authentication } - #append variables to context + + # append variables to context context.update(variables) # Return a HTTP response with the template and dictionary of variables return render(request, template, context) + def about(request): """Generate the ``about`` page @@ -287,7 +293,8 @@ def dashboard(request): return render(request, template, context) -def engineering_database(request): +@auth_info +def engineering_database(request, user): """Generate the EDB page. 
Parameters @@ -376,6 +383,26 @@ def instrument(request, inst): return render(request, template, context) +def not_found(request): + """Generate a ``not_found`` page + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + + Returns + ------- + HttpResponse object + Outgoing response sent to the webpage + """ + template = random_404_page() + status_code = 404 # Note that this will show 400, 403, 404, and 500 as 404 status + context = {'inst': ''} + + return render(request, template, context, status=status_code) + + def unlooked_images(request, inst): """Generate the page listing all unlooked images in the database @@ -435,9 +462,11 @@ def view_image(request, user, inst, file_root, rewrite=False): ---------- request : HttpRequest object Incoming request from the webpage + user : dict + A dictionary of user credentials. inst : str Name of JWST instrument - file : str + file_root : str FITS filename of selected image in filesystem rewrite : bool, optional Regenerate the jpg preview of `file` if it already exists? 
@@ -447,16 +476,32 @@ def view_image(request, user, inst, file_root, rewrite=False): HttpResponse object Outgoing response sent to the webpage """ + # Ensure the instrument is correctly capitalized inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] template = 'view_image.html' image_info = get_image_info(file_root, rewrite) + + # Determine current flagged anomalies + current_anomalies = get_current_flagged_anomalies(file_root) + + # Create a form instance + form = AnomalySubmitForm(request.POST or None, initial={'anomaly_choices': current_anomalies}) + + # If this is a POST request, process the form data + if request.method == 'POST': + anomaly_choices = dict(request.POST)['anomaly_choices'] + if form.is_valid(): + form.update_anomaly_table(file_root, user['ezid'], anomaly_choices) + + # Build the context context = {'inst': inst, 'file_root': file_root, 'jpg_files': image_info['all_jpegs'], 'fits_files': image_info['all_files'], 'suffixes': image_info['suffixes'], - 'num_ints': image_info['num_ints']} + 'num_ints': image_info['num_ints'], + 'form': form} return render(request, template, context) diff --git a/jwql/website/jwql_proj/urls.py b/jwql/website/jwql_proj/urls.py index d245ad292..e3e224260 100644 --- a/jwql/website/jwql_proj/urls.py +++ b/jwql/website/jwql_proj/urls.py @@ -49,6 +49,13 @@ from django.contrib import admin from django.urls import include, path +from ..apps.jwql import views + +# Define custom error page views +handler404 = views.not_found # Page not found +handler500 = views.not_found # Internal error +handler403 = views.not_found # Permission denied +handler400 = views.not_found # Bad request urlpatterns = [ path('', include('jwql.website.apps.jwql.urls')), diff --git a/notebooks/edb_mnemonic_query.ipynb b/notebooks/edb_mnemonic_query.ipynb index 7d2abc4cf..744869ffe 100644 --- a/notebooks/edb_mnemonic_query.ipynb +++ b/notebooks/edb_mnemonic_query.ipynb @@ -4,12 +4,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "##Example 
query for a mnemonic in the DMS engineering database (EDB)\n", - "####Pre-requisites:\n", - "1. Install jwql package\n", + "## Example query for a mnemonic in the DMS engineering database (EDB)\n", + "#### Pre-requisites:\n", + "1. Install `jwql` package\n", "2. Gain access to the EDB by following the process described at https://outerspace.stsci.edu/pages/viewpage.action?spaceKey=JARI&title=JWST+Archive+Rehearsal+Instructions\n", "3. Verify that you have access to the EDB via the MAST web interface (https://mast.stsci.edu/portal/Mashup/Clients/JwstEdb/JwstEdb.html)\n", - "4. Generate a MAST authentication token at https://auth.mast.stsci.edu/info and include it in jwql/jwql/utils/config.json, e.g: {..., \"mast_token\" : \"12345cv6789123ytr8fff12345a55yo0o\", ...}" + "4. Generate a MAST authentication token at https://auth.mast.stsci.edu/info and include it in `jwql/jwql/utils/config.json`, e.g: `{..., \"mast_token\" : \"12345cv6789123ytr8fff12345a55yo0o\", ...}`" ] }, { @@ -23,21 +23,22 @@ "from astropy.time import Time\n", "import pylab as pl\n", "\n", - "from jwql.utils.engineering_database import query_single_mnemonic, mnemonic_inventory, query_mnemonics" + "from jwedb.edb_interface import mnemonic_inventory \n", + "from jwql.edb.engineering_database import get_mnemonic, get_mnemonics \n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "###Query single mnemonic" + "### Query single mnemonic" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true + "collapsed": false }, "outputs": [], "source": [ @@ -45,12 +46,11 @@ "end_time = Time('2019-01-16 00:01:00.000', format='iso')\n", "mnemonic_identifier = 'IMIR_HK_ICE_SEC_VOLT4'\n", "\n", - "mnemonic = query_single_mnemonic(mnemonic_identifier, start_time, end_time)\n", + "mnemonic = get_mnemonic(mnemonic_identifier, start_time, end_time)\n", "\n", "print(mnemonic)\n", "for key, value in mnemonic.info.items():\n", " print('{:>20}: {}'.format(key, value))\n", - "# 
print(mnemonic.meta)\n", "\n", "pl.figure()\n", "pl.plot(mnemonic.data['MJD'], mnemonic.data['euvalue'], 'bo-')\n", @@ -62,20 +62,22 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Query list of mnemonics" + "### Query list of mnemonics" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "start_time = Time('2019-01-16 00:00:00.000', format='iso')\n", "end_time = Time('2019-01-16 00:01:00.000', format='iso')\n", "mnemonics = ['IMIR_HK_ICE_SEC_VOLT4', 'IMIR_HK_ICE_SEC_VOLT3']\n", "\n", - "mnemonic_dict = query_mnemonics(mnemonics, start_time, end_time)\n", + "mnemonic_dict = get_mnemonics(mnemonics, start_time, end_time)\n", "\n", "for m in mnemonics:\n", " print(mnemonic_dict[m])" @@ -85,27 +87,37 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### query mnemonic inventory" + "### Query mnemonic inventory" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true + "collapsed": false }, "outputs": [], "source": [ "inventory = mnemonic_inventory()[0]\n", "print('EDB contains {} mnemonics'.format(len(inventory)))" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [] } ], "metadata": { + "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python 3", + "display_name": "Python [conda env:fpy35]", "language": "python", - "name": "python3" + "name": "conda-env-fpy35-py" }, "language_info": { "codemirror_mode": { diff --git a/requirements.txt b/requirements.txt index 876fc3548..a8b804c75 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,21 +2,23 @@ asdf>=2.3.0 astropy>=3.0 astroquery==0.3.9 authlib==0.11 -bokeh==1.1.0 -django==2.2 -ipython==7.4.0 +bokeh==1.2.0 +django==2.2.2 +inflection==0.3.1 +ipython==7.5.0 jinja2==2.10.1 +jwedb>=0.0.3 jwst==0.0.0 -matplotlib==3.0.3 -numpy==1.16.2 -numpydoc==0.8.0 +matplotlib==3.1.0 +numpy==1.16.4 
+numpydoc==0.9.1 pandas==0.24.2 psycopg2==2.8.2 pysiaf==0.2.5 python-dateutil==2.8.0 -pytest==4.4.1 -sphinx==2.0.1 -sphinx-automodapi==0.10 -sqlalchemy==1.3.3 +pytest==4.6.2 +sphinx==2.1.0 +sphinx-automodapi==0.11 +sqlalchemy==1.3.4 stsci_rtd_theme==0.0.2 -pytest-cov=2.6.0 \ No newline at end of file +pytest-cov=2.6.0 diff --git a/setup.py b/setup.py index 66275add8..3807b417e 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup from setuptools import find_packages -VERSION = '0.19.0' +VERSION = '0.20.0' AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, ' AUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin' @@ -14,6 +14,7 @@ 'bokeh>=1.0', 'django>=2.0', 'jinja2', + 'jwedb', 'jwst', 'matplotlib', 'numpy', diff --git a/style_guide/style_guide.md b/style_guide/README.md similarity index 100% rename from style_guide/style_guide.md rename to style_guide/README.md