diff --git a/docs/source/database.rst b/docs/source/database.rst index 28c96b7a3..0469952af 100644 --- a/docs/source/database.rst +++ b/docs/source/database.rst @@ -12,4 +12,4 @@ reset_database.py ----------------- .. automodule:: jwql.database.reset_database :members: - :undoc-members: \ No newline at end of file + :undoc-members: diff --git a/docs/source/website.rst b/docs/source/website.rst index 6f0da9b31..3825fa2ae 100644 --- a/docs/source/website.rst +++ b/docs/source/website.rst @@ -44,6 +44,12 @@ monitor_views.py :members: :undoc-members: +monitor_models +-------------- +.. automodule:: jwql.website.apps.jwql.monitor_models.common + :members: + :undoc-members: + settings.py ----------- .. automodule:: jwql.website.jwql_proj.settings @@ -60,4 +66,4 @@ views.py -------- .. automodule:: jwql.website.apps.jwql.views :members: - :undoc-members: \ No newline at end of file + :undoc-members: diff --git a/environment_python_3.10.yml b/environment_python_3.10.yml index 383213102..e322ea9ca 100644 --- a/environment_python_3.10.yml +++ b/environment_python_3.10.yml @@ -24,7 +24,7 @@ channels: dependencies: - astropy=5.3.4 - beautifulsoup4=4.12.2 - - bokeh=2.4.3 + - bokeh=3.3.0 - celery=5.3.4 - cryptography=41.0.4 - django=4.2.6 diff --git a/environment_python_3.9.yml b/environment_python_3.9.yml index 2ea0ec9d5..a68f005c5 100644 --- a/environment_python_3.9.yml +++ b/environment_python_3.9.yml @@ -24,7 +24,7 @@ channels: dependencies: - astropy=5.3.3 - beautifulsoup4=4.12.2 - - bokeh=2.4.3 + - bokeh=3.3.0 - celery=5.3.4 - cryptography=41.0.4 - django=4.2.5 diff --git a/jwql/bokeh_templating/factory.py b/jwql/bokeh_templating/factory.py index 867451e53..7c77bfa5d 100644 --- a/jwql/bokeh_templating/factory.py +++ b/jwql/bokeh_templating/factory.py @@ -36,8 +36,7 @@ # Figures get their own constructor so we remove references to Figures from # the keyword maps. 
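The environment pins above move bokeh from 2.4.3 to 3.3.0, and the bokeh_templating factory change just below follows from that: in Bokeh 3 the plotting entry point is the lowercase `figure` callable, so the factory pops "figure" rather than "Figure" from its constructor map, and the old `del sequences["figure"]` bookkeeping goes away. A minimal sketch of the idea, not the project's factory code; the `mappings` dict here is a stand-in for the factory's generated keyword maps:

```python
# Stand-in mapping of Bokeh callables by name; factory.py builds its maps
# automatically, this only illustrates the figure special-casing.
from bokeh.plotting import figure, show

mappings = {"figure": figure, "show": show}

# Mirrors the change below: the figure constructor is removed from the generic
# map and kept separately, keyed by the Bokeh 3 lowercase name.
figure_constructor = mappings.pop("figure")

fig = figure_constructor(width=400, height=300, title="demo")
fig.line([1, 2, 3], [4, 6, 5])
```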
-Figure = mappings.pop("Figure") -del sequences["figure"] +Figure = mappings.pop("figure") def mapping_factory(tool, element_type): diff --git a/jwql/database/database_interface.py b/jwql/database/database_interface.py index 73b536eed..d94532ef4 100644 --- a/jwql/database/database_interface.py +++ b/jwql/database/database_interface.py @@ -84,10 +84,9 @@ from jwql.utils.constants import ANOMALIES_PER_INSTRUMENT from jwql.utils.constants import FILE_SUFFIX_TYPES from jwql.utils.constants import JWST_INSTRUMENT_NAMES +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_config -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - # Monkey patch Query with data_frame method @property diff --git a/jwql/edb/engineering_database.py b/jwql/edb/engineering_database.py index fea43aafa..b4812d81b 100644 --- a/jwql/edb/engineering_database.py +++ b/jwql/edb/engineering_database.py @@ -58,20 +58,20 @@ from astroquery.mast import Mast from bokeh.embed import components from bokeh.layouts import column -from bokeh.models import BoxAnnotation, ColumnDataSource, DatetimeTickFormatter, HoverTool, Range1d +from bokeh.models import BoxAnnotation, ColumnDataSource, DatetimeTickFormatter, HoverTool +from bokeh.models import Range1d from bokeh.plotting import figure, output_file, show, save import numpy as np from jwst.lib.engdb_tools import ENGDB_Service from jwql.utils.constants import MIRI_POS_RATIO_VALUES +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.credentials import get_mast_base_url, get_mast_token from jwql.utils.utils import get_config MAST_EDB_MNEMONIC_SERVICE = 'Mast.JwstEdb.Mnemonics' MAST_EDB_DICTIONARY_SERVICE = 'Mast.JwstEdb.Dictionary' -# Temporary until JWST operations: switch to test string for MAST request URL -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') if not ON_GITHUB_ACTIONS: Mast._portal_api_connection.MAST_REQUEST_URL = get_config()['mast_request_url'] @@ -283,7 +283,7 @@ def __mul__(self, mnem): before = np.where(common_dates == self.data['dates'][block])[0] if len(before) > 0: - new_blocks.append(before[0]) # + 1) + new_blocks.append(before[0]) # + 1) except IndexError: # The final block value is usually equal to the length of the array, and will # therefore cause an Index Error in the lines above. Ignore that error here. @@ -421,7 +421,7 @@ def block_stats(self, sigma=3, ignore_vals=[], ignore_edges=False, every_change= # calculated, remove those every change values and block values from the EdbMnemonic # instance. if every_change: - if len(remove_change_indexes) > 0: + if len(remove_change_indexes) > 0: self.every_change_values = np.delete(self.every_change_values, remove_change_indexes) self.blocks = np.delete(self.blocks, remove_change_indexes) @@ -439,7 +439,7 @@ def block_stats(self, sigma=3, ignore_vals=[], ignore_edges=False, every_change= stdevs.append(stdevval) maxs.append(meanval) mins.append(meanval) - #if hasattr(self, 'every_change_values'): + # if hasattr(self, 'every_change_values'): # updated_every_change_vals.append(self.every_change_values[i + 1]) self.mean = means self.median = medians @@ -510,9 +510,9 @@ def block_stats_filter_positions(self, sigma=5): self.data["euvalues"].data[index:self.blocks[i + 1]], sigma=sigma) if np.isfinite(meanval): - #this is preventing the nans above from being added. not sure what to do here. - #bokeh cannot deal with nans. 
but we need entries in order to have the blocks indexes - #remain correct. but maybe we dont care about the block indexes after averaging + # this is preventing the nans above from being added. not sure what to do here. + # bokeh cannot deal with nans. but we need entries in order to have the blocks indexes + # remain correct. but maybe we dont care about the block indexes after averaging medtimes.append(calc_median_time(self.data["dates"].data[index:self.blocks[i + 1]][good])) means.append(meanval) medians.append(medianval) @@ -523,7 +523,7 @@ def block_stats_filter_positions(self, sigma=5): # If there were blocks composed entirely of bad data, meaning no mean values were # calculated, remove those every change values and block values from the EdbMnemonic # instance. - if len(remove_change_indexes) > 0: + if len(remove_change_indexes) > 0: self.every_change_values = np.delete(self.every_change_values, remove_change_indexes) self.blocks = np.delete(self.blocks, remove_change_indexes) @@ -690,37 +690,33 @@ def bokeh_plot(self, show_plot=False, savefig=False, out_dir='./', nominal_value if plot_mean: source_mean = ColumnDataSource(data={'mean_x': self.median_times, 'mean_y': self.mean}) mean_data = fig.scatter(x='mean_x', y='mean_y', line_width=1, line_color='orange', alpha=0.75, source=source_mean) - mean_hover_tool = HoverTool(tooltips=[('Mean', '@mean_y'), - ('Date', '@mean_x{%d %b %Y %H:%M:%S}') - ], mode='mouse', renderers=[mean_data]) + mean_hover_tool = HoverTool(tooltips=[('Mean', '@mean_y'), ('Date', '@mean_x{%d %b %Y %H:%M:%S}')], + mode='mouse', renderers=[mean_data]) mean_hover_tool.formatters = {'@mean_x': 'datetime'} fig.tools.append(mean_hover_tool) if plot_median: source_median = ColumnDataSource(data={'median_x': self.median_times, 'median_y': self.median}) median_data = fig.scatter(x='median_x', y='median_y', line_width=1, line_color='orangered', alpha=0.75, source=source_median) - median_hover_tool = HoverTool(tooltips=[('Median', '@median_y'), - ('Date', '@median_x{%d %b %Y %H:%M:%S}') - ], mode='mouse', renderers=[median_data]) + median_hover_tool = HoverTool(tooltips=[('Median', '@median_y'), ('Date', '@median_x{%d %b %Y %H:%M:%S}')], + mode='mouse', renderers=[median_data]) median_hover_tool.formatters = {'@median_x': 'datetime'} fig.tools.append(median_hover_tool) - # If the max and min arrays are to be plotted, create columndata sources for them as well + # If the max and min arrays are to be plotted, create columndata sources for them as well if plot_max: source_max = ColumnDataSource(data={'max_x': self.median_times, 'max_y': self.max}) max_data = fig.scatter(x='max_x', y='max_y', line_width=1, color='black', line_color='black', source=source_max) - max_hover_tool = HoverTool(tooltips=[('Max', '@max_y'), - ('Date', '@max_x{%d %b %Y %H:%M:%S}') - ], mode='mouse', renderers=[max_data]) + max_hover_tool = HoverTool(tooltips=[('Max', '@max_y'), ('Date', '@max_x{%d %b %Y %H:%M:%S}')], + mode='mouse', renderers=[max_data]) max_hover_tool.formatters = {'@max_x': 'datetime'} fig.tools.append(max_hover_tool) if plot_min: source_min = ColumnDataSource(data={'min_x': self.median_times, 'min_y': self.min}) min_data = fig.scatter(x='min_x', y='min_y', line_width=1, color='black', line_color='black', source=source_min) - minn_hover_tool = HoverTool(tooltips=[('Min', '@min_y'), - ('Date', '@min_x{%d %b %Y %H:%M:%S}') - ], mode='mouse', renderers=[min_data]) + minn_hover_tool = HoverTool(tooltips=[('Min', '@min_y'), ('Date', '@min_x{%d %b %Y %H:%M:%S}')], + mode='mouse', 
renderers=[min_data]) min_hover_tool.formatters = {'@min_x': 'datetime'} fig.tools.append(min_hover_tool) @@ -740,12 +736,12 @@ def bokeh_plot(self, show_plot=False, savefig=False, out_dir='./', nominal_value fig = add_limit_boxes(fig, yellow=yellow_limits, red=red_limits) # Make the x axis tick labels look nice - fig.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + fig.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) fig.xaxis.major_label_orientation = np.pi / 4 @@ -1206,12 +1202,12 @@ def plot_data_plus_devs(self, use_median=False, show_plot=False, savefig=False, fig_dev.line(data_dates, dev, color='red') # Make the x axis tick labels look nice - fig_dev.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + fig_dev.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) fig.xaxis.major_label_orientation = np.pi / 4 diff --git a/jwql/example_config.json b/jwql/example_config.json index b6dfe2806..95bc8c8cf 100644 --- a/jwql/example_config.json +++ b/jwql/example_config.json @@ -2,7 +2,7 @@ "admin_account" : "", "auth_mast" : "", "connection_string" : "", - "database" : { + "databases" : { "engine" : "", "name" : "", "user" : "", @@ -10,13 +10,23 @@ "host" : "", "port" : "" }, - "django_database" : { - "ENGINE" : "", - "NAME" : "", - "USER" : "", - "PASSWORD" : "", - "HOST" : "", - "PORT" : "" + "django_databases" : { + "default": { + "ENGINE" : "", + "NAME" : "", + "USER" : "", + "PASSWORD" : "", + "HOST" : "", + "PORT" : "" + }, + "monitors": { + "ENGINE" : "", + "NAME" : "", + "USER" : "", + "PASSWORD" : "", + "HOST" : "", + "PORT" : "" + } }, "jwql_dir" : "", "jwql_version": "", diff --git a/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py b/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py index c0366df14..d3ae2e795 100755 --- a/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py +++ b/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py @@ -105,19 +105,13 @@ from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline from jwql.utils import crds_tools, instrument_properties, monitor_utils from jwql.utils.constants import DARKS_BAD_PIXEL_TYPES, DARK_EXP_TYPES, FLATS_BAD_PIXEL_TYPES, FLAT_EXP_TYPES -from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, ON_GITHUB_ACTIONS +from jwql.utils.constants import ON_READTHEDOCS from jwql.utils.logging_functions import log_info, log_fail from jwql.utils.mast_utils import mast_query from jwql.utils.permissions import set_permissions from jwql.utils.utils import copy_files, create_png_from_fits, ensure_dir_exists, get_config, filesystem_path -# Determine if the code is being run by Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - -# Determine if the code is being run as part 
of a Readthedocs build -ON_READTHEDOCS = False -if 'READTHEDOCS' in os.environ: # pragma: no cover - ON_READTHEDOCS = os.environ['READTHEDOCS'] if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: from jwql.website.apps.jwql.monitor_pages.monitor_bad_pixel_bokeh import BadPixelPlots diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py index 2154ee00a..f07e48875 100755 --- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py @@ -381,7 +381,7 @@ from bokeh.embed import components, json_item from bokeh.layouts import gridplot from bokeh.models import BoxAnnotation, ColumnDataSource, DatetimeTickFormatter, HoverTool, Range1d -from bokeh.models.widgets import Tabs, Panel +from bokeh.models.layouts import Tabs from bokeh.plotting import figure, output_file, save, show from bokeh.palettes import Turbo256 from jwql.database import database_interface @@ -991,14 +991,14 @@ def get_dependency_data(self, dependency, starttime, endtime): if dependency["name"] in self.query_results: # We need the full time to be covered - if ((self.query_results[dependency["name"]].requested_start_time <= starttime) and - (self.query_results[dependency["name"]].requested_end_time >= endtime)): + if ((self.query_results[dependency["name"]].requested_start_time <= starttime) + and (self.query_results[dependency["name"]].requested_end_time >= endtime)): logging.info(f'Dependency {dependency["name"]} is already present in self.query_results.') # Extract data for the requested time range - matching_times = np.where((self.query_results[dependency["name"]].data["dates"] >= starttime) & - (self.query_results[dependency["name"]].data["dates"] <= endtime)) + matching_times = np.where((self.query_results[dependency["name"]].data["dates"] >= starttime) + & (self.query_results[dependency["name"]].data["dates"] <= endtime)) dep_mnemonic = {"dates": self.query_results[dependency["name"]].data["dates"][matching_times], "euvalues": self.query_results[dependency["name"]].data["euvalues"][matching_times]} @@ -1138,16 +1138,16 @@ def get_history_every_change(self, mnemonic, start_date, end_date): devs = [] # Keep only data that fall at least partially within the plot range - if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) - | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): + if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) + | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): times.extend(row.time) values.extend(row.mnemonic_value) medians.append(row.median) devs.append(row.stdev) hist[row.dependency_value] = (times, values, medians, devs) else: - if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) - | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): + if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) + | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): hist[row.dependency_value] = (row.time, row.mnemonic_value, row.median, row.stdev) return hist @@ -2143,12 +2143,12 @@ def plot_every_change_data(data, mnem_name, units, show_plot=False, savefig=True fig = add_limit_boxes(fig, yellow=yellow_limits, red=red_limits) # Make the x axis tick labels look nice - fig.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b 
%H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + fig.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) fig.xaxis.major_label_orientation = np.pi / 4 diff --git a/jwql/instrument_monitors/common_monitors/readnoise_monitor.py b/jwql/instrument_monitors/common_monitors/readnoise_monitor.py index 10fc82237..36543cc6e 100755 --- a/jwql/instrument_monitors/common_monitors/readnoise_monitor.py +++ b/jwql/instrument_monitors/common_monitors/readnoise_monitor.py @@ -44,25 +44,29 @@ import crds import matplotlib matplotlib.use('Agg') -import matplotlib.pyplot as plt # noqa: E348 (comparison to true) -import numpy as np # noqa: E348 (comparison to true) -from pysiaf import Siaf # noqa: E348 (comparison to true) -from sqlalchemy.sql.expression import and_ # noqa: E348 (comparison to true) - -from jwql.database.database_interface import FGSReadnoiseQueryHistory, FGSReadnoiseStats # noqa: E348 (comparison to true) -from jwql.database.database_interface import MIRIReadnoiseQueryHistory, MIRIReadnoiseStats # noqa: E348 (comparison to true) -from jwql.database.database_interface import NIRCamReadnoiseQueryHistory, NIRCamReadnoiseStats # noqa: E348 (comparison to true) -from jwql.database.database_interface import NIRISSReadnoiseQueryHistory, NIRISSReadnoiseStats # noqa: E348 (comparison to true) -from jwql.database.database_interface import NIRSpecReadnoiseQueryHistory, NIRSpecReadnoiseStats # noqa: E348 (comparison to true) -from jwql.database.database_interface import session, engine # noqa: E348 (comparison to true) -from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline # noqa: E348 (comparison to true) -from jwql.instrument_monitors import pipeline_tools # noqa: E348 (comparison to true) -from jwql.utils import instrument_properties, monitor_utils # noqa: E348 (comparison to true) -from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE # noqa: E348 (comparison to true) -from jwql.utils.logging_functions import log_info, log_fail # noqa: E348 (comparison to true) -from jwql.utils.monitor_utils import update_monitor_table # noqa: E348 (comparison to true) -from jwql.utils.permissions import set_permissions # noqa: E348 (comparison to true) -from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config, copy_files # noqa: E348 (comparison to true) +import matplotlib.pyplot as plt # noqa: E402 (module level import not at top of file) +import numpy as np # noqa: E402 (module level import not at top of file) +from pysiaf import Siaf # noqa: E402 (module level import not at top of file) + +from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline # noqa: E402 (module level import not at top of file) +from jwql.instrument_monitors import pipeline_tools # noqa: E402 (module level import not at top of file) +from jwql.utils import instrument_properties, monitor_utils # noqa: E402 (module level import not at top of file) +from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE # noqa: E402 (module level import not at top of file) +from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS # noqa: E402 (module level import not at top of file) +from jwql.utils.logging_functions import log_info, log_fail # noqa: E402 
(module level import not at top of file) +from jwql.utils.monitor_utils import update_monitor_table # noqa: E402 (module level import not at top of file) +from jwql.utils.permissions import set_permissions # noqa: E402 (module level import not at top of file) +from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config, copy_files # noqa: E402 (module level import not at top of file) + +if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: + # Need to set up django apps before we can access the models + import django # noqa: E402 (module level import not at top of file) + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") + django.setup() + + # Import * is okay here because this module specifically only contains database models + # for this monitor + from jwql.website.apps.jwql.monitor_models.readnoise import * # noqa: E402 (module level import not at top of file) class Readnoise(): @@ -149,17 +153,8 @@ def file_exists_in_database(self, filename): file_exists : bool ``True`` if filename exists in the readnoise stats database. """ - - query = session.query(self.stats_table) - results = query.filter(self.stats_table.uncal_filename == filename).all() - - if len(results) != 0: - file_exists = True - else: - file_exists = False - - session.close() - return file_exists + results = self.stats_table.objects.filter(uncal_filename__iexact=filename).values() + return (len(results) != 0) def get_amp_stats(self, image, amps): """Calculates the sigma-clipped mean and stddev, as well as the @@ -385,17 +380,18 @@ def most_recent_search(self): Date (in MJD) of the ending range of the previous MAST query where the readnoise monitor was run. """ - - query = session.query(self.query_table).filter(and_(self.query_table.aperture == self.aperture, - self.query_table.run_monitor == True)).order_by(self.query_table.end_time_mjd).all() # noqa: E712 (comparison to True) + filter_kwargs = { + 'aperture__iexact': self.aperture, + 'run_monitor__exact': True + } + query = self.query_table.objects.filter(**filter_kwargs).order_by("-end_time_mjd").all() if len(query) == 0: query_result = 59607.0 # a.k.a. Jan 28, 2022 == First JWST images (MIRI) logging.info(('\tNo query history for {}. 
Beginning search date will be set to {}.'.format(self.aperture, query_result))) else: - query_result = query[-1].end_time_mjd + query_result = query[0].end_time_mjd - session.close() return query_result def process(self, file_list): @@ -512,24 +508,24 @@ def process(self, file_list): 'readnoise_filename': os.path.basename(readnoise_outfile), 'full_image_mean': float(full_image_mean), 'full_image_stddev': float(full_image_stddev), - 'full_image_n': full_image_n.astype(float), - 'full_image_bin_centers': full_image_bin_centers.astype(float), + 'full_image_n': list(full_image_n.astype(float)), + 'full_image_bin_centers': list(full_image_bin_centers.astype(float)), 'readnoise_diff_image': os.path.basename(readnoise_diff_png), 'diff_image_mean': float(diff_image_mean), 'diff_image_stddev': float(diff_image_stddev), - 'diff_image_n': diff_image_n.astype(float), - 'diff_image_bin_centers': diff_image_bin_centers.astype(float), + 'diff_image_n': list(diff_image_n.astype(float)), + 'diff_image_bin_centers': list(diff_image_bin_centers.astype(float)), 'entry_date': datetime.datetime.now() } for key in amp_stats.keys(): if isinstance(amp_stats[key], (int, float)): readnoise_db_entry[key] = float(amp_stats[key]) else: - readnoise_db_entry[key] = amp_stats[key].astype(float) + readnoise_db_entry[key] = list(amp_stats[key].astype(float)) # Add this new entry to the readnoise database table - with engine.begin() as connection: - connection.execute(self.stats_table.__table__.insert(), readnoise_db_entry) + entry = self.stats_table(**readnoise_db_entry) + entry.save() logging.info('\tNew entry added to readnoise database table') # Remove the raw and calibrated files to save memory space @@ -658,8 +654,8 @@ def run(self): 'files_found': len(new_files), 'run_monitor': monitor_run, 'entry_date': datetime.datetime.now()} - with engine.begin() as connection: - connection.execute(self.query_table.__table__.insert(), new_entry) + stats_entry = self.query_table(**new_entry) + stats_entry.save() logging.info('\tUpdated the query history table') logging.info('Readnoise Monitor completed successfully.') diff --git a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py index 565f5b7d5..b5388309d 100644 --- a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py +++ b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py @@ -40,13 +40,22 @@ from photutils.segmentation import detect_sources, detect_threshold from scipy.ndimage import binary_dilation -from jwql.database.database_interface import session, engine -from jwql.database.database_interface import NIRCamClawQueryHistory, NIRCamClawStats from jwql.utils import monitor_utils +from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS from jwql.utils.logging_functions import log_info, log_fail from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config from jwst_backgrounds import jbt +if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: + # Need to set up django apps before we can access the models + import django # noqa: E402 (module level import not at top of file) + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") + django.setup() + + # Import * is okay here because this module specifically only contains database models + # for this monitor + from jwql.website.apps.jwql.monitor_models.claw import * # noqa: E402 (module level import not at top of file) + matplotlib.use('Agg') warnings.filterwarnings('ignore', 
message="nan_treatment='interpolate', however, NaN values detected post convolution*") warnings.filterwarnings('ignore', message='Input data contains invalid values (NaNs or infs)*') @@ -114,8 +123,8 @@ def __init__(self): ensure_dir_exists(self.output_dir_bkg) # Get the claw monitor database tables - self.query_table = eval('NIRCamClawQueryHistory') - self.stats_table = eval('NIRCamClawStats') + self.query_table = NIRCamClawQueryHistory + self.stats_table = NIRCamClawStats def make_background_plots(self, plot_type='bkg'): """Makes plots of the background levels over time in NIRCam data. @@ -128,13 +137,14 @@ def make_background_plots(self, plot_type='bkg'): measured vs model trending. """ + columns = ['filename', 'filter', 'pupil', 'detector', 'effexptm', 'expstart_mjd', 'entry_date', 'mean', 'median', + 'stddev', 'frac_masked'] # , 'total_bkg'] + # Get all of the background data. - query = session.query(NIRCamClawStats.filename, NIRCamClawStats.filter, NIRCamClawStats.pupil, NIRCamClawStats.detector, - NIRCamClawStats.effexptm, NIRCamClawStats.expstart_mjd, NIRCamClawStats.entry_date, NIRCamClawStats.mean, - NIRCamClawStats.median, NIRCamClawStats.stddev, NIRCamClawStats.frac_masked, NIRCamClawStats.total_bkg).all() - df_orig = pd.DataFrame(query, columns=['filename', 'filter', 'pupil', 'detector', 'effexptm', 'expstart_mjd', - 'entry_date', 'mean', 'median', 'stddev', 'frac_masked', 'total_bkg']) - df_orig = df_orig.drop_duplicates(subset='filename', keep="last") # remove any duplicate filename entries, keep the most recent + background_data = NIRCamClawStats.objects.all().values(*columns) + df_orig = pd.DataFrame.from_records(background_data) + # remove any duplicate filename entries, keep the most recent + df_orig = df_orig.drop_duplicates(subset='filename', keep="last") # Get label info based on plot type if plot_type == 'bkg': @@ -152,7 +162,8 @@ def make_background_plots(self, plot_type='bkg'): logging.info('Working on {} trending plots for {}'.format(plot_title, fltr)) found_limits = False if int(fltr[1:4]) < 250: # i.e. SW - detectors_to_run = ['NRCA2', 'NRCA4', 'NRCB3', 'NRCB1', 'NRCA1', 'NRCA3', 'NRCB4', 'NRCB2'] # in on-sky order, don't change order + # in on-sky order, don't change order + detectors_to_run = ['NRCA2', 'NRCA4', 'NRCB3', 'NRCB1', 'NRCA1', 'NRCA3', 'NRCB4', 'NRCB2'] grid = plt.GridSpec(2, 4, hspace=.4, wspace=.4, width_ratios=[1, 1, 1, 1]) fig = plt.figure(figsize=(45, 20)) fig.suptitle(fltr, fontsize=70) @@ -168,9 +179,9 @@ def make_background_plots(self, plot_type='bkg'): # Get relevant data for this filter/detector and remove bad datasets, e.g. crowded fields, # extended objects, nebulas, short exposures. - df = df_orig[(df_orig['filter'] == fltr) & (df_orig['pupil'] == 'CLEAR') & (df_orig['detector'] == det) & - (df_orig['effexptm'] > 300) & (df_orig['frac_masked'] < frack_masked_thresh) & - (abs(1 - (df_orig['mean'] / df_orig['median'])) < 0.05)] + df = df_orig[(df_orig['filter'] == fltr) & (df_orig['pupil'] == 'CLEAR') & (df_orig['detector'] == det) + & (df_orig['effexptm'] > 300) & (df_orig['frac_masked'] < frack_masked_thresh) + & (abs(1 - (df_orig['mean'] / df_orig['median'])) < 0.05)] if len(df) > 0: df = df.sort_values(by=['expstart_mjd']) @@ -181,7 +192,8 @@ def make_background_plots(self, plot_type='bkg'): df = df[df['stddev'] != 0] # older data has no accurate stddev measures plot_data = df['stddev'].values if plot_type == 'model': - plot_data = df['median'].values / df['total_bkg'].values + total_bkg = [1. 
for x in df['median'].values] + plot_data = df['median'].values # / df['total_bkg'].values plot_expstarts = df['expstart_mjd'].values # Plot the background data over time @@ -297,7 +309,7 @@ def process(self): write_bathtub=True, bathtub_file='background_versus_day.txt') bkg_table = Table.read('background_versus_day.txt', names=('day', 'total_bkg'), format='ascii') total_bkg = bkg_table['total_bkg'][bkg_table['day'] == doy][0] - except: + except Exception as e: total_bkg = np.nan # Add this file's stats to the claw database table. Can't insert values with numpy.float32 @@ -319,13 +331,12 @@ def process(self): 'stddev': float(stddev), 'frac_masked': len(segmap_orig[(segmap_orig != 0) | (dq & 1 != 0)]) / (segmap_orig.shape[0] * segmap_orig.shape[1]), 'skyflat_filename': os.path.basename(self.outfile), - 'doy': float(doy), - 'total_bkg': float(total_bkg), + # 'doy': float(doy), + # 'total_bkg': float(total_bkg), 'entry_date': datetime.datetime.now() } - - with engine.begin() as connection: - connection.execute(self.stats_table.__table__.insert(), claw_db_entry) + entry = self.stats_table(**claw_db_entry) + entry.save() hdu.close() # Make the normalized skyflat for this detector @@ -436,7 +447,7 @@ def run(self): for row in mast_table_combo: try: existing_files.append(filesystem_path(row['filename'])) - except: + except Exception as e: pass self.files = np.array(existing_files) self.detectors = np.array(mast_table_combo['detector']) @@ -460,8 +471,8 @@ def run(self): 'end_time_mjd': self.query_end_mjd, 'run_monitor': monitor_run, 'entry_date': datetime.datetime.now()} - with engine.begin() as connection: - connection.execute(self.query_table.__table__.insert(), new_entry) + entry = self.query_table(**new_entry) + entry.save() logging.info('Claw Monitor completed successfully.') diff --git a/jwql/pull_jwql_branch.sh b/jwql/pull_jwql_branch.sh new file mode 100644 index 000000000..95a1c94b4 --- /dev/null +++ b/jwql/pull_jwql_branch.sh @@ -0,0 +1,96 @@ +#!/bin/bash + +function echo_format { + echo "" + echo "Usage: $0 [-r|--reset_service] [-n|--notify ]" + echo "" + echo "WARNING! the optional parameters should only be used during a JWQL release in production" + echo "branch: the git branch to pull from" + echo "[-r|--reset_service]: Reset the jwql service" + echo "[-n|--notify ]: Notify via provided email" + echo "" + echo "Local:" + echo "$ bash pull_jwql_branch.sh develop" + echo "" + echo "Test:" + echo "$ bash pull_jwql_branch.sh v1.2 -r" + echo "" + echo "Production:" + echo "$ bash pull_jwql_branch.sh v1.2 -r -n group_email_address@stsci.edu" +} + +# Check if the required number of arguments are provided +if [ "$#" -lt 1 ]; then + echo_format + exit 1 +fi + +# Set default values for optional flags +reset=false +notify=false +recipient="" + +# Retrieve the branch_name from the command line argument +branch_name=$1 +# Parse optional flags +while [[ $# -gt 1 ]]; do + case "$2" in + -r|--reset_service) + reset=true + ;; + -n|--notify) + notify=true + recipient="$3" + shift + ;; + *) + echo "Error: Invalid option $2" + echo_format + exit 1 + ;; + esac + shift +done + +if [ "$notify" = true ] && [ -z "$recipient" ]; then + echo_format + exit 1 +fi + +echo "Branch: $branch_name"; +echo "Reset: $reset"; +echo "Notify: $notify $recipient"; + +# 1. 
Pull updated code from GitHub deployment branch (keep second checkout in case its already defined for some weird reason) +git checkout -b $branch_name --track origin/$branch_name +git checkout $branch_name +git fetch origin $branch_name +git pull origin $branch_name +git fetch origin --tags + +# 2. Bring the service down +if [ "$reset" = true ]; then + sudo /bin/systemctl stop jwql.service +fi + +# 3. Install jwql +pip install -e .. + +# 4. Merge Any Migrations +python ./website/manage.py migrate + +# 5. Bring the service back up +if [ "$reset" = true ]; then + sudo /bin/systemctl start jwql.service +fi + +# 6. Initialize any new databases that have been added +python ./database/database_interface.py + +# 7. Send out notification email +if [ "$notify" = true ] && [ -n "$recipient" ]; then + subject="JWQL $branch_name Released" + message_content="Hello, A new version of JWQL ($branch_name) has just been released. Visit https://github.com/spacetelescope/jwql/releases for more information." + echo "$message_content" | mail -s "$subject" "$recipient" + echo "Notification Email Sent" +fi \ No newline at end of file diff --git a/jwql/tests/test_api_views.py b/jwql/tests/test_api_views.py index e3a2d1ca1..7b0994536 100644 --- a/jwql/tests/test_api_views.py +++ b/jwql/tests/test_api_views.py @@ -29,9 +29,8 @@ from jwql.utils.utils import get_base_url from jwql.utils.constants import JWST_INSTRUMENT_NAMES +from jwql.utils.constants import ON_GITHUB_ACTIONS -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') urls = [] diff --git a/jwql/tests/test_bad_pixel_monitor.py b/jwql/tests/test_bad_pixel_monitor.py index 6446570b6..b3cae5fc3 100644 --- a/jwql/tests/test_bad_pixel_monitor.py +++ b/jwql/tests/test_bad_pixel_monitor.py @@ -32,9 +32,7 @@ from jwql.database.database_interface import FGSBadPixelQueryHistory, FGSBadPixelStats from jwql.instrument_monitors.common_monitors import bad_pixel_monitor from jwql.tests.resources import has_test_db - -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS def test_bad_map_to_list(): @@ -163,6 +161,7 @@ def test_get_possible_apertures(instrument, expected_list): assert ap_list == expected_list +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_identify_tables(): """Be sure the correct database tables are identified """ diff --git a/jwql/tests/test_bias_monitor.py b/jwql/tests/test_bias_monitor.py index 8becc2558..e5335ea34 100644 --- a/jwql/tests/test_bias_monitor.py +++ b/jwql/tests/test_bias_monitor.py @@ -28,8 +28,7 @@ from jwql.instrument_monitors.common_monitors import bias_monitor from jwql.tests.resources import has_test_db from jwql.utils.utils import get_config - -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS def test_collapse_image(): @@ -65,7 +64,7 @@ def test_extract_zeroth_group(): # Extract the zeroth group using the bias monitor # nosec comment added to ignore bandit security check output_filename = monitor.extract_zeroth_group(filename) - os.chmod(output_filename, 508) # nosec + os.chmod(output_filename, 508) # nosec data = fits.getdata(output_filename, 'SCI')[0, 0, :, :] # Remove the copied test file and its zeroth group 
file so this test can be properly repeated @@ -96,6 +95,7 @@ def test_get_amp_medians(): assert amp_medians == amp_medians_truth +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_identify_tables(): """Be sure the correct database tables are identified""" diff --git a/jwql/tests/test_cosmic_ray_monitor.py b/jwql/tests/test_cosmic_ray_monitor.py index c9d590a3b..dfe27e4e2 100644 --- a/jwql/tests/test_cosmic_ray_monitor.py +++ b/jwql/tests/test_cosmic_ray_monitor.py @@ -26,8 +26,7 @@ from jwql.instrument_monitors.common_monitors.cosmic_ray_monitor import CosmicRay from jwql.database.database_interface import MIRICosmicRayQueryHistory from jwql.utils.utils import get_config - -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS def define_test_data(nints): diff --git a/jwql/tests/test_dark_monitor.py b/jwql/tests/test_dark_monitor.py index ef206daba..ab869cc06 100644 --- a/jwql/tests/test_dark_monitor.py +++ b/jwql/tests/test_dark_monitor.py @@ -30,8 +30,7 @@ from jwql.utils.monitor_utils import mast_query_darks from jwql.utils.constants import DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME from jwql.utils.utils import get_config - -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS def generate_data_for_file_splitting_test(): diff --git a/jwql/tests/test_data_containers.py b/jwql/tests/test_data_containers.py index 6cb278c5e..7c4f68401 100644 --- a/jwql/tests/test_data_containers.py +++ b/jwql/tests/test_data_containers.py @@ -31,20 +31,21 @@ import pandas as pd import pytest +from jwql.utils.constants import ON_GITHUB_ACTIONS + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") # Skip testing this module if on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') -from jwql.website.apps.jwql import data_containers -from jwql.tests.resources import ( - MockSessionFileAnomaly, MockSessionGroupAnomaly, - MockGetRequest, MockPostRequest) -from jwql.utils import constants +from jwql.website.apps.jwql import data_containers # noqa: E402 (module level import not at top of file) +from jwql.tests.resources import MockSessionFileAnomaly, MockSessionGroupAnomaly # noqa: E402 (module level import not at top of file) +from jwql.tests.resources import MockGetRequest, MockPostRequest # noqa: E402 (module level import not at top of file) +from jwql.utils import constants # noqa: E402 (module level import not at top of file) if not ON_GITHUB_ACTIONS: - from jwql.utils.utils import get_config - + from jwql.utils.utils import get_config # noqa: E402 (module level import not at top of file) + from jwql.website.apps.jwql.models import RootFileInfo + @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') def test_build_table(): tab = data_containers.build_table('filesystem_general') @@ -127,6 +128,58 @@ def test_get_acknowledgements(): assert len(acknowledgements) > 0 +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') +def test_get_additional_exposure_info(): + """Tests ``get_additional_exposure_info`` function.""" + # Test an exposure-level case + group_root = 'jw01068002001_02102_00008' + image_info = data_containers.get_image_info(group_root) + root_file_info = 
RootFileInfo.objects.filter(root_name__startswith=group_root) + basic, additional = data_containers.get_additional_exposure_info(root_file_info, image_info) + expected_basic = {'exp_type': 'NRC_IMAGE', + 'category': 'COM', + 'visit_status': 'SUCCESSFUL', + 'subarray': 'SUB320', + 'pupil': 'CLEAR'} + # We can only test a subset of the keys in additional, since things like the pipeline version, + # crds context, etc can change over time. + expected_additional = {'READPATT': 'RAPID', + 'TITLE': 'NIRCam Subarray-Mode Commissioning, CAR NIRCam-019', + 'NGROUPS': 10, + 'PI_NAME': 'Hilbert, Bryan', + 'NINTS': 10, + 'TARGNAME': 'GP2-JMAG14-STAR-OFFSET', + 'EXPTIME': 106.904, + 'EXPSTART': 59714.6163261875} + for key in expected_basic: + assert basic[key] == expected_basic[key] + for key in expected_additional: + assert additional[key] == expected_additional[key] + + # Test an image-level case + file_root = 'jw01022016001_03101_00001_nrs1' + image_info = data_containers.get_image_info(file_root) + root_file_info = RootFileInfo.objects.get(root_name=file_root) + basic, additional = data_containers.get_additional_exposure_info(root_file_info, image_info) + expected_basic = {'exp_type': 'NRS_IFU', + 'category': 'COM', + 'visit_status': 'SUCCESSFUL', + 'subarray': 'FULL', + 'filter': 'F100LP', + 'grating': 'G140H'} + expected_additional = {'READPATT': 'NRSRAPID', + 'TITLE': 'CAR FGS-017 Straylight for Moving Targets (All SIs)', + 'NGROUPS': 13, + 'PI_NAME': 'Stansberry, John A.', + 'NINTS': 2, + 'TARGNAME': 'JUPITER', + 'EXPTIME': 279.156, + 'EXPSTART': 59764.77659749352} + assert basic == expected_basic + for key in expected_additional: + assert additional[key] == expected_additional[key] + + @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_all_proposals(): """Tests the ``get_all_proposals`` function.""" @@ -142,17 +195,11 @@ def test_get_all_proposals(): (['uncal', 'rate', 'bad'], {'bad'})), (False, ['rate', 'uncal', 'bad'], ['uncal', 'rate', 'bad']), - (True, - ['rate', 'uncal', 'bad', - 'o006_crfints', 'o001_crf'], - (['uncal', 'rate', 'o001_crf', - 'o006_crfints', 'bad'], {'bad'})), - (False, - ['rate', 'uncal', 'bad', - 'o006_crfints', 'o001_crf'], - ['uncal', 'rate', 'o001_crf', - 'o006_crfints', 'bad']), - ]) + (True, ['rate', 'uncal', 'bad', 'o006_crfints', 'o001_crf'], + (['uncal', 'rate', 'o001_crf', 'o006_crfints', 'bad'], {'bad'})), + (False, ['rate', 'uncal', 'bad', 'o006_crfints', 'o001_crf'], + ['uncal', 'rate', 'o001_crf', 'o006_crfints', 'bad'])]) + def test_get_available_suffixes(untracked, input_suffixes, expected): result = data_containers.get_available_suffixes( input_suffixes, return_untracked=untracked) @@ -292,6 +339,7 @@ def test_get_anomaly_form_post_group(mocker): assert update_mock.call_count == 2 """ +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') def test_get_dashboard_components(): request = MockPostRequest() diff --git a/jwql/tests/test_database_interface.py b/jwql/tests/test_database_interface.py index 08890a4f1..9719d9ab4 100755 --- a/jwql/tests/test_database_interface.py +++ b/jwql/tests/test_database_interface.py @@ -29,9 +29,7 @@ from jwql.database import database_interface as di from jwql.tests.resources import has_test_db from jwql.utils.utils import get_config - -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS 
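The new tests above query the RootFileInfo Django model directly, the same ORM style the monitors in this diff switch to in place of SQLAlchemy sessions. A hedged sketch of those lookup patterns, assuming Django has already been configured via django.setup() as in the monitor modules; the helper functions are illustrative only, not jwql API:

```python
# Illustrative helpers restating the query patterns used in this diff,
# assuming django.setup() has already configured the jwql settings module.
from jwql.website.apps.jwql.models import RootFileInfo

def exposure_root_files(group_root):
    # filter() returns a queryset of every root file belonging to the exposure
    return RootFileInfo.objects.filter(root_name__startswith=group_root)

def single_root_file(file_root):
    # get() returns one model instance and raises if zero or several match
    return RootFileInfo.objects.get(root_name=file_root)

def file_exists_in_database(stats_table, filename):
    # the monitors' replacement for the old session.query(...).filter(...) check
    results = stats_table.objects.filter(uncal_filename__iexact=filename).values()
    return len(results) != 0
```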
@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to development database server.') diff --git a/jwql/tests/test_edb.py b/jwql/tests/test_edb.py index a7da24be5..bdbb4b179 100644 --- a/jwql/tests/test_edb.py +++ b/jwql/tests/test_edb.py @@ -28,9 +28,7 @@ import pytest from jwql.edb import engineering_database as ed - -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS def test_add(): @@ -243,8 +241,8 @@ def test_multiplication(): info['description'] = 'Voltage at some place' mnemonic1 = ed.EdbMnemonic('TEST_VOLTAGE', Time('2021-12-18T07:20:00'), Time('2021-12-18T07:30:00'), tab, {}, info, blocks=blocks1) mnemonic1.meta = {'Count': 1, - 'TlmMnemonics': [{'TlmMnemonic': 'TEST_VOLTAGE', - 'AllPoints': 1}]} + 'TlmMnemonics': [{'TlmMnemonic': 'TEST_VOLTAGE', + 'AllPoints': 1}]} dates2 = np.array([datetime(2021, 12, 18, 7, n, 10) for n in range(20, 30)]) data2 = np.array([15, 15, 15, 19, 19, 19, 19, 19, 12, 12]) @@ -258,8 +256,8 @@ def test_multiplication(): info['description'] = 'Current at some place' mnemonic2 = ed.EdbMnemonic('TEST_CURRENT', Time('2021-12-18T07:20:10'), Time('2021-12-18T07:30:10'), tab, {}, info, blocks=blocks2) mnemonic2.meta = {'Count': 1, - 'TlmMnemonics': [{'TlmMnemonic': 'TEST_CURRENT', - 'AllPoints': 1}]} + 'TlmMnemonics': [{'TlmMnemonic': 'TEST_CURRENT', + 'AllPoints': 1}]} prod = mnemonic1 * mnemonic2 assert np.allclose(prod.data["euvalues"].data, diff --git a/jwql/tests/test_edb_telemetry_monitor.py b/jwql/tests/test_edb_telemetry_monitor.py index f7a91e5d3..8ddd21f6e 100644 --- a/jwql/tests/test_edb_telemetry_monitor.py +++ b/jwql/tests/test_edb_telemetry_monitor.py @@ -38,9 +38,6 @@ from jwql.tests.resources import has_test_db from jwql.utils.constants import MIRI_POS_RATIO_VALUES -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - def test_add_every_change_history(): """Test that every_change data is correctly combined with an existing @@ -107,13 +104,13 @@ def test_conditions(): """Test the extraction of data using the ```equal``` class. 
""" # Create data for mnemonic of interest - #start_time = Time('2022-02-02') - #end_time = Time('2022-02-03') + # start_time = Time('2022-02-02') + # end_time = Time('2022-02-03') start_time = datetime.datetime(2022, 2, 2) end_time = datetime.datetime(2022, 2, 3) temp_data = Table() temp_data["euvalues"] = np.array([35., 35.1, 35.2, 36., 36.1, 36.2, 37.1, 37., 36., 36.]) - #temp_data["dates"] = np.array([Time('2022-02-02') + TimeDelta(0.1 * i, format='jd') for i in range(10)]) + # temp_data["dates"] = np.array([Time('2022-02-02') + TimeDelta(0.1 * i, format='jd') for i in range(10)]) temp_data["dates"] = np.array([start_time + datetime.timedelta(days=0.1 * i) for i in range(10)]) meta = {} info = {} @@ -267,7 +264,7 @@ def test_organize_every_change(): """ basetime = datetime.datetime(2021, 4, 6, 14, 0, 0) dates = np.array([basetime + datetime.timedelta(seconds=600 * i) for i in range(20)]) - #dates = np.array([basetime + TimeDelta(600 * i, format='sec') for i in range(20)]) + # dates = np.array([basetime + TimeDelta(600 * i, format='sec') for i in range(20)]) vals = np.array([300.5, 310.3, -250.5, -500.9, 32.2, 300.1, 310.8, -250.2, -500.2, 32.7, 300.2, 310.4, -250.6, -500.8, 32.3, diff --git a/jwql/tests/test_instrument_properties.py b/jwql/tests/test_instrument_properties.py index 4176cd69c..4a072c7d1 100644 --- a/jwql/tests/test_instrument_properties.py +++ b/jwql/tests/test_instrument_properties.py @@ -23,8 +23,7 @@ import numpy as np from jwql.utils import instrument_properties - -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS if not ON_GITHUB_ACTIONS: from jwql.utils.utils import get_config diff --git a/jwql/tests/test_loading_times.py b/jwql/tests/test_loading_times.py index 7c83fed72..02fa6ac24 100644 --- a/jwql/tests/test_loading_times.py +++ b/jwql/tests/test_loading_times.py @@ -25,13 +25,11 @@ import urllib.request from jwql.utils.constants import MONITORS +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_base_url TIME_CONSTRAINT = 30 # seconds -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - urls = [] # Generic URLs diff --git a/jwql/tests/test_logging_functions.py b/jwql/tests/test_logging_functions.py index e68a65ee8..53913a0d0 100644 --- a/jwql/tests/test_logging_functions.py +++ b/jwql/tests/test_logging_functions.py @@ -25,11 +25,9 @@ from jwql.utils import logging_functions from jwql.utils.logging_functions import configure_logging, log_fail, log_info, make_log_file +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_config -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - @log_fail @log_info diff --git a/jwql/tests/test_mast_utils.py b/jwql/tests/test_mast_utils.py index 74c3f0328..d32c2c9b2 100755 --- a/jwql/tests/test_mast_utils.py +++ b/jwql/tests/test_mast_utils.py @@ -22,11 +22,10 @@ from astroquery.mast import Mast from jwql.utils.constants import JWST_INSTRUMENT_NAMES +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils import mast_utils as mu from jwql.utils.utils import get_config -# Temporary until JWST operations: switch to test string for MAST request URL -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' 
in os.path.expanduser('~') if not ON_GITHUB_ACTIONS: Mast._portal_api_connection.MAST_REQUEST_URL = get_config()['mast_request_url'] @@ -34,9 +33,9 @@ def test_astroquery_mast(): """Test if the astroquery.mast service can complete a request""" service = 'Mast.Caom.Filtered' - params = {'columns': 'COUNT_BIG(*)', + params = {'columns': 'COUNT_BIG(*)', 'filters': [{"paramName": "obs_collection", - "values": ["JWST"]},], + "values": ["JWST"]}, ], 'pagesize': 1, 'page': 1} response = Mast.service_request_async(service, params) result = response[0].json() diff --git a/jwql/tests/test_msata_monitor.py b/jwql/tests/test_msata_monitor.py index fc0286d6c..c4e7b41ad 100644 --- a/jwql/tests/test_msata_monitor.py +++ b/jwql/tests/test_msata_monitor.py @@ -31,11 +31,10 @@ from jwql.instrument_monitors.nirspec_monitors.ta_monitors.msata_monitor import MSATA from jwql.database.database_interface import NIRSpecTAQueryHistory +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_config, ensure_dir_exists from jwql.utils import monitor_utils, permissions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - # define the type of a Bokeh plot type bokeh_plot_type = type(figure()) @@ -53,38 +52,38 @@ def define_testdata(): msata_data : pandas dataframe """ msata_dict = { - # info taken from main_hdr dict - 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'], - 'date_obs': ['2022-06-22'], - 'visit_id': ['V09999001001P0000000002101'], - 'tafilter': ['F110W'], - 'detector': ['NRS1'], - 'readout': ['NRSRAPID'], - 'subarray': ['FULL'], - # info taken from ta_hdr dict - 'num_refstars': [12], - 'ta_status': ['SUCCESSFUL'], - 'status_rsn': ['-999'], - 'v2halffacet': [-0.27568], - 'v3halffacet': [0.10975], - 'v2msactr': [378.523987], - 'v3msactr': [-428.374481], - 'lsv2offset': [-999.0], - 'lsv3offset': [-999.0], - 'lsoffsetmag': [-999.0], - 'lsrolloffset': [-999.0], - 'lsv2sigma': [-999.0], - 'lsv3sigma': [-999.0], - 'lsiterations': [-999], - 'guidestarid': ['-999'], - 'guidestarx': [-999.0], - 'guidestary': [-999.0], - 'guidestarroll': [-999.0], - 'samx': [-999.0], - 'samy': [-999.0], - 'samroll': [-999.0], - 'stars_in_fit': [-999] - } + # info taken from main_hdr dict + 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'], + 'date_obs': ['2022-06-22'], + 'visit_id': ['V09999001001P0000000002101'], + 'tafilter': ['F110W'], + 'detector': ['NRS1'], + 'readout': ['NRSRAPID'], + 'subarray': ['FULL'], + # info taken from ta_hdr dict + 'num_refstars': [12], + 'ta_status': ['SUCCESSFUL'], + 'status_rsn': ['-999'], + 'v2halffacet': [-0.27568], + 'v3halffacet': [0.10975], + 'v2msactr': [378.523987], + 'v3msactr': [-428.374481], + 'lsv2offset': [-999.0], + 'lsv3offset': [-999.0], + 'lsoffsetmag': [-999.0], + 'lsrolloffset': [-999.0], + 'lsv2sigma': [-999.0], + 'lsv3sigma': [-999.0], + 'lsiterations': [-999], + 'guidestarid': ['-999'], + 'guidestarx': [-999.0], + 'guidestary': [-999.0], + 'guidestarroll': [-999.0], + 'samx': [-999.0], + 'samy': [-999.0], + 'samroll': [-999.0], + 'stars_in_fit': [-999] + } # add info from ta_table num_refstars = msata_dict['num_refstars'][0] msata_dict['box_peak_value'] = [[8000 for _ in range(num_refstars)]] diff --git a/jwql/tests/test_permissions.py b/jwql/tests/test_permissions.py index dd8878294..0fc6480d5 100755 --- a/jwql/tests/test_permissions.py +++ b/jwql/tests/test_permissions.py @@ -23,15 +23,11 @@ import os import pytest -from jwql.utils.permissions import set_permissions, 
has_permissions, \ - get_owner_string, get_group_string +from jwql.utils.permissions import set_permissions, has_permissions, get_owner_string, get_group_string # directory to be created and populated during tests running TEST_DIRECTORY = os.path.join(os.environ['HOME'], 'permission_test') -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - @pytest.fixture(scope="module") def test_directory(test_dir=TEST_DIRECTORY): diff --git a/jwql/tests/test_pipeline_tools.py b/jwql/tests/test_pipeline_tools.py index a79d687c8..ff072e0fb 100644 --- a/jwql/tests/test_pipeline_tools.py +++ b/jwql/tests/test_pipeline_tools.py @@ -24,13 +24,10 @@ import numpy as np from jwql.instrument_monitors import pipeline_tools +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_config -# Determine if tests are being run on github actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - - @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_completed_pipeline_steps(): """Test that the list of completed pipeline steps for a file is diff --git a/jwql/tests/test_plotting.py b/jwql/tests/test_plotting.py index 18caafa9f..5432659d5 100755 --- a/jwql/tests/test_plotting.py +++ b/jwql/tests/test_plotting.py @@ -43,7 +43,7 @@ def test_bar_chart(): # And generate a figure plt = bar_chart(data, 'index') - assert str(type(plt)) == "" + assert str(type(plt)) == "" def test_bokeh_version(): diff --git a/jwql/tests/test_preview_image.py b/jwql/tests/test_preview_image.py index ce0a7de5a..001371c96 100644 --- a/jwql/tests/test_preview_image.py +++ b/jwql/tests/test_preview_image.py @@ -30,16 +30,9 @@ from jwst.datamodels import dqflags from jwql.utils.preview_image import PreviewImage, crop_to_subarray +from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS from jwql.utils.utils import get_config, ensure_dir_exists -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - -# Determine if the code is being run as part of a Readthedocs build -ON_READTHEDOCS = False -if 'READTHEDOCS' in os.environ: - ON_READTHEDOCS = os.environ['READTHEDOCS'] - def test_crop_to_subarray(): """Test that the code correctly crops larger arrays down to diff --git a/jwql/tests/test_protect_module.py b/jwql/tests/test_protect_module.py index 3bcafa5c1..28af6cd86 100644 --- a/jwql/tests/test_protect_module.py +++ b/jwql/tests/test_protect_module.py @@ -21,11 +21,9 @@ from jwql.utils import protect_module as pm from pytest import fixture, mark +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.protect_module import lock_module, _PID_LOCKFILE_KEY -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - @fixture def module_lock(): diff --git a/jwql/tests/test_readnoise_monitor.py b/jwql/tests/test_readnoise_monitor.py index d00607dd5..77fdce3d8 100644 --- a/jwql/tests/test_readnoise_monitor.py +++ b/jwql/tests/test_readnoise_monitor.py @@ -27,10 +27,9 @@ from jwql.database.database_interface import NIRCamReadnoiseQueryHistory, NIRCamReadnoiseStats, session from jwql.instrument_monitors.common_monitors import readnoise_monitor from jwql.tests.resources import has_test_db +from 
jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_config -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - def test_determine_pipeline_steps(): """Test the correct pipeline steps are called""" @@ -92,6 +91,7 @@ def test_get_metadata(): assert monitor.expstart == '2016-01-18T04:35:14.523' +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_identify_tables(): """Be sure the correct database tables are identified""" diff --git a/jwql/tests/test_utils.py b/jwql/tests/test_utils.py index cd50c6a01..ecc34790b 100644 --- a/jwql/tests/test_utils.py +++ b/jwql/tests/test_utils.py @@ -26,13 +26,10 @@ from bokeh.plotting import figure import numpy as np +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import copy_files, get_config, filename_parser, filesystem_path, save_png, _validate_config -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - - FILENAME_PARSER_TEST_DATA = [ # Test full path diff --git a/jwql/tests/test_wata_monitor.py b/jwql/tests/test_wata_monitor.py index 0b9099beb..91624147a 100644 --- a/jwql/tests/test_wata_monitor.py +++ b/jwql/tests/test_wata_monitor.py @@ -28,11 +28,10 @@ from jwql.instrument_monitors.nirspec_monitors.ta_monitors.wata_monitor import WATA from jwql.database.database_interface import NIRSpecTAQueryHistory +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_config, ensure_dir_exists from jwql.utils import monitor_utils, permissions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - # define the type of a Bokeh plot type bokeh_plot_type = type(figure()) @@ -50,48 +49,48 @@ def define_testdata(): wata_data : pandas dataframe """ wata_dict = { - # info taken from main_hdr dict - 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'], - 'date_obs': ['2022-06-22'], - 'visit_id': ['V09999001001P0000000002101'], - 'tafilter': ['F110W'], - 'detector': ['NRS1'], - 'readout': ['NRSRAPID'], - 'subarray': ['FULL'], - # info taken from ta_hdr dict - 'ta_status': ['SUCCESSFUL'], - 'status_reason': ['-999'], - 'star_name': ['-999'], - 'star_ra': [-999.0], - 'star_dec': [-999.0], - 'star_mag': [-999.0], - 'star_catalog': [-999], - 'planned_v2': [-999.0], - 'planned_v3': [-999.0], - 'stamp_start_col': [-999], - 'stamp_start_row': [-999], - 'star_detector': ['-999'], - 'max_val_box': [-999.0], - 'max_val_box_col': [-999.0], - 'max_val_box_row': [-999.0], - 'iterations': [-999], - 'corr_col': [-999.0], - 'corr_row': [-999.0], - 'stamp_final_col': [-999.0], - 'stamp_final_row': [-999.0], - 'detector_final_col': [-999.0], - 'detector_final_row': [-999.0], - 'final_sci_x': [-999.0], - 'final_sci_y': [-999.0], - 'measured_v2': [-999.0], - 'measured_v3': [-999.0], - 'ref_v2': [-999.0], - 'ref_v3': [-999.0], - 'v2_offset': [-999.0], - 'v3_offset': [-999.0], - 'sam_x': [-999.0], - 'sam_y': [-999.0], - } + # info taken from main_hdr dict + 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'], + 'date_obs': ['2022-06-22'], + 'visit_id': ['V09999001001P0000000002101'], + 'tafilter': ['F110W'], + 'detector': ['NRS1'], + 'readout': ['NRSRAPID'], + 'subarray': ['FULL'], + # info taken from ta_hdr dict + 'ta_status': ['SUCCESSFUL'], + 'status_reason': ['-999'], + 'star_name': ['-999'], + 'star_ra': [-999.0], + 'star_dec': 
[-999.0], + 'star_mag': [-999.0], + 'star_catalog': [-999], + 'planned_v2': [-999.0], + 'planned_v3': [-999.0], + 'stamp_start_col': [-999], + 'stamp_start_row': [-999], + 'star_detector': ['-999'], + 'max_val_box': [-999.0], + 'max_val_box_col': [-999.0], + 'max_val_box_row': [-999.0], + 'iterations': [-999], + 'corr_col': [-999.0], + 'corr_row': [-999.0], + 'stamp_final_col': [-999.0], + 'stamp_final_row': [-999.0], + 'detector_final_col': [-999.0], + 'detector_final_row': [-999.0], + 'final_sci_x': [-999.0], + 'final_sci_y': [-999.0], + 'measured_v2': [-999.0], + 'measured_v3': [-999.0], + 'ref_v2': [-999.0], + 'ref_v3': [-999.0], + 'v2_offset': [-999.0], + 'v3_offset': [-999.0], + 'sam_x': [-999.0], + 'sam_y': [-999.0], + } # create the additional arrays bool_status, status_colors = [], [] for tas, do_str in zip(wata_dict['ta_status'], wata_dict['date_obs']): diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py index 5a3aa9407..a797fe6a5 100644 --- a/jwql/utils/constants.py +++ b/jwql/utils/constants.py @@ -28,7 +28,9 @@ ``utils.py`` """ +import asdf import inflection +import os # Each amplifier is represented by 2 tuples, the first for x coordinates # and the second for y coordinates. Within each tuple are value for @@ -736,6 +738,12 @@ # Possible suffix types for AMI files NIRISS_AMI_SUFFIX_TYPES = ["amiavg", "aminorm", "ami", "psf-amiavg"] +# Determine if the code is being run as part of CI checking on github +ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') + +# Determine if the code is being run as part of a Readthedocs build +ON_READTHEDOCS = os.environ.get('READTHEDOCS', False) + # Base name for the file listing the preview images for a given instrument. # The complete name will have "_{instrument.lower}.txt" added to the end of this. 
PREVIEW_IMAGE_LISTFILE = "preview_image_inventory" @@ -929,57 +937,13 @@ class QueryConfigKeys: "SUBGRISMSTRIPE256", ] +schema = asdf.schema.load_schema("http://stsci.edu/schemas/jwst_datamodel/subarray.schema") SUBARRAYS_PER_INSTRUMENT = { - "nircam": [ - "FULL", - "FULLP", - "SUB640", - "SUB320", - "SUB160", - "SUB400P", - "SUB160P", - "SUB64P", - "SUB32TATS", - "SUB640A210R", - "SUB640ASWB", - "SUB320A335R", - "SUB320A430R", - "SUB320ALWB", - "SUBGRISM256", - "SUBGRISM128", - "SUBGRISM64", - "SUB32TATSGRISM", - ], - "niriss": [ - "FULL", - "SUBSTRIP96", - "SUBSTRIP256", - "SUB80", - "SUB64", - "SUB128", - "SUB256", - "WFSS64R", - "WFSS128R", - "WFSS64C", - "WFSS128C", - "SUBAMPCAL", - "SUBTAAMI", - "SUBTASOSS", - ], - "nirspec": [], - "miri": [ - "BRIGHTSKY", - "FULL", - "MASK1065", - "MASK1140", - "MASK1550", - "MASKLYOT", - "SLITLESSPRISM", - "SUB64", - "SUB128", - "SUB256", - ], - "fgs": [], + "nircam": ['FULL'] + sorted(schema["properties"]["meta"]["properties"]["subarray"]["properties"]["name"]["anyOf"][2]['enum']), + "niriss": ['FULL'] + sorted(schema["properties"]["meta"]["properties"]["subarray"]["properties"]["name"]["anyOf"][4]['enum']), + "nirspec": ['FULL'] + sorted(schema["properties"]["meta"]["properties"]["subarray"]["properties"]["name"]["anyOf"][6]['enum']), + "miri": ['FULL'] + sorted(schema["properties"]["meta"]["properties"]["subarray"]["properties"]["name"]["anyOf"][1]['enum']), + "fgs": ['FULL'] + sorted(schema["properties"]["meta"]["properties"]["subarray"]["properties"]["name"]["anyOf"][0]['enum']) } # Filename suffixes that need to include the association value in the suffix in @@ -1000,6 +964,15 @@ class QueryConfigKeys: # Possible suffix types for time-series exposures TIME_SERIES_SUFFIX_TYPES = ["phot", "whtlt"] +# Instrument Documentation Links +URL_DICT = { + "fgs": "https://jwst-docs.stsci.edu/jwst-observatory-hardware/jwst-fine-guidance-sensor", + "miri": "https://jwst-docs.stsci.edu/jwst-mid-infrared-instrument", + "niriss": "https://jwst-docs.stsci.edu/jwst-near-infrared-imager-and-slitless-spectrograph", + "nirspec": "https://jwst-docs.stsci.edu/jwst-near-infrared-spectrograph", + "nircam": "https://jwst-docs.stsci.edu/jwst-near-infrared-camera", +} + # Possible suffix types for WFS&C files WFSC_SUFFIX_TYPES = ["wfscmb"] @@ -1013,12 +986,3 @@ class QueryConfigKeys: + WFSC_SUFFIX_TYPES + MSA_SUFFIX ) - -# Instrument Documentation Links -URL_DICT = { - "fgs": "https://jwst-docs.stsci.edu/jwst-observatory-hardware/jwst-fine-guidance-sensor", - "miri": "https://jwst-docs.stsci.edu/jwst-mid-infrared-instrument", - "niriss": "https://jwst-docs.stsci.edu/jwst-near-infrared-imager-and-slitless-spectrograph", - "nirspec": "https://jwst-docs.stsci.edu/jwst-near-infrared-spectrograph", - "nircam": "https://jwst-docs.stsci.edu/jwst-near-infrared-camera", -} diff --git a/jwql/utils/interactive_preview_image.py b/jwql/utils/interactive_preview_image.py index da9563770..1ffd2ab7b 100644 --- a/jwql/utils/interactive_preview_image.py +++ b/jwql/utils/interactive_preview_image.py @@ -168,7 +168,7 @@ def create_bokeh_image(self): plot_width = min_dim fig = figure(tools='pan,reset,save', match_aspect=True, - plot_width=plot_width, plot_height=plot_height) + width=plot_width, height=plot_height) fig.add_tools(BoxZoomTool(match_aspect=True)) fig.add_tools(WheelZoomTool(zoom_on_axis=False)) @@ -256,7 +256,7 @@ def line_plots(self, main_figure): for index_direction in directions: if index_direction == 'x': # column plots - fig = figure(plot_width=200, 
plot_height=main_figure.height, tools='', + fig = figure(width=200, height=main_figure.height, tools='', y_axis_location='right', margin=(0, 0, 0, 30)) fig.toolbar.logo = None @@ -280,7 +280,7 @@ def line_plots(self, main_figure): else: # row plots - fig = figure(plot_height=200, plot_width=main_figure.width, tools='') + fig = figure(height=200, width=main_figure.width, tools='') fig.toolbar.logo = None fig.y_range = Range1d() @@ -387,7 +387,7 @@ def line_plots(self, main_figure): idx = line[i].data_source.data['x']; } for (let j=0; j < data.length; j++) { - if (idx[j] >= match_range.start + if (idx[j] >= match_range.start && idx[j] <= match_range.end) { if (Number.isFinite(data[j])) { min_val = Math.min(data[j], min_val); @@ -444,7 +444,7 @@ def add_hover_tool(self, source, images): hover_callback = CustomJS(args={'s': source, 'd': hover_div, 'u': self.signal_units, 'dq': is_dq}, code=""" const idx = cb_data.index.image_indices; - if (idx.length > 0) { + if (idx.length > 0) { var x = idx[0].dim1; var y = idx[0].dim2; var flat = idx[0].flat_index; @@ -471,25 +471,25 @@ def add_hover_tool(self, source, images): } label = "Value (" + u + ")"; } - d.text = "
[hover-tooltip markup not recoverable here: the concatenated string assembles an HTML block listing "Pixel Value", "(x, y) =" with the hovered pixel coordinates, optional "RA (deg)=" / "Dec (deg)=" rows, and finally the labeled value; the removed and added lines in this hunk appear to differ only in trailing whitespace]
"; } else { diff --git a/jwql/utils/mast_utils.py b/jwql/utils/mast_utils.py index 6524a8c6a..446645800 100644 --- a/jwql/utils/mast_utils.py +++ b/jwql/utils/mast_utils.py @@ -24,12 +24,12 @@ import pandas as pd from jwql.utils.constants import JWST_DATAPRODUCTS, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, MAST_QUERY_LIMIT +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.permissions import set_permissions from jwql.utils.utils import ensure_dir_exists, get_config from jwql.utils.plotting import bar_chart -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') if not ON_GITHUB_ACTIONS: Mast._portal_api_connection.MAST_REQUEST_URL = get_config()['mast_request_url'] diff --git a/jwql/utils/monitor_utils.py b/jwql/utils/monitor_utils.py index 249fb0bb3..536ac1ad4 100644 --- a/jwql/utils/monitor_utils.py +++ b/jwql/utils/monitor_utils.py @@ -24,6 +24,7 @@ from jwql.database.database_interface import Monitor, engine from jwql.utils.constants import ASIC_TEMPLATES, JWST_DATAPRODUCTS, MAST_QUERY_LIMIT +from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS from jwql.utils.logging_functions import configure_logging, get_log_status from jwql.utils import mast_utils from jwql.utils.utils import filename_parser @@ -33,12 +34,6 @@ # a MAST query. Mast._portal_api_connection.PAGESIZE = MAST_QUERY_LIMIT -# Determine if the code is being run as part of a github action or Readthedocs build -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') -ON_READTHEDOCS = False -if 'READTHEDOCS' in os.environ: # pragma: no cover - ON_READTHEDOCS = os.environ['READTHEDOCS'] - if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: # These lines are needed in order to use the Django models in a standalone # script (as opposed to code run as a result of a webpage request). 
If these diff --git a/jwql/utils/plotting.py b/jwql/utils/plotting.py index 0e0dd6d06..4adba0655 100755 --- a/jwql/utils/plotting.py +++ b/jwql/utils/plotting.py @@ -70,7 +70,7 @@ def bar_chart(dataframe, groupcol, datacols=None, **kwargs): # Make the figure hover = HoverTool(tooltips=[('count', '@counts')]) - plt = figure(x_range=FactorRange(*x), plot_height=250, tools=[hover], + plt = figure(x_range=FactorRange(*x), height=250, tools=[hover], **kwargs) plt.vbar(x='x', top='counts', width=0.9, source=source, line_color="white", fill_color=factor_cmap('x', palette=Category20c[colors], diff --git a/jwql/utils/preview_image.py b/jwql/utils/preview_image.py index f86c10272..088996ab9 100755 --- a/jwql/utils/preview_image.py +++ b/jwql/utils/preview_image.py @@ -43,6 +43,7 @@ import numpy as np from jwql.utils import permissions +from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS from jwql.utils.utils import get_config # Use the 'Agg' backend to avoid invoking $DISPLAY @@ -52,17 +53,9 @@ import matplotlib.colors as colors # noqa from matplotlib.ticker import AutoMinorLocator # noqa -# Only import jwst if not running from readthedocs -# Determine if the code is being run as part of a Readthedocs build -ON_READTHEDOCS = False -if 'READTHEDOCS' in os.environ: - ON_READTHEDOCS = os.environ['READTHEDOCS'] - if not ON_READTHEDOCS: from jwst.datamodels import dqflags -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: CONFIGS = get_config() @@ -763,6 +756,7 @@ def expand_for_i2d(array, xdim, ydim): else: return array + def nan_to_zero(image): """Set any pixels with a value of NaN to zero diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py index fd5ecbabd..b5d2e5a46 100644 --- a/jwql/utils/utils.py +++ b/jwql/utils/utils.py @@ -53,12 +53,9 @@ FILE_GUIDESTAR_ATTMPT_LEN_MAX, FILE_OBS_LEN, FILE_PARALLEL_SEQ_ID_LEN, \ FILE_PROG_ID_LEN, FILE_SEG_LEN, FILE_SOURCE_ID_LEN, FILE_SUFFIX_TYPES, \ FILE_TARG_ID_LEN, FILE_VISIT_GRP_LEN, FILE_VISIT_LEN, FILETYPE_WO_STANDARD_SUFFIX, \ - JWST_INSTRUMENT_NAMES_SHORTHAND - + JWST_INSTRUMENT_NAMES_SHORTHAND, ON_GITHUB_ACTIONS __location__ = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - def _validate_config(config_file_dict): """Check that the config.json file contains all the needed entries with @@ -81,7 +78,7 @@ def _validate_config(config_file_dict): "admin_account": {"type": "string"}, "auth_mast": {"type": "string"}, "connection_string": {"type": "string"}, - "database": { + "databases": { "type": "object", "properties": { "engine": {"type": "string"}, @@ -93,7 +90,36 @@ def _validate_config(config_file_dict): }, "required": ['engine', 'name', 'user', 'password', 'host', 'port'] }, - + "django_databases": { + "type": "object", + "properties": { + "default": { + "type": "object", + "properties": { + "ENGINE": {"type": "string"}, + "NAME": {"type": "string"}, + "USER": {"type": "string"}, + "PASSWORD": {"type": "string"}, + "HOST": {"type": "string"}, + "PORT": {"type": "string"} + }, + "required": ['ENGINE', 'NAME', 'USER', 'PASSWORD', 'HOST', 'PORT'] + }, + "monitors": { + "type": "object", + "properties": { + "ENGINE": {"type": "string"}, + "NAME": {"type": "string"}, + "USER": {"type": "string"}, + "PASSWORD": {"type": "string"}, + "HOST": {"type": "string"}, + "PORT": {"type": "string"} + }, + "required": ['ENGINE', 
'NAME', 'USER', 'PASSWORD', 'HOST', 'PORT'] + } + }, + "required": ["default", "monitors"] + }, "jwql_dir": {"type": "string"}, "jwql_version": {"type": "string"}, "server_type": {"type": "string"}, @@ -110,11 +136,11 @@ def _validate_config(config_file_dict): "cores": {"type": "string"} }, # List which entries are needed (all of them) - "required": ["connection_string", "database", "filesystem", - "preview_image_filesystem", "thumbnail_filesystem", - "outputs", "jwql_dir", "admin_account", "log_dir", - "test_dir", "test_data", "setup_file", "auth_mast", - "mast_token", "working"] + "required": ["connection_string", "databases", "django_databases", + "filesystem", "preview_image_filesystem", + "thumbnail_filesystem", "outputs", "jwql_dir", + "admin_account", "log_dir", "test_dir", "test_data", + "setup_file", "auth_mast", "mast_token", "working"] } # Test that the provided config file dict matches the schema diff --git a/jwql/website/apps/jwql/bokeh_containers.py b/jwql/website/apps/jwql/bokeh_containers.py index dad0ad69d..f4569747a 100644 --- a/jwql/website/apps/jwql/bokeh_containers.py +++ b/jwql/website/apps/jwql/bokeh_containers.py @@ -24,7 +24,8 @@ from bokeh.embed import components from bokeh.layouts import layout -from bokeh.models.widgets import Tabs, Panel +from bokeh.models import DatetimeTickFormatter +from bokeh.models.layouts import TabPanel, Tabs from bokeh.plotting import figure, output_file import numpy as np import pysiaf @@ -162,9 +163,9 @@ def cosmic_ray_monitor_tabs(instrument): # Allow figure sizes to scale with window histogram_layout.sizing_mode = "scale_width" # Make sure the sizing is adjustable - histogram_tab = Panel(child=histogram_layout, title="Histogram") + histogram_tab = TabPanel(child=histogram_layout, title="Histogram") line_layout.sizing_mode = "scale_width" # Make sure the sizing is adjustable - line_tab = Panel(child=line_layout, title="Trending") + line_tab = TabPanel(child=line_layout, title="Trending") # Build tabs tabs = Tabs(tabs=[histogram_tab, line_tab]) @@ -199,9 +200,9 @@ def dark_monitor_tabs(instrument): image_layout = standard_monitor_plot_layout(instrument, plots.dark_image_data) # Create a tab for each type of plot - histogram_tab = Panel(child=histogram_layout, title="Dark Rate Histogram") - line_tab = Panel(child=trending_layout, title="Trending") - image_tab = Panel(child=image_layout, title="Mean Dark Image") + histogram_tab = TabPanel(child=histogram_layout, title="Dark Rate Histogram") + line_tab = TabPanel(child=trending_layout, title="Trending") + image_tab = TabPanel(child=image_layout, title="Mean Dark Image") # Build tabs tabs = Tabs(tabs=[histogram_tab, line_tab, image_tab]) @@ -282,10 +283,10 @@ def generic_telemetry_plot(times, values, name, nominal_value=None, yellow_limit if nominal_value is not None: fig.line(times, np.repeat(nominal_value, len(times)), line_dash='dashed') - fig.xaxis.formatter = DatetimeTickFormatter(hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"], + fig.xaxis.formatter = DatetimeTickFormatter(hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) fig.xaxis.major_label_orientation = np.pi / 4 @@ -354,7 +355,7 @@ def readnoise_monitor_tabs(instrument): plots[5:6] ) readnoise_layout.sizing_mode = 'scale_width' - readnoise_tab = Panel(child=readnoise_layout, title=aperture) + readnoise_tab = TabPanel(child=readnoise_layout, title=aperture) tabs.append(readnoise_tab) # Build tabs @@ -401,7 +402,7 @@ def 
standard_monitor_plot_layout(instrument, plots): elif instrument.lower() == 'niriss': full_frame_lists = [ [plots['NIS_CEN']] - ] + ] elif instrument.lower() == 'miri': full_frame_lists = [ [plots['MIRIM_FULL']] diff --git a/jwql/website/apps/jwql/bokeh_dashboard.py b/jwql/website/apps/jwql/bokeh_dashboard.py index 2c282e274..536ef9710 100644 --- a/jwql/website/apps/jwql/bokeh_dashboard.py +++ b/jwql/website/apps/jwql/bokeh_dashboard.py @@ -37,7 +37,7 @@ from bokeh.layouts import column from bokeh.models import Axis, ColumnDataSource, DatetimeTickFormatter, HoverTool, OpenURL, TapTool -from bokeh.models.widgets import Panel, Tabs +from bokeh.models.layouts import TabPanel, Tabs from bokeh.plotting import figure from bokeh.transform import cumsum import numpy as np @@ -49,6 +49,7 @@ from jwql.utils.constants import ANOMALY_CHOICES_PER_INSTRUMENT, FILTERS_PER_INSTRUMENT, JWST_INSTRUMENT_NAMES_MIXEDCASE from jwql.utils.utils import get_base_url, get_config from jwql.website.apps.jwql.data_containers import build_table +from jwql.website.apps.jwql.models import Anomalies def build_table_latest_entry(tablename): @@ -217,7 +218,7 @@ def dashboard_disk_usage(self): # Initialize plot plots[data['shortname']] = figure(tools='pan,box_zoom,wheel_zoom,reset,save', - plot_width=800, + width=800, x_axis_type='datetime', title=f"Available & Used Storage on {data['shortname']}", x_axis_label='Date', @@ -226,12 +227,12 @@ def dashboard_disk_usage(self): plots[data['shortname']].line(x='date', y='available', source=source, legend_label='Available', line_dash='dashed', line_color='#C85108', line_width=3) plots[data['shortname']].circle(x='date', y='available', source=source,color='#C85108', size=10) plots[data['shortname']].line(x='date', y='used', source=source, legend_label='Used', line_dash='dashed', line_color='#355C7D', line_width=3) - plots[data['shortname']].circle(x='date', y='used', source=source,color='#355C7D', size=10) + plots[data['shortname']].circle(x='date', y='used', source=source, color='#355C7D', size=10) - plots[data['shortname']].xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], - days=["%d %B %Y"], - months=["%d %B %Y"], - years=["%B %Y"], + plots[data['shortname']].xaxis.formatter = DatetimeTickFormatter(hours="%H:%M %d %B %Y", + days="%d %B %Y", + months="%d %B %Y", + years="%B %Y" ) plots[data['shortname']].xaxis.major_label_orientation = pi / 4 plots[data['shortname']].legend.location = 'top_left' @@ -242,16 +243,15 @@ def dashboard_disk_usage(self): ]) hover_tool[data['shortname']].formatters = {'@date': 'datetime'} plots[data['shortname']].tools.append(hover_tool[data['shortname']]) - tabs.append(Panel(child=plots[data['shortname']], title=f"{data['shortname']} Storage")) + tabs.append(TabPanel(child=plots[data['shortname']], title=f"{data['shortname']} Storage")) tabs = Tabs(tabs=tabs) di.session.close() return tabs - def dashboard_central_store_data_volume(self): - """Create trending plot of data volume for various JWQL-related areas on disk. + """ Create trending plot of data volume for various JWQL-related areas on disk. These plots show data volumes calculated by walking over subdirectories/files in the JWQL-specific directories. So these plots may not include the total used disk volume, in the cases where JWQL is sharing a disk with other projects. 
These @@ -264,14 +264,14 @@ def dashboard_central_store_data_volume(self): """ # Initialize plot plot = figure(tools='pan,box_zoom,wheel_zoom,reset,save', - plot_width=800, + width=800, x_axis_type='datetime', title='JWQL directory size', x_axis_label='Date', y_axis_label='Disk Space (TB)') # This part of the plot should cycle through areas and plot area used values vs. date - #arealist = ['logs', 'outputs', 'test', 'preview_images', 'thumbnails', 'all'] + # arealist = ['logs', 'outputs', 'test', 'preview_images', 'thumbnails', 'all'] arealist = ['logs', 'outputs', 'preview_images', 'thumbnails'] colors = ['#F8B195', '#F67280', '#6C5B7B', '#355C7D'] for area, color in zip(arealist, colors): @@ -297,10 +297,10 @@ def dashboard_central_store_data_volume(self): hover_tool.formatters = {'@date': 'datetime'} plot.tools.append(hover_tool) - plot.xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], - days=["%d %B %Y"], - months=["%d %B %Y"], - years=["%B %Y"], + plot.xaxis.formatter = DatetimeTickFormatter(hours="%H:%M %d %B %Y", + days="%d %B %Y", + months="%d %B %Y", + years="%B %Y" ) plot.xaxis.major_label_orientation = pi / 4 plot.legend.location = 'top_left' @@ -308,7 +308,7 @@ def dashboard_central_store_data_volume(self): # Put the "all" plot in a separate figure because it will be larger than all the pieces, which would # throw off the y range if it were in a single plot cen_store_plot = figure(tools='pan,box_zoom,wheel_zoom,reset,save', - plot_width=800, + width=800, x_axis_type='datetime', title='JWQL central store directory, total data volume', x_axis_label='Date', @@ -332,10 +332,10 @@ def dashboard_central_store_data_volume(self): legend_str = 'File volume' cen_store_plot.line(x='date', y='used', source=cen_store_source, legend_label=legend_str, line_dash='dashed', line_color='#355C7D', line_width=3) cen_store_plot.circle(x='date', y='used', source=cen_store_source, color='#355C7D', size=10) - cen_store_plot.xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], - days=["%d %B %Y"], - months=["%d %B %Y"], - years=["%B %Y"], + cen_store_plot.xaxis.formatter = DatetimeTickFormatter(hours="%H:%M %d %B %Y", + days="%d %B %Y", + months="%d %B %Y", + years="%B %Y" ) cen_store_plot.xaxis.major_label_orientation = pi / 4 cen_store_plot.legend.location = 'top_left' @@ -349,7 +349,6 @@ def dashboard_central_store_data_volume(self): di.session.close() return plot, cen_store_plot - def dashboard_filetype_bar_chart(self): """Build bar chart of files based off of type @@ -447,34 +446,34 @@ def dashboard_files_per_day(self): date_times = [pd.to_datetime(datetime).date() for datetime in source['date'].values] source['datestr'] = [date_time.strftime("%Y-%m-%d") for date_time in date_times] - p1 = figure(title="Number of Files in Filesystem (MAST)", tools="reset,hover,box_zoom,wheel_zoom", tooltips="@datestr: @total_file_count", plot_width=800, x_axis_label='Date', y_axis_label='Number of Files Added') + p1 = figure(title="Number of Files in Filesystem (MAST)", tools="reset,hover,box_zoom,wheel_zoom", tooltips="@datestr: @total_file_count", width=800, x_axis_label='Date', y_axis_label='Number of Files Added') p1.line(x='date', y='total_file_count', source=source, color='#6C5B7B', line_dash='dashed', line_width=3) p1.scatter(x='date', y='total_file_count', source=source, color='#C85108', size=10) disable_scientific_notation(p1) - tab1 = Panel(child=p1, title='Files Per Day') + tab1 = TabPanel(child=p1, title='Files Per Day') # Create separate tooltip for storage plot. 
# Show date and used and available storage together - p2 = figure(title="Available & Used Storage in Filesystem (MAST)", tools="reset,hover,box_zoom,wheel_zoom", tooltips="@datestr: @total_file_count", plot_width=800, x_axis_label='Date', y_axis_label='Disk Space (TB)') + p2 = figure(title="Available & Used Storage in Filesystem (MAST)", tools="reset,hover,box_zoom,wheel_zoom", tooltips="@datestr: @total_file_count", width=800, x_axis_label='Date', y_axis_label='Disk Space (TB)') p2.line(x='date', y='available', source=source, color='#C85108', line_dash='dashed', line_width=3, legend_label='Available Storage') p2.line(x='date', y='used', source=source, color='#355C7D', line_dash='dashed', line_width=3, legend_label='Used Storage') p2.scatter(x='date', y='available', source=source, color='#C85108', size=10) p2.scatter(x='date', y='used', source=source, color='#355C7D', size=10) disable_scientific_notation(p2) - tab2 = Panel(child=p2, title='Storage') + tab2 = TabPanel(child=p2, title='Storage') - p1.xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], - days=["%d %B %Y"], - months=["%d %B %Y"], - years=["%B %Y"], + p1.xaxis.formatter = DatetimeTickFormatter(hours="%H:%M %d %B %Y", + days="%d %B %Y", + months="%d %B %Y", + years="%B %Y" ) p1.xaxis.major_label_orientation = pi / 4 - p2.xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], - days=["%d %B %Y"], - months=["%d %B %Y"], - years=["%B %Y"], + p2.xaxis.formatter = DatetimeTickFormatter(hours="%H:%M %d %B %Y", + days="%d %B %Y", + months="%d %B %Y", + years="%B %Y" ) p2.xaxis.major_label_orientation = pi / 4 p2.legend.location = 'top_left' @@ -534,11 +533,11 @@ def make_panel(self, x_value, top, instrument, title, x_axis_label): data = pd.Series(dict(zip(x_value, top))).reset_index(name='top').rename(columns={'index': 'x'}) source = ColumnDataSource(data) - plot = figure(x_range=x_value, title=title, plot_width=850, tools="hover", tooltips="@x: @top", x_axis_label=x_axis_label) + plot = figure(x_range=x_value, title=title, width=850, tools="hover", tooltips="@x: @top", x_axis_label=x_axis_label) plot.vbar(x='x', top='top', source=source, width=0.9, color='#6C5B7B') plot.xaxis.major_label_orientation = pi / 4 disable_scientific_notation(plot) - tab = Panel(child=plot, title=instrument) + tab = TabPanel(child=plot, title=instrument) return tab @@ -603,7 +602,7 @@ def dashboard_exposure_count_by_filter(self): # Place the pie charts in a column/Panel, and append to the figure colplots = column(pie_fig, small_pie_fig) - tab = Panel(child=colplots, title=f'{instrument}') + tab = TabPanel(child=colplots, title=f'{instrument}') figures.append(tab) else: @@ -644,8 +643,8 @@ def dashboard_exposure_count_by_filter(self): lw_data['angle'] = lw_data['value'] / lw_data['value'].sum() * 2 * np.pi # Zoomed in version of the small contributors - sw_small = sw_data.loc[sw_data['value'] <0.5].copy() - lw_small = lw_data.loc[lw_data['value'] <0.5].copy() + sw_small = sw_data.loc[sw_data['value'] < 0.5].copy() + lw_small = lw_data.loc[lw_data['value'] < 0.5].copy() sw_small['angle'] = sw_small['value'] / sw_small['value'].sum() * 2 * np.pi lw_small['angle'] = lw_small['value'] / lw_small['value'].sum() * 2 * np.pi sw_small['colors'] = ['#bec4d4'] * len(sw_small) @@ -735,7 +734,6 @@ def dashboard_exposure_count_by_filter(self): show(p) """ - # Create pie charts for SW/LW, the main set of filters, and those that aren't used # as much. 
sw_pie_fig = create_filter_based_pie_chart("Percentage of observations using filter/pupil combinations: All Filters", sw_data) @@ -747,8 +745,8 @@ def dashboard_exposure_count_by_filter(self): sw_colplots = column(sw_pie_fig, sw_small_pie_fig) lw_colplots = column(lw_pie_fig, lw_small_pie_fig) - tab_sw = Panel(child=sw_colplots, title=f'{instrument} SW') - tab_lw = Panel(child=lw_colplots, title=f'{instrument} LW') + tab_sw = TabPanel(child=sw_colplots, title=f'{instrument} SW') + tab_lw = TabPanel(child=lw_colplots, title=f'{instrument} LW') figures.append(tab_sw) figures.append(tab_lw) @@ -765,14 +763,13 @@ def dashboard_exposure_count_by_filter(self): # Place the pie charts in a column/Panel, and append to the figure colplots = column(pie_fig, small_pie_fig) - tab = Panel(child=colplots, title=f'{instrument}') + tab = TabPanel(child=colplots, title=f'{instrument}') figures.append(tab) tabs = Tabs(tabs=figures) return tabs - def dashboard_anomaly_per_instrument(self): """Create figure for number of anamolies for each JWST instrument. @@ -785,18 +782,29 @@ def dashboard_anomaly_per_instrument(self): # Set title and figures list to make panels title = 'Anomaly Types per Instrument' figures = [] + filter_kwargs = {} - # For unique instrument values, loop through data - # Find all entries for instrument/filetype combo - # Make figure and append it to list. + # Make a tab for each instrument for instrument in ANOMALY_CHOICES_PER_INSTRUMENT.keys(): - data = build_table('{}_anomaly'.format(instrument)) - data = data.drop(columns=['id', 'rootname', 'user']) - if not pd.isnull(self.delta_t) and not data.empty: - data = data[(data['flag_date'] >= (self.date - self.delta_t)) & (data['flag_date'] <= self.date)] + # only show data for currently marked anomalies and current instrument + filter_kwargs['root_file_info__instrument__iexact'] = instrument + queryset = Anomalies.objects.filter(**filter_kwargs) + + # Convert the queryset to a Pandas DataFrame using only relevant columns + labels = [anomaly_keys for anomaly_keys, values in ANOMALY_CHOICES_PER_INSTRUMENT[instrument]] + data = pd.DataFrame.from_records(queryset.values(), columns=labels) + + # Sum columns to generate the bokeh panel summed_anomaly_columns = data.sum(axis=0, numeric_only=True).to_frame(name='counts') - figures.append(self.make_panel(summed_anomaly_columns.index.values, summed_anomaly_columns['counts'], instrument, title, 'Anomaly Type')) - tabs = Tabs(tabs=figures) + # Create plot of zeroes if empty (lookin at you FGS) + if len(summed_anomaly_columns.index.values): + plot_columns = summed_anomaly_columns.index.values + summed_values = summed_anomaly_columns['counts'] + else: + plot_columns = list(summed_anomaly_columns.index.values.base) + summed_values = np.zeros(len(plot_columns)) + figures.append(self.make_panel(plot_columns, summed_values, instrument, title, 'Anomaly Type')) + tabs = Tabs(tabs=figures) return tabs diff --git a/jwql/website/apps/jwql/data_containers.py b/jwql/website/apps/jwql/data_containers.py index db6fba1fd..4c9a6a742 100644 --- a/jwql/website/apps/jwql/data_containers.py +++ b/jwql/website/apps/jwql/data_containers.py @@ -43,6 +43,7 @@ from django.conf import settings from django.contrib import messages from django.core.exceptions import ObjectDoesNotExist +from django.db.models.query import QuerySet import numpy as np from operator import itemgetter import pandas as pd @@ -59,6 +60,7 @@ from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE, JWST_INSTRUMENT_NAMES from jwql.utils.constants 
import REPORT_KEYS_PER_INSTRUMENT from jwql.utils.constants import SUFFIXES_TO_ADD_ASSOCIATION, SUFFIXES_WITH_AVERAGED_INTS, QueryConfigKeys +from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS from jwql.utils.credentials import get_mast_token from jwql.utils.permissions import set_permissions from jwql.utils.utils import get_rootnames_for_instrument_proposal @@ -68,15 +70,6 @@ # a MAST query. Mast._portal_api_connection.PAGESIZE = MAST_QUERY_LIMIT -# astroquery.mast import that depends on value of auth_mast -# this import has to be made before any other import of astroquery.mast -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - -# Determine if the code is being run as part of a Readthedocs build -ON_READTHEDOCS = False -if 'READTHEDOCS' in os.environ: # pragma: no cover - ON_READTHEDOCS = os.environ['READTHEDOCS'] - if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: # These lines are needed in order to use the Django models in a standalone @@ -368,6 +361,145 @@ def get_acknowledgements(): return acknowledgements +def get_additional_exposure_info(root_file_infos, image_info): + """Create dictionaries of basic exposure information from an exposure's + RootFileInfo entry, as well as header information. Originally designed to + be used in jwql.website.apps.jwql.views.view_image() + + Parameters + ---------- + root_file_infos : jwql.website.apps.jwql.models.RootFileInfo or django.db.models.query.QuerySet + RootFileInfo for a particular file base name, or a QuerySet of RootFileInfos for + an exposure base name. + + image_info : : dict + A dictionary containing various information for the given + ``file_root``. + + Returns + ------- + basic_info : dict + Dictionary of information about the file/exposure + + additional_info : dict + Dictionary of extra information about the file/exposure + """ + # Get headers from the file so we can pass along info that is common to all + # suffixes. The order of possible_suffixes_to_use is itentional, because the + # uncal file will not have info on the pipeline version used, and so we would + # rather grab information from the rate or cal files. + possible_suffixes_to_use = np.array(['rate', 'rateints', 'cal', 'calints', 'uncal']) + existing_suffixes = np.array([suffix in image_info['suffixes'] for suffix in possible_suffixes_to_use]) + + if isinstance(root_file_infos, QuerySet): + root_file_info = root_file_infos[0] + filter_value = '/'.join(set([e.filter for e in root_file_infos])) + pupil_value = '/'.join(set([e.pupil for e in root_file_infos])) + grating_value = '/'.join(set([e.grating for e in root_file_infos])) + elif isinstance(root_file_infos, RootFileInfo): + root_file_info = root_file_infos + filter_value = root_file_info.filter + pupil_value = root_file_info.pupil + grating_value = root_file_info.grating + + # Initialize dictionary of file info to show at the top of the page, along + # with another for info that will be in the collapsible text box. + basic_info = {'exp_type': root_file_info.exp_type, + 'category': 'N/A', + 'visit_status': 'N/A', + 'subarray': root_file_info.subarray, + 'filter': filter_value + } + + # The order of the elements is important here, in that the webpage displays + # them in the order they are here, and we've set this order to try and group + # together related keywords. 
+ if isinstance(root_file_infos, QuerySet): + additional_info = {'READPATT': root_file_info.read_patt, + 'TITLE': 'N/A', + 'NGROUPS': 'N/A', + 'PI_NAME': 'N/A', + 'NINTS': 'N/A', + 'TARGNAME': 'N/A', + 'EXPTIME': 'N/A', + 'TARG_RA': 'N/A', + 'CAL_VER': 'N/A', + 'TARG_DEC': 'N/A', + 'CRDS context': 'N/A', + 'PA_V3': 'N/A', + 'EXPSTART': root_file_info.expstart + } + elif isinstance(root_file_infos, RootFileInfo): + additional_info = {'READPATT': root_file_info.read_patt, + 'TITLE': 'N/A', + 'NGROUPS': 'N/A', + 'PI_NAME': 'N/A', + 'NINTS': 'N/A', + 'TARGNAME': 'N/A', + 'EXPTIME': 'N/A', + 'RA_REF': 'N/A', + 'CAL_VER': 'N/A', + 'DEC_REF': 'N/A', + 'CRDS context': 'N/A', + 'ROLL_REF': 'N/A', + 'EXPSTART': root_file_info.expstart + } + + # Deal with instrument-specific parameters + if root_file_info.instrument == 'NIRSpec': + basic_info['grating'] = grating_value + + if root_file_info.instrument in ['NIRCam', 'NIRISS']: + basic_info['pupil'] = pupil_value + + # If any of the desired files are present, get the headers and populate the header + # info dictionary + if any(existing_suffixes): + suffix = possible_suffixes_to_use[existing_suffixes][0] + filename = f'{root_file_info.root_name}_{suffix}.fits' + + # get_image_info() has already globbed over the directory with the files and + # returned the list of existing suffixes, so we shouldn't need to check for + # file existence here. + file_path = filesystem_path(filename, check_existence=True) + + header = fits.getheader(file_path) + header_sci = fits.getheader(file_path, 1) + + basic_info['category'] = header['CATEGORY'] + basic_info['visit_status'] = header['VISITSTA'] + additional_info['NGROUPS'] = header['NGROUPS'] + additional_info['NINTS'] = header['NINTS'] + additional_info['EXPTIME'] = header['EFFEXPTM'] + additional_info['TITLE'] = header['TITLE'] + additional_info['PI_NAME'] = header['PI_NAME'] + additional_info['TARGNAME'] = header['TARGPROP'] + + # For the exposure level (i.e. multiple files) present the target + # RA and Dec. For the image level, give RA_REF, DEC_REF, since those + # are specific to the detector. Similarly, for the exposure level, show + # PA_V3, which applies to all detectors. At the image level, show + # ROLL_REF, which is detector-specific. + if isinstance(root_file_infos, QuerySet): + additional_info['TARG_RA'] = header['TARG_RA'] + additional_info['TARG_DEC'] = header['TARG_DEC'] + additional_info['PA_V3'] = header_sci['PA_V3'] + elif isinstance(root_file_infos, RootFileInfo): + additional_info['RA_REF'] = header_sci['RA_REF'] + additional_info['DEC_REF'] = header_sci['DEC_REF'] + additional_info['ROLL_REF'] = header_sci['ROLL_REF'] + + additional_info['CAL_VER'] = 'N/A' + additional_info['CRDS context'] = 'N/A' + + # Pipeline version and CRDS context info are not in uncal files + if suffix != 'uncal': + additional_info['CAL_VER'] = header['CAL_VER'] + additional_info['CRDS context'] = header['CRDS_CTX'] + + return basic_info, additional_info + + def get_all_proposals(): """Return a list of all proposals that exist in the filesystem. 
@@ -412,8 +544,7 @@ def get_available_suffixes(all_suffixes, return_untracked=True): untracked_suffixes = set(all_suffixes) for poss_suffix in EXPOSURE_PAGE_SUFFIX_ORDER: if 'crf' not in poss_suffix: - if (poss_suffix in all_suffixes - and poss_suffix not in suffixes): + if (poss_suffix in all_suffixes and poss_suffix not in suffixes): suffixes.append(poss_suffix) untracked_suffixes.remove(poss_suffix) else: @@ -423,8 +554,7 @@ def get_available_suffixes(all_suffixes, return_untracked=True): # So in this case, we strip the e.g. o001 from the # suffixes and check which list elements match. for image_suffix in all_suffixes: - if (image_suffix.endswith(poss_suffix) - and image_suffix not in suffixes): + if (image_suffix.endswith(poss_suffix) and image_suffix not in suffixes): suffixes.append(image_suffix) untracked_suffixes.remove(image_suffix) diff --git a/jwql/website/apps/jwql/monitor_models/bad_pixel.py b/jwql/website/apps/jwql/monitor_models/bad_pixel.py new file mode 100644 index 000000000..151aa39ff --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/bad_pixel.py @@ -0,0 +1,230 @@ +"""Defines the models for the ``jwql`` bad pixel monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. +from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class FGSBadPixelQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + dark_start_time_mjd = models.FloatField(blank=True, null=True) + dark_end_time_mjd = models.FloatField(blank=True, null=True) + flat_start_time_mjd = models.FloatField(blank=True, null=True) + flat_end_time_mjd = models.FloatField(blank=True, null=True) + dark_files_found = models.IntegerField(blank=True, null=True) + flat_files_found = models.IntegerField(blank=True, null=True) + run_bpix_from_darks = models.BooleanField(blank=True, null=True) + run_bpix_from_flats = models.BooleanField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_bad_pixel_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class FGSBadPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_bad_pixel_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRIBadPixelQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + dark_start_time_mjd = models.FloatField(blank=True, null=True) + dark_end_time_mjd = models.FloatField(blank=True, null=True) + flat_start_time_mjd = models.FloatField(blank=True, null=True) + flat_end_time_mjd = models.FloatField(blank=True, null=True) + dark_files_found = models.IntegerField(blank=True, null=True) + flat_files_found = models.IntegerField(blank=True, null=True) + run_bpix_from_darks = models.BooleanField(blank=True, null=True) + run_bpix_from_flats = models.BooleanField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_bad_pixel_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRIBadPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_bad_pixel_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamBadPixelQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + dark_start_time_mjd = models.FloatField(blank=True, null=True) + dark_end_time_mjd = models.FloatField(blank=True, null=True) + flat_start_time_mjd = models.FloatField(blank=True, null=True) + flat_end_time_mjd = models.FloatField(blank=True, null=True) + dark_files_found = models.IntegerField(blank=True, null=True) + flat_files_found = models.IntegerField(blank=True, null=True) + run_bpix_from_darks = models.BooleanField(blank=True, null=True) + run_bpix_from_flats = models.BooleanField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_bad_pixel_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamBadPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_bad_pixel_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSBadPixelQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + dark_start_time_mjd = models.FloatField(blank=True, null=True) + dark_end_time_mjd = models.FloatField(blank=True, null=True) + flat_start_time_mjd = models.FloatField(blank=True, null=True) + flat_end_time_mjd = models.FloatField(blank=True, null=True) + dark_files_found = models.IntegerField(blank=True, null=True) + flat_files_found = models.IntegerField(blank=True, null=True) + run_bpix_from_darks = models.BooleanField(blank=True, null=True) + run_bpix_from_flats = models.BooleanField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_bad_pixel_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSBadPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_bad_pixel_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecBadPixelQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + dark_start_time_mjd = models.FloatField(blank=True, null=True) + dark_end_time_mjd = models.FloatField(blank=True, null=True) + flat_start_time_mjd = models.FloatField(blank=True, null=True) + flat_end_time_mjd = models.FloatField(blank=True, null=True) + dark_files_found = models.IntegerField(blank=True, null=True) + flat_files_found = models.IntegerField(blank=True, null=True) + run_bpix_from_darks = models.BooleanField(blank=True, null=True) + run_bpix_from_flats = models.BooleanField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_bad_pixel_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecBadPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_bad_pixel_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' diff --git a/jwql/website/apps/jwql/monitor_models/bias.py b/jwql/website/apps/jwql/monitor_models/bias.py new file mode 100644 index 000000000..306ef3b29 --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/bias.py @@ -0,0 +1,171 @@ +"""Defines the models for the ``jwql`` monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. +from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class NIRCamBiasQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_bias_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamBiasStats(models.Model): + aperture = models.CharField(blank=True, null=True) + uncal_filename = models.CharField(blank=True, null=True) + cal_filename = models.CharField(blank=True, null=True) + cal_image = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + median = models.FloatField(blank=True, null=True) + stddev = models.FloatField(blank=True, null=True) + collapsed_rows = ArrayField(models.FloatField()) + collapsed_columns = ArrayField(models.FloatField()) + counts = ArrayField(models.FloatField()) + bin_centers = ArrayField(models.FloatField()) + amp1_even_med = models.FloatField(blank=True, null=True) + amp1_odd_med = models.FloatField(blank=True, null=True) + amp2_even_med = models.FloatField(blank=True, null=True) + amp2_odd_med = models.FloatField(blank=True, null=True) + amp3_even_med = models.FloatField(blank=True, null=True) + amp3_odd_med = models.FloatField(blank=True, null=True) + amp4_even_med = models.FloatField(blank=True, null=True) + amp4_odd_med = models.FloatField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 
'nircam_bias_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSBiasQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_bias_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSBiasStats(models.Model): + aperture = models.CharField(blank=True, null=True) + uncal_filename = models.CharField(blank=True, null=True) + cal_filename = models.CharField(blank=True, null=True) + cal_image = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + median = models.FloatField(blank=True, null=True) + stddev = models.FloatField(blank=True, null=True) + collapsed_rows = ArrayField(models.FloatField()) + collapsed_columns = ArrayField(models.FloatField()) + counts = ArrayField(models.FloatField()) + bin_centers = ArrayField(models.FloatField()) + amp1_even_med = models.FloatField(blank=True, null=True) + amp1_odd_med = models.FloatField(blank=True, null=True) + amp2_even_med = models.FloatField(blank=True, null=True) + amp2_odd_med = models.FloatField(blank=True, null=True) + amp3_even_med = models.FloatField(blank=True, null=True) + amp3_odd_med = models.FloatField(blank=True, null=True) + amp4_even_med = models.FloatField(blank=True, null=True) + amp4_odd_med = models.FloatField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_bias_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecBiasQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_bias_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecBiasStats(models.Model): + aperture = models.CharField(blank=True, null=True) + uncal_filename = models.CharField(blank=True, null=True) + cal_filename = models.CharField(blank=True, null=True) + cal_image = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + median = models.FloatField(blank=True, null=True) + stddev = models.FloatField(blank=True, null=True) + collapsed_rows = ArrayField(models.FloatField()) + collapsed_columns = ArrayField(models.FloatField()) + counts = ArrayField(models.FloatField()) + bin_centers = ArrayField(models.FloatField()) + amp1_even_med = models.FloatField(blank=True, null=True) + amp1_odd_med = models.FloatField(blank=True, null=True) + amp2_even_med = 
models.FloatField(blank=True, null=True) + amp2_odd_med = models.FloatField(blank=True, null=True) + amp3_even_med = models.FloatField(blank=True, null=True) + amp3_odd_med = models.FloatField(blank=True, null=True) + amp4_even_med = models.FloatField(blank=True, null=True) + amp4_odd_med = models.FloatField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_bias_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' diff --git a/jwql/website/apps/jwql/monitor_models/claw.py b/jwql/website/apps/jwql/monitor_models/claw.py new file mode 100644 index 000000000..4e92ea3d4 --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/claw.py @@ -0,0 +1,70 @@ +"""Defines the models for the ``jwql`` monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. +from django.db import models + + +class NIRCamClawQueryHistory(models.Model): + entry_date = models.DateTimeField(blank=True, null=True) + instrument = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_claw_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamClawStats(models.Model): + entry_date = models.DateTimeField(blank=True, null=True) + filename = models.CharField(blank=True, null=True) + proposal = models.CharField(blank=True, null=True) + obs = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + filter = models.CharField(blank=True, null=True) + pupil = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + expstart_mjd = models.FloatField(blank=True, null=True) + effexptm = models.FloatField(blank=True, null=True) + ra = models.FloatField(blank=True, null=True) + dec = models.FloatField(blank=True, null=True) + pa_v3 = models.FloatField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + median = models.FloatField(blank=True, null=True) + stddev = models.FloatField(blank=True, null=True) + frac_masked = models.FloatField(blank=True, null=True) + skyflat_filename = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_claw_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' diff --git a/jwql/website/apps/jwql/monitor_models/common.py b/jwql/website/apps/jwql/monitor_models/common.py new file mode 100644 index 000000000..9809e843c --- /dev/null +++ 
b/jwql/website/apps/jwql/monitor_models/common.py
@@ -0,0 +1,409 @@
+"""Defines the models for the ``jwql`` common monitor database tables.
+
+In Django, "a model is the single, definitive source of information
+about your data. It contains the essential fields and behaviors of the
+data you’re storing. Generally, each model maps to a single database
+table" (from Django documentation). Each model contains fields, such
+as character fields or date/time fields, that function like columns in
+a data table. This module defines models that are used to store data
+related to the JWQL monitors.
+
+Usage
+-----
+
+JWQL uses the django database models for creating tables, updating table fields, adding
+new data to tables, and retrieving data from tables. For instrument monitors, in particular,
+there are a number of issues that may be relevant.
+
+In general, django model documentation can be found
+`on the django website `_.
+Unfortunately, finding a particular bit of documentation in django can be a challenge, so
+a few quick-reference notes are provided below.
+
+Retrieving Data
+---------------
+
+Django retrieves data directly from its model tables. So, for example, if you want to
+select data from the `MIRIMyMonitorStats` table, you must first import the relevant
+object:
+
+.. code-block:: python
+
+    from jwql.website.apps.jwql.monitor_models.my_monitor import MIRIMyMonitorStats
+
+Then, you would access the database contents via the `objects` member of the class. For
+example, to search the `MIRIMyMonitorStats` table for all entries matching a given
+aperture, and to sort them with the most recent date at the top, you might do a query like
+the following:
+
+.. code-block:: python
+
+    aperture = "my_miri_aperture"
+
+    records = MIRIMyMonitorStats.objects.filter(aperture__iexact=aperture).order_by("-mjd_end").all()
+
+In the above code,
+
+* The `filter()` function selects matching records from the full table. You can use
+  multiple filter statements, or a single filter function with multiple filters. `filter()`
+  statements are always combined with an implicit AND.
+* If you have a long filter statement and want to separate it from the query statement,
+  you can create a dictionary and pass it in with `**` prepended. The dictionary
+  equivalent to the above would be `{'aperture__iexact': aperture}`.
+* The text before the double underscore is a field name, and the text afterwards describes
+  the type of comparison. `iexact` indicates "case-insensitive exact match". You can also
+  use a variety of other comparisons (`contains`, `startswith`, `gte`, etc.)
+* If you want to get only records that *don't* match a pattern, then you can use the
+  `exclude()` function, which otherwise operates exactly the same as `filter()`.
+* In the `order_by()` function, the `-` at the start is used to reverse the sort order,
+  and `mjd_end` is the name of the field to be sorted by.
+* The `all()` statement indicates that you want all matching records returned as an
+  iterable queryset. `get()` returns a single matching record (and raises an error if the
+  query matches zero or more than one record), `first()` returns only the first record, etc.
+
+As an example of multiple filters, the two queries below:
+
+.. code-block:: python
+
+    records = MIRIMyMonitorStats.objects.filter(aperture__iexact=ap, mjd_end__gte=60000)
+
+    filters = {
+        "aperture__iexact": ap,
+        "mjd_end__gte": 60000
+    }
+    records = MIRIMyMonitorStats.objects.filter(**filters)
+
+show two different ways of combining a search for a particular aperture *and* only data
+taken more recently than MJD=60000.
+
+Note that django executes queries lazily, meaning that it will only actually *do* the
+query when it needs the results. The above statement, for example, will not actually
+run the query. Instead, it will be run when you operate on it, such as
+
+* Getting the length of the result with e.g. `len(records)`
+* Printing out any of the results
+* Asking for the value of one of the fields (e.g. `records[3].aperture`)
+
+Retrieving Specific Columns
+===========================
+
+Django offers two ways of retrieving only specific columns. The first is the `only()`
+function, which immediately loads only the relevant columns. For example,
+
+.. code-block:: python
+
+    records = MIRIMyMonitorStats.objects.only("aperture", "mjd_start", "relevant_item")
+
+will immediately load only the three columns selected. The remaining columns are deferred:
+they can still be accessed, but each access triggers an additional database query.
+The other method is the `defer()` method, which loads every column *except* the ones listed.
+
+Q Objects
+=========
+
+In order to make more complex queries, Django supplies "Q Objects", which are essentially
+encapsulated filters that can be combined using logical operators. For more on this, see
+`the django Q object documentation `_.
+
+Storing Data
+------------
+
+Django also uses the model tables (and objects) directly for storing new data. For example,
+if you have a monitor table defined as below:
+
+.. code-block:: python
+
+    from django.db import models
+    from django.contrib.postgres.fields import ArrayField
+
+    class NIRISSMyMonitorStats(models.Model):
+        aperture = models.CharField(blank=True, null=True)
+        mean = models.FloatField(blank=True, null=True)
+        median = models.FloatField(blank=True, null=True)
+        stddev = models.FloatField(blank=True, null=True)
+        counts = ArrayField(models.FloatField())
+        entry_date = models.DateTimeField(blank=True, null=True)
+
+        class Meta:
+            managed = True
+            db_table = 'niriss_my_monitor_stats'
+            unique_together = (('id', 'entry_date'),)
+            app_label = 'monitors'
+
+then you would create a new entry as follows:
+
+.. code-block:: python
+
+    values = {
+        "aperture": "my_aperture",
+        "mean": float(mean),
+        "median": float(median),
+        "stddev": float(stddev),
+        "counts": list(counts.astype(float)),
+        "entry_date": datetime.datetime.now()
+    }
+
+    entry = NIRISSMyMonitorStats(**values)
+    entry.save()
+
+There are (as usual) a few things to note above:
+
+* Django doesn't have a built-in array data type, so you need to import it from the
+  database-compatibility layers. The ArrayField takes, as a required argument, the type
+  of data that makes up the array.
+* In the Meta sub-class of the monitor class, the `app_label = 'monitors'` statement is
+  required so that django knows that the model should be stored in the monitors database.
+* The `float()` casts are required because the database interface doesn't understand
+  numpy data types.
+* The `list()` cast is required because the database interface doesn't understand the
+  numpy `ndarray` data type.
+
+Authors
+-------
+    - Brian York
+
+Use
+---
+    This module is used as such:
+
+    ::
+        from monitor_models import MyModel
+        data = MyModel.objects.filter(name="JWQL")
+
+References
+----------
+    For more information please see:
+        ```https://docs.djangoproject.com/en/2.0/topics/db/models/```
+"""
+# This is an auto-generated Django model module.
+# Feel free to rename the models, but don't rename db_table values or field names.
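The Q-object mechanism mentioned in the docstring above has no accompanying example, so here is a minimal sketch. It assumes the same hypothetical `MIRIMyMonitorStats` model (and its `aperture`, `mean`, and `mjd_end` fields) used in the docstring's examples; only `django.db.models.Q` itself is real Django API.

.. code-block:: python

    from django.db.models import Q
    # Hypothetical model, following the docstring's example import path.
    from jwql.website.apps.jwql.monitor_models.my_monitor import MIRIMyMonitorStats

    # OR two conditions together (plain filter() arguments are always ANDed),
    # then drop rows with no mean value and sort newest-first.
    records = (
        MIRIMyMonitorStats.objects
        .filter(Q(aperture__iexact="my_miri_aperture") | Q(mjd_end__gte=60000))
        .exclude(mean__isnull=True)
        .order_by("-mjd_end")
    )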
+from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class MonitorRouter: + """ + A router to control all database operations on models in the + JWQLDB (monitors) database. + """ + + route_app_labels = {"monitors"} + + def db_for_read(self, model, **hints): + """ + Attempts to read monitor models go to monitors db. + """ + if model._meta.app_label in self.route_app_labels: + return "monitors" + return None + + def db_for_write(self, model, **hints): + """ + Attempts to write monitor models go to monitors db. + """ + if model._meta.app_label in self.route_app_labels: + return "monitors" + return None + + def allow_relation(self, obj1, obj2, **hints): + """ + Allow relations between tables in the monitors DB. + """ + if ( + obj1._meta.app_label in self.route_app_labels + or obj2._meta.app_label in self.route_app_labels + ): + return True + return None + + def allow_migrate(self, db, app_label, model_name=None, **hints): + """ + Make sure the monitors apps only appear in the 'monitors' database. + """ + if app_label in self.route_app_labels: + return db == "monitors" + return None + + +class Monitor(models.Model): + monitor_name = models.CharField() + start_time = models.DateTimeField() + end_time = models.DateTimeField(blank=True, null=True) + status = models.TextField(blank=True, null=True) # This field type is a guess. + log_file = models.CharField() + + class Meta: + managed = True + db_table = 'monitor' + app_label = 'monitors' + + +class CentralStorage(models.Model): + date = models.DateTimeField() + area = models.CharField() + size = models.FloatField() + used = models.FloatField() + available = models.FloatField() + + class Meta: + managed = True + db_table = 'central_storage' + app_label = 'monitors' + + +class FilesystemCharacteristics(models.Model): + date = models.DateTimeField() + instrument = models.TextField() # This field type is a guess. + filter_pupil = models.TextField(blank=True, null=True) # This field type is a guess. + obs_per_filter_pupil = models.TextField(blank=True, null=True) # This field type is a guess. + + class Meta: + managed = True + db_table = 'filesystem_characteristics' + app_label = 'monitors' + + +class FilesystemGeneral(models.Model): + date = models.DateTimeField(unique=True) + total_file_count = models.IntegerField() + total_file_size = models.FloatField() + fits_file_count = models.IntegerField() + fits_file_size = models.FloatField() + used = models.FloatField() + available = models.FloatField() + + class Meta: + managed = True + db_table = 'filesystem_general' + app_label = 'monitors' + + +class FilesystemInstrument(models.Model): + date = models.DateTimeField() + instrument = models.TextField() # This field type is a guess. + filetype = models.TextField() # This field type is a guess. 
+ count = models.IntegerField() + size = models.FloatField() + + class Meta: + managed = True + db_table = 'filesystem_instrument' + unique_together = (('date', 'instrument', 'filetype'),) + app_label = 'monitors' + + +class FgsAnomaly(models.Model): + rootname = models.CharField() + flag_date = models.DateTimeField() + user = models.CharField() + cosmic_ray_shower = models.BooleanField() + diffraction_spike = models.BooleanField() + excessive_saturation = models.BooleanField() + guidestar_failure = models.BooleanField() + persistence = models.BooleanField() + crosstalk = models.BooleanField() + data_transfer_error = models.BooleanField() + ghost = models.BooleanField() + snowball = models.BooleanField() + other = models.BooleanField() + + class Meta: + managed = True + db_table = 'fgs_anomaly' + app_label = 'monitors' + + +class MiriAnomaly(models.Model): + rootname = models.CharField() + flag_date = models.DateTimeField() + user = models.CharField() + cosmic_ray_shower = models.BooleanField() + diffraction_spike = models.BooleanField() + excessive_saturation = models.BooleanField() + guidestar_failure = models.BooleanField() + persistence = models.BooleanField() + column_pull_up = models.BooleanField() + internal_reflection = models.BooleanField() + row_pull_down = models.BooleanField() + other = models.BooleanField() + column_pull_down = models.BooleanField() + mrs_glow = models.BooleanField(db_column='MRS_Glow') # Field name made lowercase. + mrs_zipper = models.BooleanField(db_column='MRS_Zipper') # Field name made lowercase. + row_pull_up = models.BooleanField() + lrs_contamination = models.BooleanField(db_column='LRS_Contamination') # Field name made lowercase. + tree_rings = models.BooleanField() + + class Meta: + managed = True + db_table = 'miri_anomaly' + app_label = 'monitors' + + +class NircamAnomaly(models.Model): + rootname = models.CharField() + flag_date = models.DateTimeField() + user = models.CharField() + cosmic_ray_shower = models.BooleanField() + diffraction_spike = models.BooleanField() + excessive_saturation = models.BooleanField() + guidestar_failure = models.BooleanField() + persistence = models.BooleanField() + crosstalk = models.BooleanField() + data_transfer_error = models.BooleanField() + ghost = models.BooleanField() + snowball = models.BooleanField() + dragons_breath = models.BooleanField() + other = models.BooleanField() + scattered_light = models.BooleanField() + claws = models.BooleanField() + wisps = models.BooleanField() + tilt_event = models.BooleanField() + + class Meta: + managed = True + db_table = 'nircam_anomaly' + app_label = 'monitors' + + +class NirissAnomaly(models.Model): + rootname = models.CharField() + flag_date = models.DateTimeField() + user = models.CharField() + cosmic_ray_shower = models.BooleanField() + diffraction_spike = models.BooleanField() + excessive_saturation = models.BooleanField() + guidestar_failure = models.BooleanField() + persistence = models.BooleanField() + crosstalk = models.BooleanField() + data_transfer_error = models.BooleanField() + ghost = models.BooleanField() + snowball = models.BooleanField() + other = models.BooleanField() + scattered_light = models.TextField() + light_saber = models.TextField() + + class Meta: + managed = True + db_table = 'niriss_anomaly' + app_label = 'monitors' + + +class NirspecAnomaly(models.Model): + rootname = models.CharField() + flag_date = models.DateTimeField() + user = models.CharField() + cosmic_ray_shower = models.BooleanField() + diffraction_spike = models.BooleanField() + 
excessive_saturation = models.BooleanField() + guidestar_failure = models.BooleanField() + persistence = models.BooleanField() + crosstalk = models.BooleanField() + data_transfer_error = models.BooleanField() + ghost = models.BooleanField() + snowball = models.BooleanField() + dominant_msa_leakage = models.BooleanField(db_column='Dominant_MSA_Leakage') # Field name made lowercase. + optical_short = models.BooleanField() + other = models.BooleanField() + + class Meta: + managed = True + db_table = 'nirspec_anomaly' + app_label = 'monitors' diff --git a/jwql/website/apps/jwql/monitor_models/cosmic_ray.py b/jwql/website/apps/jwql/monitor_models/cosmic_ray.py new file mode 100644 index 000000000..f9084aa06 --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/cosmic_ray.py @@ -0,0 +1,200 @@ +"""Defines the models for the ``jwql`` cosmic ray monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. +from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class FGSCosmicRayQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_cosmic_ray_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class FGSCosmicRayStats(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + source_file = models.CharField(blank=True, null=True) + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + jump_count = models.IntegerField(blank=True, null=True) + jump_rate = models.FloatField(blank=True, null=True) + magnitude = models.TextField(blank=True, null=True) # This field type is a guess. + outliers = models.TextField(blank=True, null=True) # This field type is a guess. 
+ + class Meta: + managed = True + db_table = 'fgs_cosmic_ray_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRICosmicRayQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_cosmic_ray_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRICosmicRayStats(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + source_file = models.CharField(blank=True, null=True) + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + jump_count = models.IntegerField(blank=True, null=True) + jump_rate = models.FloatField(blank=True, null=True) + magnitude = models.TextField(blank=True, null=True) # This field type is a guess. + outliers = models.TextField(blank=True, null=True) # This field type is a guess. + + class Meta: + managed = True + db_table = 'miri_cosmic_ray_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamCosmicRayQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_cosmic_ray_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamCosmicRayStats(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + source_file = models.CharField(blank=True, null=True) + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + jump_count = models.IntegerField(blank=True, null=True) + jump_rate = models.FloatField(blank=True, null=True) + magnitude = models.TextField(blank=True, null=True) # This field type is a guess. + outliers = models.TextField(blank=True, null=True) # This field type is a guess. 
+ + class Meta: + managed = True + db_table = 'nircam_cosmic_ray_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSCosmicRayQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_cosmic_ray_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSCosmicRayStats(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + source_file = models.CharField(blank=True, null=True) + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + jump_count = models.IntegerField(blank=True, null=True) + jump_rate = models.FloatField(blank=True, null=True) + magnitude = models.TextField(blank=True, null=True) # This field type is a guess. + outliers = models.TextField(blank=True, null=True) # This field type is a guess. + + class Meta: + managed = True + db_table = 'niriss_cosmic_ray_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecCosmicRayQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_cosmic_ray_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecCosmicRayStats(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + source_file = models.CharField(blank=True, null=True) + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + jump_count = models.IntegerField(blank=True, null=True) + jump_rate = models.FloatField(blank=True, null=True) + magnitude = models.TextField(blank=True, null=True) # This field type is a guess. + outliers = models.TextField(blank=True, null=True) # This field type is a guess. + + class Meta: + managed = True + db_table = 'nirspec_cosmic_ray_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' diff --git a/jwql/website/apps/jwql/monitor_models/dark_current.py b/jwql/website/apps/jwql/monitor_models/dark_current.py new file mode 100644 index 000000000..29d9a9523 --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/dark_current.py @@ -0,0 +1,380 @@ +"""Defines the models for the ``jwql`` dark current monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. 
This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. +from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class FGSDarkDarkCurrent(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + amplifier = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + gauss_amplitude = ArrayField(models.FloatField()) + gauss_peak = ArrayField(models.FloatField()) + gauss_width = ArrayField(models.FloatField()) + gauss_chisq = models.FloatField(blank=True, null=True) + double_gauss_amplitude1 = ArrayField(models.FloatField()) + double_gauss_peak1 = ArrayField(models.FloatField()) + double_gauss_width1 = ArrayField(models.FloatField()) + double_gauss_amplitude2 = ArrayField(models.FloatField()) + double_gauss_peak2 = ArrayField(models.FloatField()) + double_gauss_width2 = ArrayField(models.FloatField()) + double_gauss_chisq = models.FloatField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + hist_dark_values = ArrayField(models.FloatField()) + hist_amplitudes = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'fgs_dark_dark_current' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class FGSDarkPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_dark_pixel_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class FGSDarkQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_dark_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRIDarkDarkCurrent(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + amplifier = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + gauss_amplitude = ArrayField(models.FloatField()) + gauss_peak = ArrayField(models.FloatField()) + gauss_width = ArrayField(models.FloatField()) + gauss_chisq = models.FloatField(blank=True, null=True) + double_gauss_amplitude1 = ArrayField(models.FloatField()) + double_gauss_peak1 = ArrayField(models.FloatField()) + double_gauss_width1 = ArrayField(models.FloatField()) + double_gauss_amplitude2 = ArrayField(models.FloatField()) + double_gauss_peak2 = ArrayField(models.FloatField()) + double_gauss_width2 = ArrayField(models.FloatField()) + double_gauss_chisq = models.FloatField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + hist_dark_values = ArrayField(models.FloatField()) + hist_amplitudes = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'miri_dark_dark_current' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRIDarkPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_dark_pixel_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRIDarkQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_dark_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamDarkDarkCurrent(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + amplifier = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + gauss_amplitude = ArrayField(models.FloatField()) + gauss_peak = ArrayField(models.FloatField()) + gauss_width = ArrayField(models.FloatField()) + gauss_chisq = models.FloatField(blank=True, null=True) + double_gauss_amplitude1 = ArrayField(models.FloatField()) + double_gauss_peak1 = ArrayField(models.FloatField()) + double_gauss_width1 = ArrayField(models.FloatField()) + double_gauss_amplitude2 = ArrayField(models.FloatField()) + double_gauss_peak2 = ArrayField(models.FloatField()) + double_gauss_width2 = ArrayField(models.FloatField()) + double_gauss_chisq = models.FloatField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + hist_dark_values = ArrayField(models.FloatField()) + hist_amplitudes = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'nircam_dark_dark_current' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamDarkPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_dark_pixel_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamDarkQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_dark_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSDarkDarkCurrent(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + amplifier = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + gauss_amplitude = ArrayField(models.FloatField()) + gauss_peak = ArrayField(models.FloatField()) + gauss_width = ArrayField(models.FloatField()) + gauss_chisq = models.FloatField(blank=True, null=True) + double_gauss_amplitude1 = ArrayField(models.FloatField()) + double_gauss_peak1 = ArrayField(models.FloatField()) + double_gauss_width1 = ArrayField(models.FloatField()) + double_gauss_amplitude2 = ArrayField(models.FloatField()) + double_gauss_peak2 = ArrayField(models.FloatField()) + double_gauss_width2 = ArrayField(models.FloatField()) + double_gauss_chisq = models.FloatField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + hist_dark_values = ArrayField(models.FloatField()) + hist_amplitudes = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'niriss_dark_dark_current' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSDarkPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_dark_pixel_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSDarkQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_dark_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecDarkDarkCurrent(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + amplifier = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + gauss_amplitude = ArrayField(models.FloatField()) + gauss_peak = ArrayField(models.FloatField()) + gauss_width = ArrayField(models.FloatField()) + gauss_chisq = models.FloatField(blank=True, null=True) + double_gauss_amplitude1 = ArrayField(models.FloatField()) + double_gauss_peak1 = ArrayField(models.FloatField()) + double_gauss_width1 = ArrayField(models.FloatField()) + double_gauss_amplitude2 = ArrayField(models.FloatField()) + double_gauss_peak2 = ArrayField(models.FloatField()) + double_gauss_width2 = ArrayField(models.FloatField()) + double_gauss_chisq = models.FloatField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + hist_dark_values = ArrayField(models.FloatField()) + hist_amplitudes = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'nirspec_dark_dark_current' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecDarkPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_dark_pixel_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecDarkQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_dark_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' diff --git a/jwql/website/apps/jwql/monitor_models/edb.py b/jwql/website/apps/jwql/monitor_models/edb.py new file mode 100644 index 000000000..cc9d8ec82 --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/edb.py @@ -0,0 +1,465 @@ +"""Defines the models for the ``jwql`` EDB monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. 
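Since the EDB stats tables defined below all share the same mnemonic / `latest_query` / array-column layout, a short retrieval sketch may be useful. The model name and import path come from this patch; the mnemonic string is a placeholder, not a real EDB mnemonic.

.. code-block:: python

    from jwql.website.apps.jwql.monitor_models.edb import FGSEdbDailyStats

    # Grab the most recently queried daily-stats entry for a given mnemonic.
    # "SOME_MNEMONIC" is a placeholder; substitute a real EDB mnemonic name.
    latest = (
        FGSEdbDailyStats.objects
        .filter(mnemonic__iexact="SOME_MNEMONIC")
        .order_by("-latest_query")
        .first()
    )
    if latest is not None:
        # times, data, stdev, median, max, and min are parallel ArrayField columns.
        print(latest.latest_query, len(latest.times), len(latest.data))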
+from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class FGSEdbBlocksStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_edb_blocks_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class FGSEdbDailyStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_edb_daily_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class FGSEdbEveryChangeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + time = ArrayField(models.DateTimeField()) + mnemonic_value = ArrayField(models.FloatField()) + median = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + dependency_mnemonic = models.CharField(blank=True, null=True) + dependency_value = models.CharField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_edb_every_change_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class FGSEdbTimeIntervalStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_edb_time_interval_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class FGSEdbTimeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_edb_time_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRIEdbBlocksStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + 
class Meta: + managed = True + db_table = 'miri_edb_blocks_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRIEdbDailyStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_edb_daily_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRIEdbEveryChangeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + time = ArrayField(models.DateTimeField()) + mnemonic_value = ArrayField(models.FloatField()) + median = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + dependency_mnemonic = models.CharField(blank=True, null=True) + dependency_value = models.CharField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_edb_every_change_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRIEdbTimeIntervalStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_edb_time_interval_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRIEdbTimeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_edb_time_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamEdbBlocksStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_edb_blocks_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamEdbDailyStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = 
ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_edb_daily_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamEdbEveryChangeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + time = ArrayField(models.DateTimeField()) + mnemonic_value = ArrayField(models.FloatField()) + median = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + dependency_mnemonic = models.CharField(blank=True, null=True) + dependency_value = models.CharField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_edb_every_change_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamEdbTimeIntervalStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_edb_time_interval_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamEdbTimeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_edb_time_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSEdbBlocksStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_edb_blocks_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSEdbDailyStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_edb_daily_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSEdbEveryChangeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + time = ArrayField(models.DateTimeField()) + mnemonic_value = ArrayField(models.FloatField()) + median = 
models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + dependency_mnemonic = models.CharField(blank=True, null=True) + dependency_value = models.CharField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_edb_every_change_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSEdbTimeIntervalStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_edb_time_interval_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSEdbTimeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_edb_time_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecEdbBlocksStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_edb_blocks_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecEdbDailyStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_edb_daily_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecEdbEveryChangeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + time = ArrayField(models.DateTimeField()) + mnemonic_value = ArrayField(models.FloatField()) + median = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + dependency_mnemonic = models.CharField(blank=True, null=True) + dependency_value = models.CharField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_edb_every_change_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecEdbTimeIntervalStats(models.Model): + mnemonic = models.CharField(blank=True, 
null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_edb_time_interval_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecEdbTimeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_edb_time_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' diff --git a/jwql/website/apps/jwql/monitor_models/grating.py b/jwql/website/apps/jwql/monitor_models/grating.py new file mode 100644 index 000000000..9e4268879 --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/grating.py @@ -0,0 +1,73 @@ +"""Defines the models for the ``jwql`` monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. 
+from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class NIRSpecGratingQueryHistory(models.Model): + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_grating_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecGratingStats(models.Model): + entry_date = models.DateTimeField(unique=True) + time = models.CharField(blank=True, null=True) + inrsh_gwa_adcmgain = models.FloatField(blank=True, null=True) + inrsh_gwa_adcmoffset = models.FloatField(blank=True, null=True) + inrsh_gwa_motor_vref = models.FloatField(blank=True, null=True) + prism_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True) + prism_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True) + mirror_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True) + mirror_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True) + g140h_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True) + g140h_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True) + g235h_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True) + g235h_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True) + g395h_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True) + g395h_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True) + g140m_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True) + g140m_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True) + g235m_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True) + g235m_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True) + g395m_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True) + g395m_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_grating_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' diff --git a/jwql/website/apps/jwql/monitor_models/readnoise.py b/jwql/website/apps/jwql/monitor_models/readnoise.py new file mode 100644 index 000000000..5a500c709 --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/readnoise.py @@ -0,0 +1,335 @@ +"""Defines the models for the ``jwql`` monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. 
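To show the write path for one of the tables defined below, here is a minimal sketch that saves a query-history record. The model and its field names come from this patch; the values are placeholders chosen only to illustrate the field types.

.. code-block:: python

    import datetime

    from jwql.website.apps.jwql.monitor_models.readnoise import FGSReadnoiseQueryHistory

    # Placeholder values; a real monitor run would fill these from its own bookkeeping.
    entry = FGSReadnoiseQueryHistory(
        instrument="fgs",
        aperture="FGS1_FULL",
        start_time_mjd=60000.0,
        end_time_mjd=60010.0,
        entries_found=12,
        files_found=3,
        run_monitor=True,
        entry_date=datetime.datetime.now(),
    )
    entry.save()  # routed to the 'monitors' database by the MonitorRouter in common.py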
+from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class FGSReadnoiseQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_readnoise_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class FGSReadnoiseStats(models.Model): + uncal_filename = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + subarray = models.CharField(blank=True, null=True) + read_pattern = models.CharField(blank=True, null=True) + nints = models.CharField(blank=True, null=True) + ngroups = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + readnoise_filename = models.CharField(blank=True, null=True) + full_image_mean = models.FloatField(blank=True, null=True) + full_image_stddev = models.FloatField(blank=True, null=True) + full_image_n = ArrayField(models.FloatField()) + full_image_bin_centers = ArrayField(models.FloatField()) + readnoise_diff_image = models.CharField(blank=True, null=True) + diff_image_mean = models.FloatField(blank=True, null=True) + diff_image_stddev = models.FloatField(blank=True, null=True) + diff_image_n = ArrayField(models.FloatField()) + diff_image_bin_centers = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + amp1_mean = models.FloatField(blank=True, null=True) + amp1_stddev = models.FloatField(blank=True, null=True) + amp1_n = ArrayField(models.FloatField()) + amp1_bin_centers = ArrayField(models.FloatField()) + amp2_mean = models.FloatField(blank=True, null=True) + amp2_stddev = models.FloatField(blank=True, null=True) + amp2_n = ArrayField(models.FloatField()) + amp2_bin_centers = ArrayField(models.FloatField()) + amp3_mean = models.FloatField(blank=True, null=True) + amp3_stddev = models.FloatField(blank=True, null=True) + amp3_n = ArrayField(models.FloatField()) + amp3_bin_centers = ArrayField(models.FloatField()) + amp4_mean = models.FloatField(blank=True, null=True) + amp4_stddev = models.FloatField(blank=True, null=True) + amp4_n = ArrayField(models.FloatField()) + amp4_bin_centers = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'fgs_readnoise_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRIReadnoiseQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_readnoise_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRIReadnoiseStats(models.Model): + 
uncal_filename = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + subarray = models.CharField(blank=True, null=True) + read_pattern = models.CharField(blank=True, null=True) + nints = models.CharField(blank=True, null=True) + ngroups = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + readnoise_filename = models.CharField(blank=True, null=True) + full_image_mean = models.FloatField(blank=True, null=True) + full_image_stddev = models.FloatField(blank=True, null=True) + full_image_n = ArrayField(models.FloatField()) + full_image_bin_centers = ArrayField(models.FloatField()) + readnoise_diff_image = models.CharField(blank=True, null=True) + diff_image_mean = models.FloatField(blank=True, null=True) + diff_image_stddev = models.FloatField(blank=True, null=True) + diff_image_n = ArrayField(models.FloatField()) + diff_image_bin_centers = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + amp1_mean = models.FloatField(blank=True, null=True) + amp1_stddev = models.FloatField(blank=True, null=True) + amp1_n = ArrayField(models.FloatField()) + amp1_bin_centers = ArrayField(models.FloatField()) + amp2_mean = models.FloatField(blank=True, null=True) + amp2_stddev = models.FloatField(blank=True, null=True) + amp2_n = ArrayField(models.FloatField()) + amp2_bin_centers = ArrayField(models.FloatField()) + amp3_mean = models.FloatField(blank=True, null=True) + amp3_stddev = models.FloatField(blank=True, null=True) + amp3_n = ArrayField(models.FloatField()) + amp3_bin_centers = ArrayField(models.FloatField()) + amp4_mean = models.FloatField(blank=True, null=True) + amp4_stddev = models.FloatField(blank=True, null=True) + amp4_n = ArrayField(models.FloatField()) + amp4_bin_centers = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'miri_readnoise_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamReadnoiseQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_readnoise_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRCamReadnoiseStats(models.Model): + uncal_filename = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + subarray = models.CharField(blank=True, null=True) + read_pattern = models.CharField(blank=True, null=True) + nints = models.CharField(blank=True, null=True) + ngroups = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + readnoise_filename = models.CharField(blank=True, null=True) + full_image_mean = models.FloatField(blank=True, null=True) + full_image_stddev = models.FloatField(blank=True, null=True) + full_image_n = ArrayField(models.FloatField()) + full_image_bin_centers = ArrayField(models.FloatField()) + readnoise_diff_image = models.CharField(blank=True, null=True) + 
diff_image_mean = models.FloatField(blank=True, null=True) + diff_image_stddev = models.FloatField(blank=True, null=True) + diff_image_n = ArrayField(models.FloatField()) + diff_image_bin_centers = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + amp1_mean = models.FloatField(blank=True, null=True) + amp1_stddev = models.FloatField(blank=True, null=True) + amp1_n = ArrayField(models.FloatField()) + amp1_bin_centers = ArrayField(models.FloatField()) + amp2_mean = models.FloatField(blank=True, null=True) + amp2_stddev = models.FloatField(blank=True, null=True) + amp2_n = ArrayField(models.FloatField()) + amp2_bin_centers = ArrayField(models.FloatField()) + amp3_mean = models.FloatField(blank=True, null=True) + amp3_stddev = models.FloatField(blank=True, null=True) + amp3_n = ArrayField(models.FloatField()) + amp3_bin_centers = ArrayField(models.FloatField()) + amp4_mean = models.FloatField(blank=True, null=True) + amp4_stddev = models.FloatField(blank=True, null=True) + amp4_n = ArrayField(models.FloatField()) + amp4_bin_centers = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'nircam_readnoise_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSReadnoiseQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_readnoise_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRISSReadnoiseStats(models.Model): + uncal_filename = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + subarray = models.CharField(blank=True, null=True) + read_pattern = models.CharField(blank=True, null=True) + nints = models.CharField(blank=True, null=True) + ngroups = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + readnoise_filename = models.CharField(blank=True, null=True) + full_image_mean = models.FloatField(blank=True, null=True) + full_image_stddev = models.FloatField(blank=True, null=True) + full_image_n = ArrayField(models.FloatField()) + full_image_bin_centers = ArrayField(models.FloatField()) + readnoise_diff_image = models.CharField(blank=True, null=True) + diff_image_mean = models.FloatField(blank=True, null=True) + diff_image_stddev = models.FloatField(blank=True, null=True) + diff_image_n = ArrayField(models.FloatField()) + diff_image_bin_centers = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + amp1_mean = models.FloatField(blank=True, null=True) + amp1_stddev = models.FloatField(blank=True, null=True) + amp1_n = ArrayField(models.FloatField()) + amp1_bin_centers = ArrayField(models.FloatField()) + amp2_mean = models.FloatField(blank=True, null=True) + amp2_stddev = models.FloatField(blank=True, null=True) + amp2_n = ArrayField(models.FloatField()) + amp2_bin_centers = ArrayField(models.FloatField()) + amp3_mean = models.FloatField(blank=True, null=True) + amp3_stddev = 
models.FloatField(blank=True, null=True) + amp3_n = ArrayField(models.FloatField()) + amp3_bin_centers = ArrayField(models.FloatField()) + amp4_mean = models.FloatField(blank=True, null=True) + amp4_stddev = models.FloatField(blank=True, null=True) + amp4_n = ArrayField(models.FloatField()) + amp4_bin_centers = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'niriss_readnoise_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecReadnoiseQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_readnoise_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecReadnoiseStats(models.Model): + uncal_filename = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + subarray = models.CharField(blank=True, null=True) + read_pattern = models.CharField(blank=True, null=True) + nints = models.CharField(blank=True, null=True) + ngroups = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + readnoise_filename = models.CharField(blank=True, null=True) + full_image_mean = models.FloatField(blank=True, null=True) + full_image_stddev = models.FloatField(blank=True, null=True) + full_image_n = ArrayField(models.FloatField()) + full_image_bin_centers = ArrayField(models.FloatField()) + readnoise_diff_image = models.CharField(blank=True, null=True) + diff_image_mean = models.FloatField(blank=True, null=True) + diff_image_stddev = models.FloatField(blank=True, null=True) + diff_image_n = ArrayField(models.FloatField()) + diff_image_bin_centers = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + amp1_mean = models.FloatField(blank=True, null=True) + amp1_stddev = models.FloatField(blank=True, null=True) + amp1_n = ArrayField(models.FloatField()) + amp1_bin_centers = ArrayField(models.FloatField()) + amp2_mean = models.FloatField(blank=True, null=True) + amp2_stddev = models.FloatField(blank=True, null=True) + amp2_n = ArrayField(models.FloatField()) + amp2_bin_centers = ArrayField(models.FloatField()) + amp3_mean = models.FloatField(blank=True, null=True) + amp3_stddev = models.FloatField(blank=True, null=True) + amp3_n = ArrayField(models.FloatField()) + amp3_bin_centers = ArrayField(models.FloatField()) + amp4_mean = models.FloatField(blank=True, null=True) + amp4_stddev = models.FloatField(blank=True, null=True) + amp4_n = ArrayField(models.FloatField()) + amp4_bin_centers = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'nirspec_readnoise_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' diff --git a/jwql/website/apps/jwql/monitor_models/ta.py b/jwql/website/apps/jwql/monitor_models/ta.py new file mode 100644 index 000000000..e48433dee --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/ta.py @@ -0,0 +1,125 @@ +"""Defines the models for the ``jwql`` TA monitors. 
+ +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. +from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class MIRITaQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_ta_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class MIRITaStats(models.Model): + entry_date = models.DateTimeField(unique=True) + cal_file_name = models.CharField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + targx = models.FloatField(blank=True, null=True) + targy = models.FloatField(blank=True, null=True) + offset = models.FloatField(blank=True, null=True) + full_im_path = models.CharField(blank=True, null=True) + zoom_im_path = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_ta_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecTaQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_ta_query_history' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' + + +class NIRSpecTaStats(models.Model): + entry_date = models.DateTimeField(blank=True, null=True) + uncal_filename = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + subarray = models.CharField(blank=True, null=True) + read_pattern = models.CharField(blank=True, null=True) + nints = models.CharField(blank=True, null=True) + ngroups = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + full_image_mean = 
models.FloatField(blank=True, null=True) + full_image_stddev = models.FloatField(blank=True, null=True) + full_image_n = ArrayField(models.FloatField()) + full_image_bin_centers = ArrayField(models.FloatField()) + diff_image_mean = models.FloatField(blank=True, null=True) + diff_image_stddev = models.FloatField(blank=True, null=True) + diff_image_n = ArrayField(models.FloatField()) + diff_image_bin_centers = ArrayField(models.FloatField()) + amp1_mean = models.FloatField(blank=True, null=True) + amp1_stddev = models.FloatField(blank=True, null=True) + amp1_n = ArrayField(models.FloatField()) + amp1_bin_centers = ArrayField(models.FloatField()) + amp2_mean = models.FloatField(blank=True, null=True) + amp2_stddev = models.FloatField(blank=True, null=True) + amp2_n = ArrayField(models.FloatField()) + amp2_bin_centers = ArrayField(models.FloatField()) + amp3_mean = models.FloatField(blank=True, null=True) + amp3_stddev = models.FloatField(blank=True, null=True) + amp3_n = ArrayField(models.FloatField()) + amp3_bin_centers = ArrayField(models.FloatField()) + amp4_mean = models.FloatField(blank=True, null=True) + amp4_stddev = models.FloatField(blank=True, null=True) + amp4_n = ArrayField(models.FloatField()) + amp4_bin_centers = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'nirspec_ta_stats' + unique_together = (('id', 'entry_date'),) + app_label = 'monitors' diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_bad_pixel_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_bad_pixel_bokeh.py index 5e522f7cd..a3722ce13 100755 --- a/jwql/website/apps/jwql/monitor_pages/monitor_bad_pixel_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_bad_pixel_bokeh.py @@ -24,7 +24,8 @@ from bokeh.embed import components, file_html from bokeh.io import show from bokeh.layouts import layout -from bokeh.models import ColumnDataSource, DatetimeTickFormatter, HoverTool, Legend, LinearColorMapper, Panel, Tabs, Text, Title +from bokeh.models import ColumnDataSource, DatetimeTickFormatter, HoverTool, Legend, LinearColorMapper, Text, Title +from bokeh.models.layouts import Tabs, TabPanel from bokeh.plotting import figure from bokeh.resources import CDN import datetime @@ -149,7 +150,7 @@ def run(self): plot_layout = badpix_monitor_plot_layout(all_plots) # Create a tab for each type of plot - detector_panels.append(Panel(child=plot_layout, title=detector)) + detector_panels.append(TabPanel(child=plot_layout, title=detector)) # Build tabs tabs = Tabs(tabs=detector_panels) @@ -667,12 +668,12 @@ def create_plot(self): self.plot.tools.append(hover_tool) # Make the x axis tick labels look nice - self.plot.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + self.plot.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) self.plot.xaxis.major_label_orientation = np.pi / 4 diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py index 4b5a6a6de..0cd7d31e2 100644 --- a/jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py @@ -28,7 +28,7 @@ from bokeh.embed import components, file_html from bokeh.layouts import layout from bokeh.models 
import ColorBar, ColumnDataSource, DatetimeTickFormatter, HoverTool, Legend, LinearAxis -from bokeh.models.widgets import Tabs, Panel +from bokeh.models.layouts import Tabs, TabPanel from bokeh.plotting import figure, output_file, save from bokeh.resources import CDN from datetime import datetime, timedelta @@ -132,10 +132,10 @@ def retrieve_latest_data(self, aperture): aperture : str Aperture name (e.g. NRCA1_FULL) """ - subq = (session.query(self.stats_table.aperture, func.max(self.stats_table.expstart).label("max_created")) \ - .group_by(self.stats_table.aperture) - .subquery() - ) + subq = (session.query(self.stats_table.aperture, func.max(self.stats_table.expstart).label("max_created")) + .group_by(self.stats_table.aperture) + .subquery() + ) query = (session.query(self.stats_table.aperture, self.stats_table.uncal_filename, @@ -147,10 +147,10 @@ def retrieve_latest_data(self, aperture): self.stats_table.counts, self.stats_table.bin_centers, self.stats_table.entry_date) - .filter(self.stats_table.aperture == aperture) - .order_by(self.stats_table.entry_date) \ - .join(subq, self.stats_table.expstart == subq.c.max_created) - ) + .filter(self.stats_table.aperture == aperture) + .order_by(self.stats_table.entry_date) \ + .join(subq, self.stats_table.expstart == subq.c.max_created) + ) latest_data = query.all() session.close() @@ -168,7 +168,6 @@ def retrieve_latest_data(self, aperture): self.latest_data['expstart'] = datetimes - class BiasMonitorPlots(): """This is the top-level class, which will call the BiasMonitorData class to get results from the bias monitor, and use the plotting @@ -281,7 +280,7 @@ def create_tabs(self): ] ) bias_layout.sizing_mode = 'scale_width' - bias_tab = Panel(child=bias_layout, title=aperture) + bias_tab = TabPanel(child=bias_layout, title=aperture) tabs.append(bias_tab) # Build tabs @@ -416,7 +415,6 @@ def create_plot(self): self.plot = PlaceholderPlot('Calibrated data: Histogram', x_label, y_label).plot - class MedianRowColPlot(): """Class to create a plot of the median signal across rows or columns @@ -509,7 +507,6 @@ def create_plot(self, colname): return plot - class TrendingPlot(): """Class to create trending plots of bias level over time. There should be 4 plots produced: 1 for each amplifier (with even and odd columns plotted in each). 
@@ -565,12 +562,12 @@ def create_amp_plot(self, amp_num, amp_data): alpha=0.75, source=source, legend_label='Odd cols') # Make the x axis tick labels look nice - plot.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + plot.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) plot.xaxis.major_label_orientation = np.pi / 4 @@ -586,7 +583,7 @@ def create_amp_plot(self, amp_num, amp_data): ('Date:', '@expstart_str') ] ) - #hover_tool.formatters = {'@expstart': 'datetime'} + # hover_tool.formatters = {'@expstart': 'datetime'} plot.tools.append(hover_tool) plot.xaxis.axis_label = x_label plot.yaxis.axis_label = y_label diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_cosmic_rays_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_cosmic_rays_bokeh.py index 4160e9b87..a4b03eb9d 100644 --- a/jwql/website/apps/jwql/monitor_pages/monitor_cosmic_rays_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_cosmic_rays_bokeh.py @@ -147,12 +147,12 @@ def history_plot(self): data = fig.scatter(x='x', y='y', line_width=5, line_color='blue', source=source) # Make the x axis tick labels look nice - fig.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + fig.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) fig.xaxis.major_label_orientation = np.pi / 4 fig.yaxis[0].formatter = BasicTickFormatter(use_scientific=True, precision=2) diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py index fb73e401d..1a4e7a670 100755 --- a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py @@ -196,7 +196,10 @@ def create_plot(self): self.plot.grid.grid_line_color = "white" else: # If self.data is empty, then make a placeholder plot - self.plot = PlaceholderPlot(title_str, x_label, y_label).plot + title_str = f'{self.aperture}: Dark Rate Histogram' + x_label = 'Dark Rate (DN/sec)' + y_label = 'Number of Pixels' + self.plot = PlaceholderPlot(title_str, x_label, y_label).create() class DarkImagePlot(): @@ -682,6 +685,7 @@ def create_plot(self): time=self.obstime[use_amp] ) ) + self.plot = figure(title=f'{self.aperture}: Mean +/- 1-sigma Dark Rate', tools='pan,box_zoom,reset,wheel_zoom,save', background_fill_color="#fafafa") @@ -709,12 +713,12 @@ def create_plot(self): legend_label=f'Amp {amp}') # Make the x axis tick labels look nice - self.plot.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + self.plot.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) self.plot.xaxis.major_label_orientation = np.pi / 4 diff --git 
a/jwql/website/apps/jwql/monitor_pages/monitor_readnoise_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_readnoise_bokeh.py index 599be35b0..d78e64de5 100644 --- a/jwql/website/apps/jwql/monitor_pages/monitor_readnoise_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_readnoise_bokeh.py @@ -22,15 +22,16 @@ from bokeh.embed import components from bokeh.layouts import column, row -from bokeh.models import Panel, Tabs # bokeh <= 3.0 from bokeh.models import ColumnDataSource, HoverTool -# from bokeh.models import TabPanel, Tabs # bokeh >= 3.0 +from bokeh.models import TabPanel, Tabs from bokeh.plotting import figure from django.templatetags.static import static import numpy as np -from jwql.database.database_interface import session -from jwql.database.database_interface import FGSReadnoiseStats, MIRIReadnoiseStats, NIRCamReadnoiseStats, NIRISSReadnoiseStats, NIRSpecReadnoiseStats +# PEP8 will undoubtedly complain, but the file is specifically designed so that everything +# importable is a monitor class. +from jwql.website.apps.jwql.monitor_models.readnoise import * + from jwql.utils.constants import FULL_FRAME_APERTURES, JWST_INSTRUMENT_NAMES_MIXEDCASE from jwql.utils.utils import get_config @@ -80,14 +81,7 @@ def load_data(self): # Determine which database tables are needed based on instrument self.identify_tables() - # Query database for all data in readnoise stats with a matching aperture, - # and sort the data by exposure start time. - self.query_results = session.query(self.stats_table) \ - .filter(self.stats_table.aperture == self.aperture) \ - .order_by(self.stats_table.expstart) \ - .all() - - session.close() + self.query_results = list(self.stats_table.objects.filter(aperture__iexact=self.aperture).order_by("expstart").all()) class ReadNoiseFigure(): @@ -122,10 +116,10 @@ def __init__(self, instrument, aperture): self.plot_readnoise_difference_image() self.plot_readnoise_histogram() - self.tab = Panel(child=column(row(*self.amp_plots), - self.diff_image_plot, - self.readnoise_histogram), - title=self.aperture) + self.tab = TabPanel(child=column(row(*self.amp_plots), + self.diff_image_plot, + self.readnoise_histogram), + title=self.aperture) def plot_readnoise_amplifers(self): """Class to create readnoise scatter plots per amplifier. diff --git a/jwql/website/apps/jwql/monitor_views.py b/jwql/website/apps/jwql/monitor_views.py index f4d679a81..79ac6f127 100644 --- a/jwql/website/apps/jwql/monitor_views.py +++ b/jwql/website/apps/jwql/monitor_views.py @@ -39,9 +39,8 @@ import pandas as pd from . 
import bokeh_containers -from jwql.database.database_interface import session -from jwql.database.database_interface import NIRCamClawStats from jwql.website.apps.jwql import bokeh_containers +from jwql.website.apps.jwql.monitor_models.claw import NIRCamClawStats from jwql.website.apps.jwql.monitor_pages.monitor_readnoise_bokeh import ReadNoiseFigure from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE from jwql.utils.utils import get_config, get_base_url @@ -158,8 +157,9 @@ def claw_monitor(request): template = "claw_monitor.html" # Get all recent claw stack images from the last 10 days - query = session.query(NIRCamClawStats.expstart_mjd, NIRCamClawStats.skyflat_filename).order_by(NIRCamClawStats.expstart_mjd.desc()).all() - df = pd.DataFrame(query, columns=['expstart_mjd', 'skyflat_filename']) + query = NIRCamClawStats.objects # .filter(expstart_mjd__gte=(Time.now().mjd - 10)) + query = query.order_by('-expstart_mjd').all().values('expstart_mjd', 'skyflat_filename') + df = pd.DataFrame.from_records(query) recent_files = list(pd.unique(df['skyflat_filename'][df['expstart_mjd'] > Time.now().mjd - 10])) output_dir_claws = static(os.path.join("outputs", "claw_monitor", "claw_stacks")) claw_stacks = [os.path.join(output_dir_claws, filename) for filename in recent_files] diff --git a/jwql/website/apps/jwql/static/css/jwql.css b/jwql/website/apps/jwql/static/css/jwql.css index eb567b25f..26229492a 100644 --- a/jwql/website/apps/jwql/static/css/jwql.css +++ b/jwql/website/apps/jwql/static/css/jwql.css @@ -1,11 +1,36 @@ +.alternate-rows { + width: 100%; + border-collapse: collapse; +} + +.alternate-rows th, .alternate-rows td { + padding: 1px 10px; + border: 1px solid #ddd; +} + +.alternate-rows tr:nth-child(odd) { + background-color:#eee; +} +.alternate-rows tr:nth-child(even) { + background-color:#fff; +} + +.alternate-rows tbody tr:nth-child(even) { + background-color: #f2f2f2; +} + .anomaly_choice { list-style: none; } - + .APT_parameters { width: 20% } - + + .APT_parameters_wide { + width: 50%; + } + .banner { position: absolute; top: 55px; @@ -13,7 +38,7 @@ height: 3rem; overflow: hidden; } - + .banner img{ position: absolute; top: -9999px; @@ -40,7 +65,7 @@ border-radius: 0px !important; text-decoration: none; } - + /*Make outline buttons and highlighted normal buttons white*/ .btn-primary:hover, .btn-primary.active, .btn-outline-primary, .show > .btn-primary.dropdown-toggle, .bk-btn-primary:hover { @@ -50,14 +75,14 @@ border-radius: 0px !important; text-decoration: none; } - + /*Stop them from glowing blue*/ .btn.focus, .btn:active:focus, .btn.active:focus, .btn:active, .btn.active, .show > .btn.dropdown-toggle:focus { box-shadow: none !important; text-decoration: none; } - + [class*="col-"] { padding-top: 1rem; padding-bottom: 1rem; @@ -65,12 +90,42 @@ background-color: rgba(86, 61, 124, .15); border: 1px solid rgba(86, 61, 124, .2);*/ } - + + /* Collapsible text box */ + .collapsible { + width: 100%; + margin: 20px; + } + + .collapsible-btn { + background-color: #c85108 !important; + border-color: #c85108 !important; + color: white !important; + padding: 5px; + cursor: pointer; + border: none; + text-align: left; + outline: none; + border-radius: 0px !important; + text-decoration: none; + } + + .collapsible-content { + display: none; + padding: 10px; + border: 1px solid #3498db; + border-top: none; + } + + .collapsible-content.show { + display: block; + } + .dashboard { margin-left: 2%; margin-right: 2%; } - + /* Show the dropdown menu on hover */ /* DO NOT how the dropdown 
menu on hover if the navbar is collapsed */ @media only screen and (min-width: 1200px) { @@ -91,52 +146,52 @@ pointer-events: none; cursor: default; } - + /* Make disabled sections opaque and unclickable */ .disabled_section { pointer-events: none; opacity: 0.4; } - + /*Define dropdown menu colors*/ .dropdown-item:hover{ background-color: black; } - + .dropdown-menu { background-color: #2d353c; border-radius: 0px; max-height: 400px; overflow-y: auto; } - + .dropdown-menu .dropdown-item { color: white; } - + .dropdown-menu .dropdown-heading { color: #c85108 !important; text-transform: uppercase; } - + .explorer_options { padding-left: 1rem; padding-right: 1rem; } - + /*Stop the search box from glowing blue*/ .form-control:focus { box-shadow: none; border-color: #cfd4da; } - + /*Make sure the thumbnails are actually vertically centered*/ .helper { display: inline-block; height: 100%; vertical-align: middle; } - + .help-tip { text-align: center; background-color: #D0D7D8; @@ -150,40 +205,40 @@ opacity: 0.5; display: inline-block; } - + /*Stop the search box from glowing blue*/ #homepage_filesearch #id_search { width: 500px; height: 100%; padding: 0px; } - + /* START structures for engineering_database page */ - + #mnemonic_name_search { width: 100%; height: 100%; padding: 0px; } - + .mnemonic_name_search_row { display: flex; width: 100%; } - + .mnemonic_name_search_col { padding: 1em; border: 1px solid #F2CE3A; width: 100%; } - + .mnemonic_name_search_col1 { padding: 1em; border: 1px solid #F2CE3A; width: 40%; } - + .mnemonic_query_section { width: 100%; height: 100%; @@ -191,7 +246,7 @@ /*border:solid #000000;*/ border: 1px solid #F2CE3A; } - + .mnemonic_exploration_section { width: 100%; height: 100%; @@ -201,37 +256,37 @@ border: 1px solid #F2CE3A; line-height: 15px } - + .mnemonic_query_field { float:left; width:300px; list-style-type: none; display : inline; } - + /* END structures for engineering_database page */ - - + + #homepage_filesearch #id_search:focus { box-shadow: none; border-color: #cfd4da; } - + /*Make the form fields be inline*/ .homepage_form_fieldWrapper { display: inline; } - + #id_anomaly_choices { list-style: none; padding-left: 0; } - + /*Don't let the search bar be super long*/ .input-group { width: 250px; } - + /*Make the search icon look like a button*/ .input-group-text { background-color: #c85108 !important; @@ -239,7 +294,7 @@ color: white !important; border-radius: 0px; } - + /*Format the color background*/ .instrument-color-fill { display: none; @@ -252,7 +307,7 @@ left: 0%; z-index: 1; } - + /*To make the instrument logos vertically centered*/ .instrument_frame { height: 180px; @@ -262,12 +317,12 @@ position: relative; display: inline-block; } - + /*Make H2 header smaller for select pages*/ #instrument_main h2, .mnemonic_trending_main h2 { font-size: 1.75rem; } - + .instrument-name { font-size: 35px; color: white; @@ -286,72 +341,72 @@ z-index: 2; vertical-align: middle; } - + .instrument_panel { text-align: center; } - + .instrument_panel:hover .instrument-color-fill { display: inline; } - + .instrument_panel:hover .instrument-name { display: inline-block; } - + .instrument_select { padding-top: 1rem; padding-bottom: 2rem; margin-right: 5rem; margin-left: 5rem; } - + .image_preview { display: inline-block; } - + #loading { text-align:center; margin: 0 auto; width: 200px; z-index: 1000; } - + .monitor-name { background-color: #c85108; color: white; width: 100%; height: 100%; } - + /* Change color of dropdown links on hover */ li:hover .nav-link, 
.navbar-brand:hover { color: #fff !important; } - + /* Define navbar color*/ .navbar { background-color: black; } - + /*Define navbar font color and case*/ .nav-link { color: #bec4d4 !important; text-transform: uppercase; } - + /* Set padding around JWST logo*/ .navbar-left { padding-left:10px; padding-right:10px; } - + /* Get rid of padding around GitHub logo */ #github-link, #github-link-collapsed { padding-bottom: 0px; padding-top: 0px; } - + .plot-container { width: 100%; height: 600px; @@ -362,7 +417,7 @@ border-radius: 0px; border-width: 1px; } - + .plot-header { background-color: #c85108 !important; border-color: #c85108 !important ; @@ -372,7 +427,7 @@ border-width: 1px; width: 100%; } - + /*Define the proposal thumbnails*/ .proposal { display: inline-block; @@ -383,11 +438,11 @@ display: inline-block; margin: 0.1rem; } - + .proposal img { filter: grayscale(100%); } - + .proposal-color-fill { width: 100%; height: 100%; @@ -398,16 +453,16 @@ left: 0%; z-index: 1; } - + .proposal:hover { cursor: pointer; } - + .proposal:hover { background-color: #356198; opacity: 0.75; } - + .proposal-info { width: 100%; height: 100%; @@ -423,11 +478,11 @@ z-index: 2; font-size: 0.75rem; } - + .row { margin-bottom: 1rem; } - + .slider{ -webkit-appearance: none; width: 250px; @@ -435,7 +490,7 @@ background: #BEC4D4; outline: none; } - + /* slider style for Chrome/Safari/Opera/Edge */ .slider::-webkit-slider-thumb { -webkit-appearance: none; @@ -445,7 +500,7 @@ background: #C85108; cursor: pointer; } - + /* slider style for Firefox */ .slider::-moz-range-thumb { width: 15px; @@ -453,17 +508,17 @@ background: #C85108; cursor: pointer; } - + /* remove slider outline for Firefox */ .slider::-moz-focus-outer { border: 0; } - + .row .row { margin-top: 1rem; margin-bottom: 0; } - + /*Video for space 404 page*/ #space_404 { position: fixed; @@ -474,7 +529,7 @@ bottom: 0; align: center; } - + #space_404_text { position: fixed; background: rgba(0, 0, 0, 0.5); @@ -484,7 +539,7 @@ padding: 2rem; display: none; } - + .thumbnail { width: 8rem; height: 8rem; @@ -493,7 +548,7 @@ display: inline-block; margin: 0.1rem; } - + /*Format the color background*/ .thumbnail-color-fill { display: none; @@ -506,16 +561,16 @@ left: 0%; z-index: 1; } - + .thumbnail:hover { cursor: pointer; } - + .thumbnail:hover .thumbnail-info, .thumbnail:hover .thumbnail-color-fill { display: inline; } - + .thumbnail img { max-width: 100%; max-height: 100%; @@ -523,7 +578,7 @@ height: auto; vertical-align: middle; } - + /*Format the proposal number and number of files*/ .thumbnail-info { display: none; @@ -540,7 +595,7 @@ color: white; z-index: 2; } - + .thumbnail-staff { width: 15rem; height: 15rem; @@ -564,7 +619,7 @@ position: absolute; font-size: 0.65rem; } - + /*Format the version identifier text in bottom corner*/ #version-div { float: right; @@ -573,25 +628,25 @@ color: white; font-size: 12px } - + /*Add underline for links*/ a { text-decoration: underline; } - + /*Don't add underline for navbar and button links*/ nav a, .btn { text-decoration: none; } - + body { padding-top: 8rem; } - + body { font-family: 'Overpass', sans-serif !important; } - + h1, h2, h3, h4, h5, h6 { font-family: 'Oswald', sans-serif !important; } @@ -601,7 +656,7 @@ h1 { letter-spacing: 0.05em; } - + ul.no-bullets { list-style: none; padding-left:10px; diff --git a/jwql/website/apps/jwql/static/js/jwql.js b/jwql/website/apps/jwql/static/js/jwql.js index 0f4ee63ba..df47c63e7 100644 --- a/jwql/website/apps/jwql/static/js/jwql.js +++ 
b/jwql/website/apps/jwql/static/js/jwql.js @@ -310,6 +310,40 @@ function determine_page_title_obs(instrument, proposal, observation) { } } +/** + * Construct a 4-column table from an input dictionary. The 4 columns + * correspond to: key, value, key, value. + * @param {Object} dictionary - jsonified dictionary + */ +function make_table_from_dict(dictionary) { + var tableBody = document.getElementById("table-body"); + // Extract keys and values from the dictionary + var keys = Object.keys(dictionary); + var values = Object.values(dictionary); + + // Number of dictionary entries to display + //var maxLength = Math.max(keys.length, values.length); + var maxLength = keys.length; + + // Populate the table dynamically, two key/value pairs per row + for (var i = 0; i < maxLength; i+=2) { + // insertRow() both creates the row element and attaches it to the table + var row = tableBody.insertRow(i/2); + var cell1 = row.insertCell(0); + var cell2 = row.insertCell(1); + var cell3 = row.insertCell(2); + var cell4 = row.insertCell(3); + + cell1.textContent = i < keys.length ? keys[i]+':' : ""; + cell2.textContent = i < keys.length ? values[i] : ""; + cell3.textContent = (i+1) < keys.length ? keys[i+1]+':' : ""; + cell4.textContent = (i+1) < keys.length ? values[i+1] : ""; + + tableBody.appendChild(row); + } + return tableBody; +} + /** * adds/removes disabled_section class and clears value * @param {string} element_id @@ -1475,3 +1509,15 @@ function version_url(version_string) { a_line += '">JWQL v' + version_string + ''; return a_line; } + +/** + * Toggle the collapsible content box when its button is clicked + */ +document.addEventListener('DOMContentLoaded', function () { + var collapsibleBtn = document.querySelector('.collapsible-btn'); + var collapsibleContent = document.querySelector('.collapsible-content'); + + collapsibleBtn.addEventListener('click', function () { + collapsibleContent.classList.toggle('show'); + }); +}); diff --git a/jwql/website/apps/jwql/templates/jwql_query.html index 6274c5a83..098bdd4ae 100644 --- a/jwql/website/apps/jwql/templates/jwql_query.html +++ b/jwql/website/apps/jwql/templates/jwql_query.html @@ -211,6 +211,18 @@

Dynamic Query Form

+ +
+ FGS Subarrays +
+ {% for field in form.fgs_subarray %} +
+ {{ field }} +
+ {% endfor %} +
+
+
@@ -546,6 +558,18 @@

Dynamic Query Form

+ +
+ NIRSpec Subarrays +
+ {% for field in form.nirspec_subarray %} +
+ {{ field }} +
+ {% endfor %} +
+
+

diff --git a/jwql/website/apps/jwql/templates/view_exposure.html b/jwql/website/apps/jwql/templates/view_exposure.html index a96c12153..7b9d71856 100644 --- a/jwql/website/apps/jwql/templates/view_exposure.html +++ b/jwql/website/apps/jwql/templates/view_exposure.html @@ -12,12 +12,48 @@

Exposure {{ group_root }}

- -
-
Proposal:
-
Observation:
-
Visit:
- + +
+
Proposal:
+
Observation:
+
Visit:
+
+ +
+
+
Visit Status: {{ basic_info.visit_status }}
+
Category: {{ basic_info.category }}
+ +
Subarray: {{ basic_info.subarray }}
+
Viewed: {{ marked_viewed }}
+
+
+
Filter: {{ basic_info.filter }}
+ {% if 'pupil' in basic_info %} +
Pupil: {{ basic_info.pupil }}
+ {% endif %} + {% if 'grating' in basic_info %} +
Grating: {{ basic_info.grating }}
+ {% endif %} +
Exp Start: {{ expstart_str }}
+
+ +
+ +
+ + + + + + +
+
+
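The ``{{ expstart_str }}`` value rendered by this template is produced in the ``views.py`` changes later in this patch, which convert the exposure start time from MJD using ``astropy.time.Time``. A minimal sketch of that conversion (the helper name here is illustrative only, not part of the patch)::

    from astropy.time import Time

    def format_expstart(expstart_mjd):
        """Convert an exposure start time in MJD to the display string used by the templates."""
        return Time(expstart_mjd, format='mjd').to_datetime().strftime('%d %b %Y %H:%M')

    # For example, format_expstart(59945.5) returns '01 Jan 2023 12:00'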
diff --git a/jwql/website/apps/jwql/templates/view_image.html b/jwql/website/apps/jwql/templates/view_image.html index 4355eb878..90dd52c87 100644 --- a/jwql/website/apps/jwql/templates/view_image.html +++ b/jwql/website/apps/jwql/templates/view_image.html @@ -13,15 +13,50 @@

{{ file_root }}

-
+
Proposal:
Observation:
Visit:
Detector:
-
- FITS Filename:
- JPG Filename:

+
+
+
Visit Status: {{ basic_info.visit_status }}
+
Category: {{ basic_info.category }}
+ +
Subarray: {{ basic_info.subarray }}
+
Viewed: {{ marked_viewed }}
+
+
+
Filter: {{ basic_info.filter }}
+ {% if 'pupil' in basic_info %} +
Pupil: {{ basic_info.pupil }}
+ {% endif %} + {% if 'grating' in basic_info %} +
Grating: {{ basic_info.grating }}
+ {% endif %} +
Exp Start: {{ expstart_str }}
+
+ +
+ +
+ + + + + + +
+
+
+ +
+ FITS Filename:   JPG Filename:

View File Type: @@ -44,6 +79,7 @@

{{ file_root }}

+
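Both ``view_exposure.html`` and ``view_image.html`` read from a ``basic_info`` dict and hand ``additional_info`` to the new collapsible box; the two dicts come from ``get_additional_exposure_info()`` in ``data_containers.py``, which is not shown in this patch. The sketch below is a purely hypothetical illustration of the shape the templates expect, with key names taken from the template lookups above and placeholder values::

    # Hypothetical illustration only -- get_additional_exposure_info() is not part of this patch.
    basic_info = {
        'visit_status': 'SUCCESSFUL',
        'category': 'GO',
        'subarray': 'FULL',
        'filter': 'F200W',
        'pupil': 'CLEAR',   # optional; the template guards it with {% if 'pupil' in basic_info %}
    }

    # additional_info is an arbitrary key/value mapping, presumably rendered into the
    # collapsible table by the new make_table_from_dict() JavaScript helper.
    additional_info = {'keyword_1': 'value_1', 'keyword_2': 'value_2'}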
diff --git a/jwql/website/apps/jwql/tests/test_context_processors.py b/jwql/website/apps/jwql/tests/test_context_processors.py index 1d4fc5951..b1ed31510 100644 --- a/jwql/website/apps/jwql/tests/test_context_processors.py +++ b/jwql/website/apps/jwql/tests/test_context_processors.py @@ -24,7 +24,7 @@ import os from unittest import skipIf -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS if not ON_GITHUB_ACTIONS: from jwql.website.apps.jwql import context_processors diff --git a/jwql/website/apps/jwql/views.py b/jwql/website/apps/jwql/views.py index 3fc36eaa7..0ea0281ad 100644 --- a/jwql/website/apps/jwql/views.py +++ b/jwql/website/apps/jwql/views.py @@ -51,21 +51,25 @@ import operator import socket +from astropy.time import Time from bokeh.layouts import layout from bokeh.embed import components from django.core.paginator import Paginator from django.http import HttpResponse, JsonResponse from django.shortcuts import redirect, render +import numpy as np from sqlalchemy import inspect from jwql.database.database_interface import load_connection from jwql.utils import monitor_utils from jwql.utils.interactive_preview_image import InteractivePreviewImg from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE, URL_DICT, QUERY_CONFIG_TEMPLATE, QueryConfigKeys -from jwql.utils.utils import filename_parser, get_base_url, get_config, get_rootnames_for_instrument_proposal, query_unformat +from jwql.utils.utils import filename_parser, filesystem_path, get_base_url, get_config +from jwql.utils.utils import get_rootnames_for_instrument_proposal, query_unformat from .data_containers import build_table from .data_containers import get_acknowledgements +from .data_containers import get_additional_exposure_info from .data_containers import get_available_suffixes from .data_containers import get_anomaly_form from .data_containers import get_dashboard_components @@ -1169,6 +1173,13 @@ def view_exposure(request, inst, group_root): root_file_info = RootFileInfo.objects.filter(root_name__startswith=group_root) viewed = all([rf.viewed for rf in root_file_info]) + # Convert expstart from MJD to a date + expstart_str = Time(root_file_info[0].expstart, format='mjd').to_datetime().strftime('%d %b %Y %H:%M') + + # Create one dict of info to show at the top of the page, and another dict of info + # to show in the collapsible text box. + basic_info, additional_info = get_additional_exposure_info(root_file_info, image_info) + # Build the context context = {'base_url': get_base_url(), 'group_root_list': group_root_list, @@ -1182,7 +1193,10 @@ def view_exposure(request, inst, group_root): 'total_ints': image_info['total_ints'], 'detectors': sorted(image_info['detectors']), 'form': form, - 'marked_viewed': viewed} + 'marked_viewed': viewed, + 'expstart_str': expstart_str, + 'basic_info': basic_info, + 'additional_info': additional_info} return render(request, template, context) @@ -1258,6 +1272,13 @@ def view_image(request, inst, file_root): # Get our current views RootFileInfo model and send our "viewed/new" information root_file_info = RootFileInfo.objects.get(root_name=file_root) + # Convert expstart from MJD to a date + expstart_str = Time(root_file_info.expstart, format='mjd').to_datetime().strftime('%d %b %Y %H:%M') + + # Create one dict of info to show at the top of the page, and another dict of info + # to show in the collapsible text box. 
+ basic_info, additional_info = get_additional_exposure_info(root_file_info, image_info) + # Build the context context = {'base_url': get_base_url(), 'file_root_list': file_root_list, @@ -1270,6 +1291,9 @@ def view_image(request, inst, file_root): 'available_ints': image_info['available_ints'], 'total_ints': image_info['total_ints'], 'form': form, - 'marked_viewed': root_file_info.viewed} + 'marked_viewed': root_file_info.viewed, + 'expstart_str': expstart_str, + 'basic_info': basic_info, + 'additional_info': additional_info} return render(request, template, context) diff --git a/jwql/website/jwql_proj/settings.py b/jwql/website/jwql_proj/settings.py index 6dc070ca7..c0e72240f 100644 --- a/jwql/website/jwql_proj/settings.py +++ b/jwql/website/jwql_proj/settings.py @@ -95,12 +95,12 @@ ] MESSAGE_TAGS = { - messages.DEBUG: 'alert-secondary', - messages.INFO: 'alert-info', - messages.SUCCESS: 'alert-success', - messages.WARNING: 'alert-warning', - messages.ERROR: 'alert-danger', - } + messages.DEBUG: 'alert-secondary', + messages.INFO: 'alert-info', + messages.SUCCESS: 'alert-success', + messages.WARNING: 'alert-warning', + messages.ERROR: 'alert-danger', +} WSGI_APPLICATION = 'jwql.website.jwql_proj.wsgi.application' @@ -108,8 +108,10 @@ # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { - 'default': get_config()['django_database'] + 'default': get_config()['django_databases']['default'], + 'monitors': get_config()['django_databases']['monitors'] } +DATABASE_ROUTERS = ["jwql.website.apps.jwql.monitor_models.common.MonitorRouter"] # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators diff --git a/jwql/website/manage.py b/jwql/website/manage.py index 805e86b22..39009f26a 100755 --- a/jwql/website/manage.py +++ b/jwql/website/manage.py @@ -47,7 +47,7 @@ 'outputs': 'outputs', 'preview_image_filesystem': 'preview_images', 'thumbnail_filesystem': 'thumbnails' - } + } for directory in ['filesystem', 'outputs', 'preview_image_filesystem', 'thumbnail_filesystem']: symlink_location = os.path.join(os.path.dirname(__file__), 'apps', 'jwql', 'static', directory_mapping[directory]) diff --git a/pyproject.toml b/pyproject.toml index 24f701717..b17920154 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,7 @@ dependencies = [ "asdf", "astropy", "astroquery", - "bokeh<3", + "bokeh>=3", "crds", "cryptography", "django", diff --git a/requirements.txt b/requirements.txt index 2623220f8..0edf7d9ea 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ astropy==5.3.3 astroquery==0.4.6 bandit==1.7.5 beautifulsoup4==4.12.2 -bokeh==2.4.3 +bokeh==3.3.0 celery==5.3.4 cryptography==41.0.7 django==4.2.5
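The Bokeh changes scattered through this patch all follow the same 2.x to 3.x migration pattern: ``Panel`` becomes ``TabPanel``, and each ``DatetimeTickFormatter`` field takes a single format string rather than a list. A minimal stand-alone sketch of the new idioms (not code from the repository)::

    from bokeh.layouts import column
    from bokeh.models import DatetimeTickFormatter, TabPanel, Tabs  # Panel was renamed TabPanel in Bokeh 3
    from bokeh.plotting import figure

    fig = figure(title='example', x_axis_type='datetime')

    # Bokeh 3 formatter fields accept one format string per resolution, not a list of strings
    fig.xaxis.formatter = DatetimeTickFormatter(hours="%d %b %H:%M",
                                                days="%d %b %H:%M",
                                                months="%d %b %Y %H:%M",
                                                years="%d %b %Y")

    tabs = Tabs(tabs=[TabPanel(child=column(fig), title='example tab')])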