elements by default, accessible as @el. Many
+    // Bokeh views ignore this default <div>, and instead do things like draw
+    // to the HTML canvas. In this case though, we use the <div> to attach a
+    // Graph3d to the DOM.
+ this._graph = new vis.Graph3d(this.el, this.get_data(), this.model.options)
+ }
+
+ connect_signals(): void {
+ super.connect_signals()
+ // Set listener so that when the Bokeh data source has a change
+ // event, we can process the new data
+ this.connect(this.model.data_source.change, () => this._graph.setData(this.get_data()))
+ }
+
+  // This is the callback executed when the Bokeh data has a change (e.g. when
+  // the server updates the data). Its basic function is simply to adapt the
+  // Bokeh data source to the vis.js DataSet format.
+ get_data(): vis.DataSet {
+ const data = new vis.DataSet()
+ const source = this.model.data_source
+ for (let i = 0; i < source.get_length()!; i++) {
+ data.add({
+ x: source.data[this.model.x][i],
+ y: source.data[this.model.y][i],
+ z: source.data[this.model.z][i],
+ })
+ }
+ return data
+ }
+}
+
+// We must also create a corresponding JavaScript model subclass for the
+// Python Bokeh model subclass. In this case, since we want
+// an element that can position itself in the DOM according to a Bokeh layout,
+// we subclass from ``HTMLBox``
+
+export namespace Surface3d {
+  export type Attrs = p.AttrsOf<Props>
+
+ export type Props = HTMLBox.Props & {
+    x: p.Property<string>
+    y: p.Property<string>
+    z: p.Property<string>
+    data_source: p.Property<ColumnDataSource>
+ options: p.Property<{[key: string]: unknown}>
+ }
+}
+
+export interface Surface3d extends Surface3d.Attrs {}
+
+export class Surface3d extends HTMLBox {
+ properties: Surface3d.Props
+ __view_type__: Surface3dView
+
+  constructor(attrs?: Partial<Surface3d.Attrs>) {
+ super(attrs)
+ }
+
+ // The ``__name__`` class attribute should generally match exactly the name
+ // of the corresponding Python class. Note that if using TypeScript, this
+  // will be automatically filled in during compilation, so except in some
+  // special cases it generally shouldn't be included manually, to avoid
+  // typos, which would prevent serialization/deserialization of this model.
+ static __name__ = "Surface3d"
+
+ static init_Surface3d(): void {
+ // This is usually boilerplate. In some cases there may not be a view.
+ this.prototype.default_view = Surface3dView
+
+ // The @define block adds corresponding "properties" to the JS model. These
+ // should basically line up 1-1 with the Python model class. Most property
+ // types have counterparts, e.g. ``bokeh.core.properties.String`` will be
+    // ``p.String`` in the JS implementation. Where the JS type system is not yet
+ // as rich, you can use ``p.Any`` as a "wildcard" property type.
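+    // For example, a Python declaration such as
+    // ``data_source = Instance(ColumnDataSource)`` maps to the
+    // ``data_source: [ p.Instance ]`` entry below.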
+ this.define({
+ x: [ p.String ],
+ y: [ p.String ],
+ z: [ p.String ],
+ data_source: [ p.Instance ],
+ options: [ p.Any, OPTIONS ],
+ })
+ }
+}
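+
+// For reference, the corresponding Python model is expected to look roughly
+// like the sketch below (property names mirror the ``Props`` declared above;
+// the ``__implementation__`` path is an assumption):
+//
+//     class Surface3d(HTMLBox):
+//         __implementation__ = "surface3d.ts"
+//         data_source = Instance(ColumnDataSource)
+//         x = String(default="x")
+//         y = String(default="y")
+//         z = String(default="z")
+//         options = Dict(String, Any)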
diff --git a/jwql/bokeh_templating/template.py b/jwql/bokeh_templating/template.py
index 35fe6ccf7..073d67782 100644
--- a/jwql/bokeh_templating/template.py
+++ b/jwql/bokeh_templating/template.py
@@ -1,117 +1,122 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
"""
-Created on Fri Jul 20 09:49:53 2018
+This module defines the ``BokehTemplate`` class, which can be subclassed
+to create a Bokeh web app with a YAML templating file.
-@author: gkanarek
+
+Author
+-------
+
+ - Graham Kanarek
+
+Use
+---
+
+ The user should subclass the ``BokehTemplate`` class to create an
+ app, as demonstrated in ``example.py``.
+
+ (A full tutorial on developing Bokeh apps with ``BokehTemplate`` is
+ forthcoming.)
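+
+    A minimal sketch of such a subclass (the module path, class name,
+    and interface file name here are illustrative) might look like:
+
+    ::
+
+        from jwql.bokeh_templating.template import BokehTemplate
+
+        class MyApp(BokehTemplate):
+
+            def pre_init(self):
+                self.format_string = None
+                self.interface_file = "my_interface.yaml"
+
+            post_init = None
+
+        MyApp()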
+
+
+Dependencies
+------------
+
+ The user must have Bokeh and PyYAML installed.
"""
import yaml
import os
from . import factory
from bokeh.embed import components
-
-
-class BokehTemplateParserError(Exception):
- """
- A custom error for problems with parsing the interface files.
- """
-
-
-class BokehTemplateEmbedError(Exception):
- """
- A custom error for problems with embedding components.
- """
+from inspect import signature
class BokehTemplate(object):
- """
- This is the base class for creating Bokeh web apps using a YAML templating
- framework.
+ """The base class for creating Bokeh web apps using a YAML
+ templating framework.
+
+ Attributes
+ ----------
+ _embed : bool
+ A flag to indicate whether or not the individual widgets will be
+ embedded in a webpage. If ``False``, the YAML interface file
+        must include a ``!Document`` tag. Defaults to ``False``.
+    document : obj
+        The Bokeh Document object (if any), equivalent to the result of
+        calling ``curdoc()``.
+    formats : dict
+        A dictionary of widget formatting specifications, parsed from
+        ``format_string`` (if one exists).
+    format_string : str
+        A string of YAML formatting specifications, using the same
+        syntax as the interface file, for Bokeh widgets. Note that
+        formatting choices present in individual widget instances in the
+        interface file override these.
+    interface_file : str
+ The path to the YAML interface file.
+ refs : dict
+ A dictionary of Bokeh objects which are given ``ref`` strings in
+ the interface file. Use this to store and interact with the
+ Bokeh data sources and widgets in callback methods.
+
+ Methods
+ -------
+ ``_mapping_factory``, ``_sequence_factory``,
+ ``_figure_constructor``, and ``_document_constructor`` are imported
+    from ``bokeh_templating.factory`` and are used by the interface
+    parser to construct Bokeh widgets.
"""
+ # Each of these functions has a ``tool`` argument, which becomes ``self``
+ # when they are stored as methods. This way, the YAML constructors can
+ # store the Bokeh objects in the ``tool.ref`` dictionary, and can access
+ # the formatting string, if any. See ``factory.py`` for more details.
_mapping_factory = factory.mapping_factory
_sequence_factory = factory.sequence_factory
_figure_constructor = factory.figure_constructor
_document_constructor = factory.document_constructor
-
_embed = False
-
- def _self_constructor(self, loader, tag_suffix, node):
- """
- A multi_constructor for `!self` tag in the interface file.
- """
- yield eval("self" + tag_suffix, globals(), locals())
-
- def _register_default_constructors(self):
- for m in factory.mappings:
- yaml.add_constructor("!" + m + ":", self._mapping_factory(m))
-
- for s in factory.sequences:
- yaml.add_constructor("!" + s + ":", self._sequence_factory(s))
-
- yaml.add_constructor("!Figure:", self._figure_constructor)
- yaml.add_constructor("!Document:", self._document_constructor)
- yaml.add_multi_constructor(u"!self", self._self_constructor)
-
- def pre_init(self):
- """
- This should be implemented by the app subclass, to do any pre-
- initialization steps that it requires (setting defaults, loading
- data, etc).
-
- If this is not required, subclass should set `pre_init = None`
- in the class definition.
- """
-
- raise NotImplementedError
-
- def post_init(self):
- """
- This should be implemented by the app subclass, to do any post-
- initialization steps that the tool requires.
-
- If this is not required, subclass should set `post_init = None`
- in the class definition.
- """
-
- raise NotImplementedError
-
- def __init__(self):
+ document = None
+ format_string = ""
+ formats = {}
+ interface_file = ""
+ refs = {}
+
+ def __init__(self, **kwargs):
+ # Register the default constructors
self._register_default_constructors()
- # Allow for pre-init stuff from the subclass.
+ # Allow for pre-initialization code from the subclass.
if self.pre_init is not None:
- self.pre_init()
+ if signature(self.pre_init).parameters:
+                # Calling pre_init with keyword arguments would raise a
+                # TypeError if its signature accepts none, so check the
+                # signature with inspect.signature first
+ self.pre_init(**kwargs)
+ else:
+ self.pre_init()
# Initialize attributes for YAML parsing
self.formats = {}
self.refs = {}
- self.document = None
# Parse formatting string, if any, and the interface YAML file
- self.include_formatting()
- self.parse_interface()
+ self._include_formatting()
+ self._parse_interface()
# Allow for post-init stuff from the subclass.
if self.post_init is not None:
self.post_init()
- def include_formatting(self):
- """
- This should simply be a dictionary of formatting keywords at the end.
- """
+ def _include_formatting(self):
+ """A utility function to parse the format string, if any."""
if not self.format_string:
return
- self.formats = yaml.load(self.format_string)
+ self.formats = yaml.load(self.format_string, Loader=yaml.Loader)
- def parse_interface(self):
- """
- This is the workhorse YAML parser, which creates the interface based
- on the layout file.
-
- `interface_file` is the path to the interface .yaml file to be parsed.
+ def _parse_interface(self):
+ """Parse the YAML interface file using the registered
+        constructors.
"""
if not self.interface_file:
@@ -124,27 +129,102 @@ def parse_interface(self):
with open(filepath) as f:
interface = f.read()
- # First, let's make sure that there's a Document in here
+        # If necessary, verify that the interface string contains a ``!Document`` tag
if not self._embed and '!Document' not in interface:
raise BokehTemplateParserError("Interface file must contain a Document tag")
# Now, since we've registered all the constructors, we can parse the
# entire string with yaml. We don't need to assign the result to a
# variable, since the constructors store everything in self.refs
- # (and self.document, for the document)
+ # (and self.document, for the document).
+        try:
+            # yaml.load_all returns a generator, so wrap it in list() to
+            # ensure the documents are parsed and the constructors invoked
+            list(yaml.load_all(interface))
+ except yaml.YAMLError as exc:
+ raise BokehTemplateParserError(exc)
- self.full_stream = list(yaml.load_all(interface))
+ def _register_default_constructors(self):
+ """Register all the default constructors with
+ ``yaml.add_constructor``.
+ """
+ for m in factory.mappings:
+ yaml.add_constructor("!" + m + ":", self._mapping_factory(m))
- def parse_string(self, yaml_string):
- return list(yaml.load_all(yaml_string))
+ for s in factory.sequences:
+ yaml.add_constructor("!" + s + ":", self._sequence_factory(s))
+
+ yaml.add_constructor("!Figure:", self._figure_constructor)
+ yaml.add_constructor("!Document:", self._document_constructor)
+ yaml.add_multi_constructor(u"!self", self._self_constructor)
+
+ def _self_constructor(self, loader, tag_suffix, node):
+        """A multi_constructor for the ``!self`` tag in the interface file."""
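+        # For example, a YAML node tagged ``!self.my_callback`` (name
+        # illustrative) evaluates to the bound method ``self.my_callback``
+        # of the subclass instance.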
+ yield eval("self" + tag_suffix, globals(), locals())
def embed(self, ref):
+ """A wrapper for ``bokeh.embed.components`` to return embeddable
+ code for the given widget reference."""
element = self.refs.get(ref, None)
if element is None:
raise BokehTemplateEmbedError("Undefined component reference")
return components(element)
- def register_sequence_constructor(self, tag, parse_func):
+ @staticmethod
+ def parse_string(yaml_string):
+        """A utility function to parse any YAML string using the
+        registered constructors. (Usually used for debugging.)"""
+ return list(yaml.load_all(yaml_string))
+
+ def post_init(self):
+ """This should be implemented by the app subclass, to perform
+ any post-initialization actions that the tool requires.
+
+ If this is not required, the subclass should set
+ `post_init = None` in the class definition.
+ """
+
+ raise NotImplementedError
+
+ def pre_init(self, **kwargs):
+ """This should be implemented by the app subclass, to perform
+ any pre-initialization actions that it requires (setting
+ defaults, loading data, etc). Note that positional arguments are
+ not currently supported.
+
+ If this is not required, the subclass should set
+ `pre_init = None` in the class definition.
+ """
+
+ raise NotImplementedError
+
+ @classmethod
+ def register_sequence_constructor(cls, tag, parse_func):
+ """
+ Register a new sequence constructor with YAML.
+
+ Parameters
+ ----------
+ tag : str
+ The YAML tag string to be used for the constructor.
+        parse_func : object
+            The parsing function to be registered with YAML. This
+            function should accept a multi-line string, and return a
+            Python object.
+
+ Usage
+ -----
+ This classmethod should be used to register a new constructor
+        *before* creating and instantiating a subclass of ``BokehTemplate``:
+
+ ::
+
+ from bokeh_template import BokehTemplate
+ BokehTemplate.register_sequence_constructor("my_tag", my_parser)
+
+ class myTool(BokehTemplate):
+ pass
+
+ myTool()
+ """
if tag.startswith("!"):
tag = tag[1:]
@@ -154,7 +234,35 @@ def user_constructor(loader, node):
user_constructor.__name__ = tag.lower() + "_constructor"
yaml.add_constructor("!" + tag, user_constructor)
- def register_mapping_constructor(self, tag, parse_func):
+ @classmethod
+ def register_mapping_constructor(cls, tag, parse_func):
+ """
+ Register a new mapping constructor with YAML.
+
+ Parameters
+ ----------
+ tag : str
+ The YAML tag string to be used for the constructor.
+        parse_func : object
+            The parsing function to be registered with YAML. This
+            function should accept a multi-line string, and return a
+            Python object.
+
+ Usage
+ -----
+ This classmethod should be used to register a new constructor
+        *before* creating and instantiating a subclass of ``BokehTemplate``:
+
+ ::
+
+ from bokeh_template import BokehTemplate
+ BokehTemplate.register_mapping_constructor("my_tag", my_parser)
+
+ class myTool(BokehTemplate):
+ pass
+
+ myTool()
+ """
if tag.startswith("!"):
tag = tag[1:]
@@ -163,3 +271,11 @@ def user_constructor(loader, node):
yield parse_func(value)
user_constructor.__name__ = tag.lower() + "_constructor"
yaml.add_constructor("!" + tag, user_constructor)
+
+
+class BokehTemplateEmbedError(Exception):
+ """A custom error for problems with embedding components."""
+
+
+class BokehTemplateParserError(Exception):
+ """A custom error for problems with parsing the interface files."""
diff --git a/jwql/database/database_interface.py b/jwql/database/database_interface.py
index e376ff2db..855a5c67e 100755
--- a/jwql/database/database_interface.py
+++ b/jwql/database/database_interface.py
@@ -79,7 +79,7 @@
from sqlalchemy.orm.query import Query
from sqlalchemy.types import ARRAY
-from jwql.utils.constants import ANOMALIES, FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES
+from jwql.utils.constants import ANOMALIES_PER_INSTRUMENT, FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES
from jwql.utils.utils import get_config
ON_JENKINS = '/home/jenkins' in os.path.expanduser('~')
@@ -218,7 +218,7 @@ class Monitor(base):
monitor_name = Column(String(), nullable=False)
start_time = Column(DateTime, nullable=False)
end_time = Column(DateTime, nullable=True)
- status = Column(Enum('SUCESS', 'FAILURE', name='monitor_status'), nullable=True)
+ status = Column(Enum('SUCCESS', 'FAILURE', name='monitor_status'), nullable=True)
affected_tables = Column(ARRAY(String, dimensions=1), nullable=True)
log_file = Column(String(), nullable=False)
@@ -241,8 +241,14 @@ class : obj
data_dict = {}
data_dict['__tablename__'] = class_name.lower()
+ instrument = data_dict['__tablename__'].split('_')[0]
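+    # ANOMALIES_PER_INSTRUMENT maps each anomaly type to the instruments it
+    # applies to; keep only the anomalies defined for this instrument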
+ instrument_anomalies = []
+ for anomaly in ANOMALIES_PER_INSTRUMENT:
+ if instrument in ANOMALIES_PER_INSTRUMENT[anomaly]:
+ instrument_anomalies.append(anomaly)
+
# Define anomaly table column names
- data_dict['columns'] = ANOMALIES
+ data_dict['columns'] = instrument_anomalies
data_dict['names'] = [name.replace('_', ' ') for name in data_dict['columns']]
# Create a table with the appropriate Columns
@@ -381,7 +387,11 @@ class : obj
# Create tables from ORM factory
-Anomaly = anomaly_orm_factory('anomaly')
+NIRCamAnomaly = anomaly_orm_factory('nircam_anomaly')
+NIRISSAnomaly = anomaly_orm_factory('niriss_anomaly')
+NIRSpecAnomaly = anomaly_orm_factory('nirspec_anomaly')
+MIRIAnomaly = anomaly_orm_factory('miri_anomaly')
+FGSAnomaly = anomaly_orm_factory('fgs_anomaly')
NIRCamDarkQueryHistory = monitor_orm_factory('nircam_dark_query_history')
NIRCamDarkPixelStats = monitor_orm_factory('nircam_dark_pixel_stats')
NIRCamDarkDarkCurrent = monitor_orm_factory('nircam_dark_dark_current')
@@ -397,7 +407,22 @@ class : obj
FGSDarkQueryHistory = monitor_orm_factory('fgs_dark_query_history')
FGSDarkPixelStats = monitor_orm_factory('fgs_dark_pixel_stats')
FGSDarkDarkCurrent = monitor_orm_factory('fgs_dark_dark_current')
-
+NIRCamBiasQueryHistory = monitor_orm_factory('nircam_bias_query_history')
+NIRCamBiasStats = monitor_orm_factory('nircam_bias_stats')
+NIRCamBadPixelQueryHistory = monitor_orm_factory('nircam_bad_pixel_query_history')
+NIRCamBadPixelStats = monitor_orm_factory('nircam_bad_pixel_stats')
+NIRISSBadPixelQueryHistory = monitor_orm_factory('niriss_bad_pixel_query_history')
+NIRISSBadPixelStats = monitor_orm_factory('niriss_bad_pixel_stats')
+FGSBadPixelQueryHistory = monitor_orm_factory('fgs_bad_pixel_query_history')
+FGSBadPixelStats = monitor_orm_factory('fgs_bad_pixel_stats')
+MIRIBadPixelQueryHistory = monitor_orm_factory('miri_bad_pixel_query_history')
+MIRIBadPixelStats = monitor_orm_factory('miri_bad_pixel_stats')
+NIRSpecBadPixelQueryHistory = monitor_orm_factory('nirspec_bad_pixel_query_history')
+NIRSpecBadPixelStats = monitor_orm_factory('nirspec_bad_pixel_stats')
+NIRCamReadnoiseQueryHistory = monitor_orm_factory('nircam_readnoise_query_history')
+NIRCamReadnoiseStats = monitor_orm_factory('nircam_readnoise_stats')
+NIRISSReadnoiseQueryHistory = monitor_orm_factory('niriss_readnoise_query_history')
+NIRISSReadnoiseStats = monitor_orm_factory('niriss_readnoise_stats')
if __name__ == '__main__':
diff --git a/jwql/database/monitor_table_definitions/fgs/fgs_bad_pixel_query_history.txt b/jwql/database/monitor_table_definitions/fgs/fgs_bad_pixel_query_history.txt
new file mode 100644
index 000000000..09dfa7321
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/fgs/fgs_bad_pixel_query_history.txt
@@ -0,0 +1,11 @@
+INSTRUMENT, string
+APERTURE, string
+DARK_START_TIME_MJD, float
+DARK_END_TIME_MJD, float
+FLAT_START_TIME_MJD, float
+FLAT_END_TIME_MJD, float
+DARK_FILES_FOUND, integer
+FLAT_FILES_FOUND, integer
+RUN_BPIX_FROM_DARKS, bool
+RUN_BPIX_FROM_FLATS, bool
+RUN_MONITOR, bool
diff --git a/jwql/database/monitor_table_definitions/fgs/fgs_bad_pixel_stats.txt b/jwql/database/monitor_table_definitions/fgs/fgs_bad_pixel_stats.txt
new file mode 100644
index 000000000..f0b0ed6e2
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/fgs/fgs_bad_pixel_stats.txt
@@ -0,0 +1,9 @@
+DETECTOR, string
+X_COORD, integer_array_1d
+Y_COORD, integer_array_1d
+TYPE, string
+SOURCE_FILES, string_array_1d
+OBS_START_TIME, datetime
+OBS_MID_TIME, datetime
+OBS_END_TIME, datetime
+BASELINE_FILE, string
diff --git a/jwql/database/monitor_table_definitions/miri/miri_bad_pixel_query_history.txt b/jwql/database/monitor_table_definitions/miri/miri_bad_pixel_query_history.txt
new file mode 100644
index 000000000..09dfa7321
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/miri/miri_bad_pixel_query_history.txt
@@ -0,0 +1,11 @@
+INSTRUMENT, string
+APERTURE, string
+DARK_START_TIME_MJD, float
+DARK_END_TIME_MJD, float
+FLAT_START_TIME_MJD, float
+FLAT_END_TIME_MJD, float
+DARK_FILES_FOUND, integer
+FLAT_FILES_FOUND, integer
+RUN_BPIX_FROM_DARKS, bool
+RUN_BPIX_FROM_FLATS, bool
+RUN_MONITOR, bool
diff --git a/jwql/database/monitor_table_definitions/miri/miri_bad_pixel_stats.txt b/jwql/database/monitor_table_definitions/miri/miri_bad_pixel_stats.txt
new file mode 100644
index 000000000..f0b0ed6e2
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/miri/miri_bad_pixel_stats.txt
@@ -0,0 +1,9 @@
+DETECTOR, string
+X_COORD, integer_array_1d
+Y_COORD, integer_array_1d
+TYPE, string
+SOURCE_FILES, string_array_1d
+OBS_START_TIME, datetime
+OBS_MID_TIME, datetime
+OBS_END_TIME, datetime
+BASELINE_FILE, string
diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_bad_pixel_query_history.txt b/jwql/database/monitor_table_definitions/nircam/nircam_bad_pixel_query_history.txt
new file mode 100644
index 000000000..09dfa7321
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/nircam/nircam_bad_pixel_query_history.txt
@@ -0,0 +1,11 @@
+INSTRUMENT, string
+APERTURE, string
+DARK_START_TIME_MJD, float
+DARK_END_TIME_MJD, float
+FLAT_START_TIME_MJD, float
+FLAT_END_TIME_MJD, float
+DARK_FILES_FOUND, integer
+FLAT_FILES_FOUND, integer
+RUN_BPIX_FROM_DARKS, bool
+RUN_BPIX_FROM_FLATS, bool
+RUN_MONITOR, bool
diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_bad_pixel_stats.txt b/jwql/database/monitor_table_definitions/nircam/nircam_bad_pixel_stats.txt
new file mode 100644
index 000000000..f0b0ed6e2
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/nircam/nircam_bad_pixel_stats.txt
@@ -0,0 +1,9 @@
+DETECTOR, string
+X_COORD, integer_array_1d
+Y_COORD, integer_array_1d
+TYPE, string
+SOURCE_FILES, string_array_1d
+OBS_START_TIME, datetime
+OBS_MID_TIME, datetime
+OBS_END_TIME, datetime
+BASELINE_FILE, string
diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_bias_query_history.txt b/jwql/database/monitor_table_definitions/nircam/nircam_bias_query_history.txt
new file mode 100644
index 000000000..c6deea152
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/nircam/nircam_bias_query_history.txt
@@ -0,0 +1,8 @@
+INSTRUMENT, string
+APERTURE, string
+START_TIME_MJD, float
+END_TIME_MJD, float
+ENTRIES_FOUND, integer
+FILES_FOUND, integer
+RUN_MONITOR, bool
+ENTRY_DATE, datetime
\ No newline at end of file
diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_bias_stats.txt b/jwql/database/monitor_table_definitions/nircam/nircam_bias_stats.txt
new file mode 100644
index 000000000..610713581
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/nircam/nircam_bias_stats.txt
@@ -0,0 +1,19 @@
+APERTURE, string
+UNCAL_FILENAME, string
+CAL_FILENAME, string
+CAL_IMAGE, string
+EXPSTART, string
+MEAN, float
+MEDIAN, float
+STDDEV, float
+COLLAPSED_ROWS, float_array_1d
+COLLAPSED_COLUMNS, float_array_1d
+AMP1_EVEN_MED, float
+AMP1_ODD_MED, float
+AMP2_EVEN_MED, float
+AMP2_ODD_MED, float
+AMP3_EVEN_MED, float
+AMP3_ODD_MED, float
+AMP4_EVEN_MED, float
+AMP4_ODD_MED, float
+ENTRY_DATE, datetime
\ No newline at end of file
diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_readnoise_query_history.txt b/jwql/database/monitor_table_definitions/nircam/nircam_readnoise_query_history.txt
new file mode 100644
index 000000000..c6deea152
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/nircam/nircam_readnoise_query_history.txt
@@ -0,0 +1,8 @@
+INSTRUMENT, string
+APERTURE, string
+START_TIME_MJD, float
+END_TIME_MJD, float
+ENTRIES_FOUND, integer
+FILES_FOUND, integer
+RUN_MONITOR, bool
+ENTRY_DATE, datetime
\ No newline at end of file
diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_readnoise_stats.txt b/jwql/database/monitor_table_definitions/nircam/nircam_readnoise_stats.txt
new file mode 100644
index 000000000..d6cebf4c2
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/nircam/nircam_readnoise_stats.txt
@@ -0,0 +1,35 @@
+UNCAL_FILENAME, string
+APERTURE, string
+DETECTOR, string
+SUBARRAY, string
+READ_PATTERN, string
+NINTS, string
+NGROUPS, string
+EXPSTART, string
+READNOISE_FILENAME, string
+FULL_IMAGE_MEAN, float
+FULL_IMAGE_STDDEV, float
+FULL_IMAGE_N, float_array_1d
+FULL_IMAGE_BIN_CENTERS, float_array_1d
+READNOISE_DIFF_IMAGE, string
+DIFF_IMAGE_MEAN, float
+DIFF_IMAGE_STDDEV, float
+DIFF_IMAGE_N, float_array_1d
+DIFF_IMAGE_BIN_CENTERS, float_array_1d
+ENTRY_DATE, datetime
+AMP1_MEAN, float
+AMP1_STDDEV, float
+AMP1_N, float_array_1d
+AMP1_BIN_CENTERS, float_array_1d
+AMP2_MEAN, float
+AMP2_STDDEV, float
+AMP2_N, float_array_1d
+AMP2_BIN_CENTERS, float_array_1d
+AMP3_MEAN, float
+AMP3_STDDEV, float
+AMP3_N, float_array_1d
+AMP3_BIN_CENTERS, float_array_1d
+AMP4_MEAN, float
+AMP4_STDDEV, float
+AMP4_N, float_array_1d
+AMP4_BIN_CENTERS, float_array_1d
\ No newline at end of file
diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_bad_pixel_query_history.txt b/jwql/database/monitor_table_definitions/niriss/niriss_bad_pixel_query_history.txt
new file mode 100644
index 000000000..09dfa7321
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/niriss/niriss_bad_pixel_query_history.txt
@@ -0,0 +1,11 @@
+INSTRUMENT, string
+APERTURE, string
+DARK_START_TIME_MJD, float
+DARK_END_TIME_MJD, float
+FLAT_START_TIME_MJD, float
+FLAT_END_TIME_MJD, float
+DARK_FILES_FOUND, integer
+FLAT_FILES_FOUND, integer
+RUN_BPIX_FROM_DARKS, bool
+RUN_BPIX_FROM_FLATS, bool
+RUN_MONITOR, bool
diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_bad_pixel_stats.txt b/jwql/database/monitor_table_definitions/niriss/niriss_bad_pixel_stats.txt
new file mode 100644
index 000000000..f0b0ed6e2
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/niriss/niriss_bad_pixel_stats.txt
@@ -0,0 +1,9 @@
+DETECTOR, string
+X_COORD, integer_array_1d
+Y_COORD, integer_array_1d
+TYPE, string
+SOURCE_FILES, string_array_1d
+OBS_START_TIME, datetime
+OBS_MID_TIME, datetime
+OBS_END_TIME, datetime
+BASELINE_FILE, string
diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_readnoise_query_history.txt b/jwql/database/monitor_table_definitions/niriss/niriss_readnoise_query_history.txt
new file mode 100644
index 000000000..c6deea152
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/niriss/niriss_readnoise_query_history.txt
@@ -0,0 +1,8 @@
+INSTRUMENT, string
+APERTURE, string
+START_TIME_MJD, float
+END_TIME_MJD, float
+ENTRIES_FOUND, integer
+FILES_FOUND, integer
+RUN_MONITOR, bool
+ENTRY_DATE, datetime
\ No newline at end of file
diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_readnoise_stats.txt b/jwql/database/monitor_table_definitions/niriss/niriss_readnoise_stats.txt
new file mode 100644
index 000000000..d6cebf4c2
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/niriss/niriss_readnoise_stats.txt
@@ -0,0 +1,35 @@
+UNCAL_FILENAME, string
+APERTURE, string
+DETECTOR, string
+SUBARRAY, string
+READ_PATTERN, string
+NINTS, string
+NGROUPS, string
+EXPSTART, string
+READNOISE_FILENAME, string
+FULL_IMAGE_MEAN, float
+FULL_IMAGE_STDDEV, float
+FULL_IMAGE_N, float_array_1d
+FULL_IMAGE_BIN_CENTERS, float_array_1d
+READNOISE_DIFF_IMAGE, string
+DIFF_IMAGE_MEAN, float
+DIFF_IMAGE_STDDEV, float
+DIFF_IMAGE_N, float_array_1d
+DIFF_IMAGE_BIN_CENTERS, float_array_1d
+ENTRY_DATE, datetime
+AMP1_MEAN, float
+AMP1_STDDEV, float
+AMP1_N, float_array_1d
+AMP1_BIN_CENTERS, float_array_1d
+AMP2_MEAN, float
+AMP2_STDDEV, float
+AMP2_N, float_array_1d
+AMP2_BIN_CENTERS, float_array_1d
+AMP3_MEAN, float
+AMP3_STDDEV, float
+AMP3_N, float_array_1d
+AMP3_BIN_CENTERS, float_array_1d
+AMP4_MEAN, float
+AMP4_STDDEV, float
+AMP4_N, float_array_1d
+AMP4_BIN_CENTERS, float_array_1d
\ No newline at end of file
diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_bad_pixel_query_history.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_bad_pixel_query_history.txt
new file mode 100644
index 000000000..09dfa7321
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_bad_pixel_query_history.txt
@@ -0,0 +1,11 @@
+INSTRUMENT, string
+APERTURE, string
+DARK_START_TIME_MJD, float
+DARK_END_TIME_MJD, float
+FLAT_START_TIME_MJD, float
+FLAT_END_TIME_MJD, float
+DARK_FILES_FOUND, integer
+FLAT_FILES_FOUND, integer
+RUN_BPIX_FROM_DARKS, bool
+RUN_BPIX_FROM_FLATS, bool
+RUN_MONITOR, bool
diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_bad_pixel_stats.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_bad_pixel_stats.txt
new file mode 100644
index 000000000..f0b0ed6e2
--- /dev/null
+++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_bad_pixel_stats.txt
@@ -0,0 +1,9 @@
+DETECTOR, string
+X_COORD, integer_array_1d
+Y_COORD, integer_array_1d
+TYPE, string
+SOURCE_FILES, string_array_1d
+OBS_START_TIME, datetime
+OBS_MID_TIME, datetime
+OBS_END_TIME, datetime
+BASELINE_FILE, string
diff --git a/jwql/instrument_monitors/common_monitors/bad_pixel_file_thresholds.txt b/jwql/instrument_monitors/common_monitors/bad_pixel_file_thresholds.txt
new file mode 100644
index 000000000..bc2d15a57
--- /dev/null
+++ b/jwql/instrument_monitors/common_monitors/bad_pixel_file_thresholds.txt
@@ -0,0 +1,332 @@
+Instrument Aperture FlatThreshold DarkThreshold
+nircam NRCA1_FULL_OSS 10 10
+nircam NRCA2_FULL_OSS 10 10
+nircam NRCA3_FULL_OSS 10 10
+nircam NRCA4_FULL_OSS 10 10
+nircam NRCA5_FULL_OSS 10 10
+nircam NRCB1_FULL_OSS 10 10
+nircam NRCB2_FULL_OSS 10 10
+nircam NRCB3_FULL_OSS 10 10
+nircam NRCB4_FULL_OSS 10 10
+nircam NRCB5_FULL_OSS 10 10
+nircam NRCALL_FULL 10 10
+nircam NRCAS_FULL 10 10
+nircam NRCA1_FULL 10 10
+nircam NRCA2_FULL 10 10
+nircam NRCA3_FULL 10 10
+nircam NRCA4_FULL 10 10
+nircam NRCA5_FULL 10 10
+nircam NRCBS_FULL 10 10
+nircam NRCB1_FULL 10 10
+nircam NRCB2_FULL 10 10
+nircam NRCB3_FULL 10 10
+nircam NRCB4_FULL 10 10
+nircam NRCB5_FULL 10 10
+nircam NRCB1_FULLP 10 10
+nircam NRCB5_FULLP 10 10
+nircam NRCA1_SUB160 30 30
+nircam NRCA2_SUB160 30 30
+nircam NRCA3_SUB160 30 30
+nircam NRCA4_SUB160 30 30
+nircam NRCA5_SUB160 30 30
+nircam NRCB1_SUB160 30 30
+nircam NRCB2_SUB160 30 30
+nircam NRCB3_SUB160 30 30
+nircam NRCB4_SUB160 30 30
+nircam NRCB5_SUB160 30 30
+nircam NRCA1_SUB320 30 30
+nircam NRCA2_SUB320 30 30
+nircam NRCA3_SUB320 30 30
+nircam NRCA4_SUB320 30 30
+nircam NRCA5_SUB320 30 30
+nircam NRCB1_SUB320 30 30
+nircam NRCB2_SUB320 30 30
+nircam NRCB3_SUB320 30 30
+nircam NRCB4_SUB320 30 30
+nircam NRCB5_SUB320 30 30
+nircam NRCA1_SUB640 30 30
+nircam NRCA2_SUB640 30 30
+nircam NRCA3_SUB640 30 30
+nircam NRCA4_SUB640 30 30
+nircam NRCA5_SUB640 30 30
+nircam NRCB1_SUB640 30 30
+nircam NRCB2_SUB640 30 30
+nircam NRCB3_SUB640 30 30
+nircam NRCB4_SUB640 30 30
+nircam NRCB5_SUB640 30 30
+nircam NRCA5_GRISM256_F322W2 30 30
+nircam NRCA5_GRISM128_F322W2 30 30
+nircam NRCA5_GRISM64_F322W2 30 30
+nircam NRCA5_GRISM256_F277W 30 30
+nircam NRCA5_GRISM128_F277W 30 30
+nircam NRCA5_GRISM64_F277W 30 30
+nircam NRCA5_GRISM256_F356W 30 30
+nircam NRCA5_GRISM128_F356W 30 30
+nircam NRCA5_GRISM64_F356W 30 30
+nircam NRCA5_GRISM256_F444W 30 30
+nircam NRCA5_GRISM128_F444W 30 30
+nircam NRCA5_GRISM64_F444W 30 30
+nircam NRCA5_GRISM_F322W2 30 30
+nircam NRCA5_GRISM_F277W 30 30
+nircam NRCA5_GRISM_F356W 30 30
+nircam NRCA5_GRISM_F444W 30 30
+nircam NRCA1_GRISMTS 30 30
+nircam NRCA1_GRISMTS256 30 30
+nircam NRCA1_GRISMTS128 30 30
+nircam NRCA1_GRISMTS64 30 30
+nircam NRCA3_GRISMTS 30 30
+nircam NRCA3_GRISMTS256 30 30
+nircam NRCA3_GRISMTS128 30 30
+nircam NRCA3_GRISMTS64 30 30
+nircam NRCA5_TAGRISMTS32 30 30
+nircam NRCA5_TAGRISMTS_SCI_F322W2 30 30
+nircam NRCA5_TAGRISMTS_SCI_F444W 30 30
+nircam NRCA3_DHSPIL 30 30
+nircam NRCA3_DHSPIL_SUB96 30 30
+nircam NRCA3_DHSPIL_WEDGES 30 30
+nircam NRCB4_DHSPIL 30 30
+nircam NRCB4_DHSPIL_SUB96 30 30
+nircam NRCB4_DHSPIL_WEDGES 30 30
+nircam NRCA3_FP1 30 30
+nircam NRCA3_FP1_SUB8 30 30
+nircam NRCA3_FP1_SUB64 30 30
+nircam NRCA3_FP2MIMF 30 30
+nircam NRCA1_FP3MIMF 30 30
+nircam NRCA2_FP4MIMF 30 30
+nircam NRCA4_FP5MIMF 30 30
+nircam NRCB4_FP1 30 30
+nircam NRCB4_FP1_SUB8 30 30
+nircam NRCB4_FP1_SUB64 30 30
+nircam NRCB4_FP2MIMF 30 30
+nircam NRCB2_FP3MIMF 30 30
+nircam NRCB1_FP4MIMF 30 30
+nircam NRCB3_FP5MIMF 30 30
+nircam NRCA3_SUB64P 30 30
+nircam NRCA3_SUB160P 30 30
+nircam NRCA3_SUB400P 30 30
+nircam NRCA5_SUB64P 30 30
+nircam NRCA5_SUB160P 30 30
+nircam NRCA5_SUB400P 30 30
+nircam NRCB1_SUB64P 30 30
+nircam NRCB1_SUB160P 30 30
+nircam NRCB1_SUB400P 30 30
+nircam NRCB5_SUB64P 30 30
+nircam NRCB5_SUB160P 30 30
+nircam NRCB5_SUB400P 30 30
+nircam NRCB5_TAPSIMG32 30 30
+nircam NRCA5_GRISMC_WFSS 30 30
+nircam NRCA5_GRISMR_WFSS 30 30
+nircam NRCALL_GRISMC_WFSS 30 30
+nircam NRCALL_GRISMR_WFSS 30 30
+nircam NRCB5_GRISMC_WFSS 30 30
+nircam NRCB5_GRISMR_WFSS 30 30
+nircam NRCA2_MASK210R 30 30
+nircam NRCA5_MASK335R 30 30
+nircam NRCA5_MASK430R 30 30
+nircam NRCA4_MASKSWB 30 30
+nircam NRCA5_MASKLWB 30 30
+nircam NRCA2_TAMASK210R 30 30
+nircam NRCA5_TAMASK335R 30 30
+nircam NRCA5_TAMASK430R 30 30
+nircam NRCA4_TAMASKSWB 30 30
+nircam NRCA5_TAMASKLWB 30 30
+nircam NRCA5_TAMASKLWBL 30 30
+nircam NRCA4_TAMASKSWBS 30 30
+nircam NRCB1_MASK210R 30 30
+nircam NRCB5_MASK335R 30 30
+nircam NRCB5_MASK430R 30 30
+nircam NRCB3_MASKSWB 30 30
+nircam NRCB5_MASKLWB 30 30
+nircam NRCB1_TAMASK210R 30 30
+nircam NRCB5_TAMASK335R 30 30
+nircam NRCB5_TAMASK430R 30 30
+nircam NRCB3_TAMASKSWB 30 30
+nircam NRCB5_TAMASKLWB 30 30
+nircam NRCB5_TAMASKLWBL 30 30
+nircam NRCB3_TAMASKSWBS 30 30
+nircam NRCA2_FSTAMASK210R 30 30
+nircam NRCA4_FSTAMASKSWB 30 30
+nircam NRCA5_FSTAMASKLWB 30 30
+nircam NRCA5_FSTAMASK335R 30 30
+nircam NRCA5_FSTAMASK430R 30 30
+nircam NRCA4_MASKSWB_F182M 30 30
+nircam NRCA4_MASKSWB_F187N 30 30
+nircam NRCA4_MASKSWB_F210M 30 30
+nircam NRCA4_MASKSWB_F212N 30 30
+nircam NRCA4_MASKSWB_F200W 30 30
+nircam NRCA4_MASKSWB_NARROW 30 30
+nircam NRCA5_MASKLWB_F250M 30 30
+nircam NRCA5_MASKLWB_F300M 30 30
+nircam NRCA5_MASKLWB_F277W 30 30
+nircam NRCA5_MASKLWB_F335M 30 30
+nircam NRCA5_MASKLWB_F360M 30 30
+nircam NRCA5_MASKLWB_F356W 30 30
+nircam NRCA5_MASKLWB_F410M 30 30
+nircam NRCA5_MASKLWB_F430M 30 30
+nircam NRCA5_MASKLWB_F460M 30 30
+nircam NRCA5_MASKLWB_F480M 30 30
+nircam NRCA5_MASKLWB_F444W 30 30
+nircam NRCA5_MASKLWB_NARROW 30 30
+nircam NRCA2_FULL_MASK210R 10 10
+nircam NRCA5_FULL_MASK335R 10 10
+nircam NRCA5_FULL_MASK430R 10 10
+nircam NRCA4_FULL_MASKSWB 10 10
+nircam NRCA4_FULL_MASKSWB_F182M 10 10
+nircam NRCA4_FULL_MASKSWB_F187N 10 10
+nircam NRCA4_FULL_MASKSWB_F210M 10 10
+nircam NRCA4_FULL_MASKSWB_F212N 10 10
+nircam NRCA4_FULL_MASKSWB_F200W 10 10
+nircam NRCA5_FULL_MASKLWB 10 10
+nircam NRCA5_FULL_MASKLWB_F250M 10 10
+nircam NRCA5_FULL_MASKLWB_F300M 10 10
+nircam NRCA5_FULL_MASKLWB_F277W 10 10
+nircam NRCA5_FULL_MASKLWB_F335M 10 10
+nircam NRCA5_FULL_MASKLWB_F360M 10 10
+nircam NRCA5_FULL_MASKLWB_F356W 10 10
+nircam NRCA5_FULL_MASKLWB_F410M 10 10
+nircam NRCA5_FULL_MASKLWB_F430M 10 10
+nircam NRCA5_FULL_MASKLWB_F460M 10 10
+nircam NRCA5_FULL_MASKLWB_F480M 10 10
+nircam NRCA5_FULL_MASKLWB_F444W 10 10
+niriss NIS_CEN_OSS 10 10
+niriss NIS_CEN 10 10
+niriss NIS_AMI1 30 30
+niriss NIS_AMI2 30 30
+niriss NIS_AMI3 30 30
+niriss NIS_AMI4 30 30
+niriss NIS_AMITA 30 30
+niriss NIS_SOSSTA 30 30
+niriss NIS_WFSS_OFFSET 30 30
+niriss NIS_WFSS64 30 30
+niriss NIS_WFSS64R 30 30
+niriss NIS_WFSS64R3 30 30
+niriss NIS_WFSS64C 30 30
+niriss NIS_WFSS64C3 30 30
+niriss NIS_WFSS128 30 30
+niriss NIS_WFSS128R 30 30
+niriss NIS_WFSS128R3 30 30
+niriss NIS_WFSS128C 30 30
+niriss NIS_WFSS128C3 30 30
+niriss NIS_SUB64 30 30
+niriss NIS_SUB128 30 30
+niriss NIS_SUB256 30 30
+niriss NIS_SUBAMPCAL 30 30
+niriss NIS_SUBSTRIP96 30 30
+niriss NIS_SUBSTRIP256 30 30
+niriss NIS_FP1MIMF 30 30
+niriss NIS_FP2MIMF 30 30
+niriss NIS_FP3MIMF 30 30
+niriss NIS_FP4MIMF 30 30
+niriss NIS_FP5MIMF 30 30
+niriss NIS_AMIFULL 10 10
+niriss NIS_SOSSFULL 10 10
+miri MIRIMAGE 10 10
+miri MIRIFULONG 10 10
+miri MIRIFUSHORT 10 10
+nirspec NRS1_FULL_OSS 10 10
+nirspec NRS1_FULL 10 10
+nirspec NRS2_FULL_OSS 10 10
+nirspec NRS2_FULL 10 10
+nirspec NRS_S200A1_SLIT 30 30
+nirspec NRS_S200A2_SLIT 30 30
+nirspec NRS_S400A1_SLIT 30 30
+nirspec NRS_S1600A1_SLIT 30 30
+nirspec NRS_S200B1_SLIT 30 30
+nirspec NRS_FULL_IFU 10 10
+nirspec NRS_IFU_SLICE00 30 30
+nirspec NRS_IFU_SLICE01 30 30
+nirspec NRS_IFU_SLICE02 30 30
+nirspec NRS_IFU_SLICE03 30 30
+nirspec NRS_IFU_SLICE04 30 30
+nirspec NRS_IFU_SLICE05 30 30
+nirspec NRS_IFU_SLICE06 30 30
+nirspec NRS_IFU_SLICE07 30 30
+nirspec NRS_IFU_SLICE08 30 30
+nirspec NRS_IFU_SLICE09 30 30
+nirspec NRS_IFU_SLICE10 30 30
+nirspec NRS_IFU_SLICE11 30 30
+nirspec NRS_IFU_SLICE12 30 30
+nirspec NRS_IFU_SLICE13 30 30
+nirspec NRS_IFU_SLICE14 30 30
+nirspec NRS_IFU_SLICE15 30 30
+nirspec NRS_IFU_SLICE16 30 30
+nirspec NRS_IFU_SLICE17 30 30
+nirspec NRS_IFU_SLICE18 30 30
+nirspec NRS_IFU_SLICE19 30 30
+nirspec NRS_IFU_SLICE20 30 30
+nirspec NRS_IFU_SLICE21 30 30
+nirspec NRS_IFU_SLICE22 30 30
+nirspec NRS_IFU_SLICE23 30 30
+nirspec NRS_IFU_SLICE24 30 30
+nirspec NRS_IFU_SLICE25 30 30
+nirspec NRS_IFU_SLICE26 30 30
+nirspec NRS_IFU_SLICE27 30 30
+nirspec NRS_IFU_SLICE28 30 30
+nirspec NRS_IFU_SLICE29 30 30
+nirspec NRS_FULL_MSA 10 10
+nirspec NRS_FULL_MSA1 10 10
+nirspec NRS_FULL_MSA2 10 10
+nirspec NRS_FULL_MSA3 10 10
+nirspec NRS_FULL_MSA4 10 10
+nirspec NRS_VIGNETTED_MSA 30 30
+nirspec NRS_VIGNETTED_MSA1 30 30
+nirspec NRS_VIGNETTED_MSA2 30 30
+nirspec NRS_VIGNETTED_MSA3 30 30
+nirspec NRS_VIGNETTED_MSA4 30 30
+nirspec NRS_FIELD1_MSA4 30 30
+nirspec NRS_FIELD2_MSA4 30 30
+nirspec NRS1_FP1MIMF 30 30
+nirspec NRS1_FP2MIMF 30 30
+nirspec NRS1_FP3MIMF 30 30
+nirspec NRS2_FP4MIMF 30 30
+nirspec NRS2_FP5MIMF 30 30
+nirspec CLEAR_GWA_OTE 30 30
+nirspec F110W_GWA_OTE 30 30
+nirspec F140X_GWA_OTE 30 30
+nirspec NRS_SKY_OTEIP 30 30
+nirspec NRS_CLEAR_OTEIP_MSA_L0 30 30
+nirspec NRS_CLEAR_OTEIP_MSA_L1 30 30
+nirspec NRS_F070LP_OTEIP_MSA_L0 30 30
+nirspec NRS_F070LP_OTEIP_MSA_L1 30 30
+nirspec NRS_F100LP_OTEIP_MSA_L0 30 30
+nirspec NRS_F100LP_OTEIP_MSA_L1 30 30
+nirspec NRS_F170LP_OTEIP_MSA_L0 30 30
+nirspec NRS_F170LP_OTEIP_MSA_L1 30 30
+nirspec NRS_F290LP_OTEIP_MSA_L0 30 30
+nirspec NRS_F290LP_OTEIP_MSA_L1 30 30
+nirspec NRS_F110W_OTEIP_MSA_L0 30 30
+nirspec NRS_F110W_OTEIP_MSA_L1 30 30
+nirspec NRS_F140X_OTEIP_MSA_L0 30 30
+nirspec NRS_F140X_OTEIP_MSA_L1 30 30
+fgs FGS1_FULL_OSS 10 10
+fgs FGS1_FULL 10 10
+fgs FGS2_FULL_OSS 10 10
+fgs FGS2_FULL 10 10
+fgs FGS1_SUB128LL 30 30
+fgs FGS1_SUB128DIAG 30 30
+fgs FGS1_SUB128CNTR 30 30
+fgs FGS1_SUB32LL 30 30
+fgs FGS1_SUB32DIAG 30 30
+fgs FGS1_SUB32CNTR 30 30
+fgs FGS1_SUB8LL 30 30
+fgs FGS1_SUB8DIAG 30 30
+fgs FGS1_SUB8CNTR 30 30
+fgs FGS2_SUB128LL 30 30
+fgs FGS2_SUB128DIAG 30 30
+fgs FGS2_SUB128CNTR 30 30
+fgs FGS2_SUB32LL 30 30
+fgs FGS2_SUB32DIAG 30 30
+fgs FGS2_SUB32CNTR 30 30
+fgs FGS2_SUB8LL 30 30
+fgs FGS2_SUB8DIAG 30 30
+fgs FGS2_SUB8CNTR 30 30
+fgs FGS1_FP1MIMF 30 30
+fgs FGS1_FP2MIMF 30 30
+fgs FGS1_FP3MIMF 30 30
+fgs FGS1_FP4MIMF 30 30
+fgs FGS1_FP5MIMF 30 30
+fgs FGS2_FP1MIMF 30 30
+fgs FGS2_FP2MIMF 30 30
+fgs FGS2_FP3MIMF 30 30
+fgs FGS2_FP4MIMF 30 30
+fgs FGS2_FP5MIMF 30 30
diff --git a/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py b/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py
new file mode 100755
index 000000000..cd8f91e59
--- /dev/null
+++ b/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py
@@ -0,0 +1,1084 @@
+#! /usr/bin/env python
+
+"""This module contains code for the bad/dead pixel monitor.
+
+The monitor calls the ``bad_pixel_mask.py`` module in the
+``spacetelescope/jwst_reffiles`` package in order to identify bad
+pixels. (``https://github.com/spacetelescope/jwst_reffiles``)
+
+The definitions of the bad pixel types to be searched for are given
+in the jwst package:
+``https://jwst-pipeline.readthedocs.io/en/stable/jwst/references_general/
+references_general.html#data-quality-flags``
+
+The bad pixel search is composed of two different parts, each of which
+can be run independently.
+
+1. Internal flat field exposures are used to search for ``DEAD``,
+``LOW QE``, ``OPEN``, and ``ADJACENT TO OPEN`` pixels.
+
+2. Dark current exposures are used to search for ``NOISY``, ``HOT``,
+``RC``, ``TELEGRAPH``, and ``LOW_PEDESTAL`` pixels.
+
+Both of these modules expect input data in at least 2 calibration
+states. In practice, the bad pixel monitor will search MAST for the
+appropriate dark current and flat field files. In both cases, a given
+file is considered useful if the uncal (and potentially the rate)
+versions of the file are present in the archive. For files where the
+uncal version only is found, the pipeline is run to produce the rate
+file.
+
+Once a sufficient number of new flats or darks are identified, the bad
+pixel monitor is called. The ``bad_pixel_file_thresholds.txt`` file
+contains a list of the minimum number of new files necessary to run the
+monitor for each instrument and aperture.
+
+For the flat field files, the pipeline is run on the uncal files far
+enough to produce cosmic ray flagged (jump) files. These are also
+needed for the bad pixel search.
+
+The ``jwst_reffiles`` ``bad_pixel_mask.py`` is run, and returns a map of
+bad pixels for each of the various bad pixel mnemonics. The
+``bad_pixel_monitor`` then downloads the latest bad pixel mask in CRDS
+for the given instrument and detector/aperture, and this is compared to
+the new map of bad pixels. For each bad pixel mnemonic, any pixels
+flagged as bad that are not bad in the current reference file are saved
+to the appropriate database table (e.g. ``NIRCamBadPixelStats``).
+
+Author
+------
+
+ - Bryan Hilbert
+
+Use
+---
+
+ This module can be used from the command line as such:
+
+ ::
+
+ python bad_pixel_monitor.py
+
+Notes
+-----
+
+The portion of the bad pixel monitor that utilizes flat field ramps
+can't be used with NIRCam, since NIRCam has no internal lamp and
+therefore will not be taking any more internal flat field images. It
+could perhaps be used with a series of external undithered
+observations, but that is something to think about later.
+
+Templates to use: ``FGS_INTFLAT``, ``NIS_LAMP``, ``NRS_LAMP``,
+``MIR_DARK``
+"""
+
+from copy import deepcopy
+import datetime
+import logging
+import os
+
+from astropy.io import ascii, fits
+from astropy.time import Time
+from jwst.datamodels import dqflags
+from jwst_reffiles.bad_pixel_mask import bad_pixel_mask
+import numpy as np
+from sqlalchemy import func
+from sqlalchemy.sql.expression import and_
+
+from jwql.database.database_interface import session
+from jwql.database.database_interface import NIRCamBadPixelQueryHistory, NIRCamBadPixelStats
+from jwql.database.database_interface import NIRISSBadPixelQueryHistory, NIRISSBadPixelStats
+from jwql.database.database_interface import MIRIBadPixelQueryHistory, MIRIBadPixelStats
+from jwql.database.database_interface import NIRSpecBadPixelQueryHistory, NIRSpecBadPixelStats
+from jwql.database.database_interface import FGSBadPixelQueryHistory, FGSBadPixelStats
+from jwql.instrument_monitors import pipeline_tools
+from jwql.utils import crds_tools, instrument_properties
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, \
+ FLAT_EXP_TYPES, DARK_EXP_TYPES
+from jwql.utils.logging_functions import log_info, log_fail
+from jwql.utils.mast_utils import mast_query
+from jwql.utils.monitor_utils import initialize_instrument_monitor, update_monitor_table
+from jwql.utils.permissions import set_permissions
+from jwql.utils.utils import copy_files, ensure_dir_exists, get_config, filesystem_path
+
+THRESHOLDS_FILE = os.path.join(os.path.split(__file__)[0], 'bad_pixel_file_thresholds.txt')
+
+
+def bad_map_to_list(badpix_image, mnemonic):
+    """Given a DQ image and a bad pixel mnemonic, create a list of
+ (x,y) locations of this type of bad pixel in ``badpix_image``
+
+ Parameters
+ ----------
+ badpix_image : numpy.ndarray
+ 2D image of bad pixels (i.e. a DQ array)
+
+ mnemonic : str
+ The type of bad pixel to map. The mnemonic must be one of those
+ in the JWST calibration pipeline's list of possible mnemonics
+
+ Returns
+ -------
+ x_loc : list
+ List of x locations within ``badpix_image`` containing
+ ``mnemonic`` pixels.
+
+ y_loc : list
+        List of y locations within ``badpix_image`` containing
+ ``mnemonic`` pixels.
+ """
+ mnemonic = mnemonic.upper()
+ possible_mnemonics = dqflags.pixel.keys()
+ if mnemonic not in possible_mnemonics:
+ raise ValueError("ERROR: Unrecognized bad pixel mnemonic: {}".format(mnemonic))
+
+ # Find locations of this type of bad pixel
+ y_loc, x_loc = np.where(badpix_image & dqflags.pixel[mnemonic] > 0)
+
+ # Convert from numpy int to python native int, in order to avoid SQL
+ # error when adding to the database tables.
+ y_location = [int(element) for element in y_loc]
+ x_location = [int(element) for element in x_loc]
+
+ return x_location, y_location
+
+
+def check_for_sufficient_files(uncal_files, instrument_name, aperture_name, threshold_value, file_type):
+ """From a list of files of a given type (flats or darks), check to
+ see if there are enough files to call the bad pixel monitor. The
+ number of files must be equal to or greater than the provided
+ ``threshold_value``.
+
+ Parameters
+ ----------
+ uncal_files : list
+ List of filenames
+
+ instrument_name : str
+ Name of JWST instrument (e.g. ``nircam``) that the data are
+ from. This is used only in logging statements
+
+ aperture_name : str
+        Name of aperture (e.g. ``NRCA1_FULL``) that the data are from.
+ This is used only in logging statements
+
+ threshold_value : int
+ Minimum number of files required in order to run the bad pixel
+ monitor
+
+ file_type : str
+        Either ``darks`` or ``flats``. This is used only in the logging
+ statements.
+
+ Returns
+ -------
+ uncal_files : list
+ List of sorted, unique files from the input file list. Set to
+ ``None`` if the number of unique files is under the threshold
+
+ run_data : bool
+ Whether or not the bad pixel monitor will be called on these
+ files.
+ """
+ if file_type not in ['darks', 'flats']:
+ raise ValueError('Input file_type must be "darks" or "flats"')
+ file_type_singular = file_type.strip('s')
+
+ if len(uncal_files) > 0:
+ uncal_files = sorted(list(set(uncal_files)))
+
+ if len(uncal_files) < threshold_value:
+ logging.info(('\tBad pixels from {} skipped. {} new {} files for {}, {} found. {} new files are '
+ 'required to run bad pixels from {} portion of monitor.')
+ .format(file_type, len(uncal_files), file_type_singular, instrument_name, aperture_name, threshold_value, file_type))
+ uncal_files = None
+ run_data = False
+
+ else:
+ logging.info('\tSufficient new files found for {}, {} to run the bad pixel from {} portion of the monitor.'
+ .format(instrument_name, aperture_name, file_type))
+ logging.info('\tNew entries: {}'.format(len(uncal_files)))
+ run_data = True
+ return uncal_files, run_data
+
+
+def exclude_crds_mask_pix(bad_pix, existing_bad_pix):
+ """Find differences between a set of newly-identified bad pixels
+ and an existing set. Return a list of newly-discovered bad pixels
+ that are not present in the existing set.
+
+ Parameters
+ ----------
+ bad_pix : numpy.ndarray
+ 2D array of bad pixel flags. Flags must correspond to the
+        definitions used by the JWST calibration pipeline
+
+ existing_bad_pix : numpy.ndarray
+ 2D array of bad pixel flags. Flags must correspond to the
+ definitions used by the JWST calibration pipeline
+
+ Returns
+ -------
+ new_bad_pix : numpy.ndarray
+ 2D array of bad pixel flags contained in ``bad_pix``
+ but not ``existing_bad_pix``
+ """
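+    # ``bad_pix & existing_bad_pix`` keeps only the flag bits that are already
+    # set in the existing mask; subtracting them leaves the newly-found flags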
+ return bad_pix - (bad_pix & existing_bad_pix)
+
+
+def locate_rate_files(uncal_files):
+ """Given a list of uncal (raw) files, generate a list of
+ corresponding rate files. For each uncal file, if the rate file
+ is present in the filesystem, add the name of the rate file (if
+ a rateints file exists, use that) to the list of files. If no
+ rate file is present, add ``None`` to the list.
+
+ Parameters
+ ----------
+ uncal_files : list
+ List of uncal files to use as the basis of the search
+
+ Returns
+ -------
+ rate_files : list
+ List of rate files. This list corresponds 1-to-1 with
+ ``uncal_files``. Any missing rate files are listed as None.
+
+ rate_files_to_copy : list
+ Same as ``rate_files`` but without the None entries. This is
+ a list of only the rate files that exist in the filesystem
+ """
+ if uncal_files is None:
+ return None, None
+
+ rate_files = []
+ rate_files_to_copy = []
+ for uncal in uncal_files:
+ base = uncal.split('_uncal.fits')[0]
+ constructed_ratefile = '{}_rateints.fits'.format(base)
+ try:
+ rate_files.append(filesystem_path(constructed_ratefile))
+ rate_files_to_copy.append(filesystem_path(constructed_ratefile))
+ except FileNotFoundError:
+ constructed_ratefile = '{}_rate.fits'.format(base)
+ try:
+ rate_files.append(filesystem_path(constructed_ratefile))
+ rate_files_to_copy.append(filesystem_path(constructed_ratefile))
+ except FileNotFoundError:
+ rate_files.append('None')
+ return rate_files, rate_files_to_copy
+
+
+def locate_uncal_files(query_result):
+ """Given a MAST query result, locate the raw version
+ (``uncal.fits``) of the listed files in the filesystem.
+
+ Parameters
+ ----------
+ query_result : list
+ MAST query results. List of dictionaries
+
+ Returns
+ -------
+ uncal_files : list
+ List of raw file locations within the filesystem
+ """
+ uncal_files = []
+ for entry in query_result:
+ filename = entry['filename']
+ suffix = filename.split('_')[-1].replace('.fits', '')
+ uncal_file = filename.replace(suffix, 'uncal')
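+        # e.g. 'jw01234001001_01101_00001_nrca1_rate.fits' becomes
+        # 'jw01234001001_01101_00001_nrca1_uncal.fits' (filename illustrative)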
+
+ # Look for uncal file
+ try:
+ uncal_files.append(filesystem_path(uncal_file))
+ except FileNotFoundError:
+ logging.warning('\t\tUnable to locate {} in filesystem. Not including in processing.'
+ .format(uncal_file))
+ return uncal_files
+
+
+class BadPixels():
+ """Class for executing the bad pixel monitor.
+
+ This class will search for new (since the previous instance of the
+ class) dark current and internal flat field files in the filesystem.
+ It will loop over instrument/aperture combinations and find the
+ number of new dark/flat files available. If there are enough, it
+ will copy the files over to a working directory and run the monitor.
+
+ This will use the ``jwst_reffiles`` package to locate new bad
+ pixels, which will be returned as a map. This map will be compared
+ to the current bad pixel reference file (``dq_init``) in CRDS, and
+    the coordinates and type of any new bad pixels will be saved in
+ a database table.
+
+ Attributes
+ ----------
+ aperture : str
+ Aperture name of the data (e.g. ``NRCA1_FULL``)
+
+ dark_query_start : float
+ Date (in ``MJD``) of the ending range of the previous MAST query
+        where the darks portion of the bad pixel monitor was run.
+
+ data_dir : str
+ Directory that contains the files copied from MAST to be used
+ by the bad pixel monitor
+
+ detector : str
+ Detector associated with the data (e.g. ``NRCA1``)
+
+ flat_query_start : float
+ Date (in MJD) of the ending range of the previous MAST query
+        where the flats portion of the bad pixel monitor was run.
+
+ instrument : str
+ Name of the JWST instrument the data are from
+
+ nints : int
+ Number of integrations in the exposure
+
+ output_dir : str
+ Top level output directory associated with the bad pixel
+ monitor, as retrieved from the JWQL config file
+
+ pixel_table : sqlalchemy table
+ Database table containing lists of bad pixels identified
+ during runs of the bad pixel monitor
+
+ query_end : float
+ MJD of the execution time of the bad pixel monitor. This is
+ used as the ending time of the MAST query.
+
+ query_table : sqlalchemy table
+ Database table containing the history of MAST queries
+ for the bad pixel monitor.
+
+ Raises
+ ------
+ ValueError
+ If NINT or DETECTOR is missing from input file header
+
+ ValueError
+ If an unrecognized bad pixel mnemonic is encountered
+
+ ValueError
+ If the number of uncal and rate files does not match
+
+ ValueError
+ If the most recent query search returns more than one entry
+ """
+
+ def __init__(self):
+ """Initialize an instance of the ``BadPixels`` class."""
+
+ def add_bad_pix(self, coordinates, pixel_type, files, obs_start_time, obs_mid_time, obs_end_time, baseline_file):
+ """Add a set of bad pixels to the bad pixel database table
+
+ Parameters
+ ----------
+ coordinates : tuple
+ Tuple of two lists, containing x,y coordinates of bad
+ pixels (Output of ``np.where`` call)
+
+ pixel_type : str
+            Type of bad pixel, e.g. ``dead``, ``hot``, or ``noisy``
+
+ files : list
+ List of fits files which were used to identify the bad
+ pixels
+
+ obs_start_time : datetime.datetime
+ Observation time of the earliest file in ``files``
+
+ obs_mid_time : datetime.datetime
+ Average of the observation times in ``files``
+
+ obs_end_time : datetime.datetime
+ Observation time of the latest file in ``files``
+
+ baseline_file : str
+ Name of baseline bad pixel file against which the new bad
+ pixel population was compared
+ """
+
+ logging.info('Adding {} {} pixels to database.'.format(len(coordinates[0]), pixel_type))
+
+ source_files = [os.path.basename(item) for item in files]
+ entry = {'detector': self.detector,
+ 'x_coord': coordinates[0],
+ 'y_coord': coordinates[1],
+ 'type': pixel_type,
+ 'source_files': source_files,
+ 'obs_start_time': obs_start_time,
+ 'obs_mid_time': obs_mid_time,
+ 'obs_end_time': obs_end_time,
+ 'baseline_file': baseline_file,
+ 'entry_date': datetime.datetime.now()}
+ self.pixel_table.__table__.insert().execute(entry)
+
+ def filter_query_results(self, results, datatype):
+ """Filter MAST query results. For input flats, keep only those
+ with the most common filter/pupil/grating combination. For both
+ flats and darks, keep only those with the most common readout
+ pattern.
+
+ Parameters
+ ----------
+ results : list
+ List of query results, as returned by ``mast_query()``
+
+ datatype : str
+ Type of data being filtered. ``flat`` or ``dark``.
+
+ Returns
+ -------
+ readpatt_filtered : list
+ Filtered list of query results.
+ """
+ # Need to filter all instruments' results by filter. Choose filter with the most files
+ # Only for flats
+ if ((datatype == 'flat') and (self.instrument != 'fgs')):
+ if self.instrument in ['nircam', 'niriss']:
+ filter_on = 'pupil'
+ elif self.instrument == 'nirspec':
+ filter_on = 'grating'
+ elif self.instrument == 'miri':
+ filter_on = 'filter'
+
+ filter_list = ['{}:{}'.format(entry['filter'], entry[filter_on]) for entry in results]
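+            # Each entry is a 'filter:pupil' (or filter/grating) string,
+            # e.g. 'F322W2:GRISMR' for NIRCam (values illustrative)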
+ filter_set = list(set(filter_list))
+
+ # Find the filter with the largest number of entries
+ maxnum = 0
+ maxfilt = ''
+ for filt in filter_set:
+ if filter_list.count(filt) > maxnum:
+ maxnum = filter_list.count(filt)
+ maxfilt = filt
+ filter_name, other_name = maxfilt.split(':')
+
+ filtered = []
+ for entry in results:
+ if ((str(entry['filter']) == filter_name) and (str(entry[filter_on]) == other_name)):
+ filtered.append(entry)
+
+ results = deepcopy(filtered)
+
+ # All instruments: need to filter by readout pattern. Any pattern name not containing "IRS2" is ok
+ # choose readout pattern with the most entries
+ readpatt_list = [entry['readpatt'] for entry in results]
+ readpatt_set = list(set(readpatt_list))
+
+ maxnum = 0
+ maxpatt = ''
+ for patt in readpatt_set:
+ if ((readpatt_list.count(patt) > maxnum) and ('IRS2' not in patt)):
+ maxnum = readpatt_list.count(patt)
+ maxpatt = patt
+
+ # Find the readpattern with the largest number of entries
+ readpatt_filtered = []
+ for entry in results:
+ if entry['readpatt'] == maxpatt:
+ readpatt_filtered.append(entry)
+
+ return readpatt_filtered
+
+ def get_metadata(self, filename):
+ """Collect basic metadata from a fits file
+
+ Parameters
+ ----------
+ filename : str
+ Name of fits file to examine
+ """
+
+ header = fits.getheader(filename)
+
+ try:
+ self.detector = header['DETECTOR']
+ self.nints = header['NINTS']
+
+ except KeyError as e:
+ logging.error(e)
+
+ def get_possible_apertures(self):
+ """Generate a list of possible SIAF apertures for the given
+ instrument.
+
+ Returns
+ -------
+        possible_apertures : list
+            List of acceptable apertures for ``self.instrument``
+ """
+ if self.instrument == 'nircam':
+ possible_apertures = []
+ for i in range(1, 6):
+ possible_apertures.append('NRCA{}_FULL'.format(i))
+ possible_apertures.append('NRCB{}_FULL'.format(i))
+ if self.instrument == 'niriss':
+ possible_apertures = ['NIS_CEN']
+ if self.instrument == 'miri':
+ # Since MIRI is organized a little bit differently than the
+ # other instruments, you can't use aperture names to uniquely
+ # identify the full frame darks/flats from a given detector.
+ # Instead you must use detector names.
+ possible_apertures = [('MIRIMAGE', 'MIRIM_FULL'), ('MIRIFULONG', 'MIRIM_FULL'), ('MIRIFUSHORT', 'MIRIM_FULL')]
+ if self.instrument == 'fgs':
+ possible_apertures = ['FGS1_FULL', 'FGS2_FULL']
+ if self.instrument == 'nirspec':
+ possible_apertures = ['NRS1_FULL', 'NRS2_FULL']
+ return possible_apertures
+
+ def exclude_existing_badpix(self, badpix, pixel_type):
+ """Given a set of coordinates of bad pixels, determine which of
+ these pixels have been previously identified and remove them
+ from the list
+
+ Parameters
+ ----------
+ badpix : tuple
+ Tuple of lists containing x and y pixel coordinates. (Output
+ of ``numpy.where`` call)
+
+ pixel_type : str
+ Type of bad pixel being examined. Options are ``hot``,
+ ``dead``, and ``noisy``
+
+ Returns
+ -------
+ new_pixels_x : list
+ List of x coordinates of new bad pixels
+
+ new_pixels_y : list
+ List of y coordinates of new bad pixels
+ """
+
+ if pixel_type not in ['hot', 'dead', 'noisy']:
+ raise ValueError('Unrecognized bad pixel type: {}'.format(pixel_type))
+
+ db_entries = session.query(self.pixel_table) \
+ .filter(self.pixel_table.type == pixel_type) \
+ .filter(self.pixel_table.detector == self.detector) \
+ .all()
+
+ already_found = []
+ if len(db_entries) != 0:
+ for _row in db_entries:
+ x_coords = _row.x_coord
+ y_coords = _row.y_coord
+ for x, y in zip(x_coords, y_coords):
+ already_found.append((x, y))
+
+ # Check to see if each pixel already appears in the database for
+ # the given bad pixel type
+ new_pixels_x = []
+ new_pixels_y = []
+ for x, y in zip(badpix[0], badpix[1]):
+ pixel = (x, y)
+ if pixel not in already_found:
+ new_pixels_x.append(x)
+ new_pixels_y.append(y)
+
+ return (new_pixels_x, new_pixels_y)
+
+ def identify_tables(self):
+ """Determine which database tables to use for a run of the bad pixel
+ monitor
+ """
+ mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument]
+ self.query_table = eval('{}BadPixelQueryHistory'.format(mixed_case_name))
+ self.pixel_table = eval('{}BadPixelStats'.format(mixed_case_name))
+
+ def map_uncal_and_rate_file_lists(self, uncal_files, rate_files, rate_files_to_copy, obs_type):
+ """Copy uncal and rate files from the filesystem to the working
+ directory. Any requested files that are not in the filesystem
+ are noted and skipped. Return the file lists with skipped files
+ removed.
+
+ Parameters
+ ----------
+ uncal_files : list
+ List of raw files to be copied
+
+ rate_files : list
+ List of rate (slope) images to be copied. This list should
+ correspond 1-to-1 with ``uncal_files``. Any rate files that
+ were not found in the MAST query should be set to None.
+
+ rate_files_to_copy : list
+ Similar to ``rate_files`` but with the None entries omitted.
+
+ obs_type : str
+ Observation type (``dark`` or ``flat``). Used only for
+ logging
+
+ Returns
+ -------
+ uncal_files : list
+ List of the input raw files with any that failed to copy
+ removed
+
+ rate_files : list
+ List of the input rate files with any that failed to copy
+ removed (if the uncal also failed) or set to None (if only
+ the rate file failed)
+ """
+ # Copy files from filesystem
+ uncal_copied_files, uncal_not_copied = copy_files(uncal_files, self.data_dir)
+ rate_copied_files, rate_not_copied = copy_files(rate_files_to_copy, self.data_dir)
+
+ # Set any rate files that failed to copy to None so
+ # that we can regenerate them
+ if len(rate_not_copied) > 0:
+ for badfile in rate_not_copied:
+ rate_files[rate_files.index(badfile)] = 'None'
+
+ # Any uncal files that failed to copy must be removed
+ # entirely from the uncal and rate lists
+ if len(uncal_not_copied) > 0:
+ for badfile in uncal_not_copied:
+ bad_index = uncal_files.index(badfile)
+ del uncal_files[bad_index]
+ del rate_files[bad_index]
+
+ logging.info('\tNew {} observations: '.format(obs_type))
+ logging.info('\tData dir: {}'.format(self.data_dir))
+ logging.info('\tCopied to data dir: {}'.format(uncal_copied_files))
+ logging.info('\tNot copied (failed, or missing from filesystem): {}'.format(uncal_not_copied))
+
+ # After all this, the lists should be the same length
+ # and have a 1-to-1 correspondence
+ if len(uncal_files) != len(rate_files):
+ raise ValueError('Lists of {} uncal and rate files have different lengths!'.format(obs_type))
+
+ return uncal_files, rate_files
+
+ def most_recent_search(self, file_type='dark'):
+ """Query the query history database and return the information
+ on the most recent query for the given aperture where
+ the bad pixel monitor was executed.
+
+ Parameters
+ ----------
+ file_type : str
+ ``dark`` or ``flat``. Specifies the type of file whose
+ previous search time is queried.
+
+ Returns
+ -------
+ query_result : float
+ Date (in MJD) of the ending range of the previous MAST query
+ where the bad pixel monitor was run.
+ """
+ if file_type.lower() == 'dark':
+ mjd_field = self.query_table.dark_end_time_mjd
+ elif file_type.lower() == 'flat':
+ mjd_field = self.query_table.flat_end_time_mjd
+
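+ # Subquery: for each aperture, find the most recent (maximum) MJD end
+ # time recorded in the query history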
+ sub_query = session.query(self.query_table.aperture,
+ func.max(mjd_field).label('maxdate')
+ ).group_by(self.query_table.aperture).subquery('t2')
+
+ # Note that "self.query_table.run_monitor == True" below is
+ # intentional. Switching = to "is" results in an error in the query.
+ query = session.query(self.query_table).join(
+ sub_query,
+ and_(
+ self.query_table.aperture == self.aperture,
+ mjd_field == sub_query.c.maxdate,
+ self.query_table.run_monitor == True
+ )
+ ).all()
+
+ query_count = len(query)
+ if query_count == 0:
+ query_result = 57357.0 # a.k.a. Dec 1, 2015 == CV3
+ logging.info(('\tNo query history for {}. Beginning search date will be set to {}.'
+ .format(self.aperture, query_result)))
+ elif query_count > 1:
+ raise ValueError('More than one "most recent" query?')
+ else:
+ if file_type.lower() == 'dark':
+ query_result = query[0].dark_end_time_mjd
+ elif file_type.lower() == 'flat':
+ query_result = query[0].flat_end_time_mjd
+
+ return query_result
+
+ def make_crds_parameter_dict(self):
+ """Construct a paramter dictionary to be used for querying CRDS
+
+ Returns
+ -------
+ parameters : dict
+ Dictionary of parameters, in the format expected by CRDS
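+ For example (illustrative values only)::
+
+     {'INSTRUME': 'NIRCAM', 'SUBARRAY': 'FULL',
+      'DATE-OBS': '2021-01-01', 'TIME-OBS': '12:00:00.000000',
+      'DETECTOR': 'NRCA1', 'CHANNEL': 'SHORT'}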
+ """
+ parameters = {}
+ parameters['INSTRUME'] = self.instrument.upper()
+ parameters['SUBARRAY'] = 'FULL'
+ parameters['DATE-OBS'] = datetime.date.today().isoformat()
+ current_date = datetime.datetime.now()
+ parameters['TIME-OBS'] = current_date.time().isoformat()
+ parameters['DETECTOR'] = self.detector.upper()
+ if self.instrument.upper() == 'NIRCAM':
+ if parameters['DETECTOR'] in ['NRCALONG', 'NRCBLONG']:
+ parameters['CHANNEL'] = 'LONG'
+ else:
+ parameters['CHANNEL'] = 'SHORT'
+ return parameters
+
+ def process(self, illuminated_raw_files, illuminated_slope_files, dark_raw_files, dark_slope_files):
+ """The main method for processing darks. See module docstrings
+ for further details.
+
+ Parameters
+ ----------
+ illuminated_raw_files : list
+ List of filenames (including full paths) of raw (uncal) flat
+ field files. These should all be for the same detector and
+ aperture.
+
+ illuminated_slope_files : list
+ List of filenames (including full paths) of flat field slope
+ files. These should all be for the same detector and
+ aperture and correspond one-to-one with
+ ``illuminated_raw_files``. For cases where a raw file exists
+ but no slope file, the slope file should be None
+
+ dark_raw_files : list
+ List of filenames (including full paths) of raw (uncal) dark
+ files. These should all be for the same detector and
+ aperture.
+
+ dark_slope_files : list
+ List of filenames (including full paths) of dark current
+ slope files. These should all be for the same detector and
+ aperture and correspond one-to-one with ``dark_raw_files``.
+ For cases where a raw file exists but no slope file, the
+ slope file should be ``None``
+ """
+ # Illuminated files - run entirety of calwebb_detector1 for uncal
+ # files where corresponding rate file is 'None'
+ all_files = []
+ badpix_types = []
+ badpix_types_from_flats = ['DEAD', 'LOW_QE', 'OPEN', 'ADJ_OPEN']
+ badpix_types_from_darks = ['HOT', 'RC', 'OTHER_BAD_PIXEL', 'TELEGRAPH']
+ illuminated_obstimes = []
+ if illuminated_raw_files:
+ index = 0
+ badpix_types.extend(badpix_types_from_flats)
+ for uncal_file, rate_file in zip(illuminated_raw_files, illuminated_slope_files):
+ self.get_metadata(uncal_file)
+ if rate_file == 'None':
+ jump_output, rate_output, _ = pipeline_tools.calwebb_detector1_save_jump(uncal_file, self.data_dir,
+ ramp_fit=True, save_fitopt=False)
+ if self.nints > 1:
+ illuminated_slope_files[index] = rate_output.replace('0_ramp_fit', '1_ramp_fit')
+ else:
+ illuminated_slope_files[index] = deepcopy(rate_output)
+ index += 1
+
+ # Get observation time for all files
+ illuminated_obstimes.append(instrument_properties.get_obstime(uncal_file))
+
+ all_files = deepcopy(illuminated_slope_files)
+
+ min_illum_time = min(illuminated_obstimes)
+ max_illum_time = max(illuminated_obstimes)
+ mid_illum_time = instrument_properties.mean_time(illuminated_obstimes)
+
+ # Dark files - Run calwebb_detector1 on all uncal files, saving the
+ # Jump step output. If corresponding rate file is 'None', then also
+ # run the ramp-fit step and save the output
+ dark_jump_files = []
+ dark_fitopt_files = []
+ dark_obstimes = []
+ if dark_raw_files:
+ index = 0
+ badpix_types.extend(badpix_types_from_darks)
+ # In this case we need to run the pipeline on all input files,
+ # even if the rate file is present, because we also need the jump
+ # and fitopt files, which are not saved by default
+ for uncal_file, rate_file in zip(dark_raw_files, dark_slope_files):
+ jump_output, rate_output, fitopt_output = pipeline_tools.calwebb_detector1_save_jump(uncal_file, self.data_dir,
+ ramp_fit=True, save_fitopt=True)
+ self.get_metadata(uncal_file)
+ dark_jump_files.append(jump_output)
+ dark_fitopt_files.append(fitopt_output)
+ if self.nints > 1:
+ #dark_slope_files[index] = rate_output.replace('rate', 'rateints')
+ dark_slope_files[index] = rate_output.replace('0_ramp_fit', '1_ramp_fit')
+ else:
+ dark_slope_files[index] = deepcopy(rate_output)
+ dark_obstimes.append(instrument_properties.get_obstime(uncal_file))
+ index += 1
+
+ if len(all_files) == 0:
+ all_files = deepcopy(dark_slope_files)
+ else:
+ all_files = all_files + dark_slope_files
+
+ min_dark_time = min(dark_obstimes)
+ max_dark_time = max(dark_obstimes)
+ mid_dark_time = instrument_properties.mean_time(dark_obstimes)
+
+ # For the dead flux check, filter out any files that have less than
+ # 4 groups
+ dead_flux_files = []
+ if illuminated_raw_files:
+ for illum_file in illuminated_raw_files:
+ ngroup = fits.getheader(illum_file)['NGROUPS']
+ if ngroup >= 4:
+ dead_flux_files.append(illum_file)
+ if len(dead_flux_files) == 0:
+ dead_flux_files = None
+
+ # Instrument-specific preferences from jwst_reffiles meetings
+ if self.instrument in ['nircam', 'niriss', 'fgs']:
+ dead_search_type = 'sigma_rate'
+ elif self.instrument in ['miri', 'nirspec']:
+ dead_search_type = 'absolute_rate'
+
+ flat_mean_normalization_method = 'smoothed'
+
+ # Call the bad pixel search module from jwst_reffiles. Lots of
+ # other possible parameters. Only specify the non-default params
+ # in order to make things easier to read.
+ query_string = 'darks_{}_flats_{}_to_{}'.format(self.dark_query_start, self.flat_query_start, self.query_end)
+ output_file = '{}_{}_{}_bpm.fits'.format(self.instrument, self.aperture, query_string)
+ output_file = os.path.join(self.output_dir, output_file)
+ bad_pixel_mask.bad_pixels(flat_slope_files=illuminated_slope_files, dead_search_type=dead_search_type,
+ flat_mean_normalization_method=flat_mean_normalization_method,
+ run_dead_flux_check=True, dead_flux_check_files=dead_flux_files, flux_check=35000,
+ dark_slope_files=dark_slope_files, dark_uncal_files=dark_raw_files,
+ dark_jump_files=dark_jump_files, dark_fitopt_files=dark_fitopt_files, plot=False,
+ output_file=output_file, author='jwst_reffiles', description='A bad pix mask',
+ pedigree='GROUND', useafter='2222-04-01 00:00:00',
+ history='This file was created by JWQL', quality_check=False)
+
+ # Read in the newly-created bad pixel file
+ set_permissions(output_file)
+ badpix_map = fits.getdata(output_file)
+
+ # Locate and read in the current bad pixel mask
+ parameters = self.make_crds_parameter_dict()
+ mask_dictionary = crds_tools.get_reffiles(parameters, ['mask'], download=True)
+ baseline_file = mask_dictionary['mask']
+
+ if 'NOT FOUND' in baseline_file:
+ logging.warning(('\tNo baseline bad pixel file for {} {}. Any bad '
+ 'pixels found as part of this search will be considered new'.format(self.instrument, self.aperture)))
+ baseline_file = output_file
+ yd, xd = badpix_map.shape
+ baseline_badpix_mask = np.zeros((yd, xd), dtype=int)
+ else:
+ logging.info('\tBaseline bad pixel file is {}'.format(baseline_file))
+ baseline_badpix_mask = fits.getdata(baseline_file)
+
+ # Exclude hot and dead pixels in the current bad pixel mask
+ #new_hot_pix = self.exclude_existing_badpix(new_hot_pix, 'hot')
+ new_since_reffile = exclude_crds_mask_pix(badpix_map, baseline_badpix_mask)
+
+ # Create a list of the new instances of each type of bad pixel
+ for bad_type in badpix_types:
+ bad_location_list = bad_map_to_list(new_since_reffile, bad_type)
+
+ # Add new hot and dead pixels to the database
+ logging.info('\tFound {} new {} pixels'.format(len(bad_location_list[0]), bad_type))
+
+ if bad_type in badpix_types_from_flats:
+ self.add_bad_pix(bad_location_list, bad_type, illuminated_slope_files, min_illum_time, mid_illum_time, max_illum_time, baseline_file)
+ elif bad_type in badpix_types_from_darks:
+ self.add_bad_pix(bad_location_list, bad_type, dark_slope_files, min_dark_time, mid_dark_time, max_dark_time, baseline_file)
+ else:
+ raise ValueError("Unrecognized type of bad pixel: {}. Cannot update database table.".format(bad_type))
+
+ @log_fail
+ @log_info
+ def run(self):
+ """The main method. See module docstrings for further details.
+
+ There are 2 parts to the bad pixel monitor:
+ 1. Bad pixels from illuminated data
+ 2. Bad pixels from dark data
+
+ For each, we will query MAST, copy new files from the filesystem
+ and pass the list of copied files into the ``process()`` method.
+ """
+ logging.info('Begin logging for bad_pixel_monitor')
+
+ # Get the output directory
+ self.output_dir = os.path.join(get_config()['outputs'], 'bad_pixel_monitor')
+
+ # Read in config file that defines the thresholds for the number of
+ # flat and dark files that must be present in order for the monitor to run
+ limits = ascii.read(THRESHOLDS_FILE)
+
+ # Use the current time as the end time for MAST query
+ self.query_end = Time.now().mjd
+
+ # Loop over all instruments
+ for instrument in JWST_INSTRUMENT_NAMES:
+ self.instrument = instrument
+
+ # Identify which database tables to use
+ self.identify_tables()
+
+ # Get a list of all possible apertures from pysiaf
+ possible_apertures = self.get_possible_apertures()
+
+ for aperture in possible_apertures:
+ grating = None
+ detector_name = None
+ lamp = None
+
+ # NIRSpec flats use the MIRROR grating.
+ if self.instrument == 'nirspec':
+ grating = 'MIRROR'
+
+ # MIRI is unlike the other instruments. We basically treat
+ # the detector as the aperture name because there is no
+ # aperture name for a full frame MRS exposure.
+ if self.instrument == 'miri':
+ detector_name, aperture_name = aperture
+ self.aperture = detector_name
+ else:
+ self.aperture = aperture
+ aperture_name = aperture
+
+ # In flight, NIRISS plans to take internal lamp flats using the LINE2 lamp
+ if self.instrument == 'niriss':
+ lamp = 'LINE2'
+
+ # What lamp is most appropriate for NIRSpec?
+ if self.instrument == 'nirspec':
+ lamp = 'LINE2'
+
+ # What lamp is most appropriate for FGS?
+ #if self.instrument == 'fgs':
+ # lamp = 'G2LAMP1'
+
+ logging.info('')
+ logging.info('Working on aperture {} in {}'.format(aperture, self.instrument))
+
+ # Find the appropriate threshold for the number of new files needed
+ match = self.aperture == limits['Aperture']
+ flat_file_count_threshold = limits['FlatThreshold'][match].data[0]
+ dark_file_count_threshold = limits['DarkThreshold'][match].data[0]
+
+ # Locate the record of the most recent MAST search
+ self.flat_query_start = self.most_recent_search(file_type='flat')
+ self.dark_query_start = self.most_recent_search(file_type='dark')
+ logging.info('\tFlat field query times: {} {}'.format(self.flat_query_start, self.query_end))
+ logging.info('\tDark current query times: {} {}'.format(self.dark_query_start, self.query_end))
+
+ # Query MAST using the aperture and the time of the most
+ # recent previous search as the starting time.
+ flat_templates = FLAT_EXP_TYPES[instrument]
+ dark_templates = DARK_EXP_TYPES[instrument]
+
+ new_flat_entries = mast_query(instrument, flat_templates, self.flat_query_start, self.query_end,
+ aperture=aperture_name, grating=grating, detector=detector_name,
+ lamp=lamp)
+ new_dark_entries = mast_query(instrument, dark_templates, self.dark_query_start, self.query_end,
+ aperture=aperture_name, detector=detector_name)
+
+ # Filter the results
+ # Filtering could be different for flats vs darks.
+ # Kevin says we shouldn't need to worry about mixing lamps in the data used to create the bad pixel
+ # mask. In flight, data will only be taken with LINE2, LEVEL 5. Currently in MAST all lamps are
+ # present, but Kevin is not concerned about variations in flat field structure.
+
+ # NIRISS - results can include rate, rateints, trapsfilled
+ # MIRI - Jane says they now use illuminated data for dead pixel checks, just like other insts.
+ # NIRSpec - can be cal, x1d, rate, rateints. Can have both cal and x1d so filter repeats
+ # FGS - rate, rateints, trapsfilled
+ # NIRCam - no int flats
+
+ # The query results can contain multiple entries for files
+ # in different calibration states (or for different output
+ # products), so we need to filter the list for duplicate
+ # entries and for the calibration state we are interested
+ # in before we know how many new entries there really are.
+
+ # In the end, we need rate files as well as uncal files
+ # because we're going to need to create jump files.
+ # In order to use a given file we must have at least the
+ # uncal version of the file. Get the uncal and rate file
+ # lists to align.
+
+ if new_flat_entries:
+ new_flat_entries = self.filter_query_results(new_flat_entries, datatype='flat')
+ flat_uncal_files = locate_uncal_files(new_flat_entries)
+ flat_uncal_files, run_flats = check_for_sufficient_files(flat_uncal_files, instrument, aperture, flat_file_count_threshold, 'flats')
+ flat_rate_files, flat_rate_files_to_copy = locate_rate_files(flat_uncal_files)
+ else:
+ run_flats = False
+ flat_uncal_files, flat_rate_files, flat_rate_files_to_copy = None, None, None
+
+ if new_dark_entries:
+ new_dark_entries = self.filter_query_results(new_dark_entries, datatype='dark')
+ dark_uncal_files = locate_uncal_files(new_dark_entries)
+ dark_uncal_files, run_darks = check_for_sufficient_files(dark_uncal_files, instrument, aperture, dark_file_count_threshold, 'darks')
+ dark_rate_files, dark_rate_files_to_copy = locate_rate_files(dark_uncal_files)
+ else:
+ run_darks = False
+ dark_uncal_files, dark_rate_files, dark_rate_files_to_copy = None, None, None
+
+ # Set up directories for the copied data
+ ensure_dir_exists(os.path.join(self.output_dir, 'data'))
+ self.data_dir = os.path.join(self.output_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower()))
+ ensure_dir_exists(self.data_dir)
+
+ # Copy files from filesystem
+ if run_flats:
+ flat_uncal_files, flat_rate_files = self.map_uncal_and_rate_file_lists(flat_uncal_files, flat_rate_files, flat_rate_files_to_copy, 'flat')
+ if run_darks:
+ dark_uncal_files, dark_rate_files = self.map_uncal_and_rate_file_lists(dark_uncal_files, dark_rate_files, dark_rate_files_to_copy, 'dark')
+
+ # Run the bad pixel monitor
+ if run_flats or run_darks:
+ self.process(flat_uncal_files, flat_rate_files, dark_uncal_files, dark_rate_files)
+
+ # Update the query history
+ if dark_uncal_files is None:
+ num_dark_files = 0
+ else:
+ num_dark_files = len(dark_uncal_files)
+
+ if flat_uncal_files is None:
+ num_flat_files = 0
+ else:
+ num_flat_files = len(flat_uncal_files)
+
+ new_entry = {'instrument': self.instrument.upper(),
+ 'aperture': self.aperture,
+ 'dark_start_time_mjd': self.dark_query_start,
+ 'dark_end_time_mjd': self.query_end,
+ 'flat_start_time_mjd': self.flat_query_start,
+ 'flat_end_time_mjd': self.query_end,
+ 'dark_files_found': num_dark_files,
+ 'flat_files_found': num_flat_files,
+ 'run_bpix_from_darks': run_darks,
+ 'run_bpix_from_flats': run_flats,
+ 'run_monitor': run_flats or run_darks,
+ 'entry_date': datetime.datetime.now()}
+ self.query_table.__table__.insert().execute(new_entry)
+ logging.info('\tUpdated the query history table')
+
+ logging.info('Bad Pixel Monitor completed successfully.')
+
+
+if __name__ == '__main__':
+
+ module = os.path.basename(__file__).replace('.py', '')
+ start_time, log_file = initialize_instrument_monitor(module)
+
+ monitor = BadPixels()
+ monitor.run()
+
+ update_monitor_table(module, start_time, log_file)
diff --git a/jwql/instrument_monitors/common_monitors/bias_monitor.py b/jwql/instrument_monitors/common_monitors/bias_monitor.py
new file mode 100755
index 000000000..c5c59cecf
--- /dev/null
+++ b/jwql/instrument_monitors/common_monitors/bias_monitor.py
@@ -0,0 +1,537 @@
+#! /usr/bin/env python
+
+"""This module contains code for the bias monitor, which monitors
+the bias levels in dark exposures as well as the performance of
+the pipeline superbias subtraction over time.
+
+For each instrument, the 0th group of full-frame dark exposures is
+saved to a fits file. The median signal levels in these images are
+recorded in the ``BiasStats`` database table for the
+odd/even columns of each amp.
+
+Next, these images are run through the jwst pipeline up through the
+reference pixel correction step. These calibrated images are saved
+to a fits file as well as a png file for visual inspection of the
+quality of the pipeline calibration. The median-collapsed row and
+column values, as well as the sigma-clipped mean and standard
+deviation of these images, are recorded in the
+``BiasStats`` database table.
+
+Author
+------
+ - Ben Sunnquist
+
+Use
+---
+ This module can be used from the command line as such:
+
+ ::
+
+ python bias_monitor.py
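+
+    Alternatively, the monitor can be run from within Python (a minimal
+    sketch; it assumes the same database and filesystem configuration as
+    the command line case):
+
+    ::
+
+        from jwql.instrument_monitors.common_monitors.bias_monitor import Bias
+        monitor = Bias()
+        monitor.run()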
+"""
+
+import datetime
+import logging
+import os
+
+from astropy.io import fits
+from astropy.stats import sigma_clipped_stats
+from astropy.time import Time
+from astropy.visualization import ZScaleInterval
+from jwst.dq_init import DQInitStep
+from jwst.group_scale import GroupScaleStep
+from jwst.refpix import RefPixStep
+from jwst.saturation import SaturationStep
+from jwst.superbias import SuperBiasStep
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+import numpy as np
+from pysiaf import Siaf
+from sqlalchemy import func
+from sqlalchemy.sql.expression import and_
+
+from jwql.database.database_interface import session
+from jwql.database.database_interface import NIRCamBiasQueryHistory, NIRCamBiasStats
+from jwql.instrument_monitors import pipeline_tools
+from jwql.instrument_monitors.common_monitors.dark_monitor import mast_query_darks
+from jwql.utils import instrument_properties
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE
+from jwql.utils.logging_functions import log_info, log_fail
+from jwql.utils.permissions import set_permissions
+from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config, initialize_instrument_monitor, update_monitor_table
+
+class Bias():
+ """Class for executing the bias monitor.
+
+ This class will search for new full-frame dark current files in
+ the file system for each instrument and will run the monitor on
+ these files. The monitor will extract the 0th group from the new
+ dark files and output the contents into a new file located in
+ a working directory. It will then perform statistical measurements
+ on these files before and after pipeline calibration in order to
+ monitor the bias levels over time as well as ensure the pipeline
+ superbias is sufficiently calibrating new data. Results are all
+ saved to database tables.
+
+ Attributes
+ ----------
+ output_dir : str
+ Path into which outputs will be placed
+
+ data_dir : str
+ Path into which new dark files will be copied to be worked on
+
+ query_start : float
+ MJD start date to use for querying MAST
+
+ query_end : float
+ MJD end date to use for querying MAST
+
+ instrument : str
+ Name of instrument used to collect the dark current data
+
+ aperture : str
+ Name of the aperture used for the dark current (e.g.
+ ``NRCA1_FULL``)
+ """
+
+ def __init__(self):
+ """Initialize an instance of the ``Bias`` class."""
+
+ def collapse_image(self, image):
+ """Median-collapse the rows and columns of an image.
+
+ Parameters
+ ----------
+ image : numpy.ndarray
+ 2D array on which to calculate statistics
+
+ Returns
+ -------
+ collapsed_rows : numpy.ndarray
+ 1D array of the collapsed row values
+
+ collapsed_columns : numpy.ndarray
+ 1D array of the collapsed column values
+ """
+
+ collapsed_rows = np.nanmedian(image, axis=1)
+ collapsed_columns = np.nanmedian(image, axis=0)
+
+ return collapsed_rows, collapsed_columns
+
+ def extract_zeroth_group(self, filename):
+ """Extracts the 0th group of a fits image and outputs it into
+ a new fits file.
+
+ Parameters
+ ----------
+ filename : str
+ The fits file from which the 0th group will be extracted.
+
+ Returns
+ -------
+ output_filename : str
+ The full path to the output file
+ """
+
+ output_filename = os.path.join(self.data_dir, os.path.basename(filename).replace('.fits', '_0thgroup.fits'))
+
+ # Write a new fits file containing the primary and science
+ # headers from the input file, as well as the 0th group
+ # data of the first integration
+ if not os.path.isfile(output_filename):
+ hdu = fits.open(filename)
+ new_hdu = fits.HDUList([hdu['PRIMARY'], hdu['SCI']])
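+ # Slicing with 0:1 (rather than a plain 0) keeps the first group of
+ # the first integration while preserving the 4D
+ # (integration, group, y, x) array shape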
+ new_hdu['SCI'].data = hdu['SCI'].data[0:1, 0:1, :, :]
+ new_hdu.writeto(output_filename)
+ hdu.close()
+ new_hdu.close()
+ set_permissions(output_filename)
+ logging.info('\t{} created'.format(output_filename))
+ else:
+ logging.info('\t{} already exists'.format(output_filename))
+
+ return output_filename
+
+ def file_exists_in_database(self, filename):
+ """Checks if an entry for filename exists in the bias stats
+ database.
+
+ Parameters
+ ----------
+ filename : str
+ The full path to the uncal filename
+
+ Returns
+ -------
+ file_exists : bool
+ ``True`` if filename exists in the bias stats database
+ """
+
+ query = session.query(self.stats_table)
+ results = query.filter(self.stats_table.uncal_filename == filename).all()
+
+ if len(results) != 0:
+ file_exists = True
+ else:
+ file_exists = False
+
+ return file_exists
+
+ def get_amp_medians(self, image, amps):
+ """Calculates the median in the input image for each amplifier
+ and for odd and even columns separately.
+
+ Parameters
+ ----------
+ image : numpy.ndarray
+ 2D array on which to calculate statistics
+
+ amps : dict
+ Dictionary containing amp boundary coordinates (output from
+ ``amplifier_info`` function)
+ ``amps[key] = [(xmin, xmax, xstep), (ymin, ymax, ystep)]``
+
+ Returns
+ -------
+ amp_medians : dict
+ Median values for each amp. Keys are amp numbers as
+ strings with an even/odd column designation (e.g. ``'amp1_even_med'``)
+ """
+
+ amp_medians = {}
+
+ for key in amps:
+ x_start, x_end, x_step = amps[key][0]
+ y_start, y_end, y_step = amps[key][1]
+
+ # Find median value of both even and odd columns for this amp
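+ # Here 0-based column indices 1, 3, 5, ... are treated as the "even"
+ # detector columns and 0, 2, 4, ... as the "odd" columns (detector
+ # columns are conventionally 1-indexed)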
+ amp_med_even = np.nanmedian(image[y_start: y_end, x_start: x_end][:, 1::2])
+ amp_medians['amp{}_even_med'.format(key)] = amp_med_even
+ amp_med_odd = np.nanmedian(image[y_start: y_end, x_start: x_end][:, ::2])
+ amp_medians['amp{}_odd_med'.format(key)] = amp_med_odd
+
+ return amp_medians
+
+ def identify_tables(self):
+ """Determine which database tables to use for a run of the bias
+ monitor.
+ """
+
+ mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument]
+ self.query_table = eval('{}BiasQueryHistory'.format(mixed_case_name))
+ self.stats_table = eval('{}BiasStats'.format(mixed_case_name))
+
+ def image_to_png(self, image, outname):
+ """Ouputs an image array into a png file.
+
+ Parameters
+ ----------
+ image : numpy.ndarray
+ 2D image array
+
+ outname : str
+ The name given to the output png file
+
+ Returns
+ -------
+ output_filename : str
+ The full path to the output png file
+ """
+
+ output_filename = os.path.join(self.data_dir, '{}.png'.format(outname))
+
+ if not os.path.isfile(output_filename):
+ # Get image scale limits
+ z = ZScaleInterval()
+ vmin, vmax = z.get_limits(image)
+
+ # Plot the image
+ plt.figure(figsize=(12,12))
+ ax = plt.gca()
+ im = ax.imshow(image, cmap='gray', origin='lower', vmin=vmin, vmax=vmax)
+ ax.set_title('{}'.format(outname))
+
+ # Make the colorbar
+ divider = make_axes_locatable(ax)
+ cax = divider.append_axes("right", size="5%", pad=0.4)
+ cbar = plt.colorbar(im, cax=cax)
+ cbar.set_label('Signal [DN]')
+
+ plt.savefig(output_filename, bbox_inches='tight', dpi=200)
+ set_permissions(output_filename)
+ logging.info('\t{} created'.format(output_filename))
+ else:
+ logging.info('\t{} already exists'.format(output_filename))
+
+ return output_filename
+
+ def most_recent_search(self):
+ """Query the query history database and return the information
+ on the most recent query for the given aperture where
+ the bias monitor was executed.
+
+ Returns
+ -------
+ query_result : float
+ Date (in MJD) of the ending range of the previous MAST query
+ where the bias monitor was run.
+ """
+
+ sub_query = session.query(
+ self.query_table.aperture,
+ func.max(self.query_table.end_time_mjd).label('maxdate')
+ ).group_by(self.query_table.aperture).subquery('t2')
+
+ # Note that "self.query_table.run_monitor == True" below is
+ # intentional. Switching = to "is" results in an error in the query.
+ query = session.query(self.query_table).join(
+ sub_query,
+ and_(
+ self.query_table.aperture == self.aperture,
+ self.query_table.end_time_mjd == sub_query.c.maxdate,
+ self.query_table.run_monitor == True
+ )
+ ).all()
+
+ query_count = len(query)
+ if query_count == 0:
+ query_result = 57357.0 # a.k.a. Dec 1, 2015 == CV3
+ logging.info(('\tNo query history for {}. Beginning search date will be set to {}.'.format(self.aperture, query_result)))
+ elif query_count > 1:
+ raise ValueError('More than one "most recent" query?')
+ else:
+ query_result = query[0].end_time_mjd
+
+ return query_result
+
+ def process(self, file_list):
+ """The main method for processing darks. See module docstrings
+ for further details.
+
+ Parameters
+ ----------
+ file_list : list
+ List of filenames (including full paths) to the dark current
+ files
+ """
+
+ for filename in file_list:
+ logging.info('\tWorking on file: {}'.format(filename))
+
+ # Skip processing if an entry for this file already exists in
+ # the bias stats database.
+ file_exists = self.file_exists_in_database(filename)
+ if file_exists:
+ logging.info('\t{} already exists in the bias database table.'.format(filename))
+ continue
+
+ # Get the exposure start time of this file
+ expstart = '{}T{}'.format(fits.getheader(filename, 0)['DATE-OBS'], fits.getheader(filename, 0)['TIME-OBS'])
+
+ # Determine if the file needs group_scale in pipeline run
+ read_pattern = fits.getheader(filename, 0)['READPATT']
+ group_scale = read_pattern in pipeline_tools.GROUPSCALE_READOUT_PATTERNS
+
+ # Run the file through the pipeline up through the refpix step
+ logging.info('\tRunning pipeline on {}'.format(filename))
+ processed_file = self.run_early_pipeline(filename, odd_even_rows=False, odd_even_columns=True, use_side_ref_pixels=True, group_scale=group_scale)
+ logging.info('\tPipeline complete. Output: {}'.format(processed_file))
+
+ # Find amplifier boundaries so per-amp statistics can be calculated
+ _, amp_bounds = instrument_properties.amplifier_info(processed_file, omit_reference_pixels=True)
+ logging.info('\tAmplifier boundaries: {}'.format(amp_bounds))
+
+ # Get the uncalibrated 0th group data for this file
+ uncal_data = fits.getdata(filename, 'SCI')[0, 0, :, :].astype(float)
+
+ # Calculate the uncal median values of each amplifier for odd/even columns
+ amp_medians = self.get_amp_medians(uncal_data, amp_bounds)
+ logging.info('\tCalculated uncalibrated image stats: {}'.format(amp_medians))
+
+ # Calculate image statistics and the collapsed row/column values
+ # in the calibrated image
+ cal_data = fits.getdata(processed_file, 'SCI')[0, 0, :, :]
+ dq = fits.getdata(processed_file, 'PIXELDQ')
+ mean, median, stddev = sigma_clipped_stats(cal_data[dq==0], sigma=3.0, maxiters=5)
+ logging.info('\tCalculated calibrated image stats: {:.3f} +/- {:.3f}'.format(mean, stddev))
+ collapsed_rows, collapsed_columns = self.collapse_image(cal_data)
+ logging.info('\tCalculated collapsed row/column values of calibrated image.')
+
+ # Save a png of the calibrated image for visual inspection
+ logging.info('\tCreating png of calibrated image')
+ output_png = self.image_to_png(cal_data, outname=os.path.basename(processed_file).replace('.fits',''))
+
+ # Construct new entry for this file for the bias database table.
+ # Can't insert values with numpy.float32 datatypes into database
+ # so need to change the datatypes of these values.
+ bias_db_entry = {'aperture': self.aperture,
+ 'uncal_filename': filename,
+ 'cal_filename': processed_file,
+ 'cal_image': output_png,
+ 'expstart': expstart,
+ 'mean': float(mean),
+ 'median': float(median),
+ 'stddev': float(stddev),
+ 'collapsed_rows': collapsed_rows.astype(float),
+ 'collapsed_columns': collapsed_columns.astype(float),
+ 'entry_date': datetime.datetime.now()
+ }
+ for key in amp_medians.keys():
+ bias_db_entry[key] = float(amp_medians[key])
+
+ # Add this new entry to the bias database table
+ self.stats_table.__table__.insert().execute(bias_db_entry)
+ logging.info('\tNew entry added to bias database table: {}'.format(bias_db_entry))
+
+ @log_fail
+ @log_info
+ def run(self):
+ """The main method. See module docstrings for further details."""
+
+ logging.info('Begin logging for bias_monitor')
+
+ # Get the output directory and setup a directory to store the data
+ self.output_dir = os.path.join(get_config()['outputs'], 'bias_monitor')
+ ensure_dir_exists(os.path.join(self.output_dir, 'data'))
+
+ # Use the current time as the end time for MAST query
+ self.query_end = Time.now().mjd
+
+ # Loop over instruments (currently only NIRCam is processed by this monitor)
+ for instrument in ['nircam']:
+ self.instrument = instrument
+
+ # Identify which database tables to use
+ self.identify_tables()
+
+ # Get a list of all possible full-frame apertures for this instrument
+ siaf = Siaf(self.instrument)
+ possible_apertures = [aperture for aperture in siaf.apertures if siaf[aperture].AperType=='FULLSCA']
+
+ for aperture in possible_apertures:
+
+ logging.info('Working on aperture {} in {}'.format(aperture, instrument))
+ self.aperture = aperture
+
+ # Locate the record of the most recent MAST search; use this time
+ # (plus a 30 day buffer to catch any missing files from the previous
+ # run) as the start time in the new MAST search.
+ most_recent_search = self.most_recent_search()
+ self.query_start = most_recent_search - 30
+
+ # Query MAST for new dark files for this instrument/aperture
+ logging.info('\tQuery times: {} {}'.format(self.query_start, self.query_end))
+ new_entries = mast_query_darks(instrument, aperture, self.query_start, self.query_end)
+ logging.info('\tAperture: {}, new entries: {}'.format(self.aperture, len(new_entries)))
+
+ # Set up a directory to store the data for this aperture
+ self.data_dir = os.path.join(self.output_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower()))
+ if len(new_entries) > 0:
+ ensure_dir_exists(self.data_dir)
+
+ # Save the 0th group image from each new file in the output directory;
+ # some don't exist in the JWQL filesystem.
+ new_files = []
+ for file_entry in new_entries:
+ try:
+ filename = filesystem_path(file_entry['filename'])
+ uncal_filename = filename.replace('_dark', '_uncal')
+ if not os.path.isfile(uncal_filename):
+ logging.info('\t{} does not exist in JWQL filesystem, even though {} does'.format(uncal_filename, filename))
+ else:
+ new_file = self.extract_zeroth_group(uncal_filename)
+ new_files.append(new_file)
+ except FileNotFoundError:
+ logging.info('\t{} does not exist in JWQL filesystem'.format(file_entry['filename']))
+
+ # Run the bias monitor on any new files
+ if len(new_files) > 0:
+ self.process(new_files)
+ monitor_run = True
+ else:
+ logging.info('\tBias monitor skipped. {} new dark files for {}, {}.'.format(len(new_files), instrument, aperture))
+ monitor_run = False
+
+ # Update the query history
+ new_entry = {'instrument': instrument,
+ 'aperture': aperture,
+ 'start_time_mjd': self.query_start,
+ 'end_time_mjd': self.query_end,
+ 'entries_found': len(new_entries),
+ 'files_found': len(new_files),
+ 'run_monitor': monitor_run,
+ 'entry_date': datetime.datetime.now()}
+ self.query_table.__table__.insert().execute(new_entry)
+ logging.info('\tUpdated the query history table')
+
+ logging.info('Bias Monitor completed successfully.')
+
+ def run_early_pipeline(self, filename, odd_even_rows=False, odd_even_columns=True,
+ use_side_ref_pixels=True, group_scale=False):
+ """Runs the early steps of the jwst pipeline (dq_init, saturation,
+ superbias, refpix) on uncalibrated files and outputs the result.
+
+ Parameters
+ ----------
+ filename : str
+ File on which to run the pipeline steps
+
+ odd_even_rows : bool
+ Option to treat odd and even rows separately during refpix step
+
+ odd_even_columns : bool
+ Option to treat odd and even columns separately during refpix step
+
+ use_side_ref_pixels : bool
+ Option to perform the side refpix correction during refpix step
+
+ group_scale : bool
+ Option to rescale pixel values to correct for instances where
+ on-board frame averaging did not result in the proper values
+
+ Returns
+ -------
+ output_filename : str
+ The full path to the calibrated file
+ """
+
+ output_filename = filename.replace('_uncal', '').replace('.fits', '_superbias_refpix.fits')
+
+ if not os.path.isfile(output_filename):
+ # Run the group_scale and dq_init steps on the input file
+ if group_scale:
+ model = GroupScaleStep.call(filename)
+ model = DQInitStep.call(model)
+ else:
+ model = DQInitStep.call(filename)
+
+ # Run the saturation and superbias steps
+ model = SaturationStep.call(model)
+ model = SuperBiasStep.call(model)
+
+ # Run the refpix step and save the output
+ model = RefPixStep.call(model, odd_even_rows=odd_even_rows, odd_even_columns=odd_even_columns, use_side_ref_pixels=use_side_ref_pixels)
+ model.save(output_filename)
+ set_permissions(output_filename)
+ else:
+ logging.info('\t{} already exists'.format(output_filename))
+
+ return output_filename
+
+
+if __name__ == '__main__':
+
+ module = os.path.basename(__file__).replace('.py', '')
+ start_time, log_file = initialize_instrument_monitor(module)
+
+ monitor = Bias()
+ monitor.run()
+
+ update_monitor_table(module, start_time, log_file)
diff --git a/jwql/instrument_monitors/common_monitors/readnoise_monitor.py b/jwql/instrument_monitors/common_monitors/readnoise_monitor.py
new file mode 100644
index 000000000..31076126a
--- /dev/null
+++ b/jwql/instrument_monitors/common_monitors/readnoise_monitor.py
@@ -0,0 +1,618 @@
+#! /usr/bin/env python
+
+"""This module contains code for the readnoise monitor, which monitors
+the readnoise levels in dark exposures as well as the accuracy of
+the pipeline readnoise reference files over time.
+
+For each instrument, the readnoise, technically the correlated double
+sampling (CDS) noise, is found by calculating the standard deviation
+through a stack of consecutive frame differences in each dark exposure.
+The sigma-clipped mean and standard deviation in each of these readnoise
+images, as well as histogram distributions, are recorded in the
+``ReadnoiseStats`` database table.
+
+Next, each of these readnoise images is differenced with the current
+pipeline readnoise reference file to identify the need for new reference
+files. A histogram distribution of these difference images, as well as
+the sigma-clipped mean and standard deviation, are recorded in the
+``ReadnoiseStats`` database table. A png version of these
+difference images is also saved for visual inspection.
+
+Author
+------
+ - Ben Sunnquist
+
+Use
+---
+ This module can be used from the command line as such:
+
+ ::
+
+ python readnoise_monitor.py
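+
+    Alternatively, the monitor can be run from within Python (a minimal
+    sketch; it assumes the same database and filesystem configuration as
+    the command line case):
+
+    ::
+
+        from jwql.instrument_monitors.common_monitors.readnoise_monitor import Readnoise
+        monitor = Readnoise()
+        monitor.run()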
+"""
+
+from collections import OrderedDict
+import datetime
+import logging
+import os
+import shutil
+
+from astropy.io import fits
+from astropy.stats import sigma_clip
+from astropy.time import Time
+from astropy.visualization import ZScaleInterval
+import crds
+from jwst.dq_init import DQInitStep
+from jwst.group_scale import GroupScaleStep
+from jwst.refpix import RefPixStep
+from jwst.superbias import SuperBiasStep
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+import numpy as np
+from pysiaf import Siaf
+from sqlalchemy.sql.expression import and_
+
+from jwql.database.database_interface import session
+from jwql.database.database_interface import NIRCamReadnoiseQueryHistory, NIRCamReadnoiseStats, NIRISSReadnoiseQueryHistory, NIRISSReadnoiseStats
+from jwql.instrument_monitors import pipeline_tools
+from jwql.instrument_monitors.common_monitors.dark_monitor import mast_query_darks
+from jwql.utils import instrument_properties
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE
+from jwql.utils.logging_functions import log_info, log_fail
+from jwql.utils.permissions import set_permissions
+from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config, initialize_instrument_monitor, update_monitor_table
+
+class Readnoise():
+ """Class for executing the readnoise monitor.
+
+ This class will search for new dark current files in the file
+ system for each instrument and will run the monitor on these
+ files. The monitor will create a readnoise image for each of the
+ new dark files. It will then perform statistical measurements
+ on these readnoise images, as well as their differences with the
+ current pipeline readnoise reference file, in order to monitor
+ the readnoise levels over time as well as ensure the pipeline
+ readnoise reference file is sufficiently capturing the current
+ readnoise behavior. Results are all saved to database tables.
+
+ Attributes
+ ----------
+ output_dir : str
+ Path into which outputs will be placed.
+
+ data_dir : str
+ Path into which new dark files will be copied to be worked on.
+
+ query_start : float
+ MJD start date to use for querying MAST.
+
+ query_end : float
+ MJD end date to use for querying MAST.
+
+ instrument : str
+ Name of instrument used to collect the dark current data.
+
+ aperture : str
+ Name of the aperture used for the dark current (e.g.
+ ``NRCA1_FULL``).
+ """
+
+ def __init__(self):
+ """Initialize an instance of the ``Readnoise`` class."""
+
+ def determine_pipeline_steps(self):
+ """Determines the necessary JWST pipelines steps to run on a
+ given dark file.
+
+ Returns
+ -------
+ pipeline_steps : collections.OrderedDict
+ The pipeline steps to run.
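+ For example, for a NIRCam dark taken with a readout pattern that
+ does not require group scaling, this would be
+ ``OrderedDict([('group_scale', False), ('dq_init', True),
+ ('superbias', True), ('refpix', True)])``.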
+ """
+
+ pipeline_steps = OrderedDict({})
+
+ # Determine if the file needs group_scale step run
+ if self.read_pattern not in pipeline_tools.GROUPSCALE_READOUT_PATTERNS:
+ pipeline_steps['group_scale'] = False
+ else:
+ pipeline_steps['group_scale'] = True
+
+ # Run the DQ step on all files
+ pipeline_steps['dq_init'] = True
+
+ # Only run the superbias step for NIR instruments
+ if self.instrument.upper() != 'MIRI':
+ pipeline_steps['superbias'] = True
+ else:
+ pipeline_steps['superbias'] = False
+
+ # Run the refpix step on all files
+ pipeline_steps['refpix'] = True
+
+ return pipeline_steps
+
+ def file_exists_in_database(self, filename):
+ """Checks if an entry for filename exists in the readnoise stats
+ database.
+
+ Parameters
+ ----------
+ filename : str
+ The full path to the uncal filename.
+
+ Returns
+ -------
+ file_exists : bool
+ ``True`` if filename exists in the readnoise stats database.
+ """
+
+ query = session.query(self.stats_table)
+ results = query.filter(self.stats_table.uncal_filename == filename).all()
+
+ if len(results) != 0:
+ file_exists = True
+ else:
+ file_exists = False
+
+ return file_exists
+
+ def get_amp_stats(self, image, amps):
+ """Calculates the sigma-clipped mean and stddev, as well as the
+ histogram stats in the input image for each amplifier.
+
+ Parameters
+ ----------
+ image : numpy.ndarray
+ 2D array on which to calculate statistics.
+
+ amps : dict
+ Dictionary containing amp boundary coordinates (output from
+ ``amplifier_info`` function)
+ ``amps[key] = [(xmin, xmax, xstep), (ymin, ymax, ystep)]``
+
+ Returns
+ -------
+ amp_stats : dict
+ Contains the image statistics for each amp.
+ """
+
+ amp_stats = {}
+
+ for key in amps:
+ x_start, x_end, x_step = amps[key][0]
+ y_start, y_end, y_step = amps[key][1]
+
+ # Find sigma-clipped mean/stddev values for this amp
+ amp_data = image[y_start: y_end: y_step, x_start: x_end: x_step]
+ clipped = sigma_clip(amp_data, sigma=3.0, maxiters=5)
+ amp_stats['amp{}_mean'.format(key)] = np.nanmean(clipped)
+ amp_stats['amp{}_stddev'.format(key)] = np.nanstd(clipped)
+
+ # Find the histogram stats for this amp
+ n, bin_centers = self.make_histogram(amp_data)
+ amp_stats['amp{}_n'.format(key)] = n
+ amp_stats['amp{}_bin_centers'.format(key)] = bin_centers
+
+ return amp_stats
+
+ def get_metadata(self, filename):
+ """Collect basic metadata from a fits file.
+
+ Parameters
+ ----------
+ filename : str
+ Name of fits file to examine.
+ """
+
+ header = fits.getheader(filename)
+
+ try:
+ self.detector = header['DETECTOR']
+ self.read_pattern = header['READPATT']
+ self.subarray = header['SUBARRAY']
+ self.nints = header['NINTS']
+ self.ngroups = header['NGROUPS']
+ self.substrt1 = header['SUBSTRT1']
+ self.substrt2 = header['SUBSTRT2']
+ self.subsize1 = header['SUBSIZE1']
+ self.subsize2 = header['SUBSIZE2']
+ self.date_obs = header['DATE-OBS']
+ self.time_obs = header['TIME-OBS']
+ self.expstart = '{}T{}'.format(self.date_obs, self.time_obs)
+ except KeyError as e:
+ logging.error(e)
+
+ def identify_tables(self):
+ """Determine which database tables to use for a run of the
+ readnoise monitor.
+ """
+
+ mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument]
+ self.query_table = eval('{}ReadnoiseQueryHistory'.format(mixed_case_name))
+ self.stats_table = eval('{}ReadnoiseStats'.format(mixed_case_name))
+
+ def image_to_png(self, image, outname):
+ """Outputs an image array into a png file.
+
+ Parameters
+ ----------
+ image : numpy.ndarray
+ 2D image array.
+
+ outname : str
+ The name given to the output png file.
+
+ Returns
+ -------
+ output_filename : str
+ The full path to the output png file.
+ """
+
+ output_filename = os.path.join(self.data_dir, '{}.png'.format(outname))
+
+ # Get image scale limits
+ zscale = ZScaleInterval()
+ vmin, vmax = zscale.get_limits(image)
+
+ # Plot the image
+ plt.figure(figsize=(12,12))
+ im = plt.imshow(image, cmap='gray', origin='lower', vmin=vmin, vmax=vmax)
+ plt.colorbar(im, label='Readnoise Difference (most recent dark - reffile) [DN]')
+ plt.title('{}'.format(outname))
+
+ # Save the figure
+ plt.savefig(output_filename, bbox_inches='tight', dpi=200)
+ set_permissions(output_filename)
+ logging.info('\t{} created'.format(output_filename))
+
+ return output_filename
+
+ def make_crds_parameter_dict(self):
+ """Construct a paramter dictionary to be used for querying CRDS
+ for the current reffiles in use by the JWST pipeline.
+
+ Returns
+ -------
+ parameters : dict
+ Dictionary of parameters, in the format expected by CRDS.
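+ For example (illustrative values only)::
+
+     {'INSTRUME': 'NIRCAM', 'DETECTOR': 'NRCA1',
+      'READPATT': 'BRIGHT1', 'SUBARRAY': 'FULL',
+      'DATE-OBS': '2021-01-01', 'TIME-OBS': '12:00:00.000000'}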
+ """
+
+ parameters = {}
+ parameters['INSTRUME'] = self.instrument.upper()
+ parameters['DETECTOR'] = self.detector.upper()
+ parameters['READPATT'] = self.read_pattern.upper()
+ parameters['SUBARRAY'] = self.subarray.upper()
+ parameters['DATE-OBS'] = datetime.date.today().isoformat()
+ current_date = datetime.datetime.now()
+ parameters['TIME-OBS'] = current_date.time().isoformat()
+
+ return parameters
+
+ def make_histogram(self, data):
+ """Creates a histogram of the input data and returns the bin
+ centers and the counts in each bin.
+
+ Parameters
+ ----------
+ data : numpy.ndarray
+ The input data.
+
+ Returns
+ -------
+ counts : numpy.ndarray
+ The counts in each histogram bin.
+
+ bin_centers : numpy.ndarray
+ The histogram bin centers.
+ """
+
+ # Calculate the histogram range as that within 4 sigma of the sigma-clipped mean
+ data = data.flatten()
+ clipped = sigma_clip(data, sigma=3.0, maxiters=5)
+ mean, stddev = np.nanmean(clipped), np.nanstd(clipped)
+ lower_thresh, upper_thresh = mean - 4 * stddev, mean + 4 * stddev
+
+ # Some images, e.g. readnoise images, will never have values below zero
+ if (lower_thresh < 0) & (len(data[data < 0]) == 0):
+ lower_thresh = 0.0
+
+ # Make the histogram
+ counts, bin_edges = np.histogram(data, bins='auto', range=(lower_thresh, upper_thresh))
+ bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
+
+ return counts, bin_centers
+
+ def make_readnoise_image(self, data):
+ """Calculates the readnoise for the given input dark current
+ ramp.
+
+ Parameters
+ ----------
+ data : numpy.ndarray
+ The input ramp data. The data shape is assumed to be a 4D
+ array in DMS format (integration, group, y, x).
+
+ Returns
+ -------
+ readnoise : numpy.ndarray
+ The 2D readnoise image.
+ """
+
+ # Create a stack of correlated double sampling (CDS) images using the input
+ # ramp data, combining multiple integrations if necessary.
+ logging.info('\tCreating stack of CDS difference frames')
+ num_ints, num_groups, num_y, num_x = data.shape
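+ # Each CDS frame is the difference of consecutive group pairs within an
+ # integration, e.g. (group 1 - group 0), (group 3 - group 2), ...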
+ for integration in range(num_ints):
+ if num_groups % 2 == 0:
+ cds = data[integration, 1::2, :, :] - data[integration, ::2, :, :]
+ else:
+ # Omit the last group if the number of groups is odd
+ cds = data[integration, 1::2, :, :] - data[integration, ::2, :, :][:-1]
+
+ if integration == 0:
+ cds_stack = cds
+ else:
+ cds_stack = np.concatenate((cds_stack, cds), axis=0)
+
+ # Calculate the readnoise by taking the clipped stddev through the CDS stack
+ logging.info('\tCreating readnoise image')
+ clipped = sigma_clip(cds_stack, sigma=3.0, maxiters=3, axis=0)
+ readnoise = np.std(clipped, axis=0)
+ readnoise = readnoise.filled(fill_value=np.nan) # converts masked array to normal array and fills missing data
+
+ return readnoise
+
+ def most_recent_search(self):
+ """Query the query history database and return the information
+ on the most recent query for the given aperture where
+ the readnoise monitor was executed.
+
+ Returns
+ -------
+ query_result : float
+ Date (in MJD) of the ending range of the previous MAST query
+ where the readnoise monitor was run.
+ """
+
+ query = session.query(self.query_table).filter(and_(self.query_table.aperture==self.aperture,
+ self.query_table.run_monitor==True)).order_by(self.query_table.end_time_mjd).all()
+
+ if len(query) == 0:
+ query_result = 57357.0 # a.k.a. Dec 1, 2015 == CV3
+ logging.info(('\tNo query history for {}. Beginning search date will be set to {}.'.format(self.aperture, query_result)))
+ else:
+ query_result = query[-1].end_time_mjd
+
+ return query_result
+
+ def process(self, file_list):
+ """The main method for processing darks. See module docstrings
+ for further details.
+
+ Parameters
+ ----------
+ file_list : list
+ List of filenames (including full paths) to the dark current
+ files.
+ """
+
+ for filename in file_list:
+ logging.info('\tWorking on file: {}'.format(filename))
+
+ # Get relevant header information for this file
+ self.get_metadata(filename)
+
+ # Run the file through the necessary pipeline steps
+ pipeline_steps = self.determine_pipeline_steps()
+ logging.info('\tRunning pipeline on {}'.format(filename))
+ try:
+ processed_file = pipeline_tools.run_calwebb_detector1_steps(filename, pipeline_steps)
+ logging.info('\tPipeline complete. Output: {}'.format(processed_file))
+ set_permissions(processed_file)
+ except Exception:
+ logging.warning('\tPipeline processing failed for {}'.format(filename))
+ continue
+
+ # Find amplifier boundaries so per-amp statistics can be calculated
+ _, amp_bounds = instrument_properties.amplifier_info(processed_file, omit_reference_pixels=True)
+ logging.info('\tAmplifier boundaries: {}'.format(amp_bounds))
+
+ # Get the ramp data; remove first 5 groups and last group for MIRI to avoid reset/rscd effects
+ cal_data = fits.getdata(processed_file, 'SCI', uint=False)
+ if self.instrument.upper() == 'MIRI':
+ cal_data = cal_data[:, 5:-1, :, :]
+
+ # Make the readnoise image
+ readnoise_outfile = os.path.join(self.data_dir, os.path.basename(processed_file.replace('.fits', '_readnoise.fits')))
+ readnoise = self.make_readnoise_image(cal_data)
+ fits.writeto(readnoise_outfile, readnoise, overwrite=True)
+ logging.info('\tReadnoise image saved to {}'.format(readnoise_outfile))
+
+ # Calculate the full image readnoise stats
+ clipped = sigma_clip(readnoise, sigma=3.0, maxiters=5)
+ full_image_mean, full_image_stddev = np.nanmean(clipped), np.nanstd(clipped)
+ full_image_n, full_image_bin_centers = self.make_histogram(readnoise)
+ logging.info('\tReadnoise image stats: {:.5f} +/- {:.5f}'.format(full_image_mean, full_image_stddev))
+
+ # Calculate readnoise stats in each amp separately
+ amp_stats = self.get_amp_stats(readnoise, amp_bounds)
+ logging.info('\tReadnoise image stats by amp: {}'.format(amp_stats))
+
+ # Get the current JWST Readnoise Reference File data
+ parameters = self.make_crds_parameter_dict()
+ reffile_mapping = crds.getreferences(parameters, reftypes=['readnoise'])
+ readnoise_file = reffile_mapping['readnoise']
+ if 'NOT FOUND' in readnoise_file:
+ logging.warning('\tNo pipeline readnoise reffile match for this file - assuming all zeros.')
+ pipeline_readnoise = np.zeros(readnoise.shape)
+ else:
+ logging.info('\tPipeline readnoise reffile is {}'.format(readnoise_file))
+ pipeline_readnoise = fits.getdata(readnoise_file)
+
+ # Find the difference between the current readnoise image and the pipeline readnoise reffile, and record image stats.
+ # Sometimes, the pipeline readnoise reffile needs to be cutout to match the subarray.
+ pipeline_readnoise = pipeline_readnoise[self.substrt2-1:self.substrt2+self.subsize2-1, self.substrt1-1:self.substrt1+self.subsize1-1]
+ readnoise_diff = readnoise - pipeline_readnoise
+ clipped = sigma_clip(readnoise_diff, sigma=3.0, maxiters=5)
+ diff_image_mean, diff_image_stddev = np.nanmean(clipped), np.nanstd(clipped)
+ diff_image_n, diff_image_bin_centers = self.make_histogram(readnoise_diff)
+ logging.info('\tReadnoise difference image stats: {:.5f} +/- {:.5f}'.format(diff_image_mean, diff_image_stddev))
+
+ # Save a png of the readnoise difference image for visual inspection
+ logging.info('\tCreating png of readnoise difference image')
+ readnoise_diff_png = self.image_to_png(readnoise_diff, outname=os.path.basename(readnoise_outfile).replace('.fits', '_diff'))
+
+ # Construct new entry for this file for the readnoise database table.
+ # Can't insert values with numpy.float32 datatypes into database
+ # so need to change the datatypes of these values.
+ readnoise_db_entry = {'uncal_filename': filename,
+ 'aperture': self.aperture,
+ 'detector': self.detector,
+ 'subarray': self.subarray,
+ 'read_pattern': self.read_pattern,
+ 'nints': self.nints,
+ 'ngroups': self.ngroups,
+ 'expstart': self.expstart,
+ 'readnoise_filename': readnoise_outfile,
+ 'full_image_mean': float(full_image_mean),
+ 'full_image_stddev': float(full_image_stddev),
+ 'full_image_n': full_image_n.astype(float),
+ 'full_image_bin_centers': full_image_bin_centers.astype(float),
+ 'readnoise_diff_image': readnoise_diff_png,
+ 'diff_image_mean': float(diff_image_mean),
+ 'diff_image_stddev': float(diff_image_stddev),
+ 'diff_image_n': diff_image_n.astype(float),
+ 'diff_image_bin_centers': diff_image_bin_centers.astype(float),
+ 'entry_date': datetime.datetime.now()
+ }
+ for key in amp_stats.keys():
+ if isinstance(amp_stats[key], (int, float)):
+ readnoise_db_entry[key] = float(amp_stats[key])
+ else:
+ readnoise_db_entry[key] = amp_stats[key].astype(float)
+
+ # Add this new entry to the readnoise database table
+ self.stats_table.__table__.insert().execute(readnoise_db_entry)
+ logging.info('\tNew entry added to readnoise database table')
+
+ # Remove the raw and calibrated files to save memory space
+ os.remove(filename)
+ os.remove(processed_file)
+
+ @log_fail
+ @log_info
+ def run(self):
+ """The main method. See module docstrings for further
+ details.
+ """
+
+ logging.info('Begin logging for readnoise_monitor\n')
+
+ # Get the output directory and setup a directory to store the data
+ self.output_dir = os.path.join(get_config()['outputs'], 'readnoise_monitor')
+ ensure_dir_exists(os.path.join(self.output_dir, 'data'))
+
+ # Use the current time as the end time for MAST query
+ self.query_end = Time.now().mjd
+
+ # Loop over instruments (currently NIRCam and NIRISS)
+ for instrument in ['nircam', 'niriss']:
+ self.instrument = instrument
+
+ # Identify which database tables to use
+ self.identify_tables()
+
+ # Get a list of all possible apertures for this instrument
+ siaf = Siaf(self.instrument)
+ possible_apertures = list(siaf.apertures)
+
+ for aperture in possible_apertures:
+
+ logging.info('\nWorking on aperture {} in {}'.format(aperture, instrument))
+ self.aperture = aperture
+
+ # Locate the record of the most recent MAST search; use this time
+ # (plus a 30 day buffer to catch any missing files from the previous
+ # run) as the start time in the new MAST search.
+ most_recent_search = self.most_recent_search()
+ self.query_start = most_recent_search - 30
+
+ # Query MAST for new dark files for this instrument/aperture
+ logging.info('\tQuery times: {} {}'.format(self.query_start, self.query_end))
+ new_entries = mast_query_darks(instrument, aperture, self.query_start, self.query_end)
+ logging.info('\tAperture: {}, new entries: {}'.format(self.aperture, len(new_entries)))
+
+ # Set up a directory to store the data for this aperture
+ self.data_dir = os.path.join(self.output_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower()))
+ if len(new_entries) > 0:
+ ensure_dir_exists(self.data_dir)
+
+ # Get any new files to process
+ new_files = []
+ checked_files = []
+ for file_entry in new_entries:
+ output_filename = os.path.join(self.data_dir, file_entry['filename'].replace('_dark', '_uncal'))
+
+                # Sometimes both the dark and uncal names of a file are picked up in new_entries
+ if output_filename in checked_files:
+ logging.info('\t{} already checked in this run.'.format(output_filename))
+ continue
+ checked_files.append(output_filename)
+
+                # Don't process files that already exist in the readnoise stats database
+ file_exists = self.file_exists_in_database(output_filename)
+ if file_exists:
+ logging.info('\t{} already exists in the readnoise database table.'.format(output_filename))
+ continue
+
+                # Save any new uncal files with enough groups in the output directory; some don't exist in the JWQL filesystem
+ try:
+ filename = filesystem_path(file_entry['filename'])
+ uncal_filename = filename.replace('_dark', '_uncal')
+ if not os.path.isfile(uncal_filename):
+ logging.info('\t{} does not exist in JWQL filesystem, even though {} does'.format(uncal_filename, filename))
+ else:
+ num_groups = fits.getheader(uncal_filename)['NGROUPS']
+                        if num_groups > 1:  # skip processing if the file doesn't have enough groups to calculate the readnoise; TODO change to 10 before incorporating MIRI
+ shutil.copy(uncal_filename, self.data_dir)
+ logging.info('\tCopied {} to {}'.format(uncal_filename, output_filename))
+ set_permissions(output_filename)
+ new_files.append(output_filename)
+ else:
+ logging.info('\tNot enough groups to calculate readnoise in {}'.format(uncal_filename))
+ except FileNotFoundError:
+ logging.info('\t{} does not exist in JWQL filesystem'.format(file_entry['filename']))
+
+ # Run the readnoise monitor on any new files
+ if len(new_files) > 0:
+ self.process(new_files)
+ monitor_run = True
+ else:
+ logging.info('\tReadnoise monitor skipped. {} new dark files for {}, {}.'.format(len(new_files), instrument, aperture))
+ monitor_run = False
+
+ # Update the query history
+ new_entry = {'instrument': instrument,
+ 'aperture': aperture,
+ 'start_time_mjd': self.query_start,
+ 'end_time_mjd': self.query_end,
+ 'entries_found': len(new_entries),
+ 'files_found': len(new_files),
+ 'run_monitor': monitor_run,
+ 'entry_date': datetime.datetime.now()}
+ self.query_table.__table__.insert().execute(new_entry)
+ logging.info('\tUpdated the query history table')
+
+ logging.info('Readnoise Monitor completed successfully.')
+
+if __name__ == '__main__':
+
+    module = os.path.basename(__file__).replace('.py', '')
+ start_time, log_file = initialize_instrument_monitor(module)
+
+ monitor = Readnoise()
+ monitor.run()
+
+ update_monitor_table(module, start_time, log_file)
diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py b/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py
index c2532edf1..54fa286b2 100755
--- a/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py
+++ b/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py
@@ -19,9 +19,9 @@
----------
'''
-import .utils.mnemonics as mn
-import .utils.sql_interface as sql
-from .utils.process_data import whole_day_routine, wheelpos_routine
+import utils.mnemonics as mn
+import utils.sql_interface as sql
+from utils.process_data import whole_day_routine, wheelpos_routine
from jwql.utils.engineering_database import query_single_mnemonic
import pandas as pd
diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py
index 01fe1ab74..f4932db5a 100644
--- a/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py
+++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py
@@ -24,9 +24,9 @@
Notes
-----
'''
-import .utils.mnemonics as mn
-import .utils.sql_interface as sql
-from .utils.process_data import whole_day_routine, wheelpos_routine
+import utils.mnemonics as mn
+import utils.sql_interface as sql
+from utils.process_data import whole_day_routine, wheelpos_routine
from jwql.utils.engineering_database import query_single_mnemonic
import pandas as pd
diff --git a/jwql/instrument_monitors/pipeline_tools.py b/jwql/instrument_monitors/pipeline_tools.py
index e81e7cfce..1b15de9e2 100644
--- a/jwql/instrument_monitors/pipeline_tools.py
+++ b/jwql/instrument_monitors/pipeline_tools.py
@@ -18,6 +18,7 @@
from collections import OrderedDict
import copy
import numpy as np
+import os
from astropy.io import fits
from jwst.dq_init import DQInitStep
@@ -29,6 +30,7 @@
from jwst.lastframe import LastFrameStep
from jwst.linearity import LinearityStep
from jwst.persistence import PersistenceStep
+from jwst.pipeline.calwebb_detector1 import Detector1Pipeline
from jwst.ramp_fitting import RampFitStep
from jwst.refpix import RefPixStep
from jwst.rscd import RSCD_Step
@@ -222,6 +224,99 @@ def run_calwebb_detector1_steps(input_file, steps):
return output_filename
+def calwebb_detector1_save_jump(input_file, output_dir, ramp_fit=True, save_fitopt=True):
+ """Call ``calwebb_detector1`` on the provided file, running all
+ steps up to the ``ramp_fit`` step, and save the result. Optionally
+ run the ``ramp_fit`` step and save the resulting slope file as well.
+
+ Parameters
+ ----------
+ input_file : str
+        Name of the fits file to run through the pipeline
+
+ output_dir : str
+ Directory into which the pipeline outputs are saved
+
+ ramp_fit : bool
+ If ``False``, the ``ramp_fit`` step is not run. The output file
+ will be a ``*_jump.fits`` file.
+        If ``True``, the ``*_jump.fits`` file will be produced and
+        saved. In addition, the ``ramp_fit`` step will be run and a
+        ``*_rate.fits`` or ``*_rateints.fits`` file will be saved
+        (``rateints`` if the input file has more than one integration).
+
+ save_fitopt : bool
+ If ``True``, the file of optional outputs from the ramp fitting
+ step of the pipeline is saved.
+
+ Returns
+ -------
+ jump_output : str
+ Name of the saved file containing the output prior to the
+ ``ramp_fit`` step.
+
+    pipe_output : str
+        Name of the saved file containing the output after ramp-fitting
+        is performed (if requested). Otherwise ``None``.
+
+    fitopt_output : str
+        Name of the saved file containing the optional outputs from the
+        ``ramp_fit`` step (if requested). Otherwise ``None``.
+    """
+ input_file_only = os.path.basename(input_file)
+
+ # Find the instrument used to collect the data
+ instrument = fits.getheader(input_file)['INSTRUME'].lower()
+
+ # Switch to calling the pipeline rather than individual steps,
+ # and use the run() method so that we can set parameters
+    # programmatically.
+ model = Detector1Pipeline()
+
+    # For NIRCam, the refpix odd/even row correction should always be turned off
+ if instrument == 'nircam':
+ model.refpix.odd_even_rows = False
+
+ # Default CR rejection threshold is too low
+ model.jump.rejection_threshold = 15
+
+ model.jump.save_results = True
+ model.jump.output_dir = output_dir
+ jump_output = os.path.join(output_dir, input_file_only.replace('uncal', 'jump'))
+
+ # Check to see if the jump version of the requested file is already
+ # present
+ run_jump = not os.path.isfile(jump_output)
+
+ if ramp_fit:
+ model.ramp_fit.save_results = True
+ #model.save_results = True
+ model.output_dir = output_dir
+ #pipe_output = os.path.join(output_dir, input_file_only.replace('uncal', 'rate'))
+ pipe_output = os.path.join(output_dir, input_file_only.replace('uncal', '0_ramp_fit'))
+ run_slope = not os.path.isfile(pipe_output)
+ if save_fitopt:
+ model.ramp_fit.save_opt = True
+ fitopt_output = os.path.join(output_dir, input_file_only.replace('uncal', 'fitopt'))
+ run_fitopt = not os.path.isfile(fitopt_output)
+ else:
+ model.ramp_fit.save_opt = False
+ fitopt_output = None
+ run_fitopt = False
+ else:
+ model.ramp_fit.skip = True
+ pipe_output = None
+ fitopt_output = None
+ run_slope = False
+ run_fitopt = False
+
+ # Call the pipeline if any of the files at the requested calibration
+ # states are not present in the output directory
+ if run_jump or (ramp_fit and run_slope) or (save_fitopt and run_fitopt):
+ model.run(input_file)
+ else:
+ print(("Files with all requested calibration states for {} already present in "
+ "output directory. Skipping pipeline call.".format(input_file)))
+
+ return jump_output, pipe_output, fitopt_output
+
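A hedged usage sketch of the new ``calwebb_detector1_save_jump`` helper; the file path and output directory below are placeholders::

    from jwql.instrument_monitors import pipeline_tools

    uncal_file = '/path/to/jw00000001001_01101_00001_nrca1_uncal.fits'  # placeholder
    out_dir = '/path/to/outputs'  # placeholder

    jump_file, rate_file, fitopt_file = pipeline_tools.calwebb_detector1_save_jump(
        uncal_file, out_dir, ramp_fit=True, save_fitopt=False)
    # jump_file   -> the *_jump.fits product
    # rate_file   -> the *_0_ramp_fit.fits product (None if ramp_fit=False)
    # fitopt_file -> None here, since save_fitopt=False
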
+
def steps_to_run(all_steps, finished_steps):
"""Given a list of pipeline steps that need to be completed as well
as a list of steps that have already been completed, return a list
diff --git a/jwql/jwql_monitors/monitor_mast.py b/jwql/jwql_monitors/monitor_mast.py
index 4a36e5e4c..825b9e1c5 100755
--- a/jwql/jwql_monitors/monitor_mast.py
+++ b/jwql/jwql_monitors/monitor_mast.py
@@ -249,6 +249,8 @@ def monitor_mast():
"""
logging.info('Beginning database monitoring.')
+ outputs_dir = os.path.join(get_config()['outputs'], 'monitor_mast')
+
# Perform inventory of the JWST service
jwst_inventory(instruments=JWST_INSTRUMENT_NAMES,
dataproducts=['image', 'spectrum', 'cube'],
@@ -267,4 +269,4 @@ def monitor_mast():
configure_logging(module)
# Run the monitors
- monitor_mast()
\ No newline at end of file
+ monitor_mast()
diff --git a/jwql/tests/test_database_interface.py b/jwql/tests/test_database_interface.py
index 898af27fb..e35e18608 100755
--- a/jwql/tests/test_database_interface.py
+++ b/jwql/tests/test_database_interface.py
@@ -25,7 +25,7 @@
import string
from jwql.database import database_interface as di
-from jwql.utils.constants import ANOMALIES
+from jwql.utils.constants import ANOMALIES_PER_INSTRUMENT
from jwql.utils.utils import get_config
# Determine if tests are being run on jenkins
@@ -61,14 +61,14 @@ def test_anomaly_orm_factory():
creates an ORM and contains the appropriate columns"""
test_table_name = 'test_anomaly_table'
- TestAnomalyTable = di.anomaly_orm_factory('test_anomaly_table')
+ TestAnomalyTable = di.anomaly_orm_factory(test_table_name)
table_attributes = TestAnomalyTable.__dict__.keys()
     assert str(TestAnomalyTable) == "<class 'jwql.database.database_interface.{}'>"\
         .format(test_table_name)
- for anomaly in ANOMALIES:
- assert anomaly in table_attributes
+ for item in ['id', 'rootname', 'flag_date', 'user']:
+ assert item in table_attributes
@pytest.mark.skipif(ON_JENKINS, reason='Requires access to development database server.')
@@ -79,15 +79,15 @@ def test_anomaly_records():
random_rootname = ''.join(random.SystemRandom().choice(string.ascii_lowercase + \
string.ascii_uppercase + \
string.digits) for _ in range(10))
- di.session.add(di.Anomaly(rootname=random_rootname,
+ di.session.add(di.FGSAnomaly(rootname=random_rootname,
flag_date=datetime.datetime.today(),
user='test', ghost=True))
di.session.commit()
# Test the ghosts column
- ghosts = di.session.query(di.Anomaly)\
- .filter(di.Anomaly.rootname == random_rootname)\
- .filter(di.Anomaly.ghost == "True")
+ ghosts = di.session.query(di.FGSAnomaly)\
+ .filter(di.FGSAnomaly.rootname == random_rootname)\
+ .filter(di.FGSAnomaly.ghost == "True")
assert ghosts.data_frame.iloc[0]['ghost'] == True
diff --git a/jwql/utils/anomaly_query_config.py b/jwql/utils/anomaly_query_config.py
new file mode 100644
index 000000000..bcd797996
--- /dev/null
+++ b/jwql/utils/anomaly_query_config.py
@@ -0,0 +1,40 @@
+"""Globally defined and used variables for the JWQL query anomaly
+feature. Variables will be re-defined when anomaly query forms are
+submitted.
+
+Authors
+-------
+
+ - Teagan King
+
+
+Use
+---
+    The variables within this module are intended to be directly
+ imported, e.g.:
+ ::
+
+        from jwql.utils.anomaly_query_config import INSTRUMENTS_CHOSEN
+"""
+
+# Apertures selected by user in query_anomaly_2
+APERTURES_CHOSEN = ["No apertures selected"]
+
+# Anomalies available to select after instruments are selected in query_anomaly
+# Default is all anomalies common to all instruments
+CURRENT_ANOMALIES = ['cosmic_ray_shower', 'diffraction_spike', 'excessive_saturation',
+ 'guidestar_failure', 'persistence', 'other']
+
+# Instruments selected by user in query_anomaly
+INSTRUMENTS_CHOSEN = ["No instruments selected"]
+
+print("INSTRUMENTS_CHOSEN", INSTRUMENTS_CHOSEN)
+
+# Observing modes selected by user
+OBSERVING_MODES_CHOSEN = ["No observing modes selected"]
+
+# Anomalies selected by user in query_anomaly_3
+ANOMALIES_CHOSEN_FROM_CURRENT_ANOMALIES = ["No anomalies selected"]
+
+# Filters selected by user in query_anomaly_2
+FILTERS_CHOSEN = ["No filters selected"]
diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py
index b8f0f209c..d09c0ecfc 100644
--- a/jwql/utils/constants.py
+++ b/jwql/utils/constants.py
@@ -4,6 +4,10 @@
-------
- Johannes Sahlmann
+ - Matthew Bourque
+ - Bryan Hilbert
+ - Ben Sunnquist
+ - Teagan King
Use
---
@@ -47,16 +51,74 @@
'3': [(2, 1032, 4), (0, 1024, 1)],
'4': [(3, 1032, 4), (0, 1024, 1)]}}
-
-# Defines the possible anomalies to flag through the web app
-ANOMALIES = ['snowball', 'cosmic_ray_shower', 'crosstalk', 'data_transfer_error', 'diffraction_spike',
- 'excessive_saturation', 'ghost', 'guidestar_failure', 'persistence', 'satellite_trail', 'other']
+# Dictionary describing instruments to which anomalies apply
+ANOMALIES_PER_INSTRUMENT = {
+ # anomalies affecting all instruments:
+ 'cosmic_ray_shower': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'],
+ 'diffraction_spike': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'],
+ 'excessive_saturation': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'],
+ 'guidestar_failure': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'],
+ 'persistence': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'],
+    # anomalies affecting multiple instruments:
+ 'crosstalk': ['fgs', 'nircam', 'niriss', 'nirspec'],
+ 'data_transfer_error': ['fgs', 'nircam', 'niriss', 'nirspec'],
+ 'ghost': ['fgs', 'nircam', 'niriss', 'nirspec'],
+ 'snowball': ['fgs', 'nircam', 'niriss', 'nirspec'],
+ # instrument-specific anomalies:
+ 'column_pull_up': ['miri'],
+ 'dominant_msa_leakage': ['nirspec'],
+ 'dragons_breath': ['nircam'],
+ 'glow': ['miri'],
+ 'internal_reflection': ['miri'],
+ 'optical_short': ['nirspec'], # Only for MOS observations
+ 'row_pull_down': ['miri'],
+ # additional anomalies:
+ 'other': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec']}
# Defines the possible anomalies (with rendered name) to flag through the web app
-ANOMALY_CHOICES = [(anomaly, inflection.titleize(anomaly)) for anomaly in ANOMALIES]
+ANOMALY_CHOICES = [(anomaly, inflection.titleize(anomaly)) for anomaly in ANOMALIES_PER_INSTRUMENT]
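
As an illustration, the new dictionary makes it straightforward to build instrument-specific choice lists (a sketch, not code from this changeset)::

    import inflection

    from jwql.utils.constants import ANOMALIES_PER_INSTRUMENT

    # Anomalies that can be flagged for MIRI, with display names
    miri_choices = [(name, inflection.titleize(name))
                    for name, instruments in ANOMALIES_PER_INSTRUMENT.items()
                    if 'miri' in instruments]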
+
+# Possible exposure types for dark current data
+DARK_EXP_TYPES = {'nircam': ['NRC_DARK'],
+ 'niriss': ['NIS_DARK'],
+ 'miri': ['MIR_DARKIMG', 'MIR_DARKMRS', 'MIR_DARKALL'],
+ 'nirspec': ['NRS_DARK'],
+ 'fgs': ['FGS_DARK']}
+
+EXPTYPES = {"nircam": {"imaging": "NRC_IMAGE", "ts_imaging": "NRC_TSIMAGE",
+ "wfss": "NRC_WFSS", "ts_grism": "NRC_TSGRISM"},
+ "niriss": {"imaging": "NIS_IMAGE", "ami": "NIS_IMAGE", "pom": "NIS_IMAGE",
+ "wfss": "NIS_WFSS"},
+ "fgs": {"imaging": "FGS_IMAGE"}}
+
+FLAT_EXP_TYPES = {'nircam': ['NRC_FLAT'],
+ 'niriss': ['NIS_LAMP'],
+ 'miri': ['MIR_FLATIMAGE', 'MIR_FLATMRS'],
+ 'nirspec': ['NRS_AUTOFLAT', 'NRS_LAMP'],
+ 'fgs': ['FGS_INTFLAT']}
+
+FILTERS_PER_INSTRUMENT = {'miri': ['F560W', 'F770W', 'F1000W', 'F1065C', 'F1130W', 'F1140C', 'F1280W',
+ 'F1500W', 'F1550C', 'F1800W', 'F2100W', 'F2300C', 'F2550W'],
+ 'nircam': ['F070W', 'F090W', 'F115W', 'F140M', 'F150W', 'F150W2', 'F162M',
+ 'F164N', 'F182M', 'F187N', 'F200W', 'F210M', 'F212N', 'F250M',
+ 'F277W', 'F300M', 'F322W2', 'F323N', 'F335M', 'F356W', 'F360M',
+ 'F405N', 'F410M', 'F430M', 'F444W', 'F460M', 'F466N', 'F470N',
+ 'F480M'],
+                          'niriss': ['F090W', 'F115W', 'F140M', 'F150W', 'F158M', 'F200W', 'F277W',
+                                     'F356W', 'F380M', 'F430M', 'F444W', 'F480M'],
+ 'nirspec': ['CLEAR', 'F070LP', 'F100LP', 'F170LP', 'F290LP']}
FOUR_AMP_SUBARRAYS = ['WFSS128R', 'WFSS64R', 'WFSS128C', 'WFSS64C']
+# Names of full-frame apertures for all instruments
+FULL_FRAME_APERTURES = {'NIRCAM': ['NRCA1_FULL', 'NRCA2_FULL', 'NRCA3_FULL', 'NRCA4_FULL',
+ 'NRCA5_FULL', 'NRCB1_FULL', 'NRCB2_FULL', 'NRCB3_FULL',
+ 'NRCB4_FULL', 'NRCB5_FULL'],
+ 'NIRISS': ['NIS_CEN'],
+ 'NIRSPEC': ['NRS1_FULL', 'NRS2_FULL'],
+ 'MIRI': ['MIRIM_FULL']
+ }
+
# Possible suffix types for nominal files
GENERIC_SUFFIX_TYPES = ['uncal', 'cal', 'rateints', 'rate', 'trapsfilled', 'i2d',
'x1dints', 'x1d', 's2d', 's3d', 'dark', 'crfints',
@@ -65,8 +127,10 @@
# Possible suffix types for guider exposures
GUIDER_SUFFIX_TYPES = ['stream', 'stacked_uncal', 'image_uncal', 'stacked_cal', 'image_cal']
+# Instrument monitor database tables
INSTRUMENT_MONITOR_DATABASE_TABLES = {
- 'dark_monitor': ['nircam_dark_dark_current', 'nircam_dark_pixel_stats', 'nircam_dark_query_history']}
+ 'dark_monitor': ['_dark_dark_current', '_dark_pixel_stats', '_dark_query_history'],
+ 'bad_pixel_monitor': ['_bad_pixel_stats', '_bad_pixel_query_history']}
# JWST data products
JWST_DATAPRODUCTS = ['IMAGE', 'SPECTRUM', 'SED', 'TIMESERIES', 'VISIBILITY',
@@ -112,7 +176,7 @@
'nircam': [('Bias Monitor', '#'),
('Readnoise Monitor', '#'),
('Gain Level Monitor', '#'),
- ('Mean Dark Current Rate Monitor', '#'),
+ ('Mean Dark Current Rate Monitor', '/nircam/dark_monitor'),
('Photometric Stability Monitor', '#')],
'niriss': [('Bad Pixel Monitor', '#'),
('Readnoise Monitor', '#'),
@@ -146,6 +210,15 @@
# Possible suffix types for AMI files
NIRISS_AMI_SUFFIX_TYPES = ['amiavg', 'aminorm', 'ami']
+# Dictionary of observing modes available for each instrument
+OBSERVING_MODE_PER_INSTRUMENT = {'miri': ['Imaging', '4QPM Coronagraphic Imaging',
+ 'Lyot Coronagraphic Imaging', 'LRS', 'MRS'],
+ 'nircam': ['Imaging', 'Coronagraphic Imaging', 'WFSS',
+ 'Time-Series Imaging', 'Grism Time Series'],
+ 'niriss': ['WFSS', 'SOSS', 'AMI', 'Imaging'],
+ 'nirspec': ['Multi-Object Spectroscopy', 'IFU Spectroscopy',
+ 'Fixed Slit Spectroscopy', 'Bright Object Time Series']}
+
SUBARRAYS_ONE_OR_FOUR_AMPS = ['SUBGRISMSTRIPE64', 'SUBGRISMSTRIPE128', 'SUBGRISMSTRIPE256']
# Possible suffix types for time-series exposures
diff --git a/jwql/utils/crds_tools.py b/jwql/utils/crds_tools.py
new file mode 100644
index 000000000..1d059d2b6
--- /dev/null
+++ b/jwql/utils/crds_tools.py
@@ -0,0 +1,200 @@
+#! /usr/bin/env python
+
+"""This module contains functions used to indentify and download
+reference files from CRDS and place them in the expected location, for
+JWQL to find.
+
+This module uses the ``crds`` software package
+(``https://hst-crds.stsci.edu/static/users_guide/index.html``) which is
+installed when the JWST calibration pipeline package is installed.
+Reference files are identified by supplying some basic metadata from the
+exposure being calibrated. See
+https://hst-crds.stsci.edu/static/users_guide/library_use.html#crds-getreferences
+for a description of the function used for this task.
+
+Author
+------
+
+ - Bryan Hilbert
+
+Use
+---
+
+ This module can be used as such:
+ ::
+        from jwql.utils import crds_tools
+        params = {'INSTRUME': 'NIRCAM', 'DETECTOR': 'NRCA1'}
+        reffiles = crds_tools.get_reffiles(params, ['dark'])
+"""
+
+import datetime
+import os
+
+from jwql.utils.utils import ensure_dir_exists
+from jwql.utils.constants import EXPTYPES
+
+
+def env_variables():
+ """Check the values of the CRDS-related environment variables
+
+ Returns
+ -------
+ crds_data_path : str
+ Full path to the location of the CRDS reference files
+ """
+ crds_data_path = path_check()
+ server_check()
+
+ return crds_data_path
+
+
+def path_check():
+ """Check that the ``CRDS_PATH`` environment variable is set. This
+ will be the location to which CRDS reference files are downloaded.
+ If the env variable is not set, default to use ``$HOME/crds_cache/``
+
+ Returns
+ -------
+ crds_path : str
+ Full path to the location of the CRDS reference files
+ """
+ crds_path = os.environ.get('CRDS_PATH')
+ if crds_path is None:
+ reffile_dir = '{}/crds_cache'.format(os.environ.get('HOME'))
+ os.environ["CRDS_PATH"] = reffile_dir
+ ensure_dir_exists(reffile_dir)
+ print('CRDS_PATH environment variable not set. Setting to {}'.format(reffile_dir))
+ return reffile_dir
+ else:
+ return crds_path
+
+
+def server_check():
+ """Check that the ``CRDS_SERVER_URL`` environment variable is set.
+    This controls where the ``crds`` package will look for CRDS
+    information. If the env variable is not set, set it to the JWST
+    CRDS server.
+ """
+ crds_server = os.environ.get('CRDS_SERVER_URL')
+ if crds_server is None:
+ os.environ["CRDS_SERVER_URL"] = "https://jwst-crds.stsci.edu"
+
+
+def dict_from_yaml(yaml_dict):
+ """Create a dictionary to be used as input to the CRDS getreferences
+ function from the nested dictionary created when a standard Mirage
+ input yaml file is read in.
+
+ Parameters
+ ----------
+ yaml_dict : dict
+        Nested dictionary from reading in yaml file
+
+    Returns
+ -------
+ crds_dict : dict
+        Dictionary of information necessary to select reference files
+ via getreferences().
+ """
+ crds_dict = {}
+ instrument = yaml_dict['Inst']['instrument'].upper()
+ crds_dict['INSTRUME'] = instrument
+ crds_dict['READPATT'] = yaml_dict['Readout']['readpatt'].upper()
+
+ # Currently, all reference files that use SUBARRAY as a selection
+    # criterion contain SUBARRAY = 'GENERIC', meaning that SUBARRAY
+ # actually isn't important. So let's just set it to FULL here.
+ crds_dict['SUBARRAY'] = 'FULL'
+
+ # Use the current date and time in order to get the most recent
+ # reference file
+ crds_dict['DATE-OBS'] = datetime.date.today().isoformat()
+ current_date = datetime.datetime.now()
+ crds_dict['TIME-OBS'] = current_date.time().isoformat()
+
+ array_name = yaml_dict['Readout']['array_name']
+ crds_dict['DETECTOR'] = array_name.split('_')[0].upper()
+ if '5' in crds_dict['DETECTOR']:
+ crds_dict['DETECTOR'] = crds_dict['DETECTOR'].replace('5', 'LONG')
+
+ if 'FGS' in crds_dict['DETECTOR']:
+ crds_dict['DETECTOR'] = 'GUIDER{}'.format(crds_dict['DETECTOR'][-1])
+
+ if instrument == 'NIRCAM':
+ if crds_dict['DETECTOR'] in ['NRCALONG', 'NRCBLONG']:
+ crds_dict['CHANNEL'] = 'LONG'
+ else:
+ crds_dict['CHANNEL'] = 'SHORT'
+
+ # For the purposes of choosing reference files, the exposure type should
+ # always be set to imaging, since it is used to locate sources in the
+ # seed image, prior to any dispersion.
+ crds_dict['EXP_TYPE'] = EXPTYPES[instrument.lower()]["imaging"]
+
+ # This assumes that filter and pupil names match up with reality,
+ # as opposed to the more user-friendly scheme of allowing any
+ # filter to be in the filter field.
+ crds_dict['FILTER'] = yaml_dict['Readout']['filter']
+ crds_dict['PUPIL'] = yaml_dict['Readout']['pupil']
+
+ return crds_dict
+
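To make the expected input shape concrete, here is a sketch of a minimal nested dictionary and the call; the keys mirror those accessed above and the values are invented::

    from jwql.utils.crds_tools import dict_from_yaml

    yaml_dict = {'Inst': {'instrument': 'nircam'},
                 'Readout': {'readpatt': 'BRIGHT2',
                             'array_name': 'NRCA1_FULL',
                             'filter': 'F200W',
                             'pupil': 'CLEAR'}}
    crds_dict = dict_from_yaml(yaml_dict)
    # crds_dict now holds INSTRUME, DETECTOR, CHANNEL, READPATT, SUBARRAY,
    # EXP_TYPE, FILTER, PUPIL, plus the current DATE-OBS/TIME-OBS
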
+
+def get_reffiles(parameter_dict, reffile_types, download=True):
+ """Determine CRDS's best reference files to use for a particular
+ observation, and download them if they are not already present in
+ the ``CRDS_PATH``. The determination is made based on the
+    information in the ``parameter_dict`` argument.
+
+ Parameters
+ ----------
+ parameter_dict : dict
+ Dictionary of basic metadata from the file to be processed by
+ the returned reference files (e.g. ``INSTRUME``, ``DETECTOR``,
+ etc)
+
+ reffile_types : list
+ List of reference file types to look up and download. These must
+ be contained in CRDS's list of reference file types.
+
+ download : bool
+ If ``True`` (default), the identified best reference files will
+ be downloaded. If ``False``, the dictionary of best reference
+ files will still be returned, but the files will not be
+ downloaded. The use of ``False`` is primarily intended to
+ support testing on Travis.
+
+ Returns
+ -------
+ reffile_mapping : dict
+ Mapping of downloaded CRDS file locations
+ """
+
+ # IMPORTANT: Import of crds package must be done AFTER the environment
+ # variables are set in the functions above
+ import crds
+ from crds import CrdsLookupError
+
+ if download:
+ try:
+ reffile_mapping = crds.getreferences(parameter_dict, reftypes=reffile_types)
+ except CrdsLookupError:
+ raise ValueError("ERROR: CRDSLookupError when trying to find reference files for parameters: {}".format(parameter_dict))
+ else:
+ # If the files will not be downloaded, still return the same local
+ # paths that are returned when the files are downloaded. Note that
+ # this follows the directory structure currently assumed by CRDS.
+ crds_path = os.environ.get('CRDS_PATH')
+ try:
+ reffile_mapping = crds.getrecommendations(parameter_dict, reftypes=reffile_types)
+ except CrdsLookupError:
+ raise ValueError("ERROR: CRDSLookupError when trying to find reference files for parameters: {}".format(parameter_dict))
+
+ for key, value in reffile_mapping.items():
+ # Check for NOT FOUND must be done here because the following
+ # line will raise an exception if NOT FOUND is present
+ if "NOT FOUND" in value:
+ reffile_mapping[key] = "NOT FOUND"
+ else:
+ instrument = value.split('_')[1]
+ reffile_mapping[key] = os.path.join(crds_path, 'references/jwst', instrument, value)
+
+ return reffile_mapping
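
A hedged usage sketch of ``get_reffiles``; the metadata values are placeholders and the reference-file types ('superbias', 'linearity') are examples of standard CRDS reftype names::

    from jwql.utils import crds_tools

    # Set CRDS_PATH / CRDS_SERVER_URL before crds is imported inside get_reffiles
    crds_tools.env_variables()

    params = {'INSTRUME': 'NIRCAM', 'DETECTOR': 'NRCA1', 'READPATT': 'RAPID',
              'SUBARRAY': 'FULL', 'DATE-OBS': '2021-10-02', 'TIME-OBS': '12:04:39',
              'EXP_TYPE': 'NRC_IMAGE', 'FILTER': 'F200W', 'PUPIL': 'CLEAR'}
    reffiles = crds_tools.get_reffiles(params, ['superbias', 'linearity'], download=False)
    # e.g. {'superbias': '<CRDS_PATH>/references/jwst/nircam/jwst_nircam_superbias_NNNN.fits', ...}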
diff --git a/jwql/utils/instrument_properties.py b/jwql/utils/instrument_properties.py
index 470724081..083bb6dae 100644
--- a/jwql/utils/instrument_properties.py
+++ b/jwql/utils/instrument_properties.py
@@ -124,7 +124,10 @@ def amplifier_info(filename, omit_reference_pixels=True):
try:
data_quality = hdu['DQ'].data
except KeyError:
- raise KeyError('DQ extension not found.')
+ try:
+ data_quality = hdu['PIXELDQ'].data
+ except KeyError:
+ raise KeyError('DQ extension not found.')
# Reference pixels should be flagged in the DQ array with the
# REFERENCE_PIXEL flag. Find the science pixels by looping for
diff --git a/jwql/utils/logging_functions.py b/jwql/utils/logging_functions.py
index 1f38bb851..b929c83c0 100644
--- a/jwql/utils/logging_functions.py
+++ b/jwql/utils/logging_functions.py
@@ -10,8 +10,9 @@
-------
- Catherine Martlin
- - Alex Viana (WFC3 QL Version)
+ - Alex Viana (wfc3ql Version)
- Matthew Bourque
+ - Jason Neal
Use
---
@@ -59,6 +60,7 @@ def my_main_function():
import os
import pwd
import socket
+import subprocess
import sys
import time
import traceback
@@ -236,7 +238,10 @@ def wrapped(*args, **kwargs):
except (ImportError, AttributeError) as err:
logging.warning(err)
- logging.info('')
+ environment = subprocess.check_output(['conda', 'env', 'export'], universal_newlines=True)
+ logging.info('Environment:')
+ for line in environment.split('\n'):
+ logging.info(line)
# Call the function and time it
t1_cpu = time.clock()
diff --git a/jwql/utils/mast_utils.py b/jwql/utils/mast_utils.py
new file mode 100644
index 000000000..d9e2c58bb
--- /dev/null
+++ b/jwql/utils/mast_utils.py
@@ -0,0 +1,172 @@
+"""Various utility functions for interacting with MAST
+
+Authors
+-------
+
+ - Bryan Hilbert
+
+Use
+---
+
+ This module can be imported as such:
+
+    >>> from jwql.utils import mast_utils
+    >>> results = mast_utils.mast_query('nircam', 'NRC_DARK', 53005.1, 53005.2, aperture='NRCA1_FULL')
+
+ """
+
+from jwql.jwql_monitors import monitor_mast
+from jwql.utils.constants import JWST_DATAPRODUCTS, JWST_INSTRUMENT_NAMES_MIXEDCASE
+
+
+def mast_query(instrument, templates, start_date, end_date, aperture=None, detector=None, filter_name=None,
+ pupil=None, grating=None, readpattern=None, lamp=None):
+ """Use ``astroquery`` to search MAST for data for given observation
+ templates over a given time range
+
+ Parameters
+ ----------
+ instrument : str
+ Instrument name (e.g. ``nircam``)
+
+ templates : str or list
+ Single, or list of, templates for the query (e.g. ``NRC_DARK``,
+ ``MIR_FLATMRS``)
+
+ start_date : float
+ Starting date for the search in MJD
+
+ end_date : float
+ Ending date for the search in MJD
+
+ aperture : str
+ Detector aperture to search for (e.g. ``NRCA1_FULL``)
+
+ detector : str
+ Detector name (e.g. ``MIRIMAGE``)
+
+ filter_name : str
+        Filter element (e.g. ``F200W``)
+
+ pupil : str
+ Pupil element (e.g. ``F323N``)
+
+ grating : str
+ Grating element (e.g. ``MIRROR``)
+
+ readpattern : str
+        Detector readout pattern (e.g. ``NISRAPID``)
+
+ lamp : str
+ Lamp name (e.g. ``LINE2``)
+
+ Returns
+ -------
+ query_results : list
+ List of dictionaries containing the query results
+ """
+
+ # If a single template name is input as a string, put it in a list
+ if isinstance(templates, str):
+ templates = [templates]
+
+ # Make sure instrument is correct case
+ instrument = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()]
+
+    # monitor_mast.instrument_inventory does not allow list inputs to
+    # the add_filters argument (or at least, if a list is provided, it
+    # becomes a nested list when the query is sent to MAST, and the
+    # nested list is then ignored by MAST). So query once for each
+    # template and combine the outputs into a single list.
+ query_results = []
+ for template_name in templates:
+
+ # Create dictionary of parameters to add
+ parameters = {"date_obs_mjd": {"min": start_date, "max": end_date},
+ "exp_type": template_name}
+
+ if detector is not None:
+ parameters["detector"] = detector
+ if aperture is not None:
+ parameters["apername"] = aperture
+ if filter_name is not None:
+ parameters["filter"] = filter_name
+ if pupil is not None:
+ parameters["pupil"] = pupil
+ if grating is not None:
+ parameters["grating"] = grating
+ if readpattern is not None:
+ parameters["readpatt"] = readpattern
+ if lamp is not None:
+ parameters["lamp"] = lamp
+
+ query = monitor_mast.instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS,
+ add_filters=parameters, return_data=True, caom=False)
+ if len(query['data']) > 0:
+ query_results.extend(query['data'])
+
+ return query_results
+
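A hedged sketch of a call to the generic query helper; the date range and parameter values are placeholders::

    from jwql.utils.mast_utils import mast_query

    # NIRCam darks for one aperture over a one-day MJD window
    results = mast_query('nircam', 'NRC_DARK', 57404.0, 57405.0,
                         aperture='NRCA1_FULL', readpattern='RAPID')
    for entry in results:
        print(entry['filename'])
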
+
+def mast_query_miri(detector, aperture, templates, start_date, end_date):
+ """Use ``astroquery`` to search MAST for data for given observation
+ templates over a given time range for MIRI. MIRI is different than
+ the other instruments in that (to find full frame flats and darks at
+ least) you need to use the detector name rather than the aperture
+ name. There is no full frame aperture name for the MRS detectors.
+
+ Parameters
+ ----------
+ detector : str
+ Name of the detector to search for. One of ``MIRIMAGE``,
+ ``MIRIFULONG``, ``MIRIFUSHORT``.
+
+ aperture : str
+ Aperture name on the detector (e.g. ``MIRIM_FULL``)
+
+ templates : str or list
+ Single, or list of, templates for the query (e.g. ``NRC_DARK``,
+ ``MIR_FLATMRS``)
+
+ start_date : float
+ Starting date for the search in MJD
+
+ end_date : float
+ Ending date for the search in MJD
+
+ Returns
+ -------
+ query_results : list
+ List of dictionaries containing the query results
+ """
+
+ # If a single template name is input as a string, put it in a list
+ if isinstance(templates, str):
+ templates = [templates]
+
+ instrument = 'MIRI'
+
+    # monitor_mast.instrument_inventory does not allow list inputs to
+    # the add_filters argument (or at least, if a list is provided, it
+    # becomes a nested list when the query is sent to MAST, and the
+    # nested list is then ignored by MAST). So query once for each
+    # template and combine the outputs into a single list.
+ query_results = []
+ for template_name in templates:
+
+ # Create dictionary of parameters to add
+ if aperture.lower() != 'none':
+ parameters = {"date_obs_mjd": {"min": start_date, "max": end_date},
+ "detector": detector, "apername": aperture, "exp_type": template_name}
+ else:
+ parameters = {"date_obs_mjd": {"min": start_date, "max": end_date},
+ "detector": detector, "exp_type": template_name}
+
+ query = monitor_mast.instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS,
+ add_filters=parameters, return_data=True, caom=False)
+ if len(query['data']) > 0:
+ query_results.extend(query['data'])
+
+ return query_results
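
And a matching sketch for the MIRI variant, which filters on detector rather than aperture; the values are again placeholders::

    from jwql.utils.mast_utils import mast_query_miri

    # MRS darks: there is no full-frame aperture name, so pass 'none' for the aperture
    results = mast_query_miri('MIRIFUSHORT', 'none', 'MIR_DARKMRS', 57404.0, 57405.0)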
diff --git a/jwql/website/apps/jwql/api_views.py b/jwql/website/apps/jwql/api_views.py
index 13e129049..b74b74129 100644
--- a/jwql/website/apps/jwql/api_views.py
+++ b/jwql/website/apps/jwql/api_views.py
@@ -26,6 +26,7 @@
-------
- Matthew Bourque
+ - Teagan King
Use
---
@@ -56,7 +57,6 @@
from .data_containers import get_thumbnails_by_instrument
from .data_containers import get_thumbnails_by_proposal
from .data_containers import get_thumbnails_by_rootname
-from .oauth import auth_required
def all_proposals(request):
@@ -216,7 +216,7 @@ def thumbnails_by_instrument(request, inst):
Incoming request from the webpage
inst : str
The instrument of interest. The name of the instrument must
- mach one of the following: (``nircam``, ``NIRCam``, ``niriss``,
+ match one of the following: (``nircam``, ``NIRCam``, ``niriss``,
``NIRISS``, ``nirspec``, ``NIRSpec``, ``miri``, ``MIRI``,
``fgs``, ``FGS``).
diff --git a/jwql/website/apps/jwql/bokeh_containers.py b/jwql/website/apps/jwql/bokeh_containers.py
index dfcb3ffe1..b46c7a230 100644
--- a/jwql/website/apps/jwql/bokeh_containers.py
+++ b/jwql/website/apps/jwql/bokeh_containers.py
@@ -1,8 +1,8 @@
-"""Various functions to generate Bokeh objects to be used by the ``views`` of
-the ``jwql`` app.
+"""Various functions to generate Bokeh objects to be used by the
+``views`` of the ``jwql`` app.
-This module contains several functions that instantiate BokehTemplate objects
-to be rendered in ``views.py`` for use by the ``jwql`` app.
+This module contains several functions that instantiate
+``BokehTemplate`` objects to be rendered in ``views.py``.
Authors
-------
@@ -16,17 +16,18 @@
used by ``views.py``, e.g.:
::
- from .data_containers import get_mast_monitor
+ from .bokeh_containers import dark_monitor_tabs
"""
-import glob
import os
-from astropy.io import fits
-import numpy as np
+from bokeh.embed import components
+from bokeh.layouts import layout
+from bokeh.models.widgets import Tabs, Panel
-from jwql.preview_image.preview_image import PreviewImage
-from jwql.utils.utils import get_config, filename_parser, MONITORS
+from . import monitor_pages
+from jwql.utils.constants import FULL_FRAME_APERTURES
+from jwql.utils.utils import get_config
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem')
@@ -34,375 +35,111 @@
REPO_DIR = os.path.split(PACKAGE_DIR)[0]
-def get_acknowledgements():
- """Returns a list of individuals who are acknowledged on the
- ``about`` page.
-
- The list is generated by reading in the contents of the ``jwql``
- ``README`` file. In this way, the website will automatically
- update with updates to the ``README`` file.
-
- Returns
- -------
- acknowledgements : list
- A list of individuals to be acknowledged.
- """
-
- # Locate README file
- readme_file = os.path.join(REPO_DIR, 'README.md')
-
- # Get contents of the README file
- with open(readme_file, 'r') as f:
- data = f.readlines()
-
- # Find where the acknowledgements start
- for i, line in enumerate(data):
- if 'Acknowledgments' in line:
- index = i
-
- # Parse out the list of individuals
- acknowledgements = data[index + 1:]
- acknowledgements = [item.strip().replace('- ', '').split(' [@')[0].strip() for item in acknowledgements]
-
- return acknowledgements
-
-
-def get_dashboard_components():
- """Build and return a dictionary containing components needed for
- the dashboard.
-
- Returns
- -------
- dashboard_components : dict
- A dictionary containing components needed for the dashboard.
- """
-
- output_dir = get_config()['outputs']
- name_dict = {'': '',
- 'monitor_mast': 'Database Monitor',
- 'database_monitor_jwst': 'JWST',
- 'database_monitor_caom': 'JWST (CAOM)',
- 'monitor_filesystem': 'Filesystem Monitor',
- 'filecount_type': 'Total File Counts by Type',
- 'size_type': 'Total File Sizes by Type',
- 'filecount': 'Total File Counts',
- 'system_stats': 'System Statistics'}
-
- dashboard_components = {}
- for dir_name, subdir_list, file_list in os.walk(output_dir):
- monitor_name = os.path.basename(dir_name)
- dashboard_components[name_dict[monitor_name]] = {}
- for fname in file_list:
- if 'component' in fname:
- full_fname = '{}/{}'.format(monitor_name, fname)
- plot_name = fname.split('_component')[0]
-
- # Get the div
- html_file = full_fname.split('.')[0] + '.html'
- with open(os.path.join(output_dir, html_file)) as f:
- div = f.read()
-
- # Get the script
- js_file = full_fname.split('.')[0] + '.js'
- with open(os.path.join(output_dir, js_file)) as f:
- script = f.read()
- dashboard_components[name_dict[monitor_name]][name_dict[plot_name]] = [div, script]
-
- return dashboard_components
-
-
-def get_filenames_by_instrument(instrument):
- """Returns a list of paths to files that match the given
- ``instrument``.
+def dark_monitor_tabs(instrument):
+ """Creates the various tabs of the dark monitor results page.
Parameters
----------
instrument : str
- The instrument of interest (e.g. `FGS`).
-
- Returns
- -------
- filepaths : list
- A list of full paths to the files that match the given
- instrument.
- """
-
- # Query files from MAST database
- # filepaths, filenames = DatabaseConnection('MAST', instrument=instrument).\
- # get_files_for_instrument(instrument)
-
- # Find all of the matching files in filesytem
- # (TEMPORARY WHILE THE MAST STUFF IS BEING WORKED OUT)
- instrument_match = {'FGS': 'guider',
- 'MIRI': 'mir',
- 'NIRCam': 'nrc',
- 'NIRISS': 'nis',
- 'NIRSpec': 'nrs'}
- search_filepath = os.path.join(FILESYSTEM_DIR, '*', '*.fits')
- filepaths = [f for f in glob.glob(search_filepath) if instrument_match[instrument] in f]
-
- return filepaths
-
-
-def get_header_info(file):
- """Return the header information for a given ``file``.
-
- Parameters
- ----------
- file : str
- The name of the file of interest.
-
- Returns
- -------
- header : str
- The primary FITS header for the given ``file``.
- """
-
- dirname = file[:7]
- fits_filepath = os.path.join(FILESYSTEM_DIR, dirname, file)
- header = fits.getheader(fits_filepath, ext=0).tostring(sep='\n')
-
- return header
-
-
-def get_image_info(file_root, rewrite):
- """Build and return a dictionary containing information for a given
- ``file_root``.
-
- Parameters
- ----------
- file_root : str
- The rootname of the file of interest.
- rewrite : bool
- ``True`` if the corresponding JPEG needs to be rewritten,
- ``False`` if not.
-
- Returns
- -------
- image_info : dict
- A dictionary containing various information for the given
- ``file_root``.
- """
-
- # Initialize dictionary to store information
- image_info = {}
- image_info['all_jpegs'] = []
- image_info['suffixes'] = []
- image_info['num_ints'] = {}
-
- preview_dir = os.path.join(get_config()['jwql_dir'], 'preview_images')
-
- # Find all of the matching files
- dirname = file_root[:7]
- search_filepath = os.path.join(FILESYSTEM_DIR, dirname, file_root + '*.fits')
- image_info['all_files'] = glob.glob(search_filepath)
-
- for file in image_info['all_files']:
-
- # Get suffix information
- suffix = os.path.basename(file).split('_')[4].split('.')[0]
- image_info['suffixes'].append(suffix)
-
- # Determine JPEG file location
- jpg_dir = os.path.join(preview_dir, dirname)
- jpg_filename = os.path.basename(os.path.splitext(file)[0] + '_integ0.jpg')
- jpg_filepath = os.path.join(jpg_dir, jpg_filename)
-
- # Check that a jpg does not already exist. If it does (and rewrite=False),
- # just call the existing jpg file
- if os.path.exists(jpg_filepath) and not rewrite:
- pass
-
- # If it doesn't, make it using the preview_image module
- else:
- if not os.path.exists(jpg_dir):
- os.makedirs(jpg_dir)
- im = PreviewImage(file, 'SCI')
- im.output_directory = jpg_dir
- im.make_image()
-
- # Record how many integrations there are per filetype
- search_jpgs = os.path.join(preview_dir, dirname, file_root + '_{}_integ*.jpg'.format(suffix))
- num_jpgs = len(glob.glob(search_jpgs))
- image_info['num_ints'][suffix] = num_jpgs
-
- image_info['all_jpegs'].append(jpg_filepath)
-
- return image_info
-
-
-def get_proposal_info(filepaths):
- """Builds and returns a dictionary containing various information
- about the proposal(s) that correspond to the given ``filepaths``.
-
- The information returned contains such things as the number of
- proposals, the paths to the corresponding thumbnails, and the total
- number of files.
-
- Parameters
- ----------
- filepaths : list
- A list of full paths to files of interest.
-
- Returns
- -------
- proposal_info : dict
- A dictionary containing various information about the
- proposal(s) and files corresponding to the given ``filepaths``.
- """
-
- proposals = list(set([f.split('/')[-1][2:7] for f in filepaths]))
- thumbnail_dir = os.path.join(get_config()['jwql_dir'], 'thumbnails')
- thumbnail_paths = []
- num_files = []
- for proposal in proposals:
- thumbnail_search_filepath = os.path.join(thumbnail_dir, 'jw{}'.format(proposal), 'jw{}*rate*.thumb'.format(proposal))
- thumbnail = glob.glob(thumbnail_search_filepath)
- if len(thumbnail) > 0:
- thumbnail = thumbnail[0]
- thumbnail = '/'.join(thumbnail.split('/')[-2:])
- thumbnail_paths.append(thumbnail)
-
- fits_search_filepath = os.path.join(FILESYSTEM_DIR, 'jw{}'.format(proposal), 'jw{}*.fits'.format(proposal))
- num_files.append(len(glob.glob(fits_search_filepath)))
-
- # Put the various information into a dictionary of results
- proposal_info = {}
- proposal_info['num_proposals'] = len(proposals)
- proposal_info['proposals'] = proposals
- proposal_info['thumbnail_paths'] = thumbnail_paths
- proposal_info['num_files'] = num_files
-
- return proposal_info
-
-
-def split_files(file_list, page_type):
- """JUST FOR USE DURING DEVELOPMENT WITH FILESYSTEM
-
- Splits the files in the filesystem into "unlooked" and "archived",
- with the "unlooked" images being the most recent 10% of files.
- """
- exp_times = []
- for file in file_list:
- hdr = fits.getheader(file, ext=0)
- exp_start = hdr['EXPSTART']
- exp_times.append(exp_start)
-
- exp_times_sorted = sorted(exp_times)
- i_cutoff = int(len(exp_times) * .1)
- t_cutoff = exp_times_sorted[i_cutoff]
-
- mask_unlooked = np.array([t < t_cutoff for t in exp_times])
-
- if page_type == 'unlooked':
- print('ONLY RETURNING {} "UNLOOKED" FILES OF {} ORIGINAL FILES'.format(len([m for m in mask_unlooked if m]), len(file_list)))
- return [f for i, f in enumerate(file_list) if mask_unlooked[i]]
- elif page_type == 'archive':
- print('ONLY RETURNING {} "ARCHIVED" FILES OF {} ORIGINAL FILES'.format(len([m for m in mask_unlooked if not m]), len(file_list)))
- return [f for i, f in enumerate(file_list) if not mask_unlooked[i]]
-
-
-def thumbnails(inst, proposal=None):
- """Generate a page showing thumbnail images corresponding to
- activities, from a given ``proposal``
-
- Parameters
- ----------
- inst : str
- Name of JWST instrument
- proposal : str (optional)
- Number of APT proposal to filter
+ The JWST instrument of interest (e.g. ``nircam``).
Returns
-------
- dict_to_render : dict
- Dictionary of parameters for the thumbnails
+ div : str
+ The HTML div to render dark monitor plots
+ script : str
+ The JS script to render dark monitor plots
"""
- filepaths = get_filenames_by_instrument(inst)
-
- # JUST FOR DEVELOPMENT
- # Split files into "archived" and "unlooked"
- if proposal is not None:
- page_type = 'archive'
- else:
- page_type = 'unlooked'
- filepaths = split_files(filepaths, page_type)
-
- # Determine file ID (everything except suffix)
- # e.g. jw00327001001_02101_00002_nrca1
- full_ids = set(['_'.join(f.split('/')[-1].split('_')[:-1]) for f in filepaths])
-
- # If the proposal is specified (i.e. if the page being loaded is
- # an archive page), only collect data for given proposal
- if proposal is not None:
- full_ids = [f for f in full_ids if f[2:7] == proposal]
-
- # Group files by ID
- file_data = []
- detectors = []
- proposals = []
- for i, file_id in enumerate(full_ids):
- suffixes = []
- count = 0
- for file in filepaths:
- if '_'.join(file.split('/')[-1].split('_')[:-1]) == file_id:
- count += 1
-
- # Parse filename
- try:
- file_dict = filename_parser(file)
- except ValueError:
- # Temporary workaround for noncompliant files in filesystem
- file_dict = {'activity': file_id[17:19],
- 'detector': file_id[26:],
- 'exposure_id': file_id[20:25],
- 'observation': file_id[7:10],
- 'parallel_seq_id': file_id[16],
- 'program_id': file_id[2:7],
- 'suffix': file.split('/')[-1].split('.')[0].split('_')[-1],
- 'visit': file_id[10:13],
- 'visit_group': file_id[14:16]}
-
- # Determine suffix
- suffix = file_dict['suffix']
- suffixes.append(suffix)
-
- hdr = fits.getheader(file, ext=0)
- exp_start = hdr['EXPSTART']
-
- suffixes = list(set(suffixes))
-
- # Add parameters to sort by
- if file_dict['detector'] not in detectors and \
- not file_dict['detector'].startswith('f'):
- detectors.append(file_dict['detector'])
- if file_dict['program_id'] not in proposals:
- proposals.append(file_dict['program_id'])
-
- file_dict['exp_start'] = exp_start
- file_dict['suffixes'] = suffixes
- file_dict['file_count'] = count
- file_dict['file_root'] = file_id
-
- file_data.append(file_dict)
- file_indices = np.arange(len(file_data))
-
- # Extract information for sorting with dropdown menus
- # (Don't include the proposal as a sorting parameter if the
- # proposal has already been specified)
- if proposal is not None:
- dropdown_menus = {'detector': detectors}
- else:
- dropdown_menus = {'detector': detectors,
- 'proposal': proposals}
-
- dict_to_render = {'inst': inst,
- 'all_filenames': [os.path.basename(f) for f in filepaths],
- 'tools': MONITORS,
- 'thumbnail_zipped_list': zip(file_indices, file_data),
- 'dropdown_menus': dropdown_menus,
- 'n_fileids': len(file_data),
- 'prop': proposal}
-
- return dict_to_render
+ full_apertures = FULL_FRAME_APERTURES[instrument.upper()]
+
+ templates_all_apertures = {}
+ for aperture in full_apertures:
+
+ # Start with default values for instrument and aperture because
+ # BokehTemplate's __init__ method does not allow input arguments
+ monitor_template = monitor_pages.DarkMonitor()
+
+ # Set instrument and monitor using DarkMonitor's setters
+ monitor_template.aperture_info = (instrument, aperture)
+ templates_all_apertures[aperture] = monitor_template
+
+ # Histogram tab
+ histograms_all_apertures = []
+ for aperture_name, template in templates_all_apertures.items():
+ histogram = template.refs["dark_full_histogram_figure"]
+ histogram.sizing_mode = "scale_width" # Make sure the sizing is adjustable
+ histograms_all_apertures.append(histogram)
+
+ if instrument == 'NIRCam':
+ a1, a2, a3, a4, a5, b1, b2, b3, b4, b5 = histograms_all_apertures
+ histogram_layout = layout(
+ [a2, a4, b3, b1],
+ [a1, a3, b4, b2],
+ [a5, b5]
+ )
+
+ elif instrument in ['NIRISS', 'MIRI']:
+ single_aperture = histograms_all_apertures[0]
+ histogram_layout = layout(
+ [single_aperture]
+ )
+
+ elif instrument == 'NIRSpec':
+ d1, d2 = histograms_all_apertures
+ histogram_layout = layout(
+ [d1, d2]
+ )
+
+ histogram_layout.sizing_mode = "scale_width" # Make sure the sizing is adjustable
+ histogram_tab = Panel(child=histogram_layout, title="Histogram")
+
+ # Current v. time tab
+ lines_all_apertures = []
+ for aperture_name, template in templates_all_apertures.items():
+ line = template.refs["dark_current_time_figure"]
+ line.title.align = "center"
+ line.title.text_font_size = "20px"
+ line.sizing_mode = "scale_width" # Make sure the sizing is adjustable
+ lines_all_apertures.append(line)
+
+ if instrument == 'NIRCam':
+ a1, a2, a3, a4, a5, b1, b2, b3, b4, b5 = lines_all_apertures
+ line_layout = layout(
+ [a2, a4, b3, b1],
+ [a1, a3, b4, b2],
+ [a5, b5]
+ )
+
+ elif instrument in ['NIRISS', 'MIRI']:
+ single_aperture = lines_all_apertures[0]
+ line_layout = layout(
+ [single_aperture]
+ )
+
+ elif instrument == 'NIRSpec':
+ d1, d2 = lines_all_apertures
+ line_layout = layout(
+ [d1, d2]
+ )
+
+ line_layout.sizing_mode = "scale_width" # Make sure the sizing is adjustable
+ line_tab = Panel(child=line_layout, title="Trending")
+
+ # Mean dark image tab
+
+    # For now, display the mean dark image for a single aperture (hard-coded to NRCA3_FULL)
+ image = templates_all_apertures['NRCA3_FULL'].refs["mean_dark_image_figure"]
+ image.sizing_mode = "scale_width" # Make sure the sizing is adjustable
+ image_layout = layout(image)
+ image.height = 250 # Not working
+ image_layout.sizing_mode = "scale_width"
+ image_tab = Panel(child=image_layout, title="Mean Dark Image")
+
+ # Build tabs
+ tabs = Tabs(tabs=[histogram_tab, line_tab, image_tab])
+
+ # Return tab HTML and JavaScript to web app
+ script, div = components(tabs)
+
+ return div, script
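
A hedged sketch of how a Django view might consume the returned components; the view function and template name are illustrative placeholders::

    from django.shortcuts import render

    from jwql.website.apps.jwql import bokeh_containers

    def dark_monitor(request, inst):  # hypothetical view
        div, script = bokeh_containers.dark_monitor_tabs(inst)
        context = {'inst': inst, 'div': div, 'script': script}
        return render(request, 'dark_monitor.html', context)  # hypothetical template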
diff --git a/jwql/website/apps/jwql/data_containers.py b/jwql/website/apps/jwql/data_containers.py
index 45adecc47..626348f38 100644
--- a/jwql/website/apps/jwql/data_containers.py
+++ b/jwql/website/apps/jwql/data_containers.py
@@ -10,6 +10,7 @@
- Lauren Chambers
- Matthew Bourque
+ - Teagan King
Use
---
@@ -28,9 +29,12 @@
import tempfile
from astropy.io import fits
+from astropy.table import Table
from astropy.time import Time
from django.conf import settings
import numpy as np
+from operator import itemgetter
+
# astroquery.mast import that depends on value of auth_mast
# this import has to be made before any other import of astroquery.mast
@@ -45,6 +49,7 @@
from jwedb.edb_interface import mnemonic_inventory
from jwql.database import database_interface as di
+from jwql.database.database_interface import load_connection
from jwql.edb.engineering_database import get_mnemonic, get_mnemonic_info
from jwql.instrument_monitors.miri_monitors.data_trending import dashboard as miri_dash
from jwql.instrument_monitors.nirspec_monitors.data_trending import dashboard as nirspec_dash
@@ -147,7 +152,7 @@ def get_all_proposals():
return proposals
-def get_current_flagged_anomalies(rootname):
+def get_current_flagged_anomalies(rootname, instrument):
"""Return a list of currently flagged anomalies for the given
``rootname``
@@ -164,7 +169,13 @@ def get_current_flagged_anomalies(rootname):
(e.g. ``['snowball', 'crosstalk']``)
"""
- query = di.session.query(di.Anomaly).filter(di.Anomaly.rootname == rootname).order_by(di.Anomaly.flag_date.desc()).limit(1)
+ table_dict = {}
+    for instrument_key in JWST_INSTRUMENT_NAMES_MIXEDCASE:
+        table_dict[instrument_key.lower()] = getattr(di, '{}Anomaly'.format(JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument_key]))
+
+ table = table_dict[instrument.lower()]
+ query = di.session.query(table).filter(table.rootname == rootname).order_by(table.flag_date.desc()).limit(1)
+
all_records = query.data_frame
if not all_records.empty:
current_anomalies = [col for col, val in np.sum(all_records, axis=0).items() if val]
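
With the new ``instrument`` argument, callers select the per-instrument anomaly table; a hedged sketch, reusing the example rootname from elsewhere in this module::

    from jwql.website.apps.jwql.data_containers import get_current_flagged_anomalies

    anomalies = get_current_flagged_anomalies('jw86600008001_02101_00007_guider2', 'fgs')
    # e.g. ['ghost', 'other']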
@@ -475,7 +486,8 @@ def get_filenames_by_rootname(rootname):
Parameters
----------
rootname : str
- The rootname of interest (e.g. ``jw86600008001_02101_00007_guider2``).
+ The rootname of interest (e.g.
+ ``jw86600008001_02101_00007_guider2``).
Returns
-------
@@ -494,7 +506,7 @@ def get_filenames_by_rootname(rootname):
def get_header_info(filename):
- """Return the header information for a given ``file``.
+ """Return the header information for a given ``filename``.
Parameters
----------
@@ -508,11 +520,47 @@ def get_header_info(filename):
The primary FITS header for the given ``file``.
"""
- dirname = filename[:7]
- fits_filepath = os.path.join(FILESYSTEM_DIR, dirname, filename)
- header = fits.getheader(fits_filepath, ext=0).tostring(sep='\n')
+ # Initialize dictionary to store header information
+ header_info = {}
+
+ # Open the file
+ fits_filepath = os.path.join(FILESYSTEM_DIR, filename[:7], '{}.fits'.format(filename))
+ hdulist = fits.open(fits_filepath)
+
+ # Extract header information from file
+ for ext in range(0, len(hdulist)):
+
+ # Initialize dictionary to store header information for particular extension
+ header_info[ext] = {}
+
+ # Get header
+ header = fits.getheader(fits_filepath, ext=ext)
+
+ # Determine the extension name
+ if ext == 0:
+ header_info[ext]['EXTNAME'] = 'PRIMARY'
+ else:
+ header_info[ext]['EXTNAME'] = header['EXTNAME']
+
+ # Get list of keywords and values
+ exclude_list = ['', 'COMMENT']
+ header_info[ext]['keywords'] = [item for item in list(header.keys()) if item not in exclude_list]
+ header_info[ext]['values'] = []
+ for key in header_info[ext]['keywords']:
+ header_info[ext]['values'].append(hdulist[ext].header[key])
+
+ # Close the file
+ hdulist.close()
+
+ # Build tables
+ for ext in header_info:
+ table = Table([header_info[ext]['keywords'], header_info[ext]['values']], names=('Key', 'Value'))
+ temp_path_for_html = os.path.join(tempfile.mkdtemp(), '{}_table.html'.format(header_info[ext]['EXTNAME']))
+ with open(temp_path_for_html, 'w') as f:
+ table.write(f, format='jsviewer', jskwargs={'display_length': 20})
+ header_info[ext]['table'] = open(temp_path_for_html, 'r').read()
- return header
+ return header_info
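
A hedged sketch of the new return structure; the filename is a placeholder rootname-plus-suffix of the form the function expects::

    from jwql.website.apps.jwql.data_containers import get_header_info

    header_info = get_header_info('jw86600008001_02101_00007_guider2_uncal')  # placeholder
    for ext, info in header_info.items():
        print(ext, info['EXTNAME'], len(info['keywords']))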
def get_image_info(file_root, rewrite):
@@ -679,7 +727,8 @@ def get_preview_images_by_rootname(rootname):
Parameters
----------
rootname : str
- The rootname of interest (e.g. ``jw86600008001_02101_00007_guider2``).
+ The rootname of interest (e.g.
+ ``jw86600008001_02101_00007_guider2``).
Returns
-------
@@ -746,6 +795,89 @@ def get_proposal_info(filepaths):
return proposal_info
+
+def get_thumbnails_all_instruments(instruments):
+    """Return a list of thumbnails available in the filesystem for the
+    requested instruments.
+
+    Parameters
+    ----------
+    instruments : list
+        A list of instrument names for which to gather thumbnails.
+
+    Returns
+    -------
+    thumbnails : list
+        A list of thumbnails available in the filesystem for the
+        given instruments.
+    """
+
+ # Make sure instruments are of the proper format (e.g. "Nircam")
+ thumbnail_list = []
+ for inst in instruments: # JWST_INSTRUMENT_NAMES:
+ instrument = inst[0].upper()+inst[1:].lower()
+
+        # TODO: adjust the query based on the parameters requested by the user
+
+ # Query MAST for all rootnames for the instrument
+ service = "Mast.Jwst.Filtered.{}".format(instrument)
+ params = {"columns": "filename, expstart, filter, readpatt, date_beg, date_end, apername, exp_type",
+ "filters": [{"paramName": "expstart",
+ "values": [{"min": 57404.04, "max": 57404.07}], }]}
+ response = Mast.service_request_async(service, params)
+ results = response[0].json()['data']
+
+ # Parse the results to get the rootnames
+ filenames = [result['filename'].split('.')[0] for result in results]
+
+ # Get list of all thumbnails
+ thumbnails = glob.glob(os.path.join(THUMBNAIL_FILESYSTEM, '*', '*.thumb'))
+
+ thumbnail_list.extend(thumbnails)
+
+ # Get subset of preview images that match the filenames
+ thumbnails = [os.path.basename(item) for item in thumbnail_list if
+ os.path.basename(item).split('_integ')[0] in filenames]
+
+ return thumbnails
+
+
+def get_jwqldb_table_view_components(request):
+ """Renders view for JWQLDB table viewer.
+
+ Parameters
+ ----------
+ request : HttpRequest object
+ Incoming request from the webpage
+
+ Returns
+ -------
+ None
+ """
+
+ if request.method == 'POST':
+ # Make dictionary of tablename : class object
+ # This matches what the user selects in the drop down to the python obj.
+ tables_of_interest = {}
+ for item in di.__dict__.keys():
+ table = getattr(di, item)
+ if hasattr(table, '__tablename__'):
+ tables_of_interest[table.__tablename__] = table
+
+ session, base, engine, meta = load_connection(get_config()['connection_string'])
+ tablename_from_dropdown = request.POST['db_table_select']
+ table_object = tables_of_interest[tablename_from_dropdown] # Select table object
+
+ result = session.query(table_object)
+
+ result_dict = [row.__dict__ for row in result.all()] # Turn query result into list of dicts
+ column_names = table_object.__table__.columns.keys()
+
+ # Build list of column data based on column name.
+ data = []
+ for column in column_names:
+ column_data = list(map(itemgetter(column), result_dict))
+ data.append(column_data)
+
+ # Build table.
+ table_to_display = Table(data, names=column_names)
+        table_to_display.show_in_browser(jsviewer=True, max_lines=-1)  # Negative max_lines shows all available lines.
+
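The column-building step above amounts to transposing a list of row dictionaries into per-column lists; a minimal standalone sketch with invented rows::

    from operator import itemgetter

    from astropy.table import Table

    result_dict = [{'id': 1, 'aperture': 'NRCA1_FULL'},
                   {'id': 2, 'aperture': 'NRCB1_FULL'}]
    column_names = ['id', 'aperture']

    data = [list(map(itemgetter(column), result_dict)) for column in column_names]
    table_to_display = Table(data, names=column_names)
    print(table_to_display)
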
def get_thumbnails_by_instrument(inst):
"""Return a list of thumbnails available in the filesystem for the
@@ -816,7 +948,8 @@ def get_thumbnails_by_rootname(rootname):
Parameters
----------
rootname : str
- The rootname of interest (e.g. ``jw86600008001_02101_00007_guider2``).
+ The rootname of interest (e.g.
+ ``jw86600008001_02101_00007_guider2``).
Returns
-------
diff --git a/jwql/website/apps/jwql/forms.py b/jwql/website/apps/jwql/forms.py
index 06e8aed35..4d4a9e6e2 100644
--- a/jwql/website/apps/jwql/forms.py
+++ b/jwql/website/apps/jwql/forms.py
@@ -11,6 +11,7 @@
- Lauren Chambers
- Johannes Sahlmann
- Matthew Bourque
+ - Teagan King
Use
---
@@ -39,7 +40,6 @@ def view_function(request):
------------
The user must have a configuration file named ``config.json``
placed in the ``jwql/utils/`` directory.
-
"""
import datetime
@@ -51,12 +51,35 @@ def view_function(request):
from django.shortcuts import redirect
from jwedb.edb_interface import is_valid_mnemonic
+# from data_containers import get_thumbnails_all_instruments
from jwql.database import database_interface as di
-from jwql.utils.constants import ANOMALY_CHOICES, JWST_INSTRUMENT_NAMES_SHORTHAND
+from jwql.utils.constants import ANOMALY_CHOICES
+from jwql.utils.constants import FILTERS_PER_INSTRUMENT
+from jwql.utils.constants import FULL_FRAME_APERTURES
+from jwql.utils.constants import GENERIC_SUFFIX_TYPES
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES_SHORTHAND
+from jwql.utils.constants import OBSERVING_MODE_PER_INSTRUMENT
from jwql.utils.utils import get_config, filename_parser
+# from jwql.website.apps.jwql.views import current_anomalies ### global variable defined once query_anomaly page has forms filled
FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem')
+# from jwql.utils import anomaly_query_config
+# from jwql.website.apps.jwql import views # update anomaly_query_config
+
+
+class AnomalyForm(forms.Form):
+ """Creates a ``AnomalyForm`` object that allows for anomaly input
+ in a form field."""
+ query = forms.MultipleChoiceField(choices=ANOMALY_CHOICES, widget=forms.CheckboxSelectMultiple()) # Update depending on chosen instruments
+
+ def clean_anomalies(self):
+
+ anomalies = self.cleaned_data['query']
+
+ return anomalies
+
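+# A typical use of ``AnomalyForm`` in a view, sketched after the pattern in
+# views.py (the ``current`` list here is just an illustrative value):
+#
+#     form = AnomalyForm(request.POST or None, initial={'query': current})
+#     if request.method == 'POST' and form.is_valid():
+#         anomalies = form.clean_anomalies()
+
+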
class AnomalySubmitForm(forms.Form):
"""A multiple choice field for specifying flagged anomalies."""
@@ -89,6 +112,80 @@ def update_anomaly_table(self, rootname, user, anomaly_choices):
data_dict[choice] = True
di.engine.execute(di.Anomaly.__table__.insert(), data_dict)
+ def clean_anomalies(self):
+
+ anomalies = self.cleaned_data['anomaly_choices']
+
+ return anomalies
+
+
+class ApertureForm(forms.Form):
+ """Creates an ``ApertureForm`` object that allows for ``aperture``
+ input in a form field."""
+
+ aperture_list = []
+ for instrument in FULL_FRAME_APERTURES.keys():
+ for aperture in FULL_FRAME_APERTURES[instrument]:
+ item = [aperture, aperture]
+ aperture_list.append(item)
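+ # The resulting ``aperture_list`` is a list of [value, label] pairs, e.g.
+ # [['NRCA1_FULL', 'NRCA1_FULL'], ...], as Django expects for ``choices``.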
+ aperture = forms.MultipleChoiceField(required=False, choices=aperture_list, widget=forms.CheckboxSelectMultiple)
+
+ def clean_apertures(self):
+
+ apertures = self.cleaned_data['aperture']
+
+ return apertures
+
+
+class EarlyDateForm(forms.Form):
+ """Creates a ``EarlyDateForm`` object that allows for ``early_date``
+ input in a form field."""
+
+ early_date = forms.DateField(required=False, initial="eg, 2021-10-02 12:04:39 or 2021-10-02")
+
+ # still working out whether we can have initial pre-fill without setting values in request
+ def clean_early_date(self):
+ early_date = self.cleaned_data['early_date']
+
+ return early_date
+
+
+class ExptimeMaxForm(forms.Form):
+ """Creates a ``ExptimeMaxForm`` object that allows for
+ ``exp_time_max`` input in a form field."""
+
+ exp_time_max = forms.DecimalField(initial="57404.70")
+
+ def clean_exptime_max(self):
+ exptime_max = self.cleaned_data['exp_time_max']
+
+ return exptime_max
+
+
+class ExptimeMinForm(forms.Form):
+ """Creates a ``ExptimeMinForm`` object that allows for
+ ``exp_time_min`` input in a form field."""
+
+ exp_time_min = forms.DecimalField(initial="57404.04")
+
+ def clean_exptime_min(self):
+ """Validate the "exp_time_min" field.
+
+ Check that the input is greater than or equal to zero.
+
+ Returns
+ -------
+ exptime_min : decimal.Decimal
+ The cleaned data input into the "exp_time_min" field
+
+ """
+ exptime_min = self.cleaned_data['exp_time_min']
+ if int(exptime_min) < 0:
+ raise forms.ValidationError("""Invalid minimum exposure time {}.
+ Please provide positive value""".format(exptime_min))
+
+ return exptime_min
+
class FileSearchForm(forms.Form):
"""Single-field form to search for a proposal or fileroot."""
@@ -112,8 +209,8 @@ def clean_search(self):
-------
str
The cleaned data input into the "search" field
-
"""
+
# Get the cleaned search data
search = self.cleaned_data['search']
@@ -175,8 +272,8 @@ def _search_is_fileroot(self, search):
-------
bool
Is the search term formatted like a fileroot?
-
"""
+
try:
self.fileroot_dict = filename_parser(search)
return True
@@ -192,6 +289,7 @@ def redirect_to_files(self):
Outgoing redirect response sent to the webpage
"""
+
# Process the data in form.cleaned_data as required
search = self.cleaned_data['search']
proposal_string = '{:05d}'.format(int(search))
@@ -205,6 +303,84 @@ def redirect_to_files(self):
return redirect('/{}/{}'.format(self.instrument, search))
+class FiletypeForm(forms.Form):
+ """Creates a ``FiletypeForm`` object that allows for ``filetype``
+ input in a form field."""
+
+ file_type_list = []
+ for filetype in GENERIC_SUFFIX_TYPES:
+ item = [filetype, filetype]
+ file_type_list.append(item)
+ filetype = forms.MultipleChoiceField(required=False, choices=file_type_list, widget=forms.CheckboxSelectMultiple)
+
+ def clean_filetypes(self):
+
+ file_types = self.cleaned_data['filetype']
+
+ return file_types
+
+class FilterForm(forms.Form):
+ """Creates a ``FilterForm`` object that allows for ``filter``
+ input in a form field."""
+
+ filter_list = []
+ for instrument in FILTERS_PER_INSTRUMENT.keys():
+ # if instrument in anomaly_query_config.INSTRUMENTS_CHOSEN: # eg ['nirspec']: selects relevant filters, but not specific to chosen instruments
+ filters_per_inst = FILTERS_PER_INSTRUMENT[instrument]
+ for filter in filters_per_inst:
+ if [filter, filter] not in filter_list:
+ filter_list.append([filter, filter])
+ filter = forms.MultipleChoiceField(required=False, choices=filter_list, widget=forms.CheckboxSelectMultiple)
+
+ def clean_filters(self):
+
+ filters = self.cleaned_data['filter']
+
+ return filters
+
+
+class InstrumentForm(forms.Form):
+ """Creates a ``InstrumentForm`` object that allows for ``query``
+ input in a form field."""
+
+ query = forms.MultipleChoiceField(required=False,
+ choices=[(inst, JWST_INSTRUMENT_NAMES_MIXEDCASE[inst]) for inst in JWST_INSTRUMENT_NAMES_MIXEDCASE],
+ widget=forms.CheckboxSelectMultiple())
+
+ def clean_instruments(self):
+
+ instruments_chosen = self.cleaned_data['query']
+
+ return instruments_chosen
+
+ def redirect_to_files(self):
+ """Determine where to redirect the web app based on user input.
+
+ Returns
+ -------
+ HttpResponseRedirect object
+ Outgoing redirect response sent to the webpage
+
+ """
+ # Process the data in form.clean_instruments as required
+ instruments = self.cleaned_data['query']
+
+ # get_thumbnails_all_instruments(instruments)
+ return instruments
+
+
+class LateDateForm(forms.Form):
+ """Creates a ``LateDateForm`` object that allows for ``late_date``
+ input in a form field."""
+
+ late_date = forms.DateField(required=False, initial="eg, 2021-11-25 14:30:59 or 2021-11-25")
+
+ def clean_late_date(self):
+ latedate = self.cleaned_data['late_date']
+
+ return latedate
+
+
class MnemonicSearchForm(forms.Form):
"""A single-field form to search for a mnemonic in the DMS EDB."""
@@ -302,8 +478,8 @@ def clean_search(self):
-------
str
The cleaned data input into the "search" field
-
"""
+
# Stop now if not logged in
if not self.logged_in:
raise forms.ValidationError('Could not log into MAST. Please login or provide MAST '
@@ -327,14 +503,15 @@ def clean_start_time(self):
-------
str
The cleaned data input into the start_time field
-
"""
+
start_time = self.cleaned_data['start_time']
try:
Time(start_time, format='iso')
except ValueError:
raise forms.ValidationError('Invalid start time {}. Please enter a time in iso format, '
'e.g. {}'.format(start_time, self.default_start_time))
+
return self.cleaned_data['start_time']
def clean_end_time(self):
@@ -344,8 +521,8 @@ def clean_end_time(self):
-------
str
The cleaned data input into the end_time field
-
"""
+
end_time = self.cleaned_data['end_time']
try:
Time(end_time, format='iso')
@@ -380,3 +557,21 @@ class MnemonicExplorationForm(forms.Form):
help_text="String ID (tlmMnemonic)")
unit = forms.CharField(label='unit', max_length=500, required=False,
help_text="unit")
+
+
+class ObservingModeForm(forms.Form): # Add instruments chosen parameter
+ """Creates a ``ObservingModeForm`` object that allows for ``mode``
+ input in a form field."""
+
+ mode_list = []
+ for instrument in OBSERVING_MODE_PER_INSTRUMENT.keys(): # TODO: also restrict to the instruments chosen
+ modes_per_inst = OBSERVING_MODE_PER_INSTRUMENT[instrument]
+ for mode in modes_per_inst:
+ if [mode, mode] not in mode_list:
+ mode_list.append([mode, mode])
+ mode = forms.MultipleChoiceField(required=False, choices=mode_list, widget=forms.CheckboxSelectMultiple)
+
+ def clean_modes(self):
+
+ modes = self.cleaned_data['mode']
+
+ return modes
diff --git a/jwql/website/apps/jwql/models.py b/jwql/website/apps/jwql/models.py
index 2e4b2976f..90454b267 100644
--- a/jwql/website/apps/jwql/models.py
+++ b/jwql/website/apps/jwql/models.py
@@ -40,6 +40,7 @@
('NIRISS', 'NIRISS'),
('NIRSpec', 'NIRSpec'))
+
class BaseModel(models.Model):
"""A base model that other classes will inherit. Created to avoid
an obscure error about a missing ``app_label``.
@@ -64,9 +65,9 @@ class ImageData(BaseModel):
Date and time when datum was added to the database.
"""
- inst = models.CharField('instrument', max_length=6, choices=INSTRUMENT_LIST, default=None)
+ inst = models.CharField('instrument', max_length=7, choices=INSTRUMENT_LIST, default=None)
pub_date = models.DateTimeField('date published')
- filepath = models.FilePathField(path='/user/lchambers/jwql/') #upload_to=str(inst))
+ filepath = models.FilePathField(path='/user/lchambers/jwql/')
def filename(self):
return os.path.basename(self.filepath)
diff --git a/jwql/website/apps/jwql/monitor_pages/__init__.py b/jwql/website/apps/jwql/monitor_pages/__init__.py
index be7161b60..29f975f8a 100644
--- a/jwql/website/apps/jwql/monitor_pages/__init__.py
+++ b/jwql/website/apps/jwql/monitor_pages/__init__.py
@@ -1,3 +1,3 @@
-from .monitor_ta_bokeh import MonitorTA
-from .monitor_mast_bokeh import MastMonitor
+from .monitor_dark_bokeh import DarkMonitor
from .monitor_filesystem_bokeh import MonitorFilesystem
+from .monitor_mast_bokeh import MastMonitor
diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py
new file mode 100755
index 000000000..1199a840e
--- /dev/null
+++ b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py
@@ -0,0 +1,193 @@
+"""This module contains code for the dark current monitor Bokeh plots.
+
+Author
+------
+
+ - Bryan Hilbert
+ - Gray Kanarek
+ - Lauren Chambers
+
+Use
+---
+
+ This module can be used as such:
+
+ ::
+
+ from jwql.website.apps.jwql import monitor_pages
+ monitor_template = monitor_pages.DarkMonitor('NIRCam', 'NRCA3_FULL')
+ script, div = monitor_template.embed("dark_current_time_figure")
+"""
+
+import os
+
+from astropy.io import fits
+from astropy.time import Time
+from bokeh.models.tickers import LogTicker
+import numpy as np
+
+from jwql.database.database_interface import session
+from jwql.database.database_interface import NIRCamDarkQueryHistory, NIRCamDarkPixelStats, NIRCamDarkDarkCurrent
+from jwql.database.database_interface import NIRISSDarkQueryHistory, NIRISSDarkPixelStats, NIRISSDarkDarkCurrent
+from jwql.database.database_interface import MIRIDarkQueryHistory, MIRIDarkPixelStats, MIRIDarkDarkCurrent
+from jwql.database.database_interface import NIRSpecDarkQueryHistory, NIRSpecDarkPixelStats, NIRSpecDarkDarkCurrent
+from jwql.database.database_interface import FGSDarkQueryHistory, FGSDarkPixelStats, FGSDarkDarkCurrent
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE
+from jwql.utils.utils import get_config
+from jwql.bokeh_templating import BokehTemplate
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+class DarkMonitor(BokehTemplate):
+
+ # Combine instrument and aperture into a single property because we
+ # do not want to invoke the setter unless both are updated
+ @property
+ def aperture_info(self):
+ return (self._instrument, self._aperture)
+
+ @aperture_info.setter
+ def aperture_info(self, info):
+ self._instrument, self._aperture = info
+ self.pre_init()
+ self.post_init()
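+
+ # Illustrative use (values are only examples): assigning the combined
+ # property re-runs pre_init() and post_init() for the new pair, e.g.
+ # monitor.aperture_info = ('NIRCam', 'NRCA3_FULL')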
+
+ def _dark_mean_image(self):
+ """Update bokeh objects with mean dark image data."""
+
+ # Open the mean dark current file and get the data
+ mean_dark_image_file = self.pixel_table[-1].mean_dark_image_file
+ mean_slope_dir = os.path.join(get_config()['outputs'], 'dark_monitor', 'mean_slope_images')
+ mean_dark_image_path = os.path.join(mean_slope_dir, mean_dark_image_file)
+ with fits.open(mean_dark_image_path) as hdulist:
+ data = hdulist[1].data
+
+ # Update the plot with the data and boundaries
+ y_size, x_size = np.shape(data)
+ self.refs["mean_dark_source"].data['image'] = [data]
+ self.refs["stamp_xr"].end = x_size
+ self.refs["stamp_yr"].end = y_size
+ self.refs["mean_dark_source"].data['dw'] = [x_size]
+ self.refs["mean_dark_source"].data['dh'] = [x_size]
+
+ # Set the image color scale
+ self.refs["log_mapper"].high = 0
+ self.refs["log_mapper"].low = -.2
+
+ # This should add ticks to the colorbar, but it doesn't
+ self.refs["mean_dark_cbar"].ticker = LogTicker()
+
+ # Add a title
+ self.refs['mean_dark_image_figure'].title.text = self._aperture
+ self.refs['mean_dark_image_figure'].title.align = "center"
+ self.refs['mean_dark_image_figure'].title.text_font_size = "20px"
+
+ def pre_init(self):
+ # Start with default values for instrument and aperture because
+ # BokehTemplate's __init__ method does not allow input arguments
+ try:
+ dummy_instrument = self._instrument
+ dummy_aperture = self._aperture
+ except AttributeError:
+ self._instrument = 'NIRCam'
+ self._aperture = 'NRCA1_FULL'
+
+ self._embed = True
+
+ # Fix aperture/detector name discrepancy
+ if self._aperture in ['NRCA5_FULL', 'NRCB5_FULL']:
+ self.detector = '{}LONG'.format(self._aperture[0:4])
+ else:
+ self.detector = self._aperture.split('_')[0]
+
+ # App design
+ self.format_string = None
+ self.interface_file = os.path.join(SCRIPT_DIR, "yaml", "dark_monitor_interface.yaml")
+
+ # Load data tables
+ self.load_data()
+
+ # Data for mean dark versus time plot
+ datetime_stamps = [row.obs_mid_time for row in self.dark_table]
+ times = Time(datetime_stamps, format='datetime', scale='utc') # Convert to MJD
+ self.timestamps = times.mjd
+ self.dark_current = [row.mean for row in self.dark_table]
+
+ # Data for dark current histogram plot (full detector)
+ # Just show the last histogram, which is the one most recently
+ # added to the database
+ last_hist_index = -1
+ self.last_timestamp = datetime_stamps[last_hist_index].isoformat()
+ self.full_dark_bin_center = np.array([row.hist_dark_values for
+ row in self.dark_table])[last_hist_index]
+ self.full_dark_amplitude = [row.hist_amplitudes for
+ row in self.dark_table][last_hist_index]
+ self.full_dark_bottom = np.zeros(len(self.full_dark_amplitude))
+ deltas = self.full_dark_bin_center[1:] - self.full_dark_bin_center[0: -1]
+ self.full_dark_bin_width = np.append(deltas[0], deltas)
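+ # Worked example (illustrative numbers): bin centers [1., 2., 4.] give
+ # deltas [1., 2.] and widths [1., 1., 2.]; repeating the first delta keeps
+ # the width array the same length as the bin centers.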
+
+ def post_init(self):
+
+ self._update_dark_v_time()
+ self._update_hist()
+ self._dark_mean_image()
+
+ def identify_tables(self):
+ """Determine which dark current database tables as associated with
+ a given instrument"""
+
+ mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self._instrument.lower()]
+ self.query_table = eval('{}DarkQueryHistory'.format(mixed_case_name))
+ self.pixel_table = eval('{}DarkPixelStats'.format(mixed_case_name))
+ self.stats_table = eval('{}DarkDarkCurrent'.format(mixed_case_name))
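+ # For example, for 'nircam' these resolve to NIRCamDarkQueryHistory,
+ # NIRCamDarkPixelStats and NIRCamDarkDarkCurrent, imported above from
+ # jwql.database.database_interface.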
+
+ def load_data(self):
+ """Query the database tables to get data"""
+
+ # Determine which database tables are needed based on instrument
+ self.identify_tables()
+
+ # Query the database for all rows in the instrument's dark stats table with a matching aperture
+ self.dark_table = session.query(self.stats_table) \
+ .filter(self.stats_table.aperture == self._aperture) \
+ .all()
+
+ self.pixel_table = session.query(self.pixel_table) \
+ .filter(self.pixel_table.detector == self.detector) \
+ .all()
+
+ def _update_dark_v_time(self):
+
+ # Define y range of dark current v. time plot
+ buffer_size = 0.05 * (max(self.dark_current) - min(self.dark_current))
+ self.refs['dark_current_yrange'].start = min(self.dark_current) - buffer_size
+ self.refs['dark_current_yrange'].end = max(self.dark_current) + buffer_size
+
+ # Define x range of dark current v. time plot
+ horizontal_half_buffer = (max(self.timestamps) - min(self.timestamps)) * 0.05
+ if horizontal_half_buffer == 0:
+ horizontal_half_buffer = 1. # day
+ self.refs['dark_current_xrange'].start = min(self.timestamps) - horizontal_half_buffer
+ self.refs['dark_current_xrange'].end = max(self.timestamps) + horizontal_half_buffer
+
+ # Add a title
+ self.refs['dark_current_time_figure'].title.text = self._aperture
+ self.refs['dark_current_time_figure'].title.align = "center"
+ self.refs['dark_current_time_figure'].title.text_font_size = "20px"
+
+ def _update_hist(self):
+
+ # Define y range of dark current histogram
+ buffer_size = 0.05 * (max(self.full_dark_amplitude) - min(self.full_dark_bottom))
+ self.refs['dark_histogram_yrange'].start = min(self.full_dark_bottom)
+ self.refs['dark_histogram_yrange'].end = max(self.full_dark_amplitude) + buffer_size
+
+ # Define x range of dark current histogram
+ self.refs['dark_histogram_xrange'].start = min(self.full_dark_bin_center)
+ self.refs['dark_histogram_xrange'].end = max(self.full_dark_bin_center)
+
+ # Add a title
+ self.refs['dark_full_histogram_figure'].title.text = self._aperture
+ self.refs['dark_full_histogram_figure'].title.align = "center"
+ self.refs['dark_full_histogram_figure'].title.text_font_size = "20px"
diff --git a/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yaml b/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yaml
new file mode 100755
index 000000000..cdadceb33
--- /dev/null
+++ b/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yaml
@@ -0,0 +1,109 @@
+# YAML file defining bokeh figures for the dark monitor
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# Dark Current v. Time Figure
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+- !ColumnDataSource: &dark_current_source
+ ref: "dark_current_source"
+ data:
+ time: !self.timestamps
+ dark_current: !self.dark_current
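+# The !self.<attribute> tags above are resolved by the bokeh_templating
+# machinery to attributes of the DarkMonitor instance (self.timestamps and
+# self.dark_current, set in pre_init) when this interface file is parsed.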
+
+- !Range1d: &dark_current_xrange
+ ref: "dark_current_xrange"
+ #start: 0
+ #end: 1
+ #bounds: 'auto' #!!python/tuple [0, 1]
+
+- !Range1d: &dark_current_yrange
+ ref: "dark_current_yrange"
+ #start: 0
+ #end: 1
+ #bounds: !!python/tuple [-1, 1]
+
+- !Figure: &dark_current_time_figure
+ ref: "dark_current_time_figure"
+ x_axis_label: "Time (MJD)"
+ y_axis_label: "Dark current (e-)"
+ x_range: *dark_current_xrange
+ y_range: *dark_current_yrange
+ elements:
+ - {'kind': 'circle', 'x': 'time', 'y': 'dark_current', line_width: 5, 'source': *dark_current_source}
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# Dark Histogram Figure
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+- !ColumnDataSource: &dark_full_hist_source
+ ref: "dark_full_hist_source"
+ data:
+ full_dark_bin_center: !self.full_dark_bin_center
+ full_dark_amplitude: !self.full_dark_amplitude
+ full_dark_bottom: !self.full_dark_bottom
+ full_dark_bin_width: !self.full_dark_bin_width
+
+- !Range1d: &dark_histogram_xrange
+ ref: "dark_histogram_xrange"
+ #start: 0
+ #end: 1
+ #bounds: 'auto' #!!python/tuple [0, 1]
+
+- !Range1d: &dark_histogram_yrange
+ ref: "dark_histogram_yrange"
+ #start: 0
+ #end: 1
+ #bounds: !!python/tuple [0, 1]
+
+- !Figure: &dark_full_histogram_figure
+ ref: "dark_full_histogram_figure"
+ x_axis_label: "Dark Current (DN/sec)"
+ y_axis_label: "Number of Pixels"
+ x_range: *dark_histogram_xrange
+ y_range: *dark_histogram_yrange
+ elements:
+ - {'kind': 'vbar', 'x': 'full_dark_bin_center', 'y': 'full_dark_bin_width', 'top': 'full_dark_amplitude', 'bottom': 'full_dark_bottom', 'source': *dark_full_hist_source}
+# - {'kind': 'text', 'x': 0, 'y': 20000, 'id': 1001}
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# Mean Dark Image Figure
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+- !ColumnDataSource: &mean_dark_source
+ ref: "mean_dark_source"
+ data:
+ dh: [1]
+ dw: [1]
+ image: [[[1,0], [0, 1]]]
+- !Range1d: &stamp_xr
+ ref: "stamp_xr"
+ #start: 0
+ #end: 1
+ #bounds: !!python/tuple [0, 1]
+- !Range1d: &stamp_yr
+ ref: "stamp_yr"
+ #start: 0
+ #end: 1
+ #bounds: !!python/tuple [0, 1]
+- !LogColorMapper: &log_mapper
+ ref: "log_mapper"
+ palette: "Viridis256"
+ low: 0.
+ high: 1.
+- !ColorBar: &mean_dark_cbar
+ ref: "mean_dark_cbar"
+ color_mapper: *log_mapper
+ location: !!python/tuple [0, 0]
+- !Figure: &mean_dark_image_figure
+ ref: "mean_dark_image_figure"
+ x_axis_label: "Col = SIAF det Y"
+ y_axis_label: "Row = SIAF det X"
+ x_range: *stamp_xr
+ y_range: *stamp_yr
+ tools: ""
+ height: 250 # Not working
+ width: 250 # Not working
+ elements:
+ - {"kind": "image", "image": "image", "x": 0, "y": 0, "dh": 'dh', "dw": 'dh', "source": *mean_dark_source, "color_mapper": *log_mapper}
+ - {"kind": "layout", "obj": *mean_dark_cbar, "place": "right"}
+
+#- !Document:
+# - !column:
+# - *dark_current_time_figure
+# - *dark_full_histogram_figure
diff --git a/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yml b/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yml
deleted file mode 100644
index a60f0f643..000000000
--- a/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-- !ColumnDataSource: &dark_current_source
- ref: "dark_current_source"
- data:
- time: !self.timestamps
- dark_current: !self.dark_current
-- !Range1d: &dark_current_xrange
- ref: "dark_current_xrange"
- start: 0
- end: 1
- bounds: !!python/tuple [0, 1]
-- !Range1d: &dark_current_yrange
- ref: "dark_current_yrange"
- start: 0
- end: 1
- bounds: !!python/tuple [0, 1]
-- !Figure: &dark_current_time_figure
- ref: "dark_current_time_figure"
- x_axis_label: "Time (s)"
- y_axis_label: "Dark current (e-)"
- x_range: *dark_current_xrange
- y_range: *dark_current_yrange
- elements:
- - {'kind': 'line', 'x': 'time', 'y': 'dark_current', 'source': *dark_current_source}
-- !Figure: &hot_pixel_locations_figure
- ref: "hot_pixel_locations_figure"
-- !Document:
- - !column:
- - *dark_current_time_figure
- - *hot_pixel_locations_figure
\ No newline at end of file
diff --git a/jwql/website/apps/jwql/monitor_views.py b/jwql/website/apps/jwql/monitor_views.py
new file mode 100644
index 000000000..1668072d0
--- /dev/null
+++ b/jwql/website/apps/jwql/monitor_views.py
@@ -0,0 +1,74 @@
+"""Defines the views for the ``jwql`` web app instrument monitors.
+
+Authors
+-------
+
+ - Lauren Chambers
+
+Use
+---
+
+ This module is called in ``urls.py`` as such:
+ ::
+
+ from django.urls import path
+ from . import monitor_views
+ urlpatterns = [path('web/path/to/view/', monitor_views.view_name,
+ name='view_name')]
+
+References
+----------
+ For more information please see:
+ ``https://docs.djangoproject.com/en/2.0/topics/http/views/``
+
+Dependencies
+------------
+ The user must have a configuration file named ``config.json``
+ placed in the ``jwql/utils/`` directory.
+"""
+
+import os
+
+from django.shortcuts import render
+
+from . import bokeh_containers
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE
+from jwql.utils.utils import get_config
+
+FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem')
+
+
+def dark_monitor(request, inst):
+ """Generate the dark monitor page for a given instrument
+
+ Parameters
+ ----------
+ request : HttpRequest object
+ Incoming request from the webpage
+ inst : str
+ Name of JWST instrument
+
+ Returns
+ -------
+ HttpResponse object
+ Outgoing response sent to the webpage
+ """
+
+ # Ensure the instrument is correctly capitalized
+ inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()]
+
+ # Deal with the fact that only the NIRCam database is populated
+ if inst == 'NIRCam':
+ tabs_components = bokeh_containers.dark_monitor_tabs(inst)
+ else:
+ tabs_components = None
+
+ template = "dark_monitor.html"
+
+ context = {
+ 'inst': inst,
+ 'tabs_components': tabs_components,
+ }
+
+ # Return a HTTP response with the template and dictionary of variables
+ return render(request, template, context)
diff --git a/jwql/website/apps/jwql/static/css/jwql.css b/jwql/website/apps/jwql/static/css/jwql.css
index 162bcbc8e..398363600 100644
--- a/jwql/website/apps/jwql/static/css/jwql.css
+++ b/jwql/website/apps/jwql/static/css/jwql.css
@@ -409,6 +409,37 @@ li:hover .nav-link, .navbar-brand:hover {
margin-bottom: 1rem;
}
+.slider{
+ -webkit-appearance: none;
+ width: 250px;
+ height: 15px;
+ background: #BEC4D4;
+ outline: none;
+}
+
+/* slider style for Chrome/Safari/Opera/Edge */
+.slider::-webkit-slider-thumb {
+ -webkit-appearance: none;
+ appearance: none;
+ width: 15px;
+ height: 30px;
+ background: #C85108;
+ cursor: pointer;
+}
+
+/* slider style for Firefox */
+.slider::-moz-range-thumb {
+ width: 15px;
+ height: 30px;
+ background: #C85108;
+ cursor: pointer;
+}
+
+/* remove slider outline for Firefox */
+.slider::-moz-focus-outer {
+ border: 0;
+ }
+
.row .row {
margin-top: 1rem;
margin-bottom: 0;
diff --git a/jwql/website/apps/jwql/static/js/jwql.js b/jwql/website/apps/jwql/static/js/jwql.js
index a15f03a7b..af47d3469 100644
--- a/jwql/website/apps/jwql/static/js/jwql.js
+++ b/jwql/website/apps/jwql/static/js/jwql.js
@@ -24,9 +24,9 @@ function change_filetype(type, file_root, num_ints, inst) {
var num_ints = JSON.parse(num_ints);
// Propagate the text fields showing the filename and APT parameters
- var fits_filename = file_root + '_' + type + '.fits'
+ var fits_filename = file_root + '_' + type
document.getElementById("jpg_filename").innerHTML = file_root + '_' + type + '_integ0.jpg';
- document.getElementById("fits_filename").innerHTML = fits_filename;
+ document.getElementById("fits_filename").innerHTML = fits_filename + '.fits';
document.getElementById("proposal").innerHTML = file_root.slice(2,7);
document.getElementById("obs_id").innerHTML = file_root.slice(7,10);
document.getElementById("visit_id").innerHTML = file_root.slice(10,13);
@@ -38,6 +38,11 @@ function change_filetype(type, file_root, num_ints, inst) {
img.src = jpg_filepath;
img.alt = jpg_filepath;
+ // Reset the slider values
+ document.getElementById("slider_range").value = 1
+ document.getElementById("slider_range").max = num_ints[type]
+ document.getElementById("slider_val").innerHTML = 1
+
// Update the number of integrations
var int_counter = document.getElementById("int_count");
int_counter.innerHTML = 'Displaying integration 1/' + num_ints[type];
@@ -50,55 +55,64 @@ function change_filetype(type, file_root, num_ints, inst) {
}
// Update the image download and header links
- document.getElementById("download_fits").href = '/static/filesystem/' + file_root.slice(0,7) + '/' + fits_filename;
+ document.getElementById("download_fits").href = '/static/filesystem/' + file_root.slice(0,7) + '/' + fits_filename + '.fits';
document.getElementById("download_jpg").href = jpg_filepath;
- document.getElementById("view_header").href = '/' + inst + '/' + fits_filename + '/hdr/';
+ document.getElementById("view_header").href = '/' + inst + '/' + fits_filename + '/header/';
// Disable the "left" button, since this will be showing integ0
document.getElementById("int_before").disabled = true;
};
+
/**
* Change the integration number of the displayed image
- * @param {String} direction - The direction to switch to, either "left" (decrease) or "right" (increase).
* @param {String} file_root - The rootname of the file
* @param {Dict} num_ints - A dictionary whose keys are suffix types and whose
* values are the number of integrations for that suffix
+ * @param {String} method - How the integration change was initialized, either "button" or "slider"
+ * @param {String} direction - The direction to switch to, either "left" (decrease) or "right" (increase).
+ * Only relevant if method is "button".
*/
-function change_int(direction, file_root, num_ints) {
+function change_int(file_root, num_ints, method, direction = 'right') {
// Figure out the current image and integration
var suffix = document.getElementById("jpg_filename").innerHTML.split('_');
var integration = Number(suffix[suffix.length - 1][5]);
var suffix = suffix[suffix.length - 2];
var program = file_root.slice(0,7);
-
+
+ // Find the total number of integrations for the current image
var num_ints = num_ints.replace(/'/g, '"');
var num_ints = JSON.parse(num_ints)[suffix];
+ // Get the desired integration value
+ switch (method) {
+ case "button":
+ if ((integration == num_ints - 1 && direction == 'right')||
+ (integration == 0 && direction == 'left')) {
+ return;
+ } else if (direction == 'right') {
+ new_integration = integration + 1
+ } else if (direction == 'left') {
+ new_integration = integration - 1
+ }
+ break;
+ case "slider":
+ new_integration = document.getElementById("slider_range").value - 1;
+ break;
+ }
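+    // For example (illustrative values): with num_ints = 5, a "right" button
+    // click while viewing integration 2 gives new_integration = 3, while the
+    // "slider" case converts the slider's 1-based value back to 0-based.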
- if ((integration == num_ints - 1 && direction == 'right')||
- (integration == 0 && direction == 'left')) {
- return;
- } else if (direction == 'right') {
- // Update integration number
- var new_integration = integration + 1
-
- // Don't let them go further if they're at the last integration
- if (new_integration == num_ints - 1) {
- document.getElementById("int_after").disabled = true;
- }
- document.getElementById("int_before").disabled = false;
- } else if (direction == 'left') {
- // Update integration number
- var new_integration = integration - 1
-
- // Don't let them go further if they're at the first integration
- if (new_integration == 0) {
- document.getElementById("int_before").disabled = true;
- }
+ // Update which buttons are disabled based on the new integration
+ if (new_integration == 0) {
+ document.getElementById("int_after").disabled = false;
+ document.getElementById("int_before").disabled = true;
+ } else if (new_integration < num_ints - 1) {
document.getElementById("int_after").disabled = false;
+ document.getElementById("int_before").disabled = false;
+ } else if (new_integration == num_ints - 1) {
+ document.getElementById("int_after").disabled = true;
+ document.getElementById("int_before").disabled = false;
}
// Update the JPG filename
@@ -118,8 +132,13 @@ function change_int(direction, file_root, num_ints) {
// Update the jpg download link
document.getElementById("download_jpg").href = jpg_filepath;
+
+ // Update the slider values
+ document.getElementById("slider_range").value = new_integration + 1
+ document.getElementById("slider_val").innerHTML = new_integration + 1
};
+
/**
* Determine what filetype to use for a thumbnail
* @param {String} thumbnail_dir - The path to the thumbnail directory
@@ -383,6 +402,29 @@ function update_filter_options(data) {
$("#thumbnail-filter")[0].innerHTML = content;
};
+/**
+ * Change the header extension displayed
+ * @param {String} extension - The extension of the header selected
+ * @param {String} num_extensions - The total number of extensions
+ */
+function update_header_display(extension, num_extensions) {
+
+ // Hide all headers
+ for (var i = 0; i < num_extensions; i++) {
+ var header_name = document.getElementById("header-display-name-extension" + i);
+ var header_table = document.getElementById("header-table-extension" + i);
+ header_name.style.display = 'none';
+ header_table.style.display = 'none';
+ };
+
+ // Display the header selected
+ var header_name_to_show = document.getElementById("header-display-name-extension" + extension);
+ var header_table_to_show = document.getElementById("header-table-extension" + extension);
+ header_name_to_show.style.display = 'inline';
+ header_table_to_show.style.display = 'inline';
+
+};
+
/**
* Updates the img_show_count component
* @param {Integer} count - The count to display
diff --git a/jwql/website/apps/jwql/templates/about.html b/jwql/website/apps/jwql/templates/about.html
index 6cfb01ec5..ad35a2f8e 100644
--- a/jwql/website/apps/jwql/templates/about.html
+++ b/jwql/website/apps/jwql/templates/about.html
@@ -38,14 +38,14 @@ Current Development Team
- Matthew Bourque (Technical Lead)
- Francesca Boffi (Project Manager)
- - Lauren Chambers (FGS)
- Misty Cracraft (MIRI)
- - Joseph Filippazzo (NIRISS)
+ - Mike Engesser (MIRI)
+ - Mees Fix (MESA)
- Bryan Hilbert (NIRCam)
- Graham Kanarek (NIRSpec)
- - Catherine Martlin (WFC3)
- - Sara Ogaz (DATB)
- - Johannes Sahlmann (NIRISS/FGS)
+ - Teagan King (NIRSpec)
+ - Maria Pena-Guerrero (NIRSpec)
+ - Ben Sunnquist (NIRCam)
diff --git a/jwql/website/apps/jwql/templates/base.html b/jwql/website/apps/jwql/templates/base.html
index 1b7eefb06..592a59b72 100644
--- a/jwql/website/apps/jwql/templates/base.html
+++ b/jwql/website/apps/jwql/templates/base.html
@@ -131,6 +131,9 @@
{% endfor %}
EDB(current)
+
+
+ JWQLDB(current)
Documentation(current)
diff --git a/jwql/website/apps/jwql/templates/dark_monitor.html b/jwql/website/apps/jwql/templates/dark_monitor.html
new file mode 100644
index 000000000..af1a8429b
--- /dev/null
+++ b/jwql/website/apps/jwql/templates/dark_monitor.html
@@ -0,0 +1,36 @@
+{% extends "base.html" %}
+
+{% block preamble %}
+
+ {{ inst }} Dark Monitor - JWQL
+
+{% endblock %}
+
+{% block content %}
+
+
+
+ {{ inst }} Dark Monitor
+
+
+
+ {% if inst == 'NIRCam' %}
+
+
+
+ {{ tabs_components[0] | safe }}
+
+
+
+ {{ tabs_components[1] | safe }}
+
+
+ {% else %}
+
+ The dark current monitor is not yet implemented for this instrument.
+
+ {% endif %}
+
+
+
+{% endblock %}
diff --git a/jwql/website/apps/jwql/templates/home.html b/jwql/website/apps/jwql/templates/home.html
index 62bc98cba..e61e05372 100644
--- a/jwql/website/apps/jwql/templates/home.html
+++ b/jwql/website/apps/jwql/templates/home.html
@@ -80,6 +80,12 @@ Find a JWST Proposal or File
+
+
+ Query the Archive for Anomalies
+
+ Enter Query Form
+
{% endblock %}
diff --git a/jwql/website/apps/jwql/templates/jwqldb_table_viewer.html b/jwql/website/apps/jwql/templates/jwqldb_table_viewer.html
new file mode 100644
index 000000000..8390222e2
--- /dev/null
+++ b/jwql/website/apps/jwql/templates/jwqldb_table_viewer.html
@@ -0,0 +1,34 @@
+{% extends "base.html" %}
+
+{% block preamble %}
+
+ Interactive Database Viewer - JWQL
+
+{% endblock %}
+
+{% block content %}
+
+
+ Explore JWQL database tables through the web browser
+
+
+ This page allows users to interactively explore the JWQL database tables with the Astropy Tables JavaScript viewer. Simply select a table from the dropdown menu.
+
+
+
+
+
+{% endblock %}
\ No newline at end of file
diff --git a/jwql/website/apps/jwql/templates/query_anomaly.html b/jwql/website/apps/jwql/templates/query_anomaly.html
new file mode 100644
index 000000000..ce0637636
--- /dev/null
+++ b/jwql/website/apps/jwql/templates/query_anomaly.html
@@ -0,0 +1,121 @@
+{% extends "base.html" %}
+
+{% block preamble %}
+
+ Home - JWQL
+
+{% endblock %}
+
+{% block content %}
+
+
+ Anomaly Query Form
+
+
+
+
+ Submit form to search the database for anomalies across various instruments and metadata.
+
+
+
+
+
+
+
+
+
Minimum Exposure Time
+
+
+
+
+
Maximum Exposure Time
+
+
+
+
+
+
+
Earliest Date/Time Observed
+
+
+
+
+
Latest Date/Time Observed
+
+
+
+
+
+
+ Confirm current data from form before clicking submit.
+
+ Instruments: {{requested_insts}}
+
+
+ Possible anomalies corresponding with chosen instruments (can choose from these in subsequent forms):
+ {{ current_anomalies }}
+
+
+
+ Next
+
+
+{% endblock %}
diff --git a/jwql/website/apps/jwql/templates/query_anomaly_2.html b/jwql/website/apps/jwql/templates/query_anomaly_2.html
new file mode 100644
index 000000000..ed1a48b31
--- /dev/null
+++ b/jwql/website/apps/jwql/templates/query_anomaly_2.html
@@ -0,0 +1,109 @@
+{% extends "base.html" %}
+
+{% block preamble %}
+
+ Home - JWQL
+
+{% endblock %}
+
+{% block content %}
+
+
+ Anomaly Query Form Step 2
+
+
+
+
+ Continue to submit form to search the database for anomalies across various instruments and metadata.
+
+
+
+
+
+
+
+
+
Observing Mode
+
+
+
+
+
+
+
+ Current anomaly options: {{current_anomalies}}
+ Instruments chosen: {{instruments_chosen_cfg}}
+
+ Observing modes chosen: {{observing_modes_chosen}}
+
+
+ Filters chosen: {{filters_chosen}}
+
+
+ Apertures chosen: {{apertures_chosen}}
+
+
+
+ Next
+
+
+{% endblock %}
diff --git a/jwql/website/apps/jwql/templates/query_anomaly_3.html b/jwql/website/apps/jwql/templates/query_anomaly_3.html
new file mode 100644
index 000000000..8a57a00eb
--- /dev/null
+++ b/jwql/website/apps/jwql/templates/query_anomaly_3.html
@@ -0,0 +1,44 @@
+{% extends "base.html" %}
+
+{% block preamble %}
+
+ Home - JWQL
+
+{% endblock %}
+
+{% block content %}
+
+
+ Anomaly Query Form Step 3
+
+
+
+
+ Continue to submit form to search the database for anomalies across various instruments and metadata.
+
+
+
+
+
Anomalies to Query
+ All possible anomalies have been checked; feel free to uncheck any you do not want. If no instruments were selected, the default is all anomalies for all instruments.
+
+
+
+
+
+ Anomalies chosen given options from chosen instruments: {{chosen_current_anomalies}}
+
+ Next
+
+
+{% endblock %}
diff --git a/jwql/website/apps/jwql/templates/query_submit.html b/jwql/website/apps/jwql/templates/query_submit.html
new file mode 100644
index 000000000..9df9e424c
--- /dev/null
+++ b/jwql/website/apps/jwql/templates/query_submit.html
@@ -0,0 +1,91 @@
+{% extends "base.html" %}
+
+{% block preamble %}
+
+
+
+
+ Queried Thumbnails - JWQL
+
+{% endblock %}
+
+{% block content %}
+
+
+
+
+ Images of Queried Instruments
+
+ This page will provide thumbnails of images that satisfy your request.
+ The anomaly query forms are still under development.
+
+ We have recognized the request to view images from the following instruments: {{inst_list_chosen}}.
+
+
+
+ The anomalies that correspond with those instruments are the following: {{current_anomalies}}.
+
+
+
+ The anomalies chosen from the relevant set are the following: {{anomalies_chosen_from_current_anomalies}}
+
+
+
+ The requested filters are the following: {{filters_chosen}}
+
+
+
+ The requested apertures are the following: {{apertures_chosen}}
+
+
+
+ The requested observing modes are the following: {{observing_modes_chosen}}
+
+
+ thumbnails list: {{thumbnails}}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/jwql/website/apps/jwql/templates/view_header.html b/jwql/website/apps/jwql/templates/view_header.html
index f0625bf14..da9ee76d8 100644
--- a/jwql/website/apps/jwql/templates/view_header.html
+++ b/jwql/website/apps/jwql/templates/view_header.html
@@ -9,22 +9,45 @@
{% block content %}
-
diff --git a/jwql/website/apps/jwql/templates/view_image.html b/jwql/website/apps/jwql/templates/view_image.html
index a6b0d77b8..e32e02179 100644
--- a/jwql/website/apps/jwql/templates/view_image.html
+++ b/jwql/website/apps/jwql/templates/view_image.html
@@ -14,7 +14,7 @@ {{ file_root }}
-
+
+
+
Download FITS
diff --git a/jwql/website/apps/jwql/urls.py b/jwql/website/apps/jwql/urls.py
index fd0cb3642..439d0c583 100644
--- a/jwql/website/apps/jwql/urls.py
+++ b/jwql/website/apps/jwql/urls.py
@@ -12,6 +12,7 @@
- Lauren Chambers
- Matthew Bourque
- Johannes Sahlmann
+ - Teagan King
Use
---
@@ -44,6 +45,7 @@
from django.urls import re_path
from . import api_views
+from . import monitor_views
from . import oauth
from . import views
@@ -60,21 +62,29 @@
path('logout/', oauth.logout, name='logout'),
path('authorize/', oauth.authorize, name='authorize'),
- # NIRSpec views
+ # MIRI-specific views
+ path('miri/miri_data_trending/', views.miri_data_trending, name='miri_data_trending'),
+
+ # NIRSpec-specific views
path('nirspec/nirspec_data_trending/', views.nirspec_data_trending, name='nirspec_data_trending'),
- # MIRI views
- path('miri/miri_data_trending/', views.miri_data_trending, name='miri_data_trending'),
+ # Common monitor views
+ re_path(r'^(?P<inst>({}))/.+_monitor/$'.format(instruments), monitor_views.dark_monitor, name='dark_monitor'),
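+ # e.g. a request for 'nircam/dark_monitor/' is routed to monitor_views.dark_monitor;
+ # the pattern accepts any '<name>_monitor/' suffix for the recognized instruments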
# Main site views
path('about/', views.about, name='about'),
path('dashboard/', views.dashboard, name='dashboard'),
path('edb/', views.engineering_database, name='edb'),
+ path('query_anomaly/', views.query_anomaly, name='query_anomaly'),
+ path('query_anomaly_2/', views.query_anomaly_2, name='query_anomaly_2'),
+ path('query_anomaly_3/', views.query_anomaly_3, name='query_anomaly_3'),
+ path('query_submit/', views.query_submit, name='query_submit'),
+ path('table_viewer', views.jwqldb_table_viewer, name='table_viewer'),
re_path(r'^(?P<inst>({}))/$'.format(instruments), views.instrument, name='instrument'),
re_path(r'^(?P<inst>({}))/archive/$'.format(instruments), views.archived_proposals, name='archive'),
re_path(r'^(?P<inst>({}))/unlooked/$'.format(instruments), views.unlooked_images, name='unlooked'),
re_path(r'^(?P<inst>({}))/(?P<file_root>[\w]+)/$'.format(instruments), views.view_image, name='view_image'),
- re_path(r'^(?P<inst>({}))/(?P<file>.+)/hdr/$'.format(instruments), views.view_header, name='view_header'),
+ re_path(r'^(?P<inst>({}))/(?P<filename>.+)/header/$'.format(instruments), views.view_header, name='view_header'),
re_path(r'^(?P<inst>({}))/archive/(?P<proposal>[\d]{{1,5}})/$'.format(instruments), views.archive_thumbnails, name='archive_thumb'),
# AJAX views
@@ -83,6 +93,7 @@
# REST API views
path('api/proposals/', api_views.all_proposals, name='all_proposals'),
+ #path('api/queried_thumbnails/', api_views.thumbnails_all_instruments, name='thumbnails_all_instruments'),
re_path(r'^api/(?P<inst>({}))/proposals/$'.format(instruments), api_views.instrument_proposals, name='instrument_proposals'),
re_path(r'^api/(?P<inst>({}))/preview_images/$'.format(instruments), api_views.preview_images_by_instrument, name='preview_images_by_instrument'),
re_path(r'^api/(?P<inst>({}))/thumbnails/$'.format(instruments), api_views.thumbnails_by_instrument, name='thumbnails_by_instrument'),
diff --git a/jwql/website/apps/jwql/views.py b/jwql/website/apps/jwql/views.py
index fa944f35d..bda007e2f 100644
--- a/jwql/website/apps/jwql/views.py
+++ b/jwql/website/apps/jwql/views.py
@@ -12,6 +12,8 @@
- Lauren Chambers
- Johannes Sahlmann
+ - Teagan King
+ - Mees Fix
Use
---
@@ -35,27 +37,55 @@
placed in the ``jwql/utils/`` directory.
"""
-import datetime
import os
from django.http import JsonResponse
+# from django import forms
from django.shortcuts import render
-from .data_containers import get_acknowledgements, get_edb_components
+from jwql.database.database_interface import load_connection
+from jwql.utils.constants import ANOMALIES_PER_INSTRUMENT
+from jwql.utils.constants import FILTERS_PER_INSTRUMENT
+from jwql.utils.constants import FULL_FRAME_APERTURES
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES
+from jwql.utils.constants import MONITORS
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE
+from jwql.utils.constants import OBSERVING_MODE_PER_INSTRUMENT
+from jwql.utils.utils import get_base_url
+from jwql.utils.utils import get_config
+
+from .data_containers import data_trending
+from .data_containers import get_acknowledgements
+from .data_containers import get_current_flagged_anomalies
from .data_containers import get_dashboard_components
+from .data_containers import get_edb_components
from .data_containers import get_filenames_by_instrument
from .data_containers import get_header_info
from .data_containers import get_image_info
-from .data_containers import get_current_flagged_anomalies
from .data_containers import get_proposal_info
+from .data_containers import get_thumbnails_all_instruments
+from .data_containers import nirspec_trending
from .data_containers import random_404_page
+from .data_containers import get_jwqldb_table_view_components
from .data_containers import thumbnails_ajax
-from .data_containers import data_trending
-from .data_containers import nirspec_trending
-from .forms import AnomalySubmitForm, FileSearchForm
+from .forms import AnomalyForm
+from .forms import AnomalySubmitForm
+from .forms import ApertureForm
+from .forms import EarlyDateForm
+from .forms import ExptimeMaxForm
+from .forms import ExptimeMinForm
+from .forms import FileSearchForm
+from .forms import FiletypeForm
+from .forms import FilterForm
+from .forms import InstrumentForm
+from .forms import LateDateForm
+from .forms import ObservingModeForm
from .oauth import auth_info, auth_required
-from jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS, JWST_INSTRUMENT_NAMES_MIXEDCASE
-from jwql.utils.utils import get_base_url, get_config
+
+# from jwql.utils.anomaly_query_config import APERTURES_CHOSEN, CURRENT_ANOMALIES
+# from jwql.utils.anomaly_query_config import INSTRUMENTS_CHOSEN, OBSERVING_MODES_CHOSEN
+# from jwql.utils.anomaly_query_config import ANOMALIES_CHOSEN_FROM_CURRENT_ANOMALIES
+from jwql.utils import anomaly_query_config
FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem')
@@ -137,6 +167,7 @@ def about(request):
HttpResponse object
Outgoing response sent to the webpage
"""
+
template = 'about.html'
acknowledgements = get_acknowledgements()
context = {'acknowledgements': acknowledgements,
@@ -161,6 +192,7 @@ def archived_proposals(request, user, inst):
HttpResponse object
Outgoing response sent to the webpage
"""
+
# Ensure the instrument is correctly capitalized
inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()]
@@ -187,6 +219,7 @@ def archived_proposals_ajax(request, user, inst):
HttpResponse object
Outgoing response sent to the webpage
"""
+
# Ensure the instrument is correctly capitalized
inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()]
@@ -278,6 +311,7 @@ def dashboard(request):
HttpResponse object
Outgoing response sent to the webpage
"""
+
template = 'dashboard.html'
output_dir = get_config()['outputs']
dashboard_components, dashboard_html = get_dashboard_components()
@@ -309,6 +343,7 @@ def engineering_database(request, user):
Outgoing response sent to the webpage
"""
+
edb_components = get_edb_components(request)
template = 'engineering_database.html'
@@ -364,6 +399,7 @@ def instrument(request, inst):
HttpResponse object
Outgoing response sent to the webpage
"""
+
# Ensure the instrument is correctly capitalized
inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()]
@@ -382,6 +418,37 @@ def instrument(request, inst):
return render(request, template, context)
+def jwqldb_table_viewer(request):
+ """Generate the JWQL Table Viewer view.
+
+ Parameters
+ ----------
+ request : HttpRequest object
+ Incoming request from the webpage
+
+ user : dict
+ A dictionary of user credentials.
+
+ Returns
+ -------
+ HttpResponse object
+ Outgoing response sent to the webpage
+ """
+
+ table_view_components = get_jwqldb_table_view_components(request)
+
+ session, base, engine, meta = load_connection(get_config()['connection_string'])
+ all_jwql_tables = engine.table_names()
+
+ template = 'jwqldb_table_viewer.html'
+ context = {
+ 'inst': '',
+ 'all_jwql_tables': all_jwql_tables,
+ 'table_view_components': table_view_components}
+
+ return render(request, template, context)
+
+
def not_found(request, *kwargs):
"""Generate a ``not_found`` page
@@ -395,6 +462,7 @@ def not_found(request, *kwargs):
HttpResponse object
Outgoing response sent to the webpage
"""
+
template = random_404_page()
status_code = 404 # Note that this will show 400, 403, 404, and 500 as 404 status
context = {'inst': ''}
@@ -402,6 +470,211 @@ def not_found(request, *kwargs):
return render(request, template, context, status=status_code)
+def query_anomaly(request):
+ """Generate the anomaly query form page.
+
+ Parameters
+ ----------
+ request : HttpRequest object
+ Incoming request from the webpage
+
+ Returns
+ -------
+ HttpResponse object
+ Outgoing response sent to the webpage
+ """
+
+ exposure_min_form = ExptimeMinForm(request.POST or None)
+ exposure_max_form = ExptimeMaxForm(request.POST or None)
+ instrument_form = InstrumentForm(request.POST or None)
+ early_date_form = EarlyDateForm(request.POST or None)
+ late_date_form = LateDateForm(request.POST or None)
+
+ # global current_anomalies
+ current_anomalies = ['cosmic_ray_shower', 'diffraction_spike', 'excessive_saturation',
+ 'guidestar_failure', 'persistence', 'other']
+
+ # global instruments_chosen
+ instruments_chosen = "No instruments chosen"
+ if request.method == 'POST':
+ if instrument_form.is_valid():
+ instruments_chosen = instrument_form.clean_instruments()
+
+ for anomaly in ANOMALIES_PER_INSTRUMENT:
+ for inst in instruments_chosen:
+ if inst in ANOMALIES_PER_INSTRUMENT[anomaly]:
+ if anomaly not in current_anomalies:
+ current_anomalies.append(anomaly)
+
+ anomaly_query_config.INSTRUMENTS_CHOSEN = instruments_chosen
+ anomaly_query_config.CURRENT_ANOMALIES = current_anomalies
+
+ template = 'query_anomaly.html'
+ context = {'inst': '',
+ 'exposure_min_form': exposure_min_form,
+ 'exposure_max_form': exposure_max_form,
+ 'instrument_form': instrument_form,
+ 'early_date_form': early_date_form,
+ 'late_date_form': late_date_form,
+ 'requested_insts': anomaly_query_config.INSTRUMENTS_CHOSEN,
+ 'current_anomalies': anomaly_query_config.CURRENT_ANOMALIES,
+ 'None': "No instruments chosen"}
+
+ return render(request, template, context)
+
+
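+# Note: the anomaly query is split across several views. Based on the "Next"
+# buttons in the templates (an inference, not enforced anywhere in the code),
+# the intended flow is query_anomaly -> query_anomaly_2 -> query_anomaly_3
+# -> query_submit, with intermediate selections stashed in
+# jwql.utils.anomaly_query_config.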
+def query_anomaly_2(request):
+ """Generate the second page of the anomaly query form.
+
+ Parameters
+ ----------
+ request : HttpRequest object
+ Incoming request from the webpage
+
+ Returns
+ -------
+ HttpResponse object
+ Outgoing response sent to the webpage
+ """
+
+ initial_aperture_list = []
+ for instrument in FULL_FRAME_APERTURES.keys():
+ if instrument.lower() in anomaly_query_config.INSTRUMENTS_CHOSEN:
+ for aperture in FULL_FRAME_APERTURES[instrument]:
+ initial_aperture_list.append(aperture)
+
+ initial_mode_list = []
+ for instrument in OBSERVING_MODE_PER_INSTRUMENT.keys():
+ if instrument in anomaly_query_config.INSTRUMENTS_CHOSEN:
+ for mode in OBSERVING_MODE_PER_INSTRUMENT[instrument]:
+ initial_mode_list.append(mode)
+
+ initial_filter_list = []
+ for instrument in FILTERS_PER_INSTRUMENT.keys():
+ if instrument in anomaly_query_config.INSTRUMENTS_CHOSEN:
+ for filter in FILTERS_PER_INSTRUMENT[instrument]:
+ initial_filter_list.append(filter)
+
+ aperture_form = ApertureForm(request.POST or None, initial={'aperture': initial_aperture_list})
+ filter_form = FilterForm(request.POST or None, initial={'filter': initial_filter_list})
+ filetype_form = FiletypeForm(request.POST or None)
+ observing_mode_form = ObservingModeForm(request.POST or None, initial={'mode': initial_mode_list})
+
+ # Saving one form currently removes initial choices of other forms on the page
+ # global apertures_chosen
+ apertures_chosen = "No apertures chosen"
+ if request.method == 'POST':
+ if aperture_form.is_valid():
+ apertures_chosen = aperture_form.clean_apertures()
+ initial_aperture_list = apertures_chosen
+ anomaly_query_config.APERTURES_CHOSEN = apertures_chosen
+
+ # global filters_chosen
+ filters_chosen = "No filters chosen"
+ if request.method == 'POST':
+ if filter_form.is_valid():
+ filters_chosen = filter_form.clean_filters()
+ initial_filter_list = filters_chosen
+ anomaly_query_config.FILTERS_CHOSEN = filters_chosen
+
+ # global observing_modes_chosen
+ observing_modes_chosen = "No observing modes chosen"
+ if request.method == 'POST':
+ if observing_mode_form.is_valid():
+ observing_modes_chosen = observing_mode_form.clean_modes()
+ initial_mode_list = observing_modes_chosen
+ anomaly_query_config.OBSERVING_MODES_CHOSEN = observing_modes_chosen
+
+ # if current_anomalies == None:
+ # print("PLEASE START AT THE FIRST PAGE IN THE FORMS! (eg, /query_anomaly/ ")
+
+ template = 'query_anomaly_2.html'
+ context = {'inst': '',
+ 'aperture_form': aperture_form,
+ 'filter_form': filter_form,
+ 'filetype_form': filetype_form,
+ 'observing_mode_form': observing_mode_form,
+ 'apertures_chosen': anomaly_query_config.APERTURES_CHOSEN,
+ 'current_anomalies': anomaly_query_config.CURRENT_ANOMALIES,
+ 'filters_chosen': anomaly_query_config.FILTERS_CHOSEN,
+ 'instruments_chosen_cfg': anomaly_query_config.INSTRUMENTS_CHOSEN,
+ 'observing_modes_chosen': anomaly_query_config.OBSERVING_MODES_CHOSEN
+ }
+
+ return render(request, template, context)
+
+
+def query_anomaly_3(request):
+ """Generate the second page of the anomaly query form.
+
+ Parameters
+ ----------
+ request : HttpRequest object
+ Incoming request from the webpage
+
+ Returns
+ -------
+ HttpResponse object
+ Outgoing response sent to the webpage
+ """
+
+ anomaly_form = AnomalyForm(request.POST or None, initial={'query': anomaly_query_config.CURRENT_ANOMALIES})
+
+ # if current_anomalies == None:
+ # print("PLEASE START AT THE FIRST PAGE IN THE FORMS! (eg, /query_anomaly/ ")
+ # global anomalies_chosen_from_current_anomalies
+ anomalies_chosen_from_current_anomalies = anomaly_query_config.CURRENT_ANOMALIES
+ if request.method == 'POST':
+ if anomaly_form.is_valid():
+ anomalies_chosen_from_current_anomalies = anomaly_form.clean_anomalies()
+ anomaly_query_config.ANOMALIES_CHOSEN_FROM_CURRENT_ANOMALIES = anomalies_chosen_from_current_anomalies
+
+ template = 'query_anomaly_3.html'
+ context = {'inst': '',
+ 'anomaly_form': anomaly_form,
+ 'chosen_current_anomalies': anomalies_chosen_from_current_anomalies
+ }
+
+ return render(request, template, context)
+
+
+def query_submit(request):
+ """Generate the page listing all archived images in the database
+ for a certain proposal
+
+ Parameters
+ ----------
+ request : HttpRequest object
+ Incoming request from the webpage
+
+ Returns
+ -------
+ HttpResponse object
+ Outgoing response sent to the webpage
+ """
+
+ # if current_anomalies == None:
+ # print("PLEASE START AT THE FIRST PAGE IN THE FORMS! (eg, /query_anomaly/ ")
+
+ template = 'query_submit.html'
+ # inst_list_chosen = ["NIRSpec", "NIRCam"]
+
+ # print(get_thumbnails_all_instruments(inst_list_chosen))
+
+ context = {'inst': '',
+ 'anomalies_chosen_from_current_anomalies': anomaly_query_config.ANOMALIES_CHOSEN_FROM_CURRENT_ANOMALIES,
+ 'apertures_chosen': anomaly_query_config.APERTURES_CHOSEN,
+ 'current_anomalies': anomaly_query_config.CURRENT_ANOMALIES,
+ 'filters_chosen': anomaly_query_config.FILTERS_CHOSEN,
+ 'inst_list_chosen': anomaly_query_config.INSTRUMENTS_CHOSEN,
+ 'observing_modes_chosen': anomaly_query_config.OBSERVING_MODES_CHOSEN
+ # 'thumbnails': get_thumbnails_all_instruments(inst_list_chosen)
+ }
+
+ return render(request, template, context)
+
+
def unlooked_images(request, inst):
"""Generate the page listing all unlooked images in the database
@@ -421,7 +694,7 @@ def unlooked_images(request, inst):
pass
-def view_header(request, inst, file):
+def view_header(request, inst, filename):
"""Generate the header view page
Parameters
@@ -430,7 +703,7 @@ def view_header(request, inst, file):
Incoming request from the webpage
inst : str
Name of JWST instrument
- file : str
+ filename : str
FITS filename of selected image in filesystem
Returns
@@ -442,13 +715,12 @@ def view_header(request, inst, file):
inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()]
template = 'view_header.html'
- header = get_header_info(file)
- file_root = '_'.join(file.split('_')[:-1])
+ file_root = '_'.join(filename.split('_')[:-1])
context = {'inst': inst,
- 'file': file,
- 'header': header,
- 'file_root': file_root}
+ 'filename': filename,
+ 'file_root': file_root,
+ 'header_info': get_header_info(filename)}
return render(request, template, context)
@@ -483,7 +755,7 @@ def view_image(request, user, inst, file_root, rewrite=False):
image_info = get_image_info(file_root, rewrite)
# Determine current flagged anomalies
- current_anomalies = get_current_flagged_anomalies(file_root)
+ current_anomalies = get_current_flagged_anomalies(file_root, inst)
# Create a form instance
form = AnomalySubmitForm(request.POST or None, initial={'anomaly_choices': current_anomalies})
@@ -496,6 +768,7 @@ def view_image(request, user, inst, file_root, rewrite=False):
# Build the context
context = {'inst': inst,
+ 'prop_id': file_root[2:7],
'file_root': file_root,
'jpg_files': image_info['all_jpegs'],
'fits_files': image_info['all_files'],
diff --git a/jwql/website/db.sqlite3 b/jwql/website/db.sqlite3
deleted file mode 100644
index 0902712bb..000000000
Binary files a/jwql/website/db.sqlite3 and /dev/null differ
diff --git a/presentations/JWQL_web_app.pdf b/presentations/JWQL_web_app.pdf
deleted file mode 100644
index 5c1765d3a..000000000
Binary files a/presentations/JWQL_web_app.pdf and /dev/null differ
diff --git a/presentations/jwql_web_app.pdf b/presentations/jwql_web_app.pdf
new file mode 100644
index 000000000..cb059894e
Binary files /dev/null and b/presentations/jwql_web_app.pdf differ
diff --git a/requirements.txt b/requirements.txt
index 918c873f8..92cd248a3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,27 +1,30 @@
-asdf==2.3.3
-astropy==3.2.1
-astroquery==0.3.9
-authlib==0.11
+asdf==2.6.0
+astropy==4.0.1.post1
+astroquery==0.4
+authlib==0.14.3
bokeh==1.3.4
-codecov==2.0.15
-django==2.2.4
-flake8==3.7.8
-inflection==0.3.1
-ipython==7.7.0
-jinja2==2.10.1
-jsonschema==3.0.2
+codecov==2.1.3
+crds==7.5.0.0
+django==2.2.5
+flake8==3.8.2
+inflection==0.4.0
+ipython==7.15.0
+jinja2==2.11.2
+jsonschema==3.2.0
jwedb>=0.0.3
-matplotlib==3.1.1
-numpy==1.17.0
-numpydoc==0.9.1
-pandas==0.25.0
-psycopg2==2.8.3
-pysiaf==0.4.0
-pytest==5.1.0
-pytest-cov==2.7.1
-scipy==1.3.1
-sphinx==2.2.0
-sqlalchemy==1.3.7
+matplotlib==3.2.1
+nodejs==10.13.0
+numpy==1.18.4
+numpydoc==1.0.0
+pandas==1.0.4
+psycopg2==2.8.5
+pysiaf==0.7.1
+pytest==5.4.2
+pytest-cov==2.9.0
+scipy==1.4.1
+sphinx==3.0.4
+sqlalchemy==1.3.17
stsci_rtd_theme==0.0.2
-twine==1.13.0
-git+https://github.com/spacetelescope/jwst@stable
\ No newline at end of file
+twine==3.1.1
+git+https://github.com/spacetelescope/jwst@stable
+git+https://github.com/spacetelescope/jwst_reffiles
diff --git a/setup.py b/setup.py
index 3e5926276..382030c3a 100644
--- a/setup.py
+++ b/setup.py
@@ -2,20 +2,24 @@
from setuptools import setup
from setuptools import find_packages
-VERSION = '0.22.0'
+VERSION = '0.23.0'
-AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
-AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'
+AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '
+AUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'
DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
+DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.16.2',
+ 'git+https://github.com/spacetelescope/jwst_reffiles'
+ ]
REQUIRES = [
'asdf>=2.3.3',
'astropy>=3.2.1',
'astroquery>=0.3.9',
'authlib',
- 'bokeh>=1.0',
+ 'bokeh>=1.0,<1.4',
'codecov',
+ 'crds',
'django>=2.0',
'flake8',
'inflection',
@@ -23,8 +27,8 @@
'jinja2',
'jsonschema==2.6.0',
'jwedb>=0.0.3',
- 'jwst==0.13.0',
'matplotlib',
+ 'nodejs',
'numpy',
'numpydoc',
'pandas',
@@ -51,6 +55,7 @@
classifiers=['Programming Language :: Python'],
packages=find_packages(),
install_requires=REQUIRES,
+ dependency_links=DEPENDENCY_LINKS,
include_package_data=True,
include_dirs=[np.get_include()],
)
diff --git a/style_guide/README.md b/style_guide/README.md
index 7a691fb44..cf05e8a79 100644
--- a/style_guide/README.md
+++ b/style_guide/README.md
@@ -11,7 +11,7 @@ It is assumed that the reader of this style guide has read and is familiar with
- The [PEP8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/)
- The [PEP257 Docstring Conventions Style Guide](https://www.python.org/dev/peps/pep-0257/)
-- The [`numpydoc` docstring convention](https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt)
+- The [`numpydoc` docstring convention](https://numpydoc.readthedocs.io/en/latest/format.html)
Workflow