Permalink
Browse files

Removes breakpoint and updates submodule references

Updates lookup tables for QC

Adds tests for gradient QC and fixes a major bug

The major bug concerns how the QC checks treat missing values versus fill
values, and the subtle difference between the two.

Enables the previously unsupported trend test

Updates submodule references

Removes numpy as a dependency
  • Loading branch information...
1 parent f0aef01 commit d11df341477a12c8e96730c0179663dd86cc2a50 @lukecampbell lukecampbell committed Jun 5, 2013
View
@@ -8,7 +8,6 @@ develop =
extern/ion-functions
parts =
project-directories
- numpy-install
python
eggs =
coi-services
@@ -37,7 +36,6 @@ find-links =
greenlet=0.4.0
mock=0.8
nose=1.1.2
-numpy=1.6.2
gevent=0.13.8
###
#
@@ -64,19 +62,6 @@ url = http://sddevrepo.oceanobservatories.org/releases/port_agent-ooici-master.t
configure-options =
--prefix=${port_agent:path}
-[numpy-src]
-recipe = hexagonit.recipe.download
-url = http://pypi.python.org/packages/source/n/numpy/numpy-1.6.2.tar.gz#md5=95ed6c9dcc94af1fc1642ea2a33c1bba
-ignore-existing = true
-
-[numpy-install]
-recipe = z3c.recipe.egg:setup
-setup = ${numpy-src:location}/numpy-1.6.2
-args =
- clean
- build
- install
-
[ceiextras]
recipe = zc.recipe.egg
eggs =
Submodule pyon updated 2 files
+1 −0 README
+0 −14 buildout.cfg
View
@@ -2,7 +2,6 @@
extends = buildout.cfg
parts =
project-directories
- numpy-install
gcoverage_patch
python
eggs +=
@@ -68,16 +68,33 @@ def test_stuck_value_test(self):
np.testing.assert_array_almost_equal(self.rdt['tempwat_stuckvl_qc'], [1, 1, 0, 0, 0, 0, 1, 1, 1, 1])
+ def test_trend_test(self):
+ self.svm.stored_value_cas('trend_QCTEST_TEMPWAT', {'time_interval':0, 'polynomial_order': 1, 'standard_deviation': 3})
+ self.rdt['time'] = np.arange(10)
+ self.rdt['temp'] = [0.8147, 0.9058, 0.1270, 0.9134, 0.6324, 0.0975, 0.2785, 0.5469, 0.9575, 0.9649]
+
+ self.rdt.fetch_lookup_values()
+
+ np.testing.assert_array_equal(self.rdt['tempwat_trndtst_qc'], [1] * 10)
+
+
def test_propagate_test(self):
self.rdt['time'] = np.arange(8)
self.rdt['temp'] = [9, 10, 16, 17, 18, 19, 20, 25]
self.rdt['tempwat_glblrng_qc'] = [0, 1, 1, 1, 1, 1, 1, 0]
self.rdt['tempwat_spketst_qc'] = [0, 1, 1, 1, 1, 1, 1, 0]
self.rdt['tempwat_stuckvl_qc'] = [0, 1, 1, 1, 1, 1, 1, 0]
-
- from pyon.util.breakpoint import breakpoint
- breakpoint(locals())
+ self.rdt['tempwat_gradtst_qc'] = [0, 1, 1, 1, 1, 1, 1, 0]
+ self.rdt['tempwat_trndtst_qc'] = [0, 1, 1, 1, 1, 1, 1, 0]
np.testing.assert_array_equal(self.rdt['cmbnflg_qc'], [0, 1, 1, 1, 1, 1, 1, 0])
+
+ def test_gradient_test(self):
+ self.svm.stored_value_cas('grad_QCTEST_TEMPWAT_time', {'d_dat_dx': 50, 'min_dx': 0, 'start_dat': 0, 'tol_dat': 5})
+ self.rdt['time'] = np.arange(5)
+ self.rdt['temp'] = [3, 5, 98, 99, 4]
+ self.rdt.fetch_lookup_values()
+
+ np.testing.assert_array_equal(self.rdt['tempwat_gradtst_qc'], [1, 1, 0, 0, 1])
@@ -113,6 +130,26 @@ def cb(event, *args, **kwargs):
np.testing.assert_array_almost_equal(rdt['tempwat_glblrng_qc'], [0, 1, 1, 1, 1, 1, 1, 0])
self.assertTrue(flagged.wait(10))
+ def test_fill_value_qc(self):
+ self.rdt['time'] = np.arange(5)
+ self.rdt['temp'] = [12] * 5
+ self.rdt.fetch_lookup_values()
+
+ np.testing.assert_array_equal(self.rdt['tempwat_glblrng_qc'], [-99] * 5)
+ np.testing.assert_array_equal(self.rdt['tempwat_spketst_qc'], [-99] * 5)
+ np.testing.assert_array_equal(self.rdt['tempwat_stuckvl_qc'], [-99] * 5)
+ np.testing.assert_array_equal(self.rdt['tempwat_trndtst_qc'], [-99] * 5)
+ np.testing.assert_array_equal(self.rdt['tempwat_gradtst_qc'], [-99] * 5)
+ self.ph.publish_rdt_to_data_product(self.dp_id, self.rdt)
+
+ self.dataset_monitor.event.wait(10)
+ rdt = RecordDictionaryTool.load_from_granule(self.data_retriever.retrieve(self.dataset_id))
+ np.testing.assert_array_equal(rdt['tempwat_glblrng_qc'], [-99] * 5)
+ np.testing.assert_array_equal(rdt['tempwat_spketst_qc'], [-99] * 5)
+ np.testing.assert_array_equal(rdt['tempwat_stuckvl_qc'], [-99] * 5)
+ np.testing.assert_array_equal(rdt['tempwat_trndtst_qc'], [-99] * 5)
+ np.testing.assert_array_equal(rdt['tempwat_gradtst_qc'], [-99] * 5)
+
def test_spike_test(self):
TestQCFunctions.test_spike_test(self)
self.ph.publish_rdt_to_data_product(self.dp_id, self.rdt)
@@ -129,4 +166,20 @@ def test_stuck_value_test(self):
rdt = RecordDictionaryTool.load_from_granule(self.data_retriever.retrieve(self.dataset_id))
np.testing.assert_array_almost_equal(rdt['tempwat_stuckvl_qc'], [1, 1, 0, 0, 0, 0, 1, 1, 1, 1])
+
+ def test_gradient_test(self):
+ TestQCFunctions.test_gradient_test(self)
+ self.ph.publish_rdt_to_data_product(self.dp_id, self.rdt)
+ self.dataset_monitor.event.wait(10)
+
+ rdt = RecordDictionaryTool.load_from_granule(self.data_retriever.retrieve(self.dataset_id))
+ np.testing.assert_array_equal(rdt['tempwat_gradtst_qc'], [1, 1, 0, 0, 1])
+
+ def test_trend_test(self):
+ TestQCFunctions.test_trend_test(self)
+ self.ph.publish_rdt_to_data_product(self.dp_id, self.rdt)
+ self.dataset_monitor.event.wait(10)
+
+ rdt = RecordDictionaryTool.load_from_granule(self.data_retriever.retrieve(self.dataset_id))
+ np.testing.assert_array_almost_equal(rdt['tempwat_trndtst_qc'], [1] * 10)
@@ -697,9 +697,6 @@ def create_simple_qc(self):
def create_simple_qc_pdict(self):
types_manager = TypesManager(self.dataset_management,None,None)
- self.create_global_range_function()
- self.create_spike_test_function()
- self.create_stuck_value_test_function()
contexts = self.create_simple_qc()
context_ids = [i[1] for i in contexts.itervalues()]
context_ids.extend(contexts['temp'][0].qc_contexts)
@@ -251,7 +251,8 @@ def make_qc_functions(self, name, data_product, registration_function):
self.make_grt_qc,
self.make_spike_qc,
self.make_stuckvalue_qc,
- #self.make_trendtest_qc, # Not supported
+ self.make_trendtest_qc, # was not supported
+ self.make_gradienttest_qc,
]
for factory in qc_factories:
@@ -329,14 +330,13 @@ def make_trendtest_qc(self, name, data_product):
pfunc_id, pfunc = self.find_trend_test()
- time_id, time_name = self.get_lookup_value('LV_trend_$designator_%s||time_interval' % data_product)
order_id, order_name = self.get_lookup_value('LV_trend_$designator_%s||polynomial_order' % data_product)
dev_id, dev_name = self.get_lookup_value('LV_trend_$designator_%s||standard_deviation' % data_product)
- pmap = {"dat":name ,"t":time_name,"ord_n":order_name,"ntsd":dev_name}
+ pmap = {"dat":name ,"t":'time',"ord_n":order_name,"ntsd":dev_name}
pfunc.param_map = pmap
- pfunc.lookup_values = [time_id, order_id, dev_id]
+ pfunc.lookup_values = [order_id, dev_id]
dp_name = self.dp_name(data_product)
pc = ParameterContext(name='%s_trndtst_qc' % dp_name.lower(), param_type=ParameterFunctionType(pfunc,value_encoding='|i1'))
pc.uom = '1'
@@ -346,6 +346,28 @@ def make_trendtest_qc(self, name, data_product):
ctxt_id = self.dataset_management.create_parameter_context(name='%s_trndtst_qc' % dp_name.lower(), parameter_type='function', parameter_context=pc.dump(), parameter_function_id=pfunc_id, ooi_short_name=pc.ooi_short_name, units='1', value_encoding='int8', display_name=pc.display_name, description=pc.description)
return ctxt_id, pc
+ def make_gradienttest_qc(self, name, data_product):
+
+ pfunc_id, pfunc = self.find_gradient_test()
+
+ ddatdx_id, ddatdx = self.get_lookup_value('LV_grad_$designator_%s_time||d_dat_dx' % data_product)
+ mindx_id, mindx = self.get_lookup_value('LV_grad_$designator_%s_time||min_dx' % data_product)
+ startdat_id, startdat = self.get_lookup_value('LV_grad_$designator_%s_time||start_dat' % data_product)
+ toldat_id, toldat = self.get_lookup_value('LV_grad_$designator_%s_time||tol_dat' % data_product)
+
+ pmap = {"dat":name, "x": 'time', 'ddatdx': ddatdx, 'mindx':mindx, 'startdat': startdat, 'toldat':toldat}
+ pfunc.param_map = pmap
+ pfunc.lookup_values = [ddatdx_id, mindx_id, startdat_id, toldat_id]
+ dp_name = self.dp_name(data_product)
+
+ pc = ParameterContext(name='%s_gradtst_qc' % dp_name.lower(), param_type=ParameterFunctionType(pfunc, value_encoding='|i1'))
+ pc.uom = '1'
+ pc.ooi_short_name = '%s_GRADTST_QC' % dp_name
+ pc.display_name = '%s Gradient Test Quality Control Flag' % dp_name
+ pc.description = 'The OOI Gradient Test is an automated quality control algorithm used on various OOI data products. This automated algorithm generates flags for data points according to whether changes between successive points are within a pre-determined range.'
+ ctxt_id = self.dataset_management.create_parameter_context(name='%s_gradtst_qc' % dp_name.lower(), parameter_type='function', parameter_context=pc.dump(), parameter_function_id=pfunc_id, ooi_short_name=pc.ooi_short_name, units='1', value_encoding='int8', display_name=pc.display_name, description=pc.description)
+ return ctxt_id, pc
+
def make_propagate_qc(self,inputs):
pfunc_id, pfunc = self.find_propagate_test()
@@ -6,6 +6,7 @@
from csv import DictReader
from StringIO import StringIO
+from pyon.util.log import log
def gradient_test_parser(document):
'''
@@ -55,20 +56,27 @@ def gradient_test_parser(document):
dr = DictReader(sio)
for row in dr:
- key = '_'.join(['grad',row['Reference Designator'],row['Data Product used as Input Data (DAT)'],row['Data Product used as Input Parameter X']])
+ try:
+ key = '_'.join(['grad',row['Reference Designator'],row['Data Product used as Input Data (DAT)'],row['Data Product used as Input Parameter X']])
- document = {}
- document['array'] = row['Array']
- document['instrument_class'] = row['Instrument Class']
- document['reference_designator'] = row['Reference Designator']
- document['dat'] = row['Data Product used as Input Data (DAT)']
- document['x'] = row['Data Product used as Input Parameter X']
- document['units_dat'] = row['Units of DAT']
- document['units_x'] = row['Units of X']
- document['d_dat_dx'] = float(row['DDATDX'])
- document['min_dx'] = float(row['MINDX'])
- document['start_dat'] = float(row['STARTDAT'])
- document['tol_dat'] = float(row['TOLDAT'])
- yield key,document
+ document = {}
+ document['array'] = row['Array']
+ document['instrument_class'] = row['Instrument Class']
+ document['reference_designator'] = row['Reference Designator']
+ document['dat'] = row['Data Product used as Input Data (DAT)']
+ document['x'] = row['Data Product used as Input Parameter X']
+ document['units_dat'] = row['Units of DAT']
+ document['units_x'] = row['Units of X']
+ document['d_dat_dx'] = float(row['DDATDX'])
+ document['min_dx'] = float(row['MINDX']) if row['MINDX'] else 0.
+ document['start_dat'] = float(row['STARTDAT']) if row['STARTDAT'] else 0.
+ document['tol_dat'] = float(row['TOLDAT'])
+ yield key,document
+ except TypeError:
+ log.exception("Couldn't parse row")
+ continue
+ except ValueError:
+ log.exception("Couldn't parse row")
+ continue
return

0 comments on commit d11df34

Please sign in to comment.