
round of unittest and related changes for custom datasource adapter and model runner tests
1 parent a22c248 commit d48b7db41058b229424379c53e07dbfbead14cc7 Ryan McCall committed Aug 16, 2016
@@ -30,6 +30,10 @@
"minResolution": {
"required": false,
"type": "number"
+ },
+ "enableClassifier": {
+ "required": false,
+ "type": "boolean"
}
}
},
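The new optional "enableClassifier" property is supplied through the model spec's modelParams. A minimal sketch of a spec enabling it, matching the shape exercised by the new tests below ("my.metric" is a placeholder metric name; when the flag is omitted, the tests expect the classifier to stay disabled):

    # Sketch of a model spec using the new optional flag.
    modelSpec = {
        "datasource": "custom",
        "metricSpec": {
            "metric": "my.metric"  # placeholder metric name
        },
        "modelParams": {
            "min": 0,              # optional
            "max": 100,            # optional
            "enableClassifier": True
        }
    }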
@@ -389,18 +389,80 @@ def testMonitorMetricWithMinMax(self):
metricObj = repository.getMetric(conn,
metricId,
fields=[schema.metric.c.status,
- schema.metric.c.parameters])
+ schema.metric.c.parameters,
+ schema.metric.c.model_params])
self.assertIn(metricObj.status, (MetricStatus.CREATE_PENDING,
MetricStatus.ACTIVE))
self.assertEqual(json.loads(metricObj.parameters), modelSpec)
+ self._assertClassifierStatusInModelParams(metricObj.model_params,
+ classifierEnabled=False)
+
self._validateModelSpec(json.loads(metricObj.parameters))
g_log.info("Waiting for model to become active")
self.checkModelIsActive(metricId)
self.checkEncoderResolution(metricId, 0, 100)
+ def testMonitorMetricClassifierEnabled(self):
+ """ Test monitorMetric with request for enabled classifier in model
+ params """
+ metricName = "test-" + uuid.uuid1().hex
+
+ adapter = datasource_adapter_factory.createCustomDatasourceAdapter()
+
+ g_log.info("Creating htmengine custom metric; name=%s", metricName)
+ metricId = adapter.createMetric(metricName)
+ self.addCleanup(adapter.deleteMetricByName, metricName)
+
+ # Turn on monitoring
+ modelSpec = {
+ "datasource": "custom",
+
+ "metricSpec": {
+ "metric": metricName
+ },
+
+ "modelParams": {
+ "min": 0, # optional
+ "max": 100, # optional
+ "enableClassifier": True
+ }
+ }
+
+ adapter.monitorMetric(modelSpec)
+
+ with self.engine.connect() as conn:
+ metricObj = repository.getMetric(conn,
+ metricId,
+ fields=[schema.metric.c.status,
+ schema.metric.c.parameters,
+ schema.metric.c.model_params])
+ self.assertEqual(metricObj.status, MetricStatus.CREATE_PENDING)
+ self.assertEqual(json.loads(metricObj.parameters), modelSpec)
+
+ self._assertClassifierStatusInModelParams(metricObj.model_params,
+ classifierEnabled=True)
+
+ self._validateModelSpec(json.loads(metricObj.parameters))
+
+ g_log.info("Waiting for model to become active")
+ self.checkModelIsActive(metricId)
+ self.checkEncoderResolution(metricId, 0, 100)
+
+
+ def _assertClassifierStatusInModelParams(self, model_params,
+ classifierEnabled):
+ self.assertIsNotNone(model_params)
+ modelParams = json.loads(model_params)
+ self.assertIn("modelConfig", modelParams)
+ self.assertIn("modelParams", modelParams["modelConfig"])
+ self.assertIn("clEnable", modelParams["modelConfig"]["modelParams"])
+ self.assertEquals(modelParams["modelConfig"]["modelParams"]["clEnable"],
+ classifierEnabled)
+
+
@staticmethod
def _openTestDataFile(filename):
""" Opens specified test data file in the htmengine integration test data
@@ -749,10 +811,72 @@ def testActivateModel(self):
with self.engine.connect() as conn:
metricObj = repository.getMetric(conn,
metricId,
+ fields=[schema.metric.c.status,
+ schema.metric.c.model_params])
+ self.assertIn(metricObj.status, (MetricStatus.CREATE_PENDING,
+ MetricStatus.ACTIVE))
+
+ self._assertClassifierStatusInModelParams(metricObj.model_params,
+ classifierEnabled=False)
+
+ g_log.info("Waiting for model to become active")
+ self.checkModelIsActive(metricId)
+
+ g_log.info("Waiting at least one model result")
+ self.checkModelResultsSize(metricId, 1, atLeast=True)
+
+
+ def testActivateModelClassifierEnabled(self):
+ """ Test activateModel with classifier enabled in model spec. """
+ metricName = "test-" + uuid.uuid1().hex
+
+ adapter = datasource_adapter_factory.createCustomDatasourceAdapter()
+
+ g_log.info("Creating htmengine custom metric; name=%s", metricName)
+ metricId = adapter.createMetric(metricName)
+ self.addCleanup(adapter.deleteMetricByName, metricName)
+
+ # Turn on monitoring
+ modelSpec = {
+ "datasource": "custom",
+ "metricSpec": {
+ "metric": metricName
+ },
+ "modelParams": {
+ "enableClassifier": True
+ }
+ }
+
+ adapter.monitorMetric(modelSpec)
+
+ with self.engine.connect() as conn:
+ metricObj = repository.getMetric(conn,
+ metricId,
fields=[schema.metric.c.status])
+ self.assertEqual(metricObj.status, MetricStatus.PENDING_DATA)
+
+ # Add some data
+ data = [
+ (0, datetime.datetime.utcnow() - datetime.timedelta(minutes=5)),
+ (100, datetime.datetime.utcnow())
+ ]
+ with self.engine.connect() as conn:
+ repository.addMetricData(conn, metricId, data)
+
+ # Activate model
+ adapter.activateModel(metricId)
+
+ with self.engine.connect() as conn:
+ metricObj = repository.getMetric(conn,
+ metricId,
+ fields=[schema.metric.c.status,
+ schema.metric.c.model_params])
self.assertIn(metricObj.status, (MetricStatus.CREATE_PENDING,
MetricStatus.ACTIVE))
+ self._assertClassifierStatusInModelParams(metricObj.model_params,
+ classifierEnabled=True)
+
g_log.info("Waiting for model to become active")
self.checkModelIsActive(metricId)
@@ -1,6 +1,6 @@
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
-# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
+# Copyright (C) 2016, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
@@ -19,7 +19,7 @@
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
-"""adding multi_step_best_predictions column
+"""Adding multi_step_best_predictions column to metric_data table.
Revision ID: 315d6ad6c19f
Revises: 872a895b8e8
@@ -837,7 +837,7 @@ def testLoadFromFullAndSaveFull(
run=Mock(
side_effect=[
Mock(inferences=dict(anomalyScore=score,
- multiStepBestPredictions={score: score}))
+ multiStepBestPredictions={1: score}))
for score in anomalyScores
]
)
@@ -910,7 +910,7 @@ def testLoadFromFullAndSaveFull(
requestObjects = requests[0].objects
expectedResults = [
ModelInferenceResult(rowID=rowid, status=0, anomalyScore=score,
- multiStepBestPredictions={score: score})
+ multiStepBestPredictions={1: score})
for rowid, score in zip(
[obj.rowID for obj in requestObjects], anomalyScores)
]
@@ -935,9 +935,9 @@ def testLoadFromIncrementalAndSaveIncremental(
anomalyScore3 = 3.333333
anomalyScore4 = 4.444444
bestPredictions1 = {1: 1}
- bestPredictions2 = {2: 2}
- bestPredictions3 = {3: 3}
- bestPredictions4 = {4: 4}
+ bestPredictions2 = {1: 2}
+ bestPredictions3 = {1: 3}
+ bestPredictions4 = {1: 4}
modelInstanceMock = Mock(
run=Mock(
@@ -1063,7 +1063,7 @@ def testLoadFromIncrementalAndSaveFull(
run=Mock(
side_effect=[
Mock(inferences=dict(anomalyScore=score,
- multiStepBestPredictions={score: score}))
+ multiStepBestPredictions={1: score}))
for score in anomalyScores
]
)
@@ -1147,7 +1147,7 @@ def testLoadFromIncrementalAndSaveFull(
requestObjects = requests[0].objects
expectedResults = [
ModelInferenceResult(rowID=rowid, status=0, anomalyScore=score,
- multiStepBestPredictions={score: score})
+ multiStepBestPredictions={1: score})
for rowid, score in zip(
[obj.rowID for obj in requestObjects], anomalyScores[2:])
]
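These mock changes re-key the fake multiStepBestPredictions results: the dictionary is keyed by the number of prediction steps rather than by the anomaly score, so each mocked result now reports a single 1-step-ahead best prediction. A sketch of the inferences payload the mocks emulate (values are illustrative):

    # Illustrative inferences dict matching what the mocked model.run() returns;
    # multiStepBestPredictions maps steps-ahead to the best predicted value.
    inferences = {
        "anomalyScore": 1.111111,
        "multiStepBestPredictions": {1: 2.222222},  # best prediction one step ahead
    }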
@@ -1178,6 +1178,7 @@ def testMultipleInputBatchesPerCheckpoint(
inferenceArgs = "b"
inputRecordSchema = [FieldMetaInfo("c1", "float", "")]
anomalyScore1 = 1.111111
+ bestPredictions1 = {1: 1}
dummyModelParams = dict(modelConfig=modelConfig,
inferenceArgs=inferenceArgs)
@@ -1190,7 +1191,8 @@ def testMultipleInputBatchesPerCheckpoint(
# Configure ModelFactory mock
modelInstanceMock = Mock(run=Mock(
- return_value=Mock(inferences=dict(anomalyScore=anomalyScore1))))
+ return_value=Mock(inferences=dict(anomalyScore=anomalyScore1,
+ bestPredictions=bestPredictions1))))
modelFactoryClassMock.create.return_value = modelInstanceMock
@@ -1261,6 +1263,7 @@ def testExactlyOnceProcessingOfInputBatches(
inferenceArgs = "b"
inputRecordSchema = [FieldMetaInfo("c1", "float", "")]
anomalyScore1 = 1.111111
+ bestPredictions1 = {1: 1}
dummyModelParams = dict(modelConfig=modelConfig,
inferenceArgs=inferenceArgs)
@@ -1282,7 +1285,8 @@ def testExactlyOnceProcessingOfInputBatches(
# Configure ModelFactory mock
modelInstanceMock = Mock(run=Mock(
- return_value=Mock(inferences=dict(anomalyScore=anomalyScore1))))
+ return_value=Mock(inferences=dict(anomalyScore=anomalyScore1,
+ bestPredictions=bestPredictions1))))
modelFactoryClassMock.create.return_value = modelInstanceMock
@@ -1,6 +1,6 @@
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
-# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
+# Copyright (C) 2016, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
@@ -19,7 +19,7 @@
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
-"""Adds multi-step-best-predictions column to metric data.
+"""Adds multi_step_best_predictions column to metric_data table.
Revision ID: 2695f59d78bd
Revises: a60d03066072
