
Commit

Merge pull request #57 from appukuttan-shailesh/test_m2m
Test m2m
rgerkin committed Nov 3, 2017
2 parents 4f7bbe2 + aba39a1 commit 20c7b07
Showing 1 changed file with 88 additions and 0 deletions.
sciunit/unit_test/core_tests.py: 88 additions & 0 deletions
@@ -211,6 +211,94 @@ def test_error_types(self):
        InvalidScoreError()
        BadParameterValueError('x',3)

    def test_testm2m_with_observation(self):
        import sciunit
        from sciunit.scores import FloatScore
        from sciunit.capabilities import ProducesNumber
        from sciunit.models import ConstModel

        class NumberTest_M2M(sciunit.TestM2M):
            """Dummy Test"""
            score_type = FloatScore
            description = ("Tests the parameter 'value' between two models")

            def __init__(self, observation=None, name="ValueTest-M2M"):
                sciunit.TestM2M.__init__(self,observation,name)
                self.required_capabilities += (ProducesNumber,)

            def generate_prediction(self, model, verbose=False):
                """Implementation of sciunit.Test.generate_prediction."""
                prediction = model.produce_number()
                return prediction

            def compute_score(self, prediction1, prediction2):
                """Implementation of sciunit.Test.score_prediction."""
                score = sciunit.scores.FloatScore(prediction1 - prediction2)
                score.description = "Difference between model predictions"
                return score

        myModel1 = ConstModel(100.0, "Model1")
        myModel2 = ConstModel(110.0, "Model2")
        myTest = NumberTest_M2M(observation=95.0)
        myScore = myTest.judge([myModel1, myModel2])

        # Test model vs observation
        self.assertEqual(myScore[myTest][myModel1], -5.0)
        self.assertEqual(myScore[myModel1][myTest], 5.0)
        self.assertEqual(myScore["observation"][myModel2], -15.0)
        self.assertEqual(myScore[myModel2]["observation"], 15.0)

        # Test model vs model
        self.assertEqual(myScore[myModel1][myModel2], -10.0)
        self.assertEqual(myScore[myModel2][myModel1], 10.0)

    def test_testm2m_without_observation(self):
        import sciunit
        from sciunit.scores import FloatScore
        from sciunit.capabilities import ProducesNumber
        from sciunit.models import ConstModel

        class NumberTest_M2M(sciunit.TestM2M):
            """Dummy Test"""
            score_type = FloatScore
            description = ("Tests the parameter 'value' between two models")

            def __init__(self, observation=None, name="ValueTest-M2M"):
                sciunit.TestM2M.__init__(self,observation,name)
                self.required_capabilities += (ProducesNumber,)

            def generate_prediction(self, model, verbose=False):
                """Implementation of sciunit.Test.generate_prediction."""
                prediction = model.produce_number()
                return prediction

            def compute_score(self, prediction1, prediction2):
                """Implementation of sciunit.Test.score_prediction."""
                score = sciunit.scores.FloatScore(prediction1 - prediction2)
                score.description = "Difference between model predictions"
                return score

        myModel1 = ConstModel(100.0, "Model1")
        myModel2 = ConstModel(110.0, "Model2")
        myTest = NumberTest_M2M(observation=95.0)
        myScore = myTest.judge([myModel1, myModel2])

        # Test model vs observation; different ways of specifying individual scores
        self.assertEqual(myScore[myTest][myModel1], -5.0)
        self.assertEqual(myScore[myModel1][myTest], 5.0)
        self.assertEqual(myScore["observation"][myModel2], -15.0)
        self.assertEqual(myScore[myModel2]["observation"], 15.0)
        self.assertEqual(myScore[myTest][myTest], 0.0)
        self.assertEqual(myScore["observation"]["observation"], 0.0)

        # Test model vs model; different ways of specifying individual scores
        self.assertEqual(myScore[myModel1][myModel2], -10.0)
        self.assertEqual(myScore[myModel2][myModel1], 10.0)
        self.assertEqual(myScore["Model1"][myModel2], -10.0)
        self.assertEqual(myScore["Model2"][myModel1], 10.0)
        self.assertEqual(myScore[myModel1][myModel1], 0.0)
        self.assertEqual(myScore["Model2"]["Model2"], 0.0)


class CapabilitiesTestCase(unittest.TestCase):
    """Unit tests for sciunit Capability classes"""