tests: Add full Necker cube test (#1743)
jvesely committed Aug 28, 2020
commit 9f82056 (2 parents: af81f0d + 999363a)
Showing 1 changed file with 38 additions and 43 deletions.
tests/models/test_bi_percepts.py (38 additions, 43 deletions)
@@ -12,10 +12,24 @@
 from itertools import product
 
 
+expected_3_10 = [[ 205.67990124], [ 205.536034], [ 206.29612605],
+                 [-204.87230198], [-204.98539771], [-205.35434273]]
+expected_8_10 = [[-71427.62150144271], [-71428.44255569541],
+                 [-71427.73782852193], [-71428.18340850921],
+                 [-71428.10767225616], [-71428.22607075438],
+                 [-71427.55903615047], [-71427.81981141337],
+                 [67029.19595769834], [67028.98515147284],
+                 [67029.00062228851], [67029.22270778783],
+                 [67029.64637519913], [67028.31812638397],
+                 [67028.98446253323], [67028.45363893337]]
 
 
 @pytest.mark.model
-@pytest.mark.benchmark(group="Simplified Necker Cube")
+@pytest.mark.benchmark
+@pytest.mark.parametrize("n_nodes,n_time_steps,expected", [
+    pytest.param(3, 10, expected_3_10, id="3-10"),
+    pytest.param(8, 10, expected_8_10, id="8-10"),
+])
 @pytest.mark.parametrize("mode", [
     'Python',
     pytest.param('LLVM', marks=[pytest.mark.llvm]),
@@ -24,12 +38,12 @@
     pytest.param('PTXExec', marks=[pytest.mark.llvm, pytest.mark.cuda]),
     pytest.param('PTXRun', marks=[pytest.mark.llvm, pytest.mark.cuda]),
 ])
-def test_simplified_necker_cube(benchmark, mode):
+def test_necker_cube(benchmark, mode, n_nodes, n_time_steps, expected):
     # this code only works for N_PERCEPTS == 2
     ALL_PERCEPTS = ['a', 'b']
 
     # variables
-    n_nodes_per_percepts = 3
+    n_nodes_per_percepts = n_nodes
     excit_level = 1
     inhib_level = 1
     node_dict = {percept: None for percept in ALL_PERCEPTS}
@@ -112,36 +126,28 @@ def get_node(percept, node_id):
 
     # bp_comp.show_graph()
 
-    n_time_steps = 10
     input_dict = {
         node_: np.random.normal(size=(n_time_steps,))
         for node_ in bp_comp.nodes
     }
 
     # run the model
-    res = bp_comp.run(input_dict, num_trials=10, bin_execute=mode)
-    np.testing.assert_allclose(
-        res,
-        [[205.67990124], [205.536034], [206.29612605], [-204.87230198], [-204.98539771], [-205.35434273]]
-    )
+    res = bp_comp.run(input_dict, num_trials=n_time_steps, bin_execute=mode)
+    np.testing.assert_allclose(res, expected)
+
     # Test that order of CIM ports follows order of Nodes in self.nodes
-    assert 'a-0' in bp_comp.input_CIM.input_ports.names[0]
-    assert 'a-1' in bp_comp.input_CIM.input_ports.names[1]
-    assert 'a-2' in bp_comp.input_CIM.input_ports.names[2]
-    assert 'b-0' in bp_comp.input_CIM.input_ports.names[3]
-    assert 'b-1' in bp_comp.input_CIM.input_ports.names[4]
-    assert 'b-2' in bp_comp.input_CIM.input_ports.names[5]
-
-    assert 'a-0' in bp_comp.output_CIM.output_ports.names[0]
-    assert 'a-1' in bp_comp.output_CIM.output_ports.names[1]
-    assert 'a-2' in bp_comp.output_CIM.output_ports.names[2]
-    assert 'b-0' in bp_comp.output_CIM.output_ports.names[3]
-    assert 'b-1' in bp_comp.output_CIM.output_ports.names[4]
-    assert 'b-2' in bp_comp.output_CIM.output_ports.names[5]
+    for i in range(n_nodes):
+        a_name = "a-{}".format(i)
+        assert a_name in bp_comp.input_CIM.input_ports.names[i]
+        assert a_name in bp_comp.output_CIM.output_ports.names[i]
+        b_name = "b-{}".format(i)
+        assert b_name in bp_comp.input_CIM.input_ports.names[i + n_nodes]
+        assert b_name in bp_comp.output_CIM.output_ports.names[i + n_nodes]
 
     if benchmark.enabled:
-        benchmark(bp_comp.run, input_dict, num_trials=10, bin_execute=mode)
+        benchmark.group = "Necker Cube {}-{}".format(n_nodes, n_time_steps)
+        benchmark(bp_comp.run, input_dict, num_trials=n_time_steps, bin_execute=mode)
 
 
 @pytest.mark.model
 @pytest.mark.benchmark(group="Necker Cube")
@@ -152,7 +158,7 @@ def get_node(percept, node_id):
     pytest.param('PTXExec', marks=[pytest.mark.llvm, pytest.mark.cuda]),
     pytest.param('PTXRun', marks=[pytest.mark.llvm, pytest.mark.cuda]),
 ])
-def test_necker_cube(benchmark, mode):
+def test_vectorized_necker_cube(benchmark, mode):
 
     Build_N_Matrix = np.zeros((16,5))
     Build_N_Matrix[0,:] = [0, 1, 3, 4, 8]
@@ -223,31 +229,20 @@ def test_necker_cube(benchmark, mode):
     )
 
     comp2.add_linear_processing_pathway(pathway = (node3, connect_3_4, node4, connect_4_3, node3))
-    # MODIFIED 4/4/20 NEW:
-    np.random.seed(12345)
-    # MODIFIED 4/4/20 END
     input_dict = {node3: np.random.random((1,16)),
                   node4: np.random.random((1,16))
                   }
 
     result = comp2.run(input_dict, num_trials=10, bin_execute=mode)
     assert np.allclose(result,
-                       # [[ 2636.29181172, -662.53579899, 2637.35386946, -620.15550833,
-                       #    -595.55319772, 2616.74310649, -442.74286574, 2588.4778162 ,
-                       #     725.33941441, -2645.25148476, 570.96811513, -2616.80319979,
-                       #   -2596.82097419, 547.30466563, -2597.99430789, 501.50648114],
-                       #  [ -733.2213593 , 2638.81033464, -578.76439993, 2610.55912376,
-                       #    2590.69244696, -555.19824432, 2591.63200098, -509.58072358,
-                       #   -2618.88711219, 682.65814776, -2620.18294962, 640.09719335,
-                       #    615.39758884, -2599.45663784, 462.67291695, -2570.99427346]])
-                       [[ 753.49687364, 380.1835271 , 526.71129889, 253.30439596,
-                          335.33291717, 796.34470018, 504.94661527, 664.84397208,
-                         -228.29889962, -699.72265243, -395.45414321, -568.29933106,
-                         -837.38658858, -477.94765341, -612.70717468, -348.86306586],
-                        [ 217.19651713, 708.59009834, 384.29837558, 577.37836065,
-                          846.10421744, 466.68904807, 621.40583149, 337.60282732,
-                         -750.45164969, -357.23030678, -523.68504698, -230.35280883,
-                         -312.48416776, -793.679849 , -482.15125099, -661.96753723]])
+                       [[ 2636.29181172, -662.53579899, 2637.35386946, -620.15550833,
+                          -595.55319772, 2616.74310649, -442.74286574, 2588.4778162 ,
+                           725.33941441, -2645.25148476, 570.96811513, -2616.80319979,
+                         -2596.82097419, 547.30466563, -2597.99430789, 501.50648114],
+                        [ -733.2213593 , 2638.81033464, -578.76439993, 2610.55912376,
+                           2590.69244696, -555.19824432, 2591.63200098, -509.58072358,
+                          -2618.88711219, 682.65814776, -2620.18294962, 640.09719335,
+                           615.39758884, -2599.45663784, 462.67291695, -2570.99427346]])
 
     if benchmark.enabled:
         benchmark(comp2.run, input_dict, num_trials=10, bin_execute=mode)
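
Since the commit folds both cube sizes into one parametrized test (ids "3-10" and "8-10") and sets the benchmark group at run time, a single case can be selected by id and mode. The sketch below is not part of the commit; it is a minimal illustration assuming pytest and pytest-benchmark are installed and the command is issued from the repository root (any project-specific conftest options would still apply). The node id, the "-k" expression, and "--benchmark-disable" are standard pytest / pytest-benchmark usage, not additions from this diff.

import pytest

# Run only the 3-node / 10-time-step case of the new test in plain Python mode.
# With --benchmark-disable, benchmark.enabled is False, so only the
# np.testing.assert_allclose checks run and the timing branch is skipped.
exit_code = pytest.main([
    "tests/models/test_bi_percepts.py::test_necker_cube",
    "-k", "Python and 3-10",
    "--benchmark-disable",
])
print("pytest exit code:", exit_code)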
