# test_probmodel.py — forked from PGM-Lab/InferPy
import numpy as np
import pytest
import inferpy as inf
from inferpy import util
from tests import no_raised_exc
def test_build_model():
    """A @inf.probmodel registers its vars, params, and dependency graph."""
    n_samples = 100
    param_shape = (2, 3)

    @inf.probmodel
    def model():
        theta = inf.Parameter(np.zeros(param_shape, dtype=np.float32), name='p')
        with inf.datamodel():
            loc = inf.Normal(theta, 1., name='x')
            inf.Normal(loc, 1., name='y')

    pm = model()

    # the decorated function is kept around as a builder callable
    assert callable(pm.builder)

    # every declared random variable is registered under its name
    for varname in ('x', 'y'):
        assert varname in pm.vars

    # so is the parameter
    assert 'p' in pm.params

    # before any fit/expansion the leading sample dimension is 1
    unexpanded = (1,) + param_shape
    assert tuple(pm.vars['x'].shape.as_list()) == unexpanded
    assert tuple(pm.vars['y'].shape.as_list()) == unexpanded

    # expanding the datamodel replaces that leading 1 with the sample size
    expanded_vars, _ = pm.expand_model(n_samples)
    expanded = (n_samples,) + param_shape
    assert tuple(expanded_vars['x'].shape.as_list()) == expanded
    assert tuple(expanded_vars['y'].shape.as_list()) == expanded

    # variables and parameters all show up in the model graph
    for node in ('x', 'y', 'p'):
        assert node in pm.graph
@pytest.mark.parametrize("data, expected_flow, expected_result", [
    # empty dict -> default plate size of 1
    (dict(), no_raised_exc(), 1),
    # a field that names no model variable is ignored
    (dict(othername=np.ones(100)), no_raised_exc(), 1),
    # one observed variable fixes the plate size
    (dict(x=np.ones(100)), no_raised_exc(), 100),
    # two observed variables agreeing on size
    (dict(x=np.ones(100), y=np.ones(100)), no_raised_exc(), 100),
    # conflicting sizes must raise
    (dict(x=np.ones(100), y=np.ones(150)), pytest.raises(ValueError), None),
])
def test_probmodel_get_plate_size(data, expected_flow, expected_result):
    """get_plate_size infers the plate size from observed data, or raises."""
    @inf.probmodel
    def model():
        theta = inf.Parameter(0., name='p')
        with inf.datamodel():
            loc = inf.Normal(theta, 1., name='x')
            inf.Normal(loc, 1., name='y')

    with expected_flow:
        pm = model()
        assert expected_result == util.iterables.get_plate_size(pm.vars, data)
def test_sample():
    """Sampling the prior yields exactly N draws for each declared variable."""
    @inf.probmodel
    def model():
        theta = inf.Parameter(0., name='p')
        with inf.datamodel():
            loc = inf.Normal(theta, 1., name='x')
            inf.Normal(loc, 1., name='y')

    n_draws = 100
    samples = model().prior().sample(n_draws)

    # only the two random variables appear in the sample dict
    assert set(samples.keys()) == {'x', 'y'}

    # each variable carries one entry per draw
    for varname in ('x', 'y'):
        assert len(samples[varname]) == n_draws
def test_sample_intercept():
    """Sampling with observed data for 'y' still yields N draws per variable."""
    @inf.probmodel
    def model():
        theta = inf.Parameter(0., name='p')
        with inf.datamodel():
            loc = inf.Normal(theta, 1., name='x')
            inf.Normal(loc, 1., name='y')

    n_draws = 10
    observed_y = 1.0
    samples = model().prior(data={'y': observed_y}).sample(n_draws)

    # intercepting 'y' does not change which variables are sampled
    assert set(samples.keys()) == {'x', 'y'}

    # each variable still carries one entry per draw
    for varname in ('x', 'y'):
        assert len(samples[varname]) == n_draws
def test_log_prob():
    """log_prob yields one non-positive log-density per variable; sum_log_prob
    reduces them to a single float32 scalar."""
    @inf.probmodel
    def model():
        p = inf.Parameter(0., name='p')
        with inf.datamodel():
            x = inf.Normal(p, 1., name='x')
            inf.Normal(x, 1., name='y')

    m = model()
    # draw one prior sample to feed sum_log_prob below
    data = m.prior(['x', 'y']).sample()

    logprob_dict = m.prior(['x', 'y']).log_prob()
    varnames = list(logprob_dict.keys())
    assert len(varnames) == 2
    assert 'x' in varnames
    assert 'y' in varnames

    # unit-scale Normal densities never exceed 1, so log-probs are <= 0
    assert logprob_dict['x'] <= 0.0
    assert logprob_dict['y'] <= 0.0

    # sum_log_prob collapses all per-variable log-probs into one float32 number
    assert isinstance(m.prior(data=data).sum_log_prob(), np.float32)