forked from PGM-Lab/InferPy
-
Notifications
You must be signed in to change notification settings - Fork 0
/
1.py
80 lines (51 loc) · 1.66 KB
/
1.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import numpy as np
import inferpy as inf

# Synthetic training set: two well-separated 2-D Gaussian clusters,
# N/2 points centred at (0, 0) and N/2 centred at (10, 10).
N = 1000
half = int(N / 2)
cluster_a = inf.Normal([0.0, 0.0], scale=1.).sample(half)
cluster_b = inf.Normal([10.0, 10.0], scale=1.).sample(half)
x_train = np.concatenate([cluster_a, cluster_b])
##########
# required packages
import inferpy as inf
import tensorflow as tf
# number of components (dimensionality of the latent variable z)
k = 1
# size of the hidden layer in the NN decoder
d0 = 100
# dimensionality of each observed data point
dx = 2
# number of observations (dataset size)
N = 1000
@inf.probmodel
def nlpca(k, d0, dx, decoder):
    """Non-linear PCA generative model: a latent z is decoded by a neural
    network into the location and scale of the observed variable x.

    Args:
        k: dimensionality of the latent variable z.
        d0: hidden-layer size, forwarded to the decoder network.
        dx: dimensionality of each observation x.
        decoder: callable (z, d0, dx) -> tensor with 2*dx columns; the first
            dx columns are the location of x, the last dx its raw scale.
    """
    with inf.datamodel():
        z = inf.Normal(tf.ones([k]) * 0.5, 1., name="z")  # shape = [N, k]
        output = decoder(z, d0, dx)
        x_loc = output[:, :dx]
        # BUG FIX: a Normal's scale must be positive but otherwise
        # unconstrained. softmax would force the dx scale values to sum
        # to 1 across the feature axis; softplus is the standard
        # positivity link (as in the upstream InferPy example).
        x_scale = tf.nn.softplus(output[:, dx:])
        x = inf.Normal(x_loc, x_scale, name="x")  # shape = [N, dx]
def decoder(z, d0, dx):
    """Two-layer dense decoder mapping z to a tensor with 2*dx columns
    (dx location values followed by dx raw scale values)."""
    hidden = tf.layers.dense(z, d0, activation=tf.nn.relu)
    return tf.layers.dense(hidden, 2 * dx)
# Q-model approximating P
@inf.probmodel
def qmodel(k):
    """Mean-field Gaussian variational approximation for the latent z."""
    with inf.datamodel():
        loc = inf.Parameter(tf.ones([k]) * 0.5, name="qz_loc")
        raw_scale = inf.Parameter(tf.ones([k]), name="qz_scale")
        # softplus keeps the learned scale strictly positive
        scale = tf.math.softplus(raw_scale)
        qz = inf.Normal(loc, scale, name="z")
# Instantiate the generative model with the chosen hyper-parameters.
m = nlpca(k, d0, dx, decoder)

# Configure variational inference and fit the model to the observed data.
VI = inf.inference.VI(qmodel(k), epochs=5000)
m.fit({"x": x_train}, VI)

# Recover and display the learned hidden representation of the data.
hidden_encoding = m.posterior("z")
print(hidden_encoding.sample())
# Alternative decoder built from Keras layers:
def decoder_keras(z, d0, dx):
    """Keras-based decoder: two Dense layers producing 2*dx output columns."""
    hidden_layer = tf.keras.layers.Dense(d0, activation=tf.nn.relu,
                                         name="encoder_h0")
    output_layer = tf.keras.layers.Dense(2 * dx, name="encoder_h1")
    return output_layer(hidden_layer(z))
# Build a second model instance that uses the Keras-based decoder instead.
m = nlpca(k, d0, dx, decoder_keras)