# sandbox.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import random

import tensorflow as tf
import tensorflow_fold.public.blocks as td

# An InteractiveSession installs itself as the default session, so td blocks
# can be evaluated directly with block.eval().
sess = tf.InteractiveSession()

print("[Sandbox] start. ==========")
# Map and Reduce
print("Map Reduce Processing sample. ==========")
print(td.Map(td.Scalar()))
# Map each input to a scalar, then fold the sequence down to one value with
# pairwise tf.multiply: 1 * 2 * ... * 9 = 362880.
print((td.Map(td.Scalar()) >> td.Reduce(td.Function(tf.multiply))).eval(range(1, 10)))
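# As a further illustration (an addition, not part of the original sample):
# the same Map/Reduce pattern with tf.add computes the sum
# 1 + 2 + ... + 9 = 45 instead of the product.
print((td.Map(td.Scalar()) >> td.Reduce(td.Function(tf.add))).eval(range(1, 10)))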
# simple fully connected (fc) network
def reduce_net_block():
    net_block = (td.Concat()
                 >> td.FC(20)
                 >> td.FC(1, activation=None)
                 >> td.Function(lambda xs: tf.squeeze(xs, axis=1)))
    return td.Map(td.Scalar()) >> td.Reduce(net_block)
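# How reduce_net_block works (added commentary): td.Reduce applies net_block
# to the sequence pairwise, so net_block always sees a pair of scalars.
# Concat merges that pair into a length-2 vector, the two FC layers map it to
# a single unit, and tf.squeeze drops the trailing dimension so the output is
# again a scalar that can feed the next reduction step.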
# generate a random (example data, result) pair
def random_example(fn):
    length = random.randrange(1, 10)
    data = [random.uniform(0, 1) for _ in range(length)]
    result = fn(data)
    return data, result
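# Note (added commentary): each call produces between 1 and 9 values drawn
# uniformly from [0, 1], so examples vary in length from call to call; this
# is exactly the variable-length input that Fold's dynamic batching handles.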
# indeterminate data length
print("Indeterminate data sample. function() is sum ==========")
print(random_example(sum))
print(random_example(sum))
print(random_example(sum))
print(random_example(sum))
# another function
print("Indeterminate data sample. function() is min ==========")
print(random_example(min))
print(random_example(min))
print(random_example(min))
print(random_example(min))
# train the simple fc network on indeterminate-length data
print("Training sum_block using simple fc network with indeterminate data. ==========")
def train(fn, batch_size=100):
    net_block = reduce_net_block()
    compiler = td.Compiler.create((net_block, td.Scalar()))
    # The compiler wraps two blocks, so it exposes one output tensor per
    # block: y is the network prediction, y_ is the target scalar.
    y, y_ = compiler.output_tensors
    loss = tf.nn.l2_loss(y - y_)
    train_op = tf.train.AdamOptimizer().minimize(loss)
    sess.run(tf.global_variables_initializer())
    # A fixed validation set of 1000 examples, reused at every checkpoint.
    validation_fd = compiler.build_feed_dict(random_example(fn) for _ in range(1000))
    for i in range(2000):
        sess.run(train_op, compiler.build_feed_dict(random_example(fn) for _ in range(batch_size)))
        if i % 100 == 0:
            print("step %d: loss %f" % (i, sess.run(loss, validation_fd)))
    return net_block
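# Note (added commentary): td.Compiler.create turns the block pair into
# ordinary TensorFlow tensors once, and build_feed_dict then packs a batch of
# variable-length examples into a single feed dict, so plain sess.run
# training works despite the varying input lengths.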
# execute train
sum_block = train(sum)
# execute evaluation
print("evaluate trained sum_block. ==========")
print(sum_block.eval([1, 1]))
print(sum_block.eval([1, 8]))
# Training examples only contain values in [0, 1], so these inputs probe how
# well the block extrapolates; the further from the training range
# (e.g. 1000), the worse the estimate.
print(sum_block.eval([1, 1000]))
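# A hedged extension (added, not in the original script): the same pipeline
# learns other reductions, e.g. min. train() reruns
# tf.global_variables_initializer(), which resets sum_block's weights, so
# this only comes after sum_block has been evaluated above.
print("Training and evaluating min_block. ==========")
min_block = train(min)
print(min_block.eval([0.3, 0.9, 0.1]))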
print("[Sandbox] finish. ==========")