-
Notifications
You must be signed in to change notification settings - Fork 58
/
Copy pathdeep_model.py
69 lines (58 loc) · 2.99 KB
/
deep_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
#!/usr/bin/env python
import sys
import os
import tensorflow as tf
class Model:
    """Feed-forward model over sparse categorical fields.

    Each sparse field is embedded (sum-combined over its sparse ids and
    weights), the per-field embeddings are concatenated, passed through a
    stack of fully-connected ReLU layers, and projected to 2-class logits.
    """

    def __init__(self, embedding_size, sparse_field, hidden_layer):
        """
        Args:
            embedding_size: int, embedding dimension used for every sparse field.
            sparse_field: comma-separated field ids, e.g. "1,3,7"; '' means none.
            hidden_layer: comma-separated hidden-layer sizes, e.g. "256,128";
                '' means no hidden layers.
        """
        self.embedding_size = embedding_size
        # Guard '': ''.split(',') == [''] which would crash int('').
        self.sparse_field = [int(f) for f in sparse_field.split(',')] if sparse_field else []
        # Bug fix: the original only guarded sparse_field; hidden_layer == ''
        # raised ValueError here. Guard it the same way for consistency.
        self.hidden_layer = [int(h) for h in hidden_layer.split(',')] if hidden_layer else []
        # Variable collections populated during graph construction.
        # Bug fix: initialized here so concat() no longer raises
        # AttributeError when called before forward().
        self.embedding = []
        self.hiddenW = []
        self.hiddenB = []

    # sparse embedding and concat all field embedding
    def concat(self, fields, sparse_ids, sparse_vals):
        """Embed each sparse field and concatenate the per-field embeddings.

        Args:
            fields: iterable of int field ids (parallel to sparse_ids/sparse_vals).
            sparse_ids: list of SparseTensor of raw ids, one entry per field.
            sparse_vals: list of SparseTensor of weights, one entry per field.

        Returns:
            Tensor of shape [batch, embedding_size * len(fields)].
        """
        emb = []
        for i, field_id in enumerate(fields):
            # Hash raw ids into [0, 100001): 1 in-vocab entry + 100000 OOV buckets.
            # NOTE(review): mapping_ints is int32 while dtype=tf.int64 is
            # requested — confirm index_table_from_tensor accepts this combo.
            mapping_ints = tf.constant([0])
            table = tf.contrib.lookup.index_table_from_tensor(mapping=mapping_ints, num_oov_buckets=100000, dtype=tf.int64)
            sparse_id_in_this_field = table.lookup(sparse_ids[i])
            with tf.variable_scope("emb_" + str(field_id)):
                # 100002 rows: one spare row beyond the 100001 reachable indices.
                embedding_variable = tf.get_variable(name="emb_" + str(field_id), initializer=tf.truncated_normal([100002, self.embedding_size], stddev=0.1))
                embedding = tf.nn.embedding_lookup_sparse(embedding_variable, sparse_id_in_this_field, sparse_vals[i], combiner="sum")
                emb.append(embedding)
                self.embedding.append(embedding_variable)
        return tf.concat(emb, 1, name='concat_embedding')

    def forward(self, sparse_id, sparse_val):
        '''
        forward graph
        Args:
            sparse_id: list of SparseTensor of raw ids, one per sparse field.
            sparse_val: list of SparseTensor of weights, one per sparse field.
        Returns:
            (logits, all_parameter): [batch, 2] logits tensor and the list of
            all trainable variables (collected for external regularization).
        '''
        with tf.variable_scope("forward"):
            # Reset collections so repeated forward() calls do not accumulate
            # stale variable references.
            self.embedding = []
            self.hiddenW = []
            self.hiddenB = []
            # sparse field embedding
            net = self.concat(self.sparse_field, sparse_id, sparse_val)
            # hidden layers
            for i, hidden_size in enumerate(self.hidden_layer):
                # Input width: concat output for the first layer, previous
                # hidden size afterwards.
                if i == 0:
                    dim = self.embedding_size * len(self.sparse_field)
                else:
                    dim = self.hidden_layer[i - 1]
                weight = tf.get_variable(initializer=tf.truncated_normal([dim, hidden_size], stddev=0.1), name='fully_weight_' + str(i))
                bias = tf.get_variable(initializer=tf.truncated_normal([hidden_size], stddev=0.1), name='fully_bias_' + str(i))
                self.hiddenW.append(weight)
                self.hiddenB.append(bias)
                net = tf.nn.relu(tf.matmul(net, weight) + bias, name='fully_' + str(i))
            # Output projection to 2 classes. Bug fix: with zero hidden layers
            # the original indexed hidden_layer[-1] and crashed; fall back to
            # the concat width so a no-hidden-layer model builds correctly.
            if self.hidden_layer:
                dim = self.hidden_layer[-1]
            else:
                dim = self.embedding_size * len(self.sparse_field)
            self.weight = tf.get_variable(initializer=tf.truncated_normal([dim, 2], stddev=0.1), name='weight_out')
            self.bias = tf.get_variable(initializer=tf.truncated_normal([2], stddev=0.1), name='bias_out')
        with tf.variable_scope("logit"):
            logits = tf.matmul(net, self.weight) + self.bias
        # add regularization
        all_parameter = [self.weight, self.bias] + self.hiddenW + self.hiddenB + self.embedding
        return logits, all_parameter