
Commit

update examples
kojima-r committed Sep 12, 2019
1 parent f01c5cc commit 4c206f8
Showing 9 changed files with 152 additions and 193 deletions.
2 changes: 1 addition & 1 deletion example_config/deepchem.json
@@ -1,5 +1,5 @@
 {
-    "model.py": "example_model.model_deepchem",
+    "model.py": "example_model.model_deepchem:GCN",
     "save_result_test": "./result/test.deepchem.csv",
     "save_result_train": "./result/train.deepchem.csv",
     "load_model": "./model/model.sample_deepchem.ckpt",
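The model entry in each example config now names both the module and the class to load, in a "module:ClassName" form, instead of just the module. A minimal sketch of how such a value can be resolved (an illustration only; kgcn's actual loader may differ):

    import importlib

    def resolve_model(spec):
        # "example_model.model_deepchem:GCN" -> the GCN class inside that module
        module_name, class_name = spec.split(":")
        module = importlib.import_module(module_name)
        return getattr(module, class_name)

    gcn_class = resolve_model("example_model.model_deepchem:GCN")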
2 changes: 1 addition & 1 deletion example_config/multimodal.json
@@ -1,5 +1,5 @@
 {
-    "model.py": "example_model.model_multimodal",
+    "model.py": "example_model.model_multimodal:GCN",
     "save_result_test": "./result/test.multimodal.csv",
     "save_result_train": "./result/train.multimodal.csv",
     "load_model": "./model/model.sample_multimodal.ckpt",
4 changes: 2 additions & 2 deletions example_config/synth.json
@@ -1,5 +1,5 @@
 {
-    "model.py": "example_model.model",
+    "model.py": "example_model.model:GCN",
     "load_model": "./model/synth.last.ckpt",
     "validation_data_rate": 0.2,
     "epoch": 10,
@@ -14,7 +14,7 @@
     "patience": 0,
     "dataset": "example_jbl/synthetic.jbl",
     "split_adj_flag": false,
-    "plot_path": "result",
+    "plot_path": "result/",
     "make_plot": true,
     "shuffle_data": true,
     "save_result_test": "./result/synth_test.csv",
Binary file modified example_jbl/sample.jbl
Binary file modified example_jbl/synthetic.jbl
1 change: 0 additions & 1 deletion example_model/model.py
@@ -22,7 +22,6 @@ def build_model(self,placeholders,info,config,batch_size):
         dropout_rate=placeholders["dropout_rate"]

         layer=features
-        input_dim=info.feature_dim
         layer=kgcn.layers.GraphConv(50,adj_channel_num)(layer,adj=in_adjs)
         layer=tf.sigmoid(layer)
         layer=kgcn.layers.GraphConv(50,adj_channel_num)(layer,adj=in_adjs)
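The deleted input_dim=info.feature_dim in example_model/model.py looks like a leftover from the old layer API: nothing below it reads the variable, since the kgcn layer wrappers are constructed from the output width and adjacency channel count alone (a reading of the diff, not a statement from the commit):

    # Only the output width (50) and the number of adjacency channels are passed;
    # the input dimension is presumably taken from the tensor given at call time.
    layer = kgcn.layers.GraphConv(50, adj_channel_num)(layer, adj=in_adjs)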
150 changes: 63 additions & 87 deletions example_model/model_deepchem.py
@@ -1,98 +1,74 @@
 import tensorflow as tf
 import numpy as np
 import joblib
-import layers
+import kgcn.layers
+from kgcn.default_model import DefaultModel
 import tensorflow.contrib.keras as K

-def build_placeholders(info,batch_size=4,adj_channel_num=1,embedding_dim=10):
-    placeholders = {
-        'adjs':[[tf.sparse_placeholder(tf.float32,name="adj_"+str(a)+"_"+str(b)) for a in range(adj_channel_num)] for b in range(batch_size)],
-        'nodes': tf.placeholder(tf.int32, shape=(batch_size,info.graph_node_num),name="node"),
-        'labels': tf.placeholder(tf.float32, shape=(batch_size,info.label_dim),name="label"),
-        'mask': tf.placeholder(tf.float32, shape=(batch_size,),name="mask"),
-        'dropout_rate': tf.placeholder(tf.float32, name="dropout_rate"),
-    }
-    if info.feature_enabled:
-        placeholders['features']=tf.placeholder(tf.float32, shape=(batch_size,info.graph_node_num,info.feature_dim),name="feature")
-    else:
-        placeholders['features']=None
-    return placeholders
+class GCN(DefaultModel):
+    def build_placeholders(self,info,config,batch_size):
+        # input data types (placeholders) of this neural network
+        return self.get_placeholders(info,config,batch_size,
+            ['adjs','nodes','labels','mask','dropout_rate',
+             'enabled_node_nums','is_train','features'])

+    def build_model(self,placeholders,info,config,batch_size):
+        adj_channel_num=info.adj_channel_num
+        in_adjs=placeholders["adjs"]
+        features=placeholders["features"]
+        in_nodes=placeholders["nodes"]
+        labels=placeholders["labels"]
+        mask=placeholders["mask"]
+        enabled_node_nums=placeholders["enabled_node_nums"]
+        is_train=placeholders["is_train"]
+        dropout_rate=placeholders["dropout_rate"]

-def build_model(placeholders,info,batch_size=4,adj_channel_num=1,embedding_dim=10):
-    in_adjs=placeholders["adjs"]
-    features=placeholders["features"]
-    in_nodes=placeholders["nodes"]
-    labels=placeholders["labels"]
-    mask=placeholders["mask"]
-    dropout_rate=placeholders["dropout_rate"]
-    wd_b=None
-    wd_w=0.1
+        layer=features
+        layer=kgcn.layers.GraphConv(64,adj_channel_num)(layer,adj=in_adjs)
+        layer=tf.nn.relu(layer)
+        layer=kgcn.layers.GraphMaxPooling(adj_channel_num)(layer,adj=in_adjs)
+        layer=kgcn.layers.GraphBatchNormalization()(
+            layer,max_node_num=info.graph_node_num,
+            enabled_node_nums=enabled_node_nums)
+        layer=K.layers.Dropout(dropout_rate)(layer)

-    layer=features
-    input_dim=info.feature_dim
-    if features is None:
-        layer=emmbeding_layer("embeding",in_nodes,info.all_node_num,embedding_dim,init_params_flag=True,params=None)
-        input_dim=embedding_dim
-    # layer: batch_size x graph_node_num x dim
-    with tf.variable_scope("gcn_1") as scope:
-        output_dim=64
-        layer = layers.gcn_layer("graph_conv",layer,in_adjs,input_dim,output_dim,
-            adj_channel_num=adj_channel_num,node_num=info.graph_node_num,batch_size=batch_size)
-        layer = tf.nn.relu(layer)
-        input_dim=output_dim
-    with tf.variable_scope("pooling_1") as scope:
-        layer = layers.graph_max_pooling_layer(layer,in_adjs, input_dim,
-            adj_channel_num=adj_channel_num,node_num=info.graph_node_num,batch_size=batch_size)
-    with tf.variable_scope("bn_1") as scope:
-        layer=layers.graph_batch_normalization("bn",layer,input_dim,info.graph_node_num,init_params_flag=True,params=None)
-    with tf.variable_scope("do_1") as scope:
-        layer=layers.graph_dropout_layer(layer,info.graph_node_num,input_dim,dropout_rate)
+        layer=kgcn.layers.GraphConv(128,adj_channel_num)(layer,adj=in_adjs)
+        layer=tf.nn.relu(layer)
+        layer=kgcn.layers.GraphMaxPooling(adj_channel_num)(layer,adj=in_adjs)
+        layer=kgcn.layers.GraphBatchNormalization()(
+            layer,max_node_num=info.graph_node_num,
+            enabled_node_nums=enabled_node_nums)
+        layer=K.layers.Dropout(dropout_rate)(layer)

-    with tf.variable_scope("gcn_2") as scope:
-        output_dim=128
-        layer = layers.gcn_layer("graph_conv",layer,in_adjs,input_dim,output_dim,adj_channel_num=adj_channel_num,node_num=info.graph_node_num,batch_size=batch_size)
-        layer = tf.sigmoid(layer)
-        input_dim=output_dim
-    with tf.variable_scope("pooling_2") as scope:
-        layer = layers.graph_max_pooling_layer(layer,in_adjs, input_dim,
-            adj_channel_num=adj_channel_num,node_num=info.graph_node_num,batch_size=batch_size)
-    with tf.variable_scope("bn_2") as scope:
-        layer=layers.graph_batch_normalization("bn",layer,input_dim,info.graph_node_num,init_params_flag=True,params=None)
-    with tf.variable_scope("do_2") as scope:
-        layer=layers.graph_dropout_layer(layer,info.graph_node_num,input_dim,dropout_rate)
+        layer=kgcn.layers.GraphConv(128,adj_channel_num)(layer,adj=in_adjs)
+        layer=tf.nn.relu(layer)
+        layer=kgcn.layers.GraphMaxPooling(adj_channel_num)(layer,adj=in_adjs)
+        layer=kgcn.layers.GraphBatchNormalization()(
+            layer,max_node_num=info.graph_node_num,
+            enabled_node_nums=enabled_node_nums)
+        layer=K.layers.Dropout(dropout_rate)(layer)

-    with tf.variable_scope("gcn_3") as scope:
-        output_dim=128
-        layer = layers.gcn_layer("graph_conv",layer,in_adjs,input_dim,output_dim,adj_channel_num=adj_channel_num,node_num=info.graph_node_num,batch_size=batch_size)
-        layer = tf.sigmoid(layer)
-        input_dim=output_dim
-    with tf.variable_scope("pooling_3") as scope:
-        layer = layers.graph_max_pooling_layer(layer,in_adjs, input_dim,
-            adj_channel_num=adj_channel_num,node_num=info.graph_node_num,batch_size=batch_size)
-    with tf.variable_scope("bn_3") as scope:
-        layer=layers.graph_batch_normalization("bn",layer,input_dim,info.graph_node_num,init_params_flag=True,params=None)
-    with tf.variable_scope("do_3") as scope:
-        layer=layers.graph_dropout_layer(layer,info.graph_node_num,input_dim,dropout_rate)
+        layer=kgcn.layers.GraphConv(64,adj_channel_num)(layer,adj=in_adjs)
+        layer=tf.nn.relu(layer)
+        layer=kgcn.layers.GraphMaxPooling(adj_channel_num)(layer,adj=in_adjs)
+        layer=kgcn.layers.GraphBatchNormalization()(
+            layer,max_node_num=info.graph_node_num,
+            enabled_node_nums=enabled_node_nums)
+        layer=K.layers.Dropout(dropout_rate)(layer)

+        layer=kgcn.layers.GraphDense(64)(layer)
+        layer=tf.sigmoid(layer)

-    with tf.variable_scope("fc4") as scope:
-        output_dim=64
-        layer = layers.graph_fc_layer("fc",layer,input_dim, output_dim,info.graph_node_num, init_params_flag=True,params=None,wd_w=wd_w,wd_b=wd_b,activate=tf.sigmoid,with_bn=False)
-        input_dim=output_dim
-    with tf.variable_scope("gathering") as scope:
-        layer = layers.graph_gathering_layer(layer)
-    with tf.variable_scope("fc5") as scope:
-        output_dim=2
-        model = layers.fc_layer("fc3",layer,input_dim, output_dim, init_params_flag=True,params=None,wd_w=wd_w,wd_b=wd_b,activate=None,with_bn=False)
+        layer=kgcn.layers.GraphGather()(layer)
+        layer=K.layers.Dense(2)(layer)
+        prediction=tf.nn.softmax(layer,name="output")
+        ###
+        # computing cost and metrics
+        cost=mask*tf.nn.softmax_cross_entropy_with_logits(labels=labels,logits=layer)
+        cost_opt=tf.reduce_mean(cost)

-    prediction=tf.nn.softmax(model)
-    # computing cost and metrics
-    cost=mask*tf.nn.softmax_cross_entropy_with_logits(labels=labels,logits=model)
-    cost_opt=tf.reduce_mean(cost)
+        metrics={}
+        cost_sum=tf.reduce_sum(cost)

-    metrics={}
-    cost_sum=tf.reduce_sum(cost)

-    correct_count=mask*tf.cast(tf.equal(tf.argmax(prediction,1), tf.argmax(labels,1)),tf.float32)
-    metrics["correct_count"]=tf.reduce_sum(correct_count)
-    return model,prediction,cost_opt,cost_sum,metrics
+        correct_count=mask*tf.cast(tf.equal(tf.argmax(prediction,1), tf.argmax(labels,1)),tf.float32)
+        metrics["correct_count"]=tf.reduce_sum(correct_count)
+        return layer,prediction,cost_opt,cost_sum,metrics
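In the rewritten model_deepchem.py the same five-step unit repeats four times with widths 64, 128, 128 and 64: GraphConv, ReLU, graph max-pooling, graph batch normalization, dropout. A compact way to read that structure (a hypothetical refactoring for illustration; the committed file keeps the blocks written out):

    import tensorflow as tf
    import kgcn.layers
    import tensorflow.contrib.keras as K

    def conv_block(layer, width, in_adjs, adj_channel_num, info,
                   enabled_node_nums, dropout_rate):
        # One GraphConv -> ReLU -> max-pool -> batch-norm -> dropout unit,
        # mirroring the block that appears four times in build_model.
        layer = kgcn.layers.GraphConv(width, adj_channel_num)(layer, adj=in_adjs)
        layer = tf.nn.relu(layer)
        layer = kgcn.layers.GraphMaxPooling(adj_channel_num)(layer, adj=in_adjs)
        layer = kgcn.layers.GraphBatchNormalization()(
            layer, max_node_num=info.graph_node_num,
            enabled_node_nums=enabled_node_nums)
        return K.layers.Dropout(dropout_rate)(layer)

    # Inside build_model the stacked blocks would then read:
    #     for width in (64, 128, 128, 64):
    #         layer = conv_block(layer, width, in_adjs, adj_channel_num,
    #                            info, enabled_node_nums, dropout_rate)
    # followed by GraphDense(64), sigmoid, GraphGather and a Dense(2) readout.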

184 changes: 83 additions & 101 deletions example_model/model_multimodal.py
@@ -1,112 +1,94 @@
+from kgcn.default_model import DefaultModel
 import tensorflow as tf
 import numpy as np
 import joblib
-import layers
-from keras.layers import *
+import kgcn.layers
 import tensorflow.contrib.keras as K

-def build_placeholders(info,config,batch_size=4):
-    adj_channel_num=info.adj_channel_num
-    placeholders = {
-        'adjs':[[tf.sparse_placeholder(tf.float32,name="adj_"+str(a)+"_"+str(b)) for a in range(adj_channel_num)] for b in range(batch_size)],
-        'nodes': tf.placeholder(tf.int32, shape=(batch_size,info.graph_node_num),name="node"),
-        'labels': tf.placeholder(tf.float32, shape=(batch_size,info.label_dim),name="label"),
-        'mask': tf.placeholder(tf.float32, shape=(batch_size,),name="mask"),
-        'dropout_rate': tf.placeholder(tf.float32, name="dropout_rate"),
-        'sequences': tf.placeholder(tf.int32,shape=(batch_size,info.sequence_max_length),name="sequences"),
-        'sequences_len': tf.placeholder(tf.int32,shape=(batch_size,2), name="sequences_len"),
-        'is_train': tf.placeholder(tf.bool, name="is_train"),
-        'enabled_node_nums': tf.placeholder(tf.int32, shape=(batch_size,), name="enabled_node_nums"),
-    }
-    if info.feature_enabled:
-        placeholders['features']=tf.placeholder(tf.float32, shape=(batch_size,info.graph_node_num,info.feature_dim),name="feature")
-    else:
-        placeholders['features']=None
-    return placeholders
+class GCN(DefaultModel):
+    def build_placeholders(self,info,config,batch_size):
+        # input data types (placeholders) of this neural network
+        return self.get_placeholders(info,config,batch_size,
+            ['adjs','nodes','labels','mask','dropout_rate',
+             'enabled_node_nums','is_train','features',
+             'sequences','sequences_len'])

+    def build_model(self,placeholders,info,config,batch_size):
+        adj_channel_num=info.adj_channel_num
+        in_adjs=placeholders["adjs"]
+        features=placeholders["features"]
+        in_nodes=placeholders["nodes"]
+        labels=placeholders["labels"]
+        mask=placeholders["mask"]
+        enabled_node_nums=placeholders["enabled_node_nums"]
+        is_train=placeholders["is_train"]
+        dropout_rate=placeholders["dropout_rate"]
+        sequences=placeholders["sequences"]
+        sequences_len=placeholders["sequences_len"]
+        ###
+        ### Graph part
+        ###
+        with tf.variable_scope("seq_nn") as scope_part:
+            layer=features
+            input_dim=info.feature_dim
+            # layer: batch_size x graph_node_num x dim
+            layer=kgcn.layers.GraphConv(50,adj_channel_num)(layer,adj=in_adjs)
+            layer=tf.sigmoid(layer)
+            layer=kgcn.layers.GraphConv(50,adj_channel_num)(layer,adj=in_adjs)
+            layer=tf.sigmoid(layer)
+            layer=kgcn.layers.GraphConv(50,adj_channel_num)(layer,adj=in_adjs)
+            layer=kgcn.layers.GraphMaxPooling(adj_channel_num)(layer,adj=in_adjs)
+            layer=kgcn.layers.GraphBatchNormalization()(layer,
+                max_node_num=info.graph_node_num,enabled_node_nums=enabled_node_nums)
+            layer=tf.sigmoid(layer)
+            layer=K.layers.Dropout(dropout_rate)(layer)
+            layer=kgcn.layers.GraphDense(50)(layer)
+            layer=tf.sigmoid(layer)
+            layer=kgcn.layers.GraphGather()(layer)
+            graph_output_layer=layer
+            graph_output_layer_dim=50

-def build_model(placeholders,info,config,batch_size=4):
-    adj_channel_num=info.adj_channel_num
-    embedding_dim=config["embedding_dim"]
-    in_adjs=placeholders["adjs"]
-    features=placeholders["features"]
-    sequences=placeholders["sequences"]
-    sequences_len=placeholders["sequences_len"]
-    in_nodes=placeholders["nodes"]
-    labels=placeholders["labels"]
-    mask=placeholders["mask"]
-    dropout_rate=placeholders["dropout_rate"]
-    is_train=placeholders["is_train"]
-    enabled_node_nums=placeholders["enabled_node_nums"]
+        ###
+        ### Sequence part
+        ###

-    ###
-    ### Graph part
-    ###
-    with tf.variable_scope("seq_nn") as scope_part:
-        layer=features
-        input_dim=info.feature_dim
-        if features is None:
-            layer=K.layers.Embedding(info.all_node_num,embedding_dim)(in_nodes)
-            input_dim=embedding_dim
-        # layer: batch_size x graph_node_num x dim
-        layer=layers.GraphConv(50,adj_channel_num)(layer,adj=in_adjs)
-        layer=tf.sigmoid(layer)
-        layer=layers.GraphConv(50,adj_channel_num)(layer,adj=in_adjs)
-        layer=tf.sigmoid(layer)
-        layer=layers.GraphConv(50,adj_channel_num)(layer,adj=in_adjs)
-        layer=layers.GraphMaxPooling(adj_channel_num)(layer,adj=in_adjs)
-        layer=layers.GraphBatchNormalization()(layer,
-            max_node_num=info.graph_node_num,enabled_node_nums=enabled_node_nums)
-        layer=tf.sigmoid(layer)
-        layer=K.layers.Dropout(dropout_rate)(layer)
-        layer=layers.GraphDense(50)(layer)
-        layer=tf.sigmoid(layer)
-        layer=layers.GraphGather()(layer)
-        graph_output_layer=layer
-        graph_output_layer_dim=50
+        with tf.variable_scope("seq_nn") as scope_part:
+            # Embedding
+            embedding_dim=10
+            layer=K.layers.Embedding(info.sequence_symbol_num,embedding_dim)(sequences)
+            # CNN + Pooling
+            stride = 4
+            layer=K.layers.Conv1D(50,stride,padding="same", activation='relu')(layer)
+            layer=K.layers.MaxPooling1D(stride)(layer)
+            # LSTM 1
+            output_dim=32
+            layer=K.layers.LSTM(output_dim,return_sequences=True ,go_backwards=True)(layer)
+            # LSTM 2
+            layer=K.layers.LSTM(output_dim,return_sequences=False,go_backwards=True)(layer)
+            #layer = tf.squeeze(layer)
+            seq_output_layer=layer
+            seq_output_layer_dim=layer.shape[1]
+        ###
+        ### Shared part
+        ###

-    ###
-    ### Sequence part
-    ###
+        # 32dim (Graph part)+ 32 dim (Sequence part)
+        layer=tf.concat([seq_output_layer,graph_output_layer],axis=1)
+        input_dim=seq_output_layer_dim+graph_output_layer_dim
+        with tf.variable_scope("shared_nn") as scope_part:
+            layer=K.layers.Dense(52)(layer)
+            layer=K.layers.BatchNormalization()(layer)
+            layer=tf.nn.relu(layer)

-    with tf.variable_scope("seq_nn") as scope_part:
-        # Embedding
-        embedding_dim=10
-        layer=K.layers.Embedding(info.sequence_symbol_num,embedding_dim)(sequences)
-        # CNN + Pooling
-        stride = 4
-        layer=K.layers.Conv1D(50,stride,padding="same", activation='relu')(layer)
-        layer=K.layers.MaxPooling1D(stride)(layer)
-        # LSTM 1
-        output_dim=32
-        layer=K.layers.LSTM(output_dim,return_sequences=True ,go_backwards=True)(layer)
-        # LSTM 2
-        layer=K.layers.LSTM(output_dim,return_sequences=False,go_backwards=True)(layer)
-        #layer = tf.squeeze(layer)
-        seq_output_layer=layer
-        seq_output_layer_dim=layer.shape[1]
-    ###
-    ### Shared part
-    ###
+        layer=K.layers.Dense(info.label_dim)(layer)

-    # 32dim (Graph part)+ 32 dim (Sequence part)
-    layer=tf.concat([seq_output_layer,graph_output_layer],axis=1)
-    input_dim=seq_output_layer_dim+graph_output_layer_dim
-    with tf.variable_scope("shared_nn") as scope_part:
-        layer=K.layers.Dense(52)(layer)
-        layer=K.layers.BatchNormalization()(layer)
-        layer=tf.nn.relu(layer)
+        prediction=tf.nn.softmax(layer)
+        # computing cost and metrics
+        cost=mask*tf.nn.softmax_cross_entropy_with_logits(labels=labels,logits=layer)
+        cost_opt=tf.reduce_mean(cost)

-    layer=K.layers.Dense(info.label_dim)(layer)
+        metrics={}
+        cost_sum=tf.reduce_sum(cost)

-    prediction=tf.nn.softmax(layer)
-    # computing cost and metrics
-    cost=mask*tf.nn.softmax_cross_entropy_with_logits(labels=labels,logits=layer)
-    cost_opt=tf.reduce_mean(cost)

-    metrics={}
-    cost_sum=tf.reduce_sum(cost)

+        correct_count=mask*tf.cast(tf.equal(tf.argmax(prediction,1), tf.argmax(labels,1)),tf.float32)
+        metrics["correct_count"]=tf.reduce_sum(correct_count)
+        return layer,prediction,cost_opt,cost_sum,metrics
-    correct_count=mask*tf.cast(tf.equal(tf.argmax(prediction,1), tf.argmax(labels,1)),tf.float32)
-    metrics["correct_count"]=tf.reduce_sum(correct_count)
-    return layer,prediction,cost_opt,cost_sum,metrics
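All three rewritten example models end with the same masked classification head: the per-example cross-entropy is multiplied by mask, so padding entries in a fixed-size batch (mask 0) contribute nothing to the loss or to the accuracy count. The pattern in isolation, with stand-in placeholder tensors (not code from the commit):

    import tensorflow as tf

    batch_size, label_dim = 4, 2
    logits = tf.placeholder(tf.float32, shape=(batch_size, label_dim))
    labels = tf.placeholder(tf.float32, shape=(batch_size, label_dim))
    mask = tf.placeholder(tf.float32, shape=(batch_size,))  # 1.0 for real rows, 0.0 for padding

    prediction = tf.nn.softmax(logits)
    # Per-example cross-entropy, zeroed out for padded rows.
    cost = mask * tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
    cost_opt = tf.reduce_mean(cost)  # objective handed to the optimizer
    cost_sum = tf.reduce_sum(cost)   # total loss reported as a metric
    # Masked count of correct predictions.
    correct = mask * tf.cast(tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1)), tf.float32)
    correct_count = tf.reduce_sum(correct)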
