Standardized model names
Yauheni Selivonchyk authored and Yauheni Selivonchyk committed Jan 27, 2017
1 parent 39e3c9f commit 286fd14
Showing 7 changed files with 71 additions and 17 deletions.
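This commit drops the underscore style from the model modules and classes (DCIGN_model becomes DCIGNModel, IGN_model becomes IGNModel, FF_model becomes FullyConnected in FullyConnectedModel.py) and adds the new ConvModel. A minimal import sketch under the new naming, assuming the scripts are run from the repository root and that the modules can be imported side by side; the instantiation line is illustrative only:

import ConvModel              # new module added by this commit
import DCIGNModel             # was: import DCIGN_model
import IGNModel               # was: import IGN_model
import FullyConnectedModel    # was: import FF_model

model = DCIGNModel.DCIGNModel()   # was: DCIGN_model.DCIGN_model()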
49 changes: 49 additions & 0 deletions ConvModel.py
@@ -0,0 +1,49 @@
"""Doom AE with dropout. """

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import prettytensor as pt
import DropoutModel


FLAGS = tf.app.flags.FLAGS


class ConvModel(DropoutModel.DropoutModel):
def __init__(self):
super(ConvModel, self).__init__()
self.model_id = 'conv'

def encoder(self, input_tensor):
print('Convolutional encoder')
template = (pt.wrap(input_tensor)
            .conv2d(5, 32, stride=2)              # 5x5 kernels, 32 maps, downsample 2x
            .conv2d(5, 64, stride=2)              # 5x5 kernels, 64 maps, downsample 2x
            .conv2d(5, 128, edges='VALID')        # 5x5 kernels, 128 maps, no padding
            .dropout(FLAGS.dropout)
            .flatten()
            .fully_connected(self.layer_narrow))  # bottleneck layer
return template

# def decoder(self, input):
# return (pt.wrap(input).
# reshape([FLAGS.batch_size, 1, 1, FLAGS.hidden_size]).
# deconv2d(3, 128, edges='VALID').
# deconv2d(5, 64, edges='VALID').
# deconv2d(5, 32, stride=2).
# deconv2d(5, 1, stride=2, activation_fn=tf.nn.sigmoid).
# flatten()).tensor

def get_meta(self, meta=None):
meta = super(ConvModel, self).get_meta(meta=meta)
return meta

# def load_meta(self, save_path):
# meta = super(ConvModel, self).load_meta()

if __name__ == '__main__':
model = ConvModel()
model.set_layer_sizes([500, 12, 500])
model.train(100)
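For context, a standalone sketch of the PrettyTensor chaining style the encoder above relies on; the input shape, dropout keep probability, and bottleneck width here are illustrative assumptions, not values from this repository:

import tensorflow as tf
import prettytensor as pt

images = tf.placeholder(tf.float32, [20, 28, 28, 1])   # assumed image batch shape
encoding = (pt.wrap(images)                             # wrap the tensor into a pt chain
            .conv2d(5, 32, stride=2)                    # 5x5 kernels, 32 maps, stride 2
            .conv2d(5, 64, stride=2)
            .dropout(0.9)                               # assumed keep probability
            .flatten()
            .fully_connected(12))                       # assumed bottleneck width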
6 changes: 3 additions & 3 deletions DCIGN_model.py → DCIGNModel.py
@@ -18,14 +18,14 @@
import deconv
from tensorflow.python.ops import gradients
from prettytensor.tutorial import data_utils
import IGN_model
import IGNModel

FLAGS = tf.app.flags.FLAGS

DEV = False


class DCIGN_model(IGN_model.IGN_model):
class DCIGNModel(IGNModel.IGNModel):
model_id = 'dcign'

def _build_encoder(self):
@@ -104,7 +104,7 @@ def parse_params():
FLAGS.save_encodings_every = 2


model = DCIGN_model()
model = DCIGNModel()
args = dict([arg.split('=', maxsplit=1) for arg in sys.argv[1:]])
if len(args) == 0:
global DEV
18 changes: 11 additions & 7 deletions FF_model.py → FullyConnectedModel.py
@@ -28,7 +28,7 @@ def _get_stats_template():
}


class FF_model(Model.Model):
class FullyConnected(Model.Model):
model_id = 'do'
decoder_scope = 'dec'
encoder_scope = 'enc'
@@ -71,12 +71,12 @@ def get_layer_info(self):
return self.layers

def get_meta(self, meta=None):
meta = super(FF_model, self).get_meta(meta=meta)
meta = super(FullyConnected, self).get_meta(meta=meta)
# meta['seq'] = FLAGS.stride
return meta

def load_meta(self, save_path):
meta = super(FF_model, self).load_meta(save_path)
meta = super(FullyConnected, self).load_meta(save_path)
self._weight_init = meta['init']
self._optimizer = tf.train.AdadeltaOptimizer \
if 'Adam' in meta['opt'] \
@@ -111,9 +111,6 @@ def _build_encoder(self):
for i in range(self.layer_narrow + 1):
size, desc = self.layers[i], 'enc_hidden_%d' % i
self._encode = self._encode.fully_connected(size, name=desc)
if i == self.layer_narrow-1 or i == self.layer_narrow-2:
ut.print_info('Dropout. layer:%d, layer_size:%d, DO_value:%f' % (i, self.layers[i], 1.0-FLAGS.dropout))
self._encode = self._encode.dropout(1.0-FLAGS.dropout)

def _build_decoder(self, weight_init=tf.truncated_normal):
narrow, layers = self.layers[self.layer_narrow], self.layers[self.layer_narrow+1:]
@@ -125,6 +122,9 @@ def _build_decoder(self, weight_init=tf.truncated_normal):
start = self._decode if i != 0 else self._encode
self._decode = start.fully_connected(size, name='enc_hidden_%d' % i)

self._decode = self._decode.dropout(1.0 - FLAGS.dropout)
ut.print_info('Dropout applied to the last layer of the network: %f' % (1. - FLAGS.dropout))

self._decode = (self._decode
.fully_connected(np.prod(self._image_shape), init=weight_init, name='output')
.reshape(self._batch_shape))
@@ -139,6 +139,7 @@ def fetch_datasets(self, activation_func_bounds):
self.dataset = inp.read_ds_zip(FLAGS.input_path)
if DEV:
self.dataset = self.dataset[:FLAGS.batch_size*5]
print('Dataset cropped')

shape = list(self.dataset.shape)
FLAGS.epoch_size = int(shape[0] / FLAGS.batch_size)
@@ -150,6 +151,8 @@
self._image_shape = list(self.dataset.shape)[1:]

self.test_set = inp.read_ds_zip(FLAGS.test_path)
if DEV:
self.test_set = self.test_set[:FLAGS.batch_size*2]
test_max = int(FLAGS.test_max) if FLAGS.test_max >= 1 else int(FLAGS.test_max*len(self.test_set))
self.test_set = self.test_set[0:test_max]
self.test_set = inp.rescale_ds(self.test_set, self._activation.min, self._activation.max)
@@ -229,8 +232,9 @@ def _concatenate(x, y, take=None):
if __name__ == '__main__':
# FLAGS.load_from_checkpoint = './tmp/doom_bs__act|sigmoid__bs|20__h|500|5|500__init|na__inp|cbd4__lr|0.0004__opt|AO'
import sys
print(tf.__version__)

model = FF_model()
model = FullyConnected()
args = dict([arg.split('=', maxsplit=1) for arg in sys.argv[1:]])
print(args)
if len(args) <= 1:
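The __main__ blocks in these model files all parse command-line overrides as key=value pairs. A self-contained illustration of that pattern; the argument names shown are hypothetical, and how they map onto FLAGS is not implied here:

import sys

# e.g. `python FullyConnectedModel.py input_path=data.tar.gz max_epochs=50`
args = dict([arg.split('=', maxsplit=1) for arg in sys.argv[1:]])
print(args)  # {'input_path': 'data.tar.gz', 'max_epochs': '50'} (values stay strings)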
10 changes: 5 additions & 5 deletions IGN_model.py → IGNModel.py
@@ -48,7 +48,7 @@ def _declamp_grad(vae_grad, reco_grad, filter):
return res


class IGN_model(m.Model):
class IGNModel(m.Model):
model_id = 'ign'
decoder_scope = 'dec'
encoder_scope = 'enc'
@@ -90,7 +90,7 @@ def __init__(self,
weight_init=None,
activation=act.sigmoid,
optimizer=tf.train.AdamOptimizer):
super(IGN_model, self).__init__()
super(IGNModel, self).__init__()
FLAGS.batch_size = FLAGS.sequence_length
self._weight_init = weight_init
self._activation = activation
@@ -102,12 +102,12 @@ def get_layer_info(self):
return [self.layer_encoder, self.layer_narrow, self.layer_decoder]

def get_meta(self, meta=None):
meta = super(IGN_model, self).get_meta(meta=meta)
meta = super(IGNModel, self).get_meta(meta=meta)
meta['div'] = FLAGS.gradient_proportion
return meta

def load_meta(self, save_path):
meta = super(IGN_model, self).load_meta(save_path)
meta = super(IGNModel, self).load_meta(save_path)
self._weight_init = meta['init']
self._optimizer = tf.train.AdadeltaOptimizer \
if 'Adam' in meta['opt'] \
@@ -272,7 +272,7 @@ def train(self, epochs_to_train=5):
epochs = 300
import sys

model = IGN_model()
model = IGNModel()
args = dict([arg.split('=', maxsplit=1) for arg in sys.argv[1:]])
if len(args) == 0:
global DEV
2 changes: 1 addition & 1 deletion Model.py
@@ -27,7 +27,7 @@

tf.app.flags.DEFINE_integer('max_epochs', 20, 'Train for at most this number of epochs')
tf.app.flags.DEFINE_integer('epoch_size', 100, 'Number of batches per epoch')
tf.app.flags.DEFINE_integer('save_every', 100, 'Save model state every INT epochs')
tf.app.flags.DEFINE_integer('save_every', 1000, 'Save model state every INT epochs')
tf.app.flags.DEFINE_integer('save_encodings_every', 50, 'Save encoding and visualizations every')
tf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ')

1 change: 1 addition & 0 deletions input.py
@@ -41,6 +41,7 @@ def get_action_data(folder):
res = [x[3][:2] for x in action_data]
return np.abs(np.asarray(res))


def read_ds_zip(path):
dep, img = {}, {}
tar = tarfile.open(path, "r:gz")
2 changes: 1 addition & 1 deletion visualization.py
@@ -207,7 +207,7 @@ def visualize_cross_section(embeddings, fig=None):
_plot_single_cross_section(embeddings, [i, j], subplot)

if features >= 3:
embeddings = embeddings[:1000]
# embeddings = embeddings[:1000]
pos = (size+1)*size - size + 1
subplot = plt.subplot(size+1, size, pos)
_plot_single_cross_section(embeddings, [0, 1], subplot)
