Skip to content

Commit

Permalink
Remove dependencies on Kaffe + fix test bug
Browse files Browse the repository at this point in the history
  • Loading branch information
Alexandre Boulch committed Mar 14, 2018
1 parent aa0fe5a commit 438885f
Show file tree
Hide file tree
Showing 6 changed files with 83 additions and 206 deletions.
28 changes: 0 additions & 28 deletions python/models/VGG_ILSVRC_16_layers.py

This file was deleted.

87 changes: 66 additions & 21 deletions python/models/tensorflow_unet.py
@@ -1,21 +1,20 @@
import numpy as np
import tensorflow as tf
from .VGG_ILSVRC_16_layers import VGG_ILSVRC_16_layers as VGG16_net


###########################################
## NEEDED TO USE MAXPOOL WITH ARGMAX
###########################################
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_nn_ops


@ops.RegisterGradient("MaxPoolWithArgmax")
def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad):
    """Gradient registration for the MaxPoolWithArgmax op.

    TF 1.x does not ship a gradient for MaxPoolWithArgmax, so we route the
    incoming gradient through the private max-pool-with-argmax gradient
    kernel, reusing the argmax indices recorded in the forward pass
    (op.outputs[1]).  NOTE(review): relies on the private
    gen_nn_ops._max_pool_grad_with_argmax symbol — tied to this TF version.
    """
    return gen_nn_ops._max_pool_grad_with_argmax(
        op.inputs[0],              # original pooling input
        grad,                      # gradient w.r.t. the pooled output
        op.outputs[1],             # argmax indices from the forward pass
        op.get_attr("ksize"),
        op.get_attr("strides"),
        padding=op.get_attr("padding"),
    )


# ###########################################
# ## NEEDED TO USE MAXPOOL WITh ARGMAX
# ###########################################
# from tensorflow.python.framework import ops
# from tensorflow.python.ops import gen_nn_ops
# @ops.RegisterGradient("MaxPoolWithArgmax")
# def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad):
# return gen_nn_ops._max_pool_grad_with_argmax(op.inputs[0],
# grad,
# op.outputs[1],
# op.get_attr("ksize"),
# op.get_attr("strides"),
# padding=op.get_attr("padding"))

def print_activations(t):
    """Log a tensor's op name and static shape (as a Python list) to stdout."""
    shape_list = t.get_shape().as_list()
    print(t.op.name, ' ', shape_list)
Expand All @@ -27,7 +26,7 @@ def print_activations(t):
padding="SAME",
activation_fn=tf.nn.relu,
#weights_initializer=tf.truncated_normal_initializer(0, 0.1)
weights_initializer=tf.contrib.layers.xavier_initializer()
weights_initializer=tf.contrib.layers.xavier_initializer(), **kwargs
)
def convolution2d_transpose(*args, **kwargs):
    """Thin wrapper around tf.contrib.layers.convolution2d_transpose.

    Fixes the project-wide deconvolution defaults: 3x3 kernel, SAME padding,
    ReLU activation, Xavier weight init.  Positional args and any extra
    keyword args are forwarded unchanged.
    """
    return tf.contrib.layers.convolution2d_transpose(
        *args,
        kernel_size=[3, 3],
        padding="SAME",
        activation_fn=tf.nn.relu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        **kwargs
    )


class VGG16_net:
    """VGG-16 convolutional feature extractor (conv1_1 .. conv5_3).

    Builds the standard five VGG-16 conv blocks with 2x2 max-pooling between
    blocks (no pool after block 5).  Every layer is recorded in self.layers
    under its VGG name ("conv3_2", "pool4", ...) so downstream code can tap
    intermediate activations.
    """

    # (number of conv layers, output channels) for VGG blocks 1..5.
    _BLOCKS = [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)]

    def __init__(self, images):
        """Build the network on top of `images` (a 4-D image batch tensor)."""
        self.layers = {}
        x = images
        for block, (n_convs, width) in enumerate(self._BLOCKS, start=1):
            for conv in range(1, n_convs + 1):
                name = "conv{}_{}".format(block, conv)
                x = convolution2d(x, width, stride=1, scope=name)
                self.layers[name] = x
            # 2x2 max-pool between blocks; the original net has no pool5.
            if block < len(self._BLOCKS):
                x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding="SAME")
                self.layers["pool{}".format(block)] = x

    def load(self, filename, variable_scope, session):
        """Restore pretrained weights from a numpy .npy dump.

        `filename` holds a pickled dict {layer_name: {param_name: ndarray}}
        with bytes keys at the parameter level (Caffe-converted dump).
        Variables are looked up under `variable_scope`/<layer>/<param> with
        reuse=True and assigned via `session`.  Missing variables are
        reported and skipped rather than raising.
        """
        with tf.variable_scope(variable_scope):
            weight_map = np.load(filename, encoding="bytes").tolist()
            for layer_name in weight_map:
                with tf.variable_scope(layer_name, reuse=True):
                    for raw_name, value in weight_map[layer_name].items():
                        key = raw_name.decode('utf-8')
                        try:
                            session.run(tf.get_variable(key).assign(value))
                        except ValueError:
                            # Variable absent in this graph (e.g. fc layers
                            # dropped from the dump) — report and continue.
                            print("Error", layer_name, key)
                            #if not ignore_missing:
                            #    raise



def model(images, label_nbr, is_training=None):
net = VGG16_net({'data': images})
net = VGG16_net(images)

vgg_output = net.layers["conv5_3"]

Expand Down Expand Up @@ -69,7 +116,5 @@ def model(images, label_nbr, is_training=None):
deconv_net.append(convolution2d_transpose(deconv_net[-1], 64, stride=1))
deconv_net.append(convolution2d_transpose(deconv_net[-1], label_nbr, stride=1))
print_activations(deconv_net[-1])




return deconv_net, net
2 changes: 1 addition & 1 deletion python/tensorflow_tester_backprojeter.py
Expand Up @@ -163,7 +163,7 @@ def createLabelPLY(self, filename,
save_dir):

# create the semantizer
semantizer = Sem3D()
semantizer = PcTls.Semantic3D()
semantizer.set_voxel_size(0.1)

# loading data
Expand Down
3 changes: 2 additions & 1 deletion python/tensorflow_trainer.py
Expand Up @@ -57,7 +57,8 @@ def train(self,

# load net weights if needed
if net is not None:
net.load(net_weights_init, sess)
net.load(net_weights_init, variable_scope = variable_scope, session=sess)
#net.load(net_weights_init, sess)

# create the list of images in the folder
directory = os.path.join(dir_images, images_root)
Expand Down
141 changes: 0 additions & 141 deletions python/tensorflow_trainer_finetune.py

This file was deleted.

28 changes: 14 additions & 14 deletions sem3d_test_backproj_tf.py
Expand Up @@ -24,20 +24,20 @@

# Semantic3D test scene base names to run the back-projection over.
# NOTE(review): each name below also appears once commented out — this looks
# like a diff-rendering artifact (removed vs. added hunk sides merged into
# one view); confirm against the repository which set is actually intended.
filenames = [
"birdfountain_station1_xyz_intensity_rgb",
"castleblatten_station1_intensity_rgb",
"castleblatten_station5_xyz_intensity_rgb",
"marketplacefeldkirch_station1_intensity_rgb",
"marketplacefeldkirch_station4_intensity_rgb",
"marketplacefeldkirch_station7_intensity_rgb",
"sg27_station10_intensity_rgb",
"sg27_station3_intensity_rgb",
"sg27_station6_intensity_rgb",
"sg27_station8_intensity_rgb",
"sg28_station2_intensity_rgb",
"sg28_station5_xyz_intensity_rgb",
"stgallencathedral_station1_intensity_rgb",
"stgallencathedral_station3_intensity_rgb",
"stgallencathedral_station6_intensity_rgb"
# "castleblatten_station1_intensity_rgb",
# "castleblatten_station5_xyz_intensity_rgb",
# "marketplacefeldkirch_station1_intensity_rgb",
# "marketplacefeldkirch_station4_intensity_rgb",
# "marketplacefeldkirch_station7_intensity_rgb",
# "sg27_station10_intensity_rgb",
# "sg27_station3_intensity_rgb",
# "sg27_station6_intensity_rgb",
# "sg27_station8_intensity_rgb",
# "sg28_station2_intensity_rgb",
# "sg28_station5_xyz_intensity_rgb",
# "stgallencathedral_station1_intensity_rgb",
# "stgallencathedral_station3_intensity_rgb",
# "stgallencathedral_station6_intensity_rgb"
]


Expand Down

0 comments on commit 438885f

Please sign in to comment.