Remove unused parameters and code. Clean up point cloud functions.
Eldar Insafutdinov committed Nov 27, 2018
1 parent 96b0ade commit 114c7e2
Showing 3 changed files with 10 additions and 57 deletions.
15 changes: 1 addition & 14 deletions dpc/resources/default_config.yaml
@@ -4,15 +4,13 @@
config: "" # .yaml config file

inp_dir: "" # Directory path containing the input data (tfrecords).
dataset_name: "shapenet_single_category" # Dataset name that is to be used for training and evaluation.
synth_set: "03001627" # Class of Shapenet data
num_views: 5 # Number of viewpoints in the input data.
num_views_to_use: -1 # Num of views to actually use.
tfrecords_gzip_compressed: true # Compress TF record files.
image_size: 128 # Input images dimension (pixels) - width & height.
saved_camera: true
saved_depth: false
saved_voxels: false

# Model general
encoder_name: "img_encoder" # Name of the encoder network being used.
@@ -40,7 +38,6 @@ pose_predict_num_candidates: 1
pose_candidates_num_layers: 3
pose_predictor_student: true
pose_predictor_student_loss_weight: 1.0
pose_student_spherical_loss: true
pose_student_align_loss: false

predict_translation: false
@@ -52,8 +49,7 @@ predict_translation_init_stddev: 0.05
pc_relative_sigma: 1.0
pc_relative_sigma_end: 0.2 # specify -1.0 for constant sigma
pc_normalise_gauss: false
pc_normalise_gauss_analytical: false
pc_fix_bug_sigma: false
pc_normalise_gauss_analytical: true
pc_unit_cube: true
pc_fast: true
pc_gauss_kernel_size: 11
@@ -64,8 +60,6 @@ pc_occupancy_scaling_maximum: 1.0
# RGB
pc_rgb: false
pc_rgb_stop_points_gradient: false
pc_clip_after_conv: false
pc_do_not_clip: false
pc_rgb_clip_after_conv: false
pc_rgb_divide_by_occupancies: false
pc_rgb_divide_by_occupancies_epsilon: 0.01
@@ -129,16 +123,9 @@ max_number_of_steps: 600000 # Maximum number of steps for training.
compute_validation_loss: true
variable_num_views: false

save_snapshot_interval: -1
validation_interval: 1000 # Do validation every so often.
save_intermediate_pcs: false
save_intermediate_pcs_interval: 5000
save_early_frequent_snapshots: false

master: "" # The address of the tensorflow master

save_summaries_secs: 120 # Seconds interval for dumping TF summaries.
save_interval_secs: 300 # Seconds interval to save models.

# Predict and visualise
num_dataset_samples: -1
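
For orientation, a minimal sketch of how a defaults file like this is typically consumed: the experiment's own .yaml (the `config` field above) overrides these defaults key by key. This is my illustration, not the repo's actual loader; load_config and the override mechanism are assumptions.

import yaml

def load_config(default_path, override_path=None):
    # Hypothetical loader: read the defaults, then overlay the experiment config.
    with open(default_path) as f:
        cfg = yaml.safe_load(f)
    if override_path:
        with open(override_path) as f:
            cfg.update(yaml.safe_load(f) or {})
    return cfg

cfg = load_config("dpc/resources/default_config.yaml")
print(cfg["pc_relative_sigma"], cfg["pc_gauss_kernel_size"])  # 1.0 11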
30 changes: 9 additions & 21 deletions dpc/util/point_cloud.py
@@ -36,17 +36,13 @@ def pointcloud2voxels(cfg, input_pc, sigma): # [B,N,3]
    sq_distance = tf.square(x_big - xg) + tf.square(y_big - yg) + tf.square(z_big - zg)

    # compute gaussian
    if cfg.pc_fix_bug_sigma:
        func = tf.exp(-sq_distance / (2.0 * sigma * sigma)) # [B,N,G,G,G]
    else:
        func = tf.exp(-sq_distance/(sigma*sigma))
    func = tf.exp(-sq_distance / (2.0 * sigma * sigma)) # [B,N,G,G,G]

    # normalise gaussian
    if cfg.pc_normalise_gauss:
        normaliser = tf.reduce_sum(func, [2, 3, 4], keep_dims=True)
        func /= normaliser
    elif cfg.pc_normalise_gauss_analytical:
        assert(cfg.pc_fix_bug_sigma)
        # should work with any grid sizes
        magic_factor = 1.78984352254 # see estimate_gauss_normaliser
        sigma_normalised = sigma * vox_size
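
Context for the change above: dropping pc_fix_bug_sigma leaves only the standard kernel exp(-d^2 / (2*sigma^2)); the old exp(-d^2 / sigma^2) behaves like a Gaussian whose effective sigma is smaller by a factor of sqrt(2), which is why the analytical normalisation asserted the fix. A rough NumPy check of the two kernels' grid sums, written here for illustration only (it is not the repo's estimate_gauss_normaliser):

import numpy as np

def gauss_sum(sigma_vox, fixed=True, grid=64):
    # Sum exp(-d^2 / (2*sigma^2)) (or the old exp(-d^2 / sigma^2)) over a voxel grid.
    ax = np.arange(grid) - (grid - 1) / 2.0
    x, y, z = np.meshgrid(ax, ax, ax, indexing="ij")
    sq_distance = x ** 2 + y ** 2 + z ** 2
    denom = 2.0 * sigma_vox ** 2 if fixed else sigma_vox ** 2
    return np.exp(-sq_distance / denom).sum()

sigma_vox = 3.0  # sigma in voxel units, purely illustrative
print(gauss_sum(sigma_vox, fixed=True))          # close to the analytic (sigma*sqrt(2*pi))**3
print((sigma_vox * np.sqrt(2.0 * np.pi)) ** 3)
print(gauss_sum(sigma_vox, fixed=False))         # close to (sigma*sqrt(pi))**3, i.e. an effectively narrower Gaussian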
@@ -67,7 +63,6 @@ def pointcloud2voxels3d_fast(cfg, pc, rgb): # [B,N,3]
    else:
        vox_size_z = vox_size

    filter_outsiders = True
    batch_size = pc.shape[0]
    num_points = tf.shape(pc)[1]

@@ -76,6 +71,7 @@ def pointcloud2voxels3d_fast(cfg, pc, rgb): # [B,N,3]
    grid_size = 1.0
    half_size = grid_size / 2

    filter_outliers = True
    valid = tf.logical_and(pc >= -half_size, pc <= half_size)
    valid = tf.reduce_all(valid, axis=-1)

@@ -94,14 +90,14 @@ def pointcloud2voxels3d_fast(cfg, pc, rgb): # [B,N,3]
    r = pc_grid - indices_floor  # fractional part
    rr = [1.0 - r, r]

    if filter_outsiders:
    if filter_outliers:
        valid = tf.reshape(valid, [-1])
        indices = tf.boolean_mask(indices, valid)

    def interpolate_scatter3d(pos):
        updates_raw = rr[pos[0]][:, :, 0] * rr[pos[1]][:, :, 1] * rr[pos[2]][:, :, 2]
        updates = tf.reshape(updates_raw, [-1])
        if filter_outsiders:
        if filter_outliers:
            updates = tf.boolean_mask(updates, valid)

        indices_loc = indices
@@ -116,7 +112,7 @@ def interpolate_scatter3d(pos):
                updates_raw = tf.stop_gradient(updates_raw)
            updates_rgb = tf.expand_dims(updates_raw, axis=-1) * rgb
            updates_rgb = tf.reshape(updates_rgb, [-1, 3])
            if filter_outsiders:
            if filter_outliers:
                updates_rgb = tf.boolean_mask(updates_rgb, valid)
            voxels_rgb = tf.scatter_nd(indices_loc, updates_rgb, [batch_size, vox_size_z, vox_size, vox_size, 3])
        else:
@@ -134,13 +130,7 @@ def interpolate_scatter3d(pos):
                voxels_rgb.append(vx_rgb)

    voxels = tf.add_n(voxels)

    if has_rgb:
        voxels_rgb = tf.add_n(voxels_rgb)
        if not cfg.pc_rgb_clip_after_conv:
            voxels_rgb = tf.clip_by_value(voxels_rgb, 0.0, 1.0)
    else:
        voxels_rgb = None
    voxels_rgb = tf.add_n(voxels_rgb) if has_rgb else None

    return voxels, voxels_rgb
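
The function above implements a trilinear splat: each point is distributed over the eight voxels surrounding it, with per-axis weights taken from rr = [1.0 - r, r], and the eight scatter results are summed. A compact NumPy sketch of the same idea, my illustration rather than repo code (batch and RGB handling omitted):

import numpy as np

def splat_points(pc, vox_size):
    # pc: [N, 3] points assumed to lie in the unit cube [-0.5, 0.5]^3.
    voxels = np.zeros((vox_size, vox_size, vox_size), dtype=np.float32)
    grid = (pc + 0.5) * (vox_size - 1)      # continuous voxel coordinates
    lo = np.floor(grid).astype(np.int64)    # analogue of indices_floor
    r = grid - lo                           # fractional part
    rr = [1.0 - r, r]
    inside = np.all((pc >= -0.5) & (pc <= 0.5), axis=1)  # analogue of filter_outliers
    for k in range(2):
        for j in range(2):
            for i in range(2):
                w = rr[k][:, 0] * rr[j][:, 1] * rr[i][:, 2]
                idx = np.clip(lo + np.array([k, j, i]), 0, vox_size - 1)
                np.add.at(voxels, (idx[inside, 0], idx[inside, 1], idx[inside, 2]), w[inside])
    return voxels

voxels = splat_points(np.random.uniform(-0.5, 0.5, size=(1000, 3)), vox_size=32)
print(voxels.sum())  # ~1000: the eight weights of every point sum to one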

@@ -151,9 +141,6 @@ def smoothen_voxels3d(cfg, voxels, kernel):
            voxels = tf.nn.conv3d(voxels, krnl, [1, 1, 1, 1, 1], padding="SAME")
    else:
        voxels = tf.nn.conv3d(voxels, kernel, [1, 1, 1, 1, 1], padding="SAME")
    if cfg.pc_clip_after_conv and not cfg.pc_do_not_clip:
        print("clipping occupancies after convolution")
        voxels = tf.clip_by_value(voxels, 0.0, 1.0)
    return voxels
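
smoothen_voxels3d blurs the occupancy grid either with one dense 3-D kernel or, when pc_separable_gauss_filter is set, with a list of 1-D kernels applied axis by axis, which is equivalent for a Gaussian and far cheaper (roughly 3K instead of K^3 taps per voxel). A NumPy sketch of the separable idea, illustrative only and not the repo's kernel construction:

import numpy as np

def gauss_kernel_1d(size=11, sigma=3.0):
    x = np.arange(size) - (size - 1) / 2.0
    k = np.exp(-x * x / (2.0 * sigma * sigma))
    return k / k.sum()  # normalised 1-D Gaussian

def smooth_separable(voxels, size=11, sigma=3.0):
    # Convolve the same 1-D kernel along each of the three axes in turn.
    k = gauss_kernel_1d(size, sigma)
    out = voxels
    for axis in range(3):
        out = np.apply_along_axis(lambda v: np.convolve(v, k, mode="same"), axis, out)
    return out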


@@ -249,12 +236,13 @@ def pointcloud_project_fast(cfg, point_cloud, transform, predicted_translation,
    voxels = tf.expand_dims(voxels, axis=-1)
    voxels_raw = voxels

    if not cfg.pc_clip_after_conv and not cfg.pc_do_not_clip:
        voxels = tf.clip_by_value(voxels, 0.0, 1.0)
    voxels = tf.clip_by_value(voxels, 0.0, 1.0)

    if kernel is not None:
        voxels = smoothen_voxels3d(cfg, voxels, kernel)
        if has_rgb:
            if not cfg.pc_rgb_clip_after_conv:
                voxels_rgb = tf.clip_by_value(voxels_rgb, 0.0, 1.0)
            voxels_rgb = convolve_rgb(cfg, voxels_rgb, kernel)

    if scaling_factor is not None:
22 changes: 0 additions & 22 deletions dpc/util/train.py
@@ -1,29 +1,7 @@
import tensorflow as tf


def get_train_op_for_scope(cfg, loss, optimizer, scopes):
    """Creates the train op for the trainable variables in the given scopes."""
    is_trainable = lambda x: x in tf.trainable_variables()

    var_list = []
    update_ops = []

    for scope in scopes:
        var_list_raw = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
        var_list_scope = list(filter(is_trainable, var_list_raw))
        var_list.extend(var_list_scope)
        update_ops.extend(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))

    return tf.contrib.slim.learning.create_train_op(
        loss,
        optimizer,
        update_ops=update_ops,
        variables_to_train=var_list,
        clip_gradient_norm=cfg.clip_gradient_norm)


def get_trainable_variables(scopes):
    """Returns the trainable variables in the given scopes."""
    is_trainable = lambda x: x in tf.trainable_variables()

    var_list = []
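
With get_train_op_for_scope removed, only get_trainable_variables remains in this file. A hedged sketch of how it could be paired with a plain TF1 optimizer instead of tf.contrib.slim's create_train_op; build_train_op is my name, not the repo's, and the gradient clipping that slim handled via clip_gradient_norm would have to be re-added by hand:

def build_train_op(loss, optimizer, scopes):
    # Illustrative replacement: collect the scoped trainable variables and
    # run their UPDATE_OPS (e.g. batch-norm moving averages) before the gradient step.
    var_list = get_trainable_variables(scopes)
    update_ops = []
    for scope in scopes:
        update_ops.extend(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))
    with tf.control_dependencies(update_ops):
        return optimizer.minimize(loss, var_list=var_list)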
