Commit
fix bugs in testing codes
zhijieW94 committed Aug 18, 2019
1 parent 2da3e93 commit e58a756
Showing 7 changed files with 1,315 additions and 109 deletions.
11 changes: 6 additions & 5 deletions README.md
@@ -1,4 +1,8 @@
# SAGNet: Structure-aware Generative Network for 3D-Shape Modeling
by Zhijie Wu, Xiang Wang, Di Lin, Dani Lischinski, Daniel Cohen-Or, Hui Huang


## Introduction ##
This repository contains an official implementation for [SAGNet: Structure-aware Generative Network for 3D-Shape Modeling](http://vcc.szu.edu.cn/file/upload_file//image/research/att201905040553/SAGNet.pdf) by Wu et al.
This model takes voxel maps as geometry and bounding boxes as structure, and learns a latent space through joint analysis.
The network is implemented with Python and [TensorFlow](https://www.tensorflow.org/).
@@ -105,11 +109,8 @@ In the `TEST` section, this file (`config.yml`) has the following options:
- `RESULTS_DIRECTORY`: the directory to store the generation results.
- `PRETRAINED_MODEL_PATH`: the path to place the pretrained model, which will be used for generation.
- `SAMPLE_SIZE`: set the number of generated shapes to `SAMPLE_SIZE`.
- `SHAPE_NAME`: the class of the generated 3D shapes. Five classes (`airplane`, `chair`, `guitar`, `lamp`, and `motorbike`) are supported.

Note that the `SHAPE_NAME` and `PRETRAINED_MODEL_PATH` should be strictly consistent.
For example, if `PRETRAINED_MODEL_PATH` points to a model trained on `airplane`, `SHAPE_NAME` should be set to `airplane`.
Then we can directly run `test_net.py` to synthesize `SAMPLE_SIZE` shapes:
Then you can directly run `test_net.py` to synthesize `SAMPLE_SIZE` shapes:
```
python test_net.py
```
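For reference, here is a minimal sketch of how these `TEST` options could be read before generation, assuming the standard PyYAML loader; the actual parsing inside `test_net.py` may differ:
```
import yaml  # PyYAML; assumed here, not necessarily what test_net.py uses

with open('config.yml') as f:
    config_dict = yaml.safe_load(f)

test_cfg = config_dict['TEST']
results_dir = test_cfg['RESULTS_DIRECTORY']           # where generated shapes are written
pretrained_path = test_cfg['PRETRAINED_MODEL_PATH']   # checkpoint of the trained model
sample_size = test_cfg['SAMPLE_SIZE']                 # number of shapes to synthesize
gpu_ids = test_cfg['GPU_ID']                          # GPU indices used for testing
```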
@@ -136,7 +137,7 @@ Please unzip the downloaded file and paste the path of a checkpoint file (`.cptk


## Cite ##
If you find this code or data useful in your work, please cite our paper:
If you use our code/model/data, please cite our paper:
```
@article{pageSAGnet19,
title = {SAGNet: Structure-aware Generative Network for 3D-Shape Modeling},
1,087 changes: 1,087 additions & 0 deletions SAG_network.py

Large diffs are not rendered by default.

20 changes: 10 additions & 10 deletions config.yml
@@ -1,7 +1,7 @@
RNN_STATE_DIM: 512 # the feature dimension for RNNs
RNN_CELL_DEPTH: 2 # the depth for RNNs

EMBEDDING_VECOTR_SIZE: 320 # the size of embedding vector in the network
EMBEDDING_VECOTR_SIZE: 200 # the size of embedding vector in the network

LEAK_VALUE: 0.05 # the leak value for leaky ReLU

@@ -10,11 +10,11 @@ BOUNDING_BOX_SIZE: 6 # the size of feature vector of bounding box

TRAIN:
BATCH_SIZE: 10 # mini batch size
GPU_ID: [0, 1, 2, 3] # the gpus for training
GPU_ID: [4, 5, 6, 7] # the gpus for training

# Loss hyperparameters
VOXEL_BBOX_LOSS_RATIO: 0.4 # The training loss weight
GRAPH_REC_KL_LOSS_RATIO: 0.995
GRAPH_REC_KL_LOSS_RATIO: 0.99

RECON_GEN_INITIAL_LOSS_RATIO: 1.0 # controls the balance between the reconstruction and generation losses
RECON_GEN_RATIO_GAMMA: 0.9
@@ -23,7 +23,7 @@ TRAIN:
FINAL_KL_RATIO: 0.8

# Learning rate
VERT_LEARNING_RATE: 0.6 # Decay the learning rate
VERT_LEARNING_RATE: 0.6 # Learning rate decay
VERT_GAMMA: 0.9
VERT_LR_DECAY_STEP: 2300

@@ -40,7 +40,7 @@ TRAIN:
SNAPSHOT_FREQ: 2000 # The frequency to save model snapshot and summary
SUMMARY_FREQ: 20

ITERATION_NUM: 3 # the number of iterations for exchanging geometry and structure information
EXCHANGE_NUM: 3 # the number of iterations for exchanging geometry and structure information
DROPOUT_KEEP_PROB: 0.5

MAX_GRADIENT_NORM: 0.5
@@ -58,13 +58,13 @@ TRAIN:

PRETRAINED_MODEL_PATH: ""

SHAPE_NAME: "motorbike" # "motorbike"/"chair"/"airplane"/"guitar"/"lamp"/"toy_examples"
SHAPE_NAME: "guitar" # "motorbike"/"chair"/"airplane"/"guitar"/"lamp"/"toy_examples"

TEST:
GPU_ID: [7] # the gpus for testing
GPU_ID: [4] # the gpus for testing

SAMPLE_SIZE: 10
SHAPE_NAME: "motorbike" # "motorbike"/"chair"/"airplane"/"guitar"/"lamp"/"toy_examples"
SAMPLE_SIZE: 100

RESULTS_DIRECTORY: 'nn_exp_results/testing_results/'
PRETRAINED_MODEL_PATH: ''
PRETRAINED_MODEL_PATH: 'nn_exp_results/models/motorbike_2019_08_18_01_25_33/motorbike_79999.ckpt'
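The `GPU_ID` lists select which devices training and testing run on. As a rough, hypothetical illustration (the repository's actual multi-GPU placement code is not shown in this diff), a list such as `[4, 5, 6, 7]` could be turned into TensorFlow device strings like this:
```
import tensorflow as tf

gpu_ids = [4, 5, 6, 7]  # e.g. TRAIN -> GPU_ID from config.yml

# Hypothetical placement loop: one network tower per configured GPU.
for gpu_id in gpu_ids:
    with tf.device('/gpu:%d' % gpu_id):
        pass  # build one replica of the graph on this device
```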

160 changes: 106 additions & 54 deletions data_helper.py
@@ -23,75 +23,82 @@ def __init__(self, config_dict, for_training):
self.for_training = for_training

self.load_config_info(config_dict=config_dict)
self.load_dataset(config_dict) # load the dataset for a certain kind

if for_training:
self.load_dataset(config_dict) # load the dataset for a certain kind

def load_config_info(self, config_dict):
self.config_dict = config_dict

self.shape_name = self.config_dict['TRAIN']['SHAPE_NAME']

self.cube_len = self.config_dict['CUBE_LEN']
self.bbox_size = self.config_dict['BOUNDING_BOX_SIZE']
self.embedding_size = self.config_dict['EMBEDDING_VECOTR_SIZE']
self.max_part_size = self.config_dict['MAX_PART_SIZE']

self.input_pls = {}
self.obj_num = 0

if self.for_training:
self.gpu_nums = self.config_dict['TRAIN']['NUM_GPUS']

self.batch_size = self.config_dict['TRAIN']['BATCH_SIZE']
else:
self.gpu_nums = 1

self.batch_size = 1
self.shape_name = self.config_dict['TRAIN']['SHAPE_NAME']

self.input_pls = {}
self.obj_num = 0
self.voxel_array = np.array([]) # used to store the voxel information in a list
self.bbox_array = np.array([]) # used to store the bounding box information in a list
self.part_obj_index_list = [] # the index of part_obj_index_list is the index of a part and the value is the index of the object it belongs to
self.obj_part_index_list = [] # the index of this list is the index of an object and the value stands for the index of a certain part

self.bbox_mean = 0
self.bbox_std = 0

self.global_steps = tf.placeholder(tf.float32, shape=(), name="global_steps")

initial_vert_lr = config_dict['TRAIN']['VERT_LEARNING_RATE']
initial_edge_lr = config_dict['TRAIN']['EDGE_LEARNING_RATE']
initial_graph_gen_lr = config_dict['TRAIN']['GRAPH_GEN_LEARNING_RATE']

vert_gamma = config_dict['TRAIN']['VERT_GAMMA']
edge_gamma = config_dict['TRAIN']['EDGE_GAMMA']
graph_gen_gamma = config_dict['TRAIN']['GRAPH_GEN_GAMMA']

vert_lr_decay_step = config_dict['TRAIN']['VERT_LR_DECAY_STEP']
edge_lr_decay_step = config_dict['TRAIN']['EDGE_LR_DECAY_STEP']
graph_gen_lr_decay_step = config_dict['TRAIN']['GRAPH_GEN_LR_DECAY_STEP']

self.vert_lr = tf.train.exponential_decay(initial_vert_lr,
global_step=self.global_steps,
decay_steps=vert_lr_decay_step,
decay_rate=vert_gamma,
staircase=True)
self.edge_lr = tf.train.exponential_decay(initial_edge_lr,
global_step=self.global_steps,
decay_steps=edge_lr_decay_step,
decay_rate=edge_gamma,
staircase=True)
self.graph_gen_lr = tf.train.exponential_decay(initial_graph_gen_lr,
self.voxel_array = np.array([]) # used to store the voxel information in a list
self.bbox_array = np.array([]) # used to store the bounding box information in a list
self.part_obj_index_list = [] # the index of part_obj_index_list is the index of a part and the value is the index of the object it belongs to
self.obj_part_index_list = [] # the index of this list is the index of an object and the value stands for the index of a certain part

self.global_steps = tf.placeholder(tf.float32, shape=(), name="global_steps")

initial_vert_lr = config_dict['TRAIN']['VERT_LEARNING_RATE']
initial_edge_lr = config_dict['TRAIN']['EDGE_LEARNING_RATE']
initial_graph_gen_lr = config_dict['TRAIN']['GRAPH_GEN_LEARNING_RATE']

vert_gamma = config_dict['TRAIN']['VERT_GAMMA']
edge_gamma = config_dict['TRAIN']['EDGE_GAMMA']
graph_gen_gamma = config_dict['TRAIN']['GRAPH_GEN_GAMMA']

vert_lr_decay_step = config_dict['TRAIN']['VERT_LR_DECAY_STEP']
edge_lr_decay_step = config_dict['TRAIN']['EDGE_LR_DECAY_STEP']
graph_gen_lr_decay_step = config_dict['TRAIN']['GRAPH_GEN_LR_DECAY_STEP']

self.vert_lr = tf.train.exponential_decay(initial_vert_lr,
global_step=self.global_steps,
decay_steps=vert_lr_decay_step,
decay_rate=vert_gamma,
staircase=True)
self.edge_lr = tf.train.exponential_decay(initial_edge_lr,
global_step=self.global_steps,
decay_steps=graph_gen_lr_decay_step,
decay_rate=graph_gen_gamma,
decay_steps=edge_lr_decay_step,
decay_rate=edge_gamma,
staircase=True)
self.graph_gen_lr = tf.train.exponential_decay(initial_graph_gen_lr,
global_step=self.global_steps,
decay_steps=graph_gen_lr_decay_step,
decay_rate=graph_gen_gamma,
staircase=True)

initial_kl_loss_ratio = config_dict['TRAIN']['RECON_GEN_INITIAL_LOSS_RATIO']
kl_loss_gamma = config_dict['TRAIN']['RECON_GEN_RATIO_GAMMA']
kl_loss_decay_step = config_dict['TRAIN']['RECON_GEN_DECAY_STEP']
self.kl_loss_ratio = tf.train.exponential_decay(initial_kl_loss_ratio,
global_step=self.global_steps,
decay_steps=kl_loss_decay_step,
decay_rate=kl_loss_gamma,
staircase=False)
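# tf.train.exponential_decay computes
#     decayed_value = initial_value * decay_rate ** (global_step / decay_steps),
# where the exponent is floored when staircase=True.  The three learning rates
# above therefore drop by their gamma factor once per decay interval, while
# kl_loss_ratio (staircase=False) decays smoothly with the global step.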
else:
self.gpu_nums = 1
self.batch_size = self.config_dict['TRAIN']['BATCH_SIZE']

self.shape_name = config_dict['model_info']['model_class']

initial_kl_loss_ratio = config_dict['TRAIN']['RECON_GEN_INITIAL_LOSS_RATIO']
kl_loss_gamma = config_dict['TRAIN']['RECON_GEN_RATIO_GAMMA']
kl_loss_decay_step = config_dict['TRAIN']['RECON_GEN_DECAY_STEP']
self.kl_loss_ratio = tf.train.exponential_decay(initial_kl_loss_ratio,
global_step=self.global_steps,
decay_steps=kl_loss_decay_step,
decay_rate=kl_loss_gamma,
staircase=False)
self.visible_part_indexes_array = config_dict['model_info']['part_masks']
self.obj_num = len(config_dict['model_info']['part_masks'])

self.bbox_means_array = config_dict['model_info']['bbox_info']['bbox_mean']
self.bbox_stds_array = config_dict['model_info']['bbox_info']['bbox_variance']
self.bbox_benchmark_array = config_dict['model_info']['bbox_info']['bbox_benchmark']


##############################################################################
@@ -431,16 +438,20 @@ def get_placeholders_for_training(self):

return dict(input_pls)

def get_inputs_for_testing(self):
def get_inputs_for_testing(self, cur_iter):
cur_selected_obj_idx = np.random.randint(self.obj_num, size=[self.gpu_nums, self.batch_size])

current_visible_part_index = self.visible_part_indexes_array[cur_selected_obj_idx]
gaussian_noise = np.random.normal(0, 1, [self.gpu_nums, self.batch_size, self.embedding_size])
# gaussian_noise = np.full((self.gpu_nums, self.batch_size, self.embedding_size), 1) * (cur_iter + 1)

print "Gaussian noise:\n"
print gaussian_noise

input_pls = {}
input_pls['gaussian_noise'] = gaussian_noise
input_pls['visible_part_index'] = current_visible_part_index
input_pls['selected_model_idx'] = np.random.randint(self.obj_num, size=[self.gpu_nums, self.batch_size])
input_pls['selected_model_idx'] = cur_selected_obj_idx
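# Note: 'visible_part_index' and 'selected_model_idx' come from the same
# cur_selected_obj_idx sample, so the part mask and the selected reference
# model always refer to the same object.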

return input_pls

@@ -734,3 +745,44 @@ def write_bboxs_to_file(self, bboxs_info, file_name, seq_num, output_dir):
output_content = "%f " % (value)
out_f.write(output_content)
out_f.write("\n")


##############################################################################
# Functions to output model config info
##############################################################################
def write_model_info_to_file(self, output_dir=None):
if not os.path.exists(output_dir):
os.makedirs(output_dir)

output_file_name = "%s_model_info.txt" % self.shape_name
output_file_path = os.path.join(output_dir, output_file_name)

with open(output_file_path, "w") as out_f:
out_f.write("model_class\n")
out_f.write("%s\n" % self.shape_name)

out_f.write("\nbbox_mean\n")
for p_means in self.bbox_means_array:
for value in p_means:
output_content = "%f " % (value)
out_f.write(output_content)
out_f.write("\n")

out_f.write("\nbbox_variance\n")
for p_stds in self.bbox_stds_array:
for value in p_stds:
output_content = "%f " % (value)
out_f.write(output_content)
out_f.write("\n")

out_f.write("\nbbox_benchmark\n")
for p_benchmark in self.bbox_benchmark_array:
for value in p_benchmark:
output_content = "%f " % (value)
out_f.write(output_content)
out_f.write("\n")

mask_file_name = "%s_mask.mat" % self.shape_name
mask_file_path = os.path.join(output_dir, mask_file_name)

io.savemat(mask_file_path, {'masks': self.visible_part_indexes_array})
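For testing, `load_config_info` above expects this exported information under `config_dict['model_info']` (`model_class`, `part_masks`, and the `bbox_info` entries). Below is a minimal sketch of how the text file written by `write_model_info_to_file` could be parsed back into that structure; this helper is hypothetical, and the loader actually used for testing is not part of this diff:
```
import numpy as np
from scipy import io  # the part masks are stored separately in a .mat file


def read_model_info(txt_path, mat_path):
    # Hypothetical helper: parse a "<shape>_model_info.txt" file back into the
    # 'model_info' layout that load_config_info reads when for_training is False.
    sections = {}
    current = None
    with open(txt_path) as f:
        for raw in f:
            line = raw.strip()
            if not line:
                current = None          # a blank line ends the current section
            elif current is None:
                current = line          # section header: model_class, bbox_mean, ...
                sections[current] = []
            else:
                sections[current].append(line)

    def to_array(name):
        return np.array([[float(v) for v in row.split()] for row in sections[name]])

    return {
        'model_class': sections['model_class'][0],
        'part_masks': io.loadmat(mat_path)['masks'],
        'bbox_info': {
            'bbox_mean': to_array('bbox_mean'),
            'bbox_variance': to_array('bbox_variance'),
            'bbox_benchmark': to_array('bbox_benchmark'),
        },
    }
```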
