fix dataset last batch check
charlesq34 committed Feb 23, 2018
1 parent cce809f commit 6550604
Showing 4 changed files with 12 additions and 7 deletions.
6 changes: 5 additions & 1 deletion README.md
@@ -43,7 +43,11 @@ You can type as below to see the optional arguments for training:
If you have multiple GPUs on your machine, you can also run the multi-GPU version (the implementation is similar to the TensorFlow CIFAR-10 tutorial):

python train_multi_gpu.py --num_gpus 2


After training, we can evaluate the classification results (with optional multi-angle voting).

python evaluate.py --num_votes 12

<i>Side Note:</i> For the XYZ+normal experiment reported in our paper: (1) 5000 points are used; (2) an additional random data dropout augmentation is used during training (see the commented line after `augment_batch_data` in `train.py`); and (3) the model architecture is updated such that `nsample=128` in the first two set abstraction levels, which is suited to the larger point density of 5000-point samplings.

To use normal features for classification: You can get our sampled point clouds of ModelNet40 (XYZ and normal from mesh, 10k points per shape) at this <a href="https://1drv.ms/u/s!ApbTjxa06z9CgQfKl99yUDHL_wHs">OneDrive link</a>. Move the uncompressed data folder to `data/modelnet40_normal_resampled`
9 changes: 5 additions & 4 deletions evaluate.py
@@ -23,11 +23,12 @@
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet2_cls_ssg', help='Model name. [default: pointnet2_cls_ssg]')
-parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 1]')
+parser.add_argument('--batch_size', type=int, default=16, help='Batch Size during training [default: 16]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')
parser.add_argument('--dump_dir', default='dump', help='dump folder path [dump]')
parser.add_argument('--normal', action='store_true', help='Whether to use normal information')
+parser.add_argument('--num_votes', type=int, default=1, help='Aggregate classification scores from multiple rotations [default: 1]')
FLAGS = parser.parse_args()


@@ -73,7 +74,7 @@ def evaluate(num_votes):
# simple model
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)
MODEL.get_loss(pred, labels_pl, end_points)
-losses = tf.get_collection('losses', scope)
+losses = tf.get_collection('losses')
total_loss = tf.add_n(losses, name='total_loss')

# Add ops to save and restore all the variables.
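
For context on the `tf.get_collection` change above: the optional second argument filters collection entries by scope name. The multi-GPU script defines a per-tower `scope`, which is presumably why the scope filter is dropped here in the single-GPU path. A standalone TF 1.x sketch of the difference (illustrative names, not the repository code):

    import tensorflow as tf  # TF 1.x semantics

    with tf.Graph().as_default():
        with tf.name_scope('tower_0'):
            tf.add_to_collection('losses', tf.constant(1.0, name='loss_a'))
        tf.add_to_collection('losses', tf.constant(2.0, name='loss_b'))

        print(len(tf.get_collection('losses')))             # 2: every entry
        print(len(tf.get_collection('losses', 'tower_0')))  # 1: scope-filtered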
@@ -116,7 +117,7 @@ def eval_one_epoch(sess, ops, num_votes=1, topk=1):
while TEST_DATASET.has_next_batch():
batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
bsize = batch_data.shape[0]
-print('Batch: %d\t batch_size: %d'%(batch_idx, bsize))
+print('Batch: %03d, batch size: %d'%(batch_idx, bsize))
# for the last batch in the epoch, entries [bsize:] still hold data from the previous batch
cur_batch_data[0:bsize,...] = batch_data
cur_batch_label[0:bsize] = batch_label
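
The lines above pad a fixed-size buffer: the network always sees BATCH_SIZE samples, so on the final (now no longer skipped) partial batch, rows [bsize:] are stale and only the first bsize predictions should be counted. A standalone sketch of the pattern (illustrative names, not the repository code):

    import numpy as np

    BATCH_SIZE, NUM_POINT = 16, 1024
    cur_batch_data = np.zeros((BATCH_SIZE, NUM_POINT, 3))

    def run_batch(batch_data, predict_fn):
        bsize = batch_data.shape[0]                 # may be < BATCH_SIZE at epoch end
        cur_batch_data[0:bsize, ...] = batch_data   # rows [bsize:] keep old values
        pred = predict_fn(cur_batch_data)           # model always gets a full batch
        return pred[0:bsize]                        # score only the fresh rows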
@@ -159,5 +160,5 @@ def eval_one_epoch(sess, ops, num_votes=1, topk=1):

if __name__=='__main__':
with tf.Graph().as_default():
-evaluate(num_votes=1)
+evaluate(num_votes=FLAGS.num_votes)
LOG_FOUT.close()
2 changes: 1 addition & 1 deletion modelnet_h5_dataset.py
@@ -90,7 +90,7 @@ def _load_data_file(self, filename):
self.current_data, self.current_label, _ = shuffle_data(self.current_data,self.current_label)

def _has_next_batch_in_file(self):
-return (self.batch_idx+1)*self.batch_size <= self.current_data.shape[0]
+return self.batch_idx*self.batch_size < self.current_data.shape[0]

def num_channel(self):
return 3
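
This one-line change is the fix named in the commit message: the old predicate answered True only while a full batch remained, so a trailing partial batch was silently skipped. A worked example with illustrative numbers:

    n, batch_size = 420, 16          # 26 full batches plus a partial batch of 4

    def old_check(batch_idx):
        # pre-fix: requires a complete batch to remain
        return (batch_idx + 1) * batch_size <= n

    def new_check(batch_idx):
        # post-fix: any unread sample yields one more batch
        return batch_idx * batch_size < n

    print(old_check(26), new_check(26))  # False True -> the last 4 samples are no longer dropped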
2 changes: 1 addition & 1 deletion train.py
@@ -120,7 +120,7 @@ def train():
# Get model and loss
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
MODEL.get_loss(pred, labels_pl, end_points)
-losses = tf.get_collection('losses', scope)
+losses = tf.get_collection('losses')
total_loss = tf.add_n(losses, name='total_loss')
tf.summary.scalar('total_loss', total_loss)
for l in losses + [total_loss]:
