Skip to content

Commit

Permalink
add more docstrings and update readme (#105)
Browse files Browse the repository at this point in the history
* docstrning for kpconv

* Add script usage in readme

* Update heading
  • Loading branch information
sanskar107 committed Oct 14, 2020
1 parent b813558 commit 32cd562
Show file tree
Hide file tree
Showing 3 changed files with 79 additions and 7 deletions.
22 changes: 22 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,28 @@ For more examples see [`examples/`](https://github.com/intel-isl/Open3D-ML/tree/
and the [`scripts/`](https://github.com/intel-isl/Open3D-ML/tree/master/scripts) directories.


### Using predefined scripts

[`scripts/semseg.py`](https://github.com/intel-isl/Open3D-ML/tree/master/scripts/semseg.py)
provides an easy interface for training and evaluating a model on a dataset. It saves
the trouble of defining a specific model and passing an exact configuration.

`python scripts/semseg.py {tf/torch} -c <path-to-config> --<extra args>`

Note that `extra args` will be prioritized over the same parameter present in the configuration file.
So instead of changing a parameter in the config file, you may pass it as a command-line argument when launching the script.

For example:
```
# Launch training for RandLANet on SemanticKITTI with torch.
python scripts/semseg.py torch -c ml3d/configs/randlanet_semantickitti.yml --dataset.dataset_path <path-to-dataset> --dataset.use_cache True
# Launch testing for KPConv on Toronto3D with tensorflow.
python scripts/semseg.py tf -c ml3d/configs/kpconv_toronto3d.yml --dataset.dataset_path <path-to-dataset> --model.ckpt_path <path-to-checkpoint>
```
For further help, run `python scripts/semseg.py --help`.


## Repository structure
The core part of Open3D-ML lives in the `ml3d` subfolder, which is integrated
into Open3D in the `ml` namespace. In addition to the core part, the directories
Expand Down
3 changes: 3 additions & 0 deletions ml3d/tf/models/kpconv.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,9 @@


class KPFCNN(BaseModel):
"""
Class defining KPFCNN
"""

def __init__(
self,
Expand Down
61 changes: 54 additions & 7 deletions ml3d/tf/models/network_blocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,20 @@ def __init__(self,
deform_fitting_power=1.0,
offset_param=False,
**kwargs):

"""
Initialize parameters for Kernel Point Convolution.
:param kernel_size: Number of kernel points.
:param p_dim: dimension of the point space.
:param in_channels: dimension of input features.
:param out_channels: dimension of output features.
:param KP_extent: influence radius of each kernel point.
:param radius: radius used for kernel point init. Even for deformable, use the config.conv_radius.
:param fixed_kernel_points: fix position of certain kernel points ('none', 'center' or 'verticals').
:param KP_influence: influence function of the kernel points ('constant', 'linear', 'gaussian').
:param aggregation_mode: choose to sum influences, or only keep the closest ('closest', 'sum').
:param deformable: choose deformable or not.
:param modulated: choose if kernel weights are modulated in addition to deformed.
"""
super(KPConv, self).__init__(**kwargs)

self.KP_extent = KP_extent # TODO : verify correct kp extent
Expand Down Expand Up @@ -418,6 +431,12 @@ def __repr__(self):
class BatchNormBlock(tf.keras.layers.Layer):

def __init__(self, in_dim, use_bn, bn_momentum):
"""
Initialize a batch normalization block. If network does not use batch normalization, replace with biases.
:param in_dim: dimension of input features.
:param use_bn: boolean indicating if we use Batch Norm.
:param bn_momentum: Batch norm momentum.
"""
super(BatchNormBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
Expand Down Expand Up @@ -453,7 +472,13 @@ def __init__(self,
bn_momentum,
no_relu=False,
l_relu=0.1):

"""
Initialize a standard unary block with its ReLU and BatchNorm.
:param in_dim: dimension of input features.
:param out_dim: dimension of output features.
:param use_bn: boolean indicating if we use Batch Norm.
:param bn_momentum: Batch norm momentum.
"""
super(UnaryBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
Expand Down Expand Up @@ -485,6 +510,13 @@ def __repr__(self):
class SimpleBlock(tf.keras.layers.Layer):

def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, cfg):
"""
Initialize a simple convolution block with its ReLU and BatchNorm.
:param in_dim: dimension of input features.
:param out_dim: dimension of output features.
:param radius: current radius of convolution.
:param cfg: parameters.
"""
super(SimpleBlock, self).__init__()

current_extent = radius * cfg.KP_extent / cfg.conv_radius
Expand Down Expand Up @@ -530,6 +562,9 @@ def call(self, x, batch, training=False):
class IdentityBlock(tf.keras.layers.Layer):

def __init__(self):
    """Construct the identity block; it holds no parameters or state."""
    super(IdentityBlock, self).__init__()

def call(self, x, training=False):
Expand All @@ -539,6 +574,13 @@ def call(self, x, training=False):
class ResnetBottleneckBlock(tf.keras.layers.Layer):

def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, cfg):
"""
Initialize a resnet bottleneck block.
:param in_dim: dimension of input features.
:param out_dim: dimension of output features.
:param radius: current radius of convolution.
:param cfg: parameters.
"""

super(ResnetBottleneckBlock, self).__init__()

Expand Down Expand Up @@ -644,7 +686,9 @@ def __repr__(self):
class NearestUpsampleBlock(tf.keras.layers.Layer):

def __init__(self, layer_ind):
    """
    Initialize a nearest upsampling block.

    :param layer_ind: index of the layer this block operates on; stored
        here and presumably used to look up upsampling indices in the
        batch at call time — TODO confirm against the call/forward method.
    """
    super(NearestUpsampleBlock, self).__init__()
    self.layer_ind = layer_ind
Expand All @@ -660,20 +704,23 @@ def __repr__(self):
class MaxPoolBlock(tf.keras.layers.Layer):

def __init__(self, layer_ind):
    """
    Initialize a max pool block.

    :param layer_ind: index of the layer this block operates on; stored
        here and used to index ``batch['pools']`` when pooling is applied.
    """
    super(MaxPoolBlock, self).__init__()
    self.layer_ind = layer_ind

def forward(self, x, batch):
return max_pool(x, batch['pools'][self.layer_ind +
1]) # TODO : check 1 here
return max_pool(x, batch['pools'][self.layer_ind + 1])


class GlobalAverageBlock(tf.keras.layers.Layer):

def __init__(self):
    """
    Initialize a global average block.

    The block takes no configuration and holds no learned parameters;
    construction only registers it as a Keras layer.
    """
    super(GlobalAverageBlock, self).__init__()

Expand Down

0 comments on commit 32cd562

Please sign in to comment.