Update and fix examples to new ModelNet class (#121)
Update the classification example to use the new ModelNet class in the Python scripts.

Signed-off-by: Jean-Francois Lafleche <jlafleche@nvidia.com>
Jean-Francois-Lafleche committed Jan 13, 2020
1 parent 48ec4b3 commit 6d22f6c
Showing 3 changed files with 113 additions and 60 deletions.
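In brief, the example scripts previously hard-coded a ModelNet10 point-cloud dataset (fixed path, categories, and device); they now read those settings from argparse and build the new ModelNet dataset, which yields triangle meshes and therefore needs an explicit mesh-to-point-cloud transform. A minimal before/after sketch, condensed from the diffs below (the dataset paths and categories are placeholders, and the values mirror the scripts' defaults):

# Before: the old ModelNet10 dataset returned point clouds directly,
# with root, categories, and device hard-coded in the script.
import kaolin as kal
from torch.utils.data import DataLoader

normpc = kal.transforms.NormalizePointCloud()
old_data = kal.datasets.ModelNet10('/path/to/ModelNet10', categories=['bed', 'bathtub'],
                                   split='train', rep='pointcloud',
                                   transform=normpc, device='cuda:0')
old_loader = DataLoader(old_data, batch_size=12, shuffle=True)

# After: the new ModelNet dataset loads triangle meshes, so points are sampled
# and normalized by a transform pipeline; settings come from command-line args.
import kaolin.transforms as tfs
from kaolin.datasets import ModelNet

transform = tfs.Compose([
    tfs.TriangleMeshToPointCloud(num_samples=1024),
    tfs.NormalizePointCloud()
])
new_data = ModelNet('/path/to/ModelNet', categories=['chair', 'sofa'],
                    split='train', transform=transform, device='cuda:0')
new_loader = DataLoader(new_data, batch_size=12, shuffle=True)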
63 changes: 32 additions & 31 deletions examples/Classification/point_cloud_classification.ipynb

Large diffs are not rendered by default.

65 changes: 46 additions & 19 deletions examples/Classification/pointcloud_classification.py
@@ -1,29 +1,46 @@
import argparse

from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm, trange

import kaolin as kal
from kaolin import ClassificationEngine
from kaolin.datasets import ModelNet
from kaolin.models.PointNet import PointNetClassifier
import kaolin.transforms as tfs
from utils import visualize_batch

parser = argparse.ArgumentParser()
parser.add_argument('--modelnet-root', type=str, help='Root directory of the ModelNet dataset.')
parser.add_argument('--categories', type=str, nargs='+', default=['chair', 'sofa'], help='list of object classes to use.')
parser.add_argument('--num-points', type=int, default=1024, help='Number of points to sample from meshes.')
parser.add_argument('--epochs', type=int, default=10, help='Number of train epochs.')
parser.add_argument('-lr', '--learning-rate', type=float, default=1e-3, help='Learning rate.')
parser.add_argument('--batch-size', type=int, default=12, help='Batch size.')
parser.add_argument('--device', type=str, default='cuda', help='Device to use.')

args = parser.parse_args()


transform = tfs.Compose([
    tfs.TriangleMeshToPointCloud(num_samples=args.num_points),
    tfs.NormalizePointCloud()
])

train_loader = DataLoader(ModelNet(args.modelnet_root, categories=args.categories,
                                   split='train', transform=transform, device=args.device),
                          batch_size=args.batch_size, shuffle=True)

epochs = 10
lr = 1e-3
device = 'cuda:0'
normpc = kal.transforms.NormalizePointCloud()
data = kal.datasets.ModelNet10('/path/to/ModelNet10',
                               categories=['bed', 'bathtub'],
                               split='train', rep='pointcloud',
                               transform=normpc, device=device)
loader = DataLoader(data, batch_size=12, shuffle=True)
val_data = kal.datasets.ModelNet10('/path/to/ModelNet10',
                                   categories=['bed', 'bathtub'],
                                   split='test', rep='pointcloud',
                                   transform=normpc, device=device)
val_loader = DataLoader(val_data, batch_size=10, shuffle=False)
model = kal.models.PointNet.PointNetClassifier(num_classes=2).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
val_loader = DataLoader(ModelNet(args.modelnet_root, categories=args.categories,
                                 split='test', transform=transform, device=args.device),
                        batch_size=args.batch_size)

model = PointNetClassifier(num_classes=len(args.categories)).to(args.device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
criterion = torch.nn.CrossEntropyLoss()

for e in range(epochs):
for e in range(args.epochs):

print('###################')
print('Epoch:', e)
@@ -36,7 +53,7 @@
model.train()

optimizer.zero_grad()
for idx, batch in enumerate(tqdm(loader)):
for idx, batch in enumerate(tqdm(train_loader)):
pred = model(batch[0])
loss = criterion(pred, batch[1].view(-1))
train_loss += loss.item()
@@ -72,3 +89,13 @@

print('Val loss:', val_loss / num_batches)
print('Val accuracy:', val_accuracy / num_batches)

test_loader = DataLoader(ModelNet(args.modelnet_root, categories=args.categories,
                                  split='test', transform=transform, device=args.device),
                         shuffle=True, batch_size=15)

test_batch, labels = next(iter(test_loader))
preds = model(test_batch)
pred_labels = torch.max(preds, dim=1)[1]

visualize_batch(test_batch, pred_labels, labels, args.categories)
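The script imports visualize_batch from a local utils module that is not part of this commit; the sketch below is only a guess at such a helper, assuming a matplotlib-based function that scatter-plots each point cloud in the batch and titles it with the predicted and ground-truth category names:

# Hypothetical stand-in for utils.visualize_batch (the real helper is not shown
# in this diff): plot each point cloud with its predicted and true category.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers the '3d' projection)


def visualize_batch(pointclouds, pred_labels, labels, categories):
    batch_size = len(pointclouds)
    fig = plt.figure(figsize=(3 * batch_size, 3))
    for i, pc in enumerate(pointclouds):
        pts = pc.detach().cpu().numpy()
        ax = fig.add_subplot(1, batch_size, i + 1, projection='3d')
        ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=2)
        ax.set_title('pred: {}\ngt: {}'.format(categories[int(pred_labels[i])],
                                               categories[int(labels[i])]))
        ax.set_axis_off()
    plt.show()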
45 changes: 35 additions & 10 deletions examples/Classification/pointcloud_classification_engine.py
@@ -1,17 +1,42 @@
import argparse

import torch
from torch.utils.data import DataLoader

import kaolin as kal
import kaolin.transforms as tfs
from kaolin import ClassificationEngine
from kaolin.datasets import ModelNet10
from kaolin.models.PointNet import PointNetClassifier as PointNet
from kaolin.datasets import ModelNet
from kaolin.models.PointNet import PointNetClassifier
from kaolin.transforms import NormalizePointCloud

norm = NormalizePointCloud()
train_loader = DataLoader(ModelNet10('/path/to/ModelNet10', categories=['chair', 'sofa'],
                                     split='train', rep='pointcloud', transform=norm, device='cuda:0'),
                          batch_size=12, shuffle=True)
val_loader = DataLoader(ModelNet10('/path/to/ModelNet10', categories=['chair', 'sofa'],
                                   split='test', rep='pointcloud', transform=norm, device='cuda:0'),
                        batch_size=12)
engine = ClassificationEngine(PointNet(num_classes=2), train_loader, val_loader, device='cuda:0')

parser = argparse.ArgumentParser()
parser.add_argument('--modelnet-root', type=str, help='Root directory of the ModelNet dataset.')
parser.add_argument('--categories', type=str, nargs='+', default=['chair', 'sofa'], help='list of object classes to use.')
parser.add_argument('--num-points', type=int, default=1024, help='Number of points to sample from meshes.')
parser.add_argument('--epochs', type=int, default=10, help='Number of train epochs.')
parser.add_argument('-lr', '--learning-rate', type=float, default=1e-3, help='Learning rate.')
parser.add_argument('--batch-size', type=int, default=12, help='Batch size.')
parser.add_argument('--device', type=str, default='cuda', help='Device to use.')

args = parser.parse_args()

assert len(args.categories) >= 2, 'At least two categories must be specified.'

transform = tfs.Compose([
    tfs.TriangleMeshToPointCloud(num_samples=args.num_points),
    tfs.NormalizePointCloud()
])

train_loader = DataLoader(ModelNet(args.modelnet_root, categories=args.categories,
                                   split='train', transform=transform, device=args.device),
                          batch_size=args.batch_size, shuffle=True)

val_loader = DataLoader(ModelNet(args.modelnet_root, categories=args.categories,
                                 split='test', transform=transform, device=args.device),
                        batch_size=args.batch_size)

model = PointNetClassifier(num_classes=len(args.categories))
engine = ClassificationEngine(model, train_loader, val_loader, device=args.device)
engine.fit()
