"""Kinect point-cloud surface reconstruction using IF-Net.

Reads a Kinect point cloud from CSV, normalizes it into the unit cube,
voxelizes it into a binary occupancy grid, and runs a pretrained IF-Net
(SVR) generator to reconstruct a watertight mesh.
"""

import os
import time

import numpy as np
import torch
import plotly.graph_objects as go
from scipy.spatial import cKDTree as KDTree

import data_processing.implicit_waterproofing as iw
import models.local_model as model
from models.generation import Generator


def scale(points, padding=1.4):
    """Normalize *points* in place into a centered, padded unit cube.

    Mirrors data_processing/convert_to_scaled_off.py: translate the
    bounding-box center to the origin, scale the longest bounding-box
    edge to length 1, then shrink by *padding* so the cloud sits well
    inside the [-0.5, 0.5] voxelization bounds used by occupancy().

    Args:
        points: (N, 3) float array. NOTE: mutated in place.
        padding: extra shrink factor; 1.4 matches the IF-Net pipeline.

    Returns:
        The same (mutated) array, for call-chaining convenience.
    """
    # bounds[0] = per-axis minima, bounds[1] = per-axis maxima.
    bounds = np.stack([points.min(axis=0), points.max(axis=0)])
    total_size = (bounds[1] - bounds[0]).max()
    centers = (bounds[1] + bounds[0]) / 2
    points -= centers
    points /= total_size
    points /= padding
    return points


def occupancy(points, res, sample_size):
    """Voxelize a normalized point cloud into a binary occupancy grid.

    Mirrors data_processing/voxelized_pointcloud_sampling.py: build a
    res**3 grid over [-0.5, 0.5]^3, subsample the cloud, and mark the
    grid cell nearest to each sampled point as occupied.

    Args:
        points: (N, 3) array, already normalized by scale().
        res: grid resolution per axis.
        sample_size: number of points to subsample without replacement
            (capped at N so the draw cannot raise for small clouds).

    Returns:
        (res, res, res) int8 array with 1 at occupied cells, else 0.
    """
    bb_min = -0.5
    bb_max = 0.5

    # Build the voxel grid; the returned grid points are flattened
    # to shape (res**3, 3).
    grid_points = iw.create_grid_points_from_bounds(bb_min, bb_max, res)
    kdtree = KDTree(grid_points)

    # Subsample the cloud. Cap at N: replace=False would raise
    # ValueError if sample_size exceeded the number of points.
    n_samples = min(sample_size, points.shape[0])
    random_indices = np.random.choice(points.shape[0], size=n_samples,
                                      replace=False)
    sampled_points = points[random_indices]

    # Flat occupancy array: flag the closest grid point of each sample.
    occupancies = np.zeros(len(grid_points), dtype=np.int8)
    _, idx = kdtree.query(sampled_points)
    occupancies[idx] = 1

    # Reshape the flat occupancy vector into a 3D tensor.
    return np.reshape(occupancies, (res,) * 3)


def plot_occupancy(occ):
    """Scatter-plot the occupied voxels of a cubic occupancy grid."""
    res = occ.shape[0]

    # Integer coordinate grid matching the occupancy layout.
    xs, ys, zs = np.mgrid[0:res, 0:res, 0:res]

    # Keep only the occupied cells.
    mask = occ.flatten() == 1
    x = xs.flatten()[mask]
    y = ys.flatten()[mask]
    z = zs.flatten()[mask]

    fig = go.Figure(data=[go.Scatter3d(
        x=x, y=y, z=z,
        mode='markers',
        marker=dict(
            size=1,
            color=z,                 # set color to an array/list of desired values
            colorscale='Viridis',    # choose a colorscale
            opacity=0.8,
        ),
    )])
    # tight layout
    fig.update_layout(scene_aspectmode='data')
    fig.show()


if __name__ == "__main__":
    # Read the point cloud from a CSV file, dropping the header row.
    kinect_pc = np.genfromtxt('kiya_data/frame_20.csv', delimiter=',')
    kinect_pc = kinect_pc[1:]

    # Orientation matters: flip y and z into the frame IF-Net expects.
    kinect_pc[:, 1] = -kinect_pc[:, 1]
    kinect_pc[:, 2] = -kinect_pc[:, 2]

    # Normalize following the processing steps from the IF-Net readme.
    kinect_pc = scale(kinect_pc)

    # Voxelize into an occupancy grid.
    res = 400  # 256 / 128 also work
    sample_size = 36000
    kinect_occupancy = occupancy(kinect_pc, res, sample_size)

    # Optional sanity check of the voxelization:
    # plot_occupancy(kinect_occupancy)

    # Build the model and load the pretrained checkpoint.
    net = model.SVR()
    retrieval_res = 400  # 256
    batch_points = 100000
    gen = Generator(net, 0.5, exp_name='SVR', checkpoint=6,
                    resolution=retrieval_res, batch_points=batch_points)

    # Predict occupancy logits, timing the inference.
    start = time.perf_counter()
    inputs = torch.tensor(kinect_occupancy, dtype=torch.float32)
    inputs = torch.unsqueeze(inputs, 0)  # add batch dimension
    data = {'inputs': inputs}
    logits = gen.generate_mesh(data)
    middle = time.perf_counter()
    elapsed = (middle - start) / 60
    print(f'Prediction time: {elapsed}')

    # Extract the mesh from the predicted logits, timing extraction.
    mesh = gen.mesh_from_logits(logits)
    end = time.perf_counter()
    elapsed = (end - middle) / 60
    print(f'Mesh generation time: {elapsed}')

    # Save the reconstructed surface to disk.
    export_path = 'kiya_out/'
    mesh.export(export_path + 'surface_reconstruction.off')