@@ -1,6 +1,5 @@
import cv2
import numpy as np
from util import *

PATH_PLY = "scan.ply"
PATH_IMAGES ="images/"
@@ -19,6 +18,12 @@
cx = K[0][2]
cy = K[1][2]

def vflip_image(image):
    """Return *image* mirrored top-to-bottom."""
    # flipCode 0 flips around the horizontal (x) axis.
    flipped = cv2.flip(image, 0)
    return flipped

def hflip_image(image):
    """Return *image* mirrored left-to-right."""
    # flipCode 1 flips around the vertical (y) axis.
    flipped = cv2.flip(image, 1)
    return flipped

def init_ply(path_ply=PATH_PLY):
# Write the file header.
headers = ['ply\n', 'format ascii 1.0\n', 'element vertex 0\n',
@@ -82,85 +87,24 @@ def update_vertex_count_ply(updated_vertex_count, path_ply=PATH_PLY):
def r_rgb(image):
    """Return the first channel of *image* (the R plane of an RGB frame)."""
    channels = cv2.split(image)
    return channels[0]

def pattern_detection(image):
    """Detect the 9x5 inner-corner chessboard pattern in an RGB frame.

    Returns (corners, ret): `corners` is the corner array from OpenCV
    (refined to sub-pixel accuracy when the pattern was found, `None`
    otherwise) and `ret` is the detection success flag.
    """
    rows, columns = 5, 9
    # Corner detection works on a single-channel image.
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

    # Find the chess board corners (FAST_CHECK bails out early on frames
    # with no visible pattern).
    ret, corners = cv2.findChessboardCorners(gray, (columns, rows), flags=cv2.CALIB_CB_FAST_CHECK)

    # BUG FIX: the original called cornerSubPix unconditionally; when no
    # pattern is found `corners` is None and the call raises. Only refine
    # on success.
    if ret:
        # Termination criteria for the iterative sub-pixel refinement.
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

    return corners, ret

def draw_pattern(image, corners, ret):
    """Return an RGB image with the detected chessboard corners drawn on it."""
    pattern_size = (9, 5)  # (columns, rows) of inner corners
    # drawChessboardCorners expects BGR ordering: convert, draw, convert back.
    bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    cv2.drawChessboardCorners(bgr, pattern_size, corners, ret)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

def compute_calibration(images, show=False):

# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
chessboards = [] # images with chessboard painted
def point_detection(imlaser, imbk):

for fname in images:
img = cv2.imread(fname)
img = cv2.transpose(img)
img = cv2.flip(img, 1)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (columns,rows), None)

# If found, add object points, image points (after refining them)
if ret:
objpoints.append(objp)

# Perform corner subpixel detection
cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
imgpoints.append(corners)

img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
chessboards.append(img)

# Perform camera calibration
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
# Compute calibration error
n = len(objpoints)
error = 0
for i in range(n):
imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
error += cv2.norm(imgpoints[i],imgpoints2, cv2.NORM_L2)/(len(imgpoints2))
error /= n

return error, mtx, dist, chessboards

def point_detection(image1, image2):


image_laser = r_rgb(image1)
image_background = r_rgb(image2)
# image subtraction and channel subtraction
imsub = cv2.subtract(imlaser, imbk)
r, g, b = cv2.split(imsub)
imsub = cv2.subtract(r, cv2.divide(cv2.add(g, b), 2))

imsub = r_rgb(cv2.subtract(image_laser, image_background))
# rotation
imsub = vflip_image(imsub)
imsub = hflip_image(imsub)


start_px = 0
stop_px = imsub.shape[0]
sample_rate = 2
threshold = np.uint8(30)
threshold = np.uint8(25)

pcl = np.zeros((3,5000))
pcl = np.zeros((3,2500))

pcl_count = 0
y = 0
@@ -173,176 +117,40 @@ def point_detection(image1, image2):
pcl[:, pcl_count] = np.array([x,y,z])
pcl_count += 1
return pcl[:, :pcl_count]

def roi_point_detection(image1, image2):
    """Detect laser-stripe points restricted to the chessboard ROI.

    image1 is the RGB frame with the laser on, image2 the matching frame
    with the laser off. Returns a 3xN array of [x, y, z] pixel
    coordinates (y is always 0; z is the image row, x the stripe column).
    """
    rows, columns = 5, 9
    h, w, d = image1.shape

    corners, ret = pattern_detection(image1)
    # BUG FIX: the original ignored `ret`; when no pattern is found
    # `corners` is None and the code below would crash.
    if not ret:
        return np.zeros((3, 0))

    # BUG FIX: np.int was removed in NumPy 1.24; builtin int is the
    # equivalent dtype alias.
    corners = corners.astype(int)

    # The four outer corners of the detected pattern.
    p1 = corners[0][0]
    p2 = corners[columns - 1][0]
    p3 = corners[columns * (rows - 1)][0]
    p4 = corners[columns * rows - 1][0]

    # Compute ROI: a filled quadrilateral mask over the chessboard.
    roi_mask = np.zeros((h, w), np.uint8)
    points = np.array([p1, p2, p4, p3])
    cv2.fillConvexPoly(roi_mask, points, 255)

    image_laser = r_rgb(image1)
    image_background = r_rgb(image2)

    # Laser-only signal: subtract the background frame, keep the ROI.
    image_sub = r_rgb(cv2.subtract(image_laser, image_background))
    image_sub = cv2.bitwise_and(image_sub, roi_mask)

    threshold_value = 30
    image_threshold = cv2.threshold(image_sub, threshold_value, 255, cv2.THRESH_TOZERO)[1]

    # Blur + re-threshold to suppress isolated noise pixels.
    blur_value = 5
    image_blur = cv2.blur(image_threshold, (blur_value, blur_value))
    image_blur_threshold = cv2.threshold(image_blur, threshold_value, 255, cv2.THRESH_TOZERO)[1]

    # Keep a narrow band (window columns either side) around the per-row
    # intensity peak.
    window = 4
    peak = image_blur_threshold.argmax(axis=1)
    _min = peak - window
    _max = peak + window + 1
    mask = np.zeros_like(image_blur_threshold)
    for i in range(image_blur_threshold.shape[0]):
        mask[i, _min[i]:_max[i]] = 255

    image_stripe = cv2.bitwise_and(image_sub, mask)

    # Sub-pixel stripe center per row: intensity-weighted column average.
    h, w = image_stripe.shape
    weight_matrix = np.array((np.matrix(np.linspace(0, w - 1, w)).T * np.matrix(np.ones(h))).T)
    s = image_stripe.sum(axis=1)
    v = np.where(s > 0)[0]
    u = (weight_matrix * image_stripe).sum(axis=1)[v] / s[v]

    # Fit a line in Hesse normal form to the stripe centers with RANSAC.
    data = np.vstack((v.ravel(), u.ravel())).T
    model, inliers = ransac(data, LinearLeastSquares2D(), 2, 2)

    dr, thetar = model
    f = (dr - v * math.sin(thetar)) / math.cos(thetar)

    # Rasterize the fitted line and overlay it (red) on the stripe image.
    image_line_lr = np.zeros_like(image_threshold)
    image_line_lr[v, np.around(f).astype(int)] = 255

    image_stripe = cv2.merge((cv2.add(image_sub, image_line_lr), image_line_lr, image_line_lr))
    image_stripe_r = r_rgb(image_stripe)

    # Sample the stripe every `sample_rate` rows above an intensity threshold.
    start_px = 0
    stop_px = image_stripe_r.shape[0]
    sample_rate = 2
    threshold = np.uint8(30)

    pcl = np.zeros((3, 3500))
    pcl_count = 0
    y = 0
    for z in range(start_px, stop_px, sample_rate):
        intensity = np.amax(image_stripe_r[z, :])
        if intensity > threshold:
            x = np.argmax(image_stripe_r[z, :])
            pcl[:, pcl_count] = np.array([x, y, z])
            pcl_count += 1
    return pcl[:, :pcl_count]

def world_unit_transform(image1, image2):
    """Back-project detected stripe pixels onto the chessboard plane.

    image1 is the laser-on frame, image2 the laser-off background frame.
    Returns a 3xN array of camera-space coordinates (Xc), or zeros when
    the chessboard pose cannot be solved.
    """
    points = roi_point_detection(image1, image2)
    u = points[0, :]
    v = points[2, :]

    # Normalized camera rays through each detected pixel (z = 1 plane).
    x = np.empty_like(points)
    x[0, :] = (u - cx) / fx
    x[1, :] = (v - cy) / fy
    x[2, :] = 1

    # BUG FIX: the original referenced the undefined name
    # `image_background` (a local of other functions), raising NameError.
    # The background frame passed to this function is image2.
    corners, objp = findCorners(image2)
    ret, rvecs, tvecs = cv2.solvePnP(objp, corners, K, np.array([]))
    if ret:
        R = cv2.Rodrigues(rvecs)[0]
        t = tvecs.T[0]
        # Plane normal in the camera frame and its distance to the origin.
        n = R.T[2]
        d = np.dot(n, t)

        # Intersect each ray with the plane n . X = d.
        Xc = (d / n.T.dot(x)) * x
    else:
        Xc = np.zeros_like(x)

    return Xc

def compute_laser_matrices(filename):
    """Fit a plane to the point cloud stored in *filename* (PLY).

    Prints and returns (normal, dist, std): the unit plane normal
    (z component forced positive), the plane's distance to the origin in
    mm, and the standard deviation of the points' signed distances to
    the plane. Previously only printed; returning the values is a
    backward-compatible improvement (callers that ignored the None
    return are unaffected).
    """
    # Load point cloud
    X = load_ply(filename)

    n = X.shape[0]
    Xm = X.sum(axis=0) / n          # centroid
    M = np.array(X - Xm).T

    # Equivalent to:
    # numpy.linalg.svd(M)[0][:,2]
    # But 1200x times faster for large point clouds: only the two
    # dominant left singular vectors are needed; their cross product is
    # the plane normal.
    U = linalg.svds(M, k=2)[0]
    normal = np.cross(U.T[0], U.T[1])
    if normal[2] < 0:
        normal *= -1

    dist = np.dot(normal, Xm)
    std = np.dot(M.T, normal).std()

    print("\nNormal vector\n\n{0}\n".format(normal))
    print("\nPlane distance\n\n{0} mm\n".format(dist))
    print("\nStandard deviation\n\n{0} mm\n".format(std))

    return normal, dist, std

def main(path_images=PATH_IMAGES, path_ply=PATH_PLY):
    """Run the full scan: process 400 image pairs into a PLY point cloud.

    path_images: directory containing imageN.jpg / imageN_laserOff.jpg
    pairs for N in 1..400. path_ply: output PLY file path.

    NOTE(review): this block appeared twice in an interleaved diff; the
    reconstruction below keeps the parameterized (new) side and removes
    the duplicated statements.
    """
    init_ply(path_ply=path_ply)
    vcount = 0

    for i in range(1, 401):
        imfile = path_images + "image" + str(i) + ".jpg"
        imfile2 = path_images + "image" + str(i) + "_laserOff.jpg"

        image_laser = load_image(imfile)
        image_bk = load_image(imfile2)

        # 400 frames over a full turn: pi/200 radians per frame.
        theta = i * (np.pi / 200)

        pcl = point_detection(image_laser, image_bk)

        # Shift x so the rotation axis is at the origin (750 px offset;
        # presumably the axis column in the image -- TODO confirm).
        diff = np.zeros((3, pcl.shape[1]))
        diff[0].fill(750)
        pcl -= diff

        rot = pcl_rotate(theta, pcl)

        # Save us time from opening a file if there aren't any points to write.
        if rot.size != 0:
            append_ply(rot, path_ply=path_ply)

        vcount += pcl.shape[1]
        print("Processed image {} with {} points.".format(i, rot.shape[1]))

    update_vertex_count_ply(vcount, path_ply=path_ply)

def load_ply(filename):
with open(filename, 'r') as f:
@@ -356,74 +164,4 @@ def load_ply(filename):
data = [l.split()[:3] for l in data]
data = [tuple(l) for l in data]

return np.array(data, dtype=np.float64)

class LinearLeastSquares2D(object):
    '''
    2D linear least squares model using the Hesse normal form:
        d = x*sin(theta) + y*cos(theta)
    which can also represent vertical lines.
    '''

    def fit(self, data):
        # Center the samples so the fitted line passes through the centroid.
        centroid = data.mean(axis=0)
        x0, y0 = centroid
        n_points = data.shape[0]
        if n_points > 2:
            # Over-determined: the dominant right-singular vector of the
            # centered data gives the line direction.
            _, _, vt = np.linalg.svd(data - centroid)
            direction = vt[0]
            theta = math.atan2(direction[0], direction[1])
        elif n_points == 2:
            # Exactly two samples define the line directly.
            theta = math.atan2(data[1, 0] - data[0, 0], data[1, 1] - data[0, 1])
        # Normalize the angle into [0, 2*pi).
        theta = (theta + math.pi * 5 / 2) % (2 * math.pi)
        d = x0 * math.sin(theta) + y0 * math.cos(theta)
        return d, theta

    def residuals(self, model, data):
        # Absolute distance of each sample from the line d = x*sin + y*cos.
        d, theta = model
        predicted = data[:, 0] * math.sin(theta) + data[:, 1] * math.cos(theta)
        return np.abs(d - predicted)

    def is_degenerate(self, sample):
        # Any sample of points is accepted as non-degenerate.
        return False

def ransac(data, model_class, min_samples, threshold, max_trials=100):
    '''
    Fits a model to data with the RANSAC algorithm.
    :param data: numpy.ndarray
        data set to which the model is fitted, must be of shape NxD where
        N is the number of data points and D the dimensionality of the data
    :param model_class: object
        object with the following methods implemented:
         * fit(data): return the computed model
         * residuals(model, data): return residuals for each data point
         * is_degenerate(sample): return boolean value if sample choice is
            degenerate
        see LinearLeastSquares2D class for a sample implementation
    :param min_samples: int
        the minimum number of data points to fit a model
    :param threshold: int or float
        maximum distance for a data point to count as an inlier
    :param max_trials: int, optional
        maximum number of iterations for random sample selection, default 100
    :returns: tuple
        best model returned by model_class.fit, best inlier indices
        (both None when no non-degenerate sample was ever drawn)
    '''
    best_model = None
    best_inlier_num = 0
    best_inliers = None
    data_idx = np.arange(data.shape[0])
    for _ in range(max_trials):
        # BUG FIX: the original hard-coded a sample size of 2, ignoring
        # the min_samples parameter, and sampled WITH replacement (the
        # same point could be drawn twice, yielding a degenerate fit).
        sample_idx = np.random.choice(data.shape[0], min_samples, replace=False)
        sample = data[sample_idx]
        if model_class.is_degenerate(sample):
            continue
        sample_model = model_class.fit(sample)
        sample_model_residua = model_class.residuals(sample_model, data)
        sample_model_inliers = data_idx[sample_model_residua < threshold]
        inlier_num = sample_model_inliers.shape[0]
        if inlier_num > best_inlier_num:
            best_inlier_num = inlier_num
            best_inliers = sample_model_inliers
    # Refit once on the best consensus set. The original performed this
    # refit inside the loop on every trial -- same result, wasted work.
    if best_inliers is not None:
        best_model = model_class.fit(data[best_inliers])
    return best_model, best_inliers
return np.array(data, dtype=np.float64)
Binary file not shown.
@@ -1 +1 @@
meshlabserver -i /home/isaias/Documents/surface_reconstruction/cylinder.ply -o /home/isaias/Documents/surface_reconstruction/cylinder_mesh.stl -s /home/isaias/Documents/new_sdp/script.mlx
meshlabserver -i scan.ply -o mesh.stl -s script.mlx

Large diffs are not rendered by default.

@@ -1,15 +1,23 @@
<!DOCTYPE FilterScript>

<FilterScript>
<filter name ="Compute normals for point sets">
<Param type="RichInt" value="100" name="K"/>
<Param type="RichBool" value="false" name="flipFlag"/>
<Param x="0" y="0" z="0" type="RichPoint3f" name="viewPos"/>
</filter>
<filter name="Surface Reconstruction: Poisson">
<Param type="RichInt" value="8" name="OctDepth"/>
<Param type="RichInt" value="6" name="SolverDivide"/>
<Param type="RichFloat" value="1" name="SamplesPerNode"/>
<Param type="RichFloat" value="1" name="Offset"/>
</filter>
<filter name="Poisson-disk Sampling">
<Param name="SampleNum" description="Number of samples" value="250000" tooltip="The desired number of samples. The ray of the disk is calculated according to the sampling density." type="RichInt"/>
<Param name="Radius" description="Explicit Radius" min="0" max="119.088" value="0" tooltip="If not zero this parameter override the previous parameter to allow exact radius specification" type="RichAbsPerc"/>
<Param name="MontecarloRate" description="MonterCarlo OverSampling" value="2000" tooltip="The over-sampling rate that is used to generate the intial Montecarlo samples (e.g. if this parameter is &lt;i>K&lt;/i> means that&lt;i>K&lt;/i> x &lt;i>poisson sample&lt;/i> points will be used). The generated Poisson-disk samples are a subset of these initial Montecarlo samples. Larger this number slows the process but make it a bit more accurate." type="RichInt"/>
<Param name="ApproximateGeodesicDistance" description="Approximate Geodesic Distance" value="false" tooltip="If true Poisson Disc distances are computed using an approximate geodesic distance, e.g. an euclidean distance weighted by a function of the difference between the normals of the two points." type="RichBool"/>
<Param name="Subsample" description="Base Mesh Subsampling" value="true" tooltip="If true the original vertices of the base mesh are used as base set of points. In this case the SampleNum should be obviously much smaller than the original vertex number.&lt;br>Note that this option is very useful in the case you want to subsample a dense point cloud." type="RichBool"/>
<Param name="RefineFlag" description="Refine Existing Samples" value="false" tooltip="If true the vertices of the below mesh are used as starting vertices, and they will utterly refined by adding more and more points until possible. " type="RichBool"/>
<Param name="RefineMesh" description="Samples to be refined" value="0" tooltip="Used only if the above option is checked. " type="RichMesh"/>
</filter>
<filter name="Compute normals for point sets">
<Param name="K" description="Neighbour num" value="200" tooltip="The number of neighbors used to estimate normals." type="RichInt"/>
<Param name="flipFlag" description="Flip normals w.r.t. viewpoint" value="false" tooltip="If the 'viewpoint' (i.e. scanner position) is known, it can be used to disambiguate normals orientation, so that all the normals will be oriented in the same direction." type="RichBool"/>
<Param name="viewPos" description="Viewpoint Pos." x="0" y="0" z="0" tooltip="The viewpoint position can be set by hand (i.e. getting the current viewpoint) or it can be retrieved from mesh camera, if the viewpoint position is stored there." type="RichPoint3f"/>
</filter>
<filter name="Surface Reconstruction: Poisson">
<Param name="OctDepth" description="Octree Depth" value="10" tooltip="Set the depth of the Octree used for extracting the final surface. Suggested range 5..10. Higher numbers mean higher precision in the reconstruction but also higher processing times. Be patient.&#xa;" type="RichInt"/>
<Param name="SolverDivide" description="Solver Divide" value="6" tooltip="This integer argument specifies the depth at which a block Gauss-Seidel solver is used to solve the Laplacian equation.&#xa;Using this parameter helps reduce the memory overhead at the cost of a small increase in reconstruction time. &#xa;In practice, the authors have found that for reconstructions of depth 9 or higher a subdivide depth of 7 or 8 can reduce the memory usage.&#xa;The default value is 8.&#xa;" type="RichInt"/>
<Param name="SamplesPerNode" description="Samples per Node" value="1" tooltip="This floating point value specifies the minimum number of sample points that should fall within an octree node as the octree&#xa;construction is adapted to sampling density. For noise-free samples, small values in the range [1.0 - 5.0] can be used.&#xa;For more noisy samples, larger values in the range [15.0 - 20.0] may be needed to provide a smoother, noise-reduced, reconstruction.&#xa;The default value is 1.0." type="RichFloat"/>
<Param name="Offset" description="Surface offsetting" value="1" tooltip="This floating point value specifies a correction value for the isosurface threshold that is chosen.&#xa;Values &lt; 1 means internal offsetting, >1 external offsetting.Good values are in the range 0.5 .. 2.&#xa;The default value is 1.0 (no offsetting)." type="RichFloat"/>
</filter>
</FilterScript>
@@ -0,0 +1,15 @@
<!DOCTYPE FilterScript>

<FilterScript>
<filter name ="Compute normals for point sets">
<Param type="RichInt" value="100" name="K"/>
<Param type="RichBool" value="false" name="flipFlag"/>
<Param x="0" y="0" z="0" type="RichPoint3f" name="viewPos"/>
</filter>
<filter name="Surface Reconstruction: Poisson">
<Param type="RichInt" value="8" name="OctDepth"/>
<Param type="RichInt" value="6" name="SolverDivide"/>
<Param type="RichFloat" value="1" name="SamplesPerNode"/>
<Param type="RichFloat" value="1" name="Offset"/>
</filter>
</FilterScript>
@@ -0,0 +1,16 @@
import subprocess
import os

# Invoke MeshLab in batch mode: point cloud in, mesh out, using the
# filter pipeline defined in script.mlx.
command = "meshlabserver -i scan.ply -o mesh.stl -s script.mlx"

# BUG FIX: the original passed the string literal AND `command` to Popen
# (`Popen("meshlabserver ..."command, ...)`) -- a syntax error. The dead
# `command = "echo DOG"` assignment and commented-out alternatives were
# removed as well.
# NOTE: shell=True is acceptable only because the command is a constant
# string; prefer an argument list with shell=False for variable input.
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
process.wait()
print(process.returncode)
print("COMPLETE")
@@ -0,0 +1,10 @@
from picamera import PiCamera
import time

# Configure the Raspberry Pi camera module.
camera = PiCamera()
# Fixed exposure; picamera's shutter_speed is presumably in microseconds
# (5 ms) -- TODO confirm against the picamera API docs.
camera.shutter_speed = 5000
# Square capture to match the scanner's processing pipeline.
camera.resolution = (1080, 1080)

# Give the sensor time to settle (gain/white balance) before capturing.
time.sleep(3)

# use_video_port=True captures from the video port: faster, at some cost
# in image quality.
camera.capture('testing.jpg', use_video_port=True)