forked from colmap/colmap
/
patch_match_cuda.cu
1888 lines (1649 loc) · 72.4 KB
/
patch_match_cuda.cu
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright (c) 2023, ETH Zurich and UNC Chapel Hill.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#define _USE_MATH_DEFINES
#include "colmap/mvs/patch_match_cuda.h"
#include "colmap/util/cuda.h"
#include "colmap/util/cudacc.h"
#include "colmap/util/logging.h"
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstdint>
#include <sstream>
// The number of threads per Cuda thread. Warning: Do not change this value,
// since the templated window sizes rely on this value.
#define THREADS_PER_BLOCK 32
// We must not include "util/math.h" to avoid any Eigen includes here,
// since Visual Studio cannot compile some of the Eigen/Boost expressions.
#ifndef DEG2RAD
// Convert degrees to radians. The argument and the full expansion are
// parenthesized so that compound expressions such as DEG2RAD(a + b) or
// 1.0 / DEG2RAD(x) expand correctly.
#define DEG2RAD(deg) ((deg) * 0.0174532925199432)
#endif
namespace colmap {
namespace mvs {
// Calibration of reference image as {fx, cx, fy, cy}.
__constant__ float ref_K[4];
// Calibration of reference image as {1/fx, -cx/fx, 1/fy, -cy/fy}, i.e. the
// inverse calibration used throughout this file to un-project a pixel to
// normalized camera coordinates: x = ref_inv_K[0] * col + ref_inv_K[1].
__constant__ float ref_inv_K[4];
// Multiply a row-major 3x3 matrix with a 3-vector: result = mat * vec.
__device__ inline void Mat33DotVec3(const float mat[9],
                                    const float vec[3],
                                    float result[3]) {
#pragma unroll
  for (int i = 0; i < 3; ++i) {
    result[i] = mat[3 * i + 0] * vec[0] + mat[3 * i + 1] * vec[1] +
                mat[3 * i + 2] * vec[2];
  }
}
// Apply a row-major 3x3 homography to a 2D point given in inhomogeneous
// coordinates and de-homogenize the result: result = proj(mat * [vec; 1]).
__device__ inline void Mat33DotVec3Homogeneous(const float mat[9],
                                               const float vec[2],
                                               float result[2]) {
  const float z = mat[6] * vec[0] + mat[7] * vec[1] + mat[8];
  const float inv_z = 1.0f / z;
  const float x = mat[0] * vec[0] + mat[1] * vec[1] + mat[2];
  const float y = mat[3] * vec[0] + mat[4] * vec[1] + mat[5];
  result[0] = inv_z * x;
  result[1] = inv_z * y;
}
// Euclidean dot product of two 3-vectors.
__device__ inline float DotProduct3(const float vec1[3], const float vec2[3]) {
  float sum = 0.0f;
#pragma unroll
  for (int i = 0; i < 3; ++i) {
    sum += vec1[i] * vec2[i];
  }
  return sum;
}
// Sample a depth uniformly at random from [depth_min, depth_max] using the
// per-pixel curand state. Note curand_uniform returns values in (0, 1].
__device__ inline float GenerateRandomDepth(const float depth_min,
                                            const float depth_max,
                                            curandState* rand_state) {
  const float rand01 = curand_uniform(rand_state);
  return depth_min + rand01 * (depth_max - depth_min);
}
// Sample a random unit normal for pixel (row, col), oriented such that the
// surface it describes is visible from the camera (non-positive dot product
// with the viewing ray through the pixel).
__device__ inline void GenerateRandomNormal(const int row,
                                            const int col,
                                            curandState* rand_state,
                                            float normal[3]) {
  // Unbiased sampling of normal, according to George Marsaglia, "Choosing a
  // Point from the Surface of a Sphere", 1972.
  float v1 = 0.0f;
  float v2 = 0.0f;
  float s = 2.0f;
  // Rejection-sample a point inside the unit disk.
  while (s >= 1.0f) {
    v1 = 2.0f * curand_uniform(rand_state) - 1.0f;
    v2 = 2.0f * curand_uniform(rand_state) - 1.0f;
    s = v1 * v1 + v2 * v2;
  }
  // Map the disk sample to a uniformly distributed point on the unit sphere.
  const float s_norm = sqrt(1.0f - s);
  normal[0] = 2.0f * v1 * s_norm;
  normal[1] = 2.0f * v2 * s_norm;
  normal[2] = 1.0f - 2.0f * s;

  // Make sure normal is looking away from camera: flip the normal if its dot
  // product with the viewing ray through the pixel is positive, so that the
  // result satisfies dot(normal, view_ray) <= 0.
  const float view_ray[3] = {ref_inv_K[0] * col + ref_inv_K[1],
                             ref_inv_K[2] * row + ref_inv_K[3],
                             1.0f};
  if (DotProduct3(normal, view_ray) > 0) {
    normal[0] = -normal[0];
    normal[1] = -normal[1];
    normal[2] = -normal[2];
  }
}
// Randomly perturb the given depth by up to +/- perturbation (relative
// fraction), sampling uniformly from the resulting interval.
__device__ inline float PerturbDepth(const float perturbation,
                                     const float depth,
                                     curandState* rand_state) {
  return GenerateRandomDepth((1.0f - perturbation) * depth,
                             (1.0f + perturbation) * depth,
                             rand_state);
}
// Randomly perturb the given normal by rotating it with small random Euler
// angles of magnitude up to +/- perturbation/2 (radians). If the perturbed
// normal would no longer face the camera, retry with half the perturbation
// up to 3 times, and otherwise fall back to the unperturbed input normal.
__device__ inline void PerturbNormal(const int row,
                                     const int col,
                                     const float perturbation,
                                     const float normal[3],
                                     curandState* rand_state,
                                     float perturbed_normal[3],
                                     const int num_trials = 0) {
  // Perturbation rotation angles.
  const float a1 = (curand_uniform(rand_state) - 0.5f) * perturbation;
  const float a2 = (curand_uniform(rand_state) - 0.5f) * perturbation;
  const float a3 = (curand_uniform(rand_state) - 0.5f) * perturbation;

  const float sin_a1 = sin(a1);
  const float sin_a2 = sin(a2);
  const float sin_a3 = sin(a3);
  const float cos_a1 = cos(a1);
  const float cos_a2 = cos(a2);
  const float cos_a3 = cos(a3);

  // R = Rx * Ry * Rz
  float R[9];
  R[0] = cos_a2 * cos_a3;
  R[1] = -cos_a2 * sin_a3;
  R[2] = sin_a2;
  R[3] = cos_a1 * sin_a3 + cos_a3 * sin_a1 * sin_a2;
  R[4] = cos_a1 * cos_a3 - sin_a1 * sin_a2 * sin_a3;
  R[5] = -cos_a2 * sin_a1;
  R[6] = sin_a1 * sin_a3 - cos_a1 * cos_a3 * sin_a2;
  R[7] = cos_a3 * sin_a1 + cos_a1 * sin_a2 * sin_a3;
  R[8] = cos_a1 * cos_a2;

  // Perturb the normal vector.
  Mat33DotVec3(R, normal, perturbed_normal);

  // Make sure the perturbed normal is still looking in the same direction as
  // the viewing direction, otherwise try again but with smaller perturbation.
  const float view_ray[3] = {ref_inv_K[0] * col + ref_inv_K[1],
                             ref_inv_K[2] * row + ref_inv_K[3],
                             1.0f};
  if (DotProduct3(perturbed_normal, view_ray) >= 0.0f) {
    const int kMaxNumTrials = 3;
    if (num_trials < kMaxNumTrials) {
      // Retry with half the perturbation magnitude.
      PerturbNormal(row,
                    col,
                    0.5f * perturbation,
                    normal,
                    rand_state,
                    perturbed_normal,
                    num_trials + 1);
      return;
    } else {
      // Give up and keep the unperturbed normal.
      perturbed_normal[0] = normal[0];
      perturbed_normal[1] = normal[1];
      perturbed_normal[2] = normal[2];
      return;
    }
  }

  // Make sure normal has unit norm. R is a rotation, so this only corrects
  // accumulated floating-point error.
  const float inv_norm = rsqrt(DotProduct3(perturbed_normal, perturbed_normal));
  perturbed_normal[0] *= inv_norm;
  perturbed_normal[1] *= inv_norm;
  perturbed_normal[2] *= inv_norm;
}
// Back-project pixel (row, col) at the given depth to a 3D point in the
// reference camera frame, using the inverse calibration in ref_inv_K.
__device__ inline void ComputePointAtDepth(const float row,
                                           const float col,
                                           const float depth,
                                           float point[3]) {
  const float x_normalized = ref_inv_K[0] * col + ref_inv_K[1];
  const float y_normalized = ref_inv_K[2] * row + ref_inv_K[3];
  point[0] = depth * x_normalized;
  point[1] = depth * y_normalized;
  point[2] = depth;
}
// Transfer depth on plane from viewing ray at row1 to row2. The returned
// depth is the intersection of the viewing ray through row2 with the plane
// at row1 defined by the given depth and normal.
//
// The computation is carried out in 2D in the vertical plane spanned by the
// camera's y-axis and the optical axis: below, local "x" holds the camera
// y-coordinate and local "y" holds the depth (z-coordinate).
__device__ inline float PropagateDepth(const float depth1,
                                       const float normal1[3],
                                       const float row1,
                                       const float row2) {
  // Point along first viewing ray.
  const float x1 = depth1 * (ref_inv_K[2] * row1 + ref_inv_K[3]);
  const float y1 = depth1;

  // Point on plane defined by point along first viewing ray and plane normal1.
  // The in-plane direction (normal1[2], -normal1[1]) is orthogonal to the
  // normal's (y, z) components, so (x2, y2) lies on the plane's trace.
  const float x2 = x1 + normal1[2];
  const float y2 = y1 - normal1[1];

  // Origin of second viewing ray.
  // const float x3 = 0.0f;
  // const float y3 = 0.0f;
  // Point on second viewing ray.
  const float x4 = ref_inv_K[2] * row2 + ref_inv_K[3];
  // const float y4 = 1.0f;

  // Intersection of the lines ((x1, y1), (x2, y2)) and ((x3, y3), (x4, y4)).
  const float denom = x2 - x1 + x4 * (y1 - y2);
  constexpr float kEps = 1e-5f;
  // Near-parallel configuration: fall back to the original depth rather than
  // producing a numerically unstable intersection.
  if (abs(denom) < kEps) {
    return depth1;
  }
  const float nom = y1 * x2 - x1 * y2;
  return nom / denom;
}
// First, compute triangulation angle between reference and source image for 3D
// point. Second, compute incident angle between viewing direction of source
// image and normal direction of 3D point. Both angles are cosine distances.
//
// The 3D point is given in the reference camera frame, whose origin is the
// reference projection center, so `point` itself is the ray from the
// reference camera to the point.
__device__ inline void ComputeViewingAngles(
    const cudaTextureObject_t poses_texture,
    const float point[3],
    const float normal[3],
    const int image_idx,
    float* cos_triangulation_angle,
    float* cos_incident_angle) {
  *cos_triangulation_angle = 0.0f;
  *cos_incident_angle = 0.0f;

  // Projection center of source image (columns 16-18 of the poses texture).
  float C[3];
  for (int i = 0; i < 3; ++i) {
    C[i] = tex2D<float>(poses_texture, i + 16, image_idx);
  }

  // Ray from point to camera.
  const float SX[3] = {C[0] - point[0], C[1] - point[1], C[2] - point[2]};

  // Length of ray from reference image to point.
  const float RX_inv_norm = rsqrt(DotProduct3(point, point));

  // Length of ray from source image to point.
  const float SX_inv_norm = rsqrt(DotProduct3(SX, SX));

  // Cosines follow from normalized dot products; `normal` is assumed to be
  // unit length (see GenerateRandomNormal / PerturbNormal).
  *cos_incident_angle = DotProduct3(SX, normal) * SX_inv_norm;
  *cos_triangulation_angle = DotProduct3(SX, point) * RX_inv_norm * SX_inv_norm;
}
// Compose the plane-induced homography that warps pixels of the reference
// image to the source image with the given index, for the plane through the
// 3D point at (row, col, depth) with the given (unit) normal:
//   H = K * (R - T * n' / d) * Kref^-1, with the product expanded manually
// below using the flattened ref_inv_K calibration.
__device__ inline void ComposeHomography(
    const cudaTextureObject_t poses_texture,
    const int image_idx,
    const int row,
    const int col,
    const float depth,
    const float normal[3],
    float H[9]) {
  // Calibration of source image (columns 0-3 of the poses texture).
  float K[4];
  for (int i = 0; i < 4; ++i) {
    K[i] = tex2D<float>(poses_texture, i, image_idx);
  }

  // Relative rotation between reference and source image (columns 4-12).
  float R[9];
  for (int i = 0; i < 9; ++i) {
    R[i] = tex2D<float>(poses_texture, i + 4, image_idx);
  }

  // Relative translation between reference and source image (columns 13-15).
  float T[3];
  for (int i = 0; i < 3; ++i) {
    T[i] = tex2D<float>(poses_texture, i + 13, image_idx);
  }

  // Distance to the plane: d = depth * dot(n, unprojected pixel direction).
  const float dist =
      depth * (normal[0] * (ref_inv_K[0] * col + ref_inv_K[1]) +
               normal[1] * (ref_inv_K[2] * row + ref_inv_K[3]) + normal[2]);
  const float inv_dist = 1.0f / dist;

  const float inv_dist_N0 = inv_dist * normal[0];
  const float inv_dist_N1 = inv_dist * normal[1];
  const float inv_dist_N2 = inv_dist * normal[2];

  // Homography as H = K * (R - T * n' / d) * Kref^-1.
  H[0] = ref_inv_K[0] * (K[0] * (R[0] + inv_dist_N0 * T[0]) +
                         K[1] * (R[6] + inv_dist_N0 * T[2]));
  H[1] = ref_inv_K[2] * (K[0] * (R[1] + inv_dist_N1 * T[0]) +
                         K[1] * (R[7] + inv_dist_N1 * T[2]));
  H[2] = K[0] * (R[2] + inv_dist_N2 * T[0]) +
         K[1] * (R[8] + inv_dist_N2 * T[2]) +
         ref_inv_K[1] * (K[0] * (R[0] + inv_dist_N0 * T[0]) +
                         K[1] * (R[6] + inv_dist_N0 * T[2])) +
         ref_inv_K[3] * (K[0] * (R[1] + inv_dist_N1 * T[0]) +
                         K[1] * (R[7] + inv_dist_N1 * T[2]));
  H[3] = ref_inv_K[0] * (K[2] * (R[3] + inv_dist_N0 * T[1]) +
                         K[3] * (R[6] + inv_dist_N0 * T[2]));
  H[4] = ref_inv_K[2] * (K[2] * (R[4] + inv_dist_N1 * T[1]) +
                         K[3] * (R[7] + inv_dist_N1 * T[2]));
  H[5] = K[2] * (R[5] + inv_dist_N2 * T[1]) +
         K[3] * (R[8] + inv_dist_N2 * T[2]) +
         ref_inv_K[1] * (K[2] * (R[3] + inv_dist_N0 * T[1]) +
                         K[3] * (R[6] + inv_dist_N0 * T[2])) +
         ref_inv_K[3] * (K[2] * (R[4] + inv_dist_N1 * T[1]) +
                         K[3] * (R[7] + inv_dist_N1 * T[2]));
  H[6] = ref_inv_K[0] * (R[6] + inv_dist_N0 * T[2]);
  H[7] = ref_inv_K[2] * (R[7] + inv_dist_N1 * T[2]);
  H[8] = R[8] + ref_inv_K[1] * (R[6] + inv_dist_N0 * T[2]) +
         ref_inv_K[3] * (R[7] + inv_dist_N1 * T[2]) + inv_dist_N2 * T[2];
}
// Each thread in the current warp / thread block reads in 3 columns of the
// reference image. The shared memory holds 3 * THREADS_PER_BLOCK columns and
// kWindowSize rows of the reference image. Each thread copies every
// THREADS_PER_BLOCK-th column from global to shared memory offset by its ID.
// For example, if THREADS_PER_BLOCK = 32, then thread 0 reads columns 0, 32, 64
// and thread 1 columns 1, 33, 65. When computing the photoconsistency, which is
// shared among each thread block, each thread can then read the reference image
// colors from shared memory. Note that this limits the window radius to a
// maximum of THREADS_PER_BLOCK.
template <int kWindowSize>
struct LocalRefImage {
  const static int kWindowRadius = kWindowSize / 2;
  const static int kThreadBlockRadius = 1;
  const static int kThreadBlockSize = 2 * kThreadBlockRadius + 1;
  const static int kNumRows = kWindowSize;
  const static int kNumColumns = kThreadBlockSize * THREADS_PER_BLOCK;
  const static int kDataSize = kNumRows * kNumColumns;

  __device__ explicit LocalRefImage(const cudaTextureObject_t ref_image_texture)
      : ref_image_texture_(ref_image_texture) {}

  // Backing store of kDataSize floats in row-major layout. The caller must
  // point this at a __shared__ array before calling Read() (see
  // ComputeInitialCost for an example).
  float* data = nullptr;

  // Load the window centered at the given image row into `data`. Must be
  // called with consecutive rows starting at row 0; no synchronization is
  // performed here, so callers insert their own __syncthreads() afterwards.
  __device__ inline void Read(const int row) {
    // For the first row, read the entire block into shared memory. For all
    // consecutive rows, it is only necessary to shift the rows in shared memory
    // up by one element and then read in a new row at the bottom of the shared
    // memory. Note that this assumes that the calling loop starts with the
    // first row and then consecutively reads in the next row.

    const int thread_id = threadIdx.x;
    const int thread_block_first_id = blockDim.x * blockIdx.x;

    const int local_col_start = thread_id;
    // May be negative for the left-most block; out-of-bounds reads are
    // handled by the texture's addressing mode.
    const int global_col_start = thread_block_first_id -
                                 kThreadBlockRadius * THREADS_PER_BLOCK +
                                 thread_id;

    if (row == 0) {
      int global_row = row - kWindowRadius;
      for (int local_row = 0; local_row < kNumRows; ++local_row, ++global_row) {
        int local_col = local_col_start;
        int global_col = global_col_start;
#pragma unroll
        for (int block = 0; block < kThreadBlockSize; ++block) {
          data[local_row * kNumColumns + local_col] =
              tex2D<float>(ref_image_texture_, global_col, global_row);
          local_col += THREADS_PER_BLOCK;
          global_col += THREADS_PER_BLOCK;
        }
      }
    } else {
      // Move rows in shared memory up by one row.
      for (int local_row = 1; local_row < kNumRows; ++local_row) {
        int local_col = local_col_start;
#pragma unroll
        for (int block = 0; block < kThreadBlockSize; ++block) {
          data[(local_row - 1) * kNumColumns + local_col] =
              data[local_row * kNumColumns + local_col];
          local_col += THREADS_PER_BLOCK;
        }
      }

      // Read next row into the last row of shared memory.
      const int local_row = kNumRows - 1;
      const int global_row = row + kWindowRadius;
      int local_col = local_col_start;
      int global_col = global_col_start;
#pragma unroll
      for (int block = 0; block < kThreadBlockSize; ++block) {
        data[local_row * kNumColumns + local_col] =
            tex2D<float>(ref_image_texture_, global_col, global_row);
        local_col += THREADS_PER_BLOCK;
        global_col += THREADS_PER_BLOCK;
      }
    }
  }

 private:
  const cudaTextureObject_t ref_image_texture_;
};
// Computes the bilaterally weighted NCC between a patch in the reference
// image (read from thread-block shared memory) and the corresponding patch
// warped into a source image via the plane-induced homography.
// The return value is 1 - NCC, so the range is [0, 2]; the smaller the
// value, the better the color consistency.
template <int kWindowSize, int kWindowStep>
struct PhotoConsistencyCostComputer {
  const static int kWindowRadius = kWindowSize / 2;

  __device__ PhotoConsistencyCostComputer(
      const cudaTextureObject_t ref_image_texture,
      const cudaTextureObject_t src_images_texture,
      const cudaTextureObject_t poses_texture,
      const float sigma_spatial,
      const float sigma_color)
      : local_ref_image(ref_image_texture),
        src_images_texture_(src_images_texture),
        poses_texture_(poses_texture),
        bilateral_weight_computer_(sigma_spatial, sigma_color) {}

  // Maximum photo consistency cost as 1 - min(NCC).
  const float kMaxCost = 2.0f;

  // Thread warp local reference image data around current patch.
  typedef LocalRefImage<kWindowSize> LocalRefImageType;
  LocalRefImageType local_ref_image;

  // Precomputed sum of raw and squared image intensities.
  float local_ref_sum = 0.0f;
  float local_ref_squared_sum = 0.0f;

  // Index of source image.
  int src_image_idx = -1;

  // Center position of patch in reference image.
  int row = -1;
  int col = -1;

  // Depth and normal for which to warp patch.
  float depth = 0.0f;
  const float* normal = nullptr;

  // Advance the shared-memory reference window to the given row. Contains a
  // block-wide barrier, so it must be called by all threads in the block.
  __device__ inline void Read(const int row) {
    local_ref_image.Read(row);
    __syncthreads();
  }

  // Compute 1 - NCC for the currently configured (src_image_idx, row, col,
  // depth, normal). Returns kMaxCost for degenerate, near-constant patches.
  __device__ inline float Compute() const {
    float tform[9];
    ComposeHomography(
        poses_texture_, src_image_idx, row, col, depth, normal, tform);

    // Homography increments pre-scaled by the sampling step.
    float tform_step[8];
    for (int i = 0; i < 8; ++i) {
      tform_step[i] = kWindowStep * tform[i];
    }

    const int thread_id = threadIdx.x;
    const int row_start = row - kWindowRadius;
    const int col_start = col - kWindowRadius;

    // Homogeneous source coordinates of the top-left patch corner;
    // de-homogenized by 1/z inside the loop.
    float col_src = tform[0] * col_start + tform[1] * row_start + tform[2];
    float row_src = tform[3] * col_start + tform[4] * row_start + tform[5];
    float z = tform[6] * col_start + tform[7] * row_start + tform[8];
    float base_col_src = col_src;
    float base_row_src = row_src;
    float base_z = z;

    // Shared-memory index of the top-left patch pixel for this thread (see
    // LocalRefImage for the interleaved column layout).
    int ref_image_idx = THREADS_PER_BLOCK - kWindowRadius + thread_id;
    int ref_image_base_idx = ref_image_idx;

    const float ref_center_color =
        local_ref_image
            .data[ref_image_idx + kWindowRadius * 3 * THREADS_PER_BLOCK +
                  kWindowRadius];
    const float ref_color_sum = local_ref_sum;
    const float ref_color_squared_sum = local_ref_squared_sum;
    float src_color_sum = 0.0f;
    float src_color_squared_sum = 0.0f;
    float src_ref_color_sum = 0.0f;
    float bilateral_weight_sum = 0.0f;

    for (int row = -kWindowRadius; row <= kWindowRadius; row += kWindowStep) {
      for (int col = -kWindowRadius; col <= kWindowRadius; col += kWindowStep) {
        const float inv_z = 1.0f / z;
        const float norm_col_src = inv_z * col_src + 0.5f;
        const float norm_row_src = inv_z * row_src + 0.5f;
        const float ref_color = local_ref_image.data[ref_image_idx];
        const float src_color = tex2DLayered<float>(
            src_images_texture_, norm_col_src, norm_row_src, src_image_idx);

        const float bilateral_weight = bilateral_weight_computer_.Compute(
            row, col, ref_center_color, ref_color);

        const float bilateral_weight_src = bilateral_weight * src_color;

        src_color_sum += bilateral_weight_src;
        src_color_squared_sum += bilateral_weight_src * src_color;
        src_ref_color_sum += bilateral_weight_src * ref_color;
        bilateral_weight_sum += bilateral_weight;

        ref_image_idx += kWindowStep;

        // Accumulate warped source coordinates per row to reduce numerical
        // errors. Note that this is necessary since coordinates usually are in
        // the order of 1000s as opposed to the color values which are
        // normalized to the range [0, 1].
        col_src += tform_step[0];
        row_src += tform_step[3];
        z += tform_step[6];
      }

      // Reset per-row accumulators from the per-patch base values.
      ref_image_base_idx += kWindowStep * 3 * THREADS_PER_BLOCK;
      ref_image_idx = ref_image_base_idx;

      base_col_src += tform_step[1];
      base_row_src += tform_step[4];
      base_z += tform_step[7];

      col_src = base_col_src;
      row_src = base_row_src;
      z = base_z;
    }

    const float inv_bilateral_weight_sum = 1.0f / bilateral_weight_sum;
    src_color_sum *= inv_bilateral_weight_sum;
    src_color_squared_sum *= inv_bilateral_weight_sum;
    src_ref_color_sum *= inv_bilateral_weight_sum;

    const float ref_color_var =
        ref_color_squared_sum - ref_color_sum * ref_color_sum;
    const float src_color_var =
        src_color_squared_sum - src_color_sum * src_color_sum;

    // Based on Jensen's Inequality for convex functions, the variance
    // should always be larger than 0. Do not make this threshold smaller.
    constexpr float kMinVar = 1e-5f;
    if (ref_color_var < kMinVar || src_color_var < kMinVar) {
      return kMaxCost;
    } else {
      const float src_ref_color_covar =
          src_ref_color_sum - ref_color_sum * src_color_sum;
      const float src_ref_color_var = sqrt(ref_color_var * src_color_var);
      return max(0.0f,
                 min(kMaxCost, 1.0f - src_ref_color_covar / src_ref_color_var));
    }
  }

 private:
  const cudaTextureObject_t src_images_texture_;
  const cudaTextureObject_t poses_texture_;
  const BilateralWeightComputer bilateral_weight_computer_;
};
// Compute the geometric consistency cost as the forward-backward
// reprojection error: project the reference pixel at the given depth into
// the source image, look up the source depth there, back-project it to 3D,
// and re-project into the reference image. The cost is the pixel distance
// between the original and the round-trip projection, truncated at max_cost.
__device__ inline float ComputeGeomConsistencyCost(
    const cudaTextureObject_t poses_texture,
    const cudaTextureObject_t src_depth_maps_texture,
    const float row,
    const float col,
    const float depth,
    const int image_idx,
    const float max_cost) {
  // Extract projection matrices for source image.
  // Forward projection matrix P (columns 19-30 of the poses texture).
  float P[12];
  for (int i = 0; i < 12; ++i) {
    P[i] = tex2D<float>(poses_texture, i + 19, image_idx);
  }
  // Inverse projection matrix inv_P (columns 31-42).
  float inv_P[12];
  for (int i = 0; i < 12; ++i) {
    inv_P[i] = tex2D<float>(poses_texture, i + 31, image_idx);
  }

  // Project point in reference image to world.
  float forward_point[3];
  ComputePointAtDepth(row, col, depth, forward_point);

  // Project world point to source image.
  const float inv_forward_z =
      1.0f / (P[8] * forward_point[0] + P[9] * forward_point[1] +
              P[10] * forward_point[2] + P[11]);
  float src_col =
      inv_forward_z * (P[0] * forward_point[0] + P[1] * forward_point[1] +
                       P[2] * forward_point[2] + P[3]);
  float src_row =
      inv_forward_z * (P[4] * forward_point[0] + P[5] * forward_point[1] +
                       P[6] * forward_point[2] + P[7]);

  // Extract depth in source image.
  const float src_depth = tex2DLayered<float>(
      src_depth_maps_texture, src_col + 0.5f, src_row + 0.5f, image_idx);

  // Projection outside of source image: missing depth reads as zero, so
  // return the maximum cost.
  if (src_depth == 0.0f) {
    return max_cost;
  }

  // Project point in source image to world.
  src_col *= src_depth;
  src_row *= src_depth;
  const float backward_point_x =
      inv_P[0] * src_col + inv_P[1] * src_row + inv_P[2] * src_depth + inv_P[3];
  const float backward_point_y =
      inv_P[4] * src_col + inv_P[5] * src_row + inv_P[6] * src_depth + inv_P[7];
  const float backward_point_z = inv_P[8] * src_col + inv_P[9] * src_row +
                                 inv_P[10] * src_depth + inv_P[11];
  const float inv_backward_point_z = 1.0f / backward_point_z;

  // Project world point back to reference image.
  const float backward_col =
      inv_backward_point_z *
      (ref_K[0] * backward_point_x + ref_K[1] * backward_point_z);
  const float backward_row =
      inv_backward_point_z *
      (ref_K[2] * backward_point_y + ref_K[3] * backward_point_z);

  // Return truncated reprojection error between original observation and
  // the forward-backward projected observation.
  const float diff_col = col - backward_col;
  const float diff_row = row - backward_row;
  return min(max_cost, sqrt(diff_col * diff_col + diff_row * diff_row));
}
// Find the index of the minimum in the given cost values. Ties are resolved
// in favor of the last occurrence (comparison uses <=).
template <int kNumCosts>
__device__ inline int FindMinCost(const float costs[kNumCosts]) {
  int best_idx = 0;
  float best_cost = costs[0];
  for (int i = 1; i < kNumCosts; ++i) {
    const float cost = costs[i];
    if (cost <= best_cost) {
      best_cost = cost;
      best_idx = i;
    }
  }
  return best_idx;
}
// Normalize the given values to sum to one and convert the array in-place
// from a probability density into a cumulative distribution, suitable for
// inverse-transform sampling.
__device__ inline void TransformPDFToCDF(float* probs, const int num_probs) {
  float total = 0.0f;
  for (int i = 0; i < num_probs; ++i) {
    total += probs[i];
  }
  const float inv_total = 1.0f / total;
  float cumulative = 0.0f;
  for (int i = 0; i < num_probs; ++i) {
    cumulative += probs[i] * inv_total;
    probs[i] = cumulative;
  }
}
// Computes per-source-image selection probabilities by passing forward and
// backward messages of a two-state (selected / not selected) chain model,
// combining an NCC-based emission likelihood with triangulation-angle,
// incident-angle, and resolution priors.
class LikelihoodComputer {
 public:
  // min_triangulation_angle is in radians (it is passed to cos() directly).
  __device__ LikelihoodComputer(const float ncc_sigma,
                                const float min_triangulation_angle,
                                const float incident_angle_sigma)
      : cos_min_triangulation_angle_(cos(min_triangulation_angle)),
        inv_incident_angle_sigma_square_(
            -0.5f / (incident_angle_sigma * incident_angle_sigma)),
        inv_ncc_sigma_square_(-0.5f / (ncc_sigma * ncc_sigma)),
        ncc_norm_factor_(ComputeNCCCostNormFactor(ncc_sigma)) {}

  // Compute forward message from current cost and forward message of
  // previous / neighboring pixel.
  __device__ float ComputeForwardMessage(const float cost,
                                         const float prev) const {
    return ComputeMessage<true>(cost, prev);
  }

  // Compute backward message from current cost and backward message of
  // previous / neighboring pixel.
  __device__ float ComputeBackwardMessage(const float cost,
                                          const float prev) const {
    return ComputeMessage<false>(cost, prev);
  }

  // Compute the selection probability from the forward and backward message,
  // smoothed with the previous iteration's probability by prev_weight.
  __device__ inline float ComputeSelProb(const float alpha,
                                         const float beta,
                                         const float prev,
                                         const float prev_weight) const {
    const float zn0 = (1.0f - alpha) * (1.0f - beta);
    const float zn1 = alpha * beta;
    const float curr = zn1 / (zn0 + zn1);
    return prev_weight * prev + (1.0f - prev_weight) * curr;
  }

  // Compute NCC probability. Note that cost = 1 - NCC.
  __device__ inline float ComputeNCCProb(const float cost) const {
    return exp(cost * cost * inv_ncc_sigma_square_) * ncc_norm_factor_;
  }

  // Compute the triangulation angle probability. Returns 1 for angles at or
  // above the minimum and decays quadratically to 0 for smaller angles.
  __device__ inline float ComputeTriProb(
      const float cos_triangulation_angle) const {
    const float abs_cos_triangulation_angle = abs(cos_triangulation_angle);
    if (abs_cos_triangulation_angle > cos_min_triangulation_angle_) {
      const float scaled = 1.0f - (1.0f - abs_cos_triangulation_angle) /
                                      (1.0f - cos_min_triangulation_angle_);
      const float likelihood = 1.0f - scaled * scaled;
      return min(1.0f, max(0.0f, likelihood));
    } else {
      return 1.0f;
    }
  }

  // Compute the incident angle probability as a Gaussian in the deviation of
  // the cosine from 1 (i.e. from a fronto-parallel observation).
  __device__ inline float ComputeIncProb(const float cos_incident_angle) const {
    const float x = 1.0f - max(0.0f, cos_incident_angle);
    return exp(x * x * inv_incident_angle_sigma_square_);
  }

  // Compute the warping/resolution prior probability as the ratio of the
  // smaller to the larger of the patch areas in reference and source image.
  template <int kWindowSize>
  __device__ inline float ComputeResolutionProb(const float H[9],
                                                const float row,
                                                const float col) const {
    const int kWindowRadius = kWindowSize / 2;

    // Warp corners of patch in reference image to source image.
    float src1[2];
    const float ref1[2] = {col - kWindowRadius, row - kWindowRadius};
    Mat33DotVec3Homogeneous(H, ref1, src1);
    float src2[2];
    const float ref2[2] = {col - kWindowRadius, row + kWindowRadius};
    Mat33DotVec3Homogeneous(H, ref2, src2);
    float src3[2];
    const float ref3[2] = {col + kWindowRadius, row + kWindowRadius};
    Mat33DotVec3Homogeneous(H, ref3, src3);
    float src4[2];
    const float ref4[2] = {col + kWindowRadius, row - kWindowRadius};
    Mat33DotVec3Homogeneous(H, ref4, src4);

    // Compute area of patches in reference and source image. The source area
    // uses the shoelace formula on the warped quadrilateral.
    const float ref_area = kWindowSize * kWindowSize;
    const float src_area =
        abs(0.5f * (src1[0] * src2[1] - src2[0] * src1[1] - src1[0] * src4[1] +
                    src2[0] * src3[1] - src3[0] * src2[1] + src4[0] * src1[1] +
                    src3[0] * src4[1] - src4[0] * src3[1]));

    if (ref_area > src_area) {
      return src_area / ref_area;
    } else {
      return ref_area / src_area;
    }
  }

 private:
  // The normalization for the likelihood function, i.e. the normalization for
  // the prior on the matching cost.
  __device__ static inline float ComputeNCCCostNormFactor(
      const float ncc_sigma) {
    // A = sqrt(2pi)*sigma/2*erf(sqrt(2)/sigma)
    // erf(x) = 2/sqrt(pi) * integral from 0 to x of exp(-t^2) dt
    return 2.0f / (sqrt(2.0f * M_PI) * ncc_sigma *
                   erff(2.0f / (ncc_sigma * 1.414213562f)));
  }

  // Compute the forward or backward message. The transition model strongly
  // favors keeping the previous selection state (kNoChangeProb); the emission
  // for the selected state is the NCC likelihood, and a uniform density for
  // the unselected state.
  template <bool kForward>
  __device__ inline float ComputeMessage(const float cost,
                                         const float prev) const {
    constexpr float kUniformProb = 0.5f;
    constexpr float kNoChangeProb = 0.99999f;
    const float kChangeProb = 1.0f - kNoChangeProb;
    const float emission = ComputeNCCProb(cost);

    float zn0;  // Message for selection probability = 0.
    float zn1;  // Message for selection probability = 1.
    if (kForward) {
      zn0 = (prev * kChangeProb + (1.0f - prev) * kNoChangeProb) * kUniformProb;
      zn1 = (prev * kNoChangeProb + (1.0f - prev) * kChangeProb) * emission;
    } else {
      zn0 = prev * emission * kChangeProb +
            (1.0f - prev) * kUniformProb * kNoChangeProb;
      zn1 = prev * emission * kNoChangeProb +
            (1.0f - prev) * kUniformProb * kChangeProb;
    }

    return zn1 / (zn0 + zn1);
  }

  const float cos_min_triangulation_angle_;
  const float inv_incident_angle_sigma_square_;
  const float inv_ncc_sigma_square_;
  const float ncc_norm_factor_;
};
// Initialize the normal map with random unit normals facing the camera and
// advance the per-pixel random generator state. (The previous comment here
// was copy-pasted from RotateNormalMap and did not describe this kernel.)
__global__ void InitNormalMap(GpuMat<float> normal_map,
                              GpuMat<curandState> rand_state_map) {
  const int row = blockDim.y * blockIdx.y + threadIdx.y;
  const int col = blockDim.x * blockIdx.x + threadIdx.x;
  if (col < normal_map.GetWidth() && row < normal_map.GetHeight()) {
    // Copy the state locally, use it, and write it back so the sequence
    // continues correctly on the next kernel invocation.
    curandState rand_state = rand_state_map.Get(row, col);
    float normal[3];
    GenerateRandomNormal(row, col, &rand_state, normal);
    normal_map.SetSlice(row, col, normal);
    rand_state_map.Set(row, col, rand_state);
  }
}
// Rotate normals by 90deg around z-axis in counter-clockwise direction,
// i.e. (x, y, z) -> (y, -x, z), in-place over the whole normal map.
__global__ void RotateNormalMap(GpuMat<float> normal_map) {
  const int row = blockDim.y * blockIdx.y + threadIdx.y;
  const int col = blockDim.x * blockIdx.x + threadIdx.x;
  if (row >= normal_map.GetHeight() || col >= normal_map.GetWidth()) {
    return;
  }
  float normal[3];
  normal_map.GetSlice(row, col, normal);
  float rotated_normal[3];
  rotated_normal[0] = normal[1];
  rotated_normal[1] = -normal[0];
  rotated_normal[2] = normal[2];
  normal_map.SetSlice(row, col, rotated_normal);
}
// Compute the initial photometric cost for every pixel and every source
// image, given the current depth and normal maps.
//
// Launch layout: 1D thread blocks of THREADS_PER_BLOCK threads along the
// image columns; each thread owns one column and the kernel sweeps over all
// rows so the shared-memory reference window can be shifted incrementally
// (see LocalRefImage).
template <int kWindowSize, int kWindowStep>
__global__ void ComputeInitialCost(GpuMat<float> cost_map,
                                   const GpuMat<float> depth_map,
                                   const GpuMat<float> normal_map,
                                   const cudaTextureObject_t ref_image_texture,
                                   const GpuMat<float> ref_sum_image,
                                   const GpuMat<float> ref_squared_sum_image,
                                   const cudaTextureObject_t src_images_texture,
                                   const cudaTextureObject_t poses_texture,
                                   const float sigma_spatial,
                                   const float sigma_color) {
  const int col = blockDim.x * blockIdx.x + threadIdx.x;

  typedef PhotoConsistencyCostComputer<kWindowSize, kWindowStep>
      PhotoConsistencyCostComputerType;
  PhotoConsistencyCostComputerType pcc_computer(ref_image_texture,
                                                src_images_texture,
                                                poses_texture,
                                                sigma_spatial,
                                                sigma_color);
  pcc_computer.col = col;

  // Shared-memory backing store for the sliding reference image window.
  __shared__ float local_ref_image_data
      [PhotoConsistencyCostComputerType::LocalRefImageType::kDataSize];
  pcc_computer.local_ref_image.data = &local_ref_image_data[0];

  float normal[3] = {0};
  pcc_computer.normal = normal;

  for (int row = 0; row < cost_map.GetHeight(); ++row) {
    // Note that this must be executed even for pixels outside the borders,
    // since pixels are used in the local neighborhood of the current pixel.
    pcc_computer.Read(row);
    if (col < cost_map.GetWidth()) {
      pcc_computer.depth = depth_map.Get(row, col);
      normal_map.GetSlice(row, col, normal);
      pcc_computer.row = row;
      pcc_computer.local_ref_sum = ref_sum_image.Get(row, col);
      pcc_computer.local_ref_squared_sum = ref_squared_sum_image.Get(row, col);
      // One cost per source image, stored in the depth dimension.
      for (int image_idx = 0; image_idx < cost_map.GetDepth(); ++image_idx) {
        pcc_computer.src_image_idx = image_idx;
        cost_map.Set(row, col, image_idx, pcc_computer.Compute());
      }
    }
  }
}
// Tunable parameters for one PatchMatch propagation/sampling sweep
// (passed by value to SweepFromTopToBottom).
struct SweepOptions {
  // Scale of the random perturbation when sampling new hypotheses
  // (presumably annealed by the caller across iterations — confirm).
  float perturbation = 1.0f;
  // Valid depth range for sampled depths.
  float depth_min = 0.0f;
  float depth_max = 1.0f;
  // Number of random samples drawn per pixel — TODO confirm against sampler.
  int num_samples = 15;
  // Spatial/color sigmas forwarded to PhotoConsistencyCostComputer
  // (bilateral-style weighting of the matching window).
  float sigma_spatial = 3.0f;
  float sigma_color = 0.3f;
  // Forwarded to LikelihoodComputer: NCC sigma, minimum triangulation angle,
  // and incident-angle sigma for the selection-probability model.
  float ncc_sigma = 0.6f;
  float min_triangulation_angle = 0.5f;
  float incident_angle_sigma = 0.9f;
  // Weight of the previous sweep's selection probability — TODO confirm usage.
  float prev_sel_prob_weight = 0.0f;
  // Geometric-consistency term: regularizer weight and cost clamp
  // (used when kGeomConsistencyTerm is enabled — confirm).
  float geom_consistency_regularizer = 0.1f;
  float geom_consistency_max_cost = 5.0f;
  // Thresholds applied when filtering is enabled (kFilterPhotoConsistency /
  // kFilterGeomConsistency): minimum NCC, minimum triangulation angle,
  // minimum number of consistent source images, and maximum geometric cost.
  float filter_min_ncc = 0.1f;
  float filter_min_triangulation_angle = 3.0f;
  int filter_min_num_consistent = 2;
  float filter_geom_consistency_max_cost = 1.0f;
};
template <int kWindowSize,
int kWindowStep,
bool kGeomConsistencyTerm = false,
bool kFilterPhotoConsistency = false,
bool kFilterGeomConsistency = false>
__global__ void SweepFromTopToBottom(
GpuMat<float> global_workspace,
GpuMat<curandState> rand_state_map,
GpuMat<float> cost_map,
GpuMat<float> depth_map,
GpuMat<float> normal_map,
GpuMat<uint8_t> consistency_mask,
GpuMat<float> sel_prob_map,
const GpuMat<float> prev_sel_prob_map,
const cudaTextureObject_t ref_image_texture,
const GpuMat<float> ref_sum_image,
const GpuMat<float> ref_squared_sum_image,
const cudaTextureObject_t src_images_texture,
const cudaTextureObject_t src_depth_maps_texture,
const cudaTextureObject_t poses_texture,
const SweepOptions options) {
const int col = blockDim.x * blockIdx.x + threadIdx.x;
// Probability for boundary pixels.
constexpr float kUniformProb = 0.5f;
LikelihoodComputer likelihood_computer(options.ncc_sigma,
options.min_triangulation_angle,
options.incident_angle_sigma);
float* forward_message =
&global_workspace.GetPtr()[col * global_workspace.GetHeight()];
float* sampling_probs =
&global_workspace.GetPtr()[global_workspace.GetWidth() *
global_workspace.GetHeight() +
col * global_workspace.GetHeight()];
//////////////////////////////////////////////////////////////////////////////
// Compute backward message for all rows. Note that the backward messages are
// temporarily stored in the sel_prob_map and replaced row by row as the
// updated forward messages are computed further below.
//////////////////////////////////////////////////////////////////////////////
if (col < cost_map.GetWidth()) {
for (int image_idx = 0; image_idx < cost_map.GetDepth(); ++image_idx) {
// Compute backward message.
float beta = kUniformProb;
for (int row = cost_map.GetHeight() - 1; row >= 0; --row) {
const float cost = cost_map.Get(row, col, image_idx);
beta = likelihood_computer.ComputeBackwardMessage(cost, beta);
sel_prob_map.Set(row, col, image_idx, beta);
}
// Initialize forward message.
forward_message[image_idx] = kUniformProb;
}
}
//////////////////////////////////////////////////////////////////////////////
// Estimate parameters for remaining rows and compute selection probabilities.
//////////////////////////////////////////////////////////////////////////////
typedef PhotoConsistencyCostComputer<kWindowSize, kWindowStep>
PhotoConsistencyCostComputerType;
PhotoConsistencyCostComputerType pcc_computer(ref_image_texture,
src_images_texture,
poses_texture,
options.sigma_spatial,
options.sigma_color);
pcc_computer.col = col;
__shared__ float local_ref_image_data
[PhotoConsistencyCostComputerType::LocalRefImageType::kDataSize];
pcc_computer.local_ref_image.data = &local_ref_image_data[0];
struct ParamState {
float depth = 0.0f;
float normal[3] = {0};
};
// Parameters of previous pixel in column.
ParamState prev_param_state;
// Parameters of current pixel in column.
ParamState curr_param_state;
// Randomly sampled parameters.
ParamState rand_param_state;
// Cuda PRNG state for random sampling.
curandState rand_state;
if (col < cost_map.GetWidth()) {
// Read random state for current column.
rand_state = rand_state_map.Get(0, col);
// Parameters for first row in column.
prev_param_state.depth = depth_map.Get(0, col);
normal_map.GetSlice(0, col, prev_param_state.normal);
}
for (int row = 0; row < cost_map.GetHeight(); ++row) {
// Note that this must be executed even for pixels outside the borders,
// since pixels are used in the local neighborhood of the current pixel.
pcc_computer.Read(row);
if (col >= cost_map.GetWidth()) {
continue;
}
pcc_computer.row = row;
pcc_computer.local_ref_sum = ref_sum_image.Get(row, col);
pcc_computer.local_ref_squared_sum = ref_squared_sum_image.Get(row, col);