/
Experiments.cpp
15974 lines (13372 loc) · 684 KB
/
Experiments.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#include "Tools.cpp" // this includes rapt and rosic
#include "Attractors.cpp"
#include <regex>
#include <utility>   // for std::swap (used by rsReverse)
//-------------------------------------------------------------------------------------------------
// move some of this code to rapt:
// simple box filter for smoothing/blurring an image
// maybe have options for edge handling: periodic, ignore, repeat, ...
template<class T>
void boxBlur3x3(const RAPT::rsImage<T>& x, RAPT::rsImage<T>& y)
{
  // Applies a 3x3 box (moving average) filter to the interior pixels of x and writes the result
  // into y. Each interior output pixel is the mean of the 3x3 neighborhood around the
  // corresponding input pixel. The one-pixel border of y is left untouched.
  rsAssert(y.getPixelPointer(0,0) != x.getPixelPointer(0,0), "Cant be used in place");
  rsAssert(y.hasSameShapeAs(x), "Input and output images must have the same shape");
  // ^ was left as a "todo!!" comment while the sibling gaussBlur3x3 already asserts it - without
  //   this check, a smaller y leads to out-of-bounds writes
  int w = x.getWidth();
  int h = x.getHeight();
  for(int j = 1; j < h-1; j++) // loop over lines
  {
    for(int i = 1; i < w-1; i++) // loop over pixels in current line
    {
      T tmp = x(i-1, j-1) + x(i-1, j) + x(i-1, j+1)
            + x(i,   j-1) + x(i,   j) + x(i,   j+1)
            + x(i+1, j-1) + x(i+1, j) + x(i+1, j+1);
      tmp *= T(1) / T(9);   // normalize: 9 taps with equal weight
      y(i, j) = tmp;
    }
  }
  // todo: handle edges and corners
  // maybe use a weighting that is more circular - the shape of the disease spread looks a bit
  // squarish
  // see https://en.wikipedia.org/wiki/Kernel_(image_processing)
}
template<class T>
void gaussBlur3x3(const RAPT::rsImage<T>& x, RAPT::rsImage<T>& y)
{
  // Applies a 3x3 binomial approximation of a Gaussian blur to the interior pixels of x and
  // writes the result into y. Kernel weights: 1/4 for the center, 1/8 for the 4 edge-adjacent
  // neighbors, 1/16 for the 4 diagonal neighbors (sums to 1, i.e. unit DC gain). The one-pixel
  // border of y is left untouched (edge handling is still todo).
  rsAssert(y.getPixelPointer(0,0) != x.getPixelPointer(0,0), "Cant be used in place");
  rsAssert(y.hasSameShapeAs(x), "Input and output images must have the same shape");
  const int width  = x.getWidth();
  const int height = x.getHeight();
  for(int row = 1; row < height-1; row++)
  {
    for(int col = 1; col < width-1; col++)
    {
      // Weighted sum of diagonal neighbors, then of edge-adjacent neighbors, then center:
      T diag = (T(1)/T(16)) * (x(col-1, row-1) + x(col-1, row+1) + x(col+1, row-1) + x(col+1, row+1));
      T adj  = (T(1)/T(8))  * (x(col-1, row  ) + x(col, row-1) + x(col, row+1) + x(col+1, row  ));
      y(col, row) = (T(1)/T(4))*x(col, row) + adj + diag;
    }
  }
}
template<class T>
void gaussBlur5x5(const RAPT::rsImage<T>& x, RAPT::rsImage<T>& y)
{
// Applies a 5x5 discrete Gaussian kernel (integer weights 1,4,7,16,26,41, all divided by their
// sum 273) to the interior pixels of x and writes the result into y. The two-pixel border of y
// is left untouched (edge handling is still todo).
//rsAssert(y.hasSameShapeAs(x)); // todo!!
rsAssert(y.getPixelPointer(0,0) != x.getPixelPointer(0,0), "Cant be used in place");
rsAssert(y.hasSameShapeAs(x), "Input and output images must have the same shape");
int w = x.getWidth();
int h = x.getHeight();
// Reciprocal of the kernel's total weight -> unit DC gain:
T c = T(1) / T(273);
for(int j = 2; j < h-2; j++)
{
for(int i = 2; i < w-2; i++)
{
// Outer ring:
T t1 = 1*c * (x(i-2, j-2) + x(i-2, j+2) + x(i+2, j-2) + x(i+2, j+2)); // corners
T t2 = 4*c * (x(i-1, j-2) + x(i-1, j+2) + x(i+1, j-2) + x(i+1, j+2) +
x(i-2, j-1) + x(i-2, j+1) + x(i+2, j-1) + x(i+2, j+1) );
T t3 = 7*c * (x(i-0, j-2) + x(i-0, j+2) + x(i-2, j-0) + x(i+2, j+0)); // edge centers
// Inner ring:
T t4 = 16*c * (x(i-1, j-1) + x(i-1, j+1) + x(i+1, j-1) + x(i+1, j+1)); // corners
T t5 = 26*c * (x(i-0, j-1) + x(i-0, j+1) + x(i-1, j-0) + x(i+1, j+0)); // edge centers
// Center:
T t6 = 41*c * x(i,j);
// Sum:
y(i,j) = t1 + t2 + t3 + t4 + t5 + t6;
}
}
}
template<class T>
void gaussBlur7x7(const RAPT::rsImage<T>& x, RAPT::rsImage<T>& y)
{
// Applies a 7x7 discrete Gaussian kernel (integer weights divided by their sum 1003) to the
// interior pixels of x and writes the result into y. The three-pixel border of y is left
// untouched (edge handling is still todo).
//rsAssert(y.hasSameShapeAs(x)); // todo!!
rsAssert(y.getPixelPointer(0,0) != x.getPixelPointer(0,0), "Cant be used in place");
rsAssert(y.hasSameShapeAs(x), "Input and output images must have the same shape");
int w = x.getWidth();
int h = x.getHeight();
// Reciprocal of the kernel's total weight -> unit DC gain:
T c = T(1) / T(1003);
for(int j = 3; j < h-3; j++)
{
for(int i = 3; i < w-3; i++)
{
// Outer ring:
T t1 = 2*c * (x(i-0, j-3) + x(i-0, j+3) + x(i-3, j-0) + x(i+3, j+0)); // edge centers
T t2 = 1*c * (x(i-1, j-3) + x(i-1, j+3) + x(i+1, j-3) + x(i+1, j+3) +
x(i-3, j-1) + x(i-3, j+1) + x(i+3, j-1) + x(i+3, j+1) );
// Middle ring:
T t3 = 3*c * (x(i-2, j-2) + x(i-2, j+2) + x(i+2, j-2) + x(i+2, j+2)); // corners
T t4 = 13*c * (x(i-1, j-2) + x(i-1, j+2) + x(i+1, j-2) + x(i+1, j+2) +
x(i-2, j-1) + x(i-2, j+1) + x(i+2, j-1) + x(i+2, j+1) );
T t5 = 22*c * (x(i-0, j-2) + x(i-0, j+2) + x(i-2, j-0) + x(i+2, j+0)); // edge centers
// Inner ring:
T t6 = 59*c * (x(i-1, j-1) + x(i-1, j+1) + x(i+1, j-1) + x(i+1, j+1)); // corners
T t7 = 97*c * (x(i-0, j-1) + x(i-0, j+1) + x(i-1, j-0) + x(i+1, j+0)); // edge centers
// Center:
T t8 = 159*c * x(i,j);
// Sum:
y(i,j) = t1 + t2 + t3 + t4 + t5 + t6 + t7 + t8;
}
}
}
// This 7x7 kernel seems to be just a more precise version of the 5x5 kernel but has the same width
// Todo:
// -handle boundaries
// -implement a general filter3x3 function that takes a 3x3 image to be used as filter kernel
// -implement a general filter(img, kernel) function. Maybe use the convolution routine from
// rsMatrix (we may create an rsMatrixView). ...but maybe the 2D convolution routine should be
// dragged out of rsMatrix. Maybe have a class rsArrayTools2D similar to rsArrayTools and let it
// have a function convolve(const T *x, int Mx, int Nx, const T* h, int Mh, int Nh, T* y)
// -Implement Gaussian filters of sizes 5x5, 7x7, see:
// https://homepages.inf.ed.ac.uk/rbf/HIPR2/gsmooth.htm
// https://www.researchgate.net/figure/Discrete-approximation-of-the-Gaussian-kernels-3x3-5x5-7x7_fig2_325768087
// ...done - but we don't handle the edges yet
// -Implement the "magic kernel" and its sharp variant:
// http://www.johncostella.com/magic/ http://www.johncostella.com/magic/mks.pdf
template<class T>
void sobelEdgeDetector3x3(const RAPT::rsImage<T>& x, RAPT::rsImage<T>& G, RAPT::rsImage<T>& t)
{
  // Computes, for each interior pixel of x, the Sobel gradient magnitude into G and the
  // gradient direction (in radians, via atan2) into t.
  // See https://en.wikipedia.org/wiki/Sobel_operator
  rsAssert(G.getPixelPointer(0,0) != x.getPixelPointer(0,0), "Cant be used in place");
  rsAssert(t.getPixelPointer(0,0) != x.getPixelPointer(0,0), "Cant be used in place");
  rsAssert(G.hasSameShapeAs(x), "Input and output images must have the same shape");
  rsAssert(t.hasSameShapeAs(x), "Input and output images must have the same shape");
  int w = x.getWidth();   // these two were commented out, leaving the loop bounds w,h
  int h = x.getHeight();  // undeclared - the function did not compile
  for(int j = 1; j < h-1; j++) // loop over lines
  {
    for(int i = 1; i < w-1; i++) // loop over pixels in current line
    {
      // Horizontal and vertical Sobel responses (correlation with the two 3x3 kernels):
      T Gx = x(i-1, j-1) - x(i+1, j-1)
        + 2*(x(i-1, j  ) - x(i+1, j  ))
        +    x(i-1, j+1) - x(i+1, j+1);
      T Gy = x(i-1, j-1) - x(i-1, j+1)
        + 2*(x(i,   j-1) - x(i,   j+1))
        +    x(i+1, j-1) - x(i+1, j+1);
      G(i, j) = sqrt(Gx*Gx + Gy*Gy); // was sqrt(Gx*Gx, + Gy*Gy) - stray comma passed 2 args
      t(i, j) = atan2(Gy, Gx);
    }
  }
  // todo: handle edges; maybe factor into sobelX, sobelY
}
// ...needs tests
// https://en.wikipedia.org/wiki/Sobel_operator
// maybe factor into sobelX, sobelY - can use same temp images as used for G and t
// see also:
// https://en.wikipedia.org/wiki/Prewitt_operator
// https://en.wikipedia.org/wiki/Roberts_cross
/** Approximates a Gaussian blur by using a first order bidirectional IIR lowpass filter several
times in the horizontal and vertical direction ("bidirectional" here refers to forward/backward,
not horizontal/vertical). The impulse response of a single forward pass is a decaying exponential,
but by virtue of the central limit theorem, the more such impulse responses we convolve in, the
more gaussian the shape becomes. A number of passes of 6 seems to be good enough to get a kernel
that appears visually circular. Using just a single pass, the shape looks more diamond-like - which
is not an isotropic kernel. If we want to realize isotropic kernels by filtering horizontally and
vertically, we need to start from a separable kernel - which the gaussian kernel is. */
template<class T>
void gaussBlurIIR(const RAPT::rsImage<T>& x, RAPT::rsImage<T>& y, T radius, int numPasses = 6)
{
// Approximates a Gaussian blur of the given radius by running a bidirectional first order IIR
// lowpass numPasses times horizontally and numPasses times vertically over the image (see the
// doc comment above for why repeated passes approach a Gaussian).
rsAssert(y.getPixelPointer(0,0) != x.getPixelPointer(0,0), "Cant be used in place");
rsAssert(y.hasSameShapeAs(x), "Input and output images must have the same shape");
int w = x.getWidth();
int h = x.getHeight();
T scaledRadius = radius / numPasses;
// ad-hoc - maybe try to find better formula - maybe plot the variance as function of the number
// of passes (in 1D) to figure out the right formula experimentally - or maybe an analytic
// formula can be derived?...maybe normalize the area under the curve to unity?
// Create 1D IIR filter and set up its coefficients - we want a^r = 1/2 -> a = 2^(-1/r). This
// means the impulse response decays down to 1/2 after r pixels for a single pass (right?):
//rsOnePoleFilter<float, float> flt; // maybe use rsFirstOrderFilterBase
rsFirstOrderFilterBase<T, T> flt; // maybe use rsFirstOrderFilterBase
T a = pow(2.f, -1.f/scaledRadius); // is this formula right? we need something that lets b approach 1 as r approaches 0
T b = 1.f - a;
flt.setCoefficients(b, 0.f, a);
// horizontal passes:
y.copyPixelDataFrom(x);
for(int n = 0; n < numPasses; n++) // loop over the passes
for(int j = 0; j < h; j++) // loop over the rows
{
flt.applyForwardBackward(y.getPixelPointer(0, j), y.getPixelPointer(0, j), w); // old
//// new:
//T xL = y(0, j); // maybe we should use x(0, j) instead - likewise for xR
//T xR = y(w-1, j);
//flt.applyForwardBackward(y.getPixelPointer(0, j), y.getPixelPointer(0, j), w, xL, xR);
// I think, the new version handles the boundary condition differently - instead of assuming
// zero samples, it assumes infinite repetition of the boundary pixel values
}
// vertical passes (the extra argument w is the stride, walking down a column):
for(int n = 0; n < numPasses; n++)
for(int i = 0; i < w; i++)
{
flt.applyForwardBackward(y.getPixelPointer(i, 0), y.getPixelPointer(i, 0), h, w); // old
//// new
//T xL = y(i, 0);
//T xR = y(i, h-1);
//flt.applyForwardBackward(y.getPixelPointer(i, 0), y.getPixelPointer(i, 0), h, w, xL, xR);
}
// todo: maybe let the user decide, how the boundaries are handled (repeat last pixel or assume
// zeros...maybe other options, like "reflect" could be used as well)
// todo: scale the filter coefficient b, such that the integral of the impulse response becomes
// 1 (or maybe the sum of the discrete implementation)....or maybe the sum-of-squares? maybe make
// that user selectable - the sum of the values seems to be appropriate, if we want to use it
// for local averaging, as we do in the SIRP model
// i think, this is already ensured because the geometric series
// S = sum_k a^k = 1/(1-a)
// where k runs from 0 to infinity - so with b=1, we would get a sum of 1/(1-a) - scaling that by
// the reciprocal, would scale by 1-a, which is exactly the formula for b
// -Maybe combine horizontal and vertical passes with diagonal passes. Maybe that converges
// faster to an isotropic filter. I think, a diagonal filter has stride w+1 or w-1. It also
// needs to scale the coeff by 1/sqrt(2) to compensate for the greater distance.
// -Maybe for the boundaries, use c*xL, c*xR where 0 <= c <= 1 is a user parameter to dial
// between zero and repeat boundary conditions
}
void testGaussBlurFIR()
{
// Renders the impulse responses of the FIR Gaussian kernels (3x3, 5x5, 7x7) and of cascades of
// them (3x3 twice, 5x5 then 3x3, 5x5 twice) to ppm files for visual comparison.
int w = 21;
int h = 21;
using IP = RAPT::rsImageProcessor<float>;
// Allocate input and output images:
RAPT::rsImage<float> x(w,h), y3(w,h), y5(w,h), y7(w,h), y33(w,h), y53(w,h), y55(w,h);
// Create black input with single white spot at the center:
x.fillAll(0.f);
x(w/2, h/2) = 1.f;
// Create filtered versions and write them to files:
gaussBlur3x3(x, y3); IP::normalize(y3); writeImageToFilePPM(y3, "Gauss3.ppm");
gaussBlur5x5(x, y5); IP::normalize(y5); writeImageToFilePPM(y5, "Gauss5.ppm");
gaussBlur7x7(x, y7); IP::normalize(y7); writeImageToFilePPM(y7, "Gauss7.ppm");
gaussBlur3x3(y3, y33); IP::normalize(y33); writeImageToFilePPM(y33, "Gauss3+3.ppm");
gaussBlur3x3(y5, y53); IP::normalize(y53); writeImageToFilePPM(y53, "Gauss5+3.ppm");
gaussBlur5x5(y5, y55); IP::normalize(y55); writeImageToFilePPM(y55, "Gauss5+5.ppm");
// Maybe factor out into a lambda function to be called like
// writeKernelFile(x, &gausBlur3x3, "3x3") etc.
// Observations:
// -Gauss5 and Gauss7 look almost indistinguishable. I guess, they have the same width of the
// Gaussian and in the 7x7 kernel, the outer sections have negligible amplitude.
// ToDo:
// -Compare 5x5 blur to applying a 3x3 blur twice, likewise with 7x7 blurs etc.
// (removed the unused local `int dummy = 0;` debugger anchor)
}
void testGaussBlurIIR()
{
// Demonstrates gaussBlurIIR: blurs a single white center pixel on a black background and
// writes the input, the normalized output and a 4x upscaled output to ppm files.
int w = 101;
int h = 101;
float radius = 20.0f; // decay down to 1/2 after that many pixels (for 1 pass)
int numPasses = 6; // 6 seems good enough for a visually isotropic filter
// controls shape - the higher, the better we approximate a true Gaussian - 5 or 6 seems to be
// good enough
//radius = 30.f; // test
//numPasses = 1; // for testing decay
// try with w != h
RAPT::rsImage<float> x(w,h), y(w,h); // input and output images
// input is a single white pixel in the middle on black background:
x.fillAll(0.f);
x(w/2, h/2) = 1.f;
//x(w/4, h/4) = 1.f;
gaussBlurIIR(x, y, radius, numPasses);
//float ySum = rsArrayTools::sum(y.getPixelPointer(0,0), w*h);
// should be 1, regardless of radius and numPasses
RAPT::rsImageProcessor<float>::normalize(y);
// without normalization, it looks very dark - what we actually want is an energy-preserving
// filter (i think)...or maybe a filter that preserves the total sum of pixel values?
writeImageToFilePPM(x, "InputGaussIIR.ppm");
writeImageToFilePPM(y, "OutputGaussIIR.ppm");
writeScaledImageToFilePPM(y, "OutputGaussIIRx4.ppm", 4);
// Observations:
// -when using a low number of passes, we get a diamond-like shape which becomes more and more
// circular, the more passes we use
// -without normalizing the output, it tends to get very dark, such that we don't really see
// anything - however, the sum of the pixel values is nevertheless 1, as it should be
// (it tends to get less than one, when the radius becomes so large that there would be a
// significant number nonzero zero pixels outside the image - but if we choose the radius such
// that nonzero pixels only exist within the image, it seems to work)
// -when using last-pixel repetition for the boundary conditions and the white pixel is not at
// the center (like using x(w/4, h/4) = 1.f;) and the radius is sufficiently large, the
// "center of mass" in the filtered image shifts toward the corner - i.e. the corner is brighter
// than it should be. that makes sense, because assuming the out-of-range pixels to just repeat
// the final brightness value - where it should actually decay - we assume then to be brighter
// than they actually would be, when using the same filter on a larger image and cropping later
// -maybe we should use the final pixel values from the original image in each pass - and not
// the ones of the output of the previous pass
}
// mathematically, all these implementations should give the same results, but they may behave
// differently numerically and/or may be more or less efficient:
/** Applies the given filter to the given image multiple times. This implementation interleaves
the horizontal and vertical passes. */
template<class T>
void applyMultiPass1(rsFirstOrderFilterBase<T, T>& flt, rsImage<T>& img, int numPasses)
{
  // Filters the image in place numPasses times. Each pass is one bidirectional
  // (forward/backward) sweep over every row followed by one over every column, so horizontal
  // and vertical filtering are interleaved pass by pass.
  const int width  = img.getWidth();
  const int height = img.getHeight();
  for(int pass = 0; pass < numPasses; pass++)
  {
    for(int row = 0; row < height; row++)    // horizontal sweep over all rows
    {
      T* rowPtr = img.getPixelPointer(0, row);
      flt.applyForwardBackward(rowPtr, rowPtr, width);
    }
    for(int col = 0; col < width; col++)     // vertical sweep over all columns
    {
      T* colPtr = img.getPixelPointer(col, 0);
      flt.applyForwardBackward(colPtr, colPtr, height, width); // stride = width
    }
  }
}
// instead of using a serial connection of forward and backward passes, we could also try a
// parallel connection - the center sample would be counted twice, so maybe, we should use
// y(x) = forward(x) + backward(x) - b*x
// where forward, backward mean the forward and backward application of the filter and b is the
// feedforward coeff of the filter. x is the input signal and y is the output
/** Apply filter in west-south-west/east-north-east direction (and back). */
template<class T>
void applySlantedWSW2ENE(rsFirstOrderFilterBase<T, T>& flt, const rsImage<T>& x, rsImage<T>& y)
{
// Applies the filter along slanted lines that advance 2 pixels east for every 1 pixel north
// (slope 1/2), first WSW->ENE and then back. Callers use this in place (y == x), which is why
// the commented "new" variants below that re-read x after writing y are problematic.
rsAssert(y.hasSameShapeAs(x));
int w = x.getWidth();
int h = x.getHeight();
int numDiagonals = (w+1)/2 + h - 1; // verify this!
for(int d = 0; d < numDiagonals; d++)
{
// figure out start and end coordinates of the current diagonal:
// diagonals 0..h-1 start on the left edge at (0, d); later ones start on the bottom row
int iStart = 0;
int jStart = d;
if(d >= h) {
jStart = h-1;
iStart = 2*(d-jStart); }
// apply forward pass from west-south-west to east-north-east:
int i = iStart;
int j = jStart;
flt.reset(); // old - todo: use some xL - maybe x(iStart, jStart)
//T xij = x(i,j);
//flt.setStateForConstInput(x(i, j)); // new - seems to cause problems
//int dummy = 0;
// todo: figure out, if it also causes similar problems when doing horizontal and vertical
// filtering instead of slanted
// two pixels are consumed per step east-north-east: (i,j),(i+1,j), then j decreases
while(i < w && j >= 0) {
y(i, j) = flt.getSample(x(i, j));
i++; if(i >= w) { j--; break; }
y(i, j) = flt.getSample(x(i, j));
i++; j--; }
// apply backward pass from east-north-east to west-south-west:
i--; j++;
flt.prepareForBackwardPass(); // old
//flt.prepareForBackwardPass(x(i, j)); // new
// no - that's wrong when the filter is used in place - then x(i,j) has already been
// overwritten by now and now contains y(i,j) ...we somehow must extract the last sample of the
// slanted line *before* overwriting it ...the filter-state x1 should still contain it, so
// maybe flt.prepareForBackwardPass(flt.getStateX()); should work
//T x1 = flt.getStateX();
//flt.prepareForBackwardPass(flt.getStateX());
// for odd widths, the last diagonal step covered only one pixel - handle it separately:
if(rsIsOdd(w) && i == w-1) {
y(i, j) = flt.getSample(x(i, j));
i--; j++; }
while(i >= 0 && j <= jStart) {
y(i, j) = flt.getSample(x(i, j));
i--; if(i < 0) break;
y(i, j) = flt.getSample(x(i, j));
i--; j++; }
}
}
// hmm.. the "new" lines lead to weird results
// example: image with w=9, h=6:
// │ 0 1 2 3 4 5 6 7 8
// ──┼──────────────────
// 0 │ 0 0 1 1 2 2 3 3 4
// 1 │ 1 1 2 2 3 3 4 4 5
// 2 │ 2 2 3 3 4 4 5 5 6
// 3 │ 3 3 4 4 5 5 6 6 7
// 4 │ 4 4 5 5 6 6 7 7 8
// 5 │ 5 5 6 6 7 7 8 8 9
// matrix entries stand for the index of the diagonal d - we mark each pixel with the index of the
// diagonal that crosses it
// traversal of pixel locations (only forward)
// d (i,j),(i,j),(i,j),(i,j),(i,j),(i,j),(i,j),(i,j),(i,j),(i,j)
// 0: (0,0),(1,0)
// 1: (0,1),(1,1),(2,0),(3,0)
// 2: (0,2),(1,2),(2,1),(3,1),(4,0),(5,0)
// 3: (0,3),(1,3),(2,2),(3,2),(4,1),(5,1),(6,0),(7,0)
// 4: (0,4),(1,4),(2,3),(3,3),(4,2),(5,2),(6,1),(7,1),(8,0)
// 5: (0,5),(1,5),(2,4),(3,4),(4,3),(5,3),(6,2),(7,2),(8,1)
// 6: (2,5),(3,5),(4,4),(5,4),(6,3),(7,3),(8,2)
// 7: (4,5),(5,5),(6,4),(7,4),(8,3)
// 8: (6,5),(7,5),(8,4)
// 9: (8,5)
// desired start pixels for (i,j) for backward pass - always the last index pair in each line:
// (1,0),(3,0),(5,0),(7,0),(8,0),(8,1),(8,2),(8,3),(8,4),(8,5) ..corresponding start indices:
// (0,0),(0,1),(0,2),(0,3),(0,4),(0,5),(2,5),(4,5),(6,5),(8,5)
// https://theasciicode.com.ar/extended-ascii-code/box-drawings-single-horizontal-line-character-ascii-code-196.html
template<class T>
void applySlanted(rsImage<T>& img, T kernelWidth)
{
  // Blurs the image in place along the two slanted (slope 1/2) directions: a bidirectional
  // first order lowpass is applied along the WSW->ENE diagonals, then the image is mirrored
  // horizontally, filtered again and mirrored back.
  // One step along a slanted line covers sqrt(1^2 + 0.5^2) = sqrt(1.25) pixels of distance, so
  // the kernel width is shrunk by that factor. -> verify
  kernelWidth /= sqrt(T(1.25));
  const T a = pow(T(2), T(-1)/kernelWidth); // decay reaches 1/2 after kernelWidth steps
  rsFirstOrderFilterBase<T, T> flt;
  flt.setCoefficients(T(1)-a, T(0), a);
  applySlantedWSW2ENE(flt, img, img);
  flipLeftRight(img, img);
  applySlantedWSW2ENE(flt, img, img);
  flipLeftRight(img, img);
  // Notes:
  // -apply->flip->apply->flip and flip->apply->flip->apply give differently-looking corners
  //  (test with a high kernel width) - maybe extending the image to aspect ratio 2x1, filtering
  //  and cropping would avoid that
  // -boundary handling is still an open question: repeating the last pixel makes the result
  //  very sensitive to cropping; some local average near the boundary might be better
}
template<class T>
void testImageFilterSlanted(int w, int h, T kernelWidth)
{
  // Renders the impulse response of the slanted filter: a single white pixel at the image
  // center is blurred, the result normalized and written to a file named after the dimensions.
  rsImage<T> img(w, h);
  img(w/2, h/2) = 1.f;
  applySlanted(img, kernelWidth);
  rsImageProcessor<T>::normalize(img);
  std::string fileName = "SlantedFilter_" + std::to_string(w) + "x" + std::to_string(h) + ".ppm";
  writeImageToFilePPM(img, fileName.c_str());
}
void testImageFilterSlanted()
{
// Driver that renders the slanted-filter impulse response for various image sizes and kernel
// widths (most size combinations are currently disabled).
testImageFilterSlanted( 80, 40, 15.f);
//testImageFilterSlanted( 80, 40, 30.f);
//testImageFilterSlanted( 9, 6, 2.f);
//testImageFilterSlanted( 9, 7, 2.f); // result not symmetrical
//testImageFilterSlanted( 20, 10, 3.f);
//testImageFilterSlanted( 21, 11, 3.f);
/*
testImageFilterSlanted( 50, 100, 30.f);
testImageFilterSlanted( 51, 100, 30.f);
testImageFilterSlanted(100, 30, 30.f);
testImageFilterSlanted(100, 50, 30.f);
testImageFilterSlanted(100, 70, 30.f);
testImageFilterSlanted(100, 100, 30.f);
testImageFilterSlanted(101, 30, 30.f);
testImageFilterSlanted(101, 50, 30.f);
testImageFilterSlanted(101, 70, 30.f);
testImageFilterSlanted(101, 101, 30.f);
testImageFilterSlanted(102, 52, 30.f);
*/
// Observations:
// -When the image width is even, we see a checkerboard pattern. I think, in the second pass,
// every even-numbered lines encounter a doublet of nonblack pixels along its journey and the
// odd numbered lines encounter only black pixels (or vice versa). When w is odd, every line of
// the second pass encounters one nonblack pixel.
// -the checkerboard pattern is undesirable, when the filter is used on its own, but when used
// as part of a higher-level filtering algo that also includes vertical and horizontal passes,
// i think, the effect will be smeared out by these passes, so it may not be a problem in
// practice - we need tests that use the filter in this context
}
/** Apply filter in south-west/north-east direction (and back). */
template<class T>
void applyDiagonalSW2NE(rsFirstOrderFilterBase<T, T>& flt, rsImage<T>& img)
{
// Filters the image in place along its 45-degree anti-diagonals: forward from top-right toward
// bottom-left, then backward after preparing the filter for the reverse sweep.
int w = img.getWidth();
int h = img.getHeight();
int numDiagonals = w + h - 1;
for(int d = 0; d < numDiagonals; d++)
{
// figure out start and end coordinates of the current diagonal:
// diagonals 0..w-1 start on the top row at (d, 0); later ones start on the right edge
int iStart = d;
int jStart = 0;
if(d >= w) {
iStart = w-1;
jStart += d-w+1; }
// go from top-right to bottom-left:
flt.reset(); // fresh state per diagonal (assumes zeros outside the image)
int i = iStart;
int j = jStart;
while(i >= 0 && j < h) {
img(i, j) = flt.getSample(img(i, j));
i--; j++; } // go one pixel to bottom-left
// go from bottom-left to top-right:
flt.prepareForBackwardPass();
i++; j--;
while(i <= iStart && j >= jStart) {
img(i, j) = flt.getSample(img(i, j));
i++; j--; } // go one pixel to top-right
}
}
/** Apply filter in south-east/north-west direction (and back). */
template<class T>
void applyDiagonalSE2NW(rsFirstOrderFilterBase<T, T>& flt, rsImage<T>& img)
{
int w = img.getWidth();
int h = img.getHeight();
int numDiagonals = w + h - 1;
for(int d = 0; d < numDiagonals; d++)
{
int iStart = 0;
int jStart = h-d-1;
if(d >= h) {
iStart = d-h+1;
jStart = 0; }
flt.reset();
int i = iStart;
int j = jStart;
while(i < w && j < h) {
img(i, j) = flt.getSample(img(i, j)); i++; j++; }
flt.prepareForBackwardPass();
i--; j--;
while(i >= iStart && j >= jStart) {
img(i, j) = flt.getSample(img(i, j)); i--; j--; }
}
}
template<class T>
void applyDiagonal(rsFirstOrderFilterBase<T, T>& flt, rsImage<T>& img)
{
  // Applies the filter along both diagonal directions, in place. The order in which the
  // SW->NE and SE->NW sweeps run determines how edge effects show up, so both orders are run
  // on copies of the input and the two results are averaged - not ideal (twice the work), but
  // it keeps the output symmetric. Seems to work also for h > w.
  const int w = img.getWidth();
  const int h = img.getHeight();
  rsImage<T> order1 = img;  // SW->NE first, then SE->NW
  applyDiagonalSW2NE(flt, order1);
  applyDiagonalSE2NW(flt, order1);
  rsImage<T> order2 = img;  // SE->NW first, then SW->NE
  applyDiagonalSE2NW(flt, order2);
  applyDiagonalSW2NE(flt, order2);
  // average the two results back into img:
  rsArrayTools::weightedSum(order1.getPixelPointer(0,0), order2.getPixelPointer(0,0),
    img.getPixelPointer(0,0), w*h, T(0.5), T(0.5));
}
template<class T>
void exponentialBlur(rsImage<T>& img, T radius)
{
  // One pass of exponential blur, in place: a horizontal+vertical sweep with the given decay
  // radius, followed by a diagonal sweep with the radius shrunk by sqrt(2), because diagonal
  // neighbors are sqrt(2) pixels apart (Pythagoras).
  rsFirstOrderFilterBase<T, T> flt;
  auto setRadius = [&flt](T r)
  {
    T a = pow(T(2), T(-1)/r); // per-pixel decay factor: a^r == 1/2
    flt.setCoefficients(T(1)-a, T(0), a);
  };
  setRadius(radius);
  applyMultiPass1(flt, img, 1);     // horizontal and vertical sweep
  setRadius(radius / sqrt(T(2)));
  applyDiagonal(flt, img);          // both diagonal sweeps
}
template<class T>
void exponentialBlur(rsImage<T>& img, T radius, int numPasses)
{
  // Multipass exponential blur, in place. The per-pass radius is contracted by sqrt(numPasses),
  // which empirically looks right (dividing by numPasses itself contracts too much).
  const T perPassRadius = radius / sqrt(T(numPasses));
  for(int pass = 0; pass < numPasses; pass++)
    exponentialBlur(img, perPassRadius);
}
// todo: figure out the right formula for contracting the radius as function of the number of
// passes by considering the impulse response of the N-pass filter (given by the N-fold convolution
// of the 1-pass impulse response with itself). maybe normalize the area under that impulse
// response (or maybe the squared impulse response - but that may be equivalent to what i already
// have implemented for the butterworth scaler - energy normalization). let
// h_1(t) = exp(-t/T) for t >= 0, h_N(t) = conv(h_1, h_{N-1}) be the impulse responses of 1-pass
// and N-pass filters -> obtain expressions for h_N(t) and \int_0^{inf} h_N(t) dt...maybe it's
// something like h_N(t) = k_N * t^N * exp(-t/T) ? -> use sage
// try a (recursive) moving-average filter - should create a rectangular block - needs to
// implement prepareForBackwardPass - when input goes to zero, output will go to zero in a
// straight line - so the y-state should be just half of the final y-state?
void testExponentialBlur()
{
// Renders the impulse response of the multipass exponential blur (single white center pixel),
// writes the normalized kernel image plus contour-line and contour-fill plots to ppm files.
int w = 401;
int h = 401;
float radius = 12.f;
int numPasses = 3; // i'd say, 3 is the sweet spot in terms of the tradeoff between isotropy
// and CPU requirements - maybe even just 2 - with 3, the non-isotropy is not even visible in the
// outermost contours anymore, with two it seems invisible in the 1/256 contour - and so it
// should be even less visible in the kernel itself - but more passes may be required, if the
// shape of the decay is desired to be more Gaussian
rsImage<float> img(w, h);
img(w/2, h/2) = 1.f;
exponentialBlur(img, radius, numPasses);
rsImageProcessor<float>::normalize(img);
writeImageToFilePPM(img, "ExponentialBlur.ppm");
// plot contours at octave-spaced brightness levels:
std::vector<float> levels = { 1.f/8192, 1.f/4096, 1.f/2048, 1.f/1024, 1.f/512, 1.f/256, 1.f/128,
1.f/64, 1.f/32, 1.f/16, 1.f/8, 1.f/4, 1.f/2 };
std::vector<float> colors(levels.size());
rsFill(colors, 1.f);
rsImageContourPlotter<float, float> contourPlotter;
rsImage<float> contourLines = contourPlotter.getContourLines(img, levels, colors, true);
colors = rsRangeLinear(0.f, 1.f, (int) levels.size());
rsImage<float> contourFills = contourPlotter.getContourFills(img, levels, colors, true);
writeImageToFilePPM(contourLines, "ExponentialBlurContourLines.ppm");
writeImageToFilePPM(contourFills, "ExponentialBlurContourFills.ppm"); // looks wrong!
// -the shape of the contours is a bit like a rounded octagon, but viewing the actual kernel,
// that's not really an issue
// -the brightness decays away from the center exponentially, as expected (the spacing of the
// contours at the center is a bit wider than the given radius in pixels because we have
// multiple passes and the decay is exponential only asymptotically)
// -maybe gaussian blur can be achieved by using multiple passes of the exponential blur
// -next step: try to use a complex version of that - what if we just use the existing functions
// with a complex radius? would that work?
// -as an alternative to normalizing after applying the filter: calculate total energy of the
// image before and after filtering and multiply by the sqrt of the quotient - preserving the
// total energy preserves perceived overall brightness (right?)
// -according to this video: https://www.youtube.com/watch?v=LKnqECcg6Gw human brightness/color
// perception follows the log of the energy where the energy is given by the squared pixel
// brightnesses - that implies, a perceptually correct filter should do it like this:
// square pixel values -> apply filter -> square-root pixel values. this is especially important
// when the filter is applied to RGB channels of a color image
}
// move to rsImageProcessor when finished:
template<class T>
void flipLeftRight(const rsImage<T>& x, rsImage<T>& y)
{
  // Writes a horizontally mirrored copy of image x into y, i.e. the leftmost column of x
  // becomes the rightmost column of y and vice versa. y is resized to match x.
  const int w = x.getWidth();
  const int h = x.getHeight();
  y.setSize(w, h);
  for(int j = 0; j < h; j++)
    for(int i = 0; i < w; i++)
      y(i, j) = x(w-1-i, j);
}
template <class T>
void rsReverse(T* x, int N, int stride)
{
  // Reverses the order of the N elements that are spaced "stride" apart in the array x,
  // i.e. the elements x[0], x[stride], ..., x[(N-1)*stride].
  // Fix: the original swapped x[i] with x[N-i-1], i.e. it paired a strided position with a
  // contiguous one, which is wrong for any stride != 1. Element k must be swapped with
  // element N-1-k, which lives at index (N-1-k)*stride.
  for(int k = 0; k < N/2; k++)
  {
    T tmp             = x[k*stride];
    x[k*stride]       = x[(N-1-k)*stride];
    x[(N-1-k)*stride] = tmp;
  }
}
// if deemed generally useful, move to rsArrayTools
template<class T>
void flipTopBottom(const rsImage<T>& x, rsImage<T>& y)
{
  // Writes a vertically mirrored copy of image x into y, i.e. the top row of x becomes the
  // bottom row of y and vice versa. y is resized to match x.
  // Fix: the original resized y but never wrote to it - instead it tried to reverse the
  // columns of the *const* input x in place (via a strided rsReverse on &x(i,0)), so the
  // output stayed empty. Now each source row j is copied into destination row h-1-j.
  int w = x.getWidth();
  int h = x.getHeight();
  y.setSize(w, h);
  for(int j = 0; j < h; j++)
    for(int i = 0; i < w; i++)
      y(i, h-1-j) = x(i, j);
}
template<class T>
void transpose(const rsImage<T>& x, rsImage<T>& y)
{
  // Writes the transpose of image x into y: pixel (i,j) of x lands at (j,i) in y, so an
  // image of size w-by-h becomes one of size h-by-w.
  const int numCols = x.getWidth();
  const int numRows = x.getHeight();
  y.setSize(numRows, numCols);
  for(int j = 0; j < numRows; j++)
    for(int i = 0; i < numCols; i++)
      y(j, i) = x(i, j);
}
template<class T>
bool writeComplexImageToFilePPM(const rsImage<std::complex<T>>& img, const char* path)
{
int w = img.getWidth();
int h = img.getHeight();
rsImage<T> imgR(w,h), imgI(w,h), imgA(w,h), imgP(w,h);
for(int j = 0; j < h; j++) {
for(int i = 0; i < w; i++) {
imgR(i, j) = img(i, j).real();
imgI(i, j) = img(i, j).imag();
imgA(i, j) = abs(img(i, j));
imgP(i, j) = arg(img(i, j)); }}
rsImageProcessor<T>::normalize(imgR);
rsImageProcessor<T>::normalize(imgI);
rsImageProcessor<T>::normalize(imgA);
rsImageProcessor<T>::normalize(imgP);
writeImageToFilePPM(imgR, "RealPart.ppm");
writeImageToFilePPM(imgI, "ImagPart.ppm");
writeImageToFilePPM(imgA, "AbsValue.ppm");
writeImageToFilePPM(imgP, "Phase.ppm");
// todo: make writing all parts (re,im,abs,phase) optional, use single temp image for all parts
// -use path variable - append Re,Im,Abs,Phs - requires that the writer function does not expect
// the .ppm extension to be passed - the function should append it itself - this requires a lot
// of code to be modified
return true; // preliminary
}
template<class T>
void applyComplexExpBlur(rsImage<std::complex<T>>& img, T radius, T omega, int numPasses,
  T diagRadiusScaler = sqrt(T(2)), T diagFreqScaler = sqrt(T(2)))
{
  // Applies a complex-valued exponential blur to img: a bidirectional first order filter is
  // run along the horizontal/vertical and the diagonal/antidiagonal directions, numPasses
  // times each. The imaginary part of the feedback coefficient introduces an oscillation
  // with radian frequency omega (per pixel) into the kernel.
  // Fix: the second setCoefficients call used the literal 0.f where the first used T(0) -
  // wrong literal type for T = double; both calls now go through one shared helper.
  using Complex = std::complex<T>;

  // Compensate for the number of passes so that the overall spread and oscillation of the
  // composed kernel stay roughly invariant:
  radius /= sqrt(T(numPasses));
  omega  *= sqrt(T(numPasses));

  // Sets up the one-pole coefficients for a given decay radius (in pixels) and oscillation
  // frequency. Note: the phase response is controlled by the angle of b, but due to the
  // bidirectional application this cancels out, so the initial phase is always 0 regardless
  // of the angle of b.
  rsFirstOrderFilterBase<Complex, Complex> flt;
  auto setupFilter = [&flt](T r, T w)
  {
    Complex j(T(0), T(1));                       // imaginary unit
    Complex a = pow(T(2), T(-1)/r) + j*w;        // feedback coeff: decay plus rotation
    flt.setCoefficients(Complex(1) - a, Complex(T(0)), a);
  };

  // horizontal and vertical passes:
  setupFilter(radius, omega);
  applyMultiPass1(flt, img, numPasses);

  // diagonal and antidiagonal passes - by default, the radius shrinks and the frequency
  // grows by sqrt(2) because a diagonal pixel step is sqrt(2) long (Pythagoras):
  setupFilter(radius/diagRadiusScaler, omega*diagFreqScaler);
  for(int n = 1; n <= numPasses; n++)
    applyDiagonal(flt, img);
}
// Interesting interference patterns can be created when using a rather high frequency (in
// relation to the radius). Also, the multiplication factors for the diagonal passes could be
// different, leading to different results - this is only interesting for artistic purposes - for
// generating isotropic kernels, the factors should be as they are - maybe make them optional
// parameters (done)
void testComplexExponentialBlur()
{
  // Renders the impulse response (i.e. the kernel) of the complex exponential blur and
  // writes its real part, imaginary part, absolute value and phase to ppm files.
  // Fix: removed the unused local "Complex j(0.f, 1.f)".
  int w = 401;
  int h = 401;
  int numPasses = 3;
  using Complex = std::complex<float>;
  float radius = 20.f;
  float omega = float(PI/60);
  rsImage<Complex> img(w, h);
  img(w/2, h/2) = 1.f;              // unit impulse in the center
  rsImage<Complex> imgC(w, h);
  imgC.convertPixelDataFrom(img);   // maybe make it more convenient: imgC = img.convert<Complex>();
  applyComplexExpBlur(imgC, radius, omega, numPasses);
  writeComplexImageToFilePPM(imgC, "ComplexExponentialBlur");
  // Observations:
  // -for spotting anisotropy, the phase-plot seems to be useful
  // -the abs looks cool!
}
void animateComplexExponentialBlur()
{
//rsVideoFileWriter v;
//std::string str = v.getFfmpegInvocationCommand("Balh");
// video parameters:
int w = 400; // ffmpeg accepts only even widths and heights
int h = 400;
int numFrames = 400; // 400 is nice
int fps = 25;
// animation parameters:
int numPasses = 3;
float radius1 = 20.f;
float radius2 = 20.f;
float omega1 = float(PI/60);
float omega2 = float(PI/20);
float radScl1 = sqrt(2.f);
float radScl2 = sqrt(2.f);
float frqScl1 = sqrt(2.f);
float frqScl2 = sqrt(2.f);
//// test:
//omega1 = float(-PI/20);
//omega2 = float(PI/10);
// create animation:
using Complex = std::complex<float>;
rsImage<Complex> imgC(w, h); // complex image
rsImage<float> imgR(w, h); // real image
rsVideoRGB video(w, h);
//video.setPixelClipping(true); // turn this off to reveal problems
for(int n = 0; n < numFrames; n++)
{
// compute frame parameters:
float c = float(n) / float(numFrames-1);
float radius = (1.f-c)*radius1 + c*radius2;
//float c2 = 1 - (c-1)*(c-1); // slower toward the end
//float c2 = sqrt(c);
float c2 = pow(c, 1.f/3.f);
float omega = (1.f-c2)*omega1 + c2*omega2;
float radScl = (1.f-c)*radScl1 + c*radScl2;
float frqScl = (1.f-c)*frqScl1 + c*frqScl2;
// compute complex frame:
imgC.clear();
imgC(w/2, h/2) = 1.f;
imgC(w/2, h/2-1) = 1.f;
imgC(w/2-1, h/2) = 1.f;
imgC(w/2-1, h/2-1) = 1.f;
applyComplexExpBlur(imgC, radius, omega, numPasses, radScl, frqScl);
// compute absolute value or real part and append to video:
for(int j = 0; j < h; j++)
{
for(int i = 0; i < w; i++)
{
imgR(i, j) = abs(imgC(i, j));
}
}
rsImageProcessor<float>::normalize(imgR);
video.appendFrame(imgR, imgR, imgR);
}
rsVideoFileWriter vw;
vw.setFrameRate(fps);
vw.setCompressionLevel(10); // 0: lossless, 10: good enough, 51: worst
vw.setDeleteTemporaryFiles(false);
//vw.writeVideoToFile(video, fileName);
vw.writeVideoToFile(video, "ComplexExpBlur");
//ffmpeg -y -r 25 -i VideoTempFrame%d.ppm -vcodec libx264 -crf 10 -pix_fmt yuv420p -preset veryslow ExpBlur.mp4
// -the frequency sweep should probably be nonlinear (slower at the end - where the frequency
// is higher)
//int dummy = 0;
}
/** This implementation first does all the horizontal passes and then all the vertical passes. */
template<class T>
void applyMultiPass2(rsFirstOrderFilterBase<T, T>& flt, rsImage<T>& img, int numPasses)
{
  const int w = img.getWidth();
  const int h = img.getHeight();

  // horizontal passes - filter each row in place:
  for(int pass = 0; pass < numPasses; pass++)
  {
    for(int j = 0; j < h; j++)
    {
      T* row = img.getPixelPointer(0, j);
      flt.applyBidirectionally(row, row, w);
    }
  }

  // vertical passes - filter each column in place, using the image width as stride:
  for(int pass = 0; pass < numPasses; pass++)
  {
    for(int i = 0; i < w; i++)
    {
      T* col = img.getPixelPointer(i, 0);
      flt.applyBidirectionally(col, col, h, w);
    }
  }
}
/** Implements a chain of identical first order filters.
The prepareForBackwardPass function does not work correctly because only for the first stage we
can assume that the output goes to zero at the edges - for all following stages, this assumption
is wrong because the filters that come before still produce nonzero outputs. Bottom line: it
doesn't work as intended for image processing.
*/
template<class TSig, class TPar> // use TSig, TPar