-
Notifications
You must be signed in to change notification settings - Fork 11
/
analysis.py
1564 lines (1303 loc) · 56.1 KB
/
analysis.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""Helper functions for processing images."""
import cv2
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from functools import reduce
from scipy.optimize import curve_fit, minimize
from slmsuite.holography.toolbox import format_2vectors
from slmsuite.misc.math import REAL_TYPES
from slmsuite.misc.fitfunctions import gaussian2d
def take(
    images, vectors, size,
    centered=True, integrate=False, clip=False,
    return_mask=False, plot=False, mp=np
):
    """
    Crop integration regions around an array of ``vectors``, yielding an array of images.
    Each integration region is a rectangle of the same ``size``. Similar to but more
    general than :meth:`numpy.take`. Useful for gathering data from spots in spot
    arrays. Operates with some speed due to the vectorized nature of implemented slicing.
    Parameters
    ----------
    images : array_like
        2D image or array of 2D images.
    vectors : array_like of floats
        2-vector (or 2-vector array). Location(s) of integration region anchor(s) in pixels,
        see ``centered``.
        See :meth:`~slmsuite.holography.toolbox.format_2vectors`.
    size : int or (int, int)
        Size of the rectangular integration region in ``(w, h)`` format in pixels.
        If a scalar is given, assume square ``(w, w)``.
    centered : bool
        Whether to center the integration region on the ``vectors``.
        If ``False``, ``vectors`` indicates the upper-left corner of the integration region.
        Defaults to ``True``.
    integrate : bool
        If ``True``, the spatial dimension are integrated (summed), yielding a result of the
        same length as the number of vectors. Defaults to ``False``.
    clip : bool
        Whether to allow out-of-range integration regions. ``True`` allows regions outside
        the valid area, setting the invalid region to ``np.nan``
        (or zero if the array datatype does not support ``np.nan``).
        ``False`` throws an error upon out of range. Defaults to ``False``.
    return_mask : bool
        If ``True``, returns a boolean mask corresponding to the regions which are taken
        from. Defaults to ``False``. The average user will ignore this.
    plot : bool
        Calls :meth:`take_plot()` to visualize the images regions.
    mp : module
        If ``images`` are :mod:`cupy` objects, then :mod:`cupy` must be passed as
        ``mp``. Very useful to minimize the cost of moving data between the GPU and CPU.
        Defaults to :mod:`numpy`.
        Indexing variables inside `:meth:`take` still use :mod:`numpy` for speed, no
        matter what module is used.
    Returns
    -------
    numpy.ndarray OR cupy.ndarray
        If ``integrate`` is ``False``, returns an array containing the images cropped
        from the regions of size ``(image_count, h, w)``.
        If ``integrate`` is ``True``, instead returns an array of floats of size ``(image_count,)``
        where each float corresponds to the :meth:`numpy.sum` of a cropped image.
        If ``mp`` is :mod:`cupy`, then a ``cupy.ndarray`` is returned.
    """
    # Clean variables.
    if isinstance(size, REAL_TYPES):
        # A scalar size means a square (size, size) region.
        size = (size, size)
    vectors = format_2vectors(vectors)
    # Prepare helper variables. Future: consider caching for speed, if not negligible.
    # Pixel offsets of the region relative to its anchor. Centering shifts by
    # (size - 1) / 2. NOTE(review): `int(size[0] - 1) / 2` divides *after* the int()
    # cast, so the shift can be fractional (e.g. 1.5 for size 4); rounding happens
    # below in np.around() — confirm this centering convention is intended.
    edge_x = np.arange(size[0]) - ((int(size[0] - 1) / 2) if centered else 0)
    edge_y = np.arange(size[1]) - ((int(size[1] - 1) / 2) if centered else 0)
    region_x, region_y = np.meshgrid(edge_x, edge_y)
    # Get the lists for the integration regions.
    # Broadcast the (1, w*h) flattened region offsets against the (vector_count, 1)
    # anchors, producing integer pixel indices of shape (vector_count, w*h).
    # (`vectors[:]` is a no-op copy; `vectors[:][0]` is just the x row.)
    integration_x = np.around(np.add(
        region_x.ravel()[:, np.newaxis].T, vectors[:][0][:, np.newaxis]
    )).astype(int)
    integration_y = np.around(np.add(
        region_y.ravel()[:, np.newaxis].T, vectors[:][1][:, np.newaxis]
    )).astype(int)
    images = mp.array(images, copy=False)
    shape = mp.shape(images)
    if clip:  # Prevent out-of-range errors by clipping.
        # Remember which indices were out of bounds so they can be nan-ed later.
        mask = (
            (integration_x < 0) | (integration_x >= shape[-1]) |
            (integration_y < 0) | (integration_y >= shape[-2])
        )
        if np.any(mask):
            # Clip these indices to prevent errors.
            np.clip(integration_x, 0, shape[-1] - 1, out=integration_x)
            np.clip(integration_y, 0, shape[-2] - 1, out=integration_y)
        else:
            # No clipping needed.
            clip = False
    else:
        pass  # Don't prevent out-of-range errors.
    if return_mask:
        # Paint the integration regions onto a boolean canvas instead of taking data.
        # NOTE(review): shape[:2] assumes 2D `images`; for a 3D stack this canvas
        # would be (image_count, h) — confirm return_mask is only used with 2D input.
        canvas = np.zeros(shape[:2], dtype=bool)
        canvas[integration_y, integration_x] = True
        if plot:
            plt.imshow(canvas)
            plt.show()
        return canvas
    else:
        # Take the data, depending on the shape of the images.
        if len(shape) == 2:
            result = images[np.newaxis, integration_y, integration_x]
        elif len(shape) == 3:
            result = images[:, integration_y, integration_x]
        else:
            raise RuntimeError("Unexpected shape for images: {}".format(shape))
        if clip:  # Set values that were out of range to nan instead of erroring.
            try:  # If the datatype of result is incompatible with nan, set to zero instead.
                result[:, mask] = np.nan
            except:
                result[:, mask] = 0
        else:
            pass
        if integrate:  # Sum over the integration axis.
            return mp.squeeze(mp.sum(result, axis=-1))
        else:  # Reshape the integration axis.
            return mp.reshape(result, (vectors.shape[1], size[1], size[0]))
def take_plot(images):
    """
    Plots non-integrated results of :meth:`.take()` in a square array of subplots.

    Parameters
    ----------
    images : numpy.ndarray
        Stack of 2D images, usually a :meth:`take()` output.
    """
    # Shared statistics so every subplot uses the same color scale and extent.
    (img_count, height, width) = np.shape(images)
    grid_dim = int(np.ceil(np.sqrt(img_count)))
    half_w = width / 2.0 - 0.5
    half_h = height / 2.0 - 0.5
    extent = (-half_w, half_w, -half_h, half_h)
    vmin = np.nanmin(images)
    vmax = np.nanmax(images)

    # Lay the images out on a near-square grid of axis-less subplots.
    plt.figure(figsize=(12, 12))
    for subplot_idx in range(img_count):
        axis = plt.subplot(grid_dim, grid_dim, subplot_idx + 1)
        axis.imshow(
            images[subplot_idx, :, :],
            vmin=vmin,
            vmax=vmax,
            extent=extent,
            interpolation='none'
        )
        axis.axes.xaxis.set_visible(False)
        axis.axes.yaxis.set_visible(False)
    plt.show()
def image_remove_field(images, deviations=1, out=None):
    r"""
    Zeros the field of a stack of images such that moment calculations will succeed.
    Consider, for example, a small spot on a field with strong background.
    Moment calculations in this situation will dominantly measure the moments
    of the background (i.e. the field). This function zeros the image below some threshold.
    This threshold is set to either the mean plus ``deviations`` standard deviations,
    computed uniquely for each image, or the median of each image if ``deviations``
    is ``None``. This is equivalent to background subtraction.

    Parameters
    ----------
    images : numpy.ndarray
        A matrix in the style of the output of :meth:`take()`, with shape ``(image_count, h, w)``, where
        ``(h, w)`` is the width and height of the 2D images and ``image_count`` is the number of
        images. A single image is interpreted correctly as ``(1, h, w)`` even if
        ``(h, w)`` is passed, though the returned image remains shape ``(h, w)`` in that case.
    deviations : int OR None
        Number of standard deviations above the mean to set the threshold.
        If ``None``, uses the median as the threshold instead.
        Defaults to ``1``.
    out : numpy.ndarray or None
        The array to place the output data into. Should be the same shape as ``images``
        and of floating dtype. This function operates in-place if ``out`` equals ``images``.

    Returns
    -------
    out : numpy.ndarray
        ``images`` or a copy of ``images``, with each image background-subtracted.
    """
    # Parse images. Convert to float; an unsigned integer could underflow during
    # the subtraction below.
    images = np.array(images, copy=False)
    if not np.issubdtype(images.dtype, np.floating):
        # FIX: `isinstance(images.dtype, np.floating)` was always False (a dtype
        # object is not an instance of the scalar type), so float input was
        # needlessly re-wrapped; `np.issubdtype` performs the intended check.
        images = np.array(images, copy=False, dtype=float)

    # Parse out.
    if out is None:
        out = np.copy(images)
    elif out is not images:
        # FIX: `np.copyto` returns None; the previous `out = np.copyto(...)`
        # clobbered `out` and crashed the in-place path for distinct arrays.
        np.copyto(out, images)

    # Make sure that we're testing 3D images.
    single_image = len(images.shape) == 2
    if single_image:
        images_ = np.reshape(images, (1, images.shape[0], images.shape[1]))
    else:
        images_ = images
    img_count = images_.shape[0]

    # Generate the per-image threshold.
    if deviations is None:  # Median case.
        threshold = np.nanmedian(images_, axis=(1, 2))
    else:  # Mean + deviations * std case.
        threshold = (
            np.nanmean(images_, axis=(1, 2))
            + deviations * np.nanstd(images_, axis=(1, 2))
        )
    if not single_image:
        threshold = np.reshape(threshold, (img_count, 1, 1))

    # Remove the field. This needs the float from before; unsigned integer could underflow.
    out -= threshold
    out[out < 0] = 0
    return out
def image_moment(images, moment=(1, 0), centers=(0, 0), normalize=True, nansum=False):
    r"""
    Computes the given image `moment <https://en.wikipedia.org/wiki/Moment_(mathematics)>`_
    :math:`M_{m_xm_y}` for each image in a stack.

    Each image is integrated against polynomial trial functions
    :math:`(x - c_x)^{m_x}(y - c_y)^{m_y}` on a window centered on the image,
    optionally divided by the zeroth-order moment (normalization).

    Warning
    ~~~~~~~
    Negative values in ``images`` are not checked or corrected for and may
    produce unusual results. Likewise, higher-order even moments of images that
    are not background-subtracted mostly measure the window rather than the
    spot; consider :meth:`image_remove_field()` first.

    Parameters
    ----------
    images : numpy.ndarray
        Stack of images with shape ``(image_count, h, w)`` in the style of
        :meth:`take()`. A single ``(h, w)`` image is also accepted.
    moment : (int, int)
        The moment orders in the :math:`x` and :math:`y` directions ``(m_x, m_y)``.
        For instance, ``(1, 0)`` is the :math:`x` position, ``(1, 1)`` is the
        :math:`xy` shear, and ``(0, 2)`` is the :math:`y` variance (given a
        zero or zeroed, via ``centers``, first-order moment).
    centers : tuple OR numpy.ndarray
        Shift :math:`(c_x, c_y)` of the trial-function center, either as a
        single pair or as per-image values of shape ``(2, image_count)``.
    normalize : bool
        Whether to normalize by the zeroth-order moment.
        If ``False``, normalization is assumed to have been precomputed.
    nansum : bool
        Whether to use :meth:`numpy.nansum()` in place of :meth:`numpy.sum()`;
        ``nansum`` treats ``nan`` values as zero (useful for ``clip=True``
        output of :meth:`take()`).

    Returns
    -------
    numpy.ndarray
        The moment :math:`M_{m_xm_y}` for every image; shape ``(image_count,)``.
    """
    images = np.array(images, copy=False)
    if images.ndim == 2:
        images = images[np.newaxis, :, :]
    (img_count, w_y, w_x) = images.shape

    sum_ = np.nansum if nansum else np.sum

    # Normalization factor (1 / mass), with zero-mass images mapped to zero.
    if normalize:
        mass = sum_(images, axis=(1, 2), keepdims=False)
        scale = np.reciprocal(mass, where=mass != 0, out=np.zeros(img_count,))
    else:
        scale = 1

    if moment[0] == 0 and moment[1] == 0:
        # Zeroth-order case: unity if normalized, the raw mass otherwise.
        return np.ones((img_count,)) if normalize else sum_(images, axis=(1, 2), keepdims=False)

    # Center shifts: per-image arrays or a single shared pair.
    if len(np.shape(centers)) == 2:
        c_x = np.reshape(centers[0], (img_count, 1, 1))
        c_y = np.reshape(centers[1], (img_count, 1, 1))
    elif len(np.shape(centers)) == 1:
        c_x = centers[0]
        c_y = centers[1]

    # Image-centered coordinate grids, raised to the requested powers in-place.
    grid_x = np.reshape(np.arange(w_x) - (w_x - 1) / 2.0, (1, 1, w_x)) - c_x
    grid_y = np.reshape(np.arange(w_y) - (w_y - 1) / 2.0, (1, w_y, 1)) - c_y
    grid_x = np.power(grid_x, moment[0], out=grid_x)
    grid_y = np.power(grid_y, moment[1], out=grid_y)

    # Only multiply by the grids that actually contribute.
    if moment[1] == 0:
        weighted = images * grid_x
    elif moment[0] == 0:
        weighted = images * grid_y
    else:
        weighted = images * grid_x * grid_y
    return sum_(weighted, axis=(1, 2), keepdims=False) * scale
def image_normalization(images, nansum=False):
    """
    Computes the zeroth-order moment, equivalent to spot mass or normalization,
    for each image in a stack.

    Parameters
    ----------
    images : numpy.ndarray
        Stack of images with shape ``(image_count, h, w)`` in the style of
        :meth:`take()`. A single ``(h, w)`` image is also accepted.
    nansum : bool
        Whether to use :meth:`numpy.nansum()` in place of :meth:`numpy.sum()`.

    Returns
    -------
    numpy.ndarray
        The normalization factor :math:`M_{00}` in an array of shape ``(image_count,)``.
    """
    # Delegate to the general moment routine with the zeroth-order moment.
    return image_moment(images, moment=(0, 0), normalize=False, nansum=nansum)
def image_normalize(images, nansum=False, remove_field=False):
    """
    Normalizes a stack of images via the zeroth-order moments.

    Parameters
    ----------
    images : numpy.ndarray
        A matrix in the style of the output of :meth:`take()`, with shape ``(image_count, h, w)``, where
        ``(h, w)`` is the width and height of the 2D images and ``image_count`` is the number of
        images. A single image is interpreted correctly as ``(1, h, w)`` even if
        ``(h, w)`` is passed, though the returned image remains shape ``(h, w)`` in that case.
    nansum : bool
        Whether to use :meth:`numpy.nansum()` in place of :meth:`numpy.sum()`.
    remove_field : bool
        Whether to apply :meth:`.image_remove_field()` to avoid background-dominated moments.

    Returns
    -------
    images_normalized : numpy.ndarray
        A copy of ``images``, with each image normalized.
        Zero-mass images are returned as all-zero rather than dividing by zero.
    """
    if remove_field:
        images = image_remove_field(images)
    else:
        images = np.array(images, copy=False, dtype=float)
    single_image = len(images.shape) == 2
    normalization = image_normalization(images, nansum=nansum)

    if single_image:
        # FIX: np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
        # modern equivalent for extracting the single value.
        normalization = normalization.item()
        if normalization == 0:
            return np.zeros_like(images)
        else:
            return images / normalization
    else:
        # Divide each image by its mass, mapping zero-mass images to zero.
        reciprocal = np.reciprocal(
            normalization, where=normalization != 0, out=np.zeros(len(normalization))
        )
        return images * np.reshape(reciprocal, (len(normalization), 1, 1))
def image_positions(images, normalize=True, nansum=False):
    """
    Computes the two first-order moments, equivalent to spot position relative
    to image center, for a stack of images.
    Specifically, returns :math:`M_{10}` and :math:`M_{01}`.

    Parameters
    ----------
    images : numpy.ndarray
        Stack of images with shape ``(image_count, h, w)`` in the style of
        :meth:`take()`. A single ``(h, w)`` image is also accepted.
    normalize : bool
        Whether to normalize ``images``.
        If ``False``, normalization is assumed to have been precomputed.
    nansum : bool
        Whether to use :meth:`numpy.nansum()` in place of :meth:`numpy.sum()`.

    Returns
    -------
    numpy.ndarray
        Stack of :math:`M_{10}`, :math:`M_{01}`. Shape ``(2, image_count)``.
    """
    if normalize:
        images = image_normalize(images, nansum=nansum)

    # Gather both first-order moments without re-normalizing.
    first_moments = [
        image_moment(images, order, normalize=False, nansum=nansum)
        for order in ((1, 0), (0, 1))
    ]
    return np.vstack(first_moments)
def image_variances(images, centers=None, normalize=True, nansum=False):
    r"""
    Computes the three second-order central moments, equivalent to variance,
    for a stack of images.

    Specifically, this function returns a stack of the moments :math:`M_{20}`
    and :math:`M_{02}`, along with :math:`M_{11}` — the variances in the
    :math:`x` and :math:`y` directions plus the so-called shear variance.
    Recall that variance is defined as

    .. math:: (\Delta x)^2 = \left<(x - \left<x\right>)^2\right>,

    made central by subtracting :math:`\left<x\right>`. Use
    :meth:`image_moment()` directly for non-central moments; this helper
    targets spot size and skewness analysis.

    Parameters
    ----------
    images : numpy.ndarray
        Stack of images with shape ``(image_count, h, w)`` in the style of
        :meth:`take()`. A single ``(h, w)`` image is also accepted.
    centers : numpy.ndarray OR None
        Precomputed :math:`\left<x\right>` (e.g. from :meth:`image_positions()`).
        The default ``None`` computes ``centers`` internally.
    normalize : bool
        Whether to normalize ``images``.
        If ``False``, normalization is assumed to have been precomputed.
    nansum : bool
        Whether to use :meth:`numpy.nansum()` in place of :meth:`numpy.sum()`.

    Returns
    -------
    numpy.ndarray
        Stack of :math:`M_{20}`, :math:`M_{02}`, and :math:`M_{11}`. Shape ``(3, image_count)``.
    """
    if normalize:
        images = image_normalize(images, nansum=nansum)
    if centers is None:
        centers = image_positions(images, normalize=False, nansum=nansum)

    # Evaluate the three central second-order moments about the given centers.
    second_moments = [
        image_moment(images, order, centers=centers, normalize=False, nansum=nansum)
        for order in ((2, 0), (0, 2), (1, 1))
    ]
    return np.vstack(second_moments)
def image_ellipticity(variances):
    r"""
    Given the output of :meth:`image_variances()`,
    return a measure of spot ellipticity for each moment triplet.

    The moments :math:`M_{20}`, :math:`M_{02}`, and :math:`M_{11}` form a
    :math:`2 \times 2` symmetric matrix

    .. math:: \begin{bmatrix}
                M_{20} & M_{11} \\
                M_{11} & M_{02} \\
              \end{bmatrix}
              =
              R(-\phi)
              \begin{bmatrix}
                \lambda_+ & 0 \\
                0 & \lambda_- \\
              \end{bmatrix}
              R(\phi),

    i.e. a rotated elliptical scaling with eigenvalues :math:`\lambda_\pm`.
    The returned metric is

    .. math:: \mathcal{E} = 1 - \frac{\lambda_-}{\lambda_+},

    which is zero for isotropic scaling (:math:`\lambda_+ = \lambda_-`) and
    unity for a spot flattened to a line (:math:`\lambda_- = 0`).

    Parameters
    ----------
    variances : numpy.ndarray
        The output of :meth:`image_variances()`. Shape ``(3, image_count)``.

    Returns
    -------
    numpy.ndarray
        Array of ellipticities for the given moments. Shape ``(image_count,)``.
    """
    m20 = variances[0, :]
    m02 = variances[1, :]
    m11 = variances[2, :]

    # 2x2 eigenvalue shortcut: lambda_pm = tr/2 +- sqrt((tr/2)^2 - det).
    mean_diag = 0.5 * (m20 + m02)
    det = m20 * m02 - np.square(m11)
    radius = np.sqrt(mean_diag * mean_diag - det)

    return 1 - (mean_diag - radius) / (mean_diag + radius)
def image_ellipticity_angle(variances):
    r"""
    Given the output of :meth:`image_variances()`,
    return the rotation angle of the scaled basis for each moment triplet.
    This is the angle between the :math:`x` axis and the
    major axis (large eigenvalue axis).

    Parameters
    ----------
    variances : numpy.ndarray
        The output of :meth:`image_variances()`. Shape ``(3, image_count)``.

    Returns
    -------
    numpy.ndarray
        Array of angles for the given moments, shape ``(image_count,)``.
        For highly circular spots this angle is not meaningful and is dominated
        by experimental noise; for perfectly circular spots, zero is returned.
    """
    m20 = variances[0, :]
    m02 = variances[1, :]
    m11 = variances[2, :]

    # Large eigenvalue via the 2x2 shortcut (see image_ellipticity).
    half_trace = 0.5 * (m20 + m02)
    det = m20 * m02 - m11 * m11
    eig_major = half_trace + np.sqrt(half_trace * half_trace - det)

    # From M v = eig_major v:
    #   m11 * x = (eig_major - m02) * y  ==>  x / y = (eig_major - m02) / m11.
    # The `where` clause leaves zero where m11 == 0 (circular case).
    angles = np.zeros_like(m11)
    np.arctan2(eig_major - m02, m11, where=m11 != 0, out=angles)
    return angles
def image_fit(images, grid_ravel=None, function=gaussian2d, guess=None, plot=False):
    """
    Fit each image in a stack of images to a 2D ``function``.

    Parameters
    ----------
    images : numpy.ndarray (image_count, height, width)
        An image or array of images to fit. A single image is interpreted correctly as
        ``(1, h, w)`` even if ``(h, w)`` is passed.
    grid_ravel : 2-tuple of array_like of reals (height * width)
        Raveled components of the meshgrid describing coordinates over the images.
        If ``None``, an image-centered pixel grid is constructed.
    function : lambda ((float, float), ... ) -> float
        Some fitfunction which accepts ``(x,y)`` coordinates as first argument.
        Defaults to :meth:`~slmsuite.misc.fitfunctions.gaussian2d()`.
    guess : None OR numpy.ndarray (parameter_count, image_count)
        - If ``guess`` is ``None``, will construct a guess based on the ``function`` passed.
          Functions for which guesses are implemented include:

          - :meth:`~slmsuite.misc.fitfunctions.gaussian2d()`

        - If ``guess`` is ``None`` and ``function`` does not have a guess
          implemented, no guess will be provided to the optimizer.
        - If ``guess`` is a ``numpy.ndarray``, a slice of the array will be provided
          to the optimizer as a guess for the fit parameters for each image.
    plot : bool
        Whether to create (and show) a plot for each fit.

    Returns
    -------
    numpy.ndarray (``result_count``, ``image_count``)
        A matrix with the fit results. The first row
        contains the rsquared quality of each fit.
        The values in the remaining rows correspond to the parameters
        for the supplied fit function.
        Failed fits have an rsquared of ``numpy.nan`` and parameters
        are set to the provided or constructed guess or ``numpy.nan``
        if no guess was provided or constructed.
    """
    # Setup.
    if images.ndim == 2:
        images = images.reshape((1, *images.shape))
    (image_count, w_y, w_x) = images.shape
    img_shape = (w_y, w_x)

    if grid_ravel is None:
        # Default to a pixel grid centered on the image.
        edge_x = np.reshape(np.arange(w_x) - (w_x - 1) / 2.0, (1, 1, w_x))
        edge_y = np.reshape(np.arange(w_y) - (w_y - 1) / 2.0, (1, w_y, 1))
        grid = np.meshgrid(edge_x, edge_y)
        grid_ravel = (grid[0].ravel(), grid[1].ravel())

    # Number of fit parameters the function accepts (minus the (x, y) argument).
    param_count = function.__code__.co_argcount - 1
    # Number of rows to return: the parameters plus one row for rsquared.
    result_count = param_count + 1
    result = np.full((result_count, image_count), np.nan)

    # Construct guesses.
    if guess is None:
        if function is gaussian2d:
            # Moment-based estimates: position, amplitude, offset, widths, shear.
            images_normalized = image_normalize(images, remove_field=True)
            centers = image_positions(images_normalized, normalize=False)
            variances = image_variances(images_normalized, centers=centers, normalize=False)
            maxs = np.amax(images, axis=(1, 2))
            mins = np.amin(images, axis=(1, 2))
            guess = np.vstack((
                centers,
                maxs - mins,
                mins,
                np.sqrt(variances[0:2, :]),
                variances[2, :]
            ))
            # FIX: a second, unused `guess_raw` stack (with unrooted variances)
            # was removed here.

    # Fit and plot each image.
    for img_idx in range(image_count):
        img = images[img_idx, :, :].ravel()

        # Get guess.
        p0 = None if guess is None else guess[:, img_idx]

        # Attempt fit.
        fit_succeeded = True
        popt = None
        try:
            popt, _ = curve_fit(function, grid_ravel, img, ftol=1e-5, p0=p0,)
        except RuntimeError:  # The fit failed if scipy says so.
            fit_succeeded = False
        else:  # The fit failed if any of the parameters aren't finite.
            if np.any(np.logical_not(np.isfinite(popt))):
                fit_succeeded = False

        if fit_succeeded:  # Calculate r2.
            ss_res = np.sum(np.square(img - function(grid_ravel, *popt)))
            ss_tot = np.sum(np.square(img - np.mean(img)))
            r2 = 1 - (ss_res / ss_tot)
        else:  # r2 is nan and the fit parameters are the guess or nan.
            popt = p0 if p0 is not None else np.full(param_count, np.nan)
            r2 = np.nan

        result[0, img_idx] = r2
        result[1:, img_idx] = popt

        # Plot.
        if plot:
            # Evaluate data, guess, and result on the image grid with a shared scale.
            data = np.reshape(img, img_shape)
            if p0 is not None:
                guess_ = np.reshape(function(grid_ravel, *p0), img_shape)
            else:
                guess_ = np.zeros(img_shape)
            result_ = np.reshape(function(grid_ravel, *popt), img_shape)
            vmin = np.min((
                np.min(data),
                np.min(guess_) if p0 is not None else np.inf,
                np.min(result_)
            ))
            vmax = np.max((
                np.max(data),
                np.max(guess_) if p0 is not None else -np.inf,
                np.max(result_)
            ))

            # Plot.
            fig, axs = plt.subplots(1, 3, figsize=(3 * 6.4, 4.8))
            fig.suptitle("Image {}".format(img_idx))
            ax0, ax1, ax2 = axs
            ax0.imshow(data, vmin=vmin, vmax=vmax)
            ax0.set_title("Data")
            ax1.imshow(guess_, vmin=vmin, vmax=vmax)
            ax1.set_title("Guess")
            ax2.imshow(result_, vmin=vmin, vmax=vmax)
            ax2.set_title("Result")
            plt.show()

    return result
def fit_affine(x, y, guess_affine=None, plot=False):
    r"""
    For two sets of points with equal length, find the best-fit affine
    transformation that transforms from the first basis to the second.
    Best fit is defined as minimization on the least squares euclidean norm.

    .. math:: \vec{y} = M \cdot \vec{x} + \vec{b}

    Parameters
    ----------
    x, y : array_like
        Array of vectors of shape ``(2, N)`` in the style of :meth:`format_2vectors()`.
    guess_affine : dict OR None
        Initial guess as a dictionary with fields ``"M"`` and ``"b"``.
        If ``None``, a guess is estimated from the centered data.
    plot : bool
        Whether to produce a debug plot.

    Returns
    -------
    dict
        A dictionary with fields ``"M"`` and ``"b"``.

    Raises
    ------
    ValueError
        If a coordinate row of ``x`` or ``y`` is all-``nan``, or if
        ``guess_affine`` is malformed.
    """
    x = format_2vectors(x)
    y = format_2vectors(y)
    assert x.shape == y.shape

    if guess_affine is None:
        # Calculate the centroids and the centered coordinates.
        xc = np.nanmean(x, axis=1)[:, np.newaxis]
        yc = np.nanmean(y, axis=1)[:, np.newaxis]
        if np.all(np.isnan(xc)) or np.all(np.isnan(yc)):
            raise ValueError("analysis.py: vectors cannot contain a row of all-nan values")
        x_ = x - xc
        y_ = y - yc

        # Points very close to the centroid have disproportionate influence on the guess.
        # Ignore the points which are closer than a median-dependent threshold.
        # NOTE(review): the comparisons below use the signed coordinate, not its
        # absolute value, so points left of / below the centroid never contribute
        # to the guess — confirm whether `np.abs(x_) > threshold` was intended.
        threshold = np.median(np.sqrt(np.sum(np.square(x_), axis=0))) / 2

        # Generate a guess transformation from elementwise coordinate ratios.
        nan_list = np.full_like(y_[0, :], np.nan)

        # This could probably be vectorized more. Also not sure if all corner cases work.
        M_guess = np.array([
            [
                np.nanmean(np.divide(y_[0, :], x_[0, :], where=x_[0, :] > threshold, out=nan_list.copy())),
                np.nanmean(np.divide(y_[0, :], x_[1, :], where=x_[1, :] > threshold, out=nan_list.copy()))
            ],
            [
                np.nanmean(np.divide(y_[1, :], x_[0, :], where=x_[0, :] > threshold, out=nan_list.copy())),
                np.nanmean(np.divide(y_[1, :], x_[1, :], where=x_[1, :] > threshold, out=nan_list.copy()))
            ]
        ])

        # Fix nan instances. This means the matrix is no longer unique, so we choose the
        # case where the nans are mapped to zero.
        M_guess[np.isnan(M_guess)] = 0

        # Back compute the offset.
        b_guess = yc - np.matmul(M_guess, xc)
    else:
        if isinstance(guess_affine, dict) and "M" in guess_affine and "b" in guess_affine:
            M_guess = guess_affine["M"]
            b_guess = guess_affine["b"]
        else:
            raise ValueError("analysis.py: guess_affine must be a dictionary with 'M' and 'b' fields.")

    # Least squares fit.
    def err(p):
        # Squared euclidean residual of y vs. M x + b for packed parameters p.
        M = np.array([[p[0], p[1]], [p[2], p[3]]])
        b = format_2vectors([p[4], p[5]])
        y_ = np.matmul(M, x) + b
        return np.nansum(np.square(y_ - y))

    guess = (M_guess[0, 0], M_guess[0, 1], M_guess[1, 0], M_guess[1, 1], b_guess[0, 0], b_guess[1, 0])

    try:  # Try with default scipy minimization. (Future: better opt than minimize?).
        m = minimize(err, x0=guess)
        p = [float(pp) for pp in m.x]
        M = np.array([[p[0], p[1]], [p[2], p[3]]])
        b = format_2vectors([p[4], p[5]])
    except Exception:
        # FIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit). Fall back to the guess on failure.
        M = M_guess
        b = b_guess

    # Debug plot if desired.
    if plot and x.shape[0] == 2:
        plt.scatter(y[0, :], y[1, :], s=20, fc="b", ec="b")
        result_guess = np.matmul(M_guess, x) + b_guess
        plt.scatter(result_guess[0, :], result_guess[1, :], s=40, fc="none", ec="r")
        result = np.matmul(M, x) + b
        plt.scatter(result[0, :], result[1, :], s=60, fc="none", ec="g")
        plt.gca().set_aspect("equal")
        plt.show()

    # Return as a dictionary
    return {"M": M, "b": b}
def blob_detect(
img,
filter=None,
plot=False,
title="",
fig=None,
axs=None,
zoom=False,
show=False,
**kwargs
):
"""
Detect blobs in an image.
Wraps :class:`cv2.SimpleBlobDetector` (see
`these <https://docs.opencv.org/3.4/d8/da7/structcv_1_1SimpleBlobDetector_1_1Params.html>`_
`links <https://learnopencv.com/blob-detection-using-opencv-python-c/>`_)
Default parameters are optimized for bright spot detection on dark background,
but can be changed with ``**kwargs``.
Parameters
----------
img : numpy.ndarray
The image to perform blob detection on.
filter : {"dist_to_center", "max_amp"} OR None
One of ``dist_to_center`` or ``max_amp``.
plot : bool
Whether to show a debug plot.
title : str
Plot title.
fig : matplotlib.figure.Figure
Figure for plotting.
axs : list of matplotlib.axes.Axis or matplotlib.axes.Axis
Axes for plotting.
show : bool
Whether or not to show the plot.
**kwargs
Extra arguments for :class:`cv2.SimpleBlobDetector`.
Returns
-------
blobs : ndarray
List of blobs found by ``detector``.
detector : :class:`cv2.SimpleBlobDetector`
A blob detector with customized parameters.
"""
img_8it = _make_8bit(np.copy(img))
params = cv2.SimpleBlobDetector_Params()
# Configure default parameters
params.blobColor = 255
params.minThreshold = 10
params.maxThreshold = 255
params.thresholdStep = 10
# params.minArea = 0
params.filterByArea = False # Can be changed to compute rel to diffraction limit
params.filterByCircularity = False
params.filterByConvexity = False
params.filterByInertia = False
# Load in custom configuration
for key, val in kwargs.items():
setattr(params, key, val)
# Create the detector and detect blobs
detector = cv2.SimpleBlobDetector_create(params)
blobs = detector.detect(img_8it)
if len(blobs) == 0:
raise Exception("No blobs found! Try blurring image.")
# Sort blobs according to `filter`.
if filter == "dist_to_center":
dist_to_center = [
np.linalg.norm(np.array(blob.pt) - np.array(img.shape[::-1]) / 2)
for blob in blobs
]
blobs = [blobs[np.argmin(dist_to_center)]]
elif filter == "max_amp":
bin_size = int(np.mean([blob.size for blob in blobs]))
for i, blob in enumerate(blobs):
# Try fails when blob is on edge of camera.
try:
blobs[i].response = float(
img_8it[
np.ix_(
int(blob.pt[1]) + np.arange(-bin_size, bin_size),
int(blob.pt[0]) + np.arange(-bin_size, bin_size),
)
].sum()
)
except Exception:
blobs[i].response = float(0)
blobs = [blobs[np.argmax([blob.response for blob in blobs])]]
if plot:
# Get blob statistics.
blob_count = len(blobs)
blob_centers = np.zeros((2, blob_count))
blob_diameters = np.zeros(blob_count)
for (blob_idx, blob) in enumerate(blobs):
blob_centers[:, blob_idx] = blob.pt
blob_diameters[blob_idx] = blob.size
blob_xs = blob_centers[0, :]
blob_ys = blob_centers[1, :]
blob_xmin = np.min(blob_xs)
blob_xmax = np.max(blob_xs)
blob_ymin = np.min(blob_ys)
blob_ymax = np.max(blob_ys)
zoom_padx = 2 * (blob_xmax - blob_xmin) / blob_count
zoom_pady = 2 * (blob_ymax - blob_ymin) / blob_count
# Plot setup.
if fig is None and axs is None:
if zoom:
fig, axs = plt.subplots(1, 2)
else:
fig, axs = plt.subplots(1, 1)
axs = (axs,)
if zoom:
ax0, ax1 = axs
else:
ax0, = axs
fig.suptitle(title)
# Full image.
vmin = np.min(img_8it)
vmax = np.max(img_8it)
im = ax0.imshow(img_8it, vmin=vmin, vmax=vmax)
# Zoomed Image.
if zoom:
ax1.imshow(img_8it, vmin=vmin, vmax=vmax)
xmax = img.shape[1] + 0.5
xmin = 0.5