/
planar_imaging.py
2708 lines (2470 loc) · 103 KB
/
planar_imaging.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""The planar imaging module analyzes phantom images taken with the kV or MV imager in 2D.
The following phantoms are supported:
* Leeds TOR 18
* Standard Imaging QC-3
* Standard Imaging QC-kV
* Las Vegas
* Elekta Las Vegas
* Doselab MC2 MV
* Doselab MC2 kV
* SNC kV
* SNC MV
* PTW EPID QC
Features:
* **Automatic phantom localization** - Set up your phantom any way you like; automatic positioning,
  angle, and inversion correction mean setup variations won't give you a headache.
* **High and low contrast determination** - Analyze both low and high contrast ROIs. Set thresholds
as you see fit.
"""
from __future__ import annotations
import io
import math
import os.path as osp
import warnings
import webbrowser
from functools import cached_property
from pathlib import Path
from typing import BinaryIO, Callable, Literal
import matplotlib.pyplot as plt
import numpy as np
from py_linq import Enumerable
from scipy.ndimage import median_filter
from skimage import exposure, feature, measure
from skimage.measure._regionprops import RegionProperties
from . import Normalization
from .core import geometry, image, pdf
from .core.contrast import Contrast
from .core.decorators import lru_cache
from .core.geometry import Circle, Point, Rectangle, Vector
from .core.io import get_url, retrieve_demo_file
from .core.mtf import MTF
from .core.profile import CollapsedCircleProfile, FWXMProfilePhysical
from .core.roi import DiskROI, HighContrastDiskROI, LowContrastDiskROI, bbox_center
from .core.utilities import ResultBase, ResultsDataMixin
from .metrics.image import SizedDiskLocator
class PlanarResult(ResultBase):
    """This class should not be called directly. It is returned by the ``results_data()`` method.

    It is a dataclass under the hood and thus comes with all the dunder magic.

    Use the following attributes as normal class attributes."""

    analysis_type: str  #: The common name of the analyzed phantom
    median_contrast: float  #: Median contrast over the low-contrast ROIs
    median_cnr: float  #: Median contrast-to-noise ratio over the low-contrast ROIs
    num_contrast_rois_seen: int  #: Number of low-contrast ROIs that passed the visibility threshold
    phantom_center_x_y: tuple[float, float]  #: Detected phantom center (x, y) in pixels
    low_contrast_rois: list[dict]  #: Per-ROI dicts of the low-contrast ROI data
    phantom_area: float  #: The area of the phantom in mm^2 (the ``phantom_area`` property divides pixel area by dpmm^2)
    mtf_lp_mm: tuple[float, float, float] | None = None  #: lp/mm at 80/50/30% rMTF; None if no high-contrast ROIs
    percent_integral_uniformity: float | None = None  #: Worst PIU over the low-contrast ROIs; None if none exist
def _middle_of_bbox_region(region: RegionProperties) -> tuple:
return (
(region.bbox[2] - region.bbox[0]) / 2 + region.bbox[0],
(region.bbox[3] - region.bbox[1]) / 2 + region.bbox[1],
)
def is_square(region: RegionProperties, instance: object, rtol=0.2) -> bool:
    """Whether the region's bounding box height and width agree within ``rtol``."""
    min_row, min_col, max_row, max_col = region.bbox
    aspect_ratio = (max_row - min_row) / (max_col - min_col)
    return math.isclose(aspect_ratio, 1, rel_tol=rtol)
def is_centered(region: RegionProperties, instance: object, rtol=0.3) -> bool:
    """Whether the region is centered on the image"""
    # Use the bbox midpoint rather than the centroid: the centroid can be
    # offset by missing lengths of the detected outline; the bbox center is more robust.
    min_row, min_col, max_row, max_col = region.bbox
    region_center = ((min_row + max_row) / 2, (min_col + max_col) / 2)
    img_center = (instance.image.center.y, instance.image.center.x)
    return np.allclose(region_center, img_center, rtol=rtol)
def is_right_size(region: RegionProperties, instance: object, rtol=0.1) -> bool:
    """Whether the region is close to the expected size of the phantom, given the SSD and physical phantom size."""
    expected_px = instance.phantom_bbox_size_px
    # np.isclose returns a numpy bool; cast to a plain bool for callers
    return bool(np.isclose(region.bbox_area, expected_px, rtol=rtol))
def percent_integral_uniformity(max: float, min: float) -> float:
"""Calculate the percent integral uniformity. A small constant is
added to avoid possible division by zero."""
return 100 * (1 - (max - min + 1e-6) / (max + min + 1e-6))
class ImagePhantomBase(ResultsDataMixin[PlanarResult]):
    """Base class for planar phantom classes.

    Attributes
    ----------
    common_name : str
        The human-readable name of the phantom. Used in plots and PDF report.
    phantom_outline_object : {None, 'Circle', 'Rectangle'}
        What type of outline to display on the plotted image. Helps to visually determine the accuracy of the
        phantom size, position, and scale.
    high_contrast_rois : list
        :class:`~pylinac.core.roi.HighContrastDiskROI` instances of the
        high contrast line pair regions.
    high_contrast_roi_settings : dict
        Settings of the placement of the high-contrast ROIs.
    low_contrast_rois : list
        :class:`~pylinac.core.roi.LowContrastDiskROI` instances of the low
        contrast ROIs, other than the reference ROI (below).
    low_contrast_roi_settings : dict
        Settings of the placement of the low-contrast ROIs.
    low_contrast_background_rois : list
        :class:`~pylinac.core.roi.LowContrastDiskROI` instances of the low
        contrast background ROIs.
    low_contrast_background_roi_settings : dict
        Settings of the placement of the background low-contrast ROIs.
    low_contrast_background_value : float
        The average pixel value of all the low-contrast background ROIs.
    detection_conditions: list of callables
        This should be a list of functions that return a boolean. It is used for finding the phantom outline in the image.
        E.g. is_at_center().
    phantom_bbox_size_mm2: float
        This is the expected size of the **BOUNDING BOX** of the phantom. Additionally, it is usually smaller than the
        physical bounding box because we sometimes detect an inner ring/square. Typically, x0.9-1.0 of the physical size.
    """

    _demo_filename: str
    common_name: str
    # NOTE: these mutable class-level defaults are shared across instances until
    # overridden by a subclass or by analyze(); subclasses define their own settings dicts.
    high_contrast_roi_settings = {}
    high_contrast_rois = []
    low_contrast_roi_settings = {}
    low_contrast_rois = []
    low_contrast_background_roi_settings = {}
    low_contrast_background_rois = []
    low_contrast_background_value = None
    phantom_outline_object = None
    detection_conditions: list[Callable] = [is_centered, is_right_size]
    # canny edge-detection parameters used by _get_canny_regions()
    detection_canny_settings = {"sigma": 2, "percentiles": (0.001, 0.01)}
    phantom_bbox_size_mm2: float
    # how to pick among candidate regions that pass all detection conditions
    roi_match_condition: Literal["max", "closest"] = "max"
    # NOTE(review): ``mtf`` is an annotation only (no default); it is set in analyze()
    # only when high_contrast_roi_settings exist. For phantoms without high-contrast
    # ROIs, accessing ``self.mtf`` (e.g. in _generate_results_data) would raise
    # AttributeError — confirm against the full package.
    mtf: MTF
    _ssd: float

    def __init__(
        self,
        filepath: str | BinaryIO | Path,
        normalize: bool = True,
        image_kwargs: dict | None = None,
    ):
        """
        Parameters
        ----------
        filepath : str
            Path to the image file.
        normalize: bool
            Whether to "ground" and normalize the image. This can affect contrast measurements, but for
            backwards compatibility this is True. You may want to set this to False if trying to compare with other software.
        image_kwargs : dict
            Keywords passed to the image load function; this would include things like DPI or SID if applicable
        """
        img_kwargs = image_kwargs or {}
        self.image = image.load(filepath, **img_kwargs)
        if normalize:
            self.image.ground()
            self.image.normalize()
        # overrides are set by analyze(); None means "auto-detect"
        self._angle_override = None
        self._size_override = None
        self._center_override = None
        self._high_contrast_threshold = None
        self._low_contrast_threshold = None

    @classmethod
    def from_demo_image(cls):
        """Instantiate and load the demo image."""
        demo_file = retrieve_demo_file(name=cls._demo_filename)
        return cls(demo_file)

    @classmethod
    def from_url(cls, url: str):
        """Instantiate from an image downloaded from a URL.

        Parameters
        ----------
        url : str
            The URL to the image.
        """
        image_file = get_url(url)
        return cls(image_file)

    def _preprocess(self):
        """Hook for subclasses to preprocess the image before ROI sampling. No-op by default."""
        pass

    def _check_inversion(self):
        """Hook for subclasses to auto-correct image inversion. No-op by default."""
        pass

    def window_floor(self) -> float | None:
        """The value to use as the minimum when displaying the image (see https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.imshow.html)
        Helps show contrast of images, specifically if there is an open background"""
        return

    def window_ceiling(self) -> float | None:
        """The value to use as the maximum when displaying the image. Helps show contrast of images, specifically if there is an open background"""
        return

    @property
    def magnification_factor(self) -> float:
        """The mag factor of the image based on SSD vs SAD"""
        return self.image.sad / self._ssd

    @property
    def phantom_bbox_size_px(self) -> float:
        """The phantom bounding box size in pixels^2 at the isoplane."""
        return (
            self.phantom_bbox_size_mm2
            * (self.image.dpmm**2)
            * (self.magnification_factor**2)
        )

    @cached_property
    def phantom_ski_region(self) -> RegionProperties:
        """The skimage region of the phantom outline.

        Raises
        ------
        ValueError
            If no canny region passes all the detection conditions.
        """
        regions = self._get_canny_regions()
        # drop tiny regions (noise) and sort largest-first by bbox area
        sorted_regions = (
            Enumerable(regions)
            .where(lambda r: r.area_bbox > 100)
            .order_by_descending(lambda r: r.area_bbox)
            .to_list()
        )
        # sorted_regions = sorted(regions, key=lambda r: r.area_bbox, reverse=True)
        # search through all the canny ROIs to see which ones pass the detection conditions
        blobs = []
        for phantom_idx, region in enumerate(sorted_regions):
            conditions_met = [
                condition(region, self) for condition in self.detection_conditions
            ]
            if all(conditions_met):
                blobs.append(phantom_idx)
        if not blobs:
            raise ValueError(
                "Unable to find the phantom in the image. Potential solutions: check the SSD was passed correctly, check that the phantom isn't at the edge of the field, check that the phantom is centered along the CAX."
            )
        if self.roi_match_condition == "max":
            # take the biggest ROI and call that the phantom outline
            best_roi_idx = np.argsort(
                [sorted_regions[phan].bbox_area for phan in blobs]
            )[-1]
        elif (
            self.roi_match_condition == "closest"
        ):  # take the one most similar in size to known size
            best_roi_idx = np.argsort(
                [
                    abs(sorted_regions[phan].bbox_area - self.phantom_bbox_size_px)
                    for phan in blobs
                ]
            )[0]
        phantom_idx = blobs[best_roi_idx]
        return sorted_regions[phantom_idx]

    def analyze(
        self,
        low_contrast_threshold: float = 0.05,
        high_contrast_threshold: float = 0.5,
        invert: bool = False,
        angle_override: float | None = None,
        center_override: tuple | None = None,
        size_override: float | None = None,
        ssd: float | Literal["auto"] = "auto",
        low_contrast_method: str = Contrast.MICHELSON,
        visibility_threshold: float = 100,
    ) -> None:
        """Analyze the phantom using the provided thresholds and settings.

        Parameters
        ----------
        low_contrast_threshold : float
            This is the contrast threshold value which defines any low-contrast ROI as passing or failing.
        high_contrast_threshold : float
            This is the contrast threshold value which defines any high-contrast ROI as passing or failing.
        invert : bool
            Whether to force an inversion of the image. This is useful if pylinac's automatic inversion algorithm fails
            to properly invert the image.
        angle_override : None, float
            A manual override of the angle of the phantom. If None, pylinac will automatically determine the angle. If
            a value is passed, this value will override the automatic detection.

            .. Note::

                0 is pointing from the center toward the right and positive values go counterclockwise.

        center_override : None, 2-element tuple
            A manual override of the center point of the phantom. If None, pylinac will automatically determine the center. If
            a value is passed, this value will override the automatic detection. Format is (x, y)/(col, row).
        size_override : None, float
            A manual override of the relative size of the phantom. This size value is used to scale the positions of
            the ROIs from the center. If None, pylinac will automatically determine the size.
            If a value is passed, this value will override the automatic sizing.

            .. Note::

                This value is not necessarily the physical size of the phantom. It is an arbitrary value.
        ssd
            The SSD of the phantom itself in mm. If set to "auto", will first search for the phantom at the SAD, then at 5cm above the SID.
        low_contrast_method
            The equation to use for calculating low contrast.
        visibility_threshold
            The threshold for whether an ROI is "seen".
        """
        self._angle_override = angle_override
        self._center_override = center_override
        self._size_override = size_override
        self._high_contrast_threshold = high_contrast_threshold
        self._low_contrast_threshold = low_contrast_threshold
        self._low_contrast_method = low_contrast_method
        self.visibility_threshold = visibility_threshold
        self._ssd = ssd
        # resolve "auto" SSD before any detection (detection depends on SSD)
        self._find_ssd()
        self._check_inversion()
        if invert:
            self.image.invert()
        self._preprocess()
        if self.high_contrast_roi_settings:
            self.high_contrast_rois = self._sample_high_contrast_rois()
            # generate rMTF
            spacings = [
                roi["lp/mm"] for roi in self.high_contrast_roi_settings.values()
            ]
            self.mtf = MTF.from_high_contrast_diskset(
                diskset=self.high_contrast_rois, spacings=spacings
            )
        if self.low_contrast_background_roi_settings:
            # background must be sampled first; the low-contrast ROIs use its average value
            (
                self.low_contrast_background_rois,
                self.low_contrast_background_value,
            ) = self._sample_low_contrast_background_rois()
        if self.low_contrast_roi_settings:
            self.low_contrast_rois = self._sample_low_contrast_rois()

    def _sample_low_contrast_rois(self) -> list[LowContrastDiskROI]:
        """Sample the low-contrast sample regions for calculating contrast values."""
        lc_rois = []
        for stng in self.low_contrast_roi_settings.values():
            # ROI placement is relative to the detected phantom center/angle/radius
            roi = LowContrastDiskROI(
                self.image,
                self.phantom_angle + stng["angle"],
                self.phantom_radius * stng["roi radius"],
                self.phantom_radius * stng["distance from center"],
                self.phantom_center,
                self._low_contrast_threshold,
                self.low_contrast_background_value,
                contrast_method=self._low_contrast_method,
                visibility_threshold=self.visibility_threshold,
            )
            lc_rois.append(roi)
        return lc_rois

    def _sample_low_contrast_background_rois(
        self,
    ) -> tuple[list[LowContrastDiskROI], float]:
        """Sample the low-contrast background regions for calculating contrast values.

        Returns the background ROIs and their mean pixel value.
        """
        bg_rois = []
        for stng in self.low_contrast_background_roi_settings.values():
            roi = LowContrastDiskROI(
                self.image,
                self.phantom_angle + stng["angle"],
                self.phantom_radius * stng["roi radius"],
                self.phantom_radius * stng["distance from center"],
                self.phantom_center,
                self._low_contrast_threshold,
            )
            bg_rois.append(roi)
        avg_bg = np.mean([roi.pixel_value for roi in bg_rois])
        return bg_rois, avg_bg

    def _sample_high_contrast_rois(self) -> list[HighContrastDiskROI]:
        """Sample the high-contrast line pair regions."""
        hc_rois = []
        for stng in self.high_contrast_roi_settings.values():
            roi = HighContrastDiskROI(
                self.image,
                self.phantom_angle + stng["angle"],
                self.phantom_radius * stng["roi radius"],
                self.phantom_radius * stng["distance from center"],
                self.phantom_center,
                self._high_contrast_threshold,
            )
            hc_rois.append(roi)
        return hc_rois

    def save_analyzed_image(
        self,
        filename: None | str | BinaryIO = None,
        split_plots: bool = False,
        to_streams: bool = False,
        **kwargs,
    ) -> dict[str, BinaryIO] | list[str] | None:
        """Save the analyzed image to disk or to stream. Kwargs are passed to plt.savefig()

        Parameters
        ----------
        filename : None, str, stream
            A string representing where to save the file to. If split_plots and to_streams are both true, leave as None as newly-created streams are returned.
        split_plots: bool
            If split_plots is True, multiple files will be created that append a name. E.g. `my_file.png` will become `my_file_image.png`, `my_file_mtf.png`, etc.
            If to_streams is False, a list of new filenames will be returned
        to_streams: bool
            This only matters if split_plots is True. If both of these are true, multiple streams will be created and returned as a dict.
        """
        if filename is None and to_streams is False:
            raise ValueError("Must pass in a filename unless saving to streams.")
        figs, names = self.plot_analyzed_image(
            show=False, split_plots=split_plots, **kwargs
        )
        # remove plot keywords as savefig complains about extra kwargs
        for key in (
            "image",
            "low_contrast",
            "high_contrast",
            "show",
        ):
            kwargs.pop(key, None)
        if not split_plots:
            plt.savefig(filename, **kwargs)
        else:
            # append names to filename if it's file-like
            if not to_streams:
                filenames = []
                f, ext = osp.splitext(filename)
                for name in names:
                    filenames.append(f + "_" + name + ext)
            else:  # it's a stream buffer
                filenames = [io.BytesIO() for _ in names]
            for fig, name in zip(figs, filenames):
                fig.savefig(name, **kwargs)
            if to_streams:
                return {name: stream for name, stream in zip(names, filenames)}
            if split_plots:
                return filenames
        # NOTE(review): when split_plots is False the method implicitly returns None

    def _get_canny_regions(self) -> list[RegionProperties]:
        """Compute the canny edges of the image and return the connected regions found."""
        # compute the canny edges with very low thresholds (detects nearly everything)
        canny_img = feature.canny(
            self.image.array,
            low_threshold=self.detection_canny_settings["percentiles"][0],
            high_threshold=self.detection_canny_settings["percentiles"][1],
            use_quantiles=True,
            sigma=self.detection_canny_settings["sigma"],
        )
        # label the canny edge regions
        labeled = measure.label(canny_img)
        regions = measure.regionprops(labeled, intensity_image=self.image.array)
        return regions

    def _create_phantom_outline_object(self) -> tuple[Rectangle | Circle, dict]:
        """Construct the phantom outline object which will be plotted on the image for visual inspection.

        Raises
        ------
        ValueError
            If ``phantom_outline_object`` is neither a 'Rectangle' nor 'Circle' mapping.
        """
        outline_type = list(self.phantom_outline_object)[0]
        outline_settings = list(self.phantom_outline_object.values())[0]
        settings = {}
        if outline_type == "Rectangle":
            side_a = self.phantom_radius * outline_settings["width ratio"]
            side_b = self.phantom_radius * outline_settings["height ratio"]
            # shift the rectangle center to account for the phantom rotation
            half_hyp = np.sqrt(side_a**2 + side_b**2) / 2
            internal_angle = np.rad2deg(np.arctan(side_b / side_a))
            new_x = self.phantom_center.x + half_hyp * (
                geometry.cos(internal_angle)
                - geometry.cos(internal_angle + self.phantom_angle)
            )
            new_y = self.phantom_center.y + half_hyp * (
                geometry.sin(internal_angle)
                - geometry.sin(internal_angle + self.phantom_angle)
            )
            obj = Rectangle(
                width=self.phantom_radius * outline_settings["width ratio"],
                height=self.phantom_radius * outline_settings["height ratio"],
                center=Point(new_x, new_y),
            )
            settings["angle"] = self.phantom_angle
        elif outline_type == "Circle":
            obj = Circle(
                center_point=self.phantom_center,
                radius=self.phantom_radius * outline_settings["radius ratio"],
            )
        else:
            raise ValueError(
                "An outline object was passed but was not a Circle or Rectangle."
            )
        return obj, settings

    def percent_integral_uniformity(
        self, percentiles: tuple[float, float] = (1, 99)
    ) -> float | None:
        """Calculate and return the percent integral uniformity (PIU). This uses
        a similar equation as ACR does for CT protocols. The PIU is calculated
        over all the low contrast ROIs and the lowest (worst) PIU is returned.
        If the phantom does not contain low-contrast ROIs, None is returned."""
        if not self.low_contrast_rois:
            return
        pius = []
        for roi in self.low_contrast_rois:
            # percentiles rather than raw min/max to be robust to outlier pixels
            low = roi.percentile(percentiles[0])
            high = roi.percentile(percentiles[1])
            pius.append(percent_integral_uniformity(max=high, min=low))
        return min(pius)

    def plot_analyzed_image(
        self,
        image: bool = True,
        low_contrast: bool = True,
        high_contrast: bool = True,
        show: bool = True,
        split_plots: bool = False,
        **plt_kwargs: dict,
    ) -> tuple[list[plt.Figure], list[str]]:
        """Plot the analyzed image.

        Parameters
        ----------
        image : bool
            Show the image.
        low_contrast : bool
            Show the low contrast values plot.
        high_contrast : bool
            Show the high contrast values plot.
        show : bool
            Whether to actually show the image when called.
        split_plots : bool
            Whether to split the resulting image into individual plots. Useful for saving images into individual files.
        plt_kwargs : dict
            Keyword args passed to the plt.figure() method. Allows one to set things like figure size.
        """
        plot_low_contrast = low_contrast and any(self.low_contrast_rois)
        plot_high_contrast = high_contrast and any(self.high_contrast_rois)
        num_plots = sum((image, plot_low_contrast, plot_high_contrast))
        if num_plots < 1:
            warnings.warn(
                "Nothing was plotted because either all parameters were false or there were no actual high/low ROIs"
            )
            # NOTE(review): returns None here despite the annotated tuple return type
            return
        # set up axes and make axes iterable
        figs = []
        names = []
        if split_plots:
            axes = []
            for n in range(num_plots):
                fig, axis = plt.subplots(1, **plt_kwargs)
                figs.append(fig)
                axes.append(axis)
        else:
            fig, axes = plt.subplots(1, num_plots, **plt_kwargs)
            fig.subplots_adjust(wspace=0.4)
        if num_plots < 2:
            # a single subplot is not iterable; wrap it
            axes = (axes,)
        axes = iter(axes)
        # plot the marked image
        if image:
            img_ax = next(axes)
            names.append("image")
            self.image.plot(
                ax=img_ax,
                show=False,
                vmin=self.window_floor(),
                vmax=self.window_ceiling(),
            )
            img_ax.axis("off")
            img_ax.set_title(f"{self.common_name} Phantom Analysis")
            # plot the outline image
            if self.phantom_outline_object is not None:
                outline_obj, settings = self._create_phantom_outline_object()
                outline_obj.plot2axes(img_ax, edgecolor="b", **settings)
            # plot the low contrast background ROIs
            for roi in self.low_contrast_background_rois:
                roi.plot2axes(img_ax, edgecolor="b")
            # plot the low contrast ROIs
            for roi in self.low_contrast_rois:
                roi.plot2axes(img_ax, edgecolor=roi.plot_color)
            # plot the high-contrast ROIs along w/ pass/fail coloration
            if self.high_contrast_rois:
                for roi, mtf in zip(
                    self.high_contrast_rois, self.mtf.norm_mtfs.values()
                ):
                    color = "b" if mtf > self._high_contrast_threshold else "r"
                    roi.plot2axes(img_ax, edgecolor=color)
            # plot the center of the detected ROI; used for qualitative eval of detection algorithm
            img_ax.scatter(x=self.phantom_center.x, y=self.phantom_center.y, marker="x")
        # plot the low contrast value graph
        if plot_low_contrast:
            lowcon_ax = next(axes)
            names.append("low_contrast")
            self._plot_lowcontrast_graph(lowcon_ax)
        # plot the high contrast MTF graph
        if plot_high_contrast:
            hicon_ax = next(axes)
            names.append("high_contrast")
            self._plot_highcontrast_graph(hicon_ax)
        plt.tight_layout()
        if show:
            plt.show()
        return figs, names

    def _plot_lowcontrast_graph(self, axes: plt.Axes):
        """Plot the low contrast ROIs to an axes."""
        (line1,) = axes.plot(
            [roi.contrast for roi in self.low_contrast_rois],
            marker="o",
            color="m",
            label="Contrast",
        )
        axes.axhline(self._low_contrast_threshold, color="m")
        axes.grid(True)
        axes.set_title("Low-frequency Contrast")
        axes.set_xlabel("ROI #")
        axes.set_ylabel("Contrast")
        # CNR goes on a secondary y-axis since its scale differs from contrast
        axes2 = axes.twinx()
        (line2,) = axes2.plot(
            [roi.contrast_to_noise for roi in self.low_contrast_rois],
            marker="^",
            label="CNR",
        )
        axes2.set_ylabel("CNR")
        axes.legend(handles=[line1, line2])

    def _plot_highcontrast_graph(self, axes: plt.Axes):
        """Plot the high contrast ROIs to an axes."""
        axes.plot(self.mtf.spacings, list(self.mtf.norm_mtfs.values()), marker="*")
        axes.axhline(self._high_contrast_threshold, color="k")
        axes.grid(True)
        axes.set_title("High-frequency rMTF")
        axes.set_xlabel("Line pairs / mm")
        axes.set_ylabel("relative MTF")

    def results(self, as_list: bool = False) -> str | list[str]:
        """Return the results of the analysis.

        Parameters
        ----------
        as_list : bool
            Whether to return as a list of strings vs single string. Pretty much for internal usage.
        """
        text = [f"{self.common_name} results:", f"File: {self.image.truncated_path}"]
        if self.low_contrast_rois:
            text += [
                f"Median Contrast: {np.median([roi.contrast for roi in self.low_contrast_rois]):2.2f}",
                f"Median CNR: {np.median([roi.contrast_to_noise for roi in self.low_contrast_rois]):2.1f}",
                f'# Low contrast ROIs "seen": {sum(roi.passed_visibility for roi in self.low_contrast_rois):2.0f} of {len(self.low_contrast_rois)}',
                f"Area: {self.phantom_area:2.2f} mm^2",
            ]
        if self.high_contrast_rois:
            text += [
                f"MTF 80% (lp/mm): {self.mtf.relative_resolution(80):2.2f}",
                f"MTF 50% (lp/mm): {self.mtf.relative_resolution(50):2.2f}",
                f"MTF 30% (lp/mm): {self.mtf.relative_resolution(30):2.2f}",
            ]
        if not as_list:
            text = "\n".join(text)
        return text

    def _generate_results_data(self) -> PlanarResult:
        """Assemble the PlanarResult dataclass from the analyzed ROI data."""
        data = PlanarResult(
            analysis_type=self.common_name,
            median_contrast=np.median([roi.contrast for roi in self.low_contrast_rois]),
            median_cnr=np.median(
                [roi.contrast_to_noise for roi in self.low_contrast_rois]
            ),
            num_contrast_rois_seen=sum(
                roi.passed_visibility for roi in self.low_contrast_rois
            ),
            phantom_center_x_y=(self.phantom_center.x, self.phantom_center.y),
            low_contrast_rois=[roi.as_dict() for roi in self.low_contrast_rois],
            percent_integral_uniformity=self.percent_integral_uniformity(),
            phantom_area=self.phantom_area,
        )
        # NOTE(review): ``mtf`` is a bare class annotation; if analyze() never set it
        # (no high-contrast ROIs) this attribute access may raise AttributeError — confirm.
        if self.mtf is not None:
            data.mtf_lp_mm = [
                {p: self.mtf.relative_resolution(p)} for p in (80, 50, 30)
            ]
        return data

    def publish_pdf(
        self,
        filename: str,
        notes: str = None,
        open_file: bool = False,
        metadata: dict | None = None,
        logo: Path | str | None = None,
    ):
        """Publish (print) a PDF containing the analysis, images, and quantitative results.

        Parameters
        ----------
        filename : (str, file-like object}
            The file to write the results to.
        notes : str, list of strings
            Text; if str, prints single line.
            If list of strings, each list item is printed on its own line.
        open_file : bool
            Whether to open the file using the default program after creation.
        metadata : dict
            Extra data to be passed and shown in the PDF. The key and value will be shown with a colon.
            E.g. passing {'Author': 'James', 'Unit': 'TrueBeam'} would result in text in the PDF like:
            --------------
            Author: James
            Unit: TrueBeam
            --------------
        logo: Path, str
            A custom logo to use in the PDF report. If nothing is passed, the default pylinac logo is used.
        """
        canvas = pdf.PylinacCanvas(
            filename,
            page_title=f"{self.common_name} Phantom Analysis",
            metadata=metadata,
            logo=logo,
        )
        # write the text/numerical values
        text = self.results(as_list=True)
        canvas.add_text(text=text, location=(1.5, 25), font_size=14)
        if notes is not None:
            canvas.add_text(text="Notes:", location=(1, 5.5), font_size=12)
            canvas.add_text(text=notes, location=(1, 5))
        # plot the image
        data = io.BytesIO()
        self.save_analyzed_image(
            data, image=True, low_contrast=False, high_contrast=False
        )
        canvas.add_image(data, location=(1, 3.5), dimensions=(19, 19))
        # plot the high contrast
        if self.high_contrast_rois:
            canvas.add_new_page()
            data = io.BytesIO()
            self.save_analyzed_image(
                data, image=False, low_contrast=False, high_contrast=True
            )
            canvas.add_image(data, location=(1, 7), dimensions=(19, 19))
        # plot the low contrast
        if self.low_contrast_rois:
            canvas.add_new_page()
            data = io.BytesIO()
            self.save_analyzed_image(
                data, image=False, low_contrast=True, high_contrast=False
            )
            canvas.add_image(data, location=(1, 7), dimensions=(19, 19))
        canvas.finish()
        if open_file:
            webbrowser.open(filename)

    @property
    def phantom_center(self) -> Point:
        # manual override (from analyze()) wins over automatic detection
        return (
            Point(self._center_override)
            if self._center_override is not None
            else self._phantom_center_calc()
        )

    @property
    def phantom_radius(self) -> float:
        # manual override (from analyze()) wins over automatic detection
        return (
            self._size_override
            if self._size_override is not None
            else self._phantom_radius_calc()
        )

    @property
    def phantom_angle(self) -> float:
        # manual override (from analyze()) wins over automatic detection
        return (
            self._angle_override
            if self._angle_override is not None
            else self._phantom_angle_calc()
        )

    @property
    def phantom_area(self) -> float:
        """The area of the detected ROI in mm^2"""
        area_px = self._create_phantom_outline_object()[0].area
        return area_px / self.image.dpmm**2

    def _phantom_center_calc(self):
        """Automatically-determined phantom center: the bbox center of the detected region."""
        return bbox_center(self.phantom_ski_region)

    def _phantom_angle_calc(self):
        """Hook for subclasses to compute the phantom angle; no-op (returns None) here."""
        pass

    def _phantom_radius_calc(self):
        """Automatically-determined phantom 'radius': sqrt of the detected bbox area in pixels."""
        return math.sqrt(self.phantom_ski_region.bbox_area)

    def _find_ssd(self):
        """If the SSD parameter is set to auto, search at SAD, then at -5cm SID"""
        if isinstance(self._ssd, str) and self._ssd.lower() == "auto":
            self._ssd = self.image.metadata.get("RadiationMachineSAD", 1000)
            try:
                # cached property; no error means it found it.
                self.phantom_ski_region
            except ValueError:
                # 5cm up from SID
                self._ssd = self.image.metadata.get("RTImageSID", 1500) - 50
                self.phantom_ski_region
class LightRadResult(ResultBase):
    """This class should not be called directly. It is returned by the ``results_data()`` method.

    It is a dataclass under the hood and thus comes with all the dunder magic.

    Use the following attributes as normal class attributes."""

    field_size_x_mm: float  #: Detected crossplane field size in mm
    field_size_y_mm: float  #: Detected inplane field size in mm
    field_epid_offset_x_mm: float  #: Field center offset from the EPID center, crossplane, in mm
    field_epid_offset_y_mm: float  #: Field center offset from the EPID center, inplane, in mm
    field_bb_offset_x_mm: float  #: Field center offset from the BB centroid, crossplane, in mm
    field_bb_offset_y_mm: float  #: Field center offset from the BB centroid, inplane, in mm
class StandardImagingFC2(ImagePhantomBase):
common_name = "SI FC-2"
_demo_filename = "fc2.dcm"
# these positions are the offset in mm from the center of the image to the nominal position of the BBs
bb_positions_10x10 = {
"TL": [-40, -40],
"BL": [-40, 40],
"TR": [40, -40],
"BR": [40, 40],
}
bb_positions_15x15 = {
"TL": [-65, -65],
"BL": [-65, 65],
"TR": [65, -65],
"BR": [65, 65],
}
bb_sampling_box_size_mm = 10
field_strip_width_mm = 5
bb_size_mm = 4
bb_edge_threshold_mm: float
bb_centers: dict[str, Point]
@staticmethod
def run_demo() -> None:
"""Run the Standard Imaging FC-2 phantom analysis demonstration."""
fc2 = StandardImagingFC2.from_demo_image()
fc2.analyze()
fc2.plot_analyzed_image()
def analyze(
self, invert: bool = False, fwxm: int = 50, bb_edge_threshold_mm: float = 10
) -> None:
"""Analyze the FC-2 phantom to find the BBs and the open field and compare to each other as well as the EPID.
Parameters
----------
invert : bool
Whether to force-invert the image from the auto-detected inversion.
fwxm : int
The FWXM value to use to detect the field. For flattened fields, the default of 50 should be fine.
For FFF fields, consider using a lower value such as 25-30.
bb_edge_threshold_mm : float
The threshold in mm to use to determine if the BB is near the edge of the image. If the BB is within this threshold,
a different algorithm is used to determine the BB position that is more robust to edge effects but
can give uncertainty when in a flat region (i.e. away from the field edge).
"""
self.bb_edge_threshold_mm = bb_edge_threshold_mm
self._check_inversion()
if invert:
self.image.invert()
(
self.field_center,
self.field_width_x,
self.field_width_y,
) = self._find_field_info(fwxm=fwxm)
self.bb_center = self._find_overall_bb_centroid(fwxm=fwxm)
self.epid_center = self.image.center
def results(self, as_list: bool = False) -> str | list[str]:
"""Return the results of the analysis."""
text = [
f"{self.common_name} results:",
f"File: {self.image.truncated_path}",
f"The detected inplane field size was {self.field_width_y:2.1f}mm",
f"The detected crossplane field size was {self.field_width_x:2.1f}mm",
f"The inplane field was {self.field_epid_offset_mm.y:2.1f}mm from the EPID CAX",
f"The crossplane field was {self.field_epid_offset_mm.x:2.1f}mm from the EPID CAX",
f"The inplane field was {self.field_bb_offset_mm.y:2.1f}mm from the BB inplane center",
f"The crossplane field was {self.field_bb_offset_mm.x:2.1f}mm from the BB crossplane center",
]
if as_list:
return text
else:
text = "\n".join(text)
return text
@property
def field_epid_offset_mm(self) -> Vector:
"""Field offset from CAX using vector difference"""
return (
self.epid_center.as_vector() - self.field_center.as_vector()
) / self.image.dpmm
@property
def field_bb_offset_mm(self) -> Vector:
"""Field offset from BB centroid using vector difference"""
return (self.bb_center - self.field_center) / self.image.dpmm
def _generate_results_data(self) -> LightRadResult:
"""Return the results as a dict or dataclass"""
return LightRadResult(
field_size_x_mm=self.field_width_x,
field_size_y_mm=self.field_width_y,
field_epid_offset_x_mm=self.field_epid_offset_mm.x,
field_epid_offset_y_mm=self.field_epid_offset_mm.y,
field_bb_offset_x_mm=self.field_bb_offset_mm.x,
field_bb_offset_y_mm=self.field_bb_offset_mm.y,
)
    def _check_inversion(self):
        """Perform a normal corner-check inversion. Since these are always 10x10 or 15x15 fields it seems unlikely the corners will be exposed."""
        # delegate to the image's own corner-sample inversion check
        self.image.check_inversion()
def _find_field_info(self, fwxm: int) -> (Point, float, float):
"""Determine the center and field widths of the detected field by sampling a strip through the center of the image in inplane and crossplane"""
sample_width = self.field_strip_width_mm / 2 * self.image.dpmm
# sample the strip (nominally 5mm) centered about the image center. Average the strip to reduce noise.
x_bounds = (
int(self.image.center.x - sample_width),
int(self.image.center.x + sample_width),
)
y_img = np.mean(self.image[:, x_bounds[0] : x_bounds[1]], 1)
y_prof = FWXMProfilePhysical(
values=y_img,
dpmm=self.image.dpmm,
normalization=Normalization.BEAM_CENTER,
ground=True,
fwxm_height=fwxm,
)
y = y_prof.center_idx
field_width_y = y_prof.field_width_mm
y_bounds = (
int(self.image.center.y - sample_width),
int(self.image.center.y + sample_width),
)
x_img = np.mean(self.image[y_bounds[0] : y_bounds[1], :], 0)
x_prof = FWXMProfilePhysical(
values=x_img,
dpmm=self.image.dpmm,
normalization=Normalization.BEAM_CENTER,
ground=True,
fwxm_height=fwxm,
)
x = x_prof.center_idx
field_width_x = x_prof.field_width_mm
return Point(x=x, y=y), field_width_x, field_width_y
def _find_overall_bb_centroid(self, fwxm: int) -> Point:
"""Determine the geometric center of the 4 BBs"""
self.bb_centers = bb_centers = self._detect_bb_centers(fwxm)
central_x = np.mean([p.x for p in bb_centers.values()])
central_y = np.mean([p.y for p in bb_centers.values()])
return Point(x=central_x, y=central_y)
def _detect_bb_centers(self, fwxm: int) -> dict:
"""Sample a 10x10mm square about each BB to detect it. Adjustable using self.bb_sampling_box_size_mm"""
bb_positions = {}
nominal_positions = self._determine_bb_set(fwxm=fwxm)
dpmm = self.image.dpmm
self.image.filter(size=3, kind="median")
# sample the square, use skimage to find the ROI weighted centroid of the BBs
for key, position in nominal_positions.items():
near_edge = self._is_bb_near_edge(bb_position=position)
if near_edge:
# we apply a local histogram equalizer.
# this is local contrast enhancer. It helps the BBs stand out from the background
# and sharpen the gap between the BB and field edge.
# we need to replace and reset the array since we're in the loop
# This is a bit of BMF.
original_array = np.copy(self.image.array)
bb_radius_px = self.bb_size_mm / 2 * dpmm