/
targetpixelfile.py
2748 lines (2432 loc) · 106 KB
/
targetpixelfile.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""Defines TargetPixelFile, KeplerTargetPixelFile, and TessTargetPixelFile."""
from __future__ import division
import datetime
import os
import warnings
import logging
import collections
from astropy.io import fits
from astropy.io.fits import Undefined, BinTableHDU
from astropy.nddata import Cutout2D
from astropy.table import Table
from astropy.wcs import WCS
from astropy.utils.exceptions import AstropyWarning
from astropy.coordinates import SkyCoord
from astropy.stats.funcs import median_absolute_deviation as MAD
from astropy.utils.decorators import deprecated
from astropy.time import Time
from astropy.units import Quantity
import astropy.units as u
from matplotlib import patches
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from scipy.ndimage import label
from tqdm import tqdm
from copy import deepcopy
from . import PACKAGEDIR, MPLSTYLE
from .lightcurve import LightCurve, KeplerLightCurve, TessLightCurve
from .prf import KeplerPRF
from .utils import (
KeplerQualityFlags,
TessQualityFlags,
plot_image,
LightkurveWarning,
LightkurveDeprecationWarning,
validate_method,
centroid_quadratic,
_query_solar_system_objects,
)
from .io import detect_filetype
__all__ = ["KeplerTargetPixelFile", "TessTargetPixelFile"]
log = logging.getLogger(__name__)
# OPEN: consider to move to utils and
# consolidate with the helper in lightcurve.py (for time label)
# Maps an `astropy.time.Time.format` string to a brief axis label.
# The empty-string key covers time objects without a format attribute
# (e.g. phase-folded time); unknown formats are handled by the caller.
_TIME_LABEL_DICT_BRIEF = {"": "Phase", "bkjd": "[BKJD days]", "btjd": "[BTJD days]"}
def _time_label_brief(time):
    """Return a brief axis label for the format of a time object.

    Known formats map to a fixed label via ``_TIME_LABEL_DICT_BRIEF``
    (e.g. ``"bkjd"`` -> ``"[BKJD days]"``); unknown formats fall back
    to the upper-cased format name. Objects without a ``format``
    attribute are treated as phase values.
    """
    fmt = getattr(time, "format", "")
    try:
        return _TIME_LABEL_DICT_BRIEF[fmt]
    except KeyError:
        return fmt.upper()
class HduToMetaMapping(collections.abc.Mapping):
    """Read-only, dict-like view of an HDU header.

    Mimics the `astropy.timeseries.TimeSeries.meta` interface by
    exposing the header of ``hdu`` through the standard read-only
    `Mapping` protocol.
    """

    def __init__(self, hdu):
        # Snapshot the header cards into a plain dict at construction time.
        self._dict = dict(hdu.header)

    def __getitem__(self, key):
        return self._dict[key]

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        return iter(self._dict)
class TargetPixelFile(object):
"""Abstract class representing FITS files which contain time series imaging data.
You should probably not be using this abstract class directly;
see `KeplerTargetPixelFile` and `TessTargetPixelFile` instead.
"""
def __init__(self, path, quality_bitmask="default", targetid=None, **kwargs):
    """Open a Target Pixel File.

    Parameters
    ----------
    path : str or `~astropy.io.fits.HDUList`
        Location of the FITS file, or an HDU list already opened by the caller.
    quality_bitmask : str or int
        Bitmask specifying which quality flags mark a cadence as bad.
    targetid : object, optional
        Identifier of the target.
    **kwargs : dict
        Extra keyword arguments passed through to `astropy.io.fits.open`
        when ``path`` is a file location.
    """
    self.path = path
    # Accept either an already-open HDUList or something fits.open understands.
    self.hdu = path if isinstance(path, fits.HDUList) else fits.open(path, **kwargs)
    self.quality_bitmask = quality_bitmask
    self.targetid = targetid
    # For consistency with `LightCurve`, expose the primary header as `meta`.
    self.meta = HduToMetaMapping(self.hdu[0])
def __getitem__(self, key):
    """Implements indexing and slicing.

    ``key`` indexes into the *good-quality* cadences only; a new object
    of the same class is returned wrapping a copy of the selected rows.

    Note: the implementation below cannot be simplified using
    `copy[1].data = copy[1].data[self.quality_mask][key]`
    due to the complicated behavior of AstroPy's `FITS_rec`.
    """
    # Step 1: determine the indexes of the data to return.
    # We start by determining the indexes of the good-quality cadences.
    quality_idx = np.where(self.quality_mask)[0]
    # Then we apply the index or slice to the good-quality indexes.
    if isinstance(key, int):
        # Ensure we always have a range; this is necessary to ensure
        # that we always get a `FITS_rec` instead of a `FITS_record` below.
        if key == -1:
            # `key + 1` would be 0 here, yielding an empty slice,
            # so slice to the end instead.
            selected_idx = quality_idx[key:]
        else:
            selected_idx = quality_idx[key : key + 1]
    else:
        selected_idx = quality_idx[key]
    # Step 2: use the indexes to create a new copy of the data.
    with warnings.catch_warnings():
        # Ignore warnings about empty fields
        warnings.simplefilter("ignore", UserWarning)
        # AstroPy added `HDUList.copy()` in v3.1, allowing us to avoid manually
        # copying the HDUs, which brought along unexpected memory leaks.
        copy = self.hdu.copy()
        copy[1] = BinTableHDU(
            data=self.hdu[1].data[selected_idx], header=self.hdu[1].header
        )
    return self.__class__(
        copy, quality_bitmask=self.quality_bitmask, targetid=self.targetid
    )
def __len__(self):
    """Number of good-quality cadences."""
    return len(self.time)

def __add__(self, other):
    """Return a copy with ``other`` added to the FLUX column."""
    offset = other.value if isinstance(other, Quantity) else other
    new_hdu = deepcopy(self.hdu)
    new_hdu[1].data["FLUX"][self.quality_mask] += offset
    return type(self)(new_hdu, quality_bitmask=self.quality_bitmask)

def __mul__(self, other):
    """Return a copy with FLUX and FLUX_ERR scaled by ``other``."""
    factor = other.value if isinstance(other, Quantity) else other
    new_hdu = deepcopy(self.hdu)
    for column in ("FLUX", "FLUX_ERR"):
        new_hdu[1].data[column][self.quality_mask] *= factor
    return type(self)(new_hdu, quality_bitmask=self.quality_bitmask)

def __rtruediv__(self, other):
    """Reflected division operator.

    NOTE(review): this divides FLUX *by* ``other``, i.e. it yields the
    same result as ``self / other`` rather than ``other / self`` —
    confirm this asymmetry is intentional before relying on it.
    """
    divisor = other.value if isinstance(other, Quantity) else other
    new_hdu = deepcopy(self.hdu)
    for column in ("FLUX", "FLUX_ERR"):
        new_hdu[1].data[column][self.quality_mask] /= divisor
    return type(self)(new_hdu, quality_bitmask=self.quality_bitmask)

def __radd__(self, other):
    return self + other

def __sub__(self, other):
    return self + (-1 * other)

def __rsub__(self, other):
    return (self * -1) + other

def __rmul__(self, other):
    return self * other

def __truediv__(self, other):
    return self * (1.0 / other)

def __div__(self, other):
    # Python 2 legacy alias for `__truediv__`.
    return self.__truediv__(other)

def __rdiv__(self, other):
    # Python 2 legacy alias for `__rtruediv__`.
    return self.__rtruediv__(other)
@property
@deprecated("2.0", alternative="time", warning_type=LightkurveDeprecationWarning)
def astropy_time(self):
    """Returns an AstroPy Time object for all good-quality cadences.

    DEPRECATED since 2.0; use the `time` property instead.
    """
    return self.time
@property
def hdu(self):
    """The underlying `~astropy.io.fits.HDUList` of the file."""
    return self._hdu

@hdu.setter
def hdu(self, value, keys=("FLUX", "QUALITY")):
    """Verify the file format when setting the value of `self.hdu`.

    Raises a ValueError if `value` does not appear to be a Target Pixel File,
    i.e. if its first extension lacks any of the columns named in ``keys``.
    """
    header = value[1].header
    for key in keys:
        # A TPF must advertise the column name in one of its TTYPE* cards.
        column_names = (header[ttype] for ttype in header["TTYPE*"])
        if not any(name == key for name in column_names):
            raise ValueError(
                "File {} does not have a {} column, "
                "is this a target pixel file?".format(self.path, key)
            )
    self._hdu = value
def get_keyword(self, keyword, hdu=0, default=None):
    """Returns a header keyword value.

    If the keyword is Undefined or does not exist,
    then return ``default`` instead.

    Parameters
    ----------
    keyword : str
        Name of the header keyword to look up.
    hdu : int
        Extension number to read the header from (default: primary).
    default : object
        Value returned when the keyword is absent or has no value.
    """
    value = self.hdu[hdu].header.get(keyword, default)
    # `Header.get` returns the `Undefined` sentinel for cards that exist
    # but carry no value; map those to `default` as the docstring promises.
    if isinstance(value, Undefined):
        value = default
    return value
@property
@deprecated(
    "2.0", alternative="get_header()", warning_type=LightkurveDeprecationWarning
)
def header(self):
    """DEPRECATED. Please use ``get_header()`` instead.

    Returns the primary (``ext=0``) header only; ``get_header()`` can
    access any extension.
    """
    return self.hdu[0].header
def get_header(self, ext=0):
    """Returns the metadata embedded in the file.

    Target Pixel Files contain embedded metadata headers spread across three
    different FITS extensions:

    1. The "PRIMARY" extension (``ext=0``) provides a metadata header
       providing details on the target and its CCD position.
    2. The "PIXELS" extension (``ext=1``) provides details on the
       data column and their coordinate system (WCS).
    3. The "APERTURE" extension (``ext=2``) provides details on the
       aperture pixel mask and the expected coordinate system (WCS).

    Parameters
    ----------
    ext : int or str
        FITS extension name or number.

    Returns
    -------
    header : `~astropy.io.fits.header.Header`
        Header object containing metadata keywords.
    """
    return self.hdu[ext].header
@property
def ra(self):
    """Right Ascension of target ('RA_OBJ' header keyword)."""
    return self.get_keyword("RA_OBJ")

@property
def dec(self):
    """Declination of target ('DEC_OBJ' header keyword)."""
    return self.get_keyword("DEC_OBJ")

@property
def column(self):
    """CCD pixel column number ('1CRV5P' header keyword).

    Read from extension 1; defaults to 0 when the keyword is absent.
    """
    return self.get_keyword("1CRV5P", hdu=1, default=0)

@property
def row(self):
    """CCD pixel row number ('2CRV5P' header keyword).

    Read from extension 1; defaults to 0 when the keyword is absent.
    """
    return self.get_keyword("2CRV5P", hdu=1, default=0)

@property
def pos_corr1(self):
    """Returns the column position correction (good-quality cadences only)."""
    return self.hdu[1].data["POS_CORR1"][self.quality_mask]

@property
def pos_corr2(self):
    """Returns the row position correction (good-quality cadences only)."""
    return self.hdu[1].data["POS_CORR2"][self.quality_mask]
@property
def pipeline_mask(self):
    """Returns the optimal aperture mask used by the pipeline.

    Returns a 2D boolean array; `True` marks pixels in the optimal aperture.
    """
    # Both Kepler and TESS flag the pixels in the optimal aperture using
    # bit number 2 in the aperture mask extension, e.g. see Section 6 of
    # the TESS Data Products documentation (EXP-TESS-ARC-ICD-TM-0014.pdf).
    try:
        return self.hdu[2].data & 2 > 0
    except TypeError:  # Early versions of TESScut returned floats in HDU 2
        # Bitwise-and fails on floats; fall back to selecting every pixel.
        return np.ones(self.hdu[2].data.shape, dtype=bool)
@property
def shape(self):
    """Return the cube dimension shape.

    Shape of the flux cube: (n_cadences, n_rows, n_columns).
    """
    return self.flux.shape
@property
def time(self) -> Time:
    """Returns the time for all good-quality cadences.

    Cadences with a non-finite TIME value are assigned a value of zero;
    the `nan_time_mask` property can be used to identify them afterwards.
    """
    time_values = self.hdu[1].data["TIME"][self.quality_mask]
    # Some data products have missing time values;
    # we need to set these to zero or `Time` cannot be instantiated.
    time_values[~np.isfinite(time_values)] = 0
    # Infer the time format from the BJD reference epoch:
    # Kepler uses BJD - 2454833 (BKJD), TESS uses BJD - 2457000 (BTJD).
    bjdrefi = self.hdu[1].header.get("BJDREFI")
    if bjdrefi == 2454833:
        time_format = "bkjd"
    elif bjdrefi == 2457000:
        time_format = "btjd"
    else:
        time_format = "jd"
    return Time(
        time_values,
        scale=self.hdu[1].header.get("TIMESYS", "tdb").lower(),
        format=time_format,
    )
@property
def cadenceno(self):
    """Return the cadence number for all good-quality cadences."""
    cadences = self.hdu[1].data["CADENCENO"][self.quality_mask]
    # The TESScut service returns an array of zeros as CADENCENO.
    # If this is the case, return frame numbers counting from 0 instead.
    if cadences[0] == 0:
        return np.arange(len(cadences), dtype=int)
    return cadences
@property
def nan_time_mask(self):
    """Returns a boolean mask flagging cadences whose time is `nan`.

    Note: the `time` property replaces non-finite TIME values with 0,
    so a value of exactly zero is used here as the missing-time sentinel.
    """
    return self.time.value == 0
@property
def flux(self) -> Quantity:
    """Returns the flux for all good-quality cadences."""
    # Official products record flux in electrons/second via the TUNIT5
    # card; anything else is treated as dimensionless.
    tunit = self.get_header(1)["TUNIT5"]
    unit = "electron/s" if tunit == "e-/s" else "dimensionless"
    return Quantity(self.hdu[1].data["FLUX"][self.quality_mask], unit=unit)
@property
def flux_err(self) -> Quantity:
    """Returns the flux uncertainty for all good-quality cadences."""
    # TUNIT6 describes the units of the FLUX_ERR column.
    tunit = self.get_header(1)["TUNIT6"]
    unit = "electron/s" if tunit == "e-/s" else "dimensionless"
    return Quantity(self.hdu[1].data["FLUX_ERR"][self.quality_mask], unit=unit)
@property
def flux_bkg(self) -> Quantity:
    """Returns the background flux for all good-quality cadences."""
    return Quantity(
        self.hdu[1].data["FLUX_BKG"][self.quality_mask], unit="electron/s"
    )

@property
def flux_bkg_err(self) -> Quantity:
    """Returns the background flux uncertainty for all good-quality cadences."""
    return Quantity(
        self.hdu[1].data["FLUX_BKG_ERR"][self.quality_mask], unit="electron/s"
    )
@property
def quality(self):
    """Returns the quality flag integer of every good cadence.

    These are the raw QUALITY column values after applying `quality_mask`.
    """
    return self.hdu[1].data["QUALITY"][self.quality_mask]
@property
def wcs(self) -> WCS:
    """Returns an `astropy.wcs.WCS` object with the World Coordinate System
    solution for the target pixel file.

    Returns
    -------
    w : `astropy.wcs.WCS` object
        WCS solution
    """
    if "MAST" in self.hdu[0].header["ORIGIN"]:  # Is it a TessCut TPF?
        # TPF's generated using the TESSCut service in early 2019 only appear
        # to contain a valid WCS in the second extension (the aperture
        # extension), so we treat such files as a special case.
        return WCS(self.hdu[2])
    else:
        # For standard (Ames-pipeline-produced) TPF files, we use the WCS
        # keywords provided in the first extension (the data table extension).
        # Specifically, we use the WCS keywords for the 5th data column (FLUX).
        # The table below maps the table-column WCS card names to the
        # standard image WCS card names that `astropy.wcs.WCS` expects.
        wcs_keywords = {
            "1CTYP5": "CTYPE1",
            "2CTYP5": "CTYPE2",
            "1CRPX5": "CRPIX1",
            "2CRPX5": "CRPIX2",
            "1CRVL5": "CRVAL1",
            "2CRVL5": "CRVAL2",
            "1CUNI5": "CUNIT1",
            "2CUNI5": "CUNIT2",
            "1CDLT5": "CDELT1",
            "2CDLT5": "CDELT2",
            "11PC5": "PC1_1",
            "12PC5": "PC1_2",
            "21PC5": "PC2_1",
            "22PC5": "PC2_2",
            "NAXIS1": "NAXIS1",
            "NAXIS2": "NAXIS2",
        }
        mywcs = {}
        for oldkey, newkey in wcs_keywords.items():
            # Copy only the cards that are actually present in the header.
            if self.hdu[1].header.get(oldkey, None) is not None:
                mywcs[newkey] = self.hdu[1].header[oldkey]
        return WCS(mywcs)
def get_coordinates(self, cadence="all"):
    """Returns two 3D arrays of RA and Dec values in decimal degrees.

    If cadence number is given, returns 2D arrays for that cadence. If
    cadence is 'all' returns one RA, Dec value for each pixel in every cadence.
    Uses the WCS solution and the POS_CORR data from TPF header.

    Parameters
    ----------
    cadence : 'all' or int
        Which cadences to return the RA Dec coordinates for.

    Returns
    -------
    ra : numpy array, same shape as tpf.flux[cadence]
        Array containing RA values for every pixel, for every cadence.
    dec : numpy array, same shape as tpf.flux[cadence]
        Array containing Dec values for every pixel, for every cadence.
    """
    w = self.wcs
    # Pixel-coordinate grids for a single frame (X: columns, Y: rows).
    X, Y = np.meshgrid(np.arange(self.shape[2]), np.arange(self.shape[1]))
    # Copy the raw (unmasked) position corrections so we can sanitize them.
    pos_corr1_pix = np.copy(self.hdu[1].data["POS_CORR1"])
    pos_corr2_pix = np.copy(self.hdu[1].data["POS_CORR2"])
    # We zero POS_CORR* when the values are NaN or make no sense (>50px)
    with warnings.catch_warnings():  # Comparing NaNs to numbers is OK here
        warnings.simplefilter("ignore", RuntimeWarning)
        bad = np.any(
            [
                ~np.isfinite(pos_corr1_pix),
                ~np.isfinite(pos_corr2_pix),
                np.abs(pos_corr1_pix - np.nanmedian(pos_corr1_pix)) > 50,
                np.abs(pos_corr2_pix - np.nanmedian(pos_corr2_pix)) > 50,
            ],
            axis=0,
        )
    pos_corr1_pix[bad], pos_corr2_pix[bad] = 0, 0
    # Add in POSCORRs: broadcast the per-frame grid against the per-cadence
    # corrections to obtain one (cadence, row, column) coordinate cube.
    X = np.atleast_3d(X).transpose([2, 0, 1]) + np.atleast_3d(
        pos_corr1_pix
    ).transpose([1, 2, 0])
    Y = np.atleast_3d(Y).transpose([2, 0, 1]) + np.atleast_3d(
        pos_corr2_pix
    ).transpose([1, 2, 0])
    # Pass through WCS (origin=1, i.e. FITS pixel convention).
    ra, dec = w.wcs_pix2world(X.ravel(), Y.ravel(), 1)
    ra = ra.reshape((pos_corr1_pix.shape[0], self.shape[1], self.shape[2]))
    dec = dec.reshape((pos_corr2_pix.shape[0], self.shape[1], self.shape[2]))
    # Only now restrict to good-quality cadences.
    ra, dec = ra[self.quality_mask], dec[self.quality_mask]
    if cadence != "all":
        return ra[cadence], dec[cadence]
    return ra, dec
def show_properties(self):
    """Prints a description of all non-callable attributes.

    Prints in order of type (ints, strings, lists, arrays, others).
    """
    attrs = {}
    # Skip private attributes, deprecated shims, and callables.
    for attr in dir(self):
        if attr.startswith("_") or attr in ("header", "astropy_time"):
            continue
        res = getattr(self, attr)
        if callable(res):
            continue
        if attr == "hdu":
            # Summarize the HDU list by the EXTNAMEs of its extensions.
            extnames = ", ".join("{}".format(r.header["EXTNAME"]) for r in res)
            attrs[attr] = {"res": res, "type": "list", "print": extnames}
            continue
        attrs[attr] = {"res": res}
        if isinstance(res, int):
            attrs[attr]["print"] = "{}".format(res)
            attrs[attr]["type"] = "int"
        elif isinstance(res, np.ndarray):
            attrs[attr]["print"] = "array {}".format(res.shape)
            attrs[attr]["type"] = "array"
        elif isinstance(res, list):
            attrs[attr]["print"] = "list length {}".format(len(res))
            attrs[attr]["type"] = "list"
        elif isinstance(res, str):
            # Show empty strings as "None" for readability.
            attrs[attr]["print"] = "{}".format(res if res != "" else "None")
            attrs[attr]["type"] = "str"
        elif attr == "wcs":
            attrs[attr]["print"] = "astropy.wcs.wcs.WCS"
            attrs[attr]["type"] = "other"
        else:
            attrs[attr]["print"] = "{}".format(type(res))
            attrs[attr]["type"] = "other"
    output = Table(names=["Attribute", "Description"], dtype=[object, object])
    # Emit rows grouped by type, in a fixed order.
    for typ in ("int", "str", "list", "array", "other"):
        for attr, dic in attrs.items():
            if dic["type"] == typ:
                output.add_row([attr, dic["print"]])
    output.pprint(max_lines=-1, max_width=-1)
def to_lightcurve(self, method="aperture", **kwargs):
    """Performs photometry on the pixel data and returns a LightCurve object.

    See the docstring of `aperture_photometry()` for valid arguments if
    the method is 'aperture'. Otherwise, see the docstring of
    `prf_photometry()` for valid arguments if the method is 'prf'.

    Parameters
    ----------
    method : 'aperture' or 'prf'.
        Photometry method to use.
    **kwargs : dict
        Extra arguments to be passed to the `aperture_photometry` or the
        `prf_photometry` method of this class.

    Returns
    -------
    lc : LightCurve object
        Object containing the resulting lightcurve.

    Raises
    ------
    ValueError
        If ``method`` is neither 'aperture' nor 'prf'.
    """
    if method == "aperture":
        return self.extract_aperture_photometry(**kwargs)
    if method == "prf":
        return self.prf_lightcurve(**kwargs)
    raise ValueError("Photometry method must be 'aperture' or 'prf'.")
def _resolve_default_aperture_mask(self, aperture_mask):
    """Resolve the special value 'default' to a concrete mask name.

    'default' becomes 'pipeline' when the pipeline mask has at least one
    pixel set, and 'threshold' otherwise; any other value passes through
    unchanged.
    """
    if not (isinstance(aperture_mask, str) and aperture_mask == "default"):
        return aperture_mask
    return "pipeline" if np.any(self.pipeline_mask) else "threshold"
def _parse_aperture_mask(self, aperture_mask):
    """Parse the `aperture_mask` parameter as given by a user.

    The `aperture_mask` parameter is accepted by a number of methods.
    This method ensures that the parameter is always parsed in the same way.

    Parameters
    ----------
    aperture_mask : array-like, 'pipeline', 'all', 'threshold', 'default',
        'background', or None
        A boolean array describing the aperture such that `True` means
        that the pixel will be used.
        If None or 'all' are passed, all pixels will be used.
        If 'pipeline' is passed, the mask suggested by the official pipeline
        will be returned.
        If 'threshold' is passed, all pixels brighter than 3-sigma above
        the median flux will be used.
        If 'default' is passed, 'pipeline' mask will be used when available,
        with 'threshold' as the fallback.
        If 'background' is passed, all pixels fainter than the median flux
        will be used.
        If 'empty' is passed, no pixels will be used.

    Returns
    -------
    aperture_mask : ndarray
        2D boolean numpy array containing `True` for selected pixels.

    Raises
    ------
    ValueError
        If 'pipeline' is requested but the pipeline mask is missing/empty,
        or if an array mask does not match the image shape.
    """
    aperture_mask = self._resolve_default_aperture_mask(aperture_mask)
    # 'pipeline' is an explicit request: raise if that mask is absent or
    # empty (pass 'default' to fall back to 'threshold' automatically).
    if (
        isinstance(aperture_mask, str)
        and (aperture_mask == "pipeline")
        and not np.any(self.pipeline_mask)
    ):
        raise ValueError(
            "_parse_aperture_mask: 'pipeline' is requested, but it is missing or empty."
        )
    # Input validation
    if hasattr(aperture_mask, "shape") and (
        aperture_mask.shape != self.flux[0].shape
    ):
        raise ValueError(
            "`aperture_mask` has shape {}, "
            "but the flux data has shape {}"
            "".format(aperture_mask.shape, self.flux[0].shape)
        )
    with warnings.catch_warnings():
        # `aperture_mask` supports both arrays and string values; these yield
        # uninteresting FutureWarnings when compared, so let's ignore that.
        warnings.simplefilter(action="ignore", category=FutureWarning)
        if aperture_mask is None or aperture_mask == "all":
            aperture_mask = np.ones((self.shape[1], self.shape[2]), dtype=bool)
        elif aperture_mask == "pipeline":
            aperture_mask = self.pipeline_mask
        elif aperture_mask == "threshold":
            aperture_mask = self.create_threshold_mask()
        elif aperture_mask == "background":
            aperture_mask = ~self.create_threshold_mask(
                threshold=0, reference_pixel=None
            )
        elif aperture_mask == "empty":
            aperture_mask = np.zeros((self.shape[1], self.shape[2]), dtype=bool)
        elif (
            np.issubdtype(aperture_mask.dtype, np.integer)
            and ((aperture_mask & 2) == 2).any()
        ):
            # Kepler and TESS pipeline style integer flags
            aperture_mask = (aperture_mask & 2) == 2
        elif isinstance(aperture_mask.flat[0], (np.integer, np.floating)):
            # NOTE: `np.float` was removed in NumPy 1.24; `np.floating` is
            # the correct abstract scalar type for float-valued masks.
            aperture_mask = aperture_mask.astype(bool)
    # Remember the last mask used, e.g. for plotting overlays.
    self._last_aperture_mask = aperture_mask
    return aperture_mask
def create_threshold_mask(self, threshold=3, reference_pixel="center"):
    """Returns an aperture mask creating using the thresholding method.

    This method will identify the pixels in the TargetPixelFile which show
    a median flux that is brighter than `threshold` times the standard
    deviation above the overall median. The standard deviation is estimated
    in a robust way by multiplying the Median Absolute Deviation (MAD)
    with 1.4826.

    If the thresholding method yields multiple contiguous regions, then
    only the region closest to the (col, row) coordinate specified by
    `reference_pixel` is returned. For example, `reference_pixel=(0, 0)`
    will pick the region closest to the bottom left corner.
    By default, the region closest to the center of the mask will be
    returned. If `reference_pixel=None` then all regions will be returned.

    Parameters
    ----------
    threshold : float
        A value for the number of sigma by which a pixel needs to be
        brighter than the median flux to be included in the aperture mask.
    reference_pixel: (int, int) tuple, 'center', or None
        (col, row) pixel coordinate closest to the desired region.
        For example, use `reference_pixel=(0,0)` to select the region
        closest to the bottom left corner of the target pixel file.
        If 'center' (default) then the region closest to the center pixel
        will be selected. If `None` then all regions will be selected.

    Returns
    -------
    aperture_mask : ndarray
        2D boolean numpy array containing `True` for pixels above the
        threshold.
    """
    if reference_pixel == "center":
        reference_pixel = (self.shape[2] / 2, self.shape[1] / 2)
    # Calculate the median image
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        median_image = np.nanmedian(self.flux, axis=0)
    vals = median_image[np.isfinite(median_image)].flatten()
    # Calculate the threshold value in flux units
    mad_cut = (1.4826 * MAD(vals) * threshold) + np.nanmedian(median_image)
    # Create a mask containing the pixels above the threshold flux
    threshold_mask = np.nan_to_num(median_image) >= mad_cut
    if (reference_pixel is None) or (not threshold_mask.any()):
        # return all regions above threshold
        return threshold_mask
    else:
        # Return only the contiguous region closest to `region`.
        # First, label all the regions:
        labels = label(threshold_mask)[0]
        # For all pixels above threshold, compute distance to reference pixel:
        label_args = np.argwhere(labels > 0)
        distances = [
            np.hypot(crd[0], crd[1])
            for crd in label_args
            - np.array([reference_pixel[1], reference_pixel[0]])
        ]
        # Which label corresponds to the closest pixel?
        closest_arg = label_args[np.argmin(distances)]
        closest_label = labels[closest_arg[0], closest_arg[1]]
        return labels == closest_label
def estimate_background(self, aperture_mask="background"):
    """Returns an estimate of the median background level in the FLUX column.

    In the case of official Kepler and TESS Target Pixel Files, the
    background estimates should be close to zero because these products
    have already been background-subtracted by the pipeline (i.e. the values
    in the `FLUX_BKG` column have been subtracted from the values in `FLUX`).
    Background subtraction is often imperfect however, and this method aims
    to allow users to estimate residual background signals using different
    methods.

    Target Pixel Files created by the MAST TESSCut service have
    not been background-subtracted. For such products, or other community-
    generated pixel files, this method provides a first-order estimate of
    the background levels.

    This method estimates the per-pixel background flux over time by
    computing the median pixel value across the `aperture mask`.

    Parameters
    ----------
    aperture_mask : 'background', 'all', or array-like
        Which pixels should be used to estimate the background?
        If None or 'all' are passed, all pixels in the pixel file will be
        used. If 'background' is passed, all pixels fainter than the
        median flux will be used. Alternatively, users can pass a boolean
        array describing the aperture mask such that `True` means that the
        pixel will be used.

    Returns
    -------
    lc : `LightCurve` object
        Median background flux in units electron/second/pixel.
    """
    mask = self._parse_aperture_mask(aperture_mask)
    # For each cadence, compute the median pixel flux across the background
    # pixels; dividing by u.pixel yields a per-pixel rate.
    simple_bkg = np.nanmedian(self.flux[:, mask], axis=1) / u.pixel
    return LightCurve(time=self.time, flux=simple_bkg)
def estimate_centroids(self, aperture_mask="default", method="moments"):
    """Returns the flux center of an object inside ``aperture_mask``.

    Telescopes tend to smear out the light from a point-like star over
    multiple pixels, so the position of a star is commonly estimated by
    computing the *geometric center* ("centroid") of its image. Two
    estimation methods are provided:

    * `method='moments'` computes the "center of mass" of the light
      based on the 2D image moments of the pixels inside ``aperture_mask``.
    * `method='quadratic'` fits a two-dimensional, second-order polynomial
      to the 3x3 patch of pixels centered on the brightest pixel inside
      the ``aperture_mask``, and returns the peak of that polynomial.
      Following Vakili & Hogg 2016 (ArXiv:1610.05873, Section 3.2).

    Parameters
    ----------
    aperture_mask : 'pipeline', 'threshold', 'all', 'default', or array-like
        Which pixels contain the object to be measured. If None or 'all',
        all pixels will be used. 'pipeline' uses the official pipeline mask;
        'threshold' uses pixels brighter than 3-sigma above the median flux;
        'default' uses 'pipeline' when available with 'threshold' as the
        fallback. A boolean array selects pixels where it is `True`.
    method : 'moments' or 'quadratic'
        Which estimation method to use (see above).

    Returns
    -------
    columns, rows : `~astropy.units.Quantity`, `~astropy.units.Quantity`
        Arrays containing the column and row positions for the centroid
        for each cadence, or NaN for cadences where the estimation failed.
    """
    method = validate_method(method, ["moments", "quadratic"])
    # Dispatch to the matching private helper.
    estimators = {
        "moments": self._estimate_centroids_via_moments,
        "quadratic": self._estimate_centroids_via_quadratic,
    }
    return estimators[method](aperture_mask=aperture_mask)
def _estimate_centroids_via_moments(self, aperture_mask):
    """Compute the "center of mass" of the light based on the 2D moments;
    this is a helper method for `estimate_centroids()`.

    Returns
    -------
    columns, rows : `~astropy.units.Quantity`, `~astropy.units.Quantity`
        Flux-weighted mean column and row position per cadence, in CCD
        pixel coordinates.
    """
    aperture_mask = self._parse_aperture_mask(aperture_mask)
    # Pixel-center coordinate grids (pixels are centered at .5, 1.5, ...),
    # offset to absolute CCD coordinates via the corner column/row.
    yy, xx = np.indices(self.shape[1:]) + 0.5
    yy = self.row + yy
    xx = self.column + xx
    total_flux = np.nansum(self.flux[:, aperture_mask], axis=1)
    with warnings.catch_warnings():
        # RuntimeWarnings may occur below if total_flux contains zeros
        warnings.simplefilter("ignore", RuntimeWarning)
        col_centr = (
            np.nansum(xx * aperture_mask * self.flux, axis=(1, 2)) / total_flux
        )
        row_centr = (
            np.nansum(yy * aperture_mask * self.flux, axis=(1, 2)) / total_flux
        )
    return col_centr * u.pixel, row_centr * u.pixel
def _estimate_centroids_via_quadratic(self, aperture_mask):
    """Estimate centroids by fitting a 2D quadratic to the brightest pixels;
    this is a helper method for `estimate_centroids()`.

    Returns
    -------
    columns, rows : `~astropy.units.Quantity`, `~astropy.units.Quantity`
        Column and row centroid per cadence in CCD pixel coordinates,
        or NaN where the fit failed.
    """
    aperture_mask = self._parse_aperture_mask(aperture_mask)
    col_centr, row_centr = [], []
    for idx in range(len(self.time)):
        col, row = centroid_quadratic(self.flux[idx], mask=aperture_mask)
        col_centr.append(col)
        row_centr.append(row)
    # `np.asfarray` was removed in NumPy 2.0; `np.asarray(..., dtype=float)`
    # is the supported equivalent. We add .5 below because the convention
    # is that pixels are centered at .5, 1.5, 2.5, ...
    col_centr = np.asarray(col_centr, dtype=float) + self.column + 0.5
    row_centr = np.asarray(row_centr, dtype=float) + self.row + 0.5
    col_centr = Quantity(col_centr, unit="pixel")
    row_centr = Quantity(row_centr, unit="pixel")
    return col_centr, row_centr
def _aperture_photometry(
    self, aperture_mask, flux_method="sum", centroid_method="moments"
):
    """Helper method for ``extract_aperture photometry``.

    Parameters
    ----------
    aperture_mask : str or array-like
        Aperture specification, parsed by `_parse_aperture_mask`.
    flux_method : 'sum', 'median', or 'mean'
        How to combine the pixel fluxes inside the aperture per cadence.
    centroid_method : 'moments' or 'quadratic'
        Passed on to `estimate_centroids`.

    Returns
    -------
    flux, flux_err, centroid_col, centroid_row
    """
    # Validate the aperture mask
    apmask = self._parse_aperture_mask(aperture_mask)
    if apmask.sum() == 0:
        log.warning("Warning: aperture mask contains zero pixels.")
    # Estimate centroids
    centroid_col, centroid_row = self.estimate_centroids(
        apmask, method=centroid_method
    )
    # Estimate flux
    if flux_method == "sum":
        flux = np.nansum(self.flux[:, apmask], axis=1)
    elif flux_method == "median":
        flux = np.nanmedian(self.flux[:, apmask], axis=1)
    elif flux_method == "mean":
        flux = np.nanmean(self.flux[:, apmask], axis=1)
    else:
        raise ValueError("`flux_method` must be one of 'sum', 'median', or 'mean'.")
    # In the future we may wish to add a user specified function
    # We use ``np.nansum`` above to be robust against a subset of pixels
    # being NaN, however if *all* pixels are NaN, we propagate a NaN.
    is_allnan = ~np.any(np.isfinite(self.flux[:, apmask]), axis=1)
    flux[is_allnan] = np.nan
    # Similarly, if *all* pixel values across the TPF are exactly zero,
    # we propagate NaN (cf. #873 for an example of this happening)
    is_allzero = np.all(self.flux == 0, axis=(1, 2))
    flux[is_allzero] = np.nan
    # Estimate flux_err by combining per-pixel errors in quadrature.
    with warnings.catch_warnings():
        # Ignore warnings due to negative errors
        warnings.simplefilter("ignore", RuntimeWarning)
        if flux_method == "sum":
            flux_err = np.nansum(self.flux_err[:, apmask] ** 2, axis=1) ** 0.5
        elif flux_method == "median":
            flux_err = np.nanmedian(self.flux_err[:, apmask] ** 2, axis=1) ** 0.5
        elif flux_method == "mean":
            flux_err = np.nanmean(self.flux_err[:, apmask] ** 2, axis=1) ** 0.5
        is_allnan = ~np.any(np.isfinite(self.flux_err[:, apmask]), axis=1)
        flux_err[is_allnan] = np.nan
    # Attach physical units when the file declares electrons/second.
    if self.get_header(1)["TUNIT5"] == "e-/s":
        flux = Quantity(flux, unit="electron/s")
        flux_err = Quantity(flux_err, unit="electron/s")
    return flux, flux_err, centroid_col, centroid_row
def query_solar_system_objects(
self,
cadence_mask="outliers",
radius=None,
sigma=3,
cache=True,
return_mask=False,
):
"""Returns a list of asteroids or comets which affected the target pixel files.
Light curves of stars or galaxies are frequently affected by solar
system bodies (e.g. asteroids, comets, planets). These objects can move
across a target's photometric aperture mask on time scales of hours to
days. When they pass through a mask, they tend to cause a brief spike
in the brightness of the target. They can also cause dips by moving
through a local background aperture mask (if any is used).
The artificial spikes and dips introduced by asteroids are frequently
confused with stellar flares, planet transits, etc. This method helps
to identify false signals injected by asteroids by providing a list of
the solar system objects (name, brightness, time) that passed in the
vicinity of the target during the span of the light curve.
This method queries the `SkyBot API <http://vo.imcce.fr/webservices/skybot/>`_,
which returns a list of asteroids/comets/planets given a location, time,
and search cone.
Notes
-----
* This method will use the `ra` and `dec` properties of the `LightCurve`
object to determine the position of the search cone.
* The size of the search cone is 5 spacecraft pixels + TPF dimension by default. You
can change this by passing the `radius` parameter (unit: degrees).
* By default, this method will only search points in time during which the light
curve showed 3-sigma outliers in flux. You can override this behavior
and search for specific times by passing `cadence_mask`. See examples for details.
Parameters
----------
cadence_mask : str, or boolean array with length of self.time
mask in time to select which frames or points should be searched for SSOs.
Default "outliers" will search for SSOs at points that are `sigma` from the mean.
"all" will search all cadences. Alternatively, pass a boolean array with values of "True"
for times to search for SSOs.
radius : optional, float
Radius to search for bodies. If None, will search for SSOs within 5 pixels of
all pixels in the TPF.
sigma : optional, float
If `cadence_mask` is set to `"outliers"`, `sigma` will be used to identify
outliers.
cache : optional, bool
If True will cache the search result in the astropy cache. Set to False
to request the search again.
return_mask: optional, bool
If True will return a boolean mask in time alongside the result
Returns
-------
result : pandas.DataFrame
DataFrame containing the list objects in frames that were identified to contain
SSOs.
Examples
--------
Find if there are SSOs affecting the target pixel file for the given time frame:
>>> df_sso = tpf.query_solar_system_objects(cadence_mask=(tpf.time.value >= 2014.1) & (tpf.time.value <= 2014.9)) # doctest: +SKIP
Find if there are SSOs affecting the target pixel file for all times, but it will be much slower:
>>> df_sso = tpf.query_solar_system_objects(cadence_mask='all') # doctest: +SKIP
"""
for attr in ["mission", "ra", "dec"]:
if not hasattr(self, "{}".format(attr)):
raise ValueError("Input does not have a `{}` attribute.".format(attr))
location = self.mission.lower()
if isinstance(cadence_mask, str):
if cadence_mask == "outliers":
aper = self.pipeline_mask
if aper.sum() == 0:
aper = "all"
lc = self.to_lightcurve(aperture_mask=aper)
cadence_mask = lc.remove_outliers(sigma=sigma, return_mask=True)[1]
# Avoid searching times with NaN flux; this is necessary because e.g.
# `remove_outliers` includes NaNs in its mask.
cadence_mask &= ~np.isnan(lc.flux)
elif cadence_mask == "all":
cadence_mask = np.ones(len(self.time)).astype(bool)
else:
raise ValueError("invalid `cadence_mask` string argument")
elif isinstance(cadence_mask, collections.abc.Sequence):
cadence_mask = np.array(cadence_mask)
elif isinstance(cadence_mask, (bool)):
# for boundary case of a single element tuple, e.g., (True)
cadence_mask = np.array([cadence_mask])
elif not isinstance(cadence_mask, np.ndarray):
raise ValueError("Pass a cadence_mask method or a cadence_mask")
if (location == "kepler") | (location == "k2"):
pixel_scale = 4
if location == "tess":
pixel_scale = 27
if radius == None:
radius = (
2 ** 0.5 * (pixel_scale * (np.max(self.shape[1:]) + 5))