/
io.py
839 lines (678 loc) · 25.7 KB
/
io.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for supernova light curve I/O"""
import json
import math
import os
from collections import OrderedDict
import numpy as np
from astropy import wcs
from astropy.io import fits
from astropy.table import Table
from .bandpasses import get_bandpass
from .utils import dict_to_array
__all__ = ['read_lc', 'write_lc', 'load_example_data',
'load_example_spectrum_data', 'read_griddata_ascii',
'read_griddata_fits', 'write_griddata_ascii',
'write_griddata_fits']
def _stripcomment(line, char='#'):
    """Return *line* truncated at the first occurrence of *char*.

    If *char* does not appear, the line is returned unchanged.
    """
    idx = line.find(char)
    return line if idx < 0 else line[:idx]
def _cast_str(s):
    """Convert string *s* to int if possible, else float, else return
    the whitespace-stripped string itself."""
    for converter in (int, float):
        try:
            return converter(s)
        except ValueError:
            pass
    return s.strip()
def read_griddata_ascii(name_or_obj):
    """Read 2-d grid data from a text file.

    Each line has values `x0 x1 y`. Space separated.
    x1 values are only read for first x0 value. Others are assumed
    to match.

    Parameters
    ----------
    name_or_obj : str or file-like object

    Returns
    -------
    x0 : numpy.ndarray
        1-d array.
    x1 : numpy.ndarray
        1-d array.
    y : numpy.ndarray
        2-d array of shape (len(x0), len(x1)).
    """
    if isinstance(name_or_obj, str):
        f = open(name_or_obj, 'r')
        close_file = True
    else:
        f = name_or_obj
        # Don't close a file object owned by the caller (matches the
        # behavior of write_griddata_ascii).
        close_file = False

    try:
        x0 = []    # x0 values.
        x1 = None  # x1 values for first x0 value, assume others are the same.
        y = []     # 2-d array of internal values

        x0_current = None
        x1_current = []
        y1_current = []
        for line in f:
            stripped_line = _stripcomment(line)
            if len(stripped_line) == 0:
                continue
            x0_tmp, x1_tmp, y_tmp = map(float, stripped_line.split())
            if x0_current is None:
                x0_current = x0_tmp  # Initialize first time.

            # If there is a new x0 value, ingest the old one and reset values.
            if x0_tmp != x0_current:
                x0.append(x0_current)
                if x1 is None:
                    x1 = x1_current
                y.append(y1_current)
                x0_current = x0_tmp
                x1_current = []
                y1_current = []
            x1_current.append(x1_tmp)
            y1_current.append(y_tmp)

        # Ingest the last x0 value and y1 array.
        x0.append(x0_current)
        if x1 is None:
            # Only one distinct x0 value was present, so x1 was never
            # ingested inside the loop; take it from the accumulated values.
            x1 = x1_current
        y.append(y1_current)
    finally:
        if close_file:
            f.close()

    return np.array(x0), np.array(x1), np.array(y)
def read_multivector_griddata_ascii(name_or_obj):
    """Read 2-d grid data with multiple values per grid point.

    Each line has values ``x0 x1 y0 y1 ...``, space separated, and the
    lines are assumed to cover a full grid of ``(x0, x1)`` points.

    Parameters
    ----------
    name_or_obj : str or file-like object
        The name of the file or a file-like object containing the
        data.

    Returns
    -------
    x0 : numpy.ndarray
        1-d array.
    x1 : numpy.ndarray
        1-d array.
    y : numpy.ndarray
        3-d array of shape ``(n, len(x0), len(x1))`` where ``n`` is
        the number of y values on each line.
    """
    table = np.loadtxt(name_or_obj)
    x0 = np.sort(np.unique(table[:, 0]))
    x1 = np.sort(np.unique(table[:, 1]))

    n_values = table.shape[1] - 2  # columns beyond the two coordinates
    y = np.zeros((n_values, len(x0), len(x1)))
    for i, p in enumerate(x0):
        for j, q in enumerate(x1):
            row = (table[:, 0] == p) & (table[:, 1] == q)
            y[:, i, j] = table[row, 2:]

    return x0, x1, y
def read_griddata_fits(name_or_obj, ext=0):
    """Read a multi-dimensional grid of data from a FITS file, where the
    grid coordinates are encoded in the FITS-WCS header keywords.

    Parameters
    ----------
    name_or_obj : str or file-like object

    Returns
    -------
    x0, x1, ... : `~numpy.ndarray`
        1-d arrays giving coordinates of grid. The number of these arrays will
        depend on the dimension of the data array. For example, if the data
        have two dimensions, a total of three arrays will be returned:
        ``x0, x1, y``, with ``x0`` giving the coordinates of the first axis
        of ``y``. If the data have three dimensions, a total of four arrays
        will be returned: ``x0, x1, x2, y``, and so on with higher dimensions.
    y : `~numpy.ndarray`
        n-d array of shape ``(len(x0), len(x1), ...)``. For three dimensions
        for example, the value at ``y[i, j, k]`` corresponds to coordinates
        ``(x0[i], x1[j], x2[k])``.
    """
    with fits.open(name_or_obj) as hdulist:
        hdu = hdulist[ext]
        world = wcs.WCS(hdu.header)
        y = hdu.data

        # Compute abscissa values (world coordinates of each grid index)
        # one axis at a time.
        axes = []
        for i in range(y.ndim):
            # Python axis i corresponds to FITS axis (ndim - i).
            fits_axis = y.ndim - i
            pix = np.zeros((y.shape[i], y.ndim), dtype=np.float32)
            pix[:, fits_axis - 1] = np.arange(y.shape[i])
            axes.append(world.wcs_pix2world(pix, 0)[:, fits_axis - 1])

    return tuple(axes) + (y,)
def write_griddata_ascii(x0, x1, y, name_or_obj):
    """Write 2-d grid data to a text file.

    Each line has values `x0 x1 y`. Space separated.

    Parameters
    ----------
    x0 : numpy.ndarray
        1-d array.
    x1 : numpy.ndarray
        1-d array.
    y : numpy.ndarray
        2-d array of shape (len(x0), len(x1)).
    name_or_obj : str or file-like object
        Filename to write to or open file.
    """
    own_file = isinstance(name_or_obj, str)
    f = open(name_or_obj, 'w') if own_file else name_or_obj

    for i in range(len(x0)):
        for j in range(len(x1)):
            f.write("{0:.7g} {1:.7g} {2:.7g}\n".format(x0[i], x1[j], y[i, j]))

    # Only close files that this function itself opened.
    if own_file:
        f.close()
def write_griddata_fits(x0, x1, y, name_or_obj):
    """Write a 2-d grid of data to a FITS file

    The grid coordinates are encoded in the FITS-WCS header keywords.

    Parameters
    ----------
    x0 : numpy.ndarray
        1-d array.
    x1 : numpy.ndarray
        1-d array.
    y : numpy.ndarray
        2-d array of shape (len(x0), len(x1)).
    name_or_obj : str or file-like object
        Filename to write to or open file.
    """
    step0 = np.ediff1d(x0)
    step1 = np.ediff1d(x1)
    # FITS-WCS can only describe a uniformly spaced grid.
    if not (np.allclose(step0, step0[0]) and np.allclose(step1, step1[0])):
        raise ValueError('grid must be regularly spaced in both x0 and x1')
    if y.shape != (len(x0), len(x1)):
        raise ValueError('length of x0 and x1 do not match shape of y')

    # Axis order in the WCS is (x1, x0): FITS axis 1 is the fastest-varying
    # (last) numpy axis of y.
    grid_wcs = wcs.WCS(naxis=2)
    grid_wcs.wcs.crpix = [1, 1]
    grid_wcs.wcs.crval = [x1[0], x0[0]]
    grid_wcs.wcs.cdelt = [step1[0], step0[0]]

    fits.PrimaryHDU(y, header=grid_wcs.to_header()).writeto(name_or_obj)
# -----------------------------------------------------------------------------
# Reader: ascii
def _read_ascii(f, delim=None, metachar='@', commentchar='#'):
    """Parse a generic ascii light-curve file.

    Lines whose first non-whitespace character is *metachar* are metadata
    lines of the form ``@key value`` (split on the first space). The first
    remaining line holds the column names; all later lines hold data.
    Anything after *commentchar* on a line is ignored.

    Returns
    -------
    meta : OrderedDict
        Metadata key/value pairs.
    data : OrderedDict
        Mapping of column name -> list of parsed values.
    """
    meta = OrderedDict()
    colnames = []
    cols = []
    in_data = False

    for raw in f:
        # Strip surrounding whitespace/newline, then drop comments.
        line = raw.strip()
        cut = line.find(commentchar)
        if cut > -1:
            line = line[:cut]
        if not line:
            continue

        if not in_data:
            if line[0] == metachar:
                # Metadata line: key and value split on the first space,
                # which must exist and must not immediately follow metachar.
                space = line.find(' ')
                if space in (-1, 1):
                    raise ValueError('Incorrectly formatted metadata line: ' +
                                     line)
                meta[line[1:space]] = _cast_str(line[space:])
                continue

            # The first non-meta line names the columns.
            for name in line.split(delim):
                colnames.append(name.strip())
                cols.append([])
            in_data = True
            continue

        # Data line: one value per column.
        for col, item in zip(cols, line.split(delim)):
            col.append(_cast_str(item))

    return meta, OrderedDict(zip(colnames, cols))
# -----------------------------------------------------------------------------
# Reader: salt2
def _expand_bands(band_list, meta):
    """Given a list containing band names, return a list of Bandpass objects."""
    # TODO: need a way to figure out which bands are position dependent!
    # For now we assume that either *all* bands are position-dependent
    # (focal-plane position present in the metadata) or none are.
    if "X_FOCAL_PLANE" not in meta or "Y_FOCAL_PLANE" not in meta:
        # Position-independent case: get_bandpass returns the same object
        # on each call, so just call it directly for every entry.
        return [get_bandpass(name) for name in band_list]

    # Radial distance in the focal plane.
    radius = math.sqrt(meta["X_FOCAL_PLANE"]**2 + meta["Y_FOCAL_PLANE"]**2)

    # Build each distinct bandpass once, then expand back to the full list.
    cache = {}
    for name in band_list:
        if name not in cache:
            cache[name] = get_bandpass(name, radius)
    return [cache[name] for name in band_list]
def _read_salt2(name_or_obj, read_covmat=False, expand_bands=False):
    """Read a new-style SALT2 file.

    Such a file has metadata on lines starting with '@' and column names
    on lines starting with '#' and containing a ':' after the column name.
    There is optionally a line containing '#end' before the start of data.

    Parameters
    ----------
    name_or_obj : str or file-like object
        Filename or open file containing the photometry.
    read_covmat : bool, optional
        If True and the header defines ``COVMAT``, also read that file
        (resolved relative to the light-curve file) as a 'Fluxcov' column.
    expand_bands : bool, optional
        If True, replace the 'Filter' column entries (band names) with
        Bandpass objects via ``_expand_bands``.

    Returns
    -------
    meta : OrderedDict
        Header metadata.
    data : OrderedDict
        Mapping of column name -> list of values.
    """
    if isinstance(name_or_obj, str):
        f = open(name_or_obj, 'r')
    else:
        f = name_or_obj

    meta = OrderedDict()
    colnames = []
    cols = []
    readingdata = False  # flips to True at the first data line
    for line in f:

        # strip leading & trailing whitespace & newline
        line = line.strip()
        if len(line) == 0:
            continue

        if not readingdata:
            # Read metadata
            if line[0] == '@':
                pos = line.find(' ')  # Find first space.
                if pos in [-1, 1]:  # Space must exist and key must exist.
                    raise ValueError('Incorrectly formatted metadata line: ' +
                                     line)
                meta[line[1:pos]] = _cast_str(line[pos:])
                continue

            # Read header line: '#name :' declares a column; other '#'
            # lines (no ':' right after '#', or '#end') are skipped.
            if line[0] == '#':
                pos = line.find(':')
                if pos in [-1, 1]:
                    continue  # comment line
                colname = line[1:pos].strip()
                if colname == 'end':
                    continue
                colnames.append(colname)
                cols.append([])
                continue

            # If the first non-whitespace character is not '@' or '#',
            # assume the line is the first data line.
            readingdata = True

        # strip comments
        pos = line.find('#')
        if pos > -1:
            line = line[:pos]
        if len(line) == 0:
            continue

        # Now we're reading data: one whitespace-separated value per column.
        items = line.split()
        for col, item in zip(cols, items):
            col.append(_cast_str(item))

    if isinstance(name_or_obj, str):
        f.close()

    # read covariance matrix file, if requested and present
    if read_covmat and 'COVMAT' in meta:
        # NOTE(review): this relies on f.name, which exists for real files
        # (including the already-closed one opened above) but not for
        # in-memory file-like objects such as StringIO — confirm callers
        # only combine read_covmat with on-disk files.
        fname = os.path.join(os.path.dirname(f.name), meta['COVMAT'])

        # use skiprows=1 because first row has array dimensions
        fluxcov = np.loadtxt(fname, skiprows=1)

        # aesthetics: capitalize 'Fluxcov' to match salt2 colnames
        # such as 'Fluxerr'
        colnames.append('Fluxcov')
        cols.append(fluxcov)

    data = OrderedDict(zip(colnames, cols))

    if expand_bands:
        data['Filter'] = _expand_bands(data['Filter'], meta)

    return meta, data
# -----------------------------------------------------------------------------
# Reader: salt2-old
def _read_salt2_old(dirname, filenames=None):
    """Read old-style SALT2 files from a directory.

    A file named 'lightfile' must exist in the directory; it supplies the
    global metadata. Every other file (or only those in *filenames*, if
    given) is read as a per-band photometry file and the results are
    concatenated into a single (meta, data) pair.
    """
    # Get list of files in directory.
    if not (os.path.exists(dirname) and os.path.isdir(dirname)):
        raise IOError("Not a directory: '{0}'".format(dirname))
    dirfilenames = os.listdir(dirname)

    # Read metadata from lightfile: one "key value" pair per line.
    if 'lightfile' not in dirfilenames:
        raise IOError("no lightfile in directory: '{0}'".format(dirname))
    with open(os.path.join(dirname, 'lightfile'), 'r') as lightfile:
        meta = OrderedDict()
        for line in lightfile.readlines():
            line = line.strip()
            if len(line) == 0:
                continue
            try:
                key, val = line.split()
            except ValueError:
                raise ValueError('expected space-separated key value pairs in '
                                 'lightfile: {0}'
                                 .format(os.path.join(dirname, 'lightfile')))
            meta[key] = _cast_str(val)

    # Get list of filenames to read.
    if filenames is None:
        filenames = dirfilenames
    # NOTE(review): when *filenames* is supplied by the caller, the
    # remove() below mutates the caller's list in place.
    if 'lightfile' in filenames:
        filenames.remove('lightfile')  # We already read the lightfile.
    fullfilenames = [os.path.join(dirname, f) for f in filenames]

    # Read data from files.
    data = None
    for fname in fullfilenames:
        with open(fname, 'r') as f:
            filemeta, filedata = _read_salt2(f)

        # Check that all necessary file metadata was defined.
        if not ('INSTRUMENT' in filemeta and 'BAND' in filemeta and
                'MAGSYS' in filemeta):
            raise ValueError('not all necessary global keys (INSTRUMENT, '
                             'BAND, MAGSYS) are defined in file {0}'
                             .format(fname))

        # Add the instrument/band to the file data, in anticipation of
        # aggregating it with other files.
        firstcol = next(iter(filedata.values()))
        data_length = len(firstcol)
        filter_name = '{0}::{1}'.format(filemeta.pop('INSTRUMENT'),
                                        filemeta.pop('BAND'))
        filedata['Filter'] = data_length * [filter_name]
        filedata['MagSys'] = data_length * [filemeta.pop('MAGSYS')]

        # If this is the first file, initialize data lists, otherwise if keys
        # match, append this file's data to the main data.
        if data is None:
            data = filedata
        elif set(filedata.keys()) == set(data.keys()):
            for key in data:
                data[key].extend(filedata[key])
        else:
            raise ValueError('column names do not match between files')

        # Append any extra metadata in this file to the master metadata,
        # keyed by the band's "INSTRUMENT::BAND" name.
        if len(filemeta) > 0:
            meta[filter_name] = filemeta

    return meta, data
# -----------------------------------------------------------------------------
# Reader: json
def _read_json(f):
    """Read light curve data from a JSON file with 'meta' and 'data' keys.

    Parameters
    ----------
    f : file-like object
        Open file containing a JSON object with 'meta' and 'data' members.

    Returns
    -------
    meta : dict
        The file's 'meta' mapping.
    data : dict
        Shallow copy of the file's 'data' mapping
        (column name -> list of values).
    """
    t = json.load(f)

    # Return a shallow copy of the data mapping so the caller can add
    # columns without aliasing the parsed document. (The previous
    # key-by-key copy loop was a Python 2 vestige for coercing unicode
    # keys to str; it is a no-op on Python 3.)
    return t['meta'], dict(t['data'])
# -----------------------------------------------------------------------------
# All readers

# Dispatch table used by `read_lc`: format name -> reader function.
# Each reader returns a ``(meta, data)`` pair.
READERS = {'ascii': _read_ascii,
           'json': _read_json,
           'salt2': _read_salt2,
           'salt2-old': _read_salt2_old}
def read_lc(file_or_dir, format='ascii', **kwargs):
    """Read light curve data for a single supernova.

    Parameters
    ----------
    file_or_dir : str
        Filename (formats 'ascii', 'json', 'salt2') or directory name
        (format 'salt2-old'). For 'salt2-old' format, directory must contain
        a file named 'lightfile'. All other files in the directory are
        assumed to be photometry files, unless the `filenames` keyword
        argument is set.
    format : {'ascii', 'json', 'salt2', 'salt2-old'}, optional
        Format of file. Default is 'ascii'. 'salt2' is the new format
        available in snfit version >= 2.3.0.
    read_covmat : bool, optional
        **[salt2 only]** If True, and if a ``COVMAT`` keyword is present in
        header, read the covariance matrix from the filename specified
        by ``COVMAT`` (assumed to be in the same directory as the lightcurve
        file) and include it as a column named ``Fluxcov`` in the returned
        table. Default is False.

        *New in version 1.5.0*
    expand_bands : bool, optional
        **[salt2 only]** If True, convert band names into equivalent Bandpass
        objects. This is particularly useful for position-dependent
        bandpasses in the salt2 file format: the position information is
        read from the header and used when creating the bandpass objects.

        *New in version 1.5.0*
    delim : str, optional
        **[ascii only]** Used to split entries on a line. Default is `None`.
        Extra whitespace is ignored.
    metachar : str, optional
        **[ascii only]** Lines whose first non-whitespace character is
        `metachar` are treated as metadata lines, where the key and value
        are split on the first whitespace. Default is ``'@'``
    commentchar : str, optional
        **[ascii only]** One-character string indicating a comment. Default
        is '#'.
    filenames : list, optional
        **[salt2-old only]** Only try to read the given filenames as
        photometry files. Default is to try to read all files in directory.

    Returns
    -------
    t : astropy `~astropy.table.Table`
        Table of data. Metadata (as an `OrderedDict`) can be accessed via
        the ``t.meta`` attribute. For example: ``t.meta['key']``. The key
        is case-sensitive.

    Examples
    --------
    Read an ascii format file that includes metadata (``StringIO``
    behaves like a file object):

    >>> from io import StringIO
    >>> f = StringIO('''
    ... @id 1
    ... @RA 36.0
    ... @description good
    ... time band flux fluxerr zp zpsys
    ... 50000. g 1. 0.1 25. ab
    ... 50000.1 r 2. 0.1 25. ab
    ... ''')
    >>> t = read_lc(f, format='ascii')
    >>> print(t)
      time  band flux fluxerr  zp  zpsys
    ------- ---- ---- ------- ---- -----
    50000.0    g  1.0     0.1 25.0    ab
    50000.1    r  2.0     0.1 25.0    ab
    >>> t.meta
    OrderedDict([('id', 1), ('RA', 36.0), ('description', 'good')])
    """
    if format not in READERS:
        raise ValueError("Reader not defined for format {0!r}. Options: "
                         .format(format) + ", ".join(READERS.keys()))
    readfunc = READERS[format]

    if format == 'salt2-old':
        # This reader takes the directory name directly.
        meta, data = readfunc(file_or_dir, **kwargs)
    elif isinstance(file_or_dir, str):
        with open(file_or_dir, 'r') as f:
            meta, data = readfunc(f, **kwargs)
    else:
        # Caller passed an already-open file-like object.
        meta, data = readfunc(file_or_dir, **kwargs)

    return Table(data, meta=meta)
# =========================================================================== #
# Writers #
# =========================================================================== #
# -----------------------------------------------------------------------------
# Writer: ascii
def _write_ascii(f, data, meta, **kwargs):
    """Write a structured array *data* plus *meta* as delimited text.

    Metadata lines come first ('<metachar><key><delim><value>'), then a
    header line of column names, then one line per row.
    """
    delim = kwargs.get('delim', ' ')
    metachar = kwargs.get('metachar', '@')

    if meta is not None:
        for key, val in meta.items():
            f.write('{0}{1}{2}{3}\n'.format(metachar, key, delim, str(val)))

    names = data.dtype.names
    f.write(delim.join(names))
    f.write('\n')
    for row in range(len(data)):
        f.write(delim.join(str(data[name][row]) for name in names))
        f.write('\n')
# -----------------------------------------------------------------------------
# Writer: salt2

# Metadata-key renames applied by the salt2 writer (after upper-casing)
# so header keys match what snfit expects.
KEY_TO_SALT2KEY_META = {
    'Z': 'REDSHIFT',  # Not sure if this is used.
    'Z_HELIOCENTRIC': 'Z_HELIO',
    'MAGSYS': 'MagSys',
    'Z_SOURCE': 'z_source'}

# Column-name renames applied by the salt2 writer (after capitalizing)
# so column names match what snfit expects.
KEY_TO_SALT2KEY_COLUMN = {
    'Mjd': 'Date',
    'Time': 'Date',
    'Flux': 'FluxPsf',
    'Fluxpsf': 'FluxPsf',
    'Fluxerr': 'FluxPsferr',
    'Fluxpsferr': 'FluxPsferr',
    'Airmass': 'AirMass',
    'Zp': 'ZP',
    'Zpsys': 'MagSys',
    'Magsys': 'MagSys',
    'Band': 'Filter'}
def _write_salt2(f, data, meta, **kwargs):
    """Write *data* and *meta* in the new-style SALT2 format.

    Metadata lines start with '@'; column declarations are '#name :'
    lines terminated by '#end :'; data rows are space separated. Unless
    *raw* is set, keys and column names are renamed to what snfit expects.
    """
    raw = kwargs.get('raw', False)
    pedantic = kwargs.get('pedantic', True)

    # Header section.
    if meta is not None:
        for key, val in meta.items():
            if not raw:
                key = KEY_TO_SALT2KEY_META.get(key.upper(), key.upper())
            f.write('@{0} {1}\n'.format(key, str(val)))

    names = data.dtype.names

    # Column declarations (possibly renamed).
    written = []
    for name in names:
        if not raw:
            name = KEY_TO_SALT2KEY_COLUMN.get(name.capitalize(),
                                              name.capitalize())
        f.write('#{0} :\n'.format(name))
        written.append(name)
    f.write('#end :\n')

    # Check that necessary fields exist.
    if pedantic and not ('Filter' in written and 'MagSys' in written):
        raise ValueError('photometry data missing required some fields '
                         ': Filter, MagSys')

    # Data rows (in the original column order).
    for row in range(len(data)):
        f.write(' '.join(str(data[name][row]) for name in names))
        f.write('\n')
# -----------------------------------------------------------------------------
# Writer: snana

# Column-name renames applied by the snana writer (after upper-casing)
# so column names match what SNANA expects.
KEY_TO_SNANAKEY_COLUMN = {
    'TIME': 'MJD',
    'DATE': 'MJD',
    'FILTER': 'FLT',
    'BAND': 'FLT',
    'FLUX': 'FLUXCAL',
    'FLUXERR': 'FLUXCALERR',
    'ZP': 'ZPT',
    'ZEROPOINT': 'ZPT'}

# Metadata-key renames applied by the snana writer (after upper-casing).
KEY_TO_SNANAKEY_META = {
    'DEC': 'DECL'}

# Keys/columns that must be present (post-rename) when pedantic=True.
SNANA_REQUIRED_META = ['RA', 'DECL', 'SURVEY', 'FILTERS', 'MWEBV']
SNANA_REQUIRED_COLUMN = ['MJD', 'FLT', 'FLUXCAL', 'FLUXCALERR', 'ZPT']
def _write_snana(f, data, meta, **kwargs):
    """Write *data* and *meta* in SNANA "terse" light-curve format.

    Metadata lines are 'KEY: value'; data rows are 'OBS: ...' lines
    following an NOBS/NVAR/VARLIST header. Unless *raw* is set, keys and
    column names are renamed to what SNANA expects.
    """
    raw = kwargs.get('raw', False)
    pedantic = kwargs.get('pedantic', True)

    # Write metadata.
    written_meta = []
    if meta is not None:
        for key, val in meta.items():
            if not raw:
                key = KEY_TO_SNANAKEY_META.get(key.upper(), key.upper())
            f.write('{0}: {1}\n'.format(key, str(val)))
            written_meta.append(key)

    # Check that necessary metadata was written.
    if pedantic:
        for key in SNANA_REQUIRED_META:
            if key not in written_meta:
                raise ValueError('Missing required metadata kw: ' + key)

    # Column names and data length.
    names = data.dtype.names
    nobs = len(data)

    # Convert column names.
    out_names = []
    for name in names:
        if not raw:
            name = KEY_TO_SNANAKEY_COLUMN.get(name.upper(), name.upper())
        out_names.append(name)

    # Check that necessary column names are included.
    if pedantic:
        for name in SNANA_REQUIRED_COLUMN:
            if name not in out_names:
                raise ValueError('Missing required column name: ' + name)

    # Write the header.
    f.write('\n'
            '# ==========================================\n'
            '# TERSE LIGHT CURVE OUTPUT:\n'
            '#\n'
            'NOBS: {0:d}\n'
            'NVAR: {1:d}\n'
            'VARLIST: {2}\n'
            .format(nobs, len(names), ' '.join(out_names)))

    # Write data (values come from the original, un-renamed columns).
    for row in range(nobs):
        f.write('OBS: ')
        f.write(' '.join(str(data[name][row]) for name in names))
        f.write('\n')
# -----------------------------------------------------------------------------
# Writer: json
def _write_json(f, data, meta, **kwargs):
    """Serialize *meta* and *data* (a structured array) as a JSON object
    with 'meta' and 'data' members; column values become plain lists."""
    payload = OrderedDict()
    payload['meta'] = meta
    payload['data'] = OrderedDict(
        (name, data[name].tolist()) for name in data.dtype.names)
    json.dump(payload, f)
# -----------------------------------------------------------------------------
# All writers

# Dispatch table used by `write_lc`: format name -> writer function.
# Each writer has signature (f, data, meta, **kwargs).
WRITERS = {'ascii': _write_ascii,
           'salt2': _write_salt2,
           'snana': _write_snana,
           'json': _write_json}
def write_lc(data, fname, format='ascii', **kwargs):
    """Write light curve data.

    Parameters
    ----------
    data : `~astropy.table.Table`
        Light curve data.
    fname : str
        Filename.
    format : {'ascii', 'salt2', 'snana', 'json'}, optional
        Format of file. Default is 'ascii'. 'salt2' is the new format
        available in snfit version >= 2.3.0.
    delim : str, optional
        **[ascii only]** Character used to separate entries on a line.
        Default is ' '.
    metachar : str, optional
        **[ascii only]** Metadata designator. Default is '@'.
    raw : bool, optional
        **[salt2, snana]** By default, the SALT2 and SNANA writers rename
        some metadata keys and column names in order to comply with what
        snfit and SNANA expect. Set to True to override this.
        Default is False.
    pedantic : bool, optional
        **[salt2, snana]** If True, check that output column names and header
        keys comply with expected formatting, and raise a ValueError if not.
        It is probably a good idea to set to False when raw is True.
        Default is True.
    """
    if format not in WRITERS:
        raise ValueError("Writer not defined for format {0!r}. Options: "
                         .format(format) + ", ".join(WRITERS.keys()))

    # Normalize input to (structured ndarray, metadata dict).
    if isinstance(data, Table):
        meta = data.meta
        payload = np.asarray(data)
    else:
        meta = OrderedDict()
        payload = data if isinstance(data, np.ndarray) else dict_to_array(data)

    with open(fname, 'w') as f:
        WRITERS[format](f, payload, meta, **kwargs)
def load_example_data():
    """
    Load an example photometric data table.

    Returns
    -------
    data : `~astropy.table.Table`
    """
    # Local import: astropy.utils.data is only needed here.
    from astropy.utils.data import get_pkg_data_filename

    path = get_pkg_data_filename('data/examples/example_photometric_data.dat')
    return read_lc(path, format='ascii')
def load_example_spectrum_data():
    """
    Load example spectrum data.

    Returns
    -------
    wave : `~numpy.array`
        Wavelengths of each spectral bin
    flux : `~numpy.array`
        Flux in each spectral bin
    fluxerr : `~numpy.array`
        Flux error in each spectral bin
    """
    # Local import: astropy.utils.data is only needed here.
    from astropy.utils.data import get_pkg_data_filename

    path = get_pkg_data_filename('data/examples/example_spectrum.dat')
    # File columns are (wave, flux, fluxerr); transpose to unpack as rows.
    return np.genfromtxt(path).T