Recipes have been isolated
Pica authored and sergiopasra committed Nov 19, 2015
1 parent 0ce7b59 commit 2130091
Showing 5 changed files with 95 additions and 197 deletions.
120 changes: 34 additions & 86 deletions megaradrp/recipes/calibration/arc.py
@@ -17,47 +17,39 @@
# along with Megara DRP. If not, see <http://www.gnu.org/licenses/>.
#

'''Calibration Recipes for Megara'''
"""Arc Calibration Recipe for Megara"""

from __future__ import division, print_function

import logging

import numpy
from astropy.io import fits
from scipy.interpolate import interp1d

from numina.core import Requirement, Product, Parameter
from numina.core import DataFrameType
from numina.core.products import ArrayType
from numina.core.requirements import ObservationResultRequirement
from numina.array.combine import median as c_median
from numina.flow import SerialFlow
from numina.flow.processing import BiasCorrector

# For WL calibration
# FIXME: remove this later
from numina.core.products import LinesCatalog
from scipy.interpolate import interp1d
from numina.array.wavecal.arccalibration import arccalibration_direct
from numina.array.wavecal.arccalibration import fit_solution
from numina.array.wavecal.arccalibration import gen_triplets_master
from numina.array.wavecal.statsummary import sigmaG
# FIXME: still using findpeaks1D functions
from numina.array.peaks.findpeaks1D import findPeaks_spectrum
from numina.array.peaks.findpeaks1D import refinePeaks_spectrum

from megaradrp.core import MegaraBaseRecipe
from megaradrp.processing import OverscanCorrector, TrimImage

from megaradrp.products import TraceMap
from megaradrp.requirements import MasterBiasRequirement

from megaradrp.core import apextract_tracemap
from megaradrp.core.processing import apextract_tracemap

_logger = logging.getLogger('numina.recipes.megara')


class ArcCalibrationRecipe(MegaraBaseRecipe):
'''Process ARC images and create WL_CALIBRATION.'''
"""Process ARC images and create WL_CALIBRATION."""

# Requirements
obresult = ObservationResultRequirement()
@@ -71,13 +63,11 @@ class ArcCalibrationRecipe(MegaraBaseRecipe):
wlcalib = Product(ArrayType)

def __init__(self):
super(ArcCalibrationRecipe, self).__init__(
version="0.1.0"
)
super(ArcCalibrationRecipe, self).__init__("0.1.0")

def run(self, rinput):
# Basic processing
reduced = self.process_common(rinput.obresult, rinput.master_bias)
reduced = self.bias_process_common(rinput.obresult, rinput.master_bias)

_logger.info('extract fibers')
rssdata = apextract_tracemap(reduced[0].data, rinput.tracemap)
@@ -93,12 +83,12 @@ def run(self, rinput):
wlcalib=coeff_table)

def calibrate_wl(self, rss, lines_catalog, poldeg, times_sigma=50.0):
#
#
# read master table (TBM) and generate auxiliary parameters (valid for
# all the slits) for the wavelength calibration
wv_master = lines_catalog[:,0]
wv_master = lines_catalog[:, 0]
ntriplets_master, ratios_master_sorted, triplets_master_sorted_list = \
gen_triplets_master(wv_master)
gen_triplets_master(wv_master)
# FIXME: this depends on the spectral and dispersion axes
nspec = rss.shape[0]
coeff_table = numpy.zeros((nspec, poldeg + 1))
@@ -107,19 +97,16 @@ def calibrate_wl(self, rss, lines_catalog, poldeg, times_sigma=50.0):
for idx, row in enumerate(rss):
_logger.info('Starting row %d', idx)
# find peaks (initial search providing integer numbers)
threshold = numpy.median(row)+times_sigma*sigmaG(row)
ipeaks_int = findPeaks_spectrum(row, nwinwidth=nwinwidth,
data_threshold=threshold)
# refine peaks fitting an appropriate function (providing float
threshold = numpy.median(row) + times_sigma * sigmaG(row)
ipeaks_int = findPeaks_spectrum(row, nwinwidth, threshold)
# refine peaks fitting an appropriate function (providing float
# numbers)
ipeaks_float = refinePeaks_spectrum(row, ipeaks_int, nwinwidth,
method=2)


# define interpolation function and interpolate the refined peak
# location, passing from index number (within the row array)
# to channel number (note that this step takes care of the fact
# that the extracted spectrum may correspond to a subregion in the
ipeaks_float = refinePeaks_spectrum(row, ipeaks_int, nwinwidth)

# define interpolation function and interpolate the refined peak
# location, passing from index number (within the row array)
# to channel number (note that this step takes care of the fact
# that the extracted spectrum may correspond to a subregion in the
# spectral direction)

# FIXME: xchannel ???
Expand All @@ -128,7 +115,7 @@ def calibrate_wl(self, rss, lines_catalog, poldeg, times_sigma=50.0):
naxis1 = row.shape[0]
xchannel = numpy.arange(1, naxis1 + 1)

finterp_channel = interp1d(range(xchannel.size), xchannel,
finterp_channel = interp1d(range(xchannel.size), xchannel,
kind='linear')
xpeaks_refined = finterp_channel(ipeaks_float)

@@ -139,7 +126,7 @@ def calibrate_wl(self, rss, lines_catalog, poldeg, times_sigma=50.0):
triplets_master_sorted_list,
xpeaks_refined,
naxis1,
wv_ini_search=3500,
wv_ini_search=3500,
wv_end_search=4500,
error_xpos_arc=2.0,
times_sigma_r=3.0,
@@ -148,64 +135,25 @@ def calibrate_wl(self, rss, lines_catalog, poldeg, times_sigma=50.0):
poly_degree_wfit=2,
times_sigma_polfilt=10.0,
times_sigma_inclusion=5.0)

_logger.info('Solution for row %d completed', idx)
_logger.info('Fitting solution for row %d', idx)
numpy_array_with_coeff, crval1_approx, cdelt1_approx = \
fit_solution(wv_master,
xpeaks_refined,
solution,
poly_degree_wfit=2,
weighted=False)
fit_solution(wv_master,
xpeaks_refined,
solution,
poly_degree_wfit=2,
weighted=False)

_logger.info('approximate crval1, cdelt1: %f %f',
crval1_approx,cdelt1_approx)
_logger.info('fitted coefficients %s',numpy_array_with_coeff)
crval1_approx,
cdelt1_approx)

_logger.info('fitted coefficients %s', numpy_array_with_coeff)

coeff_table[idx] = numpy_array_with_coeff

except TypeError as error:
_logger.error("%s", error)

return coeff_table


def process_common(self, obresult, master_bias):
_logger.info('starting prereduction')

o_c = OverscanCorrector()
t_i = TrimImage()

with master_bias.open() as hdul:
mbias = hdul[0].data.copy()
b_c = BiasCorrector(mbias)

basicflow = SerialFlow([o_c, t_i, b_c])

cdata = []

try:
for frame in obresult.images:
hdulist = frame.open()
hdulist = basicflow(hdulist)
cdata.append(hdulist)

_logger.info('stacking %d images using median', len(cdata))

data = c_median([d[0].data for d in cdata], dtype='float32')
template_header = cdata[0][0].header
hdu = fits.PrimaryHDU(data[0], header=template_header)
finally:
for hdulist in cdata:
hdulist.close()

hdr = hdu.header
hdr['IMGTYP'] = ('FIBER_FLAT', 'Image type')
hdr['NUMTYP'] = ('MASTER_FIBER_FLAT', 'Data product type')
hdr = self.set_base_headers(hdr)
hdr['CCDMEAN'] = data[0].mean()

varhdu = fits.ImageHDU(data[1], name='VARIANCE')
num = fits.ImageHDU(data[2], name='MAP')
result = fits.HDUList([hdu, varhdu, num])

_logger.info('prereduction ended')

return result
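
The pre-reduction helper deleted above now appears to live in shared code, since run() calls it as self.bias_process_common(). A minimal sketch of what that shared helper presumably does, reconstructed from the process_common body removed from arc.py; its exact location in MegaraBaseRecipe and its signature are assumptions inferred from the call site.

# Hypothetical sketch: reconstruction of the shared pre-reduction helper that
# run() now calls as self.bias_process_common(); the real method presumably
# lives in MegaraBaseRecipe and is not shown in this diff.
from astropy.io import fits
from numina.array.combine import median as c_median
from numina.flow import SerialFlow
from numina.flow.processing import BiasCorrector

from megaradrp.processing import OverscanCorrector, TrimImage


def bias_process_common(obresult, master_bias):
    """Overscan-correct, trim, bias-correct and median-stack the raw frames."""
    o_c = OverscanCorrector()
    t_i = TrimImage()

    with master_bias.open() as hdul:
        b_c = BiasCorrector(hdul[0].data.copy())

    basicflow = SerialFlow([o_c, t_i, b_c])

    cdata = []
    try:
        for frame in obresult.images:
            cdata.append(basicflow(frame.open()))

        # c_median returns a (data, variance, map) stack
        data = c_median([d[0].data for d in cdata], dtype='float32')
        hdu = fits.PrimaryHDU(data[0], header=cdata[0][0].header)
    finally:
        for hdulist in cdata:
            hdulist.close()

    varhdu = fits.ImageHDU(data[1], name='VARIANCE')
    num = fits.ImageHDU(data[2], name='MAP')
    return fits.HDUList([hdu, varhdu, num])

Returning an HDUList is consistent with run(), which reads the stacked image as reduced[0].data before extracting the fibers.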
2 changes: 1 addition & 1 deletion megaradrp/recipes/calibration/base.py
@@ -97,7 +97,7 @@ class S_And_E_FromStdStarsRecipe(MegaraBaseRecipe):
traces = Product(ArrayType)

def __init__(self):
super(SensitivityFromStdStarRecipe, self).__init__(
super(S_And_E_FromStdStarsRecipe, self).__init__(
version="0.1.0"
)

58 changes: 27 additions & 31 deletions megaradrp/recipes/calibration/bias.py
@@ -1,21 +1,37 @@
#
# Copyright 2014-2015 Universidad Complutense de Madrid
#
# This file is part of Megara DRP
#
# Megara DRP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Megara DRP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Megara DRP. If not, see <http://www.gnu.org/licenses/>.
#

import logging

from astropy.io import fits

from megaradrp.core import MegaraBaseRecipe
from megaradrp.processing import OverscanCorrector, TrimImage
from megaradrp.products import MasterBias

from numina.array.combine import median as c_median
from numina.core import Product, RecipeError
from numina.core.requirements import ObservationResultRequirement
from numina.flow import SerialFlow

from megaradrp.core import MegaraBaseRecipe
from megaradrp.products import MasterBias

_logger = logging.getLogger('numina.recipes.megara')


class BiasRecipe(MegaraBaseRecipe):
'''Process BIAS images and create MASTER_BIAS.'''
"""Process BIAS images and create MASTER_BIAS."""

obresult = ObservationResultRequirement()
biasframe = Product(MasterBias)
@@ -24,33 +40,13 @@ def __init__(self):
super(BiasRecipe, self).__init__(version="0.1.0")

def run(self, rinput):
return self.process(rinput.obresult)

def process(self, obresult):
_logger.info('starting bias reduction')

if not obresult.images:
if not rinput.obresult.images:
raise RecipeError('Frame list is empty')

cdata = []
o_c = OverscanCorrector()
t_i = TrimImage()
basicflow = SerialFlow([o_c, t_i])

try:
for frame in obresult.images:
hdulist = frame.open()
hdulist = basicflow(hdulist)
cdata.append(hdulist)

_logger.info('stacking %d images using median', len(cdata))

data = c_median([d[0].data for d in cdata], dtype='float32')
template_header = cdata[0][0].header
hdu = fits.PrimaryHDU(data[0], header=template_header)
finally:
for hdulist in cdata:
hdulist.close()
hdu, data = self.hdu_creation(rinput.obresult)

hdr = hdu.header
hdr = self.set_base_headers(hdr)
Expand All @@ -63,5 +59,5 @@ def process(self, obresult):
hdulist = fits.HDUList([hdu, varhdu, num])
_logger.info('bias reduction ended')

result = self.create_result(biasframe=hdu)
return result
result = self.create_result(biasframe=hdulist)
return result
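
As in arc.py, the inline stacking loop removed here is replaced by a call to self.hdu_creation(). A minimal sketch of that helper, reconstructed from the deleted inline code; its home in MegaraBaseRecipe and its return convention are assumptions inferred from the call site above.

# Hypothetical sketch of the hdu_creation() helper called above; presumably a
# MegaraBaseRecipe method, reconstructed from the loop deleted from bias.py.
from astropy.io import fits
from numina.array.combine import median as c_median
from numina.flow import SerialFlow

from megaradrp.processing import OverscanCorrector, TrimImage


def hdu_creation(obresult):
    """Overscan-correct, trim and median-stack the frames of an observation."""
    basicflow = SerialFlow([OverscanCorrector(), TrimImage()])

    cdata = []
    try:
        for frame in obresult.images:
            cdata.append(basicflow(frame.open()))

        # data holds the stacked image plus variance and map planes
        data = c_median([d[0].data for d in cdata], dtype='float32')
        hdu = fits.PrimaryHDU(data[0], header=cdata[0][0].header)
    finally:
        for hdulist in cdata:
            hdulist.close()

    # the call site unpacks both the primary HDU and the combined data cube,
    # which the recipe uses for the VARIANCE/MAP extensions and CCDMEAN
    return hdu, data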
