#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2016.
#
# Author(s):
#
#   Martin Raspaud <martin.raspaud@smhi.se>
#   Esben S. Nielsen <esn@dmi.dk>
#   Panu Lahtinen <panu.lahtinen@fmi.fi>
#   Adam Dybbroe <adam.dybbroe@smhi.se>
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy.  If not, see <http://www.gnu.org/licenses/>.
"""Interface to Eumetcast level 1.5 HRIT/LRIT format. Uses the MIPP reader.
"""
import logging
import os
from fnmatch import fnmatch
from mipp import CalibrationError, ReaderError, xrit
from satpy.dataset import Dataset
from satpy.readers import DatasetDict
from satpy.readers.helper_functions import area_defs_to_extent
from satpy.readers.yaml_reader import FileYAMLReader
from trollsift.parser import globify, parse
LOGGER = logging.getLogger(__name__)
try:
    # Workaround for the on-demand import of pyresample: pyresample depends
    # on scipy.spatial, which leaks memory when imported multiple times.
    IS_PYRESAMPLE_LOADED = False
    from pyresample import geometry
    IS_PYRESAMPLE_LOADED = True
except ImportError:
    LOGGER.warning("pyresample missing. Can only work in satellite projection")


class xRITFile(FileYAMLReader):
    '''Class for reading XRIT data.
    '''

    def __init__(self, config_files, **kwargs):
        super(xRITFile, self).__init__(config_files, **kwargs)
        self.info['filenames'] = []
        self.file_patterns = []
        for file_type in self.config['file_types'].values():
            self.file_patterns.extend(file_type['file_patterns'])

    @property
    def start_time(self):
        return self._start_time

    @property
    def end_time(self):
        return self._end_time

    def load(self, dataset_keys):
        """Load and calibrate *dataset_keys* from the collected xRIT segments."""
        # Sort the collected filenames into prologue, epilogue and image
        # segments, based on the first file pattern.
        image_files = []
        pattern = self.file_patterns[0]
        prologue_file = None
        epilogue_file = None
        for filename in self.info['filenames']:
            try:
                file_info = parse(pattern, os.path.basename(filename))
            except ValueError:
                continue
            if file_info["segment"] == "EPI":
                epilogue_file = filename
            elif file_info["segment"] == "PRO":
                prologue_file = filename
            else:
                image_files.append(filename)

        start_times = set()
        datasets = DatasetDict()
        area_converted_to_extent = False
        area_extent = None
        for ds in dataset_keys:
            channel_files = []
            for filename in image_files:
                file_info = parse(pattern, os.path.basename(filename))
                if file_info["dataset_name"] == ds.name:
                    channel_files.append(filename)
                    start_times.add(file_info['start_time'])

            if not channel_files:
                continue

            kwargs = {}
            if 'platform_name' in self.info:
                kwargs['platform_name'] = self.info['platform_name']

            # Convert area definitions to maximal area_extent
            if not area_converted_to_extent \
                    and self.filter_parameters.get('area') is not None:
                metadata = xrit.sat.load_files(prologue_file,
                                               channel_files,
                                               epilogue_file,
                                               only_metadata=True,
                                               **kwargs)
                # otherwise use the default value (MSG3 extent at
                # lon0=0.0), that is, do not pass default_extent=area_extent
                area_extent = area_defs_to_extent(
                    [self.filter_parameters['area']], metadata.proj4_params)
                area_converted_to_extent = True

            try:
                calibrate = 1
                if ds.calibration == 'counts':
                    calibrate = 0
                elif ds.calibration == 'radiance':
                    calibrate = 2

                image = xrit.sat.load_files(prologue_file,
                                            channel_files,
                                            epilogue_file,
                                            mask=True,
                                            calibrate=calibrate,
                                            **kwargs)
                if area_extent:
                    metadata, data = image(area_extent)
                else:
                    metadata, data = image()
            except CalibrationError:
                LOGGER.warning(
                    "Loading non calibrated data since calibration failed.")
                image = xrit.sat.load_files(prologue_file,
                                            channel_files,
                                            epilogue_file,
                                            mask=True,
                                            calibrate=False,
                                            **kwargs)
                if area_extent:
                    metadata, data = image(area_extent)
                else:
                    metadata, data = image()
            except ReaderError as err:
                # if the dataset can't be found, go on with the next dataset
                LOGGER.error(str(err))
                continue

            if len(metadata.instruments) != 1:
                sensor = None
            else:
                sensor = metadata.instruments[0]

            units = {'ALBEDO(%)': '%',
                     'KELVIN': 'K'}

            standard_names = {'1': 'counts',
                              'W m-2 sr-1 m-1':
                              'toa_outgoing_radiance_per_unit_wavelength',
                              '%': 'toa_bidirectional_reflectance',
                              'K': 'toa_brightness_temperature'}

            unit = units.get(metadata.calibration_unit,
                             metadata.calibration_unit)

            projectable = Dataset(
                data,
                name=ds.name,
                units=unit,
                standard_name=standard_names[unit],
                sensor=sensor,
                start_time=min(start_times),
                id=ds)

            # Build an area on the fly from the mipp metadata
            proj_params = getattr(metadata, "proj4_params").split(" ")
            proj_dict = {}
            for param in proj_params:
                key, val = param.split("=")
                proj_dict[key] = val

            if IS_PYRESAMPLE_LOADED:
                # Build area_def on-the-fly
                projectable.info["area"] = geometry.AreaDefinition(
                    str(metadata.area_extent) + str(data.shape),
                    "On-the-fly area", proj_dict["proj"], proj_dict,
                    data.shape[1], data.shape[0], metadata.area_extent)
            else:
                LOGGER.info("Could not build area, pyresample missing...")

            datasets[ds] = projectable

        return datasets

    def create_filehandlers(self, filenames):
        """Sort *filenames* into known file types and filter them by time."""
        matching_filenames = []

        # Organize filenames into file types and create file handlers
        remaining_filenames = set(filenames)
        start_times = []
        end_times = []
        for filetype, filetype_info in self.config['file_types'].items():
            patterns = filetype_info['file_patterns']
            for pattern in patterns:
                used_filenames = set()
                for filename in remaining_filenames:
                    if fnmatch(os.path.basename(filename), globify(pattern)):
                        # we know how to use this file (even if we may not use
                        # it later)
                        used_filenames.add(filename)
                        filename_info = parse(pattern,
                                              os.path.basename(filename))
                        # Only add this file handler if it is within the time
                        # we want
                        file_start = filename_info['start_time']
                        file_end = filename_info.get('end_time', file_start)
                        if self._start_time and file_start < self._start_time:
                            continue
                        if self._end_time and file_end > self._end_time:
                            continue

                        start_times.append(file_start)
                        end_times.append(file_end)
                        matching_filenames.append(filename)
                        # TODO: Area filtering

                remaining_filenames -= used_filenames

        if matching_filenames:
            # Assign the start time and end time
            self._start_time = min(start_times)
            self._end_time = max(end_times)

        self.info['filenames'] = matching_filenames
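

# A hedged manual-test sketch (not part of the original module): the reader
# configuration path and HRIT segment glob below are hypothetical placeholders
# and would need to point at real files.  Guarded so it never runs on import.
if __name__ == '__main__':
    from glob import glob

    logging.basicConfig(level=logging.DEBUG)
    xrit_reader = xRITFile(['/etc/satpy/readers/msg_hrit.yaml'])
    xrit_reader.create_filehandlers(glob('/data/hrit/H-000-MSG3__-*'))
    # Real DatasetID objects would normally be supplied by satpy's Scene;
    # an empty key list simply returns an empty DatasetDict.
    print(xrit_reader.load([]))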