Commit d0cd646
Merge 9e8ca14 into 271dc79
ericpre committed Mar 22, 2018
2 parents 271dc79 + 9e8ca14 commit d0cd646
Showing 11 changed files with 232 additions and 117 deletions.
58 changes: 33 additions & 25 deletions appveyor.yml
@@ -19,30 +19,34 @@ environment:
PYTHON_ARCH: "32"
CONDA_PY: "35"
CONDA_NPY: "19"
WP_URL: 'https://github.com/winpython/winpython/releases/download/1.3.20160209/WinPython-32bit-3.5.1.2.exe'
WP_CRC: '172d19a743ccfaf55af779d15f29f67fca83a46f08b0af855dfaf809b4184c0d'
DEPS: "numpy scipy matplotlib ipython h5py sympy scikit-learn dill setuptools natsort scikit-image cython ipyparallel dask"
WP_URL: 'https://github.com/winpython/winpython/releases/download/1.9.20171031/WinPython-32bit-3.5.4.1Qt5.exe'
WP_CRC: 'e58b0c86fc4e6ae4fe3f9f467008fd4e3447b5f35b7ad689ab01cdc93733d19e'
DEPS: "numpy scipy matplotlib ipython h5py sympy scikit-learn dill setuptools natsort scikit-image cython lxml ipyparallel dask"

- PYTHON: "C:\\Miniconda35-x64"
PYTHON_VERSION: "3.5.x"
PYTHON_MAJOR: 3
PYTHON_ARCH: "64"
CONDA_PY: "35"
CONDA_NPY: "19"
WP_URL: 'https://github.com/winpython/winpython/releases/download/1.3.20160209/WinPython-64bit-3.5.1.2.exe'
WP_CRC: '07e854b9aa7a31d8bbf7829d04a45b6d6266603690520e365199af2d98751ab1'
DEPS: "numpy scipy matplotlib ipython h5py sympy scikit-learn dill setuptools natsort scikit-image cython ipyparallel dask"
WP_URL: 'https://github.com/winpython/winpython/releases/download/1.9.20171031/WinPython-64bit-3.5.4.1Qt5.exe'
WP_CRC: 'e522c8adfbd9c967fa2f692d3c313fec1f0e53724b4651ea9e969228532a9586'
DEPS: "numpy scipy matplotlib ipython h5py sympy scikit-learn dill setuptools natsort scikit-image cython lxml ipyparallel dask"

- PYTHON: "C:\\Miniconda36"
PYTHON_VERSION: "3.6.x"
PYTHON_MAJOR: 3
PYTHON_ARCH: "32"
WP_URL: 'https://github.com/winpython/winpython/releases/download/1.9.20171031/WinPython-32bit-3.6.3.0Qt5.exe'
WP_CRC: '56259865127c3f802f1eca29528f2676317cad2fd6573ce516fc35b4441a1d3c'
CONDA_PY: "36"
DEPS: "numpy scipy matplotlib ipython h5py sympy scikit-learn dill setuptools natsort scikit-image cython ipyparallel dask"

- PYTHON: "C:\\Miniconda36-x64"
PYTHON_VERSION: "3.6.x"
PYTHON_MAJOR: 3
WP_URL: 'https://github.com/winpython/winpython/releases/download/1.9.20171031/WinPython-64bit-3.6.3.0Qt5.exe'
WP_CRC: 'a5efea23ede143fdacab60b8db95835de25c0173bd0d9fd53d5952648bde69b9'
PYTHON_ARCH: "64"
CONDA_PY: "36"
DEPS: "numpy scipy matplotlib ipython h5py sympy scikit-learn dill setuptools natsort scikit-image cython ipyparallel dask"
@@ -72,8 +76,7 @@ install:
# Having 'sip' folder on path confuses import of `sip`.
#- "%CMD_IN_ENV% conda install -yq pip"
- "pip install pytest-mpl blosc"
# TODO: Remove once anaconda traitsui package is at v5:
- "IF \"%PYTHON_MAJOR%\" EQU \"3\" pip install --upgrade traitsui tqdm"

- ps: Add-AppveyorMessage "Installing hyperspy..."
- "python setup.py install"

@@ -103,7 +106,7 @@ on_failure:
before_deploy:
- ps: Add-AppveyorMessage "Running deployment step..."
- "pip install winpython"
- "pip install https://github.com/hyperspy/hyperspy-bundle/archive/master.zip"
- "pip install https://github.com/ericpre/hyperspy-bundle/archive/fix_get_current_hyperspy_version.zip"
# Download WinPython installer if not cached
- ps: Add-AppveyorMessage "Installing WinPython..."
- "SET WP_INSTDIR=%APPDATA%\\wpdir\\WinPython-%PYTHON_ARCH%bit\\"
@@ -134,21 +137,25 @@ before_deploy:
- "%CMD_IN_ENV% %WP_INSTDIR%/scripts/env.bat"
# Give info about python version and compiler used to compile the python
- "%CMD_IN_ENV% python.exe -c \"import sys; print(sys.version)\""
# Install scikit-image from Christoph Gohlke binaries repository
- cinst wget
- ps: Add-AppveyorMessage "Downloading scikit-image..."
- ps: if($Env:PYTHON_ARCH -eq "64") {$Env:SCIKIT_IMAGE="https://www.dropbox.com/s/1xn8mgudmtph19i/scikit_image-0.13.0-cp35-cp35m-win_amd64.whl?dl=1"} else {$Env:SCIKIT_IMAGE="https://www.dropbox.com/s/6hwotxy9hoalj25/scikit_image-0.13.0-cp35-cp35m-win32.whl?dl=1"}
- "ECHO %SCIKIT_IMAGE%"
# - "%CMD_IN_ENV% wget http://www.lfd.uci.edu/~gohlke/pythonlibs/tuoh5y4k/%SCIKIT_IMAGE% --header User-Agent:Chrome/23.0.1271.97"
- "%CMD_IN_ENV% pip install %SCIKIT_IMAGE%"
- "%CMD_IN_ENV% pip install --upgrade tqdm notebook cython ipython configobj start_jupyter_cm ipywidgets ipyparallel sympy pytest blosc"
# uninstall and reinstall matplotlib to get the 2.0 version without using --upgrade option (to avoid upgrading numpy and breaking scipy...)

# install and upgrade dependencies
- "%CMD_IN_ENV% pip install --upgrade tqdm notebook cython ipython configobj start_jupyter_cm ipywidgets ipyparallel sympy pytest dask"
- "%CMD_IN_ENV% pip uninstall -y matplotlib"
- "%CMD_IN_ENV% pip install matplotlib pytest-mpl"
- "%CMD_IN_ENV% pip install dask==0.13"
- "%CMD_IN_ENV% pip install matplotlib==2.1.2 pytest-mpl"

# Install pyxem and dependencies first
- "%CMD_IN_ENV% pip install spglib pymatgen transforms3d"
- "%CMD_IN_ENV% pip install https://github.com/pyxem/pyxem/archive/master.zip"

# Install atomap
- "%CMD_IN_ENV% pip install https://gitlab.com/atomap/atomap/repository/Absolute_Integrator/archive.zip"

# install pyface and traitsui from master to get pyqt5 compatibility
- "%CMD_IN_ENV% pip install https://github.com/enthought/pyface/archive/master.zip"
- "%CMD_IN_ENV% pip install https://github.com/enthought/traitsui/archive/master.zip"
- "%CMD_IN_ENV% pip install .[all]"
# Try to run twice as workaround for permission error
- "%CMD_IN_ENV% pip install hyperspyui || pip install hyperspyui"
- "%CMD_IN_ENV% pip install https://github.com/ericpre/hyperspyUI/archive/qt5.zip || pip install https://github.com/ericpre/hyperspyUI/archive/qt5.zip"
# setting back the config:
- "ren %WP_INSTDIR%\\settings\\pydistutils_bak.cfg pydistutils.cfg"
# Custom installer step
@@ -157,7 +164,8 @@ before_deploy:
- "\"%NSIS_DIR%/makensis.exe\" /V3 NSIS_installer_script-%PYTHON_ARCH%bit.nsi"
- ps: Add-AppveyorMessage "Installer created! Re-run tests in Winpython environment..."
# Re-run tests in WinPython environment
- ps: if($Env:PYTHON_ARCH -eq "64") {$Env:PYTHON_DIR_NAME="python-3.5.1.amd64"} else {$Env:PYTHON_DIR_NAME="python-3.5.1"}
- ps: if($Env:PYTHON_VERSION -eq "3.5.x") {$Env:PY_VERSION="3.5.4"} else {$Env:PY_VERSION="3.6.3"}
- ps: if($Env:PYTHON_ARCH -eq "64") {$Env:PYTHON_DIR_NAME="python-"+$Env:PY_VERSION+".amd64"} else {$Env:PYTHON_DIR_NAME="python-"+$Env:PY_VERSION}
- "SET HYPERSPY_DIR=%WP_INSTDIR%\\%PYTHON_DIR_NAME%\\Lib\\site-packages\\hyperspy"
- ps: py.test --mpl $Env:HYPERSPY_DIR
- ps: Add-AppveyorMessage "Tests finished! Pushing to GitHub..."
@@ -167,16 +175,16 @@ deploy:
provider: GitHub
auth_token:
# to266:
secure: ptV5Dkz3pSVdjD0qRDpxJgjVlddFtleZ+B+c2X1Fg67P8OX3bHWVktRmlj6hfLhM
#secure: ptV5Dkz3pSVdjD0qRDpxJgjVlddFtleZ+B+c2X1Fg67P8OX3bHWVktRmlj6hfLhM
# vidartf:
#secure: KwAfARhGEqOnZHltPB6kKu8xmnoiGSk7NMYJBIEbWvFCuVnepoPV7ZcIjUN3pUpK
# sem-geologist:
#secure: RRqUkx9H5VuFNITmm+YzgB0qnqgVGPH1yrPVxb4oCD+FAjcTch2WZAiPEKn4L6w6
# ericpre:
#secure: ae8XsPI+vKJI9AWm0r9+ec71CIkXcnCHlNIQ57v+87hh5k1xuAAxIOi1CFKEmmZv
secure: ae8XsPI+vKJI9AWm0r9+ec71CIkXcnCHlNIQ57v+87hh5k1xuAAxIOi1CFKEmmZv
artifact: /.*\.exe/, win_wheels # upload all exe installers and wheels to release assets
draft: false
prerelease: false
force_update: true
on:
appveyor_repo_tag: true # deploy on tag push only
appveyor_repo_tag: True # deploy on tag push only
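The doubled `pip install … || pip install …` commands in before_deploy above retry once, as the comment there notes, to work around an intermittent permission error on the Windows workers. The same idea as a small hedged Python sketch (the helper below is illustrative, not part of the repository):

import subprocess

def pip_install_with_retry(target, attempts=2):
    # Re-run `pip install` a fixed number of times; transient failures
    # (e.g. file locks or permission errors on Windows) often pass on retry.
    for _ in range(attempts):
        if subprocess.run(['pip', 'install', target]).returncode == 0:
            return True
    return False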
157 changes: 104 additions & 53 deletions hyperspy/io_plugins/emd.py
@@ -38,6 +38,7 @@
import logging
from tempfile import mkdtemp
import os.path as path
import traits.api as t

from hyperspy.misc.elements import atomic_number2name

@@ -500,6 +501,7 @@ def __init__(self, filename, select_type=None, first_frame=0,
self.SI_data_dtype = SI_dtype
self.load_SI_image_stack = load_SI_image_stack
self.lazy = lazy
self.detector_name = None

self.original_metadata = {}
with h5py.File(filename, 'r') as f:
@@ -571,14 +573,15 @@ def _read_spectrum(self, spectrum_group, spectrum_sub_group_key):
spectrum_sub_group_key)
original_metadata.update(self.original_metadata)

dispersion, offset = self._get_dispersion_offset(original_metadata)
dispersion, offset, unit = self._get_dispersion_offset(
original_metadata)

axes = [{'index_in_array': 0,
'name': 'E',
'offset': offset,
'scale': dispersion,
'size': data.shape[0],
'units': 'keV',
'units': unit,
'navigate': False}
]

@@ -614,11 +617,14 @@ def _read_image(self, image_group, image_sub_group_key):
original_metadata = _parse_metadata(image_group, image_sub_group_key)
original_metadata.update(self.original_metadata)
try:
self.detector_name = original_metadata['BinaryResult']['Detector']
except KeyError:
if 'Detector' in original_metadata['BinaryResult']:
self.detector_name = original_metadata['BinaryResult']['Detector']
# if the `BinaryResult/Detector` is not available, there should be
# only one detector in `Detectors`
self.detector_name = original_metadata['Detectors']['Detector-01']['DetectorName']
elif 'DetectorName' in original_metadata['Detectors']['Detector-01']:
self.detector_name = original_metadata['Detectors']['Detector-01']['DetectorName']
except KeyError:
pass

read_stack = (self.load_SI_image_stack or self.im_type == 'Image')
if read_stack:
@@ -630,25 +636,27 @@ def _read_image(self, image_group, image_sub_group_key):
# Get the scanning area shape of the SI from the images
self.SI_shape = data.shape

pix_scale = original_metadata['BinaryResult'].get(
'PixelSize', {'height': 1.0, 'width': 1.0})
offsets = original_metadata['BinaryResult'].get(
'Offset', {'x': 0.0, 'y': 0.0})
original_units = original_metadata['BinaryResult'].get(
'PixelUnitX', '')
try:
pix_scale = original_metadata['BinaryResult']['PixelSize']
offsets = original_metadata['BinaryResult']['Offset']
original_units = original_metadata['BinaryResult']['PixelUnitX']
except KeyError:
_logger.warning("The calibration can't be loaded.")
pix_scale = {'height': 1.0, 'width': 1.0}
offsets = {'x': 0.0, 'y': 0.0}
original_units = t.Undefined

axes = []
# stack of images
if read_stack and data.shape[0] > 1:
frame_time = original_metadata['Scan']['FrameTime']
scale_time = self._convert_scale_units(
frame_time, 's', 2 * data.shape[0])
frame_time, time_unit = self._parse_frame_time(original_metadata,
data.shape[0])
axes.append({'index_in_array': 0,
'name': 'Time',
'offset': 0,
'scale': scale_time[0],
'scale': frame_time,
'size': data.shape[0],
'units': scale_time[1],
'units': time_unit,
'navigate': True})
i = 1
else:
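The hunk above replaces silent `.get()` defaults with an explicit try/except: when the BinaryResult calibration keys are missing, the reader now warns and falls back to unit scales and zero offsets, using traits' Undefined as the sentinel for an unknown unit. Distilled into a standalone sketch (the function name is illustrative):

import logging
import traits.api as t

_logger = logging.getLogger(__name__)

def read_pixel_calibration(original_metadata):
    # Prefer the calibration stored under BinaryResult; degrade gracefully
    # when any of the keys is absent.
    try:
        binary_result = original_metadata['BinaryResult']
        return (binary_result['PixelSize'],
                binary_result['Offset'],
                binary_result['PixelUnitX'])
    except KeyError:
        _logger.warning("The calibration can't be loaded.")
        return ({'height': 1.0, 'width': 1.0},
                {'x': 0.0, 'y': 0.0},
                t.Undefined)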
@@ -692,28 +700,46 @@ def _read_image(self, image_group, image_sub_group_key):
'original_metadata': original_metadata,
'mapping': self._get_mapping(map_selected_element=False)}

def _parse_frame_time(self, original_metadata, factor=1):
try:
frame_time = original_metadata['Scan']['FrameTime']
time_unit = 's'
except KeyError:
frame_time, time_unit = None, t.Undefined

frame_time, time_unit = self._convert_scale_units(
frame_time, time_unit, factor)
return frame_time, time_unit

def _parse_image_display(self):
image_display_group = self.p_grp.get('Displays/ImageDisplay')
key_list = _get_keys_from_group(image_display_group)
self.map_label_dict = {}
for key in key_list:
v = json.loads(image_display_group[key].value[0].decode('utf-8'))
data_key = v['dataPath'].split('/')[-1] # key in data group
self.map_label_dict[data_key] = v['display']['label']
try:
image_display_group = self.p_grp.get('Displays/ImageDisplay')
key_list = _get_keys_from_group(image_display_group)
self.map_label_dict = {}
for key in key_list:
v = json.loads(
image_display_group[key].value[0].decode('utf-8'))
data_key = v['dataPath'].split('/')[-1] # key in data group
self.map_label_dict[data_key] = v['display']['label']
except:
pass

def _parse_metadata_group(self, group, group_name):
d = {}
for group_key in _get_keys_from_group(group):
subgroup = group.get(group_key)
if hasattr(subgroup, 'keys'):
sub_dict = {}
for subgroup_key in _get_keys_from_group(subgroup):
v = json.loads(
subgroup[subgroup_key].value[0].decode('utf-8'))
sub_dict[subgroup_key] = v
else:
sub_dict = json.loads(subgroup.value[0].decode('utf-8'))
d[group_key] = sub_dict
try:
for group_key in _get_keys_from_group(group):
subgroup = group.get(group_key)
if hasattr(subgroup, 'keys'):
sub_dict = {}
for subgroup_key in _get_keys_from_group(subgroup):
v = json.loads(
subgroup[subgroup_key].value[0].decode('utf-8'))
sub_dict[subgroup_key] = v
else:
sub_dict = json.loads(subgroup.value[0].decode('utf-8'))
d[group_key] = sub_dict
except:
_logger.warning("Some metadata can't be read.")
self.original_metadata.update({group_name: d})

def _read_spectrum_stream(self):
@@ -742,7 +768,7 @@ def _read_spectrum_stream(self):
original_metadata.update(self.original_metadata)

pixel_size, offsets, original_units = streams.get_pixelsize_offset_unit()
dispersion, offset = self._get_dispersion_offset(original_metadata)
dispersion, offset, unit = self._get_dispersion_offset(original_metadata)

scale_x = self._convert_scale_units(
pixel_size['width'], original_units, spectrum_image_shape[1])
@@ -757,13 +783,14 @@ def _read_spectrum_stream(self):
axes = []
# add a supplementary axis when we import all frames individually
if not self.sum_frames:
frame_time = float(original_metadata['Scan']['FrameTime'])
frame_time, time_unit = self._parse_frame_time(original_metadata,
spectrum_image_shape[i])
axes.append({'index_in_array': i,
'name': 'Time',
'offset': 0,
'scale': frame_time,
'size': spectrum_image_shape[i],
'units': 's',
'units': time_unit,
'navigate': True})
i = 1
axes.extend([{'index_in_array': i,
@@ -785,7 +812,7 @@ def _read_spectrum_stream(self):
'offset': offset,
'scale': dispersion,
'size': spectrum_image_shape[i + 2],
'units': 'keV',
'units': unit,
'navigate': False}])

md = self._get_metadata_dict(original_metadata)
@@ -808,15 +835,21 @@ def _read_spectrum_stream(self):
'mapping': self._get_mapping()})

def _get_dispersion_offset(self, original_metadata):
for detectorname, detector in original_metadata['Detectors'].items():
if original_metadata['BinaryResult']['Detector'] in detector['DetectorName']:
dispersion = float(
detector['Dispersion']) / 1000.0 * self.rebin_energy
offset = float(
detector['OffsetEnergy']) / 1000.0
return dispersion, offset
try:
for detectorname, detector in original_metadata['Detectors'].items():
if original_metadata['BinaryResult']['Detector'] in detector['DetectorName']:
dispersion = float(
detector['Dispersion']) / 1000.0 * self.rebin_energy
offset = float(
detector['OffsetEnergy']) / 1000.0
return dispersion, offset, 'keV'
except KeyError:
_logger.warning("The spectrum calibration can't be loaded.")
return 1, 0, t.Undefined

def _convert_scale_units(self, value, units, factor=1):
if units == t.Undefined:
return value, units
factor /= 2
v = np.float(value) * self.ureg(units)
converted_v = (factor * v).to_compact()
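_convert_scale_units now returns the value untouched when the unit is Undefined; otherwise it relies on pint's Quantity.to_compact(), which rescales a quantity to the most readable SI prefix. A quick hedged illustration of that library behaviour (values are made up):

import pint

ureg = pint.UnitRegistry()
pixel_size = 2e-06 * ureg('m')   # 2 micrometres expressed in metres
print(pixel_size.to_compact())   # -> 2.0 micrometer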
@@ -827,14 +860,25 @@ def _convert_scale_units(self, value, units, factor=1):
def _get_metadata_dict(self, om):
meta_gen = {}
meta_gen['original_filename'] = os.path.split(self.filename)[1]
meta_gen['title'] = self.detector_name
if self.detector_name is not None:
meta_gen['title'] = self.detector_name
# We have only one entry in the original_metadata, so we can't use the
# mapping of the original_metadata to set the date and time in the
# metadata: need to set manually here
unix_time = om['Acquisition']['AcquisitionStartDatetime']['DateTime']
date, time = self._convert_datetime(unix_time).split('T')
meta_gen['date'] = date
meta_gen['time'] = time
# metadata: need to set it manually here
try:
if 'AcquisitionStartDatetime' in om['Acquisition'].keys():
unix_time = om['Acquisition']['AcquisitionStartDatetime']['DateTime']
# Workaround when the 'AcquisitionStartDatetime' key is missing
# This timestamp corresponds to when the data is stored
elif 'Detectors[BM-Ceta].TimeStamp' in om['CustomProperties'].keys():
unix_time = float(
om['CustomProperties']['Detectors[BM-Ceta].TimeStamp']['value'])/1E6
date, time = self._convert_datetime(unix_time).split('T')
meta_gen['date'] = date
meta_gen['time'] = time
meta_gen['time_zone'] = self._get_local_time_zone()
except (KeyError, UnboundLocalError):
pass

meta_sig = {}
meta_sig['signal_type'] = ''
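The new 'Detectors[BM-Ceta].TimeStamp' branch above assumes the stored value is a Unix epoch expressed in microseconds, hence the division by 1E6 before handing it to _convert_datetime. The arithmetic in isolation (the raw value below is hypothetical, chosen for illustration):

from datetime import datetime, timezone

timestamp_us = 1521720000000000        # hypothetical raw TimeStamp value
unix_time = float(timestamp_us) / 1E6  # microseconds -> seconds
print(datetime.fromtimestamp(unix_time, tz=timezone.utc).isoformat())
# -> 2018-03-22T12:00:00+00:00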
@@ -980,8 +1024,15 @@ def get_SI_shape(self):
return self.streams[0].spectrum_image.shape

def get_pixelsize_offset_unit(self, stream_index=0):
om_br = self.streams[stream_index].original_metadata['BinaryResult']
return om_br['PixelSize'], om_br['Offset'], om_br['PixelUnitX']
try:
om_br = self.streams[stream_index].original_metadata['BinaryResult']
return om_br['PixelSize'], om_br['Offset'], om_br['PixelUnitX']
except KeyError:
_logger.warning("The calibration can't be loaded.")
pix_scale = {'height': 1.0, 'width': 1.0}
offsets = {'x': 0.0, 'y': 0.0}
original_units = t.Undefined
return pix_scale, offsets, original_units


class FeiSpectrumStream(object):