diff --git a/nibabel/__init__.py b/nibabel/__init__.py index cf4173fc27..8e5f14646d 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -51,6 +51,8 @@ from .nifti2 import Nifti2Header, Nifti2Image, Nifti2Pair from .minc1 import Minc1Image from .minc2 import Minc2Image +from .brainvoyager import (BvMskHeader, BvMskImage, BvVmpHeader, BvVmpImage, + BvVtcHeader, BvVtcImage, BvVmrHeader, BvVmrImage) # Deprecated backwards compatiblity for MINC1 from .deprecated import ModuleProxy as _ModuleProxy minc = _ModuleProxy('nibabel.minc') diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index aa0df4eebc..1ae244d5e7 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -164,3 +164,8 @@ def is_proxy(obj): return obj.is_proxy except AttributeError: return False + + +class CArrayProxy(ArrayProxy): + # Assume C array memory layout + order = 'C' diff --git a/nibabel/brainvoyager/__init__.py b/nibabel/brainvoyager/__init__.py new file mode 100644 index 0000000000..5507978c52 --- /dev/null +++ b/nibabel/brainvoyager/__init__.py @@ -0,0 +1,18 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +''' Support for BrainVoyager file formats ''' +from .bv_msk import BvMskHeader, BvMskImage +from .bv_vmp import BvVmpHeader, BvVmpImage +from .bv_vtc import BvVtcHeader, BvVtcImage +from .bv_vmr import BvVmrHeader, BvVmrImage + +__all__ = ('BvMskHeader', 'BvMskImage', + 'BvVmpHeader', 'BvVmpImage', + 'BvVtcHeader', 'BvVtcImage', + 'BvVmrHeader', 'BvVmrImage') diff --git a/nibabel/brainvoyager/bv.py b/nibabel/brainvoyager/bv.py new file mode 100644 index 0000000000..cd5879ef92 --- /dev/null +++ b/nibabel/brainvoyager/bv.py @@ -0,0 +1,979 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +"""Reading / writing functions for Brainvoyager (BV) file formats. + +please look at the support site of BrainInnovation for further informations +about the file formats: http://support.brainvoyager.com/ + +This file implements basic functionality for BV file formats. Look into bv_*.py +files for implementations of the different file formats. + +Author: Thomas Emmerling +""" +from __future__ import division +import numpy as np + +from ..volumeutils import array_to_file, array_from_file, make_dt_codes +from ..spatialimages import Header, HeaderDataError, SpatialImage +from ..fileholders import copy_file_map +from ..arrayproxy import CArrayProxy +from .. 
import imageglobals as imageglobals +from ..batteryrunners import BatteryRunner, Report +from struct import pack, unpack, calcsize +from ..externals import OrderedDict + +_dtdefs = ( # code, conversion function, equivalent dtype, aliases + (1, 'int16', np.uint16), + (2, 'float32', np.float32), + (3, 'uint8', np.uint8)) + +# Make full code alias bank, including dtype column +data_type_codes = make_dt_codes(_dtdefs) + +# Set example hdr_dict_proto for BV file formats +BV_HDR_DICT_PROTO = ( + ('resolution', 'h', 3), + ('x_start', 'h', 57), + ('x_end', 'h', 231), + ('y_start', 'h', 52), + ('y_end', 'h', 172), + ('z_start', 'h', 59), + ('z_end', 'h', 197), +) + + +def read_c_string(f, n_strings=1, bufsize=1000, start_pos=None, strip=True, + rewind=False): + """Read a zero-terminated string from a file object. + + Read and return a zero-terminated string from a file object. + + Parameters + ---------- + f : fileobj + File object to use. Object should implement tell, seek, and read. + n_strings: int, optional + Number of strings to search (and return). Default is 1. + bufsize: int, optional + Define the buffer size that should be searched for the string. + Default is 1000 bytes. + start_pos: int, optional + Define the start file position from which to search. If None then start + where the file object currently points to. Default is None. + strip : bool, optional + Whether to strip the trailing zero from the returned string. + Default is True. + rewind: bool, optional + Whether the fileobj f should be returned to the initial position after + reading. Default is False. + + Returns + ------- + str_list : generator of string(s) + """ + current_pos = f.tell() + suffix = b'' if strip else b'\x00' + if start_pos is not None: + f.seek(start_pos) + data = f.read(bufsize) + lines = data.split(b'\x00') + str_list = [] + if rewind: + f.seek(current_pos) + else: + offsets = [len(lines[s]) + 1 for s in range(n_strings)] + f.seek(current_pos + sum(offsets)) + for s in range(n_strings): + str_list.append(lines[s] + suffix) + return str_list + + +def parse_BV_header(hdr_dict_proto, fileobj, parent_hdr_dict=None): + """Parse the header of a BV file format. + + This function can be (and is) called recursively to iterate through nested + fields (e.g. the ``prts`` field of the VTC header). + + Parameters + ---------- + hdr_dict_proto: tuple + tuple of format described in Notes below. + fileobj : fileobj + File object to use. Make sure that the current position is at the + beginning of the header (e.g. at 0). Object should implement tell, + seek, and read. + parent_hdr_dict: None or OrderedDict, optional + Default is None. None results in empty `OrderedDict`. + When parse_BV_header() is called recursively the already filled + (parent) hdr_dict is passed to give access to n_fields_name fields + outside the current scope (see below). + + Returns + ------- + hdr_dict : OrderedDict + An OrderedDict containing all header fields parsed from the file. 
+ + Notes + ----- + The description of `hdr_dict_proto` below is notated according to + https://docs.python.org/3/reference/introduction.html#notation + + hdr_dict_proto ::= ((element_proto))* + element_proto ::= '(' name ',' pack_format ',' default ')' | + '(' name ',' pack_format ',' '(' default ',' + c_fields_name ',' c_fields_value ')' ')' | + '(' name ',' hdr_dict_proto ',' n_fields_name ')' + pack_format ::= 'b' | 'h' | 'f' | 'z' + name ::= str + n_fields_name ::= str + c_fields_name ::= str + c_fields_value ::= int | float | bytes + default ::= int | float | bytes + + The pack_format codes have meaning:: + + b := signed char (1 byte) + B := unsigned char (1 byte) + h := signed short integer (2 bytes) + i := signed integer (4 bytes) + I := unsigned integer (4 bytes) + f := float (4 bytes) + z := zero-terminated string (variable bytes) + + The n_fields_name is used to indicate the name of a header field that + contains a number for nested header fields loops (e.g. 'NrOfSubMaps' in the + VMP file header). + + The c_fields_name and c_fields_value parameters are used for header fields + that are only written depending on the value of another header field (e.g. + 'NrOfLags' in the VMP file header). + """ + hdr_dict = OrderedDict() + for name, pack_format, def_or_name in hdr_dict_proto: + # handle zero-terminated strings + if pack_format == 'z': + value = read_c_string(fileobj)[0] + # handle array fields + elif isinstance(pack_format, tuple): + value = [] + # check the length of the array to expect + if def_or_name in hdr_dict: + n_values = hdr_dict[def_or_name] + else: + n_values = parent_hdr_dict[def_or_name] + for i in range(n_values): + value.append(parse_BV_header(pack_format, fileobj, hdr_dict)) + # handle conditional fields + elif isinstance(def_or_name, tuple): + if hdr_dict[def_or_name[1]] == def_or_name[2]: + raw_bytes = fileobj.read(calcsize(pack_format)) + value = unpack('<' + pack_format, raw_bytes)[0] + else: # assign the default value + value = def_or_name[0] + else: # unpack raw_bytes of type pack_format + raw_bytes = fileobj.read(calcsize(pack_format)) + value = unpack('<' + pack_format, raw_bytes)[0] + hdr_dict[name] = value + return hdr_dict + + +def pack_BV_header(hdr_dict_proto, hdr_dict, parent_hdr_dict=None): + """Pack the header of a BV file format into a byte string. + + This function can be (and is) called recursively to iterate through nested + fields (e.g. the ``prts`` field of the VTC header). + + Parameters + ---------- + hdr_dict_proto: tuple + tuple of format described in Notes of :func:`parse_BV_header` + hdr_dict: OrderedDict + hdr_dict that contains the fields and values to for the respective + BV file format. + parent_hdr_dict: None or OrderedDict, optional + Default is None. None results in empty `OrderedDict`. + When parse_BV_header() is called recursively the already filled + (parent) hdr_dict is passed to give access to n_fields_name fields + outside the current scope (see below). + + Returns + ------- + binaryblock : bytes + Binary representation of header ready for writing to file. 
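+
+    Examples
+    --------
+    A minimal round-trip sketch; the two-field prototype below is made up
+    purely for illustration and is not one of the real BV prototypes:
+
+    >>> from io import BytesIO
+    >>> proto = (('resolution', 'h', 3), ('x_start', 'h', 57))
+    >>> hdr = _proto2default(proto)
+    >>> packed = pack_BV_header(proto, hdr)
+    >>> parse_BV_header(proto, BytesIO(packed)) == hdr
+    True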
+ """ + binary_parts = [] + for name, pack_format, def_or_name in hdr_dict_proto: + value = hdr_dict[name] + # handle zero-terminated strings + if pack_format == 'z': + part = value + b'\x00' + # handle array fields + elif isinstance(pack_format, tuple): + # check the length of the array to expect + if def_or_name in hdr_dict: + n_values = hdr_dict[def_or_name] + else: + n_values = parent_hdr_dict[def_or_name] + sub_parts = [] + for i in range(n_values): + sub_parts.append(pack_BV_header(pack_format, value[i], + hdr_dict)) + part = b''.join(sub_parts) + # handle conditional fields + elif isinstance(def_or_name, tuple): + if hdr_dict[def_or_name[1]] == def_or_name[2]: + part = pack('<' + pack_format, value) + else: + # skip to next header field if condition is not met + continue + else: + part = pack('<' + pack_format, value) + binary_parts.append(part) + return b''.join(binary_parts) + + +def calc_BV_header_size(hdr_dict_proto, hdr_dict, parent_hdr_dict=None): + """Calculate the binary size of a hdr_dict for a BV file format header. + + This function can be (and is) called recursively to iterate through nested + fields (e.g. the prts field of the VTC header). + + Parameters + ---------- + hdr_dict_proto: tuple + tuple of format described in Notes of :func:`parse_BV_header` + hdr_dict: OrderedDict + hdr_dict that contains the fields and values to for the respective + BV file format. + parent_hdr_dict: None or OrderedDict, optional + Default is None. None results in empty `OrderedDict`. + When parse_BV_header() is called recursively the already filled + (parent) hdr_dict is passed to give access to n_fields_name fields + outside the current scope (see below). + + Returns + ------- + hdr_size : int + Size of header when packed into bytes ready for writing to file. + """ + hdr_size = 0 + for name, pack_format, def_or_name in hdr_dict_proto: + value = hdr_dict[name] + # handle zero-terminated strings + if pack_format == 'z': + hdr_size += len(value) + 1 + # handle array fields + elif isinstance(pack_format, tuple): + # check the length of the array to expect + if def_or_name in hdr_dict: + n_values = hdr_dict[def_or_name] + # handle cases when n_values is resides outside of the + # current scope (e.g. nr_of_timepoints in VMP_HDR_DICT_PROTO) + else: + n_values = parent_hdr_dict[def_or_name] + for i in range(n_values): + # recursively iterate through the fields of all items + # in the array + hdr_size += calc_BV_header_size(pack_format, value[i], + hdr_dict) + # handle conditional fields + elif isinstance(def_or_name, tuple): + if hdr_dict[def_or_name[1]] == def_or_name[2]: + hdr_size += calcsize(pack_format) + else: + continue + else: + hdr_size += calcsize(pack_format) + return hdr_size + + +def update_BV_header(hdr_dict_proto, hdr_dict_old, hdr_dict_new, + parent_old=None, parent_new=None): + """Update a hdr_dict after changed nested-loops-number or conditional fields. + + This function can be (and is) called recursively to iterate through nested + fields (e.g. the prts field of the VTC header). + + Parameters + ---------- + hdr_dict_proto: tuple + tuple of format described in Notes of :func:`parse_BV_header` + hdr_dict_old: OrderedDict + hdr_dict before any changes. + hdr_dict_new: OrderedDict + hdr_dict with changed fields in n_fields_name or c_fields_name fields. + parent_old: None or OrderedDict, optional + When update_BV_header() is called recursively the not yet updated + (parent) hdr_dict is passed to give access to n_fields_name fields + outside the current scope (see below). 
+ parent_new: None or OrderedDict, optional + When update_BV_header() is called recursively the not yet updated + (parent) hdr_dict is passed to give access to n_fields_name fields + outside the current scope (see below). + + Returns + ------- + hdr_dict_new : OrderedDict + An updated version hdr_dict correcting effects of changed nested and + conditional fields. + """ + for name, pack_format, def_or_name in hdr_dict_proto: + # handle only nested loop fields + if not isinstance(pack_format, tuple): + continue + # calculate the change of array length and the new array length + if def_or_name in hdr_dict_old: + delta_values = (hdr_dict_new[def_or_name] - + hdr_dict_old[def_or_name]) + n_values = hdr_dict_new[def_or_name] + else: + delta_values = (parent_new[def_or_name] - + parent_old[def_or_name]) + n_values = parent_new[def_or_name] + if delta_values > 0: # add nested loops + for i in range(delta_values): + hdr_dict_new[name].append(_proto2default(pack_format, + hdr_dict_new)) + elif delta_values < 0: # remove nested loops + for i in range(abs(delta_values)): + hdr_dict_new[name].pop() + # loop over nested fields + for i in range(n_values): + update_BV_header(pack_format, hdr_dict_old[name][i], + hdr_dict_new[name][i], hdr_dict_old, + hdr_dict_new) + return hdr_dict_new + + +def _proto2default(proto, parent_default_hdr=None): + """Helper for creating a BV header OrderedDict with default parameters. + + Create an OrderedDict that contains keys with the header fields, and + default values. + + See :func:`parse_BV_header` for description of `proto` format. + """ + default_hdr = OrderedDict() + for name, pack_format, def_or_name in proto: + if isinstance(pack_format, tuple): + value = [] + # check the length of the array to expect + if def_or_name in default_hdr: + n_values = default_hdr[def_or_name] + else: + n_values = parent_default_hdr[def_or_name] + for i in range(n_values): + value.append(_proto2default(pack_format, default_hdr)) + default_hdr[name] = value + # handle conditional fields + elif isinstance(def_or_name, tuple): + default_hdr[name] = def_or_name[0] + else: + default_hdr[name] = def_or_name + return default_hdr + + +def combine_st(st_array, inv=False): + """Combine spatial transformation matrices. + + This recursive function returns the dot product of all spatial + transformation matrices given in st_array for applying them in one go. + The order of multiplication follow the order in the given array. + + Parameters + ---------- + st_array: array of shape (n, 4, 4) + array filled with n transformation matrices of shape (4, 4) + + inv: boolean + Set to true to invert the transformation matrices before + multiplication. + + Returns + ------- + combined_st : array of shape (4, 4) + """ + if len(st_array) == 1: + if inv: + return np.linalg.inv(st_array[0]) + else: + return st_array[0] + if inv: + return np.dot(np.linalg.inv(st_array[0, :, :]), + combine_st(st_array[1:, :, :], inv=inv)) + else: + return np.dot(st_array[0, :, :], + combine_st(st_array[1:, :, :], inv=inv)) + + +def parse_st(st_dict): + """Parse spatial transformation stored in a BV header OrderedDict. + + This function parses a given OrderedDict from a BV header field and returns + a spatial transformation matrix as a numpy array. 
+ + Parameters + ---------- + st_dict: OrderedDict + OrderedDict filled with transformation matrices of shape (4, 4) + + Returns + ------- + st_array : array of shape (4, 4) + """ + if st_dict['nr_of_trans_val'] != 16: + raise BvError('spatial transformation has to be of shape (4, 4)') + st_array = [] + for v in range(st_dict['nr_of_trans_val']): + st_array.append(st_dict['trans_val'][v]['value']) + return np.array(st_array).reshape((4, 4)) + + +class BvError(Exception): + """Exception for BV format related problems. + + To be raised whenever there is a problem with a BV fileformat. + """ + + pass + + +class BvFileHeader(Header): + """Class to hold information from a BV file header.""" + + # Copies of module-level definitions + _data_type_codes = data_type_codes + _field_recoders = {'datatype': data_type_codes} + + # format defaults + # BV files are radiological (left-is-right) by default + # (VTC files have a flag for that, however) + default_xflip = True + default_endianness = '<' # BV files are always little-endian + allowed_dtypes = [1, 2, 3] + default_dtype = 2 + allowed_dimensions = [3] + data_layout = 'C' + hdr_dict_proto = BV_HDR_DICT_PROTO + + def __init__(self, + hdr_dict=None, + endianness=default_endianness, + check=True, + offset=None): + """Initialize header from binary data block. + + Parameters + ---------- + hdr_dict : None or OrderedDict, optional + An OrderedDict containing all header fields parsed from the file. + By default, None, in which case we create a default hdr_dict from + the corresponding _HDR_DICT_PROTO + endianness : {None, '<','>', other endian code} string, optional + endianness of the binaryblock. If None, guess endianness + from the data. + check : bool, optional + Whether to check content of header in initialization. + Default is True. + offset : int, optional + offset of the actual data into to binary file (in bytes) + """ + if endianness != self.default_endianness: + raise BvError('BV files are always little-endian') + self.endianness = self.default_endianness + if hdr_dict is None: + hdr_dict = _proto2default(self.hdr_dict_proto) + self._hdr_dict = hdr_dict + if offset is None: + self.set_data_offset(calc_BV_header_size( + self.hdr_dict_proto, self._hdr_dict)) + if 'framing_cube' in self._hdr_dict: + self._framing_cube = self._hdr_dict['framing_cube'] + else: + self._framing_cube = self._guess_framing_cube() + if check: + self.check_fix() + return + + @classmethod + def from_fileobj(klass, fileobj, endianness=default_endianness, + check=True): + """Return read structure with given or guessed endiancode. + + Parameters + ---------- + fileobj : file-like object + Needs to implement ``read`` method + endianness : None or endian code, optional + Code specifying endianness of read data + + Returns + ------- + header : BvFileHeader object + BvFileHeader object initialized from data in fileobj + """ + hdr_dict = parse_BV_header(klass.hdr_dict_proto, fileobj) + offset = fileobj.tell() + return klass(hdr_dict, endianness, check, offset) + + @classmethod + def from_header(klass, header=None, check=False): + """Class method to create header from another header. 
+ + Parameters + ---------- + header : ``Header`` instance or mapping + a header of this class, or another class of header for + conversion to this type + check : {True, False} + whether to check header for integrity + + Returns + ------- + hdr : header instance + fresh header instance of our own class + """ + # own type, return copy + if type(header) == klass: + obj = header.copy() + if check: + obj.check_fix() + return obj + # not own type, make fresh header instance + obj = klass(check=check) + if header is None: + return obj + try: # check if there is a specific conversion routine + mapping = header.as_bv_map() + except AttributeError: + # most basic conversion + obj.set_data_dtype(header.get_data_dtype()) + obj.set_data_shape(header.get_data_shape()) + obj.set_zooms(header.get_zooms()) + return obj + # header is convertible from a field mapping + for key, value in mapping.items(): + try: + obj[key] = value + except (ValueError, KeyError): + # the presence of the mapping certifies the fields as + # being of the same meaning as for BV types + pass + # set any fields etc that are specific to this format (overriden by + # sub-classes) + obj._set_format_specifics() + # Check for unsupported datatypes + orig_code = header.get_data_dtype() + try: + obj.set_data_dtype(orig_code) + except HeaderDataError: + raise HeaderDataError('Input header %s has datatype %s but ' + 'output header %s does not support it' + % (header.__class__, + header.get_value_label('datatype'), + klass)) + if check: + obj.check_fix() + return obj + + def copy(self): + """Copy object to independent representation. + + The copy should not be affected by any changes to the original + object. + """ + return self.__class__(self._hdr_dict) + + def _set_format_specifics(self): + """Utility routine to set format specific header stuff.""" + pass + + def data_from_fileobj(self, fileobj): + """Read data array from `fileobj`. + + Parameters + ---------- + fileobj : file-like + Must be open, and implement ``read`` and ``seek`` methods + + Returns + ------- + arr : ndarray + data array + """ + dtype = self.get_data_dtype() + shape = self.get_data_shape() + offset = self.get_data_offset() + return array_from_file(shape, dtype, fileobj, offset, + order=self.data_layout) + + def get_data_dtype(self): + """Get numpy dtype for data. 
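+
+        The dtype is taken from the 'datatype' field of the header
+        dictionary when the format stores one; otherwise the class-level
+        default dtype is used.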
+ + For examples see ``set_data_dtype`` + """ + if 'datatype' in self._hdr_dict: + code = self._hdr_dict['datatype'] + else: + code = self.default_dtype + dtype = self._data_type_codes.dtype[code] + return dtype.newbyteorder(self.endianness) + + def set_data_dtype(self, datatype): + """Set numpy dtype for data from code or dtype or type.""" + try: + code = self._data_type_codes[datatype] + except KeyError: + raise HeaderDataError( + 'data dtype "%s" not recognized' % datatype) + if code not in self.allowed_dtypes: + raise HeaderDataError( + 'data dtype "%s" not supported' % datatype) + dtype = self._data_type_codes.dtype[code] + if 'datatype' in self._hdr_dict.keys(): + self._hdr_dict['datatype'] = code + return + if dtype.newbyteorder(self.endianness) != self.get_data_dtype(): + raise HeaderDataError( + 'File format does not support setting of header!') + + @property + def xflip(self): + return self.default_xflip + + @xflip.setter + def xflip(self, xflip): + """Set xflip for data.""" + if xflip is True: + return + else: + raise BvError('cannot change Left-right convention!') + + def get_data_shape(self): + """Get shape of data.""" + raise NotImplementedError + + def set_data_shape(self, shape): + """Set shape of data.""" + raise NotImplementedError + + def get_base_affine(self): + """Get affine from basic (shared) header fields. + + Note that we get the translations from the center of the + (guessed) framing cube of the referenced VMR (anatomical) file. + + Internal storage of the image is ZYXT, where (in patient coordinates/ + real world orientations): + Z := axis increasing from right to left (R to L) + Y := axis increasing from superior to inferior (S to I) + X := axis increasing from anterior to posterior (A to P) + T := volumes (if present in file format) + """ + zooms = self.get_zooms() + if not self.xflip: + # make the BV internal Z axis neurological (left-is-left); + # not default in BV files! + zooms = (-zooms[0], zooms[1], zooms[2]) + + # compute the rotation + rot = np.zeros((3, 3)) + # make the flipped BV Z axis the new R axis + rot[:, 0] = [-zooms[0], 0, 0] + # make the flipped BV X axis the new A axis + rot[:, 1] = [0, 0, -zooms[2]] + # make the flipped BV Y axis the new S axis + rot[:, 2] = [0, -zooms[1], 0] + + # compute the translation + fcc = np.array(self.framing_cube) / 2 # center of framing cube + bbc = np.array(self.get_bbox_center()) # center of bounding box + tra = np.dot((bbc - fcc), rot) + + # assemble + M = np.eye(4, 4) + M[0:3, 0:3] = rot + M[0:3, 3] = tra.T + + return M + + def get_best_affine(self): + return self.get_base_affine() + + def get_default_affine(self): + return self.get_base_affine() + + def get_affine(self): + return self.get_base_affine() + + def _guess_framing_cube(self): + """Guess the dimensions of the framing cube. + + Guess the dimensions of the framing cube that constitutes the + coordinate system boundaries for the bounding box. + + For most BV file formats this need to be guessed from + x_end, y_end, and z_end in the header. + """ + # then start guessing... + hdr = self._hdr_dict + # get the ends of the bounding box (highest values in each dimension) + x = hdr['x_end'] + y = hdr['y_end'] + z = hdr['z_end'] + + # compare with possible framing cubes + for fc in [256, 384, 512, 768, 1024]: + if any([d > fc for d in (x, y, z)]): + continue + else: + return fc, fc, fc + + @property + def framing_cube(self): + """Get the dimensions of the framing cube. 
+ + Get the dimensions of the framing cube that constitutes the + coordinate system boundaries for the bounding box. + For most BV file formats this need to be guessed from + x_end, y_end, and z_end in the header. + """ + return self._framing_cube + + @framing_cube.setter + def framing_cube(self, fc): + """Set the dimensions of the framing cube. + + Set the dimensions of the framing cube that constitutes the + coordinate system boundaries for the bounding box + For most BV file formats this need to be guessed from + x_end, y_end, and z_end in the header. + Use this if you know about the framing cube for the BV file. + """ + self._framing_cube = fc + + def get_bbox_center(self): + """Get the center coordinate of the bounding box. + + Get the center coordinate of the bounding box with respect to the + framing cube. + """ + hdr = self._hdr_dict + x = (hdr['x_start'] + + ((hdr['x_end'] - hdr['x_start']) / 2)) + y = (hdr['y_start'] + + ((hdr['y_end'] - hdr['y_start']) / 2)) + z = (hdr['z_start'] + + ((hdr['z_end'] - hdr['z_start']) / 2)) + return z, y, x + + def get_zooms(self): + shape = self.get_data_shape() + return tuple(float(self._hdr_dict['resolution']) + for d in shape[0:3]) + + def set_zooms(self, zooms): + """Set the zooms for the image. + + Voxel dimensions of functional data in BV file formats are + always in relationship to the voxel dimensions in a VMR file and + therefore need to be equal for all three spatial dimensions. + + Parameters + ---------- + zooms : int or sequence + An integer or a sequence of integers specifying the relationship + between voxel dimensions and real-world dimensions. If a single + integer is used it is applied to all spatial dimensions. If a + sequence of integers is used all dimensions have to be equal. + """ + if type(zooms) == int: + self._hdr_dict['resolution'] = zooms + else: + if np.any(np.diff(zooms)): + raise BvError('Zooms for all dimensions must be equal!') + else: + self._hdr_dict['resolution'] = int(zooms[0]) + + def as_analyze_map(self): + raise NotImplementedError + + def set_data_offset(self, offset): + """Set offset into data file to read data.""" + self._data_offset = offset + + def get_data_offset(self): + """Return offset into data file to read data.""" + self.set_data_offset(calc_BV_header_size( + self.hdr_dict_proto, self._hdr_dict)) + return self._data_offset + + def get_slope_inter(self): + """BV formats do not do scaling.""" + return None, None + + def write_to(self, fileobj): + """Write header to fileobj. + + Write starts at fileobj current file position. 
+ + Parameters + ---------- + fileobj : file-like object + Should implement ``write`` method + + Returns + ------- + None + """ + binaryblock = pack_BV_header(self.hdr_dict_proto, self._hdr_dict) + fileobj.write(binaryblock) + + def check_fix(self, logger=None, error_level=None): + """Check BV header with checks.""" + if logger is None: + logger = imageglobals.logger + if error_level is None: + error_level = imageglobals.error_level + battrun = BatteryRunner(self.__class__._get_checks()) + self, reports = battrun.check_fix(self) + for report in reports: + report.log_raise(logger, error_level) + + @classmethod + def _get_checks(klass): + """Return sequence of check functions for this class""" + return (klass._chk_fileversion,) + + ''' Check functions in format expected by BatteryRunner class ''' + + @classmethod + def _chk_fileversion(klass, hdr, fix=False): + rep = Report(HeaderDataError) + if 'version' in hdr._hdr_dict: + version = hdr._hdr_dict['version'] + if version in klass.supported_fileversions: + return hdr, rep + else: + rep.problem_level = 40 + rep.problem_msg = 'fileversion %d is not supported' % version + if fix: + rep.fix_msg = 'not attempting fix' + return hdr, rep + return hdr, rep + + +class BvFileImage(SpatialImage): + """Class to hold information from a BV image file.""" + + # Set the class of the corresponding header + header_class = BvFileHeader + + # Set the label ('image') and the extension ('.bv') for a (dummy) BV file + files_types = (('image', '.bv'),) + + # BV files are not compressed... + _compressed_exts = () + + # use the row-major CArrayProxy + ImageArrayProxy = CArrayProxy + + def update_header(self): + """Harmonize header with image data and affine. + + >>> data = np.zeros((2,3,4)) + >>> affine = np.diag([1.0,2.0,3.0,1.0]) + >>> img = SpatialImage(data, affine) + >>> hdr = img.get_header() + >>> img.shape == (2, 3, 4) + True + >>> img.update_header() + >>> hdr.get_data_shape() == (2, 3, 4) + True + >>> hdr.get_zooms() + (1.0, 2.0, 3.0) + """ + hdr = self._header + shape = self._dataobj.shape + # We need to update the header if the data shape has changed. It's a + # bit difficult to change the data shape using the standard API, but + # maybe it happened + if hdr.get_data_shape() != shape: + hdr.set_data_shape(shape) + + @classmethod + def from_file_map(klass, file_map): + """Load image from `file_map`. + + Parameters + ---------- + file_map : None or mapping, optional + files mapping. If None (default) use object's ``file_map`` + attribute instead + """ + bvf = file_map['image'].get_prepare_fileobj('rb') + header = klass.header_class.from_fileobj(bvf) + affine = header.get_affine() + hdr_copy = header.copy() + # use row-major memory presentation! + data = klass.ImageArrayProxy(bvf, hdr_copy) + img = klass(data, affine, header, file_map=file_map) + img._load_cache = {'header': hdr_copy, + 'affine': None, + 'file_map': copy_file_map(file_map)} + return img + + def _write_header(self, header_file, header): + """Utility routine to write BV header. + + Parameters + ---------- + header_file : file-like + file-like object implementing ``write``, open for writing + header : header object + """ + header.write_to(header_file) + + def _write_data(self, bvfile, data, header): + """Utility routine to write BV image. 
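+
+        The array is written in C (row-major) order, matching the
+        row-major layout that the BV formats use on disk.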
+
+        Parameters
+        ----------
+        bvfile : file-like
+           file-like object implementing ``seek`` or ``tell``, and
+           ``write``
+        data : array-like
+           array to write
+        header : analyze-type header object
+           header
+        """
+        shape = header.get_data_shape()
+        if data.shape != shape:
+            raise HeaderDataError('Data should be shape (%s)' %
+                                  ', '.join(str(s) for s in shape))
+        offset = header.get_data_offset()
+        out_dtype = header.get_data_dtype()
+        array_to_file(data, bvfile, out_dtype, offset, order='C')
+
+    def to_file_map(self, file_map=None):
+        """Write image to `file_map` or contained ``self.file_map``.
+
+        Parameters
+        ----------
+        file_map : None or mapping, optional
+           files mapping.  If None (default) use object's ``file_map``
+           attribute instead
+        """
+        if file_map is None:
+            file_map = self.file_map
+        data = self.get_data()
+        with file_map['image'].get_prepare_fileobj('wb') as bvf:
+            self._write_header(bvf, self.header)
+            self._write_data(bvf, data, self.header)
+        self.file_map = file_map
diff --git a/nibabel/brainvoyager/bv_msk.py b/nibabel/brainvoyager/bv_msk.py
new file mode 100644
index 0000000000..c623c15fbe
--- /dev/null
+++ b/nibabel/brainvoyager/bv_msk.py
@@ -0,0 +1,105 @@
+# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+#
+#   See COPYING file distributed along with the NiBabel package for the
+#   copyright and license terms.
+#
+# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+"""Reading / writing functions for Brainvoyager (BV) MSK files.
+
+for documentation on the file format see:
+http://www.brainvoyager.com/ubb/Forum8/HTML/000087.html
+
+Author: Thomas Emmerling
+"""
+from __future__ import division
+from .bv import BvFileHeader, BvFileImage
+from ..spatialimages import HeaderDataError
+
+MSK_HDR_DICT_PROTO = (
+    ('resolution', 'h', 3),
+    ('x_start', 'h', 57),
+    ('x_end', 'h', 231),
+    ('y_start', 'h', 52),
+    ('y_end', 'h', 172),
+    ('z_start', 'h', 59),
+    ('z_end', 'h', 197),
+)
+
+
+class BvMskHeader(BvFileHeader):
+    """Class for BrainVoyager MSK header."""
+
+    # format defaults
+    allowed_dtypes = [3]
+    default_dtype = 3
+    hdr_dict_proto = MSK_HDR_DICT_PROTO
+
+    def get_data_shape(self):
+        """Get shape of data."""
+        hdr = self._hdr_dict
+        # calculate dimensions
+        z = (hdr['z_end'] -
+             hdr['z_start']) / hdr['resolution']
+        y = (hdr['y_end'] -
+             hdr['y_start']) / hdr['resolution']
+        x = (hdr['x_end'] -
+             hdr['x_start']) / hdr['resolution']
+
+        return tuple(int(d) for d in [z, y, x])
+
+    def set_data_shape(self, shape=None, zyx=None):
+        """Set shape of data.
+
+        To conform with nibabel standards this implements shape.
+        However, to fill the BvMskHeader with sensible information use
+        the zyx parameter instead.
+
+        Parameters
+        ----------
+        shape : sequence
+           sequence of integers specifying data array shape
+        zyx: 3x2 nested list of integers, optional
+           [[z_start,z_end],[y_start,y_end],[x_start,x_end]]
+           array storing borders of data
+        """
+        if (shape is None) and (zyx is None):
+            raise HeaderDataError('Shape or zyx needs to be specified!')
+        if shape is not None:
+            # Use the zyx parameter instead of shape.
+            # Dimensions will start from standard coordinates.
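+            # Only the end coordinates are recomputed from the requested
+            # shape; the start coordinates keep their current header values.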
+ if len(shape) != 3: + raise HeaderDataError('Shape for MSK files must be\ + 3 dimensional (ZYX)!') + self._hdr_dict['x_end'] = self._hdr_dict['x_start'] + \ + (shape[2] * self._hdr_dict['resolution']) + self._hdr_dict['y_end'] = self._hdr_dict['y_start'] + \ + (shape[1] * self._hdr_dict['resolution']) + self._hdr_dict['z_end'] = self._hdr_dict['z_start'] + \ + (shape[0] * self._hdr_dict['resolution']) + return + self._hdr_dict['z_start'] = zyx[0][0] + self._hdr_dict['z_end'] = zyx[0][1] + self._hdr_dict['y_start'] = zyx[1][0] + self._hdr_dict['y_end'] = zyx[1][1] + self._hdr_dict['x_start'] = zyx[2][0] + self._hdr_dict['x_end'] = zyx[2][1] + + +class BvMskImage(BvFileImage): + """Class for BrainVoyager MSK masks. + + MSK files are technically binary images + """ + + # Set the class of the corresponding header + header_class = BvMskHeader + + # Set the label ('image') and the extension ('.msk') for a MSK file + files_types = (('image', '.msk'),) + valid_exts = ('.msk',) + _compressed_suffixes = () + +load = BvMskImage.load +save = BvMskImage.instance_to_filename diff --git a/nibabel/brainvoyager/bv_vmp.py b/nibabel/brainvoyager/bv_vmp.py new file mode 100644 index 0000000000..ba5b4a9ed9 --- /dev/null +++ b/nibabel/brainvoyager/bv_vmp.py @@ -0,0 +1,199 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +"""Reading / writing functions for Brainvoyager (BV) VMP files. + +for documentation on the file format see: +http://support.brainvoyager.com/installation-introduction/23-file-formats/377-users-guide-23-the-format-of-nr-vmp-files.html + +Author: Thomas Emmerling +""" +from __future__ import division +from .bv import (BvFileHeader, BvFileImage, update_BV_header) +from ..spatialimages import HeaderDataError + +VMP_HDR_DICT_PROTO = ( + ('magic_number', 'I', 2712847316), + ('version', 'h', 6), + ('document_type', 'h', 1), + ('nr_of_submaps', 'i', 1), + ('nr_of_timepoints', 'i', 0), + ('nr_of_component_params', 'i', 0), + ('show_params_range_from', 'i', 0), + ('show_params_range_to', 'i', 0), + ('use_for_fingerprint_params_range_from', 'i', 0), + ('use_for_fingerprint_params_range_to', 'i', 0), + ('x_start', 'i', 57), + ('x_end', 'i', 231), + ('y_start', 'i', 52), + ('y_end', 'i', 172), + ('z_start', 'i', 59), + ('z_end', 'i', 197), + ('resolution', 'i', 3), + ('dim_x', 'i', 256), + ('dim_y', 'i', 256), + ('dim_z', 'i', 256), + ('vtc_filename', 'z', b''), + ('prt_filename', 'z', b''), + ('voi_filename', 'z', b''), + ('maps', ( + ('type_of_map', 'i', 1), + ('map_threshold', 'f', 1.6500), + ('upper_threshold', 'f', 8.0), + ('map_name', 'z', b'New Map'), + ('pos_min_r', 'B', 255), + ('pos_min_g', 'B', 0), + ('pos_min_b', 'B', 0), + ('pos_max_r', 'B', 255), + ('pos_max_g', 'B', 255), + ('pos_max_b', 'B', 0), + ('neg_min_r', 'B', 255), + ('neg_min_g', 'B', 0), + ('neg_min_b', 'B', 255), + ('neg_max_r', 'B', 0), + ('neg_max_g', 'B', 0), + ('neg_max_b', 'B', 255), + ('use_vmp_color', 'B', 0), + ('lut_filename', 'z', b''), + ('transparent_color_factor', 'f', 1.0), + ('nr_of_lags', 'i', (0, 'type_of_map', 3)), + ('display_min_lag', 'i', (0, 'type_of_map', 3)), + ('display_max_lag', 'i', (0, 'type_of_map', 3)), + ('show_correlation_or_lag', 'i', (0, 'type_of_map', 3)), + 
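+        # NOTE: the four lag fields above are conditional fields (see
+        # parse_BV_header); they are only read / written when 'type_of_map'
+        # is 3, otherwise their defaults are used.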
('cluster_size_threshold', 'i', 50), + ('enable_cluster_size_threshold', 'B', 0), + ('show_values_above_upper_threshold', 'i', 1), + ('df1', 'i', 249), + ('df2', 'i', 0), + ('show_pos_neg_values', 'B', 3), + ('nr_of_used_voxels', 'i', 45555), + ('size_of_fdr_table', 'i', 0), + ('fdr_table_info', ( + ('q', 'f', 0.0), + ('crit_standard', 'f', 0.0), + ('crit_conservative', 'f', 0.0), + ), 'size_of_fdr_table'), + ('use_fdr_table_index', 'i', 0), + ), 'nr_of_submaps'), + ('component_time_points', ( + ('timepoints', (('timepoint', 'f', 0.0),), 'nr_of_timepoints'), + ), 'nr_of_submaps'), + ('component_params', ( + ('param_name', 'z', b''), + ('param_values', (('value', 'f', 0.0),), 'nr_of_submaps') + ), 'nr_of_component_params') +) + + +class BvVmpHeader(BvFileHeader): + ''' Class for BrainVoyager NR-VMP header + ''' + + # format defaults + allowed_dtypes = [2] + default_dtype = 2 + hdr_dict_proto = VMP_HDR_DICT_PROTO + supported_fileversions = [6] + + def get_data_shape(self): + ''' Get shape of data + ''' + hdr = self._hdr_dict + # calculate dimensions + z = (hdr['z_end'] - hdr['z_start']) / hdr['resolution'] + y = (hdr['y_end'] - hdr['y_start']) / hdr['resolution'] + x = (hdr['x_end'] - hdr['x_start']) / hdr['resolution'] + n = hdr['nr_of_submaps'] + return tuple(int(d) for d in [n, z, y, x]) + + def set_data_shape(self, shape=None, zyx=None, n=None): + ''' Set shape of data + + To conform with nibabel standards this implements shape. + However, to fill the BvVmpHeader with sensible information use the + zyx and the n parameter instead. + + Parameters + ---------- + shape: sequence + sequence of integers specifying data array shape + zyx: 3x2 nested list of integers, optional + [[z_start,z_end],[y_start,y_end],[x_start,x_end]] + array storing borders of data + n: int, optional + number of submaps + + ''' + hdr_dict_old = self._hdr_dict.copy() + if (shape is None) and (zyx is None) and (n is None): + raise HeaderDataError('Shape, zyx, or n needs to be specified!') + + if ((n is not None) and (n < 1)) or \ + ((shape is not None) and (shape[0] < 1)): + raise HeaderDataError('NR-VMP files need at least one sub-map!') + + if shape is not None: + # Use zyx and t parameters instead of shape. + # Dimensions will start from default coordinates. + if len(shape) != 4: + raise HeaderDataError( + 'Shape for VMP files must be 4 dimensional (NZYX)!') + self._hdr_dict['x_end'] = self._hdr_dict['x_start'] + \ + (shape[3] * self._hdr_dict['resolution']) + self._hdr_dict['y_end'] = self._hdr_dict['y_start'] + \ + (shape[2] * self._hdr_dict['resolution']) + self._hdr_dict['z_end'] = self._hdr_dict['z_start'] + \ + (shape[1] * self._hdr_dict['resolution']) + self._hdr_dict['nr_of_submaps'] = int(shape[0]) + self._hdr_dict = update_BV_header(self.hdr_dict_proto, + hdr_dict_old, self._hdr_dict) + return + if zyx is not None: + self._hdr_dict['z_start'] = zyx[0][0] + self._hdr_dict['z_end'] = zyx[0][1] + self._hdr_dict['y_start'] = zyx[1][0] + self._hdr_dict['y_end'] = zyx[1][1] + self._hdr_dict['x_start'] = zyx[2][0] + self._hdr_dict['x_end'] = zyx[2][1] + if n is not None: + self._hdr_dict['nr_of_submaps'] = int(n) + self._hdr_dict = update_BV_header(self.hdr_dict_proto, + hdr_dict_old, self._hdr_dict) + + @property + def framing_cube(self): + ''' Get the dimensions of the framing cube that constitutes + the coordinate system boundaries for the bounding box. 
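+
+        For VMP files the framing cube does not have to be guessed: it is
+        stored explicitly in the 'dim_z', 'dim_y' and 'dim_x' header fields.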
+ ''' + hdr = self._hdr_dict + return hdr['dim_z'], hdr['dim_y'], hdr['dim_x'] + + @framing_cube.setter + def framing_cube(self, fc): + ''' Set the dimensions of the framing cube that constitutes + the coordinate system boundaries for the bounding box. + + For VMP files this puts the values also into the header. + ''' + self._hdr_dict['dim_z'] = fc[0] + self._hdr_dict['dim_y'] = fc[1] + self._hdr_dict['dim_x'] = fc[2] + self._framing_cube = fc + + +class BvVmpImage(BvFileImage): + # Set the class of the corresponding header + header_class = BvVmpHeader + + # Set the label ('image') and the extension ('.vmp') for a VMP file + files_types = (('image', '.vmp'),) + valid_exts = ('.vmp',) + + +load = BvVmpImage.load +save = BvVmpImage.instance_to_filename diff --git a/nibabel/brainvoyager/bv_vmr.py b/nibabel/brainvoyager/bv_vmr.py new file mode 100644 index 0000000000..23dba82349 --- /dev/null +++ b/nibabel/brainvoyager/bv_vmr.py @@ -0,0 +1,249 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +"""Reading / writing functions for Brainvoyager (BV) VMR files. + +for documentation on the file format see: +http://support.brainvoyager.com/automation-aamp-development/23-file-formats/385-developer-guide-26-the-format-of-vmr-files.html + +Author: Sabrina Fontanella and Thomas Emmerling +""" +from __future__ import division +from .bv import (BvError, BvFileHeader, BvFileImage, parse_BV_header, + pack_BV_header, calc_BV_header_size, combine_st, parse_st) +from ..spatialimages import HeaderDataError +import numpy as np + + +VMR_PRHDR_DICT_PROTO = ( + ('version', 'h', 4), + ('dim_x', 'h', 256), + ('dim_y', 'h', 256), + ('dim_z', 'h', 256), +) + +VMR_PSHDR_DICT_PROTO = ( + ('offset_x', 'h', 0), + ('offset_y', 'h', 0), + ('offset_z', 'h', 0), + ('framing_cube', 'h', 256), + ('pos_infos_verified', 'i', 0), + ('coordinate_system_entry', 'i', 1), + ('slice_first_center_x', 'f', 127.5), + ('slice_first_center_y', 'f', 0.0), + ('slice_first_center_z', 'f', 0.0), + ('slice_last_center_x', 'f', -127.5), + ('slice_last_center_y', 'f', 0.0), + ('slice_last_center_z', 'f', 0.0), + ('row_dir_x', 'f', 0.0), + ('row_dir_y', 'f', 1.0), + ('row_dir_z', 'f', 0.0), + ('col_dir_x', 'f', 0.0), + ('col_dir_y', 'f', 0.0), + ('col_dir_z', 'f', -1.0), + ('nr_rows', 'i', 256), + ('nr_cols', 'i', 256), + ('fov_row_dir', 'f', 256.0), + ('fov_col_dir', 'f', 256.0), + ('slice_thickness', 'f', 1.0), + ('gap_thickness', 'f', 0.0), + ('nr_of_past_spatial_trans', 'i', 0), + ('past_spatial_trans', ( + ('name', 'z', b''), + ('type', 'i', b''), + ('source_file', 'z', b''), + ('nr_of_trans_val', 'i', b''), + ('trans_val', (('value', 'f', 0.0),), 'nr_of_trans_val') + ), 'nr_of_past_spatial_trans'), + ('lr_convention', 'B', 1), + ('reference_space', 'B', 0), + ('vox_res_x', 'f', 1.0), + ('vox_res_y', 'f', 1.0), + ('vox_res_z', 'f', 1.0), + ('flag_vox_resolution', 'B', 0), + ('flag_tal_space', 'B', 0), + ('min_intensity', 'i', 0), + ('mean_intensity', 'i', 127), + ('max_intensity', 'i', 255), +) + + +def compute_offset_post_hdr(hdr_dict, fileobj): + current_seek = fileobj.tell() + return current_seek + (hdr_dict['dim_x'] * hdr_dict['dim_y'] * + hdr_dict['dim_z']) + + +def merge_pre_pos(preDict, posDict): + temp = 
preDict.copy() + temp.update(posDict) + return temp + + +class BvVmrHeader(BvFileHeader): + """Class for BrainVoyager VMR header.""" + + # format defaults + default_endianness = '<' + allowed_dtypes = [3] + default_dtype = 3 + hdr_dict_proto = VMR_PRHDR_DICT_PROTO + VMR_PSHDR_DICT_PROTO + supported_fileversions = [4] + + def get_data_shape(self): + hdr = self._hdr_dict + # calculate dimensions + z = hdr['dim_z'] + y = hdr['dim_y'] + x = hdr['dim_x'] + return tuple(int(d) for d in [z, y, x]) + + def set_data_shape(self, shape): + """Set shape of data. + + Parameters + ---------- + shape : sequence + sequence of integers specifying data array shape + """ + if len(shape) != 3: + raise HeaderDataError( + 'Shape for VMR files must be 3 dimensional (ZYX)!') + self._hdr_dict['dim_x'] = shape[2] + self._hdr_dict['dim_y'] = shape[1] + self._hdr_dict['dim_z'] = shape[0] + + def set_data_offset(self, offset): + """Set offset into data file to read data. + + The offset is always 8 for VMR files. + """ + self._data_offset = 8 + + def get_data_offset(self): + """Return offset into data file to read data. + + The offset is always 8 for VMR files. + """ + return 8 + + def set_xflip(self, xflip): + if xflip is True: + self._hdr_dict['lr_convention'] = 1 + elif xflip is False: + self._hdr_dict['lr_convention'] = 2 + else: + self._hdr_dict['lr_convention'] = 0 + + def get_xflip(self): + xflip = int(self._hdr_dict['lr_convention']) + if xflip == 1: + return True + elif xflip == 2: + return False + else: + raise BvError('Left-right convention is unknown!') + + def get_base_affine(self): + """Get affine from VMR header fields. + + Internal storage of the image is ZYXT, where (in patient coordiante/ + real world orientations): + Z := axis increasing from right to left (R to L) + Y := axis increasing from superior to inferior (S to I) + X := axis increasing from anterior to posterior (A to P) + T := volumes (if present in file format) + """ + zooms = self.get_zooms() + if not self.get_xflip(): + # make the BV internal Z axis neurological (left-is-left); + # not default in BV files! 
+ zooms = (-zooms[0], zooms[1], zooms[2]) + + # compute the rotation + rot = np.zeros((3, 3)) + # make the flipped BV Z axis the new R axis + rot[:, 0] = [-zooms[0], 0, 0] + # make the flipped BV X axis the new A axis + rot[:, 1] = [0, 0, -zooms[2]] + # make the flipped BV Y axis the new S axis + rot[:, 2] = [0, -zooms[1], 0] + + # compute the translation + fcc = np.array(self.framing_cube) / 2 # center of framing cube + bbc = np.array(self.get_bbox_center()) # center of bounding box + tra = np.dot((bbc - fcc), rot) + + # assemble + M = np.eye(4, 4) + M[0:3, 0:3] = rot + M[0:3, 3] = tra.T + + # look for additional transformations in past_spatial_trans and combine + # with M + if self._hdr_dict['past_spatial_trans']: + st_array = np.zeros((len(self._hdr_dict['past_spatial_trans']), + 4, 4)) + for st in range(len(self._hdr_dict['past_spatial_trans'])): + st_array[st, :, :] = \ + parse_st(self._hdr_dict['past_spatial_trans'][st]) + combined_st = combine_st(st_array, inv=True) + M = np.dot(M, combined_st) + return M + + @classmethod + def from_fileobj(klass, fileobj, endianness=default_endianness, + check=True): + hdr_dict_pre = parse_BV_header(VMR_PRHDR_DICT_PROTO, fileobj) + # calculate new seek for the post data header + new_seek = compute_offset_post_hdr(hdr_dict_pre, fileobj) + fileobj.seek(new_seek) + hdr_dict_pos = parse_BV_header(VMR_PSHDR_DICT_PROTO, fileobj) + hdr_dict = merge_pre_pos(hdr_dict_pre, hdr_dict_pos) + # The offset is always 8 for VMR files. + offset = 8 + return klass(hdr_dict, endianness, check, offset) + + def get_bbox_center(self): + """Get the center coordinate of the bounding box. + Not required for VMR files + """ + return np.array([self.framing_cube / 2 for d in range(3)]) + + def get_zooms(self): + return (self._hdr_dict['vox_res_z'], self._hdr_dict['vox_res_y'], + self._hdr_dict['vox_res_x']) + + def set_zooms(self, zooms): + self._hdr_dict['vox_res_z'] = float(zooms[0]) + self._hdr_dict['vox_res_y'] = float(zooms[1]) + self._hdr_dict['vox_res_x'] = float(zooms[2]) + + def write_to(self, fileobj): + binaryblock = pack_BV_header(self.hdr_dict_proto, self._hdr_dict) + # calculate size of preDataHeader + sizePrH = calc_BV_header_size(VMR_PRHDR_DICT_PROTO, self._hdr_dict) + # write the preHeader + fileobj.write(binaryblock[0:sizePrH]) + fileobj.seek(compute_offset_post_hdr(self._hdr_dict, fileobj)) + fileobj.write(binaryblock[sizePrH:]) + + +class BvVmrImage(BvFileImage): + """Class for BrainVoyager VMR images.""" + + # Set the class of the corresponding header + header_class = BvVmrHeader + + # Set the label ('image') and the extension ('.vtc') for a VMR file + files_types = (('image', '.vmr'),) + valid_exts = ('.vmr',) + + +load = BvVmrImage.load +save = BvVmrImage.instance_to_filename diff --git a/nibabel/brainvoyager/bv_vtc.py b/nibabel/brainvoyager/bv_vtc.py new file mode 100644 index 0000000000..d6d0e006d7 --- /dev/null +++ b/nibabel/brainvoyager/bv_vtc.py @@ -0,0 +1,154 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +"""Reading / writing functions for Brainvoyager (BV) VTC files. 
+
+for documentation on the file format see:
+http://support.brainvoyager.com/installation-introduction/23-file-formats/379-users-guide-23-the-format-of-vtc-files.html
+
+Author: Thomas Emmerling
+"""
+from __future__ import division
+from .bv import BvError, BvFileHeader, BvFileImage
+from ..spatialimages import HeaderDataError
+
+VTC_HDR_DICT_PROTO = (
+    ('version', 'h', 3),
+    ('fmr', 'z', b''),
+    ('nr_prts', 'h', 0),
+    ('prts', (('filename', 'z', b''),), 'nr_prts'),
+    ('current_prt', 'h', 0),
+    ('datatype', 'h', 2),
+    ('volumes', 'h', 0),
+    ('resolution', 'h', 3),
+    ('x_start', 'h', 57),
+    ('x_end', 'h', 231),
+    ('y_start', 'h', 52),
+    ('y_end', 'h', 172),
+    ('z_start', 'h', 59),
+    ('z_end', 'h', 197),
+    ('lr_convention', 'b', 1),
+    ('ref_space', 'b', 3),
+    ('tr', 'f', 2000.0),
+)
+
+
+class BvVtcHeader(BvFileHeader):
+    """Header for Brainvoyager (BV) VTC files.
+
+    For documentation on the file format see:
+    http://support.brainvoyager.com/installation-introduction/23-file-formats/379-users-guide-23-the-format-of-vtc-files.html
+    """
+
+    # format defaults
+    allowed_dtypes = [1, 2]
+    default_dtype = 2
+    hdr_dict_proto = VTC_HDR_DICT_PROTO
+    supported_fileversions = [3]
+
+    def get_data_shape(self):
+        """Get shape of data."""
+        hdr = self._hdr_dict
+        # calculate dimensions
+        z = (hdr['z_end'] -
+             hdr['z_start']) / hdr['resolution']
+        y = (hdr['y_end'] -
+             hdr['y_start']) / hdr['resolution']
+        x = (hdr['x_end'] -
+             hdr['x_start']) / hdr['resolution']
+        t = hdr['volumes']
+        return tuple(int(d) for d in [z, y, x, t])
+
+    def set_data_shape(self, shape=None, zyx=None, t=None):
+        """Set shape of data.
+
+        To conform with nibabel standards this implements shape.
+        However, to fill the BvVtcHeader with sensible information
+        use the zyx and the t parameter instead.
+
+        Parameters
+        ----------
+        shape : sequence
+           sequence of integers specifying data array shape
+        zyx: 3x2 nested list of integers, optional
+           [[z_start,z_end],[y_start,y_end],[x_start,x_end]]
+           array storing borders of data
+        t: int
+           number of volumes
+        """
+        if (shape is None) and (zyx is None) and (t is None):
+            raise HeaderDataError('Shape, zyx, or t needs to be specified!')
+        if ((t is not None) and (t < 0)) or \
+                ((shape is not None) and (len(shape) == 4) and (shape[3] < 0)):
+            raise HeaderDataError('VTC files need at least one volume!')
+        if shape is not None:
+            # Use zyx and t parameters instead of shape.
+            # Dimensions will start from standard coordinates.
+ if len(shape) != 4: + raise HeaderDataError( + 'Shape for VTC files must be 4 dimensional (ZYXT)!') + self._hdr_dict['x_end'] = \ + self._hdr_dict['x_start'] + \ + (shape[2] * self._hdr_dict['resolution']) + self._hdr_dict['y_end'] = \ + self._hdr_dict['y_start'] + \ + (shape[1] * self._hdr_dict['resolution']) + self._hdr_dict['z_end'] = \ + self._hdr_dict['z_start'] + \ + (shape[0] * self._hdr_dict['resolution']) + self._hdr_dict['volumes'] = shape[3] + return + if zyx is not None: + self._hdr_dict['z_start'] = zyx[0][0] + self._hdr_dict['z_end'] = zyx[0][1] + self._hdr_dict['y_start'] = zyx[1][0] + self._hdr_dict['y_end'] = zyx[1][1] + self._hdr_dict['x_start'] = zyx[2][0] + self._hdr_dict['x_end'] = zyx[2][1] + if t is not None: + self._hdr_dict['volumes'] = t + + def get_xflip(self): + """Get xflip for data.""" + xflip = int(self._hdr_dict['lr_convention']) + if xflip == 1: + return True + elif xflip == 2: + return False + else: + raise BvError('Left-right convention is unknown!') + + def set_xflip(self, xflip): + """Set xflip for data.""" + if xflip is True: + self._hdr_dict['lr_convention'] = 1 + elif xflip is False: + self._hdr_dict['lr_convention'] = 2 + else: + self._hdr_dict['lr_convention'] = 0 + + +class BvVtcImage(BvFileImage): + """Class for BrainVoyager VTC images.""" + + # Set the class of the corresponding header + header_class = BvVtcHeader + + # Set the label ('image') and the extension ('.vtc') for a VTC file + files_types = (('image', '.vtc'),) + valid_exts = ('.vtc',) + +load = BvVtcImage.load +save = BvVtcImage.instance_to_filename diff --git a/nibabel/brainvoyager/tests/__init__.py b/nibabel/brainvoyager/tests/__init__.py new file mode 100644 index 0000000000..e4659cfab1 --- /dev/null +++ b/nibabel/brainvoyager/tests/__init__.py @@ -0,0 +1,23 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +''' Tests for BrainVoyager file formats ''' +from .test_bv_vtc import BVVTC_EXAMPLE_IMAGES, BVVTC_EXAMPLE_HDRS +from .test_bv_msk import BVMSK_EXAMPLE_IMAGES, BVMSK_EXAMPLE_HDRS +from .test_bv_vmp import BVVMP_EXAMPLE_IMAGES, BVVMP_EXAMPLE_HDRS +from .test_bv_vmr import BVVMR_EXAMPLE_IMAGES, BVVMR_EXAMPLE_HDRS + +__all__ = ('BVVTC_EXAMPLE_IMAGES', 'BVMSK_EXAMPLE_IMAGES', + 'BVVMP_EXAMPLE_IMAGES', 'BVVMR_EXAMPLE_IMAGES') + +# assemble example images and corresponding example headers for testing +BV_EXAMPLE_IMAGES = (BVVTC_EXAMPLE_IMAGES, BVMSK_EXAMPLE_IMAGES, + BVVMP_EXAMPLE_IMAGES, BVVMR_EXAMPLE_IMAGES) + +BV_EXAMPLE_HDRS = (BVVTC_EXAMPLE_HDRS, BVMSK_EXAMPLE_HDRS, + BVVMP_EXAMPLE_HDRS, BVVMR_EXAMPLE_HDRS) diff --git a/nibabel/brainvoyager/tests/test_bv.py b/nibabel/brainvoyager/tests/test_bv.py new file mode 100644 index 0000000000..6ec915ae73 --- /dev/null +++ b/nibabel/brainvoyager/tests/test_bv.py @@ -0,0 +1,306 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. 
+# +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +"""Test main BV module.""" + +import os +import numpy as np +from ...loadsave import load +from ...tmpdirs import InTemporaryDirectory +from ..bv import (read_c_string, parse_BV_header, pack_BV_header, BvFileHeader, + calc_BV_header_size, _proto2default, update_BV_header, + parse_st, combine_st, BvError) +from ..bv_vtc import VTC_HDR_DICT_PROTO, BvVtcHeader +from ..bv_vmr import BvVmrImage +from ...testing import (assert_equal, assert_array_equal, data_path, + assert_true, assert_raises) +from . import BV_EXAMPLE_IMAGES, BV_EXAMPLE_HDRS +from ...externals import OrderedDict + + +vtc_file = os.path.join(data_path, 'test.vtc') +vmp_file = os.path.join(data_path, 'test.vmp') +vmr_file = os.path.join(data_path, 'test.vmr') + +TEST_PROTO = ( + ('some_signed_char', 'b', 1), + ('some_unsigned_char', 'B', 255), + ('some_signed_short_integer', 'h', 6), + ('some_signed_integer', 'i', 1), + ('another_signed_integer', 'i', 3), + ('some_counter_integer', 'i', 4), + ('another_counter_integer', 'i', 0), + ('some_unsigned_long_integer', 'I', 2712847316), + ('some_float', 'f', 1.0), + ('some_zero_terminated_string', 'z', b'HelloWorld!'), + ('some_conditional_integer', 'i', (0, 'some_signed_integer', 1)), + ('another_conditional_integer', 'i', (23, 'another_signed_integer', 1)), + ('some_nested_field', ( + ('a_number', 'i', 1), + ('a_float', 'f', 1.6500), + ('a_string', 'z', b'test.txt'), + ('nested_counter_integer', 'i', 2), + ('fdr_table_info', ( + ('another_float', 'f', 0.0), + ('another_string', 'z', b'sample'), + ), 'nested_counter_integer'), + ), 'some_counter_integer'), + ('another_nested_field', ( + ('b_float', 'f', 1.234), + ), 'another_counter_integer'), +) + +TEST_HDR = OrderedDict([ + ('some_signed_char', 1), + ('some_unsigned_char', 255), + ('some_signed_short_integer', 6), + ('some_signed_integer', 1), + ('another_signed_integer', 3), + ('some_counter_integer', 4), + ('another_counter_integer', 0), + ('some_unsigned_long_integer', 2712847316), + ('some_float', 1.0), + ('some_zero_terminated_string', b'HelloWorld!'), + ('some_conditional_integer', 0), + ('another_conditional_integer', 23), + ('some_nested_field', + [OrderedDict([('a_number', 1), + ('a_float', 1.65), + ('a_string', b'test.txt'), + ('nested_counter_integer', 2), + ('fdr_table_info', + [OrderedDict([('another_float', 0.0), + ('another_string', b'sample')]), + OrderedDict([('another_float', 0.0), + ('another_string', b'sample')])])]), + OrderedDict([('a_number', 1), + ('a_float', 1.65), + ('a_string', b'test.txt'), + ('nested_counter_integer', 2), + ('fdr_table_info', + [OrderedDict([('another_float', 0.0), + ('another_string', b'sample')]), + OrderedDict([('another_float', 0.0), + ('another_string', b'sample')])])]), + OrderedDict([('a_number', 1), + ('a_float', 1.65), + ('a_string', b'test.txt'), + ('nested_counter_integer', 2), + ('fdr_table_info', + [OrderedDict([('another_float', 0.0), + ('another_string', b'sample')]), + OrderedDict([('another_float', 0.0), + ('another_string', b'sample')])])]), + OrderedDict([('a_number', 1), + ('a_float', 1.65), + ('a_string', b'test.txt'), + ('nested_counter_integer', 2), + ('fdr_table_info', + [OrderedDict([('another_float', 0.0), + ('another_string', b'sample')]), + OrderedDict([('another_float', 0.0), + ('another_string', + b'sample')])])])]), + ('another_nested_field', [])]) + +TEST_HDR_PACKED = \ + b''.join([b'\x01\xff\x06\x00\x01\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00', + 
b'\x00\x00\x00\x00\x00\xd4\xc3\xb2\xa1\x00\x00\x80?HelloWorld!', + b'\x00\x00\x00\x00\x00\x01\x00\x00\x0033\xd3?test.txt\x00\x02', + b'\x00\x00\x00\x00\x00\x00\x00sample\x00\x00\x00\x00\x00sample', + b'\x00\x01\x00\x00\x0033\xd3?test.txt\x00\x02\x00\x00\x00\x00', + b'\x00\x00\x00sample\x00\x00\x00\x00\x00sample\x00\x01\x00\x00', + b'\x0033\xd3?test.txt\x00\x02\x00\x00\x00\x00\x00\x00\x00sample', + b'\x00\x00\x00\x00\x00sample\x00\x01\x00\x00\x0033\xd3?test.txt', + b'\x00\x02\x00\x00\x00\x00\x00\x00\x00sample\x00\x00\x00\x00', + b'\x00sample\x00']) + + +def test_read_c_string(): + # sample binary block + binary = b'test.fmr\x00test.prt\x00' + with InTemporaryDirectory(): + # create a tempfile and write the binary block to it + path = 'test.header' + with open(path, 'wb') as fwrite: + fwrite.write(binary) + + # open it again + with open(path, 'rb') as fread: + # test readout of one string + assert_equal([s for s in read_c_string(fread)], [b'test.fmr']) + # test new file position + assert_equal(fread.tell(), 9) + # manually rewind + fread.seek(0) + # test readout of two strings + assert_equal([s for s in read_c_string(fread, 2, rewind=True)], + [b'test.fmr', b'test.prt']) + # test automatic rewind + assert_equal(fread.tell(), 0) + # test readout of two strings with trailing zeros + assert_equal([s for s in read_c_string(fread, 2, strip=False)], + [b'test.fmr\x00', b'test.prt\x00']) + # test new file position + assert_equal(fread.tell(), 18) + # test readout of one string from given position + fread.seek(0) + assert_equal([s for s in read_c_string(fread, start_pos=9)], + [b'test.prt']) + + +def test_combine_st(): + vmr = BvVmrImage.from_filename(vmr_file) + st_array = [] + for st in range(vmr.header._hdr_dict['nr_of_past_spatial_trans']): + st_array.append(parse_st( + vmr.header._hdr_dict['past_spatial_trans'][st])) + st_array = np.array(st_array) + combined_st = combine_st(st_array, inv=True) + correct_combined_st = [[1., 0., 0., 0.], + [0., 1., 0., -1.], + [0., 0., 1., 1.], + [0., 0., 0., 1.]] + assert_array_equal(combined_st, correct_combined_st) + combined_st = combine_st(st_array, inv=False) + correct_combined_st = [[1., 0., 0., 0.], + [0., 1., 0., 1.], + [0., 0., 1., -1.], + [0., 0., 0., 1.]] + assert_array_equal(combined_st, correct_combined_st) + + +def test_parse_st(): + vmr = BvVmrImage.from_filename(vmr_file) + ST = parse_st(vmr.header._hdr_dict['past_spatial_trans'][0]) + correct_st = [[1., 0., 0., -1.], + [0., 1., 0., 0.], + [0., 0., 1., -1.], + [0., 0., 0., 1.]] + assert_array_equal(ST, correct_st) + + # parse_st will only handle 4x4 matrices + vmr.header._hdr_dict['past_spatial_trans'][0]['nr_of_trans_val'] = 10 + assert_raises(BvError, parse_st, + vmr.header._hdr_dict['past_spatial_trans'][0]) + + +def compare_header_values(header, expected_header): + '''recursively compare every value in header with expected_header''' + + for key in header: + if (type(header[key]) is list): + for i in range(len(expected_header[key])): + compare_header_values(header[key][i], expected_header[key][i]) + assert_equal(header[key], expected_header[key]) + + +def test_BvFileHeader_parse_BV_header(): + bv = _proto2default(TEST_PROTO) + compare_header_values(bv, TEST_HDR) + + +def test_BvFileHeader_pack_BV_header(): + bv = _proto2default(TEST_PROTO) + packed_bv = pack_BV_header(TEST_PROTO, bv) + assert_equal(packed_bv, TEST_HDR_PACKED) + + # open vtc test file + fileobj = open(vtc_file, 'rb') + hdr_dict = parse_BV_header(VTC_HDR_DICT_PROTO, fileobj) + binaryblock = pack_BV_header(VTC_HDR_DICT_PROTO, 
hdr_dict) + assert_equal(binaryblock, b''.join([ + b'\x03\x00test.fmr\x00\x01\x00test.prt\x00\x00\x00\x02\x00\x05\x00', + b'\x03\x00x\x00\x96\x00x\x00\x96\x00x\x00\x96\x00\x01\x01\x00\x00\xfaD' + ])) + + +def test_BvFileHeader_calc_BV_header_size(): + bv = _proto2default(TEST_PROTO) + assert_equal(calc_BV_header_size(TEST_PROTO, bv), 216) + + # change a header field + bv['some_zero_terminated_string'] = 'AnotherString' + assert_equal(calc_BV_header_size(TEST_PROTO, bv), 218) + + # open vtc test file + fileobj = open(vtc_file, 'rb') + hdr_dict = parse_BV_header(VTC_HDR_DICT_PROTO, fileobj) + hdrSize = calc_BV_header_size(VTC_HDR_DICT_PROTO, hdr_dict) + assert_equal(hdrSize, 48) + + +def test_BvFileHeader_update_BV_header(): + # increase a nested field counter + bv = _proto2default(TEST_PROTO) + bv_new = bv.copy() + bv_new['some_counter_integer'] = 5 + bv_updated = update_BV_header(TEST_PROTO, bv, bv_new) + assert_equal(len(bv_updated['some_nested_field']), 5) + + # decrease a nested field counter + bv = _proto2default(TEST_PROTO) + bv_new = bv.copy() + bv_new['some_counter_integer'] = 3 + bv_updated = update_BV_header(TEST_PROTO, bv, bv_new) + assert_equal(len(bv_updated['some_nested_field']), 3) + + +def test_BvFileHeader_xflip(): + bv = BvFileHeader() + assert_true(bv.xflip) + + # should only return + bv.xflip = True + + def set_xflip_false(): + bv.xflip = False + + # cannot flip most BV images + assert_raises(BvError, set_xflip_false) + + +def test_BvFileHeader_endianness(): + assert_raises(BvError, BvFileHeader, endianness='>') + + +def test_BvFileHeader_not_implemented(): + bv = BvFileHeader() + assert_raises(NotImplementedError, bv.get_data_shape) + assert_raises(NotImplementedError, bv.set_data_shape, (1, 2, 3)) + + +def test_BvVtcHeader_from_header(): + vtc = load(vtc_file) + vtc_data = vtc.get_data() + + # try the same load through the header + fread = open(vtc_file, 'rb') + header = BvVtcHeader.from_fileobj(fread) + image = header.data_from_fileobj(fread) + assert_array_equal(vtc_data, image) + fread.close() + + +def test_BvVtcHeader_data_from_fileobj(): + vtc = load(vtc_file) + vtc_data = vtc.get_data() + + # try the same load through the header + fread = open(vtc_file, 'rb') + header = BvVtcHeader.from_fileobj(fread) + image = header.data_from_fileobj(fread) + assert_array_equal(vtc_data, image) + fread.close() + + +def test_parse_all_BV_headers(): + for images, headers in zip(BV_EXAMPLE_IMAGES, BV_EXAMPLE_HDRS): + for i in range(len(images)): + image = load(images[i]['fname']) + compare_header_values(image.header._hdr_dict, headers[i]) diff --git a/nibabel/brainvoyager/tests/test_bv_msk.py b/nibabel/brainvoyager/tests/test_bv_msk.py new file mode 100644 index 0000000000..c17bc4f6b9 --- /dev/null +++ b/nibabel/brainvoyager/tests/test_bv_msk.py @@ -0,0 +1,61 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. 
+# +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +"""Test BV module for MSK files.""" + +from os.path import join as pjoin +import numpy as np +from ..bv_msk import BvMskImage, BvMskHeader +from ...testing import (assert_equal, assert_raises, data_path) +from ...spatialimages import HeaderDataError +from ...externals import OrderedDict + +# Example images in format expected for ``test_image_api``, adding ``zooms`` +# item. +BVMSK_EXAMPLE_IMAGES = [ + dict( + fname=pjoin(data_path, 'test.msk'), + shape=(10, 10, 10), + dtype=np.uint8, + affine=np.array([[-3., 0, 0, -21.], + [0, 0, -3., -21.], + [0, -3., 0, -21.], + [0, 0, 0, 1.]]), + zooms=(3., 3., 3.), + fileformat=BvMskImage, + # These values are from NeuroElf + data_summary=dict( + min=0, + max=1, + mean=0.499), + is_proxy=True) +] + +BVMSK_EXAMPLE_HDRS = [ + OrderedDict([('resolution', 3), + ('x_start', 120), + ('x_end', 150), + ('y_start', 120), + ('y_end', 150), + ('z_start', 120), + ('z_end', 150)]) +] + + +def test_BvMskHeader_set_data_shape(): + msk = BvMskHeader() + assert_equal(msk.get_data_shape(), (46, 40, 58)) + msk.set_data_shape((45, 39, 57)) + assert_equal(msk.get_data_shape(), (45, 39, 57)) + + # Use zyx parameter instead of shape + msk.set_data_shape(None, [[57, 240], [52, 178], [59, 191]]) + assert_equal(msk.get_data_shape(), (61, 42, 44)) + + # raise error when neither shape nor xyz is specified + assert_raises(HeaderDataError, msk.set_data_shape, None, None) diff --git a/nibabel/brainvoyager/tests/test_bv_vmp.py b/nibabel/brainvoyager/tests/test_bv_vmp.py new file mode 100644 index 0000000000..5a275d97ab --- /dev/null +++ b/nibabel/brainvoyager/tests/test_bv_vmp.py @@ -0,0 +1,357 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +"""Test BV module for VMP files.""" + +from os.path import join as pjoin +import numpy as np +from ..bv_vmp import BvVmpImage, BvVmpHeader +from ...testing import (assert_equal, assert_raises, data_path) +from ...spatialimages import HeaderDataError +from ...externals import OrderedDict + +# Example images in format expected for ``test_image_api``, adding ``zooms`` +# item. 
+BVVMP_EXAMPLE_IMAGES = [ + dict( + fname=pjoin(data_path, 'test.vmp'), + shape=(1, 10, 10, 10), + dtype=np.float32, + affine=np.array([[-3., 0, 0, -21.], + [0, 0, -3., -21.], + [0, -3., 0, -21.], + [0, 0, 0, 1.]]), + zooms=(3., 3., 3.), + fileformat=BvVmpImage, + # These values are from NeuroElf + data_summary=dict( + min=0.0033484352752566338, + max=7.996956825256348, + mean=3.9617851), + is_proxy=True), + dict( + fname=pjoin(data_path, 'test2.vmp'), + shape=(2, 10, 10, 10), + dtype=np.float32, + affine=np.array([[-3., 0, 0, -21.], + [0, 0, -3., -21.], + [0, -3., 0, -21.], + [0, 0, 0, 1.]]), + zooms=(3., 3., 3.), + fileformat=BvVmpImage, + # These values are from NeuroElf + data_summary=dict( + min=0.0033484352752566338, + max=7.996956825256348, + mean=3.9617851), + is_proxy=True), + dict( + fname=pjoin(data_path, 'test3.vmp'), + shape=(1, 5, 4, 3), + dtype=np.float32, + affine=np.array([[-2., 0, 0, 122.], + [0, 0, -2., 46.], + [0, -2., 0, 140.], + [0, 0, 0, 1.]]), + zooms=(2., 2., 2.), + fileformat=BvVmpImage, + # These values are from NeuroElf + data_summary=dict( + min=0.0, + max=7.163260459899902, + mean=2.1438238620758057), + is_proxy=True) +] + +BVVMP_EXAMPLE_HDRS = [ + OrderedDict([('magic_number', 2712847316), + ('version', 6), + ('document_type', 1), + ('nr_of_submaps', 1), + ('nr_of_timepoints', 0), + ('nr_of_component_params', 0), + ('show_params_range_from', 0), + ('show_params_range_to', 0), + ('use_for_fingerprint_params_range_from', 0), + ('use_for_fingerprint_params_range_to', 0), + ('x_start', 120), + ('x_end', 150), + ('y_start', 120), + ('y_end', 150), + ('z_start', 120), + ('z_end', 150), + ('resolution', 3), + ('dim_x', 256), + ('dim_y', 256), + ('dim_z', 256), + ('vtc_filename', b'test.vtc'), + ('prt_filename', b''), + ('voi_filename', b''), + ('maps', + [OrderedDict([('type_of_map', 1), + ('map_threshold', 1.649999976158142), + ('upper_threshold', 8.0), + ('map_name', b'Testmap'), + ('pos_min_r', 255), + ('pos_min_g', 0), + ('pos_min_b', 0), + ('pos_max_r', 255), + ('pos_max_g', 255), + ('pos_max_b', 0), + ('neg_min_r', 255), + ('neg_min_g', 0), + ('neg_min_b', 255), + ('neg_max_r', 0), + ('neg_max_g', 0), + ('neg_max_b', 255), + ('use_vmp_color', 0), + ('lut_filename', b''), + ('transparent_color_factor', 1.0), + ('nr_of_lags', 0), + ('display_min_lag', 0), + ('display_max_lag', 0), + ('show_correlation_or_lag', 0), + ('cluster_size_threshold', 50), + ('enable_cluster_size_threshold', 0), + ('show_values_above_upper_threshold', 1), + ('df1', 249), + ('df2', 1), + ('show_pos_neg_values', 3), + ('nr_of_used_voxels', 45555), + ('size_of_fdr_table', 0), + ('fdr_table_info', []), + ('use_fdr_table_index', 0)])]), + ('component_time_points', [OrderedDict([('timepoints', [])])]), + ('component_params', [])]), + OrderedDict([('magic_number', 2712847316), + ('version', 6), + ('document_type', 1), + ('nr_of_submaps', 2), + ('nr_of_timepoints', 0), + ('nr_of_component_params', 0), + ('show_params_range_from', 0), + ('show_params_range_to', 0), + ('use_for_fingerprint_params_range_from', 0), + ('use_for_fingerprint_params_range_to', 0), + ('x_start', 120), + ('x_end', 150), + ('y_start', 120), + ('y_end', 150), + ('z_start', 120), + ('z_end', 150), + ('resolution', 3), + ('dim_x', 256), + ('dim_y', 256), + ('dim_z', 256), + ('vtc_filename', b'test.vtc'), + ('prt_filename', b''), + ('voi_filename', b''), + ('maps', + [OrderedDict([('type_of_map', 1), + ('map_threshold', 1.649999976158142), + ('upper_threshold', 8.0), + ('map_name', b'Testmap'), + ('pos_min_r', 255), + 
('pos_min_g', 0), + ('pos_min_b', 0), + ('pos_max_r', 255), + ('pos_max_g', 255), + ('pos_max_b', 0), + ('neg_min_r', 255), + ('neg_min_g', 0), + ('neg_min_b', 255), + ('neg_max_r', 0), + ('neg_max_g', 0), + ('neg_max_b', 255), + ('use_vmp_color', 0), + ('lut_filename', b''), + ('transparent_color_factor', 1.0), + ('nr_of_lags', 0), + ('display_min_lag', 0), + ('display_max_lag', 0), + ('show_correlation_or_lag', 0), + ('cluster_size_threshold', 50), + ('enable_cluster_size_threshold', 0), + ('show_values_above_upper_threshold', 1), + ('df1', 249), + ('df2', 1), + ('show_pos_neg_values', 3), + ('nr_of_used_voxels', 45555), + ('size_of_fdr_table', 0), + ('fdr_table_info', []), + ('use_fdr_table_index', 0)]), + OrderedDict([('type_of_map', 1), + ('map_threshold', 1.649999976158142), + ('upper_threshold', 8.0), + ('map_name', b'Testmap'), + ('pos_min_r', 255), + ('pos_min_g', 0), + ('pos_min_b', 0), + ('pos_max_r', 255), + ('pos_max_g', 255), + ('pos_max_b', 0), + ('neg_min_r', 255), + ('neg_min_g', 0), + ('neg_min_b', 255), + ('neg_max_r', 0), + ('neg_max_g', 0), + ('neg_max_b', 255), + ('use_vmp_color', 0), + ('lut_filename', b''), + ('transparent_color_factor', 1.0), + ('nr_of_lags', 0), + ('display_min_lag', 0), + ('display_max_lag', 0), + ('show_correlation_or_lag', 0), + ('cluster_size_threshold', 50), + ('enable_cluster_size_threshold', 0), + ('show_values_above_upper_threshold', 1), + ('df1', 249), + ('df2', 1), + ('show_pos_neg_values', 3), + ('nr_of_used_voxels', 45555), + ('size_of_fdr_table', 0), + ('fdr_table_info', []), + ('use_fdr_table_index', 0)])]), + ('component_time_points', + [OrderedDict([('timepoints', [])]), + OrderedDict([('timepoints', [])])]), + ('component_params', [])]), + OrderedDict([('magic_number', 2712847316), + ('version', 6), + ('document_type', 1), + ('nr_of_submaps', 1), + ('nr_of_timepoints', 0), + ('nr_of_component_params', 0), + ('show_params_range_from', 0), + ('show_params_range_to', 0), + ('use_for_fingerprint_params_range_from', 0), + ('use_for_fingerprint_params_range_to', 0), + ('x_start', 102), + ('x_end', 108), + ('y_start', 54), + ('y_end', 62), + ('z_start', 62), + ('z_end', 72), + ('resolution', 2), + ('dim_x', 256), + ('dim_y', 256), + ('dim_z', 256), + ('vtc_filename', b'/path/to/test.vtc'), + ('prt_filename', b''), + ('voi_filename', b''), + ('maps', + [OrderedDict([('type_of_map', 3), + ('map_threshold', 0.16120874881744385), + ('upper_threshold', 0.800000011920929), + ('map_name', b''), + ('pos_min_r', 0), + ('pos_min_g', 0), + ('pos_min_b', 100), + ('pos_max_r', 0), + ('pos_max_g', 0), + ('pos_max_b', 255), + ('neg_min_r', 100), + ('neg_min_g', 100), + ('neg_min_b', 50), + ('neg_max_r', 200), + ('neg_max_g', 200), + ('neg_max_b', 100), + ('use_vmp_color', 0), + ('lut_filename', b''), + ('transparent_color_factor', 1.0), + ('nr_of_lags', 8), + ('display_min_lag', 0), + ('display_max_lag', 7), + ('show_correlation_or_lag', 0), + ('cluster_size_threshold', 4), + ('enable_cluster_size_threshold', 0), + ('show_values_above_upper_threshold', 1), + ('df1', 254), + ('df2', 0), + ('show_pos_neg_values', 3), + ('nr_of_used_voxels', 78498), + ('size_of_fdr_table', 8), + ('fdr_table_info', + [OrderedDict([('q', 0.10000000149011612), + ('crit_standard', + 0.13649293780326843), + ('crit_conservative', + 0.20921018719673157)]), + OrderedDict([('q', 0.05000000074505806), + ('crit_standard', + 0.16120874881744385), + ('crit_conservative', + 0.2241515964269638)]), + OrderedDict([('q', 0.03999999910593033), + ('crit_standard', + 0.16819879412651062), + 
('crit_conservative', + 0.2286316156387329)]), + OrderedDict([('q', 0.029999999329447746), + ('crit_standard', + 0.1767669916152954), + ('crit_conservative', + 0.2341766357421875)]), + OrderedDict([('q', 0.019999999552965164), + ('crit_standard', + 0.1880645751953125), + ('crit_conservative', + 0.2415686398744583)]), + OrderedDict([('q', 0.009999999776482582), + ('crit_standard', + 0.20535583794116974), + ('crit_conservative', + 0.2533867359161377)]), + OrderedDict([('q', 0.004999999888241291), + ('crit_standard', + 0.2205917239189148), + ('crit_conservative', + 0.2645622491836548)]), + OrderedDict([('q', 0.0010000000474974513), + ('crit_standard', + 0.25055694580078125), + ('crit_conservative', + 0.2873672842979431)])]), + ('use_fdr_table_index', 1)])]), + ('component_time_points', [OrderedDict([('timepoints', [])])]), + ('component_params', [])]) +] + + +def test_BvVmpHeader_set_data_shape(): + vmp = BvVmpHeader() + assert_equal(vmp.get_data_shape(), (1, 46, 40, 58)) + vmp.set_data_shape((1, 45, 39, 57)) + assert_equal(vmp.get_data_shape(), (1, 45, 39, 57)) + + # Use zyx parameter instead of shape + vmp.set_data_shape(None, [[57, 240], [52, 178], [59, 191]]) + assert_equal(vmp.get_data_shape(), (1, 61, 42, 44)) + + # Change number of submaps + vmp.set_data_shape(None, None, 5) # via n parameter + assert_equal(vmp.get_data_shape(), (5, 61, 42, 44)) + vmp.set_data_shape((3, 61, 42, 44)) # via shape parameter + assert_equal(vmp.get_data_shape(), (3, 61, 42, 44)) + + # raise error when neither shape nor zyx nor n is specified + assert_raises(HeaderDataError, vmp.set_data_shape, None, None, None) + + # raise error when n is negative + assert_raises(HeaderDataError, vmp.set_data_shape, (-1, 45, 39, 57)) + assert_raises(HeaderDataError, vmp.set_data_shape, None, None, -1) + + +def test_BvVmpHeader_set_framing_cube(): + vmp = BvVmpHeader() + assert_equal(vmp.framing_cube, (256, 256, 256)) + vmp.framing_cube = (512, 512, 512) + assert_equal(vmp.framing_cube, (512, 512, 512)) + vmp.framing_cube = (512, 513, 514) + assert_equal(vmp.framing_cube, (512, 513, 514)) diff --git a/nibabel/brainvoyager/tests/test_bv_vmr.py b/nibabel/brainvoyager/tests/test_bv_vmr.py new file mode 100644 index 0000000000..e6abbddf07 --- /dev/null +++ b/nibabel/brainvoyager/tests/test_bv_vmr.py @@ -0,0 +1,155 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +"""Test BV module for VMR files.""" + +from os.path import join as pjoin +import numpy as np +from ..bv import BvError +from ..bv_vmr import BvVmrImage, BvVmrHeader +from ...testing import (assert_equal, assert_true, assert_false, assert_raises, + assert_array_equal, data_path) +from ...externals import OrderedDict + +vmr_file = pjoin(data_path, 'test.vmr') + +# Example images in format expected for ``test_image_api``, adding ``zooms`` +# item. 
+BVVMR_EXAMPLE_IMAGES = [ + dict( + fname=pjoin(data_path, 'test.vmr'), + shape=(5, 4, 3), + dtype=np.uint8, + affine=np.array([[-1., 0., 0., 0.], + [0., 0., -1., -1.], + [0., -1., 0., 1.], + [0., 0., 0., 1.]]), + zooms=(3., 3., 3.), + fileformat=BvVmrImage, + # These values are from NeuroElf + data_summary=dict( + min=7, + max=218, + mean=120.3), + is_proxy=True) +] + +BVVMR_EXAMPLE_HDRS = [ + OrderedDict([('version', 4), + ('dim_x', 3), + ('dim_y', 4), + ('dim_z', 5), + ('offset_x', 0), + ('offset_y', 0), + ('offset_z', 0), + ('framing_cube', 256), + ('pos_infos_verified', 0), + ('coordinate_system_entry', 1), + ('slice_first_center_x', 127.5), + ('slice_first_center_y', 0.0), + ('slice_first_center_z', 0.0), + ('slice_last_center_x', -127.5), + ('slice_last_center_y', 0.0), + ('slice_last_center_z', 0.0), + ('row_dir_x', 0.0), + ('row_dir_y', 1.0), + ('row_dir_z', 0.0), + ('col_dir_x', 0.0), + ('col_dir_y', 0.0), + ('col_dir_z', -1.0), + ('nr_rows', 256), + ('nr_cols', 256), + ('fov_row_dir', 256.0), + ('fov_col_dir', 256.0), + ('slice_thickness', 1.0), + ('gap_thickness', 0.0), + ('nr_of_past_spatial_trans', 2), + ('past_spatial_trans', + [OrderedDict([('name', b'NoName'), + ('type', 2), + ('source_file', b'/home/test.vmr'), + ('nr_of_trans_val', 16), + ('trans_val', + [OrderedDict([('value', 1.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', -1.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 1.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 1.0)]), + OrderedDict([('value', -1.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 1.0)])])]), + OrderedDict([('name', b'NoName'), + ('type', 2), + ('source_file', b'/home/test_TRF.vmr'), + ('nr_of_trans_val', 16), + ('trans_val', + [OrderedDict([('value', 1.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 1.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 1.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 1.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 1.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 0.0)]), + OrderedDict([('value', 1.0)])])])]), + ('lr_convention', 1), + ('reference_space', 0), + ('vox_res_x', 1.0), + ('vox_res_y', 1.0), + ('vox_res_z', 1.0), + ('flag_vox_resolution', 0), + ('flag_tal_space', 0), + ('min_intensity', 0), + ('mean_intensity', 127), + ('max_intensity', 255)]) +] + + +def test_BvVmrHeader_xflip(): + vmr = BvVmrHeader() + assert_true(vmr.get_xflip()) + vmr.set_xflip(False) + assert_false(vmr.get_xflip()) + assert_equal(vmr._hdr_dict['lr_convention'], 2) + vmr.set_xflip(True) + assert_true(vmr.get_xflip()) + assert_equal(vmr._hdr_dict['lr_convention'], 1) + vmr.set_xflip(0) + assert_equal(vmr._hdr_dict['lr_convention'], 0) + assert_raises(BvError, vmr.get_xflip) + + vmr = BvVmrImage.from_filename(vmr_file) + vmr.header.set_xflip(False) + expected_affine = [[1., 0., 0., 0.], + [0., 0., -1., -1.], + [0., -1., 0., 1.], + [0., 0., 0., 1.]] + assert_array_equal(vmr.header.get_affine(), expected_affine) + + +def test_BvVmrHeader_set_zooms(): + vmr = BvVmrHeader() + assert_equal(vmr.get_zooms(), (1.0, 1.0, 1.0)) + vmr.set_zooms((1.1, 2.2, 3.3)) + 
assert_equal(vmr.get_zooms(), (1.1, 2.2, 3.3)) + assert_equal(vmr._hdr_dict['vox_res_z'], 1.1) diff --git a/nibabel/brainvoyager/tests/test_bv_vtc.py b/nibabel/brainvoyager/tests/test_bv_vtc.py new file mode 100644 index 0000000000..9c8693c434 --- /dev/null +++ b/nibabel/brainvoyager/tests/test_bv_vtc.py @@ -0,0 +1,146 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +"""Test BV module for VTC files.""" + +from os.path import join as pjoin +import numpy as np +from ..bv import BvError +from ..bv_vtc import BvVtcImage, BvVtcHeader +from ...testing import (data_path, assert_equal, assert_raises, assert_true, + assert_false) +from numpy.testing import (assert_array_equal) +from ...spatialimages import HeaderDataError +from ...externals import OrderedDict + +# Example images in format expected for ``test_image_api``, adding ``zooms`` +# item. +BVVTC_EXAMPLE_IMAGES = [ + dict( + fname=pjoin(data_path, 'test.vtc'), + shape=(10, 10, 10, 5), + dtype=np.float32, + affine=np.array([[-3., 0, 0, -21.], + [0, 0, -3., -21.], + [0, -3., 0, -21.], + [0, 0, 0, 1.]]), + zooms=(3., 3., 3.), + fileformat=BvVtcImage, + # These values are from NeuroElf + data_summary=dict( + min=0.0096689118, + max=199.93549, + mean=100.19728), + is_proxy=True) +] + +BVVTC_EXAMPLE_HDRS = [ + OrderedDict([('version', 3), + ('fmr', b'test.fmr'), + ('nr_prts', 1), + ('prts', [OrderedDict([('filename', b'test.prt')])]), + ('current_prt', 0), + ('datatype', 2), + ('volumes', 5), + ('resolution', 3), + ('x_start', 120), + ('x_end', 150), + ('y_start', 120), + ('y_end', 150), + ('z_start', 120), + ('z_end', 150), + ('lr_convention', 1), + ('ref_space', 1), + ('tr', 2000.0)]) +] + + +def test_get_base_affine(): + hdr = BvVtcHeader() + hdr.set_data_shape((3, 5, 7, 9)) + hdr.set_zooms((3, 3, 3, 3)) + assert_array_equal(hdr.get_base_affine(), + np.asarray([[-3., 0., 0., 193.5], + [0., 0., -3., 181.5], + [0., -3., 0., 205.5], + [0., 0., 0., 1.]])) + + +def test_BvVtcHeader_set_data_shape(): + vtc = BvVtcHeader() + assert_equal(vtc.get_data_shape(), (46, 40, 58, 0)) + vtc.set_data_shape((45, 39, 57, 0)) + assert_equal(vtc.get_data_shape(), (45, 39, 57, 0)) + + # Use zyx parameter instead of shape + vtc.set_data_shape(None, [[57, 240], [52, 178], [59, 191]]) + assert_equal(vtc.get_data_shape(), (61, 42, 44, 0)) + + # Change number of submaps + vtc.set_data_shape(None, None, 5) # via t parameter + assert_equal(vtc.get_data_shape(), (61, 42, 44, 5)) + vtc.set_data_shape((61, 42, 44, 3)) # via shape parameter + assert_equal(vtc.get_data_shape(), (61, 42, 44, 3)) + + # raise error when neither shape nor zyx nor t is specified + assert_raises(HeaderDataError, vtc.set_data_shape, None, None, None) + + # raise error when n is negative + assert_raises(HeaderDataError, vtc.set_data_shape, (45, 39, 57, -1)) + assert_raises(HeaderDataError, vtc.set_data_shape, None, None, -1) + + +def test_BvVtcHeader_set_framing_cube(): + vtc = BvVtcHeader() + assert_equal(vtc.framing_cube, (256, 256, 256)) + vtc.framing_cube = (512, 512, 512) + assert_equal(vtc.framing_cube, (512, 512, 512)) + vtc.framing_cube = (512, 513, 514) + assert_equal(vtc.framing_cube, (512, 513, 514)) + + +def test_BvVtcHeader_xflip(): + vtc = BvVtcHeader() 
+ assert_true(vtc.get_xflip()) + vtc.set_xflip(False) + assert_false(vtc.get_xflip()) + vtc.set_xflip(True) + assert_true(vtc.get_xflip()) + vtc.set_xflip(0) + assert_raises(BvError, vtc.get_xflip) + + +def test_BvVtcHeader_guess_framing_cube(): + vtc = BvVtcHeader() + assert_equal(vtc._guess_framing_cube(), (256, 256, 256)) + vtc._hdr_dict['x_end'] = 400 + vtc._hdr_dict['y_end'] = 400 + vtc._hdr_dict['z_end'] = 400 + assert_equal(vtc._guess_framing_cube(), (512, 512, 512)) + + +def test_BvVtcHeader_zooms(): + vtc = BvVtcHeader() + assert_equal(vtc.get_zooms(), (3.0, 3.0, 3.0)) + + # set all zooms to one value (default for VTC files) + vtc.set_zooms(2) + assert_equal(vtc.get_zooms(), (2.0, 2.0, 2.0)) + vtc.set_zooms((1.0, 1.0, 1.0)) + assert_equal(vtc.get_zooms(), (1.0, 1.0, 1.0)) + vtc.set_zooms((4, 4, 4)) + assert_equal(vtc.get_zooms(), (4.0, 4.0, 4.0)) + + # set zooms to different values for the three dimensions (not possible) + assert_raises(BvError, vtc.set_zooms, (1.0, 2.0, 3.0)) + + +def test_BvVtcHeader_fileversion_error(): + vtc = BvVtcHeader() + vtc._hdr_dict['version'] = 4 + assert_raises(HeaderDataError, BvVtcHeader.from_header, vtc) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index efe51c7d5a..421f78e202 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -281,7 +281,7 @@ def set_data_dtype(self, datatype): try: code = self._data_type_codes[datatype] except KeyError: - raise MGHError('datatype dtype "%s" not recognized' % datatype) + raise HeaderDataError('datatype dtype "%s" not recognized' % datatype) self._header_data['type'] = code def get_zooms(self): @@ -332,6 +332,8 @@ def set_data_shape(self, shape): dims = self._header_data['dims'] # If len(dims) is 3, add a dimension. MGH header always # needs 4 dimensions. + if len(shape) == 2: + raise HeaderDataError('shape cannot have 2 dimensions') if len(shape) == 3: shape = list(shape) shape.append(1) diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 9683148e5f..fe45154af1 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -17,6 +17,7 @@ from .. import load, save from ...openers import ImageOpener from ..mghformat import MGHHeader, MGHError, MGHImage +from ...spatialimages import HeaderDataError from ...tmpdirs import InTemporaryDirectory from ...fileholders import FileHolder @@ -138,7 +139,7 @@ def bad_dtype_mgh(): def test_bad_dtype_mgh(): # Now test the above function - assert_raises(MGHError, bad_dtype_mgh) + assert_raises(HeaderDataError, bad_dtype_mgh) def test_filename_exts(): diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index 265e77f78a..ac530f9fd7 100644 --- a/nibabel/imageclasses.py +++ b/nibabel/imageclasses.py @@ -1,11 +1,11 @@ # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the NiBabel package for the # copyright and license terms. 
# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Define supported image classes and names ''' from .analyze import AnalyzeImage @@ -20,6 +20,7 @@ from .spm2analyze import Spm2AnalyzeImage from .volumeutils import Recoder from .deprecated import deprecate_with_version +from .brainvoyager import BvMskImage, BvVtcImage, BvVmpImage, BvVmrImage from .optpkg import optional_package _, have_scipy, _ = optional_package('scipy') @@ -29,7 +30,8 @@ all_image_classes = [Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image, Spm2AnalyzeImage, Spm99AnalyzeImage, AnalyzeImage, Minc1Image, Minc2Image, MGHImage, - PARRECImage, GiftiImage] + PARRECImage, GiftiImage, + BvMskImage, BvVtcImage, BvVmpImage, BvVmrImage] # DEPRECATED: mapping of names to classes and class functionality @@ -81,6 +83,26 @@ def __getitem__(self, *args, **kwargs): 'has_affine': True, 'makeable': True, 'rw': True}, + vtc={'class': BvVtcImage, + 'ext': '.vtc', + 'has_affine': True, + 'makeable': True, + 'rw': True}, + msk={'class': BvMskImage, + 'ext': '.msk', + 'has_affine': True, + 'makeable': True, + 'rw': True}, + vmp={'class': BvVmpImage, + 'ext': '.vmp', + 'has_affine': True, + 'makeable': True, + 'rw': True}, + vmr={'class': BvVmrImage, + 'ext': '.vmr', + 'has_affine': True, + 'makeable': True, + 'rw': True}, par={'class': PARRECImage, 'ext': '.par', 'has_affine': True, @@ -103,6 +125,10 @@ def __getitem__(self, *args, **kwargs): ('mgh', '.mgh'), ('mgz', '.mgz'), ('par', '.par'), + ('vtc', '.vtc'), + ('msk', '.msk'), + ('vmp', '.vmp'), + ('vmr', '.vmr'), )) # Image classes known to require spatial axes to be first in index ordering. diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index d910f7bb22..f8e00ebbee 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -297,13 +297,44 @@ def supported_np_types(obj): except HeaderDataError: continue # Did set work? - if np.dtype(obj.get_data_dtype()) == np.dtype(np_type): + if np.dtype(obj.get_data_dtype()) == np.dtype(np_type) or \ + np.dtype(obj.get_data_dtype()) == np.dtype(np_type).newbyteorder('S'): supported.append(np_type) # Reset original header dtype obj.set_data_dtype(dt) return set(supported) +def supported_dimensions(obj): + """ Data dimensions that instance `obj` supports + + Parameters + ---------- + obj : object + Object implementing `get_data_shape` and `set_data_shape`. The object + should raise ``HeaderDataError`` for setting unsupported dimensions. + The object will likely be a header or a :class:`SpatialImage` + + Returns + ------- + dimensions : set + set of data dimensions that `obj` supports + """ + shape = obj.get_data_shape() + dimensions = [] + for dim in [1, 2, 3, 4]: + try: + obj.set_data_shape(np.ones(dim)) + except HeaderDataError: + continue + # Did set work? 
+ if len(obj.get_data_shape()) == dim: + dimensions.append(dim) + # Reset original header dtype + obj.set_data_shape(shape) + return set(dimensions) + + class Header(SpatialHeader): '''Alias for SpatialHeader; kept for backwards compatibility.''' diff --git a/nibabel/tests/data/test.msk b/nibabel/tests/data/test.msk new file mode 100644 index 0000000000..038f6eb45f Binary files /dev/null and b/nibabel/tests/data/test.msk differ diff --git a/nibabel/tests/data/test.vmp b/nibabel/tests/data/test.vmp new file mode 100644 index 0000000000..4ee875c732 Binary files /dev/null and b/nibabel/tests/data/test.vmp differ diff --git a/nibabel/tests/data/test.vmr b/nibabel/tests/data/test.vmr new file mode 100644 index 0000000000..394f14c971 Binary files /dev/null and b/nibabel/tests/data/test.vmr differ diff --git a/nibabel/tests/data/test.vtc b/nibabel/tests/data/test.vtc new file mode 100644 index 0000000000..8b226acbd1 Binary files /dev/null and b/nibabel/tests/data/test.vtc differ diff --git a/nibabel/tests/data/test2.vmp b/nibabel/tests/data/test2.vmp new file mode 100644 index 0000000000..1dff5febed Binary files /dev/null and b/nibabel/tests/data/test2.vmp differ diff --git a/nibabel/tests/data/test3.vmp b/nibabel/tests/data/test3.vmp new file mode 100644 index 0000000000..d59752a45a Binary files /dev/null and b/nibabel/tests/data/test3.vmp differ diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index abc7856623..7dcc0ce39e 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -17,7 +17,7 @@ import numpy as np -from ..arrayproxy import ArrayProxy, is_proxy +from ..arrayproxy import ArrayProxy, CArrayProxy, is_proxy from ..nifti1 import Nifti1Header from numpy.testing import assert_array_equal, assert_array_almost_equal @@ -50,11 +50,6 @@ def copy(self): return FunkyHeader(self.shape) -class CArrayProxy(ArrayProxy): - # C array memory layout - order = 'C' - - def test_init(): bio = BytesIO() shape = [2, 3, 4] diff --git a/nibabel/tests/test_files_interface.py b/nibabel/tests/test_files_interface.py index 788be6a31a..59853e8092 100644 --- a/nibabel/tests/test_files_interface.py +++ b/nibabel/tests/test_files_interface.py @@ -15,7 +15,8 @@ from .. 
import Nifti1Image, Nifti1Pair, MGHImage, all_image_classes from ..externals.six import BytesIO from ..fileholders import FileHolderError -from ..spatialimages import SpatialImage +from ..spatialimages import (SpatialImage, supported_np_types, + supported_dimensions) from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) @@ -23,9 +24,6 @@ def test_files_spatialimages(): - # test files creation in image classes - arr = np.zeros((2, 3, 4)) - aff = np.eye(4) klasses = [klass for klass in all_image_classes if klass.rw and issubclass(klass, SpatialImage)] for klass in klasses: @@ -37,12 +35,18 @@ def test_files_spatialimages(): # If we can't create new images in memory without loading, bail here if not klass.makeable: continue - # MGHImage accepts only a few datatypes - # so we force a type change to float32 - if klass == MGHImage: - img = klass(arr.astype(np.float32), aff) - else: - img = klass(arr, aff) + # test files creation in image classes + arr = np.zeros((2, 3, 4)) + aff = np.eye(4) + # some Image types accept only a few datatypes and shapes + # so we check and force a type change to a compatible dtype + supported_dims = supported_dimensions(klass.header_class()) + if len(arr.shape) not in supported_dims: + arr = np.ones(tuple([d+2 for d in range(supported_dims.pop())])) + supported_dtypes = supported_np_types(klass.header_class()) + if arr.dtype not in supported_dtypes: + arr = arr.astype(supported_dtypes.pop()) + img = klass(arr, aff) for key, value in img.file_map.items(): assert_equal(value.filename, None) assert_equal(value.fileobj, None) @@ -86,14 +90,22 @@ def test_files_interface(): def test_round_trip_spatialimages(): # write an image to files - data = np.arange(24, dtype='i4').reshape((2, 3, 4)) - aff = np.eye(4) klasses = [klass for klass in all_image_classes if klass.rw and issubclass(klass, SpatialImage)] for klass in klasses: file_map = klass.make_file_map() for key in file_map: file_map[key].fileobj = BytesIO() + data = np.arange(24, dtype='i4').reshape((2, 3, 4)) + aff = np.eye(4) + # some Image types accept only a few datatypes and shapes + # so we check and force a type change to a compatible dtype + supported_dims = supported_dimensions(klass.header_class()) + if len(data.shape) not in supported_dims: + data = np.ones(tuple([d+2 for d in range(supported_dims.pop())])) + supported_dtypes = supported_np_types(klass.header_class()) + if data.dtype not in supported_dtypes: + data = data.astype(supported_dtypes.pop()) img = klass(data, aff) img.file_map = file_map img.to_file_map() diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index e86b8c8ea7..f968f29963 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -32,11 +32,14 @@ _, have_scipy, _ = optional_package('scipy') _, have_h5py, _ = optional_package('h5py') -from .. import (AnalyzeImage, Spm99AnalyzeImage, Spm2AnalyzeImage, - Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image, - MGHImage, Minc1Image, Minc2Image) -from ..spatialimages import SpatialImage -from .. 
import minc1, minc2, parrec +from nibabel import (AnalyzeImage, Spm99AnalyzeImage, Spm2AnalyzeImage, + Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image, + MGHImage, Minc1Image, Minc2Image, BvVtcImage, BvMskImage, + BvVmpImage, BvVmrImage) +from nibabel.spatialimages import (SpatialImage, supported_np_types, + supported_dimensions) +from nibabel.ecat import EcatImage +from nibabel import minc1, minc2, parrec from nose import SkipTest from nose.tools import (assert_true, assert_false, assert_raises, @@ -51,6 +54,10 @@ from .test_minc1 import EXAMPLE_IMAGES as MINC1_EXAMPLE_IMAGES from .test_minc2 import EXAMPLE_IMAGES as MINC2_EXAMPLE_IMAGES from .test_parrec import EXAMPLE_IMAGES as PARREC_EXAMPLE_IMAGES +from nibabel.brainvoyager.tests import BVVTC_EXAMPLE_IMAGES +from nibabel.brainvoyager.tests import BVMSK_EXAMPLE_IMAGES +from nibabel.brainvoyager.tests import BVVMP_EXAMPLE_IMAGES +from nibabel.brainvoyager.tests import BVVMR_EXAMPLE_IMAGES class GenericImageAPI(ValidateAPI): @@ -140,6 +147,10 @@ def validate_header_deprecated(self, imaker, params): img = imaker() with clear_and_catch_warnings() as w: warnings.simplefilter('always', DeprecationWarning) + # Ignore numpy.rint warning in python3/windows + warnings.filterwarnings('ignore', + 'invalid value encountered in rint') + img = imaker() hdr = img.get_header() assert_equal(len(w), 1) assert_true(hdr is img.header) @@ -160,6 +171,9 @@ def validate_shape_deprecated(self, imaker, params): # Check deprecated get_shape API with clear_and_catch_warnings() as w: warnings.simplefilter('always', DeprecationWarning) + # Ignore numpy.rint warning in python3/windows + warnings.filterwarnings('ignore', + 'invalid value encountered in rint') img = imaker() assert_equal(img.get_shape(), params['shape']) assert_equal(len(w), 1) @@ -175,12 +189,21 @@ def validate_dtype(self, imaker, params): rt_img = bytesio_round_trip(img) assert_equal(rt_img.get_data_dtype().type, params['dtype']) # Setting to a different dtype - img.set_data_dtype(np.float32) # assumed supported for all formats - assert_equal(img.get_data_dtype().type, np.float32) + new_dtype = np.float32 + # some Image types accept only a few datatypes and shapes + # so we check and force a type change to a compatible dtype + try: + supported_dtypes = supported_np_types(img.header_class()) + if new_dtype not in supported_dtypes: + new_dtype = supported_dtypes.pop() + except TypeError: + pass + img.set_data_dtype(new_dtype) + assert_equal(img.get_data_dtype().type, new_dtype) # dtype survives round trip if self.can_save: rt_img = bytesio_round_trip(img) - assert_equal(rt_img.get_data_dtype().type, np.float32) + assert_equal(rt_img.get_data_dtype().type, new_dtype) def validate_data(self, imaker, params): # Check get data returns array, and caches @@ -267,7 +290,14 @@ def validate_filenames(self, imaker, params): if not self.can_save: raise SkipTest img = imaker() - img.set_data_dtype(np.float32) # to avoid rounding in load / save + # Setting to a different dtype to avoid rounding in load / save + new_dtype = np.float32 + # some Image types accept only a few datatypes and shapes + # so we check and force a type change to a compatible dtype + supported_dtypes = supported_np_types(img.header_class()) + if new_dtype not in supported_dtypes: + new_dtype = supported_dtypes.pop() + img.set_data_dtype(new_dtype) # The bytesio_round_trip helper tests bytesio load / save via file_map rt_img = bytesio_round_trip(img) assert_array_equal(img.shape, rt_img.shape) @@ -323,6 +353,8 @@ class 
MakeImageAPI(LoadImageAPI): header_maker = None # Example shapes for created images example_shapes = ((2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)) + # Example dtypes for created images + example_dtypes = (np.uint8, np.int16, np.float32) def img_from_arr_aff(self, arr, aff, header=None): return self.image_maker(arr, aff, header) @@ -334,10 +366,19 @@ def obj_params(self): # Create a new images aff = np.diag([1, 2, 3, 1]) + # Try to retrieve allowed dims + supported_dims = supported_dimensions(self.header_maker()) + self.example_shapes = (shape for shape in self.example_shapes + if len(shape) in supported_dims) + # Try to retrieve allowed dtypes + supported_dtypes = supported_np_types(self.header_maker()) + self.example_dtypes = (dtype for dtype in self.example_dtypes + if dtype in supported_dtypes) + def make_imaker(arr, aff, header=None): return lambda: self.image_maker(arr, aff, header) for shape in self.example_shapes: - for dtype in (np.uint8, np.int16, np.float32): + for dtype in self.example_dtypes: arr = np.arange(np.prod(shape), dtype=np.float32).reshape(shape) hdr = self.header_maker() hdr.set_data_dtype(dtype) @@ -453,3 +494,39 @@ class TestMGHAPI(ImageHeaderAPI): has_scaling = True can_save = True standard_extension = '.mgh' + + +class TestBvVtcAPI(ImageHeaderAPI): + klass = image_maker = BvVtcImage + loader = BvVtcImage.load + example_images = BVVTC_EXAMPLE_IMAGES + has_scaling = False + can_save = True + standard_extension = '.vtc' + + +class TestBvMskAPI(ImageHeaderAPI): + klass = image_maker = BvMskImage + loader = BvMskImage.load + example_images = BVMSK_EXAMPLE_IMAGES + has_scaling = False + can_save = True + standard_extension = '.msk' + + +class TestBvVmpAPI(ImageHeaderAPI): + klass = image_maker = BvVmpImage + loader = BvVmpImage.load + example_images = BVVMP_EXAMPLE_IMAGES + has_scaling = False + can_save = True + standard_extension = '.vmp' + + +class TestBvVmrAPI(TestBvMskAPI): + klass = image_maker = BvVmrImage + loader = BvVmrImage.load + example_images = BVVMR_EXAMPLE_IMAGES + has_scaling = False + can_save = True + standard_extension = '.vmr' diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index d43d1ee581..f71f7156b3 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -23,11 +23,13 @@ from .. import loadsave as nils from .. 
import (Nifti1Image, Nifti1Header, Nifti1Pair, Nifti2Image, Nifti2Pair, Minc1Image, Minc2Image, Spm2AnalyzeImage, Spm99AnalyzeImage, - AnalyzeImage, MGHImage, all_image_classes) + AnalyzeImage, MGHImage, BvVtcImage, BvMskImage, BvVmpImage, + BvVmrImage, all_image_classes) from ..tmpdirs import InTemporaryDirectory from ..volumeutils import native_code, swapped_code from ..optpkg import optional_package -from ..spatialimages import SpatialImage +from ..spatialimages import (SpatialImage, supported_np_types, + supported_dimensions) from numpy.testing import assert_array_equal, assert_array_almost_equal from nose.tools import assert_true, assert_equal @@ -56,11 +58,17 @@ def test_conversion_spatialimages(): for r_class in klasses: if not r_class.makeable: continue + if npt not in supported_np_types(r_class.header_class()) or \ + len(shape) not in supported_dimensions(r_class.header_class()): + continue img = r_class(data, affine) img.set_data_dtype(npt) for w_class in klasses: if not w_class.makeable: continue + if npt not in supported_np_types(w_class.header_class()) or \ + len(shape) not in supported_dimensions(w_class.header_class()): + continue img2 = w_class.from_image(img) assert_array_equal(img2.get_data(), data) assert_array_equal(img2.affine, affine) @@ -322,3 +330,15 @@ def test_guessed_image_type(): assert_equal(nils.guessed_image_type( pjoin(DATA_PATH, 'analyze.hdr')), Spm2AnalyzeImage) + assert_equal(nils.guessed_image_type( + pjoin(DATA_PATH, 'test.vtc')), + BvVtcImage) + assert_equal(nils.guessed_image_type( + pjoin(DATA_PATH, 'test.msk')), + BvMskImage) + assert_equal(nils.guessed_image_type( + pjoin(DATA_PATH, 'test.vmp')), + BvVmpImage) + assert_equal(nils.guessed_image_type( + pjoin(DATA_PATH, 'test.vmr')), + BvVmrImage) diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index e72ad6bbbc..f33a483c35 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -19,7 +19,8 @@ AnalyzeImage, AnalyzeHeader, Minc1Image, Minc2Image, Spm2AnalyzeImage, Spm99AnalyzeImage, - MGHImage, all_image_classes) + MGHImage, BvVtcImage, BvMskImage, + BvVmpImage, BvVmrImage, all_image_classes) from nose.tools import assert_true @@ -116,7 +117,11 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, ('tiny.mnc', Minc1Image), ('small.mnc', Minc2Image), ('test.mgz', MGHImage), - ('analyze.hdr', Spm2AnalyzeImage)]: + ('analyze.hdr', Spm2AnalyzeImage), + ('test.vtc', BvVtcImage), + ('test.msk', BvMskImage), + ('test.vmp', BvVmpImage), + ('test.vmr', BvVmrImage)]: # print('Testing: %s %s' % (img_filename, image_klass.__name__)) test_image_class(pjoin(DATA_PATH, img_filename), image_klass) diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index a5f36100d9..e5d7af3ed1 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -33,7 +33,11 @@ def test_read_img_data(): 'minc1_1_scale.mnc', 'minc1_4d.mnc', 'test.mgz', - 'tiny.mnc' + 'tiny.mnc', + 'test.vtc', + 'test.msk', + 'test.vmp', + 'test.vmr' ): fpath = pjoin(data_path, fname) img = load(fpath) diff --git a/setup.py b/setup.py index 5e9bf51c29..7ee5cba916 100755 --- a/setup.py +++ b/setup.py @@ -92,6 +92,8 @@ def main(**extra_args): 'nibabel.benchmarks', 'nibabel.streamlines', 'nibabel.streamlines.tests', + 'nibabel.brainvoyager', + 'nibabel.brainvoyager.tests', # install nisext as its own package 'nisext', 'nisext.tests'],
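The registrations in imageclasses.py and setup.py above should let the new formats flow through nibabel's normal entry points. A minimal usage sketch, assuming the test.vtc fixture added in this patch and the shape/zoom/dtype values listed in BVVTC_EXAMPLE_IMAGES:

import os
import nibabel as nib
from nibabel.testing import data_path

img = nib.load(os.path.join(data_path, 'test.vtc'))
print(type(img).__name__)        # BvVtcImage
print(img.shape)                 # (10, 10, 10, 5), stored as Z x Y x X x volumes
print(img.header.get_zooms())    # (3.0, 3.0, 3.0)
data = img.get_data()            # data is proxied; read on first access
print(data.dtype)                # float32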
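BvVtcHeader.set_data_shape keeps the data shape and the bounding-box fields in lock-step: each spatial extent satisfies end = start + n_voxels * resolution, with the fourth axis stored in 'volumes'. A worked check of that arithmetic against the test.vtc header values from BVVTC_EXAMPLE_HDRS (this only restates the relation, it is not the implementation):

# fields from the test.vtc example header
x_start, x_end = 120, 150
y_start, y_end = 120, 150
z_start, z_end = 120, 150
resolution = 3
volumes = 5

shape = ((z_end - z_start) // resolution,   # Z
         (y_end - y_start) // resolution,   # Y
         (x_end - x_start) // resolution,   # X
         volumes)                           # T
print(shape)                                # (10, 10, 10, 5)

# and in the other direction, as set_data_shape computes it:
assert x_start + shape[2] * resolution == x_end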
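The xflip accessors are a thin view on the lr_convention header field: a stored 1 reads back as True, 2 as False, and any other value makes get_xflip raise BvError. VMR headers accept both settings (most other BV headers refuse xflip=False, as test_BvFileHeader_xflip shows). A short sketch of the mapping, following the defaults exercised in test_bv_vmr.py; the private _hdr_dict access mirrors what the tests do:

from nibabel.brainvoyager.bv import BvError
from nibabel.brainvoyager.bv_vmr import BvVmrHeader

hdr = BvVmrHeader()
print(hdr.get_xflip())                  # True, lr_convention == 1

hdr.set_xflip(False)
print(hdr._hdr_dict['lr_convention'])   # 2

hdr.set_xflip(0)                        # neither True nor False -> unknown
try:
    hdr.get_xflip()
except BvError as err:
    print(err)                          # left-right convention is unknown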
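The expected matrices in test_combine_st are consistent with treating the stored past spatial transformations as plain 4x4 affines and multiplying them in order, with the product inverted for inv=True. combine_st itself is not shown in this diff, so the snippet below only reproduces the expected values with NumPy as a cross-check of the test data:

import numpy as np

# the two transforms stored in test.vmr (see BVVMR_EXAMPLE_HDRS)
st1 = np.array([[1., 0., 0., -1.],
                [0., 1., 0., 0.],
                [0., 0., 1., -1.],
                [0., 0., 0., 1.]])
st2 = np.array([[1., 0., 0., 1.],
                [0., 1., 0., 1.],
                [0., 0., 1., 0.],
                [0., 0., 0., 1.]])

forward = st1.dot(st2)              # translation (0, 1, -1): the inv=False expectation
backward = np.linalg.inv(forward)   # translation (0, -1, 1): the inv=True expectation
print(forward)
print(backward)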
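The new supported_dimensions helper in spatialimages.py is the dimensional twin of supported_np_types: it probes set_data_shape with 1- to 4-D shapes and keeps whichever dimensionalities do not raise HeaderDataError. The adapted interface tests use both probes to build an array the format under test can actually hold; a sketch of that pattern for the VTC header, where only 4-D shapes should survive because set_data_shape insists on ZYXT:

import numpy as np
from nibabel.spatialimages import supported_dimensions, supported_np_types
from nibabel.brainvoyager import BvVtcImage

hdr = BvVtcImage.header_class()
dims = supported_dimensions(hdr)          # expected {4}
dtypes = supported_np_types(hdr)          # whatever the VTC datatype codes allow

shape = tuple(d + 2 for d in range(max(dims)))
dtype = np.float32 if np.float32 in dtypes else dtypes.pop()
arr = np.zeros(shape, dtype=dtype)
img = BvVtcImage(arr, np.eye(4))          # an in-memory image the format accepts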
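The freesurfer changes appear to serve the same goal: MGHHeader.set_data_shape now rejects 2-D shapes up front, and set_data_dtype raises HeaderDataError rather than MGHError, so the probing helpers and the shared tests see one consistent exception type. For example:

from nibabel.freesurfer.mghformat import MGHHeader
from nibabel.spatialimages import HeaderDataError

hdr = MGHHeader()
try:
    hdr.set_data_shape((32, 32))      # 2-D cannot be represented in MGH
except HeaderDataError as err:
    print(err)                        # shape cannot have 2 dimensions

try:
    hdr.set_data_dtype('not-a-dtype')
except HeaderDataError as err:
    print(err)                        # datatype dtype "not-a-dtype" not recognized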